filename | text |
---|---|
the-stack_106_24427 | from setuptools import find_packages, setup
with open('README.md', 'r') as readme:
long_description = readme.read()
setup(
name='epa-sld-update',
package_dir={"": "src"},
packages=find_packages('src'),
version='0.0.0',
description='EPA Access to Jobs',
long_description=long_description,
author='Renaissance Planning',
license='Apache 2.0',
)
|
the-stack_106_24428 | from dagster_graphql.client.util import parse_raw_log_lines
from dagster_k8s.utils import get_pod_names_in_job, retrieve_pod_logs, wait_for_job_success
from dagster import check
def wait_for_job_and_get_logs(job_name, namespace):
'''Wait for a dagster-k8s job to complete, ensure it launched only one pod,
and then grab the logs from the pod it launched.
'''
check.str_param(job_name, 'job_name')
check.str_param(namespace, 'namespace')
wait_for_job_success(job_name, namespace=namespace)
pod_names = get_pod_names_in_job(job_name, namespace)
assert len(pod_names) == 1
pod_name = pod_names[0]
raw_logs = retrieve_pod_logs(pod_name, namespace=namespace)
return parse_raw_log_lines(raw_logs.split('\n'))
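# Example usage -- a minimal sketch, not part of the original module; the job
# name and namespace below are made-up placeholders for illustration only.
if __name__ == '__main__':
    parsed_logs = wait_for_job_and_get_logs('dagster-job-example', namespace='dagster-test')
    for log_entry in parsed_logs:
        print(log_entry)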
|
the-stack_106_24429 | #classe para tratar os dados do dataset
from dataset import Dataset
#classe para tratar os dados do modelo
from model import Model
#classe para logs e metricas
from log import Log
dataset = Dataset('dados1', 'dados2')
x, y = dataset.normalize_dataset_train()
real_cpf, normalized_df_production = dataset.normalize_dataset_production()
model = Model(x, y, normalized_df_production)
predict_value = model.model_learning()
log = Log(real_cpf, predict_value)
log.results() |
the-stack_106_24431 | from django.utils.deprecation import MiddlewareMixin
from django.shortcuts import HttpResponse, redirect, reverse
from django.conf import settings
import re
from rbac import models
class RbacMiddleware(MiddlewareMixin):
def process_request(self, request):
# Get the URL path of the current request
url = request.path_info
request.current_menu_id = None
request.breadcrumb_list = [{'title': '首页', 'url': '/index/'}, ]  # '首页' = 'Home'
# Whitelist (no checks required)
for i in settings.WHITE_LIST:
if re.match(i, url):
return
# Not logged in: redirect to the login page
is_login = request.session.get('is_login', False)
if not is_login:
return redirect(reverse('host:login'))
# Already logged in: URLs that do not require a permission check may be accessed
for i in settings.NO_PERMISSION_LIST:
if re.match(i, url):
return
# URLs that actually require a permission check
permission = request.session.get(settings.PERMISSION_SESSION_KEY)
for i in permission.values():
if re.match(r'^{}$'.format(i['url']), url):
# {'url': '/department_list/', 'id': 1, 'pid': None}  second-level menu (parent permission)
# {'url': '/department_add/', 'id': 2, 'pid': 1}  child permission
id = i['id']
pid = i['pid']
pname = i['pname']
if pid:
# Has a pid: the current URL is a child permission
request.current_menu_id = pid
# Add the parent permission's info to the breadcrumb
# parent = models.Permission.objects.get(pk=pid)
# request.breadcrumb_list.append({'title': parent.title, 'url': parent.url})
request.breadcrumb_list.append(
{'title': permission[pname]['title'], 'url': permission[pname]['url']})
request.breadcrumb_list.append({'title': i['title'], 'url': i['url']})
else:
# No pid: the current URL is a parent permission
request.current_menu_id = id
request.breadcrumb_list.append({'title': i['title'], 'url': i['url']})
return
return HttpResponse('没有权限,请联系管理员')  # 'No permission, please contact the administrator'
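# A minimal sketch of the settings this middleware assumes (the values below are
# illustrative assumptions, not taken from the original project):
#
#   WHITE_LIST = [r'^/login/$', r'^/reg/$']              # URLs exempt from every check
#   NO_PERMISSION_LIST = [r'^/index/$', r'^/logout/$']   # reachable by any logged-in user
#   PERMISSION_SESSION_KEY = 'permission_dict'           # session key written at login, holding e.g.
#   # {'department_list': {'id': 1, 'pid': None, 'pname': None,
#   #                      'title': 'Departments', 'url': '/department_list/'}, ...}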
|
the-stack_106_24437 | """Sensor platform for Trakt"""
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, CONF_CLIENT_ID
from homeassistant.helpers.entity import Entity
from .const import ATTRIBUTION, DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up device tracker for Mikrotik component."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities([TraktUpcomingCalendarSensor(coordinator)], True)
class TraktUpcomingCalendarSensor(Entity):
"""Representation of a Trakt Upcoming Calendar sensor."""
def __init__(self, coordinator):
"""Initialize the sensor."""
self.coordinator = coordinator
self._name = coordinator.config_entry.data[CONF_NAME]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique id of the entity."""
return self.coordinator.config_entry.data[CONF_CLIENT_ID]
@property
def should_poll(self):
"""Disable polling."""
return False
@property
def state(self):
"""Return the state of the sensor."""
return len(self.coordinator.calendar) if self.coordinator.calendar else 0
@property
def icon(self):
"""Return the icon to use in the frontend."""
return "mdi:calendar"
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return "shows"
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
attributes = {
"data": self.coordinator.data,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
return attributes
async def async_added_to_hass(self):
"""Handle entity which will be added."""
self.async_on_remove(
self.coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Request coordinator to update data."""
await self.coordinator.async_request_refresh()
|
the-stack_106_24439 | #!/usr/bin/env python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example adds a Universal App campaign.
To get campaigns, run get_campaigns.py. To upload image assets for this
campaign, run upload_image.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
import datetime
import uuid
from googleads import adwords
def main(client):
# Initialize appropriate services.
campaign_service = client.GetService('CampaignService', version='v201802')
budget_id = CreateBudget(client)
# Create the Universal App campaign.
universal_app_campaign = {
'name': 'Interplanetary Cruise App #%s' % uuid.uuid4(),
# Recommendation: Set the campaign to PAUSED when creating it to stop the
# ads from immediately serving. Set to ENABLED once you've added targeting
# and the ads are ready to serve.
'status': 'PAUSED',
'advertisingChannelType': 'MULTI_CHANNEL',
'advertisingChannelSubType': 'UNIVERSAL_APP_CAMPAIGN',
# Set the campaign's bidding strategy. Universal app campaigns only
# support TARGET_CPA bidding strategy.
'biddingStrategyConfiguration': {
# Set the target CPA to $1 / app install.
'biddingScheme': {
'xsi_type': 'TargetCpaBiddingScheme',
'targetCpa': {
'microAmount': '1000000'
}
},
'biddingStrategyType': 'TARGET_CPA'
},
# Note that only the budgetId is required
'budget': {
'budgetId': budget_id
},
# Optional fields
'startDate': (datetime.datetime.now() +
datetime.timedelta(1)).strftime('%Y%m%d'),
'endDate': (datetime.datetime.now() +
datetime.timedelta(365)).strftime('%Y%m%d'),
}
universal_app_campaign['settings'] = [
# Set the campaign's assets and ad text ideas. These values will
# be used to generate ads.
{
'xsi_type': 'UniversalAppCampaignSetting',
'appId': 'com.labpixies.colordrips',
'appVendor': 'VENDOR_GOOGLE_MARKET',
'description1': 'A cool puzzle game',
'description2': 'Remove connected blocks',
'description3': '3 difficulty levels',
'description4': '4 colorful fun skins',
# Optional: You can set up to 20 image assets for your campaign.
# See upload_image.py for an example on how to upload images.
#
# 'imageMediaIds': [INSERT_IMAGE_MEDIA_ID(s)_HERE]
}
]
# Optimize this campaign for getting new users for your app.
universal_app_campaign_setting = universal_app_campaign['settings'][0]
universal_app_campaign_setting['universalAppBiddingStrategyGoalType'] = (
'OPTIMIZE_FOR_INSTALL_CONVERSION_VOLUME')
# Optional: If you select the OPTIMIZE_FOR_IN_APP_CONVERSION_VOLUME goal type,
# then also specify your in-app conversion types so AdWords can focus your
# campaign on people who are most likely to complete the corresponding in-app
# actions.
#
# Conversion type IDs can be retrieved using ConversionTrackerService.get.
# universal_app_campaign['selectiveOptimization'] = {
# 'conversionTypeIds': [INSERT_CONVERSION_TYPE_ID(s)_HERE]
# }
# Optional: Set the campaign settings for Advanced location options.
universal_app_campaign['settings'].append({
'xsi_type': 'GeoTargetTypeSetting',
'positiveGeoTargetType': 'DONT_CARE',
'negativeGeoTargetType': 'DONT_CARE'
})
# Construct operations and add campaigns.
operations = [{
'operator': 'ADD',
'operand': universal_app_campaign
}]
campaigns = campaign_service.mutate(operations)['value']
# Display results.
if campaigns:
for campaign in campaigns:
print(('Universal App Campaign with name "%s" and id "%s" was added.'
% (campaign['name'], campaign['id'])))
# Optional: Set the campaign's location and language targeting. No other
# targeting criteria can be used for Universal App campaigns.
SetCampaignTargetingCriteria(client, campaign)
else:
print('No Universal App campaigns were added.')
def CreateBudget(client):
"""Creates a budget and returns its budgetId.
Args:
client: An AdWordsClient instance.
Returns:
An int budgetId for the created Budget.
"""
budget_service = client.GetService('BudgetService', version='v201802')
# Create a budget.
budget = {
'name': 'Interplanetary Cruise App Budget #%s' % uuid.uuid4(),
'amount': {
'microAmount': '50000000'
},
'deliveryMethod': 'STANDARD',
'isExplicitlyShared': False
}
budget_operations = [{
'operator': 'ADD',
'operand': budget
}]
# Create the budget and return its ID.
budget_id = budget_service.mutate(budget_operations)['value'][0]['budgetId']
return budget_id
def SetCampaignTargetingCriteria(client, campaign):
"""Sets targeting criteria for the given campaign.
Args:
client: An AdWordsClient instance.
campaign: A suds object representing the campaign to which we wish to attach
targeting criteria.
"""
campaign_criterion_service = client.GetService('CampaignCriterionService')
# Create locations. The IDs can be found in the documentation or retrieved
# with the LocationCriterionService.
criteria = [
{
'xsi_type': 'Location',
'id': 21137 # California
},
{
'xsi_type': 'Location',
'id': 2484 # Mexico
},
{
'xsi_type': 'Language',
'id': 1000 # English
},
{
'xsi_type': 'Language',
'id': 1003 # Spanish
}
]
operations = [{
'operator': 'ADD',
'operand': {
'campaignId': campaign['id'],
'criterion': criterion
}
} for criterion in criteria]
response = campaign_criterion_service.mutate(operations)
if response and 'value' in response:
# Display the added campaign targets.
for criterion in response['value']:
print(('Campaign criteria of type "%s" and id "%s" was added.'
% (criterion['criterion']['type'],
criterion['criterion']['id'])))
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
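# For reference, the "googleads.yaml" file read by LoadFromStorage typically looks
# like the sketch below (standard googleads library fields; the values are
# placeholders, not real credentials):
#
#   adwords:
#     developer_token: INSERT_DEVELOPER_TOKEN_HERE
#     client_customer_id: INSERT_CLIENT_CUSTOMER_ID_HERE
#     client_id: INSERT_OAUTH2_CLIENT_ID_HERE
#     client_secret: INSERT_OAUTH2_CLIENT_SECRET_HERE
#     refresh_token: INSERT_REFRESH_TOKEN_HERE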
|
the-stack_106_24441 | #!/usr/bin/env python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Import libraries and modules
import tensorflow as tf
# Set logging verbosity to INFO for richer output
tf.logging.set_verbosity(tf.logging.INFO)
# The number of video classes
NUM_CLASSES = 4716
# Create an input function to read our training and validation data
# Then provide the results to the Estimator API
def read_dataset_video(file_pattern, mode, batch_size):
def _input_fn():
print("\nread_dataset_video: _input_fn: file_pattern = {}".format(file_pattern))
print("read_dataset_video: _input_fn: mode = {}".format(mode))
print("read_dataset_video: _input_fn: batch_size = {}".format(batch_size))
# This function will decode frame examples from the frame level TF Records
def decode_example(serialized_examples):
# Create feature map
feature_map = {
'video_id': tf.FixedLenFeature(shape = [], dtype = tf.string),
'labels': tf.VarLenFeature(dtype = tf.int64),
'mean_rgb': tf.FixedLenFeature(shape = [1024], dtype = tf.float32),
'mean_audio': tf.FixedLenFeature(shape = [128], dtype = tf.float32)
}
# Parse TF Records into our features
features = tf.parse_single_example(serialized = serialized_examples, features = feature_map)
print("\nread_dataset_video: _input_fn: decode_example: features = {}".format(features)) # shape = video_id = (), mean_rgb = (1024,), mean_audio = (128,), labels = SparseTensor object
# Extract and format labels
sparse_labels = features.pop("labels") # SparseTensor object
print("read_dataset_video: _input_fn: decode_example: sparse_labels = {}\n".format(sparse_labels))
labels = tf.cast(x = tf.sparse_to_dense(sparse_indices = sparse_labels.values, output_shape = (NUM_CLASSES,), sparse_values = 1, validate_indices = False), dtype = tf.float32)
print("read_dataset_video: _input_fn: decode_example: labels = {}\n".format(labels)) # shape = (NUM_CLASSES,)
return features, labels
# Create list of files from file pattern
file_list = tf.gfile.Glob(filename = file_pattern)
#print("read_dataset_video: _input_fn: file_list = {}".format(file_list))
# Create dataset from file list
dataset = tf.data.TFRecordDataset(filenames = file_list)
print("read_dataset_video: _input_fn: dataset.TFRecordDataset = {}".format(dataset))
# Decode TF Record dataset examples
dataset = dataset.map(map_func = lambda x: decode_example(serialized_examples = x))
print("read_dataset_video: _input_fn: dataset.map = {}".format(dataset))
# Determine the number of times to repeat the files and whether to shuffle, based on whether we are training or evaluating
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # read files forever
# Shuffle the dataset within a buffer
dataset = dataset.shuffle(buffer_size = batch_size * 10, seed = None)
print("read_dataset_video: _input_fn: dataset.shuffle = {}".format(dataset))
else:
num_epochs = 1 # read files only once
# Repeat files num_epoch times
dataset = dataset.repeat(count = num_epochs)
print("read_dataset_video: _input_fn: dataset.repeat = {}".format(dataset))
# Group the data into batches
dataset = dataset.batch(batch_size = batch_size)
print("read_dataset_video: _input_fn: dataset.batch = {}".format(dataset))
# Create an iterator and then pull the next batch of features and labels from the example queue
batch_features, batch_labels = dataset.make_one_shot_iterator().get_next()
print("read_dataset_video: _input_fn: batch_features = {}".format(batch_features))
print("read_dataset_video: _input_fn: batch_labels = {}\n".format(batch_labels))
return batch_features, batch_labels
return _input_fn
# Create our model function to be used in our custom estimator
def video_level_model(features, labels, mode, params):
print("\nvideo_level_model: features = {}".format(features))
print("video_level_model: labels = {}".format(labels))
print("video_level_model: mode = {}".format(mode))
# 0. Configure network
# Get dynamic batch size
current_batch_size = tf.shape(features['mean_rgb'])[0]
print("video_level_model: current_batch_size = {}".format(current_batch_size))
# Concatenate the features into a single 2-D tensor
combined_features = tf.concat(values = [features['mean_rgb'], features['mean_audio']], axis = 1) # shape = (current_batch_size, 1024 + 128)
print("video_level_model: combined_features = {}".format(combined_features))
# 1. Create the DNN structure now
# Create the input layer to our frame DNN
network = combined_features # shape = (current_batch_size, 1024 + 128)
print("video_level_model: network = combined_features = {}".format(network))
# Add hidden layers with the given number of units/neurons per layer
for units in params['hidden_units']:
network = tf.layers.dense(inputs = network, units = units, activation = tf.nn.relu) # shape = (current_batch_size, units)
print("video_level_model: network = {}, units = {}".format(network, units))
# Connect the final hidden layer to a dense layer with no activation to get the logits
logits = tf.layers.dense(inputs = network, units = NUM_CLASSES, activation = None) # shape = (current_batch_size, NUM_CLASSES)
print("video_level_model: logits = {}".format(logits))
# Select the top k logits in descending order
top_k_logits = tf.nn.top_k(input = logits, k = params['top_k'], sorted = True) # shape = (current_batch_size, top_k)
print("video_level_model: top_k_logits = {}".format(top_k_logits))
# Since this is a multi-class, multi-label problem we will apply a sigmoid, not a softmax, to each logit to get its own probability
probabilities = tf.sigmoid(logits) # shape = (current_batch_size, NUM_CLASSES)
print("video_level_model: probabilities = {}".format(probabilities))
# Select the top k probabilities in descending order
top_k_probabilities = tf.sigmoid(top_k_logits.values) # shape = (current_batch_size, top_k)
print("video_level_model: top_k_probabilities = {}".format(top_k_probabilities))
# Select the top k classes in descending order of likelihood
top_k_classes = top_k_logits.indices # shape = (current_batch_size, top_k)
print("video_level_model: top_k_classes = {}".format(top_k_classes))
# The 0/1 predictions based on a threshold; in this case the threshold is whether the probability is greater than random chance
predictions = tf.where(
condition = probabilities > 1.0 / NUM_CLASSES, # shape = (current_batch_size, NUM_CLASSES)
x = tf.ones_like(tensor = probabilities),
y = tf.zeros_like(tensor = probabilities))
print("video_level_model: predictions = {}".format(predictions))
top_k_predictions = tf.where(
condition = top_k_probabilities > 1.0 / NUM_CLASSES, # shape = (current_batch_size, top_k)
x = tf.ones_like(tensor = top_k_probabilities),
y = tf.zeros_like(tensor = top_k_probabilities))
print("video_level_model: top_k_predictions = {}\n".format(top_k_predictions))
# 2. Loss function, training/eval ops
if mode in [tf.estimator.ModeKeys.TRAIN, tf.estimator.ModeKeys.EVAL]:
# Since this is a multi-class, multi-label problem, we will use sigmoid activation and cross entropy loss
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels = labels, logits = logits)
train_op = tf.contrib.layers.optimize_loss(
loss = loss,
global_step = tf.train.get_global_step(),
learning_rate = 0.01,
optimizer = "Adam")
eval_metric_ops = {
"accuracy": tf.metrics.mean_per_class_accuracy(labels = labels, predictions = predictions, num_classes = NUM_CLASSES)
}
else:
loss = None
train_op = None
eval_metric_ops = None
# 3. Create predictions
predictions_dict = {"logits": top_k_logits.values,
"probabilities": top_k_probabilities,
"predictions": top_k_predictions,
"classes": top_k_classes}
# 4. Create export outputs
export_outputs = {"predict_export_outputs": tf.estimator.export.PredictOutput(outputs = predictions_dict)}
# 5. Return EstimatorSpec
return tf.estimator.EstimatorSpec(
mode = mode,
predictions = predictions_dict,
loss = loss,
train_op = train_op,
eval_metric_ops = eval_metric_ops,
export_outputs = export_outputs)
# Create our serving input function to accept the data at serving and send it in the right format to our custom estimator
def serving_input_fn():
# This function fixes the shape and type of our input strings
def fix_shape_and_type_for_serving(placeholder):
# String split each string in the batch and output the values from the resulting SparseTensors
split_string = tf.map_fn(
fn = lambda x: tf.string_split(source = [placeholder[x]], delimiter=',').values,
elems = tf.range(start = 0, limit = tf.shape(input = placeholder)[0]),
dtype = tf.string) # shape = (batch_size, input_sequence_length)
print("serving_input_fn: fix_shape_and_type_for_serving: split_string = {}".format(split_string))
# Convert each string in the split tensor to float
feature_tensor = tf.string_to_number(string_tensor = split_string, out_type = tf.float32) # shape = (batch_size, input_sequence_length)
print("serving_input_fn: fix_shape_and_type_for_serving: feature_tensor = {}".format(feature_tensor))
return feature_tensor
# This function fixes dynamic shape ambiguity of the last dimension so that we will be able to use it in our DNN (since tf.layers.dense requires the last dimension to be known)
def get_shape_and_set_modified_shape_2D(tensor, additional_dimension_sizes):
# Get static shape for tensor and convert it to list
shape = tensor.get_shape().as_list()
# Set outer shape to additional_dimension_sizes[0] since we know that this is the correct size
shape[1] = additional_dimension_sizes[0]
# Set the shape of tensor to our modified shape
tensor.set_shape(shape = shape) # shape = (batch_size, additional_dimension_sizes[0])
print("serving_input_fn: get_shape_and_set_modified_shape_2D: tensor = {}, additional_dimension_sizes = {}".format(tensor, additional_dimension_sizes))
return tensor
# Create placeholders to accept the data sent to the model at serving time
feature_placeholders = { # all features come in as a batch of strings, shape = (batch_size,); this is because the arrays are passed as comma-separated strings for online ml-engine prediction
'video_id': tf.placeholder(dtype = tf.string, shape = [None]),
'mean_rgb': tf.placeholder(dtype = tf.string, shape = [None]),
'mean_audio': tf.placeholder(dtype = tf.string, shape = [None])
}
print("\nserving_input_fn: feature_placeholders = {}".format(feature_placeholders))
# Create feature tensors
features = {
"video_id": feature_placeholders["video_id"],
"mean_rgb": fix_shape_and_type_for_serving(placeholder = feature_placeholders["mean_rgb"]),
"mean_audio": fix_shape_and_type_for_serving(placeholder = feature_placeholders["mean_audio"])
}
print("serving_input_fn: features = {}".format(features))
# Fix dynamic shape ambiguity of feature tensors for our DNN
features["mean_rgb"] = get_shape_and_set_modified_shape_2D(tensor = features["mean_rgb"], additional_dimension_sizes = [1024])
features["mean_audio"] = get_shape_and_set_modified_shape_2D(tensor = features["mean_audio"], additional_dimension_sizes = [128])
print("serving_input_fn: features = {}\n".format(features))
return tf.estimator.export.ServingInputReceiver(features = features, receiver_tensors = feature_placeholders)
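# For illustration, an online prediction request against this serving function sends
# each feature as one comma-separated string per instance -- a sketch (the value
# counts match what the model expects: 1024 for mean_rgb, 128 for mean_audio):
#
#   {"instances": [{"video_id": "abc123",
#                   "mean_rgb": "0.1,0.2,...",      # 1024 comma-separated floats
#                   "mean_audio": "0.3,0.4,..."}]}  # 128 comma-separated floats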
# Create custom estimator's train and evaluate function
def train_and_evaluate(args):
# Create the custom estimator
estimator = tf.estimator.Estimator(
model_fn = video_level_model,
model_dir = args['output_dir'],
params = {'hidden_units': args['hidden_units'], 'top_k': args['top_k']})
# Create train spec to read in our training data
train_spec = tf.estimator.TrainSpec(
input_fn = read_dataset_video(
file_pattern = args['train_file_pattern'],
mode = tf.estimator.ModeKeys.TRAIN,
batch_size = args['batch_size']),
max_steps = args['train_steps'])
# Create exporter to save out the complete model to disk
exporter = tf.estimator.LatestExporter(name = 'exporter', serving_input_receiver_fn = serving_input_fn)
# Create eval spec to read in our validation data and export our model
eval_spec = tf.estimator.EvalSpec(
input_fn = read_dataset_video(
file_pattern = args['eval_file_pattern'],
mode = tf.estimator.ModeKeys.EVAL,
batch_size = args['batch_size']),
steps = None,
exporters = exporter,
start_delay_secs = args['start_delay_secs'],
throttle_secs = args['throttle_secs'])
# Create train and evaluate loop to train and evaluate our estimator
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
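# A minimal sketch of how train_and_evaluate might be invoked (only the dict keys
# come from the code above; the paths and hyperparameter values are assumptions
# for illustration):
#
#   train_and_evaluate({
#       'output_dir': 'gs://my-bucket/video_model',
#       'hidden_units': [1024, 512, 256],
#       'top_k': 20,
#       'train_file_pattern': 'gs://my-bucket/train*.tfrecord',
#       'eval_file_pattern': 'gs://my-bucket/eval*.tfrecord',
#       'batch_size': 512,
#       'train_steps': 10000,
#       'start_delay_secs': 60,
#       'throttle_secs': 120})
|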
the-stack_106_24443 | print(' '*20+'Version: String Encoder v0.0.1.5')
print('.'*150)
print('@copyright Bijoy_Maji')
print('By - Bijoy Maji. Email:- [email protected]')
print('Note: There is a problem with some keys. If you find one, please mail me the key at majibijoy00@gmail.')
print('.'*150)
import random
import math as m
# Input string
in_put=input('Enter your string:')
# Generate a key for encryption
output=[in_put[i] for i in range(len(in_put))]
words_list1=[['l', 'z', 'x', 'c', 'v', 'b','u','i','o','p','a','s'],
['d', 'f', 'g', 'h', 'j', 'k', 'q','w','e','r','t','y'],
['n', 'm', '~', '!', '#', '$', '%', '^', '&', '*', '(', ')'],
['_', '+', '@', '-', '=', '{', '}', '|', '[', ']', ':', '"'],
['`', ';', "'", '<', '>', '?', ',', '.', '/', '€', '¤', ' '],
['Z', 'X', 'C', 'V', 'B', 'Y', 'U', 'I', 'O', 'P', 'A', 'S'],
['D', 'F', 'G', 'H', 'J', 'K', 'L','Q', 'W', 'E', 'R', 'T' ],
['N', 'M', '0','5', '6', '7', '8', '9','1', '2', '3', '4']]
lower_words="qwertyuiopasdfghjklzxcvbnm"
upper_words=lower_words.upper()
number="0123456789"
symbols='''~!#$%^&*()_+@-={}|[]:"`;'<>?,./€¤ '''
words=lower_words+symbols+upper_words+number
words_list=[words[i] for i in range(len(words))]
output1=[words_list.index(i) for i in output]
xp,yp,zp=[],[],[]
chack_key=input("Do you have a key? (y/n):")
yes1=['y','Y','yes','Yes']
no1=['n','N','No','no']
key=[]
keyerror=0
try:
if chack_key in no1:
# Generate a random 10-digit key
key=[str(int(random.uniform(0,9))) for i in range (10) ]
elif chack_key in yes1:
key_in=input('Input our key: ')
key_2=['@','$', 'G','A','#','%','X','!','_','+','/','&','z','r','H','Q','W','D','Y','~']
key_1=[key_in[i] for i in range(len(key_in))]
j=int(key_1[10])
key_3=[]
key_4=[]
position=[]
z=[]
for i in range(12,len(key_1),2):
key_3.append(key_1[i])
for k in key_1[:10]:
if k in key_2:
key_4.append(k)
z.append(key_1.index(k))
position.append(key_2.index(k)-j)
y=[]
for l in range (min(position),max(position)+1):
y.append(key_3[position.index(l)])
c=0
for a in z:
key_1[a]=y[c]
c=c+1
key=key_1[:10]
else:
print("Sorry! only 'y' and 'n' are accepted")
keyerror +=1
# mixing word with key
word_with_key=[]
for p in key:
word_with_key.append(8-int(p))
for q in word_with_key:
if q==8:
word_with_key.remove(q)
for i in word_with_key:
if word_with_key.count(i)>1:
word_with_key.remove(i)
key_len_en=len(word_with_key)
for i in range(8):
if i not in word_with_key:
word_with_key.append(i)
#print ('word_with_key: ',word_with_key)
new_words_list=[]
for i in word_with_key:
new_words_list.append(words_list1[i])
for j in output1 :
a1=int((j+1)/12)
b1=int((j+1)%12)
yp.append(b1-1)
if (j+1)%12==0:
xp.append(a1-1)
zp.append(new_words_list[a1-1][b1-1])
else:
xp.append(a1)
zp.append(new_words_list[a1][b1-1])
key_2=['@','$', 'G','A','#','%','X','!','_','+','/','&','z','r','H','Q','W','D','Y','~']
j=int(random.uniform(-1,5))
main_constant=j
x=[]
for i in key:
position=key.index(i)
counter =0
if key.count(i)>1:
x.append(i)
j3=int(random.uniform(0,len(key_2)))
x.append(key_2[j3])
position1=key.index(i)
j+=1
key[key.index(i,position1+1)]=key_2[j]
key.append(str(main_constant))
j2=int(random.uniform(11,len(key_2)))
key.append(key_2[j2])
key=key+x
key_3=key[0]
for key_in_word in range(1,len(key)):
key_3=key_3+key[key_in_word]
if chack_key in no1:
print('Your key is: ',key_3)
#print('Words=',new_words_list)
#print('Your encoded string:',output)
#print('Out put:',output1)
#print(xp,yp,zp)
result=zp[0]
for key_in_word in range(1,len(zp)):
result=result+zp[key_in_word]
if keyerror ==0:
print('Encoded string is :',result)
except:
print ("some error in input key ! Enter right key and try again !")
|
the-stack_106_24445 | #!/usr/bin/env python
# Copyright (C) 2012-2013, The CyanogenMod Project
# (C) 2017, The LineageOS Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import base64
import json
import netrc
import os
import re
import sys
try:
# For python3
import urllib.error
import urllib.parse
import urllib.request
except ImportError:
# For python2
import imp
import urllib2
import urlparse
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
from xml.etree import ElementTree
product = sys.argv[1]
if len(sys.argv) > 2:
depsonly = sys.argv[2]
else:
depsonly = None
try:
device = product[product.index("_") + 1:]
except:
device = product
if not depsonly:
print("Device %s not found. Attempting to retrieve device repository from TeamHorizon Github (http://github.com/TeamHorizon)." % device)
repositories = []
try:
authtuple = netrc.netrc().authenticators("api.github.com")
if authtuple:
auth_string = ('%s:%s' % (authtuple[0], authtuple[2])).encode()
githubauth = base64.encodestring(auth_string).decode().replace('\n', '')
else:
githubauth = None
except:
githubauth = None
def add_auth(githubreq):
if githubauth:
githubreq.add_header("Authorization","Basic %s" % githubauth)
if not depsonly:
githubreq = urllib.request.Request("https://api.github.com/search/repositories?q=%s+user:TeamHorizon+in:name+fork:true" % device)
add_auth(githubreq)
try:
result = json.loads(urllib.request.urlopen(githubreq).read().decode())
except urllib.error.URLError:
print("Failed to search GitHub")
sys.exit()
except ValueError:
print("Failed to parse return data from GitHub")
sys.exit()
for res in result.get('items', []):
repositories.append(res)
local_manifests = r'.repo/local_manifests'
if not os.path.exists(local_manifests): os.makedirs(local_manifests)
def exists_in_tree(lm, path):
for child in lm.getchildren():
if child.attrib['path'] == path:
return True
return False
# in-place prettyprint formatter
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def get_manifest_path():
'''Find the current manifest path
In old versions of repo this is at .repo/manifest.xml
In new versions, .repo/manifest.xml includes an include
to some arbitrary file in .repo/manifests'''
m = ElementTree.parse(".repo/manifest.xml")
try:
m.findall('default')[0]
return '.repo/manifest.xml'
except IndexError:
return ".repo/manifests/{}".format(m.find("include").get("name"))
def get_default_revision():
r = ('p')
return r.replace('refs/heads/', '').replace('refs/tags/', '')
def get_from_manifest(devicename):
try:
lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if re.search("android_device_.*_%s$" % device, localpath.get("name")):
return localpath.get("path")
return None
def is_in_manifest(projectpath):
try:
lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if localpath.get("path") == projectpath:
return True
# Search in main manifest, too
try:
lm = ElementTree.parse(get_manifest_path())
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if localpath.get("path") == projectpath:
return True
# ... and don't forget the lineage snippet
try:
lm = ElementTree.parse(".repo/manifests/snippets/lineage.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if localpath.get("path") == projectpath:
return True
return False
def add_to_manifest(repositories, fallback_branch = None):
try:
lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for repository in repositories:
repo_name = repository['repository']
repo_target = repository['target_path']
print('Checking if %s is fetched from %s' % (repo_target, repo_name))
if is_in_manifest(repo_target):
print('TeamHorizon/%s already fetched to %s' % (repo_name, repo_target))
continue
print('Adding dependency: TeamHorizon/%s -> %s' % (repo_name, repo_target))
project = ElementTree.Element("project", attrib = { "path": repo_target,
"remote": "github", "name": "TeamHorizon/%s" % repo_name, "revision": "p"})
if 'branch' in repository:
project.set('revision',repository['branch'])
elif fallback_branch:
print("Using fallback branch %s for %s" % (fallback_branch, repo_name))
project.set('revision', fallback_branch)
else:
print("Using default branch for %s" % repo_name)
lm.append(project)
indent(lm, 0)
raw_xml = ElementTree.tostring(lm).decode()
raw_xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + raw_xml
f = open('.repo/local_manifests/roomservice.xml', 'w')
f.write(raw_xml)
f.close()
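# For illustration, the generated .repo/local_manifests/roomservice.xml ends up looking
# roughly like this (the device path and repository name are example placeholders):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <manifest>
#     <project path="device/oneplus/cheeseburger" remote="github"
#              name="TeamHorizon/android_device_oneplus_cheeseburger" revision="p" />
#   </manifest>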
def fetch_dependencies(repo_path, fallback_branch = None):
print('Looking for dependencies in %s' % repo_path)
dependencies_path = repo_path + '/xenonhd.dependencies'
syncable_repos = []
verify_repos = []
if os.path.exists(dependencies_path):
dependencies_file = open(dependencies_path, 'r')
dependencies = json.loads(dependencies_file.read())
fetch_list = []
for dependency in dependencies:
if not is_in_manifest(dependency['target_path']):
fetch_list.append(dependency)
syncable_repos.append(dependency['target_path'])
verify_repos.append(dependency['target_path'])
else:
verify_repos.append(dependency['target_path'])
dependencies_file.close()
if len(fetch_list) > 0:
print('Adding dependencies to manifest')
add_to_manifest(fetch_list, fallback_branch)
else:
print('%s has no additional dependencies.' % repo_path)
if len(syncable_repos) > 0:
print('Syncing dependencies')
os.system('repo sync --force-sync %s' % ' '.join(syncable_repos))
for deprepo in verify_repos:
fetch_dependencies(deprepo)
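# For illustration, a xenonhd.dependencies file is a JSON list of entries shaped like
# the sketch below (the repository and path names are example placeholders; "branch"
# is optional):
#
#   [
#     {"repository": "android_kernel_oneplus_msm8998",
#      "target_path": "kernel/oneplus/msm8998",
#      "branch": "p"}
#   ]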
def has_branch(branches, revision):
return revision in [branch['name'] for branch in branches]
if depsonly:
repo_path = get_from_manifest(device)
if repo_path:
fetch_dependencies(repo_path)
else:
print("Trying dependencies-only mode on a non-existing device tree?")
sys.exit()
else:
for repository in repositories:
repo_name = repository['name']
if re.match(r"^android_device_[^_]*_" + device + "$", repo_name):
print("Found repository: %s" % repository['name'])
manufacturer = repo_name.replace("android_device_", "").replace("_" + device, "")
default_revision = get_default_revision()
print("Default revision: %s" % default_revision)
print("Checking branch info")
githubreq = urllib.request.Request(repository['branches_url'].replace('{/branch}', ''))
add_auth(githubreq)
result = json.loads(urllib.request.urlopen(githubreq).read().decode())
## Try tags, too, since that's what releases use
if not has_branch(result, default_revision):
githubreq = urllib.request.Request(repository['tags_url'].replace('{/tag}', ''))
add_auth(githubreq)
result.extend (json.loads(urllib.request.urlopen(githubreq).read().decode()))
repo_path = "device/%s/%s" % (manufacturer, device)
adding = {'repository':repo_name,'target_path':repo_path}
fallback_branch = None
if not has_branch(result, default_revision):
if os.getenv('ROOMSERVICE_BRANCHES'):
fallbacks = list(filter(bool, os.getenv('ROOMSERVICE_BRANCHES').split(' ')))
for fallback in fallbacks:
if has_branch(result, fallback):
print("Using fallback branch: %s" % fallback)
fallback_branch = fallback
break
if not fallback_branch:
print("Default revision %s not found in %s. Bailing." % (default_revision, repo_name))
print("Branches found:")
for branch in [branch['name'] for branch in result]:
print(branch)
print("Use the ROOMSERVICE_BRANCHES environment variable to specify a list of fallback branches.")
sys.exit()
add_to_manifest([adding], fallback_branch)
print("Syncing repository to retrieve project.")
os.system('repo sync --force-sync %s' % repo_path)
print("Repository synced!")
fetch_dependencies(repo_path, fallback_branch)
print("Done")
sys.exit()
print("Repository for %s not found in the TeamHorizon Github repository list. If this is in error, you may need to manually add it to your local_manifests/roomservice.xml." % device)
|
the-stack_106_24446 | # Write a function that extracts the words from a given text as a parameter.
# A word is defined as a sequence of alpha-numeric characters.
import re, os
def extract_words(text):
# pattern = re.compile('\w+\')
# words = re.findall(pattern, text)
# return words
return re.split("[^\w]+", text)
# print(extract_words("Today I'm shopping for 2 notebooks and 10 kilos of onions"))
# Write a function that receives as a parameter a regex string, a text string and a whole number x,
# and returns the substrings of length x that match the regular expression.
def get_substrings_that_matches(regex, text, x):
# pattern = re.compile(regex)
# matches = re.findall(pattern, text)
# return [m for m in matches if len(m) is x]
return [word for word in re.findall(regex, text) if len(word) == x]
# print(get_substrings_that_matches('(\w+)', "I am a computer science student", 7))
# Write a function that receives as parameters a list of strings and a list of regular expressions, and
# returns the strings that match at least one of the given regular expressions.
def strings_that_matches_at_least_one_patterns(list_of_strings, patterns):
def match_at_least_one_regex(s, patterns):
for pattern in patterns:
if re.search(pattern, s) is not None:
return True
return False
return [s for s in list_of_strings if match_at_least_one_regex(s, patterns)]
# print(strings_that_matches_at_least_one_patterns(["I", "payed", "150 dollars for 1 night", "at the museum"],
# ["(\d+)"]))
# Write a function that, for a text given as a parameter, censors words that begin and end with vowels.
# Censorship means replacing characters from odd positions with *.
def censure_words(text):
words = extract_words(text)
censored = []
for word in words:
pattern = '^[aeiou].*[aeiou]$'
if re.match(pattern, word, flags=re.IGNORECASE):
# Replace the characters at odd positions with '*' (per the exercise statement above)
word = ''.join('*' if i % 2 == 1 else c for i, c in enumerate(word))
censored.append(word)
return censored
# print(censure_words('Ana are mere dulci, are apatie'))
# Write a function that recursively walks a directory and displays those files whose name
# matches a regular expression given as a parameter or contains a string that matches the same expression.
# Files that satisfy both conditions will be prefixed with ">>"
def get_files_that_match_pattern(regex):
def fully_matches_regex(file_name, pattern):
if re.match(pattern + "$", file_name) is not None:
return True
return False
def matches_regex(file_name, pattern):
if re.search(pattern, file_name) is not None:
return True
return False
path = 'C:\\facultate\\an3\\sem1\\python\\python\\labs'
for (root, directories, files) in os.walk(path):
for filename in files:
file_path = os.path.join(root, filename)
if os.path.isfile(file_path):
if fully_matches_regex(filename, regex) and matches_regex(filename, regex):
print('>>' + filename)
elif fully_matches_regex(filename, regex) or matches_regex(filename, regex):
print(filename)
print(get_files_that_match_pattern('problem5.py'))
|
the-stack_106_24447 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import with_statement
import os
import sys
import tempfile
from libcloud import _init_once
from libcloud.test import LibcloudTestCase
from libcloud.test import unittest
from libcloud.compute.ssh import ParamikoSSHClient
from libcloud.compute.ssh import ShellOutSSHClient
from libcloud.compute.ssh import have_paramiko
from libcloud.utils.py3 import StringIO
from libcloud.utils.py3 import u
from libcloud.utils.py3 import assertRaisesRegex
from mock import patch, Mock, MagicMock
if not have_paramiko:
ParamikoSSHClient = None # NOQA
paramiko_version = '0.0.0'
else:
import paramiko
paramiko_version = paramiko.__version__
@unittest.skipIf(not have_paramiko, 'Skipping because paramiko is not available')
class ParamikoSSHClientTests(LibcloudTestCase):
@patch('paramiko.SSHClient', Mock)
def setUp(self):
"""
Creates the object patching the actual connection.
"""
conn_params = {'hostname': 'dummy.host.org',
'port': 8822,
'username': 'ubuntu',
'key': '~/.ssh/ubuntu_ssh',
'timeout': '600'}
_, self.tmp_file = tempfile.mkstemp()
os.environ['LIBCLOUD_DEBUG'] = self.tmp_file
_init_once()
self.ssh_cli = ParamikoSSHClient(**conn_params)
def tearDown(self):
if 'LIBCLOUD_DEBUG' in os.environ:
del os.environ['LIBCLOUD_DEBUG']
@patch('paramiko.SSHClient', Mock)
def test_create_with_password(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'password': 'ubuntu'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'password': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
self.assertLogMsg('Connecting to server')
@patch('paramiko.SSHClient', Mock)
def test_deprecated_key_argument(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'key_filename': 'id_rsa',
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
self.assertLogMsg('Connecting to server')
def test_key_files_and_key_material_arguments_are_mutual_exclusive(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_files': 'id_rsa',
'key_material': 'key'}
expected_msg = ('key_files and key_material arguments are mutually '
'exclusive')
assertRaisesRegex(self, ValueError, expected_msg,
ParamikoSSHClient, **conn_params)
@patch('paramiko.SSHClient', Mock)
def test_key_material_argument(self):
path = os.path.join(os.path.dirname(__file__),
'fixtures', 'misc', 'test_rsa.key')
with open(path, 'r') as fp:
private_key = fp.read()
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_material': private_key}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
pkey = paramiko.RSAKey.from_private_key(StringIO(private_key))
expected_conn = {'username': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'pkey': pkey,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
self.assertLogMsg('Connecting to server')
@patch('paramiko.SSHClient', Mock)
def test_key_material_argument_invalid_key(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_material': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
expected_msg = 'Invalid or unsupported key type'
assertRaisesRegex(self, paramiko.ssh_exception.SSHException,
expected_msg, mock.connect)
@patch('paramiko.SSHClient', Mock)
@unittest.skipIf(paramiko_version >= '2.7.0',
'New versions of paramiko support OPENSSH key format')
def test_key_file_non_pem_format_error(self):
path = os.path.join(os.path.dirname(__file__),
'fixtures', 'misc',
'test_rsa_non_pem_format.key')
# Supplied as key_material
with open(path, 'r') as fp:
private_key = fp.read()
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_material': private_key}
mock = ParamikoSSHClient(**conn_params)
expected_msg = 'Invalid or unsupported key type'
assertRaisesRegex(self, paramiko.ssh_exception.SSHException,
expected_msg, mock.connect)
def test_key_material_valid_pem_keys_invalid_header_auto_conversion(self):
# Test a scenario where valid PEM keys with invalid headers which is
# not recognized by paramiko are automatically converted in a format
# which is recognized by paramiko
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
# 1. RSA key type with header which is not supported by paramiko
path = os.path.join(os.path.dirname(__file__),
'fixtures', 'misc',
'test_rsa_non_paramiko_recognized_header.key')
with open(path, 'r') as fp:
private_key = fp.read()
pkey = client._get_pkey_object(key=private_key)
self.assertTrue(pkey)
self.assertTrue(isinstance(pkey, paramiko.RSAKey))
# 2. DSA key type with header which is not supported by paramiko
path = os.path.join(os.path.dirname(__file__),
'fixtures', 'misc',
'test_dsa_non_paramiko_recognized_header.key')
with open(path, 'r') as fp:
private_key = fp.read()
pkey = client._get_pkey_object(key=private_key)
self.assertTrue(pkey)
self.assertTrue(isinstance(pkey, paramiko.DSSKey))
# 3. ECDSA key type with header which is not supported by paramiko
path = os.path.join(os.path.dirname(__file__),
'fixtures', 'misc',
'test_ecdsa_non_paramiko_recognized_header.key')
with open(path, 'r') as fp:
private_key = fp.read()
pkey = client._get_pkey_object(key=private_key)
self.assertTrue(pkey)
self.assertTrue(isinstance(pkey, paramiko.ECDSAKey))
def test_key_material_valid_pem_keys(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
# 1. RSA key type with header which is not supported by paramiko
path = os.path.join(os.path.dirname(__file__),
'fixtures', 'misc',
'test_rsa.key')
with open(path, 'r') as fp:
private_key = fp.read()
pkey = client._get_pkey_object(key=private_key)
self.assertTrue(pkey)
self.assertTrue(isinstance(pkey, paramiko.RSAKey))
# 2. DSA key type with header which is not supported by paramiko
path = os.path.join(os.path.dirname(__file__),
'fixtures', 'misc',
'test_dsa.key')
with open(path, 'r') as fp:
private_key = fp.read()
pkey = client._get_pkey_object(key=private_key)
self.assertTrue(pkey)
self.assertTrue(isinstance(pkey, paramiko.DSSKey))
# 3. ECDSA key type with header which is not supported by paramiko
path = os.path.join(os.path.dirname(__file__),
'fixtures', 'misc',
'test_ecdsa.key')
with open(path, 'r') as fp:
private_key = fp.read()
pkey = client._get_pkey_object(key=private_key)
self.assertTrue(pkey)
self.assertTrue(isinstance(pkey, paramiko.ECDSAKey))
@patch('paramiko.SSHClient', Mock)
def test_create_with_key(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'key_files': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'key_filename': 'id_rsa',
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
self.assertLogMsg('Connecting to server')
@patch('paramiko.SSHClient', Mock)
def test_create_with_password_and_key(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu',
'password': 'ubuntu',
'key': 'id_rsa'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'password': 'ubuntu',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'key_filename': 'id_rsa',
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
self.assertLogMsg('Connecting to server')
@patch('paramiko.SSHClient', Mock)
def test_create_without_credentials(self):
"""
Initialize object with no credentials.
Just to have better coverage, initialize the object
without 'password' nor 'key'.
"""
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
mock = ParamikoSSHClient(**conn_params)
mock.connect()
expected_conn = {'username': 'ubuntu',
'hostname': 'dummy.host.org',
'allow_agent': True,
'look_for_keys': True,
'port': 22}
mock.client.connect.assert_called_once_with(**expected_conn)
@patch.object(ParamikoSSHClient, '_consume_stdout',
MagicMock(return_value=StringIO('')))
@patch.object(ParamikoSSHClient, '_consume_stderr',
MagicMock(return_value=StringIO('')))
def test_basic_usage_absolute_path(self):
"""
Basic execution.
"""
mock = self.ssh_cli
# script to execute
sd = "/root/random_script.sh"
# Connect behavior
mock.connect()
mock_cli = mock.client # The actual mocked object: SSHClient
expected_conn = {'username': 'ubuntu',
'key_filename': '~/.ssh/ubuntu_ssh',
'allow_agent': False,
'hostname': 'dummy.host.org',
'look_for_keys': False,
'timeout': '600',
'port': 8822}
mock_cli.connect.assert_called_once_with(**expected_conn)
mock.put(sd)
# Make assertions over 'put' method
mock_cli.open_sftp().chdir.assert_called_with('root')
mock_cli.open_sftp().file.assert_called_once_with('random_script.sh',
mode='w')
mock.run(sd)
# Make assertions over 'run' method
mock_cli.get_transport().open_session().exec_command \
.assert_called_once_with(sd)
self.assertLogMsg('Executing command (cmd=/root/random_script.sh)')
self.assertLogMsg('Command finished')
mock.close()
def test_delete_script(self):
"""
Provide a basic test with 'delete' action.
"""
mock = self.ssh_cli
# script to execute
sd = '/root/random_script.sh'
mock.connect()
mock.delete(sd)
# Make assertions over the 'delete' method
mock.client.open_sftp().unlink.assert_called_with(sd)
self.assertLogMsg('Deleting file')
mock.close()
self.assertLogMsg('Closing server connection')
def assertLogMsg(self, expected_msg):
with open(self.tmp_file, 'r') as fp:
content = fp.read()
self.assertTrue(content.find(expected_msg) != -1)
def test_consume_stdout(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1024
chan = Mock()
chan.recv_ready.side_effect = [True, True, False]
chan.recv.side_effect = ['123', '456']
stdout = client._consume_stdout(chan).getvalue()
self.assertEqual(u('123456'), stdout)
self.assertEqual(len(stdout), 6)
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1024
chan = Mock()
chan.recv_ready.side_effect = [True, True, False]
chan.recv.side_effect = ['987', '6543210']
stdout = client._consume_stdout(chan).getvalue()
self.assertEqual(u('9876543210'), stdout)
self.assertEqual(len(stdout), 10)
def test_consume_stderr(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1024
chan = Mock()
chan.recv_stderr_ready.side_effect = [True, True, False]
chan.recv_stderr.side_effect = ['123', '456']
stderr = client._consume_stderr(chan).getvalue()
self.assertEqual(u('123456'), stderr)
self.assertEqual(len(stderr), 6)
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1024
chan = Mock()
chan.recv_stderr_ready.side_effect = [True, True, False]
chan.recv_stderr.side_effect = ['987', '6543210']
stderr = client._consume_stderr(chan).getvalue()
self.assertEqual(u('9876543210'), stderr)
self.assertEqual(len(stderr), 10)
def test_consume_stdout_chunk_contains_part_of_multi_byte_utf8_character(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1
chan = Mock()
chan.recv_ready.side_effect = [True, True, True, True, False]
chan.recv.side_effect = ['\xF0', '\x90', '\x8D', '\x88']
stdout = client._consume_stdout(chan).getvalue()
self.assertEqual('ð\x90\x8d\x88', stdout)
self.assertEqual(len(stdout), 4)
def test_consume_stderr_chunk_contains_part_of_multi_byte_utf8_character(self):
conn_params = {'hostname': 'dummy.host.org',
'username': 'ubuntu'}
client = ParamikoSSHClient(**conn_params)
client.CHUNK_SIZE = 1
chan = Mock()
chan.recv_stderr_ready.side_effect = [True, True, True, True, False]
chan.recv_stderr.side_effect = ['\xF0', '\x90', '\x8D', '\x88']
stderr = client._consume_stderr(chan).getvalue()
self.assertEqual('ð\x90\x8d\x88', stderr)
self.assertEqual(len(stderr), 4)
class ShellOutSSHClientTests(LibcloudTestCase):
def test_password_auth_not_supported(self):
try:
ShellOutSSHClient(hostname='localhost', username='foo',
password='bar')
except ValueError as e:
msg = str(e)
self.assertTrue('ShellOutSSHClient only supports key auth' in msg)
else:
self.fail('Exception was not thrown')
def test_ssh_executable_not_available(self):
class MockChild(object):
returncode = 127
def communicate(*args, **kwargs):
pass
def mock_popen(*args, **kwargs):
return MockChild()
with patch('subprocess.Popen', mock_popen):
try:
ShellOutSSHClient(hostname='localhost', username='foo')
except ValueError as e:
msg = str(e)
self.assertTrue('ssh client is not available' in msg)
else:
self.fail('Exception was not thrown')
def test_connect_success(self):
client = ShellOutSSHClient(hostname='localhost', username='root')
self.assertTrue(client.connect())
def test_close_success(self):
client = ShellOutSSHClient(hostname='localhost', username='root')
self.assertTrue(client.close())
def test_get_base_ssh_command(self):
client1 = ShellOutSSHClient(hostname='localhost', username='root')
client2 = ShellOutSSHClient(hostname='localhost', username='root',
key='/home/my.key')
client3 = ShellOutSSHClient(hostname='localhost', username='root',
key='/home/my.key', timeout=5)
cmd1 = client1._get_base_ssh_command()
cmd2 = client2._get_base_ssh_command()
cmd3 = client3._get_base_ssh_command()
self.assertEqual(cmd1, ['ssh', 'root@localhost'])
self.assertEqual(cmd2, ['ssh', '-i', '/home/my.key',
'root@localhost'])
self.assertEqual(cmd3, ['ssh', '-i', '/home/my.key',
'-oConnectTimeout=5', 'root@localhost'])
if __name__ == '__main__':
sys.exit(unittest.main())
|
the-stack_106_24448 | from pathlib import Path
from setuptools import setup
from csv_dataset import __version__
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(Path(__file__).parent / 'docs' / fname).read()
def read_requirements(filename):
with open(filename) as f:
return f.read().splitlines()
settings = dict(
name='csv-dataset',
packages=['csv_dataset'],
version=__version__,
author='kaelzhang',
author_email='',
description=('csv-dataset helps to read csv files and create descriptive and efficient input pipelines for deep learning in a streaming fashion'),
license='MIT',
keywords='csv-dataset',
url='https://github.com/kaelzhang/python-csv-dataset',
long_description=read('README.md'),
long_description_content_type='text/markdown',
python_requires='>=3.7',
install_requires=read_requirements('requirements.txt'),
tests_require=read_requirements('test-requirements.txt'),
classifiers=[
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
]
)
if __name__ == '__main__':
setup(**settings)
|
the-stack_106_24449 | from typing import Union, Any
from pyqtgraph.Qt import QtWidgets, QtGui
from amitypes import Array1d, Array2d, Array3d
from ami.flowchart.library.common import CtrlNode, GroupedNode
from ami.flowchart.library.CalculatorWidget import CalculatorWidget, FilterWidget, gen_filter_func, sanitize_name
import ami.graph_nodes as gn
import numpy as np
import itertools
import collections
class Constant(CtrlNode):
"""
Constant
"""
nodeName = "Constant"
uiTemplate = [('constant', 'doubleSpin')]
def __init__(self, name):
super().__init__(name, terminals={"Out": {'io': 'out', 'ttype': float}})
def to_operation(self, **kwargs):
constant = self.values['constant']
return gn.Map(name=self.name()+"_operation", **kwargs, func=lambda: constant)
class Identity(GroupedNode):
"""
Identity
"""
nodeName = "Identity"
def __init__(self, name):
super().__init__(name, terminals={"In": {'io': 'in', 'ttype': Any},
"Out": {'io': 'out', 'ttype': Any}},
allowAddInput=True)
def to_operation(self, **kwargs):
return gn.Map(name=self.name()+"_operation", **kwargs, func=lambda *args: args)
class MeanVsScan(CtrlNode):
"""
MeanVsScan creates a histogram using a variable number of bins.
Returns a dict with keys Bins and values mean of bins.
"""
nodeName = "MeanVsScan"
uiTemplate = [('binned', 'check', {'checked': False}),
('bins', 'intSpin', {'value': 10, 'min': 1}),
('min', 'intSpin', {'value': 0}),
('max', 'intSpin', {'value': 10})]
def __init__(self, name):
super().__init__(name, global_op=True,
terminals={
'Bin': {'io': 'in', 'ttype': float},
'Value': {'io': 'in', 'ttype': float},
'Bins': {'io': 'out', 'ttype': Array1d},
'Counts': {'io': 'out', 'ttype': Array1d}
})
def to_operation(self, inputs, outputs, **kwargs):
outputs = self.output_vars()
if self.values['binned']:
bins = np.histogram_bin_edges(np.arange(self.values['min'], self.values['max']),
bins=self.values['bins'],
range=(self.values['min'], self.values['max']))
map_outputs = [self.name()+'_bin', self.name()+'_map_count']
reduce_outputs = [self.name()+'_reduce_count']
def func(k, v):
return np.digitize(k, bins), (v, 1)
def mean(d):
res = {bins[i]: 0 for i in range(0, bins.size)}
for k, v in d.items():
try:
res[bins[k]] = v[0]/v[1]
except IndexError:
pass
keys, values = zip(*sorted(res.items()))
return np.array(keys), np.array(values)
nodes = [
gn.Map(name=self.name()+'_map', inputs=inputs, outputs=map_outputs,
func=func, **kwargs),
gn.ReduceByKey(name=self.name()+'_reduce',
inputs=map_outputs, outputs=reduce_outputs,
reduction=lambda cv, v: (cv[0]+v[0], cv[1]+v[1]), **kwargs),
gn.Map(name=self.name()+'_mean', inputs=reduce_outputs, outputs=outputs, func=mean,
**kwargs)
]
else:
map_outputs = [self.name()+'_map_count']
reduce_outputs = [self.name()+'_reduce_count']
def mean(d):
res = {}
for k, v in d.items():
res[k] = v[0]/v[1]
keys, values = zip(*sorted(res.items()))
return np.array(keys), np.array(values)
nodes = [
gn.Map(name=self.name()+'_map', inputs=[inputs['Value']], outputs=map_outputs,
func=lambda a: (a, 1), **kwargs),
gn.ReduceByKey(name=self.name()+'_reduce',
inputs=[inputs['Bin']]+map_outputs, outputs=reduce_outputs,
reduction=lambda cv, v: (cv[0]+v[0], cv[1]+v[1]), **kwargs),
gn.Map(name=self.name()+'_mean', inputs=reduce_outputs, outputs=outputs, func=mean,
**kwargs)
]
return nodes
class MeanWaveformVsScan(CtrlNode):
"""
MeanWaveformVsScan creates a 2d histogram using a variable number of bins.
Returns a dict with keys Bins and values mean waveform of bins.
"""
nodeName = "MeanWaveformVsScan"
uiTemplate = [('binned', 'check', {'checked': False}),
('bins', 'intSpin', {'value': 10, 'min': 1}),
('min', 'intSpin', {'value': 0}),
('max', 'intSpin', {'value': 10})]
def __init__(self, name):
super().__init__(name, global_op=True,
terminals={
'Bin': {'io': 'in', 'ttype': float},
'Value': {'io': 'in', 'ttype': Array1d},
'X Bins': {'io': 'out', 'ttype': Array1d},
'Y Bins': {'io': 'out', 'ttype': Array1d},
'Counts': {'io': 'out', 'ttype': Array2d}
})
def to_operation(self, inputs, outputs, **kwargs):
if self.values['binned']:
bins = np.histogram_bin_edges(np.arange(self.values['min'], self.values['max']),
bins=self.values['bins'],
range=(self.values['min'], self.values['max']))
map_outputs = [self.name()+'_bin', self.name()+'_map_count']
reduce_outputs = [self.name()+'_reduce_count']
def func(k, v):
return np.digitize(k, bins), (v, 1)
def mean(d):
res = {}
for k, v in d.items():
try:
res[bins[k]] = v[0]/v[1]
except IndexError:
pass
missing_keys = set(bins).difference(res.keys())
k, v = d.popitem()
for k in missing_keys:
res[k] = np.zeros(v[0].shape)
keys, values = zip(*sorted(res.items()))
stack = np.stack(values, axis=1)
return np.arange(0, stack.shape[0]), np.array(keys), stack
nodes = [
gn.Map(name=self.name()+'_map', inputs=inputs, outputs=map_outputs,
func=func, **kwargs),
gn.ReduceByKey(name=self.name()+'_reduce',
inputs=map_outputs, outputs=reduce_outputs,
reduction=lambda cv, v: (cv[0]+v[0], cv[1]+v[1]), **kwargs),
gn.Map(name=self.name()+'_mean', inputs=reduce_outputs, outputs=outputs, func=mean,
**kwargs)
]
else:
map_outputs = [self.name()+'_map_count']
reduce_outputs = [self.name()+'_reduce_count']
def mean(d):
res = {}
for k, v in d.items():
res[k] = v[0]/v[1]
keys, values = zip(*sorted(res.items()))
stack = np.stack(values, axis=1)
return np.arange(0, stack.shape[0]), np.array(keys), stack
nodes = [
gn.Map(name=self.name()+'_map', inputs=[inputs['Value']], outputs=map_outputs,
func=lambda a: (a, 1), **kwargs),
gn.ReduceByKey(name=self.name()+'_reduce',
inputs=[inputs['Bin']]+map_outputs, outputs=reduce_outputs,
reduction=lambda cv, v: (cv[0]+v[0], cv[1]+v[1]), **kwargs),
gn.Map(name=self.name()+'_mean', inputs=reduce_outputs, outputs=outputs, func=mean,
**kwargs)
]
return nodes
class StatsVsScan(CtrlNode):
"""
StatsVsScan creates a histogram using a variable number of bins.
Returns a dict with keys Bins and values mean, std, error of bins.
"""
nodeName = "StatsVsScan"
uiTemplate = [('binned', 'check', {'checked': False}),
('bins', 'intSpin', {'value': 10, 'min': 1}),
('min', 'intSpin', {'value': 0}),
('max', 'intSpin', {'value': 10})]
def __init__(self, name):
super().__init__(name, global_op=True,
terminals={
'Bin': {'io': 'in', 'ttype': float},
'Value': {'io': 'in', 'ttype': float},
'Bins': {'io': 'out', 'ttype': Array1d},
'Mean': {'io': 'out', 'ttype': Array1d},
'Stdev': {'io': 'out', 'ttype': Array1d},
'Error': {'io': 'out', 'ttype': Array1d},
})
def to_operation(self, inputs, outputs, **kwargs):
outputs = self.output_vars()
def reduction(cv, v):
cv.extend(v)
return cv
if self.values['binned']:
bins = np.histogram_bin_edges(np.arange(self.values['min'], self.values['max']),
bins=self.values['bins'],
range=(self.values['min'], self.values['max']))
map_outputs = [self.name()+'_bin', self.name()+'_map_count']
reduce_outputs = [self.name()+'_reduce_count']
def func(k, v):
return np.digitize(k, bins), [v]
def stats(d):
res = {bins[i]: (0, 0, 0) for i in range(0, bins.size)}
for k, v in d.items():
try:
stddev = np.std(v)
res[bins[k]] = (np.mean(v), stddev, stddev/np.sqrt(len(v)))
except IndexError:
pass
keys, values = zip(*sorted(res.items()))
mean, stddev, error = zip(*values)
return np.array(keys), np.array(mean), np.array(stddev), np.array(error)
nodes = [
gn.Map(name=self.name()+'_map', inputs=inputs, outputs=map_outputs,
func=func, **kwargs),
gn.ReduceByKey(name=self.name()+'_reduce',
inputs=map_outputs, outputs=reduce_outputs,
reduction=reduction, **kwargs),
gn.Map(name=self.name()+'_stats', inputs=reduce_outputs, outputs=outputs, func=stats,
**kwargs)
]
else:
map_outputs = [self.name()+'_map_count']
reduce_outputs = [self.name()+'_reduce_count']
def stats(d):
res = {}
for k, v in d.items():
stddev = np.std(v)
res[k] = (np.mean(v), stddev, stddev/np.sqrt(len(v)))
keys, values = zip(*sorted(res.items()))
mean, stddev, error = zip(*values)
return np.array(keys), np.array(mean), np.array(stddev), np.array(error)
nodes = [
gn.Map(name=self.name()+'_map', inputs=[inputs['Value']], outputs=map_outputs,
func=lambda a: [a], **kwargs),
gn.ReduceByKey(name=self.name()+'_reduce',
inputs=[inputs['Bin']]+map_outputs, outputs=reduce_outputs,
reduction=reduction,
**kwargs),
gn.Map(name=self.name()+'_stats', inputs=reduce_outputs, outputs=outputs, func=stats,
**kwargs)
]
return nodes
class Combinations(CtrlNode):
"""
Generate combinations using itertools.combinations.
"""
nodeName = "Combinations"
uiTemplate = [('length', 'intSpin', {'value': 1, 'min': 1})]
def __init__(self, name):
super().__init__(name, terminals={'In': {'io': 'in', 'ttype': Array1d},
'Out': {'io': 'out', 'ttype': Array1d}})
self.output_terms = []
def state_changed(self, *args, **kwargs):
super().state_changed(*args, **kwargs)
while len(self.output_vars()) > self.values['length']:
self.removeTerminal(self.output_terms.pop())
while len(self.output_vars()) < self.values['length']:
self.output_terms.append(self.addOutput())
def to_operation(self, **kwargs):
length = self.values['length']
def func(*args):
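            # Illustrative example of the output shape (inputs assumed): with length=2 and
            # args=([1, 2, 3],), itertools.combinations yields (1, 2), (1, 3), (2, 3), so this
            # returns [array([1, 1, 2]), array([2, 3, 3])] -- one array per combination slot.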
r = list(map(np.array, zip(*itertools.combinations(*args, length))))
if r:
return r
else:
return [np.array([])]*length
return gn.Map(name=self.name()+"_operation", func=func, **kwargs)
class Export(CtrlNode):
"""
Send data back to worker.
"""
nodeName = "Export"
uiTemplate = [('alias', 'text')]
def __init__(self, name):
super().__init__(name, terminals={"In": {'io': 'in', 'ttype': Any},
"Out": {'io': 'out', 'ttype': Any}},
exportable=True)
try:
import sympy
class CalcProc():
def __init__(self, params):
self.params = params
self.func = None
def __call__(self, *args, **kwargs):
            # note: args are passed in the order of the node's input terminals, top to bottom;
            # sympy symbols need to be defined in the same order for this to work correctly
if self.func is None:
self.func = sympy.lambdify(**self.params, modules=["numpy", "scipy"])
args = list(args)
for idx, arg in enumerate(args):
if type(arg) is np.ndarray:
args[idx] = arg.astype(np.float64, copy=False)
return self.func(*args, **kwargs)
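    # Rough usage sketch of CalcProc (hypothetical values, for illustration only; the real
    # params dict is built by Calculator.to_operation below):
    #   proc = CalcProc({'args': ['x', 'y'], 'expr': 'x + 2*y'})
    #   proc(np.array([1.0]), np.array([2.0]))  # -> array([5.])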
class Calculator(CtrlNode):
"""
Calculator
"""
nodeName = "Calculator"
def __init__(self, name):
super().__init__(name,
terminals={'In': {'io': 'in', 'ttype': Union[float, Array1d,
Array2d, Array3d]},
'Out': {'io': 'out', 'ttype': Any}},
allowAddInput=True)
self.values = {'operation': ''}
def isChanged(self, restore_ctrl, restore_widget):
return restore_widget
def display(self, topics, terms, addr, win, **kwargs):
if self.widget is None:
self.widget = CalculatorWidget(terms, win, self.values['operation'])
self.widget.sigStateChanged.connect(self.state_changed)
return self.widget
def to_operation(self, **kwargs):
args = []
expr = self.values['operation']
            # sympy doesn't like symbol names like Sum.0.Out, so the dots need to be removed.
for arg in self.input_vars().values():
rarg = sanitize_name(arg)
args.append(rarg)
expr = expr.replace(arg, rarg)
params = {'args': args,
'expr': expr}
return gn.Map(name=self.name()+"_operation", **kwargs, func=CalcProc(params))
except ImportError as e:
print(e)
try:
from ami.flowchart.library.PythonEditorWidget import PythonEditorWidget, PythonEditorProc
class PythonEditor(CtrlNode):
"""
Write a python function.
"""
nodeName = "PythonEditor"
def __init__(self, name):
super().__init__(name,
allowAddInput=True,
allowAddOutput=True)
self.values = {'text': ''}
self.input_prompt = None
self.output_prompt = None
def terminal_prompt(self, name='', title='', **kwargs):
prompt = QtWidgets.QWidget()
prompt.layout = QtWidgets.QFormLayout(parent=prompt)
prompt.name = QtGui.QLineEdit(name, parent=prompt)
prompt.type_selector = QtGui.QComboBox(prompt)
prompt.ok = QtGui.QPushButton('Ok', parent=prompt)
for typ in [Any, bool, float, Array1d, Array2d, Array3d]:
prompt.type_selector.addItem(str(typ), typ)
prompt.layout.addRow("Name:", prompt.name)
prompt.layout.addRow("Type:", prompt.type_selector)
prompt.layout.addRow("", prompt.ok)
prompt.setLayout(prompt.layout)
prompt.setWindowTitle("Add " + name)
return prompt
def onCreate(self):
self.addInput()
self.addOutput()
def addInput(self, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = self.nextTerminalName('In')
self.input_prompt = self.terminal_prompt(**kwargs)
self.input_prompt.ok.clicked.connect(self._addInput)
self.input_prompt.show()
def _addInput(self, **kwargs):
name = self.input_prompt.name.text()
ttype = self.input_prompt.type_selector.currentData()
kwargs['name'] = name
kwargs['ttype'] = ttype
kwargs['removable'] = True
self.input_prompt.close()
return super().addInput(**kwargs)
def addOutput(self, **kwargs):
if 'name' not in kwargs:
kwargs['name'] = self.nextTerminalName('Out')
self.output_prompt = self.terminal_prompt(**kwargs)
self.output_prompt.ok.clicked.connect(self._addOutput)
self.output_prompt.show()
def _addOutput(self, **kwargs):
name = self.output_prompt.name.text()
ttype = self.output_prompt.type_selector.currentData()
kwargs['name'] = name
kwargs['ttype'] = ttype
kwargs['removable'] = True
self.output_prompt.close()
return super().addOutput(**kwargs)
def isChanged(self, restore_ctrl, restore_widget):
return restore_widget
def display(self, topics, terms, addr, win, **kwargs):
if self.widget is None:
if not self.values['text']:
self.values['text'] = self.generate_template(self.inputs().keys(), self.outputs().keys())
self.widget = PythonEditorWidget(win, self.values['text'], export=True, node=self)
self.widget.sigStateChanged.connect(self.state_changed)
return self.widget
def generate_template(self, inputs, outputs):
args = []
for arg in inputs:
rarg = sanitize_name(arg)
args.append(rarg)
args = ', '.join(args)
template = f"""
class EventProcessor():
def __init__(self):
pass
def begin_run(self):
pass
def end_run(self):
pass
def begin_step(self, step):
pass
def end_step(self, step):
pass
def on_event(self, {args}, *args, **kwargs):
# return {len(self.outputs())} output(s)
return"""
return template
def to_operation(self, **kwargs):
proc = PythonEditorProc(self.values['text'])
return gn.Map(name=self.name()+"_operation",
**kwargs,
func=proc,
begin_run=proc.begin_run,
end_run=proc.end_run,
begin_step=proc.begin_step,
end_step=proc.end_step)
class Filter(CtrlNode):
"""
Filter
"""
nodeName = "Filter"
def __init__(self, name):
super().__init__(name,
terminals={'In': {'io': 'in', 'ttype': Any},
'Out': {'io': 'out', 'ttype': Any}},
allowAddInput=True,
allowAddOutput=True)
self.values = collections.defaultdict(dict)
def display(self, topics, terms, addr, win, **kwargs):
if self.widget is None:
self.widget = FilterWidget(terms, self.output_vars(), win)
self.widget.sigStateChanged.connect(self.state_changed)
return self.widget
def to_operation(self, **kwargs):
values = self.values
inputs = list(self.input_vars().values())
outputs = list(self.output_vars())
for idx, inp in enumerate(inputs):
inputs[idx] = sanitize_name(inp)
func = gen_filter_func(values, inputs, outputs)
return gn.Map(name=self.name()+"_operation", **kwargs, func=PythonEditorProc(func))
except ImportError as e:
print(e)
|
the-stack_106_24450 | # Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import csv
import random
import argparse
import operator
import numpy as np
import os, sys, json
import os.path as osp
from tqdm import tqdm
from scipy import spatial
from numpy.random import choice
from random import shuffle
from house_parse import HouseParse
from question_string_builder import QuestionStringBuilder
from nltk.stem import WordNetLemmatizer
# for reproducibility
random.seed(0)
np.random.seed(0)
# hard thresholding
OBJECT_RATIO_THRESH = 1.5
ROOM_RATIO_THRESH = 1.5
DIST_LOW_THRESH, DIST_HIGH_THRESH = 0.2, 1.2
blacklist_objects = {
'color': [
'container', 'containers', 'stationary_container', 'candle',
'coffee_table', 'column', 'door', 'floor_lamp', 'mirror', 'person',
'rug', 'sofa', 'stairs', 'outdoor_seating', 'kitchen_cabinet',
'kitchen_set', 'switch', 'storage_bench', 'table_lamp', 'vase',
'candle', 'roof', 'stand', 'beer', 'chair', 'chandelier',
'coffee_table', 'column', 'trinket', 'grill', 'book', 'books',
'curtain', 'desk', 'door', 'floor_lamp', 'hanger', 'workplace',
'glass', 'headstone', 'kitchen_set', 'mirror', 'plant', 'shelving',
'place_setting', 'ceiling_fan', 'stairs', 'storage_bench',
'switch', 'table_lamp', 'vase', 'decoration', 'coffin',
'wardrobe_cabinet', 'window', 'pet', 'cup', 'arch',
'household_appliance'
],
'dist_compare': [
'column', 'door', 'switch', 'person', 'household_appliance',
        'decoration', 'trinket', 'place_setting', 'coffin', 'book',
'cup', 'chandelier', 'arch', 'pet', 'container', 'containers',
'stationary_container', 'shelving', 'stand', 'kitchen_set',
'books', 'ceiling_fan', 'workplace', 'glass', 'grill', 'roof',
'outdoor_seating', 'kitchen_cabinet', 'headstone', 'beer'
],
'object_compare': [
'container', 'containers', 'stationary_container', 'candle',
'coffee_table', 'column', 'door', 'floor_lamp', 'mirror', 'person',
'rug', 'sofa', 'stairs', 'outdoor_seating', 'kitchen_cabinet',
'kitchen_set', 'switch', 'storage_bench', 'table_lamp', 'vase',
'candle', 'roof', 'stand', 'beer', 'chair', 'chandelier',
'coffee_table', 'column', 'trinket', 'grill', 'book', 'books',
'curtain', 'desk', 'door', 'floor_lamp', 'hanger', 'workplace',
'glass', 'headstone', 'kitchen_set', 'mirror', 'plant', 'shelving',
'place_setting', 'ceiling_fan', 'stairs', 'storage_bench',
'switch', 'table_lamp', 'vase', 'decoration', 'coffin',
'wardrobe_cabinet', 'window', 'pet', 'cup', 'arch',
'household_appliance', 'garage_door'
]
}
blacklist_rooms = [
'loggia', 'storage', 'guest room', 'hallway', 'wardrobe', 'hall',
'boiler room', 'terrace', 'room', 'entryway', 'aeration', 'lobby',
'office', 'freight elevator', 'passenger elevator'
]
class roomEntity():
translations = {
'toilet': 'bathroom',
'guest room': 'bedroom',
'child room': 'bedroom',
}
def __init__(self, name, bbox, meta):
self.name = list(
set([
self.translations[str(x)]
if str(x) in self.translations else str(x) for x in name
]))
self.id = meta['id']
self.bbox = bbox
self.meta = meta # {id, type, valid, modelId, nodeIndices, roomTypes, bbox}
self.type = 'room'
self.name.sort(key=str.lower)
self.entities = self.objects = []
def addObject(self, object_ent):
self.objects.append(object_ent)
def isValid(self):
return len(self.objects) != 0
class objectEntity():
translations = {
'bread': 'food',
'hanging_kitchen_cabinet': 'kitchen_cabinet',
'teapot': 'kettle',
'coffee_kettle': 'kettle',
'range_hood_with_cabinet': 'range_hood',
'dining_table': 'table',
'coffee_table': 'table',
'game_table': 'table',
'office_chair': 'chair',
'bench_chair': 'chair',
'chair_set': 'chair',
'armchair': 'chair',
'fishbowl': 'fish_tank/bowl',
'fish_tank': 'fish_tank/bowl',
'single_bed': 'bed',
'double_bed': 'bed',
'baby_bed': 'bed'
}
def __init__(self, name, bbox, meta, obj_id, room_id):
if name in self.translations: self.name = self.translations[name]
else: self.name = name
self.bbox = bbox
self.meta = meta
self.type = 'object'
self.id = obj_id
self.room_id = room_id
        assert self.id == meta['id']  # check that the provided id matches meta['id']
self.entities = self.rooms = []
def addRoom(self, room_ent):
self.rooms.append(room_ent)
def isValid(self):
return len(self.rooms) != 0
class Engine():
'''
Templates and functional forms.
'''
template_defs = {
'object_dist_compare': [
'filter.rooms', 'unique.rooms', 'filter.objects', 'unique.objects', 'blacklist.dist_compare',
'object_dist_pair', 'query.object_dist_compare'
],
'object_color_compare': [
'filter.rooms', 'unique.rooms', 'filter.objects', 'unique.objects', 'blacklist.object_compare',
'object_color_pair', 'query.object_color_compare'
],
'object_size_compare' : [
'filter.rooms', 'unique.rooms', 'filter.objects', 'unique.objects', 'blacklist.object_compare',
'object_size_pair', 'query.object_size_compare'
],
'room_size_compare': [
'filter.rooms', 'unique.rooms', 'room_size_pair', 'query.room_size_compare'
],
'room_dist_compare': [
'filter.rooms', 'unique.rooms', 'room_dist_pair', 'query.room_dist_compare'
],
}
templates = {
# object distance comparisons
'object_closer_inroom': 'is the <OBJ> closer to the <OBJ> than to the <OBJ> in the <ROOM>?',
'object_farther_inroom': 'is the <OBJ> farther from the <OBJ> than from the <OBJ> in the <ROOM>?',
# object color comparison
'object_color_compare_inroom': 'does the <OBJ> have same color as the <OBJ> in the <ROOM>?',
'object_color_compare_xroom': 'does the <OBJ1> in the <ROOM1> have same color as the <OBJ2> in the <ROOM2>?',
# object size comparison
'object_bigger_inroom': 'is the <OBJ> bigger than <OBJ> in the <ROOM>?',
'object_smaller_inroom': 'is the <OBJ> smaller than <OBJ> in the <ROOM>?',
'object_bigger_xroom': 'is the <OBJ1> in the <ROOM1> bigger than <OBJ2> in the <ROOM2>?',
'object_smaller_xroom': 'is the <OBJ1> in the <ROOM1> smaller than <OBJ2> in the <ROOM2>?',
# room size comparison
'room_bigger': 'is the <ROOM1> bigger than the <ROOM2>?',
'room_smaller': 'is the <ROOM1> smaller than the <ROOM2>?',
# room distance comparison
'room_closer': 'is the <ROOM> closer to <ROOM> than to the <ROOM>?',
'room_farther': 'is the <ROOM> farther from <ROOM> than from the <ROOM>?',
}
def __init__(self, object_counts_by_room_file, env_obj_colors_file, debug=False):
self.template_fns = {
'filter': self.filter,
'unique': self.unique,
'query': self.query,
'blacklist': self.blacklist,
'thresholdSize': self.thresholdSize,
'object_dist_pair': self.objectDistPair,
'object_color_pair': self.objectColorPair,
'object_size_pair': self.objectSizePair,
'room_size_pair': self.roomSizePair,
'room_dist_pair': self.roomDistPair,
}
self.query_fns = {
'query_object_dist_compare': self.queryObjectDistCompare,
'query_object_color_compare': self.queryObjectColorCompare,
'query_object_size_compare': self.queryObjectSizeCompare,
'query_room_size_compare': self.queryRoomSizeCompare,
'query_room_dist_compare': self.queryRoomDistCompare,
}
self.blacklist_objects = blacklist_objects
self.blacklist_rooms = blacklist_rooms
self.use_threshold_size = True
self.use_blacklist = True
self.debug = debug
self.ent_queue = None
self.q_str_builder = QuestionStringBuilder()
self.q_obj_builder = self.questionObjectBuilder
# update
if os.path.isfile(object_counts_by_room_file) == True:
self.global_obj_by_room = json.load(open(object_counts_by_room_file, 'r'))
self.negative_exists = {}
else:
print('Not loading env_lists/800env_object_counts_by_room.json')
# load colors
assert osp.isfile(env_obj_colors_file)
self.env_obj_color_map = json.load(open(env_obj_colors_file, 'r'))
def cacheHouse(self, Hp):
"""
Get objects and rooms info for current parsed house.
"""
self.house = Hp
self.entities = {'rooms': [], 'objects': []}
for i in self.house.rooms:
room = roomEntity(i['type'], i['bbox'], i)
for j in room.meta['nodes']:
obj = objectEntity(
self.house.objects['0_' + str(j)]['fine_class'],
self.house.objects['0_' + str(j)]['bbox'],
self.house.objects['0_' + str(j)],
obj_id='0_' + str(j),
room_id=room.id)
room.addObject(obj)
obj.addRoom(room)
self.entities['objects'].append(obj)
self.entities['rooms'].append(room)
self.isValid()
def isValid(self):
# print('checking validity...')
for i in self.entities['rooms']:
if i.isValid() == False and self.debug == True:
print('ERROR', i.meta)
continue
for i in self.entities['objects']:
if i.isValid() == False and self.debug == True:
print('ERROR', i.meta)
continue
def clearQueue(self):
self.ent_queue = None
def remain_single_name_rooms(self):
"""filter those elements with no/multiple room names."""
ent = self.ent_queue['elements']
if self.ent_queue['type'] == 'objects':
self.ent_queue['elements'] = [x for x in ent if len(x.rooms) == 1 and len(x.rooms[0].name) == 1]
if self.ent_queue['type'] == 'rooms':
self.ent_queue['elements'] = [x for x in ent if len(x.name) == 1]
def executeFn(self, template):
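        # Each template step is either 'fn' or 'fn.param': e.g. 'filter.rooms' calls
        # self.filter('rooms'), and 'query.room_size_compare' dispatches to
        # self.query_fns['query_room_size_compare'] via self.query('room_size_compare').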
for i in template:
if '.' in i:
_ = i.split('.')
fn = _[0]
param = _[1]
else:
fn = i
param = None
res = self.template_fns[fn](param)
if isinstance(res, dict):
return res
else:
# return unique questions only
return list({x['question']: x for x in res}.values())
def getVolume(self, bbox):
# return volume of bbox
return (bbox['max'][0]-bbox['min'][0]) * (bbox['max'][1]-bbox['min'][1]) * (bbox['max'][2]-bbox['min'][2])
def getArea(self, bbox):
# return 2D bird-view area
return (bbox['max'][0]-bbox['min'][0]) * (bbox['max'][2]-bbox['min'][2])
def thresholdSize(self, *args):
assert self.ent_queue != None
assert self.ent_queue['type'] == 'objects'
ent = self.ent_queue
sizes = [self.getVolume(x.bbox) for x in ent['elements']]
idx = [i for i, v in enumerate(sizes) if v < 0.0005]
for i in idx[::-1]:
del ent['elements'][i]
self.ent_queue = ent
return self.ent_queue
def blacklist(self, *args):
assert self.ent_queue != None
ent = self.ent_queue
if ent['type'] == 'objects':
template = args[0]
names = [x.name for x in ent['elements']]
idx = [i for i, v in enumerate(names) if v in self.blacklist_objects[template]]
for i in idx[::-1]:
del ent['elements'][i]
elif ent['type'] == 'rooms':
names = [x.name for x in ent['elements']]
idx = [
i for i, v in enumerate([
any([k for k in x if k in self.blacklist_rooms])
for x in names
]) if v == True
]
for i in idx[::-1]:
del ent['elements'][i]
self.ent_queue = ent
return self.ent_queue
def filter(self, *args):
"""select object/rooms according to args[0] or self.ent_queue['type']"""
# if ent_queue is empty, execute on parent env entitites
if self.ent_queue == None:
self.ent_queue = {'type': args[0], 'elements': self.entities[args[0]]}
else:
ent = self.ent_queue
assert args[0] != ent['type']
ent = {
'type': args[0],
'elements': [z for y in [x.entities for x in ent['elements']] for z in y]
}
self.ent_queue = ent
# remove blacklisted rooms
if self.ent_queue['type'] == 'rooms' and self.use_blacklist == True:
self.ent_queue = self.blacklist()
if self.ent_queue['type'] == 'objects' and self.use_threshold_size == True:
self.ent_queue = self.thresholdSize()
return self.ent_queue
def unique(self, *args):
"""select those objects/rooms that occurs only once in this house"""
assert self.ent_queue != None
ent = self.ent_queue
# unique based on either rooms or objects (only)
if ent['type'] == 'objects':
names = [x.name for x in ent['elements']]
idx = [i for i, v in enumerate([names.count(x) for x in names]) if v != 1]
elif ent['type'] == 'rooms':
# for room = ['dining room', 'kitchen'], we count all appeared room names
names = [name for x in ent['elements'] for name in x.name]
idx = []
for i, x in enumerate(ent['elements']):
for name in x.name:
if names.count(name) != 1:
idx.append(i)
break
else:
raise NotImplementedError
for i in idx[::-1]:
del ent['elements'][i]
names = [x.name for x in ent['elements']]
self.ent_queue = ent
return self.ent_queue
def query(self, *args):
assert self.ent_queue != None
ent = self.ent_queue
return self.query_fns['query_' + args[0]](ent)
"""
Returned ent_queue is list of (obj1, obj2, obj3, 'closer') and (obj3, obj2, obj1, 'farther')
where d(obj1, obj2) < d(obj2, obj3)
"""
# only works with objectEntities for now
def objectDistPair(self, *args):
self.remain_single_name_rooms() # remove 0/multiple-name rooms
ent = self.ent_queue
assert ent['type'] == 'objects'
h_low_threshold, h_high_threshold = DIST_LOW_THRESH, DIST_HIGH_THRESH
pairwise_distances = self.house.getAllPairwiseDistances(ent['elements']) # list of [(obj1, obj2, distance)]
updated_ent_queue = {'type': ent['type'], 'elements': []}
for i in ent['elements']:
sub_list = [
x for x in pairwise_distances if x[0].meta['id'] == i.meta['id'] or x[1].meta['id'] == i.meta['id']
]
sub_list = [
x for x in sub_list if x[0].rooms[0].name == x[1].rooms[0].name
]
far = [x for x in sub_list if x[2] >= h_high_threshold]
close = [x for x in sub_list if x[2] <= h_low_threshold]
if len(far) == 0 or len(close) == 0:
continue
for j in far:
far_ent = 1 if j[0].name == i.name else 0
for k in close:
close_ent = 1 if k[0].name == i.name else 0
updated_ent_queue['elements'].append([k[close_ent], i, j[far_ent], 'closer'])
updated_ent_queue['elements'].append([j[far_ent], i, k[close_ent], 'farther'])
self.ent_queue = updated_ent_queue
return self.ent_queue
def queryObjectDistCompare(self, ent):
qns = []
for i in ent['elements']:
template = 'object_%s_inroom' % i[3]
qns.append(self.q_obj_builder(template, i[:3], 'yes', 'object_dist_compare_inroom'))
qns.append(self.q_obj_builder(template, i[:3][::-1], 'no', 'object_dist_compare_inroom'))
return qns
"""
Returned ent_queue is list of [(room1, room2, room3, farther/closer)]
"""
def roomDistPair(self, *args):
self.remain_single_name_rooms() # remove 0/multiple-name rooms
ent = self.ent_queue
assert ent['type'] == 'rooms'
h_low_threshold, h_high_threshold = 2, 8
# TODO: replace geodesic distance with shortest path
pairwise_distances = self.house.getAllPairwiseRoomDistances(ent['elements']) # list of [(room1, room2, distance)]
updated_ent_queue = {'type': ent['type'], 'elements': []}
for i in ent['elements']:
sub_list = [
x for x in pairwise_distances if x[0].meta['id'] == i.meta['id'] or x[1].meta['id'] == i.meta['id']
]
far = [x for x in sub_list if x[2] >= h_high_threshold]
close = [x for x in sub_list if x[2] <= h_low_threshold]
if len(far) == 0 or len(close) == 0:
continue
for j in far:
far_ent = 1 if j[0].name == i.name else 0
for k in close:
close_ent = 1 if k[0].name == i.name else 0
updated_ent_queue['elements'].append([k[close_ent], i, j[far_ent], 'closer'])
updated_ent_queue['elements'].append([j[far_ent], i, k[close_ent], 'farther'])
self.ent_queue = updated_ent_queue
return self.ent_queue
def queryRoomDistCompare(self, ent):
qns = []
for i in ent['elements']:
template = 'room_%s' % i[3]
qns.append(self.q_obj_builder(template, i[:3], 'yes', 'room_dist_compare'))
qns.append(self.q_obj_builder(template, i[:3][::-1], 'no', 'room_dist_compare'))
return qns
"""
Returned ent_queue is list of (obj1, color1, obj2, color2)
"""
def objectColorPair(self, *args):
self.remain_single_name_rooms() # remove 0/multiple-name rooms
ent = self.ent_queue
assert ent['type'] == 'objects'
updated_ent_queue = {'type': ent['type'], 'elements': []}
num_objects = len(ent['elements'])
for i in range(num_objects):
for j in range(num_objects):
object_i, object_j = ent['elements'][i], ent['elements'][j]
if object_i.id == object_j.id:
continue
if (self.house.id + '.' + object_i.id not in self.env_obj_color_map) or \
(self.house.id + '.' + object_j.id not in self.env_obj_color_map):
continue
# get colors
color_i = self.env_obj_color_map[self.house.id + '.' + object_i.id]
color_j = self.env_obj_color_map[self.house.id + '.' + object_j.id]
updated_ent_queue['elements'].append([object_i, color_i, object_j, color_j])
self.ent_queue = updated_ent_queue
return self.ent_queue
def queryObjectColorCompare(self, ent):
# ent = {type, elements: [(object1, color1, object2, color2)]}
qns = []
for obj1, color1, obj2, color2 in ent['elements']:
rm = 'inroom' if obj1.rooms[0].name == obj2.rooms[0].name else 'xroom'
template = 'object_color_compare_%s' % rm
ans = 'yes' if color1 == color2 else 'no'
q_type = 'object_color_compare_%s' % rm
qns.append(self.q_obj_builder(template , [obj1, obj2], ans, q_type))
return qns
"""
Returned ent_queue is list of [(obj1, obj2, size_cmp)]
"""
def objectSizePair(self, *args):
self.remain_single_name_rooms() # remove 0/multiple-name rooms
        RATIO_THRESH = OBJECT_RATIO_THRESH
ent = self.ent_queue
assert ent['type'] == 'objects'
updated_ent_queue = {'type': 'objects', 'elements': []}
num_objects = len(ent['elements'])
for i in range(num_objects):
for j in range(num_objects):
object_i, object_j = ent['elements'][i], ent['elements'][j]
if object_i.id == object_j.id:
continue
# get 3D volume
size_i = self.getVolume(object_i.bbox)
size_j = self.getVolume(object_j.bbox)
                if max(size_i, size_j) > min(size_i, size_j) * RATIO_THRESH:
size_cmp = 'bigger' if size_i > size_j else 'smaller'
updated_ent_queue['elements'].append([object_i, object_j, size_cmp])
self.ent_queue = updated_ent_queue
return self.ent_queue
def queryObjectSizeCompare(self, ent):
# ent = {type, elements: [(object1, object2, bigger/smaller)]}
qns = []
for obj1, obj2, size_cmp in ent['elements']:
rm = 'inroom' if obj1.rooms[0].name==obj2.rooms[0].name else 'xroom'
template = 'object_%s_%s' % (size_cmp, rm)
q_type = 'object_size_compare_%s' % rm
qns.append(self.q_obj_builder(template, [obj1, obj2], 'yes', q_type))
qns.append(self.q_obj_builder(template, [obj2, obj1], 'no', q_type))
return qns
"""
Returned ent_queue is list of [(room1, room2, size_cmp)]
"""
    def roomSizePair(self, *args):
self.remain_single_name_rooms() # remove 0/multiple-name rooms
        RATIO_THRESH = ROOM_RATIO_THRESH
ent = self.ent_queue
assert ent['type'] == 'rooms'
updated_ent_queue = {'type': 'rooms', 'elements': []}
num_rooms = len(ent['elements'])
for i in range(num_rooms):
for j in range(num_rooms):
room_i, room_j = ent['elements'][i], ent['elements'][j]
if room_i.id == room_j.id: continue
# get 2D bird-view area
size_i = self.getArea(room_i.bbox)
size_j = self.getArea(room_j.bbox)
                if max(size_i, size_j) > min(size_i, size_j) * RATIO_THRESH:
size_cmp = 'bigger' if size_i > size_j else 'smaller'
updated_ent_queue['elements'].append([room_i, room_j, size_cmp])
self.ent_queue = updated_ent_queue
return self.ent_queue
def queryRoomSizeCompare(self, ent):
# ent = {type: rooms, elements: [(room1, room2, bigger/smaller)]}
qns = []
for room1, room2, size_cmp in ent['elements']:
template = 'room_%s' % size_cmp
q_type = 'room_size_compare'
qns.append(self.q_obj_builder(template, [room1, room2], 'yes', q_type))
qns.append(self.q_obj_builder(template, [room2, room1], 'no', q_type))
return qns
"""
Question Builder
"""
def questionObjectBuilder(self, template, q_ent, a_str, q_type=None):
if q_type == None:
q_type = template
q_str = self.templates[template]
bbox = []
# object_dist_compare (we don't need xroom here)
if q_type in ['object_dist_compare_inroom']:
for ent in q_ent:
q_str = self.q_str_builder.prepareString(q_str, ent.name, ent.rooms[0].name[0])
bbox.append({'id': ent.id, 'type': ent.type, 'box': ent.bbox, 'name': ent.name, 'room_id': ent.room_id, 'target': True})
mat = {}
# room_dist_compare
if q_type == 'room_dist_compare':
for ent in q_ent:
q_str = self.q_str_builder.prepareString(q_str, '', ent.name[0])
bbox.append({'id': ent.id, 'type': ent.type, 'box': ent.bbox, 'name': ent.name[0], 'target': True})
mat = {}
# object_color_compare
if q_type in ['object_color_compare_inroom', 'object_color_compare_xroom']:
if 'inroom' in template:
for ent in q_ent:
q_str = self.q_str_builder.prepareString(q_str, ent.name, ent.rooms[0].name[0])
color = self.env_obj_color_map[self.house.id + '.' + ent.id]
bbox.append({'id': ent.id, 'type': ent.type, 'box': ent.bbox, 'name': ent.name, 'color': color, 'room_id': ent.room_id, 'target': True})
else:
q_str = self.q_str_builder.prepareStringForTwo(q_str, q_ent[0].name, q_ent[1].name,
q_ent[0].rooms[0].name[0], q_ent[1].rooms[0].name[0])
for ent in q_ent:
color = self.env_obj_color_map[self.house.id + '.' + ent.id]
bbox.append({'id': ent.id, 'type': ent.type, 'box': ent.bbox, 'name': ent.name, 'color': color, 'room_id': ent.room_id, 'target': True})
mat = {}
# object_size_compare
if q_type in ['object_size_compare_inroom', 'object_size_compare_xroom']:
if 'inroom' in template:
for ent in q_ent:
q_str = self.q_str_builder.prepareString(q_str, ent.name, ent.rooms[0].name[0])
size = self.getVolume(ent.bbox)
bbox.append({'id': ent.id, 'type': ent.type, 'box': ent.bbox, 'name': ent.name, 'size': size, 'room_id': ent.room_id, 'target': True})
else:
q_str = self.q_str_builder.prepareStringForTwo(q_str, q_ent[0].name, q_ent[1].name,
q_ent[0].rooms[0].name[0], q_ent[1].rooms[0].name[0])
for ent in q_ent:
size = self.getVolume(ent.bbox)
bbox.append({'id': ent.id, 'type': ent.type, 'box': ent.bbox, 'name': ent.name, 'size': size, 'room_id': ent.room_id, 'target': True})
mat = {}
# room_size_compare
if q_type == 'room_size_compare':
q_str = self.q_str_builder.prepareStringForTwo(q_str, '', '', q_ent[0].name[0], q_ent[1].name[0])
for ent in q_ent:
size = self.getArea(ent.bbox)
bbox.append({'id': ent.id, 'type': ent.type, 'box': ent.bbox, 'name': ent.name[0], 'size': size, 'target': True})
mat = {}
return {
'question': q_str,
'answer': a_str,
'type': q_type,
'meta': mat,
'bbox': bbox
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--dataDir', default='../data', help='Data directory')
parser.add_argument('--dataJson', default='eqa_v1.json', help='questions and splits')
parser.add_argument('--HouseApiDir', default='../pyutils/House3D', help='house3d api dir')
parser.add_argument('--cacheDir', default='../cache/question-gen-outputs', help='directory for saving generated questions')
parser.add_argument('--outputJson', default='questions_from_engine_v2.json', help='output json file')
    parser.add_argument('--object_counts_by_room_file', default='env_lists/800env_object_counts_by_room.json', help='mapping from room type to object type to count')
parser.add_argument('--env_obj_colors_file', default='env_lists/env_obj_colors_v2.json', help='obj to color mapping')
args = parser.parse_args()
# load splits
splits = json.load(open(osp.join(args.dataDir, 'eqa_v1', args.dataJson), 'r'))['splits']
for split, hids in splits.items():
print('There are %s %s house_ids.' % (len(hids), split))
house_ids = [hid for split, hids in splits.items() for hid in hids]
print('There are in all %s house_ids.' % len(house_ids))
# HouseParse and QA-engine
Hp = HouseParse(dataDir=osp.join(args.dataDir, 'SUNCGdata'),
objrenderPath=osp.join(args.HouseApiDir, 'House3D'))
E = Engine(args.object_counts_by_room_file, args.env_obj_colors_file)
# # try one house
# hid = splits['train'][2]
# Hp.parse(hid); E.cacheHouse(Hp)
# qns = E.executeFn(E.template_defs['room_size_compare'])
# pprint(qns)
# SAVE QUESTIONS TO A JSON FILE
T = ['object_dist_compare', 'object_color_compare', 'object_size_compare', 'room_size_compare']
# T = E.template_defs.keys()
num_envs = len(house_ids)
idx, all_qns = 0, []
empty_envs = []
for i in tqdm(range(num_envs)):
Hp.parse(house_ids[i])
num_qns_for_house = 0
for t in T:
E.cacheHouse(Hp)
qns = E.executeFn(E.template_defs[t])
num_qns_for_house += len(qns)
E.clearQueue()
for k in qns:
k['id'] = idx
k['house'] = house_ids[i]
idx += 1
all_qns.append(k)
if num_qns_for_house == 0:
empty_envs.append(house_ids[i])
print('Houses with no questions generated (if any) : %d' % len(empty_envs))
print('%s qns generated for %s.' % (len(all_qns), T))
# simple stats for each type
qtype_to_qns = {}
for qn in all_qns:
if qn['type'] not in qtype_to_qns: qtype_to_qns[qn['type']] = []
qtype_to_qns[qn['type']] += [qn]
for qtype in qtype_to_qns.keys():
print('%s questions for [%s]' % (len(qtype_to_qns[qtype]), qtype))
# save
if not osp.isdir(args.cacheDir):
os.makedirs(args.cacheDir)
output_file = osp.join(args.cacheDir, args.outputJson)
json.dump(all_qns, open(output_file, 'w'))
print('Written to %s.' % output_file)
|
the-stack_106_24452 | # -*- coding: utf-8 -*-
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('taxbrain', '0045_taxsaveinputs_ald_invinc_ec_base_ryanbrady'),
]
operations = [
migrations.RemoveField(
model_name='taxsaveinputs',
name='CTC_new_ps',
),
]
|
the-stack_106_24453 | import BirdRoostLocation.LoadSettings as settings
import os
import pandas
def create_subset_labels(csv_input_path, subset_path, csv_output_path):
full = pandas.read_csv(csv_input_path)
subset = pandas.read_csv(subset_path)
full_basenames = {}
subset_basenames = []
full_file_list = list(full["AWS_file"])
subset_file_list = list(subset["file_names"])
for i, file_name in enumerate(full_file_list):
fbasename = file_name[:23]
full_basenames[fbasename] = i
for i, file_name in enumerate(subset_file_list):
sbasename = file_name[2:25]
if sbasename in full_basenames:
subset_basenames.append(full_basenames[sbasename])
output_pd = full.loc[subset_basenames]
output_pd.to_csv(csv_output_path, index=False)
def main():
create_subset_labels(
csv_input_path=settings.LABEL_CSV,
subset_path=settings.SUBSET_CSV,
csv_output_path=settings.SUBSET_LABEL_CSV,
)
if __name__ == "__main__":
os.chdir(settings.WORKING_DIRECTORY)
main()
|
the-stack_106_24454 | import tweepy
import random
import time
# Twitter API Keys
import os
consumer_key = os.getenv("consumer_key")
consumer_secret = os.getenv("consumer_secret")
access_token = os.getenv("access_token")
access_token_secret = os.getenv("access_token_secret")
# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
happy_quotes = [
"For every minute you are angry you lose sixty seconds of happiness. - Ralph Waldo Emerson",
"Folks are usually about as happy as they make their minds up to be. - Abraham Lincoln",
"Happiness is when what you think, what you say, and what you do are in harmony. - Mahatma Gandhi",
"Count your age by friends, not years. Count your life by smiles, not tears. - John Lennon",
"Happiness is a warm puppy. - Charles M. Schulz",
"The happiness of your life depends upon the quality of your thoughts. - Marcus Aurelius",
"Now and then it's good to pause in our pursuit of happiness and just be happy. - Guillaume Apollinaire"]
def happy_it_up():
"""Tweet a Random Happiness Quote."""
# Tweet a random quote
msg = f"Happiness Tweet {time.time()}: {random.choice(happy_quotes)}"
api.update_status(msg)
# Print success message
print("Tweeted successfully!")
print(msg)
t_end = time.time() + 60 * 5
while time.time() < t_end:
happy_it_up()
time.sleep(60) |
the-stack_106_24455 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making GameAISDK available.
This source code file is licensed under the GNU General Public License Version 3.
For full details, please refer to the file "LICENSE.txt" which is provided as part of this source code package.
Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
"""
from manage.server import Server
def main():
server = Server()
if server.init() is True:
server.run()
server.finish()
if __name__ == '__main__':
main()
|
the-stack_106_24460 | import os
import sys
from PySide6 import QtCore, QtWidgets, QtGui
import idaapi
from . import exceptions
def capture_widget(widget, path=None):
"""Grab an image of a Qt widget
Args:
widget: The Qt Widget to capture
path (optional): The path to save to. If not provided - will return image data.
Returns:
If a path is provided, the image will be saved to it.
If not, the PNG buffer will be returned.
"""
pixmap = widget.grab()
if path:
pixmap.save(path)
else:
image_buffer = QtCore.QBuffer()
image_buffer.open(QtCore.QIODevice.ReadWrite)
pixmap.save(image_buffer, "PNG")
return image_buffer.data().data()
def get_widget(title):
"""Get the Qt widget of the IDA window with the given title."""
tform = idaapi.find_widget(title)
if not tform:
raise exceptions.FormNotFound("No form titled {!r} found.".format(title))
return idaapi.PluginForm.FormToPyQtWidget(tform)
def resize_widget(widget, width, height):
"""Resize a Qt widget."""
widget.setGeometry(0, 0, width, height)
def get_window():
"""Get IDA's top level window."""
tform = idaapi.get_current_widget()
# Required sometimes when closing IDBs and not IDA.
if not tform:
tform = idaapi.find_widget("Output window")
widget = idaapi.PluginForm.FormToPyQtWidget(tform)
window = widget.window()
return window
class MenuManager(object):
"""IDA Menu Manipulation
Use this class to add your own top-level menus.
While this is discouraged by the SDK:
> You should not change top level menu, or the Edit,Plugins submenus
(documentation for `attach_action_to_menu`, kernwin.hpp)
Adding top-level menus is useful sometimes.
Nonetheless, you should be careful and make sure to remove all your menus
when you are done. Leaving them handing would force users to restart IDA
to remove them.
Usage of this class should be as follows:
>>> # Use the manager to add top-level menus
>>> menu_manager = MenuManager()
>>> menu_manager.add_menu("My Menu")
>>> # Use the standard API to add menu items
>>> idaapi.attach_action_to_menu("My Menu/", ":My-Action:", idaapi.SETMENU_APP)
>>> # When a menu is not needed, remove it
>>> menu_manager.remove_menu("My Menu")
>>> # When you are done with the manager (and want to remove all menus you added,)
>>> # clear it before deleting.
>>> menu_manager.clear()
"""
def __init__(self):
super(MenuManager, self).__init__()
self._window = get_window()
self._menu = self._window.findChild(QtWidgets.QMenuBar)
self._menus = {}
def add_menu(self, name):
"""Add a top-level menu.
The menu manager only allows one menu of the same name. However, it does
not make sure that there are no pre-existing menus of that name.
"""
if name in self._menus:
raise exceptions.MenuAlreadyExists("Menu name {!r} already exists.".format(name))
menu = self._menu.addMenu(name)
self._menus[name] = menu
def remove_menu(self, name):
"""Remove a top-level menu.
Only removes menus created by the same menu manager.
"""
if name not in self._menus:
raise exceptions.MenuNotFound(
"Menu {!r} was not found. It might be deleted, or belong to another menu manager.".format(name))
self._menu.removeAction(self._menus[name].menuAction())
del self._menus[name]
def clear(self):
"""Clear all menus created by this manager."""
for menu in self._menus.values():
self._menu.removeAction(menu.menuAction())
self._menus = {}
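# Illustrative usage sketch (assumes this module runs inside IDA, where `idaapi` is available):
#   widget = get_widget("Output window")
#   resize_widget(widget, 1280, 720)
#   capture_widget(widget, "output_window.png")  # or capture_widget(widget) -> PNG bytes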
|
the-stack_106_24463 | # Sampling a truncated multivariate Gaussian by Rejection sampling from Mode
# ("A New Rejection Sampling Method for Truncated Multivariate Gaussian Random Variables
# Restricted to Convex Sets" https://hal.archives-ouvertes.fr/hal-01063978/document)
# Author: Liaowang Huang <[email protected]>
import numpy as np
import cvxpy as cp
class RsmSampler:
def __init__(self, mu, Sigma, f, g):
"""
:param mu: (m,) mean
:param Sigma: (m,m) covariance matrix
        :param f: (q,m) matrix, where q is the number of linear constraints. The constraints
            require each component of the q-dimensional vector f*X + g to be non-negative.
:param g: (q,) vector with the constant terms in the above linear constraints.
"""
self.mu = mu
self.Sigma = Sigma
if f is not None:
valid = np.logical_and(g < np.inf, g > -np.inf)
g = g[valid]
f = f[valid]
self.mode = mu
self.f = f
self.g = g
self.reject = 0
self.mode = self.mode_solver()
def mode_solver(self):
"""
        mode offset xi = argmin xi^T * Sigma^-1 * xi  subject to  f*(xi + mu) + g >= 0
        (the mode of the truncated distribution is mu + xi)
"""
if self.f is None:
return self.mu
m = len(self.mu)
xi = cp.Variable(m)
obj = cp.Minimize(cp.matrix_frac(xi, self.Sigma))
constraints = [self.f * (xi + self.mu) + self.g >= 0]
prob = cp.Problem(obj, constraints)
prob.solve()
# print("status:", prob.status)
if prob.status != "optimal":
raise ValueError('cannot compute the mode')
return xi.value
def rsm_tmg(self):
"""
        Sample from a multivariate normal N(mu, Sigma) subject to the constraints f*x + g >= 0,
        using the rejection sampling method from the paper "A New Rejection Sampling Method for
        Truncated Multivariate Gaussian Random Variables Restricted to Convex Sets".
"""
if self.f is None:
return np.random.multivariate_normal(self.mu, self.Sigma)
while True:
state = np.random.multivariate_normal(self.mode, self.Sigma)
u = np.random.uniform()
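            # Acceptance ratio exp(mode^T Sigma^-1 mode - state^T Sigma^-1 mode): because the
            # mode minimizes the quadratic form over the (shifted) constraint set, this is <= 1
            # for every feasible proposal.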
if np.all(self.f @ (state + self.mu) + self.g >= 0) and \
u <= np.exp(self.mode.dot(np.linalg.solve(self.Sigma, self.mode)) -
state.dot(np.linalg.solve(self.Sigma, self.mode))):
break
self.reject += 1
return state + self.mu
def rsm(n, mu, Sigma, f, g, verbose=False):
"""
Sampling from a multivariate normal N(mu, Sigma) with constraints f*x+g >= 0
the rejection sampling method is used.
:param n: Number of samples.
:param mu: (m,) mean
:param Sigma: (m,m) covariance matrix.
:param f: (q,m), f*x+g >= 0 must be satisfied.
:param g: (q,)
:param verbose: print acceptance rate if true.
"""
rsm_sampler = RsmSampler(mu, Sigma, f, g)
dim = len(mu)
samples = np.zeros((n, dim))
for i in range(n):
samples[i] = rsm_sampler.rsm_tmg()
if verbose:
print("Acceptance rate is {}".format(n / (n + rsm_sampler.reject)))
return samples
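# Minimal usage sketch (illustrative addition; the constraint set below -- the positive
# orthant, f = I and g = 0 -- is only an example, not part of the original module):
if __name__ == "__main__":
    mu = np.zeros(2)
    Sigma = np.eye(2)
    f = np.eye(2)
    g = np.zeros(2)
    samples = rsm(500, mu, Sigma, f, g, verbose=True)
    assert np.all(samples >= 0.0)
    print("sample mean:", samples.mean(axis=0))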
|
the-stack_106_24464 | def copy_untouched_quantities(old_state, new_state):
for key in old_state.keys():
if key not in new_state:
new_state[key] = old_state[key]
def add(state_1, state_2):
out_state = {}
if 'time' in state_1.keys():
out_state['time'] = state_1['time']
for key in state_1.keys():
if key != 'time':
out_state[key] = state_1[key] + state_2[key]
if hasattr(out_state[key], 'attrs'):
out_state[key].attrs = state_1[key].attrs
return out_state
def multiply(scalar, state):
out_state = {}
if 'time' in state.keys():
out_state['time'] = state['time']
for key in state.keys():
if key != 'time':
out_state[key] = scalar * state[key]
if hasattr(out_state[key], 'attrs'):
out_state[key].attrs = state[key].attrs
return out_state
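# Minimal usage sketch (assumed calling convention, for illustration only): a forward-Euler
# style update new_state = state + dt * tendencies, with untouched quantities copied over.
if __name__ == "__main__":
    import datetime
    import numpy as np
    state = {'time': datetime.datetime(2000, 1, 1),
             'air_temperature': np.array([280.0, 285.0])}
    tendencies = {'time': state['time'],
                  'air_temperature': np.array([1.0e-4, -2.0e-4])}  # K/s
    dt = 600.0  # seconds
    new_state = add(state, multiply(dt, tendencies))
    copy_untouched_quantities(state, new_state)
    print(new_state['air_temperature'])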
|
the-stack_106_24466 | """
Voice rooms module.
Gives users the power to create and manage their own voice chat channels
instead of relying on pre-defined channels.
"""
import asyncio
import copy
import functools
import os
import re
from typing import Callable, Dict, List, Optional, Tuple, Union
import yaml
from discord import (
Colour, Guild, HTTPException, Member, Message,
Role, TextChannel,
VoiceChannel, VoiceState
)
from discord.abc import GuildChannel
from discord.ext import commands
from discord.ext.commands import Context
from loguru import logger
from ophelia import settings
from ophelia.output.error_handler import OpheliaCommandError
from ophelia.output.output import (
disp_str, get_input, response_config, response_options, response_switch,
send_message,
send_message_embed,
send_simple_embed
)
from ophelia.settings import voiceroom_max_mute_time
from ophelia.utils.discord_utils import (
ARGUMENT_FAIL_EXCEPTIONS, in_vc, vc_is_empty, vc_members
)
from ophelia.utils.text_utils import escape_formatting
from ophelia.voicerooms.config_options import VOICEROOMS_GENERATOR_CONFIG
from ophelia.voicerooms.message_buffer import MessageBuffer
from ophelia.voicerooms.mute_manager import MuteManager
from ophelia.voicerooms.name_filter import NameFilterManager
from ophelia.voicerooms.rooms.generator import (
Generator, GeneratorLoadError,
RoomCreationError
)
from ophelia.voicerooms.rooms.roompair import (
RoomMode, RoomPair,
RoomRateLimited
)
CONFIG_VOICEROOM_TIMEOUT = settings.voiceroom_empty_timeout
CONFIG_TIMEOUT_SECONDS = settings.long_timeout
CONFIG_MAX_TRIES = settings.max_tries
CONFIG_PATH = settings.file_voicerooms_config
class VoiceroomsCog(commands.Cog, name="voicerooms"):
"""
Custom Voice Rooms.
Manage user-initiated voice rooms and accompanying text rooms.
"""
__slots__ = [
"bot",
"generators",
"rooms",
"vc_room_map",
"text_room_map",
"message_buffer",
"name_filter",
"generator_lock",
"mute_managers"
]
# Using forward references to avoid cyclic imports
# noinspection PyUnresolvedReferences
def __init__(self, bot: "OpheliaBot") -> None:
"""
Initializer for the VoiceroomsCog class.
:param bot: Ophelia bot object
"""
self.bot = bot
self.generators: Dict[int, Generator] = {}
self.rooms: Dict[int, RoomPair] = {}
self.vc_room_map: Dict[int, int] = {}
self.text_room_map: Dict[int, int] = {}
self.message_buffer = MessageBuffer()
self.name_filter = NameFilterManager.load_filters()
self.generator_lock = asyncio.Lock()
# We have to keep track of users who will be unmuted on their
# next VC join because we are unable to unmute them if they
# leave a VC in a muted state. This is a limitation set by
# Discord and we can't really circumvent it because the other
# kind of muting (using permissions) doesn't allow instant
# muting.
self.mute_managers: Dict[int, MuteManager] = {}
def get_mute(self, guild_id: int) -> MuteManager:
"""
Get guild mute manager from guild ID.
:param guild_id: Guild ID
:return: Mute manager corresponding to guild ID
"""
return self.mute_managers.setdefault(guild_id, MuteManager(guild_id))
# pylint: disable=too-few-public-methods
class VoiceroomsDecorators:
"""
Voiceroom decorators for checking relevant voicerooms before
going to command logic.
"""
@classmethod
def pass_voiceroom(cls, func: Callable) -> Callable:
"""
Decorator to pass voiceroom pair under a given owner
directly to the command.
:param func: Async function to be wrapped
:return: Wrapped function
"""
@functools.wraps(func)
async def wrapped(
self: "VoiceroomsCog",
context: Context,
*args,
**kwargs
) -> None:
"""
Inner function.
:param self: VoiceroomsCog instance
:param context: Command context
"""
author = context.author
if author.id not in self.rooms:
raise OpheliaCommandError("voicerooms_no_room")
await func(
self,
context=context,
room=self.rooms[author.id],
*args,
**kwargs
)
return wrapped
# pylint: enable=too-few-public-methods
async def cog_save_all(self) -> None:
"""Save all generator configurations before bot shutdown."""
# Dump all logs
await self.message_buffer.dump()
# Delete all channels
for room_key in copy.copy(list(self.rooms.keys())):
await self.rooms[room_key].destroy()
# Saves all generators
await self.save_generators()
# Saves all room name filters
await self.name_filter.save_filters()
async def save_generators(self) -> None:
"""Save all generator configurations for future use."""
generators_dict = {}
for channel_id, generator in self.generators.items():
generators_dict[str(channel_id)] = await generator.to_dict()
with open(CONFIG_PATH, "w", encoding="utf-8") as save_target:
yaml.dump(
generators_dict,
save_target,
default_flow_style=False
)
async def load_generators(self) -> None:
"""Load all generator configurations for use."""
if not os.path.exists(CONFIG_PATH):
return
with open(CONFIG_PATH, "r", encoding="utf-8") as file:
generators_dict = yaml.safe_load(file)
if generators_dict is None:
return
for channel_id_str, gen_dict in generators_dict.items():
try:
channel_id = int(channel_id_str)
generator = await Generator.from_dict(self.bot, gen_dict)
self.generators[channel_id] = generator
# Create new guild mute manager
guild_id: int = generator.generator_channel.guild.id
if guild_id not in self.mute_managers:
self.mute_managers[guild_id] = MuteManager(guild_id)
except GeneratorLoadError:
logger.warning(
"Failed to load generator with ID {}",
channel_id_str
)
@commands.Cog.listener()
async def on_message(self, message: Message) -> None:
"""
Checks incoming messages for any that require logging in
temporary text channels.
:param message: Message
"""
channel_id = message.channel.id
if channel_id in self.text_room_map:
if message.author.bot:
return
owner_id = self.text_room_map[channel_id]
try:
log_channel = self.rooms[owner_id].log_channel
await self.message_buffer.log_message(log_channel, message)
except KeyError:
# Zombie room, delete channel.
await message.channel.delete()
self.text_room_map.pop(channel_id, None)
@commands.Cog.listener()
async def on_guild_channel_delete(self, channel: GuildChannel) -> None:
"""
Cleans up any generators or rooms associated with deleted
channels.
:param channel: Deleted channel
"""
# noinspection PyUnresolvedReferences
channel_id = channel.id
if channel_id in self.vc_room_map:
await self.delete_room(self.vc_room_map[channel_id])
elif channel_id in self.text_room_map:
await self.delete_room(self.text_room_map[channel_id])
elif channel_id in self.rooms:
await self.delete_room(channel_id)
elif channel_id in self.generators:
self.generators.pop(channel_id, None)
await self.save_generators()
@commands.Cog.listener()
async def on_voice_state_update(
self,
member: Member,
before: VoiceState,
after: VoiceState
) -> None:
"""
Detects when a user joins or leaves a voice channel.
:param member: Guild member
:param before: Previous voice state
:param after: New voice state
"""
if before.channel == after.channel:
if before.mute and not after.mute:
await self.get_mute(member.guild.id).register_unmute(member)
elif not before.mute and after.mute:
await self.get_mute(member.guild.id).register_mute(member)
return
if after.channel is not None:
await self.on_voice_join(member, after.channel)
if before.channel is not None:
await self.on_voice_leave(member, before.channel, after.channel)
async def on_voice_join(
self,
member: Member,
channel: VoiceChannel
) -> None:
"""
Handles members joining voice channels.
:param member: Guild member
:param channel: Voice channel joined
"""
channel_id = channel.id
guild_id = channel.guild.id
# Unmute first, even if the user is gonna be muted again
# later by another VC room
await self.get_mute(guild_id).handle_join(member)
if channel_id in self.generators:
await self.on_generator_join(member, self.generators[channel_id])
if channel_id in self.vc_room_map:
owner_id = self.vc_room_map[channel_id]
if member.id == owner_id:
return
try:
room: RoomPair = self.rooms[owner_id]
text_channel = room.text_channel
voice_channel = room.voice_channel
# Grant read and write permissions:
overwrite = text_channel.overwrites_for(member)
overwrite.update(send_messages=True, read_messages=True)
await text_channel.set_permissions(member, overwrite=overwrite)
# Let room object do its thing
await room.handle_join(member, self.get_mute(guild_id))
# If on joinmute, mute user and set a timer for unmuting:
# This won't apply to the owner due to the filter
# a few lines back (intentional)
if room.current_mode == RoomMode.JOINMUTE:
# Welcome user
if room.is_tempmute():
await send_message(
channel=text_channel,
text=disp_str("voicerooms_joinmute_welcome").format(
mention=member.mention,
name=voice_channel.name,
time=room.joinmute_seconds
),
mass_ping_guard=True
)
# Schedule unmute
await room.schedule_unmute(
member,
self.get_mute(guild_id)
)
else:
message = await send_message(
channel=text_channel,
text=disp_str("voicerooms_permmute_welcome").format(
mention=member.mention,
name=voice_channel.name
),
mass_ping_guard=True
)
# Prepare unmute reaction
asyncio.create_task(room.react_unmute(
bot=self.bot,
message=message,
owner_id=owner_id,
member=member,
mute_manager=self.get_mute(guild_id)
))
except KeyError:
# Invalid zombie room; delete VC.
await channel.delete()
self.vc_room_map.pop(channel_id, None)
async def on_generator_join(
self,
member: Member,
generator: Generator
) -> None:
"""
Handles members joining generator channels.
:param member: Guild member
:param generator: Room generator
"""
async with self.generator_lock:
if member.id in self.rooms:
return
try:
room: RoomPair = await generator.create_room(
member,
self.name_filter
)
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_create_room").format(
name=member.display_name,
id=member.id
)
)
except RoomCreationError:
# Room has already been destroyed; fail silently.
return
self.rooms[member.id] = room
self.vc_room_map[room.voice_channel.id] = member.id
self.text_room_map[room.text_channel.id] = member.id
# Check if room is occupied, or if the user has left the
# room during channel creation.
if vc_is_empty(room.voice_channel):
await self.delete_room(member.id)
return
async def on_voice_leave(
self,
member: Member,
channel: VoiceChannel,
to_channel: Optional[VoiceChannel]
) -> None:
"""
Handles members leaving voice channels.
:param member: Guild member
:param channel: Voice channel left
:param to_channel: Voice channel that the member moved to
"""
channel_id = channel.id
guild_id = channel.guild.id
if channel_id in self.vc_room_map:
owner_id = self.vc_room_map[channel_id]
try:
room: RoomPair = self.rooms[owner_id]
text_channel = room.text_channel
voice_channel = room.voice_channel
# Let room object do its thing
to_room: Optional[RoomPair] = None
if to_channel is not None and to_channel.id in self.vc_room_map:
to_owner_id = self.vc_room_map[to_channel.id]
to_room = self.rooms[to_owner_id]
await room.handle_leave(
member,
to_room,
self.get_mute(guild_id)
)
# Before we remove permissions, we check that the user is
# not an owner.
if member.id != owner_id:
await text_channel.set_permissions(member, overwrite=None)
# In case of a misclick, give the user a chance to rejoin:
await asyncio.sleep(CONFIG_VOICEROOM_TIMEOUT)
# While waiting, the channel might have been deleted.
if channel_id not in self.vc_room_map:
return
# Check if channel is empty:
if vc_is_empty(voice_channel):
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_delete_room").format(
room=room.voice_channel.name,
name=member.display_name,
id=member.id
)
)
await self.delete_room(owner_id)
return
except KeyError:
# Zombie room.
await channel.delete()
self.vc_room_map.pop(channel_id, None)
@commands.group(
"voiceroom",
invoke_without_command=True,
aliases=["voicechat", "voice", "vr", "vc"]
)
@commands.bot_has_permissions(administrator=True)
@commands.guild_only()
async def voiceroom(self, context: Context, *_) -> None:
"""
Main voice room command, displays list of subcommands.
:param context: Command context
"""
await send_simple_embed(context, "voicerooms_commands")
@staticmethod
async def update_room_membership(
room: RoomPair,
member_or_role: Union[Member, Role],
new_value: Optional[bool] = True,
) -> None:
"""
Update a member or a role's permissions in a room.
Used for adding or removing roles from a private room, including
the default @everyone role which is used to set a room public
or private.
:param room: Room pair
:param member_or_role: Member or role to be added or removed
:param new_value: New permissions value
"""
voice_channel = room.voice_channel
voice_overwrite = voice_channel.overwrites_for(member_or_role)
if new_value is not None:
voice_overwrite.update(connect=new_value)
await voice_channel.set_permissions(
member_or_role,
overwrite=voice_overwrite
)
else:
await voice_channel.set_permissions(member_or_role, overwrite=None)
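# Illustrative sketch (not part of the original file) of how the commands below use
# this helper; `room`, `guild` and `some_member` are hypothetical objects:
#   await self.update_room_membership(room, guild.default_role, None)   # public: fall back to server perms
#   await self.update_room_membership(room, guild.default_role, False)  # private: deny connect for @everyone
#   await self.update_room_membership(room, some_member, True)          # explicitly allow a member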
@voiceroom.command(name="public")
@VoiceroomsDecorators.pass_voiceroom
async def set_public(self, context: Context, *_, **kwargs) -> None:
"""
Set current voice room to public.
This sets the default connection permission to None instead of
True so that the room falls back to the server's base permissions,
which lets the server control which roles can join through role
permissions.
:param context: Command context
"""
room: RoomPair = kwargs["room"]
everyone = context.guild.default_role
await self.update_room_membership(room, everyone, None)
await send_simple_embed(context, "voicerooms_public")
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_public").format(
room=room.voice_channel.name,
name=escape_formatting(context.author.name),
id=context.author.id
)
)
room.current_mode = RoomMode.PUBLIC
await room.unmute_all(self.get_mute(context.guild.id))
@voiceroom.command(name="joinmute")
@VoiceroomsDecorators.pass_voiceroom
async def set_joinmute(
self,
context: Context,
*,
mute_time: Optional[int],
**kwargs
) -> None:
"""
Set the current voice room to joinmute mode.
This will temporarily mute every new member who joins the room
for a configured amount of time
:param context: Command context
:param mute_time: Amount of time new joins are muted for
"""
room: RoomPair = kwargs["room"]
# Check if mute_time is valid
if mute_time is None:
room.joinmute_seconds = 0
await send_simple_embed(context, "voicerooms_permmute")
elif 0 < mute_time <= voiceroom_max_mute_time:
room.joinmute_seconds = mute_time
await send_simple_embed(context, "voicerooms_joinmute", mute_time)
else:
raise OpheliaCommandError(
"voicerooms_mute_too_long",
voiceroom_max_mute_time
)
# Underlying permissions are the same as public rooms
everyone = context.guild.default_role
await self.update_room_membership(room, everyone, None)
# Update room mode
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_joinmute").format(
room=room.voice_channel.name,
name=escape_formatting(context.author.name),
id=context.author.id,
time=mute_time
)
)
room.current_mode = RoomMode.JOINMUTE
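# Illustrative sketch (hypothetical command prefix, not part of the original file):
#   "!voiceroom joinmute 30"  -> new joiners are muted and scheduled for unmute
#                                after 30 seconds
#   "!voiceroom joinmute"     -> joinmute_seconds is set to 0, which the room treats
#                                as a permanent mute until the joiner reacts to the
#                                unmute prompt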
@voiceroom.command(name="private")
@VoiceroomsDecorators.pass_voiceroom
async def set_private(self, context: Context, *_, **kwargs) -> None:
"""
Set current voice room to private.
:param context: Command context
"""
room: RoomPair = kwargs["room"]
everyone = context.guild.default_role
await self.update_room_membership(room, everyone, False)
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_private").format(
room=room.voice_channel.name,
name=escape_formatting(context.author.name),
id=context.author.id
)
)
await send_simple_embed(context, "voicerooms_private")
room.current_mode = RoomMode.PRIVATE
await room.unmute_all(self.get_mute(context.guild.id))
for member in vc_members(room.voice_channel):
await self.update_room_membership(room, member, True)
@voiceroom.command(name="end")
@VoiceroomsDecorators.pass_voiceroom
async def room_end(self, context: Context, *_, **kwargs) -> None:
"""
Delete current room.
:param context: Command context
"""
room: RoomPair = kwargs["room"]
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_delete_room").format(
room=room.voice_channel.name,
name=escape_formatting(context.author.name),
id=context.author.id
)
)
await self.delete_room(context.author.id)
@voiceroom.command(name="add")
@VoiceroomsDecorators.pass_voiceroom
async def room_add(
self,
context: Context,
*,
added: Union[Member, Role],
**kwargs
) -> None:
"""
Add a role or user to a voice room.
:param context: Command context
:param added: Discord member or role
"""
room: RoomPair = kwargs["room"]
await self.update_room_membership(room, added, True)
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_add").format(
room=room.voice_channel.name,
name=escape_formatting(context.author.name),
id=context.author.id,
target=escape_formatting(added.name),
target_id=added.id
)
)
await send_simple_embed(context, "voicerooms_add", added.mention)
@voiceroom.command(name="remove", aliases=["kick"])
@VoiceroomsDecorators.pass_voiceroom
async def room_remove(
self,
context: Context,
*,
removed: Union[Member, Role],
**kwargs
) -> None:
"""
Remove a role or user from a voice room.
:param context: Command context
:param removed: Discord member or role
"""
room: RoomPair = kwargs["room"]
if isinstance(removed, Role) and "mod" in removed.name.lower():
raise OpheliaCommandError("voicerooms_remove_mod")
if context.author == removed:
raise OpheliaCommandError("voicerooms_remove_self")
await self.update_room_membership(room, removed, None)
await send_simple_embed(context, "voicerooms_remove", removed.mention)
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_remove").format(
room=room.voice_channel.name,
name=escape_formatting(context.author.name),
id=context.author.id,
target=escape_formatting(removed.name),
target_id=removed.id
)
)
if isinstance(removed, Member):
if in_vc(removed, room.voice_channel):
await removed.move_to(None)
else:
for member in vc_members(room.voice_channel):
if removed in member.roles:
await member.move_to(None)
@voiceroom.command(name="blacklist", aliases=["ban"])
@VoiceroomsDecorators.pass_voiceroom
async def room_blacklist(
self,
context: Context,
*,
removed: Member,
**kwargs
) -> None:
"""
Blacklist a member from a voiceroom.
:param context: Command context
:param removed: Discord member or role
"""
room: RoomPair = kwargs["room"]
if context.author == removed:
raise OpheliaCommandError("voicerooms_remove_self")
await self.update_room_membership(room, removed, False)
await send_simple_embed(
context,
"voicerooms_blacklist",
removed.mention
)
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_blacklist").format(
room=room.voice_channel.name,
name=escape_formatting(context.author.name),
id=context.author.id,
target=escape_formatting(removed.name),
target_id=removed.id
)
)
if in_vc(removed, room.voice_channel):
await removed.move_to(None)
@voiceroom.command(name="unblacklist", aliases=["unban"])
@VoiceroomsDecorators.pass_voiceroom
async def room_unblacklist(
self,
context: Context,
*,
removed: Member,
**kwargs
) -> None:
"""
Remove a member from a room blacklist.
:param context: Command context
:param removed: Discord member to remove from blacklist
"""
room: RoomPair = kwargs["room"]
await self.update_room_membership(room, removed, None)
await send_simple_embed(
context,
"voicerooms_unblacklist",
removed.mention
)
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_unblacklist").format(
room=room.voice_channel.name,
name=escape_formatting(context.author.name),
id=context.author.id,
target=escape_formatting(removed.name),
target_id=removed.id
)
)
@voiceroom.command(name="mute", aliases=["silence"])
@VoiceroomsDecorators.pass_voiceroom
async def room_mute(
self,
context: Context,
*,
member: Member,
**kwargs
) -> None:
"""
Mute a member.
:param context: Command context
:param member: Member to mute
"""
room: RoomPair = kwargs["room"]
if context.author == member:
raise OpheliaCommandError("voicerooms_mute_self")
if not in_vc(member, room.voice_channel):
raise OpheliaCommandError("voicerooms_mute_not_present")
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_mute").format(
room=room.voice_channel.name,
name=escape_formatting(context.author.name),
id=context.author.id,
target=escape_formatting(member.name),
target_id=member.id
)
)
await room.mute_user(member, self.get_mute(context.guild.id))
await send_simple_embed(context, "voicerooms_mute", member.mention)
@voiceroom.command(name="unmute", aliases=["unsilence"])
@VoiceroomsDecorators.pass_voiceroom
async def room_unmute(
self,
context: Context,
*,
member: Member,
**kwargs
) -> None:
"""
Unmute a member.
:param context: Command context
:param member: Member to unmute
"""
room: RoomPair = kwargs["room"]
if context.author == member:
raise OpheliaCommandError("voicerooms_unmute_self")
if not in_vc(member, room.voice_channel):
raise OpheliaCommandError("voicerooms_unmute_not_present")
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_unmute").format(
room=room.voice_channel.name,
name=escape_formatting(context.author.name),
id=context.author.id,
target=escape_formatting(member.name),
target_id=member.id
)
)
await room.unmute_user(member, self.get_mute(context.guild.id))
await send_simple_embed(context, "voicerooms_unmute", member.mention)
@voiceroom.command(name="name", aliases=["rename"])
@VoiceroomsDecorators.pass_voiceroom
async def room_name(
self,
context: Context,
*,
new_name: str,
**kwargs
) -> None:
"""
Set the name of the voice room.
:param context: Command context
:param new_name: New room name
"""
if await self.name_filter.bad_name(context.guild.id, new_name):
raise OpheliaCommandError("voicerooms_name_invalid")
room: RoomPair = kwargs["room"]
prev: str = room.voice_channel.name
try:
await room.rename(new_name)
except RoomRateLimited as e:
raise OpheliaCommandError("voicerooms_ratelimited") from e
except HTTPException as e:
raise OpheliaCommandError("voicerooms_name_invalid") from e
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_rename").format(
name=escape_formatting(context.author.name),
id=context.author.id,
prev=escape_formatting(prev),
curr=escape_formatting(new_name)
)
)
await send_simple_embed(context, "voicerooms_name", new_name)
@voiceroom.command(name="size", aliases=["resize"])
@VoiceroomsDecorators.pass_voiceroom
async def room_size(
self,
context: Context,
*,
size: int,
**kwargs
) -> None:
"""
Set the size of the voice room.
:param context: Command context
:param size: New room size
"""
room: RoomPair = kwargs["room"]
prev: int = room.voice_channel.user_limit
try:
await room.voice_channel.edit(user_limit=size)
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_resize").format(
name=escape_formatting(context.author.name),
id=context.author.id,
prev=prev,
curr=size
)
)
await send_simple_embed(context, "voicerooms_size")
except (*ARGUMENT_FAIL_EXCEPTIONS, KeyError, ValueError) as e:
raise OpheliaCommandError("voicerooms_size_invalid") from e
@voiceroom.command(name="bitrate")
@VoiceroomsDecorators.pass_voiceroom
async def room_bitrate(
self,
context: Context,
*,
bitrate: int,
**kwargs
) -> None:
"""
Set the bitrate of the voice room.
:param context: Command context
:param bitrate: New bitrate
"""
room: RoomPair = kwargs["room"]
prev: int = room.voice_channel.bitrate
try:
await room.voice_channel.edit(bitrate=bitrate * 1000)
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_bitrate").format(
name=escape_formatting(context.author.name),
id=context.author.id,
prev=prev / 1000,
curr=bitrate
)
)
await send_simple_embed(context, "voicerooms_bitrate", bitrate)
except ARGUMENT_FAIL_EXCEPTIONS as e:
raise OpheliaCommandError("voicerooms_bitrate_invalid") from e
async def transfer_room(
self,
room: RoomPair,
old_owner: Member,
new_owner: Member
) -> None:
"""
Transfer ownership of a room from one user to another user.
:param room: Pair of rooms
:param old_owner: Previous owner
:param new_owner: New Owner
"""
# Check if new owner is a bot
if new_owner.bot:
raise OpheliaCommandError("voicerooms_transfer_bot")
# Check if member already owns a room
if new_owner.id in self.rooms:
raise OpheliaCommandError("voicerooms_transfer_already_owner")
try:
await room.transfer(
old_owner,
new_owner,
self.get_mute(new_owner.guild.id)
)
except RoomRateLimited as e:
raise OpheliaCommandError("voicerooms_ratelimited") from e
old_id = old_owner.id
new_id = new_owner.id
self.text_room_map[room.text_channel.id] = new_id
self.vc_room_map[room.voice_channel.id] = new_id
self.rooms[new_id] = self.rooms.pop(old_id)
await self.message_buffer.log_system_msg(
log_channel=room.log_channel,
text_channel=room.text_channel,
text=disp_str("voicerooms_log_transfer").format(
name=escape_formatting(old_owner.name),
id=old_id,
new_name=escape_formatting(new_owner.name),
new_id=new_id
)
)
await room.text_channel.edit(
topic=disp_str(
"voicerooms_topic_format"
).format(new_owner.display_name)
)
@voiceroom.command(name="transfer")
@VoiceroomsDecorators.pass_voiceroom
async def ownership_transfer(
self,
context: Context,
*,
new_owner: Member,
**kwargs
) -> None:
"""
Command to transfer ownership of a room to another user.
:param context: Command context
:param new_owner: New owner
"""
room: RoomPair = kwargs["room"]
# Check if member is in the voice room
if not in_vc(new_owner, room.voice_channel):
raise OpheliaCommandError("voicerooms_transfer_bad_owner")
await self.transfer_room(room, context.author, new_owner)
await send_simple_embed(
context,
"voicerooms_transfer",
new_owner.mention
)
@voiceroom.command(name="list", aliases=["rooms", "knock"])
async def list_rooms(self, context: Context) -> None:
"""
Command to list all voicerooms and to allow users to knock on
them to request access.
:param context: Command context
"""
non_private_rooms: List[Tuple[int, RoomPair]] = []
private_rooms: List[Tuple[int, RoomPair]] = []
for owner_id, room in self.rooms.items():
if room.voice_channel.guild.id == context.guild.id:
if room.current_mode == RoomMode.PRIVATE:
private_rooms.append((owner_id, room))
else:
non_private_rooms.append((owner_id, room))
if not non_private_rooms and not private_rooms:
await send_simple_embed(context, "voicerooms_list_no_rooms")
return
rooms_desc: str = ""
pub_rooms_descs: List[str] = []
if non_private_rooms:
for (owner_id, room) in non_private_rooms:
pub_rooms_descs.append(
disp_str("voicerooms_public_room").format(
name=room.voice_channel.name,
owner_id=owner_id
)
)
rooms_desc += disp_str("voicerooms_public_list").format(
pub="\n".join(pub_rooms_descs)
)
priv_rooms_descs: List[str] = []
if private_rooms:
owner_id: int
room: RoomPair
for num, (owner_id, room) in enumerate(private_rooms):
priv_rooms_descs.append(
disp_str("voicerooms_private_room").format(
num=num + 1, # So that it starts from 1
name=room.voice_channel.name,
owner_id=owner_id
)
)
rooms_desc += disp_str("voicerooms_private_list").format(
priv="\n".join(priv_rooms_descs)
)
await send_simple_embed(context, "voicerooms_list_rooms", rooms_desc)
# Wait for knocking
if private_rooms:
try:
message = await get_input(
self.bot,
context,
settings.short_timeout,
check=lambda txt: (
txt.isnumeric()
and 0 < int(txt) <= len(private_rooms)
)
)
owner_id, room = private_rooms[int(message.content) - 1]
# Knock confirm
await send_simple_embed(
context,
"voicerooms_knock_confirm",
owner_id
)
# Sending the knocking message
await send_message(
channel=room.text_channel,
text=disp_str("voicerooms_knock").format(
owner_id=owner_id,
mention=context.author.mention
)
)
except asyncio.exceptions.TimeoutError:
return
@voiceroom.command(name="forcetransfer", aliases=["ftransfer"])
@commands.has_guild_permissions(administrator=True)
async def force_transfer(
self,
context: Context,
old_owner: Member,
new_owner: Member
) -> None:
"""
Command to force a transfer from a member to another member.
:param context: Command context
:param old_owner: Old owner
:param new_owner: New owner
"""
if old_owner.id not in self.rooms:
raise OpheliaCommandError("voicerooms_transfer_bad_old_owner")
room: RoomPair = self.rooms[old_owner.id]
await self.transfer_room(room, old_owner, new_owner)
await send_simple_embed(
context,
"voicerooms_transfer",
new_owner.mention
)
@voiceroom.command(name="setup", aliases=["generator", "gen"])
@commands.has_guild_permissions(administrator=True)
async def create_generator(self, context: Context) -> None:
"""
Configure voice room generator.
:param context: Command context
"""
message = await send_simple_embed(
context,
"voicerooms_generator",
colour=Colour(settings.embed_color_important)
)
await response_config(
bot=self.bot,
context=context,
message=message,
config_items=VOICEROOMS_GENERATOR_CONFIG,
response_call=self.confirm_create_generator,
timeout_seconds=CONFIG_TIMEOUT_SECONDS,
timeout_exception=OpheliaCommandError("voicerooms_timeout"),
response_tries=CONFIG_MAX_TRIES
)
async def confirm_create_generator(
self,
context: Context,
config_vars: dict
) -> None:
"""
Confirm creation of generator.
:param context: Command context
:param config_vars: Configuration variables
"""
author = context.author
text_category = config_vars["text_category"]
voice_category = config_vars["voice_category"]
generator_channel: VoiceChannel = config_vars["generator_channel"]
log_channel = config_vars["log_channel"]
sample_voice_channel: VoiceChannel = config_vars["sample_voice_channel"]
sample_text_channel: TextChannel = config_vars["sample_text_channel"]
default_voice_perms = sample_voice_channel.overwrites
owner_voice_perms = sample_voice_channel.overwrites_for(author)
default_voice_perms.pop(author, None)
default_text_perms = sample_text_channel.overwrites
owner_text_perms = sample_text_channel.overwrites_for(author)
default_text_perms.pop(author, None)
generator = Generator(
voice_category=voice_category,
text_category=text_category,
generator_channel=generator_channel,
default_voice_perms=default_voice_perms,
owner_voice_perms=owner_voice_perms,
default_text_perms=default_text_perms,
owner_text_perms=owner_text_perms,
log_channel=log_channel
)
self.generators[generator_channel.id] = generator
await send_simple_embed(
context,
"voicerooms_generator_success",
generator_channel.name
)
@voiceroom.command(name="updategen", aliases=["updategenerator", "ug"])
@VoiceroomsDecorators.pass_voiceroom
@commands.has_guild_permissions(administrator=True)
async def generator_update(
self,
context: Context,
*,
channel_id_str: str,
**kwargs
) -> None:
"""
Update generator permission settings.
:param context: Command context
:param channel_id_str: Generator ID as a string
"""
guild: Guild = context.guild
if not channel_id_str.isnumeric():
raise OpheliaCommandError("voicerooms_error_not_channel_id")
channel_id = int(channel_id_str)
channel = guild.get_channel(channel_id)
if channel_id not in self.generators or channel is None:
raise OpheliaCommandError("voicerooms_error_invalid_channel")
room: RoomPair = kwargs["room"]
author = context.author
prompt = await send_simple_embed(context.channel, "voicerooms_update")
await response_switch(
bot=self.bot,
context=context,
message=prompt,
options={
"y": self.confirm_update_generator(room, author, channel),
"n": self.cancel_update_generator
},
timeout_seconds=CONFIG_TIMEOUT_SECONDS,
timeout_exception=OpheliaCommandError("voicerooms_timeout")
)
def confirm_update_generator(
self,
room: RoomPair,
author: Member,
generator_channel: VoiceChannel
) -> Callable:
"""
Confirm generator update.
:param room: Room pair used as a template
:param author: Room owner
:param generator_channel: Generator to be updated
"""
async def func(context: Context) -> None:
"""Internal function."""
default_voice_perms = room.voice_channel.overwrites
owner_voice_perms = room.voice_channel.overwrites_for(author)
default_voice_perms.pop(author, None)
default_text_perms = room.text_channel.overwrites
owner_text_perms = room.text_channel.overwrites_for(author)
default_text_perms.pop(author, None)
self.generators[generator_channel.id].update_perms(
default_voice_perms=default_voice_perms,
owner_voice_perms=owner_voice_perms,
default_text_perms=default_text_perms,
owner_text_perms=owner_text_perms
)
await send_simple_embed(context, "voicerooms_update_confirm")
return func
@staticmethod
async def cancel_update_generator(context: Context) -> None:
"""
Cancel generator update.
:param context: Command context
"""
await send_simple_embed(context, "voicerooms_update_cancel")
@voiceroom.command(name="listgen", aliases=["listgenerators", "lg"])
@commands.has_guild_permissions(administrator=True)
async def generator_list(self, context: Context) -> None:
"""
List all generators in a server.
:param context: Command context
"""
guild: Guild = context.guild
string_builder = []
for channel_id in self.generators:
channel = guild.get_channel(channel_id)
if channel is not None:
string_builder.append(
disp_str("voicerooms_list_item").format(
channel.id,
channel.name
)
)
await send_message_embed(
channel=context,
title=disp_str("voicerooms_list_title"),
desc="\n".join(string_builder)
)
@voiceroom.command(name="listall", aliases=["la"])
@commands.is_owner()
async def generator_listall(self, context: Context) -> None:
"""
List all generators.
:param context: Command context
"""
string_builder = []
for channel_id in self.generators:
channel = self.bot.get_channel(channel_id)
name = (
disp_str("voicerooms_list_none")
if channel is None else channel.name
)
string_builder.append(
disp_str("voicerooms_list_item").format(channel_id, name)
)
await send_message_embed(
channel=context,
title=disp_str("voicerooms_listall_title"),
desc="\n".join(string_builder)
)
@voiceroom.command(name="admindelete", aliases=["admindel", "ad"])
@commands.has_guild_permissions(administrator=True)
async def generator_admindelete(
self,
context: Context,
*,
channel_id_str: str
) -> None:
"""
Delete a generator from a server.
:param context: Command context
:param channel_id_str: String of generator voice channel ID
"""
guild: Guild = context.guild
if not channel_id_str.isnumeric():
raise OpheliaCommandError("voicerooms_error_not_channel_id")
channel_id = int(channel_id_str)
channel = guild.get_channel(channel_id)
if channel_id not in self.generators or channel is None:
raise OpheliaCommandError("voicerooms_error_invalid_channel")
self.generators.pop(channel_id, None)
await self.save_generators()
await send_simple_embed(context, "voicerooms_delete_success")
@voiceroom.command(name="filter", aliases=["f"])
@commands.has_guild_permissions(administrator=True)
async def list_filters(
self,
context: Context,
*,
regex_str: Optional[str]
) -> None:
"""
Adds or removes a room name filter from a guild, or lists all
filters if no arguments are provided.
:param context: Command context
:param regex_str: Regex filter to add or remove
"""
if regex_str is None:
await send_simple_embed(
context,
"voicerooms_filter_list",
"\n".join(
f"`{filt}`" for filt
in await self.name_filter.list_filters(context.guild.id)
)
)
return
try:
added = await self.name_filter.add_filter(
context.guild.id,
regex_str
)
if added:
await send_simple_embed(
context,
"voicerooms_filter_added",
regex_str
)
else:
await send_simple_embed(
context,
"voicerooms_filter_deleted",
regex_str
)
except re.error as e:
raise OpheliaCommandError("voicerooms_filter_regex_error") from e
@voiceroom.command(name="forcedelete", aliases=["forcedel", "fd"])
@commands.is_owner()
async def generator_forcedelete(
self,
context: Context,
*,
channel_id_str: str
) -> None:
"""
Force delete a generator.
:param context: Command context
:param channel_id_str: String of generator voice channel ID
"""
channel_id = int(channel_id_str)
if channel_id not in self.generators:
raise OpheliaCommandError("voicerooms_error_invalid_channel")
self.generators.pop(channel_id, None)
await self.save_generators()
await send_simple_embed(context, "voicerooms_delete_success")
async def delete_room(self, owner_id: int) -> None:
"""
Delete room.
This method need not be used on bot shutdown since it would do
the necessary steps of cleaning up the dictionary mappings.
:param owner_id: Room owner
"""
room = self.rooms.pop(owner_id, None)
if room is None:
return
self.vc_room_map.pop(room.voice_channel.id, None)
self.text_room_map.pop(room.text_channel.id, None)
await room.destroy()
|
the-stack_106_24468 | """Support for SmartThings Cloud."""
import asyncio
import logging
from typing import Iterable
from aiohttp.client_exceptions import (
ClientConnectionError, ClientResponseError)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect, async_dispatcher_send)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .config_flow import SmartThingsFlowHandler # noqa
from .const import (
CONF_APP_ID, CONF_INSTALLED_APP_ID, DATA_BROKERS, DATA_MANAGER, DOMAIN,
EVENT_BUTTON, SIGNAL_SMARTTHINGS_UPDATE, SUPPORTED_PLATFORMS)
from .smartapp import (
setup_smartapp, setup_smartapp_endpoint, validate_installed_app)
REQUIREMENTS = ['pysmartapp==0.3.0', 'pysmartthings==0.6.1']
DEPENDENCIES = ['webhook']
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Initialize the SmartThings platform."""
await setup_smartapp_endpoint(hass)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Initialize config entry which represents an installed SmartApp."""
from pysmartthings import SmartThings
if not hass.config.api.base_url.lower().startswith('https://'):
_LOGGER.warning("The 'base_url' of the 'http' component must be "
"configured and start with 'https://'")
return False
api = SmartThings(async_get_clientsession(hass),
entry.data[CONF_ACCESS_TOKEN])
remove_entry = False
try:
# See if the app is already setup. This occurs when there are
# installs in multiple SmartThings locations (valid use-case)
manager = hass.data[DOMAIN][DATA_MANAGER]
smart_app = manager.smartapps.get(entry.data[CONF_APP_ID])
if not smart_app:
# Validate and setup the app.
app = await api.app(entry.data[CONF_APP_ID])
smart_app = setup_smartapp(hass, app)
# Validate and retrieve the installed app.
installed_app = await validate_installed_app(
api, entry.data[CONF_INSTALLED_APP_ID])
# Get devices and their current status
devices = await api.devices(
location_ids=[installed_app.location_id])
async def retrieve_device_status(device):
try:
await device.status.refresh()
except ClientResponseError:
_LOGGER.debug("Unable to update status for device: %s (%s), "
"the device will be ignored",
device.label, device.device_id, exc_info=True)
devices.remove(device)
await asyncio.gather(*[retrieve_device_status(d)
for d in devices.copy()])
# Setup device broker
broker = DeviceBroker(hass, devices,
installed_app.installed_app_id)
broker.event_handler_disconnect = \
smart_app.connect_event(broker.event_handler)
hass.data[DOMAIN][DATA_BROKERS][entry.entry_id] = broker
except ClientResponseError as ex:
if ex.status in (401, 403):
_LOGGER.exception("Unable to setup config entry '%s' - please "
"reconfigure the integration", entry.title)
remove_entry = True
else:
_LOGGER.debug(ex, exc_info=True)
raise ConfigEntryNotReady
except (ClientConnectionError, RuntimeWarning) as ex:
_LOGGER.debug(ex, exc_info=True)
raise ConfigEntryNotReady
if remove_entry:
hass.async_create_task(
hass.config_entries.async_remove(entry.entry_id))
# only create new flow if there isn't a pending one for SmartThings.
flows = hass.config_entries.flow.async_progress()
if not [flow for flow in flows if flow['handler'] == DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={'source': 'import'}))
return False
for component in SUPPORTED_PLATFORMS:
hass.async_create_task(hass.config_entries.async_forward_entry_setup(
entry, component))
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS].pop(entry.entry_id, None)
if broker and broker.event_handler_disconnect:
broker.event_handler_disconnect()
tasks = [hass.config_entries.async_forward_entry_unload(entry, component)
for component in SUPPORTED_PLATFORMS]
return all(await asyncio.gather(*tasks))
class DeviceBroker:
"""Manages an individual SmartThings config entry."""
def __init__(self, hass: HomeAssistantType, devices: Iterable,
installed_app_id: str):
"""Create a new instance of the DeviceBroker."""
self._hass = hass
self._installed_app_id = installed_app_id
self.devices = {device.device_id: device for device in devices}
self.event_handler_disconnect = None
async def event_handler(self, req, resp, app):
"""Broker for incoming events."""
from pysmartapp.event import EVENT_TYPE_DEVICE
from pysmartthings import Capability, Attribute
# Do not process events received from a different installed app
# under the same parent SmartApp (valid use-scenario)
if req.installed_app_id != self._installed_app_id:
return
updated_devices = set()
for evt in req.events:
if evt.event_type != EVENT_TYPE_DEVICE:
continue
device = self.devices.get(evt.device_id)
if not device:
continue
device.status.apply_attribute_update(
evt.component_id, evt.capability, evt.attribute, evt.value)
# Fire events for buttons
if evt.capability == Capability.button and \
evt.attribute == Attribute.button:
data = {
'component_id': evt.component_id,
'device_id': evt.device_id,
'location_id': evt.location_id,
'value': evt.value,
'name': device.label
}
self._hass.bus.async_fire(EVENT_BUTTON, data)
_LOGGER.debug("Fired button event: %s", data)
updated_devices.add(device.device_id)
_LOGGER.debug("Update received with %s events and updated %s devices",
len(req.events), len(updated_devices))
async_dispatcher_send(self._hass, SIGNAL_SMARTTHINGS_UPDATE,
updated_devices)
class SmartThingsEntity(Entity):
"""Defines a SmartThings entity."""
def __init__(self, device):
"""Initialize the instance."""
self._device = device
self._dispatcher_remove = None
async def async_added_to_hass(self):
"""Device added to hass."""
async def async_update_state(devices):
"""Update device state."""
if self._device.device_id in devices:
await self.async_update_ha_state(True)
self._dispatcher_remove = async_dispatcher_connect(
self.hass, SIGNAL_SMARTTHINGS_UPDATE, async_update_state)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect the device when removed."""
if self._dispatcher_remove:
self._dispatcher_remove()
@property
def device_info(self):
"""Get attributes about the device."""
return {
'identifiers': {
(DOMAIN, self._device.device_id)
},
'name': self._device.label,
'model': self._device.device_type_name,
'manufacturer': 'Unavailable'
}
@property
def name(self) -> str:
"""Return the name of the device."""
return self._device.label
@property
def should_poll(self) -> bool:
"""No polling needed for this device."""
return False
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._device.device_id
|
the-stack_106_24469 | import abc
from itertools import chain
from pathlib import Path
from typing import Set, List, Dict, Iterator, Tuple, Any, Union, Type, Optional, Callable
from dbt.dataclass_schema import StrEnum
from .graph import UniqueId
from dbt.contracts.graph.compiled import (
CompiledSingularTestNode,
CompiledGenericTestNode,
CompileResultNode,
ManifestNode,
)
from dbt.contracts.graph.manifest import Manifest, WritableManifest
from dbt.contracts.graph.parsed import (
HasTestMetadata,
ParsedSingularTestNode,
ParsedExposure,
ParsedMetric,
ParsedGenericTestNode,
ParsedSourceDefinition,
)
from dbt.contracts.state import PreviousState
from dbt.exceptions import (
InternalException,
RuntimeException,
)
from dbt.node_types import NodeType
SELECTOR_GLOB = "*"
SELECTOR_DELIMITER = ":"
class MethodName(StrEnum):
FQN = "fqn"
Tag = "tag"
Source = "source"
Path = "path"
Package = "package"
Config = "config"
TestName = "test_name"
TestType = "test_type"
ResourceType = "resource_type"
State = "state"
Exposure = "exposure"
Metric = "metric"
Result = "result"
SourceStatus = "source_status"
def is_selected_node(fqn: List[str], node_selector: str):
# If qualified_name exactly matches model name (fqn's leaf), return True
if fqn[-1] == node_selector:
return True
# Flatten node parts. Dots in model names act as namespace separators
flat_fqn = [item for segment in fqn for item in segment.split(".")]
# Selector components cannot be more than fqn's
if len(flat_fqn) < len(node_selector.split(".")):
return False
for i, selector_part in enumerate(node_selector.split(".")):
# if we hit a GLOB, then this node is selected
if selector_part == SELECTOR_GLOB:
return True
elif flat_fqn[i] == selector_part:
continue
else:
return False
# if we get all the way down here, then the node is a match
return True
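# Illustrative sketch (hypothetical fqn, not from the original source):
#   is_selected_node(["my_pkg", "staging", "stg_orders"], "stg_orders")        # True: leaf name match
#   is_selected_node(["my_pkg", "staging", "stg_orders"], "my_pkg.staging.*")  # True: glob hit
#   is_selected_node(["my_pkg", "staging", "stg_orders"], "staging.*")         # False here; the
#       package-stripped retry happens in QualifiedNameSelectorMethod.node_is_match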
SelectorTarget = Union[ParsedSourceDefinition, ManifestNode, ParsedExposure, ParsedMetric]
class SelectorMethod(metaclass=abc.ABCMeta):
def __init__(
self, manifest: Manifest, previous_state: Optional[PreviousState], arguments: List[str]
):
self.manifest: Manifest = manifest
self.previous_state = previous_state
self.arguments: List[str] = arguments
def parsed_nodes(
self, included_nodes: Set[UniqueId]
) -> Iterator[Tuple[UniqueId, ManifestNode]]:
for key, node in self.manifest.nodes.items():
unique_id = UniqueId(key)
if unique_id not in included_nodes:
continue
yield unique_id, node
def source_nodes(
self, included_nodes: Set[UniqueId]
) -> Iterator[Tuple[UniqueId, ParsedSourceDefinition]]:
for key, source in self.manifest.sources.items():
unique_id = UniqueId(key)
if unique_id not in included_nodes:
continue
yield unique_id, source
def exposure_nodes(
self, included_nodes: Set[UniqueId]
) -> Iterator[Tuple[UniqueId, ParsedExposure]]:
for key, exposure in self.manifest.exposures.items():
unique_id = UniqueId(key)
if unique_id not in included_nodes:
continue
yield unique_id, exposure
def metric_nodes(
self, included_nodes: Set[UniqueId]
) -> Iterator[Tuple[UniqueId, ParsedMetric]]:
for key, metric in self.manifest.metrics.items():
unique_id = UniqueId(key)
if unique_id not in included_nodes:
continue
yield unique_id, metric
def all_nodes(
self, included_nodes: Set[UniqueId]
) -> Iterator[Tuple[UniqueId, SelectorTarget]]:
yield from chain(
self.parsed_nodes(included_nodes),
self.source_nodes(included_nodes),
self.exposure_nodes(included_nodes),
self.metric_nodes(included_nodes),
)
def configurable_nodes(
self, included_nodes: Set[UniqueId]
) -> Iterator[Tuple[UniqueId, CompileResultNode]]:
yield from chain(self.parsed_nodes(included_nodes), self.source_nodes(included_nodes))
def non_source_nodes(
self,
included_nodes: Set[UniqueId],
) -> Iterator[Tuple[UniqueId, Union[ParsedExposure, ManifestNode, ParsedMetric]]]:
yield from chain(
self.parsed_nodes(included_nodes),
self.exposure_nodes(included_nodes),
self.metric_nodes(included_nodes),
)
@abc.abstractmethod
def search(
self,
included_nodes: Set[UniqueId],
selector: str,
) -> Iterator[UniqueId]:
raise NotImplementedError("subclasses should implement this")
class QualifiedNameSelectorMethod(SelectorMethod):
def node_is_match(self, qualified_name: str, fqn: List[str]) -> bool:
"""Determine if a qualified name matches an fqn for all package
names in the graph.
:param str qualified_name: The qualified name to match the nodes with
:param List[str] fqn: The node's fully qualified name in the graph.
"""
unscoped_fqn = fqn[1:]
if is_selected_node(fqn, qualified_name):
return True
# Match nodes across different packages
elif is_selected_node(unscoped_fqn, qualified_name):
return True
return False
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
"""Yield all nodes in the graph that match the selector.
:param str selector: The selector or node name
"""
parsed_nodes = list(self.parsed_nodes(included_nodes))
for node, real_node in parsed_nodes:
if self.node_is_match(selector, real_node.fqn):
yield node
class TagSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
"""yields nodes from included that have the specified tag"""
for node, real_node in self.all_nodes(included_nodes):
if selector in real_node.tags:
yield node
class SourceSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
"""yields nodes from included are the specified source."""
parts = selector.split(".")
target_package = SELECTOR_GLOB
if len(parts) == 1:
target_source, target_table = parts[0], None
elif len(parts) == 2:
target_source, target_table = parts
elif len(parts) == 3:
target_package, target_source, target_table = parts
else: # len(parts) > 3 or len(parts) == 0
msg = (
'Invalid source selector value "{}". Sources must be of the '
"form `${{source_name}}`, "
"`${{source_name}}.${{target_name}}`, or "
"`${{package_name}}.${{source_name}}.${{target_name}}"
).format(selector)
raise RuntimeException(msg)
for node, real_node in self.source_nodes(included_nodes):
if target_package not in (real_node.package_name, SELECTOR_GLOB):
continue
if target_source not in (real_node.source_name, SELECTOR_GLOB):
continue
if target_table not in (None, real_node.name, SELECTOR_GLOB):
continue
yield node
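# Illustrative sketch (hypothetical names): the three selector shapes accepted above.
#   "raw_events"                 -> every table of source "raw_events" in any package
#   "raw_events.orders"          -> a single source table
#   "my_pkg.raw_events.orders"   -> the same table, restricted to package "my_pkg"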
class ExposureSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
parts = selector.split(".")
target_package = SELECTOR_GLOB
if len(parts) == 1:
target_name = parts[0]
elif len(parts) == 2:
target_package, target_name = parts
else:
msg = (
'Invalid exposure selector value "{}". Exposures must be of '
"the form ${{exposure_name}} or "
"${{exposure_package.exposure_name}}"
).format(selector)
raise RuntimeException(msg)
for node, real_node in self.exposure_nodes(included_nodes):
if target_package not in (real_node.package_name, SELECTOR_GLOB):
continue
if target_name not in (real_node.name, SELECTOR_GLOB):
continue
yield node
class MetricSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
parts = selector.split(".")
target_package = SELECTOR_GLOB
if len(parts) == 1:
target_name = parts[0]
elif len(parts) == 2:
target_package, target_name = parts
else:
msg = (
'Invalid metric selector value "{}". Metrics must be of '
"the form ${{metric_name}} or "
"${{metric_package.metric_name}}"
).format(selector)
raise RuntimeException(msg)
for node, real_node in self.metric_nodes(included_nodes):
if target_package not in (real_node.package_name, SELECTOR_GLOB):
continue
if target_name not in (real_node.name, SELECTOR_GLOB):
continue
yield node
class PathSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
"""Yields nodes from inclucded that match the given path."""
# use '.' and not 'root' for easy comparison
root = Path.cwd()
paths = set(p.relative_to(root) for p in root.glob(selector))
for node, real_node in self.all_nodes(included_nodes):
if Path(real_node.root_path) != root:
continue
ofp = Path(real_node.original_file_path)
if ofp in paths:
yield node
elif any(parent in paths for parent in ofp.parents):
yield node
class PackageSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
"""Yields nodes from included that have the specified package"""
for node, real_node in self.all_nodes(included_nodes):
if real_node.package_name == selector:
yield node
def _getattr_descend(obj: Any, attrs: List[str]) -> Any:
value = obj
for attr in attrs:
try:
value = getattr(value, attr)
except AttributeError:
# if it implements getitem (dict, list, ...), use that. On failure,
# raise an attribute error instead of the KeyError, TypeError, etc.
# that arbitrary getitem calls might raise
try:
value = value[attr]
except Exception as exc:
raise AttributeError(f"'{type(value)}' object has no attribute '{attr}'") from exc
return value
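# Illustrative sketch (hypothetical config value): attribute access is tried first,
# then item access, so nested dicts inside a config object resolve as well.
#   _getattr_descend({"meta": {"owner": "data-eng"}}, ["meta", "owner"])  # -> "data-eng"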
class CaseInsensitive(str):
def __eq__(self, other):
if isinstance(other, str):
return self.upper() == other.upper()
else:
return self.upper() == other
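# Illustrative sketch: severity values compare case-insensitively, so
#   CaseInsensitive("warn") == "WARN"   # -> True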
class ConfigSelectorMethod(SelectorMethod):
def search(
self,
included_nodes: Set[UniqueId],
selector: Any,
) -> Iterator[UniqueId]:
parts = self.arguments
# special case: if the user wanted to compare test severity,
# make the comparison case-insensitive
if parts == ["severity"]:
selector = CaseInsensitive(selector)
# searching sources is kind of useless now that source configs only have
# 'enabled', which you can't really filter on anyway, but maybe we'll
# add more someday, so search them anyway.
for node, real_node in self.configurable_nodes(included_nodes):
try:
value = _getattr_descend(real_node.config, parts)
except AttributeError:
continue
else:
if selector == value:
yield node
class ResourceTypeSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
try:
resource_type = NodeType(selector)
except ValueError as exc:
raise RuntimeException(f'Invalid resource_type selector "{selector}"') from exc
for node, real_node in self.parsed_nodes(included_nodes):
if real_node.resource_type == resource_type:
yield node
class TestNameSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
for node, real_node in self.parsed_nodes(included_nodes):
if isinstance(real_node, HasTestMetadata):
if real_node.test_metadata.name == selector:
yield node
class TestTypeSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
search_types: Tuple[Type, ...]
# continue supporting 'schema' + 'data' for backwards compatibility
if selector in ("generic", "schema"):
search_types = (ParsedGenericTestNode, CompiledGenericTestNode)
elif selector in ("singular", "data"):
search_types = (ParsedSingularTestNode, CompiledSingularTestNode)
else:
raise RuntimeException(
f'Invalid test type selector {selector}: expected "generic" or ' '"singular"'
)
for node, real_node in self.parsed_nodes(included_nodes):
if isinstance(real_node, search_types):
yield node
class StateSelectorMethod(SelectorMethod):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.modified_macros: Optional[List[str]] = None
def _macros_modified(self) -> List[str]:
# we checked in the caller!
if self.previous_state is None or self.previous_state.manifest is None:
raise InternalException("No comparison manifest in _macros_modified")
old_macros = self.previous_state.manifest.macros
new_macros = self.manifest.macros
modified = []
for uid, macro in new_macros.items():
if uid in old_macros:
old_macro = old_macros[uid]
if macro.macro_sql != old_macro.macro_sql:
modified.append(uid)
else:
modified.append(uid)
for uid, macro in old_macros.items():
if uid not in new_macros:
modified.append(uid)
return modified
def recursively_check_macros_modified(self, node, visited_macros):
# loop through all macros that this node depends on
for macro_uid in node.depends_on.macros:
# avoid infinite recursion if we've already seen this macro
if macro_uid in visited_macros:
continue
visited_macros.append(macro_uid)
# is this macro one of the modified macros?
if macro_uid in self.modified_macros:
return True
# if not, and this macro depends on other macros, keep looping
macro_node = self.manifest.macros[macro_uid]
if len(macro_node.depends_on.macros) > 0:
return self.recursively_check_macros_modified(macro_node, visited_macros)
# this macro hasn't been modified, but we haven't checked
# the other macros the node depends on, so keep looking
elif len(node.depends_on.macros) > len(visited_macros):
continue
else:
return False
def check_macros_modified(self, node):
# check if there are any changes in macros the first time
if self.modified_macros is None:
self.modified_macros = self._macros_modified()
# no macros have been modified, skip looping entirely
if not self.modified_macros:
return False
# recursively loop through upstream macros to see if any is modified
else:
visited_macros = []
return self.recursively_check_macros_modified(node, visited_macros)
# TODO: check_modified_content and check_modified_macros seem a bit redundant
def check_modified_content(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
different_contents = not new.same_contents(old) # type: ignore
upstream_macro_change = self.check_macros_modified(new)
return different_contents or upstream_macro_change
def check_modified_macros(self, _, new: SelectorTarget) -> bool:
return self.check_macros_modified(new)
@staticmethod
def check_modified_factory(
compare_method: str,
) -> Callable[[Optional[SelectorTarget], SelectorTarget], bool]:
# get a function that compares two selector target based on compare method provided
def check_modified_things(old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
if hasattr(new, compare_method):
# when old body does not exist or old and new are not the same
return not old or not getattr(new, compare_method)(old) # type: ignore
else:
return False
return check_modified_things
def check_new(self, old: Optional[SelectorTarget], new: SelectorTarget) -> bool:
return old is None
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
if self.previous_state is None or self.previous_state.manifest is None:
raise RuntimeException("Got a state selector method, but no comparison manifest")
state_checks = {
# it's new if there is no old version
"new": lambda old, _: old is None,
# use methods defined above to compare properties of old + new
"modified": self.check_modified_content,
"modified.body": self.check_modified_factory("same_body"),
"modified.configs": self.check_modified_factory("same_config"),
"modified.persisted_descriptions": self.check_modified_factory(
"same_persisted_description"
),
"modified.relation": self.check_modified_factory("same_database_representation"),
"modified.macros": self.check_modified_macros,
}
if selector in state_checks:
checker = state_checks[selector]
else:
raise RuntimeException(
f'Got an invalid selector "{selector}", expected one of ' f'"{list(state_checks)}"'
)
manifest: WritableManifest = self.previous_state.manifest
for node, real_node in self.all_nodes(included_nodes):
previous_node: Optional[SelectorTarget] = None
if node in manifest.nodes:
previous_node = manifest.nodes[node]
elif node in manifest.sources:
previous_node = manifest.sources[node]
elif node in manifest.exposures:
previous_node = manifest.exposures[node]
elif node in manifest.metrics:
previous_node = manifest.metrics[node]
if checker(previous_node, real_node):
yield node
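# Illustrative sketch: the selector string is the part after "state:", e.g.
#   "state:modified"       -> nodes whose contents or upstream macros changed
#   "state:modified.body"  -> only body changes
#   "state:new"            -> nodes absent from the comparison manifest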
class ResultSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
if self.previous_state is None or self.previous_state.results is None:
raise InternalException("No comparison run_results")
matches = set(
result.unique_id for result in self.previous_state.results if result.status == selector
)
for node, real_node in self.all_nodes(included_nodes):
if node in matches:
yield node
class SourceStatusSelectorMethod(SelectorMethod):
def search(self, included_nodes: Set[UniqueId], selector: str) -> Iterator[UniqueId]:
if self.previous_state is None or self.previous_state.sources is None:
raise InternalException(
"No previous state comparison freshness results in sources.json"
)
elif self.previous_state.sources_current is None:
raise InternalException(
"No current state comparison freshness results in sources.json"
)
current_state_sources = {
result.unique_id: getattr(result, "max_loaded_at", None)
for result in self.previous_state.sources_current.results
if hasattr(result, "max_loaded_at")
}
current_state_sources_runtime_error = {
result.unique_id
for result in self.previous_state.sources_current.results
if not hasattr(result, "max_loaded_at")
}
previous_state_sources = {
result.unique_id: getattr(result, "max_loaded_at", None)
for result in self.previous_state.sources.results
if hasattr(result, "max_loaded_at")
}
previous_state_sources_runtime_error = {
result.unique_id
for result in self.previous_state.sources.results
if not hasattr(result, "max_loaded_at")
}
matches = set()
if selector == "fresher":
for unique_id in current_state_sources:
if unique_id not in previous_state_sources:
matches.add(unique_id)
elif current_state_sources[unique_id] > previous_state_sources[unique_id]:
matches.add(unique_id)
for unique_id in matches:
if (
unique_id in previous_state_sources_runtime_error
or unique_id in current_state_sources_runtime_error
):
matches.remove(unique_id)
for node, real_node in self.all_nodes(included_nodes):
if node in matches:
yield node
class MethodManager:
SELECTOR_METHODS: Dict[MethodName, Type[SelectorMethod]] = {
MethodName.FQN: QualifiedNameSelectorMethod,
MethodName.Tag: TagSelectorMethod,
MethodName.Source: SourceSelectorMethod,
MethodName.Path: PathSelectorMethod,
MethodName.Package: PackageSelectorMethod,
MethodName.Config: ConfigSelectorMethod,
MethodName.TestName: TestNameSelectorMethod,
MethodName.TestType: TestTypeSelectorMethod,
MethodName.ResourceType: ResourceTypeSelectorMethod,
MethodName.State: StateSelectorMethod,
MethodName.Exposure: ExposureSelectorMethod,
MethodName.Metric: MetricSelectorMethod,
MethodName.Result: ResultSelectorMethod,
MethodName.SourceStatus: SourceStatusSelectorMethod,
}
def __init__(
self,
manifest: Manifest,
previous_state: Optional[PreviousState],
):
self.manifest = manifest
self.previous_state = previous_state
def get_method(self, method: MethodName, method_arguments: List[str]) -> SelectorMethod:
if method not in self.SELECTOR_METHODS:
raise InternalException(
f'Method name "{method}" is a valid node selection '
f"method name, but it is not handled"
)
cls: Type[SelectorMethod] = self.SELECTOR_METHODS[method]
return cls(self.manifest, self.previous_state, method_arguments)
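# Illustrative sketch (hypothetical manifest and node ids): resolving a method and
# running a search with it.
#   manager = MethodManager(manifest, previous_state=None)
#   tag_method = manager.get_method(MethodName.Tag, method_arguments=[])
#   nightly = set(tag_method.search(all_unique_ids, "nightly"))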
|
the-stack_106_24471 | import os
import logging
import shutil
import tempfile
import re
from pathlib import Path
from ocs_ci.helpers.helpers import storagecluster_independent_check
from ocs_ci.ocs.resources.pod import get_all_pods
from ocs_ci.ocs.utils import collect_ocs_logs
from ocs_ci.ocs.must_gather.const_must_gather import GATHER_COMMANDS_VERSION
from ocs_ci.ocs.ocp import get_ocs_parsed_version
logger = logging.getLogger(__name__)
class MustGather(object):
"""
MustGather Class
"""
def __init__(self):
self.type_log = None
self.root = None
self.files_path = dict()
self.empty_files = list()
self.files_not_exist = list()
self.files_content_issue = list()
@property
def log_type(self):
return self.type_log
@log_type.setter
def log_type(self, type_log):
if not isinstance(type_log, str):
raise ValueError("log type arg must be a string")
self.type_log = type_log
def collect_must_gather(self):
"""
Collect ocs_must_gather and copy the logs to a temporary folder.
"""
temp_folder = tempfile.mkdtemp()
collect_ocs_logs(dir_name=temp_folder, ocp=False)
self.root = temp_folder + "_ocs_logs"
def search_file_path(self):
"""
Search File Path
"""
version = get_ocs_parsed_version()
if self.type_log == "OTHERS" and storagecluster_independent_check():
files = GATHER_COMMANDS_VERSION[version]["OTHERS_EXTERNAL"]
else:
files = GATHER_COMMANDS_VERSION[version][self.type_log]
for file in files:
self.files_not_exist.append(file)
for dir_name, subdir_list, files_list in os.walk(self.root):
if file in files_list:
self.files_path[file] = os.path.join(dir_name, file)
self.files_not_exist.remove(file)
break
def validate_file_size(self):
"""
Validate the file is not empty
"""
for path, subdirs, files in os.walk(self.root):
for file in files:
file_path = os.path.join(path, file)
if Path(file_path).stat().st_size == 0:
logger.error(f"log file {file} empty!")
self.empty_files.append(file)
def validate_expected_files(self):
"""
Make sure all the relevant files exist
"""
self.search_file_path()
self.verify_noobaa_diagnostics()
for file, file_path in self.files_path.items():
if not Path(file_path).is_file():
self.files_not_exist.append(file)
elif Path(file_path).stat().st_size == 0:
self.empty_files.append(file)
elif re.search(r"\.yaml$", file):
with open(file_path, "r") as f:
if "kind" not in f.read().lower():
self.files_content_issue.append(file)
def compare_running_pods(self):
"""
Compare running pods list to "/pods" subdirectories
"""
if self.type_log != "OTHERS":
return
must_gather_helper = re.compile(r"must-gather-.*.-helper")
pod_objs = get_all_pods(namespace="openshift-storage")
pod_names = []
for pod in pod_objs:
if not must_gather_helper.match(pod.name):
pod_names.append(pod.name)
for dir_name, subdir_list, files_list in os.walk(self.root):
if re.search("openshift-storage/pods$", dir_name):
pod_path = dir_name
pod_files = []
for pod_file in os.listdir(pod_path):
if (
not must_gather_helper.match(pod_file)
and (re.match(r"compute-*", pod_file) is None)
and (re.match(r"ip-*", pod_file) is None)
):
pod_files.append(pod_file)
assert set(sorted(pod_files)) == set(sorted(pod_names)), (
f"List of openshift-storage pods does not match the list of log "
f"directories. Pods: {pod_names}; log directories: {pod_files}"
)
def print_invalid_files(self):
"""
Print Invalid Files
"""
if any([self.empty_files, self.files_not_exist, self.files_content_issue]):
error = (
f"Files don't exist:\n{self.files_not_exist}\n"
f"Empty files:\n{self.empty_files}\n"
f"Content issues:\n{self.files_content_issue}"
)
self.empty_files = list()
self.files_not_exist = list()
self.files_content_issue = list()
raise Exception(error)
def verify_noobaa_diagnostics(self):
"""
Verify noobaa_diagnostics folder exist
"""
if self.type_log == "OTHERS" and get_ocs_parsed_version() >= 4.6:
flag = False
logger.info("Verify noobaa_diagnostics folder exist")
for path, subdirs, files in os.walk(self.root):
for file in files:
if re.search(r"noobaa_diagnostics_.*.tar.gz", file):
flag = True
if not flag:
logger.error("noobaa_diagnostics.tar.gz does not exist")
self.files_not_exist.append("noobaa_diagnostics.tar.gz")
def validate_must_gather(self):
"""
Validate must_gather
"""
self.validate_file_size()
self.validate_expected_files()
self.print_invalid_files()
self.compare_running_pods()
def cleanup(self):
"""
Delete temporary folder.
"""
logger.info(f"Delete must gather folder {self.root}")
if re.search("_ocs_logs$", self.root):
shutil.rmtree(path=self.root, ignore_errors=False, onerror=None)
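# A minimal usage sketch, assuming a configured ocs-ci environment and a
# reachable cluster; "OTHERS" is one of the log-type keys used by this module.
# The collect -> validate -> cleanup order matters, because validation walks the
# temporary *_ocs_logs folder created by collect_must_gather().
if __name__ == "__main__":
    must_gather = MustGather()
    must_gather.log_type = "OTHERS"
    must_gather.collect_must_gather()
    try:
        must_gather.validate_must_gather()
    finally:
        # always remove the temporary folder, even if validation fails
        must_gather.cleanup()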
|
the-stack_106_24472 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from rasa_core import restore
def test_restoring_tracker(trained_moodbot_path, recwarn):
tracker_dump = "data/test_trackers/tracker_moodbot.json"
agent, tracker = restore.recreate_agent(trained_moodbot_path,
tracker_dump=tracker_dump)
    # makes sure there are no warnings. warnings are raised if the model's
    # predictions differ from the tracker when the dumped tracker is replayed
assert [e
for e in recwarn
if e._category_name == "UserWarning"] == []
assert len(tracker.events) == 7
assert tracker.latest_action_name == "action_listen"
assert not tracker.is_paused()
assert tracker.sender_id == "mysender"
assert tracker.events[-1].timestamp == 1517821726.211042
|
the-stack_106_24474 | from bc.data import Table, Prototype, BytecodeDump, Instruction, InsType, Const
from bc.stream import Stream
class DumpWriter(object):
def __init__(self, dump: BytecodeDump, filename, encoding):
self.dump = dump
self.encoding = encoding
self.filename = filename
self.stream: Stream = None
self.prototype_datas = {}
def write(self):
self.stream = Stream.open(self.filename, 'wb')
# self.stream = Stream.open(BytesIO())
self._write_header()
self._write_prototypes()
self.stream.close()
def _write_header(self):
self.stream.write_bytes(Const.MAGIC)
self.stream.write_byte(self.dump.version)
self.stream.write_uleb128(self.dump.is_big_endian | self.dump.is_stripped | self.dump.has_ffi)
if not self.dump.is_stripped:
name = self.dump.name.encode(self.encoding)
self.stream.write_uleb128(len(name))
self.stream.write_bytes(name)
self.stream.byteorder = 'big' if self.dump.is_big_endian else 'little'
def _write_prototypes(self):
for prototype in self._sorted_prototypes():
self._write_prototype(prototype)
# end of prototypes
self.stream.write_uleb128(0)
def _write_prototype(self, prototype: Prototype):
self.stream.write_byte(prototype.has_ffi | prototype.has_iloop | prototype.is_jit_disabled | prototype.has_sub_prototypes | prototype.is_variadic)
self._write_counts(prototype)
self._write_instructions(prototype)
self._write_upvalues(prototype)
self._write_constants(prototype)
self._write_numerics(prototype)
self._write_debug_info(prototype)
def _write_counts(self, prototype: Prototype):
        self.stream.write_byte(prototype.argument_count)
self.stream.write_byte(prototype.frame_size)
self.stream.write_byte(len(prototype.upvalues))
self.stream.write_uleb128(len(prototype.constants))
self.stream.write_uleb128(len(prototype.numerics))
self.stream.write_uleb128(len(prototype.instructions) - 1)
if prototype.debug_info_size > 0:
self.stream.write_uleb128(prototype.debug_info_size)
self.stream.write_uleb128(prototype.first_line_number)
self.stream.write_uleb128(prototype.line_count)
def _write_instructions(self, prototype: Prototype):
for ins in prototype.instructions[1:]: # ignore head
self._write_instruction(prototype, ins)
def _write_instruction(self, prototype: Prototype, ins: Instruction):
a, b, cd = 0, 0, 0
arg_count = 0
if ins.A_TYPE is not None:
a = self._process_operand(prototype, ins.A_TYPE, ins.a)
arg_count += 1
if ins.B_TYPE is not None:
b = self._process_operand(prototype, ins.B_TYPE, ins.b)
arg_count += 1
if ins.CD_TYPE is not None:
cd = self._process_operand(prototype, ins.CD_TYPE, ins.cd)
arg_count += 1
if arg_count == 3:
codeword = ins.OPCODE | (a << 8) | (b << 24) | (cd << 16)
else:
codeword = ins.OPCODE | (a << 8) | (cd << 16)
self.stream.write_uint(codeword)
def _process_operand(self, prototype: Prototype, operand_type, operand):
if operand_type in (InsType.STR, InsType.TAB, InsType.FUN, InsType.CDT):
return len(prototype.constants) - operand - 1
elif operand_type == InsType.JMP:
return operand + 0x8000
elif operand_type == InsType.NUM:
return operand
else:
return operand
def _write_upvalues(self, prototype: Prototype):
for uv in prototype.upvalues:
self.stream.write_uint(uv, 2)
def _write_constants(self, prototype: Prototype):
for c in prototype.constants:
ref = c.ref
if isinstance(ref, str):
ref = ref.encode(self.encoding)
self.stream.write_uleb128(len(ref) + Const.BCDUMP_KGC_STR)
self.stream.write_bytes(ref)
elif isinstance(ref, Table):
self.stream.write_uleb128(Const.BCDUMP_KGC_TAB)
self._write_table(ref)
elif isinstance(ref, Prototype):
self.stream.write_uleb128(Const.BCDUMP_KGC_CHILD)
# elif isinstance(ref, tuple):
# stream.write_uleb128(Const.BCDUMP_KGC_COMPLEX)
# stream.write_float(ref[0])
# stream.write_float(ref[1])
# elif isinstance(ref, tuple):
# stream.write_uleb128(Const.BCDUMP_KGC_I64)
# stream.write_float(ref)
def _write_numerics(self, prototype: Prototype):
for n in prototype.numerics:
self.stream.write_uleb128_33(n)
def _write_table(self, table: Table):
self.stream.write_uleb128(len(table.array))
self.stream.write_uleb128(len(table.dictionary))
for item in table.array:
self._write_table_item(item)
for item in table.dictionary:
self._write_table_item(item[0])
self._write_table_item(item[1])
return table
def _write_table_item(self, value):
if value is True:
self.stream.write_uleb128(Const.BCDUMP_KTAB_TRUE)
elif value is False:
self.stream.write_uleb128(Const.BCDUMP_KTAB_FALSE)
elif value is None:
self.stream.write_uleb128(Const.BCDUMP_KTAB_NIL)
elif isinstance(value, str):
value = value.encode(self.encoding)
self.stream.write_uleb128(len(value) + Const.BCDUMP_KTAB_STR)
self.stream.write_bytes(value)
elif isinstance(value, int):
self.stream.write_uleb128(Const.BCDUMP_KTAB_INT)
self.stream.write_signed_int(value)
elif isinstance(value, float):
self.stream.write_uleb128(Const.BCDUMP_KTAB_NUM)
self.stream.write_float(value)
else:
print(type(value))
def _write_debug_info(self, prototype: Prototype):
if prototype.debug_info:
self._write_line_info(prototype)
self._write_upvalue_names(prototype)
self._write_variable_info(prototype)
def _write_line_info(self, prototype: Prototype):
if prototype.line_count >= 65536:
line_info_size = 4
elif prototype.line_count >= 256:
line_info_size = 2
else:
line_info_size = 1
for v in prototype.debug_info.addr_to_line_map[1:]:
self.stream.write_uint(v - prototype.first_line_number, line_info_size)
def _write_upvalue_names(self, prototype: Prototype):
for v in prototype.debug_info.upvalue_variable_names:
self.stream.write_zstring(v.encode(self.encoding))
def _write_variable_info(self, prototype: Prototype):
last_addr = 0
for info in prototype.debug_info.variable_infos:
if info.type == info.T_VISIBLE:
self.stream.write_zstring(info.name.encode(self.encoding))
else:
self.stream.write_byte(Const.INTERNAL_VARNAMES.index(info.name))
self.stream.write_uleb128(info.start_addr - last_addr)
self.stream.write_uleb128(info.end_addr - info.start_addr)
last_addr = info.start_addr
self.stream.write_byte(Const.VARNAME_END)
def _sorted_prototypes(self):
def get_prototypes(pt: Prototype):
children = []
for c in pt.constants:
if isinstance(c.ref, Prototype):
children = get_prototypes(c.ref) + children
return children + [pt]
prototypes = []
for prototype in self.dump.prototypes:
prototypes.extend(get_prototypes(prototype))
return prototypes
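# A usage sketch: DumpWriter expects an already-parsed BytecodeDump. The reader
# import below is a hypothetical counterpart assumed for illustration only and
# is not defined by this module.
#
#   from bc.reader import DumpReader   # hypothetical
#   dump = DumpReader('original.luac', 'utf-8').read()
#   DumpWriter(dump, 'patched.luac', 'utf-8').write()
#
# write() opens self.filename, emits the LuaJIT header, then serializes the
# prototypes children-first (see _sorted_prototypes) so that child prototypes
# precede the parent prototypes that reference them as constants.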
|
the-stack_106_24476 | # Globales
import tensorflow as tf
AUTOTUNE = tf.data.AUTOTUNE
BATCH_SIZE = 32
BUFFER_SIZE = 20000
EMBEDDING_DIM = 512
MAX_SEQ_LENGTH = 200
INPUT_VOCAB_SIZE = 8500
TARGET_VOCAB_SIZE = 8000
EPS_LAYERNORM = 1e-6 |
the-stack_106_24477 | """
=============================================================================
t-SNE: The effect of various perplexity values on the shape
=============================================================================
An illustration of t-SNE on the two concentric circles and the S-curve
datasets for different perplexity values.
We observe a tendency towards clearer shapes as the perplexity value increases.
The size, the distance and the shape of clusters may vary with initialization and
perplexity values, and do not always convey a meaning.
As shown below, t-SNE for higher perplexities finds a meaningful topology of the
two concentric circles; however, the size and the distance of the circles vary
slightly from the original. In contrast to the two circles dataset, the shapes
visually diverge from the S-curve topology on the S-curve dataset even for
larger perplexity values.
For further details, "How to Use t-SNE Effectively"
https://distill.pub/2016/misread-tsne/ provides a good discussion of the
effects of various parameters, as well as interactive plots to explore
those effects.
"""
# Author: Narine Kokhlikyan <[email protected]>
# License: BSD
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
from time import time
n_samples = 300
n_components = 2
(fig, subplots) = plt.subplots(3, 5, figsize=(15, 8))
perplexities = [5, 30, 50, 100]
X, y = datasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05)
red = y == 0
green = y == 1
ax = subplots[0][0]
ax.scatter(X[red, 0], X[red, 1], c="r")
ax.scatter(X[green, 0], X[green, 1], c="g")
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis("tight")
for i, perplexity in enumerate(perplexities):
ax = subplots[0][i + 1]
t0 = time()
tsne = manifold.TSNE(
n_components=n_components, init="random", random_state=0, perplexity=perplexity
)
Y = tsne.fit_transform(X)
t1 = time()
print("circles, perplexity=%d in %.2g sec" % (perplexity, t1 - t0))
ax.set_title("Perplexity=%d" % perplexity)
ax.scatter(Y[red, 0], Y[red, 1], c="r")
ax.scatter(Y[green, 0], Y[green, 1], c="g")
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis("tight")
# Another example using s-curve
X, color = datasets.make_s_curve(n_samples, random_state=0)
ax = subplots[1][0]
ax.scatter(X[:, 0], X[:, 2], c=color)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
for i, perplexity in enumerate(perplexities):
ax = subplots[1][i + 1]
t0 = time()
tsne = manifold.TSNE(
n_components=n_components, init="random", random_state=0, perplexity=perplexity
)
Y = tsne.fit_transform(X)
t1 = time()
print("S-curve, perplexity=%d in %.2g sec" % (perplexity, t1 - t0))
ax.set_title("Perplexity=%d" % perplexity)
ax.scatter(Y[:, 0], Y[:, 1], c=color)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis("tight")
# Another example using a 2D uniform grid
x = np.linspace(0, 1, int(np.sqrt(n_samples)))
xx, yy = np.meshgrid(x, x)
X = np.hstack(
[
xx.ravel().reshape(-1, 1),
yy.ravel().reshape(-1, 1),
]
)
color = xx.ravel()
ax = subplots[2][0]
ax.scatter(X[:, 0], X[:, 1], c=color)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
for i, perplexity in enumerate(perplexities):
ax = subplots[2][i + 1]
t0 = time()
tsne = manifold.TSNE(
n_components=n_components, init="random", random_state=0, perplexity=perplexity
)
Y = tsne.fit_transform(X)
t1 = time()
print("uniform grid, perplexity=%d in %.2g sec" % (perplexity, t1 - t0))
ax.set_title("Perplexity=%d" % perplexity)
ax.scatter(Y[:, 0], Y[:, 1], c=color)
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
ax.axis("tight")
plt.show()
|
the-stack_106_24480 | # coding: utf-8
"""
Provides an error handler for non-API errors.
"""
import logging
import flask
from api import helpers
import config
from main import app
@app.errorhandler(400) # Bad Request
@app.errorhandler(401) # Unauthorized
@app.errorhandler(403) # Forbidden
@app.errorhandler(404) # Not Found
@app.errorhandler(405) # Method Not Allowed
@app.errorhandler(410) # Gone
@app.errorhandler(418) # I'm a Teapot
@app.errorhandler(500) # Internal Server Error
def error_handler(err):
"""If error occured we flash it onto screen and render index"""
logging.exception(err)
try:
err.code
except AttributeError:
err.code = 500
err.name = 'Internal Server Error'
if flask.request.path.startswith('/api/'):
return helpers.handle_error(err)
flask.flash(err.name)
return flask.render_template('index.html')
if config.PRODUCTION:
@app.errorhandler(Exception)
def production_error_handler(err): # pylint: disable=missing-docstring
return error_handler(err)
|
the-stack_106_24481 | from opentrons import __version__
from opentrons.protocol_api import MAX_SUPPORTED_VERSION
def test_health(api_client, hardware):
hardware.fw_version = "FW111"
hardware.board_revision = "BR2.1"
expected = {
'name': 'opentrons-dev',
'api_version': __version__,
'fw_version': 'FW111',
'board_revision': 'BR2.1',
'logs': ['/logs/serial.log', '/logs/api.log'],
'system_version': '0.0.0',
'protocol_api_version': list(MAX_SUPPORTED_VERSION),
"links": {
"apiLog": "/logs/api.log",
"serialLog": "/logs/serial.log",
"apiSpec": "/openapi.json",
"systemTime": "/system/time"
}
}
resp = api_client.get('/health')
text = resp.json()
assert resp.status_code == 200
assert text == expected
|
the-stack_106_24483 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import uuid
from datetime import timedelta
from mock import patch
from polyaxon.managers.cli import CliConfigManager
from polyaxon.schemas.cli.cli_config import CliConfig
from polyaxon.utils.tz_utils import now
from tests.utils import BaseTestCase
@pytest.mark.managers_mark
class TestCliConfigManager(BaseTestCase):
def test_default_props(self):
assert CliConfigManager.is_global() is True
assert CliConfigManager.IS_POLYAXON_DIR is False
assert CliConfigManager.CONFIG_FILE_NAME == ".cli"
assert CliConfigManager.CONFIG == CliConfig
@pytest.mark.managers_mark
class TestCliConfigManagerMethods(BaseTestCase):
def setUp(self):
super().setUp()
self.filename = uuid.uuid4().hex
CliConfigManager.CONFIG_FILE_NAME = self.filename
def tearDown(self):
path = CliConfigManager.get_config_filepath(create=False)
if not os.path.exists(path):
return
os.remove(path)
def test_set_compatibility(self):
with patch.object(CliConfigManager, "set_config") as patch_fct:
CliConfigManager.reset(current_version=True)
assert patch_fct.call_count == 1
def test_should_check(self):
with patch.object(CliConfigManager, "reset") as patch_fct:
result = CliConfigManager.should_check()
assert patch_fct.call_count == 1
assert result is True
CliConfigManager.reset(
last_check=now(),
current_version="0.0.5",
installation={"key": "uuid", "version": "1.1.4-rc11", "dist": "foo"},
compatibility={"cli": {"min": "0.0.4", "latest": "1.1.4"}},
)
with patch.object(CliConfigManager, "reset") as patch_fct:
result = CliConfigManager.should_check()
assert patch_fct.call_count == 0
assert result is False
CliConfigManager.reset(
last_check=now() - timedelta(seconds=10000),
current_version="0.0.5",
installation={"key": "uuid", "version": "1.1.4-rc11", "dist": "foo"},
compatibility={"cli": {"min": "0.0.4", "latest": "1.1.4"}},
)
with patch.object(CliConfigManager, "reset") as patch_fct:
result = CliConfigManager.should_check()
assert patch_fct.call_count == 1
assert result is True
CliConfigManager.reset(
last_check=now(),
current_version="0.0.2",
installation={"key": "uuid", "version": "1.1.4-rc11", "dist": "foo"},
compatibility={"cli": {"min": "0.0.4", "latest": "1.1.4"}},
)
with patch.object(CliConfigManager, "reset") as patch_fct:
result = CliConfigManager.should_check()
            # Although the condition for showing a message is met, do not reset
assert patch_fct.call_count == 0
assert result is False
|
the-stack_106_24484 | """
The :class:`~allennlp.common.params.Params` class represents a dictionary of
parameters (e.g. for configuring a model), with added functionality around
logging and validation.
"""
from typing import Any, Dict, List
from collections import OrderedDict
from collections.abc import MutableMapping
import copy
import json
import logging
import os
from overrides import overrides
import _jsonnet
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def unflatten(flat_dict: Dict[str, Any]) -> Dict[str, Any]:
"""
Given a "flattened" dict with compound keys, e.g.
{"a.b": 0}
unflatten it:
{"a": {"b": 0}}
"""
unflat: Dict[str, Any] = {}
for compound_key, value in flat_dict.items():
curr_dict = unflat
parts = compound_key.split(".")
for key in parts[:-1]:
curr_value = curr_dict.get(key)
if key not in curr_dict:
curr_dict[key] = {}
curr_dict = curr_dict[key]
elif isinstance(curr_value, dict):
curr_dict = curr_value
else:
raise ConfigurationError("flattened dictionary is invalid")
if not isinstance(curr_dict, dict) or parts[-1] in curr_dict:
raise ConfigurationError("flattened dictionary is invalid")
else:
curr_dict[parts[-1]] = value
return unflat
def with_fallback(preferred: Dict[str, Any], fallback: Dict[str, Any]) -> Dict[str, Any]:
"""
Deep merge two dicts, preferring values from `preferred`.
"""
preferred_keys = set(preferred.keys())
fallback_keys = set(fallback.keys())
common_keys = preferred_keys & fallback_keys
merged: Dict[str, Any] = {}
for key in preferred_keys - fallback_keys:
merged[key] = copy.deepcopy(preferred[key])
for key in fallback_keys - preferred_keys:
merged[key] = copy.deepcopy(fallback[key])
for key in common_keys:
preferred_value = preferred[key]
fallback_value = fallback[key]
if isinstance(preferred_value, dict) and isinstance(fallback_value, dict):
merged[key] = with_fallback(preferred_value, fallback_value)
else:
merged[key] = copy.deepcopy(preferred_value)
return merged
def parse_overrides(serialized_overrides: str) -> Dict[str, Any]:
if serialized_overrides:
ext_vars = dict(os.environ)
return unflatten(json.loads(_jsonnet.evaluate_snippet("", serialized_overrides, ext_vars=ext_vars)))
else:
return {}
class Params(MutableMapping):
"""
Represents a parameter dictionary with a history, and contains other functionality around
parameter passing and validation for AllenNLP.
There are currently two benefits of a ``Params`` object over a plain dictionary for parameter
passing:
#. We handle a few kinds of parameter validation, including making sure that parameters
representing discrete choices actually have acceptable values, and making sure no extra
parameters are passed.
#. We log all parameter reads, including default values. This gives a more complete
specification of the actual parameters used than is given in a JSON file, because
those may not specify what default values were used, whereas this will log them.
The convention for using a ``Params`` object in AllenNLP is that you will consume the parameters
as you read them, so that there are none left when you've read everything you expect. This
lets us easily validate that you didn't pass in any `extra` parameters, just by making sure
that the parameter dictionary is empty. You should do this when you're done handling
parameters, by calling :func:`Params.assert_empty`.
"""
# This allows us to check for the presence of "None" as a default argument,
    # which we require because we make a distinction between passing a value of "None"
# and passing no value to the default parameter of "pop".
DEFAULT = object()
def __init__(self,
params: Dict[str, Any],
history: str = "",
loading_from_archive: bool = False,
files_to_archive: Dict[str, str] = None) -> None:
self.params = _replace_none(params)
self.history = history
self.loading_from_archive = loading_from_archive
self.files_to_archive = {} if files_to_archive is None else files_to_archive
def add_file_to_archive(self, name: str) -> None:
"""
Any class in its ``from_params`` method can request that some of its
input files be added to the archive by calling this method.
For example, if some class ``A`` had an ``input_file`` parameter, it could call
```
params.add_file_to_archive("input_file")
```
which would store the supplied value for ``input_file`` at the key
``previous.history.and.then.input_file``. The ``files_to_archive`` dict
is shared with child instances via the ``_check_is_dict`` method, so that
the final mapping can be retrieved from the top-level ``Params`` object.
NOTE: You must call ``add_file_to_archive`` before you ``pop()``
the parameter, because the ``Params`` instance looks up the value
of the filename inside itself.
If the ``loading_from_archive`` flag is True, this will be a no-op.
"""
if not self.loading_from_archive:
self.files_to_archive[f"{self.history}{name}"] = cached_path(self.get(name))
@overrides
def pop(self, key: str, default: Any = DEFAULT) -> Any:
"""
Performs the functionality associated with dict.pop(key), along with checking for
returned dictionaries, replacing them with Param objects with an updated history.
If ``key`` is not present in the dictionary, and no default was specified, we raise a
``ConfigurationError``, instead of the typical ``KeyError``.
"""
if default is self.DEFAULT:
try:
value = self.params.pop(key)
except KeyError:
raise ConfigurationError("key \"{}\" is required at location \"{}\"".format(key, self.history))
else:
value = self.params.pop(key, default)
if not isinstance(value, dict):
logger.info(self.history + key + " = " + str(value)) # type: ignore
return self._check_is_dict(key, value)
def pop_int(self, key: str, default: Any = DEFAULT) -> int:
"""
Performs a pop and coerces to an int.
"""
value = self.pop(key, default)
if value is None:
return None
else:
return int(value)
def pop_float(self, key: str, default: Any = DEFAULT) -> float:
"""
Performs a pop and coerces to a float.
"""
value = self.pop(key, default)
if value is None:
return None
else:
return float(value)
def pop_bool(self, key: str, default: Any = DEFAULT) -> bool:
"""
Performs a pop and coerces to a bool.
"""
value = self.pop(key, default)
if value is None:
return None
elif isinstance(value, bool):
return value
elif value == "true":
return True
elif value == "false":
return False
else:
raise ValueError("Cannot convert variable to bool: " + value)
@overrides
def get(self, key: str, default: Any = DEFAULT):
"""
Performs the functionality associated with dict.get(key) but also checks for returned
dicts and returns a Params object in their place with an updated history.
"""
if default is self.DEFAULT:
try:
value = self.params.get(key)
except KeyError:
raise ConfigurationError("key \"{}\" is required at location \"{}\"".format(key, self.history))
else:
value = self.params.get(key, default)
return self._check_is_dict(key, value)
def pop_choice(self, key: str, choices: List[Any], default_to_first_choice: bool = False) -> Any:
"""
Gets the value of ``key`` in the ``params`` dictionary, ensuring that the value is one of
the given choices. Note that this `pops` the key from params, modifying the dictionary,
consistent with how parameters are processed in this codebase.
Parameters
----------
key: str
Key to get the value from in the param dictionary
choices: List[Any]
A list of valid options for values corresponding to ``key``. For example, if you're
specifying the type of encoder to use for some part of your model, the choices might be
the list of encoder classes we know about and can instantiate. If the value we find in
the param dictionary is not in ``choices``, we raise a ``ConfigurationError``, because
the user specified an invalid value in their parameter file.
default_to_first_choice: bool, optional (default=False)
If this is ``True``, we allow the ``key`` to not be present in the parameter
            dictionary. If the key is not present, we will use the first choice in the
            ``choices`` list as the return value. If this is ``False``, we raise a
``ConfigurationError``, because specifying the ``key`` is required (e.g., you `have` to
specify your model class when running an experiment, but you can feel free to use
default settings for encoders if you want).
"""
default = choices[0] if default_to_first_choice else self.DEFAULT
value = self.pop(key, default)
if value not in choices:
key_str = self.history + key
message = '%s not in acceptable choices for %s: %s' % (value, key_str, str(choices))
raise ConfigurationError(message)
return value
def as_dict(self, quiet=False):
"""
        Sometimes we need to represent the parameters just as a dict, for instance when we
        pass them to a Keras layer (so that they can be serialised).
Parameters
----------
quiet: bool, optional (default = False)
Whether to log the parameters before returning them as a dict.
"""
if quiet:
return self.params
def log_recursively(parameters, history):
for key, value in parameters.items():
if isinstance(value, dict):
new_local_history = history + key + "."
log_recursively(value, new_local_history)
else:
logger.info(history + key + " = " + str(value))
logger.info("Converting Params object to dict; logging of default "
"values will not occur when dictionary parameters are "
"used subsequently.")
logger.info("CURRENTLY DEFINED PARAMETERS: ")
log_recursively(self.params, self.history)
return self.params
def as_flat_dict(self):
"""
        Returns the parameters as a flat dictionary from keys to values.
Nested structure is collapsed with periods.
"""
flat_params = {}
def recurse(parameters, path):
for key, value in parameters.items():
newpath = path + [key]
if isinstance(value, dict):
recurse(value, newpath)
else:
flat_params['.'.join(newpath)] = value
recurse(self.params, [])
return flat_params
def duplicate(self) -> 'Params':
"""
Uses ``copy.deepcopy()`` to create a duplicate (but fully distinct)
copy of these Params.
"""
return Params(copy.deepcopy(self.params))
def assert_empty(self, class_name: str):
"""
Raises a ``ConfigurationError`` if ``self.params`` is not empty. We take ``class_name`` as
an argument so that the error message gives some idea of where an error happened, if there
was one. ``class_name`` should be the name of the `calling` class, the one that got extra
parameters (if there are any).
"""
if self.params:
raise ConfigurationError("Extra parameters passed to {}: {}".format(class_name, self.params))
def __getitem__(self, key):
if key in self.params:
return self._check_is_dict(key, self.params[key])
else:
raise KeyError
def __setitem__(self, key, value):
self.params[key] = value
def __delitem__(self, key):
del self.params[key]
def __iter__(self):
return iter(self.params)
def __len__(self):
return len(self.params)
def _check_is_dict(self, new_history, value):
if isinstance(value, dict):
new_history = self.history + new_history + "."
return Params(value,
history=new_history,
loading_from_archive=self.loading_from_archive,
files_to_archive=self.files_to_archive)
if isinstance(value, list):
value = [self._check_is_dict(new_history + '.list', v) for v in value]
return value
@staticmethod
def from_file(params_file: str, params_overrides: str = "") -> 'Params':
"""
Load a `Params` object from a configuration file.
"""
# redirect to cache, if necessary
params_file = cached_path(params_file)
ext_vars = dict(os.environ)
file_dict = json.loads(_jsonnet.evaluate_file(params_file, ext_vars=ext_vars))
overrides_dict = parse_overrides(params_overrides)
param_dict = with_fallback(preferred=overrides_dict, fallback=file_dict)
return Params(param_dict)
def to_file(self, params_file: str, preference_orders: List[List[str]] = None) -> None:
with open(params_file, "w") as handle:
json.dump(self.as_ordered_dict(preference_orders), handle, indent=4)
def as_ordered_dict(self, preference_orders: List[List[str]] = None) -> OrderedDict:
"""
Returns Ordered Dict of Params from list of partial order preferences.
Parameters
----------
preference_orders: List[List[str]], optional
``preference_orders`` is list of partial preference orders. ["A", "B", "C"] means
"A" > "B" > "C". For multiple preference_orders first will be considered first.
Keys not found, will have last but alphabetical preference. Default Preferences:
``[["dataset_reader", "iterator", "model", "train_data_path", "validation_data_path",
"test_data_path", "trainer", "vocabulary"], ["type"]]``
"""
params_dict = self.as_dict(quiet=True)
if not preference_orders:
preference_orders = []
preference_orders.append(["dataset_reader", "iterator", "model",
"train_data_path", "validation_data_path", "test_data_path",
"trainer", "vocabulary"])
preference_orders.append(["type"])
def order_func(key):
# Makes a tuple to use for ordering. The tuple is an index into each of the `preference_orders`,
# followed by the key itself. This gives us integer sorting if you have a key in one of the
# `preference_orders`, followed by alphabetical ordering if not.
order_tuple = [order.index(key) if key in order else len(order) for order in preference_orders]
return order_tuple + [key]
def order_dict(dictionary, order_func):
# Recursively orders dictionary according to scoring order_func
result = OrderedDict()
for key, val in sorted(dictionary.items(), key=lambda item: order_func(item[0])):
result[key] = order_dict(val, order_func) if isinstance(val, dict) else val
return result
return order_dict(params_dict, order_func)
def pop_choice(params: Dict[str, Any],
key: str,
choices: List[Any],
default_to_first_choice: bool = False,
history: str = "?.") -> Any:
"""
Performs the same function as :func:`Params.pop_choice`, but is required in order to deal with
places that the Params object is not welcome, such as inside Keras layers. See the docstring
of that method for more detail on how this function works.
This method adds a ``history`` parameter, in the off-chance that you know it, so that we can
reproduce :func:`Params.pop_choice` exactly. We default to using "?." if you don't know the
history, so you'll have to fix that in the log if you want to actually recover the logged
parameters.
"""
value = Params(params, history).pop_choice(key, choices, default_to_first_choice)
return value
def _replace_none(dictionary: Dict[str, Any]) -> Dict[str, Any]:
for key in dictionary.keys():
if dictionary[key] == "None":
dictionary[key] = None
elif isinstance(dictionary[key], dict):
dictionary[key] = _replace_none(dictionary[key])
return dictionary
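# A small self-contained sketch of the consume-as-you-read convention described
# in the Params docstring above; the keys used here are made up for illustration.
if __name__ == "__main__":
    example = Params({"model": {"type": "lstm", "hidden_size": 128}})
    model_params = example.pop("model")                  # nested dict comes back as Params
    model_type = model_params.pop_choice("type", ["lstm", "gru"])
    hidden_size = model_params.pop_int("hidden_size", 64)
    model_params.assert_empty("example")                 # every parameter was consumed
    print(model_type, hidden_size)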
|
the-stack_106_24485 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from flask_login import current_user
from airflow.www.app import create_app
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_pools
class TestGoogleOpenID(unittest.TestCase):
def setUp(self) -> None:
with conf_vars(
{("api", "auth_backend"): "tests.test_utils.remote_user_api_auth_backend"}
), mock.patch.dict('os.environ', SKIP_DAGS_PARSING='True'), conf_vars(
{('api', 'enable_experimental_api'): 'true'}
):
self.app = create_app(testing=True)
self.appbuilder = self.app.appbuilder # pylint: disable=no-member
role_admin = self.appbuilder.sm.find_role("Admin")
tester = self.appbuilder.sm.find_user(username="test")
if not tester:
self.appbuilder.sm.add_user(
username="test",
first_name="test",
last_name="test",
email="[email protected]",
role=role_admin,
password="test",
)
def test_success_using_username(self):
clear_db_pools()
with self.app.test_client() as test_client:
response = test_client.get("/api/experimental/pools", environ_overrides={'REMOTE_USER': "test"})
assert "[email protected]" == current_user.email
assert 200 == response.status_code
assert "Default pool" in str(response.json)
def test_success_using_email(self):
clear_db_pools()
with self.app.test_client() as test_client:
response = test_client.get(
"/api/experimental/pools", environ_overrides={'REMOTE_USER': "[email protected]"}
)
assert "[email protected]" == current_user.email
assert 200 == response.status_code
assert "Default pool" in str(response.json)
def test_user_not_exists(self):
with self.app.test_client() as test_client:
response = test_client.get(
"/api/experimental/pools", environ_overrides={'REMOTE_USER': "INVALID"}
)
assert 403 == response.status_code
assert "Forbidden" == response.data.decode()
def test_missing_remote_user(self):
with self.app.test_client() as test_client:
response = test_client.get("/api/experimental/pools")
assert 403 == response.status_code
assert "Forbidden" == response.data.decode()
def test_user_should_be_logged_temporary(self):
with self.app.test_client() as test_client:
response = test_client.get(
"/api/experimental/pools", environ_overrides={'REMOTE_USER': "INVALID"}
)
assert 403 == response.status_code
assert "Forbidden" == response.data.decode()
|
the-stack_106_24486 | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""CTypes bindings for the Gumbo HTML5 parser.
This exports the raw interface of the library as a set of very thin ctypes
wrappers. It's intended to be wrapped by other libraries to provide a more
Pythonic API.
"""
__author__ = '[email protected] (Jonathan Tang)'
import sys
import contextlib
import ctypes
import os.path
import gumboc_tags
_name_of_lib = 'libgumbo.so'
if sys.platform.startswith('darwin'):
_name_of_lib = 'libgumbo.dylib'
elif sys.platform.startswith('win'):
_name_of_lib = "gumbo.dll"
try:
  # First look for a freshly-built .so in the .libs directory, for development.
  _dll = ctypes.cdll.LoadLibrary(os.path.join(
      os.path.dirname(__file__), '..', '..', '.libs', _name_of_lib))
except OSError:
  try:
    # PyPI or setuptools install, look in the current directory.
    _dll = ctypes.cdll.LoadLibrary(os.path.join(
        os.path.dirname(__file__), _name_of_lib))
  except OSError:
    # System library, on unix or mac osx
    _dll = ctypes.cdll.LoadLibrary(_name_of_lib)
# Some aliases for common types.
_bitvector = ctypes.c_uint
_Ptr = ctypes.POINTER
class EnumMetaclass(type(ctypes.c_uint)):
def __new__(metaclass, name, bases, cls_dict):
cls = type(ctypes.c_uint).__new__(metaclass, name, bases, cls_dict)
if name == 'Enum':
return cls
try:
for i, value in enumerate(cls_dict['_values_']):
setattr(cls, value, cls.from_param(i))
except KeyError:
raise ValueError('No _values_ list found inside enum type.')
except TypeError:
raise ValueError('_values_ must be a list of names of enum constants.')
return cls
def with_metaclass(mcls):
def decorator(cls):
body = vars(cls).copy()
# clean out class body
body.pop('__dict__', None)
body.pop('__weakref__', None)
return mcls(cls.__name__, cls.__bases__, body)
return decorator
@with_metaclass(EnumMetaclass)
class Enum(ctypes.c_uint):
@classmethod
def from_param(cls, param):
if isinstance(param, Enum):
if param.__class__ != cls:
raise ValueError("Can't mix enums of different types")
return param
if param < 0 or param > len(cls._values_):
raise ValueError('%d is out of range for enum type %s; max %d.' %
(param, cls.__name__, len(cls._values_)))
return cls(param)
def __eq__(self, other):
return self.value == other.value
def __ne__(self, other):
return self.value != other.value
def __hash__(self):
return hash(self.value)
def __repr__(self):
try:
return self._values_[self.value]
except IndexError:
raise IndexError('Value %d is out of range for %r' %
(self.value, self._values_))
class StringPiece(ctypes.Structure):
_fields_ = [
('data', _Ptr(ctypes.c_char)),
('length', ctypes.c_size_t),
]
def __len__(self):
return self.length
def __str__(self):
return ctypes.string_at(self.data, self.length)
class SourcePosition(ctypes.Structure):
_fields_ = [
('line', ctypes.c_uint),
('column', ctypes.c_uint),
('offset', ctypes.c_uint)
]
SourcePosition.EMPTY = SourcePosition.in_dll(_dll, 'kGumboEmptySourcePosition')
class AttributeNamespace(Enum):
URLS = [
'http://www.w3.org/1999/xhtml',
'http://www.w3.org/1999/xlink',
'http://www.w3.org/XML/1998/namespace',
'http://www.w3.org/2000/xmlns',
]
_values_ = ['NONE', 'XLINK', 'XML', 'XMLNS']
def to_url(self):
return self.URLS[self.value]
class Attribute(ctypes.Structure):
_fields_ = [
('namespace', AttributeNamespace),
('name', ctypes.c_char_p),
('original_name', StringPiece),
('value', ctypes.c_char_p),
('original_value', StringPiece),
('name_start', SourcePosition),
('name_end', SourcePosition),
('value_start', SourcePosition),
('value_end', SourcePosition)
]
class Vector(ctypes.Structure):
_type_ = ctypes.c_void_p
_fields_ = [
('data', _Ptr(ctypes.c_void_p)),
('length', ctypes.c_uint),
('capacity', ctypes.c_uint)
]
class Iter(object):
def __init__(self, vector):
self.current = 0
self.vector = vector
def __iter__(self):
return self
def __next__(self):
# Python 3
if self.current >= self.vector.length:
raise StopIteration
obj = self.vector[self.current]
self.current += 1
return obj
def next(self):
# Python 2
return self.__next__()
def __len__(self):
return self.length
def __getitem__(self, i):
try:
# Python 2
numeric_types = (int, long)
except NameError:
# Python 3
numeric_types = int
if isinstance(i, numeric_types):
if i < 0:
i += self.length
      if i >= self.length:
raise IndexError
array_type = _Ptr(_Ptr(self._type_))
return ctypes.cast(self.data, array_type)[i].contents
return list(self)[i]
def __iter__(self):
return Vector.Iter(self)
Vector.EMPTY = Vector.in_dll(_dll, 'kGumboEmptyVector')
class AttributeVector(Vector):
_type_ = Attribute
class NodeVector(Vector):
# _type_ assigned later, to avoid circular references with Node
pass
class QuirksMode(Enum):
_values_ = ['NO_QUIRKS', 'QUIRKS', 'LIMITED_QUIRKS']
class Document(ctypes.Structure):
_fields_ = [
('children', NodeVector),
('has_doctype', ctypes.c_bool),
('name', ctypes.c_char_p),
('public_identifier', ctypes.c_char_p),
('system_identifier', ctypes.c_char_p),
('doc_type_quirks_mode', QuirksMode),
]
def __repr__(self):
return 'Document'
class Namespace(Enum):
URLS = [
'http://www.w3.org/1999/xhtml',
'http://www.w3.org/2000/svg',
'http://www.w3.org/1998/Math/MathML',
]
_values_ = ['HTML', 'SVG', 'MATHML']
def to_url(self):
return self.URLS[self.value]
class Tag(Enum):
@staticmethod
def from_str(tagname):
text_ptr = ctypes.c_char_p(tagname.encode('utf-8'))
return _tag_enum(text_ptr)
_values_ = gumboc_tags.TagNames + ['UNKNOWN', 'LAST']
class Element(ctypes.Structure):
_fields_ = [
('children', NodeVector),
('tag', Tag),
('tag_namespace', Namespace),
('original_tag', StringPiece),
('original_end_tag', StringPiece),
('start_pos', SourcePosition),
('end_pos', SourcePosition),
('attributes', AttributeVector),
]
@property
def tag_name(self):
original_tag = StringPiece.from_buffer_copy(self.original_tag)
_tag_from_original_text(ctypes.byref(original_tag))
if self.tag_namespace == Namespace.SVG:
svg_tagname = _normalize_svg_tagname(ctypes.byref(original_tag))
if svg_tagname is not None:
return str(svg_tagname)
if self.tag == Tag.UNKNOWN:
if original_tag.data is None:
return ''
return str(original_tag).lower()
return _tagname(self.tag)
def __repr__(self):
return ('<%r>\n' % self.tag +
'\n'.join(repr(child) for child in self.children) +
'</%r>' % self.tag)
class Text(ctypes.Structure):
_fields_ = [
('text', ctypes.c_char_p),
('original_text', StringPiece),
('start_pos', SourcePosition)
]
def __repr__(self):
return 'Text(%r)' % self.text
class NodeType(Enum):
_values_ = ['DOCUMENT', 'ELEMENT', 'TEXT', 'CDATA',
'COMMENT', 'WHITESPACE', 'TEMPLATE']
class NodeUnion(ctypes.Union):
_fields_ = [
('document', Document),
('element', Element),
('text', Text),
]
class Node(ctypes.Structure):
# _fields_ set later to avoid a circular reference
def _contents(self):
# Python3 enters an infinite loop if you use an @property within
# __getattr__, so we factor it out to a helper.
if self.type == NodeType.DOCUMENT:
return self.v.document
elif self.type in (NodeType.ELEMENT, NodeType.TEMPLATE):
return self.v.element
else:
return self.v.text
@property
def contents(self):
return self._contents()
def __getattr__(self, name):
return getattr(self._contents(), name)
def __setattr__(self, name, value):
return setattr(self._contents(), name, value)
def __repr__(self):
return repr(self.contents)
Node._fields_ = [
('type', NodeType),
# Set the type to Node later to avoid a circular dependency.
('parent', _Ptr(Node)),
('index_within_parent', ctypes.c_size_t),
# TODO(jdtang): Make a real list of enum constants for this.
('parse_flags', _bitvector),
('v', NodeUnion)
]
NodeVector._type_ = Node
class Options(ctypes.Structure):
_fields_ = [
# TODO(jdtang): Allow the Python API to set the allocator/deallocator
# function. Right now these are treated as opaque void pointers.
('allocator', ctypes.c_void_p),
('deallocator', ctypes.c_void_p),
('userdata', ctypes.c_void_p),
('tab_stop', ctypes.c_int),
('stop_on_first_error', ctypes.c_bool),
('max_errors', ctypes.c_int),
('fragment_context', Tag),
('fragment_namespace', Namespace),
]
class Output(ctypes.Structure):
_fields_ = [
('document', _Ptr(Node)),
('root', _Ptr(Node)),
# TODO(jdtang): Error type.
('errors', Vector),
]
@contextlib.contextmanager
def parse(text, **kwargs):
options = Options()
for field_name, _ in Options._fields_:
try:
setattr(options, field_name, kwargs[field_name])
except KeyError:
setattr(options, field_name, getattr(_DEFAULT_OPTIONS, field_name))
# We have to manually take a reference to the input text here so that it
# outlives the parse output. If we let ctypes do it automatically on function
# call, it creates a temporary buffer which is destroyed when the call
# completes, and then the original_text pointers point into invalid memory.
text_ptr = ctypes.c_char_p(text.encode('utf-8'))
output = _parse_with_options(ctypes.byref(options), text_ptr, len(text))
try:
yield output
finally:
_destroy_output(ctypes.byref(options), output)
_DEFAULT_OPTIONS = Options.in_dll(_dll, 'kGumboDefaultOptions')
_parse_with_options = _dll.gumbo_parse_with_options
_parse_with_options.argtypes = [_Ptr(Options), ctypes.c_char_p, ctypes.c_size_t]
_parse_with_options.restype = _Ptr(Output)
_tag_from_original_text = _dll.gumbo_tag_from_original_text
_tag_from_original_text.argtypes = [_Ptr(StringPiece)]
_tag_from_original_text.restype = None
_normalize_svg_tagname = _dll.gumbo_normalize_svg_tagname
_normalize_svg_tagname.argtypes = [_Ptr(StringPiece)]
_normalize_svg_tagname.restype = ctypes.c_char_p
_destroy_output = _dll.gumbo_destroy_output
_destroy_output.argtypes = [_Ptr(Options), _Ptr(Output)]
_destroy_output.restype = None
_tagname = _dll.gumbo_normalized_tagname
_tagname.argtypes = [Tag]
_tagname.restype = ctypes.c_char_p
_tag_enum = _dll.gumbo_tag_enum
_tag_enum.argtypes = [ctypes.c_char_p]
_tag_enum.restype = Tag
__all__ = ['StringPiece', 'SourcePosition', 'AttributeNamespace', 'Attribute',
'Vector', 'AttributeVector', 'NodeVector', 'QuirksMode', 'Document',
'Namespace', 'Tag', 'Element', 'Text', 'NodeType', 'Node',
'Options', 'Output', 'parse']
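# A minimal illustration of the context-manager API above; it assumes only that
# libgumbo was loaded successfully at import time, and the HTML snippet is arbitrary.
if __name__ == '__main__':
  with parse('<h1 class="title">Hello, Gumbo</h1>') as output:
    root = output.contents.root.contents  # the <html> element node
    print(root.tag)                       # HTML
    for child in root.children:           # implied <head> and <body>
      print(child.tag, len(child.children))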
|
the-stack_106_24487 | # -*- coding=utf-8
import requests
import logging
import hashlib
import base64
import os
import sys
import time
import copy
import json
import xml.dom.minidom
import xml.etree.ElementTree
from requests import Request, Session
from datetime import datetime
from six.moves.urllib.parse import quote, unquote, urlencode
from six import text_type, binary_type
from hashlib import md5
from dicttoxml import dicttoxml
from .streambody import StreamBody
from .xml2dict import Xml2Dict
from .cos_auth import CosS3Auth
from .cos_comm import *
from .cos_threadpool import SimpleThreadPool
from .cos_exception import CosClientError
from .cos_exception import CosServiceError
from .version import __version__
from .select_event_stream import EventStream
from .resumable_downloader import ResumableDownLoader
logger = logging.getLogger(__name__)
class CosConfig(object):
"""config类,保存用户相关信息"""
def __init__(self, Appid=None, Region=None, SecretId=None, SecretKey=None, Token=None, Scheme=None, Timeout=None,
Access_id=None, Access_key=None, Secret_id=None, Secret_key=None, Endpoint=None, IP=None, Port=None,
Anonymous=None, UA=None, Proxies=None, Domain=None, ServiceDomain=None, PoolConnections=10, PoolMaxSize=10):
"""初始化,保存用户的信息
:param Appid(string): 用户APPID.
:param Region(string): 地域信息.
:param SecretId(string): 秘钥SecretId.
:param SecretKey(string): 秘钥SecretKey.
:param Token(string): 临时秘钥使用的token.
:param Scheme(string): http/https
:param Timeout(int): http超时时间.
:param Access_id(string): 秘钥AccessId(兼容).
:param Access_key(string): 秘钥AccessKey(兼容).
:param Secret_id(string): 秘钥SecretId(兼容).
:param Secret_key(string): 秘钥SecretKey(兼容).
:param Endpoint(string): endpoint.
:param IP(string): 访问COS的ip
:param Port(int): 访问COS的port
:param Anonymous(bool): 是否使用匿名访问COS
:param UA(string): 使用自定义的UA来访问COS
:param Proxies(dict): 使用代理来访问COS
:param Domain(string): 使用自定义的域名来访问COS
:param ServiceDomain(string): 使用自定义的域名来访问cos service
:param PoolConnections(int): 连接池个数
:param PoolMaxSize(int): 连接池中最大连接数
"""
self._appid = to_unicode(Appid)
self._token = to_unicode(Token)
self._timeout = Timeout
self._region = Region
self._endpoint = Endpoint
self._ip = to_unicode(IP)
self._port = Port
self._anonymous = Anonymous
self._ua = UA
self._proxies = Proxies
self._domain = Domain
self._service_domain = ServiceDomain
self._pool_connections = PoolConnections
self._pool_maxsize = PoolMaxSize
if self._domain is None:
self._endpoint = format_endpoint(Endpoint, Region)
if Scheme is None:
Scheme = u'https'
Scheme = to_unicode(Scheme)
if(Scheme != u'http' and Scheme != u'https'):
raise CosClientError('Scheme can be only set to http/https')
self._scheme = Scheme
        # accept (SecretId, SecretKey) as well as (AccessId, AccessKey) for compatibility
if(SecretId and SecretKey):
self._secret_id = to_unicode(SecretId)
self._secret_key = to_unicode(SecretKey)
elif(Secret_id and Secret_key):
self._secret_id = to_unicode(Secret_id)
self._secret_key = to_unicode(Secret_key)
elif(Access_id and Access_key):
self._secret_id = to_unicode(Access_id)
self._secret_key = to_unicode(Access_key)
else:
            raise CosClientError('SecretId and SecretKey are required!')
def uri(self, bucket, path=None, endpoint=None, domain=None):
"""拼接url
:param bucket(string): 存储桶名称.
:param path(string): 请求COS的路径.
:return(string): 请求COS的URL地址.
"""
scheme = self._scheme
        # Build the request url; by default the request host is composed of the bucket and the endpoint.
        # When a custom domain is configured, the custom domain is used instead.
        # When ip and port are specified, access goes through ip:port, which takes the highest priority.
if domain is None:
domain = self._domain
if domain is not None:
url = domain
else:
bucket = format_bucket(bucket, self._appid)
if endpoint is None:
endpoint = self._endpoint
url = u"{bucket}.{endpoint}".format(bucket=bucket, endpoint=endpoint)
if self._ip is not None:
url = self._ip
if self._port is not None:
url = u"{ip}:{port}".format(ip=self._ip, port=self._port)
if path is not None:
if not path:
raise CosClientError("Key is required not empty")
path = to_unicode(path)
if path[0] == u'/':
path = path[1:]
path = quote(to_bytes(path), '/-_.~')
path = path.replace('./', '.%2F')
request_url = u"{scheme}://{url}/{path}".format(
scheme=to_unicode(scheme),
url=to_unicode(url),
path=to_unicode(path)
)
else:
request_url = u"{scheme}://{url}/".format(
scheme=to_unicode(scheme),
url=to_unicode(url)
)
return request_url
def get_host(self, Bucket):
"""传入bucket名称,根据endpoint获取Host名称
:param Bucket(string): bucket名称
:return (string): Host名称
"""
return u"{bucket}.{endpoint}".format(bucket=format_bucket(Bucket, self._appid), endpoint=self._endpoint)
def set_ip_port(self, IP, Port=None):
"""设置直接访问的ip:port,可以不指定Port,http默认为80,https默认为443
:param IP(string): 访问COS的ip
:param Port(int): 访问COS的port
:return None
"""
self._ip = to_unicode(IP)
self._port = Port
def set_credential(self, SecretId, SecretKey, Token=None):
"""设置访问的身份,包括secret_id,secret_key,临时秘钥token默认为空
:param SecretId(string): 秘钥SecretId.
:param SecretKey(string): 秘钥SecretKey.
:param Token(string): 临时秘钥使用的token.
"""
self._secret_id = to_unicode(SecretId)
self._secret_key = to_unicode(SecretKey)
self._token = to_unicode(Token)
class CosS3Client(object):
"""cos客户端类,封装相应请求"""
def __init__(self, conf, retry=1, session=None):
"""初始化client对象
:param conf(CosConfig): 用户的配置.
:param retry(int): 失败重试的次数.
:param session(object): http session.
"""
self._conf = conf
        self._retry = retry  # number of retries; may be increased for multipart uploads
if session is None:
self._session = requests.session()
self._session.mount('http://', requests.adapters.HTTPAdapter(pool_connections=self._conf._pool_connections, pool_maxsize=self._conf._pool_maxsize))
self._session.mount('https://', requests.adapters.HTTPAdapter(pool_connections=self._conf._pool_connections, pool_maxsize=self._conf._pool_maxsize))
else:
self._session = session
def get_conf(self):
"""获取配置"""
return self._conf
def get_auth(self, Method, Bucket, Key, Expired=300, Headers={}, Params={}):
"""获取签名
:param Method(string): http method,如'PUT','GET'.
:param Bucket(string): 存储桶名称.
:param Key(string): 请求COS的路径.
:param Expired(int): 签名有效时间,单位为s.
:param headers(dict): 签名中的http headers.
:param params(dict): 签名中的http params.
:return (string): 计算出的V5签名.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取上传请求的签名
auth_string = client.get_auth(
Method='PUT',
Bucket='bucket',
Key='test.txt',
Expired=600,
Headers={'header1': 'value1'},
Params={'param1': 'value1'}
)
print (auth_string)
"""
url = self._conf.uri(bucket=Bucket, path=Key)
r = Request(Method, url, headers=Headers, params=Params)
auth = CosS3Auth(self._conf, Key, Params, Expired)
return auth(r).headers['Authorization']
def send_request(self, method, url, bucket, timeout=30, cos_request=True, **kwargs):
"""封装request库发起http请求"""
        if self._conf._timeout is not None:  # a user-defined timeout overrides the default
timeout = self._conf._timeout
if self._conf._ua is not None:
kwargs['headers']['User-Agent'] = self._conf._ua
else:
kwargs['headers']['User-Agent'] = 'cos-python-sdk-v' + __version__
if self._conf._token is not None:
kwargs['headers']['x-cos-security-token'] = self._conf._token
        if self._conf._ip is not None:  # when accessing by IP, the request Host header must be set
if self._conf._domain is not None:
kwargs['headers']['Host'] = self._conf._domain
elif bucket is not None:
kwargs['headers']['Host'] = self._conf.get_host(bucket)
kwargs['headers'] = format_values(kwargs['headers'])
file_position = None
if 'data' in kwargs:
body = kwargs['data']
if hasattr(body, 'tell') and hasattr(body, 'seek') and hasattr(body, 'read'):
                file_position = body.tell()  # record the current position in the file
kwargs['data'] = to_bytes(kwargs['data'])
if self._conf._ip is not None and self._conf._scheme == 'https':
kwargs['verify'] = False
for j in range(self._retry + 1):
try:
if j != 0:
time.sleep(j)
if method == 'POST':
res = self._session.post(url, timeout=timeout, proxies=self._conf._proxies, **kwargs)
elif method == 'GET':
res = self._session.get(url, timeout=timeout, proxies=self._conf._proxies, **kwargs)
elif method == 'PUT':
res = self._session.put(url, timeout=timeout, proxies=self._conf._proxies, **kwargs)
elif method == 'DELETE':
res = self._session.delete(url, timeout=timeout, proxies=self._conf._proxies, **kwargs)
elif method == 'HEAD':
res = self._session.head(url, timeout=timeout, proxies=self._conf._proxies, **kwargs)
                if res.status_code < 400:  # 2xx and 3xx responses are treated as success
                    return res
                elif res.status_code < 500:  # do not retry on 4xx
break
            except Exception as e:  # catch client-side errors raised by requests (e.g. timeouts) and convert them into client errors
logger.exception('url:%s, retry_time:%d exception:%s' % (url, j, str(e)))
can_retry = False
if 'data' in kwargs:
body = kwargs['data']
if hasattr(body, 'tell') and hasattr(body, 'seek') and hasattr(body, 'read'):
can_retry = True
elif isinstance(body, text_type) or isinstance(body, binary_type):
can_retry = True
if j < self._retry and can_retry:
if file_position is not None:
try:
kwargs['data'].seek(file_position)
except IOError as ioe:
raise CosClientError(str(ioe))
continue
raise CosClientError(str(e))
if not cos_request:
return res
        if res.status_code >= 400:  # all 4XX and 5XX responses are treated as CosServiceError
            if method == 'HEAD' and res.status_code == 404:  # HEAD needs special handling
info = dict()
info['code'] = 'NoSuchResource'
info['message'] = 'The Resource You Head Not Exist'
info['resource'] = url
if 'x-cos-request-id' in res.headers:
info['requestid'] = res.headers['x-cos-request-id']
if 'x-cos-trace-id' in res.headers:
info['traceid'] = res.headers['x-cos-trace-id']
logger.warn(info)
raise CosServiceError(method, info, res.status_code)
else:
msg = res.text
                if msg == u'':  # if the server returned no error body, report the headers instead
msg = res.headers
logger.error(msg)
raise CosServiceError(method, msg, res.status_code)
return None
# s3 object interface begin
def put_object(self, Bucket, Body, Key, EnableMD5=False, **kwargs):
"""单文件上传接口,适用于小文件,最大不得超过5GB
:param Bucket(string): 存储桶名称.
:param Body(file|string): 上传的文件内容,类型为文件流或字节流.
:param Key(string): COS路径.
:param EnableMD5(bool): 是否需要SDK计算Content-MD5,打开此开关会增加上传耗时.
:kwargs(dict): 设置上传的headers.
:return(dict): 上传成功返回的结果,包含ETag等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 上传本地文件到cos
with open('test.txt', 'rb') as fp:
response = client.put_object(
Bucket='bucket',
Body=fp,
Key='test.txt'
)
print (response['ETag'])
"""
check_object_content_length(Body)
headers = mapped(kwargs)
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("put object, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
if EnableMD5:
md5_str = get_content_md5(Body)
if md5_str:
headers['Content-MD5'] = md5_str
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, Key),
data=Body,
headers=headers)
response = dict(**rt.headers)
return response
def get_object(self, Bucket, Key, **kwargs):
"""单文件下载接口
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param kwargs(dict): 设置下载的headers.
:return(dict): 下载成功返回的结果,包含Body对应的StreamBody,可以获取文件流或下载文件到本地.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 下载cos上的文件到本地
response = client.get_object(
Bucket='bucket',
Key='test.txt'
)
response['Body'].get_stream_to_file('local_file.txt')
"""
headers = mapped(kwargs)
final_headers = {}
params = {}
for key in headers:
if key.startswith("response"):
params[key] = headers[key]
else:
final_headers[key] = headers[key]
headers = final_headers
if 'versionId' in headers:
params['versionId'] = headers['versionId']
del headers['versionId']
params = format_values(params)
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("get object, url=:{url} ,headers=:{headers}, params=:{params}".format(
url=url,
headers=headers,
params=params))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
stream=True,
auth=CosS3Auth(self._conf, Key, params=params),
params=params,
headers=headers)
response = dict(**rt.headers)
response['Body'] = StreamBody(rt)
return response
def get_object_sensitive_content_recognition(self, Bucket, Key, DetectType, **kwargs):
"""文件内容识别接口 https://cloud.tencent.com/document/product/460/37318
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param DetectType(int): 内容识别标志,位计算 1:porn, 2:terrorist, 4:politics, 8:ads
:param kwargs(dict): 设置下载的headers.
:return(dict): 下载成功返回的结果,dict类型.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# run sensitive content recognition on an object stored in COS
response = client.get_object_sensitive_content_recognition(
Bucket='bucket',
DetectType=CiDetectType.PORN | CiDetectType.POLITICS,
Key='test.png'
)
print(response)
"""
headers = mapped(kwargs)
final_headers = {}
params = {}
for key in headers:
if key.startswith("response"):
params[key] = headers[key]
else:
final_headers[key] = headers[key]
headers = final_headers
if 'versionId' in headers:
params['versionId'] = headers['versionId']
del headers['versionId']
params['ci-process'] = 'sensitive-content-recognition'
detect_type = ''
if DetectType & CiDetectType.PORN > 0:
detect_type += 'porn'
if DetectType & CiDetectType.TERRORIST > 0:
if len(detect_type) > 0:
detect_type += ','
detect_type += 'terrorist'
if DetectType & CiDetectType.POLITICS > 0:
if len(detect_type) > 0:
detect_type += ','
detect_type += 'politics'
if DetectType & CiDetectType.ADS > 0:
if len(detect_type) > 0:
detect_type += ','
detect_type += 'ads'
params['detect-type'] = detect_type
params = format_values(params)
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("get object sensitive content recognition, url=:{url} ,headers=:{headers}, params=:{params}".format(
url=url,
headers=headers,
params=params))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
stream=True,
auth=CosS3Auth(self._conf, Key, params=params),
params=params,
headers=headers)
data = xml_to_dict(rt.content)
return data
def get_presigned_url(self, Bucket, Key, Method, Expired=300, Params={}, Headers={}):
"""生成预签名的url
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param Method(string): HTTP请求的方法, 'PUT'|'POST'|'GET'|'DELETE'|'HEAD'
:param Expired(int): 签名过期时间.
:param Params(dict): 签入签名的参数
:param Headers(dict): 签入签名的头部
:return(string): 预先签名的URL.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取预签名链接
response = client.get_presigned_url(
Bucket='bucket',
Key='test.txt',
Method='PUT'
)
"""
url = self._conf.uri(bucket=Bucket, path=Key)
sign = self.get_auth(Method=Method, Bucket=Bucket, Key=Key, Expired=Expired, Headers=Headers, Params=Params)
sign = urlencode(dict([item.split('=', 1) for item in sign.split('&')]))
url = url + '?' + sign
if Params:
url = url + '&' + urlencode(Params)
return url
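# The URL returned by get_presigned_url can be handed to any HTTP client. A minimal
# hedged sketch follows; using the third-party `requests` library here is an assumption
# and not part of this SDK:
#
#   import requests
#   put_url = client.get_presigned_url(Bucket='bucket', Key='test.txt', Method='PUT', Expired=300)
#   requests.put(put_url, data=b'hello cos')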
def get_presigned_download_url(self, Bucket, Key, Expired=300, Params={}, Headers={}):
"""生成预签名的下载url
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param Expired(int): 签名过期时间.
:param Params(dict): 签入签名的参数
:param Headers(dict): 签入签名的头部
:return(string): 预先签名的下载URL.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取预签名文件下载链接
response = client.get_presigned_download_url(
Bucket='bucket',
Key='test.txt'
)
"""
return self.get_presigned_url(Bucket, Key, 'GET', Expired, Params, Headers)
def delete_object(self, Bucket, Key, **kwargs):
"""单文件删除接口
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param kwargs(dict): 设置请求headers.
:return: dict.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 删除一个文件
response = client.delete_object(
Bucket='bucket',
Key='test.txt'
)
"""
headers = mapped(kwargs)
params = {}
if 'versionId' in headers:
params['versionId'] = headers['versionId']
del headers['versionId']
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("delete object, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, Key),
headers=headers,
params=params)
data = dict(**rt.headers)
return data
def delete_objects(self, Bucket, Delete={}, **kwargs):
"""文件批量删除接口,单次最多支持1000个object
:param Bucket(string): 存储桶名称.
:param Delete(dict): 批量删除的object信息.
:param kwargs(dict): 设置请求headers.
:return(dict): 批量删除的结果.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 批量删除文件
objects = {
"Quiet": "true",
"Object": [
{
"Key": "file_name1"
},
{
"Key": "file_name2"
}
]
}
response = client.delete_objects(
Bucket='bucket',
Delete=objects
)
"""
lst = ['<Object>', '</Object>'] # XML tags that hold list values
xml_config = format_xml(data=Delete, root='Delete', lst=lst)
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'delete': ''}
params = format_values(params)
url = self._conf.uri(bucket=Bucket)
logger.info("delete objects, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='POST',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
format_dict(data, ['Deleted', 'Error'])
return data
def head_object(self, Bucket, Key, **kwargs):
"""获取文件信息
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param kwargs(dict): 设置请求headers.
:return(dict): 文件的metadata信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 查询文件属性
response = client.head_object(
Bucket='bucket',
Key='test.txt'
)
"""
headers = mapped(kwargs)
params = {}
if 'versionId' in headers:
params['versionId'] = headers['versionId']
del headers['versionId']
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("head object, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='HEAD',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, Key, params=params),
headers=headers,
params=params)
return dict(**rt.headers)
def copy_object(self, Bucket, Key, CopySource, CopyStatus='Copy', **kwargs):
"""文件拷贝,文件信息修改
:param Bucket(string): 存储桶名称.
:param Key(string): 上传COS路径.
:param CopySource(dict): 拷贝源,包含Appid,Bucket,Region,Key.
:param CopyStatus(string): 拷贝状态,可选值'Copy'|'Replaced'.
:param kwargs(dict): 设置请求headers.
:return(dict): 拷贝成功的结果.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 文件拷贝
copy_source = {'Bucket': 'test04-1252448703', 'Key': '/test.txt', 'Region': 'ap-beijing-1'}
response = client.copy_object(
Bucket='bucket',
Key='test.txt',
CopySource=copy_source
)
"""
headers = mapped(kwargs)
headers['x-cos-copy-source'] = gen_copy_source_url(CopySource)
if CopyStatus != 'Copy' and CopyStatus != 'Replaced':
raise CosClientError('CopyStatus must be Copy or Replaced')
headers['x-cos-metadata-directive'] = CopyStatus
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("copy object, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, Key),
headers=headers)
body = xml_to_dict(rt.content)
if 'ETag' not in body:
logger.error(rt.content)
raise CosServiceError('PUT', rt.content, 200)
data = dict(**rt.headers)
data.update(body)
return data
def upload_part_copy(self, Bucket, Key, PartNumber, UploadId, CopySource, CopySourceRange='', **kwargs):
"""拷贝指定文件至分块上传
:param Bucket(string): 存储桶名称.
:param Key(string): 上传COS路径.
:param PartNumber(int): 上传分块的编号.
:param UploadId(string): 分块上传创建的UploadId.
:param CopySource(dict): 拷贝源,包含Appid,Bucket,Region,Key.
:param CopySourceRange(string): 拷贝源的字节范围,bytes=first-last。
:param kwargs(dict): 设置请求headers.
:return(dict): 拷贝成功的结果.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 分块上传拷贝分块
copy_source = {'Bucket': 'test04-1252448703', 'Key': '/test.txt', 'Region': 'ap-beijing-1'}
response = client.upload_part_copy(
Bucket='bucket',
Key='test.txt',
PartNumber=1,
UploadId='your uploadid',
CopySource=copy_source
)
"""
headers = mapped(kwargs)
headers['x-cos-copy-source'] = gen_copy_source_url(CopySource)
headers['x-cos-copy-source-range'] = CopySourceRange
params = {'partNumber': PartNumber, 'uploadId': UploadId}
params = format_values(params)
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("upload part copy, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
headers=headers,
params=params,
auth=CosS3Auth(self._conf, Key, params=params))
body = xml_to_dict(rt.content)
data = dict(**rt.headers)
data.update(body)
return data
def create_multipart_upload(self, Bucket, Key, **kwargs):
"""创建分块上传,适用于大文件上传
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param kwargs(dict): 设置请求headers.
:return(dict): 初始化分块上传返回的结果,包含UploadId等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 创建分块上传
response = client.create_multipart_upload(
Bucket='bucket',
Key='test.txt'
)
"""
headers = mapped(kwargs)
params = {'uploads': ''}
params = format_values(params)
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("create multipart upload, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='POST',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, Key, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
return data
def upload_part(self, Bucket, Key, Body, PartNumber, UploadId, EnableMD5=False, **kwargs):
"""上传分块,单个大小不得超过5GB
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param Body(file|string): 上传分块的内容,可以为文件流或者字节流.
:param PartNumber(int): 上传分块的编号.
:param UploadId(string): 分块上传创建的UploadId.
:param kwargs(dict): 设置请求headers.
:param EnableMD5(bool): 是否需要SDK计算Content-MD5,打开此开关会增加上传耗时.
:return(dict): 上传成功返回的结果,包含单个分块ETag等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 分块上传
with open('test.txt', 'rb') as fp:
data = fp.read(1024*1024)
response = client.upload_part(
Bucket='bucket',
Body=data,
Key='test.txt'
)
"""
check_object_content_length(Body)
headers = mapped(kwargs)
params = {'partNumber': PartNumber, 'uploadId': UploadId}
params = format_values(params)
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("upload part, url=:{url} ,headers=:{headers}, params=:{params}".format(
url=url,
headers=headers,
params=params))
if EnableMD5:
md5_str = get_content_md5(Body)
if md5_str:
headers['Content-MD5'] = md5_str
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
headers=headers,
params=params,
auth=CosS3Auth(self._conf, Key, params=params),
data=Body)
response = dict(**rt.headers)
return response
def complete_multipart_upload(self, Bucket, Key, UploadId, MultipartUpload={}, **kwargs):
"""完成分片上传,除最后一块分块块大小必须大于等于1MB,否则会返回错误.
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param UploadId(string): 分块上传创建的UploadId.
:param MultipartUpload(dict): 所有分块的信息,包含Etag和PartNumber.
:param kwargs(dict): 设置请求headers.
:return(dict): 上传成功返回的结果,包含整个文件的ETag等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 分块上传
response = client.complete_multipart_upload(
Bucket='bucket',
Key='multipartfile.txt',
UploadId='uploadid',
MultipartUpload={'Part': lst}
)
"""
headers = mapped(kwargs)
params = {'uploadId': UploadId}
params = format_values(params)
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("complete multipart upload, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='POST',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, Key, params=params),
data=dict_to_xml(MultipartUpload),
timeout=1200, # completing a multipart upload of a large file can take a long time, so use a 20-minute timeout
headers=headers,
params=params)
body = xml_to_dict(rt.content)
# a 200 OK from complete multipart upload does not guarantee success; if the body has no ETag, treat the upload as failed
if 'ETag' not in body:
logger.error(rt.content)
raise CosServiceError('POST', rt.content, 200)
data = dict(**rt.headers)
data.update(body)
return data
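# create_multipart_upload, upload_part and complete_multipart_upload are normally chained
# together. A minimal sketch of that flow is shown below; the bucket name, file name and
# 8MB part size are illustrative assumptions, not SDK defaults:
#
#   upload_id = client.create_multipart_upload(Bucket='bucket', Key='big.bin')['UploadId']
#   parts = []
#   with open('big.bin', 'rb') as fp:
#       part_number = 1
#       while True:
#           chunk = fp.read(8 * 1024 * 1024)  # every part except the last must be >= 1MB
#           if not chunk:
#               break
#           rsp = client.upload_part(Bucket='bucket', Key='big.bin', Body=chunk,
#                                    PartNumber=part_number, UploadId=upload_id)
#           parts.append({'PartNumber': part_number, 'ETag': rsp['ETag']})
#           part_number += 1
#   client.complete_multipart_upload(Bucket='bucket', Key='big.bin', UploadId=upload_id,
#                                    MultipartUpload={'Part': parts})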
def abort_multipart_upload(self, Bucket, Key, UploadId, **kwargs):
"""放弃一个已经存在的分片上传任务,删除所有已经存在的分片.
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param UploadId(string): 分块上传创建的UploadId.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# abort the multipart upload
response = client.abort_multipart_upload(
Bucket='bucket',
Key='multipartfile.txt',
UploadId='uploadid'
)
"""
headers = mapped(kwargs)
params = {'uploadId': UploadId}
params = format_values(params)
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("abort multipart upload, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, Key, params=params),
headers=headers,
params=params)
return None
def list_parts(self, Bucket, Key, UploadId, EncodingType='', MaxParts=1000, PartNumberMarker=0, **kwargs):
"""列出已上传的分片.
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param UploadId(string): 分块上传创建的UploadId.
:param EncodingType(string): 设置返回结果编码方式,只能设置为url.
:param MaxParts(int): 设置单次返回最大的分块数量,最大为1000.
:param PartNumberMarker(int): 设置返回的开始处,从PartNumberMarker下一个分块开始列出.
:param kwargs(dict): 设置请求headers.
:return(dict): 分块的相关信息,包括Etag和PartNumber等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 列出分块
response = client.list_parts(
Bucket='bucket',
Key='multipartfile.txt',
UploadId='uploadid'
)
"""
headers = mapped(kwargs)
decodeflag = True
params = {
'uploadId': UploadId,
'part-number-marker': PartNumberMarker,
'max-parts': MaxParts}
if EncodingType:
if EncodingType != 'url':
raise CosClientError('EncodingType must be url')
params['encoding-type'] = EncodingType
decodeflag = False
else:
params['encoding-type'] = 'url'
params = format_values(params)
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("list multipart upload parts, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, Key, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
format_dict(data, ['Part'])
if decodeflag:
decode_result(data, ['Key'], [])
return data
def put_object_acl(self, Bucket, Key, AccessControlPolicy={}, **kwargs):
"""设置object ACL
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param AccessControlPolicy(dict): 设置object ACL规则.
:param kwargs(dict): 通过headers来设置ACL.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 设置 object ACL
response = client.put_object_acl(
Bucket='bucket',
Key='multipartfile.txt',
ACL='public-read',
GrantRead='id="qcs::cam::uin/123:uin/456",id="qcs::cam::uin/123:uin/123"'
)
"""
lst = [ # XML tags that hold list values
'<Grant>',
'</Grant>']
xml_config = ""
if AccessControlPolicy:
xml_config = format_xml(data=AccessControlPolicy, root='AccessControlPolicy', lst=lst)
headers = mapped(kwargs)
params = {'acl': ''}
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("put object acl, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, Key, params=params),
headers=headers,
params=params)
return None
def get_object_acl(self, Bucket, Key, **kwargs):
"""获取object ACL
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param kwargs(dict): 设置请求headers.
:return(dict): Object对应的ACL信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取object ACL
response = client.get_object_acl(
Bucket='bucket',
Key='multipartfile.txt'
)
"""
headers = mapped(kwargs)
params = {'acl': ''}
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("get object acl, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, Key, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content, "type", "Type")
if data['AccessControlList'] is not None and isinstance(data['AccessControlList']['Grant'], dict):
lst = []
lst.append(data['AccessControlList']['Grant'])
data['AccessControlList']['Grant'] = lst
return data
def restore_object(self, Bucket, Key, RestoreRequest={}, **kwargs):
"""取回沉降到CAS中的object到COS
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param RestoreRequest(dict): 取回object的属性设置
:param kwargs(dict): 设置请求headers.
:return: None.
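A minimal usage sketch; the RestoreRequest field names below (Days, CASJobParameters, Tier) follow the COS restore API and are illustrative assumptions.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token)  # build the config object
client = CosS3Client(config)
# restore an archived object for one day (illustrative values)
response = client.restore_object(
Bucket='bucket',
Key='archived.txt',
RestoreRequest={
'Days': 1,
'CASJobParameters': {'Tier': 'Standard'}
}
)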
"""
params = {'restore': ''}
headers = mapped(kwargs)
if 'versionId' in headers:
params['versionId'] = headers['versionId']
headers.pop('versionId')
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("restore_object, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
xml_config = format_xml(data=RestoreRequest, root='RestoreRequest')
rt = self.send_request(
method='POST',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, Key, params=params),
headers=headers,
params=params)
return None
def select_object_content(self, Bucket, Key, Expression, ExpressionType, InputSerialization, OutputSerialization, RequestProgress=None, **kwargs):
"""从指定文对象中检索内容
:param Bucket(string): 存储桶名称.
:param Key(string): 检索的路径.
:param Expression(string): 查询语句
:param ExpressionType(string): 查询语句的类型
:param RequestProgress(dict): 查询进度设置
:param InputSerialization(dict): 输入格式设置
:param OutputSerialization(dict): 输出格式设置
:param kwargs(dict): 设置请求headers.
:return(dict): 检索内容.
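A minimal usage sketch; the CSV serialization options and the way the returned Payload (an EventStream) is consumed are illustrative assumptions.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token)  # build the config object
client = CosS3Client(config)
response = client.select_object_content(
Bucket='bucket',
Key='test.csv',
Expression='Select * from COSObject',
ExpressionType='SQL',
InputSerialization={'CompressionType': 'NONE', 'CSV': {'FileHeaderInfo': 'IGNORE'}},
OutputSerialization={'CSV': {'RecordDelimiter': '\n'}}
)
event_stream = response['Payload']  # consume the stream with the EventStream helpers provided by this SDK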
"""
params = {'select': '', 'select-type': 2}
headers = mapped(kwargs)
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("select object content, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
SelectRequest = {
'Expression': Expression,
'ExpressionType': ExpressionType,
'InputSerialization': InputSerialization,
'OutputSerialization': OutputSerialization
}
if RequestProgress is not None:
SelectRequest['RequestProgress'] = RequestProgress
xml_config = format_xml(data=SelectRequest, root='SelectRequest')
rt = self.send_request(
method='POST',
url=url,
stream=True,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, Key, params=params),
headers=headers,
params=params)
data = {'Payload': EventStream(rt)}
return data
# s3 bucket interface begin
def create_bucket(self, Bucket, **kwargs):
"""创建一个bucket
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 创建bucket
response = client.create_bucket(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
url = self._conf.uri(bucket=Bucket)
logger.info("create bucket, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf),
headers=headers)
return None
def delete_bucket(self, Bucket, **kwargs):
"""删除一个bucket,bucket必须为空
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 删除bucket
response = client.delete_bucket(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
url = self._conf.uri(bucket=Bucket)
logger.info("delete bucket, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf),
headers=headers)
return None
def list_objects(self, Bucket, Prefix="", Delimiter="", Marker="", MaxKeys=1000, EncodingType="", **kwargs):
"""获取文件列表
:param Bucket(string): 存储桶名称.
:param Prefix(string): 设置匹配文件的前缀.
:param Delimiter(string): 分隔符.
:param Marker(string): 从marker开始列出条目.
:param MaxKeys(int): 设置单次返回最大的数量,最大为1000.
:param EncodingType(string): 设置返回结果编码方式,只能设置为url.
:param kwargs(dict): 设置请求headers.
:return(dict): 文件的相关信息,包括Etag等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 列出bucket
response = client.list_objects(
Bucket='bucket',
MaxKeys=100,
Prefix='中文',
Delimiter='/'
)
"""
decodeflag = True # whether the result needs to be url-decoded
headers = mapped(kwargs)
url = self._conf.uri(bucket=Bucket)
logger.info("list objects, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
params = {
'prefix': Prefix,
'delimiter': Delimiter,
'marker': Marker,
'max-keys': MaxKeys
}
if EncodingType:
if EncodingType != 'url':
raise CosClientError('EncodingType must be url')
decodeflag = False # the caller set EncodingType explicitly, so skip decoding
params['encoding-type'] = EncodingType
else:
params['encoding-type'] = 'url'
params = format_values(params)
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
params=params,
headers=headers,
auth=CosS3Auth(self._conf, params=params))
data = xml_to_dict(rt.content)
format_dict(data, ['Contents', 'CommonPrefixes'])
if decodeflag:
decode_result(
data,
[
'Prefix',
'Marker',
'NextMarker'
],
[
['Contents', 'Key'],
['CommonPrefixes', 'Prefix']
]
)
return data
def list_objects_versions(self, Bucket, Prefix="", Delimiter="", KeyMarker="", VersionIdMarker="", MaxKeys=1000, EncodingType="", **kwargs):
"""获取文件列表
:param Bucket(string): 存储桶名称.
:param Prefix(string): 设置匹配文件的前缀.
:param Delimiter(string): 分隔符.
:param KeyMarker(string): 从KeyMarker指定的Key开始列出条目.
:param VersionIdMarker(string): 从VersionIdMarker指定的版本开始列出条目.
:param MaxKeys(int): 设置单次返回最大的数量,最大为1000.
:param EncodingType(string): 设置返回结果编码方式,只能设置为url.
:param kwargs(dict): 设置请求headers.
:return(dict): 文件的相关信息,包括Etag等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 列出bucket带版本
response = client.list_objects_versions(
Bucket='bucket',
MaxKeys=100,
Prefix='中文',
Delimiter='/'
)
"""
headers = mapped(kwargs)
decodeflag = True
url = self._conf.uri(bucket=Bucket)
logger.info("list objects versions, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
params = {
'versions': '',
'prefix': Prefix,
'delimiter': Delimiter,
'key-marker': KeyMarker,
'version-id-marker': VersionIdMarker,
'max-keys': MaxKeys
}
if EncodingType:
if EncodingType != 'url':
raise CosClientError('EncodingType must be url')
decodeflag = False
params['encoding-type'] = EncodingType
else:
params['encoding-type'] = 'url'
params = format_values(params)
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
params=params,
headers=headers,
auth=CosS3Auth(self._conf, params=params))
data = xml_to_dict(rt.content)
format_dict(data, ['Version', 'DeleteMarker', 'CommonPrefixes'])
if decodeflag:
decode_result(
data,
[
'Prefix',
'KeyMarker',
'NextKeyMarker',
'VersionIdMarker',
'NextVersionIdMarker'
],
[
['Version', 'Key'],
['CommonPrefixes', 'Prefix'],
['DeleteMarker', 'Key']
]
)
return data
def list_multipart_uploads(self, Bucket, Prefix="", Delimiter="", KeyMarker="", UploadIdMarker="", MaxUploads=1000, EncodingType="", **kwargs):
"""获取Bucket中正在进行的分块上传
:param Bucket(string): 存储桶名称.
:param Prefix(string): 设置匹配文件的前缀.
:param Delimiter(string): 分隔符.
:param KeyMarker(string): 从KeyMarker指定的Key开始列出条目.
:param UploadIdMarker(string): 从UploadIdMarker指定的UploadID开始列出条目.
:param MaxUploads(int): 设置单次返回最大的数量,最大为1000.
:param EncodingType(string): 设置返回结果编码方式,只能设置为url.
:param kwargs(dict): 设置请求headers.
:return(dict): 文件的相关信息,包括Etag等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 列出所有分块上传
response = client.list_multipart_uploads(
Bucket='bucket',
MaxUploads=100,
Prefix='中文',
Delimiter='/'
)
"""
headers = mapped(kwargs)
decodeflag = True
url = self._conf.uri(bucket=Bucket)
logger.info("get multipart uploads, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
params = {
'uploads': '',
'prefix': Prefix,
'delimiter': Delimiter,
'key-marker': KeyMarker,
'upload-id-marker': UploadIdMarker,
'max-uploads': MaxUploads
}
if EncodingType:
if EncodingType != 'url':
raise CosClientError('EncodingType must be url')
decodeflag = False
params['encoding-type'] = EncodingType
else:
params['encoding-type'] = 'url'
params = format_values(params)
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
params=params,
headers=headers,
auth=CosS3Auth(self._conf, params=params))
data = xml_to_dict(rt.content)
format_dict(data, ['Upload', 'CommonPrefixes'])
if decodeflag:
decode_result(
data,
[
'Prefix',
'KeyMarker',
'NextKeyMarker',
'UploadIdMarker',
'NextUploadIdMarker'
],
[
['Upload', 'Key'],
['CommonPrefixes', 'Prefix']
]
)
return data
def head_bucket(self, Bucket, **kwargs):
"""确认bucket是否存在
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 确认bucket是否存在
response = client.head_bucket(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
url = self._conf.uri(bucket=Bucket)
logger.info("head bucket, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='HEAD',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf),
headers=headers)
return None
def put_bucket_acl(self, Bucket, AccessControlPolicy={}, **kwargs):
"""设置bucket ACL
:param Bucket(string): 存储桶名称.
:param AccessControlPolicy(dict): 设置bucket ACL规则.
:param kwargs(dict): 通过headers来设置ACL.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# set bucket ACL
response = client.put_bucket_acl(
Bucket='bucket',
ACL='private',
GrantRead='id="qcs::cam::uin/123:uin/456",id="qcs::cam::uin/123:uin/123"'
)
"""
lst = [ # XML tags that hold list values
'<Grant>',
'</Grant>']
xml_config = ""
if AccessControlPolicy:
xml_config = format_xml(data=AccessControlPolicy, root='AccessControlPolicy', lst=lst)
headers = mapped(kwargs)
params = {'acl': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket acl, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_acl(self, Bucket, **kwargs):
"""获取bucket ACL
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置headers.
:return(dict): Bucket对应的ACL信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# get bucket ACL
response = client.get_bucket_acl(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'acl': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket acl, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content, "type", "Type")
if data['AccessControlList'] is not None and not isinstance(data['AccessControlList']['Grant'], list):
lst = []
lst.append(data['AccessControlList']['Grant'])
data['AccessControlList']['Grant'] = lst
return data
def put_bucket_cors(self, Bucket, CORSConfiguration={}, **kwargs):
"""设置bucket CORS
:param Bucket(string): 存储桶名称.
:param CORSConfiguration(dict): 设置Bucket跨域规则.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 设置bucket跨域配置
cors_config = {
'CORSRule': [
{
'ID': '1234',
'AllowedOrigin': ['http://www.qq.com'],
'AllowedMethod': ['GET', 'PUT'],
'AllowedHeader': ['x-cos-meta-test'],
'ExposeHeader': ['x-cos-meta-test1'],
'MaxAgeSeconds': 500
}
]
}
response = client.put_bucket_cors(
Bucket='bucket',
CORSConfiguration=cors_config
)
"""
lst = [ # XML tags that hold list values
'<CORSRule>',
'<AllowedOrigin>',
'<AllowedMethod>',
'<AllowedHeader>',
'<ExposeHeader>',
'</CORSRule>',
'</AllowedOrigin>',
'</AllowedMethod>',
'</AllowedHeader>',
'</ExposeHeader>']
xml_config = format_xml(data=CORSConfiguration, root='CORSConfiguration', lst=lst)
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'cors': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket cors, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_cors(self, Bucket, **kwargs):
"""获取bucket CORS
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): 获取Bucket对应的跨域配置.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket跨域配置
response = client.get_bucket_cors(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'cors': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket cors, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
if 'CORSRule' in data and not isinstance(data['CORSRule'], list):
lst = []
lst.append(data['CORSRule'])
data['CORSRule'] = lst
if 'CORSRule' in data:
allow_lst = ['AllowedOrigin', 'AllowedMethod', 'AllowedHeader', 'ExposeHeader']
for rule in data['CORSRule']:
for text in allow_lst:
if text in rule and not isinstance(rule[text], list):
lst = []
lst.append(rule[text])
rule[text] = lst
return data
def delete_bucket_cors(self, Bucket, **kwargs):
"""删除bucket CORS
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 删除bucket跨域配置
response = client.delete_bucket_cors(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'cors': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("delete bucket cors, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def put_bucket_lifecycle(self, Bucket, LifecycleConfiguration={}, **kwargs):
"""设置bucket LifeCycle
:param Bucket(string): 存储桶名称.
:param LifecycleConfiguration(dict): 设置Bucket的生命周期规则.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 设置bucket生命周期配置
lifecycle_config = {
'Rule': [
{
'Expiration': {'Date': get_date(2018, 4, 24)},
'ID': '123',
'Filter': {'Prefix': ''},
'Status': 'Enabled',
}
]
}
response = client.put_bucket_lifecycle(
Bucket='bucket',
LifecycleConfiguration=lifecycle_config
)
"""
# XML tags that hold list values
lst = [
'<Rule>',
'<Tag>',
'<Transition>',
'<NoncurrentVersionTransition>',
'</NoncurrentVersionTransition>',
'</Transition>',
'</Tag>',
'</Rule>'
]
xml_config = format_xml(data=LifecycleConfiguration, root='LifecycleConfiguration', lst=lst)
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'lifecycle': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket lifecycle, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_lifecycle(self, Bucket, **kwargs):
"""获取bucket LifeCycle
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): Bucket对应的生命周期配置.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket生命周期配置
response = client.get_bucket_lifecycle(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'lifecycle': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket lifecycle, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
format_dict(data, ['Rule'])
if 'Rule' in data:
for rule in data['Rule']:
format_dict(rule, ['Transition', 'NoncurrentVersionTransition'])
if 'Filter' in rule:
format_dict(rule['Filter'], ['Tag'])
return data
def delete_bucket_lifecycle(self, Bucket, **kwargs):
"""删除bucket LifeCycle
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 删除bucket生命周期配置
response = client.delete_bucket_lifecycle(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'lifecycle': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("delete bucket lifecycle, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def put_bucket_versioning(self, Bucket, Status, **kwargs):
"""设置bucket版本控制
:param Bucket(string): 存储桶名称.
:param Status(string): 设置Bucket版本控制的状态,可选值为'Enabled'|'Suspended'.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 打开多版本配置
response = client.put_bucket_versioning(
Bucket='bucket',
Status='Enabled'
)
"""
headers = mapped(kwargs)
params = {'versioning': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket versioning, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
if Status != 'Enabled' and Status != 'Suspended':
raise CosClientError('versioning status must be set to Enabled or Suspended!')
config = dict()
config['Status'] = Status
xml_config = format_xml(data=config, root='VersioningConfiguration')
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_versioning(self, Bucket, **kwargs):
"""查询bucket版本控制
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): 获取Bucket版本控制的配置.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取多版本配置
response = client.get_bucket_versioning(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'versioning': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket versioning, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
return data
def get_bucket_location(self, Bucket, **kwargs):
"""查询bucket所属地域
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): 存储桶的地域信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket所在地域信息
response = client.get_bucket_location(
Bucket='bucket'
)
print (response['LocationConstraint'])
"""
headers = mapped(kwargs)
params = {'location': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket location, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
root = xml.etree.ElementTree.fromstring(rt.content)
data = dict()
data['LocationConstraint'] = root.text
return data
def put_bucket_replication(self, Bucket, ReplicationConfiguration={}, **kwargs):
"""设置bucket跨区域复制配置
:param Bucket(string): 存储桶名称.
:param ReplicationConfiguration(dict): 设置Bucket的跨区域复制规则.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 设置bucket跨区域复制配置
replication_config = {
'Role': 'qcs::cam::uin/735905558:uin/735905558',
'Rule': [
{
'ID': '123',
'Status': 'Enabled',
'Prefix': 'replication',
'Destination': {
'Bucket': 'qcs:id/0:cos:cn-south:appid/1252448703:replicationsouth'
}
}
]
}
response = client.put_bucket_replication(
Bucket='bucket',
ReplicationConfiguration=replication_config
)
"""
lst = ['<Rule>', '</Rule>'] # XML tags that hold list values
xml_config = format_xml(data=ReplicationConfiguration, root='ReplicationConfiguration', lst=lst)
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'replication': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket replication, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_replication(self, Bucket, **kwargs):
"""获取bucket 跨区域复制配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): Bucket对应的跨区域复制配置.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket跨区域复制配置
response = client.get_bucket_replication(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'replication': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket replication, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
format_dict(data, ['Rule'])
return data
def delete_bucket_replication(self, Bucket, **kwargs):
"""删除bucket 跨区域复制配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 删除bucket跨区域复制配置
response = client.delete_bucket_replication(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'replication': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("delete bucket replication, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def put_bucket_website(self, Bucket, WebsiteConfiguration={}, **kwargs):
"""设置bucket静态网站配置
:param Bucket(string): 存储桶名称.
:param WebsiteConfiguration(dict): the static website configuration for the bucket.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# set the bucket static website configuration
website_config = {
'IndexDocument': {
'Suffix': 'string'
},
'ErrorDocument': {
'Key': 'string'
},
'RedirectAllRequestsTo': {
'HostName': 'string',
'Protocol': 'http'|'https'
},
'RoutingRules': [
{
'Condition': {
'HttpErrorCodeReturnedEquals': 'string',
'KeyPrefixEquals': 'string'
},
'Redirect': {
'HostName': 'string',
'HttpRedirectCode': 'string',
'Protocol': 'http'|'https',
'ReplaceKeyPrefixWith': 'string',
'ReplaceKeyWith': 'string'
}
}
]
}
response = client.put_bucket_website(
Bucket='bucket',
WebsiteConfiguration=website_config
)
"""
xml_config = format_xml(data=WebsiteConfiguration, root='WebsiteConfiguration', parent_child=True)
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'website': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket website, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_website(self, Bucket, **kwargs):
"""获取bucket 静态网站配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): Bucket对应的静态网站配置.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket静态网站配置
response = client.get_bucket_website(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'website': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket website, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
if 'RoutingRules' in data and not isinstance(data['RoutingRules']['RoutingRule'], list):
lst = []
lst.append(data['RoutingRules']['RoutingRule'])
data['RoutingRules']['RoutingRule'] = lst
if 'RoutingRules' in data:
data['RoutingRules'] = data['RoutingRules']['RoutingRule']
return data
def delete_bucket_website(self, Bucket, **kwargs):
"""删除bucket 静态网站配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 删除bucket静态网站配置
response = client.delete_bucket_website(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'website': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("delete bucket website, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def put_bucket_logging(self, Bucket, BucketLoggingStatus={}, **kwargs):
"""设置bucket logging
:param Bucket(string): 存储桶名称.
:param BucketLoggingStatus(dict): 设置Bucket的日志配置.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 设置bucket logging服务
logging_bucket = 'logging-beijing-1250000000'
logging_config = {
'LoggingEnabled': {
'TargetBucket': logging_bucket,
'TargetPrefix': 'test'
}
}
response = client.put_bucket_logging(
Bucket=logging_bucket,
BucketLoggingStatus=logging_config
)
"""
xml_config = format_xml(data=BucketLoggingStatus, root='BucketLoggingStatus')
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'logging': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket logging, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_logging(self, Bucket, **kwargs):
"""获取bucket logging
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): Bucket对应的logging配置.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket logging服务配置
response = client.get_bucket_logging(
Bucket='logging-beijing-1250000000'
)
"""
headers = mapped(kwargs)
params = {'logging': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket logging, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
return data
def put_bucket_policy(self, Bucket, Policy, **kwargs):
"""设置bucket policy
:param Bucket(string): 存储桶名称.
:param Policy(dict): 设置Bucket的Policy配置.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 设置bucket policy服务
bucket = 'test-1252448703'
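# an illustrative read-only policy; the uin, region and appid values below are assumptions
policy = {
'Statement': [
{
'Principal': {'qcs': ['qcs::cam::uin/100000000001:uin/100000000001']},
'Effect': 'allow',
'Action': ['name/cos:GetObject', 'name/cos:HeadObject'],
'Resource': ['qcs::cos:ap-guangzhou:uid/1252448703:test-1252448703/*']
}
],
'Version': '2.0'
}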
response = client.put_bucket_policy(
Bucket=bucket,
Policy=policy
)
"""
# Policy must be a JSON string (str) or a JSON-style dict
body = Policy
policy_type = type(body)
if policy_type != str and policy_type != dict:
raise CosClientError("Policy must be a json format string or json format dict")
if policy_type == dict:
body = json.dumps(body)
headers = mapped(kwargs)
headers['Content-Type'] = 'application/json'
params = {'policy': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket policy, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=body,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_policy(self, Bucket, **kwargs):
"""获取bucket policy
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): Bucket对应的policy配置.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket policy服务配置
response = client.get_bucket_policy(
Bucket=bucket
)
"""
headers = mapped(kwargs)
params = {'policy': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket policy, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = {'Policy': json.dumps(rt.json())}
return data
def put_bucket_domain(self, Bucket, DomainConfiguration={}, **kwargs):
"""设置bucket的自定义域名
:param Bucket(string): 存储桶名称.
:param DomainConfiguration(dict): 设置Bucket的自定义域名规则.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 设置bucket自定义域名配置
domain_config = {
'DomainRule': [
{
'Name': 'www.abc.com',
'Type': 'REST',
'Status': 'ENABLED',
'ForcedReplacement': 'CNAME'
},
]
}
response = client.put_bucket_domain(
Bucket='bucket',
DomainConfiguration=domain_config
)
"""
lst = ['<DomainRule>', '</DomainRule>'] # XML tags that hold list values
xml_config = format_xml(data=DomainConfiguration, root='DomainConfiguration', lst=lst)
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'domain': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket domain, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_domain(self, Bucket, **kwargs):
"""获取bucket 自定义域名配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): Bucket对应的自定义域名配置.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket自定义域名配置
response = client.get_bucket_domain(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'domain': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket domain, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
format_dict(data, ['DomainRule'])
if 'x-cos-domain-txt-verification' in rt.headers:
data['x-cos-domain-txt-verification'] = rt.headers['x-cos-domain-txt-verification']
return data
def delete_bucket_domain(self, Bucket, **kwargs):
"""删除bucket 自定义域名配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# delete the bucket custom domain configuration
response = client.delete_bucket_domain(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'domain': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("delete bucket domain, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def put_bucket_origin(self, Bucket, OriginConfiguration={}, **kwargs):
"""设置bucket的回源规则
:param Bucket(string): 存储桶名称.
:param OriginConfiguration(dict): 设置Bucket的回源规则.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 设置bucket回源规则
origin_config = {}
response = client.put_bucket_origin(
Bucket='bucket',
OriginConfiguration=origin_config
)
"""
lst = ['<OriginRule>', '</OriginRule>'] # XML tags that hold list values
xml_config = format_xml(data=OriginConfiguration, root='OriginConfiguration', lst=lst)
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'origin': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket origin, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_origin(self, Bucket, **kwargs):
"""获取bucket 回源配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): Bucket对应的回源规则.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket回源规则
response = client.get_bucket_origin(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'origin': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket origin, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
format_dict(data, ['OriginRule'])
return data
def delete_bucket_origin(self, Bucket, **kwargs):
"""删除bucket 回源配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 删除bucket回源规则
response = client.delete_bucket_origin(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'origin': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("delete bucket origin, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def put_bucket_inventory(self, Bucket, Id, InventoryConfiguration={}, **kwargs):
"""设置bucket的清单规则
:param Bucket(string): 存储桶名称.
:param Id(string): 清单规则名称.
:param InventoryConfiguration(dict): Bucket的清单规则.
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 设置bucket清单规则
inventory_config = {
'Destination': {
'COSBucketDestination': {
'AccountId': '100000000001',
'Bucket': 'qcs::cos:ap-guangzhou::examplebucket-1250000000',
'Format': 'CSV',
'Prefix': 'list1',
'Encryption': {
'SSECOS': {}
}
},
'IsEnabled': 'True',
'Filter': {
'Prefix': 'filterPrefix'
},
'IncludedObjectVersions':'All',
'OptionalFields': {
'Field': [
'Size',
'LastModifiedDate',
'ETag',
'StorageClass',
'IsMultipartUploaded',
'ReplicationStatus'
]
},
'Schedule': {
'Frequency': 'Daily'
}
}
response = client.put_bucket_inventory(
Bucket='bucket',
Id='list1',
InventoryConfiguration=inventory_config
)
"""
lst = ['<Field>', '</Field>'] # XML tags that hold list values
InventoryConfiguration['Id'] = Id
xml_config = format_xml(data=InventoryConfiguration, root='InventoryConfiguration', lst=lst)
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'inventory': '', 'id': Id}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket inventory, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_inventory(self, Bucket, Id, **kwargs):
"""获取bucket清单规则
:param Bucket(string): 存储桶名称.
:param Id(string): 清单规则名称.
:param kwargs(dict): 设置请求headers.
:return(dict): Bucket对应的清单规则.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket清单规则
response = client.get_bucket_inventory(
Bucket='bucket',
Id='list1'
)
"""
headers = mapped(kwargs)
params = {'inventory': '', 'id': Id}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket inventory, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
format_dict(data['OptionalFields'], ['Field'])
return data
def delete_bucket_inventory(self, Bucket, Id, **kwargs):
"""删除bucket 回源配置
:param Bucket(string): 存储桶名称.
:param Id(string): 清单规则名称.
:param kwargs(dict): 设置请求headers.
:return(dict): None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 删除bucket清单规则
response = client.delete_bucket_inventory(
Bucket='bucket',
Id='list1'
)
"""
headers = mapped(kwargs)
params = {'inventory': '', 'id': Id}
url = self._conf.uri(bucket=Bucket)
logger.info("delete bucket inventory, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def put_bucket_tagging(self, Bucket, Tagging={}, **kwargs):
"""设置bucket的标签
:param Bucket(string): 存储桶名称.
:param Tagging(dict): Bucket的标签集合
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 设置bucket标签
tagging_set = {
'TagSet': {
'Tag': [
{
'Key': 'string',
'Value': 'string'
}
]
}
}
response = client.put_bucket_tagging(
Bucket='bucket',
Tagging=tagging_set
)
"""
lst = ['<Tag>', '</Tag>'] # XML tags that hold list values
xml_config = format_xml(data=Tagging, root='Tagging', lst=lst)
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'tagging': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket tagging, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_tagging(self, Bucket, **kwargs):
"""获取bucket标签
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): Bucket对应的标签.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取bucket标签
response = client.get_bucket_tagging(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'tagging': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket tagging, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
if 'TagSet' in data:
format_dict(data['TagSet'], ['Tag'])
return data
def delete_bucket_tagging(self, Bucket, **kwargs):
"""删除bucket 回源配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 删除bucket标签
response = client.delete_bucket_tagging(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'tagging': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("delete bucket tagging, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def put_bucket_referer(self, Bucket, RefererConfiguration={}, **kwargs):
"""设置bucket的防盗链规则
:param Bucket(string): 存储桶名称.
:param RefererConfiguration(dict): Bucket的防盗链规则
:param kwargs(dict): 设置请求headers.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
            # set the bucket referer (hotlink protection) configuration
referer_config = {
'Status': 'Enabled',
'RefererType': 'White-List',
'EmptyReferConfiguration': 'Allow',
'DomainList': {
'Domain': [
'*.qq.com',
'*.qcloud.com'
]
}
}
response = client.put_bucket_referer(
Bucket='bucket',
RefererConfiguration=referer_config
)
"""
lst = ['<Domain>', '</Domain>'] # 类型为list的标签
xml_config = format_xml(data=RefererConfiguration, root='RefererConfiguration', lst=lst)
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'referer': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket referer, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def get_bucket_referer(self, Bucket, **kwargs):
"""获取bucket防盗链规则
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): Bucket对应的防盗链规则.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
            # get the bucket referer configuration
response = client.get_bucket_referer(
Bucket='bucket'
)
"""
headers = mapped(kwargs)
params = {'referer': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket referer, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
if 'DomainList' in data:
format_dict(data['DomainList'], ['Domain'])
return data
def delete_bucket_referer(self, Bucket, **kwargs):
"""删除bucket防盗链规则
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求headers.
:return(dict): None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
            # delete the bucket referer configuration
response = client.delete_bucket_referer(
Bucket='bucket'
)
"""
xml_config = ''
headers = mapped(kwargs)
headers['Content-MD5'] = get_md5(xml_config)
headers['Content-Type'] = 'application/xml'
params = {'referer': ''}
url = self._conf.uri(bucket=Bucket)
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
data=xml_config,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
# service interface begin
def list_buckets(self, **kwargs):
"""列出所有bucket
:return(dict): 账号下bucket相关信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取账户下所有存储桶信息
response = client.list_buckets()
"""
headers = mapped(kwargs)
url = '{scheme}://service.cos.myqcloud.com/'.format(scheme=self._conf._scheme)
if self._conf._service_domain is not None:
url = '{scheme}://{domain}/'.format(scheme=self._conf._scheme, domain=self._conf._service_domain)
rt = self.send_request(
method='GET',
url=url,
bucket=None,
headers=headers,
auth=CosS3Auth(self._conf),
)
data = xml_to_dict(rt.content)
if data['Buckets'] is not None and not isinstance(data['Buckets']['Bucket'], list):
lst = []
lst.append(data['Buckets']['Bucket'])
data['Buckets']['Bucket'] = lst
return data
# Advanced interface
def _upload_part(self, bucket, key, local_path, offset, size, part_num, uploadid, md5_lst, resumable_flag, already_exist_parts, enable_md5, traffic_limit):
"""从本地文件中读取分块, 上传单个分块,将结果记录在md5——list中
:param bucket(string): 存储桶名称.
:param key(string): 分块上传路径名.
:param local_path(string): 本地文件路径名.
:param offset(int): 读取本地文件的分块偏移量.
:param size(int): 读取本地文件的分块大小.
:param part_num(int): 上传分块的序号.
:param uploadid(string): 分块上传的uploadid.
:param md5_lst(list): 保存上传成功分块的MD5和序号.
:param resumable_flag(bool): 是否为断点续传.
:param already_exist_parts(dict): 断点续传情况下,保存已经上传的块的序号和Etag.
:param enable_md5(bool): 是否开启md5校验.
:return: None.
"""
# 如果是断点续传且该分块已经上传了则不用实际上传
if resumable_flag and part_num in already_exist_parts:
md5_lst.append({'PartNumber': part_num, 'ETag': already_exist_parts[part_num]})
else:
with open(local_path, 'rb') as fp:
fp.seek(offset, 0)
data = fp.read(size)
rt = self.upload_part(bucket, key, data, part_num, uploadid, enable_md5, TrafficLimit=traffic_limit)
md5_lst.append({'PartNumber': part_num, 'ETag': rt['ETag']})
return None
def _get_resumable_uploadid(self, bucket, key):
"""从服务端获取未完成的分块上传任务,获取断点续传的uploadid
:param bucket(string): 存储桶名称.
:param key(string): 分块上传路径名.
:return(string): 断点续传的uploadid,如果不存在则返回None.
"""
if key and key[0] == '/':
key = key[1:]
multipart_response = self.list_multipart_uploads(
Bucket=bucket,
Prefix=key
)
if 'Upload' in multipart_response:
# 取最后一个(最新的)uploadid
index = len(multipart_response['Upload']) - 1
while index >= 0:
if multipart_response['Upload'][index]['Key'] == key:
return multipart_response['Upload'][index]['UploadId']
index -= 1
return None
def _check_single_upload_part(self, local_path, offset, local_part_size, remote_part_size, remote_etag):
"""从本地文件中读取分块, 校验本地分块和服务端的分块信息
:param local_path(string): 本地文件路径名.
:param offset(int): 读取本地文件的分块偏移量.
:param local_part_size(int): 读取本地文件的分块大小.
:param remote_part_size(int): 服务端的文件的分块大小.
:param remote_etag(string): 服务端的文件Etag.
:return(bool): 本地单个分块的信息是否和服务端的分块信息一致
"""
if local_part_size != remote_part_size:
return False
with open(local_path, 'rb') as fp:
fp.seek(offset, 0)
local_etag = get_raw_md5(fp.read(local_part_size))
if local_etag == remote_etag:
return True
return False
def _check_all_upload_parts(self, bucket, key, uploadid, local_path, parts_num, part_size, last_size, already_exist_parts):
"""获取所有已经上传的分块的信息,和本地的文件进行对比
:param bucket(string): 存储桶名称.
:param key(string): 分块上传路径名.
:param uploadid(string): 分块上传的uploadid
:param local_path(string): 本地文件的大小
:param parts_num(int): 本地文件的分块数
:param part_size(int): 本地文件的分块大小
:param last_size(int): 本地文件的最后一块分块大小
:param already_exist_parts(dict): 保存已经上传的分块的part_num和Etag
:return(bool): 本地文件是否通过校验,True为可以进行断点续传,False为不能进行断点续传
"""
parts_info = []
part_number_marker = 0
list_over_status = False
while list_over_status is False:
response = self.list_parts(
Bucket=bucket,
Key=key,
UploadId=uploadid,
PartNumberMarker=part_number_marker
)
# 已经存在的分块上传,有可能一个分块都没有上传,判断一下
if 'Part' in response:
parts_info.extend(response['Part'])
if response['IsTruncated'] == 'false':
list_over_status = True
else:
part_number_marker = int(response['NextPartNumberMarker'])
for part in parts_info:
part_num = int(part['PartNumber'])
# 如果分块数量大于本地计算出的最大数量,校验失败
if part_num > parts_num:
return False
offset = (part_num - 1) * part_size
local_part_size = part_size
if part_num == parts_num:
local_part_size = last_size
# 有任何一块没有通过校验,则校验失败
if not self._check_single_upload_part(local_path, offset, local_part_size, int(part['Size']), part['ETag']):
return False
already_exist_parts[part_num] = part['ETag']
return True
def download_file(self, Bucket, Key, DestFilePath, PartSize=20, MAXThread=5, EnableCRC=False, **Kwargs):
"""小于等于20MB的文件简单下载,大于20MB的文件使用续传下载
:param Bucket(string): 存储桶名称.
:param key(string): COS文件的路径名.
:param DestFilePath(string): 下载文件的目的路径.
:param PartSize(int): 分块下载的大小设置,单位为MB.
:param MAXThread(int): 并发下载的最大线程数.
:param EnableCRC(bool): 校验下载文件与源文件是否一致
:param kwargs(dict): 设置请求headers.
"""
logger.debug("Start to download file, bucket: {0}, key: {1}, dest_filename: {2}, part_size: {3}MB,\
max_thread: {4}".format(Bucket, Key, DestFilePath, PartSize, MAXThread))
object_info = self.head_object(Bucket, Key)
file_size = int(object_info['Content-Length'])
if file_size <= 1024*1024*20:
response = self.get_object(Bucket, Key, **Kwargs)
response['Body'].get_stream_to_file(DestFilePath)
return
downloader = ResumableDownLoader(self, Bucket, Key, DestFilePath, object_info, PartSize, MAXThread, EnableCRC, **Kwargs)
downloader.start()
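    # Usage sketch (bucket, key and local path below are made-up placeholders):
    # files of 20MB or less go through a single get_object call, anything larger
    # is handed to ResumableDownLoader and fetched in PartSize-MB ranges.
    #
    #   client.download_file(
    #       Bucket='examplebucket-1250000000',
    #       Key='videos/big.mp4',
    #       DestFilePath='/tmp/big.mp4',
    #       PartSize=20,
    #       MAXThread=5,
    #       EnableCRC=True)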
def upload_file(self, Bucket, Key, LocalFilePath, PartSize=1, MAXThread=5, EnableMD5=False, **kwargs):
"""小于等于20MB的文件简单上传,大于20MB的文件使用分块上传
:param Bucket(string): 存储桶名称.
:param key(string): 分块上传路径名.
:param LocalFilePath(string): 本地文件路径名.
:param PartSize(int): 分块的大小设置,单位为MB.
:param MAXThread(int): 并发上传的最大线程数.
:param EnableMD5(bool): 是否打开MD5校验.
:param kwargs(dict): 设置请求headers.
:return(dict): 成功上传文件的元信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 根据文件大小自动选择分块大小,多线程并发上传提高上传速度
file_name = 'thread_1GB_test'
response = client.upload_file(
Bucket='bucket',
Key=file_name,
LocalFilePath=file_name,
PartSize=10,
MAXThread=10,
)
"""
file_size = os.path.getsize(LocalFilePath)
if file_size <= 1024*1024*20:
with open(LocalFilePath, 'rb') as fp:
rt = self.put_object(Bucket=Bucket, Key=Key, Body=fp, EnableMD5=EnableMD5, **kwargs)
return rt
else:
part_size = 1024*1024*PartSize # 默认按照1MB分块,最大支持10G的文件,超过10G的分块数固定为10000
last_size = 0 # 最后一块可以小于1MB
parts_num = file_size // part_size
last_size = file_size % part_size
if last_size != 0:
parts_num += 1
else: # 如果刚好整除,最后一块的大小等于分块大小
last_size = part_size
if parts_num > 10000:
parts_num = 10000
part_size = file_size // parts_num
last_size = file_size % parts_num
last_size += part_size
# 创建分块上传
# 判断是否可以断点续传
resumable_flag = False
already_exist_parts = {}
uploadid = self._get_resumable_uploadid(Bucket, Key)
if uploadid is not None:
logger.info("fetch an existed uploadid in remote cos, uploadid={uploadid}".format(uploadid=uploadid))
# 校验服务端返回的每个块的信息是否和本地的每个块的信息相同,只有校验通过的情况下才可以进行断点续传
resumable_flag = self._check_all_upload_parts(Bucket, Key, uploadid, LocalFilePath, parts_num, part_size, last_size, already_exist_parts)
# 如果不能断点续传,则创建一个新的分块上传
if not resumable_flag:
rt = self.create_multipart_upload(Bucket=Bucket, Key=Key, **kwargs)
uploadid = rt['UploadId']
logger.info("create a new uploadid in upload_file, uploadid={uploadid}".format(uploadid=uploadid))
# 上传分块
# 增加限速功能
traffic_limit = None
if 'TrafficLimit' in kwargs:
traffic_limit = kwargs['TrafficLimit']
offset = 0 # 记录文件偏移量
lst = list() # 记录分块信息
pool = SimpleThreadPool(MAXThread)
for i in range(1, parts_num+1):
if i == parts_num: # 最后一块
pool.add_task(self._upload_part, Bucket, Key, LocalFilePath, offset, file_size-offset, i, uploadid, lst, resumable_flag, already_exist_parts, EnableMD5, traffic_limit)
else:
pool.add_task(self._upload_part, Bucket, Key, LocalFilePath, offset, part_size, i, uploadid, lst, resumable_flag, already_exist_parts, EnableMD5, traffic_limit)
offset += part_size
pool.wait_completion()
result = pool.get_result()
if not result['success_all'] or len(lst) != parts_num:
raise CosClientError('some upload_part fail after max_retry, please upload_file again')
lst = sorted(lst, key=lambda x: x['PartNumber']) # 按PartNumber升序排列
# 完成分块上传
rt = self.complete_multipart_upload(Bucket=Bucket, Key=Key, UploadId=uploadid, MultipartUpload={'Part': lst})
return rt
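    # Worked example of the chunking arithmetic above (illustrative numbers only):
    #   * 1GB file, PartSize=10  -> 102 full 10MB parts plus a 4MB tail, 103 parts in total.
    #   * 20GB file, PartSize=1  -> 20480 parts would exceed the 10000-part cap, so
    #     part_size is recomputed as file_size // 10000 (roughly 2MB) and the few
    #     leftover bytes (file_size % 10000) are folded into the final part.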
def _inner_head_object(self, CopySource):
"""查询源文件的长度"""
bucket, path, endpoint, versionid = get_copy_source_info(CopySource)
params = {}
if versionid != '':
params['versionId'] = versionid
url = u"{scheme}://{bucket}.{endpoint}/{path}".format(scheme=self._conf._scheme, bucket=bucket, endpoint=endpoint, path=path)
rt = self.send_request(
method='HEAD',
url=url,
bucket=bucket,
auth=CosS3Auth(self._conf, path, params=params),
headers={},
params=params)
storage_class = 'standard'
if 'x-cos-storage-class' in rt.headers:
storage_class = rt.headers['x-cos-storage-class'].lower()
return int(rt.headers['Content-Length']), storage_class
def _upload_part_copy(self, bucket, key, part_number, upload_id, copy_source, copy_source_range, md5_lst):
"""拷贝指定文件至分块上传,记录结果到lst中去
:param bucket(string): 存储桶名称.
:param key(string): 上传COS路径.
:param part_number(int): 上传分块的编号.
:param upload_id(string): 分块上传创建的UploadId.
:param copy_source(dict): 拷贝源,包含Appid,Bucket,Region,Key.
:param copy_source_range(string): 拷贝源的字节范围,bytes=first-last。
:param md5_lst(list): 保存上传成功分块的MD5和序号.
:return: None.
"""
rt = self.upload_part_copy(bucket, key, part_number, upload_id, copy_source, copy_source_range)
md5_lst.append({'PartNumber': part_number, 'ETag': rt['ETag']})
return None
def _check_same_region(self, dst_endpoint, CopySource):
src_endpoint = get_copy_source_info(CopySource)[2]
if src_endpoint == dst_endpoint:
return True
return False
def copy(self, Bucket, Key, CopySource, CopyStatus='Copy', PartSize=10, MAXThread=5, **kwargs):
"""文件拷贝,小于5G的文件调用copy_object,大于等于5G的文件调用分块上传的upload_part_copy
:param Bucket(string): 存储桶名称.
:param Key(string): 上传COS路径.
:param CopySource(dict): 拷贝源,包含Appid,Bucket,Region,Key.
:param CopyStatus(string): 拷贝状态,可选值'Copy'|'Replaced'.
:param PartSize(int): 分块的大小设置.
:param MAXThread(int): 并发上传的最大线程数.
:param kwargs(dict): 设置请求headers.
:return(dict): 拷贝成功的结果.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 根据拷贝源文件的大小自动选择拷贝策略
copy_source = {'Bucket': 'testcopt-1252468703', 'Key': '/thread_1MB', 'Region': 'ap-guangzhou'}
response = client.copy(
Bucket='test',
Key='copy_10G.txt',
CopySource=copy_source,
MAXThread=10
)
"""
# 先查询下拷贝源object的content-length
file_size, src_storage_class = self._inner_head_object(CopySource)
dst_storage_class = 'standard'
if 'StorageClass' in kwargs:
dst_storage_class = kwargs['StorageClass'].lower()
# 同园区且不改存储类型的情况下直接走copy_object
if self._check_same_region(self._conf._endpoint, CopySource) and src_storage_class == dst_storage_class:
response = self.copy_object(Bucket=Bucket, Key=Key, CopySource=CopySource, CopyStatus=CopyStatus, **kwargs)
return response
# 如果源文件大小小于5G,则直接调用copy_object接口
if file_size < SINGLE_UPLOAD_LENGTH:
response = self.copy_object(Bucket=Bucket, Key=Key, CopySource=CopySource, CopyStatus=CopyStatus, **kwargs)
return response
# 如果源文件大小大于等于5G,则先创建分块上传,在调用upload_part
part_size = 1024*1024*PartSize # 默认按照10MB分块
last_size = 0 # 最后一块可以小于1MB
parts_num = file_size // part_size
last_size = file_size % part_size
if last_size != 0:
parts_num += 1
if parts_num > 10000:
parts_num = 10000
part_size = file_size // parts_num
last_size = file_size % parts_num
last_size += part_size
# 创建分块上传
rt = self.create_multipart_upload(Bucket=Bucket, Key=Key, **kwargs)
uploadid = rt['UploadId']
# 上传分块拷贝
offset = 0 # 记录文件偏移量
lst = list() # 记录分块信息
pool = SimpleThreadPool(MAXThread)
for i in range(1, parts_num+1):
if i == parts_num: # 最后一块
copy_range = gen_copy_source_range(offset, file_size-1)
pool.add_task(self._upload_part_copy, Bucket, Key, i, uploadid, CopySource, copy_range, lst)
else:
copy_range = gen_copy_source_range(offset, offset+part_size-1)
pool.add_task(self._upload_part_copy, Bucket, Key, i, uploadid, CopySource, copy_range, lst)
offset += part_size
pool.wait_completion()
result = pool.get_result()
if not result['success_all']:
raise CosClientError('some upload_part_copy fail after max_retry')
lst = sorted(lst, key=lambda x: x['PartNumber']) # 按PartNumber升序排列
# 完成分片上传
try:
rt = self.complete_multipart_upload(Bucket=Bucket, Key=Key, UploadId=uploadid, MultipartUpload={'Part': lst})
except Exception as e:
abort_response = self.abort_multipart_upload(Bucket=Bucket, Key=Key, UploadId=uploadid)
raise e
return rt
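    # Decision ladder used above, summarized: same endpoint and same storage
    # class -> plain copy_object; source smaller than SINGLE_UPLOAD_LENGTH (5GB
    # per the docstring) -> plain copy_object; otherwise a multipart copy driven
    # by upload_part_copy, with the upload aborted if complete_multipart_upload
    # raises.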
def _upload_part_from_buffer(self, bucket, key, data, part_num, uploadid, md5_lst):
"""从内存中读取分块, 上传单个分块,将结果记录在md5——list中
:param bucket(string): 存储桶名称.
:param key(string): 分块上传路径名.
:param data(string): 数据块.
:param part_num(int): 上传分块的序号.
:param uploadid(string): 分块上传的uploadid.
:param md5_lst(list): 保存上传成功分块的MD5和序号.
:return: None.
"""
rt = self.upload_part(bucket, key, data, part_num, uploadid)
md5_lst.append({'PartNumber': part_num, 'ETag': rt['ETag']})
return None
def upload_file_from_buffer(self, Bucket, Key, Body, MaxBufferSize=100, PartSize=10, MAXThread=5, **kwargs):
"""小于分块大小的的文件简单上传,大于等于分块大小的文件使用分块上传
:param Bucket(string): 存储桶名称.
:param key(string): 分块上传路径名.
:param Body(fp): 文件流,必须实现了read方法.
:param MaxBufferSize(int): 缓存文件的大小,单位为MB,MaxBufferSize/PartSize决定线程池中最大等待调度的任务数量
:param PartSize(int): 分块的大小设置,单位为MB
:param MAXThread(int): 并发上传的最大线程数.
:param kwargs(dict): 设置请求headers.
:return(dict): 成功上传的文件的结果.
"""
if not hasattr(Body, 'read'):
raise CosClientError("Body must has attr read")
part_size = 1024*1024*PartSize
# 先读一个块,如果直接EOF了就调用简单文件上传
part_num = 1
data = Body.read(part_size)
if len(data) < part_size:
rt = self.put_object(Bucket=Bucket, Key=Key, Body=data, **kwargs)
return rt
# 创建分块上传
rt = self.create_multipart_upload(Bucket=Bucket, Key=Key, **kwargs)
uploadid = rt['UploadId']
lst = list() # 记录分块信息
MAXQueue = MaxBufferSize//PartSize
if MAXQueue == 0:
MAXQueue = 1
pool = SimpleThreadPool(MAXThread, MAXQueue)
while True:
if not data:
break
pool.add_task(self._upload_part_from_buffer, Bucket, Key, data, part_num, uploadid, lst)
part_num += 1
data = Body.read(part_size)
pool.wait_completion()
result = pool.get_result()
if not result['success_all']:
raise CosClientError('some upload_part fail after max_retry')
lst = sorted(lst, key=lambda x: x['PartNumber']) # 按PartNumber升序排列
# 完成分片上传
try:
rt = self.complete_multipart_upload(Bucket=Bucket, Key=Key, UploadId=uploadid, MultipartUpload={'Part': lst})
except Exception as e:
abort_response = self.abort_multipart_upload(Bucket=Bucket, Key=Key, UploadId=uploadid)
raise e
return rt
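    # Usage sketch (stream contents are made up): any object with a read()
    # method works, e.g. an io.BytesIO wrapper around bytes already in memory.
    #
    #   import io
    #   payload = io.BytesIO(b'x' * (30 * 1024 * 1024))   # 30MB of dummy data
    #   rt = client.upload_file_from_buffer(
    #       Bucket='bucket',
    #       Key='streamed.bin',
    #       Body=payload,
    #       MaxBufferSize=100,   # MB of data allowed to wait in the thread pool queue
    #       PartSize=10,
    #       MAXThread=5)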
def append_object(self, Bucket, Key, Position, Data, **kwargs):
"""文件块追加接口
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param Position(int): 追加内容的起始位置.
:param Data(string): 追加的内容
:kwargs(dict): 设置上传的headers.
:return(dict): 上传成功返回的结果,包含ETag等信息.
"""
check_object_content_length(Data)
headers = mapped(kwargs)
params = {'append': '', 'position': Position}
url = self._conf.uri(bucket=Bucket, path=Key)
logger.info("append object, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='POST',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, Key, params=params),
data=Data,
headers=headers,
params=params)
response = dict(**rt.headers)
return response
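    # Illustrative append sequence (object key and payloads are made up): the
    # expected pattern is that each call's Position equals the object's current
    # length, starting at 0 for a brand-new appendable object and growing by
    # len(Data) after every successful call.
    #
    #   rt = client.append_object(Bucket='bucket', Key='log.txt', Position=0, Data='hello ')
    #   rt = client.append_object(Bucket='bucket', Key='log.txt', Position=6, Data='world')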
def put_object_from_local_file(self, Bucket, LocalFilePath, Key, EnableMD5=False, **kwargs):
"""本地文件上传接口,适用于小文件,最大不得超过5GB
:param Bucket(string): 存储桶名称.
:param LocalFilePath(string): 上传文件的本地路径.
:param Key(string): COS路径.
:param EnableMD5(bool): 是否需要SDK计算Content-MD5,打开此开关会增加上传耗时.
:kwargs(dict): 设置上传的headers.
:return(dict): 上传成功返回的结果,包含ETag等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 上传本地文件到cos
response = client.put_object_from_local_file(
Bucket='bucket',
LocalFilePath='local.txt',
Key='test.txt'
)
print (response['ETag'])
"""
with open(LocalFilePath, 'rb') as fp:
return self.put_object(Bucket, fp, Key, EnableMD5, **kwargs)
def object_exists(self, Bucket, Key):
"""判断一个文件是否存在
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:return(bool): 文件是否存在,返回True为存在,返回False为不存在
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 上传本地文件到cos
status = client.object_exists(
Bucket='bucket',
Key='test.txt'
)
"""
try:
self.head_object(Bucket, Key)
return True
except CosServiceError as e:
if e.get_status_code() == 404:
return False
else:
raise e
def bucket_exists(self, Bucket):
"""判断一个存储桶是否存在
:param Bucket(string): 存储桶名称.
:return(bool): 存储桶是否存在,返回True为存在,返回False为不存在.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 上传本地文件到cos
status = client.bucket_exists(
Bucket='bucket'
)
"""
try:
self.head_bucket(Bucket)
return True
except CosServiceError as e:
if e.get_status_code() == 404:
return False
else:
raise e
def change_object_storage_class(self, Bucket, Key, StorageClass):
"""改变文件的存储类型
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:param StorageClass(bool): 是否需要SDK计算Content-MD5,打开此开关会增加上传耗时.
:kwargs(dict): 设置上传的headers.
:return(dict): 上传成功返回的结果,包含ETag等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 上传本地文件到cos
response = client.change_object_storage_class(
Bucket='bucket',
Key='test.txt',
StorageClass='STANDARD'
)
"""
copy_source = {
'Bucket': Bucket,
'Key': Key,
'Endpoint': self._conf._endpoint,
'Appid': self._conf._appid
}
response = self.copy_object(
Bucket=Bucket,
Key=Key,
CopySource=copy_source,
CopyStatus='Replaced',
StorageClass=StorageClass
)
return response
def update_object_meta(self, Bucket, Key, **kwargs):
"""改变文件的存储类型
:param Bucket(string): 存储桶名称.
:param Key(string): COS路径.
:kwargs(dict): 设置文件的元属性.
:return: None.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 上传本地文件到cos
response = client.update_object_meta(
Bucket='bucket',
Key='test.txt',
ContentType='text/html'
)
"""
copy_source = {
'Bucket': Bucket,
'Key': Key,
'Endpoint': self._conf._endpoint,
'Appid': self._conf._appid
}
response = self.copy_object(
Bucket=Bucket,
Key=Key,
CopySource=copy_source,
CopyStatus='Replaced',
**kwargs
)
return response
def put_bucket_encryption(self, Bucket, ServerSideEncryptionConfiguration={}, **kwargs):
"""设置执行存储桶下的默认加密配置
:param Bucket(string): 存储桶名称.
:param ServerSideEncryptionConfiguration(dict): 设置Bucket的加密规则
:param kwargs(dict): 设置请求的headers.
:return: None.
"""
# 类型为list的标签
lst = [
'<Rule>',
'</Rule>'
]
xml_config = format_xml(data=ServerSideEncryptionConfiguration, root='ServerSideEncryptionConfiguration', lst=lst)
headers = mapped(kwargs)
params = {'encryption': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("put bucket encryption, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='PUT',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
data=xml_config,
headers=headers,
params=params)
return None
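    # Example configuration shape for the call above. This is an assumption
    # modeled on the S3-style ServerSideEncryptionConfiguration XML (the code
    # only requires 'Rule' to be list-typed); check the COS documentation for
    # the authoritative field names before relying on it.
    #
    #   sse_config = {
    #       'Rule': [
    #           {
    #               'ApplyServerSideEncryptionByDefault': {
    #                   'SSEAlgorithm': 'AES256'
    #               }
    #           }
    #       ]
    #   }
    #   client.put_bucket_encryption(Bucket='bucket', ServerSideEncryptionConfiguration=sse_config)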
def get_bucket_encryption(self, Bucket, **kwargs):
"""获取存储桶下的默认加密配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求的headers.
:return(dict): 返回bucket的加密规则.
"""
headers = mapped(kwargs)
params = {'encryption': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("get bucket encryption, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='GET',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
data = xml_to_dict(rt.content)
format_dict(data, ['Rule'])
return data
def delete_bucket_encryption(self, Bucket, **kwargs):
"""用于删除指定存储桶下的默认加密配置
:param Bucket(string): 存储桶名称.
:param kwargs(dict): 设置请求的headers.
:return: None.
"""
headers = mapped(kwargs)
params = {'encryption': ''}
url = self._conf.uri(bucket=Bucket)
logger.info("delete bucket encryption, url=:{url} ,headers=:{headers}".format(
url=url,
headers=headers))
rt = self.send_request(
method='DELETE',
url=url,
bucket=Bucket,
auth=CosS3Auth(self._conf, params=params),
headers=headers,
params=params)
return None
def put_async_fetch_task(self, Bucket, FetchTaskConfiguration={}, **kwargs):
"""发起异步拉取对象到COS的任务
:param Bucket(string): 存储桶名称.
:param FetchTaskConfiguration(dict): 异步拉取任务的配置.
:kwargs(dict): 扩展参数.
:return(dict): 异步任务成功返回的结果,包含Taskid等信息.
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 发起异步拉取任务
response = client.put_async_fetch_task(
Bucket='bucket',
FetchTaskConfiguration={
'Url':
'Key':
'MD5':
'SuccessCallbackUrl':
'FailureCallbackUrl':
}
)
"""
url = '{scheme}://{region}.migration.myqcloud.com/{bucket}/'.format(scheme=self._conf._scheme, region=self._conf._region, bucket=Bucket)
if self._conf._domain is not None:
url = '{scheme}://{domain}/{bucket}/'.format(scheme=self._conf._scheme, domain=self._conf._domain, bucket=Bucket)
headers = {'Content-Type': 'application/json'}
signed_key = Bucket + '/'
rt = self.send_request(
method='POST',
url=url,
bucket=None,
data=json.dumps(FetchTaskConfiguration),
headers=headers,
auth=CosS3Auth(self._conf, signed_key),
cos_request=False
)
data = rt.json()
return data
def get_async_fetch_task(self, Bucket, TaskId, **kwargs):
"""获取异步拉取对象到COS的任务状态
:param Bucket(string): 存储桶名称.
:param TaskId(string): 异步拉取任务查询的唯一标识.
:kwargs(dict): 扩展参数.
:return(dict): 异步任务的状态
.. code-block:: python
config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token) # 获取配置对象
client = CosS3Client(config)
# 获取异步拉取任务
response = client.get_async_fetch_task(
Bucket='bucket',
TaskId='string'
)
"""
url = '{scheme}://{region}.migration.myqcloud.com/{bucket}/{task_id}'.format(scheme=self._conf._scheme, region=self._conf._region, bucket=Bucket, task_id=TaskId)
if self._conf._domain is not None:
url = '{scheme}://{domain}/{bucket}/{task_id}'.format(scheme=self._conf._scheme, domain=self._conf._domain, bucket=Bucket, task_id=TaskId)
headers = {'Content-Type': 'application/json'}
signed_key = '{bucket}/{task_id}'.format(bucket=Bucket, task_id=TaskId)
rt = self.send_request(
method='GET',
url=url,
bucket=None,
headers=headers,
auth=CosS3Auth(self._conf, signed_key),
cos_request=False
)
data = rt.json()
return data
if __name__ == "__main__":
pass
|
the-stack_106_24489 | #!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import types
from pypeline.common.utilities import \
fragment, \
split_before, \
Immutable, \
TotallyOrdered
from pypeline.common.fileutils import open_ro
from pypeline.common.formats._common import FormatError
class FASTAError(FormatError):
pass
class FASTA(TotallyOrdered, Immutable):
def __init__(self, name, meta, sequence):
if not (name and isinstance(name, types.StringTypes)):
raise FASTAError("FASTA name must be a non-empty string")
elif not (isinstance(meta, types.StringTypes) or (meta is None)):
raise FASTAError("FASTA meta-information must be a string, or None")
elif not isinstance(sequence, types.StringTypes):
raise FASTAError("FASTA sequence must be a string")
Immutable.__init__(self,
name = name,
meta = meta,
sequence = sequence)
def write(self, fileobj = sys.stdout):
"""Prints a FASTA sequence (iterable), wrapping long sequences at 60 chars."""
fileobj.write(repr(self))
@classmethod
def from_lines(cls, lines):
"""Parses FASTA sequences found in a sequence of lines, and returns
a tuple for each FASTA record: ((name, meta-information), sequence)
No assumptions are made about the line-lengths."""
lines = (line.rstrip() for line in lines)
for record in split_before(lines, lambda v: v.startswith(">")):
name = record[0]
if (not name.startswith(">")) or (len(name) == 1):
raise FASTAError("Unnamed FASTA record")
elif len(record) == 1:
raise FASTAError("FASTA record does not contain sequence: " + name[1:])
# Split out any meta information
name_and_meta = name[1:].split(None, 1)
if len(name_and_meta) < 2:
name_and_meta.append(None)
name, meta = name_and_meta
yield FASTA(name = name,
meta = meta,
sequence = "".join(record[1:]))
@classmethod
def from_file(cls, filename):
"""Reads an unindexed FASTA file, returning a sequence of
tuples containing the name and sequence of each entry in
the file. The FASTA file may be GZIP/BZ2 compressed."""
fasta_file = open_ro(filename)
try:
for record in FASTA.from_lines(fasta_file):
yield record
finally:
fasta_file.close()
def __lt__(self, other):
if not isinstance(other, FASTA):
return NotImplemented
return (self.name, self.meta, self.sequence) \
< (other.name, other.meta, other.sequence)
def __hash__(self):
return hash((self.name, self.meta, self.sequence))
def __repr__(self):
"""Process a printable FASTA sequence, wrapping long sequences at 60 chars."""
name = self.name
if self.meta:
name = "%s %s" % (name, self.meta)
return ">%s\n%s\n" % (name, "\n".join(fragment(60, self.sequence)))
|
the-stack_106_24490 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.dns.base import DNSDriver, Zone, Record
from libcloud.dns.types import RecordType
from libcloud.dns.types import ZoneDoesNotExistError, ZoneAlreadyExistsError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.types import RecordAlreadyExistsError
class DummyDNSDriver(DNSDriver):
"""
Dummy DNS driver.
>>> from libcloud.dns.drivers.dummy import DummyDNSDriver
>>> driver = DummyDNSDriver('key', 'secret')
>>> driver.name
'Dummy DNS Provider'
"""
name = 'Dummy DNS Provider'
website = 'http://example.com'
def __init__(self, api_key, api_secret):
"""
:param api_key: API key or username to used (required)
:type api_key: ``str``
:param api_secret: Secret password to be used (required)
:type api_secret: ``str``
:rtype: ``None``
"""
self._zones = {}
def list_record_types(self):
"""
>>> driver = DummyDNSDriver('key', 'secret')
>>> driver.list_record_types()
['A']
@inherits: :class:`DNSDriver.list_record_types`
"""
return [RecordType.A]
def list_zones(self):
"""
>>> driver = DummyDNSDriver('key', 'secret')
>>> driver.list_zones()
[]
@inherits: :class:`DNSDriver.list_zones`
"""
return [zone['zone'] for zone in list(self._zones.values())]
def list_records(self, zone):
"""
>>> driver = DummyDNSDriver('key', 'secret')
>>> zone = driver.create_zone(domain='apache.org', type='master',
... ttl=100)
>>> list(zone.list_records())
[]
>>> record = driver.create_record(name='libcloud', zone=zone,
... type=RecordType.A, data='127.0.0.1')
>>> list(zone.list_records()) #doctest: +ELLIPSIS
[<Record: zone=id-apache.org, name=libcloud, type=A...>]
"""
return self._zones[zone.id]['records'].values()
def get_zone(self, zone_id):
"""
>>> driver = DummyDNSDriver('key', 'secret')
>>> driver.get_zone(zone_id='foobar')
... #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ZoneDoesNotExistError:
@inherits: :class:`DNSDriver.get_zone`
"""
if zone_id not in self._zones:
raise ZoneDoesNotExistError(driver=self, value=None,
zone_id=zone_id)
return self._zones[zone_id]['zone']
def get_record(self, zone_id, record_id):
"""
>>> driver = DummyDNSDriver('key', 'secret')
>>> driver.get_record(zone_id='doesnotexist', record_id='exists')
... #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ZoneDoesNotExistError:
@inherits: :class:`DNSDriver.get_record`
"""
self.get_zone(zone_id=zone_id)
zone_records = self._zones[zone_id]['records']
if record_id not in zone_records:
raise RecordDoesNotExistError(record_id=record_id, value=None,
driver=self)
return zone_records[record_id]
def create_zone(self, domain, type='master', ttl=None, extra=None):
"""
>>> driver = DummyDNSDriver('key', 'secret')
>>> zone = driver.create_zone(domain='apache.org', type='master',
... ttl=100)
>>> zone
<Zone: domain=apache.org, ttl=100, provider=Dummy DNS Provider ...>
>>> zone = driver.create_zone(domain='apache.org', type='master',
... ttl=100)
... #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ZoneAlreadyExistsError:
@inherits: :class:`DNSDriver.create_zone`
"""
id = 'id-%s' % (domain)
if id in self._zones:
raise ZoneAlreadyExistsError(zone_id=id, value=None, driver=self)
zone = Zone(id=id, domain=domain, type=type, ttl=ttl, extra={},
driver=self)
self._zones[id] = {'zone': zone,
'records': {}}
return zone
def create_record(self, name, zone, type, data, extra=None):
"""
>>> driver = DummyDNSDriver('key', 'secret')
>>> zone = driver.create_zone(domain='apache.org', type='master',
... ttl=100)
>>> record = driver.create_record(name='libcloud', zone=zone,
... type=RecordType.A, data='127.0.0.1')
>>> record #doctest: +ELLIPSIS
<Record: zone=id-apache.org, name=libcloud, type=A, data=127.0.0.1...>
>>> record = driver.create_record(name='libcloud', zone=zone,
... type=RecordType.A, data='127.0.0.1')
... #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
RecordAlreadyExistsError:
@inherits: :class:`DNSDriver.create_record`
"""
id = 'id-%s' % (name)
zone = self.get_zone(zone_id=zone.id)
if id in self._zones[zone.id]['records']:
raise RecordAlreadyExistsError(record_id=id, value=None,
driver=self)
record = Record(id=id, name=name, type=type, data=data, extra=extra,
zone=zone, driver=self)
self._zones[zone.id]['records'][id] = record
return record
def delete_zone(self, zone):
"""
>>> driver = DummyDNSDriver('key', 'secret')
>>> zone = driver.create_zone(domain='apache.org', type='master',
... ttl=100)
>>> driver.delete_zone(zone)
True
>>> driver.delete_zone(zone) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
ZoneDoesNotExistError:
@inherits: :class:`DNSDriver.delete_zone`
"""
self.get_zone(zone_id=zone.id)
del self._zones[zone.id]
return True
def delete_record(self, record):
"""
>>> driver = DummyDNSDriver('key', 'secret')
>>> zone = driver.create_zone(domain='apache.org', type='master',
... ttl=100)
>>> record = driver.create_record(name='libcloud', zone=zone,
... type=RecordType.A, data='127.0.0.1')
>>> driver.delete_record(record)
True
>>> driver.delete_record(record) #doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
RecordDoesNotExistError:
@inherits: :class:`DNSDriver.delete_record`
"""
self.get_record(zone_id=record.zone.id, record_id=record.id)
del self._zones[record.zone.id]['records'][record.id]
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
|
the-stack_106_24493 | import cv2
import os
import imutils
import numpy as np
class FindItConfig(object):
cv_method = cv2.TM_CCORR_NORMED
def load_from_path(pic_path):
""" load grey picture (with cv2) from path """
raw_img = cv2.imread(pic_path)
raw_img = raw_img.astype(np.uint8)
grey_img = cv2.cvtColor(raw_img, cv2.COLOR_BGR2GRAY)
return grey_img
class FindIt(object):
def __init__(self):
# config
self.config = FindItConfig()
# template pic dict,
# { pic_name: pic_cv_object }
self.template = dict()
def load_template(self, pic_path=None, pic_object=None):
""" load template picture """
assert (pic_path is not None) or (pic_object is not None), 'need path or cv object'
if pic_object is not None:
# pic_object: ('pic_name', cv_object)
pic_name = pic_object[0]
pic_data = pic_object[1]
self.template[pic_name] = pic_data
return
abs_path = os.path.abspath(pic_path)
self.template[abs_path] = load_from_path(abs_path)
def find(self, target_pic_path=None, target_cv_object=None, scale=None):
""" start matching """
assert self.template, 'template is empty'
assert self.config.cv_method not in (cv2.TM_SQDIFF_NORMED, cv2.TM_SQDIFF), \
'TM_SQDIFF & TM_SQDIFF_NORMED not supported'
assert (target_pic_path is not None) or (target_cv_object is not None), 'need path or cv object'
# load target
if target_cv_object is not None:
target_pic = target_cv_object
else:
target_pic = load_from_path(target_pic_path)
result = list()
for each_template_path, each_template in self.template.items():
# default scale
if not scale:
scale = (1, 3, 10)
min_val, max_val, min_loc, max_loc = self.compare(target_pic, each_template, scale)
min_loc, max_loc = map(lambda i: self.fix_location(each_template, i), [min_loc, max_loc])
# add to result list
result.append({
'path': each_template_path,
'min_val': min_val,
'max_val': max_val,
'min_loc': min_loc,
'max_loc': max_loc,
})
self.reset()
return self.build_result(target_pic_path, result)
    @staticmethod
    def fix_location(pic_object, location):
        """ cv2 returns the top-left corner of the match; shift it to the template's centre """
        height, width = pic_object.shape
        old_x, old_y = location
        # cv2 locations are (x, y) = (column, row), so x moves by half the width and y by half the height
        return old_x + width / 2, old_y + height / 2
def compare(self, pic, template_pic, scale):
"""
match template between picture and template
(https://www.pyimagesearch.com/2015/01/26/multi-scale-template-matching-using-python-opencv/)
:param pic:
:param template_pic:
:param scale: default to (1, 3, 10)
:return:
"""
pic_height, pic_width = pic.shape[:2]
result_list = list()
for each_scale in np.linspace(*scale):
# resize template
resized_pic = imutils.resize(template_pic, width=int(template_pic.shape[1] * each_scale))
# if template's size is larger than raw picture, break
if resized_pic.shape[0] > pic_height or resized_pic.shape[1] > pic_width:
break
res = cv2.matchTemplate(pic, resized_pic, self.config.cv_method)
result_list.append(cv2.minMaxLoc(res))
# return the max one
return sorted(result_list, key=lambda i: i[1])[-1]
def build_result(self, target_path, result):
""" build final result dict """
return {
'target_path': target_path,
'config': self.config.__dict__,
'data': result,
}
def reset(self):
""" reset template, target and result """
self.template = dict()
|
the-stack_106_24494 | """Copyright (c) 2020 Chengjie Wu"""
import time
import numpy as np
class GibbsLDA:
def __init__(self, n_components=3, doc_topic_prior=None,
topic_word_prior=None, iterations=1000, verbose=True):
"""Latent Dirichlet Allocation with Gibbs sampling
:param n_components: int, number of topics
:param doc_topic_prior: float, alpha. If None, 1/n_components*0.5 is set
by default.
:param topic_word_prior: float, beta. If None, 1/n_components*0.1 is set
by default.
:param iterations: int, iterations of Gibbs sampling
:param verbose: bool, whether to print intermediate results
"""
self.loaded = False
self.verbose = verbose
self.n_components = n_components
self.iterations = iterations
self.doc_topic_prior = \
doc_topic_prior if doc_topic_prior else 1/n_components*0.5
self.topic_word_prior = \
topic_word_prior if topic_word_prior else 1/n_components*0.1
self.num_docs = None
self.num_words = None
self.dict_word2ind = None
self.list_ind2word = None
self.corpus = None
self.alpha = None
self.beta = None
self.beta_sum = None
self.n_mk = None
self.n_kt = None
self.n_k = None
self.n_m = None
self.z_mn = None # current z sampling state
self.theta = None # best theta
self.phi = None # best phi
self.z_best = None # best z
self.ll_best = None # best ll
self.log = None # record log likelihood during training
def load_state(self, X, file):
"""Load LDA state from file.
:param X: a list of strings, corpus. Each string represents a document,
and words are separated with space.
:param file: string, file to load.
:return: None.
"""
self._initializing_corpus(X, file)
self.loaded = True
def save_state(self, file):
"""Save LDA state to file.
Note that we do not save the corpus. When the LDA is loaded next time,
it MUST read in exactly the same corpus. Behaviors are undefined if not.
:param file: string, file to save.
:return: None.
"""
np.savez(file, z_mn=self.z_mn, theta=self.theta, phi=self.phi,
z_best=self.z_best, ll_best=self.ll_best, log=self.log)
def fit(self, X, y=None):
""" Train LDA.
:param X: a list of strings, corpus. Each string represents a document,
and words are separated with space. If state is loaded, either
through load_state or a previous call to fit, X is ignored.
:param y: ignored.
:return: None.
"""
if not self.loaded:
self._initializing_corpus(X)
self.loaded = True
if self.verbose:
print("Before training:", self.log_likelihood())
# NOTE: we keep the best theta & phi
for it in range(self.iterations):
start_time = time.time()
self._gibbs_sampling_iteration()
theta = self.calculate_theta(
self.num_docs, self.n_components, self.alpha, self.n_mk)
phi = self.calculate_phi(
self.n_components, self.num_words, self.beta, self.n_kt)
ll = self.log_likelihood(theta, phi)
self.log.append(ll)
if self.verbose:
print("log likelihood:", self.log_likelihood(theta, phi))
if self.iterations < 30 or it >= 30:
# we keep 30 iterations of Gibbs sampling as burning in
if ll > self.ll_best:
self.ll_best = ll
self.theta = np.copy(theta)
self.phi = np.copy(phi)
self.z_best = np.copy(self.z_mn)
end_time = time.time()
if self.verbose:
print("Iteration", it, ", best log-likelihood:", self.ll_best,
", time consumed:", end_time - start_time)
def _initializing_corpus(self, X, z_file=None):
self.num_docs = len(X)
self.dict_word2ind = dict()
self.list_ind2word = list()
self.corpus = list()
max_len = 0
for line in X:
words = line.strip().split(" ")
max_len = max(max_len, len(words))
document = list()
for word in words:
if word not in self.dict_word2ind:
self.dict_word2ind[word] = len(self.list_ind2word)
self.list_ind2word.append(word)
document.append(self.dict_word2ind[word])
self.corpus.append(document)
assert len(self.corpus) == self.num_docs
for word, ind in self.dict_word2ind.items():
assert self.list_ind2word[ind] == word
self.num_words = len(self.list_ind2word)
if self.verbose:
print("Number of documents:", self.num_docs)
print("Number of words:", self.num_words)
# get alpha and beta
self.alpha = np.full(shape=(self.n_components,),
fill_value=self.doc_topic_prior, dtype=np.float32)
self.beta = np.full(shape=(self.num_words,),
fill_value=self.topic_word_prior, dtype=np.float32)
self.beta_sum = np.sum(self.beta)
self.n_mk = np.zeros(shape=(self.num_docs, self.n_components), dtype=np.int32)
self.n_kt = np.zeros(shape=(self.n_components, self.num_words), dtype=np.int32)
self.n_k = np.zeros(shape=(self.n_components,), dtype=np.int32)
self.n_m = np.zeros(shape=(self.num_docs,), dtype=np.int32)
if z_file:
db = np.load(z_file)
self.z_mn = np.array(db["z_mn"])
self.theta = db["theta"]
self.phi = db["phi"]
self.z_best = db["z_best"]
self.ll_best = db["ll_best"]
self.log = db["log"]
# initialization
for m, dm in enumerate(self.corpus):
for n, w_mn in enumerate(dm):
k = self.z_mn[m, n]
self.n_mk[m, k] += 1
self.n_m[m] += 1
self.n_kt[k, w_mn] += 1
self.n_k[k] += 1
else:
# initialization
self.z_mn = np.zeros(shape=(self.num_docs, max_len), dtype=np.int32)
for m, dm in enumerate(self.corpus):
for n, w_mn in enumerate(dm):
k = np.random.randint(self.n_components)
self.z_mn[m, n] = k
self.n_mk[m, k] += 1
self.n_m[m] += 1
self.n_kt[k, w_mn] += 1
self.n_k[k] += 1
self.theta = self.calculate_theta(
self.num_docs, self.n_components, self.alpha, self.n_mk)
self.phi = self.calculate_phi(
self.n_components, self.num_words, self.beta, self.n_kt)
self.z_best = np.copy(self.z_mn)
self.ll_best = self.log_likelihood()
self.log = list()
def _gibbs_sampling_iteration(self):
""" One Gibbs sampling step.
:return: None.
"""
for m, dm in enumerate(self.corpus):
for n, w_mn in enumerate(dm):
k = self.z_mn[m, n]
self.n_mk[m, k] -= 1
self.n_m[m] -= 1
self.n_kt[k, w_mn] -= 1
self.n_k[k] -= 1
k = self._conditional_z(
self.n_components, self.alpha, self.beta,
self.n_mk, self.n_kt, m, w_mn, self.beta_sum, self.n_k)
self.z_mn[m, n] = k
self.n_mk[m, k] += 1
self.n_m[m] += 1
self.n_kt[k, w_mn] += 1
self.n_k[k] += 1
@staticmethod
def _conditional_z(K, alpha, beta, n_mk, n_kt, m, t, beta_sum, n_k):
"""Sample new z_mt using conditional distribution."""
probability = \
(alpha + n_mk[m, :]) * ((beta[t] + n_kt[:, t]) / (beta_sum + n_k))
probability /= np.sum(probability)
assert(np.all(probability >= 0.))
assert(1.0 - 1e-6 < np.sum(probability) < 1.0 + 1e-6)
assert(len(probability) == K)
return np.random.choice(K, p=probability)
def calculate_theta(self, M=None, K=None, alpha=None, n_mk=None):
""" Calculate theta.
If parameters are not given, then corresponding parameters are picked
from current LDA.
:param M: int, number of documents.
:param K: int, number of topics.
:param alpha: numpy array of shape [K, ], alpha.
:param n_mk: numpy array of shape [M, K], n_mk.
:return: numpy array of shape [M, K], theta.
"""
M = M if M is not None else self.num_docs
K = K if K is not None else self.n_components
alpha = alpha if alpha is not None else self.alpha
n_mk = n_mk if n_mk is not None else self.n_mk
theta = n_mk + np.tile(alpha, (M, 1))
theta /= np.repeat(np.sum(theta, axis=1).reshape((-1, 1)), K, axis=1)
return theta
def calculate_phi(self, K=None, V=None, beta=None, n_kt=None):
""" Calculate phi.
If parameters are not given, then corresponding parameters are picked
from current LDA.
:param K: int, number of topics.
:param V: int, number of words (size of vocabulary).
:param beta: numpy array of shape [V, ], beta.
:param n_kt: numpy array of shape [K, V], n_kt.
:return: numpy array of shape [K, V], phi.
"""
K = K if K is not None else self.n_components
V = V if V is not None else self.num_words
beta = beta if beta is not None else self.beta
n_kt = n_kt if n_kt is not None else self.n_kt
phi = n_kt + np.tile(beta, (K, 1))
phi /= np.repeat(np.sum(phi, axis=1).reshape((-1, 1)), V, axis=1)
return phi
def log_likelihood(self, theta=None, phi=None):
""" Calculate log likelihood with respect to this LDA's corpus.
If parameters are not given, then corresponding parameters are picked
from current LDA.
:param theta: numpy array of shape [M, K], theta.
:param phi: numpy array of shape [K, V], phi.
:return: float, log likelihood.
"""
theta = theta if theta is not None else self.theta
phi = phi if phi is not None else self.phi
ret = 0.
for m, dm in enumerate(self.corpus):
for n, w_mn in enumerate(dm):
tp = 0.
for k in range(self.n_components):
tp += theta[m, k] * phi[k, w_mn]
ret += np.log(tp)
return ret
def get_representative_words(self, phi=None):
""" Get top 10 representative words in each topic.
If parameters are not given, then corresponding parameters are picked
from current LDA.
:param phi: numpy array of shape [K, V], phi.
:return: None.
"""
phi = phi if phi is not None else self.phi
for i in range(self.n_components):
print("Topic", i)
c = np.argsort(self.phi[i, :])
for j in c[-1:-11:-1]:
print(self.list_ind2word[j], phi[i, j])
|
the-stack_106_24496 | # Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Note: This module lives separate from other related ones as it has custom SDC hook logic.
import logging
import os
from uuid import uuid4
import pytest
from streamsets.testframework.markers import cluster
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
@pytest.fixture(scope='module')
def sdc_common_hook():
def hook(data_collector):
data_collector.set_user('admin')
data_collector.sdc_properties['stage.conf_hadoop.always.impersonate.current.user'] = 'true'
return hook
@pytest.fixture(autouse=True)
def impersonation_check(sdc_executor):
if sdc_executor.sdc_configuration.get('stage.conf_hadoop.always.impersonate.current.user') != 'true':
pytest.skip('Hadoop FS impersonation requires stage.conf_hadoop.'
'always.impersonate.current.user to be set to true')
@cluster('cdh', 'hdp')
def test_hadoop_fs_strict_impersonation(sdc_builder, sdc_executor, cluster):
""" Test strict impersonation (SDC-3704) of Hadoop FS target. The pipeline would look like:
dev_data_generator >> hadoop_fs
"""
hdfs_path = os.path.join(os.sep, "tmp", str(uuid4()))
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [{'field': 'text', 'precision': 10, 'scale': 2, 'type': 'STRING'}]
hadoop_fs = pipeline_builder.add_stage('Hadoop FS', type='destination')
hadoop_fs.set_attributes(data_format='TEXT', directory_template=hdfs_path, files_prefix='sdc-${sdc:id()}')
dev_data_generator >> hadoop_fs
pipeline = pipeline_builder.build(title='Hadoop FS impersonation pipeline').configure_for_environment(cluster)
# Some ENVs like HDP will auto-set this field which is against logic of this test and hence we have to manually
# reset that field back to empty value.
hadoop_fs.hdfs_user = ''
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True)
sdc_executor.stop_pipeline(pipeline)
# Validate that the files were created with proper user name.
hdfs_fs_files = cluster.hdfs.client.list(hdfs_path)
assert len(hdfs_fs_files) == 1
status = cluster.hdfs.client.status(f'{hdfs_path}/{hdfs_fs_files[0]}')
assert status['owner'] == 'admin'
finally:
cluster.hdfs.client.delete(hdfs_path, recursive=True)
|
the-stack_106_24497 | import sqlalchemy as db
import os
engine = db.create_engine('postgresql://user1:user1@localhost/mydb')
for root, directories, filenames in os.walk('/home/user1/pg1/bin'):
for directory in directories:
print(os.path.join(root, directory))
for filename in filenames:
print(os.path.join(root,filename))
metadata = db.MetaData()
# census = db.Table('census', metadata, autoload=True, autoload_with=engine)
files = db.Table('files', metadata, autoload=True, autoload_with=engine )
print(files.columns.keys())
user = db.Table('user', metadata,
                db.Column('user_id', db.Integer, primary_key=True),
                db.Column('user_name', db.String(16), nullable=False),
                db.Column('email_address', db.String(60)),
                db.Column('nickname', db.String(50), nullable=False)
|
the-stack_106_24498 | #!/usr/bin/env python3
from glob import glob
import json
import os
import sys
import requests
from socket import gethostname
import hashlib
import re
# Authentication for user filing issue
USE_GITHUB = True
try:
USERNAME = os.environ['GITHUB_USER']
except KeyError:
    print('WARN: Environment variable GITHUB_USER unset -- cannot write to github.')
USE_GITHUB = False
try:
# Use a developer token if you have 2FA
PASSWORD = os.environ['GITHUB_PASSWORD']
except KeyError:
    print('WARN: Environment variable GITHUB_PASSWORD unset -- cannot write to github.')
USE_GITHUB = False
# The repository to add this issue to
REPO_OWNER = 'CodaProtocol'
REPO_NAME = 'coda'
""" Mask out actual line numbers and collumns, generate a signature based on stripped data """
def error_sig(string):
output = ''
for line in string.splitlines(True):
if 'Called' in line or 'Raised' in line:
line = re.sub("line (\d+), characters (\d+)-(\d+)",
"line HIDDEN, characters HIDDEN", line)
output += line
sig = hashlib.md5(output.encode('utf-8')).hexdigest()
return(sig)
def make_github_issue(title, body=None, labels=None):
'''Create an issue on github.com using the given parameters.'''
# Our url to create issues via POST
url = 'https://api.github.com/repos/%s/%s/issues' % (REPO_OWNER, REPO_NAME)
# Create an authenticated session to create the issue
session = requests.Session()
session.auth = (USERNAME, PASSWORD)
# Create our issue
issue = {'title': title,
'body': body,
'labels': labels}
# Add the issue to our repository
r = session.post(url, json.dumps(issue))
if r.status_code == 201:
print ('Successfully created Issue {0:s}'.format(title))
data = r.json()
print ('URL: %s' % data['html_url'])
else:
print ('Could not create Issue {0:s}'.format(title))
print ('Response:', r.content)
def yes_or_no(question):
while "the answer is invalid":
try:
reply = str(input(question+' (y/n)[default: n]: ')).lower().strip()
except KeyboardInterrupt:
print('\nExiting')
sys.exit(1)
if len(reply) < 1:
return False
elif reply[0] == 'y':
return True
elif reply[0] == 'n':
return False
if __name__ == "__main__":
crashdirs = glob('test-coda-CRASH-*/mina.log')
seen_exns = []
for crashdir in crashdirs:
with open(crashdir, encoding="ISO-8859-1") as fp:
for count, line in enumerate(fp):
if 'Fatal' in line:
data = json.loads(line)
try:
exn_1000 = "".join(
data['metadata']['exn'].splitlines())[:1000]
exn = exn_1000[:130] + '...'
except KeyError:
exn = 'Unknown'
if exn in seen_exns:
# Duplicate
continue
else:
seen_exns.append(exn)
print('-'*80)
print(crashdir)
print('New: %s' % exn)
body = 'Details:\n\n'
body += 'Hash: %s\n' % error_sig(exn)
body += 'Crash Timestamp: %s\n' % data['timestamp']
body += 'Host: `%s`\n\n' % gethostname()
body += 'Partial trace:\n'
body += '```\n'
body += exn_1000.replace(' ', '\n')
body += '...'
body += '\n```'
print(body)
if sys.stdin.isatty():
# running interactively
if yes_or_no('Create new issue?'):
# FIXME - how to attach gz to issue.
title = 'TESTING - CRASH - TESTNET - %s' % exn.strip()
if USE_GITHUB:
make_github_issue(title=title,
body=body,
labels=['testnet', 'robot'])
else:
print('Running non-interactively.')
|
the-stack_106_24500 | import pygame
from sys import exit
from random import randint, choice
class Player(pygame.sprite.Sprite):  # inherits from pygame.sprite.Sprite
    def __init__(self):
        super().__init__()  # initialize the parent Sprite class. Needs 2 attributes at minimum: image and rect
player_walk1 = pygame.image.load('graphics/Player/player_walk_1.png').convert_alpha()
player_walk2 = pygame.image.load('graphics/Player/player_walk_2.png').convert_alpha()
self.player_walk = [player_walk1, player_walk2]
self.player_index = 0
self.player_jump = pygame.image.load('graphics/Player/jump.png').convert_alpha()
self.image = self.player_walk[self.player_index] # ALWAYS NEEDED
self.rect = self.image.get_rect(midbottom=(80, 300)) # ALWAYS NEEDED
self.gravity = 0 # player.gravity
#Sound
self.jump_sound = pygame.mixer.Sound('audio/audio_jump.mp3')
self.jump_sound.set_volume(0.2)
# Player Jump
def player_input(self): # player
keys = pygame.key.get_pressed()
if keys[pygame.K_SPACE] and self.rect.bottom >= 300: # player.rect.bottom
self.gravity = -20 # player.gravity
self.jump_sound.play()
    # Gravity
def apply_gravity(self): # player
self.gravity += 1 # player.gravity
self.rect.y += self.gravity # player.rect.y / player.gravity
if self.rect.bottom >= 300: # player.rect.bottom
self.rect.bottom = 300 # player.rect.bottom
# Animation
def animation_state(self):
# Play walking animation if on floor, jump when not on floor
if self.rect.bottom < 300:
self.image = self.player_jump
else:
            self.player_index += 0.1 # Advance the walk animation slowly
# If the index gets to 1, reset index to 0
if self.player_index >= len(self.player_walk): self.player_index = 0
self.image = self.player_walk[int(self.player_index)]
# Update
def update(self): # player
self.player_input()
self.apply_gravity()
self.animation_state()
class Obstacle(pygame.sprite.Sprite): # inherits from pygame.sprite.Sprite
def __init__(self, type): # Type of obsticle. Fly or Snail
super().__init__()
if type == 'fly':
fly_1 = pygame.image.load('graphics/fly/Fly1.png').convert_alpha()
fly_2 = pygame.image.load('graphics/fly/Fly2.png').convert_alpha()
self.frames = [fly_1, fly_2] # fly_frames
y_pos = 210 # y_pos of fly
else:
snail_1 = pygame.image.load('graphics/snail/snail1.png').convert_alpha()
snail_2 = pygame.image.load('graphics/snail/snail2.png').convert_alpha()
self.frames = [snail_1, snail_2] # snail_frames
            y_pos = 300 # Ground position
self.animation_index = 0
self.image = self.frames[self.animation_index] # ALWAYS NEEDED
self.rect = self.image.get_rect(midbottom=(randint(900, 1100), y_pos)) # ALWAYS NEEDED
# Animation State
def animation_state(self):
self.animation_index += 0.1
if self.animation_index >= len(self.frames): self.animation_index = 0
self.image = self.frames[int(self.animation_index)]
# Update
def update(self):
self.animation_state() # Call animation_state def
self.rect.x -= 6 # Subtract 6px every update
self.destroy()
# Remove extra
def destroy(self):
        if self.rect.x <= -100: # If the sprite has moved off screen to the left
self.kill() # Destroys Obstacle sprite
def display_score():
current_time = int(pygame.time.get_ticks() / 1000) - start_time # in milliseconds
score_surf = test_font.render(f'Score: {current_time}', False, (64, 64, 64))
score_rect = score_surf.get_rect(center=(400, 50))
screen.blit(score_surf, score_rect)
return current_time
def collision_sprite(): # sprite, group, bool
if pygame.sprite.spritecollide(player.sprite, obstacle_group, False):
obstacle_group.empty()
return False
else: return True
""" Frames Per Second Math: 1 frame/second > (10px/s * 1fps) = 10px/s """
# In Pygame, the Origin point is the Top Left at 0,0
# To go right from the top left, increase X, to go down, increase Y
pygame.init() # Initialize everything
screen = pygame.display.set_mode((800, 400)) # ((width,height))
pygame.display.set_caption("Runner") # Window Title Bar
clock = pygame.time.Clock() # Capital 'C' in Clock
test_font = pygame.font.Font('font/Pixeltype.ttf', 50) # Font type, Font size
game_active = False
start_time = 0
score = 0
bg_Music = pygame.mixer.Sound('audio/music.wav')
bg_Music.set_volume(0.3)
bg_Music.play(loops = -1)
# Groups
player = pygame.sprite.GroupSingle() # Group Player class into Single instance
player.add(Player())
obstacle_group = pygame.sprite.Group()
""" Surfaces: Display Surface (game window), Regular Surface (any images) """
sky_surf = pygame.image.load('graphics/Sky.png').convert_alpha() # converts image for pygame to handle better
ground_surf = pygame.image.load('graphics/ground.png').convert_alpha()
# score_surf = test_font.render("My Game", False, (64,64,64)).convert_alpha() #text ino, AA, color
# Intro Screen
player_stand = pygame.image.load('graphics/Player/player_stand.png').convert_alpha()
# player_stand = pygame.transform.scale(player_stand,(200,200)) #Scale Player
# player_stand = pygame.transform.scale2x(player_stand) #Scale Player
player_stand = pygame.transform.rotozoom(player_stand, 0, 2)
player_stand_rect = player_stand.get_rect(center=(400, 200))
# Game Name
game_name = test_font.render('Pixel Runner', False, (111, 196, 169))
game_name_rect = game_name.get_rect(center=(400, 80))
game_message = test_font.render('Press SPACE to start', False, (111, 196, 169))
game_message_rect = game_message.get_rect(center=(410, 330))
# Event Timer
# Add +1 to every event to avoid errors with events
obstacle_timer = pygame.USEREVENT + 1
pygame.time.set_timer(obstacle_timer,1500)
####################################################################################################
""" Game While-Loop """
while True: # draw all elements
for event in pygame.event.get(): # Check for a specific event
if event.type == pygame.QUIT: # If someone has clicked the upper X to close the window
pygame.quit() # Close the window, unintialize everything
exit() # from sys.exit, stops the graphic error message
if game_active:
if event.type == obstacle_timer:
# calling Obstacle class and type of obstacle
obstacle_group.add(Obstacle(choice(['fly','snail','snail','snail'])))
else:
if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
game_active = True
start_time = int(pygame.time.get_ticks() / 1000)
if game_active: # If game_active is True, run the code below
# blit draws variable on surface at stated coordinates
screen.blit(sky_surf, (0, 0))
screen.blit(ground_surf, (0, 300)) # Over 0, Down 300
score = display_score()
player.draw(screen) # Specify 1 argument, what surface to draw on
player.update() # update sprite
obstacle_group.draw(screen)
obstacle_group.update() # update sprite
        game_active = collision_sprite() # Collision
else:
screen.fill((94, 129, 162))
screen.blit(player_stand, player_stand_rect)
score_message = test_font.render(f'Your Score: {score}', False, (111, 196, 169))
score_message_rect = score_message.get_rect(center=(410, 330))
screen.blit(game_name, game_name_rect)
# If score is 0, display start message
if score == 0: screen.blit(game_message, game_message_rect)
# If score not 0, display score
else: screen.blit(score_message, score_message_rect)
pygame.display.update() # update everything
clock.tick(60) # Tells the game not to run faster than 60 FPS |
the-stack_106_24501 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join('..', '..', 'src')))
# -- Project information -----------------------------------------------------
project = '{{cookiecutter.project_name}}'
copyright = 'Equinor'
author = '{{cookiecutter.project_name}}'
# The full version, including alpha/beta/rc tags
release = 'version'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autosummary',
'sphinx.ext.napoleon',
'm2r'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Map source file suffixes to the markup type they are parsed as
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
# Auto generate API documentation for modules
autosummary_generate = True
# Default flags used by autodoc directives
autodoc_default_options = {
'members': True,
'member-order': 'bysource',
'special-members': '__init__',
}
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'sphinx_rtd_theme'
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# alabaster theme options: https://alabaster.readthedocs.io/en/latest/customization.html
html_theme_options = {
"description": "{{cookiecutter.project_description}}",
"extra_nav_links": {
"Index": "genindex.html",
"Module Index": "py-modindex.html",
"Search Page": "search.html"
},
"github_banner": False,
"note_bg": "#FFF59C",
"show_powered_by": False,
"show_related": False,
"sidebar_collapse": False,
}
# Custom sidebar templates (often theme specific), maps document names to template names.
# alabaster options: https://alabaster.readthedocs.io/en/latest/customization.html
html_sidebars = {
"index": [
"about.html",
"navigation.html",
"searchbox.html"
],
"**": [
"about.html",
'navigation.html',
"searchbox.html"
],
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [
# "_static"
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = False
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
|
the-stack_106_24502 | import pytest
from starlette.applications import Starlette
from starlette.middleware import Middleware
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import PlainTextResponse, StreamingResponse
from starlette.routing import Mount, Route, WebSocketRoute
class CustomMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
response = await call_next(request)
response.headers["Custom-Header"] = "Example"
return response
def homepage(request):
return PlainTextResponse("Homepage")
def exc(request):
raise Exception("Exc")
def exc_stream(request):
return StreamingResponse(_generate_faulty_stream())
def _generate_faulty_stream():
yield b"Ok"
raise Exception("Faulty Stream")
class NoResponse:
def __init__(self, scope, receive, send):
pass
def __await__(self):
return self.dispatch().__await__()
async def dispatch(self):
pass
async def websocket_endpoint(session):
await session.accept()
await session.send_text("Hello, world!")
await session.close()
app = Starlette(
routes=[
Route("/", endpoint=homepage),
Route("/exc", endpoint=exc),
Route("/exc-stream", endpoint=exc_stream),
Route("/no-response", endpoint=NoResponse),
WebSocketRoute("/ws", endpoint=websocket_endpoint),
],
middleware=[Middleware(CustomMiddleware)],
)
def test_custom_middleware(test_client_factory):
client = test_client_factory(app)
response = client.get("/")
assert response.headers["Custom-Header"] == "Example"
with pytest.raises(Exception) as ctx:
response = client.get("/exc")
assert str(ctx.value) == "Exc"
with pytest.raises(Exception) as ctx:
response = client.get("/exc-stream")
assert str(ctx.value) == "Faulty Stream"
with pytest.raises(RuntimeError):
response = client.get("/no-response")
with client.websocket_connect("/ws") as session:
text = session.receive_text()
assert text == "Hello, world!"
def test_state_data_across_multiple_middlewares(test_client_factory):
expected_value1 = "foo"
expected_value2 = "bar"
class aMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
request.state.foo = expected_value1
response = await call_next(request)
return response
class bMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
request.state.bar = expected_value2
response = await call_next(request)
response.headers["X-State-Foo"] = request.state.foo
return response
class cMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
response = await call_next(request)
response.headers["X-State-Bar"] = request.state.bar
return response
def homepage(request):
return PlainTextResponse("OK")
app = Starlette(
routes=[Route("/", homepage)],
middleware=[
Middleware(aMiddleware),
Middleware(bMiddleware),
Middleware(cMiddleware),
],
)
client = test_client_factory(app)
response = client.get("/")
assert response.text == "OK"
assert response.headers["X-State-Foo"] == expected_value1
assert response.headers["X-State-Bar"] == expected_value2
def test_app_middleware_argument(test_client_factory):
def homepage(request):
return PlainTextResponse("Homepage")
app = Starlette(
routes=[Route("/", homepage)], middleware=[Middleware(CustomMiddleware)]
)
client = test_client_factory(app)
response = client.get("/")
assert response.headers["Custom-Header"] == "Example"
def test_middleware_repr():
middleware = Middleware(CustomMiddleware)
assert repr(middleware) == "Middleware(CustomMiddleware)"
def test_fully_evaluated_response(test_client_factory):
# Test for https://github.com/encode/starlette/issues/1022
class CustomMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
await call_next(request)
return PlainTextResponse("Custom")
app = Starlette(middleware=[Middleware(CustomMiddleware)])
client = test_client_factory(app)
response = client.get("/does_not_exist")
assert response.text == "Custom"
def test_exception_on_mounted_apps(test_client_factory):
sub_app = Starlette(routes=[Route("/", exc)])
app = Starlette(routes=[Mount("/sub", app=sub_app)])
client = test_client_factory(app)
with pytest.raises(Exception) as ctx:
client.get("/sub/")
assert str(ctx.value) == "Exc"
|
the-stack_106_24504 | # -*- coding: utf-8 -*-
# author:lyh
# datetime:2020/7/31 20:21
"""
169. Majority Element
Given an array of size n, find the majority element. The majority element is the element that appears more than ⌊ n/2 ⌋ times.
You may assume that the array is non-empty and that the majority element always exists in the array.
Example 1:
Input: [3,2,3]
Output: 3
Example 2:
Input: [2,2,1,1,1,2,2]
Output: 2
"""
from typing import List
class Solution:
def majorityElement(self, nums: List[int]) -> int:
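        # Boyer-Moore voting: keep a candidate (res) and a counter. A matching value
        # increments the counter, a different value decrements it, and when the counter
        # reaches zero the current value becomes the new candidate. Because the majority
        # element occurs more than n/2 times, it always survives, giving O(n) time and
        # O(1) extra space.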
count = 0
res = 0
for num in nums:
if res == num:
count += 1
elif count > 0:
count -= 1
else:
count = 1
res = num
return res
if __name__ == '__main__':
print(Solution().majorityElement([3, 2, 3]), 3)
print(Solution().majorityElement([2, 2, 1, 1, 1, 2, 2]), 2)
|
the-stack_106_24506 | # Wrong answer 20%
jipes = 0
turistas = 0
action = input()
while action != "ABEND":
if action == "SALIDA":
jipes += 1
turistas += int(input())
elif action == "VUELTA":
if jipes > 0:
jipes -= 1
T = int(input())
if turistas > 0:
turistas -= T
else:
break
action = input()
print(turistas)
print(jipes)
|
the-stack_106_24507 | from database import EventStream
dataset_id = "dataset-id"
version = "version"
sink_id = "0agee"
event_stream_id = f"{dataset_id}/{version}"
event_stream_stack_name = f"event-stream-{dataset_id}-{version}"
event_subscribable_stack_name = f"event-subscribable-{dataset_id}-{version}"
event_sink_stack_name = f"event-sink-{dataset_id}-{version}-{sink_id}"
cf_stack_template = {
"description": "foo",
"resources": {"foo": {"type": "bar", "properties": {"foo": "bar"}}},
}
event_stream = EventStream(
**{
"cf_stack_template": cf_stack_template,
"cf_status": "CREATE_IN_PROGRESS",
"cf_stack_name": f"event-stream-{dataset_id}-{version}",
"id": event_stream_id,
"create_raw": True,
"updated_by": "larsmonsen",
"updated_at": "2020-01-21T09:28:57.831435",
"deleted": False,
"subscribable": {
"enabled": True,
"cf_stack_template": cf_stack_template,
"cf_stack_name": f"event-subscribable-{dataset_id}-{version}",
"cf_status": "CREATE_IN_PROGRESS",
},
"sinks": [
{
"id": "c8sh5",
"type": "s3",
"config": {"write_interval_seconds": 300},
"cf_stack_template": cf_stack_template,
"cf_stack_name": f"event-sink-{dataset_id}-{version}-c8sh5",
"cf_status": "ACTIVE",
},
{
"id": sink_id,
"type": "elasticsearch",
"config": {"es_cluster": "some-uri"},
"cf_stack_template": cf_stack_template,
"cf_stack_name": f"event-sink-{dataset_id}-{version}-{sink_id}",
"cf_status": "CREATE_IN_PROGRESS",
},
],
}
)
|
the-stack_106_24509 | import os, sys
from flask import Flask, json
app = Flask(__name__)
@app.route('/', methods=['GET'])
def get_method():
status=200
response = app.response_class(
response=json.dumps({'status':'OK'}),
status=status,
mimetype='application/json'
)
return response
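# Quick manual check once the server below is running (example command only;
# host/port taken from the app.run() call):
#   curl http://localhost:8088/   ->   {"status": "OK"}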
app.run(host='0.0.0.0', port=8088, debug=True) |
the-stack_106_24510 | from m5stack import *
from m5ui import *
from uiflow import *
import espnow
import wifiCfg
import json
import hat
setScreenColor(0x111111)
hat_BeetleC9 = hat.get(hat.BEETLEC)
espnow.init()
title0 = M5Title(title="Title", x=3 , fgcolor=0xFFFFFF, bgcolor=0x0000FF)
label0 = M5TextBox(8, 56, "Text", lcd.FONT_Default,0xFFFFFF, rotate=0)
label1 = M5TextBox(43, 57, "Text", lcd.FONT_Default,0xFFFFFF, rotate=0)
label2 = M5TextBox(26, 86, "Text", lcd.FONT_Default,0xFFFFFF, rotate=0)
rectangle0 = M5Rect(28, 105, 20, 20, 0xFFFFFF, 0xFFFFFF)
color = None
addr = None
data = None
message = None
left = None
right = None
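# The ESP-NOW payload decoded in recv_cb below is expected to be JSON carrying
# 'color', 'left' and 'right' keys, e.g. {"color": "red", "left": 40, "right": 40}
# (the example values here are an assumption, not part of the original sketch).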
def set_led():
global color, addr, data, message, left, right
color = message['color']
label2.setText(str(color))
if color == 'red':
rectangle0.setBgColor(0xff0000)
hat_BeetleC9.SetRGB(1, 0xff0000)
else:
rectangle0.setBgColor(0xffffff)
hat_BeetleC9.SetAllRGB(0x000000)
def set_dirction():
global color, addr, data, message, left, right
left = message['left']
right = message['right']
label0.setText(str(left))
label1.setText(str(right))
hat_BeetleC9.SetPulse(0, left)
hat_BeetleC9.SetPulse(1, right)
def recv_cb(_):
global color,addr,data,message,left,right
addr, _, data = espnow.recv_data(encoder='str')
message = json.loads(data)
set_dirction()
set_led()
pass
espnow.recv_cb(recv_cb)
lcd.setBrightness(1)
setScreenColor(0x000000)
title0.setBgColor(0x330099)
title0.setTitle('Beetcle-C')
|
the-stack_106_24515 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.utils import weight_norm as norm
import numpy as np
import module as mm
from CookieTTS.utils.model.layers import ConvNorm, LinearNorm
class ReferenceEncoder(nn.Module):
"""
Reference Encoder.
6 convs + GRU + FC
:param in_channels: Scalar.
:param token_embedding_size: Scalar.
:param activation_fn: activation function
"""
def __init__(self, hparams, activation_fn=None):
super(ReferenceEncoder, self).__init__()
self.token_embedding_size = hparams.token_embedding_size
self.in_channels = hparams.n_frames_per_step
# ref_enc_filters
channels = [self.in_channels] + hparams.ref_enc_filters + [self.token_embedding_size]
self.convs = nn.ModuleList([
mm.Conv2d(channels[c], channels[c+1], 3, stride=2, bn=True, bias=False, activation_fn=torch.relu)
for c in range(len(channels)-1)
]) # (Batch, Time_domain/r, 128)
self.gru = nn.GRU(self.token_embedding_size*2, self.token_embedding_size, batch_first=True)
self.fc = nn.Sequential(
nn.Linear(self.token_embedding_size, self.token_embedding_size),
)
self.activation_fn = activation_fn
def forward(self, x, hidden=None):
"""
        :param x: (Batch, n_mel_channels, Time_domain) Tensor. Mel spectrogram.
        :param hidden: Tensor. Initial hidden state for the GRU.
        Returns:
            y_: (Batch, 1, Embedding) Reference Embedding
"""
y_ = x.transpose(1, 2).unsqueeze(1) # [B, n_mel, dec_T) -> [B, 1, dec_T, n_mel]
for i in range(len(self.convs)):
y_ = self.convs[i](y_)
# [B, C, dec_T//64, n_mel//64]
y_ = y_.transpose(1, 2) # [B, C, dec_T//64, n_mel//64] -> [B, dec_T//64, C, n_mel//64]
shape = y_.shape
y_ = y_.contiguous().view(shape[0], shape[1], shape[2]*shape[3]) # [B, dec_T//64, C, n_mel//64] -> [B, dec_T//64, C*n_mel//64] merge last 2 dimensions
y_, out = self.gru(y_, hidden) # out = [1, B, T_Embed]
y_ = self.fc(out.squeeze(0)) # [1, B, T_Embed] -> [B, T_Embed]
if self.activation_fn is not None:
y_ = self.activation_fn(y_)
return y_.unsqueeze(1) # (Batch, 1, Embedding)
class MultiHeadAttention(nn.Module):
"""
Multi-head Attention
:param n_units: Scalars.
:param token_embedding_size : Scalars.
"""
def __init__(self, hparams, n_units=128, outdim=5):
super(MultiHeadAttention, self).__init__()
self.token_embedding_size = hparams.token_embedding_size
self.num_heads = hparams.num_heads
self.token_num = outdim
self.n_units = hparams.gstAtt_dim
self.split_size = n_units // self.num_heads
self.conv_Q = mm.Conv1d(self.token_embedding_size, n_units, 1) # in_channels, out_channels, kernel_size
self.conv_K = mm.Conv1d(self.token_embedding_size, n_units, 1) # in_channels, out_channels, kernel_size
self.fc_Q = nn.Sequential(
nn.Linear(n_units, n_units),
nn.Tanh(),
)
self.fc_K = nn.Sequential(
nn.Linear(n_units, n_units),
nn.Tanh(),
)
self.fc_V = nn.Sequential(
nn.Linear(self.token_embedding_size, self.split_size),
nn.Tanh(),
)
self.fc_A = nn.Sequential(
nn.Linear(n_units, self.token_num),
nn.Tanh(),
)
def forward(self, ref_embedding, token_embedding):
"""
:param ref_embedding: (B, 1, Embedding) Reference embedding
:param token_embedding: (B, token_num, embed_size) Token Embedding
Returns:
y_: (B, token_num) Tensor. Style attention weight
"""
# (B, 1, n_units)
Q = self.fc_Q(self.conv_Q(ref_embedding.transpose(1,2)).transpose(1,2)) # (B, 1, Embedding) -> (B, Embedding, 1) -> (B, Embedding, 1) ->
K = self.fc_K(self.conv_K(token_embedding.transpose(1,2)).transpose(1,2)) # (B, token_num, n_units)
V = self.fc_V(token_embedding) # (B, token_num, n_units)
Q = torch.stack(Q.split(self.split_size, dim=-1), dim=0) # (n_heads, B, 1, n_units//n_heads)
K = torch.stack(K.split(self.split_size, dim=-1), dim=0) # (n_heads, B, token_num, n_units//n_heads)
V = torch.stack(V.split(self.split_size, dim=-1), dim=0) # (n_heads, B, token_num, n_units//n_heads)
inner_A = torch.softmax(
torch.matmul(Q, K.transpose(-2, -1)) / self.split_size**0.5,
dim=-1
) # (n_heads, B, 1, token_num)
y_ = torch.matmul(inner_A, V) # (n_heads, B, 1, n_units//n_heads)
y_ = torch.cat(y_.split(1, dim=0), dim=-1).squeeze() # (B, n_units)
y_ = self.fc_A(y_) # (B, token_num)
return y_
class GST(nn.Module):
"""
Style Token Layer
Reference Encoder + Multi-head Attention, token embeddings
:param token_embedding_size: Scalar.
:param n_units: Scalar. for multihead attention ***
"""
def __init__(self, hparams):
super(GST, self).__init__()
if not hparams.ss_vae_gst:
mha_outdim = hparams.token_num * (1+hparams.gst_vae_mode)
else:
mha_outdim = len(hparams.vae_classes)
# VAE / SS-VAE
self.vae = hparams.gst_vae_mode
self.ss_vae = hparams.ss_vae_gst
self.ss_vae_zu_dim = hparams.ss_vae_zu_dim
if self.ss_vae:
self.ss_vae_layers = nn.Sequential(
nn.Linear(mha_outdim, 2*self.ss_vae_zu_dim),
nn.Tanh(),
)
# Encoder
self.token_embedding_size = hparams.token_embedding_size
self.token_num = hparams.token_num
self.ref_encoder = ReferenceEncoder(hparams, activation_fn=torch.tanh)
self.att = MultiHeadAttention(hparams, outdim=mha_outdim)
self.token_embedding = nn.Parameter(torch.zeros( [self.ss_vae_zu_dim if self.ss_vae else self.token_num,
self.token_embedding_size])) # (token_num, Embedding)
init.normal_(self.token_embedding, mean=0., std=0.5)
# Token activation function
if hparams.token_activation_func == 'softmax': self.activation_fn = 0
elif hparams.token_activation_func == 'sigmoid': self.activation_fn = 1
elif hparams.token_activation_func == 'tanh': self.activation_fn = 2
elif hparams.token_activation_func == 'linear': self.activation_fn = 3
        else: raise ValueError(f'token_activation_func of {hparams.token_activation_func} is invalid. Please use "softmax", "sigmoid", "tanh" or "linear"')
# tanh on output embed
self.output_tanh = True
# torchMoji
self.torchMoji_linear = hparams.torchMoji_linear
if self.torchMoji_linear:
self.map_lin = LinearNorm(
hparams.torchMoji_attDim, self.token_num * (1+hparams.gst_vae_mode))
# Drop Tokens
self.p_drop_tokens = hparams.p_drop_tokens
self.drop_tokens_mode = hparams.drop_tokens_mode
if self.drop_tokens_mode == 'embedding':
self.embedding = nn.Embedding(1, self.token_num * (1+hparams.gst_vae_mode))
elif self.drop_tokens_mode == 'speaker_embedding':
self.speaker_embedding = nn.Embedding(hparams.n_speakers, self.token_num * (1+hparams.gst_vae_mode))
def reparameterize(self, mu, logvar, rand_sample=None):
# use for VAE sampling
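        # Standard VAE reparameterization trick: sample z = mu + sigma * eps with
        # sigma = exp(0.5 * logvar) and eps ~ N(0, I), so gradients can flow through
        # mu and logvar; at inference time the mean is returned instead.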
if rand_sample or self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def forward(self, ref, ref_mode=1):
"""
:param ref: (Batch, Time_domain, n_mel_channels) Tensor containing reference audio or (Batch, token_num) if not ref_mode
:param ref_mode: Boolean. whether it is reference mode
Returns:
:style_embedding: (Batch, 1, Embedding) Style Embedding
:style_tokens: (Batch, token_num) Tensor. Combination weight.
"""
token_embedding = self.token_embedding.unsqueeze(0).expand(ref.size(0), -1, -1) # (Batch, token_num, Embedding)
style_embedding = None
if np.random.uniform(0.0, 1.0) <= self.p_drop_tokens and self.training: # if drop_tokens
if self.drop_tokens_mode == 'embedding':
style_tokens = self.embedding(1)
elif self.drop_tokens_mode == 'speaker_embedding':
style_tokens = self.speaker_embedding(ref) # ref = speaker_ids
elif self.drop_tokens_mode == 'zeros':
style_tokens = torch.zeros(ref.shape[0],self.token_num).cuda()
elif self.drop_tokens_mode == 'halfs':
style_tokens = torch.ones(ref.shape[0],self.token_num).cuda() * 0.5
elif self.drop_tokens_mode == 'emotion_embedding':
pass # replace with lookup table for emotions.
else: # normal reference mode
if ref_mode == 1: # get style_token from spectrogram
ref = self.ref_encoder(ref) # (Batch, 1, Embedding)
style_tokens = self.att(ref, token_embedding) # (Batch, token_num)
elif ref_mode == 0:# get style_token from user input
style_tokens = ref
elif ref_mode == 2: # infer style_tokens from input_text using torchMoji
attHidden = torchMoji(ref) # ref=input_text
style_tokens = self.map_lin(attHidden)
elif ref_mode == 3: # training for mapping torchMoji attHidden to tokens
style_tokens = self.map_lin(ref) # ref = torchMoji attention hidden, style_tokens = style_tokens
# Apply Activation function
if self.activation_fn == 0:
style_tokens = torch.softmax(style_tokens, dim=-1)
elif self.activation_fn == 1:
style_tokens = style_tokens.sigmoid()
elif self.activation_fn == 2:
style_tokens = style_tokens.tanh()
if self.vae:
if self.ss_vae:
zs = style_tokens
zu = self.ss_vae_layers(zs) # zu dist parameters
else:
zu = style_tokens
mu, logvar = zu.chunk(2, dim=1) # [B, 2*n_tokens] -> [B, n_tokens], [B, n_tokens]
style_tokens = self.reparameterize(mu, logvar) # [B, n_tokens], [B, n_tokens] -> [B, n_tokens]
if style_embedding is None:
style_embedding = torch.sum(style_tokens.unsqueeze(-1) * token_embedding, dim=1, keepdim=True) # [B, n_tokens] -> [B, 1, embed]
if self.output_tanh:
style_embedding = torch.tanh(style_embedding)
if self.vae:
if self.ss_vae:
zs = F.log_softmax(zs, dim=1)
return style_embedding, style_tokens, mu, logvar, zs
else:
return style_embedding, style_tokens, mu, logvar
else:
return style_embedding, style_tokens
|
the-stack_106_24517 | import click
from ...models import JudgeFactory
from ...utils.constants import default_judge
from ...utils.logging import logger
from ...utils.exceptions import handle_exceptions
from ...utils.launch import launch, substitute
from ...utils import config
judge_factory = JudgeFactory()
OJs = judge_factory.available_judges
@click.command(short_help="View a particular problem")
@click.option('-j', '--judge', 'judge_name', type=click.Choice(OJs),
prompt="Please provide a judge("+'|'.join(OJs)+")",
default=default_judge)
@click.option('-c', '--contest', type=click.STRING, help="contest code")
@click.option("--browser", help='Browser to launch',
default=config.read('settings.yml', 'browser_online'))
@click.argument('PROBLEM_CODE')
@handle_exceptions(BaseException)
def main(judge_name, contest, problem_code, browser):
'''
View a particular problem from the judge.
'''
judge = judge_factory.get_judge(judge_name)
problem_url = judge.get_problem_url(problem_code=problem_code,
contest_code=contest)
logger.debug('launching %s' % problem_url)
keymap = {
'URL': problem_url
}
status, browser = substitute(browser, keymap)
if status is True:
problem_url = ''
launch(browser, problem_url)
|
the-stack_106_24518 | import random
import torch
import torchvision
class Flip(object):
def __init__(self, params, data_types):
self.mode = params['Mode']
self.data_types = data_types
self.pre_torch = False
def forward(self, batch):
horizontal_flip = False
vertical_flip = False
if self.mode == 'both' or self.mode == 'horizontal':
horizontal_flip = random.random() < 0.5
if self.mode == 'both' or self.mode == 'vertical':
vertical_flip = random.random() < 0.5
for i, inputs in enumerate(batch):
if self.data_types[i] in ('image', 'video', 'array'):
flip_dims = []
if horizontal_flip:
flip_dims.append(3)
if vertical_flip:
flip_dims.append(2)
if flip_dims:
batch[i] = torch.flip(inputs, flip_dims)
elif self.data_types[i] == 'detection':
cls = inputs['detections'][:, 0]
sx = inputs['detections'][:, 1]
sy = inputs['detections'][:, 2]
ex = inputs['detections'][:, 3]
ey = inputs['detections'][:, 4]
# if we flip the coordinates, the smaller one becomes the bigger one
# so we need to swap the start/end coordinates too here, if we flip
if horizontal_flip:
sx, ex = 1-ex, 1-sx
if vertical_flip:
sy, ey = 1-ey, 1-sy
inputs['detections'] = torch.stack([cls, sx, sy, ex, ey], dim=1)
elif self.data_types[i] == 'shape':
px = inputs['points'][:, 0]
py = inputs['points'][:, 1]
                if horizontal_flip:
px = 1-px
if vertical_flip:
py = 1-py
inputs['points'] = torch.stack([px, py], dim=1)
return batch
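# Minimal usage sketch (not part of the original module): builds a Flip transform
# for a single image input and runs it on a dummy batch. The params/data_types
# format mirrors what __init__ and forward expect above; the tensor shape is an
# assumed [N, C, H, W] batch.
if __name__ == '__main__':
    flip = Flip({'Mode': 'horizontal'}, ['image'])
    dummy_batch = [torch.zeros(2, 3, 64, 64)]
    flipped = flip.forward(dummy_batch)
    print(flipped[0].shape)  # torch.Size([2, 3, 64, 64])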
|
the-stack_106_24521 | from folium import plugins
import pandas as pd
import numpy as np
import folium
class Map:
def __init__(self, data: pd.DataFrame):
lat = data.lat.sum()/len(data)
lon = data.lon.sum()/len(data)
self.map = folium.Map(location=[lat, lon], zoom_start=13)
def print(self):
return self.map
class LocationsMap(Map):
def __init__(self, data):
# Removing outliers
data = data.loc[data.distância < 10]
super().__init__(data)
first_sixth = np.quantile(data['preço'], 1 / 6)
last_sixth = np.quantile(data['preço'], 5 / 6)
ziped_data = zip(
data['lat'], data['lon'], data['preço'],
data['quartos'], data['banheiros'],
data['vagas'], data['área'], data['link'], data['img1']
)
for lat, lon, preco, n_quartos, n_banheiros, n_vagas, area, link, img in ziped_data:
html = f'''
<img src="{img}" alt="Foto anúncio" style="width:100%;">
<table id="vertical-1" class="centerTable">
<tr>
<th>Aluguel</th>
<td>R$ {preco},00</td>
</tr>
<tr>
<th>Quartos</th>
<td>{n_quartos}</td>
</tr>
<tr>
<th>Banheiros</th>
<td>{n_banheiros}</td>
</tr>
<tr>
<th>Vagas</th>
<td>{n_vagas}</td>
</tr>
<tr>
<th>Área</th>
<td>{area} m²</td>
</tr>
</table>
<div style="text-align:center">
<a href="{link}">link</a>
</div>'''
iframe = folium.IFrame(html,
width=250,
height=400)
popup = folium.Popup(iframe)
if preco <= first_sixth:
color = "green"
elif preco < last_sixth:
color = "orange"
else:
color = "red"
folium.Marker(
location=[lat, lon],
popup=popup,
icon=folium.Icon(icon="building", prefix="fa", color=color),
).add_to(self.map)
class HeatMap(Map):
def __init__(self, data):
# Removing outliers
data = data.loc[data.distância < 10]
# Remove the announcements with the same location
data = data.groupby(by=['lat', 'lon'])['preço'].mean().reset_index()
super().__init__(data)
heat_data = zip(
data['lat'], data['lon'], data['preço']
)
heat_map = folium.plugins.HeatMap(heat_data, name=None, min_opacity=0.5, gradient=None, overlay=True,
control=True, show=True)
heat_map.add_to(self.map)
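# Minimal usage sketch (not part of the original module). It assumes a pandas
# DataFrame with the columns referenced above ('lat', 'lon', 'preço', 'distância',
# 'quartos', 'banheiros', 'vagas', 'área', 'link', 'img1'); 'listings.csv' is a
# placeholder filename.
if __name__ == '__main__':
    listings = pd.read_csv('listings.csv')
    LocationsMap(listings).print().save('locations.html')  # one marker per listing
    HeatMap(listings).print().save('heatmap.html')          # price-weighted heat map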
|
the-stack_106_24523 | # Copyright 2020 Toyota Research Institute. All rights reserved.
import argparse
import numpy as np
import os
import torch
from glob import glob
from cv2 import imwrite
from packnet_sfm.models.model_wrapper import ModelWrapper
from packnet_sfm.datasets.augmentations import resize_image, to_tensor
from packnet_sfm.utils.horovod import hvd_init, rank, world_size, print0
from packnet_sfm.utils.image import load_image
from packnet_sfm.utils.config import parse_test_file
from packnet_sfm.utils.load import set_debug
from packnet_sfm.utils.depth import write_depth, inv2depth, viz_inv_depth
from packnet_sfm.utils.logging import pcolor
#from packnet_sfm.datasets.kitti_based_valeo_dataset_fisheye_singleView import KITTIBasedValeoDatasetFisheye_singleView
from packnet_sfm.datasets.kitti_based_valeo_dataset_fisheye_singleView import *
from packnet_sfm.geometry.camera_fisheye_valeo import CameraFisheye
from packnet_sfm.datasets.kitti_based_valeo_dataset_utils import \
pose_from_oxts_packet, read_calib_file, read_raw_calib_files_camera_valeo, transform_from_rot_trans
from packnet_sfm.geometry.pose import Pose
import open3d as o3d
def is_image(file, ext=('.png', '.jpg',)):
"""Check if a file is an image with certain extensions"""
return file.endswith(ext)
def parse_args():
parser = argparse.ArgumentParser(description='PackNet-SfM 3D visualization of point clouds maps from images')
parser.add_argument('--checkpoint1', type=str, help='Checkpoint (.ckpt)')
parser.add_argument('--checkpoint2', type=str, help='Checkpoint (.ckpt)')
parser.add_argument('--input1', type=str, help='Input file or folder')
parser.add_argument('--input2', type=str, help='Input file or folder')
parser.add_argument('--hasGTdepth1', type=int, choices=[0, 1], default=0)
parser.add_argument('--hasGTdepth2', type=int, choices=[0, 1], default=0)
parser.add_argument('--output1', type=str, help='Output file or folder')
parser.add_argument('--output2', type=str, help='Output file or folder')
parser.add_argument('--image_shape', type=int, nargs='+', default=None,
help='Input and output image shape '
'(default: checkpoint\'s config.datasets.augmentation.image_shape)')
parser.add_argument('--half', action="store_true", help='Use half precision (fp16)')
parser.add_argument('--save', type=str, choices=['npz', 'png'], default=None,
help='Save format (npz or png). Default is None (no depth map is saved).')
args = parser.parse_args()
assert args.checkpoint1.endswith('.ckpt'), \
'You need to provide a .ckpt file as checkpoint'
assert args.checkpoint2.endswith('.ckpt'), \
'You need to provide a .ckpt file as checkpoint'
assert args.image_shape is None or len(args.image_shape) == 2, \
'You need to provide a 2-dimensional tuple as shape (H,W)'
assert (is_image(args.input1) and is_image(args.output1)) or \
        (not is_image(args.input1) and not is_image(args.output1)), \
'Input and output must both be images or folders'
assert (is_image(args.input2) and is_image(args.output2)) or \
        (not is_image(args.input2) and not is_image(args.output2)), \
'Input and output must both be images or folders'
return args
def get_next_file(idx, file):
"""Get next file given next idx and current file."""
base, ext = os.path.splitext(os.path.basename(file))
base_splitted = base.split('_')
base_number = base_splitted[-1]
return os.path.join(os.path.dirname(file), '_'.join(base_splitted[:-1]) + '_' + str(idx).zfill(len(base_number)) + ext)
def get_base_folder(image_file):
"""The base folder"""
return '/'.join(image_file.split('/')[:-6])
def get_frame_index_int(image_file):
"""Returns an int-type index of the image file"""
return int(image_file.split('_')[-1].split('.')[0])
def get_camera_name(image_file):
"""Returns 'cam_i', i between 0 and 4"""
return image_file.split('/')[-2]
def get_sequence_name(image_file):
"""Returns a sequence name like '20180227_185324'."""
return image_file.split('/')[-3]
def get_split_type(image_file):
"""Returns 'train', 'test' or 'test_sync'."""
return image_file.split('/')[-4]
def get_images_type(image_file):
"""Returns 'images_multiview' or 'images_multiview_frontOnly."""
return image_file.split('/')[-5]
def get_current_folder(image_file):
"""Get the current folder from image_file."""
return os.path.dirname(image_file)
def get_path_to_theta_lut(image_file):
"""Get the current folder from image_file."""
return os.path.join(get_base_folder(image_file),
'calibrations_theta_lut',
'fisheye',
get_split_type(image_file),
get_sequence_name(image_file),
get_sequence_name(image_file) + '_' + get_camera_name(image_file) + '_1280_800.npy')
def get_path_to_ego_mask(image_file):
"""Get the current folder from image_file."""
return os.path.join(get_base_folder(image_file),
'semantic_masks',
'fisheye',
get_split_type(image_file),
get_sequence_name(image_file),
get_sequence_name(image_file) + '_' + get_camera_name(image_file) + '.npy')
def get_intrinsics(image_file, calib_data):
"""Get intrinsics from the calib_data dictionary."""
cam = get_camera_name(image_file)
#intr = calib_data[cam]['intrinsics']
base_intr = calib_data[cam]['base_intrinsics']
intr = calib_data[cam]['intrinsics']
poly_coeffs = np.array([float(intr['c1']),
float(intr['c2']),
float(intr['c3']),
float(intr['c4'])])
principal_point = np.array([float(base_intr['cx_offset_px']),
float(base_intr['cy_offset_px'])])
scale_factors = np.array([1., float(intr['pixel_aspect_ratio'])])
return poly_coeffs, principal_point, scale_factors
def get_depth_file(image_file):
"""Get the corresponding depth file from an image file."""
base, ext = os.path.splitext(os.path.basename(image_file))
return os.path.join(get_base_folder(image_file),
'depth_maps',
'fisheye',
get_split_type(image_file),
get_sequence_name(image_file),
get_camera_name(image_file).replace('cam', 'velodyne'),
base.replace('cam', 'velodyne') + '.npz')
def get_extrinsics_pose_matrix(image_file, calib_data):
"""Get intrinsics from the calib_data dictionary."""
cam = get_camera_name(image_file)
extr = calib_data[cam]['extrinsics']
t = np.array([float(extr['pos_x_m']), float(extr['pos_y_m']), float(extr['pos_z_m'])])
x_rad = np.pi / 180. * float(extr['rot_x_deg'])
z1_rad = np.pi / 180. * float(extr['rot_z1_deg'])
z2_rad = np.pi / 180. * float(extr['rot_z2_deg'])
x_rad += np.pi # gcam
#z1_rad += np.pi # gcam
#z2_rad += np.pi # gcam
cosx = np.cos(x_rad)
sinx = np.sin(x_rad)
cosz1 = np.cos(z1_rad)
sinz1 = np.sin(z1_rad)
cosz2 = np.cos(z2_rad)
sinz2 = np.sin(z2_rad)
Rx = np.array([[ 1, 0, 0],
[ 0, cosx, sinx],
[ 0, -sinx, cosx]])
Rz1 = np.array([[ cosz1, sinz1, 0],
[-sinz1, cosz1, 0],
[ 0, 0, 1]])
Rz2 = np.array([[cosz2, -sinz2, 0],
[sinz2, cosz2, 0],
[ 0, 0, 1]])
R = np.matmul(Rz2, np.matmul(Rx, Rz1))
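    # Assumed interpretation: R composes the z1 / x / z2 Euler rotations; the
    # -R @ t below re-expresses the camera position t (given in world coordinates)
    # as the translation component of the world-to-camera transform.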
T_other_convention = -np.dot(R,t)
pose_matrix = transform_from_rot_trans(R, T_other_convention).astype(np.float32)
#pose_matrix = invert_pose_numpy(pose_matrix)
return pose_matrix
def display_inlier_outlier(cloud, ind):
inlier_cloud = cloud.select_by_index(ind)
outlier_cloud = cloud.select_by_index(ind, invert=True)
print("Showing outliers (red) and inliers (gray): ")
outlier_cloud.paint_uniform_color([1, 0, 0])
inlier_cloud.paint_uniform_color([0.8, 0.8, 0.8])
o3d.visualization.draw_geometries([inlier_cloud, outlier_cloud])
@torch.no_grad()
def infer_and_save_depth(input_file, output_file, model_wrapper, image_shape, half, save):
"""
Process a single input file to produce and save visualization
Parameters
----------
input_file : str
Image file
output_file : str
Output file, or folder where the output will be saved
model_wrapper : nn.Module
Model wrapper used for inference
image_shape : Image shape
Input image shape
half: bool
use half precision (fp16)
save: str
Save format (npz or png)
"""
if not is_image(output_file):
# If not an image, assume it's a folder and append the input name
os.makedirs(output_file, exist_ok=True)
output_file = os.path.join(output_file, os.path.basename(input_file))
# change to half precision for evaluation if requested
dtype = torch.float16 if half else None
# Load image
image = load_image(input_file)
# Resize and to tensor
image = resize_image(image, image_shape)
image = to_tensor(image).unsqueeze(0)
# Send image to GPU if available
if torch.cuda.is_available():
image = image.to('cuda:{}'.format(rank()), dtype=dtype)
# Depth inference (returns predicted inverse depth)
pred_inv_depth = model_wrapper.depth(image)[0]
if save == 'npz' or save == 'png':
# Get depth from predicted depth map and save to different formats
filename = '{}.{}'.format(os.path.splitext(output_file)[0], save)
print('Saving {} to {}'.format(
pcolor(input_file, 'cyan', attrs=['bold']),
pcolor(filename, 'magenta', attrs=['bold'])))
write_depth(filename, depth=inv2depth(pred_inv_depth))
else:
# Prepare RGB image
rgb = image[0].permute(1, 2, 0).detach().cpu().numpy() * 255
# Prepare inverse depth
viz_pred_inv_depth = viz_inv_depth(pred_inv_depth[0]) * 255
# Concatenate both vertically
image = np.concatenate([rgb, viz_pred_inv_depth], 0)
# Save visualization
print('Saving {} to {}'.format(
pcolor(input_file, 'cyan', attrs=['bold']),
pcolor(output_file, 'magenta', attrs=['bold'])))
imwrite(output_file, image[:, :, ::-1])
@torch.no_grad()
def infer_plot_and_save_3D_pcl(input_file1, input_file2,
output_file1, output_file2,
model_wrapper1, model_wrapper2,
hasGTdepth1, hasGTdepth2,
image_shape, half, save):
"""
Process a single input file to produce and save visualization
Parameters
----------
input_file : str
Image file
output_file : str
Output file, or folder where the output will be saved
model_wrapper : nn.Module
Model wrapper used for inference
image_shape : Image shape
Input image shape
half: bool
use half precision (fp16)
save: str
Save format (npz or png)
"""
if not is_image(output_file1):
# If not an image, assume it's a folder and append the input name
os.makedirs(output_file1, exist_ok=True)
output_file1 = os.path.join(output_file1, os.path.basename(input_file1))
if not is_image(output_file2):
# If not an image, assume it's a folder and append the input name
os.makedirs(output_file2, exist_ok=True)
output_file2 = os.path.join(output_file2, os.path.basename(input_file2))
# change to half precision for evaluation if requested
dtype = torch.float16 if half else None
# Load image
image1 = load_image(input_file1).convert('RGB')
image2 = load_image(input_file2).convert('RGB')
# Resize and to tensor
image1 = resize_image(image1, image_shape)
image2 = resize_image(image2, image_shape)
image1 = to_tensor(image1).unsqueeze(0)
image2 = to_tensor(image2).unsqueeze(0)
# Send image to GPU if available
if torch.cuda.is_available():
image1 = image1.to('cuda:{}'.format(rank()), dtype=dtype)
image2 = image2.to('cuda:{}'.format(rank()), dtype=dtype)
# Depth inference (returns predicted inverse depth)
pred_inv_depth1 = model_wrapper1.depth(image1)
pred_inv_depth2 = model_wrapper2.depth(image2)
pred_depth1 = inv2depth(pred_inv_depth1)
pred_depth2 = inv2depth(pred_inv_depth2)
base_folder_str1 = get_base_folder(input_file1)
split_type_str1 = get_split_type(input_file1)
seq_name_str1 = get_sequence_name(input_file1)
camera_str1 = get_camera_name(input_file1)
base_folder_str2 = get_base_folder(input_file2)
split_type_str2 = get_split_type(input_file2)
seq_name_str2 = get_sequence_name(input_file2)
camera_str2 = get_camera_name(input_file2)
calib_data1 = {}
calib_data2 = {}
calib_data1[camera_str1] = read_raw_calib_files_camera_valeo(base_folder_str1, split_type_str1, seq_name_str1, camera_str1)
calib_data2[camera_str2] = read_raw_calib_files_camera_valeo(base_folder_str2, split_type_str2, seq_name_str2, camera_str2)
path_to_theta_lut1 = get_path_to_theta_lut(input_file1)
path_to_ego_mask1 = get_path_to_ego_mask(input_file1)
poly_coeffs1, principal_point1, scale_factors1 = get_intrinsics(input_file1, calib_data1)
path_to_theta_lut2 = get_path_to_theta_lut(input_file2)
path_to_ego_mask2 = get_path_to_ego_mask(input_file2)
poly_coeffs2, principal_point2, scale_factors2 = get_intrinsics(input_file2, calib_data2)
poly_coeffs1 = torch.from_numpy(poly_coeffs1).unsqueeze(0)
principal_point1 = torch.from_numpy(principal_point1).unsqueeze(0)
scale_factors1 = torch.from_numpy(scale_factors1).unsqueeze(0)
poly_coeffs2 = torch.from_numpy(poly_coeffs2).unsqueeze(0)
principal_point2 = torch.from_numpy(principal_point2).unsqueeze(0)
scale_factors2 = torch.from_numpy(scale_factors2).unsqueeze(0)
pose_matrix1 = torch.from_numpy(get_extrinsics_pose_matrix(input_file1, calib_data1)).unsqueeze(0)
pose_matrix2 = torch.from_numpy(get_extrinsics_pose_matrix(input_file2, calib_data2)).unsqueeze(0)
pose_tensor1 = Pose(pose_matrix1)
pose_tensor2 = Pose(pose_matrix2)
ego_mask1 = np.load(path_to_ego_mask1)
ego_mask2 = np.load(path_to_ego_mask2)
not_masked1 = ego_mask1.astype(bool).reshape(-1)
not_masked2 = ego_mask2.astype(bool).reshape(-1)
cam1 = CameraFisheye(path_to_theta_lut=[path_to_theta_lut1],
path_to_ego_mask=[path_to_ego_mask1],
poly_coeffs=poly_coeffs1.float(),
principal_point=principal_point1.float(),
scale_factors=scale_factors1.float(),
Tcw=pose_tensor1)
cam2 = CameraFisheye(path_to_theta_lut=[path_to_theta_lut2],
path_to_ego_mask=[path_to_ego_mask2],
poly_coeffs=poly_coeffs2.float(),
principal_point=principal_point2.float(),
scale_factors=scale_factors2.float(),
Tcw=pose_tensor2)
if torch.cuda.is_available():
cam1 = cam1.to('cuda:{}'.format(rank()), dtype=dtype)
cam2 = cam2.to('cuda:{}'.format(rank()), dtype=dtype)
world_points1 = cam1.reconstruct(pred_depth1, frame='w')
world_points1 = world_points1[0].cpu().numpy()
world_points1 = world_points1.reshape((3,-1)).transpose()
world_points2 = cam2.reconstruct(pred_depth2, frame='w')
world_points2 = world_points2[0].cpu().numpy()
world_points2 = world_points2.reshape((3,-1)).transpose()
if hasGTdepth1:
gt_depth_file1 = get_depth_file(input_file1)
gt_depth1 = np.load(gt_depth_file1)['velodyne_depth'].astype(np.float32)
gt_depth1 = torch.from_numpy(gt_depth1).unsqueeze(0).unsqueeze(0)
if torch.cuda.is_available():
gt_depth1 = gt_depth1.to('cuda:{}'.format(rank()), dtype=dtype)
gt_depth_3d1 = cam1.reconstruct(gt_depth1, frame='w')
gt_depth_3d1 = gt_depth_3d1[0].cpu().numpy()
gt_depth_3d1 = gt_depth_3d1.reshape((3,-1)).transpose()
if hasGTdepth2:
gt_depth_file2 = get_depth_file(input_file2)
gt_depth2 = np.load(gt_depth_file2)['velodyne_depth'].astype(np.float32)
gt_depth2 = torch.from_numpy(gt_depth2).unsqueeze(0).unsqueeze(0)
if torch.cuda.is_available():
gt_depth2 = gt_depth2.to('cuda:{}'.format(rank()), dtype=dtype)
gt_depth_3d2 = cam2.reconstruct(gt_depth2, frame='w')
gt_depth_3d2 = gt_depth_3d2[0].cpu().numpy()
gt_depth_3d2 = gt_depth_3d2.reshape((3, -1)).transpose()
world_points1 = world_points1[not_masked1]
world_points2 = world_points2[not_masked2]
if hasGTdepth1:
gt_depth_3d1 = gt_depth_3d1[not_masked1]
if hasGTdepth2:
        gt_depth_3d2 = gt_depth_3d2[not_masked2]
pcl1 = o3d.geometry.PointCloud()
pcl1.points = o3d.utility.Vector3dVector(world_points1)
img_numpy1 = image1[0].cpu().numpy()
img_numpy1 = img_numpy1.reshape((3,-1)).transpose()
pcl2 = o3d.geometry.PointCloud()
pcl2.points = o3d.utility.Vector3dVector(world_points2)
img_numpy2 = image2[0].cpu().numpy()
img_numpy2 = img_numpy2.reshape((3,-1)).transpose()
img_numpy1 = img_numpy1[not_masked1]
pcl1.colors = o3d.utility.Vector3dVector(img_numpy1)
img_numpy2 = img_numpy2[not_masked2]
pcl2.colors = o3d.utility.Vector3dVector(img_numpy2)
#pcl.paint_uniform_color([1.0, 0.0, 0])
#print("Radius oulier removal")
#cl, ind = pcl.remove_radius_outlier(nb_points=10, radius=0.5)
#display_inlier_outlier(pcl, ind)
remove_outliers = True
if remove_outliers:
cl1, ind1 = pcl1.remove_statistical_outlier(nb_neighbors=10, std_ratio=1.3)
#display_inlier_outlier(pcl, ind)
inlier_cloud1 = pcl1.select_by_index(ind1)
#inlier_cloud.paint_uniform_color([0.0, 1.0, 0])
outlier_cloud1 = pcl1.select_by_index(ind1, invert=True)
outlier_cloud1.paint_uniform_color([0.0, 0.0, 1.0])
cl2, ind2 = pcl2.remove_statistical_outlier(nb_neighbors=10, std_ratio=1.3)
#display_inlier_outlier(pcl, ind)
inlier_cloud2 = pcl2.select_by_index(ind2)
#inlier_cloud.paint_uniform_color([0.0, 1.0, 0])
outlier_cloud2 = pcl2.select_by_index(ind2, invert=True)
outlier_cloud2.paint_uniform_color([0.0, 0.0, 1.0])
if hasGTdepth1:
pcl_gt1 = o3d.geometry.PointCloud()
pcl_gt1.points = o3d.utility.Vector3dVector(gt_depth_3d1)
pcl_gt1.paint_uniform_color([1.0, 0.0, 0])
if hasGTdepth2:
pcl_gt2 = o3d.geometry.PointCloud()
pcl_gt2.points = o3d.utility.Vector3dVector(gt_depth_3d2)
pcl_gt2.paint_uniform_color([1.0, 0.0, 0])
if remove_outliers:
toPlot = [inlier_cloud1, inlier_cloud2]
if hasGTdepth1:
toPlot.append(pcl_gt1)
if hasGTdepth2:
toPlot.append(pcl_gt2)
toPlotClear = list(toPlot)
toPlot.append(outlier_cloud1)
toPlot.append(outlier_cloud2)
o3d.visualization.draw_geometries(toPlot)
o3d.visualization.draw_geometries(toPlotClear)
toPlot = [pcl1, pcl2]
if hasGTdepth1:
toPlot.append(pcl_gt1)
if hasGTdepth2:
toPlot.append(pcl_gt2)
o3d.visualization.draw_geometries(toPlot)
rgb1 = image1[0].permute(1, 2, 0).detach().cpu().numpy() * 255
rgb2 = image2[0].permute(1, 2, 0).detach().cpu().numpy() * 255
# Prepare inverse depth
viz_pred_inv_depth1 = viz_inv_depth(pred_inv_depth1[0]) * 255
viz_pred_inv_depth2 = viz_inv_depth(pred_inv_depth2[0]) * 255
# Concatenate both vertically
image1 = np.concatenate([rgb1, viz_pred_inv_depth1], 0)
image2 = np.concatenate([rgb2, viz_pred_inv_depth2], 0)
# Save visualization
print('Saving {} to {}'.format(
pcolor(input_file1, 'cyan', attrs=['bold']),
pcolor(output_file1, 'magenta', attrs=['bold'])))
imwrite(output_file1, image1[:, :, ::-1])
print('Saving {} to {}'.format(
pcolor(input_file2, 'cyan', attrs=['bold']),
pcolor(output_file2, 'magenta', attrs=['bold'])))
imwrite(output_file2, image2[:, :, ::-1])
def main(args):
# Initialize horovod
hvd_init()
# Parse arguments
config1, state_dict1 = parse_test_file(args.checkpoint1)
config2, state_dict2 = parse_test_file(args.checkpoint2)
# If no image shape is provided, use the checkpoint one
image_shape = args.image_shape
if image_shape is None:
image_shape = config1.datasets.augmentation.image_shape
# Set debug if requested
set_debug(config1.debug)
# Initialize model wrapper from checkpoint arguments
model_wrapper1 = ModelWrapper(config1, load_datasets=False)
model_wrapper2 = ModelWrapper(config2, load_datasets=False)
# Restore monodepth_model state
model_wrapper1.load_state_dict(state_dict1)
model_wrapper2.load_state_dict(state_dict2)
# change to half precision for evaluation if requested
dtype = torch.float16 if args.half else None
# Send model to GPU if available
if torch.cuda.is_available():
model_wrapper1 = model_wrapper1.to('cuda:{}'.format(rank()), dtype=dtype)
model_wrapper2 = model_wrapper2.to('cuda:{}'.format(rank()), dtype=dtype)
# Set to eval mode
model_wrapper1.eval()
model_wrapper2.eval()
if os.path.isdir(args.input1):
# If input file is a folder, search for image files
files1 = []
for ext in ['png', 'jpg']:
files1.extend(glob((os.path.join(args.input1, '*.{}'.format(ext)))))
files1.sort()
print0('Found {} files'.format(len(files1)))
else:
# Otherwise, use it as is
files1 = [args.input1]
if os.path.isdir(args.input2):
# If input file is a folder, search for image files
files2 = []
for ext in ['png', 'jpg']:
files2.extend(glob((os.path.join(args.input2, '*.{}'.format(ext)))))
files2.sort()
print0('Found {} files'.format(len(files2)))
else:
# Otherwise, use it as is
files2 = [args.input2]
n_files = len(files1)
# Process each file
for fn1, fn2 in zip(files1[rank()::world_size()], files2[rank()::world_size()]):
infer_plot_and_save_3D_pcl(fn1, fn2,
args.output1, args.output2,
model_wrapper1, model_wrapper2,
bool(int(args.hasGTdepth1)), bool(int(args.hasGTdepth2)),
image_shape, args.half, args.save)
if __name__ == '__main__':
args = parse_args()
main(args)
|
the-stack_106_24526 | #!/usr/bin/python
import socket
import time
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # UDP socket
udp_host = 'localhost'
udp_port = 12345
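# Flow of this demo client (as implied by the code below): send one datagram,
# print the server's reply, wait two seconds, then send an empty datagram so the
# server knows this client has finished.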
msg = "hola mundo en minuscula"
msg = bytes(msg,'utf-8')
print ("IP destino:", udp_host)
print ("Puerto:", udp_port)
sock.sendto(msg,(udp_host,udp_port)) # envio a server UDP
data,addr = sock.recvfrom(1024) #recepcion del servidor UDP
print("Mensaje del servidor: ",data," direccion: ",addr)
time.sleep(2) #esperamos dos segundos porque si para enviar mensaje de terminado
msg = ""
msg = bytes(msg,'utf-8')
sock.sendto(msg,(udp_host,udp_port)) # Sending message to UDP server |
the-stack_106_24527 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Brain that can continuously learn from human demonstrations."""
import glob
import math
import os
import platform
import random
import shutil
import time
import uuid
from learner.brains import data_protobuf_generator
from learner.brains import demonstration_buffer
from learner.brains import eval_datastore
from learner.brains import imitation_loss
from learner.brains import networks
from learner.brains import numpy_replay_buffer
from learner.brains import policies
from learner.brains import saved_model_to_tflite_model
from learner.brains import tfa_specs
from log import falken_logging
import tensorflow as tf
# Used to workaround https://github.com/tensorflow/tensorflow/issues/41380
from tensorflow.python.framework import errors as tf_errors
from tensorflow.python.lib.io import file_io as tf_file_io
from tf_agents.agents.behavioral_cloning import behavioral_cloning_agent
from tf_agents.policies import greedy_policy
from tf_agents.policies import policy_saver
from tf_agents.trajectories import time_step as ts
from tf_agents.utils import common
_DEFAULT_REPLAY_BUFFER_CAPACITY = 2 * 3600 * 10 # 2 hours at 10 fps.
class UnknownHParamError(Exception):
"""Raised if there is an unknown hyperparameter in the assignment."""
class NoCheckpointFoundError(Exception):
"""Raised if not checkpoint was found at a specified location."""
class BCAgent(behavioral_cloning_agent.BehavioralCloningAgent):
"""A behavioral cloning based TF agent."""
def __init__(self,
brain_spec,
hparams,
use_summaries):
"""Create a new BC tf-agent with the provided hyperparameters."""
assert brain_spec
falken_logging.info(f'Creating BC Agent with hparams: {hparams}')
self.actor_net = networks.FalkenNetwork(brain_spec, hparams)
optimizer = tf.compat.v1.train.AdamOptimizer(
learning_rate=hparams['learning_rate'])
super().__init__(
ts.time_step_spec(brain_spec.observation_spec.tfa_spec),
brain_spec.action_spec.tfa_spec,
optimizer=optimizer,
cloning_network=self.actor_net,
num_outer_dims=2,
loss_fn=self._dict_loss,
debug_summaries=use_summaries,
train_step_counter=tf.Variable(0, dtype=tf.int64, trainable=False,
name='train_step_counter'),
summarize_grads_and_vars=False)
self.initialize()
# Force variable initialization. This triggers the initializers to
    # be called and pruned, avoiding errant placeholders in the
# checkpoint.
self.policy.variables()
def _dict_loss(self, experience, training=True):
batch_size = (
tf.compat.dimension_value(experience.step_type.shape[0]) or
tf.shape(experience.step_type)[0])
network_state = self.cloning_network.get_initial_state(batch_size)
values, _ = self.cloning_network(
experience.observation,
experience.step_type,
training=training,
network_state=network_state)
targets = experience.action
return imitation_loss.dict_loss(values, targets)
@staticmethod
def update_derived_hparams(hparams):
"""Update a hyperparameter dictionary with derived parameters.
Args:
hparams: Hyperparameter dictionary to modify.
Returns:
Dictionary updated with derived hparameters.
"""
hparams['training_steps'] = (
int(math.ceil(hparams['training_examples'] / hparams['batch_size'])))
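    # e.g. with the defaults below: 1,000,000 training_examples / 500 batch_size
    # -> 2,000 training_steps.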
return hparams
@staticmethod
def default_hparams():
"""Return the default hyperparameters for BC Agent."""
return BCAgent.update_derived_hparams(dict(
learning_rate=5e-4,
fc_layers=[32],
# 'feelers_version' is one of:
# 'v1': Classic feelers.
# 'v2': New feelers, as defined in go/updated-feelers.
feelers_version='v1',
feelers_v2_output_channels=3,
feelers_v2_kernel_size=5,
dropout=None,
activation_fn='swish',
# 'policy_type' is one of the following:
# 'greedy': Selects the mode of the action distributions.
# 'noisy': Stochastically samples from the action distributions.
# 'greedy_float': Uses 'greedy' for float-valued actions and
# 'noisy' for all others (e.g., categorical actions).
policy_type='greedy_float',
# Number of trajectories in each training batch processed by train().
batch_size=500,
# Number of trajectories to process on each call to train().
training_examples=1000000,
# Whether to compile the graphs.
use_tf_function=True,
# 'use_xla_jit' indicates whether we want to translate the inner
# training loop using XLA JIT.
use_xla_jit=True,
# 'num_batches_to_sample' is the number of batches to sample before
# entering the inner training loop. Presampled batches are used in a
# round-robin fashion. A value of 50 should ensure that the sample is
# representative even for larger replay buffers while adding around
# ~35% to required memory compared to the replay buffer.
num_batches_to_sample=50))
def evaluate(self, data):
"""Compute average eval loss for a trajectory of data."""
with tf.name_scope('eval_loss'):
loss = self._dict_loss(data)
return tf.math.reduce_mean(loss)
def _outer_dim_length(nested_tensor):
"""Return length of the outer (time or batch) dimension."""
flat = tf.nest.flatten(nested_tensor)
if not flat:
return 0
else:
return len(flat[0])
def _select_sub_trajectories(traj_generator,
select_fraction,
rng):
"""Selects a random subtrajectory of a specified length.
This selects a contiguous segment from the input trajectory and returns the
trajectory split into three sub-trajectories: Before the selected fragment,
the selected fragment and after the selected fragment.
Args:
traj_generator: Generates pairs (trajectory, length) where length indicates
the size of the outermost (=time) dimension of the trajectory data.
select_fraction: Fraction of the trajectory to select.
rng: Random number generator.
Yields:
Triplets of trajectory (before, selected, after), which randomly subdivide
the trajectory object into three subtrajectories. The middle element has
length select_fraction * trajectory_length. Note that tuple elements yielded
will be None if any trajectory segment would have length 0.
"""
for trajectory, frames in traj_generator:
select_frames = int(select_fraction * frames)
if not select_frames:
yield trajectory, None, None
continue
last_select_index = frames - select_frames
start = rng.randint(0, last_select_index)
end = start + select_frames
assert end <= frames
def select(start, end):
if start == end:
return None
return tf.nest.map_structure(
lambda x: x[start: end], # pylint: disable=cell-var-from-loop
trajectory) # pylint: disable=cell-var-from-loop
yield select(0, start), select(start, end), select(end, frames)
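# Worked example (illustrative): for an episode of 10 frames with
# select_fraction=0.2, select_frames is 2; if the RNG picks start=3 then end=5
# and the generator yields the slices [0:3], [3:5] and [5:10], i.e. the frames
# before, inside and after the held-out evaluation segment.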
def _create_checkpointer(checkpoint_dir, trackable_objects):
"""Create a checkpoint manager for the specified directory.
Args:
checkpoint_dir: Directory to manage and optionally load checkpoints
from.
trackable_objects: Dictionary of TensorFlow objects to save in the
checkpoint.
Returns:
(checkpointer, restored) tuple where checkpointer is an instance to a
TF agents Checkpointer instance and restored is a bool value indicating
whether the checkpointer restored from the checkpoint.
"""
checkpointer = common.Checkpointer(ckpt_dir=checkpoint_dir, max_to_keep=1,
**trackable_objects)
if not checkpointer.checkpoint_exists:
checkpointer.initialize_or_restore()
return (checkpointer, checkpointer.checkpoint_exists)
def _generate_random_steps(number_of_frames, brain_spec):
"""Generate demonstration_buffer.Step instances.
Args:
number_of_frames: Number of frames of random steps to generate.
brain_spec: BrainSpec to use to generate the data.
Yields:
demonstration_buffer.Step instance populated with random data that
conforms to the provided brain_spec.
"""
for i, step_phase in demonstration_buffer.generate_index_and_step_phase(
number_of_frames, demonstration_buffer.StepPhase.SUCCESS):
yield demonstration_buffer.Step(
observation_pb=(
data_protobuf_generator.DataProtobufGenerator.from_spec_node(
brain_spec.observation_spec.proto_node,
modify_data_proto=(
data_protobuf_generator.DataProtobufGenerator.
randomize_leaf_data_proto)))[0],
reward=0, phase=step_phase, episode_id='0',
action_pb=data_protobuf_generator.DataProtobufGenerator.from_spec_node(
brain_spec.action_spec.proto_node,
modify_data_proto=(
data_protobuf_generator.DataProtobufGenerator.
randomize_leaf_data_proto))[0],
timestamp_micros=i)
def _atomic_write_string_to_file(filename, contents, overwrite=True):
"""Write a string to a file.
Args:
filename: File to write.
contents: What to write to the file.
overwrite: Whether to overwrite the target file.
Raises:
tf_errors.OpError: If it's not possible to write the file.
"""
if not tf_file_io.has_atomic_move(filename):
tf_file_io.write_string_to_file(filename, contents)
else:
temp_pathname = filename + '.tmp' + uuid.uuid4().hex
tf_file_io.write_string_to_file(temp_pathname, contents)
try:
if overwrite and os.path.exists(filename):
os.remove(filename)
tf_file_io.rename(temp_pathname, filename, overwrite)
except tf_errors.OpError:
tf_file_io.delete_file(temp_pathname)
raise
def _patch_atomic_write_string_to_file():
"""Patch for TensorFlow 2.5.0 to workaround file saving bug on Windows.
See https://github.com/tensorflow/tensorflow/issues/41380
"""
if platform.system().lower().startswith('windows'):
tf_file_io.atomic_write_string_to_file = _atomic_write_string_to_file
class ContinuousImitationBrain:
"""An ML powered brain that continuously learns from demonstration.
Attributes:
id: string, GUID for each Brain instance.
spec_pb: BrainSpec proto that was used to create the Brain.
policy_path: string, the path where the policies are stored.
summary_path: string, the path where TF summaries are stored.
checkpoint_path: string, the path where model checkpoints are stored.
brain_spec: tfa_specs.BrainSpec created from spec_pb.
tf_agent: tf_agent object, the agent created for the purposes of training.
data_timestamp_micros: Either 0 if no data present or the timestamp up to
which episode data has been fetched.
latest_checkpoint: The latest local checkpoint or None if not present.
policy_saver: PolicySaver instance which can be used to save the current
policy.
"""
# Fraction of frames to keep in eval buffer.
_EVAL_FRACTION = 0.2
# Seed for the RNG that divides episodes into training and eval set.
  # The implementation uses a fixed seed, since offline scores sometimes need
  # to be compared across sessions (e.g., during a Vizier study).
_EVAL_SPLIT_SEED = 12345
# How often to write summaries in seconds.
_SUMMARIES_FLUSH_SECS = 10
def __init__(self,
brain_id,
spec_pb,
checkpoint_path,
summary_path=None,
replay_buffer_capacity=_DEFAULT_REPLAY_BUFFER_CAPACITY,
hparams=None,
write_tflite=True,
use_summaries=False,
compile_graph=True):
"""Create a new ContinuousImitationBrain.
Args:
brain_id: Deprecated, will be removed.
spec_pb: BrainSpec proto describing the properties of the brain to create.
checkpoint_path: string, path where the TF checkpoints will be stored.
summary_path: string, path where the TF summaries will be stored.
replay_buffer_capacity: Frame capacity of the replay buffer.
hparams: dict with hyperparameters for BCAgent, or None if default
hparams should be used.
write_tflite: Whether to convert saved models to tf-lite when saving
policies.
use_summaries: Whether to write TF summaries.
compile_graph: Whether to compile the graph.
Raises:
ValueError: if any of the paths isn't set.
"""
assert spec_pb
_patch_atomic_write_string_to_file()
_ = brain_id
self.spec_pb = spec_pb
self.brain_spec = tfa_specs.BrainSpec(spec_pb)
self.tf_agent = None
self._eval_split_rng = random.Random(self._EVAL_SPLIT_SEED)
self._demo_buffer = None
self._eval_datastore = None
self._replay_buffer = None
self._replay_buffer_capacity = replay_buffer_capacity
# Non-parameter members:
self._reinitialize_dataset()
self._hparams = (
BCAgent.update_derived_hparams(dict(hparams)) if hparams else
BCAgent.default_hparams())
self._write_tflite = write_tflite
self._policy_saver = None
# Stats logged while training.
self._num_train_frames = tf.Variable(0, dtype=tf.int32, trainable=False,
name='_num_train_frames')
self._num_eval_frames = tf.Variable(0, dtype=tf.int32, trainable=False,
name='_num_eval_frames')
if not checkpoint_path or (use_summaries and not summary_path):
raise ValueError(
'checkpoint_path and summary_path (when using summaries) must be '
'set.')
self._use_summaries = use_summaries
self._summary_path = None
self._train_summary_writer = None
self.summary_path = summary_path # Optionally create the summary writer.
self._checkpoint_path = checkpoint_path
self._checkpoint_trackable_objects = None
self._checkpointer = None
self._get_experiences = None
self._train_step = None
self.reinitialize_agent(compile_graph) # Also sets up checkpointer.
def _initialize_step_buffers(self):
"""Initialize the replay buffer, demo buffer and eval datastore."""
# TF Replay buffer
self._replay_buffer = numpy_replay_buffer.NumpyReplayBuffer(
self.tf_agent.collect_data_spec,
capacity=self._replay_buffer_capacity)
self._reinitialize_dataset()
# Temporary data buffer that accumulates full episodes before dividing the
# data between training and eval set
self._demo_buffer = demonstration_buffer.DemonstrationBuffer(
self.brain_spec)
# Storage for eval data.
self._eval_datastore = eval_datastore.EvalDatastore()
self.clear_step_buffers()
def reinitialize_agent(self, compile_graph=False):
"""Re-initialize the tf-agent.
If the checkpoint directory is wiped before calling this, this will trigger
a reinitialization of network weights.
Args:
compile_graph: Whether to pre-compile the graph.
"""
falken_logging.info('(Re-)initializing BCAgent.')
if not self.tf_agent:
self.tf_agent = BCAgent(
self.brain_spec,
self._hparams,
use_summaries=self._use_summaries)
# We trigger retracing of the train_step function on restart, since
# otherwise, some of the variables seem to be reused.
    initial_time = time.perf_counter()
if self._hparams['use_tf_function']:
self._get_experiences = common.function(
self._py_fun_get_experiences,
autograph=True)
self._train_step = common.function(
self._py_fun_train_step,
autograph=True,
experimental_compile=self._hparams['use_xla_jit'])
else:
self._get_experiences = self._py_fun_get_experiences
self._train_step = self._py_fun_train_step
falken_logging.info('Retraced train functions in '
                        f'{time.perf_counter() - initial_time} secs.')
# Initialize demo, eval and replay buffers.
self._initialize_step_buffers()
# Force JIT compilation.
if compile_graph:
self._compile_graph()
# Keys are names, values are trackable objects (see tf.train.Checkpoint
# for details on trackables).
self._checkpoint_trackable_objects = {
'agent': self.tf_agent,
'train_step_counter': self.tf_agent.train_step_counter,
'actor_net': self.tf_agent.actor_net,
}
# Create a checkpointer and, if it exists, restore from a checkpoint.
self._checkpointer, restored_from_checkpoint = _create_checkpointer(
self._checkpoint_path, self._checkpoint_trackable_objects)
# If we didn't restore from a checkpoint, try restoring from the initial
# state.
if not restored_from_checkpoint:
self.tf_agent.train_step_counter.assign(0)
# Reset the model weights.
if compile_graph:
self.tf_agent.actor_net.initialize_weights()
def _compile_graph(self):
"""Force compilation of the graph."""
    # Ideally we would execute the graph with a minimum number of steps (i.e. set
# batch_size, num_batches_to_sample and training_steps to 1) to avoid
# biasing the initial weights and minimize initialization time.
# However, using tf.Variable instances to configure training parameters
# has a number of drawbacks:
# - It's not possible to _currently_ communicate variables across device
# boundaries with XLA enabled.
# - When variables that affect control flow in the graph are modified,
# partial recompilation occurs.
#
# So instead this populates the demonstration buffer with random data
# and trains for a single step to force compilation.
self._replay_buffer.Add(demonstration_buffer.batch_trajectories(
demonstration_buffer.episode_steps_to_trajectories(
_generate_random_steps(self._hparams['batch_size'] *
self._hparams['num_batches_to_sample'],
self.brain_spec),
self.brain_spec)))
self._reinitialize_dataset()
    initial_time = time.perf_counter()
self.train()
    falken_logging.info(f'Compiled in {time.perf_counter() - initial_time}s')
self.tf_agent.train_step_counter.assign(0)
self._replay_buffer.Clear()
self._reinitialize_dataset()
@property
def num_train_frames(self):
"""Number of frames collected for training."""
return int(self._num_train_frames)
@property
def num_eval_frames(self):
"""Number of frames collected for training."""
return int(self._num_eval_frames)
@property
def global_step(self):
"""Number of batched gradient updates / global steps."""
return int(self.tf_agent.train_step_counter)
@property
def hparams(self):
"""Hyperparameter settings in dict form used in the brain currently."""
return dict(self._hparams)
@property
def summary_path(self):
"""Get the directory to write summaries."""
return self._summary_path
@summary_path.setter
def summary_path(self, path):
"""Set the summary directory to write summaries.
Args:
path: Directory to write summaries if _use_summaries is True.
"""
# Create a train summary writer that flushes periodically.
if self._summary_path != path:
self._summary_path = path
if self._use_summaries:
self._train_summary_writer = tf.compat.v2.summary.create_file_writer(
            self._summary_path, flush_millis=self._SUMMARIES_FLUSH_SECS * 1000)
self._train_summary_writer.set_as_default()
else:
self._train_summary_writer = None
@property
def checkpoint_path(self):
"""Get the checkpoint directory."""
return self._checkpoint_path
@checkpoint_path.setter
def checkpoint_path(self, path):
"""Set the checkpoint directory and try to load from the directory.
Args:
path: Directory to write checkpoints.
"""
self._checkpoint_path = path
self._checkpointer, _ = _create_checkpointer(
self._checkpoint_path, self._checkpoint_trackable_objects)
def _reinitialize_dataset(self):
"""Force dataset recreation from the replay buffer on _setup_dataset()."""
self._dataset = None
self._dataset_iterator = None
def _setup_dataset(self):
"""Set up dataset and iterator."""
if self._dataset:
return
# TF agents expects a time dimension, so we add one dummy dimension in
# front.
def _add_time_dim(tensor_nest):
# Add dummy time dimension of 1.
return tf.nest.map_structure(lambda x: tf.expand_dims(x, axis=0),
tensor_nest)
ds = self._replay_buffer.AsDataset().map(_add_time_dim).cache().repeat()
ds = ds.shuffle(
self._replay_buffer.size).batch(self._hparams['batch_size'])
# We apply a second batch dimension so that a batch of batches can be
# prefetched before we enter the XLA/jit translated train function.
# (which does not like the 'next' operator)
ds = ds.batch(self._hparams['num_batches_to_sample'])
ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
self._dataset = ds
self._dataset_iterator = iter(ds)
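  # Shape sketch (illustrative, using the default hparams): each element drawn
  # from the iterator is a nest whose tensors have leading dimensions
  # [num_batches_to_sample=50, batch_size=500, time=1, ...]; _py_fun_train_step
  # later indexes the outer dimension round-robin to feed one inner batch per
  # gradient step.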
def train(self):
"""Trains the brain using the collected data."""
initial_count_steps = int(self.global_step)
initial_time = time.perf_counter()
# set up dataset + iterator
self._setup_dataset()
with tf.compat.v2.summary.record_if(self._use_summaries):
# TODO(wun): Determine why this is necessary. Currently without
# this line, the default summary writer somehow gets set to
# None, resulting in an empty TensorBoard.
if self._train_summary_writer:
self._train_summary_writer.set_as_default()
self._train_step(self._get_experiences(self._dataset_iterator))
falken_logging.info(
f'Trained {int(self.global_step) - initial_count_steps} steps in '
f'{time.perf_counter() - initial_time} seconds from '
f'{self._num_train_frames.numpy()} training_frames.')
falken_logging.info(
f'Trained {self.tf_agent.train_step_counter.numpy()} steps in '
f'total from {self._num_train_frames.numpy()} training frames.')
def record_step(self, observation_pb, reward, phase, episode_id, action_pb,
timestamp_micros):
"""Records a known state+action from a given env.
Args:
observation_pb: The observation data for this step.
reward: The reward for this step.
phase: The phase of the episode this step represents.
episode_id: Id for the episode that's collecting this step.
action_pb: Action data from brain or user.
timestamp_micros: Microsecond timestamp of the step.
"""
self._demo_buffer.record_step(
demonstration_buffer.Step(
observation_pb=observation_pb,
reward=reward,
phase=phase,
episode_id=episode_id,
action_pb=action_pb,
timestamp_micros=timestamp_micros))
# Add new completed episodes to buffers.
for before, eval_trajectory, after in _select_sub_trajectories(
self._demo_buffer.flush_episode_demonstrations(),
self._EVAL_FRACTION, self._eval_split_rng):
if not eval_trajectory:
# If episode is too short for splitting, stochastically assign it to
# either eval or training.
assert not after
if self._eval_split_rng.random() < self._EVAL_FRACTION:
before, eval_trajectory = eval_trajectory, before
for trajectory in [before, after]:
if not trajectory: # Skip empty trajectory chunks.
continue
self._replay_buffer.Add(trajectory)
chunk_size = _outer_dim_length(trajectory)
self._num_train_frames.assign_add(chunk_size)
falken_logging.info(f'Added {chunk_size} training frames.')
self._reinitialize_dataset()
if eval_trajectory:
self._eval_datastore.add_trajectory(eval_trajectory)
chunk_size = _outer_dim_length(eval_trajectory)
self._num_eval_frames.assign_add(chunk_size)
self._eval_datastore.create_version()
falken_logging.info(f'Added {chunk_size} eval frames.')
def clear_step_buffers(self):
"""Clear all steps from demo, eval and replay buffers."""
# Reset training and eval counters.
self._num_train_frames.assign(0)
self._num_eval_frames.assign(0)
# Clear buffers.
self._replay_buffer.Clear()
self._demo_buffer.clear()
self._eval_datastore.clear()
self._reinitialize_dataset()
@property
def latest_checkpoint(self):
return self._checkpointer.manager.latest_checkpoint
def _create_export_policy(self):
"""Creates the policy for export based on hparams."""
assert isinstance(self.tf_agent.policy, greedy_policy.GreedyPolicy)
stochastic_policy = self.tf_agent.policy.wrapped_policy
policy_type = self._hparams['policy_type']
if policy_type == 'greedy':
return self.tf_agent.policy
elif policy_type == 'greedy_float':
return policies.GreedyFloatPolicy(stochastic_policy)
elif policy_type == 'noisy':
return stochastic_policy
else:
raise UnknownHParamError(f'unknown policy_type: {policy_type}')
@property
def policy_saver(self):
if not self._policy_saver:
policy = self._create_export_policy()
self._policy_saver = policy_saver.PolicySaver(policy, batch_size=1)
return self._policy_saver
def export_saved_model(self, path):
"""Export saved model."""
# Wrap in GreedyPolicy to make sure behavior is smooth.
falken_logging.info(f'Exporting policy with action signature '
f'{self.policy_saver.action_input_spec}')
    falken_logging.info(f'Exporting policy with policy_step_spec '
f'{self.policy_saver.policy_step_spec}')
    falken_logging.info(f'Exporting policy with policy_state_spec '
f'{self.policy_saver.policy_state_spec}')
os.makedirs(path, exist_ok=True)
self.policy_saver.save(path)
    falken_logging.info(f'Exported SavedModel policy to {path}')
def convert_model_to_tflite(self, saved_model_path, tflite_path):
"""Convert saved model to tf-lite model."""
filename = 'model.tflite'
os.makedirs(tflite_path, exist_ok=True)
tflite_file = os.path.join(tflite_path, filename)
with open(tflite_file, 'wb') as f:
f.write(saved_model_to_tflite_model.convert(saved_model_path, ['action']))
def save_checkpoint(self, save_dir):
"""Serializes the trained policy to disk and moves to save_dir.
Args:
save_dir: Directory to save the latest checkpoint.
This directory *must* be on the same filesystem as checkpoint_path
specified on construction of this object.
"""
global_step = self.tf_agent.train_step_counter
self._checkpointer.save(global_step)
checkpoint_path_prefix = self.latest_checkpoint
checkpoint_dir = os.path.dirname(checkpoint_path_prefix)
# Move information on the latest checkpoint from the local
# tensorflow checkpoint dir to the export location at policy_path.
# Checkpoint files start with '<checkpoint_name>.'.
os.makedirs(save_dir, exist_ok=True)
for checkpoint_filename, copy in (
[(fname, False) for fname in glob.glob(checkpoint_path_prefix + '.*')] +
# Copy 'checkpoint' metadata file (tracks state of the checkpoint
# manager).
[(os.path.join(checkpoint_dir, 'checkpoint'), True)]):
target_filename = os.path.join(
save_dir, os.path.basename(checkpoint_filename))
if copy:
shutil.copy(checkpoint_filename, target_filename)
else:
# Create a hard link to the checkpoint file in the output directory.
os.link(checkpoint_filename, target_filename)
def full_eval_from_datastore(self, eval_ds):
"""Same as compute_full_evaluation, but from an external EvalDatastore.
Args:
eval_ds: EvalDatastore instance to use to evaluate this brain instance.
Yields:
(eval_dataset_version_id, mean_score) tuples where
eval_dataset_version_id is the version of the evaluation dataset and
mean_score is the evaluation score of the brain against the dataset.
"""
prev_size = 0
prev_score = 0
# Successive versions just incrementally add data. Since we're computing an
# average score for each version, we can compute the evaluation
# incrementally as well.
for version_id, size, delta in eval_ds.get_version_deltas():
delta_score = self.tf_agent.evaluate(delta)
weight = size / (size + prev_size)
mean_score = weight * delta_score + (1 - weight) * prev_score
yield version_id, float(mean_score) # Cast to float from tensor.
prev_score, prev_size = mean_score, size + prev_size
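  # Numeric check (illustrative): if version 1 holds 80 frames with mean loss
  # 0.5 and version 2 adds 20 frames with mean loss 0.3, the second yield is
  # 0.2 * 0.3 + 0.8 * 0.5 = 0.46, identical to averaging all 100 frames at
  # once.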
def compute_full_evaluation(self, path=None):
"""Yields a sequence of pairs of the form (eval_version, eval_score).
Args:
path: An optional policy path if we want to evaluate a different version
rather than the currently active one.
Yields:
A sequence of pairs of the form (eval_version, eval_score).
Raises:
NoCheckpointFoundError: If the path was set but no checkpoint was found.
"""
if path is None:
yield from self.full_eval_from_datastore(self._eval_datastore)
else:
# Create a copy of this brain for eval:
with tf.name_scope('eval_brain'):
        cp_path = os.path.join(path, 'checkpoint/')
eval_brain = ContinuousImitationBrain(
'',
spec_pb=self.spec_pb,
checkpoint_path=cp_path,
# TODO(lph): Check if we need to use a separate summary path.
summary_path=self._summary_path,
replay_buffer_capacity=self._replay_buffer_capacity,
hparams=self._hparams)
if not eval_brain.latest_checkpoint:
raise NoCheckpointFoundError(
f'No checkpoints found in dir {cp_path}, {cp_path}/* is '
+ str(glob.glob(cp_path + '*')))
yield from eval_brain.full_eval_from_datastore(self._eval_datastore)
def _py_fun_get_experiences(self, iterator):
"""Return a batch of batches for use in _py_fun_train_step.
Args:
iterator: A dataset iterator.
Returns:
A Trajectory object representing a nest of tensors, where each tensor
has batch dimensions num_batches_to_sample x batch_size.
"""
return next(iterator)
def _py_fun_train_step(self, experiences):
"""Train for a single step.
We pre-sample batches and consider them as input so that this function
can be translated via experimental_compile=True with JIT XLA compilation.
(The 'next' function does not support JIT XLA compilation right now.)
Args:
experiences: A Trajectory with data batched across two dimensions. The
outer batch dimension is 'num_batches_to_sample' the inner batch
dimension is 'batch_size'. In each training step, we move round robin
around the outer batch dimension and feed an inner batch to tf-agents'
        train function. (The data structure is generated by calling the function
on the generating dataset - which is already batched).
"""
num_batches = self._hparams['num_batches_to_sample']
for i in tf.range(self._hparams['training_steps']):
def _select_outer_batch(tensor):
"""Selects the ith outer batch of a tensor."""
# Using the loop variable i is ok here, since we're only ever using the
# function inside the loop.
return tensor[i % num_batches] # pylint: disable=cell-var-from-loop
# Data in experiences has two batch dimensions (see the generation of the
# dataset object in _setup_dataset()). We now create an object
# that selects an outer batch, thus leaving the resulting object with
# only one batch dimension.
experience = tf.nest.map_structure(_select_outer_batch, experiences)
self.tf_agent.train(experience=experience)
|
the-stack_106_24528 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from .base import *
DEBUG = False
TEMPLATE_DEBUG = False
ALLOWED_HOSTS = ["*"]
STATIC_ROOT = '/home/vagrant/static'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/db.sqlite3',
}
}
|
the-stack_106_24531 | """Deprecated Magic functions.
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (c) 2012 The IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Our own packages
from IPython.core.magic import Magics, magics_class, line_magic
#-----------------------------------------------------------------------------
# Magic implementation classes
#-----------------------------------------------------------------------------
@magics_class
class DeprecatedMagics(Magics):
"""Magics slated for later removal."""
@line_magic
def install_profiles(self, parameter_s=''):
"""%install_profiles has been deprecated."""
print('\n'.join([
"%install_profiles has been deprecated.",
"Use `ipython profile list` to view available profiles.",
"Requesting a profile with `ipython profile create <name>`",
"or `ipython --profile=<name>` will start with the bundled",
"profile of that name if it exists."
]))
@line_magic
def install_default_config(self, parameter_s=''):
"""%install_default_config has been deprecated."""
print('\n'.join([
"%install_default_config has been deprecated.",
"Use `ipython profile create <name>` to initialize a profile",
"with the default config files.",
"Add `--reset` to overwrite already existing config files with defaults."
]))
|
the-stack_106_24535 | import unittest
from apiai_assistant.widgets import SelectItem
from apiai_assistant.widgets import OptionInfo
class SelectItemTestCase(unittest.TestCase):
def test_basic(self):
key = "foobar"
title = "bario"
w_slct_item = SelectItem(title=title, option_info=OptionInfo(key))
self.assertEqual(
w_slct_item.render(),
{
"title": title,
"description": None,
"optionInfo": {
"key": key,
"synonyms": []
},
"image": None
}
)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_24536 | # -*- coding: utf-8 -*-
"""
Created on Wed May 30 13:41:15 2018
@author: mzw06
"""
import torch.nn as nn
import torch.nn.functional as F
class RNNModel(nn.Module):
"""Container a recurrent module."""
def __init__(self, rnn_type, ninp, ntag, nhid, nlayers, dropout=0):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
if rnn_type in ['LSTM', 'GRU']:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntag)
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
self.init_weights()
def init_weights(self):
initrange = 0.1
self.decoder.bias.data.fill_(0)
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
output, hidden = self.rnn(input, hidden)
output = self.drop(output)
decoded = self.decoder(output)
return decoded, hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
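# Usage sketch (illustrative values, not from the original file): a GRU tagger
# over 100-dim inputs with 5 output tags, fed sequence-first as nn.GRU expects.
#
#   import torch
#   model = RNNModel('GRU', ninp=100, ntag=5, nhid=64, nlayers=2, dropout=0.1)
#   hidden = model.init_hidden(bsz=16)
#   x = torch.randn(50, 16, 100)        # (seq_len, batch, ninp)
#   logits, hidden = model(x, hidden)   # logits: (50, 16, 5)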
class CNNModel(nn.Module):
"""Container of a CNN module. """
def __init__(self, activation=F.relu, num_classes=3, dropout=0.5):
super(CNNModel, self).__init__()
self.activation = activation
# convolution layers
self.conv1 = nn.Conv2d(1, 8, kernel_size=4)
self.bn1 = nn.BatchNorm2d(8)
self.conv2 = nn.Conv1d(8, 8, kernel_size=4)
self.bn2 = nn.BatchNorm1d(8)
self.conv3 = nn.Conv1d(8, 16, kernel_size=4)
self.bn3 = nn.BatchNorm1d(16)
# fully connected layers
self.fc1 = nn.Linear(16*22, 32)
self.bn5 = nn.BatchNorm1d(32)
self.fc2 = nn.Linear(32, num_classes)
# max pooling
self.pool = nn.MaxPool1d(2, 2)
# dropout
self.drop = nn.Dropout(dropout)
def forward(self, x):
x = self.bn1(self.conv1(x))
x = self.activation(x)
x = x.view(x.size(0), x.size(1), -1)
x = self.bn2(self.conv2(x))
x = self.pool(self.activation(x))
x = self.bn3(self.conv3(x))
x = self.pool(self.activation(x))
x = x.view(x.size(0), -1)
x = self.bn5(self.fc1(x))
x = self.activation(x)
x = self.drop(x)
x = self.fc2(x)
return x
|
the-stack_106_24537 | import newspaper
from newspaper import Article
import requests
from dragnet import content_extractor, content_comments_extractor
from eatiht import etv2
from eatiht import v2
import eatiht
from readability.readability import Document
import urllib.request
from bs4 import BeautifulSoup
TARGET = 'http://giaitri.vnexpress.net'
# ARTICLE = 'http://suckhoe.vnexpress.net/tin-tuc/dinh-duong/uong-nuoc-lanh' \
# '-giup-giam-can-3536234.html'
ARTICLE = 'http://www.baomoi.com/ios-cach-khac-phuc-van-de-dinh-ma-doc-tu' \
'-redirect-quang-cao-khi-vao-bat-ki-website-nao/c/19620631.epi'
# ARTICLE = 'https://vnhacker.blogspot.com/2017/01/nuoc-my-va-nguoi-nhap-cu' \
# '.html'
# Dragnet only works with py27
def try_dragnet():
# fetch HTML
r = requests.get(ARTICLE)
# get main article without comments
content = content_extractor.analyze(r.content)
print("======")
print(content)
# get article and comments
content_comments = content_comments_extractor.analyze(r.content)
print("======")
print(content_comments)
def try_newspaper():
vne = newspaper.build(TARGET)
a = Article(ARTICLE, language='vi')
for article in vne.articles:
print(article.url)
a.download()
a.parse()
print("FIN")
def try_readability():
html = urllib.request.urlopen(ARTICLE).read()
doc = Document(html)
    con = BeautifulSoup(doc.summary(), 'html.parser').get_text()
tit = doc.short_title()
print("===READABILITY===")
print("=CONTENT=")
print(con)
print("=TITLE=")
print(tit)
def try_eatiht():
print("===EATIHT V2===")
tree = etv2.extract(ARTICLE)
tree.bootstrapify()
print(tree.get_html_string())
print("===V2===")
print(v2.extract(ARTICLE))
print("===V1===")
print(eatiht.extract(ARTICLE))
def main():
try_newspaper()
if __name__ == '__main__':
    main()
# newspaper load target --> detect categories --> cached --> filter by domain
# --> detect article list --> filter for remove temp URL (#, ?..) --> pick
# random articles and detect format --> validate format --> send to worker
# --> worker start crawling --> if error happen, notify back to master
|
the-stack_106_24538 | """
Author: Dr. John T. Hwang <[email protected]>
This package is distributed under New BSD license.
LHS sampling; uses the pyDOE2 package.
"""
from __future__ import division
from pyDOE2 import lhs
from six.moves import range
from scipy.spatial.distance import pdist, cdist
import numpy as np
from smt.sampling_methods.sampling_method import SamplingMethod
class LHS(SamplingMethod):
def _initialize(self):
self.options.declare('criterion', 'c', values=['center', 'maximin', 'centermaximin',
'correlation', 'c', 'm', 'cm', 'corr','ese'],
                             types=str, desc='criterion used to construct the LHS design; ' +
                             'c, m, cm and corr are abbreviations of center, maximin, centermaximin and correlation, respectively')
def _compute(self, nt):
"""
Compute the requested number of sampling points.
Arguments
---------
nt : int
Number of points requested.
Returns
-------
ndarray[nt, nx]
The sampling locations in the input space.
"""
xlimits = self.options['xlimits']
nx = xlimits.shape[0]
if self.options['criterion'] != 'ese':
return lhs(nx, samples=nt, criterion=self.options['criterion'])
elif self.options['criterion'] == 'ese':
return self._ese(nx,nt)
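    # Usage sketch (illustrative; assumes the standard smt import path): draw
    # 50 LHS points in a 2-D box with the ESE criterion. xlimits rows are
    # [lower, upper] bounds per dimension.
    #
    #   import numpy as np
    #   from smt.sampling_methods import LHS
    #   sampling = LHS(xlimits=np.array([[0.0, 1.0], [-5.0, 5.0]]),
    #                  criterion='ese')
    #   x = sampling(50)   # array of shape (50, 2)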
def _maximinESE(self, X, T0=None, outer_loop=None, inner_loop=None, J=20,
tol=1e-3, p=10, return_hist=False, fixed_index=[]):
"""
Returns an optimized design starting from design X. For more information,
see R. Jin, W. Chen and A. Sudjianto (2005):
An efficient algorithm for constructing optimal design of computer
experiments. Journal of Statistical Planning and Inference, 134:268-287.
Parameters
----------
X : array
The design to be optimized
T0 : double, optional
Initial temperature of the algorithm.
If set to None, a standard temperature is used.
outer_loop : integer, optional
The number of iterations of the outer loop. If None, set to
min(1.5*dimension of LHS, 30)
inner_loop : integer, optional
The number of iterations of the inner loop. If None, set to
min(20*dimension of LHS, 100)
J : integer, optional
Number of replications of the plan in the inner loop. Default to 20
tol : double, optional
Tolerance for modification of Temperature T. Default to 0.001
p : integer, optional
Power used in the calculation of the PhiP criterion. Default to 10
return_hist : boolean, optional
If set to True, the function returns information about the behaviour of
temperature, PhiP criterion and probability of acceptance during the
process of optimization. Default to False
Returns
------
X_best : array
The optimized design
        hist : dictionary
            If return_hist is set to True, returns a dictionary containing the phiP
('PhiP') criterion, the temperature ('T') and the probability of
acceptance ('proba') during the optimization.
"""
# Initialize parameters if not defined
if T0 is None:
T0 = 0.005*self._PhiP(X, p=p)
if inner_loop is None:
inner_loop = min(20*X.shape[1], 100)
if outer_loop is None:
outer_loop = min(int(1.5*X.shape[1]), 30)
T = T0
X_ = X[:] # copy of initial plan
X_best = X_[:]
d = X.shape[1]
PhiP_ = self._PhiP(X_best, p=p)
PhiP_best = PhiP_
hist_T = list()
hist_proba = list()
hist_PhiP = list()
hist_PhiP.append(PhiP_best)
# Outer loop
for z in range(outer_loop):
PhiP_oldbest = PhiP_best
n_acpt = 0
n_imp = 0
# Inner loop
for i in range(inner_loop):
modulo = (i+1)%d
l_X = list()
l_PhiP = list()
# Build J different plans with a single exchange procedure
# See description of PhiP_exchange procedure
for j in range(J):
l_X.append(X_.copy())
l_PhiP.append(self._PhiP_exchange(l_X[j], k=modulo, PhiP_=PhiP_, p=p,
fixed_index=fixed_index))
l_PhiP = np.asarray(l_PhiP)
k = np.argmin(l_PhiP)
PhiP_try = l_PhiP[k]
# Threshold of acceptance
if PhiP_try - PhiP_ <= T * np.random.rand(1)[0]:
PhiP_ = PhiP_try
n_acpt = n_acpt + 1
X_ = l_X[k]
# Best plan retained
if PhiP_ < PhiP_best:
X_best = X_
PhiP_best = PhiP_
n_imp = n_imp + 1
hist_PhiP.append(PhiP_best)
p_accpt = float(n_acpt) / inner_loop # probability of acceptance
p_imp = float(n_imp) / inner_loop # probability of improvement
hist_T.extend(inner_loop*[T])
hist_proba.extend(inner_loop*[p_accpt])
if PhiP_best - PhiP_oldbest < tol:
# flag_imp = 1
if p_accpt>=0.1 and p_imp<p_accpt:
T = 0.8*T
elif p_accpt>=0.1 and p_imp==p_accpt:
pass
else:
T = T/0.8
else:
# flag_imp = 0
if p_accpt<=0.1:
T = T/0.7
else:
T = 0.9*T
hist = {'PhiP': hist_PhiP, 'T': hist_T, 'proba': hist_proba}
if return_hist:
return X_best, hist
else:
return X_best
def _PhiP(self,X, p=10):
"""
Calculates the PhiP criterion of the design X with power p.
X : array_like
The design where to calculate PhiP
p : integer
The power used for the calculation of PhiP (default to 10)
"""
return ((pdist(X)**(-p)).sum()) ** (1./p)
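    # Restating the return line as a formula: phi_p(X) = (sum over pairs i<j
    # of d_ij**(-p)) ** (1/p), where d_ij are the pairwise distances from
    # pdist(X). Minimising phi_p pushes the closest pairs of points apart,
    # the maximin-style behaviour that the ESE search optimises.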
def _PhiP_exchange(self,X, k, PhiP_, p, fixed_index):
"""
Modifies X with a single exchange algorithm and calculates the corresponding
PhiP criterion. Internal use.
Optimized calculation of the PhiP criterion. For more information, see:
R. Jin, W. Chen and A. Sudjianto (2005):
An efficient algorithm for constructing optimal design of computer
experiments. Journal of Statistical Planning and Inference, 134:268-287.
Parameters
----------
X : array_like
The initial design (will be modified during procedure)
k : integer
The column where the exchange is proceeded
PhiP_ : double
The PhiP criterion of the initial design X
p : integer
The power used for the calculation of PhiP
Returns
------
res : double
The PhiP criterion of the modified design X
"""
# Choose two (different) random rows to perform the exchange
i1 = np.random.randint(X.shape[0])
while i1 in fixed_index:
i1 = np.random.randint(X.shape[0])
i2 = np.random.randint(X.shape[0])
while i2 == i1 or i2 in fixed_index:
i2 = np.random.randint(X.shape[0])
X_ = np.delete(X, [i1,i2], axis=0)
dist1 = cdist([X[i1,:]], X_)
dist2 = cdist([X[i2,:]], X_)
d1 = np.sqrt(dist1**2 + (X[i2,k] - X_[:,k])**2 - (X[i1,k] - X_[:,k])**2)
d2 = np.sqrt(dist2**2 - (X[i2,k] - X_[:,k])**2 + (X[i1,k] - X_[:,k])**2)
res = (PhiP_**p + (d1**(-p) - dist1**(-p) + d2**(-p) - dist2**(-p)).sum())**(1./p)
X[i1,k], X[i2,k] = X[i2,k], X[i1,k]
return res
def _ese(self,dim,nt):
# Parameters of maximinESE procedure
P0 = lhs(dim, nt, criterion = None)
J = 20
outer_loop = min(int(1.5*dim), 30)
inner_loop = min(20*dim, 100)
D0 = pdist(P0)
R0 = np.corrcoef(P0)
corr0 = np.max(np.abs(R0[R0!=1]))
phip0 = self._PhiP(P0)
P, historic = self._maximinESE(P0, outer_loop=outer_loop, inner_loop=inner_loop,
J=J, tol=1e-3, p=10, return_hist=True)
return P
|
the-stack_106_24541 | import markdown
from peewee import DoesNotExist, fn
import tornado.web
from model.model import Post, User, Tag, PostTag
from config.config import conf
from util.gravatar import Gravatar
class BaseHandler(tornado.web.RequestHandler):
"""基础 Handler
所有Handler都要继承此类以获取必要的应用内通用方法和数据
"""
@property
def gravatar(self):
return Gravatar
@property
def markdown(self):
return markdown.markdown
def get(self):
"""捕获404"""
self.send_error(404)
def get_current_user(self):
"""用户验证
如果存在 cookie 则根据用户 cookie 获取用户信息并返回,
否则返回 None
"""
uid = self.get_secure_cookie('uid')
if not uid:
return None
else:
user = ''
try:
user = User.get(User.id == uid)
except DoesNotExist:
self.clear_cookie('uid')
return user
def write_error(self, status_code, **kwargs):
"""重写404错误页
"""
if status_code == 404:
self.render('public/404.html')
elif status_code == 500:
self.render('public/500.html')
else:
self.write('error:' + str(status_code))
@staticmethod
def get_side():
"""获取侧边栏内容
通用侧边栏数据
"""
side = {
'recent_post': Post.select().limit(conf['RECENT_POST_NUM']),
'random_post': Post.select().order_by(fn.Rand()).limit(conf['RANDOM_POST_NUM']),
'tags': Tag.select(Tag, fn.Count(Post.id).alias('count')).join(PostTag).join(Post).group_by(Tag),
}
return side
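    # Usage sketch (illustrative; the handler and template names are
    # assumptions): a concrete page handler would pass the shared sidebar data
    # into its template, e.g.
    #
    #   class IndexHandler(BaseHandler):
    #       def get(self):
    #           self.render('index.html', side=self.get_side())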
def success(self, message='操作成功', url=''):
"""操作成功提示
"""
self.render('message/success.html', message=message, url=url)
def failure(self, message="操作失败", url=''):
self.render('message/failure.html', message=message, url=url)
def error(self, message="出现错误了", url=''):
"""操作失败提示
"""
self.render('message/error.html', message=message, url=url) |
the-stack_106_24542 | # coding=utf-8
from typing import List
from src.data_structure.data_structure import ListNode
class Solution:
"""
反转链表
"""
def reverse_list(self, head: ListNode) -> ListNode:
"""
:param head:
:return:
"""
pre, cur = None, head
while cur:
next = cur.next
cur.next = pre
pre = cur
cur = next
return pre
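# Usage sketch (illustrative; assumes ListNode(val) exposes `val` and `next`,
# which this file imports but does not show):
#
#   a, b, c = ListNode(1), ListNode(2), ListNode(3)
#   a.next, b.next = b, c
#   new_head = Solution().reverse_list(a)   # now 3 -> 2 -> 1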
|
the-stack_106_24545 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
module: ec2_vpc_nacl
short_description: create and delete Network ACLs.
version_added: 1.0.0
description:
- Read the AWS documentation for Network ACLS
U(https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html)
options:
name:
description:
- Tagged name identifying a network ACL.
- One and only one of the I(name) or I(nacl_id) is required.
required: false
type: str
nacl_id:
description:
- NACL id identifying a network ACL.
- One and only one of the I(name) or I(nacl_id) is required.
required: false
type: str
vpc_id:
description:
- VPC id of the requesting VPC.
- Required when state present.
required: false
type: str
subnets:
description:
- The list of subnets that should be associated with the network ACL.
- Must be specified as a list
- Each subnet can be specified as subnet ID, or its tagged name.
required: false
type: list
elements: str
egress:
description:
- A list of rules for outgoing traffic. Each rule must be specified as a list.
Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', 'ipv6-icmp', '-1', 'all']),
        the rule action ('allow' or 'deny'), the CIDR of the IPv4 or IPv6 network range to allow or deny,
        the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the range for
        TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
See examples.
default: []
required: false
type: list
elements: list
ingress:
description:
- List of rules for incoming traffic. Each rule must be specified as a list.
Each rule may contain the rule number (integer 1-32766), protocol (one of ['tcp', 'udp', 'icmp', 'ipv6-icmp', '-1', 'all']),
        the rule action ('allow' or 'deny'), the CIDR of the IPv4 or IPv6 network range to allow or deny,
        the ICMP type (-1 means all types), the ICMP code (-1 means all codes), the first port in the range for
        TCP or UDP protocols, and the last port in the range for TCP or UDP protocols.
See examples.
default: []
required: false
type: list
elements: list
tags:
description:
- Dictionary of tags to look for and apply when creating a network ACL.
required: false
type: dict
state:
description:
- Creates or modifies an existing NACL
- Deletes a NACL and reassociates subnets to the default NACL
required: false
type: str
choices: ['present', 'absent']
default: present
author: Mike Mochan (@mmochan)
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = r'''
# Complete example to create and delete a network ACL
# that allows SSH, HTTP and ICMP in, and all traffic out.
- name: "Create and associate production DMZ network ACL with DMZ subnets"
community.aws.ec2_vpc_nacl:
vpc_id: vpc-12345678
name: prod-dmz-nacl
region: ap-southeast-2
subnets: ['prod-dmz-1', 'prod-dmz-2']
tags:
CostCode: CC1234
Project: phoenix
Description: production DMZ
ingress:
# rule no, protocol, allow/deny, cidr, icmp_type, icmp_code,
# port from, port to
- [100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]
- [200, 'tcp', 'allow', '0.0.0.0/0', null, null, 80, 80]
- [205, 'tcp', 'allow', '::/0', null, null, 80, 80]
- [300, 'icmp', 'allow', '0.0.0.0/0', 0, 8]
- [305, 'ipv6-icmp', 'allow', '::/0', 0, 8]
egress:
- [100, 'all', 'allow', '0.0.0.0/0', null, null, null, null]
- [105, 'all', 'allow', '::/0', null, null, null, null]
state: 'present'
- name: "Remove the ingress and egress rules - defaults to deny all"
community.aws.ec2_vpc_nacl:
vpc_id: vpc-12345678
name: prod-dmz-nacl
region: ap-southeast-2
subnets:
- prod-dmz-1
- prod-dmz-2
tags:
CostCode: CC1234
Project: phoenix
Description: production DMZ
state: present
- name: "Remove the NACL subnet associations and tags"
community.aws.ec2_vpc_nacl:
vpc_id: 'vpc-12345678'
name: prod-dmz-nacl
region: ap-southeast-2
state: present
- name: "Delete nacl and subnet associations"
community.aws.ec2_vpc_nacl:
vpc_id: vpc-12345678
name: prod-dmz-nacl
state: absent
- name: "Delete nacl by its id"
community.aws.ec2_vpc_nacl:
nacl_id: acl-33b4ee5b
state: absent
'''
RETURN = r'''
task:
description: The result of the create, or delete action.
returned: success
type: dict
nacl_id:
description: The id of the NACL (when creating or updating an ACL)
returned: success
type: str
sample: acl-123456789abcdef01
'''
try:
import botocore
except ImportError:
pass # Handled by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
# VPC-supported IANA protocol numbers
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
PROTOCOL_NUMBERS = {'all': -1, 'icmp': 1, 'tcp': 6, 'udp': 17, 'ipv6-icmp': 58}
# Utility methods
def icmp_present(entry):
if len(entry) == 6 and entry[1] in ['icmp', 'ipv6-icmp'] or entry[1] in [1, 58]:
return True
def load_tags(module):
tags = []
if module.params.get('tags'):
for name, value in module.params.get('tags').items():
tags.append({'Key': name, 'Value': str(value)})
tags.append({'Key': "Name", 'Value': module.params.get('name')})
else:
tags.append({'Key': "Name", 'Value': module.params.get('name')})
return tags
def subnets_removed(nacl_id, subnets, client, module):
results = find_acl_by_id(nacl_id, client, module)
associations = results['NetworkAcls'][0]['Associations']
subnet_ids = [assoc['SubnetId'] for assoc in associations]
return [subnet for subnet in subnet_ids if subnet not in subnets]
def subnets_added(nacl_id, subnets, client, module):
results = find_acl_by_id(nacl_id, client, module)
associations = results['NetworkAcls'][0]['Associations']
subnet_ids = [assoc['SubnetId'] for assoc in associations]
return [subnet for subnet in subnets if subnet not in subnet_ids]
def subnets_changed(nacl, client, module):
changed = False
vpc_id = module.params.get('vpc_id')
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
subnets = subnets_to_associate(nacl, client, module)
if not subnets:
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
subnets = find_subnet_ids_by_nacl_id(nacl_id, client, module)
if subnets:
replace_network_acl_association(default_nacl_id, subnets, client, module)
changed = True
return changed
changed = False
return changed
subs_added = subnets_added(nacl_id, subnets, client, module)
if subs_added:
replace_network_acl_association(nacl_id, subs_added, client, module)
changed = True
subs_removed = subnets_removed(nacl_id, subnets, client, module)
if subs_removed:
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)[0]
replace_network_acl_association(default_nacl_id, subs_removed, client, module)
changed = True
return changed
def nacls_changed(nacl, client, module):
changed = False
params = dict()
params['egress'] = module.params.get('egress')
params['ingress'] = module.params.get('ingress')
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
nacl = describe_network_acl(client, module)
entries = nacl['NetworkAcls'][0]['Entries']
egress = [rule for rule in entries if rule['Egress'] is True and rule['RuleNumber'] < 32767]
ingress = [rule for rule in entries if rule['Egress'] is False and rule['RuleNumber'] < 32767]
if rules_changed(egress, params['egress'], True, nacl_id, client, module):
changed = True
if rules_changed(ingress, params['ingress'], False, nacl_id, client, module):
changed = True
return changed
def tags_changed(nacl_id, client, module):
changed = False
tags = dict()
if module.params.get('tags'):
tags = module.params.get('tags')
if module.params.get('name') and not tags.get('Name'):
tags['Name'] = module.params['name']
nacl = find_acl_by_id(nacl_id, client, module)
if nacl['NetworkAcls']:
nacl_values = [t.values() for t in nacl['NetworkAcls'][0]['Tags']]
nacl_tags = [item for sublist in nacl_values for item in sublist]
tag_values = [[key, str(value)] for key, value in tags.items()]
tags = [item for sublist in tag_values for item in sublist]
if sorted(nacl_tags) == sorted(tags):
changed = False
return changed
else:
delete_tags(nacl_id, client, module)
create_tags(nacl_id, client, module)
changed = True
return changed
return changed
def rules_changed(aws_rules, param_rules, Egress, nacl_id, client, module):
changed = False
rules = list()
for entry in param_rules:
rules.append(process_rule_entry(entry, Egress))
if rules == aws_rules:
return changed
else:
removed_rules = [x for x in aws_rules if x not in rules]
if removed_rules:
params = dict()
for rule in removed_rules:
params['NetworkAclId'] = nacl_id
params['RuleNumber'] = rule['RuleNumber']
params['Egress'] = Egress
delete_network_acl_entry(params, client, module)
changed = True
added_rules = [x for x in rules if x not in aws_rules]
if added_rules:
for rule in added_rules:
rule['NetworkAclId'] = nacl_id
create_network_acl_entry(rule, client, module)
changed = True
return changed
def is_ipv6(cidr):
return ':' in cidr
def process_rule_entry(entry, Egress):
params = dict()
params['RuleNumber'] = entry[0]
params['Protocol'] = str(PROTOCOL_NUMBERS[entry[1]])
params['RuleAction'] = entry[2]
params['Egress'] = Egress
if is_ipv6(entry[3]):
params['Ipv6CidrBlock'] = entry[3]
else:
params['CidrBlock'] = entry[3]
if icmp_present(entry):
params['IcmpTypeCode'] = {"Type": int(entry[4]), "Code": int(entry[5])}
else:
if entry[6] or entry[7]:
params['PortRange'] = {"From": entry[6], 'To': entry[7]}
return params
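# Worked example (illustrative): the ingress rule
# [100, 'tcp', 'allow', '0.0.0.0/0', None, None, 22, 22] maps to
#   {'RuleNumber': 100, 'Protocol': '6', 'RuleAction': 'allow', 'Egress': False,
#    'CidrBlock': '0.0.0.0/0', 'PortRange': {'From': 22, 'To': 22}}
# i.e. the keyword arguments later passed to create_network_acl_entry
# (NetworkAclId is added by the caller).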
def restore_default_associations(assoc_ids, default_nacl_id, client, module):
if assoc_ids:
params = dict()
params['NetworkAclId'] = default_nacl_id[0]
for assoc_id in assoc_ids:
params['AssociationId'] = assoc_id
restore_default_acl_association(params, client, module)
return True
def construct_acl_entries(nacl, client, module):
for entry in module.params.get('ingress'):
params = process_rule_entry(entry, Egress=False)
params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
create_network_acl_entry(params, client, module)
for rule in module.params.get('egress'):
params = process_rule_entry(rule, Egress=True)
params['NetworkAclId'] = nacl['NetworkAcl']['NetworkAclId']
create_network_acl_entry(params, client, module)
# Module invocations
def setup_network_acl(client, module):
changed = False
nacl = describe_network_acl(client, module)
if not nacl['NetworkAcls']:
nacl = create_network_acl(module.params.get('vpc_id'), client, module)
nacl_id = nacl['NetworkAcl']['NetworkAclId']
create_tags(nacl_id, client, module)
subnets = subnets_to_associate(nacl, client, module)
replace_network_acl_association(nacl_id, subnets, client, module)
construct_acl_entries(nacl, client, module)
changed = True
return(changed, nacl['NetworkAcl']['NetworkAclId'])
else:
changed = False
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
changed |= subnets_changed(nacl, client, module)
changed |= nacls_changed(nacl, client, module)
changed |= tags_changed(nacl_id, client, module)
return (changed, nacl_id)
def remove_network_acl(client, module):
changed = False
result = dict()
nacl = describe_network_acl(client, module)
if nacl['NetworkAcls']:
nacl_id = nacl['NetworkAcls'][0]['NetworkAclId']
vpc_id = nacl['NetworkAcls'][0]['VpcId']
associations = nacl['NetworkAcls'][0]['Associations']
assoc_ids = [a['NetworkAclAssociationId'] for a in associations]
default_nacl_id = find_default_vpc_nacl(vpc_id, client, module)
if not default_nacl_id:
result = {vpc_id: "Default NACL ID not found - Check the VPC ID"}
return changed, result
if restore_default_associations(assoc_ids, default_nacl_id, client, module):
delete_network_acl(nacl_id, client, module)
changed = True
result[nacl_id] = "Successfully deleted"
return changed, result
if not assoc_ids:
delete_network_acl(nacl_id, client, module)
changed = True
result[nacl_id] = "Successfully deleted"
return changed, result
return changed, result
# Boto3 client methods
@AWSRetry.jittered_backoff()
def _create_network_acl(client, *args, **kwargs):
return client.create_network_acl(*args, **kwargs)
def create_network_acl(vpc_id, client, module):
try:
if module.check_mode:
nacl = dict(NetworkAcl=dict(NetworkAclId="nacl-00000000"))
else:
nacl = _create_network_acl(client, VpcId=vpc_id)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
return nacl
@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
def _create_network_acl_entry(client, *args, **kwargs):
return client.create_network_acl_entry(*args, **kwargs)
def create_network_acl_entry(params, client, module):
try:
if not module.check_mode:
_create_network_acl_entry(client, **params)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
def _create_tags(client, *args, **kwargs):
return client.create_tags(*args, **kwargs)
def create_tags(nacl_id, client, module):
try:
delete_tags(nacl_id, client, module)
if not module.check_mode:
_create_tags(client, Resources=[nacl_id], Tags=load_tags(module))
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
@AWSRetry.jittered_backoff()
def _delete_network_acl(client, *args, **kwargs):
return client.delete_network_acl(*args, **kwargs)
def delete_network_acl(nacl_id, client, module):
try:
if not module.check_mode:
_delete_network_acl(client, NetworkAclId=nacl_id)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
def _delete_network_acl_entry(client, *args, **kwargs):
return client.delete_network_acl_entry(*args, **kwargs)
def delete_network_acl_entry(params, client, module):
try:
if not module.check_mode:
_delete_network_acl_entry(client, **params)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
def _delete_tags(client, *args, **kwargs):
return client.delete_tags(*args, **kwargs)
def delete_tags(nacl_id, client, module):
try:
if not module.check_mode:
_delete_tags(client, Resources=[nacl_id])
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
@AWSRetry.jittered_backoff()
def _describe_network_acls(client, **kwargs):
return client.describe_network_acls(**kwargs)
@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
def _describe_network_acls_retry_missing(client, **kwargs):
return client.describe_network_acls(**kwargs)
def describe_acl_associations(subnets, client, module):
if not subnets:
return []
try:
results = _describe_network_acls_retry_missing(client, Filters=[
{'Name': 'association.subnet-id', 'Values': subnets}
])
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
associations = results['NetworkAcls'][0]['Associations']
return [a['NetworkAclAssociationId'] for a in associations if a['SubnetId'] in subnets]
def describe_network_acl(client, module):
try:
if module.params.get('nacl_id'):
nacl = _describe_network_acls(client, Filters=[
{'Name': 'network-acl-id', 'Values': [module.params.get('nacl_id')]}
])
else:
nacl = _describe_network_acls(client, Filters=[
{'Name': 'tag:Name', 'Values': [module.params.get('name')]}
])
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
return nacl
def find_acl_by_id(nacl_id, client, module):
try:
return _describe_network_acls_retry_missing(client, NetworkAclIds=[nacl_id])
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
def find_default_vpc_nacl(vpc_id, client, module):
try:
response = _describe_network_acls_retry_missing(client, Filters=[
{'Name': 'vpc-id', 'Values': [vpc_id]}])
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
nacls = response['NetworkAcls']
return [n['NetworkAclId'] for n in nacls if n['IsDefault'] is True]
def find_subnet_ids_by_nacl_id(nacl_id, client, module):
try:
results = _describe_network_acls_retry_missing(client, Filters=[
{'Name': 'association.network-acl-id', 'Values': [nacl_id]}
])
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
if results['NetworkAcls']:
associations = results['NetworkAcls'][0]['Associations']
return [s['SubnetId'] for s in associations if s['SubnetId']]
else:
return []
@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
def _replace_network_acl_association(client, *args, **kwargs):
return client.replace_network_acl_association(*args, **kwargs)
def replace_network_acl_association(nacl_id, subnets, client, module):
params = dict()
params['NetworkAclId'] = nacl_id
for association in describe_acl_associations(subnets, client, module):
params['AssociationId'] = association
try:
if not module.check_mode:
_replace_network_acl_association(client, **params)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
def _replace_network_acl_entry(client, *args, **kwargs):
return client.replace_network_acl_entry(*args, **kwargs)
def replace_network_acl_entry(entries, Egress, nacl_id, client, module):
for entry in entries:
params = entry
params['NetworkAclId'] = nacl_id
try:
if not module.check_mode:
_replace_network_acl_entry(client, **params)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
@AWSRetry.jittered_backoff(catch_extra_error_codes=['InvalidNetworkAclID.NotFound'])
def _replace_network_acl_association(client, *args, **kwargs):
return client.replace_network_acl_association(*args, **kwargs)
def restore_default_acl_association(params, client, module):
try:
if not module.check_mode:
_replace_network_acl_association(client, **params)
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
@AWSRetry.jittered_backoff()
def _describe_subnets(client, *args, **kwargs):
return client.describe_subnets(*args, **kwargs)
def subnets_to_associate(nacl, client, module):
params = list(module.params.get('subnets'))
if not params:
return []
all_found = []
if any(x.startswith("subnet-") for x in params):
try:
subnets = _describe_subnets(client, Filters=[
{'Name': 'subnet-id', 'Values': params}])
all_found.extend(subnets.get('Subnets', []))
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
if len(params) != len(all_found):
try:
subnets = _describe_subnets(client, Filters=[
{'Name': 'tag:Name', 'Values': params}])
all_found.extend(subnets.get('Subnets', []))
except botocore.exceptions.ClientError as e:
module.fail_json_aws(e)
return list(set(s['SubnetId'] for s in all_found if s.get('SubnetId')))
def main():
argument_spec = dict(
vpc_id=dict(),
name=dict(),
nacl_id=dict(),
subnets=dict(required=False, type='list', default=list(), elements='str'),
tags=dict(required=False, type='dict'),
ingress=dict(required=False, type='list', default=list(), elements='list'),
egress=dict(required=False, type='list', default=list(), elements='list'),
state=dict(default='present', choices=['present', 'absent']),
)
module = AnsibleAWSModule(argument_spec=argument_spec,
supports_check_mode=True,
required_one_of=[['name', 'nacl_id']],
required_if=[['state', 'present', ['vpc_id']]])
state = module.params.get('state').lower()
client = module.client('ec2')
invocations = {
"present": setup_network_acl,
"absent": remove_network_acl
}
(changed, results) = invocations[state](client, module)
module.exit_json(changed=changed, nacl_id=results)
if __name__ == '__main__':
main()
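# Illustrative playbook usage (a sketch only: the short module name
# `ec2_vpc_nacl` is an assumption, and the VPC/subnet IDs are made-up
# placeholders; the parameters come from the argument spec above):
#
#   - name: Ensure a network ACL is present and attached to two subnets
#     ec2_vpc_nacl:
#       vpc_id: vpc-0123456789abcdef0
#       name: prod-nacl
#       subnets: [subnet-aaaa1111, subnet-bbbb2222]
#       tags:
#         Environment: production
#       state: present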
|
the-stack_106_24546 | #! /usr/bin/env python
# Copyright 2019 Vimal Manohar
# Apache 2.0.
"""This script converts an RTTM with
speaker info into kaldi utt2spk and segments"""
import argparse
def get_args():
parser = argparse.ArgumentParser(
description="""This script converts an RTTM with
speaker info into kaldi utt2spk and segments"""
)
parser.add_argument(
"--use-reco-id-as-spkr",
type=str,
choices=["true", "false"],
default="false",
help="Use the recording ID based on RTTM and "
"reco2file_and_channel as the speaker",
)
parser.add_argument(
"--append-reco-id-to-spkr",
type=str,
choices=["true", "false"],
default="false",
help="Append recording ID to the speaker ID",
)
parser.add_argument(
"rttm_file",
type=str,
help="""Input RTTM file.
The format of the RTTM file is
<type> <file-id> <channel-id> <begin-time> """
"""<end-time> <NA> <NA> <speaker> <conf>""",
)
parser.add_argument(
"reco2file_and_channel",
type=str,
help="""Input reco2file_and_channel.
The format is <recording-id> <file-id> <channel-id>.""",
)
parser.add_argument("utt2spk", type=str, help="Output utt2spk file")
parser.add_argument("segments", type=str, help="Output segments file")
args = parser.parse_args()
args.use_reco_id_as_spkr = bool(args.use_reco_id_as_spkr == "true")
args.append_reco_id_to_spkr = bool(args.append_reco_id_to_spkr == "true")
if args.use_reco_id_as_spkr:
if args.append_reco_id_to_spkr:
raise Exception(
"""Appending recording ID to speaker does
not make sense when using --use-reco-id-as-spkr=true"""
)
return args
def main():
args = get_args()
file_and_channel2reco = {}
utt2spk = {}
segments = {}
for line in open(args.reco2file_and_channel):
parts = line.strip().split()
file_and_channel2reco[(parts[1], parts[2])] = parts[0]
utt2spk_writer = open(args.utt2spk, "w")
segments_writer = open(args.segments, "w")
for line in open(args.rttm_file):
parts = line.strip().split()
if parts[0] != "SPEAKER":
continue
file_id = parts[1]
channel = parts[2]
try:
reco = file_and_channel2reco[(file_id, channel)]
except KeyError as e:
raise Exception(
"Could not find recording with "
"(file_id, channel) "
"= ({0},{1}) in {2}: {3}\n".format(
file_id, channel, args.reco2file_and_channel, str(e)
)
)
start_time = float(parts[3])
end_time = start_time + float(parts[4])
if args.use_reco_id_as_spkr:
spkr = reco
else:
if args.append_reco_id_to_spkr:
spkr = parts[7] + "_" + reco
else:
spkr = parts[7]
st = int(start_time * 100)
end = int(end_time * 100)
utt = "{0}_{1:06d}_{2:06d}".format(spkr, st, end)
utt2spk[utt] = spkr
segments[utt] = (reco, start_time, end_time)
for uttid_id in sorted(utt2spk):
utt2spk_writer.write("{0} {1}\n".format(uttid_id, utt2spk[uttid_id]))
segments_writer.write(
"{0} {1} {2:7.2f} {3:7.2f}\n".format(
uttid_id,
segments[uttid_id][0],
segments[uttid_id][1],
segments[uttid_id][2],
)
)
if __name__ == "__main__":
main()
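# Worked example (file names and speaker labels are illustrative): given a
# reco2file_and_channel line "rec1 rec1 1" and the RTTM line
#   SPEAKER rec1 1 0.50 1.00 <NA> <NA> spk_a <NA>
# the default options (speaker taken from field 8) write
#   utt2spk:   spk_a_000050_000150 spk_a
#   segments:  spk_a_000050_000150 rec1    0.50    1.50
# i.e. the utterance id is <speaker>_<start*100>_<end*100>, where
# end = start + duration from the RTTM entry.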
|
the-stack_106_24547 | # Copyright 2019 The TensorTrade Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from enum import Enum
from typing import Callable
from tensortrade.base import TimedIdentifiable
from tensortrade.base.exceptions import InvalidOrderQuantity, InsufficientFundsForAllocation
from tensortrade.instruments import Quantity
from tensortrade.trades import Trade, TradeSide, TradeType
class OrderStatus(Enum):
PENDING = 'pending'
OPEN = 'open'
CANCELLED = 'cancelled'
PARTIALLY_FILLED = 'partially_filled'
FILLED = 'filled'
def __str__(self):
return self.value
class Order(TimedIdentifiable):
"""
Responsibilities of the Order:
1. Confirming its own validity.
2. Tracking its trades and reporting it back to the broker.
3. Managing movement of quantities from order to order.
4. Generating the next order in its path given that there is a
'OrderSpec' for how to make the next order.
5. Managing its own state changes when it can.
"""
def __init__(self,
step: int,
side: TradeSide,
trade_type: TradeType,
pair: 'TradingPair',
quantity: 'Quantity',
portfolio: 'Portfolio',
price: float,
criteria: Callable[['Order', 'Exchange'], bool] = None,
path_id: str = None):
if quantity.size == 0:
raise InvalidOrderQuantity(quantity)
self.step = step
self.side = side
self.type = trade_type
self.pair = pair
self.quantity = quantity
self.portfolio = portfolio
self.price = price
self.criteria = criteria
self.path_id = path_id or self.id
self.status = OrderStatus.PENDING
self.filled_size = 0
self.remaining_size = self.size
self._specs = []
self._listeners = []
self._trades = []
self.quantity.lock_for(self.path_id)
@property
def size(self) -> float:
if self.pair.base is self.quantity.instrument:
return round(self.quantity.size, self.pair.base.precision)
return round(self.quantity.size * self.price, self.pair.base.precision)
@property
def price(self) -> float:
return self._price
@price.setter
def price(self, price: float):
self._price = price
@property
def base_instrument(self) -> 'Instrument':
return self.pair.base
@property
def quote_instrument(self) -> 'Instrument':
return self.pair.quote
@property
def trades(self):
return self._trades
@property
def is_buy(self) -> bool:
return self.side == TradeSide.BUY
@property
def is_sell(self) -> bool:
return self.side == TradeSide.SELL
@property
def is_limit_order(self) -> bool:
return self.type == TradeType.LIMIT
@property
def is_market_order(self) -> bool:
return self.type == TradeType.MARKET
def is_executable_on(self, exchange: 'Exchange'):
return self.criteria is None or self.criteria(self, exchange)
def is_complete(self):
return self.remaining_size == 0
def add_order_spec(self, order_spec: 'OrderSpec') -> 'Order':
self._specs = [order_spec] + self._specs
return self
def attach(self, listener: 'OrderListener'):
self._listeners += [listener]
def detach(self, listener: 'OrderListener'):
self._listeners.remove(listener)
def execute(self, exchange: 'Exchange'):
self.status = OrderStatus.OPEN
instrument = self.pair.base if self.side == TradeSide.BUY else self.pair.quote
wallet = self.portfolio.get_wallet(exchange.id, instrument=instrument)
if self.path_id not in wallet.locked.keys():
try:
wallet -= self.size * instrument
except InsufficientFundsForAllocation:
size = wallet.balance.size
wallet -= size * instrument
self.quantity = Quantity(instrument, size, path_id=self.path_id)
wallet += self.quantity
if self.portfolio.order_listener:
self.attach(self.portfolio.order_listener)
for listener in self._listeners or []:
listener.on_execute(self, exchange)
exchange.execute_order(self, self.portfolio)
def fill(self, exchange: 'Exchange', trade: Trade):
self.status = OrderStatus.PARTIALLY_FILLED
fill_size = round(trade.size + trade.commission.size, self.pair.base.precision)
self.filled_size += fill_size
self.remaining_size -= fill_size
for listener in self._listeners or []:
listener.on_fill(self, exchange, trade)
def complete(self, exchange: 'Exchange') -> 'Order':
self.status = OrderStatus.FILLED
order = None
if self._specs:
order_spec = self._specs.pop()
order = order_spec.create_order(self, exchange)
for listener in self._listeners or []:
listener.on_complete(self, exchange)
self._listeners = []
return order or self.release()
def cancel(self, exchange: 'Exchange'):
self.status = OrderStatus.CANCELLED
for listener in self._listeners or []:
listener.on_cancel(self, exchange)
self._listeners = []
self.release()
def release(self):
for wallet in self.portfolio.wallets:
wallet.deallocate(self.path_id)
def to_dict(self):
return {
"id": self.id,
"step": self.step,
"status": self.status,
"type": self.type,
"side": self.side,
"pair": self.pair,
"quantity": self.quantity,
"size": self.size,
"filled_size": self.filled_size,
"price": self.price,
"criteria": self.criteria,
"path_id": self.path_id
}
def to_json(self):
return {
"id": str(self.id),
"step": self.step,
"status": str(self.status),
"type": str(self.type),
"side": str(self.side),
"base_symbol": str(self.pair.base.symbol),
"quote_symbol": str(self.pair.quote.symbol),
"quantity": str(self.quantity),
"size": str(self.size),
"filled_size": str(self.filled_size),
"price": str(self.price),
"criteria": str(self.criteria),
"path_id": str(self.path_id)
}
def __iadd__(self, recipe: 'OrderSpec') -> 'Order':
return self.add_order_spec(recipe)
def __str__(self):
data = ['{}={}'.format(k, v) for k, v in self.to_dict().items()]
return '<{}: {}>'.format(self.__class__.__name__, ', '.join(data))
def __repr__(self):
return str(self)
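# Status lifecycle implemented by this class (descriptive note): an Order starts
# PENDING, becomes OPEN when execute() locks funds in the exchange wallet,
# moves to PARTIALLY_FILLED on each fill(), and ends either FILLED via
# complete() (which may spawn a follow-up order from an attached OrderSpec) or
# CANCELLED via cancel(); both terminal paths deallocate the locked quantities
# through release().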
|
the-stack_106_24550 | """
This module is an example of a barebones numpy reader plugin for napari.
It implements the ``napari_get_reader`` hook specification, (to create
a reader plugin) but your plugin may choose to implement any of the hook
specifications offered by napari.
see: https://napari.org/docs/plugins/hook_specifications.html
Replace code below accordingly. For complete documentation see:
https://napari.org/docs/plugins/for_plugin_developers.html
"""
import numpy as np
import dask.array as da
import dask
from napari_plugin_engine import napari_hook_implementation
from aicsimageio import AICSImage, imread
color_maps = ["bop purple", "bop orange", "bop blue", "green", "blue"]
SUPPORTED_FORMATS = ('.lif', '.czi')
@napari_hook_implementation
def napari_get_reader(path):
"""A basic implementation of the napari_get_reader hook specification.
Parameters
----------
path : str or list of str
Path to file, or list of paths.
Returns
-------
function or None
If the path is a recognized format, return a function that accepts the
same path or list of paths, and returns a list of layer data tuples.
"""
# we are not loading multiple files at a time
if isinstance(path, str) and path.endswith(SUPPORTED_FORMATS):
return reader_function
return None
def reader_function(path):
"""Take a path or list of paths and return a list of LayerData tuples.
Readers are expected to return data as a list of tuples, where each tuple
is (data, [add_kwargs, [layer_type]]), "add_kwargs" and "layer_type" are
both optional.
Parameters
----------
path : str or list of str
Path to file, or list of paths.
Returns
-------
layer_data : list of tuples
A list of LayerData tuples where each tuple in the list contains
(data, metadata, layer_type), where data is a numpy array, metadata is
a dict of keyword arguments for the corresponding viewer.add_* method
in napari, and layer_type is a lower-case string naming the type of layer.
Both "meta", and "layer_type" are optional. napari will default to
layer_type=="image" if not provided
"""
print("reading file ", path)
aics_img = AICSImage(path)
    # dims are normally in "STCZYX"
number_of_channels = aics_img.size_c
number_of_time_points = aics_img.size_t
nz = aics_img.size_z
ny = aics_img.size_y
nx = aics_img.size_x
name_of_channels = aics_img.get_channel_names()
pixel_x, pixel_y, pixel_z = aics_img.get_physical_pixel_size()
scale = [1, pixel_z, pixel_y, pixel_x]
print("number_of_channels", number_of_channels)
print("number_of_time_points", number_of_time_points)
print("name_of_channels", name_of_channels)
print("scale", scale)
print("nz", nz)
layer_list = []
channel_dict = {}
# for channel in name_of_channels:
# channel_dict[channel] = {}
if number_of_channels > 1:
print("number_of_channels > 1")
for cindex , channel_name in enumerate(name_of_channels):
if number_of_time_points > 1:
print("number_of_time_points > 1")
if nz > 1:
arr = da.stack(
[
aics_img.get_image_dask_data('ZYX', S=0, C=cindex, T=tindex)
for tindex in range(number_of_time_points)
]
)
else:
arr = da.stack(
[
aics_img.get_image_dask_data('YX', S=0, C=cindex, T=tindex, Z=0)
for tindex in range(number_of_time_points)
]
)
scale = [1, pixel_y, pixel_x]
else :
if nz > 1:
arr = aics_img.get_image_dask_data('ZYX', S=0, C=cindex, T=0)
scale = [pixel_z, pixel_y, pixel_x]
else:
print("number_of_time_points < 1")
print("nz < 1")
print("cindex: ", cindex)
print("channel_name: ", channel_name)
arr = aics_img.get_image_dask_data('YX', S=0, C=cindex, T=0, Z=0)
scale = [pixel_y, pixel_x]
print("arr.shape",arr.shape)
channel_dict[channel_name] = {
"data" : dask.optimize(arr)[0],
"colormap": color_maps[cindex % len(color_maps)]
}
else:
if number_of_time_points > 1:
if nz > 1:
arr = da.stack(
[
aics_img.get_image_dask_data('ZYX', S=0, C=0, T=tindex)
for tindex in range(number_of_time_points)
]
)
else:
arr = da.stack(
[
aics_img.get_image_dask_data('YX', S=0, C=0, T=tindex, Z=0)
for tindex in range(number_of_time_points)
]
)
scale = [1, pixel_y, pixel_x]
else :
if nz > 1:
arr = aics_img.get_image_dask_data('ZYX', S=0, C=0, T=0)
scale = [pixel_z, pixel_y, pixel_x]
else:
arr = aics_img.get_image_dask_data('YX', S=0, C=0, T=0, Z=0)
scale = [pixel_y, pixel_x]
        # Single-channel image: key the layer by the first reported channel name.
        channel_name = name_of_channels[0] if name_of_channels else "channel 0"
        channel_dict[channel_name] = {
"data" : dask.optimize(arr)[0],
"colormap": color_maps[0]
}
for channel_name, channel in channel_dict.items():
print("creating layer channel_name", channel_name)
add_kwargs = {
"name": channel_name,
"blending" : 'additive',
"rendering" : "mip",
"scale": scale,
"colormap": channel['colormap']
}
print("channel['data'].shape", channel['data'].shape)
layer_list.append(
(
channel['data'], #data
add_kwargs, # kwargs
"image" # layer type
)
)
return layer_list
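# Rough usage sketch outside napari's plugin machinery (the file name is a
# placeholder and a real .lif/.czi file is required for this to run):
#
#   reader = napari_get_reader("example.czi")
#   if reader is not None:
#       layers = reader("example.czi")
#       # each element is (dask_array,
#       #                  {"name", "blending", "rendering", "scale", "colormap"},
#       #                  "image")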
|
the-stack_106_24551 | # MIT License
# Copyright (c) 2021 AWS Cloud Community LPU
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from datetime import datetime
import time
import secrets as keys
import random
from os import path
import feedparser
import tweepy
import constants as C
def message_creator(entry) -> str:
"""Returns news in a proper format
Keyword arguments:
    entry : a particular entry of the RSS feed used for extracting data.
"""
cleanr = re.compile(
'<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')
summary = re.sub(cleanr, '', entry.summary)
title_length = len(entry.title) + 40
summary_length = 280 - title_length
message = entry.title + "\n\n" + \
summary[:summary_length] + "... " + entry.link
return message
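# Worked example of the budget arithmetic above (title length is made up): a
# 60-character title gives title_length = 100, so the summary is cut to
# 280 - 100 = 180 characters before "... " and the entry link are appended; the
# fixed 40-character margin appears to be headroom for the "\n\n", the "... "
# and the (t.co-shortened) link.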
def check_time():
while True:
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
time.sleep(30)
if str(current_time) in ("06:00:00", "06:00:01", "06:00:02"):
time.sleep(1)
return "morning"
if str(current_time) in ("14:00:00", "14:00:01", "14:00:02"):
time.sleep(1)
return "afternoon"
if str(current_time) in ("22:00:00", "22:00:01", "22:00:02"):
time.sleep(1)
return "night"
def feed_parser():
"""Parses feed of AWS What's new and gives non duplicate news.
"""
if not path.exists(C.TITLE_STORE):
open(C.TITLE_STORE, 'a').close()
news_feed = feedparser.parse(C.AWS_FEED_URL)
with open(C.TITLE_STORE, "r") as title_file:
line_titles = title_file.readlines()
for entry in news_feed.entries:
flag = 0
for line_title in line_titles:
if str(entry.title)+"\n" == line_title:
flag = 1
if flag == 0:
return entry
return news_feed.entries[0]
def main():
auth = tweepy.OAuthHandler(keys.API_KEY, keys.API_SECRET_KEY)
auth.set_access_token(keys.ACCESS_TOKEN, keys.ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
while True:
entry = feed_parser()
time_status = check_time()
print(entry.title, file=open(C.TITLE_STORE, 'a+'))
message = message_creator(entry)
print(len(message))
print(message)
api.update_status(message)
if __name__ == "__main__":
main()
|
the-stack_106_24554 | # -*- coding: utf-8 -*-
"""Version information for PyKEEN."""
import os
import sys
from functools import lru_cache
from subprocess import CalledProcessError, check_output # noqa: S404
from typing import Optional
__all__ = [
'VERSION',
'get_version',
'get_git_hash',
'get_git_branch',
'env',
]
VERSION = '1.4.1-dev'
@lru_cache(maxsize=2)
def get_git_hash(terse: bool = True) -> str:
"""Get the PyKEEN git hash.
:return:
The git hash, equals 'UNHASHED' if encountered CalledProcessError, signifying that the
code is not installed in development mode.
"""
rv = _run('git', 'rev-parse', 'HEAD')
if rv is None:
return 'UNHASHED'
if terse:
return rv[:8]
return rv
@lru_cache(maxsize=1)
def get_git_branch() -> Optional[str]:
"""Get the PyKEEN branch, if installed from git in editable mode.
:return:
Returns the name of the current branch, or None if not installed in development mode.
"""
return _run('git', 'branch', '--show-current')
def _run(*args: str) -> Optional[str]:
with open(os.devnull, 'w') as devnull:
try:
ret = check_output( # noqa: S603,S607
args,
cwd=os.path.dirname(__file__),
stderr=devnull,
)
except CalledProcessError:
return None
else:
return ret.strip().decode('utf-8')
def get_version(with_git_hash: bool = False) -> str:
"""Get the PyKEEN version string, including a git hash.
:param with_git_hash:
If set to True, the git hash will be appended to the version.
:return: The PyKEEN version as well as the git hash, if the parameter with_git_hash was set to true.
"""
return f'{VERSION}-{get_git_hash(terse=True)}' if with_git_hash else VERSION
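# Example output (the hash is illustrative and depends on the checkout):
#   >>> get_version()
#   '1.4.1-dev'
#   >>> get_version(with_git_hash=True)
#   '1.4.1-dev-1a2b3c4d'   # or '1.4.1-dev-UNHASHED' outside a git working copy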
def env_table(tablefmt='github', headers=('Key', 'Value')) -> str:
"""Generate a table describing the environment in which PyKEEN is being run."""
import torch
import platform
from tabulate import tabulate
import getpass
import time
rows = [
('OS', os.name),
('Platform', platform.system()),
('Release', platform.release()),
('User', getpass.getuser()),
('Time', str(time.asctime())),
('Python', f'{sys.version_info[0]}.{sys.version_info[1]}.{sys.version_info[2]}'),
('PyKEEN', get_version()),
('PyKEEN Hash', get_git_hash()),
('PyKEEN Branch', get_git_branch()),
('PyTorch', torch.__version__),
('CUDA Available?', str(torch.cuda.is_available()).lower()),
('CUDA Version', torch.version.cuda or 'N/A'),
('cuDNN Version', torch.backends.cudnn.version() or 'N/A'),
]
return tabulate(rows, tablefmt=tablefmt, headers=headers)
def env_html():
"""Output the environment table as HTML for usage in Jupyter."""
from IPython.display import HTML
return HTML(env_table(tablefmt='html'))
def env(file=None):
"""Print the env or output as HTML if in Jupyter.
:param: The file to print to if not in a Jupyter setting. Defaults to sys.stdout
:returns: A :class:`IPython.display.HTML` if in a Jupyter notebook setting, otherwise none.
"""
if _in_jupyter():
return env_html()
else:
print(env_table(), file=file)
def _in_jupyter() -> bool:
try:
get_ipython = sys.modules['IPython'].get_ipython # type: ignore
if 'IPKernelApp' not in get_ipython().config:
raise ImportError("console")
if 'VSCODE_PID' in os.environ:
raise ImportError("vscode")
except Exception:
return False
else:
return True
if __name__ == '__main__':
print(get_version(with_git_hash=True))
|
the-stack_106_24555 | import os
import logging
import subprocess
from mlflow.exceptions import MlflowException
from mlflow.utils.rest_utils import MlflowHostCreds
from databricks_cli.configure import provider
from mlflow.utils._spark_utils import _get_active_spark_session
from mlflow.utils.uri import get_db_info_from_uri
_logger = logging.getLogger(__name__)
def _get_dbutils():
try:
import IPython
ip_shell = IPython.get_ipython()
if ip_shell is None:
raise _NoDbutilsError
return ip_shell.ns_table["user_global"]["dbutils"]
except ImportError:
raise _NoDbutilsError
except KeyError:
raise _NoDbutilsError
class _NoDbutilsError(Exception):
pass
def _get_java_dbutils():
dbutils = _get_dbutils()
return dbutils.notebook.entry_point.getDbutils()
def _get_command_context():
return _get_java_dbutils().notebook().getContext()
def _get_extra_context(context_key):
return _get_command_context().extraContext().get(context_key).get()
def _get_context_tag(context_tag_key):
tag_opt = _get_command_context().tags().get(context_tag_key)
if tag_opt.isDefined():
return tag_opt.get()
else:
return None
def acl_path_of_acl_root():
try:
return _get_command_context().aclPathOfAclRoot().get()
except Exception: # pylint: disable=broad-except
return _get_extra_context("aclPathOfAclRoot")
def _get_property_from_spark_context(key):
try:
from pyspark import TaskContext # pylint: disable=import-error
task_context = TaskContext.get()
if task_context:
return task_context.getLocalProperty(key)
except Exception: # pylint: disable=broad-except
return None
def is_databricks_default_tracking_uri(tracking_uri):
return tracking_uri.lower().strip() == "databricks"
def is_in_databricks_notebook():
if _get_property_from_spark_context("spark.databricks.notebook.id") is not None:
return True
try:
return acl_path_of_acl_root().startswith("/workspace")
except Exception: # pylint: disable=broad-except
return False
def is_in_databricks_job():
try:
return _get_context_tag("jobId") is not None and _get_context_tag("idInJob") is not None
except Exception: # pylint: disable=broad-except
return False
def is_dbfs_fuse_available():
with open(os.devnull, "w") as devnull_stderr, open(os.devnull, "w") as devnull_stdout:
try:
return (
subprocess.call(
["mountpoint", "/dbfs"], stderr=devnull_stderr, stdout=devnull_stdout
)
== 0
)
except Exception: # pylint: disable=broad-except
return False
def is_in_cluster():
try:
spark_session = _get_active_spark_session()
return (
spark_session is not None
and spark_session.conf.get("spark.databricks.clusterUsageTags.clusterId") is not None
)
except Exception: # pylint: disable=broad-except
return False
def get_notebook_id():
"""Should only be called if is_in_databricks_notebook is true"""
notebook_id = _get_property_from_spark_context("spark.databricks.notebook.id")
if notebook_id is not None:
return notebook_id
acl_path = acl_path_of_acl_root()
if acl_path.startswith("/workspace"):
return acl_path.split("/")[-1]
return None
def get_notebook_path():
"""Should only be called if is_in_databricks_notebook is true"""
path = _get_property_from_spark_context("spark.databricks.notebook.path")
if path is not None:
return path
try:
return _get_command_context().notebookPath().get()
except Exception: # pylint: disable=broad-except
return _get_extra_context("notebook_path")
def get_cluster_id():
spark_session = _get_active_spark_session()
if spark_session is None:
return None
return spark_session.conf.get("spark.databricks.clusterUsageTags.clusterId")
def get_job_id():
"""Should only be called if is_in_databricks_job is true"""
return _get_context_tag("jobId")
def get_job_run_id():
"""Should only be called if is_in_databricks_job is true"""
return _get_context_tag("idInJob")
def get_job_type():
"""Should only be called if is_in_databricks_job is true"""
return _get_context_tag("jobTaskType")
def get_webapp_url():
"""Should only be called if is_in_databricks_notebook is true"""
url = _get_property_from_spark_context("spark.databricks.api.url")
if url is not None:
return url
try:
return _get_command_context().apiUrl().get()
except Exception: # pylint: disable=broad-except
return _get_extra_context("api_url")
def get_workspace_id():
try:
return _get_command_context().workspaceId().get()
except Exception: # pylint: disable=broad-except
return _get_context_tag("orgId")
def get_browser_hostname():
try:
return _get_command_context().browserHostName().get()
except Exception: # pylint: disable=broad-except
return _get_context_tag("browserHostName")
def get_workspace_info_from_dbutils():
dbutils = _get_dbutils()
if dbutils:
workspace_host = get_browser_hostname()
workspace_id = get_workspace_id()
return workspace_host, workspace_id
return None, None
def get_workspace_info_from_databricks_secrets(tracking_uri):
profile, key_prefix = get_db_info_from_uri(tracking_uri)
if key_prefix:
dbutils = _get_dbutils()
if dbutils:
workspace_id = dbutils.secrets.get(scope=profile, key=key_prefix + "-workspace-id")
workspace_host = dbutils.secrets.get(scope=profile, key=key_prefix + "-host")
return workspace_host, workspace_id
return None, None
def _fail_malformed_databricks_auth(profile):
raise MlflowException(
"Got malformed Databricks CLI profile '%s'. Please make sure the "
"Databricks CLI is properly configured as described at "
"https://github.com/databricks/databricks-cli." % profile
)
def get_databricks_host_creds(server_uri=None):
"""
Reads in configuration necessary to make HTTP requests to a Databricks server. This
uses the Databricks CLI's ConfigProvider interface to load the DatabricksConfig object.
If no Databricks CLI profile is found corresponding to the server URI, this function
will attempt to retrieve these credentials from the Databricks Secret Manager. For that to work,
the server URI will need to be of the following format: "databricks://scope:prefix". In the
Databricks Secret Manager, we will query for a secret in the scope "<scope>" for secrets with
keys of the form "<prefix>-host" and "<prefix>-token". Note that this prefix *cannot* be empty
if trying to authenticate with this method. If found, those host credentials will be used. This
method will throw an exception if sufficient auth cannot be found.
:param server_uri: A URI that specifies the Databricks profile you want to use for making
requests.
:return: :py:class:`mlflow.rest_utils.MlflowHostCreds` which includes the hostname and
authentication information necessary to talk to the Databricks server.
"""
profile, path = get_db_info_from_uri(server_uri)
if not hasattr(provider, "get_config"):
_logger.warning(
"Support for databricks-cli<0.8.0 is deprecated and will be removed"
" in a future version."
)
config = provider.get_config_for_profile(profile)
elif profile:
config = provider.ProfileConfigProvider(profile).get_config()
else:
config = provider.get_config()
# if a path is specified, that implies a Databricks tracking URI of the form:
# databricks://profile-name/path-specifier
if (not config or not config.host) and path:
dbutils = _get_dbutils()
if dbutils:
# Prefix differentiates users and is provided as path information in the URI
key_prefix = path
host = dbutils.secrets.get(scope=profile, key=key_prefix + "-host")
token = dbutils.secrets.get(scope=profile, key=key_prefix + "-token")
if host and token:
config = provider.DatabricksConfig.from_token(
host=host, token=token, insecure=False
)
if not config or not config.host:
_fail_malformed_databricks_auth(profile)
insecure = hasattr(config, "insecure") and config.insecure
if config.username is not None and config.password is not None:
return MlflowHostCreds(
config.host,
username=config.username,
password=config.password,
ignore_tls_verification=insecure,
)
elif config.token:
return MlflowHostCreds(config.host, token=config.token, ignore_tls_verification=insecure)
_fail_malformed_databricks_auth(profile)
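# Illustrative lookup (scope and prefix names are placeholders): for a tracking
# URI such as "databricks://my_scope:my_prefix" with no usable CLI profile, the
# function above reads the secrets "my_prefix-host" and "my_prefix-token" from
# the Databricks secret scope "my_scope" and builds MlflowHostCreds from them.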
|
the-stack_106_24556 | # encoding: utf-8
# Sample-based Monte Carlo Denoising using a Kernel-Splatting Network
# Michaël Gharbi Tzu-Mao Li Miika Aittala Jaakko Lehtinen Frédo Durand
# Siggraph 2019
#
# Copyright (c) 2019 Michaël Gharbi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Material classes for the PBRT scene generator."""
__all__ = ["MatteMaterial", "UberMaterial", "MirrorMaterial", "GlassMaterial",
"MetalMaterial", "PlasticMaterial", "SubstrateMaterial"]
class Material():
"""Base material.
Args:
id(str): unique id to identify the material.
bump_texture(Texture or None): float texture to use as bump map.
"""
def __init__(self, id=None, bump_texture=None):
self.id = id
self.textures = []
self.bump_texture = bump_texture
if self.bump_texture is not None:
if self.bump_texture.type != "float":
# LOG.error("Bump texture should be of type `float`, got %s",
# self.bump_texture.type)
raise RuntimeError("Incorrect bump type error")
self.textures.append(bump_texture)
def pbrt(self):
"""Outputs PBRTv2 string representation.
Returns:
str: pbrt format.
"""
out = ""
for tex in self.textures:
out += tex.pbrt()
if self.id:
return out + 'MakeNamedMaterial "{}" "string type"'.format(self.id)
else:
return out + "Material"
def suffix(self):
"""Generic PBRT parameters shared by all materials.
Returns:
(str): the pbrt string suffix parameterizing the texture.
"""
out = " "
if self.bump_texture is not None:
out += '"texture bumpmap" "{}"\n'.format(self.bump_texture.id)
return out
class MatteMaterial(Material):
"""PBRT Matte material.
Args:
id(str): unique id to identify the material.
diffuse(list of 3 floats or None): color of the diffuse component.
diffuse_texture(sbmc.scene_generator.Texture): texture for the diffuse
component. Overrides `diffuse` if provided.
bump_texture(Texture or None): float texture to use as bump map.
"""
def __init__(self, id=None, diffuse=None, diffuse_texture=None,
bump_texture=None):
super(MatteMaterial, self).__init__(id=id, bump_texture=bump_texture)
self.diffuse = None
self.diffuse_texture = None
if diffuse_texture is None:
if diffuse is None:
self.diffuse = [1, 1, 1]
else:
self.diffuse = diffuse
else:
self.diffuse_texture = diffuse_texture
self.textures.append(diffuse_texture)
def pbrt(self):
out = super(MatteMaterial, self).pbrt()
out += ' "matte" '
if self.diffuse_texture is not None:
out += '"texture Kd" "{}"\n'.format(self.diffuse_texture.id)
else:
out += '"rgb Kd" [{} {} {}]\n'.format(*self.diffuse)
out += super(MatteMaterial, self).suffix()
return out
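# Example of the emitted string (id and colour are arbitrary):
#   MatteMaterial(id="red", diffuse=[0.8, 0.1, 0.1]).pbrt()
# renders roughly as
#   MakeNamedMaterial "red" "string type" "matte" "rgb Kd" [0.8 0.1 0.1]
# while omitting `id` produces an anonymous 'Material "matte" ...' directive.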
class UberMaterial(MatteMaterial):
"""PBRT Uber material.
Args:
id(str): unique id to identify the material.
diffuse(list of 3 floats or None): color of the diffuse component.
diffuse_texture(sbmc.scene_generator.Texture): texture for the diffuse
component. Overrides `diffuse` if provided.
glossy_reflection(float): intensity of the glossy reflection.
specular_reflection(float): intensity of the specular reflection.
roughness(float): roughness of the material.
index(float): index of refraction of the material.
opacity(float): opacity of the material (0 is transparent, 1 opaque).
bump_texture(Texture or None): float texture to use as bump map.
"""
def __init__(self, id=None, diffuse=None, diffuse_texture=None,
glossy_reflection=0.25, specular_reflection=0,
roughness=0.1, index=1.5, opacity=1.0, bump_texture=None):
super(UberMaterial, self).__init__(
id=id, diffuse=diffuse, diffuse_texture=diffuse_texture,
bump_texture=bump_texture)
self.glossy_reflection = [glossy_reflection]*3
self.specular_reflection = [specular_reflection]*3
self.roughness = roughness
self.index = index
self.opacity = [opacity]*3
def pbrt(self):
out = super(UberMaterial, self).pbrt()
out = out.replace('"matte"', '"uber"').strip()
out += ' "rgb Ks" [{} {} {}]'.format(*self.glossy_reflection)
out += ' "rgb Kr" [{} {} {}]'.format(*self.specular_reflection)
out += ' "float roughness" [{}]'.format(self.roughness)
out += ' "float index" [{}]'.format(self.index)
out += ' "rgb opacity" [{} {} {}]'.format(*self.opacity)
out += super(UberMaterial, self).suffix()
out += '\n'
return out
class MirrorMaterial(Material):
"""PBRT Mirror material.
Args:
id(str): unique id to identify the material.
specular(list of 3 floats or None): color of the specular component.
specular_texture(sbmc.scene_generator.Texture): texture for the
specular component. Overrides `diffuse` if provided.
bump_texture(Texture or None): float texture to use as bump map.
"""
def __init__(self, id=None, specular=None, specular_texture=None,
bump_texture=None):
super(MirrorMaterial, self).__init__(id=id, bump_texture=bump_texture)
self.specular = None
self.specular_texture = None
if specular_texture is None:
if specular is None:
self.specular = [1, 1, 1]
else:
self.specular = specular
else:
self.specular_texture = specular_texture
self.textures.append(specular_texture)
def pbrt(self):
out = super(MirrorMaterial, self).pbrt()
out += ' "mirror" '
if self.specular_texture is not None:
out += '"texture Kr" "{}"\n'.format(self.specular_texture.id)
else:
out += '"rgb Kr" [{} {} {}]\n'.format(*self.specular)
out += super(MirrorMaterial, self).suffix()
return out
class GlassMaterial(Material):
"""PBRT Glass material.
Args:
id(str): unique id to identify the material.
reflection(list of 3 floats or None): color of the reflection
component.
reflection_texture(sbmc.scene_generator.Texture): texture for the
reflection component. Overrides `reflection` if provided.
transmission(list of 3 floats or None): color of the transmission
component. transmission_texture(sbmc.scene_generator.Texture):
texture for the transmission component. Overrides `transmission` if
provided.
index(float): index of refraction of the material.
bump_texture(Texture or None): float texture to use as bump map.
"""
def __init__(self, id=None, reflection=None, reflection_texture=None,
transmission=None, transmission_texture=None,
index=1.5, bump_texture=None):
super(GlassMaterial, self).__init__(id=id, bump_texture=bump_texture)
self.index = index
self.reflection = None
self.reflection_texture = None
self.transmission = None
self.transmission_texture = None
if reflection_texture is None:
if reflection is None:
self.reflection = [1, 1, 1]
else:
self.reflection = reflection
else:
self.reflection_texture = reflection_texture
self.textures.append(reflection_texture)
if transmission_texture is None:
if transmission is None:
self.transmission = [1, 1, 1]
else:
self.transmission = transmission
else:
self.transmission_texture = transmission_texture
self.textures.append(transmission_texture)
def pbrt(self):
out = super(GlassMaterial, self).pbrt()
out += ' "glass" '
out += ' "float index" [{}] '.format(self.index)
if self.reflection_texture is not None:
out += '"texture Kr" "{}"\n'.format(self.reflection_texture.id)
else:
out += '"rgb Kr" [{} {} {}]\n'.format(*self.reflection)
if self.transmission_texture is not None:
out += '"texture Kt" "{}"\n'.format(self.transmission_texture.id)
else:
out += '"rgb Kt" [{} {} {}]\n'.format(*self.transmission)
out += super(GlassMaterial, self).suffix()
return out
class MetalMaterial(Material):
"""PBRT Metal material.
Args:
id(str): unique id to identify the material.
eta(list of 3 floats or None): colored index of refraction
eta_texture(sbmc.scene_generator.Texture): texture for the colored
index of refraction. Overrides `eta` if provided.
k(list of 3 floats or None): colored absorption coefficient.
k_texture(sbmc.scene_generator.Texture): texture for the k
coefficient. Overrides `k` if provided.
roughness(float): roughness of the material.
bump_texture(Texture or None): float texture to use as bump map.
"""
def __init__(self, id=None, eta=None, eta_texture=None,
k=None, k_texture=None,
roughness=0.01, bump_texture=None):
super(MetalMaterial, self).__init__(id=id, bump_texture=bump_texture)
self.roughness = roughness
self.eta = None
self.eta_texture = None
self.k = None
self.k_texture = None
if eta_texture is None:
if eta is None:
self.eta = [0.6, 0.5, 0.4]
else:
self.eta = eta
else:
self.eta_texture = eta_texture
self.textures.append(eta_texture)
if k_texture is None:
if k is None:
self.k = [2, 2, 3]
else:
self.k = k
else:
self.k_texture = k_texture
self.textures.append(k_texture)
def pbrt(self):
out = super(MetalMaterial, self).pbrt()
out += ' "metal" '
out += ' "float roughness" [{}] '.format(self.roughness)
if self.eta_texture is not None:
out += '"texture eta" "{}"\n'.format(self.eta_texture.id)
else:
out += '"rgb eta" [{} {} {}]\n'.format(*self.eta)
if self.k_texture is not None:
out += '"texture k" "{}"\n'.format(self.k_texture.id)
else:
out += '"rgb k" [{} {} {}]\n'.format(*self.k)
out += super(MetalMaterial, self).suffix()
return out
class PlasticMaterial(Material):
"""PBRT Plastic material.
Args:
id(str): unique id to identify the material.
diffuse(list of 3 floats or None): color of the diffuse component.
diffuse_texture(sbmc.scene_generator.Texture): texture for the diffuse
component. Overrides `diffuse` if provided.
specular(list of 3 floats or None): color of the specular component.
specular_texture(sbmc.scene_generator.Texture): texture for the
specular component. Overrides `specular` if provided.
roughness(float): roughness of the material.
bump_texture(Texture or None): float texture to use as bump map.
"""
def __init__(self, id=None, diffuse=None, diffuse_texture=None,
specular=None, specular_texture=None,
roughness=0.01, bump_texture=None):
super(PlasticMaterial, self).__init__(id=id, bump_texture=bump_texture)
self.roughness = roughness
self.diffuse = None
self.diffuse_texture = None
self.specular = None
self.specular_texture = None
if diffuse_texture is None:
if diffuse is None:
self.diffuse = [1.0, 1.0, 1.0]
else:
self.diffuse = diffuse
else:
self.diffuse_texture = diffuse_texture
self.textures.append(diffuse_texture)
if specular_texture is None:
if specular is None:
self.specular = [1, 1, 1]
else:
self.specular = specular
else:
self.specular_texture = specular_texture
self.textures.append(specular_texture)
def pbrt(self):
out = super(PlasticMaterial, self).pbrt()
out += ' "plastic" '
out += ' "float roughness" [{}] '.format(self.roughness)
if self.diffuse_texture is not None:
out += '"texture Kd" "{}"\n'.format(self.diffuse_texture.id)
else:
out += '"rgb Kd" [{} {} {}]\n'.format(*self.diffuse)
if self.specular_texture is not None:
out += '"texture Ks" "{}"\n'.format(self.specular_texture.id)
else:
out += '"rgb Ks" [{} {} {}]\n'.format(*self.specular)
out += super(PlasticMaterial, self).suffix()
return out
class SubstrateMaterial(Material):
"""PBRT Substrate material.
Args:
id(str): unique id to identify the material.
diffuse(list of 3 floats or None): color of the diffuse component.
diffuse_texture(sbmc.scene_generator.Texture): texture for the diffuse
component. Overrides `diffuse` if provided.
specular(list of 3 floats or None): color of the specular component.
specular_texture(sbmc.scene_generator.Texture): texture for the
specular component. Overrides `specular` if provided.
uroughness(float): roughness of the material in the u direction.
vroughness(float): roughness of the material in the v direction.
bump_texture(Texture or None): float texture to use as bump map.
"""
def __init__(self, id=None, diffuse=None, diffuse_texture=None,
specular=None, specular_texture=None,
uroughness=0.1, vroughness=0.1, bump_texture=None):
super(SubstrateMaterial, self).__init__(
id=id, bump_texture=bump_texture)
self.uroughness = uroughness
self.vroughness = vroughness
self.diffuse = None
self.diffuse_texture = None
self.specular = None
self.specular_texture = None
if diffuse_texture is None:
if diffuse is None:
self.diffuse = [0.5, 0.5, 0.5]
else:
self.diffuse = diffuse
else:
self.diffuse_texture = diffuse_texture
self.textures.append(diffuse_texture)
if specular_texture is None:
if specular is None:
self.specular = [0.5, 0.5, 0.5]
else:
self.specular = specular
else:
self.specular_texture = specular_texture
self.textures.append(specular_texture)
def pbrt(self):
out = super(SubstrateMaterial, self).pbrt()
out += ' "substrate" '
out += ' "float uroughness" [{}] '.format(self.uroughness)
out += ' "float vroughness" [{}] '.format(self.vroughness)
if self.diffuse_texture is not None:
out += '"texture Kd" "{}"\n'.format(self.diffuse_texture.id)
else:
out += '"rgb Kd" [{} {} {}]\n'.format(*self.diffuse)
if self.specular_texture is not None:
out += '"texture Ks" "{}"\n'.format(self.specular_texture.id)
else:
out += '"rgb Ks" [{} {} {}]\n'.format(*self.specular)
out += super(SubstrateMaterial, self).suffix()
return out
|
the-stack_106_24557 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import os
from django.db import models
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from .. import settings as filer_settings
from ..utils.compatibility import GTE_DJANGO_1_10, PILImage
from ..utils.filer_easy_thumbnails import FilerThumbnailer
from ..utils.pil_exif import get_exif_for_file
from .filemodels import File
logger = logging.getLogger(__name__)
class BaseImage(File):
SIDEBAR_IMAGE_WIDTH = 210
DEFAULT_THUMBNAILS = {
'admin_clipboard_icon': {'size': (32, 32), 'crop': True,
'upscale': True},
'admin_sidebar_preview': {'size': (SIDEBAR_IMAGE_WIDTH, 0), 'upscale': True},
'admin_directory_listing_icon': {'size': (48, 48),
'crop': True, 'upscale': True},
'admin_tiny_icon': {'size': (32, 32), 'crop': True, 'upscale': True},
}
file_type = 'Image'
_icon = "image"
_height = models.IntegerField(null=True, blank=True)
_width = models.IntegerField(null=True, blank=True)
default_alt_text = models.CharField(_('default alt text'), max_length=255, blank=True, null=True)
default_caption = models.CharField(_('default caption'), max_length=255, blank=True, null=True)
subject_location = models.CharField(_('subject location'), max_length=64, blank=True,
default='')
file_ptr = models.OneToOneField(
to='filer.File', parent_link=True,
related_name='%(app_label)s_%(class)s_file',
on_delete=models.CASCADE,
)
@classmethod
def matches_file_type(cls, iname, ifile, request):
# This was originally in admin/clipboardadmin.py it was inside of a try
# except, I have moved it here outside of a try except because I can't
# figure out just what kind of exception this could generate... all it was
# doing for me was obscuring errors...
# --Dave Butler <[email protected]>
iext = os.path.splitext(iname)[1].lower()
return iext in ['.jpg', '.jpeg', '.png', '.gif']
def file_data_changed(self, post_init=False):
attrs_updated = super(BaseImage, self).file_data_changed(post_init=post_init)
if attrs_updated:
try:
try:
imgfile = self.file.file
except ValueError:
imgfile = self.file_ptr.file
imgfile.seek(0)
self._width, self._height = PILImage.open(imgfile).size
imgfile.seek(0)
except Exception:
if post_init is False:
# in case `imgfile` could not be found, unset dimensions
# but only if not initialized by loading a fixture file
self._width, self._height = None, None
return attrs_updated
def save(self, *args, **kwargs):
self.has_all_mandatory_data = self._check_validity()
super(BaseImage, self).save(*args, **kwargs)
def _check_validity(self):
if not self.name:
return False
return True
def sidebar_image_ratio(self):
if self.width:
return float(self.width) / float(self.SIDEBAR_IMAGE_WIDTH)
else:
return 1.0
def _get_exif(self):
if hasattr(self, '_exif_cache'):
return self._exif_cache
else:
if self.file:
self._exif_cache = get_exif_for_file(self.file)
else:
self._exif_cache = {}
return self._exif_cache
exif = property(_get_exif)
def has_edit_permission(self, request):
return self.has_generic_permission(request, 'edit')
def has_read_permission(self, request):
return self.has_generic_permission(request, 'read')
def has_add_children_permission(self, request):
return self.has_generic_permission(request, 'add_children')
def has_generic_permission(self, request, permission_type):
"""
Return true if the current user has permission on this
image. Return the string 'ALL' if the user has all rights.
"""
user = request.user
if not user.is_authenticated():
return False
elif user.is_superuser:
return True
elif user == self.owner:
return True
elif self.folder:
return self.folder.has_generic_permission(request, permission_type)
else:
return False
@property
def label(self):
if self.name in ['', None]:
return self.original_filename or 'unnamed file'
else:
return self.name
@property
def width(self):
return self._width or 0
@property
def height(self):
return self._height or 0
def _generate_thumbnails(self, required_thumbnails):
_thumbnails = {}
for name, opts in six.iteritems(required_thumbnails):
try:
opts.update({'subject_location': self.subject_location})
thumb = self.file.get_thumbnail(opts)
_thumbnails[name] = thumb.url
except Exception as e:
# catch exception and manage it. We can re-raise it for debugging
# purposes and/or just logging it, provided user configured
# proper logging configuration
if filer_settings.FILER_ENABLE_LOGGING:
logger.error('Error while generating thumbnail: %s', e)
if filer_settings.FILER_DEBUG:
raise
return _thumbnails
@property
def icons(self):
required_thumbnails = dict(
(size, {'size': (int(size), int(size)),
'crop': True,
'upscale': True,
'subject_location': self.subject_location})
for size in filer_settings.FILER_ADMIN_ICON_SIZES)
return self._generate_thumbnails(required_thumbnails)
@property
def thumbnails(self):
return self._generate_thumbnails(BaseImage.DEFAULT_THUMBNAILS)
@property
def easy_thumbnails_thumbnailer(self):
tn = FilerThumbnailer(
file=self.file, name=self.file.name,
source_storage=self.file.source_storage,
thumbnail_storage=self.file.thumbnail_storage,
thumbnail_basedir=self.file.thumbnail_basedir)
return tn
class Meta(object):
app_label = 'filer'
verbose_name = _('image')
verbose_name_plural = _('images')
abstract = True
if GTE_DJANGO_1_10:
default_manager_name = 'objects'
|
the-stack_106_24558 | from core.NDUDE_2D_sup_te import Test_NDUDE_2D_sup
# window size = k^2-1
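# e.g. k = 3 gives a 3x3 neighbourhood, i.e. 3*3 - 1 = 8 context pixels
# (presumably excluding the centre pixel being denoised)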
k_arr = [3,5,7,9,11,13,15,17]
delta_arr = [0.05, 0.1, 0.2, 0.25]
ep_ = 15
# Available test dataset : 1) Set13_256, 2) Set13_512, 3) BSD20
test_data = 'BSD20'
# if not a blind case
is_blind_ = False
case_ = None
for delta_ in delta_arr:
for k_ in k_arr:
te_NDUDE_2D_sup = Test_NDUDE_2D_sup(case = case_, delta=delta_, k = k_, test_data = test_data, ep = ep_, is_blind = is_blind_)
te_NDUDE_2D_sup.test_model()
|
the-stack_106_24559 |
from typing import NamedTuple
import tensorflow as tf
from .types import *
from .query import *
from ..args import ACTIVATION_FNS
from ..attention import *
from ..input import get_table_with_embedding
from ..const import EPSILON
from ..util import *
from ..layers import *
from ..activations import *
MP_State = tf.Tensor
class MP_Node(NamedTuple):
id: str
properties: tf.Tensor
state: MP_State
use_message_passing_fn = False
use_self_reference = False
def layer_normalize(tensor):
'''Apologies if I've abused this term'''
in_shape = tf.shape(tensor)
axes = list(range(1, len(tensor.shape)))
# Keep batch axis
t = tf.reduce_sum(tensor, axis=axes )
t += EPSILON
t = tf.reciprocal(t)
t = tf.check_numerics(t, "1/sum")
tensor = tf.einsum('brc,b->brc', tensor, t)
tensor = dynamic_assert_shape(tensor, in_shape, "layer_normalize_tensor")
return tensor
def calc_normalized_adjacency(context, node_state):
# Aggregate via adjacency matrix with normalisation (that does not include self-edges)
adj = tf.cast(context.features["kb_adjacency"], tf.float32)
degree = tf.reduce_sum(adj, -1, keepdims=True)
inv_degree = tf.reciprocal(degree)
node_mask = tf.expand_dims(tf.sequence_mask(context.features["kb_nodes_len"], context.args["kb_node_max_len"]), -1)
inv_degree = tf.where(node_mask, inv_degree, tf.zeros(tf.shape(inv_degree)))
inv_degree = tf.where(tf.greater(degree, 0), inv_degree, tf.zeros(tf.shape(inv_degree)))
inv_degree = tf.check_numerics(inv_degree, "inv_degree")
adj_norm = inv_degree * adj
adj_norm = tf.cast(adj_norm, node_state.dtype)
adj_norm = tf.check_numerics(adj_norm, "adj_norm")
node_incoming = tf.einsum('bnw,bnm->bmw', node_state, adj_norm)
return node_incoming
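# Note on the normalisation above: `degree` is the row sum of the adjacency
# matrix, so each sender's state is divided by its own (out-)degree before the
# einsum scatters it to the receiving nodes; e.g. a node with two outgoing
# edges contributes state/2 to each of its two neighbours.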
def mp_matmul(state, mat, name):
return tf.nn.conv1d(state, mat, 1, 'VALID', name=name)
def calc_right_shift(node_incoming):
shape = tf.shape(node_incoming)
node_incoming = tf.concat([node_incoming[:,:,1:],node_incoming[:,:,0:1]], axis=-1)
node_incoming = dynamic_assert_shape(node_incoming, shape, "node_incoming")
return node_incoming
def node_dense(nodes, units, name, activation="linear"):
with tf.variable_scope(name):
assert nodes.shape[-1].value is not None, "Nodes must have fixed last dimension"
w = tf.get_variable("w", [1, nodes.shape[-1], units], initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0))
b = tf.get_variable("b", [1, units], initializer=tf.initializers.random_uniform)
r = mp_matmul(nodes, w, 'matmul') + b
r = ACTIVATION_FNS[activation](r)
return r
def node_gru(context, node_state, node_incoming, padded_node_table):
all_inputs = [node_state, node_incoming]
if context.args["use_mp_node_id"]:
all_inputs.append(padded_node_table[:,:,:context.args["embed_width"]])
old_and_new = tf.concat(all_inputs, axis=-1)
input_width = old_and_new.shape[-1]
forget_w = tf.get_variable("mp_forget_w", [1, input_width, context.args["mp_state_width"]])
forget_b = tf.get_variable("mp_forget_b", [1, context.args["mp_state_width"]])
reuse_w = tf.get_variable("mp_reuse_w", [1, input_width, context.args["mp_state_width"]])
transform_w = tf.get_variable("mp_transform_w", [1, 2 * context.args["mp_state_width"], context.args["mp_state_width"]])
# Initially likely to be zero
forget_signal = tf.nn.sigmoid(mp_matmul(old_and_new , forget_w, 'forget_signal') + forget_b)
reuse_signal = tf.nn.sigmoid(mp_matmul(old_and_new , reuse_w, 'reuse_signal'))
reuse_and_new = tf.concat([reuse_signal * node_state, node_incoming], axis=-1)
proposed_new_state = ACTIVATION_FNS[context.args["mp_activation"]](mp_matmul(reuse_and_new, transform_w, 'proposed_new_state'))
node_state = (1-forget_signal) * node_state + (forget_signal) * proposed_new_state
return node_state
|
the-stack_106_24563 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Carlos Jenkins <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from setuptools import setup
def read(filename):
"""
Read a file relative to setup.py location.
"""
import os
here = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(here, filename)) as fd:
return fd.read()
def find_version(filename):
"""
Find package version in file.
"""
import re
content = read(filename)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]", content, re.M
)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
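# find_version('webdev') below expects the executable script to contain a line
# such as the following (version number illustrative):
#   __version__ = '1.0.0'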
setup(
# Main
name='webdev',
version=find_version('webdev'),
scripts=['webdev'],
# Extra metadata
author='Carlos Jenkins',
author_email='[email protected]',
url='https://github.com/carlos-jenkins/webdev',
description='Small Development Web Server',
long_description=read('README.rst'),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Topic :: Utilities',
],
)
|
the-stack_106_24564 | # MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from abc import abstractmethod
from ...quantization import FakeQuantize, Observer, QConfig
from ...tensor import Tensor
from ..module import Module
class QATModule(Module):
r"""
Base class of quantized-float related Module, basically for QAT and Calibration.
Use :meth:`~.QATModule.from_float_module` to generate a instance from float :class:`~.Module`.
Or use :func:`~.quantize.quantize_qat` to do it recursively and automatically.
Can also be converted to :class:`~.QuantizedModule` for deployment using
:func:`~.quantize.quantize` further.
"""
with_weight = True
with_act = True
def __init__(self):
super().__init__()
self.weight_observer = None # type: Observer
self.act_observer = None # type: Observer
self.weight_fake_quant = None # type: FakeQuantize
self.act_fake_quant = None # type: FakeQuantize
def set_qconfig(self, qconfig: QConfig):
r"""
Set quantization related configs with ``qconfig``, including
observer and fake_quant for weight and activation.
"""
def safe_call(func):
return func() if func is not None else None
if self.with_act:
self.act_observer = safe_call(qconfig.act_observer)
self.act_fake_quant = safe_call(qconfig.act_fake_quant)
if self.with_weight:
self.weight_observer = safe_call(qconfig.weight_observer)
self.weight_fake_quant = safe_call(qconfig.weight_fake_quant)
def _enable_exec(self, with_module, func, enable):
if not with_module or not func:
return
if enable:
func.enable()
else:
func.disable()
def set_fake_quant(self, enable):
self._enable_exec(self.with_act, self.act_fake_quant, enable)
self._enable_exec(self.with_weight, self.weight_fake_quant, enable)
def set_observer(self, enable):
self._enable_exec(self.with_act, self.act_observer, enable)
self._enable_exec(self.with_weight, self.weight_observer, enable)
def _apply_fakequant_with_observer(
self, target: Tensor, fake_quant: FakeQuantize, observer: Observer
):
# do observer
if observer is None:
oup = target
q_dict = None
else:
oup = observer(target)
q_dict = observer.get_qparams()
# do fake quant
if fake_quant is not None:
oup = fake_quant(oup, q_dict)
# use qparams of fake_quant if have.
if hasattr(fake_quant, "get_qparams"):
q_dict = fake_quant.get_qparams()
# set to tensor qparams.
if q_dict is not None:
oup.q_dict.update(q_dict)
return oup
def apply_quant_weight(self, target: Tensor):
r"""
Apply weight's observer and fake_quant from ``qconfig`` on ``target``.
"""
return self._apply_fakequant_with_observer(
target, self.weight_fake_quant, self.weight_observer
)
def apply_quant_activation(self, target: Tensor):
r"""
        Apply activation's observer and fake_quant from ``qconfig`` on ``target``.
"""
return self._apply_fakequant_with_observer(
target, self.act_fake_quant, self.act_observer
)
def _get_method_result(
self, method: str, fake_quant: FakeQuantize, observer: Observer
):
if hasattr(fake_quant, method):
return getattr(fake_quant, method)()
elif hasattr(observer, method):
return getattr(observer, method)()
return None
def get_weight_dtype(self):
r"""
Get weight's quantization dtype as the method from ``qconfig``.
"""
return self._get_method_result(
"get_dtype", self.weight_fake_quant, self.weight_observer
)
def get_activation_dtype(self):
r"""
Get activation's quantization dtype as the method from ``qconfig``.
"""
return self._get_method_result(
"get_dtype", self.act_fake_quant, self.act_observer
)
def get_weight_qparams(self):
r"""
Get weight's quantization parameters.
"""
return self._get_method_result(
"get_qparams", self.weight_fake_quant, self.weight_observer
)
def get_activation_qparams(self):
r"""
Get activation's quantization parameters.
"""
return self._get_method_result(
"get_qparams", self.act_fake_quant, self.act_observer
)
@classmethod
@abstractmethod
def from_float_module(cls, float_module: Module):
r"""
Return a :class:`~.QATModule` instance converted from
a float :class:`~.Module` instance.
"""
|
the-stack_106_24565 | '''Sequence to sequence example in Keras (character-level).
This script demonstrates how to implement a basic character-level
sequence-to-sequence model. We apply it to translating
short English sentences into short French sentences,
character-by-character. Note that it is fairly unusual to
do character-level machine translation, as word-level
models are more common in this domain.
# Summary of the algorithm
- We start with input sequences from a domain (e.g. English sentences)
    and corresponding target sequences from another domain
(e.g. French sentences).
- An encoder LSTM turns input sequences to 2 state vectors
(we keep the last LSTM state and discard the outputs).
- A decoder LSTM is trained to turn the target sequences into
the same sequence but offset by one timestep in the future,
a training process called "teacher forcing" in this context.
    It uses as initial state the state vectors from the encoder.
Effectively, the decoder learns to generate `targets[t+1...]`
given `targets[...t]`, conditioned on the input sequence.
- In inference mode, when we want to decode unknown input sequences, we:
- Encode the input sequence into state vectors
- Start with a target sequence of size 1
(just the start-of-sequence character)
- Feed the state vectors and 1-char target sequence
to the decoder to produce predictions for the next character
- Sample the next character using these predictions
(we simply use argmax).
- Append the sampled character to the target sequence
- Repeat until we generate the end-of-sequence character or we
hit the character limit.
# Data download
English to French sentence pairs.
http://www.manythings.org/anki/fra-eng.zip
Lots of neat sentence pairs datasets can be found at:
http://www.manythings.org/anki/
# References
- Sequence to Sequence Learning with Neural Networks
https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
RNN Encoder-Decoder for Statistical Machine Translation
https://arxiv.org/abs/1406.1078
'''
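# A hedged sketch of the encoder/decoder wiring described above, written with
# plain Keras layers. The actual models used below live in neuralnets.seq2seq
# and also consume a mask input, so this is only an illustration of the idea:
#
#   from keras.models import Model
#   from keras.layers import Input, LSTM, Dense
#
#   enc_in = Input(shape=(None, num_encoder_tokens))
#   _, state_h, state_c = LSTM(latent_dim, return_state=True)(enc_in)
#   dec_in = Input(shape=(None, num_decoder_tokens))
#   dec_seq, _, _ = LSTM(latent_dim, return_sequences=True,
#                        return_state=True)(dec_in, initial_state=[state_h, state_c])
#   dec_out = Dense(num_decoder_tokens, activation='softmax')(dec_seq)
#   model = Model([enc_in, dec_in], dec_out)  # trained with teacher forcing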
from __future__ import print_function
import argparse
import os
import numpy as np
from neuralnets.seq2seq import Seq2SeqAE, Seq2SeqRNN, Seq2SeqNoMaskRNN, Seq2SeqDeepRNN
from neuralnets.grammar import TilingGrammar
from neuralnets.utils import load_categories_dataset, decode_smiles_from_indexes, from_one_hot_array
from neuralnets.shape_graph import smiles_variations
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, Callback
from keras.utils import plot_model
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = ['Verdana']
from collections import Counter
def most_common_elem(lst):
data = Counter(lst)
return data.most_common(1)[0][0]
class PlotLearning(Callback):
def set_filename(self, name='filename'):
self.filename = name
def on_train_begin(self, logs={}):
self.i = 0
self.x = []
self.losses = []
self.val_losses = []
self.acc = []
self.val_acc = []
self.fig = plt.figure()
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(self.i)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
self.acc.append(logs.get('acc'))
self.val_acc.append(logs.get('val_acc'))
self.i += 1
f, (ax1, ax2) = plt.subplots(1, 2, sharex=True)
ax1.plot(self.x, self.losses, label="loss")
ax1.plot(self.x, self.val_losses, label="val_loss")
ax1.legend()
ax2.plot(self.x, self.acc, label="accuracy")
ax2.plot(self.x, self.val_acc, label="validation accuracy")
ax2.legend()
plt.savefig(self.filename + '_loss_history.pdf', bbox_inches='tight')
plt.close()
NUM_EPOCHS = 1
BATCH_SIZE = 200
LSTM_SIZE = 512
WORD_LENGTH = 120
MODEL = 'rnn'
def get_arguments():
parser = argparse.ArgumentParser(description='Sequence to sequence autoencoder network')
parser.add_argument('data', type=str, help='The HDF5 file containing preprocessed data.')
parser.add_argument('out', type=str,
help='Where to save the trained model. If this file exists, it will be opened and resumed.')
parser.add_argument('grammar', type=str, help='The HDF5 file with the tiling grammar.')
parser.add_argument('--model', type=str, default=MODEL,
help='What model to train: autoencoder, rnn, deep_rnn, no_mask_rnn.')
parser.add_argument('--epochs', type=int, metavar='N', default=NUM_EPOCHS,
help='Number of epochs to run during training.')
parser.add_argument('--word_length', type=int, metavar='N', default=WORD_LENGTH,
help='Length of input sequences')
parser.add_argument('--batch_size', type=int, metavar='N', default=BATCH_SIZE,
help='Number of samples to process per minibatch during training.')
return parser.parse_args()
def decode_sequence_ae(model,
input_seq,
input_mask,
input_len,
output_charset,
bounds=None,
max_length=WORD_LENGTH):
num_decoder_tokens = len(output_charset)
max_category = max(output_charset)
# Encode the input as state vectors.
#states_value = model.encoder.predict(input_seq)
states_value = model.encoder.predict([input_seq, input_mask])#mask
# Generate empty target sequence of length 1.
target_seq = np.zeros((1, 1, num_decoder_tokens))
target_mask = np.zeros((1, 1, num_decoder_tokens))#mask
# Populate the first character of target sequence with the start character.
#target_seq[0, 0, max_category] = 1.
target_min_bound = np.full(input_len, 0, dtype=int)
target_max_bound = np.full(input_len, -1, dtype=int)
    if bounds is not None:
target_min_bound = np.array([pair[0] for pair in bounds])
target_max_bound = np.array([pair[1] for pair in bounds])
#print('input mask', input_mask)
# Sampling loop for a batch of sequences
# (to simplify, here we assume a batch of size 1).
stop_condition = False
decoded_sequence = []
while not stop_condition:
#Update the target mask
char_id = len(decoded_sequence)
target_mask[0][0] = input_mask[0][char_id]
#print('target mask', target_mask[0][0])
output_tokens, h, c = model.decoder.predict([target_seq, target_mask] + states_value)
min_bound = target_min_bound[char_id]
max_bound = target_max_bound[char_id]
# if bounds != None:
# min_bound = max_category - target_max_bound[char_id] + 1
# max_bound = max_category - target_min_bound[char_id] + 1
# Sample a token
sampled_token_index = num_decoder_tokens - 1
if min_bound < max_bound:
sampled_token_index = min_bound + np.argmax(output_tokens[0, -1, min_bound:max_bound])
sampled_category = output_charset[sampled_token_index]
decoded_sequence.append(sampled_category)
elif min_bound == 0 and max_bound == -1:
sampled_token_index = np.argmax(output_tokens[0, -1, :])
sampled_category = output_charset[sampled_token_index]
decoded_sequence.append(sampled_category)
else:
decoded_sequence.append(max_category)
# Exit condition: either hit max length
# or find stop character.
if len(decoded_sequence) >= input_len:
stop_condition = True
# Update the target sequence (of length 1).
target_seq = np.zeros((1, 1, num_decoder_tokens))
target_seq[0, 0, sampled_token_index] = 1.
# Update states
states_value = [h, c]
return decoded_sequence
def predict_sequence(model,
input_seq,
input_mask=None):
if input_mask is None:
return model.rnn.predict(input_seq)
else:
return model.rnn.predict([input_seq, input_mask])
def decode_sequence_rnn(model,
input_seq,
input_len,
output_charset,
input_mask=None):
output_sequence = predict_sequence(model, input_seq, input_mask)
decoded_sequence = []
while len(decoded_sequence) < input_len:
char_id = len(decoded_sequence)
sampled_token_index = np.argmax(output_sequence[0, char_id, :])
sampled_category = output_charset[sampled_token_index]
decoded_sequence.append(sampled_category)
return decoded_sequence
def decode_sequence(model,
grammar,
input_charset,
input_word,
max_length=WORD_LENGTH,
num_variants=10):
if num_variants <= 1:
num_variants = 1
##############################################################################################################
#Generate multiple string variants for the input graph
##############################################################################################################
padded_node_ids = []
num_nodes = 0
for char_id, _ in enumerate(input_word):
if input_word[char_id] in grammar.charset:
padded_node_ids.append(num_nodes)
num_nodes += 1
else:
padded_node_ids.append(max_length)
dummy_node_id = num_nodes
for i, _ in enumerate(padded_node_ids):
if padded_node_ids[i] == max_length:
padded_node_ids[i] = dummy_node_id
padded_node_ids.append(dummy_node_id) #ensure at least one occurrence
smiles_variants, node_variants = smiles_variations(input_word, padded_node_ids, grammar, num_variants - 1)
smiles_strings = [input_word] + smiles_variants
node_lists = [padded_node_ids] + node_variants
edge_lists = []
for word, nodes in zip(smiles_strings, node_lists):
edge_lists.append(grammar.smiles_to_edges(word, nodes))
input_sequences = np.empty(dtype='float32', shape=(num_variants, max_length, len(input_charset)))
input_masks = np.empty(dtype='float32', shape=(num_variants, max_length, grammar.categories_prefix[-1] + 1))
for i, word in enumerate(smiles_strings):
input_sequences[i] = grammar.smiles_to_one_hot(word.ljust(max_length), input_charset)
input_masks[i] = grammar.smiles_to_mask(word, max_length)
##############################################################################################################
#Classify each string (estimate edge configurations)
##############################################################################################################
output_charset = list(range(0, grammar.categories_prefix[-1] + 1, 1))
decoded_sequences = []
for i in range(num_variants):
decoded_sequences.append(decode_sequence_rnn(model, input_sequences[i:i+1], len(smiles_strings[i]), output_charset, input_masks[i:i+1]))
output_sequence = []
per_edge_categories = []
for edge_id, edge in enumerate(edge_lists[0]):
local_categories = [decoded_sequences[0][edge_id]]
if edge[0] != dummy_node_id or edge[1] != dummy_node_id:
for j in range(1, num_variants):
if edge in edge_lists[j]: #edge direction can be reversed in the other list
idx = edge_lists[j].index(edge)
local_categories.append(decoded_sequences[j][idx])
per_edge_categories.append(local_categories)
output_sequence.append(most_common_elem(local_categories))
return output_sequence
def main():
args = get_arguments()
tile_grammar = TilingGrammar([])
tile_grammar.load(args.grammar)
data_train, categories_train, masks_train, data_test, categories_test, masks_test, charset, charset_cats = load_categories_dataset(args.data)
num_encoder_tokens = len(charset)
num_decoder_tokens = len(charset_cats)
#max_category = max(charset_cats)
if categories_train.shape != masks_train.shape or data_train.shape[0] != categories_train.shape[0] or data_train.shape[1] != categories_train.shape[1]:
print('Incompatible input array dimensions')
print('Sample categories shape: ', categories_train.shape)
print('Sample masks shape: ', masks_train.shape)
print('Sample data shape: ', data_train.shape)
print('Number of unique input tokens: ', num_encoder_tokens)
print('Number of unique output tokens: ', num_decoder_tokens)
encoder_input_data = data_train.astype(dtype='float32')
decoder_input_masks = masks_train.astype(dtype='float32')
decoder_input_data = categories_train.astype(dtype='float32')
encoder_test_data = data_test.astype(dtype='float32')
decoder_test_masks = masks_test.astype(dtype='float32')
##############################################################################################################
#Sequence to sequence autoencoder
##############################################################################################################
if args.model == 'autoencoder':
decoder_target_data = np.zeros(categories_train.shape, dtype='float32')
for w_id in range(decoder_input_data.shape[0]):
for c_id in range(decoder_input_data.shape[1]):
for one_h_id_c in range(decoder_input_data.shape[2]):
                    if c_id > 0 and decoder_input_data[w_id][c_id][one_h_id_c] == 1.:
                        # decoder_target_data will be ahead by one timestep
                        # and will not include the start character.
                        decoder_target_data[w_id][c_id-1][one_h_id_c] = 1.
model = Seq2SeqAE()
if os.path.isfile(args.out):
model.load(charset, charset_cats, args.out, lstm_size=LSTM_SIZE)
else:
model.create(charset, charset_cats, lstm_size=LSTM_SIZE)
if args.epochs > 0:
checkpointer = ModelCheckpoint(filepath=args.out,
verbose=1,
save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor = 'val_loss',
factor = 0.2,
patience = 3,
min_lr = 0.000001)
filename, ext = os.path.splitext(args.out)
plot_model(model.autoencoder, to_file=filename + '_autoencoder_nn.pdf', show_shapes=True)
plot_model(model.decoder, to_file=filename + '_decoder_nn.pdf', show_shapes=True)
plot = PlotLearning()
plot.set_filename(filename)
history = model.autoencoder.fit([encoder_input_data, decoder_input_data, decoder_input_masks], decoder_target_data,
batch_size=args.batch_size,
epochs=args.epochs,
validation_split=0.2,
callbacks=[checkpointer, reduce_lr, plot])
# Save model
model.autoencoder.save(args.out)
#test-decode a couple of train examples
sample_ids = np.random.randint(0, len(data_train), 4)
for word_id in sample_ids:
print ('===============================')
train_string = decode_smiles_from_indexes(map(from_one_hot_array, data_train[word_id]), charset)
print ('train string: ', train_string)
train_sequence = []
for char_id in range(categories_train[word_id].shape[0]):
token_index = np.argmax(categories_train[word_id][char_id, :])
train_category = charset_cats[token_index]
train_sequence.append(train_category)
input_seq = encoder_input_data[word_id: word_id + 1]
input_mask = decoder_input_masks[word_id: word_id + 1]
category_bounds = tile_grammar.smiles_to_categories_bounds(train_string)
decoded_seq_1 = decode_sequence_ae(model, input_seq, input_mask, len(train_string), charset_cats, category_bounds)
#print ('decoded categories (w/ bounds):', decoded_seq_1)
decoded_seq_2 = decode_sequence_ae(model, input_seq, input_mask, len(train_string), charset_cats)
#print ('decoded categories (no bounds):', decoded_seq_2)
print ('[train, decoded, decoded] categories :', zip(train_sequence[:len(train_string)], decoded_seq_1, decoded_seq_2))
# print ('categories bounds:', tile_grammar.smiles_to_categories_bounds(train_string))
#test-decode a couple of test examples
sample_ids = np.random.randint(0, len(data_test), 8)
for word_id in sample_ids:
print ('===============================')
test_string = decode_smiles_from_indexes(map(from_one_hot_array, data_test[word_id]), charset)
print ('test string: ', test_string)
test_sequence = []
for char_id in range(categories_test[word_id].shape[0]):
token_index = np.argmax(categories_test[word_id][char_id, :])
test_category = charset_cats[token_index]
test_sequence.append(test_category)
#print ('test categories :', test_sequence[:len(test_string)])
input_seq = encoder_test_data[word_id: word_id + 1]
input_mask = decoder_test_masks[word_id: word_id + 1]
category_bounds = tile_grammar.smiles_to_categories_bounds(test_string)
decoded_seq_1 = decode_sequence_ae(model, input_seq, input_mask, len(test_string), charset_cats, category_bounds)
#print ('decoded categories (w/ bounds):', decoded_seq_1)
decoded_seq_2 = decode_sequence_ae(model, input_seq, input_mask, len(test_string), charset_cats)
#print ('decoded categories (no bounds):', decoded_seq_2)
print ('[train, decoded, decoded] categories :', zip(test_sequence[:len(test_string)], decoded_seq_1, decoded_seq_2))
# print ('categories bounds:', tile_grammar.smiles_to_categories_bounds(test_string))
##############################################################################################################
#Simple (deep) RNN
##############################################################################################################
elif args.model == 'rnn':
model = Seq2SeqRNN()
if os.path.isfile(args.out):
model.load(charset, charset_cats, args.out, lstm_size=LSTM_SIZE)
else:
model.create(charset, charset_cats, lstm_size=LSTM_SIZE)
if args.epochs > 0:
checkpointer = ModelCheckpoint(filepath=args.out,
verbose=1,
save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor = 'val_loss',
factor = 0.2,
patience = 3,
min_lr = 0.000001)
filename, ext = os.path.splitext(args.out)
plot_model(model.rnn, to_file=filename + '_rnn.pdf', show_shapes=True)
plot = PlotLearning()
plot.set_filename(filename)
history = model.rnn.fit([encoder_input_data, decoder_input_masks], decoder_input_data,
batch_size=args.batch_size,
epochs=args.epochs,
validation_split=0.2,
callbacks=[checkpointer, reduce_lr, plot])
# Save model
model.rnn.save(args.out)
#test-decode a couple of train examples
sample_ids = np.random.randint(0, len(data_train), 2)
for word_id in sample_ids:
print ('===============================')
train_string = decode_smiles_from_indexes(map(from_one_hot_array, data_train[word_id]), charset)
print ('train string: ', train_string)
train_sequence = []
for char_id in range(categories_train[word_id].shape[0]):
token_index = np.argmax(categories_train[word_id][char_id, :])
train_category = charset_cats[token_index]
train_sequence.append(train_category)
input_seq = encoder_input_data[word_id: word_id + 1]
input_mask = decoder_input_masks[word_id: word_id + 1]
decoded_seq_1 = decode_sequence_rnn(model, input_seq, len(train_string), charset_cats, input_mask)
print ('(train, decoded) categories :', zip(train_sequence, decoded_seq_1))
#test-decode a couple of test examples
sample_ids = np.random.randint(0, len(data_test), 2)
for word_id in sample_ids:
print ('===============================')
test_string = decode_smiles_from_indexes(map(from_one_hot_array, data_test[word_id]), charset)
print ('test string: ', test_string)
test_sequence = []
for char_id in range(categories_test[word_id].shape[0]):
token_index = np.argmax(categories_test[word_id][char_id, :])
test_category = charset_cats[token_index]
test_sequence.append(test_category)
input_seq = encoder_test_data[word_id: word_id + 1]
input_mask = decoder_test_masks[word_id: word_id + 1]
decoded_seq_1 = decode_sequence_rnn(model, input_seq, len(test_string), charset_cats, input_mask)
print ('(test, decoded) categories :', zip(test_sequence, decoded_seq_1))
num_smiles_variants = 32
decoded_seq_2 = decode_sequence(model, tile_grammar, charset, test_string, max_length=args.word_length, num_variants=num_smiles_variants)
print ('(test, decoded_1, decoded_' + str(num_smiles_variants) + ') categories :', zip(test_sequence, decoded_seq_1, decoded_seq_2))
###############################################################################################################
#Deep RNN
###############################################################################################################
elif args.model == 'deep_rnn':
model = Seq2SeqDeepRNN()
if os.path.isfile(args.out):
model.load(charset, charset_cats, args.out, lstm_size=LSTM_SIZE)
else:
model.create(charset, charset_cats, lstm_size=LSTM_SIZE)
if args.epochs > 0:
checkpointer = ModelCheckpoint(filepath=args.out,
verbose=1,
save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor = 'val_loss',
factor = 0.2,
patience = 3,
min_lr = 0.000001)
filename, ext = os.path.splitext(args.out)
plot_model(model.rnn, to_file=filename + '_rnn.pdf', show_shapes=True)
plot = PlotLearning()
plot.set_filename(filename)
history = model.rnn.fit([encoder_input_data, decoder_input_masks], decoder_input_data,
batch_size=args.batch_size,
epochs=args.epochs,
validation_split=0.2,
callbacks=[checkpointer, reduce_lr, plot])
# Save model
model.rnn.save(args.out)
#test-decode a couple of train examples
sample_ids = np.random.randint(0, len(data_train), 2)
for word_id in sample_ids:
print ('===============================')
train_string = decode_smiles_from_indexes(map(from_one_hot_array, data_train[word_id]), charset)
print ('train string: ', train_string)
train_sequence = []
for char_id in range(categories_train[word_id].shape[0]):
token_index = np.argmax(categories_train[word_id][char_id, :])
train_category = charset_cats[token_index]
train_sequence.append(train_category)
input_seq = encoder_input_data[word_id: word_id + 1]
input_mask = decoder_input_masks[word_id: word_id + 1]
decoded_seq_1 = decode_sequence_rnn(model, input_seq, len(train_string), charset_cats, input_mask)
print ('(train, decoded) categories :', zip(train_sequence, decoded_seq_1))
#test-decode a couple of test examples
sample_ids = np.random.randint(0, len(data_test), 2)
for word_id in sample_ids:
print ('===============================')
test_string = decode_smiles_from_indexes(map(from_one_hot_array, data_test[word_id]), charset)
print ('test string: ', test_string)
test_sequence = []
for char_id in range(categories_test[word_id].shape[0]):
token_index = np.argmax(categories_test[word_id][char_id, :])
test_category = charset_cats[token_index]
test_sequence.append(test_category)
input_seq = encoder_test_data[word_id: word_id + 1]
input_mask = decoder_test_masks[word_id: word_id + 1]
decoded_seq_1 = decode_sequence_rnn(model, input_seq, len(test_string), charset_cats, input_mask)
print ('(test, decoded) categories :', zip(test_sequence, decoded_seq_1))
num_smiles_variants = 32
decoded_seq_2 = decode_sequence(model, tile_grammar, charset, test_string, max_length=args.word_length, num_variants=num_smiles_variants)
print ('(test, decoded_1, decoded_' + str(num_smiles_variants) + ') categories :', zip(test_sequence, decoded_seq_1, decoded_seq_2))
###############################################################################################################
#Simple RNN without masking
###############################################################################################################
elif args.model == 'no_mask_rnn':
model = Seq2SeqNoMaskRNN()
if os.path.isfile(args.out):
model.load(charset, charset_cats, args.out, lstm_size=LSTM_SIZE)
else:
model.create(charset, charset_cats, lstm_size=LSTM_SIZE)
if args.epochs > 0:
checkpointer = ModelCheckpoint(filepath=args.out,
verbose=1,
save_best_only=True)
reduce_lr = ReduceLROnPlateau(monitor = 'val_loss',
factor = 0.2,
patience = 3,
min_lr = 0.000001)
filename, ext = os.path.splitext(args.out)
plot_model(model.rnn, to_file=filename + '_rnn.pdf', show_shapes=True)
plot = PlotLearning()
plot.set_filename(filename)
history = model.rnn.fit(encoder_input_data, decoder_input_data,
batch_size=args.batch_size,
epochs=args.epochs,
validation_split=0.2,
callbacks=[checkpointer, reduce_lr, plot])
# Save model
model.rnn.save(args.out)
#test-decode a couple of train examples
sample_ids = np.random.randint(0, len(data_train), 2)
for word_id in sample_ids:
print ('===============================')
train_string = decode_smiles_from_indexes(map(from_one_hot_array, data_train[word_id]), charset)
print ('train string: ', train_string)
train_sequence = []
for char_id in range(categories_train[word_id].shape[0]):
token_index = np.argmax(categories_train[word_id][char_id, :])
train_category = charset_cats[token_index]
train_sequence.append(train_category)
input_seq = encoder_input_data[word_id: word_id + 1]
input_mask = decoder_input_masks[word_id: word_id + 1]
decoded_seq_1 = decode_sequence_rnn(model, input_seq, len(train_string), charset_cats)
print ('(train, decoded) categories :', zip(train_sequence, decoded_seq_1))
#test-decode a couple of test examples
sample_ids = np.random.randint(0, len(data_test), 2)
for word_id in sample_ids:
print ('===============================')
test_string = decode_smiles_from_indexes(map(from_one_hot_array, data_test[word_id]), charset)
print ('test string: ', test_string)
test_sequence = []
for char_id in range(categories_test[word_id].shape[0]):
token_index = np.argmax(categories_test[word_id][char_id, :])
test_category = charset_cats[token_index]
test_sequence.append(test_category)
input_seq = encoder_test_data[word_id: word_id + 1]
input_mask = decoder_test_masks[word_id: word_id + 1]
decoded_seq_1 = decode_sequence_rnn(model, input_seq, len(test_string), charset_cats)
            print ('(test, decoded) categories :', zip(test_sequence, decoded_seq_1))
if __name__ == '__main__':
main()
|
the-stack_106_24566 | # -*- coding: utf-8 -*-
from .types import Types
from ..Helpers.commands import Dup, Store, Push, BLoad, Load, Add, DBLoad, Label, Jump, Pop, Jz
class Loop:
""" Генератор команд для организации циклов """
load_commands = {
'stack': BLoad,
'heap': DBLoad
}
@staticmethod
def base(commands, data, check_break_condition, callback, load_counter=True, return_counter=False):
"""
        Generate the commands for a loop.
        The stop condition is dynamic and is supplied from outside.
"""
        # Create the labels and variables needed to run the loop.
counter = data.var(Types.INT)
start_label = data.label()
finish_label = data.label()
continue_label = data.label()
        # Initialize the loop counter
commands.add(Push, 0) \
.add(Store, counter) \
.add(Label, start_label)
        # Execute the loop body
if callback is not None:
callback(counter, finish_label, continue_label)
commands.add(Label, continue_label)
        # Increment the loop counter
commands.add(Load, counter) \
.add(Push, 1) \
.add(Add) \
.add(Store, counter)
        # Evaluate the supplied stop condition
if check_break_condition is not None:
check_break_condition(start_label, finish_label, counter)
commands.add(Jump, start_label) \
.add(Label, finish_label)
        # If requested, push the number of completed iterations onto the stack
if load_counter:
commands.add(Load, counter)
        # If requested, return the variable holding the number of completed iterations
if return_counter:
return counter
@staticmethod
def simple(commands, data, callback, return_counter=False):
"""
        Generate the commands for an arbitrary loop.
        The stop criterion and the stop itself must be implemented inside the callback.
"""
return Loop.base(commands, data, None, callback, False, return_counter)
@staticmethod
def stack(commands, data, callback, load_counter=True, return_counter=False):
"""
        Generate the commands for a loop driven by the stack.
        The loop terminates when a 0 is left on the stack.
"""
def check_break_condition(a, finish_label, b):
            # If the callback left 0 on the stack, terminate the loop.
commands.add(Dup).add(Jz, finish_label)
result = Loop.base(commands, data, check_break_condition, callback, load_counter, return_counter)
        # Pop the 0 that is still on the stack (since Dup was used before Jz).
commands.add(Pop)
return result
@staticmethod
def data(commands, data, start_pointer, callback=None, load_counter=True, return_counter=False, memory_type='heap'):
"""
        Generate the commands for a loop over memory.
        The loop terminates when the next memory cell (heap by default) contains 0.
"""
def check_break_condition(a, finish_label, _counter):
            # If the memory cell contains 0 after the callback, terminate the loop.
commands.add(Load, start_pointer) \
.add(Load, _counter) \
.add(Add) \
.add(Loop.load_commands[memory_type], 0) \
.add(Jz, finish_label)
return Loop.base(commands, data, check_break_condition, callback, load_counter, return_counter)
@staticmethod
def data_stack(commands, data, start_pointer, callback=None, load_counter=True, return_counter=False):
"""
        Generate the commands for a loop over memory.
        The loop terminates when the next stack-memory cell contains 0.
"""
return Loop.data(commands, data, start_pointer, callback, load_counter, return_counter, memory_type='stack')
@staticmethod
def data_heap(commands, data, start_pointer, callback=None, load_counter=True, return_counter=False):
"""
        Generate the commands for a loop over memory.
        The loop terminates when the next heap-memory cell contains 0.
"""
return Loop.data(commands, data, start_pointer, callback, load_counter, return_counter)
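
# A hedged usage sketch (assumes ``commands`` and ``data`` objects supplied by the
# surrounding compiler and the stack-machine semantics of the commands imported
# above; the callback signature matches the one Loop.base passes in):
#
#   def body(counter, finish_label, continue_label):
#       # one iteration: load the heap cell addressed by the current counter,
#       # leaving its value on the stack
#       commands.add(Load, counter).add(DBLoad, 0)
#
#   Loop.stack(commands, data, body)  # terminates once a loaded value is 0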
|
the-stack_106_24570 | import six
import json
from kubernetes import watch
from kubernetes.client.rest import ApiException
from .apply import apply
from .discovery import EagerDiscoverer, LazyDiscoverer
from .exceptions import api_exception, KubernetesValidateMissing, ApplyException
from .resource import Resource, ResourceList, Subresource, ResourceInstance, ResourceField
try:
import kubernetes_validate
HAS_KUBERNETES_VALIDATE = True
except ImportError:
HAS_KUBERNETES_VALIDATE = False
try:
from kubernetes_validate.utils import VersionNotSupportedError
except ImportError:
class VersionNotSupportedError(NotImplementedError):
pass
__all__ = [
'DynamicClient',
'ResourceInstance',
'Resource',
'ResourceList',
'Subresource',
'EagerDiscoverer',
'LazyDiscoverer',
'ResourceField',
]
def meta_request(func):
""" Handles parsing response structure and translating API Exceptions """
def inner(self, *args, **kwargs):
serialize_response = kwargs.pop('serialize', True)
serializer = kwargs.pop('serializer', ResourceInstance)
try:
resp = func(self, *args, **kwargs)
except ApiException as e:
raise api_exception(e)
if serialize_response:
try:
if six.PY2:
return serializer(self, json.loads(resp.data))
return serializer(self, json.loads(resp.data.decode('utf8')))
except ValueError:
if six.PY2:
return resp.data
return resp.data.decode('utf8')
return resp
return inner
class DynamicClient(object):
""" A kubernetes client that dynamically discovers and interacts with
the kubernetes API
"""
def __init__(self, client, cache_file=None, discoverer=None):
# Setting default here to delay evaluation of LazyDiscoverer class
# until constructor is called
discoverer = discoverer or LazyDiscoverer
self.client = client
self.configuration = client.configuration
self.__discoverer = discoverer(self, cache_file)
@property
def resources(self):
return self.__discoverer
@property
def version(self):
return self.__discoverer.version
def ensure_namespace(self, resource, namespace, body):
namespace = namespace or body.get('metadata', {}).get('namespace')
if not namespace:
raise ValueError("Namespace is required for {}.{}".format(resource.group_version, resource.kind))
return namespace
def serialize_body(self, body):
if hasattr(body, 'to_dict'):
return body.to_dict()
return body or {}
def get(self, resource, name=None, namespace=None, **kwargs):
path = resource.path(name=name, namespace=namespace)
return self.request('get', path, **kwargs)
def create(self, resource, body=None, namespace=None, **kwargs):
body = self.serialize_body(body)
if resource.namespaced:
namespace = self.ensure_namespace(resource, namespace, body)
path = resource.path(namespace=namespace)
return self.request('post', path, body=body, **kwargs)
def delete(self, resource, name=None, namespace=None, label_selector=None, field_selector=None, **kwargs):
if not (name or label_selector or field_selector):
raise ValueError("At least one of name|label_selector|field_selector is required")
if resource.namespaced and not (label_selector or field_selector or namespace):
raise ValueError("At least one of namespace|label_selector|field_selector is required")
path = resource.path(name=name, namespace=namespace)
return self.request('delete', path, label_selector=label_selector, field_selector=field_selector, **kwargs)
def replace(self, resource, body=None, name=None, namespace=None, **kwargs):
body = self.serialize_body(body)
name = name or body.get('metadata', {}).get('name')
if not name:
raise ValueError("name is required to replace {}.{}".format(resource.group_version, resource.kind))
if resource.namespaced:
namespace = self.ensure_namespace(resource, namespace, body)
path = resource.path(name=name, namespace=namespace)
return self.request('put', path, body=body, **kwargs)
def patch(self, resource, body=None, name=None, namespace=None, **kwargs):
body = self.serialize_body(body)
name = name or body.get('metadata', {}).get('name')
if not name:
raise ValueError("name is required to patch {}.{}".format(resource.group_version, resource.kind))
if resource.namespaced:
namespace = self.ensure_namespace(resource, namespace, body)
content_type = kwargs.pop('content_type', 'application/strategic-merge-patch+json')
path = resource.path(name=name, namespace=namespace)
return self.request('patch', path, body=body, content_type=content_type, **kwargs)
def apply(self, resource, body=None, name=None, namespace=None):
body = self.serialize_body(body)
body['metadata'] = body.get('metadata', dict())
name = name or body['metadata'].get('name')
if not name:
raise ValueError("name is required to apply {}.{}".format(resource.group_version, resource.kind))
if resource.namespaced:
body['metadata']['namespace'] = self.ensure_namespace(resource, namespace, body)
try:
return apply(resource, body)
except ApplyException as e:
raise ValueError("Could not apply strategic merge to %s/%s: %s" %
(body['kind'], body['metadata']['name'], e))
def watch(self, resource, namespace=None, name=None, label_selector=None, field_selector=None, resource_version=None, timeout=None):
"""
Stream events for a resource from the Kubernetes API
:param resource: The API resource object that will be used to query the API
:param namespace: The namespace to query
:param name: The name of the resource instance to query
:param label_selector: The label selector with which to filter results
:param field_selector: The field selector with which to filter results
:param resource_version: The version with which to filter results. Only events with
a resource_version greater than this value will be returned
:param timeout: The amount of time in seconds to wait before terminating the stream
:return: Event object with these keys:
'type': The type of event such as "ADDED", "DELETED", etc.
'raw_object': a dict representing the watched object.
'object': A ResourceInstance wrapping raw_object.
Example:
client = DynamicClient(k8s_client)
v1_pods = client.resources.get(api_version='v1', kind='Pod')
            for e in v1_pods.watch(resource_version=0, namespace='default', timeout=5):
print(e['type'])
print(e['object'].metadata)
"""
watcher = watch.Watch()
for event in watcher.stream(
resource.get,
namespace=namespace,
name=name,
field_selector=field_selector,
label_selector=label_selector,
resource_version=resource_version,
serialize=False,
timeout_seconds=timeout
):
event['object'] = ResourceInstance(resource, event['object'])
yield event
@meta_request
def request(self, method, path, body=None, **params):
if not path.startswith('/'):
path = '/' + path
path_params = params.get('path_params', {})
query_params = params.get('query_params', [])
if params.get('pretty') is not None:
query_params.append(('pretty', params['pretty']))
if params.get('_continue') is not None:
query_params.append(('continue', params['_continue']))
if params.get('include_uninitialized') is not None:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if params.get('field_selector') is not None:
query_params.append(('fieldSelector', params['field_selector']))
if params.get('label_selector') is not None:
query_params.append(('labelSelector', params['label_selector']))
if params.get('limit') is not None:
query_params.append(('limit', params['limit']))
if params.get('resource_version') is not None:
query_params.append(('resourceVersion', params['resource_version']))
if params.get('timeout_seconds') is not None:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if params.get('watch') is not None:
query_params.append(('watch', params['watch']))
header_params = params.get('header_params', {})
form_params = []
local_var_files = {}
# HTTP header `Accept`
header_params['Accept'] = self.client.select_header_accept([
'application/json',
'application/yaml',
'application/vnd.kubernetes.protobuf'
])
# HTTP header `Content-Type`
if params.get('content_type'):
header_params['Content-Type'] = params['content_type']
else:
header_params['Content-Type'] = self.client.select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.client.call_api(
path,
method.upper(),
path_params,
query_params,
header_params,
body=body,
post_params=form_params,
async_req=params.get('async_req'),
files=local_var_files,
auth_settings=auth_settings,
_preload_content=False,
_return_http_data_only=params.get('_return_http_data_only', True)
)
def validate(self, definition, version=None, strict=False):
"""validate checks a kubernetes resource definition
Args:
definition (dict): resource definition
version (str): version of kubernetes to validate against
strict (bool): whether unexpected additional properties should be considered errors
Returns:
warnings (list), errors (list): warnings are missing validations, errors are validation failures
"""
if not HAS_KUBERNETES_VALIDATE:
raise KubernetesValidateMissing()
errors = list()
warnings = list()
try:
if version is None:
try:
version = self.version['kubernetes']['gitVersion']
except KeyError:
version = kubernetes_validate.latest_version()
kubernetes_validate.validate(definition, version, strict)
except kubernetes_validate.utils.ValidationError as e:
errors.append("resource definition validation error at %s: %s" % ('.'.join([str(item) for item in e.path]), e.message)) # noqa: B306
except VersionNotSupportedError:
errors.append("Kubernetes version %s is not supported by kubernetes-validate" % version)
except kubernetes_validate.utils.SchemaNotFoundError as e:
warnings.append("Could not find schema for object kind %s with API version %s in Kubernetes version %s (possibly Custom Resource?)" %
(e.kind, e.api_version, e.version))
return warnings, errors
|
the-stack_106_24571 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core import urlresolvers
from django.http import HttpResponse # noqa
from django import shortcuts
from django import template
from django.template.defaultfilters import title # noqa
from django.utils.http import urlencode
from django.utils.translation import npgettext_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
import six
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.floating_ips \
import workflows
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances.workflows \
import resize_instance
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
ACTIVE_STATES = ("ACTIVE",)
VOLUME_ATTACH_READY_STATES = ("ACTIVE", "SHUTOFF")
SNAPSHOT_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")
POWER_STATES = {
0: "NO STATE",
1: "RUNNING",
2: "BLOCKED",
3: "PAUSED",
4: "SHUTDOWN",
5: "SHUTOFF",
6: "CRASHED",
7: "SUSPENDED",
8: "FAILED",
9: "BUILDING",
}
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
SHELVE = 0
UNSHELVE = 1
def is_deleting(instance):
task_state = getattr(instance, "OS-EXT-STS:task_state", None)
if not task_state:
return False
return task_state.lower() == "deleting"
class DeleteInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "delete"
classes = ("btn-danger",)
icon = "remove"
policy_rules = (("compute", "compute:delete"),)
help_text = _("Deleted instances are not recoverable.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Instance",
u"Delete Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Instance",
u"Scheduled deletion of Instances",
count
)
def allowed(self, request, instance=None):
"""Allow delete action if instance not currently being deleted."""
return not is_deleting(instance)
def action(self, request, obj_id):
api.nova.server_delete(request, obj_id)
class RebootInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "reboot"
classes = ('btn-danger', 'btn-reboot')
policy_rules = (("compute", "compute:reboot"),)
help_text = _("Restarted instances will lose any data"
" not saved in persistent storage.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Hard Reboot Instance",
u"Hard Reboot Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Hard Rebooted Instance",
u"Hard Rebooted Instances",
count
)
def allowed(self, request, instance=None):
if instance is not None:
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
else:
return True
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, soft_reboot=False)
class SoftRebootInstance(RebootInstance):
name = "soft_reboot"
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Soft Reboot Instance",
u"Soft Reboot Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Soft Rebooted Instance",
u"Soft Rebooted Instances",
count
)
def action(self, request, obj_id):
api.nova.server_reboot(request, obj_id, soft_reboot=True)
class TogglePause(tables.BatchAction):
name = "pause"
icon = "pause"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Pause Instance",
u"Pause Instances",
count
),
ungettext_lazy(
u"Resume Instance",
u"Resume Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Paused Instance",
u"Paused Instances",
count
),
ungettext_lazy(
u"Resumed Instance",
u"Resumed Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('AdminActions',
request):
return False
if not instance:
return False
self.paused = instance.status == "PAUSED"
if self.paused:
self.current_present_action = UNPAUSE
policy = (("compute", "compute_extension:admin_actions:unpause"),)
else:
self.current_present_action = PAUSE
policy = (("compute", "compute_extension:admin_actions:pause"),)
has_permission = True
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check:
has_permission = policy_check(
policy, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.paused)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.paused:
api.nova.server_unpause(request, obj_id)
self.current_past_action = UNPAUSE
else:
api.nova.server_pause(request, obj_id)
self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
name = "suspend"
classes = ("btn-suspend",)
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Suspend Instance",
u"Suspend Instances",
count
),
ungettext_lazy(
u"Resume Instance",
u"Resume Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Suspended Instance",
u"Suspended Instances",
count
),
ungettext_lazy(
u"Resumed Instance",
u"Resumed Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('AdminActions',
request):
return False
if not instance:
return False
self.suspended = instance.status == "SUSPENDED"
if self.suspended:
self.current_present_action = RESUME
policy = (("compute", "compute_extension:admin_actions:resume"),)
else:
self.current_present_action = SUSPEND
policy = (("compute", "compute_extension:admin_actions:suspend"),)
has_permission = True
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check:
has_permission = policy_check(
policy, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.suspended)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.suspended:
api.nova.server_resume(request, obj_id)
self.current_past_action = RESUME
else:
api.nova.server_suspend(request, obj_id)
self.current_past_action = SUSPEND
class ToggleShelve(tables.BatchAction):
name = "shelve"
icon = "shelve"
@staticmethod
def action_present(count):
return (
ungettext_lazy(
u"Shelve Instance",
u"Shelve Instances",
count
),
ungettext_lazy(
u"Unshelve Instance",
u"Unshelve Instances",
count
),
)
@staticmethod
def action_past(count):
return (
ungettext_lazy(
u"Shelved Instance",
u"Shelved Instances",
count
),
ungettext_lazy(
u"Unshelved Instance",
u"Unshelved Instances",
count
),
)
def allowed(self, request, instance=None):
if not api.nova.extension_supported('Shelve', request):
return False
if not instance:
return False
self.shelved = instance.status == "SHELVED_OFFLOADED"
if self.shelved:
self.current_present_action = UNSHELVE
policy = (("compute", "compute_extension:unshelve"),)
else:
self.current_present_action = SHELVE
policy = (("compute", "compute_extension:shelve"),)
has_permission = True
policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
if policy_check:
has_permission = policy_check(
policy, request,
target={'project_id': getattr(instance, 'tenant_id', None)})
return (has_permission
and (instance.status in ACTIVE_STATES or self.shelved)
and not is_deleting(instance))
def action(self, request, obj_id):
if self.shelved:
api.nova.server_unshelve(request, obj_id)
self.current_past_action = UNSHELVE
else:
api.nova.server_shelve(request, obj_id)
self.current_past_action = SHELVE
class LaunchLink(tables.LinkAction):
name = "launch"
verbose_name = _("Launch Instance")
url = "horizon:project:instances:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
policy_rules = (("compute", "compute:create"),)
ajax = True
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super(LaunchLink, self).__init__(attrs, **kwargs)
def allowed(self, request, datum):
try:
limits = api.nova.tenant_absolute_limits(request, reserved=True)
instances_available = limits['maxTotalInstances'] \
- limits['totalInstancesUsed']
cores_available = limits['maxTotalCores'] \
- limits['totalCoresUsed']
ram_available = limits['maxTotalRAMSize'] - limits['totalRAMUsed']
if instances_available <= 0 or cores_available <= 0 \
or ram_available <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
else:
self.verbose_name = _("Launch Instance")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
except Exception:
LOG.exception("Failed to retrieve quota information")
# If we can't get the quota information, leave it to the
# API to check when launching
return True # The action should always be displayed
def single(self, table, request, object_id=None):
self.allowed(request, None)
return HttpResponse(self.render())
class LaunchLinkNG(LaunchLink):
name = "launch-ng"
url = "horizon:project:instances:index"
ajax = False
classes = ("btn-launch", )
def get_default_attrs(self):
url = urlresolvers.reverse(self.url)
ngclick = "modal.openLaunchInstanceWizard(" \
"{ successUrl: '%s' })" % url
self.attrs.update({
'ng-controller': 'LaunchInstanceModalController as modal',
'ng-click': ngclick
})
return super(LaunchLinkNG, self).get_default_attrs()
def get_link_url(self, datum=None):
return "javascript:void(0);"
class EditInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit Instance")
url = "horizon:project:instances:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("compute", "compute:update"),)
def get_link_url(self, project):
return self._get_link_url(project, 'instance_info')
def _get_link_url(self, project, step_slug):
base_url = urlresolvers.reverse(self.url, args=[project.id])
next_url = self.table.get_full_url()
params = {"step": step_slug,
update_instance.UpdateInstance.redirect_param_name: next_url}
param = urlencode(params)
return "?".join([base_url, param])
def allowed(self, request, instance):
return not is_deleting(instance)
class EditInstanceSecurityGroups(EditInstance):
name = "edit_secgroups"
verbose_name = _("Edit Security Groups")
def get_link_url(self, project):
return self._get_link_url(project, 'update_security_groups')
def allowed(self, request, instance=None):
return (instance.status in ACTIVE_STATES and
not is_deleting(instance) and
request.user.tenant_id == instance.tenant_id)
class CreateSnapshot(policy.PolicyTargetMixin, tables.LinkAction):
name = "snapshot"
verbose_name = _("Create Snapshot")
url = "horizon:project:images:snapshots:create"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("compute", "compute:snapshot"),)
def allowed(self, request, instance=None):
return instance.status in SNAPSHOT_READY_STATES \
and not is_deleting(instance)
class ConsoleLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "console"
verbose_name = _("Console")
url = "horizon:project:instances:detail"
classes = ("btn-console",)
policy_rules = (("compute", "compute_extension:consoles"),)
def allowed(self, request, instance=None):
# We check if ConsoleLink is allowed only if settings.CONSOLE_TYPE is
# not set at all, or if it's set to any value other than None or False.
return bool(getattr(settings, 'CONSOLE_TYPE', True)) and \
instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super(ConsoleLink, self).get_link_url(datum)
tab_query_string = tabs.ConsoleTab(
tabs.InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class LogLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "log"
verbose_name = _("View Log")
url = "horizon:project:instances:detail"
classes = ("btn-log",)
policy_rules = (("compute", "compute_extension:console_output"),)
def allowed(self, request, instance=None):
return instance.status in ACTIVE_STATES and not is_deleting(instance)
def get_link_url(self, datum):
base_url = super(LogLink, self).get_link_url(datum)
tab_query_string = tabs.LogTab(
tabs.InstanceDetailTabs).get_query_string()
return "?".join([base_url, tab_query_string])
class ResizeLink(policy.PolicyTargetMixin, tables.LinkAction):
name = "resize"
verbose_name = _("Resize Instance")
url = "horizon:project:instances:resize"
classes = ("ajax-modal", "btn-resize")
policy_rules = (("compute", "compute:resize"),)
def get_link_url(self, project):
return self._get_link_url(project, 'flavor_choice')
def _get_link_url(self, project, step_slug):
base_url = urlresolvers.reverse(self.url, args=[project.id])
next_url = self.table.get_full_url()
params = {"step": step_slug,
resize_instance.ResizeInstance.redirect_param_name: next_url}
param = urlencode(params)
return "?".join([base_url, param])
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
class ConfirmResize(policy.PolicyTargetMixin, tables.Action):
name = "confirm"
verbose_name = _("Confirm Resize/Migrate")
classes = ("btn-confirm", "btn-action-required")
policy_rules = (("compute", "compute:confirm_resize"),)
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, instance):
api.nova.server_confirm_resize(request, instance)
class RevertResize(policy.PolicyTargetMixin, tables.Action):
name = "revert"
verbose_name = _("Revert Resize/Migrate")
classes = ("btn-revert", "btn-action-required")
policy_rules = (("compute", "compute:revert_resize"),)
def allowed(self, request, instance):
return instance.status == 'VERIFY_RESIZE'
def single(self, table, request, instance):
api.nova.server_revert_resize(request, instance)
class RebuildInstance(policy.PolicyTargetMixin, tables.LinkAction):
name = "rebuild"
verbose_name = _("Rebuild Instance")
classes = ("btn-rebuild", "ajax-modal")
url = "horizon:project:instances:rebuild"
policy_rules = (("compute", "compute:rebuild"),)
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
class DecryptInstancePassword(tables.LinkAction):
name = "decryptpassword"
verbose_name = _("Retrieve Password")
classes = ("btn-decrypt", "ajax-modal")
url = "horizon:project:instances:decryptpassword"
def allowed(self, request, instance):
enable = getattr(settings,
'OPENSTACK_ENABLE_PASSWORD_RETRIEVE',
False)
return (enable
and (instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and get_keyname(instance) is not None)
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
keypair_name = get_keyname(datum)
return urlresolvers.reverse(self.url, args=[instance_id,
keypair_name])
class AssociateIP(policy.PolicyTargetMixin, tables.LinkAction):
name = "associate"
verbose_name = _("Associate Floating IP")
url = "horizon:project:access_and_security:floating_ips:associate"
classes = ("ajax-modal",)
icon = "link"
policy_rules = (("compute", "network:associate_floating_ip"),)
def allowed(self, request, instance):
if not api.network.floating_ip_supported(request):
return False
if api.network.floating_ip_simple_associate_supported(request):
return False
if instance.status == "ERROR":
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "floating":
return False
return not is_deleting(instance)
def get_link_url(self, datum):
base_url = urlresolvers.reverse(self.url)
next_url = self.table.get_full_url()
params = {
"instance_id": self.table.get_object_id(datum),
workflows.IPAssociationWorkflow.redirect_param_name: next_url}
params = urlencode(params)
return "?".join([base_url, params])
class SimpleAssociateIP(policy.PolicyTargetMixin, tables.Action):
name = "associate-simple"
verbose_name = _("Associate Floating IP")
icon = "link"
policy_rules = (("compute", "network:associate_floating_ip"),)
def allowed(self, request, instance):
if not api.network.floating_ip_simple_associate_supported(request):
return False
if instance.status == "ERROR":
return False
return not is_deleting(instance)
def single(self, table, request, instance_id):
try:
# target_id is port_id for Neutron and instance_id for Nova Network
# (Neutron API wrapper returns a 'portid_fixedip' string)
target_id = api.network.floating_ip_target_get_by_instance(
request, instance_id).split('_')[0]
fip = api.network.tenant_floating_ip_allocate(request)
api.network.floating_ip_associate(request, fip.id, target_id)
messages.success(request,
_("Successfully associated floating IP: %s")
% fip.ip)
except Exception:
exceptions.handle(request,
_("Unable to associate floating IP."))
return shortcuts.redirect(request.get_full_path())
class SimpleDisassociateIP(policy.PolicyTargetMixin, tables.Action):
name = "disassociate"
verbose_name = _("Disassociate Floating IP")
classes = ("btn-danger", "btn-disassociate",)
policy_rules = (("compute", "network:disassociate_floating_ip"),)
def allowed(self, request, instance):
if not api.network.floating_ip_supported(request):
return False
if not conf.HORIZON_CONFIG["simple_ip_management"]:
return False
for addresses in instance.addresses.values():
for address in addresses:
if address.get('OS-EXT-IPS:type') == "floating":
return not is_deleting(instance)
return False
def single(self, table, request, instance_id):
try:
# target_id is port_id for Neutron and instance_id for Nova Network
# (Neutron API wrapper returns a 'portid_fixedip' string)
targets = api.network.floating_ip_target_list_by_instance(
request, instance_id)
target_ids = [t.split('_')[0] for t in targets]
fips = [fip for fip in api.network.tenant_floating_ip_list(request)
if fip.port_id in target_ids]
# Removing multiple floating IPs at once doesn't work, so this pops
# off the first one.
if fips:
fip = fips.pop()
api.network.floating_ip_disassociate(request, fip.id)
messages.success(request,
_("Successfully disassociated "
"floating IP: %s") % fip.ip)
else:
messages.info(request, _("No floating IPs to disassociate."))
except Exception:
exceptions.handle(request,
_("Unable to disassociate floating IP."))
return shortcuts.redirect(request.get_full_path())
class UpdateMetadata(policy.PolicyTargetMixin, tables.LinkAction):
name = "update_metadata"
verbose_name = _("Update Metadata")
ajax = False
icon = "pencil"
attrs = {"ng-controller": "MetadataModalHelperController as modal"}
policy_rules = (("compute", "compute:update_instance_metadata"),)
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super(UpdateMetadata, self).__init__(attrs, **kwargs)
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
self.attrs['ng-click'] = (
"modal.openMetadataModal('instance', '%s', true, 'metadata')"
% instance_id)
return "javascript:void(0);"
def allowed(self, request, instance=None):
return (instance and
instance.status.lower() != 'error')
def instance_fault_to_friendly_message(instance):
fault = getattr(instance, 'fault', {})
message = fault.get('message', _("Unknown"))
default_message = _("Please try again later [Error: %s].") % message
fault_map = {
'NoValidHost': _("There is not enough capacity for this "
"flavor in the selected availability zone. "
"Try again later or select a different availability "
"zone.")
}
return fault_map.get(message, default_message)
def get_instance_error(instance):
if instance.status.lower() != 'error':
return None
message = instance_fault_to_friendly_message(instance)
preamble = _('Failed to perform requested operation on instance "%s", the '
'instance has an error status') % (instance.name or instance.id)
message = string_concat(preamble, ': ', message)
return message
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, instance_id):
instance = api.nova.server_get(request, instance_id)
try:
instance.full_flavor = api.nova.flavor_get(request,
instance.flavor["id"])
except Exception:
exceptions.handle(request,
_('Unable to retrieve flavor information '
'for instance "%s".') % instance_id,
ignore=True)
try:
api.network.servers_update_addresses(request, [instance])
except Exception:
exceptions.handle(request,
_('Unable to retrieve Network information '
'for instance "%s".') % instance_id,
ignore=True)
error = get_instance_error(instance)
if error:
messages.error(request, error)
return instance
class StartInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "start"
classes = ('btn-confirm',)
policy_rules = (("compute", "compute:start"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Start Instance",
u"Start Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Started Instance",
u"Started Instances",
count
)
def allowed(self, request, instance):
return ((instance is None) or
(instance.status in ("SHUTDOWN", "SHUTOFF", "CRASHED")))
def action(self, request, obj_id):
api.nova.server_start(request, obj_id)
class StopInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "stop"
classes = ('btn-danger',)
policy_rules = (("compute", "compute:stop"),)
help_text = _("The instance(s) will be shut off.")
@staticmethod
def action_present(count):
return npgettext_lazy(
"Action to perform (the instance is currently running)",
u"Shut Off Instance",
u"Shut Off Instances",
count
)
@staticmethod
def action_past(count):
return npgettext_lazy(
"Past action (the instance is currently already Shut Off)",
u"Shut Off Instance",
u"Shut Off Instances",
count
)
def allowed(self, request, instance):
return ((instance is None)
or ((get_power_state(instance) in ("RUNNING", "SUSPENDED"))
and not is_deleting(instance)))
def action(self, request, obj_id):
api.nova.server_stop(request, obj_id)
class LockInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "lock"
policy_rules = (("compute", "compute_extension:admin_actions:lock"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Lock Instance",
u"Lock Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Locked Instance",
u"Locked Instances",
count
)
# TODO(akrivoka): When the lock status is added to nova, revisit this
# to only allow unlocked instances to be locked
def allowed(self, request, instance):
if not api.nova.extension_supported('AdminActions', request):
return False
return True
def action(self, request, obj_id):
api.nova.server_lock(request, obj_id)
class UnlockInstance(policy.PolicyTargetMixin, tables.BatchAction):
name = "unlock"
policy_rules = (("compute", "compute_extension:admin_actions:unlock"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Unlock Instance",
u"Unlock Instances",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Unlocked Instance",
u"Unlocked Instances",
count
)
# TODO(akrivoka): When the lock status is added to nova, revisit this
# to only allow locked instances to be unlocked
def allowed(self, request, instance):
if not api.nova.extension_supported('AdminActions', request):
return False
return True
def action(self, request, obj_id):
api.nova.server_unlock(request, obj_id)
class AttachInterface(policy.PolicyTargetMixin, tables.LinkAction):
name = "attach_interface"
verbose_name = _("Attach Interface")
classes = ("btn-confirm", "ajax-modal")
url = "horizon:project:instances:attach_interface"
policy_rules = (("compute", "compute_extension:attach_interfaces"),)
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and api.base.is_service_enabled(request, 'network'))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
# TODO(lyj): the policy for detach interface not exists in nova.json,
# once it's added, it should be added here.
class DetachInterface(policy.PolicyTargetMixin, tables.LinkAction):
name = "detach_interface"
verbose_name = _("Detach Interface")
classes = ("btn-confirm", "ajax-modal")
url = "horizon:project:instances:detach_interface"
def allowed(self, request, instance):
return ((instance.status in ACTIVE_STATES
or instance.status == 'SHUTOFF')
and not is_deleting(instance)
and api.base.is_service_enabled(request, 'network'))
def get_link_url(self, datum):
instance_id = self.table.get_object_id(datum)
return urlresolvers.reverse(self.url, args=[instance_id])
def get_ips(instance):
template_name = 'project/instances/_instance_ips.html'
ip_groups = {}
for ip_group, addresses in six.iteritems(instance.addresses):
ip_groups[ip_group] = {}
ip_groups[ip_group]["floating"] = []
ip_groups[ip_group]["non_floating"] = []
for address in addresses:
if ('OS-EXT-IPS:type' in address and
address['OS-EXT-IPS:type'] == "floating"):
ip_groups[ip_group]["floating"].append(address)
else:
ip_groups[ip_group]["non_floating"].append(address)
context = {
"ip_groups": ip_groups,
}
return template.loader.render_to_string(template_name, context)
def get_size(instance):
if hasattr(instance, "full_flavor"):
template_name = 'project/instances/_instance_flavor.html'
size_ram = sizeformat.mb_float_format(instance.full_flavor.ram)
if instance.full_flavor.disk > 0:
size_disk = sizeformat.diskgbformat(instance.full_flavor.disk)
else:
size_disk = _("%s GB") % "0"
context = {
"name": instance.full_flavor.name,
"id": instance.id,
"size_disk": size_disk,
"size_ram": size_ram,
"vcpus": instance.full_flavor.vcpus,
"flavor_id": instance.full_flavor.id
}
return template.loader.render_to_string(template_name, context)
return _("Not available")
def get_keyname(instance):
if hasattr(instance, "key_name"):
keyname = instance.key_name
return keyname
return _("Not available")
def get_power_state(instance):
return POWER_STATES.get(getattr(instance, "OS-EXT-STS:power_state", 0), '')
STATUS_DISPLAY_CHOICES = (
("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")),
("active", pgettext_lazy("Current status of an Instance", u"Active")),
("shutoff", pgettext_lazy("Current status of an Instance", u"Shutoff")),
("suspended", pgettext_lazy("Current status of an Instance",
u"Suspended")),
("paused", pgettext_lazy("Current status of an Instance", u"Paused")),
("error", pgettext_lazy("Current status of an Instance", u"Error")),
("resize", pgettext_lazy("Current status of an Instance",
u"Resize/Migrate")),
("verify_resize", pgettext_lazy("Current status of an Instance",
u"Confirm or Revert Resize/Migrate")),
("revert_resize", pgettext_lazy(
"Current status of an Instance", u"Revert Resize/Migrate")),
("reboot", pgettext_lazy("Current status of an Instance", u"Reboot")),
("hard_reboot", pgettext_lazy("Current status of an Instance",
u"Hard Reboot")),
("password", pgettext_lazy("Current status of an Instance", u"Password")),
("rebuild", pgettext_lazy("Current status of an Instance", u"Rebuild")),
("migrating", pgettext_lazy("Current status of an Instance",
u"Migrating")),
("build", pgettext_lazy("Current status of an Instance", u"Build")),
("rescue", pgettext_lazy("Current status of an Instance", u"Rescue")),
("soft-delete", pgettext_lazy("Current status of an Instance",
u"Soft Deleted")),
("shelved", pgettext_lazy("Current status of an Instance", u"Shelved")),
("shelved_offloaded", pgettext_lazy("Current status of an Instance",
u"Shelved Offloaded")),
# these vm states are used when generating CSV usage summary
("building", pgettext_lazy("Current status of an Instance", u"Building")),
("stopped", pgettext_lazy("Current status of an Instance", u"Stopped")),
("rescued", pgettext_lazy("Current status of an Instance", u"Rescued")),
("resized", pgettext_lazy("Current status of an Instance", u"Resized")),
)
TASK_DISPLAY_NONE = pgettext_lazy("Task status of an Instance", u"None")
# Mapping of task states taken from Nova's nova/compute/task_states.py
TASK_DISPLAY_CHOICES = (
("scheduling", pgettext_lazy("Task status of an Instance",
u"Scheduling")),
("block_device_mapping", pgettext_lazy("Task status of an Instance",
u"Block Device Mapping")),
("networking", pgettext_lazy("Task status of an Instance",
u"Networking")),
("spawning", pgettext_lazy("Task status of an Instance", u"Spawning")),
("image_snapshot", pgettext_lazy("Task status of an Instance",
u"Snapshotting")),
("image_snapshot_pending", pgettext_lazy("Task status of an Instance",
u"Image Snapshot Pending")),
("image_pending_upload", pgettext_lazy("Task status of an Instance",
u"Image Pending Upload")),
("image_uploading", pgettext_lazy("Task status of an Instance",
u"Image Uploading")),
("image_backup", pgettext_lazy("Task status of an Instance",
u"Image Backup")),
("updating_password", pgettext_lazy("Task status of an Instance",
u"Updating Password")),
("resize_prep", pgettext_lazy("Task status of an Instance",
u"Preparing Resize or Migrate")),
("resize_migrating", pgettext_lazy("Task status of an Instance",
u"Resizing or Migrating")),
("resize_migrated", pgettext_lazy("Task status of an Instance",
u"Resized or Migrated")),
("resize_finish", pgettext_lazy("Task status of an Instance",
u"Finishing Resize or Migrate")),
("resize_reverting", pgettext_lazy("Task status of an Instance",
u"Reverting Resize or Migrate")),
("resize_confirming", pgettext_lazy("Task status of an Instance",
u"Confirming Resize or Migrate")),
("rebooting", pgettext_lazy("Task status of an Instance", u"Rebooting")),
("reboot_pending", pgettext_lazy("Task status of an Instance",
u"Reboot Pending")),
("reboot_started", pgettext_lazy("Task status of an Instance",
u"Reboot Started")),
("rebooting_hard", pgettext_lazy("Task status of an Instance",
u"Rebooting Hard")),
("reboot_pending_hard", pgettext_lazy("Task status of an Instance",
u"Reboot Pending Hard")),
("reboot_started_hard", pgettext_lazy("Task status of an Instance",
u"Reboot Started Hard")),
("pausing", pgettext_lazy("Task status of an Instance", u"Pausing")),
("unpausing", pgettext_lazy("Task status of an Instance", u"Resuming")),
("suspending", pgettext_lazy("Task status of an Instance",
u"Suspending")),
("resuming", pgettext_lazy("Task status of an Instance", u"Resuming")),
("powering-off", pgettext_lazy("Task status of an Instance",
u"Powering Off")),
("powering-on", pgettext_lazy("Task status of an Instance",
u"Powering On")),
("rescuing", pgettext_lazy("Task status of an Instance", u"Rescuing")),
("unrescuing", pgettext_lazy("Task status of an Instance",
u"Unrescuing")),
("rebuilding", pgettext_lazy("Task status of an Instance",
u"Rebuilding")),
("rebuild_block_device_mapping", pgettext_lazy(
"Task status of an Instance", u"Rebuild Block Device Mapping")),
("rebuild_spawning", pgettext_lazy("Task status of an Instance",
u"Rebuild Spawning")),
("migrating", pgettext_lazy("Task status of an Instance", u"Migrating")),
("deleting", pgettext_lazy("Task status of an Instance", u"Deleting")),
("soft-deleting", pgettext_lazy("Task status of an Instance",
u"Soft Deleting")),
("restoring", pgettext_lazy("Task status of an Instance", u"Restoring")),
("shelving", pgettext_lazy("Task status of an Instance", u"Shelving")),
("shelving_image_pending_upload", pgettext_lazy(
"Task status of an Instance", u"Shelving Image Pending Upload")),
("shelving_image_uploading", pgettext_lazy("Task status of an Instance",
u"Shelving Image Uploading")),
("shelving_offloading", pgettext_lazy("Task status of an Instance",
u"Shelving Offloading")),
("unshelving", pgettext_lazy("Task status of an Instance",
u"Unshelving")),
)
POWER_DISPLAY_CHOICES = (
("NO STATE", pgettext_lazy("Power state of an Instance", u"No State")),
("RUNNING", pgettext_lazy("Power state of an Instance", u"Running")),
("BLOCKED", pgettext_lazy("Power state of an Instance", u"Blocked")),
("PAUSED", pgettext_lazy("Power state of an Instance", u"Paused")),
("SHUTDOWN", pgettext_lazy("Power state of an Instance", u"Shut Down")),
("SHUTOFF", pgettext_lazy("Power state of an Instance", u"Shut Off")),
("CRASHED", pgettext_lazy("Power state of an Instance", u"Crashed")),
("SUSPENDED", pgettext_lazy("Power state of an Instance", u"Suspended")),
("FAILED", pgettext_lazy("Power state of an Instance", u"Failed")),
("BUILDING", pgettext_lazy("Power state of an Instance", u"Building")),
)
class InstancesFilterAction(tables.FilterAction):
filter_type = "server"
filter_choices = (('name', _("Instance Name ="), True),
('status', _("Status ="), True),
('image', _("Image ID ="), True),
('flavor', _("Flavor ID ="), True))
class InstancesTable(tables.DataTable):
TASK_STATUS_CHOICES = (
(None, True),
("none", True)
)
STATUS_CHOICES = (
("active", True),
("shutoff", True),
("suspended", True),
("paused", True),
("error", False),
("rescue", True),
("shelved", True),
("shelved_offloaded", True),
)
name = tables.Column("name",
link="horizon:project:instances:detail",
verbose_name=_("Instance Name"))
image_name = tables.Column("image_name",
verbose_name=_("Image Name"))
ip = tables.Column(get_ips,
verbose_name=_("IP Address"),
attrs={'data-type': "ip"})
size = tables.Column(get_size, sortable=False, verbose_name=_("Size"))
keypair = tables.Column(get_keyname, verbose_name=_("Key Pair"))
status = tables.Column("status",
filters=(title, filters.replace_underscores),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
az = tables.Column("availability_zone",
verbose_name=_("Availability Zone"))
task = tables.Column("OS-EXT-STS:task_state",
verbose_name=_("Task"),
empty_value=TASK_DISPLAY_NONE,
status=True,
status_choices=TASK_STATUS_CHOICES,
display_choices=TASK_DISPLAY_CHOICES)
state = tables.Column(get_power_state,
filters=(title, filters.replace_underscores),
verbose_name=_("Power State"),
display_choices=POWER_DISPLAY_CHOICES)
created = tables.Column("created",
verbose_name=_("Time since created"),
filters=(filters.parse_isotime,
filters.timesince_sortable),
attrs={'data-type': 'timesince'})
class Meta(object):
name = "instances"
verbose_name = _("Instances")
status_columns = ["status", "task"]
row_class = UpdateRow
table_actions_menu = (StartInstance, StopInstance, SoftRebootInstance)
launch_actions = ()
if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', True):
launch_actions = (LaunchLink,) + launch_actions
if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', False):
launch_actions = (LaunchLinkNG,) + launch_actions
table_actions = launch_actions + (DeleteInstance,
InstancesFilterAction)
row_actions = (StartInstance, ConfirmResize, RevertResize,
CreateSnapshot, SimpleAssociateIP, AssociateIP,
SimpleDisassociateIP, AttachInterface,
DetachInterface, EditInstance, UpdateMetadata,
DecryptInstancePassword, EditInstanceSecurityGroups,
ConsoleLink, LogLink, TogglePause, ToggleSuspend,
ToggleShelve, ResizeLink, LockInstance, UnlockInstance,
SoftRebootInstance, RebootInstance,
StopInstance, RebuildInstance, DeleteInstance)
the-stack_106_24572 | # Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from extensions.back.ReshapeMutation import ReshapeMutation
from extensions.back.ReverseInputChannels import ApplyReverseChannels
from mo.back.replacement import BackReplacementPattern
from mo.front.common.partial_infer.utils import int64_array
from mo.front.tf.graph_utils import create_op_node_with_second_input, create_op_with_const_inputs
from mo.graph.graph import Graph
from mo.ops.const import Const
from mo.ops.reshape import Reshape
from mo.ops.strided_slice import StridedSlice
class ConvolutionNormalizer(BackReplacementPattern):
enabled = True
def pattern(self):
return dict(
nodes=[
('node', dict(kind='op', type='Convolution'))
],
edges=[]
)
def replace_pattern(self, graph: Graph, match: dict):
node = match['node']
if node.has_valid('kernel_spatial'):
del node['kernel_spatial']
class V7ConvolutionWithGroupsResolver(BackReplacementPattern):
"""
Normalizes grouped convolution weights shape to fit special weights format [G*O I X Y]
"""
enabled = False
@staticmethod
def pattern():
return dict(
nodes=[
('node', dict(type='Convolution', group=lambda g: g is not None and g != 1))
],
edges=[]
)
def replace_pattern(self, graph: Graph, match: dict):
node = match['node']
group = node.group
assert group > 1
weights_shape = node.in_port(1).data.get_shape()
assert weights_shape is not None
assert weights_shape[0] % group == 0
if weights_shape[0] == node.output:
# weights are already in [G*O I X Y] format
return
new_shape = int64_array([node.output, -1, *weights_shape[2:]])
reshape = create_op_node_with_second_input(graph, Reshape, int64_array(new_shape),
{'override_output_shape': True})
node.in_port(1).get_connection().insert_node(reshape)
class V10ConvolutionWithGroupsResolver(BackReplacementPattern):
"""
Normalizes grouped convolution weights shape to fit special weights format
V10 IR: [G O I X Y]
"""
enabled = False
@staticmethod
def pattern():
return dict(
nodes=[
('node', dict(type='Convolution', group=lambda g: g is not None and g != 1))
],
edges=[]
)
def replace_pattern(self, graph: Graph, match: dict):
node = match['node']
group = node.group
assert group > 1
weights_shape = node.in_port(1).data.get_shape()
assert weights_shape is not None
assert weights_shape[0] % group == 0
I = node.in_port(0).data.get_shape()[1]
new_shape = int64_array([group, node.output / group, I / group, *weights_shape[2:]])
assert np.prod(weights_shape) == np.prod(new_shape), \
'Initial weights shape {}, grouped weights shape {}'.format(weights_shape, new_shape)
del node['group']
node['type'] = 'GroupConvolution'
reshape = create_op_node_with_second_input(graph, Reshape, int64_array(new_shape),
{'override_output_shape': True})
node.in_port(1).get_connection().insert_node(reshape)
class ConvolutionWithGroupsResolver(BackReplacementPattern):
"""
Normalizes grouped convolution weights shape to fit special weights format
V10 IR: [G O I X Y]
lower IR versions: [G*O I X Y]
"""
enabled = True
force_clean_up = True
def run_before(self):
return [ReshapeMutation]
def run_after(self):
return [ApplyReverseChannels]
def find_and_replace_pattern(self, graph: Graph):
V7ConvolutionWithGroupsResolver().find_and_replace_pattern(graph)
PullReshapeThroughFQ().find_and_replace_pattern(graph)
V10ConvolutionWithGroupsResolver().find_and_replace_pattern(graph)
class PullReshapeThroughFQ(BackReplacementPattern):
"""
Before:
... -> FQ -> Reshape -> Convolution -> ...
After:
... -> Reshape -> FQ (with aligned limits) -> Convolution -> ...
"""
enabled = False
@staticmethod
def pattern():
return dict(
nodes=[
('FQ', dict(type='FakeQuantize')),
('FQed', dict()),
('reshape', dict(type='Reshape')),
('reshaped', dict()),
('node', dict(type=lambda t: t in ['Convolution', 'GroupConvolution'])),
],
edges=[
('FQ', 'FQed'),
('FQed', 'reshape', {'in': 0}),
('reshape', 'reshaped'),
('reshaped', 'node', {'in': 1}),
]
)
def replace_pattern(self, graph: Graph, match: dict):
FQ = match['FQ']
reshape = match['reshape']
conv = match['node']
rank_reshape = reshape.in_port(0).data.get_shape().size != reshape.out_port(0).data.get_shape().size
if not all([np.prod(FQ.in_port(i).data.get_shape()) == 1 for i in range(1, 5)]):
# FakeQuantize has limits with multiple values, that should be reshaped too
# Pulling Reshape through such FQ is a complex procedure because of broadcasting rules
return
new_rank = reshape.out_port(0).data.get_shape().size
reshape.in_port(0).disconnect()
reshape.out_port(0).disconnect()
FQ.out_port(0).connect(conv.in_port(1))
FQ.in_port(0).get_connection().insert_node(reshape)
reshape['need_shape_inference'] = True
reshape['override_output_shape'] = True
FQ['need_shape_inference'] = True
FQ['override_output_shape'] = True
if rank_reshape:
# force rank of limit inputs to match 0-input rank
# reshaping to a lower rank needs it the most due to FQ inner broadcast semantics
for i in range(1, 5):
reshape = create_op_node_with_second_input(graph, Reshape, int64_array([1] * new_rank),
{'override_output_shape': True})
FQ.in_port(i).get_connection().insert_node(reshape)
class DeconvolutionNormalizer(BackReplacementPattern):
enabled = True
force_clean_up = True
def run_before(self):
return [ReshapeMutation]
def run_after(self):
return [ApplyReverseChannels]
@staticmethod
def pattern():
return dict(
nodes=[
('node', dict(type='Deconvolution'))
],
edges=[]
)
def replace_pattern(self, graph: Graph, match: dict):
node = match['node']
node_name = node.soft_get('name', node.id)
if 2 in node.in_ports() and not node.in_port(2).disconnected():
# Third input represents output shape. Cutting its value according to scheme:
# [N, C, spatial_dim_0, ..., spatial_dim_n] -> [spatial_dim_0, ..., spatial_dim_n]
in_rank = node.in_port(0).data.get_shape().size
shape_src = node.in_port(2).get_source()
node.in_port(2).disconnect()
ss_0 = create_op_with_const_inputs(graph, StridedSlice, {1: np.array([2], dtype=np.int32),
2: np.array([in_rank], dtype=np.int32),
3: np.array([1], dtype=np.int32)},
{'name': node_name + '/ss_0_port',
'begin_mask': np.array([1], dtype=np.int32),
'end_mask': np.array([0], dtype=np.int32),
'new_axis_mask': np.array([0], dtype=np.int32),
'shrink_axis_mask': np.array([0], dtype=np.int32),
'ellipsis_mask': np.array([0], dtype=np.int32)})
shape_src.connect(ss_0.in_port(0))
ss_0.out_port(0).connect(node.in_port(2))
# Specification: *padding amount* is deduced from relation of input and output spatial shapes
del node['pad']
elif node.has_valid('original_output_spatial_shape'):
# node had fixed output spatial shape set in original framework, so we restore it here
const = Const(graph, {'value': int64_array(node.original_output_spatial_shape),
'name': node_name + '/original_spatial_shape'}).create_node()
node.add_input_port(2, skip_if_exist=True)
const.out_port(0).connect(node.in_port(2))
# Specification: *padding amount* is deduced from relation of input and output spatial shapes
del node['pad']
group = node.soft_get('group', 1)
if group != 1:
assert group > 1
weights_shape = node.in_port(1).data.get_shape()
assert weights_shape is not None
I = node.in_port(0).data.get_shape()[1]
assert I % group == 0
assert node.output % group == 0
new_shape = int64_array([group, I / group, node.output / group, *weights_shape[2:]])
assert np.prod(weights_shape) == np.prod(new_shape), \
'Initial weights shape {}, grouped weights shape {}'.format(weights_shape, new_shape)
reshape = create_op_node_with_second_input(graph, Reshape, int64_array(new_shape),
{'override_output_shape': True},
node.in_port(1).get_source().node)
node.in_port(1).get_connection().set_source(reshape.out_port(0))
node['type'] = 'GroupConvolutionBackpropData'
else:
node['type'] = 'ConvolutionBackpropData'
the-stack_106_24573 | @staticmethod
def fetch_statement(symbol, query='income-statement'):
r = requests.get(
'http://www.nasdaq.com/symbol/{symbol}/financials?query={query}'.format(
symbol=symbol, query=query))
soup = BeautifulSoup(r.content, 'html.parser')
div = soup.find('div', attrs={'class': 'genTable'})
table = div.find('table')
rows = table.find_all('tr')
statements = []
result = []
for row in rows:
header = row.find('th')
if header:
header = (header.text.lower()
          .replace(' ', '_').replace(':', '').replace("'", "")
          .replace('/', '-').replace('.', '').replace(',', ''))
cols = row.find_all('td')
if header == 'period_ending':
cols = row.find_all('th')
cols = [ele.text.strip().split('/')[-1] for ele in cols if ele]
# cols.insert(0, header)
attribute = [header] + cols[-4:]
statements.append(attribute)
# print(len(cols[-4:]))
for i in range(1, len(cols[-4:])):
# print(i+1)
yearly = {item[0]: item[i] for item in statements if len(item) > 1}
result.append(yearly)
return result
the-stack_106_24574 | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
This module contains the definition of a base class for
feature maps, together with several commonly used expansion approaches.
"""
import itertools
import logging
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.quantum_info import Pauli
from qiskit.qasm import pi
from qiskit.aqua.operators import evolution_instruction
from qiskit.aqua.components.feature_maps import FeatureMap, self_product
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class PauliExpansion(FeatureMap):
"""
Mapping data with a Pauli expansion (of arbitrary order) followed by entangling gates.
Refer to https://arxiv.org/pdf/1804.11326.pdf for details.
"""
CONFIGURATION = {
'name': 'PauliExpansion',
'description': 'Pauli expansion for feature map (any order)',
'input_schema': {
'$schema': 'http://json-schema.org/draft-07/schema#',
'id': 'Pauli_Expansion_schema',
'type': 'object',
'properties': {
'depth': {
'type': 'integer',
'default': 2,
'minimum': 1
},
'entangler_map': {
'type': ['array', 'null'],
'default': None
},
'entanglement': {
'type': 'string',
'default': 'full',
'enum': ['full', 'linear']
},
'paulis': {
'type': ['array', 'null'],
'items': {
'type': 'string'
},
'default': None
}
},
'additionalProperties': False
}
}
def __init__(self, feature_dimension, depth=2, entangler_map=None,
entanglement='full', paulis=None, data_map_func=self_product):
"""Constructor.
Args:
feature_dimension (int): number of features
depth (Optional(int)): the number of repeated circuits. Defaults to 2
entangler_map (Optional(list[list])): describe the connectivity of qubits,
each list describes
[source, target], or None for full entanglement.
Note that the order in the list is the order of
applying the two-qubit gate.
entanglement (Optional(str)): ['full', 'linear'], generate the qubit
connectivity by predefined topology.
Defaults to full
paulis (Optional(list[str])): a list of strings for to-be-used paulis.
Defaults to None. If None, ['Z', 'ZZ'] will be used.
data_map_func (Optional(Callable)): a mapping function for data x
"""
paulis = paulis if paulis is not None else ['Z', 'ZZ']
self.validate(locals())
super().__init__()
self._num_qubits = self._feature_dimension = feature_dimension
self._depth = depth
if entangler_map is None:
self._entangler_map = self.get_entangler_map(entanglement, feature_dimension)
else:
self._entangler_map = self.validate_entangler_map(entangler_map, feature_dimension)
self._pauli_strings = self._build_subset_paulis_string(paulis)
self._data_map_func = data_map_func
self._support_parameterized_circuit = True
def _build_subset_paulis_string(self, paulis):
# fill out the paulis to the number of qubits
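# e.g. (illustration) with 3 qubits and pauli 'ZZ', the index combinations (0, 1), (0, 2) and
# (1, 2) yield the strings 'IZZ', 'ZIZ' and 'ZZI' (qubit 0 maps to the right-most character)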
temp_paulis = []
for pauli in paulis:
len_pauli = len(pauli)
for possible_pauli_idx in itertools.combinations(range(self._num_qubits), len_pauli):
string_temp = ['I'] * self._num_qubits
for idx, _ in enumerate(possible_pauli_idx):
string_temp[-possible_pauli_idx[idx] - 1] = pauli[-idx - 1]
temp_paulis.append(''.join(string_temp))
# drop pauli strings whose non-identity qubits are not all connected in the entangler_map
final_paulis = []
for pauli in temp_paulis:
where_z = np.where(np.asarray(list(pauli[::-1])) != 'I')[0]
if len(where_z) == 1:
final_paulis.append(pauli)
else:
is_valid = True
for src, targ in itertools.combinations(where_z, 2):
if [src, targ] not in self._entangler_map:
is_valid = False
break
if is_valid:
final_paulis.append(pauli)
else:
logger.warning("Due to the limited entangler_map,"
" %s is skipped.", pauli)
logger.info("Pauli terms include: %s", final_paulis)
return final_paulis
def _extract_data_for_rotation(self, pauli, x):
where_non_i = np.where(np.asarray(list(pauli[::-1])) != 'I')[0]
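# e.g. (illustration) for pauli 'ZIZ' only the data entries at indices 0 and 2 are selected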
x = np.asarray(x)
return x[where_non_i]
def construct_circuit(self, x, qr=None, inverse=False):
"""
Construct the Pauli expansion circuit based on given data.
Args:
x (Union(numpy.ndarray, list[Parameter], ParameterVector)): 1-D to-be-transformed data.
qr (QuantumRegister, optional): the QuantumRegister object for the circuit, if None,
generate new registers with name q.
inverse (bool, optional): whether or not inverse the circuit
Returns:
QuantumCircuit: a quantum circuit transform data x.
Raises:
TypeError: invalid input
ValueError: invalid input
"""
if len(x) != self._num_qubits:
raise ValueError("number of qubits and data dimension must be the same.")
if qr is None:
qr = QuantumRegister(self._num_qubits, name='q')
qc = QuantumCircuit(qr)
for _ in range(self._depth):
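# each repetition applies a layer of u2(0, pi) gates (equivalent to Hadamards) followed by
# data-dependent Pauli evolutions, one per generated pauli string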
for i in range(self._num_qubits):
qc.u2(0, pi, qr[i])
for pauli in self._pauli_strings:
coeff = self._data_map_func(self._extract_data_for_rotation(pauli, x))
p = Pauli.from_label(pauli)
inst = evolution_instruction([[1, p]], coeff, 1)
qc.append(inst, qr)
return qc
the-stack_106_24575 | """Broker API."""
import logging
import time
from binascii import hexlify
import zmq
from . import zhelpers
from . import definitions
from . import broker_worker_api
from . import broker_service_api
# pylint: disable=R0902,E1101,R1705,R0912
_logger = logging.getLogger(__name__)
class Broker:
"""Broker API.
Implements the Majordomo Protocol broker of http:#rfc.zeromq.org/spec:7
and spec:8
"""
# We'd normally pull these from config data
INTERNAL_SERVICE_PREFIX = b"mmi."
HEARTBEAT_LIVENESS = 5 # 3-5 is reasonable
HEARTBEAT_INTERVAL = 2500 # msecs
HEARTBEAT_EXPIRY = HEARTBEAT_INTERVAL * HEARTBEAT_LIVENESS
# ---------------------------------------------------------------------
ctx = None # Our context
socket = None # Socket for clients & workers
poller = None # our Poller
heartbeat_at = None # When to send HEARTBEAT
services = None # known services
workers = None # known workers
waiting = None # idle workers
verbose = False # Print activity to stdout
# ---------------------------------------------------------------------
def __init__(self, verbose=False):
"""
Initialize the broker state.
@param verbose: boolean variable to turn on more verbose logging
"""
self.verbose = verbose
self.services = {}
self.workers = {}
self.waiting = []
self.heartbeat_at = time.time() + 1e-3 * self.HEARTBEAT_INTERVAL
self.ctx = zmq.Context()
self.socket = self.ctx.socket(zmq.ROUTER)
self.socket.linger = 0
self.poller = zmq.Poller()
self.poller.register(self.socket, zmq.POLLIN)
self.packets_client_in = 0
self.packets_client_out = 0
self.packets_workers_in = 0
self.packets_workers_out = 0
self.packets_processed = 0
# ---------------------------------------------------------------------
def mediate(self):
"""Main broker work happens here."""
while True:
try:
items = self.poller.poll(self.HEARTBEAT_INTERVAL)
except KeyboardInterrupt:
break # Interrupted
# if there is an request to the broker
if items:
msg = self.socket.recv_multipart()
if self.verbose:
logging.info("I: received message:")
zhelpers.dump(msg)
# get the data from the packet
sender = msg.pop(0)
empty = msg.pop(0)
if empty != b"":
_logger.error("E: invalid empty space in message")
header = msg.pop(0)
if definitions.C_CLIENT == header:
self.process_client(sender, msg)
elif definitions.W_WORKER == header:
self.process_worker(sender, msg)
else:
logging.error("E: invalid message: %s", header)
zhelpers.dump(msg)
self.purge_workers()
self.send_heartbeats()
def destroy(self):
"""Disconnect all workers, destroy context."""
while self.workers:
self.delete_worker(list(self.workers.values())[0], True)
self.ctx.destroy(0)
def process_client(self, sender, msg):
"""
Process a request coming from a client.
@param sender:
@param msg:
@return:
"""
if len(msg) < 2:
_logger.warning("E: did not receive the right msg length from the client")
service = msg.pop(0)
# Set reply return address to client sender
msg = [sender, b""] + msg
# if self.verbose:
# self.packets_clients_in += 1
if service.startswith(self.INTERNAL_SERVICE_PREFIX):
self.service_internal(service, msg)
else:
self.dispatch(self.require_service(service), msg)
def process_worker(self, sender, msg):
"""
Process message sent to us by a worker.
@param sender:
@param msg:
@return:
"""
if len(msg) < 1:
_logger.error("E: msg length is <1, invalid msg.")
# if self.verbose:
# self.packets_workers_in += 1
command = msg.pop(0)
# print(command)
worker_ready = hexlify(sender) in self.workers
worker = self.require_worker(sender)
if definitions.W_READY == command:
if len(msg) < 1:
_logger.error("E: invalid service name.")
service = msg.pop(0)
# Not first command in session or Reserved service name
if worker_ready or service.startswith(self.INTERNAL_SERVICE_PREFIX):
self.delete_worker(worker, True)
else:
# Attach worker to service and mark as idle
worker.service = self.require_service(service)
self.worker_waiting(worker)
elif definitions.W_REPLY == command:
if worker_ready:
# Remove & save client return envelope and insert the
# protocol header and service name, then rewrap envelope.
client = msg.pop(0)
# empty = msg.pop(0) # ?
msg = [client, b"", definitions.C_CLIENT, worker.service.name] + msg
self.socket.send_multipart(msg)
self.worker_waiting(worker)
else:
self.delete_worker(worker, True)
elif definitions.W_HEARTBEAT == command:
if worker_ready:
worker.expiry = time.time() + 1e-3 * self.HEARTBEAT_EXPIRY
else:
self.delete_worker(worker, True)
elif definitions.W_DISCONNECT == command:
self.delete_worker(worker, False)
else:
_logger.error("E: invalid message command: %s", command)
zhelpers.dump(msg)
def delete_worker(self, worker, disconnect):
"""
Delete worker from all data structures, and deletes worker.
@param worker:
@param disconnect:
@return:
"""
if worker is None:
_logger.error("E: Worker is None, invalid msg.")
if disconnect:
self.send_to_worker(worker, definitions.W_DISCONNECT, None, None)
if worker.service is not None:
worker.service.waiting.remove(worker)
self.workers.pop(worker.identity)
def require_worker(self, address):
"""
Find the worker (creates if necessary).
@param address:
@return:
"""
if address is None:
_logger.error("E: adders is None, invalid msg.")
identity = hexlify(address)
worker = self.workers.get(identity)
if worker is None:
worker = broker_worker_api.Worker(identity, address, self.HEARTBEAT_EXPIRY)
self.workers[identity] = worker
if self.verbose:
_logger.info("I: registering new worker: %s", identity)
return worker
def require_service(self, name):
"""
Locate the service (creates if necessary).
@param name:
@return:
"""
if name is None:
_logger.error("E: name is None, invalid msg.")
service = self.services.get(name)
if service is None:
service = broker_service_api.Service(name)
self.services[name] = service
return service
def bind(self, endpoint):
"""
Bind broker to endpoint, can call this multiple times.
We use a single socket for both clients and workers.
@param endpoint:
@return:
"""
self.socket.bind(endpoint)
_logger.info("I: broker/0.1.1 is active at %s", endpoint)
def service_internal(self, service, msg):
"""
Handle internal service according to 8/MMI specification.
@param service:
@param msg:
@return:
"""
returncode = b"501"
_logger.debug("D : Handling internal request.")
if service == b"mmi.service":
name = msg[-1]
returncode = b"200" if name in self.services else b"404"
msg[-1] = returncode
# insert the protocol header and service name after the routing envelope ([client, ''])
msg = msg[:2] + [definitions.C_CLIENT, service] + msg[2:]
self.socket.send_multipart(msg)
def send_heartbeats(self):
"""Send heartbeats to idle workers if it's time."""
if time.time() > self.heartbeat_at:
for worker in self.waiting:
self.send_to_worker(worker, definitions.W_HEARTBEAT, None, None)
self.heartbeat_at = time.time() + 1e-3 * self.HEARTBEAT_INTERVAL
def purge_workers(self):
"""Look for & kill expired workers.
Workers are oldest to most recent, so we stop at the first alive worker.
"""
while self.waiting:
worker = self.waiting[0]
if worker.expiry < time.time():
_logger.info("I: deleting expired worker: %s", worker.identity)
self.delete_worker(worker, False)
self.waiting.pop(0)
else:
break
def worker_waiting(self, worker):
"""
Worker is now waiting for work.
@param worker:
@return:
"""
# Queue to broker and service waiting lists
self.waiting.append(worker)
worker.service.waiting.append(worker)
worker.expiry = time.time() + 1e-3 * self.HEARTBEAT_EXPIRY
self.dispatch(worker.service, None)
def dispatch(self, service, msg):
"""
Dispatch requests to waiting workers as possible.
@param service:
@param msg:
@return:
"""
if service is None:
_logger.error("E: service is None, msg invalid.")
# Queue message if any
if msg is not None:
service.requests.append(msg)
self.purge_workers()
while service.waiting and service.requests:
msg = service.requests.pop(0)
worker = service.waiting.pop(0)
self.waiting.remove(worker)
self.send_to_worker(worker, definitions.W_REQUEST, None, msg)
def send_to_worker(self, worker, command, option, msg=None):
"""
Send message to worker.
If message is provided, sends that message.
@param worker:
@param command:
@param option:
@param msg:
@return:
"""
if msg is None:
msg = []
elif not isinstance(msg, list):
msg = [msg]
# Stack routing and protocol envelopes to start of message
# and routing envelope
if option is not None:
msg = [option] + msg
msg = [worker.address, b"", definitions.W_WORKER, command] + msg
if self.verbose:
_logger.info("I: sending %r to worker", command)
zhelpers.dump(msg)
self.socket.send_multipart(msg)
the-stack_106_24576 | # -*- coding: utf-8 -*-
from collections import Counter
__author__ = "Sergey Aganezov"
__email__ = "aganezov(at)cs.jhu.edu"
__status__ = "production"
class KBreak(object):
""" A generic object that can represent any k-break ( k>= 2)
A notion of k-break arises from the bioinformatics combinatorial object BreakpointGraph and is first mentioned in http://home.gwu.edu/~maxal/ap_tcs08.pdf
A generic k-break operates on k specified edges of a specific multicolor and replaces them with another set of k edges with the same multicolor, on the same set of vertices, in such a way that the degree of every vertex is kept intact.
Initialization of the instance of :class:`KBreak` is performed with a validity check of supplied data, which must comply with the definition of k-break.
The class carries the following attributes describing the k-break structure:
* :attr:`KBreak.start_edges`: a list of edges (in terms of paired vertices) that are to be removed by current :class:`KBreak`
* :attr:`KBreak.result_edges`: a list of edges (in terms of paired vertices) that are to be created by current :class:`KBreak`
* :attr:`KBreak.multicolor`: a :class:`bg.multicolor.Multicolor` instance, that specifies the multicolor of edges that are to be removed / created by current :class:`KBreak`
Main operations:
* :meth:`KBreak.valid_kbreak_matchings`: a method that checks if the provided sets of start / result edges comply with the definition of a k-break
"""
def __init__(self, start_edges, result_edges, multicolor, data=None):
""" Initialization of :class:`KBreak` object.
The initialization process consists of multiple checks, before any assignment and initialization itself is performed.
First checks the fact, that information about start / result edges is supplied in form of paired vertices.
Then check is performed to make sure, that degrees of vertices, that current :class:`KBreak` operates on, is preserved.
:param start_edges: a list of pairs of vertices, that specifies where edges shall be removed by current :class:`KBreak`
:type start_edges: ``list(tuple(vertex, vertex), ...)``
:param result_edges: a list of pairs of vertices, that specifies where edges shall be created by current :class:`KBreak`
:type result_edges: ``list(tuple(vertex, vertex), ...)``
:param multicolor: a multicolor, that specifies which edges between specified pairs of vertices are to be removed / created
:type multicolor: :class:`bg.multicolor.Multicolor`
:return: a new instance of :class:`Kbreak`
:rtype: :class:`KBreak`
:raises: ``ValueError``
"""
self.start_edges = start_edges
self.result_edges = result_edges
self.multicolor = multicolor
if data is None:
data = self.create_default_data_dict()
self.data = data
for vertex_pair in self.start_edges:
if len(vertex_pair) != 2:
raise ValueError("Expected edges in a form of pairs of vertices.\n "
"Not a pair of vertices ({issue}) in start edges."
"".format(issue=str(vertex_pair)))
for vertex_pair in self.result_edges:
if len(vertex_pair) != 2:
raise ValueError("Expected edges in a form of pairs of vertices.\n "
"Not a pair of vertices ({issue}) in result edges."
"".format(issue=str(vertex_pair)))
if not KBreak.valid_kbreak_matchings(start_edges=self.start_edges,
result_edges=self.result_edges):
raise ValueError("Supplied sets of start and result edges do not correspond to "
"correct k-break operation (either the set of vertices is not consistent, or "
"the degrees of vertices change)")
@property
def is_a_two_break(self):
return len(self.start_edges) == 2
@property
def is_a_fusion(self):
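# in this model a fusion is a 2-break whose result contains an edge with both endpoints
# irregular (telomere-like) vertices; this reading is inferred from is_irregular_vertex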
return self.is_a_two_break and any(map(lambda vertex_set: all(map(lambda vertex: vertex.is_irregular_vertex, vertex_set)), self.result_edges))
@classmethod
def create_default_data_dict(cls):
return {
"origin": None
}
@staticmethod
def valid_kbreak_matchings(start_edges, result_edges):
""" A staticmethod check implementation that makes sure that degrees of vertices, that are affected by current :class:`KBreak`
By the notion of k-break, it shall keep the degree of vertices in :class:`bg.breakpoint_graph.BreakpointGraph` the same, after its application.
By utilizing the Counter class, such check is performed, as the number the vertex is mentioned corresponds to its degree.
:param start_edges: a list of pairs of vertices, that specifies where edges shall be removed by :class:`KBreak`
:type start_edges: ``list(tuple(vertex, vertex), ...)``
:param result_edges: a list of pairs of vertices, that specifies where edges shall be created by :class:`KBreak`
:type result_edges: ``list(tuple(vertex, vertex), ...)``
:return: a flag indicating if the degree of vertices are equal in start / result edges, targeted by :class:`KBreak`
:rtype: ``Boolean``
"""
start_stats = Counter(vertex for vertex_pair in start_edges for vertex in vertex_pair)
result_stats = Counter(vertex for vertex_pair in result_edges for vertex in vertex_pair)
return start_stats == result_stats
the-stack_106_24577 | from tacticalrmm.test import TacticalTestCase
from .serializers import InstalledSoftwareSerializer
from model_bakery import baker
from unittest.mock import patch
from .models import InstalledSoftware, ChocoLog
from agents.models import Agent
class TestSoftwareViews(TacticalTestCase):
def setUp(self):
self.authenticate()
def test_chocos_get(self):
url = "/software/chocos/"
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.check_not_authenticated("get", url)
@patch("software.tasks.install_program.delay")
def test_chocos_install(self, install_program):
url = "/software/install/"
agent = baker.make_recipe("agents.agent")
# test a call where agent doesn't exist
invalid_data = {"pk": 500, "name": "Test Software", "version": "1.0.0"}
resp = self.client.post(url, invalid_data, format="json")
self.assertEqual(resp.status_code, 404)
data = {"pk": agent.pk, "name": "Test Software", "version": "1.0.0"}
resp = self.client.post(url, data, format="json")
self.assertEqual(resp.status_code, 200)
install_program.assert_called_with(data["pk"], data["name"], data["version"])
self.check_not_authenticated("post", url)
def test_chocos_installed(self):
# test a call where agent doesn't exist
resp = self.client.get("/software/installed/500/", format="json")
self.assertEqual(resp.status_code, 404)
agent = baker.make_recipe("agents.agent")
url = f"/software/installed/{agent.pk}/"
# test without agent software
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEquals(resp.data, [])
# make some software
software = baker.make(
"software.InstalledSoftware",
agent=agent,
software={},
)
serializer = InstalledSoftwareSerializer(software)
resp = self.client.get(url, format="json")
self.assertEqual(resp.status_code, 200)
self.assertEquals(resp.data, serializer.data)
self.check_not_authenticated("get", url)
class TestSoftwareTasks(TacticalTestCase):
@patch("agents.models.Agent.salt_api_cmd")
def test_install_chocolatey(self, salt_api_cmd):
from .tasks import install_chocolatey
agent = baker.make_recipe("agents.agent")
# test failed attempt
salt_api_cmd.return_value = "timeout"
ret = install_chocolatey(agent.pk)
salt_api_cmd.assert_called_with(
timeout=120, func="chocolatey.bootstrap", arg="force=True"
)
self.assertFalse(ret)
# test successful
salt_api_cmd.return_value = "chocolatey is now ready"
ret = install_chocolatey(agent.pk)
salt_api_cmd.assert_called_with(
timeout=120, func="chocolatey.bootstrap", arg="force=True"
)
self.assertTrue(ret)
self.assertTrue(Agent.objects.get(pk=agent.pk).choco_installed)
@patch("agents.models.Agent.salt_api_cmd")
def test_update_chocos(self, salt_api_cmd):
from .tasks import update_chocos
# initialize data
online_agent = baker.make_recipe("agents.online_agent", choco_installed=True)
baker.make("software.ChocoSoftware", chocos={})
# return data
chocolately_list = {
"git": "2.3.4",
"docker": "1.0.2",
}
# test failed attempt
salt_api_cmd.return_value = "timeout"
ret = update_chocos()
salt_api_cmd.assert_called_with(timeout=10, func="test.ping")
self.assertTrue(ret)
self.assertEquals(salt_api_cmd.call_count, 1)
salt_api_cmd.reset_mock()
# test successful attempt
salt_api_cmd.side_effect = [True, chocolately_list]
ret = update_chocos()
self.assertTrue(ret)
salt_api_cmd.assert_any_call(timeout=10, func="test.ping")
salt_api_cmd.assert_any_call(timeout=200, func="chocolatey.list")
self.assertEquals(salt_api_cmd.call_count, 2)
@patch("agents.models.Agent.nats_cmd")
def test_get_installed_software(self, nats_cmd):
from .tasks import get_installed_software
agent = baker.make_recipe("agents.agent")
nats_return = [
{
"name": "Mozilla Maintenance Service",
"size": "336.9 kB",
"source": "",
"version": "73.0.1",
"location": "",
"publisher": "Mozilla",
"uninstall": '"C:\\Program Files (x86)\\Mozilla Maintenance Service\\uninstall.exe"',
"install_date": "0001-01-01 00:00:00 +0000 UTC",
},
{
"name": "OpenVPN 2.4.9-I601-Win10 ",
"size": "8.7 MB",
"source": "",
"version": "2.4.9-I601-Win10",
"location": "C:\\Program Files\\OpenVPN\\",
"publisher": "OpenVPN Technologies, Inc.",
"uninstall": "C:\\Program Files\\OpenVPN\\Uninstall.exe",
"install_date": "0001-01-01 00:00:00 +0000 UTC",
},
{
"name": "Microsoft Office Professional Plus 2019 - en-us",
"size": "0 B",
"source": "",
"version": "16.0.10368.20035",
"location": "C:\\Program Files\\Microsoft Office",
"publisher": "Microsoft Corporation",
"uninstall": '"C:\\Program Files\\Common Files\\Microsoft Shared\\ClickToRun\\OfficeClickToRun.exe" scenario=install scenariosubtype=ARP sourcetype=None productstoremove=ProPlus2019Volume.16_en-us_x-none culture=en-us version.16=16.0',
"install_date": "0001-01-01 00:00:00 +0000 UTC",
},
]
# test failed attempt
nats_cmd.return_value = "timeout"
ret = get_installed_software(agent.pk)
self.assertFalse(ret)
nats_cmd.assert_called_with({"func": "softwarelist"}, timeout=20)
nats_cmd.reset_mock()
# test successful attempt
nats_cmd.return_value = nats_return
ret = get_installed_software(agent.pk)
self.assertTrue(ret)
nats_cmd.assert_called_with({"func": "softwarelist"}, timeout=20)
@patch("agents.models.Agent.salt_api_cmd")
@patch("software.tasks.get_installed_software.delay")
def test_install_program(self, get_installed_software, salt_api_cmd):
from .tasks import install_program
agent = baker.make_recipe("agents.agent")
# failed attempt
salt_api_cmd.return_value = "timeout"
ret = install_program(agent.pk, "git", "2.3.4")
self.assertFalse(ret)
salt_api_cmd.assert_called_with(
timeout=900, func="chocolatey.install", arg=["git", "version=2.3.4"]
)
salt_api_cmd.reset_mock()
# successful attempt
salt_api_cmd.return_value = "install of git was successful"
ret = install_program(agent.pk, "git", "2.3.4")
self.assertTrue(ret)
salt_api_cmd.assert_called_with(
timeout=900, func="chocolatey.install", arg=["git", "version=2.3.4"]
)
get_installed_software.assert_called_with(agent.pk)
self.assertTrue(ChocoLog.objects.filter(agent=agent, name="git").exists())
the-stack_106_24578 | from cereal import car
from collections import defaultdict
from common.numpy_fast import interp
from common.kalman.simple_kalman import KF1D
from opendbc.can.can_define import CANDefine
from opendbc.can.parser import CANParser
from selfdrive.config import Conversions as CV
from selfdrive.car.honda.values import CAR, DBC, STEER_THRESHOLD, SPEED_FACTOR, HONDA_BOSCH
from selfdrive.kegman_conf import kegman_conf
GearShifter = car.CarState.GearShifter
def parse_gear_shifter(gear, vals):
val_to_capnp = {'P': GearShifter.park, 'R': GearShifter.reverse, 'N': GearShifter.neutral,
'D': GearShifter.drive, 'S': GearShifter.sport, 'L': GearShifter.low}
try:
return val_to_capnp[vals[gear]]
except KeyError:
return "unknown"
def calc_cruise_offset(offset, speed):
# heuristic formula so that speed is controlled to ~0.3 m/s below pid_speed
# constraints to solve for _K0, _K1, _K2 are:
# - speed = 0m/s, out = -0.3
# - speed = 34m/s, offset = 20, out = -0.25
# - speed = 34m/s, offset = -2.5, out = -1.8
_K0 = -0.3
_K1 = -0.01879
_K2 = 0.01013
return min(_K0 + _K1 * speed + _K2 * speed * offset, 0.)
def get_can_signals(CP):
# this function generates lists for signal, messages and initial values
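# each signal entry is (signal name, CAN message name, default value) and each check is
# (CAN message name, expected message frequency) -- layout as consumed by CANParser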
signals = [
("XMISSION_SPEED", "ENGINE_DATA", 0),
("WHEEL_SPEED_FL", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_FR", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_RL", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_RR", "WHEEL_SPEEDS", 0),
("STEER_ANGLE", "STEERING_SENSORS", 0),
("STEER_ANGLE_RATE", "STEERING_SENSORS", 0),
("STEER_TORQUE_SENSOR", "STEER_STATUS", 0),
("STEER_TORQUE_MOTOR", "STEER_STATUS", 0),
("LEFT_BLINKER", "SCM_FEEDBACK", 0),
("RIGHT_BLINKER", "SCM_FEEDBACK", 0),
("GEAR", "GEARBOX", 0),
("SEATBELT_DRIVER_LAMP", "SEATBELT_STATUS", 1),
("SEATBELT_DRIVER_LATCHED", "SEATBELT_STATUS", 0),
("BRAKE_PRESSED", "POWERTRAIN_DATA", 0),
("BRAKE_SWITCH", "POWERTRAIN_DATA", 0),
("CRUISE_BUTTONS", "SCM_BUTTONS", 0),
("HUD_LEAD", "ACC_HUD", 0),
("ESP_DISABLED", "VSA_STATUS", 1),
("USER_BRAKE", "VSA_STATUS", 0),
("BRAKE_HOLD_ACTIVE", "VSA_STATUS", 0),
("STEER_STATUS", "STEER_STATUS", 5),
("GEAR_SHIFTER", "GEARBOX", 0),
("PEDAL_GAS", "POWERTRAIN_DATA", 0),
("CRUISE_SETTING", "SCM_BUTTONS", 0),
("ACC_STATUS", "POWERTRAIN_DATA", 0),
]
checks = [
("ENGINE_DATA", 100),
("WHEEL_SPEEDS", 50),
("STEERING_SENSORS", 100),
("SEATBELT_STATUS", 10),
("CRUISE", 10),
("POWERTRAIN_DATA", 100),
("VSA_STATUS", 50),
]
if CP.carFingerprint == CAR.ODYSSEY_CHN:
checks += [
("SCM_FEEDBACK", 25),
("SCM_BUTTONS", 50),
]
else:
checks += [
("SCM_FEEDBACK", 10),
("SCM_BUTTONS", 25),
]
if CP.carFingerprint == CAR.CRV_HYBRID:
checks += [
("GEARBOX", 50),
]
else:
checks += [
("GEARBOX", 100),
]
if CP.radarOffCan:
# Civic is the only Bosch model to use the same brake message as other Hondas.
if CP.carFingerprint not in (CAR.ACCORDH, CAR.CIVIC_BOSCH, CAR.INSIGHT, CAR.CRV_HYBRID):
signals += [("BRAKE_PRESSED", "BRAKE_MODULE", 0)]
checks += [("BRAKE_MODULE", 50)]
signals += [("CAR_GAS", "GAS_PEDAL_2", 0),
("MAIN_ON", "SCM_FEEDBACK", 0),
("CRUISE_CONTROL_LABEL", "ACC_HUD", 0),
("EPB_STATE", "EPB_STATUS", 0),
("CRUISE_SPEED", "ACC_HUD", 0)]
checks += [("GAS_PEDAL_2", 100)]
else:
# Nidec signals.
signals += [("BRAKE_ERROR_1", "STANDSTILL", 1),
("BRAKE_ERROR_2", "STANDSTILL", 1),
("CRUISE_SPEED_PCM", "CRUISE", 0),
("CRUISE_SPEED_OFFSET", "CRUISE_PARAMS", 0)]
checks += [("STANDSTILL", 50)]
if CP.carFingerprint == CAR.ODYSSEY_CHN:
checks += [("CRUISE_PARAMS", 10)]
else:
checks += [("CRUISE_PARAMS", 50)]
if CP.carFingerprint in (CAR.ACCORD, CAR.ACCORD_15, CAR.ACCORDH, CAR.INSIGHT):
signals += [("DRIVERS_DOOR_OPEN", "SCM_FEEDBACK", 1),
("LEAD_DISTANCE", "RADAR_HUD", 0)]
checks += [("RADAR_HUD", 50)]
elif CP.carFingerprint in (CAR.CIVIC_BOSCH, CAR.CRV_HYBRID):
signals += [("DRIVERS_DOOR_OPEN", "SCM_FEEDBACK", 1)]
checks += [("RADAR_HUD", 50)]
elif CP.carFingerprint == CAR.ODYSSEY_CHN:
signals += [("DRIVERS_DOOR_OPEN", "SCM_BUTTONS", 1)]
else:
signals += [("DOOR_OPEN_FL", "DOORS_STATUS", 1),
("DOOR_OPEN_FR", "DOORS_STATUS", 1),
("DOOR_OPEN_RL", "DOORS_STATUS", 1),
("DOOR_OPEN_RR", "DOORS_STATUS", 1),
("WHEELS_MOVING", "STANDSTILL", 1)]
checks += [("DOORS_STATUS", 3)]
if CP.carFingerprint == CAR.CIVIC:
signals += [("CAR_GAS", "GAS_PEDAL_2", 0),
("MAIN_ON", "SCM_FEEDBACK", 0),
("IMPERIAL_UNIT", "HUD_SETTING", 0),
("EPB_STATE", "EPB_STATUS", 0)]
elif CP.carFingerprint == CAR.ACURA_ILX:
signals += [("CAR_GAS", "GAS_PEDAL_2", 0),
("MAIN_ON", "SCM_BUTTONS", 0)]
elif CP.carFingerprint in (CAR.CRV, CAR.ACURA_RDX, CAR.PILOT_2019, CAR.RIDGELINE):
signals += [("MAIN_ON", "SCM_BUTTONS", 0)]
elif CP.carFingerprint == CAR.FIT:
signals += [("CAR_GAS", "GAS_PEDAL_2", 0),
("MAIN_ON", "SCM_BUTTONS", 0),
("BRAKE_HOLD_ACTIVE", "VSA_STATUS", 0)]
elif CP.carFingerprint == CAR.ODYSSEY:
signals += [("MAIN_ON", "SCM_FEEDBACK", 0),
("EPB_STATE", "EPB_STATUS", 0)]
checks += [("EPB_STATUS", 50)]
elif CP.carFingerprint in (CAR.PILOT, CAR.PILOT_2018):
signals += [("MAIN_ON", "SCM_BUTTONS", 0),
("CAR_GAS", "GAS_PEDAL_2", 0)]
elif CP.carFingerprint == CAR.ODYSSEY_CHN:
signals += [("MAIN_ON", "SCM_BUTTONS", 0),
("EPB_STATE", "EPB_STATUS", 0)]
checks += [("EPB_STATUS", 50)]
# add gas interceptor reading if we are using it
if CP.enableGasInterceptor:
signals.append(("INTERCEPTOR_GAS", "GAS_SENSOR", 0))
signals.append(("INTERCEPTOR_GAS2", "GAS_SENSOR", 0))
checks.append(("GAS_SENSOR", 50))
return signals, checks
def get_can_parser(CP):
signals, checks = get_can_signals(CP)
bus_pt = 1 if CP.isPandaBlack and CP.carFingerprint in HONDA_BOSCH else 0
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, bus_pt)
def get_cam_can_parser(CP):
signals = []
if CP.carFingerprint in HONDA_BOSCH:
signals += [("ACCEL_COMMAND", "ACC_CONTROL", 0),
("AEB_STATUS", "ACC_CONTROL", 0)]
else:
signals += [("COMPUTER_BRAKE", "BRAKE_COMMAND", 0),
("AEB_REQ_1", "BRAKE_COMMAND", 0),
("FCW", "BRAKE_COMMAND", 0),
("CHIME", "BRAKE_COMMAND", 0),
("FCM_OFF", "ACC_HUD", 0),
("FCM_OFF_2", "ACC_HUD", 0),
("FCM_PROBLEM", "ACC_HUD", 0),
("ICONS", "ACC_HUD", 0)]
# all hondas except CRV, RDX and 2019 Odyssey@China use 0xe4 for steering
checks = [(0xe4, 100)]
if CP.carFingerprint in [CAR.CRV, CAR.ACURA_RDX, CAR.ODYSSEY_CHN]:
checks = [(0x194, 100)]
bus_cam = 1 if CP.carFingerprint in HONDA_BOSCH and not CP.isPandaBlack else 2
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, bus_cam)
class CarState():
def __init__(self, CP):
self.kegman = kegman_conf()
self.trMode = int(self.kegman.conf['lastTrMode']) # default to last distance interval on startup
#self.trMode = 1
self.lkMode = True
self.read_distance_lines_prev = 4
self.CP = CP
self.can_define = CANDefine(DBC[CP.carFingerprint]['pt'])
self.shifter_values = self.can_define.dv["GEARBOX"]["GEAR_SHIFTER"]
self.steer_status_values = defaultdict(lambda: "UNKNOWN", self.can_define.dv["STEER_STATUS"]["STEER_STATUS"])
self.user_gas, self.user_gas_pressed = 0., 0
self.brake_switch_prev = 0
self.brake_switch_ts = 0
self.lead_distance = 255
self.hud_lead = 0
self.cruise_buttons = 0
self.cruise_setting = 0
self.v_cruise_pcm_prev = 0
self.blinker_on = 0
self.left_blinker_on = 0
self.right_blinker_on = 0
self.cruise_mode = 0
self.stopped = 0
# vEgo kalman filter
dt = 0.01
# Q = np.matrix([[10.0, 0.0], [0.0, 100.0]])
# R = 1e3
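# K below appears to be the steady-state Kalman gain precomputed offline from the Q/R values
# commented out above, so only the cheap state update (no covariance propagation) runs per cycle.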
self.v_ego_kf = KF1D(x0=[[0.0], [0.0]],
A=[[1.0, dt], [0.0, 1.0]],
C=[1.0, 0.0],
K=[[0.12287673], [0.29666309]])
self.v_ego = 0.0
def update(self, cp, cp_cam):
# car params
v_weight_v = [0., 1.] # don't trust smooth speed at low values to avoid premature zero snapping
v_weight_bp = [1., 6.] # smooth blending, below ~0.6m/s the smooth speed snaps to zero
# update prevs, update must run once per loop
self.prev_cruise_buttons = self.cruise_buttons
self.prev_blinker_on = self.blinker_on
self.prev_lead_distance = self.lead_distance
self.prev_left_blinker_on = self.left_blinker_on
self.prev_right_blinker_on = self.right_blinker_on
# ******************* parse out can *******************
if self.CP.carFingerprint in (CAR.ACCORD, CAR.ACCORD_15, CAR.ACCORDH, CAR.INSIGHT): # TODO: find wheels moving bit in dbc
self.standstill = cp.vl["ENGINE_DATA"]['XMISSION_SPEED'] < 0.1
self.door_all_closed = not cp.vl["SCM_FEEDBACK"]['DRIVERS_DOOR_OPEN']
self.lead_distance = cp.vl["RADAR_HUD"]['LEAD_DISTANCE']
elif self.CP.carFingerprint in (CAR.CIVIC_BOSCH, CAR.CRV_HYBRID):
self.standstill = cp.vl["ENGINE_DATA"]['XMISSION_SPEED'] < 0.1
self.door_all_closed = not cp.vl["SCM_FEEDBACK"]['DRIVERS_DOOR_OPEN']
elif self.CP.carFingerprint == CAR.ODYSSEY_CHN:
self.standstill = cp.vl["ENGINE_DATA"]['XMISSION_SPEED'] < 0.1
self.door_all_closed = not cp.vl["SCM_BUTTONS"]['DRIVERS_DOOR_OPEN']
else:
self.standstill = not cp.vl["STANDSTILL"]['WHEELS_MOVING']
self.door_all_closed = not any([cp.vl["DOORS_STATUS"]['DOOR_OPEN_FL'], cp.vl["DOORS_STATUS"]['DOOR_OPEN_FR'],
cp.vl["DOORS_STATUS"]['DOOR_OPEN_RL'], cp.vl["DOORS_STATUS"]['DOOR_OPEN_RR']])
self.seatbelt = not cp.vl["SEATBELT_STATUS"]['SEATBELT_DRIVER_LAMP'] and cp.vl["SEATBELT_STATUS"]['SEATBELT_DRIVER_LATCHED']
steer_status = self.steer_status_values[cp.vl["STEER_STATUS"]['STEER_STATUS']]
self.steer_error = steer_status not in ['NORMAL', 'NO_TORQUE_ALERT_1', 'NO_TORQUE_ALERT_2', 'LOW_SPEED_LOCKOUT', 'TMP_FAULT']
# NO_TORQUE_ALERT_2 can be caused by bump OR steering nudge from driver
self.steer_not_allowed = steer_status not in ['NORMAL', 'NO_TORQUE_ALERT_2']
# LOW_SPEED_LOCKOUT is not worth a warning
self.steer_warning = steer_status not in ['NORMAL', 'LOW_SPEED_LOCKOUT', 'NO_TORQUE_ALERT_2']
if self.CP.radarOffCan:
self.brake_error = 0
else:
self.brake_error = cp.vl["STANDSTILL"]['BRAKE_ERROR_1'] or cp.vl["STANDSTILL"]['BRAKE_ERROR_2']
self.esp_disabled = cp.vl["VSA_STATUS"]['ESP_DISABLED']
# calc best v_ego estimate by averaging all four wheel speeds
speed_factor = SPEED_FACTOR[self.CP.carFingerprint]
self.v_wheel_fl = cp.vl["WHEEL_SPEEDS"]['WHEEL_SPEED_FL'] * CV.KPH_TO_MS * speed_factor
self.v_wheel_fr = cp.vl["WHEEL_SPEEDS"]['WHEEL_SPEED_FR'] * CV.KPH_TO_MS * speed_factor
self.v_wheel_rl = cp.vl["WHEEL_SPEEDS"]['WHEEL_SPEED_RL'] * CV.KPH_TO_MS * speed_factor
self.v_wheel_rr = cp.vl["WHEEL_SPEEDS"]['WHEEL_SPEED_RR'] * CV.KPH_TO_MS * speed_factor
v_wheel = (self.v_wheel_fl + self.v_wheel_fr + self.v_wheel_rl + self.v_wheel_rr)/4.
# blend in transmission speed at low speed, since it has more low speed accuracy
self.v_weight = interp(v_wheel, v_weight_bp, v_weight_v)
speed = (1. - self.v_weight) * cp.vl["ENGINE_DATA"]['XMISSION_SPEED'] * CV.KPH_TO_MS * speed_factor + \
self.v_weight * v_wheel
if abs(speed - self.v_ego) > 2.0: # Prevent large accelerations when car starts at non zero speed
self.v_ego_kf.x = [[speed], [0.0]]
self.v_ego_raw = speed
v_ego_x = self.v_ego_kf.update(speed)
self.v_ego = float(v_ego_x[0])
self.a_ego = float(v_ego_x[1])
# this is a hack for the interceptor. This is now only used in the simulation
# TODO: Replace tests by toyota so this can go away
if self.CP.enableGasInterceptor:
self.user_gas = (cp.vl["GAS_SENSOR"]['INTERCEPTOR_GAS'] + cp.vl["GAS_SENSOR"]['INTERCEPTOR_GAS2']) / 2.
self.user_gas_pressed = self.user_gas > 0 # this works because the interceptor reads < 0 when the pedal position is 0. Once calibrated, this will change
self.gear = 0 if self.CP.carFingerprint == CAR.CIVIC else cp.vl["GEARBOX"]['GEAR']
self.angle_steers = cp.vl["STEERING_SENSORS"]['STEER_ANGLE']
self.angle_steers_rate = cp.vl["STEERING_SENSORS"]['STEER_ANGLE_RATE']
#self.cruise_setting = cp.vl["SCM_BUTTONS"]['CRUISE_SETTING']
self.cruise_buttons = cp.vl["SCM_BUTTONS"]['CRUISE_BUTTONS']
self.blinker_on = cp.vl["SCM_FEEDBACK"]['LEFT_BLINKER'] or cp.vl["SCM_FEEDBACK"]['RIGHT_BLINKER']
self.left_blinker_on = cp.vl["SCM_FEEDBACK"]['LEFT_BLINKER']
self.right_blinker_on = cp.vl["SCM_FEEDBACK"]['RIGHT_BLINKER']
self.brake_hold = cp.vl["VSA_STATUS"]['BRAKE_HOLD_ACTIVE']
if self.CP.carFingerprint in (CAR.CIVIC, CAR.ODYSSEY, CAR.CRV_5G, CAR.ACCORD, CAR.ACCORD_15, CAR.ACCORDH, CAR.CIVIC_BOSCH, CAR.INSIGHT, CAR.CRV_HYBRID):
self.park_brake = cp.vl["EPB_STATUS"]['EPB_STATE'] != 0
self.main_on = cp.vl["SCM_FEEDBACK"]['MAIN_ON']
elif self.CP.carFingerprint == CAR.ODYSSEY_CHN:
self.park_brake = cp.vl["EPB_STATUS"]['EPB_STATE'] != 0
self.main_on = cp.vl["SCM_BUTTONS"]['MAIN_ON']
else:
self.park_brake = 0 # TODO
self.main_on = cp.vl["SCM_BUTTONS"]['MAIN_ON']
can_gear_shifter = int(cp.vl["GEARBOX"]['GEAR_SHIFTER'])
self.gear_shifter = parse_gear_shifter(can_gear_shifter, self.shifter_values)
self.pedal_gas = cp.vl["POWERTRAIN_DATA"]['PEDAL_GAS']
# these cars don't report CAR_GAS on GAS_PEDAL_2, so fall back to the pedal gas signal
if self.CP.carFingerprint in (CAR.CRV, CAR.ODYSSEY, CAR.ACURA_RDX, CAR.RIDGELINE, CAR.PILOT_2019, CAR.ODYSSEY_CHN):
self.car_gas = self.pedal_gas
else:
self.car_gas = cp.vl["GAS_PEDAL_2"]['CAR_GAS']
self.steer_torque_driver = cp.vl["STEER_STATUS"]['STEER_TORQUE_SENSOR']
self.steer_torque_motor = cp.vl["STEER_STATUS"]['STEER_TORQUE_MOTOR']
self.steer_override = abs(self.steer_torque_driver) > STEER_THRESHOLD[self.CP.carFingerprint]
self.brake_switch = cp.vl["POWERTRAIN_DATA"]['BRAKE_SWITCH']
if self.CP.radarOffCan:
self.cruise_mode = cp.vl["ACC_HUD"]['CRUISE_CONTROL_LABEL']
self.stopped = cp.vl["ACC_HUD"]['CRUISE_SPEED'] == 252.
self.cruise_speed_offset = calc_cruise_offset(0, self.v_ego)
if self.CP.carFingerprint in (CAR.CIVIC_BOSCH, CAR.ACCORDH, CAR.INSIGHT, CAR.CRV_HYBRID):
self.brake_switch = cp.vl["POWERTRAIN_DATA"]['BRAKE_SWITCH']
self.brake_pressed = cp.vl["POWERTRAIN_DATA"]['BRAKE_PRESSED'] or \
(self.brake_switch and self.brake_switch_prev and \
cp.ts["POWERTRAIN_DATA"]['BRAKE_SWITCH'] != self.brake_switch_ts)
self.brake_switch_prev = self.brake_switch
self.brake_switch_ts = cp.ts["POWERTRAIN_DATA"]['BRAKE_SWITCH']
if self.CP.carFingerprint == CAR.CIVIC_BOSCH:
self.hud_lead = cp.vl["ACC_HUD"]['HUD_LEAD']
else:
self.brake_pressed = cp.vl["BRAKE_MODULE"]['BRAKE_PRESSED']
# On set, the cruise set speed briefly pulses between 254 and 255, so the previous set speed is reused to mask this glitch.
self.v_cruise_pcm = self.v_cruise_pcm_prev if cp.vl["ACC_HUD"]['CRUISE_SPEED'] > 160.0 else cp.vl["ACC_HUD"]['CRUISE_SPEED']
self.v_cruise_pcm_prev = self.v_cruise_pcm
else:
self.brake_switch = cp.vl["POWERTRAIN_DATA"]['BRAKE_SWITCH']
self.cruise_speed_offset = calc_cruise_offset(cp.vl["CRUISE_PARAMS"]['CRUISE_SPEED_OFFSET'], self.v_ego)
self.v_cruise_pcm = cp.vl["CRUISE"]['CRUISE_SPEED_PCM']
# brake switch has shown some single time step noise, so only considered when
# switch is on for at least 2 consecutive CAN samples
self.brake_pressed = cp.vl["POWERTRAIN_DATA"]['BRAKE_PRESSED'] or \
(self.brake_switch and self.brake_switch_prev and \
cp.ts["POWERTRAIN_DATA"]['BRAKE_SWITCH'] != self.brake_switch_ts)
self.brake_switch_prev = self.brake_switch
self.brake_switch_ts = cp.ts["POWERTRAIN_DATA"]['BRAKE_SWITCH']
self.user_brake = cp.vl["VSA_STATUS"]['USER_BRAKE']
self.pcm_acc_status = cp.vl["POWERTRAIN_DATA"]['ACC_STATUS']
# Gets rid of Pedal Grinding noise when brake is pressed at slow speeds for some models
if self.CP.carFingerprint in (CAR.PILOT, CAR.PILOT_2018, CAR.PILOT_2019, CAR.RIDGELINE):
if self.user_brake > 0.05:
self.brake_pressed = 1
# when user presses distance button on steering wheel
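# (trMode cycles 0..3 and is shown to the driver as 1..4 distance bars via
# read_distance_lines = trMode + 1 further down)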
if self.cruise_setting == 3:
if cp.vl["SCM_BUTTONS"]["CRUISE_SETTING"] == 0:
self.trMode = (self.trMode + 1 ) % 4
self.kegman = kegman_conf()
self.kegman.conf['lastTrMode'] = str(self.trMode) # write last distance bar setting to file
self.kegman.write_config(self.kegman.conf)
# when user presses LKAS button on steering wheel
if self.cruise_setting == 1:
if cp.vl["SCM_BUTTONS"]["CRUISE_SETTING"] == 0:
if self.lkMode:
self.lkMode = False
else:
self.lkMode = True
self.prev_cruise_setting = self.cruise_setting
self.cruise_setting = cp.vl["SCM_BUTTONS"]['CRUISE_SETTING']
self.read_distance_lines = self.trMode + 1
if self.read_distance_lines != self.read_distance_lines_prev:
self.read_distance_lines_prev = self.read_distance_lines
# TODO: discover the CAN msg that has the imperial unit bit for all other cars
self.is_metric = not cp.vl["HUD_SETTING"]['IMPERIAL_UNIT'] if self.CP.carFingerprint == CAR.CIVIC else False
if self.CP.carFingerprint in HONDA_BOSCH:
self.stock_aeb = bool(cp_cam.vl["ACC_CONTROL"]["AEB_STATUS"] and cp_cam.vl["ACC_CONTROL"]["ACCEL_COMMAND"] < -1e-5)
else:
self.stock_aeb = bool(cp_cam.vl["BRAKE_COMMAND"]["AEB_REQ_1"] and cp_cam.vl["BRAKE_COMMAND"]["COMPUTER_BRAKE"] > 1e-5)
if self.CP.carFingerprint in HONDA_BOSCH:
self.stock_hud = False
self.stock_fcw = False
else:
self.stock_fcw = bool(cp_cam.vl["BRAKE_COMMAND"]["FCW"] != 0)
self.stock_hud = cp_cam.vl["ACC_HUD"]
self.stock_brake = cp_cam.vl["BRAKE_COMMAND"]
|
the-stack_106_24580 | import os
import signal
import socket
import sys
import py
import pytest
import tox
from tox.logs import ResultLog
@pytest.fixture(name="pkg")
def create_fake_pkg(tmpdir):
pkg = tmpdir.join("hello-1.0.tar.gz")
pkg.write("whatever")
return pkg
def test_pre_set_header():
replog = ResultLog()
d = replog.dict
assert replog.dict == d
assert replog.dict["reportversion"] == "1"
assert replog.dict["toxversion"] == tox.__version__
assert replog.dict["platform"] == sys.platform
assert replog.dict["host"] == socket.getfqdn()
data = replog.dumps_json()
replog2 = ResultLog.from_json(data)
assert replog2.dict == replog.dict
def test_set_header(pkg):
replog = ResultLog()
d = replog.dict
assert replog.dict == d
assert replog.dict["reportversion"] == "1"
assert replog.dict["toxversion"] == tox.__version__
assert replog.dict["platform"] == sys.platform
assert replog.dict["host"] == socket.getfqdn()
expected = {"basename": "hello-1.0.tar.gz", "sha256": pkg.computehash("sha256")}
env_log = replog.get_envlog("a")
env_log.set_header(installpkg=pkg)
assert env_log.dict["installpkg"] == expected
data = replog.dumps_json()
replog2 = ResultLog.from_json(data)
assert replog2.dict == replog.dict
def test_addenv_setpython(pkg):
replog = ResultLog()
envlog = replog.get_envlog("py36")
envlog.set_python_info(py.path.local(sys.executable))
envlog.set_header(installpkg=pkg)
assert envlog.dict["python"]["version_info"] == list(sys.version_info)
assert envlog.dict["python"]["version"] == sys.version
assert envlog.dict["python"]["executable"] == sys.executable
def test_get_commandlog(pkg):
replog = ResultLog()
envlog = replog.get_envlog("py36")
assert "setup" not in envlog.dict
setuplog = envlog.get_commandlog("setup")
envlog.set_header(installpkg=pkg)
setuplog.add_command(["virtualenv", "..."], "venv created", 0)
expected = [{"command": ["virtualenv", "..."], "output": "venv created", "retcode": 0}]
assert setuplog.list == expected
assert envlog.dict["setup"]
setuplog2 = replog.get_envlog("py36").get_commandlog("setup")
assert setuplog2.list == setuplog.list
@pytest.mark.parametrize("exit_code", [None, 0, 5, 128 + signal.SIGTERM, 1234])
@pytest.mark.parametrize("os_name", ["posix", "nt"])
def test_invocation_error(exit_code, os_name, mocker, monkeypatch):
monkeypatch.setattr(os, "name", value=os_name)
mocker.spy(tox.exception, "exit_code_str")
result = str(tox.exception.InvocationError("<command>", exit_code=exit_code))
# check that the mocker spy works here, because
# test_z_cmdline.py::test_exit_code depends on the mocker.spy set up above
assert tox.exception.exit_code_str.call_count == 1
call_args = tox.exception.exit_code_str.call_args
assert call_args == mocker.call("InvocationError", "<command>", exit_code)
if exit_code is None:
assert "(exited with code" not in result
else:
assert "(exited with code %d)" % exit_code in result
note = "Note: this might indicate a fatal error signal"
if (os_name == "posix") and (exit_code == 128 + signal.SIGTERM):
assert note in result
assert "({} - 128 = {}: SIGTERM)".format(exit_code, signal.SIGTERM) in result
else:
assert note not in result
|
the-stack_106_24582 | from datetime import datetime, timedelta
from glosowania.models import Decyzja, ZebranePodpisy, KtoJuzGlosowal
from django.shortcuts import get_object_or_404
from django.db import IntegrityError
from django.shortcuts import render
from glosowania.forms import DecyzjaForm
from django.http import HttpResponseRedirect, HttpResponse
from django.contrib.auth.decorators import login_required
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language
from django.core.mail import EmailMessage
from django.conf import settings as s
from django.contrib.auth.models import User
from django.template.loader import get_template
from django.urls import reverse
from django.contrib import messages
from django.shortcuts import redirect
import logging as l
from django.utils import translation
l.basicConfig(filename='wiki.log', datefmt='%d-%b-%y %H:%M:%S', format='%(asctime)s %(levelname)s %(funcName)s() %(message)s', level=l.INFO)
HOST = s.ALLOWED_HOSTS[0]
# ROOT = s.BASE_DIR
# Add a new law proposal:
@login_required
def dodaj(request):
# nowy = DecyzjaForm(request.POST or None)
if request.method == 'POST':
form = DecyzjaForm(request.POST)
if form.is_valid():
form = form.save(commit=False)
form.autor = request.user
form.data_powstania = datetime.today()
form.ile_osob_podpisalo += 1
form.save()
signed = ZebranePodpisy.objects.create(projekt=form, podpis_uzytkownika = request.user)
# l.warning(f"{form.autor} _('added new law proposal:' form.tresc)")
message = _("New proposal has been saved.")
messages.success(request, (message))
SendEmail(
_('New law proposal'),
f'{request.user.username.capitalize()} ' + str(_('added new law proposal\nYou can read it here:')) + f' http://{HOST}/glosowania/details/{str(form.id)}'
)
return redirect('glosowania:status', 1)
else:
form = DecyzjaForm()
return render(request, 'glosowania/dodaj.html', {'form': form})
# Display the votes:
@login_required
def status(request, pk):
filtered_glosowania = Decyzja.objects.filter(status=pk)
lang = get_language()
zliczaj_wszystko()
return render(request, 'glosowania/status.html', {
'filtered_glosowania': filtered_glosowania,
'lang': lang[0:2], # just en instead of en-us
'signatures': s.WYMAGANYCH_PODPISOW,
'signatures_span': timedelta(days=s.CZAS_NA_ZEBRANIE_PODPISOW).days,
'queue_span': timedelta(days=s.KOLEJKA).days,
'referendum_span': timedelta(days=s.CZAS_TRWANIA_REFERENDUM).days,
'vacatio_legis_span': timedelta(days=s.VACATIO_LEGIS).days,
})
# Show the details of a proposal
@login_required
def details(request, pk):
szczegoly = get_object_or_404(Decyzja, pk=pk)
if request.GET.get('podpisz'):
nowy_projekt = Decyzja.objects.get(pk=pk)
osoba_podpisujaca = request.user
podpis = ZebranePodpisy(projekt=nowy_projekt, podpis_uzytkownika=osoba_podpisujaca)
nowy_projekt.ile_osob_podpisalo += 1
podpis.save()
nowy_projekt.save()
message = _('Your signature has been saved.')
messages.success(request, (message))
return redirect('glosowania:details', pk)
if request.GET.get('tak'):
nowy_projekt = Decyzja.objects.get(pk=pk)
osoba_glosujaca = request.user
glos = KtoJuzGlosowal(projekt=nowy_projekt, ktory_uzytkownik_juz_zaglosowal=osoba_glosujaca)
nowy_projekt.za += 1
glos.save()
nowy_projekt.save()
message = _('Your vote has been saved. You voted Yes.')
messages.success(request, (message))
return redirect('glosowania:details', pk)
if request.GET.get('nie'):
nowy_projekt = Decyzja.objects.get(pk=pk)
osoba_glosujaca = request.user
glos = KtoJuzGlosowal(projekt=nowy_projekt, ktory_uzytkownik_juz_zaglosowal=osoba_glosujaca)
nowy_projekt.przeciw += 1
glos.save()
nowy_projekt.save()
message = _('Your vote has been saved. You voted No.')
messages.success(request, (message))
return redirect('glosowania:details', pk)
# check if already signed
signed = ZebranePodpisy.objects.filter(projekt=pk, podpis_uzytkownika=request.user).exists()
# check if already voted
voted = KtoJuzGlosowal.objects.filter(projekt=pk, ktory_uzytkownik_juz_zaglosowal=request.user).exists()
return render(request, 'glosowania/szczegoly.html', {'id': szczegoly, 'signed': signed, 'voted': voted})
def zliczaj_wszystko():
'''If a proposal is approved on a Sunday,
the referendum will take place in 2 weeks'''
propozycja = 1
brak_poparcia = 2
w_kolejce = 3
referendum = 4
odrzucone = 5
zatwierdzone = 6 # Vacatio Legis
obowiazuje = 7
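# Status codes (Polish identifiers kept as-is): propozycja = proposal collecting signatures,
# brak_poparcia = not enough signatures, w_kolejce = queued for referendum, referendum = voting
# open, odrzucone = rejected, zatwierdzone = approved (in vacatio legis), obowiazuje = in force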
dzisiaj = datetime.today().date()
decyzje = Decyzja.objects.all()
for i in decyzje:
if i.status != brak_poparcia and i.status != odrzucone and i.status != obowiazuje:
# If it has not been approved/rejected in some way yet, process it:
# FROM PROPOSITION TO QUEUE
if i.status == propozycja and i.ile_osob_podpisalo >= s.WYMAGANYCH_PODPISOW:
i.status = w_kolejce
i.data_zebrania_podpisow = dzisiaj
# TODO: the referendum will take place in 1 week, on a Sunday
# 0 = monday, 1 = tuesday, ..., 6 = sunday
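# the `-dzisiaj.weekday() + 0, weeks=1` term below is the usual "jump to next week's Monday
# (weekday 0)" offset, measured from today's weekday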
i.data_referendum_start = i.data_zebrania_podpisow + timedelta(days=s.KOLEJKA) + timedelta(days=-dzisiaj.weekday()+0, weeks=1)
i.data_referendum_stop = i.data_referendum_start + timedelta(days=s.CZAS_TRWANIA_REFERENDUM)
i.save()
SendEmail(
str(_("Proposal no. ")) + str(i.id) + str(_(" is approved for referendum")),
str(_("Proposal no. ")) + str(i.id) +
str(_(" gathered required amount of signatures and will be voted from ")) +
str(i.data_referendum_start) + str(_(' to ')) + str(i.data_referendum_stop) +
'\n' + str(_("Click here to read proposal: http://")) +
f"{HOST}/glosowania/details/{str(i.id)}"
)
continue
# FROM PROPOSITION TO NO_INTREST
if i.status == propozycja and i.data_powstania + timedelta(days=s.CZAS_NA_ZEBRANIE_PODPISOW) <= dzisiaj:
i.status = brak_poparcia
i.save()
# log('Propozycja ' + str(i.id) + ' zmieniła status na "brak poparcia".')
SendEmail(
# _(f"Proposal {str(i.id)} didn't gathered required amount of signatures"), # translation doesn't work this way
str(_("Proposal no. ")) + str(i.id) + str(_(" didn't gathered required amount of signatures")),
str(_("Proposal no. ")) + str(i.id) +
str(_(" didn't gathered required amount of signatures")) + str(_(" and was removed from queue. ")) +
str(_("Feel free to improve it and send it again.")) +
'\n' + str(_("Click here to read proposal: http://")) +
f"{HOST}/glosowania/details/{str(i.id)}"
)
continue
# FROM QUEUE TO REFERENDUM
if i.status == w_kolejce and i.data_referendum_start <= dzisiaj:
i.status = referendum
i.save()
# log('Propozycja ' + str(i.id) + ' zmieniła status na "referendum".')
SendEmail(
str(_("Referendum on proposal no. ")) + str(i.id) + str(_(" is starting now")),
str(_("It is time to vote on proposal no. ")) + str(i.id) + '\n' +
str(_("Referendum ends at ")) +
str(i.data_referendum_stop) + '\n' +
str(_("Click here to vote: http://")) +
f"{HOST}/glosowania/details/{str(i.id)}"
)
continue
# FROM REFERENDUM TO VACATIO_LEGIS OR NOT_APPROVED
if i.status == referendum and i.data_referendum_stop <= dzisiaj:
if i.za > i.przeciw:
i.status = zatwierdzone
i.data_zatwierdzenia = i.data_referendum_stop
i.data_obowiazuje_od = i.data_referendum_stop + timedelta(days=s.VACATIO_LEGIS)
i.save()
# log('Propozycja ' + str(i.id) + ' zmieniła status na "zatwierdzone".')
SendEmail(
str(_("Proposal no. ")) + str(i.id) + str(_("was approved")),
str(_("Proposal no. ")) + str(i.id) +
str(_("was approved in referendum and is now in Vacatio Legis period")) + '.\n' +
str(_("The law will take effect on")) +
i.data_obowiazuje_od + '\n' + str(_("Click here to read proposal: http://")) +
f"{HOST}/glosowania/details/{str(i.id)}"
)
continue
else:
i.status = odrzucone
i.save()
# log('Propozycja ' + str(i.id) + ' zmieniła status na "odrzucone"')
SendEmail(
str(_("Proposal no. ")) + str(i.id) + str(_("was rejected")),
str(_("Proposal no. ")) + str(i.id) +
str(_(" was rejected in referendum.")) + '\n' +
str(_("Feel free to improve it and send it again.")) +
'\n' + str(_("Click here to read proposal: http://")) +
f"{HOST}/glosowania/details/{str(i.id)}"
)
continue
# FROM VACATIO_LEGIS TO LAW
if i.status == zatwierdzone and i.data_obowiazuje_od <= dzisiaj:
i.status = obowiazuje
i.save()
# log('Propozycja ' + str(i.id) + ' zmieniła status na "obowiązuje".')
SendEmail(
str(_("Proposal no. ")) + str(i.id) + str(_(" is in efect from today")),
str(_("Proposal no. ")) + str(i.id) + str(_(" became abiding law today")) + '.\n' +
str(_("Click here to read it: http://")) +
f"{HOST}/glosowania/details/{str(i.id)}"
)
continue
def SendEmail(subject, message):
# bcc: all active users
# subject: Custom
# message: Custom
translation.activate(s.LANGUAGE_CODE)
email_message = EmailMessage(
from_email=str(s.DEFAULT_FROM_EMAIL),
bcc = list(User.objects.filter(is_active=True).values_list('email', flat=True)),
subject=f'{HOST} - {subject}',
body=message,
)
# l.warning(f'subject: {subject} \n message: {message}')
email_message.send(fail_silently=False)
|
the-stack_106_24584 | # Copyright 2020-2021 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The only purpose of this file is to create a wrapper around report.py and config.py and make them usable by flow.tcl
import argparse
import os
import re
import scripts.utils.utils as utils
from scripts.report.report import Report
from scripts.config.config import ConfigHandler
from scripts.report.get_file_name import get_name
parser = argparse.ArgumentParser(
description="Creates manufacturability and runtime summary reports for a given design and OpenLane run."
)
parser.add_argument("--design", "-d", required=True, help="Design Path")
parser.add_argument("--design_name", "-n", required=True, help="Design Name")
parser.add_argument("--tag", "-t", required=True, help="Run Tag")
parser.add_argument("--run_path", "-r", default=None, help="Run Path")
parser.add_argument(
"--output_file", "-o", required=True, help="Output Final Summary Report"
)
parser.add_argument(
"--man_report", "-m", required=True, help="Output Manufacturability Reports"
)
parser.add_argument(
"--runtime_summary", "-s", required=True, help="Output Runtime Summary Reports"
)
args = parser.parse_args()
design = args.design
design_name = args.design_name
tag = args.tag
run_path = args.run_path
output_file = args.output_file
man_report = args.man_report
runtime_summary = args.runtime_summary
# Extracting Configurations
params = ConfigHandler.get_config(design, tag, run_path)
# Extracting Report
report = Report(design, tag, design_name, params, run_path).get_report()
# write into file
with open(output_file, "w") as f:
f.write(Report.get_header() + "," + ConfigHandler.get_header())
f.write("\n")
f.write(report)
# Adding Extra Attributes computed from configs and reported statistics
utils.addComputedStatistics(output_file)
# Tracking Magic DRC, LVS, Antenna Logs:
run_path = run_path or utils.get_run_path(design, tag)
magic_drc_report = get_name(str(run_path) + "/reports/magic/", "magic.drc")
lvs_report = str(run_path) + "/results/lvs/" + design_name + ".lvs_parsed.lef.log"
arc_antenna_report = get_name(str(run_path) + "/reports/routing/", "antenna.rpt")
if not os.path.exists(lvs_report):
lvs_report = str(run_path) + "/results/lvs/" + design_name + ".lvs_parsed.gds.log"
magic_antenna_report = get_name(
str(run_path) + "/reports/magic/", "magic.antenna_violators.rpt"
)
printArr = []
printArr.append("Design Name: " + design_name)
printArr.append("Run Directory: " + str(run_path))
splitLine = "----------------------------------------"
# Summarizing Magic DRC
drcVioDict = dict()
cnt = 0
if os.path.exists(magic_drc_report):
drcFileOpener = open(magic_drc_report)
if drcFileOpener.mode == "r":
drcContent = drcFileOpener.read()
drcFileOpener.close()
# design name
# violation message
# list of violations
# Total Count:
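# Assumed layout of magic.drc based on the parsing below: blocks separated by the 40-dash
# line, where each violation-message block is followed by a block listing its offending
# boxes, so pairing sections i and i+1 yields a per-message violation count.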
printArr.append(splitLine)
printArr.append("\nMagic DRC Summary:")
printArr.append("Source: " + str(magic_drc_report))
if drcContent is not None:
drcSections = drcContent.split(splitLine)
if len(drcSections) > 2:
for i in range(1, len(drcSections) - 1, 2):
drcVioDict[drcSections[i]] = len(drcSections[i + 1].split("\n")) - 2
for key in drcVioDict:
val = drcVioDict[key]
cnt += val
printArr.append(
'Violation Message "'
+ str(key.strip())
+ ' "found '
+ str(val)
+ " Times."
)
printArr.append("Total Magic DRC violations is " + str(cnt))
else:
printArr.append("Source not found.")
# Summarizing LVS
printArr.append(splitLine)
printArr.append("\nLVS Summary:")
printArr.append("Source: " + str(lvs_report))
if os.path.exists(lvs_report):
lvsFileOpener = open(lvs_report)
if lvsFileOpener.mode == "r":
lvsContent = lvsFileOpener.read()
lvsFileOpener.close()
flag = False
for line in lvsContent.split("\n"):
if line.find("Total errors =") != -1:
flag = True
printArr.append(line)
elif line.find("net") != -1:
printArr.append(line)
if not flag:
printArr.append("Design is LVS clean.")
else:
printArr.append("Source not found.")
# Summarizing Antennas
printArr.append(splitLine)
printArr.append("\nAntenna Summary:")
if os.path.exists(arc_antenna_report):
printArr.append("Source: " + str(arc_antenna_report))
antFileOpener = open(arc_antenna_report)
if antFileOpener.mode == "r":
antContent = antFileOpener.read().split("\n")[-5:]
antFileOpener.close()
for line in antContent:
if line.find("violated:") != -1:
printArr.append(line)
elif os.path.exists(magic_antenna_report):
printArr.append("Source: " + str(magic_antenna_report))
antFileOpener = open(magic_antenna_report)
if antFileOpener.mode == "r":
antContent = antFileOpener.read().split("\n")
antFileOpener.close()
tot_cnt = 0
for ant in antContent:
if len(str(ant).strip()):
tot_cnt += 1
printArr.append("Number of pins violated: " + str(tot_cnt))
else:
printArr.append("No antenna report found.")
# write into file
with open(man_report, "w") as f:
f.write("\n".join(printArr))
def getListOfFiles(dirName):
# create a list of file and sub directories
# names in the given directory
allFiles = list()
listOfFile = os.listdir(dirName)
# Iterate over all the entries
for entry in listOfFile:
# Create full path
fullPath = os.path.join(dirName, entry)
# If entry is a directory then get the list of files in this directory
if os.path.isdir(fullPath):
allFiles = allFiles + getListOfFiles(fullPath)
else:
allFiles.append(fullPath)
return allFiles
def timestamp_to_seconds(runtime):
pattern = re.compile(r"\s*([\d+]+)h([\d+]+)m([\d+]+)s([\d+]+)+ms")
m = pattern.match(runtime)
time = (
int(m.group(1)) * 60 * 60
+ int(m.group(2)) * 60
+ int(m.group(3))
+ int(m.group(4)) / 1000.0
)
return str(time)
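# Example, assuming the "<h>h<m>m<s>s<ms>ms" runtime format written by the flow:
# timestamp_to_seconds("0h2m15s250ms") -> "135.25"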
# Creating a runtime summary report
logs_path = run_path + "/logs"
neededfiles = sorted(
[
(int(os.path.basename(f).split("-", 1)[0]), f)
for f in getListOfFiles(logs_path)
if os.path.isfile(os.path.join(logs_path, f))
and len(f.split("_")) > 1
and f.split("_")[-1] == "runtime.txt"
and len(os.path.basename(f).split("-")) > 1
and os.path.basename(f).split("-")[0].isnumeric()
]
)
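# neededfiles is a list of (stage_index, path) tuples for logs named like
# "<index>-<stage>_runtime.txt", sorted by stage index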
runtimeArr = []
parsableRuntimeArr = []
for (idx, f) in neededfiles:
stagename = os.path.basename(f).split("_runtime.txt")[0]
runtimeFileOpener = open(f, "r")
if runtimeFileOpener.mode == "r":
runtimeContent = runtimeFileOpener.read().strip()
runtimeFileOpener.close()
runtimeArr.append(str(stagename) + " " + str(runtimeContent))
parsableRuntimeArr.append(str(stagename) + " " + timestamp_to_seconds(runtimeContent))
# write into file
with open(runtime_summary, "w") as f:
f.write("\n".join(runtimeArr))
with open(runtime_summary + ".parsable", "w") as f:
f.write("\n".join(prasableRuntimeArr))
|
the-stack_106_24585 | import pytest
import os
import tempfile
from contextlib import contextmanager
from warnings import catch_warnings
from distutils.version import LooseVersion
import datetime
from datetime import timedelta
import numpy as np
import pandas as pd
from pandas import (Series, DataFrame, Panel, MultiIndex, Int64Index,
RangeIndex, Categorical, bdate_range,
date_range, timedelta_range, Index, DatetimeIndex,
isna, compat, concat, Timestamp)
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
assert_series_equal,
set_timezone)
from pandas.compat import (is_platform_windows, is_platform_little_endian,
PY35, PY36, BytesIO, text_type,
range, lrange, u)
from pandas.io.formats.printing import pprint_thing
from pandas.core.dtypes.common import is_categorical_dtype
tables = pytest.importorskip('tables')
from pandas.io import pytables as pytables # noqa:E402
from pandas.io.pytables import (TableIterator, # noqa:E402
HDFStore, get_store, Term, read_hdf,
PossibleDataLossError, ClosedFileError)
_default_compressor = ('blosc' if LooseVersion(tables.__version__) >=
LooseVersion('2.2') else 'zlib')
# contextmanager to ensure the file cleanup
def safe_remove(path):
if path is not None:
try:
os.remove(path)
except:
pass
def safe_close(store):
try:
if store is not None:
store.close()
except:
pass
def create_tempfile(path):
""" create an unopened named temporary file """
return os.path.join(tempfile.gettempdir(), path)
@contextmanager
def ensure_clean_store(path, mode='a', complevel=None, complib=None,
fletcher32=False):
try:
# put in the temporary path if we don't have one already
if not len(os.path.dirname(path)):
path = create_tempfile(path)
store = HDFStore(path, mode=mode, complevel=complevel,
complib=complib, fletcher32=False)
yield store
finally:
safe_close(store)
if mode == 'w' or mode == 'a':
safe_remove(path)
@contextmanager
def ensure_clean_path(path):
"""
return essentially a named temporary file that is not opened
and is deleted on exiting; if path is a list, then create and
return list of filenames
"""
try:
if isinstance(path, list):
filenames = [create_tempfile(p) for p in path]
yield filenames
else:
filenames = [create_tempfile(path)]
yield filenames[0]
finally:
for f in filenames:
safe_remove(f)
# set these parameters so we don't have file sharing
tables.parameters.MAX_NUMEXPR_THREADS = 1
tables.parameters.MAX_BLOSC_THREADS = 1
tables.parameters.MAX_THREADS = 1
def _maybe_remove(store, key):
"""For tests using tables, try removing the table to be sure there is
no content from previous tests using the same table name."""
try:
store.remove(key)
except:
pass
class Base(object):
@classmethod
def setup_class(cls):
# Pytables 3.0.0 deprecates lots of things
tm.reset_testing_mode()
@classmethod
def teardown_class(cls):
# Pytables 3.0.0 deprecates lots of things
tm.set_testing_mode()
def setup_method(self, method):
self.path = 'tmp.__%s__.h5' % tm.rands(10)
def teardown_method(self, method):
pass
@pytest.mark.single
class TestHDFStore(Base):
def test_factory_fun(self):
path = create_tempfile(self.path)
try:
with catch_warnings(record=True):
with get_store(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with catch_warnings(record=True):
with get_store(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with catch_warnings(record=True):
with get_store(path) as tbl:
assert len(tbl) == 1
assert type(tbl['a']) == DataFrame
finally:
safe_remove(self.path)
def test_context(self):
path = create_tempfile(self.path)
try:
with HDFStore(path) as tbl:
raise ValueError('blah')
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl['a'] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl['a']) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self):
path = create_tempfile(self.path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
assert_series_equal(o, roundtrip('series', o))
o = tm.makeStringSeries()
assert_series_equal(o, roundtrip('string_series', o))
o = tm.makeDataFrame()
assert_frame_equal(o, roundtrip('frame', o))
with catch_warnings(record=True):
o = tm.makePanel()
assert_panel_equal(o, roundtrip('panel', o))
# table
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
df.to_hdf(path, 'table', append=True)
result = read_hdf(path, 'table', where=['index>2'])
assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self):
# GH6166
# unconversion of long strings was being chopped in earlier
# versions of numpy < 1.7.2
df = DataFrame({'a': tm.rands_array(100, size=10)},
index=tm.rands_array(100, size=10))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['a'])
result = store.select('df')
assert_frame_equal(df, result)
def test_api(self):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, 'df', append=True)
df.iloc[10:].to_hdf(path, 'df', append=True, format='table')
assert_frame_equal(read_hdf(path, 'df'), df)
# append to False
df.iloc[:10].to_hdf(path, 'df', append=False, format='table')
df.iloc[10:].to_hdf(path, 'df', append=True)
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', append=False, format='fixed')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False, format='f')
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df', append=False)
assert_frame_equal(read_hdf(path, 'df'), df)
df.to_hdf(path, 'df')
assert_frame_equal(read_hdf(path, 'df'), df)
with ensure_clean_store(self.path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=True, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# append to False
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
# formats
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format='table')
assert_frame_equal(store.select('df'), df)
_maybe_remove(store, 'df')
store.append('df', df.iloc[:10], append=False, format='table')
store.append('df', df.iloc[10:], append=True, format=None)
assert_frame_equal(store.select('df'), df)
with ensure_clean_path(self.path) as path:
# invalid
df = tm.makeDataFrame()
pytest.raises(ValueError, df.to_hdf, path,
'df', append=True, format='f')
pytest.raises(ValueError, df.to_hdf, path,
'df', append=True, format='fixed')
pytest.raises(TypeError, df.to_hdf, path,
'df', append=True, format='foo')
pytest.raises(TypeError, df.to_hdf, path,
'df', append=False, format='bar')
# File path doesn't exist
path = ""
pytest.raises(compat.FileNotFoundError,
read_hdf, path, 'df')
def test_api_default_format(self):
# default_format option
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
pd.set_option('io.hdf.default_format', 'fixed')
_maybe_remove(store, 'df')
store.put('df', df)
assert not store.get_storer('df').is_table
pytest.raises(ValueError, store.append, 'df2', df)
pd.set_option('io.hdf.default_format', 'table')
_maybe_remove(store, 'df')
store.put('df', df)
assert store.get_storer('df').is_table
_maybe_remove(store, 'df2')
store.append('df2', df)
assert store.get_storer('df').is_table
pd.set_option('io.hdf.default_format', None)
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
pd.set_option('io.hdf.default_format', 'fixed')
df.to_hdf(path, 'df')
with HDFStore(path) as store:
assert not store.get_storer('df').is_table
pytest.raises(ValueError, df.to_hdf, path, 'df2', append=True)
pd.set_option('io.hdf.default_format', 'table')
df.to_hdf(path, 'df3')
with HDFStore(path) as store:
assert store.get_storer('df3').is_table
df.to_hdf(path, 'df4', append=True)
with HDFStore(path) as store:
assert store.get_storer('df4').is_table
pd.set_option('io.hdf.default_format', None)
def test_keys(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
with catch_warnings(record=True):
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
assert len(store) == 5
expected = set(['/a', '/b', '/c', '/d', '/foo/bar'])
assert set(store.keys()) == expected
assert set(store) == expected
def test_iter_empty(self):
with ensure_clean_store(self.path) as store:
# GH 12221
assert list(store) == []
def test_repr(self):
with ensure_clean_store(self.path) as store:
repr(store)
store.info()
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeStringSeries()
store['c'] = tm.makeDataFrame()
with catch_warnings(record=True):
store['d'] = tm.makePanel()
store['foo/bar'] = tm.makePanel()
store.append('e', tm.makePanel())
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
# PerformanceWarning
with catch_warnings(record=True):
store['df'] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, 'bah')
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df', df)
s = store.get_storer('df')
repr(s)
str(s)
def test_contains(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
store['foo/bar'] = tm.makeDataFrame()
assert 'a' in store
assert 'b' in store
assert 'c' not in store
assert 'foo/bar' in store
assert '/foo/bar' in store
assert '/foo/b' not in store
assert 'bar' not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store['node())'] = tm.makeDataFrame()
assert 'node())' in store
def test_versioning(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
assert store.root.a._v_attrs.pandas_version == '0.15.2'
assert store.root.b._v_attrs.pandas_version == '0.15.2'
assert store.root.df1._v_attrs.pandas_version == '0.15.2'
# write a file and wipe its versioning
_maybe_remove(store, 'df2')
store.append('df2', df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node('df2')._v_attrs.pandas_version = None
pytest.raises(Exception, store.select, 'df2')
def test_mode(self):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(self.path) as path:
# constructor
if mode in ['r', 'r+']:
pytest.raises(IOError, HDFStore, path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(self.path) as path:
# context
if mode in ['r', 'r+']:
def f():
with HDFStore(path, mode=mode) as store: # noqa
pass
pytest.raises(IOError, f)
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(self.path) as path:
# conv write
if mode in ['r', 'r+']:
pytest.raises(IOError, df.to_hdf,
path, 'df', mode=mode)
df.to_hdf(path, 'df', mode='w')
else:
df.to_hdf(path, 'df', mode=mode)
# conv read
if mode in ['w']:
pytest.raises(ValueError, read_hdf,
path, 'df', mode=mode)
else:
result = read_hdf(path, 'df', mode=mode)
assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
result = read_hdf(path, 'df')
assert_frame_equal(result, df)
check('r')
check('r+')
check('a')
check('w')
check_default_mode()
def test_reopen_handle(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# invalid mode change
pytest.raises(PossibleDataLossError, store.open, 'w')
store.close()
assert not store.is_open
# truncation ok here
store.open('w')
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode='a')
store['a'] = tm.makeTimeSeries()
# reopen as read
store.open('r')
assert store.is_open
assert len(store) == 1
assert store._mode == 'r'
store.close()
assert not store.is_open
# reopen as append
store.open('a')
assert store.is_open
assert len(store) == 1
assert store._mode == 'a'
store.close()
assert not store.is_open
# reopen as append (again)
store.open('a')
assert store.is_open
assert len(store) == 1
assert store._mode == 'a'
store.close()
assert not store.is_open
def test_open_args(self):
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
# create an in memory store
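# (H5FD_CORE with backing_store=0 keeps the whole file in memory and never persists it to
# disk, which is what the final os.path.exists assertion below verifies)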
store = HDFStore(path, mode='a', driver='H5FD_CORE',
driver_core_backing_store=0)
store['df'] = df
store.append('df2', df)
tm.assert_frame_equal(store['df'], df)
tm.assert_frame_equal(store['df2'], df)
store.close()
# the file should not have actually been written
assert not os.path.exists(path)
def test_flush(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
store.flush()
store.flush(fsync=True)
def test_get(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeSeries()
left = store.get('a')
right = store['a']
tm.assert_series_equal(left, right)
left = store.get('/a')
right = store['/a']
tm.assert_series_equal(left, right)
pytest.raises(KeyError, store.get, 'b')
def test_getattr(self):
with ensure_clean_store(self.path) as store:
s = tm.makeTimeSeries()
store['a'] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, 'a')
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store['df'] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
pytest.raises(AttributeError, getattr, store, 'd')
for x in ['mode', 'path', 'handle', 'complib']:
pytest.raises(AttributeError, getattr, store, x)
# not stores
for x in ['mode', 'path', 'handle', 'complib']:
getattr(store, "_%s" % x)
def test_put(self):
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
store['a'] = ts
store['b'] = df[:10]
store['foo/bar/bah'] = df[:10]
store['foo'] = df[:10]
store['/foo'] = df[:10]
store.put('c', df[:10], format='table')
# not OK, not a table
pytest.raises(
ValueError, store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False
# in this case
# _maybe_remove(store, 'f')
# pytest.raises(ValueError, store.put, 'f', df[10:],
# append=True)
# can't put to a table (use append instead)
pytest.raises(ValueError, store.put, 'c', df[10:], append=True)
# overwrite table
store.put('c', df[:10], format='table', append=False)
tm.assert_frame_equal(df[:10], store['c'])
def test_put_string_index(self):
with ensure_clean_store(self.path) as store:
index = Index(
["I am a very long string index: %s" % i for i in range(20)])
s = Series(np.arange(20), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] +
["I am a very long string index: %s" % i
for i in range(20)])
s = Series(np.arange(21), index=index)
df = DataFrame({'A': s, 'B': s})
store['a'] = s
tm.assert_series_equal(store['a'], s)
store['b'] = df
tm.assert_frame_equal(store['b'], df)
def test_put_compression(self):
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
store.put('c', df, format='table', complib='zlib')
tm.assert_frame_equal(store['c'], df)
# can't compress if format='fixed'
pytest.raises(ValueError, store.put, 'b', df,
format='fixed', complib='zlib')
@td.skip_if_windows_python_3
def test_put_compression_blosc(self):
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
# can't compress if format='fixed'
pytest.raises(ValueError, store.put, 'b', df,
format='fixed', complib='blosc')
store.put('c', df, format='table', complib='blosc')
tm.assert_frame_equal(store['c'], df)
def test_complibs_default_settings(self):
# GH15943
df = tm.makeDataFrame()
# Set complevel and check if complib is automatically set to
# default value
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df', complevel=9)
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 9
assert node.filters.complib == 'zlib'
# Set complib and check to see if compression is disabled
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df', complib='zlib')
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if not setting complib or complevel results in no compression
with ensure_clean_path(self.path) as tmpfile:
df.to_hdf(tmpfile, 'df')
result = pd.read_hdf(tmpfile, 'df')
tm.assert_frame_equal(result, df)
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
# Check if file-defaults can be overridden on a per table basis
with ensure_clean_path(self.path) as tmpfile:
store = pd.HDFStore(tmpfile)
store.append('dfc', df, complevel=9, complib='blosc')
store.append('df', df)
store.close()
with tables.open_file(tmpfile, mode='r') as h5file:
for node in h5file.walk_nodes(where='/df', classname='Leaf'):
assert node.filters.complevel == 0
assert node.filters.complib is None
for node in h5file.walk_nodes(where='/dfc', classname='Leaf'):
assert node.filters.complevel == 9
assert node.filters.complib == 'blosc'
def test_complibs(self):
# GH14478
df = tm.makeDataFrame()
# Building list of all complibs and complevels tuples
all_complibs = tables.filters.all_complibs
# Remove lzo if its not available on this platform
if not tables.which_lib_version('lzo'):
all_complibs.remove('lzo')
# Remove bzip2 if its not available on this platform
if not tables.which_lib_version("bzip2"):
all_complibs.remove("bzip2")
all_levels = range(0, 10)
all_tests = [(lib, lvl) for lib in all_complibs for lvl in all_levels]
for (lib, lvl) in all_tests:
with ensure_clean_path(self.path) as tmpfile:
gname = 'foo'
# Write and read file to see if data is consistent
df.to_hdf(tmpfile, gname, complib=lib, complevel=lvl)
result = pd.read_hdf(tmpfile, gname)
tm.assert_frame_equal(result, df)
# Open file and check metadata
# for correct amount of compression
h5table = tables.open_file(tmpfile, mode='r')
for node in h5table.walk_nodes(where='/' + gname,
classname='Leaf'):
assert node.filters.complevel == lvl
if lvl == 0:
assert node.filters.complib is None
else:
assert node.filters.complib == lib
h5table.close()
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_put_mixed_type(self):
df = tm.makeTimeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
# PerformanceWarning
with catch_warnings(record=True):
store.put('df', df)
expected = store.get('df')
tm.assert_frame_equal(expected, df)
def test_append(self):
with ensure_clean_store(self.path) as store:
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning):
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
_maybe_remove(store, 'df2')
store.put('df2', df[:10], format='table')
store.append('df2', df[10:])
tm.assert_frame_equal(store['df2'], df)
_maybe_remove(store, 'df3')
store.append('/df3', df[:10])
store.append('/df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
# this is allowed but you almost always don't want to do it
# tables.NaturalNameWarning
_maybe_remove(store, '/df3 foo')
store.append('/df3 foo', df[:10])
store.append('/df3 foo', df[10:])
tm.assert_frame_equal(store['df3 foo'], df)
# panel
wp = tm.makePanel()
_maybe_remove(store, 'wp1')
store.append('wp1', wp.iloc[:, :10, :])
store.append('wp1', wp.iloc[:, 10:, :])
assert_panel_equal(store['wp1'], wp)
# test using a different order of items on the non-index axes
_maybe_remove(store, 'wp1')
wp_append1 = wp.iloc[:, :10, :]
store.append('wp1', wp_append1)
wp_append2 = wp.iloc[:, 10:, :].reindex(items=wp.items[::-1])
store.append('wp1', wp_append2)
assert_panel_equal(store['wp1'], wp)
# dtype issues - mixed type in a single object column
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
df['mixed_column'] = 'testing'
df.loc[2, 'mixed_column'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df)
tm.assert_frame_equal(store['df'], df)
# uints - test storage of uints
uint_data = DataFrame({
'u08': Series(np.random.randint(0, high=255, size=5),
dtype=np.uint8),
'u16': Series(np.random.randint(0, high=65535, size=5),
dtype=np.uint16),
'u32': Series(np.random.randint(0, high=2**30, size=5),
dtype=np.uint32),
'u64': Series([2**58, 2**59, 2**60, 2**61, 2**62],
dtype=np.uint64)}, index=np.arange(5))
_maybe_remove(store, 'uints')
store.append('uints', uint_data)
tm.assert_frame_equal(store['uints'], uint_data)
# uints - test storage of uints in indexable columns
_maybe_remove(store, 'uints')
# 64-bit indices not yet supported
store.append('uints', uint_data, data_columns=[
'u08', 'u16', 'u32'])
tm.assert_frame_equal(store['uints'], uint_data)
def test_append_series(self):
with ensure_clean_store(self.path) as store:
# basic
ss = tm.makeStringSeries()
ts = tm.makeTimeSeries()
ns = Series(np.arange(100))
store.append('ss', ss)
result = store['ss']
tm.assert_series_equal(result, ss)
assert result.name is None
store.append('ts', ts)
result = store['ts']
tm.assert_series_equal(result, ts)
assert result.name is None
ns.name = 'foo'
store.append('ns', ns)
result = store['ns']
tm.assert_series_equal(result, ns)
assert result.name == ns.name
# select on the values
expected = ns[ns > 60]
result = store.select('ns', 'foo>60')
tm.assert_series_equal(result, expected)
# select on the index and values
expected = ns[(ns > 70) & (ns.index < 90)]
result = store.select('ns', 'foo>70 and index<90')
tm.assert_series_equal(result, expected)
# multi-index
mi = DataFrame(np.random.randn(5, 1), columns=['A'])
mi['B'] = np.arange(len(mi))
mi['C'] = 'foo'
mi.loc[3:5, 'C'] = 'bar'
mi.set_index(['C', 'B'], inplace=True)
s = mi.stack()
s.index = s.index.droplevel(2)
store.append('mi', s)
tm.assert_series_equal(store['mi'], s)
def test_store_index_types(self):
# GH5386
# test storing various index types
with ensure_clean_store(self.path) as store:
def check(format, index):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df.index = index(len(df))
_maybe_remove(store, 'df')
store.put('df', df, format=format)
assert_frame_equal(df, store['df'])
for index in [tm.makeFloatIndex, tm.makeStringIndex,
tm.makeIntIndex, tm.makeDateIndex]:
check('table', index)
check('fixed', index)
# period index currently broken for table
# see GH7796 FIXME
check('fixed', tm.makePeriodIndex)
# check('table',tm.makePeriodIndex)
# unicode
index = tm.makeUnicodeIndex
if compat.PY3:
check('table', index)
check('fixed', index)
else:
# only support for fixed types (and they have a perf warning)
pytest.raises(TypeError, check, 'table', index)
# PerformanceWarning
with catch_warnings(record=True):
check('fixed', index)
@pytest.mark.skipif(not is_platform_little_endian(),
reason="reason platform is not little endian")
def test_encoding(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(A='foo', B='bar'), index=range(5))
df.loc[2, 'A'] = np.nan
df.loc[3, 'B'] = np.nan
_maybe_remove(store, 'df')
store.append('df', df, encoding='ascii')
tm.assert_frame_equal(store['df'], df)
expected = df.reindex(columns=['A'])
result = store.select('df', Term('columns=A', encoding='ascii'))
tm.assert_frame_equal(result, expected)
def test_latin_encoding(self):
if compat.PY2:
tm.assert_raises_regex(
TypeError, r'\[unicode\] is not implemented as a table column')
return
values = [[b'E\xc9, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'a', b'b', b'c'],
[b'EE, 17', b'', b'a', b'b', b'c'],
[b'E\xc9, 17', b'\xf8\xfc', b'a', b'b', b'c'],
[b'', b'a', b'b', b'c'],
[b'\xf8\xfc', b'a', b'b', b'c'],
[b'A\xf8\xfc', b'', b'a', b'b', b'c'],
[np.nan, b'', b'b', b'c'],
[b'A\xf8\xfc', np.nan, b'', b'b', b'c']]
def _try_decode(x, encoding='latin-1'):
try:
return x.decode(encoding)
except AttributeError:
return x
# not sure how to remove latin-1 from code in python 2 and 3
values = [[_try_decode(x) for x in y] for y in values]
examples = []
for dtype in ['category', object]:
for val in values:
examples.append(pd.Series(val, dtype=dtype))
def roundtrip(s, key='data', encoding='latin-1', nan_rep=''):
with ensure_clean_path(self.path) as store:
s.to_hdf(store, key, format='table', encoding=encoding,
nan_rep=nan_rep)
retr = read_hdf(store, key)
s_nan = s.replace(nan_rep, np.nan)
if is_categorical_dtype(s_nan):
assert is_categorical_dtype(retr)
assert_series_equal(s_nan, retr, check_dtype=False,
check_categorical=False)
else:
assert_series_equal(s_nan, retr)
for s in examples:
roundtrip(s)
# fails:
# for x in examples:
# roundtrip(s, nan_rep=b'\xf8\xfc')
def test_append_some_nans(self):
with ensure_clean_store(self.path) as store:
df = DataFrame({'A': Series(np.random.randn(20)).astype('int32'),
'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
# some nans
_maybe_remove(store, 'df1')
df.loc[0:15, ['A1', 'B', 'D', 'E']] = np.nan
store.append('df1', df[:10])
store.append('df1', df[10:])
tm.assert_frame_equal(store['df1'], df)
# first column
df1 = df.copy()
df1.loc[:, 'A1'] = np.nan
_maybe_remove(store, 'df1')
store.append('df1', df1[:10])
store.append('df1', df1[10:])
tm.assert_frame_equal(store['df1'], df1)
# 2nd column
df2 = df.copy()
df2.loc[:, 'A2'] = np.nan
_maybe_remove(store, 'df2')
store.append('df2', df2[:10])
store.append('df2', df2[10:])
tm.assert_frame_equal(store['df2'], df2)
# datetimes
df3 = df.copy()
df3.loc[:, 'E'] = np.nan
_maybe_remove(store, 'df3')
store.append('df3', df3[:10])
store.append('df3', df3[10:])
tm.assert_frame_equal(store['df3'], df3)
def test_append_all_nans(self):
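        # rows that are all-NaN are dropped on append only when dropna=True
        # (or via the io.hdf.dropna_table option); frames that also carry
        # string/datetime columns keep every row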
with ensure_clean_store(self.path) as store:
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20)},
index=np.arange(20))
df.loc[0:15, :] = np.nan
# nan some entire rows (dropna=True)
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df[-4:])
# nan some entire rows (dropna=False)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# tests the option io.hdf.dropna_table
pd.set_option('io.hdf.dropna_table', False)
_maybe_remove(store, 'df3')
store.append('df3', df[:10])
store.append('df3', df[10:])
tm.assert_frame_equal(store['df3'], df)
pd.set_option('io.hdf.dropna_table', True)
_maybe_remove(store, 'df4')
store.append('df4', df[:10])
store.append('df4', df[10:])
tm.assert_frame_equal(store['df4'], df[-4:])
# nan some entire rows (string are still written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar'},
index=np.arange(20))
df.loc[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# nan some entire rows (but since we have dates they are still
# written!)
df = DataFrame({'A1': np.random.randn(20),
'A2': np.random.randn(20),
'B': 'foo', 'C': 'bar',
'D': Timestamp("20010101"),
'E': datetime.datetime(2001, 1, 2, 0, 0)},
index=np.arange(20))
df.loc[0:15, :] = np.nan
_maybe_remove(store, 'df')
store.append('df', df[:10], dropna=True)
store.append('df', df[10:], dropna=True)
tm.assert_frame_equal(store['df'], df)
_maybe_remove(store, 'df2')
store.append('df2', df[:10], dropna=False)
store.append('df2', df[10:], dropna=False)
tm.assert_frame_equal(store['df2'], df)
# Test to make sure defaults are to not drop.
# Corresponding to Issue 9382
df_with_missing = DataFrame(
{'col1': [0, np.nan, 2], 'col2': [1, np.nan, np.nan]})
with ensure_clean_path(self.path) as path:
df_with_missing.to_hdf(path, 'df_with_missing', format='table')
reloaded = read_hdf(path, 'df_with_missing')
tm.assert_frame_equal(df_with_missing, reloaded)
matrix = [[[np.nan, np.nan, np.nan], [1, np.nan, np.nan]],
[[np.nan, np.nan, np.nan], [np.nan, 5, 6]],
[[np.nan, np.nan, np.nan], [np.nan, 3, np.nan]]]
with catch_warnings(record=True):
panel_with_missing = Panel(matrix,
items=['Item1', 'Item2', 'Item3'],
major_axis=[1, 2],
minor_axis=['A', 'B', 'C'])
with ensure_clean_path(self.path) as path:
panel_with_missing.to_hdf(
path, 'panel_with_missing', format='table')
reloaded_panel = read_hdf(path, 'panel_with_missing')
tm.assert_panel_equal(panel_with_missing, reloaded_panel)
def test_append_frame_column_oriented(self):
with ensure_clean_store(self.path) as store:
# column oriented
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df1')
store.append('df1', df.iloc[:, :2], axes=['columns'])
store.append('df1', df.iloc[:, 2:])
tm.assert_frame_equal(store['df1'], df)
result = store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# selection on the non-indexable
result = store.select(
'df1', ('columns=A', 'index=df.index[0:4]'))
expected = df.reindex(columns=['A'], index=df.index[0:4])
tm.assert_frame_equal(expected, result)
# this isn't supported
with pytest.raises(TypeError):
store.select('df1',
'columns=A and index>df.index[4]')
def test_append_with_different_block_ordering(self):
# GH 4096; using same frames, but different block orderings
with ensure_clean_store(self.path) as store:
for i in range(10):
df = DataFrame(np.random.randn(10, 2), columns=list('AB'))
df['index'] = range(10)
df['index'] += i * 10
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
if i % 2 == 0:
del df['int64']
df['int64'] = Series([1] * len(df), dtype='int64')
if i % 3 == 0:
a = df.pop('A')
df['A'] = a
df.set_index('index', inplace=True)
store.append('df', df)
        # test a different ordering but with more fields (like an invalid
        # combination)
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(10, 2),
columns=list('AB'), dtype='float64')
df['int64'] = Series([1] * len(df), dtype='int64')
df['int16'] = Series([1] * len(df), dtype='int16')
store.append('df', df)
# store additional fields in different blocks
df['int16_2'] = Series([1] * len(df), dtype='int16')
pytest.raises(ValueError, store.append, 'df', df)
            # store multiple additional fields in different blocks
df['float_3'] = Series([1.] * len(df), dtype='float64')
pytest.raises(ValueError, store.append, 'df', df)
def test_append_with_strings(self):
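        # min_itemsize fixes the on-disk width of string columns, so later
        # appends either fit within it or raise ValueError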
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
wp2 = wp.rename_axis(
{x: "%s_extra" % x for x in wp.minor_axis}, axis=2)
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
store.append('s1', wp, min_itemsize=20)
store.append('s1', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(
minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s1'], expected)
check_col('s1', 'minor_axis', 20)
# test dict format
store.append('s2', wp, min_itemsize={'minor_axis': 20})
store.append('s2', wp2)
expected = concat([wp, wp2], axis=2)
expected = expected.reindex(
minor_axis=sorted(expected.minor_axis))
assert_panel_equal(store['s2'], expected)
check_col('s2', 'minor_axis', 20)
# apply the wrong field (similar to #1)
store.append('s3', wp, min_itemsize={'major_axis': 20})
pytest.raises(ValueError, store.append, 's3', wp2)
# test truncation of bigger strings
store.append('s4', wp)
pytest.raises(ValueError, store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big', df)
tm.assert_frame_equal(store.select('df_big'), df)
check_col('df_big', 'values_block_1', 15)
# appending smaller string ok
df2 = DataFrame([[124, 'asdqy'], [346, 'dggnhefbdfb']])
store.append('df_big', df2)
expected = concat([df, df2])
tm.assert_frame_equal(store.select('df_big'), expected)
check_col('df_big', 'values_block_1', 15)
# avoid truncation on elements
df = DataFrame([[123, 'asdqwerty'], [345, 'dggnhebbsdfbdfb']])
store.append('df_big2', df, min_itemsize={'values': 50})
tm.assert_frame_equal(store.select('df_big2'), df)
check_col('df_big2', 'values_block_1', 50)
# bigger string on next append
store.append('df_new', df)
df_new = DataFrame(
[[124, 'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
pytest.raises(ValueError, store.append, 'df_new', df_new)
# min_itemsize on Series index (GH 11412)
df = tm.makeMixedDataFrame().set_index('C')
store.append('ss', df['B'], min_itemsize={'index': 4})
tm.assert_series_equal(store.select('ss'), df['B'])
# same as above, with data_columns=True
store.append('ss2', df['B'], data_columns=True,
min_itemsize={'index': 4})
tm.assert_series_equal(store.select('ss2'), df['B'])
# min_itemsize in index without appending (GH 10381)
store.put('ss3', df, format='table',
min_itemsize={'index': 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C='longer').set_index('C')
store.append('ss3', df2)
tm.assert_frame_equal(store.select('ss3'),
pd.concat([df, df2]))
# same as above, with a Series
store.put('ss4', df['B'], format='table',
min_itemsize={'index': 6})
store.append('ss4', df2['B'])
tm.assert_series_equal(store.select('ss4'),
pd.concat([df['B'], df2['B']]))
# with nans
_maybe_remove(store, 'df')
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[1:4, 'string'] = np.nan
df['string2'] = 'bar'
df.loc[4:8, 'string2'] = np.nan
df['string3'] = 'bah'
df.loc[1:, 'string3'] = np.nan
store.append('df', df)
result = store.select('df')
tm.assert_frame_equal(result, df)
with ensure_clean_store(self.path) as store:
def check_col(key, name, size):
assert getattr(store.get_storer(key)
                               .table.description, name).itemsize == size
df = DataFrame(dict(A='foo', B='bar'), index=range(10))
# a min_itemsize that creates a data_column
_maybe_remove(store, 'df')
store.append('df', df, min_itemsize={'A': 200})
check_col('df', 'A', 200)
assert store.get_storer('df').data_columns == ['A']
# a min_itemsize that creates a data_column2
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['B'], min_itemsize={'A': 200})
check_col('df', 'A', 200)
assert store.get_storer('df').data_columns == ['B', 'A']
# a min_itemsize that creates a data_column2
_maybe_remove(store, 'df')
store.append('df', df, data_columns=[
'B'], min_itemsize={'values': 200})
check_col('df', 'B', 200)
check_col('df', 'values_block_0', 200)
assert store.get_storer('df').data_columns == ['B']
# infer the .typ on subsequent appends
_maybe_remove(store, 'df')
store.append('df', df[:5], min_itemsize=200)
store.append('df', df[5:], min_itemsize=200)
tm.assert_frame_equal(store['df'], df)
# invalid min_itemsize keys
df = DataFrame(['foo', 'foo', 'foo', 'barh',
'barh', 'barh'], columns=['A'])
_maybe_remove(store, 'df')
pytest.raises(ValueError, store.append, 'df',
df, min_itemsize={'foo': 20, 'foobar': 20})
def test_to_hdf_with_min_itemsize(self):
with ensure_clean_path(self.path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index('C')
df.to_hdf(path, 'ss3', format='table', min_itemsize={'index': 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C='longer').set_index('C')
df2.to_hdf(path, 'ss3', append=True, format='table')
tm.assert_frame_equal(pd.read_hdf(path, 'ss3'),
pd.concat([df, df2]))
# same as above, with a Series
df['B'].to_hdf(path, 'ss4', format='table',
min_itemsize={'index': 6})
df2['B'].to_hdf(path, 'ss4', append=True, format='table')
tm.assert_series_equal(pd.read_hdf(path, 'ss4'),
pd.concat([df['B'], df2['B']]))
def test_append_with_data_columns(self):
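        # columns listed in data_columns get their own (queryable) table
        # columns and can be referenced directly in where clauses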
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
df.iloc[0, df.columns.get_loc('B')] = 1.
_maybe_remove(store, 'df')
store.append('df', df[:2], data_columns=['B'])
store.append('df', df[2:])
tm.assert_frame_equal(store['df'], df)
            # check that we have indices created
assert(store._handle.root.df.table.cols.index.is_indexed is True)
assert(store._handle.root.df.table.cols.B.is_indexed is True)
# data column searching
result = store.select('df', 'B>0')
expected = df[df.B > 0]
tm.assert_frame_equal(result, expected)
# data column searching (with an indexable and a data_columns)
result = store.select(
'df', 'B>0 and index>df.index[3]')
df_new = df.reindex(index=df.index[4:])
expected = df_new[df_new.B > 0]
tm.assert_frame_equal(result, expected)
# data column selection with a string data_column
df_new = df.copy()
df_new['string'] = 'foo'
df_new.loc[1:4, 'string'] = np.nan
df_new.loc[5:6, 'string'] = 'bar'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'])
result = store.select('df', "string='foo'")
expected = df_new[df_new.string == 'foo']
tm.assert_frame_equal(result, expected)
# using min_itemsize and a data column
def check_col(key, name, size):
assert getattr(store.get_storer(key)
.table.description, name).itemsize == size
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'string': 30})
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['string'], min_itemsize=30)
check_col('df', 'string', 30)
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string'],
min_itemsize={'values': 30})
check_col('df', 'string', 30)
with ensure_clean_store(self.path) as store:
df_new['string2'] = 'foobarbah'
df_new['string_block1'] = 'foobarbah1'
df_new['string_block2'] = 'foobarbah2'
_maybe_remove(store, 'df')
store.append('df', df_new, data_columns=['string', 'string2'],
min_itemsize={'string': 30, 'string2': 40,
'values': 50})
check_col('df', 'string', 30)
check_col('df', 'string2', 40)
check_col('df', 'values_block_1', 50)
with ensure_clean_store(self.path) as store:
# multiple data columns
df_new = df.copy()
df_new.iloc[0, df_new.columns.get_loc('A')] = 1.
df_new.iloc[0, df_new.columns.get_loc('B')] = -1.
df_new['string'] = 'foo'
sl = df_new.columns.get_loc('string')
df_new.iloc[1:4, sl] = np.nan
df_new.iloc[5:6, sl] = 'bar'
df_new['string2'] = 'foo'
sl = df_new.columns.get_loc('string2')
df_new.iloc[2:5, sl] = np.nan
df_new.iloc[7:8, sl] = 'bar'
_maybe_remove(store, 'df')
store.append(
'df', df_new, data_columns=['A', 'B', 'string', 'string2'])
result = store.select('df',
"string='foo' and string2='foo'"
" and A>0 and B<0")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'foo') & (df_new.A > 0) & (df_new.B < 0)]
tm.assert_frame_equal(result, expected, check_index_type=False)
# yield an empty frame
result = store.select('df', "string='foo' and string2='cool'")
expected = df_new[(df_new.string == 'foo') & (
df_new.string2 == 'cool')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example
df_dc = df.copy()
df_dc['string'] = 'foo'
df_dc.loc[4:6, 'string'] = np.nan
df_dc.loc[7:9, 'string'] = 'bar'
df_dc['string2'] = 'cool'
df_dc['datetime'] = Timestamp('20010102')
df_dc = df_dc._convert(datetime=True)
df_dc.loc[3:5, ['A', 'B', 'datetime']] = np.nan
_maybe_remove(store, 'df_dc')
store.append('df_dc', df_dc,
data_columns=['B', 'C', 'string',
'string2', 'datetime'])
result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected, check_index_type=False)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == foo'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (
df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected, check_index_type=False)
with ensure_clean_store(self.path) as store:
# doc example part 2
np.random.seed(1234)
index = date_range('1/1/2000', periods=8)
df_dc = DataFrame(np.random.randn(8, 3), index=index,
columns=['A', 'B', 'C'])
df_dc['string'] = 'foo'
df_dc.loc[4:6, 'string'] = np.nan
df_dc.loc[7:9, 'string'] = 'bar'
df_dc.loc[:, ['B', 'C']] = df_dc.loc[:, ['B', 'C']].abs()
df_dc['string2'] = 'cool'
# on-disk operations
store.append('df_dc', df_dc, data_columns=[
'B', 'C', 'string', 'string2'])
result = store.select('df_dc', 'B>0')
expected = df_dc[df_dc.B > 0]
tm.assert_frame_equal(result, expected)
result = store.select(
'df_dc', ['B > 0', 'C > 0', 'string == "foo"'])
expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) &
(df_dc.string == 'foo')]
tm.assert_frame_equal(result, expected)
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
# panel
# GH5717 not handling data_columns
np.random.seed(1234)
p = tm.makePanel()
store.append('p1', p)
tm.assert_panel_equal(store.select('p1'), p)
store.append('p2', p, data_columns=True)
tm.assert_panel_equal(store.select('p2'), p)
result = store.select('p2', where='ItemA>0')
expected = p.to_frame()
expected = expected[expected['ItemA'] > 0]
tm.assert_frame_equal(result.to_frame(), expected)
result = store.select(
'p2', where='ItemA>0 & minor_axis=["A","B"]')
expected = p.to_frame()
expected = expected[expected['ItemA'] > 0]
expected = expected[expected.reset_index(
level=['major']).index.isin(['A', 'B'])]
tm.assert_frame_equal(result.to_frame(), expected)
def test_create_table_index(self):
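        # exercise PyTables column indexes: index=True/False on append,
        # create_table_index with optlevel/kind, and indexed data_columns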
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# index=False
wp = tm.makePanel()
store.append('p5', wp, index=False)
store.create_table_index('p5', columns=['major_axis'])
assert(col('p5', 'major_axis').is_indexed is True)
assert(col('p5', 'minor_axis').is_indexed is False)
# index=True
store.append('p5i', wp, index=True)
assert(col('p5i', 'major_axis').is_indexed is True)
assert(col('p5i', 'minor_axis').is_indexed is True)
# default optlevels
store.get_storer('p5').create_index()
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
# let's change the indexing scheme
store.create_table_index('p5')
assert(col('p5', 'major_axis').index.optlevel == 6)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', optlevel=9)
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'medium')
store.create_table_index('p5', kind='full')
assert(col('p5', 'major_axis').index.optlevel == 9)
assert(col('p5', 'minor_axis').index.kind == 'full')
store.create_table_index('p5', optlevel=1, kind='light')
assert(col('p5', 'major_axis').index.optlevel == 1)
assert(col('p5', 'minor_axis').index.kind == 'light')
# data columns
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df['string2'] = 'bar'
store.append('f', df, data_columns=['string', 'string2'])
assert(col('f', 'index').is_indexed is True)
assert(col('f', 'string').is_indexed is True)
assert(col('f', 'string2').is_indexed is True)
# specify index=columns
store.append(
'f2', df, index=['string'],
data_columns=['string', 'string2'])
assert(col('f2', 'index').is_indexed is False)
assert(col('f2', 'string').is_indexed is True)
assert(col('f2', 'string2').is_indexed is False)
# try to index a non-table
_maybe_remove(store, 'f2')
store.put('f2', df)
pytest.raises(TypeError, store.create_table_index, 'f2')
def test_append_diff_item_order(self):
with catch_warnings(record=True):
wp = tm.makePanel()
wp1 = wp.iloc[:, :10, :]
wp2 = wp.iloc[wp.items.get_indexer(['ItemC', 'ItemB', 'ItemA']),
10:, :]
with ensure_clean_store(self.path) as store:
store.put('panel', wp1, format='table')
pytest.raises(ValueError, store.put, 'panel', wp2,
append=True)
def test_append_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.append('mi', df)
result = store.select('mi')
tm.assert_frame_equal(result, df)
# GH 3748
result = store.select('mi', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
with ensure_clean_path('test.hdf') as path:
df.to_hdf(path, 'df', format='table')
result = read_hdf(path, 'df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(result, expected)
def test_column_multiindex(self):
# GH 4710
# recreate multi-indexes properly
index = MultiIndex.from_tuples([('A', 'a'), ('A', 'b'),
('B', 'a'), ('B', 'b')],
names=['first', 'second'])
df = DataFrame(np.arange(12).reshape(3, 4), columns=index)
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df', df)
tm.assert_frame_equal(store['df'], expected,
check_index_type=True,
check_column_type=True)
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
pytest.raises(ValueError, store.put, 'df2', df,
format='table', data_columns=['A'])
pytest.raises(ValueError, store.put, 'df3', df,
format='table', data_columns=True)
# appending multi-column on existing table (see GH 6167)
with ensure_clean_store(self.path) as store:
store.append('df2', df)
store.append('df2', df)
tm.assert_frame_equal(store['df2'], concat((df, df)))
# non_index_axes name
df = DataFrame(np.arange(12).reshape(3, 4),
columns=Index(list('ABCD'), name='foo'))
expected = df.copy()
if isinstance(expected.index, RangeIndex):
expected.index = Int64Index(expected.index)
with ensure_clean_store(self.path) as store:
store.put('df1', df, format='table')
tm.assert_frame_equal(store['df1'], expected,
check_index_type=True,
check_column_type=True)
def test_store_multiindex(self):
# validate multi-index names
# GH 5527
with ensure_clean_store(self.path) as store:
def make_index(names=None):
return MultiIndex.from_tuples([(datetime.datetime(2013, 12, d),
s, t)
for d in range(1, 3)
for s in range(2)
for t in range(3)],
names=names)
# no names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index())
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# partial names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', None, None]))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
# series
_maybe_remove(store, 's')
s = Series(np.zeros(12), index=make_index(['date', None, None]))
store.append('s', s)
xp = Series(np.zeros(12), index=make_index(
['date', 'level_1', 'level_2']))
tm.assert_series_equal(store.select('s'), xp)
# dup with column
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 'a', 't']))
pytest.raises(ValueError, store.append, 'df', df)
# fully names
_maybe_remove(store, 'df')
df = DataFrame(np.zeros((12, 2)), columns=[
'a', 'b'], index=make_index(['date', 's', 't']))
store.append('df', df)
tm.assert_frame_equal(store.select('df'), df)
def test_select_columns_in_where(self):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo_name', 'bar_name'])
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
expected = df[['A']]
tm.assert_frame_equal(store.select('df', columns=['A']), expected)
tm.assert_frame_equal(store.select(
'df', where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index,
name='A')
with ensure_clean_store(self.path) as store:
store.put('s', s, format='table')
tm.assert_series_equal(store.select('s', where="columns=['A']"), s)
def test_mi_data_columns(self):
# GH 14435
idx = pd.MultiIndex.from_arrays([date_range('2000-01-01', periods=5),
range(5)], names=['date', 'id'])
df = pd.DataFrame({'a': [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=True)
actual = store.select('df', where='id == 1')
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_pass_spec_to_storer(self):
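        # columns=/where= are only supported for table format; selecting
        # from a fixed-format store with them should raise TypeError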
df = tm.makeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df)
pytest.raises(TypeError, store.select, 'df', columns=['A'])
pytest.raises(TypeError, store.select,
'df', where=[('columns=A')])
def test_append_misc(self):
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
store.append('df', df, chunksize=1)
result = store.select('df')
tm.assert_frame_equal(result, df)
store.append('df1', df, expectedrows=10)
result = store.select('df1')
tm.assert_frame_equal(result, df)
# more chunksize in append tests
def check(obj, comparator):
for c in [10, 200, 1000]:
with ensure_clean_store(self.path, mode='w') as store:
store.append('obj', obj, chunksize=c)
result = store.select('obj')
comparator(result, obj)
df = tm.makeDataFrame()
df['string'] = 'foo'
df['float322'] = 1.
df['float322'] = df['float322'].astype('float32')
df['bool'] = df['float322'] > 0
df['time1'] = Timestamp('20130101')
df['time2'] = Timestamp('20130102')
check(df, tm.assert_frame_equal)
with catch_warnings(record=True):
p = tm.makePanel()
check(p, assert_panel_equal)
# empty frame, GH4273
with ensure_clean_store(self.path) as store:
# 0 len
df_empty = DataFrame(columns=list('ABC'))
store.append('df', df_empty)
pytest.raises(KeyError, store.select, 'df')
# repeated append of 0/non-zero frames
df = DataFrame(np.random.rand(10, 3), columns=list('ABC'))
store.append('df', df)
assert_frame_equal(store.select('df'), df)
store.append('df', df_empty)
assert_frame_equal(store.select('df'), df)
# store
df = DataFrame(columns=list('ABC'))
store.put('df2', df)
assert_frame_equal(store.select('df2'), df)
with catch_warnings(record=True):
# 0 len
p_empty = Panel(items=list('ABC'))
store.append('p', p_empty)
pytest.raises(KeyError, store.select, 'p')
# repeated append of 0/non-zero frames
p = Panel(np.random.randn(3, 4, 5), items=list('ABC'))
store.append('p', p)
assert_panel_equal(store.select('p'), p)
store.append('p', p_empty)
assert_panel_equal(store.select('p'), p)
# store
store.put('p2', p_empty)
assert_panel_equal(store.select('p2'), p_empty)
def test_append_raise(self):
with ensure_clean_store(self.path) as store:
# test append with invalid input to get good error messages
# list in column
df = tm.makeDataFrame()
df['invalid'] = [['a']] * len(df)
assert df.dtypes['invalid'] == np.object_
pytest.raises(TypeError, store.append, 'df', df)
# multiple invalid columns
df['invalid2'] = [['a']] * len(df)
df['invalid3'] = [['a']] * len(df)
pytest.raises(TypeError, store.append, 'df', df)
# datetime with embedded nans as object
df = tm.makeDataFrame()
s = Series(datetime.datetime(2001, 1, 2), index=df.index)
s = s.astype(object)
s[0:5] = np.nan
df['invalid'] = s
assert df.dtypes['invalid'] == np.object_
pytest.raises(TypeError, store.append, 'df', df)
# directly ndarray
pytest.raises(TypeError, store.append, 'df', np.arange(10))
# series directly
pytest.raises(TypeError, store.append,
'df', Series(np.arange(10)))
# appending an incompatible table
df = tm.makeDataFrame()
store.append('df', df)
df['foo'] = 'foo'
pytest.raises(ValueError, store.append, 'df', df)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
with ensure_clean_store(self.path) as store:
store.put('frame', df1, format='table')
pytest.raises(TypeError, store.put, 'frame', df2,
format='table', append=True)
def test_table_values_dtypes_roundtrip(self):
with ensure_clean_store(self.path) as store:
df1 = DataFrame({'a': [1, 2, 3]}, dtype='f8')
store.append('df_f8', df1)
assert_series_equal(df1.dtypes, store['df_f8'].dtypes)
df2 = DataFrame({'a': [1, 2, 3]}, dtype='i8')
store.append('df_i8', df2)
assert_series_equal(df2.dtypes, store['df_i8'].dtypes)
# incompatible dtype
pytest.raises(ValueError, store.append, 'df_i8', df1)
# check creation/storage/retrieval of float32 (a bit hacky to
            # actually create them though)
df1 = DataFrame(
np.array([[1], [2], [3]], dtype='f4'), columns=['A'])
store.append('df_f4', df1)
assert_series_equal(df1.dtypes, store['df_f4'].dtypes)
assert df1.dtypes[0] == 'float32'
# check with mixed dtypes
df1 = DataFrame(dict((c, Series(np.random.randn(5), dtype=c))
for c in ['float32', 'float64', 'int32',
'int64', 'int16', 'int8']))
df1['string'] = 'foo'
df1['float322'] = 1.
df1['float322'] = df1['float322'].astype('float32')
df1['bool'] = df1['float32'] > 0
df1['time1'] = Timestamp('20130101')
df1['time2'] = Timestamp('20130102')
store.append('df_mixed_dtypes1', df1)
result = store.select('df_mixed_dtypes1').get_dtype_counts()
expected = Series({'float32': 2, 'float64': 1, 'int32': 1,
'bool': 1, 'int16': 1, 'int8': 1,
'int64': 1, 'object': 1, 'datetime64[ns]': 2})
result = result.sort_index()
            expected = expected.sort_index()
tm.assert_series_equal(result, expected)
def test_table_mixed_dtypes(self):
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
df['timestamp1'] = Timestamp('20010102')
df['timestamp2'] = Timestamp('20010103')
df['datetime1'] = datetime.datetime(2001, 1, 2, 0, 0)
df['datetime2'] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ['obj1']] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
store.append('df1_mixed', df)
tm.assert_frame_equal(store.select('df1_mixed'), df)
with catch_warnings(record=True):
# panel
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
wp = wp._consolidate()
with catch_warnings(record=True):
with ensure_clean_store(self.path) as store:
store.append('p1_mixed', wp)
assert_panel_equal(store.select('p1_mixed'), wp)
def test_unimplemented_dtypes_table_columns(self):
with ensure_clean_store(self.path) as store:
l = [('date', datetime.date(2001, 1, 2))]
# py3 ok for unicode
if not compat.PY3:
l.append(('unicode', u('\\u03c3')))
# currently not supported dtypes ####
for n, f in l:
df = tm.makeDataFrame()
df[n] = f
pytest.raises(
TypeError, store.append, 'df1_%s' % n, df)
# frame
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['datetime1'] = datetime.date(2001, 1, 2)
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(self.path) as store:
# this fails because we have a date in the object block......
pytest.raises(TypeError, store.append, 'df_unimplemented', df)
def test_calendar_roundtrip_issue(self):
# 8591
# doc example from tseries holiday section
weekmask_egypt = 'Sun Mon Tue Wed Thu'
holidays = ['2012-05-01',
datetime.datetime(2013, 5, 1), np.datetime64('2014-05-01')]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = (Series(dts.weekday, dts).map(
Series('Mon Tue Wed Thu Fri Sat Sun'.split())))
with ensure_clean_store(self.path) as store:
store.put('fixed', s)
result = store.select('fixed')
assert_series_equal(result, s)
store.append('table', s)
result = store.select('table')
assert_series_equal(result, s)
def test_roundtrip_tz_aware_index(self):
# GH 17618
time = pd.Timestamp('2000-01-01 01:00:00', tz='US/Eastern')
df = pd.DataFrame(data=[0], index=[time])
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='fixed')
recons = store['frame']
tm.assert_frame_equal(recons, df)
assert recons.index[0].value == 946706400000000000
def test_append_with_timedelta(self):
# GH 3577
# append timedelta
df = DataFrame(dict(A=Timestamp('20130101'), B=[Timestamp(
'20130101') + timedelta(days=i, seconds=10) for i in range(10)]))
df['C'] = df['A'] - df['B']
df.loc[3:5, 'C'] = np.nan
with ensure_clean_store(self.path) as store:
# table
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df')
assert_frame_equal(result, df)
result = store.select('df', where="C<100000")
assert_frame_equal(result, df)
result = store.select('df', where="C<pd.Timedelta('-3D')")
assert_frame_equal(result, df.iloc[3:])
result = store.select('df', "C<'-3D'")
assert_frame_equal(result, df.iloc[3:])
# a bit hacky here as we don't really deal with the NaT properly
result = store.select('df', "C<'-500000s'")
result = result.dropna(subset=['C'])
assert_frame_equal(result, df.iloc[6:])
result = store.select('df', "C<'-3.5D'")
result = result.iloc[1:]
assert_frame_equal(result, df.iloc[4:])
# fixed
_maybe_remove(store, 'df2')
store.put('df2', df)
result = store.select('df2')
assert_frame_equal(result, df)
def test_remove(self):
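        # remove keys by name, by path prefix and via __delitem__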
with ensure_clean_store(self.path) as store:
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
store['a'] = ts
store['b'] = df
_maybe_remove(store, 'a')
assert len(store) == 1
tm.assert_frame_equal(df, store['b'])
_maybe_remove(store, 'b')
assert len(store) == 0
# nonexistence
pytest.raises(KeyError, store.remove, 'a_nonexistent_store')
# pathing
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'foo')
_maybe_remove(store, 'b/foo')
assert len(store) == 1
store['a'] = ts
store['b/foo'] = df
_maybe_remove(store, 'b')
assert len(store) == 1
# __delitem__
store['a'] = ts
store['b'] = df
del store['a']
del store['b']
assert len(store) == 0
def test_remove_where(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
                # non-existence
crit1 = 'index>foo'
pytest.raises(KeyError, store.remove, 'a', [crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel(30)
store.put('wp', wp, format='table')
store.remove('wp', ["minor_axis=['A', 'D']"])
rs = store.select('wp')
expected = wp.reindex(minor_axis=['B', 'C'])
assert_panel_equal(rs, expected)
# empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
# deleted number (entire table)
n = store.remove('wp', [])
assert n == 120
                # non-empty where
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
pytest.raises(ValueError, store.remove,
'wp', ['foo'])
def test_remove_startstop(self):
# GH #4835 and #6177
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel(30)
# start
_maybe_remove(store, 'wp1')
store.put('wp1', wp, format='t')
n = store.remove('wp1', start=32)
assert n == 120 - 32
result = store.select('wp1')
expected = wp.reindex(major_axis=wp.major_axis[:32 // 4])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='t')
n = store.remove('wp2', start=-32)
assert n == 32
result = store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis[:-32 // 4])
assert_panel_equal(result, expected)
# stop
_maybe_remove(store, 'wp3')
store.put('wp3', wp, format='t')
n = store.remove('wp3', stop=32)
assert n == 32
result = store.select('wp3')
expected = wp.reindex(major_axis=wp.major_axis[32 // 4:])
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='t')
n = store.remove('wp4', stop=-32)
assert n == 120 - 32
result = store.select('wp4')
expected = wp.reindex(major_axis=wp.major_axis[-32 // 4:])
assert_panel_equal(result, expected)
# start n stop
_maybe_remove(store, 'wp5')
store.put('wp5', wp, format='t')
n = store.remove('wp5', start=16, stop=-16)
assert n == 120 - 32
result = store.select('wp5')
expected = wp.reindex(
major_axis=(wp.major_axis[:16 // 4]
.union(wp.major_axis[-16 // 4:])))
assert_panel_equal(result, expected)
_maybe_remove(store, 'wp6')
store.put('wp6', wp, format='t')
n = store.remove('wp6', start=16, stop=16)
assert n == 0
result = store.select('wp6')
expected = wp.reindex(major_axis=wp.major_axis)
assert_panel_equal(result, expected)
# with where
_maybe_remove(store, 'wp7')
# TODO: unused?
date = wp.major_axis.take(np.arange(0, 30, 3)) # noqa
crit = 'major_axis=date'
store.put('wp7', wp, format='t')
n = store.remove('wp7', where=[crit], stop=80)
assert n == 28
result = store.select('wp7')
expected = wp.reindex(major_axis=wp.major_axis.difference(
wp.major_axis[np.arange(0, 20, 3)]))
assert_panel_equal(result, expected)
def test_remove_crit(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel(30)
# group row removal
_maybe_remove(store, 'wp3')
date4 = wp.major_axis.take([0, 1, 2, 4, 5, 6, 8, 9, 10])
crit4 = 'major_axis=date4'
store.put('wp3', wp, format='t')
n = store.remove('wp3', where=[crit4])
assert n == 36
result = store.select('wp3')
expected = wp.reindex(
major_axis=wp.major_axis.difference(date4))
assert_panel_equal(result, expected)
# upper half
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = 'major_axis>date'
crit2 = "minor_axis=['A', 'D']"
n = store.remove('wp', where=[crit1])
assert n == 56
n = store.remove('wp', where=[crit2])
assert n == 32
result = store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
assert_panel_equal(result, expected)
# individual row elements
_maybe_remove(store, 'wp2')
store.put('wp2', wp, format='table')
date1 = wp.major_axis[1:3]
crit1 = 'major_axis=date1'
store.remove('wp2', where=[crit1])
result = store.select('wp2')
expected = wp.reindex(
major_axis=wp.major_axis.difference(date1))
assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = 'major_axis=date2'
store.remove('wp2', where=[crit2])
result = store['wp2']
expected = wp.reindex(
major_axis=(wp.major_axis
.difference(date1)
.difference(Index([date2]))
))
assert_panel_equal(result, expected)
date3 = [wp.major_axis[7], wp.major_axis[9]]
crit3 = 'major_axis=date3'
store.remove('wp2', where=[crit3])
result = store['wp2']
expected = wp.reindex(major_axis=wp.major_axis
.difference(date1)
.difference(Index([date2]))
.difference(Index(date3)))
assert_panel_equal(result, expected)
# corners
_maybe_remove(store, 'wp4')
store.put('wp4', wp, format='table')
n = store.remove(
'wp4', where="major_axis>wp.major_axis[-1]")
result = store.select('wp4')
assert_panel_equal(result, wp)
def test_invalid_terms(self):
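        # malformed or unresolvable where terms should raise instead of
        # silently returning wrong results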
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[0:4, 'string'] = 'bar'
wp = tm.makePanel()
store.put('df', df, format='table')
store.put('wp', wp, format='table')
# some invalid terms
pytest.raises(ValueError, store.select,
'wp', "minor=['A', 'B']")
pytest.raises(ValueError, store.select,
'wp', ["index=['20121114']"])
pytest.raises(ValueError, store.select, 'wp', [
"index=['20121114', '20121114']"])
pytest.raises(TypeError, Term)
# more invalid
pytest.raises(
ValueError, store.select, 'df', 'df.index[3]')
pytest.raises(SyntaxError, store.select, 'df', 'index>')
pytest.raises(
ValueError, store.select, 'wp',
"major_axis<'20000108' & minor_axis['A', 'B']")
# from the docs
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table', data_columns=True)
# check ok
read_hdf(path, 'dfq',
where="index>Timestamp('20130104') & columns=['A', 'B']")
read_hdf(path, 'dfq', where="A>0 or C>0")
# catch the invalid reference
with ensure_clean_path(self.path) as path:
dfq = DataFrame(np.random.randn(10, 4), columns=list(
'ABCD'), index=date_range('20130101', periods=10))
dfq.to_hdf(path, 'dfq', format='table')
pytest.raises(ValueError, read_hdf, path,
'dfq', where="A>0 or C>0")
def test_terms(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
wpneg = Panel.fromDict({-1: tm.makeDataFrame(),
0: tm.makeDataFrame(),
1: tm.makeDataFrame()})
store.put('wp', wp, format='table')
store.put('wpneg', wpneg, format='table')
# panel
result = store.select(
'wp',
"major_axis<'20000108' and minor_axis=['A', 'B']")
expected = wp.truncate(
after='20000108').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
# with deprecation
result = store.select(
'wp', where=("major_axis<'20000108' "
"and minor_axis=['A', 'B']"))
expected = wp.truncate(
after='20000108').reindex(minor=['A', 'B'])
tm.assert_panel_equal(result, expected)
with catch_warnings(record=True):
# valid terms
terms = [('major_axis=20121114'),
('major_axis>20121114'),
(("major_axis=['20121114', '20121114']"),),
('major_axis=datetime.datetime(2012, 11, 14)'),
'major_axis> 20121114',
'major_axis >20121114',
'major_axis > 20121114',
(("minor_axis=['A', 'B']"),),
(("minor_axis=['A', 'B']"),),
((("minor_axis==['A', 'B']"),),),
(("items=['ItemA', 'ItemB']"),),
('items=ItemA'),
]
for t in terms:
store.select('wp', t)
with tm.assert_raises_regex(
TypeError, 'Only named functions are supported'):
store.select(
'wp',
'major_axis == (lambda x: x)("20130101")')
with catch_warnings(record=True):
# check USub node parsing
res = store.select('wpneg', 'items == -1')
expected = Panel({-1: wpneg[-1]})
tm.assert_panel_equal(res, expected)
with tm.assert_raises_regex(NotImplementedError,
'Unary addition '
'not supported'):
store.select('wpneg', 'items == +1')
def test_term_compat(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = Panel(np.random.randn(2, 5, 4), items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
result = store.select(
'wp', where=("major_axis>20000102 "
"and minor_axis=['A', 'B']"))
expected = wp.loc[:, wp.major_axis >
Timestamp('20000102'), ['A', 'B']]
assert_panel_equal(result, expected)
store.remove('wp', 'major_axis>20000103')
result = store.select('wp')
expected = wp.loc[:, wp.major_axis <= Timestamp('20000103'), :]
assert_panel_equal(result, expected)
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = Panel(np.random.randn(2, 5, 4),
items=['Item1', 'Item2'],
major_axis=date_range('1/1/2000', periods=5),
minor_axis=['A', 'B', 'C', 'D'])
store.append('wp', wp)
# stringified datetimes
result = store.select(
'wp', 'major_axis>datetime.datetime(2000, 1, 2)')
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select(
'wp', 'major_axis>datetime.datetime(2000, 1, 2)')
expected = wp.loc[:, wp.major_axis > Timestamp('20000102')]
assert_panel_equal(result, expected)
result = store.select(
'wp',
"major_axis=[datetime.datetime(2000, 1, 2, 0, 0), "
"datetime.datetime(2000, 1, 3, 0, 0)]")
expected = wp.loc[:, [Timestamp('20000102'),
Timestamp('20000103')]]
assert_panel_equal(result, expected)
result = store.select(
'wp', "minor_axis=['A', 'B']")
expected = wp.loc[:, :, ['A', 'B']]
assert_panel_equal(result, expected)
def test_same_name_scoping(self):
with ensure_clean_store(self.path) as store:
import pandas as pd
df = DataFrame(np.random.randn(20, 2),
index=pd.date_range('20130101', periods=20))
store.put('df', df, format='table')
expected = df[df.index > pd.Timestamp('20130105')]
import datetime # noqa
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
from datetime import datetime # noqa
# technically an error, but allow it
result = store.select('df', 'index>datetime.datetime(2013,1,5)')
assert_frame_equal(result, expected)
result = store.select('df', 'index>datetime(2013,1,5)')
assert_frame_equal(result, expected)
def test_series(self):
s = tm.makeStringSeries()
self._check_roundtrip(s, tm.assert_series_equal)
ts = tm.makeTimeSeries()
self._check_roundtrip(ts, tm.assert_series_equal)
ts2 = Series(ts.index, Index(ts.index, dtype=object))
self._check_roundtrip(ts2, tm.assert_series_equal)
ts3 = Series(ts.values, Index(np.asarray(ts.index, dtype=object),
dtype=object))
self._check_roundtrip(ts3, tm.assert_series_equal,
check_index_type=False)
def test_sparse_series(self):
s = tm.makeStringSeries()
s.iloc[3:5] = np.nan
ss = s.to_sparse()
self._check_roundtrip(ss, tm.assert_series_equal,
check_series_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_roundtrip(ss2, tm.assert_series_equal,
check_series_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_roundtrip(ss3, tm.assert_series_equal,
check_series_type=True)
def test_sparse_frame(self):
s = tm.makeDataFrame()
s.iloc[3:5, 1:3] = np.nan
s.iloc[8:10, -2] = np.nan
ss = s.to_sparse()
self._check_double_roundtrip(ss, tm.assert_frame_equal,
check_frame_type=True)
ss2 = s.to_sparse(kind='integer')
self._check_double_roundtrip(ss2, tm.assert_frame_equal,
check_frame_type=True)
ss3 = s.to_sparse(fill_value=0)
self._check_double_roundtrip(ss3, tm.assert_frame_equal,
check_frame_type=True)
def test_float_index(self):
# GH #454
index = np.random.randn(10)
s = Series(np.random.randn(10), index=index)
self._check_roundtrip(s, tm.assert_series_equal)
def test_tuple_index(self):
# GH #492
col = np.arange(10)
idx = [(0., 1.), (2., 3.), (4., 5.)]
data = np.random.randn(30).reshape((3, 10))
DF = DataFrame(data, index=idx, columns=col)
with catch_warnings(record=True):
self._check_roundtrip(DF, tm.assert_frame_equal)
def test_index_types(self):
with catch_warnings(record=True):
values = np.random.randn(2)
func = lambda l, r: tm.assert_series_equal(l, r,
check_dtype=True,
check_index_type=True,
check_series_type=True)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
with catch_warnings(record=True):
ser = Series(values, [0, 'y'])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime.today(), 0])
self._check_roundtrip(ser, func)
ser = Series(values, ['y', 0])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.date.today(), 'a'])
self._check_roundtrip(ser, func)
ser = Series(values, [1.23, 'b'])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 1.53])
self._check_roundtrip(ser, func)
ser = Series(values, [1, 5])
self._check_roundtrip(ser, func)
ser = Series(values, [datetime.datetime(
2012, 1, 1), datetime.datetime(2012, 1, 2)])
self._check_roundtrip(ser, func)
def test_timeseries_preepoch(self):
dr = bdate_range('1/1/1940', '1/1/1960')
ts = Series(np.random.randn(len(dr)), index=dr)
try:
self._check_roundtrip(ts, tm.assert_series_equal)
except OverflowError:
            pytest.skip('known failure on some windows platforms')
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
def test_frame(self, compression):
df = tm.makeDataFrame()
# put in some random NAs
df.values[0, 0] = np.nan
df.values[5, 3] = np.nan
self._check_roundtrip_table(df, tm.assert_frame_equal,
compression=compression)
self._check_roundtrip(df, tm.assert_frame_equal,
compression=compression)
tdf = tm.makeTimeDataFrame()
self._check_roundtrip(tdf, tm.assert_frame_equal,
compression=compression)
with ensure_clean_store(self.path) as store:
# not consolidated
df['foo'] = np.random.randn(len(df))
store['df'] = df
recons = store['df']
assert recons._data.is_consolidated()
# empty
self._check_roundtrip(df[:0], tm.assert_frame_equal)
def test_empty_series_frame(self):
s0 = Series()
s1 = Series(name='myseries')
df0 = DataFrame()
df1 = DataFrame(index=['a', 'b', 'c'])
df2 = DataFrame(columns=['d', 'e', 'f'])
self._check_roundtrip(s0, tm.assert_series_equal)
self._check_roundtrip(s1, tm.assert_series_equal)
self._check_roundtrip(df0, tm.assert_frame_equal)
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
def test_empty_series(self):
for dtype in [np.int64, np.float64, np.object, 'm8[ns]', 'M8[ns]']:
s = Series(dtype=dtype)
self._check_roundtrip(s, tm.assert_series_equal)
def test_can_serialize_dates(self):
rng = [x.date() for x in bdate_range('1/1/2000', '1/30/2000')]
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
self._check_roundtrip(frame, tm.assert_frame_equal)
def test_store_hierarchical(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['foo', 'bar'])
frame = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self._check_roundtrip(frame, tm.assert_frame_equal)
self._check_roundtrip(frame.T, tm.assert_frame_equal)
self._check_roundtrip(frame['A'], tm.assert_series_equal)
# check that the names are stored
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
tm.assert_frame_equal(recons, frame)
def test_store_index_name(self):
df = tm.makeDataFrame()
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store['frame'] = df
recons = store['frame']
tm.assert_frame_equal(recons, df)
def test_store_index_name_with_tz(self):
# GH 13884
df = pd.DataFrame({'A': [1, 2]})
df.index = pd.DatetimeIndex([1234567890123456787, 1234567890123456788])
df.index = df.index.tz_localize('UTC')
df.index.name = 'foo'
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
recons = store['frame']
tm.assert_frame_equal(recons, df)
@pytest.mark.parametrize('table_format', ['table', 'fixed'])
def test_store_index_name_numpy_str(self, table_format):
# GH #13492
idx = pd.Index(pd.to_datetime([datetime.date(2000, 1, 1),
datetime.date(2000, 1, 2)]),
name=u('cols\u05d2'))
idx1 = pd.Index(pd.to_datetime([datetime.date(2010, 1, 1),
datetime.date(2010, 1, 2)]),
name=u('rows\u05d0'))
df = pd.DataFrame(np.arange(4).reshape(2, 2), columns=idx, index=idx1)
# This used to fail, returning numpy strings instead of python strings.
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format=table_format)
df2 = read_hdf(path, 'df')
assert_frame_equal(df, df2, check_names=True)
assert type(df2.index.name) == text_type
assert type(df2.columns.name) == text_type
def test_store_series_name(self):
df = tm.makeDataFrame()
series = df['A']
with ensure_clean_store(self.path) as store:
store['series'] = series
recons = store['series']
tm.assert_series_equal(recons, series)
@pytest.mark.parametrize("compression", [
False, pytest.param(True, marks=td.skip_if_windows_python_3)
])
def test_store_mixed(self, compression):
def _make_one():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['int1'] = 1
df['int2'] = 2
return df._consolidate()
df1 = _make_one()
df2 = _make_one()
self._check_roundtrip(df1, tm.assert_frame_equal)
self._check_roundtrip(df2, tm.assert_frame_equal)
with ensure_clean_store(self.path) as store:
store['obj'] = df1
tm.assert_frame_equal(store['obj'], df1)
store['obj'] = df2
tm.assert_frame_equal(store['obj'], df2)
# check that can store Series of all of these types
self._check_roundtrip(df1['obj1'], tm.assert_series_equal,
compression=compression)
self._check_roundtrip(df1['bool1'], tm.assert_series_equal,
compression=compression)
self._check_roundtrip(df1['int1'], tm.assert_series_equal,
compression=compression)
def test_wide(self):
with catch_warnings(record=True):
wp = tm.makePanel()
self._check_roundtrip(wp, assert_panel_equal)
def test_select_with_dups(self):
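        # duplicate column names (within and across dtypes) must survive
        # a select, including when columns= is passed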
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=['A'])
expected = df.loc[:, ['A']]
assert_frame_equal(result, expected)
# dups across dtypes
df = concat([DataFrame(np.random.randn(10, 4),
columns=['A', 'A', 'B', 'B']),
DataFrame(np.random.randint(0, 10, size=20)
.reshape(10, 2),
columns=['A', 'C'])],
axis=1)
df.index = date_range('20130101 9:30', periods=10, freq='T')
with ensure_clean_store(self.path) as store:
store.append('df', df)
result = store.select('df')
expected = df
assert_frame_equal(result, expected, by_blocks=True)
result = store.select('df', columns=df.columns)
expected = df
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['A']]
result = store.select('df', columns=['A'])
assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ['B', 'A']]
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(self.path) as store:
store.append('df', df)
store.append('df', df)
expected = df.loc[:, ['B', 'A']]
expected = concat([expected, expected])
result = store.select('df', columns=['B', 'A'])
assert_frame_equal(result, expected, by_blocks=True)
def test_wide_table_dups(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
store.put('panel', wp, format='table')
store.put('panel', wp, format='table', append=True)
recons = store['panel']
assert_panel_equal(recons, wp)
def test_long(self):
def _check(left, right):
assert_panel_equal(left.to_panel(), right.to_panel())
with catch_warnings(record=True):
wp = tm.makePanel()
self._check_roundtrip(wp.to_frame(), _check)
def test_longpanel(self):
pass
def test_overwrite_node(self):
with ensure_clean_store(self.path) as store:
store['a'] = tm.makeTimeDataFrame()
ts = tm.makeTimeSeries()
store['a'] = ts
tm.assert_series_equal(store['a'], ts)
def test_sparse_with_compression(self):
# GH 2931
# make sparse dataframe
arr = np.random.binomial(n=1, p=.01, size=(1000, 10))
df = DataFrame(arr).to_sparse(fill_value=0)
# case 1: store uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 2: store compressed (works)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
# set one series to be completely sparse
df[0] = np.zeros(1000)
# case 3: store df with completely sparse series uncompressed
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression=False,
check_frame_type=True)
# case 4: try storing df with completely sparse series compressed
# (fails)
self._check_double_roundtrip(df, tm.assert_frame_equal,
compression='zlib',
check_frame_type=True)
def test_select(self):
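        # basic put/append plus select, with columns= and data_columns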
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
# put/select ok
_maybe_remove(store, 'wp')
store.put('wp', wp, format='table')
store.select('wp')
# non-table ok (where = None)
_maybe_remove(store, 'wp')
store.put('wp2', wp)
store.select('wp2')
# selection on the non-indexable with a large number of columns
wp = Panel(np.random.randn(100, 100, 100),
items=['Item%03d' % i for i in range(100)],
major_axis=date_range('1/1/2000', periods=100),
minor_axis=['E%03d' % i for i in range(100)])
_maybe_remove(store, 'wp')
store.append('wp', wp)
items = ['Item%03d' % i for i in range(80)]
result = store.select('wp', 'items=items')
expected = wp.reindex(items=items)
assert_panel_equal(expected, result)
                # selecting non-table with a where
# pytest.raises(ValueError, store.select,
# 'wp2', ('column', ['A', 'D']))
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df', columns=['A', 'B'])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
                # equivalently
result = store.select('df', [("columns=['A', 'B']")])
expected = df.reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# all a data columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
result = store.select('df', ['A > 0'], columns=['A', 'B'])
expected = df[df.A > 0].reindex(columns=['A', 'B'])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['A'])
result = store.select('df', ['A > 0'], columns=['C', 'D'])
expected = df[df.A > 0].reindex(columns=['C', 'D'])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(self):
with ensure_clean_store(self.path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(dict(
ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300)))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A'])
result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=['A', 'B'])
df['object'] = 'foo'
df.loc[4:5, 'object'] = 'bar'
df['boolv'] = df['A'] > 0
_maybe_remove(store, 'df')
store.append('df', df, data_columns=True)
expected = (df[df.boolv == True] # noqa
.reindex(columns=['A', 'boolv']))
for v in [True, 'true', 1]:
result = store.select('df', 'boolv == %s' % str(v),
columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
expected = (df[df.boolv == False] # noqa
.reindex(columns=['A', 'boolv']))
for v in [False, 'false', 0]:
result = store.select(
'df', 'boolv == %s' % str(v), columns=['A', 'boolv'])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
_maybe_remove(store, 'df_int')
store.append('df_int', df)
result = store.select(
'df_int', "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(dict(A=np.random.rand(
20), B=np.random.rand(20), index=np.arange(20, dtype='f8')))
_maybe_remove(store, 'df_float')
store.append('df_float', df)
result = store.select(
'df_float', "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=['A'])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(self.path) as store:
# floats w/o NaN
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
store.append('df1', df, data_columns=True)
result = store.select(
'df1', where='values>2.0')
expected = df[df['values'] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df['values'] > 2.0]
store.append('df2', df, data_columns=True, index=False)
result = store.select(
'df2', where='values>2.0')
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame(
dict(cols=range(11), values=range(11)), dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df['values'] > 2.0]
store.append('df4', df, data_columns=True)
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(self.path) as store:
df = tm.makeDataFrame()
expected = df[df['A'] > 0]
store.append('df', df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select('df', where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
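# where clauses referencing local-scope lists, ranges, and Index objects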
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(ts=bdate_range('2012-01-01', periods=300),
A=np.random.randn(300),
B=range(300),
users=['a'] * 50 + ['b'] * 50 + ['c'] * 100 +
['a%03d' % i for i in range(100)]))
_maybe_remove(store, 'df')
store.append('df', df, data_columns=['ts', 'A', 'B', 'users'])
# regular select
result = store.select('df', "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp('2012-02-01')]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select(
'df',
"ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(['a', 'b', 'c'])]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ['a', 'b', 'c'] + ['a%03d' % i for i in range(60)]
result = store.select(
'df',
"ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp('2012-02-01')) &
df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select('df', 'B=selector')
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select('df', 'ts=selector')
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(self):
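# iterator=True and chunksize reads should concatenate back to the full
# selection, for single tables, read_hdf, and select_as_multiple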
# single table
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, 'df')
store.append('df', df)
expected = store.select('df')
results = [s for s in store.select('df', iterator=True)]
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=100)]
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = [s for s in store.select('df', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df_non_table')
pytest.raises(TypeError, read_hdf, path,
'df_non_table', chunksize=100)
pytest.raises(TypeError, read_hdf, path,
'df_non_table', iterator=True)
with ensure_clean_path(self.path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, 'df', format='table')
results = [s for s in read_hdf(path, 'df', chunksize=100)]
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, 'df'))
# multiple
with ensure_clean_store(self.path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append('df1', df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(
columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
store.append('df2', df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(
['df1', 'df2'], selector='df1')
results = [s for s in store.select_as_multiple(
['df1', 'df2'], selector='df1', chunksize=150)]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select('df')
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = "index >= '%s'" % beg_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = "index <= '%s'" % end_dt
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
result = store.select('df', where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = [s for s in store.select('df', chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(self):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100064, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = "index > '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
assert 0 == len(results)
def test_select_iterator_many_empty_frames(self):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = int(1e4)
# with iterator, range limited to the first chunk
with ensure_clean_store(self.path) as store:
expected = tm.makeTimeDataFrame(100000, 'S')
_maybe_remove(store, 'df')
store.append('df', expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = "index >= '%s'" % beg_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = "index <= '%s'" % end_dt
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = "index >= '%s' & index <= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
# exactly one non-empty chunk expected
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) &
(expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = "index <= '%s' & index >= '%s'" % (beg_dt, end_dt)
results = [s for s in store.select(
'df', where=where, chunksize=chunksize)]
# should be []
assert len(results) == 0
def test_retain_index_attributes(self):
# GH 3499, losing frequency info on index recreation
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1', periods=3, freq='H'))))
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'data')
store.put('data', df, format='table')
result = store.get('data')
tm.assert_frame_equal(df, result)
for attr in ['freq', 'tz', 'name']:
for idx in ['index', 'columns']:
assert (getattr(getattr(df, idx), attr, None) ==
getattr(getattr(result, idx), attr, None))
# try to append a table with a different frequency
with catch_warnings(record=True):
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1',
periods=3, freq='D'))))
store.append('data', df2)
assert store.get_storer('data').info['index']['freq'] is None
# this is ok
_maybe_remove(store, 'df2')
df2 = DataFrame(dict(
A=Series(lrange(3),
index=[Timestamp('20010101'), Timestamp('20010102'),
Timestamp('20020101')])))
store.append('df2', df2)
df3 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
store.append('df2', df3)
def test_retain_index_attributes2(self):
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
df = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2000-1-1',
periods=3, freq='H'))))
df.to_hdf(path, 'data', mode='w', append=True)
df2 = DataFrame(dict(
A=Series(lrange(3),
index=date_range('2002-1-1', periods=3,
freq='D'))))
df2.to_hdf(path, 'data', append=True)
idx = date_range('2000-1-1', periods=3, freq='H')
idx.name = 'foo'
df = DataFrame(dict(A=Series(lrange(3), index=idx)))
df.to_hdf(path, 'data', mode='w', append=True)
assert read_hdf(path, 'data').index.name == 'foo'
with catch_warnings(record=True):
idx2 = date_range('2001-1-1', periods=3, freq='H')
idx2.name = 'bar'
df2 = DataFrame(dict(A=Series(lrange(3), index=idx2)))
df2.to_hdf(path, 'data', append=True)
assert read_hdf(path, 'data').index.name is None
def test_panel_select(self):
with ensure_clean_store(self.path) as store:
with catch_warnings(record=True):
wp = tm.makePanel()
store.put('wp', wp, format='table')
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = ('major_axis>=date')
crit2 = ("minor_axis=['A', 'D']")
result = store.select('wp', [crit1, crit2])
expected = wp.truncate(before=date).reindex(minor=['A', 'D'])
assert_panel_equal(result, expected)
result = store.select(
'wp', ['major_axis>="20000124"',
("minor_axis=['A', 'B']")])
expected = wp.truncate(
before='20000124').reindex(minor=['A', 'B'])
assert_panel_equal(result, expected)
def test_frame_select(self):
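# Term objects and string expressions as where criteria on a table-format frame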
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
date = df.index[len(df) // 2]
crit1 = Term('index>=date')
assert crit1.env.scope['date'] == date
crit2 = ("columns=['A', 'D']")
crit3 = ('columns=A')
result = store.select('frame', [crit1, crit2])
expected = df.loc[date:, ['A', 'D']]
tm.assert_frame_equal(result, expected)
result = store.select('frame', [crit3])
expected = df.loc[:, ['A']]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append('df_time', df)
pytest.raises(
ValueError, store.select, 'df_time', "index>0")
# can't select if not written as table
# store['frame'] = df
# pytest.raises(ValueError, store.select,
# 'frame', [crit1, crit2])
def test_frame_select_complex(self):
# select via complex criteria
df = tm.makeTimeDataFrame()
df['string'] = 'foo'
df.loc[df.index[0:4], 'string'] = 'bar'
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', data_columns=['string'])
# empty
result = store.select('df', 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == 'foo')]
tm.assert_frame_equal(result, expected)
# or
result = store.select('df', 'index>df.index[3] | string="bar"')
expected = df.loc[(df.index > df.index[3]) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
result = store.select('df', '(index>df.index[3] & '
'index<=df.index[6]) | string="bar"')
expected = df.loc[((df.index > df.index[3]) & (
df.index <= df.index[6])) | (df.string == 'bar')]
tm.assert_frame_equal(result, expected)
# invert
result = store.select('df', 'string!="bar"')
expected = df.loc[df.string != 'bar']
tm.assert_frame_equal(result, expected)
# invert not implemented in numexpr :(
pytest.raises(NotImplementedError,
store.select, 'df', '~(string="bar")')
# invert ok for filters
result = store.select('df', "~(columns=['A','B'])")
expected = df.loc[:, df.columns.difference(['A', 'B'])]
tm.assert_frame_equal(result, expected)
# in
result = store.select(
'df', "index>df.index[3] & columns in ['A','B']")
expected = df.loc[df.index > df.index[3]].reindex(columns=[
'A', 'B'])
tm.assert_frame_equal(result, expected)
def test_frame_select_complex2(self):
with ensure_clean_path(['parms.hdf', 'hist.hdf']) as paths:
pp, hh = paths
# use non-trivial selection criteria
parms = DataFrame({'A': [1, 1, 2, 2, 3]})
parms.to_hdf(pp, 'df', mode='w',
format='table', data_columns=['A'])
selection = read_hdf(pp, 'df', where='A=[2,3]')
hist = DataFrame(np.random.randn(25, 1),
columns=['data'],
index=MultiIndex.from_tuples(
[(i, j) for i in range(5)
for j in range(5)],
names=['l1', 'l2']))
hist.to_hdf(hh, 'df', mode='w', format='table')
expected = read_hdf(hh, 'df', where='l1=[2, 3, 4]')
# scope with a list-like
l = selection.index.tolist() # noqa
store = HDFStore(hh)
result = store.select('df', where='l1=l')
assert_frame_equal(result, expected)
store.close()
result = read_hdf(hh, 'df', where='l1=l')
assert_frame_equal(result, expected)
# index
index = selection.index # noqa
result = read_hdf(hh, 'df', where='l1=index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = read_hdf(hh, 'df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
# scope with an index
store = HDFStore(hh)
result = store.select('df', where='l1=index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=selection.index.tolist()')
assert_frame_equal(result, expected)
result = store.select('df', where='l1=list(selection.index)')
assert_frame_equal(result, expected)
store.close()
def test_invalid_filtering(self):
# can't use more than one filter (atm)
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table')
# not implemented
pytest.raises(NotImplementedError, store.select,
'df', "columns=['A'] | columns=['B']")
# in theory we could deal with this
pytest.raises(NotImplementedError, store.select,
'df', "columns=['A','B'] & columns=['C']")
def test_string_select(self):
# GH 2973
with ensure_clean_store(self.path) as store:
df = tm.makeTimeDataFrame()
# test string ==/!=
df['x'] = 'none'
df.loc[2:7, 'x'] = ''
store.append('df', df, data_columns=['x'])
result = store.select('df', 'x=none')
expected = df[df.x == 'none']
assert_frame_equal(result, expected)
try:
result = store.select('df', 'x!=none')
expected = df[df.x != 'none']
assert_frame_equal(result, expected)
except Exception as detail:
pprint_thing("[{0}]".format(detail))
pprint_thing(store)
pprint_thing(expected)
df2 = df.copy()
df2.loc[df2.x == '', 'x'] = np.nan
store.append('df2', df2, data_columns=['x'])
result = store.select('df2', 'x!=none')
expected = df2[isna(df2.x)]
assert_frame_equal(result, expected)
# int ==/!=
df['int'] = 1
df.loc[2:7, 'int'] = 2
store.append('df3', df, data_columns=['int'])
result = store.select('df3', 'int=2')
expected = df[df.int == 2]
assert_frame_equal(result, expected)
result = store.select('df3', 'int!=2')
expected = df[df.int != 2]
assert_frame_equal(result, expected)
def test_read_column(self):
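# select_column: errors for missing or non-indexable columns, NaN handling,
# start/stop slicing, and column-name preservation (GH 10392)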
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# error
pytest.raises(KeyError, store.select_column, 'df', 'foo')
def f():
store.select_column('df', 'index', where=['index>5'])
pytest.raises(Exception, f)
# valid
result = store.select_column('df', 'index')
tm.assert_almost_equal(result.values, Series(df.index).values)
assert isinstance(result, Series)
# not a data indexable column
pytest.raises(
ValueError, store.select_column, 'df', 'values_block_0')
# a data column
df2 = df.copy()
df2['string'] = 'foo'
store.append('df2', df2, data_columns=['string'])
result = store.select_column('df2', 'string')
tm.assert_almost_equal(result.values, df2['string'].values)
# a data column with NaNs, result excludes the NaNs
df3 = df.copy()
df3['string'] = 'foo'
df3.loc[4:6, 'string'] = np.nan
store.append('df3', df3, data_columns=['string'])
result = store.select_column('df3', 'string')
tm.assert_almost_equal(result.values, df3['string'].values)
# start/stop
result = store.select_column('df3', 'string', start=2)
tm.assert_almost_equal(result.values, df3['string'].values[2:])
result = store.select_column('df3', 'string', start=-2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:])
result = store.select_column('df3', 'string', stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[:2])
result = store.select_column('df3', 'string', stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[:-2])
result = store.select_column('df3', 'string', start=2, stop=-2)
tm.assert_almost_equal(result.values, df3['string'].values[2:-2])
result = store.select_column('df3', 'string', start=-2, stop=2)
tm.assert_almost_equal(result.values, df3['string'].values[-2:2])
# GH 10392 - make sure column name is preserved
df4 = DataFrame({'A': np.random.randn(10), 'B': 'foo'})
store.append('df4', df4, data_columns=True)
expected = df4['B']
result = store.select_column('df4', 'B')
tm.assert_series_equal(result, expected)
def test_coordinates(self):
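# select_as_coordinates returns an Index of row coordinates reusable as a where;
# also covers passing arrays, boolean masks, and lists directly as coordinates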
df = tm.makeTimeDataFrame()
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df')
store.append('df', df)
# all
c = store.select_as_coordinates('df')
assert((c.values == np.arange(len(df.index))).all())
# get coordinates back & test vs frame
_maybe_remove(store, 'df')
df = DataFrame(dict(A=lrange(5), B=lrange(5)))
store.append('df', df)
c = store.select_as_coordinates('df', ['index<3'])
assert((c.values == np.arange(3)).all())
result = store.select('df', where=c)
expected = df.loc[0:2, :]
tm.assert_frame_equal(result, expected)
c = store.select_as_coordinates('df', ['index>=3', 'index<=4'])
assert((c.values == np.arange(2) + 3).all())
result = store.select('df', where=c)
expected = df.loc[3:4, :]
tm.assert_frame_equal(result, expected)
assert isinstance(c, Index)
# multiple tables
_maybe_remove(store, 'df1')
_maybe_remove(store, 'df2')
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
c = store.select_as_coordinates('df1', ['A>0', 'B>0'])
df1_result = store.select('df1', c)
df2_result = store.select('df2', c)
result = concat([df1_result, df2_result], axis=1)
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# pass array/mask as the coordinates
with ensure_clean_store(self.path) as store:
df = DataFrame(np.random.randn(1000, 2),
index=date_range('20000101', periods=1000))
store.append('df', df)
c = store.select_column('df', 'index')
where = c[DatetimeIndex(c).month == 5].index
expected = df.iloc[where]
# locations
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# boolean
result = store.select('df', where=where)
tm.assert_frame_equal(result, expected)
# invalid
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df), dtype='float64'))
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df) + 1))
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5)
pytest.raises(ValueError, store.select, 'df',
where=np.arange(len(df)), start=5, stop=10)
# selection with filter
selection = date_range('20000101', periods=500)
result = store.select('df', where='index in selection')
expected = df[df.index.isin(selection)]
tm.assert_frame_equal(result, expected)
# list
df = DataFrame(np.random.randn(10, 2))
store.append('df2', df)
result = store.select('df2', where=[0, 3, 5])
expected = df.iloc[[0, 3, 5]]
tm.assert_frame_equal(result, expected)
# boolean
where = [True] * 10
where[-2] = False
result = store.select('df2', where=where)
expected = df.loc[where]
tm.assert_frame_equal(result, expected)
# start/stop
result = store.select('df2', start=5, stop=10)
expected = df[5:10]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple(self):
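# append_to_multiple splits one frame's columns across several tables;
# select_as_multiple joins them back via the selector table's where clause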
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# exceptions
pytest.raises(ValueError, store.append_to_multiple,
{'df1': ['A', 'B'], 'df2': None}, df,
selector='df3')
pytest.raises(ValueError, store.append_to_multiple,
{'df1': None, 'df2': None}, df, selector='df3')
pytest.raises(
ValueError, store.append_to_multiple, 'df1', df, 'df1')
# regular operation
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1')
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = df[(df.A > 0) & (df.B > 0)]
tm.assert_frame_equal(result, expected)
def test_append_to_multiple_dropna(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=True should guarantee rows are synchronized
store.append_to_multiple(
{'df1': ['A', 'B'], 'df2': None}, df, selector='df1',
dropna=True)
result = store.select_as_multiple(['df1', 'df2'])
expected = df.dropna()
tm.assert_frame_equal(result, expected)
tm.assert_index_equal(store.select('df1').index,
store.select('df2').index)
@pytest.mark.xfail(run=False,
reason="append_to_multiple_dropna_false "
"is not raising as failed")
def test_append_to_multiple_dropna_false(self):
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df1.iloc[1, df1.columns.get_indexer(['A', 'B'])] = np.nan
df = concat([df1, df2], axis=1)
with ensure_clean_store(self.path) as store:
# dropna=False shouldn't synchronize row indexes
store.append_to_multiple(
{'df1a': ['A', 'B'], 'df2a': None}, df, selector='df1a',
dropna=False)
with pytest.raises(ValueError):
store.select_as_multiple(['df1a', 'df2a'])
assert not store.select('df1a').index.equals(
store.select('df2a').index)
def test_select_as_multiple(self):
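# select_as_multiple: argument validation plus joined selection across tables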
df1 = tm.makeTimeDataFrame()
df2 = tm.makeTimeDataFrame().rename(columns=lambda x: "%s_2" % x)
df2['foo'] = 'bar'
with ensure_clean_store(self.path) as store:
# no tables stored
pytest.raises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
store.append('df1', df1, data_columns=['A', 'B'])
store.append('df2', df2)
# exceptions
pytest.raises(Exception, store.select_as_multiple,
None, where=['A>0', 'B>0'], selector='df1')
pytest.raises(Exception, store.select_as_multiple,
[None], where=['A>0', 'B>0'], selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df3'], where=['A>0', 'B>0'], selector='df1')
pytest.raises(KeyError, store.select_as_multiple,
['df1', 'df2'], where=['A>0', 'B>0'],
selector='df4')
# default select
result = store.select('df1', ['A>0', 'B>0'])
expected = store.select_as_multiple(
['df1'], where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
expected = store.select_as_multiple(
'df1', where=['A>0', 'B>0'], selector='df1')
tm.assert_frame_equal(result, expected)
# multiple
result = store.select_as_multiple(
['df1', 'df2'], where=['A>0', 'B>0'], selector='df1')
expected = concat([df1, df2], axis=1)
expected = expected[(expected.A > 0) & (expected.B > 0)]
tm.assert_frame_equal(result, expected)
# multiple (diff selector)
result = store.select_as_multiple(
['df1', 'df2'], where='index>df2.index[4]', selector='df2')
expected = concat([df1, df2], axis=1)
expected = expected[5:]
tm.assert_frame_equal(result, expected)
# test exception for diff rows
store.append('df3', tm.makeTimeDataFrame(nper=50))
pytest.raises(ValueError, store.select_as_multiple,
['df1', 'df3'], where=['A>0', 'B>0'],
selector='df1')
@pytest.mark.skipif(
LooseVersion(tables.__version__) < LooseVersion('3.1.0'),
reason=("tables version does not support fix for nan selection "
"bug: GH 4858"))
def test_nan_selection_bug_4858(self):
with ensure_clean_store(self.path) as store:
df = DataFrame(dict(cols=range(6), values=range(6)),
dtype='float64')
df['cols'] = (df['cols'] + 10).apply(str)
df.iloc[0] = np.nan
expected = DataFrame(dict(cols=['13.0', '14.0', '15.0'], values=[
3., 4., 5.]), index=[3, 4, 5])
# write w/o the index on that particular column
store.append('df', df, data_columns=True, index=['cols'])
result = store.select('df', where='values>2.0')
assert_frame_equal(result, expected)
def test_start_stop_table(self):
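# start/stop row bounds on table-format selects, including an out-of-range window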
with ensure_clean_store(self.path) as store:
# table
df = DataFrame(dict(A=np.random.rand(20), B=np.random.rand(20)))
store.append('df', df)
result = store.select(
'df', "columns=['A']", start=0, stop=5)
expected = df.loc[0:4, ['A']]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', "columns=['A']", start=30, stop=40)
assert len(result) == 0
expected = df.loc[30:40, ['A']]
tm.assert_frame_equal(result, expected)
def test_start_stop_multiple(self):
# GH 16209
with ensure_clean_store(self.path) as store:
df = DataFrame({"foo": [1, 2], "bar": [1, 2]})
store.append_to_multiple({'selector': ['foo'], 'data': None}, df,
selector='selector')
result = store.select_as_multiple(['selector', 'data'],
selector='selector', start=0,
stop=1)
expected = df.loc[[0], ['foo', 'bar']]
tm.assert_frame_equal(result, expected)
def test_start_stop_fixed(self):
with ensure_clean_store(self.path) as store:
# fixed, GH 8287
df = DataFrame(dict(A=np.random.rand(20),
B=np.random.rand(20)),
index=pd.date_range('20130101', periods=20))
store.put('df', df)
result = store.select(
'df', start=0, stop=5)
expected = df.iloc[0:5, :]
tm.assert_frame_equal(result, expected)
result = store.select(
'df', start=5, stop=10)
expected = df.iloc[5:10, :]
tm.assert_frame_equal(result, expected)
# out of range
result = store.select(
'df', start=30, stop=40)
expected = df.iloc[30:40, :]
tm.assert_frame_equal(result, expected)
# series
s = df.A
store.put('s', s)
result = store.select('s', start=0, stop=5)
expected = s.iloc[0:5]
tm.assert_series_equal(result, expected)
result = store.select('s', start=5, stop=10)
expected = s.iloc[5:10]
tm.assert_series_equal(result, expected)
# sparse; not implemented
df = tm.makeDataFrame()
df.iloc[3:5, 1:3] = np.nan
df.iloc[8:10, -2] = np.nan
dfs = df.to_sparse()
store.put('dfs', dfs)
with pytest.raises(NotImplementedError):
store.select('dfs', start=0, stop=5)
def test_select_filter_corner(self):
df = DataFrame(np.random.randn(50, 100))
df.index = ['%.3d' % c for c in df.index]
df.columns = ['%.3d' % c for c in df.columns]
with ensure_clean_store(self.path) as store:
store.put('frame', df, format='table')
crit = 'columns=df.columns[:75]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75]])
crit = 'columns=df.columns[:75:2]'
result = store.select('frame', [crit])
tm.assert_frame_equal(result, df.loc[:, df.columns[:75:2]])
def test_path_pathlib(self):
df = tm.makeDataFrame()
result = tm.round_trip_pathlib(
lambda p: df.to_hdf(p, 'df'),
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
@pytest.mark.parametrize('start, stop', [(0, 2), (1, 2), (None, None)])
def test_contiguous_mixed_data_table(self, start, stop):
# GH 17021
# ValueError when reading a contiguous mixed-data table ft. VLArray
df = DataFrame({'a': Series([20111010, 20111011, 20111012]),
'b': Series(['ab', 'cd', 'ab'])})
with ensure_clean_store(self.path) as store:
store.append('test_dataset', df)
result = store.select('test_dataset', start=start, stop=stop)
assert_frame_equal(df[start:stop], result)
def test_path_pathlib_hdfstore(self):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, 'df')
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, 'df')
result = tm.round_trip_pathlib(writer, reader)
tm.assert_frame_equal(df, result)
def test_pickle_path_localpath(self):
df = tm.makeDataFrame()
result = tm.round_trip_localpath(
lambda p: df.to_hdf(p, 'df'),
lambda p: pd.read_hdf(p, 'df'))
tm.assert_frame_equal(df, result)
def test_path_localpath_hdfstore(self):
df = tm.makeDataFrame()
def writer(path):
with pd.HDFStore(path) as store:
df.to_hdf(store, 'df')
def reader(path):
with pd.HDFStore(path) as store:
return pd.read_hdf(store, 'df')
result = tm.round_trip_localpath(writer, reader)
tm.assert_frame_equal(df, result)
def _check_roundtrip(self, obj, comparator, compression=False, **kwargs):
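# helper: write obj to a fresh store (optionally compressed) and compare the
# retrieved value using comparator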
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
def _check_double_roundtrip(self, obj, comparator, compression=False,
**kwargs):
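# helper: like _check_roundtrip, but re-store the retrieved object and compare again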
options = {}
if compression:
options['complib'] = compression or _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store['obj'] = obj
retrieved = store['obj']
comparator(retrieved, obj, **kwargs)
store['obj'] = retrieved
again = store['obj']
comparator(again, obj, **kwargs)
def _check_roundtrip_table(self, obj, comparator, compression=False):
options = {}
if compression:
options['complib'] = _default_compressor
with ensure_clean_store(self.path, 'w', **options) as store:
store.put('obj', obj, format='table')
retrieved = store['obj']
comparator(retrieved, obj)
def test_multiple_open_close(self):
# gh-4409: open & close multiple times
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
# single
store = HDFStore(path)
assert 'CLOSED' not in store.info()
assert store.is_open
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
with ensure_clean_path(self.path) as path:
if pytables._table_file_open_policy_is_strict:
# multiples
store1 = HDFStore(path)
def f():
HDFStore(path)
pytest.raises(ValueError, f)
store1.close()
else:
# multiples
store1 = HDFStore(path)
store2 = HDFStore(path)
assert 'CLOSED' not in store1.info()
assert 'CLOSED' not in store2.info()
assert store1.is_open
assert store2.is_open
store1.close()
assert 'CLOSED' in store1.info()
assert not store1.is_open
assert 'CLOSED' not in store2.info()
assert store2.is_open
store2.close()
assert 'CLOSED' in store1.info()
assert 'CLOSED' in store2.info()
assert not store1.is_open
assert not store2.is_open
# nested close
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store2.append('df2', df)
store2.close()
assert 'CLOSED' in store2.info()
assert not store2.is_open
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
# double closing
store = HDFStore(path, mode='w')
store.append('df', df)
store2 = HDFStore(path)
store.close()
assert 'CLOSED' in store.info()
assert not store.is_open
store2.close()
assert 'CLOSED' in store2.info()
assert not store2.is_open
# ops on a closed store
with ensure_clean_path(self.path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, 'df', mode='w', format='table')
store = HDFStore(path)
store.close()
pytest.raises(ClosedFileError, store.keys)
pytest.raises(ClosedFileError, lambda: 'df' in store)
pytest.raises(ClosedFileError, lambda: len(store))
pytest.raises(ClosedFileError, lambda: store['df'])
pytest.raises(AttributeError, lambda: store.df)
pytest.raises(ClosedFileError, store.select, 'df')
pytest.raises(ClosedFileError, store.get, 'df')
pytest.raises(ClosedFileError, store.append, 'df2', df)
pytest.raises(ClosedFileError, store.put, 'df3', df)
pytest.raises(ClosedFileError, store.get_storer, 'df2')
pytest.raises(ClosedFileError, store.remove, 'df2')
def f():
store.select('df')
tm.assert_raises_regex(ClosedFileError, 'file is not open', f)
def test_pytables_native_read(self):
with ensure_clean_store(
tm.get_data_path('legacy_hdf/pytables_native.h5'),
mode='r') as store:
d2 = store['detector/readout']
assert isinstance(d2, DataFrame)
@pytest.mark.skipif(PY35 and is_platform_windows(),
reason="native2 read fails oddly on windows / 3.5")
def test_pytables_native2_read(self):
with ensure_clean_store(
tm.get_data_path('legacy_hdf/pytables_native2.h5'),
mode='r') as store:
str(store)
d1 = store['detector']
assert isinstance(d1, DataFrame)
def test_legacy_table_read(self):
# legacy table types
with ensure_clean_store(
tm.get_data_path('legacy_hdf/legacy_table.h5'),
mode='r') as store:
with catch_warnings(record=True):
store.select('df1')
store.select('df2')
store.select('wp1')
# force the frame
store.select('df2', typ='legacy_frame')
# old version warning
pytest.raises(
Exception, store.select, 'wp1', 'minor_axis=B')
df2 = store.select('df2')
result = store.select('df2', 'index>df2.index[2]')
expected = df2[df2.index > df2.index[2]]
assert_frame_equal(expected, result)
def test_copy(self):
with catch_warnings(record=True):
def do_copy(f, new_f=None, keys=None,
propindexes=True, **kwargs):
try:
store = HDFStore(f, 'r')
if new_f is None:
import tempfile
fd, new_f = tempfile.mkstemp()
tstore = store.copy(
new_f, keys=keys, propindexes=propindexes, **kwargs)
# check keys
if keys is None:
keys = store.keys()
assert set(keys) == set(tstore.keys())
# check indices & nrows
for k in tstore.keys():
if tstore.get_storer(k).is_table:
new_t = tstore.get_storer(k)
orig_t = store.get_storer(k)
assert orig_t.nrows == new_t.nrows
# check propindexes
if propindexes:
for a in orig_t.axes:
if a.is_indexed:
assert new_t[a.name].is_indexed
finally:
safe_close(store)
safe_close(tstore)
try:
os.close(fd)
except:
pass
safe_remove(new_f)
# new table
df = tm.makeDataFrame()
try:
path = create_tempfile(self.path)
st = HDFStore(path)
st.append('df', df, data_columns=['A'])
st.close()
do_copy(f=path)
do_copy(f=path, propindexes=False)
finally:
safe_remove(path)
def test_store_datetime_fractional_secs(self):
with ensure_clean_store(self.path) as store:
dt = datetime.datetime(2012, 1, 2, 3, 4, 5, 123456)
series = Series([0], [dt])
store['a'] = series
assert store['a'].index[0] == dt
def test_tseries_indices_series(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
idx = tm.makePeriodIndex(10)
ser = Series(np.random.randn(len(idx)), idx)
store['a'] = ser
result = store['a']
tm.assert_series_equal(result, ser)
assert result.index.freq == ser.index.freq
tm.assert_class_equal(result.index, ser.index, obj="series index")
def test_tseries_indices_frame(self):
with ensure_clean_store(self.path) as store:
idx = tm.makeDateIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), index=idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index,
obj="dataframe index")
idx = tm.makePeriodIndex(10)
df = DataFrame(np.random.randn(len(idx), 3), idx)
store['a'] = df
result = store['a']
assert_frame_equal(result, df)
assert result.index.freq == df.index.freq
tm.assert_class_equal(result.index, df.index,
obj="dataframe index")
def test_unicode_index(self):
unicode_values = [u('\u03c3'), u('\u03c3\u03c3')]
# PerformanceWarning
with catch_warnings(record=True):
s = Series(np.random.randn(len(unicode_values)), unicode_values)
self._check_roundtrip(s, tm.assert_series_equal)
def test_unicode_longer_encoded(self):
# GH 11234
char = '\u0394'
df = pd.DataFrame({'A': [char]})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})
with ensure_clean_store(self.path) as store:
store.put('df', df, format='table', encoding='utf-8')
result = store.get('df')
tm.assert_frame_equal(result, df)
def test_store_datetime_mixed(self):
df = DataFrame(
{'a': [1, 2, 3], 'b': [1., 2., 3.], 'c': ['a', 'b', 'c']})
ts = tm.makeTimeSeries()
df['d'] = ts.index[:3]
self._check_roundtrip(df, tm.assert_frame_equal)
# def test_cant_write_multiindex_table(self):
# # for now, #1848
# df = DataFrame(np.random.randn(10, 4),
# index=[np.arange(5).repeat(2),
# np.tile(np.arange(2), 5)])
# pytest.raises(Exception, store.put, 'foo', df, format='table')
def test_append_with_diff_col_name_types_raises_value_error(self):
df = DataFrame(np.random.randn(10, 1))
df2 = DataFrame({'a': np.random.randn(10)})
df3 = DataFrame({(1, 2): np.random.randn(10)})
df4 = DataFrame({('1', 2): np.random.randn(10)})
df5 = DataFrame({('1', 2, object): np.random.randn(10)})
with ensure_clean_store(self.path) as store:
name = 'df_%s' % tm.rands(10)
store.append(name, df)
for d in (df2, df3, df4, df5):
with pytest.raises(ValueError):
store.append(name, d)
def test_query_with_nested_special_character(self):
df = DataFrame({'a': ['a', 'a', 'c', 'b',
'test & test', 'c', 'b', 'e'],
'b': [1, 2, 3, 4, 5, 6, 7, 8]})
expected = df[df.a == 'test & test']
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
result = store.select('test', 'a = "test & test"')
tm.assert_frame_equal(expected, result)
def test_categorical(self):
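# Categorical round-trips: ordered/unordered, dtype preservation, where queries,
# appends with matching categories, and metadata cleanup on remove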
with ensure_clean_store(self.path) as store:
# Basic
_maybe_remove(store, 's')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s', s, format='table')
result = store.select('s')
tm.assert_series_equal(s, result)
_maybe_remove(store, 's_ordered')
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=True))
store.append('s_ordered', s, format='table')
result = store.select('s_ordered')
tm.assert_series_equal(s, result)
_maybe_remove(store, 'df')
df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
store.append('df', df, format='table')
result = store.select('df')
tm.assert_frame_equal(result, df)
# Dtypes
s = Series([1, 1, 2, 2, 3, 4, 5]).astype('category')
store.append('si', s)
result = store.select('si')
tm.assert_series_equal(result, s)
s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype('category')
store.append('si2', s)
result = store.select('si2')
tm.assert_series_equal(result, s)
# Multiple
df2 = df.copy()
df2['s2'] = Series(list('abcdefg')).astype('category')
store.append('df2', df2)
result = store.select('df2')
tm.assert_frame_equal(result, df2)
# Make sure the metadata is OK
info = store.info()
assert '/df2 ' in info
# assert '/df2/meta/values_block_0/meta' in info
assert '/df2/meta/values_block_1/meta' in info
# unordered
s = Series(Categorical(['a', 'b', 'b', 'a', 'a', 'c'], categories=[
'a', 'b', 'c', 'd'], ordered=False))
store.append('s2', s, format='table')
result = store.select('s2')
tm.assert_series_equal(result, s)
# Query
store.append('df3', df, data_columns=['s'])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['d'])]
result = store.select('df3', where=['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(['f'])]
result = store.select('df3', where=['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# Appending with same categories is ok
store.append('df3', df)
df = concat([df, df])
expected = df[df.s.isin(['b', 'c'])]
result = store.select('df3', where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# Appending must have the same categories
df3 = df.copy()
df3['s'].cat.remove_unused_categories(inplace=True)
with pytest.raises(ValueError):
store.append('df3', df3)
# Remove, and make sure meta data is removed (its a recursive
# removal so should be).
result = store.select('df3/meta/s/meta')
assert result is not None
store.remove('df3')
with pytest.raises(KeyError):
store.select('df3/meta/s/meta')
def test_categorical_conversion(self):
# GH13322
# Check that read_hdf with categorical columns doesn't return rows if
# the where criteria aren't met.
obsids = ['ESP_012345_6789', 'ESP_987654_3210']
imgids = ['APF00006np', 'APF0001imm']
data = [4.3, 9.8]
# Test without categories
df = DataFrame(dict(obsids=obsids, imgids=imgids, data=data))
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df', where='obsids=B')
tm.assert_frame_equal(result, expected)
# Test with categories
df.obsids = df.obsids.astype('category')
df.imgids = df.imgids.astype('category')
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df', where='obsids=B')
tm.assert_frame_equal(result, expected)
def test_categorical_nan_only_columns(self):
# GH18413
# Check that categorical columns containing only NaN values survive a
# to_hdf / read_hdf round trip.
df = pd.DataFrame({
'a': ['a', 'b', 'c', np.nan],
'b': [np.nan, np.nan, np.nan, np.nan],
'c': [1, 2, 3, 4],
'd': pd.Series([None] * 4, dtype=object)
})
df['a'] = df.a.astype('category')
df['b'] = df.b.astype('category')
df['d'] = df.b.astype('category')
expected = df
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', data_columns=True)
result = read_hdf(path, 'df')
tm.assert_frame_equal(result, expected)
def test_duplicate_column_name(self):
df = DataFrame(columns=["a", "a"], data=[[0, 0]])
with ensure_clean_path(self.path) as path:
pytest.raises(ValueError, df.to_hdf,
path, 'df', format='fixed')
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_round_trip_equals(self):
# GH 9330
df = DataFrame({"B": [1, 2], "A": ["x", "y"]})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
other = read_hdf(path, 'df')
tm.assert_frame_equal(df, other)
assert df.equals(other)
assert other.equals(df)
def test_preserve_timedeltaindex_type(self):
# GH9635
# Storing TimedeltaIndexed DataFrames in fixed stores did not preserve
# the type of the index.
df = DataFrame(np.random.normal(size=(10, 5)))
df.index = timedelta_range(
start='0s', periods=10, freq='1s', name='example')
with ensure_clean_store(self.path) as store:
store['df'] = df
assert_frame_equal(store['df'], df)
def test_columns_multiindex_modified(self):
# BUG: 7212
# read_hdf store.select modified the passed columns parameters
# when multi-indexed.
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
data_columns = df.index.names + df.columns.tolist()
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df',
mode='a',
append=True,
data_columns=data_columns,
index=False)
cols2load = list('BCD')
cols2load_original = list(cols2load)
df_loaded = read_hdf(path, 'df', columns=cols2load) # noqa
assert cols2load_original == cols2load
def test_to_hdf_with_object_column_names(self):
# GH9057
# Writing HDF5 table format should only work for string-like
# column types
types_should_fail = [tm.makeIntIndex, tm.makeFloatIndex,
tm.makeDateIndex, tm.makeTimedeltaIndex,
tm.makePeriodIndex]
types_should_run = [tm.makeStringIndex, tm.makeCategoricalIndex]
if compat.PY3:
types_should_run.append(tm.makeUnicodeIndex)
else:
types_should_fail.append(tm.makeUnicodeIndex)
for index in types_should_fail:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
with pytest.raises(
ValueError, msg=("cannot have non-object label "
"DataIndexableCol")):
df.to_hdf(path, 'df', format='table',
data_columns=True)
for index in types_should_run:
df = DataFrame(np.random.randn(10, 2), columns=index(2))
with ensure_clean_path(self.path) as path:
with catch_warnings(record=True):
df.to_hdf(path, 'df', format='table', data_columns=True)
result = pd.read_hdf(
path, 'df', where="index = [{0}]".format(df.index[0]))
assert(len(result))
def test_read_hdf_open_store(self):
# GH10330
# No check for non-string path_or_buf, and no test of open store
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w')
direct = read_hdf(path, 'df')
store = HDFStore(path, mode='r')
indirect = read_hdf(store, 'df')
tm.assert_frame_equal(direct, indirect)
assert store.is_open
store.close()
def test_read_hdf_iterator(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
df.index.name = 'letters'
df = df.set_index(keys='E', append=True)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='w', format='t')
direct = read_hdf(path, 'df')
iterator = read_hdf(path, 'df', iterator=True)
assert isinstance(iterator, TableIterator)
indirect = next(iterator.__iter__())
tm.assert_frame_equal(direct, indirect)
iterator.store.close()
def test_read_hdf_errors(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
pytest.raises(IOError, read_hdf, path, 'key')
df.to_hdf(path, 'df')
store = HDFStore(path, mode='r')
store.close()
pytest.raises(IOError, read_hdf, store, 'df')
def test_read_hdf_generic_buffer_errors(self):
pytest.raises(NotImplementedError, read_hdf, BytesIO(b''), 'df')
def test_invalid_complib(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
with pytest.raises(ValueError):
df.to_hdf(path, 'df', complib='foolib')
# GH10443
def test_read_nokey(self):
df = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
# Categorical dtype not supported for "fixed" format. So no need
# to test with that dtype in the dataframe here.
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a')
pytest.raises(ValueError, read_hdf, path)
def test_read_nokey_table(self):
# GH13231
df = DataFrame({'i': range(5),
'c': Series(list('abacd'), dtype='category')})
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', mode='a', format='table')
reread = read_hdf(path)
assert_frame_equal(df, reread)
df.to_hdf(path, 'df2', mode='a', format='table')
pytest.raises(ValueError, read_hdf, path)
def test_read_nokey_empty(self):
with ensure_clean_path(self.path) as path:
store = HDFStore(path)
store.close()
pytest.raises(ValueError, read_hdf, path)
@td.skip_if_no('pathlib')
def test_read_from_pathlib_path(self):
# GH11773
from pathlib import Path
expected = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as filename:
path_obj = Path(filename)
expected.to_hdf(path_obj, 'df', mode='a')
actual = read_hdf(path_obj, 'df')
tm.assert_frame_equal(expected, actual)
@td.skip_if_no('py.path')
def test_read_from_py_localpath(self):
# GH11773
from py.path import local as LocalPath
expected = DataFrame(np.random.rand(4, 5),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as filename:
path_obj = LocalPath(filename)
expected.to_hdf(path_obj, 'df', mode='a')
actual = read_hdf(path_obj, 'df')
tm.assert_frame_equal(expected, actual)
def test_query_long_float_literal(self):
# GH 14241
df = pd.DataFrame({'A': [1000000000.0009,
1000000000.0011,
1000000000.0015]})
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
cutoff = 1000000000.0006
result = store.select('test', "A < %.4f" % cutoff)
assert result.empty
cutoff = 1000000000.0010
result = store.select('test', "A > %.4f" % cutoff)
expected = df.loc[[1, 2], :]
tm.assert_frame_equal(expected, result)
exact = 1000000000.0011
result = store.select('test', 'A == %.4f' % exact)
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
def test_query_compare_column_type(self):
# GH 15492
df = pd.DataFrame({'date': ['2014-01-01', '2014-01-02'],
'real_date': date_range('2014-01-01', periods=2),
'float': [1.1, 1.2],
'int': [1, 2]},
columns=['date', 'real_date', 'float', 'int'])
with ensure_clean_store(self.path) as store:
store.append('test', df, format='table', data_columns=True)
ts = pd.Timestamp('2014-01-01') # noqa
result = store.select('test', where='real_date > ts')
expected = df.loc[[1], :]
tm.assert_frame_equal(expected, result)
for op in ['<', '>', '==']:
# non strings to string column always fail
for v in [2.1, True, pd.Timestamp('2014-01-01'),
pd.Timedelta(1, 's')]:
query = 'date {op} v'.format(op=op)
with pytest.raises(TypeError):
result = store.select('test', where=query)
# strings to other columns must be convertible to type
v = 'a'
for col in ['int', 'float', 'real_date']:
query = '{col} {op} v'.format(op=op, col=col)
with pytest.raises(ValueError):
result = store.select('test', where=query)
for v, col in zip(['1', '1.1', '2014-01-01'],
['int', 'float', 'real_date']):
query = '{col} {op} v'.format(op=op, col=col)
result = store.select('test', where=query)
if op == '==':
expected = df.loc[[0], :]
elif op == '>':
expected = df.loc[[1], :]
else:
expected = df.loc[[], :]
tm.assert_frame_equal(expected, result)
@pytest.mark.parametrize('format', ['fixed', 'table'])
def test_read_hdf_series_mode_r(self, format):
# GH 16583
# Tests that reading a Series saved to an HDF file
# still works if a mode='r' argument is supplied
series = tm.makeFloatSeries()
with ensure_clean_path(self.path) as path:
series.to_hdf(path, key='data', format=format)
result = pd.read_hdf(path, key='data', mode='r')
tm.assert_series_equal(result, series)
@pytest.mark.skipif(not PY36, reason="Need python 3.6")
def test_fspath(self):
with tm.ensure_clean('foo.h5') as path:
with pd.HDFStore(path) as store:
assert os.fspath(store) == str(path)
def test_read_py2_hdf_file_in_py3(self):
# GH 16781
# tests reading a PeriodIndex DataFrame written in Python2 in Python3
# the file was generated in Python 2.7 like so:
#
# df = pd.DataFrame([1.,2,3], index=pd.PeriodIndex(
# ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
# df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p')
expected = pd.DataFrame([1., 2, 3], index=pd.PeriodIndex(
['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
with ensure_clean_store(
tm.get_data_path(
'legacy_hdf/periodindex_0.20.1_x86_64_darwin_2.7.13.h5'),
mode='r') as store:
result = store['p']
assert_frame_equal(result, expected)
class TestHDFComplexValues(Base):
# GH10447
def test_complex_fixed(self):
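# complex64/complex128 frames round-trip through fixed format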
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_table(self):
df = DataFrame(np.random.rand(4, 5).astype(np.complex64),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
df = DataFrame(np.random.rand(4, 5).astype(np.complex128),
index=list('abcd'),
columns=list('ABCDE'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table', mode='w')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_mixed_fixed(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_mixed_table(self):
complex64 = np.array([1.0 + 1.0j, 1.0 + 1.0j,
1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64)
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex64,
'D': complex128,
'E': [1.0, 2.0, 3.0, 4.0]},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['A', 'B'])
result = store.select('df', where='A>2')
assert_frame_equal(df.loc[df.A > 2], result)
with ensure_clean_path(self.path) as path:
df.to_hdf(path, 'df', format='table')
reread = read_hdf(path, 'df')
assert_frame_equal(df, reread)
def test_complex_across_dimensions_fixed(self):
with catch_warnings(record=True):
complex128 = np.array(
[1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
p = Panel({'One': df, 'Two': df})
objs = [s, df, p]
comps = [tm.assert_series_equal, tm.assert_frame_equal,
tm.assert_panel_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='fixed')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_across_dimensions(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
df = DataFrame({'A': s, 'B': s})
with catch_warnings(record=True):
p = Panel({'One': df, 'Two': df})
objs = [df, p]
comps = [tm.assert_frame_equal, tm.assert_panel_equal]
for obj, comp in zip(objs, comps):
with ensure_clean_path(self.path) as path:
obj.to_hdf(path, 'obj', format='table')
reread = read_hdf(path, 'obj')
comp(obj, reread)
def test_complex_indexing_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j],
dtype=np.complex128)
df = DataFrame({'A': [1, 2, 3, 4],
'B': ['a', 'b', 'c', 'd'],
'C': complex128},
index=list('abcd'))
with ensure_clean_store(self.path) as store:
pytest.raises(TypeError, store.append,
'df', df, data_columns=['C'])
def test_complex_series_error(self):
complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
s = Series(complex128, index=list('abcd'))
with ensure_clean_path(self.path) as path:
pytest.raises(TypeError, s.to_hdf, path, 'obj', format='t')
with ensure_clean_path(self.path) as path:
s.to_hdf(path, 'obj', format='t', index=False)
reread = read_hdf(path, 'obj')
tm.assert_series_equal(s, reread)
def test_complex_append(self):
df = DataFrame({'a': np.random.randn(100).astype(np.complex128),
'b': np.random.randn(100)})
with ensure_clean_store(self.path) as store:
store.append('df', df, data_columns=['b'])
store.append('df', df)
result = store.select('df')
assert_frame_equal(pd.concat([df, df], 0), result)
class TestTimezones(Base):
def _compare_with_tz(self, a, b):
tm.assert_frame_equal(a, b)
# compare the zones on each element
for c in a.columns:
for i in a.index:
a_e = a.loc[i, c]
b_e = b.loc[i, c]
if not (a_e == b_e and a_e.tz == b_e.tz):
raise AssertionError(
"invalid tz comparison [%s] [%s]" % (a_e, b_e))
def test_append_with_timezones_dateutil(self):
from datetime import timedelta
# use maybe_get_tz instead of dateutil.tz.gettz to handle the windows
# filename issues.
from pandas._libs.tslibs.timezones import maybe_get_tz
gettz = lambda x: maybe_get_tz('dateutil/' + x)
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00', tz=gettz(
'US/Eastern')) + timedelta(hours=1) * i for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
expected = df[df.A >= df.A[3]]
result = store.select('df_tz', where='A>=df.A[3]')
self._compare_with_tz(result, expected)
# ensure we include dates in DST and STD time here.
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130603',
tz=gettz('US/Eastern'))),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('EET'))),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102',
tz=gettz('US/Eastern')),
B=Timestamp('20130102', tz=gettz('CET'))),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz=gettz('US/Eastern')))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_append_with_timezones_pytz(self):
from datetime import timedelta
# as columns
with ensure_clean_store(self.path) as store:
_maybe_remove(store, 'df_tz')
df = DataFrame(dict(A=[Timestamp('20130102 2:00:00',
tz='US/Eastern') +
timedelta(hours=1) * i
for i in range(5)]))
store.append('df_tz', df, data_columns=['A'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# select with tz aware
self._compare_with_tz(store.select(
'df_tz', where='A>=df.A[3]'), df[df.A >= df.A[3]])
_maybe_remove(store, 'df_tz')
# ensure we include dates in DST and STD time here.
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='US/Eastern')),
index=range(5))
store.append('df_tz', df)
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='EET')),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# this is ok
_maybe_remove(store, 'df_tz')
store.append('df_tz', df, data_columns=['A', 'B'])
result = store['df_tz']
self._compare_with_tz(result, df)
assert_frame_equal(result, df)
# can't append with diff timezone
df = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130102', tz='CET')),
index=range(5))
pytest.raises(ValueError, store.append, 'df_tz', df)
# as index
with ensure_clean_store(self.path) as store:
# GH 4098 example
df = DataFrame(dict(A=Series(lrange(3), index=date_range(
'2000-1-1', periods=3, freq='H', tz='US/Eastern'))))
_maybe_remove(store, 'df')
store.put('df', df)
result = store.select('df')
assert_frame_equal(result, df)
_maybe_remove(store, 'df')
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
def test_tseries_select_index_column(self):
# GH7777
# selecting a UTC datetimeindex column did
# not preserve UTC tzinfo set before storing
# check that no tz still works
rng = date_range('1/1/2000', '1/30/2000')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == DatetimeIndex(result.values).tz
# check utc
rng = date_range('1/1/2000', '1/30/2000', tz='UTC')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == result.dt.tz
# double check non-utc
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store.append('frame', frame)
result = store.select_column('frame', 'index')
assert rng.tz == result.dt.tz
def test_timezones_fixed(self):
with ensure_clean_store(self.path) as store:
# index
rng = date_range('1/1/2000', '1/30/2000', tz='US/Eastern')
df = DataFrame(np.random.randn(len(rng), 4), index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
# as data
# GH11411
_maybe_remove(store, 'df')
df = DataFrame({'A': rng,
'B': rng.tz_convert('UTC').tz_localize(None),
'C': rng.tz_convert('CET'),
'D': range(len(rng))}, index=rng)
store['df'] = df
result = store['df']
assert_frame_equal(result, df)
def test_fixed_offset_tz(self):
rng = date_range('1/1/2000 00:00:00-07:00', '1/30/2000 00:00:00-07:00')
frame = DataFrame(np.random.randn(len(rng), 4), index=rng)
with ensure_clean_store(self.path) as store:
store['frame'] = frame
recons = store['frame']
tm.assert_index_equal(recons.index, rng)
assert rng.tz == recons.index.tz
@td.skip_if_windows
def test_store_timezone(self):
# GH2852
# issue storing datetime.date with a timezone as it resets when read
# back in a new timezone
# original method
with ensure_clean_store(self.path) as store:
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
result = store['obj1']
assert_frame_equal(result, df)
# with tz setting
with ensure_clean_store(self.path) as store:
with set_timezone('EST5EDT'):
today = datetime.date(2013, 9, 10)
df = DataFrame([1, 2, 3], index=[today, today, today])
store['obj1'] = df
with set_timezone('CST6CDT'):
result = store['obj1']
assert_frame_equal(result, df)
def test_legacy_datetimetz_object(self):
# legacy from < 0.17.0
# 8260
expected = DataFrame(dict(A=Timestamp('20130102', tz='US/Eastern'),
B=Timestamp('20130603', tz='CET')),
index=range(5))
with ensure_clean_store(
tm.get_data_path('legacy_hdf/datetimetz_object.h5'),
mode='r') as store:
result = store['df']
assert_frame_equal(result, expected)
def test_dst_transitions(self):
        # make sure we are not failing on transitions
with ensure_clean_store(self.path) as store:
times = pd.date_range("2013-10-26 23:00", "2013-10-27 01:00",
tz="Europe/London",
freq="H",
ambiguous='infer')
for i in [times, times + pd.Timedelta('10min')]:
_maybe_remove(store, 'df')
df = DataFrame({'A': range(len(i)), 'B': i}, index=i)
store.append('df', df)
result = store.select('df')
assert_frame_equal(result, df)
|
the-stack_106_24587 | from __future__ import with_statement
import re
import os
import subprocess
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_str
from sorl.thumbnail.base import EXTENSIONS
from sorl.thumbnail.conf import settings
from sorl.thumbnail.engines.base import EngineBase
from tempfile import NamedTemporaryFile
size_re = re.compile(r'^(?:.+) (?:[A-Z]+) (?P<x>\d+)x(?P<y>\d+)')
class Engine(EngineBase):
"""
Image object is a dict with source path, options and size
"""
def write(self, image, options, thumbnail):
"""
Writes the thumbnail image
"""
if (options['format'] == 'JPEG' and options.get('progressive', settings.THUMBNAIL_PROGRESSIVE)):
image['options']['interlace'] = 'line'
image['options']['quality'] = options['quality']
args = settings.THUMBNAIL_CONVERT.split(' ')
args.append(image['source'] + '[0]')
for k in image['options']:
v = image['options'][k]
args.append('-%s' % k)
if v is not None:
args.append('%s' % v)
flatten = "on"
if 'flatten' in options:
flatten = options['flatten']
if settings.THUMBNAIL_FLATTEN and not flatten == "off":
args.append('-flatten')
suffix = '.%s' % EXTENSIONS[options['format']]
with NamedTemporaryFile(suffix=suffix, mode='rb') as fp:
args.append(fp.name)
args = map(smart_str, args)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
out, err = p.communicate()
if err:
raise Exception(err)
thumbnail.write(fp.read())
def cleanup(self, image):
os.remove(image['source']) # we should not need this now
def get_image(self, source):
"""
Returns the backend image objects from a ImageFile instance
"""
with NamedTemporaryFile(mode='wb', delete=False) as fp:
fp.write(source.read())
return {'source': fp.name, 'options': SortedDict(), 'size': None}
def get_image_size(self, image):
"""
Returns the image width and height as a tuple
"""
if image['size'] is None:
args = settings.THUMBNAIL_IDENTIFY.split(' ')
args.append(image['source'])
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
m = size_re.match(str(p.stdout.read()))
image['size'] = int(m.group('x')), int(m.group('y'))
return image['size']
def is_valid_image(self, raw_data):
"""
This is not very good for imagemagick because it will say anything is
valid that it can use as input.
"""
with NamedTemporaryFile(mode='wb') as fp:
fp.write(raw_data)
fp.flush()
args = settings.THUMBNAIL_IDENTIFY.split(' ')
args.append(fp.name)
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
retcode = p.wait()
return retcode == 0
def _orientation(self, image):
#return image
# XXX need to get the dimensions right after a transpose.
if settings.THUMBNAIL_CONVERT.endswith('gm convert'):
args = settings.THUMBNAIL_IDENTIFY.split()
args.extend(['-format', '%[exif:orientation]', image['source']])
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
result = p.stdout.read().strip()
if result and result != 'unknown':
result = int(result)
options = image['options']
if result == 2:
options['flop'] = None
elif result == 3:
options['rotate'] = '180'
elif result == 4:
options['flip'] = None
elif result == 5:
options['rotate'] = '90'
options['flop'] = None
elif result == 6:
options['rotate'] = '90'
elif result == 7:
options['rotate'] = '-90'
options['flop'] = None
elif result == 8:
options['rotate'] = '-90'
else:
# ImageMagick also corrects the orientation exif data for
# destination
image['options']['auto-orient'] = None
return image
def _colorspace(self, image, colorspace):
"""
`Valid colorspaces
<http://www.graphicsmagick.org/GraphicsMagick.html#details-colorspace>`_.
Backends need to implement the following::
RGB, GRAY
"""
image['options']['colorspace'] = colorspace
return image
def _crop(self, image, width, height, x_offset, y_offset):
"""
Crops the image
"""
image['options']['crop'] = '%sx%s+%s+%s' % (
width, height, x_offset, y_offset
)
image['size'] = (width, height) # update image size
return image
def _scale(self, image, width, height):
"""
Does the resizing of the image
"""
image['options']['scale'] = '%sx%s!' % (width, height)
image['size'] = (width, height) # update image size
return image
def _padding(self, image, geometry, options):
"""
Pads the image
"""
# The order is important. The gravity option should come before extent.
image['options']['background'] = options.get('padding_color')
image['options']['gravity'] = 'center'
image['options']['extent'] = '%sx%s' % (geometry[0], geometry[1])
return image
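if __name__ == '__main__':
    # Illustrative sketch only, not part of the engine (assumptions: the sorl/Django
    # imports above resolve in the running environment, and an ``identify`` line looks
    # roughly like the GraphicsMagick-style sample below). It shows what ``size_re``
    # extracts from the identify output.
    sample = 'photo.png PNG 640x480+0+0 DirectClass 8-bit'
    match = size_re.match(sample)
    if match:
        # Prints the parsed width and height: 640 480
        print(int(match.group('x')), int(match.group('y')))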
|
the-stack_106_24588 | from math import pi, sqrt
import itertools
from typing import List
from raytracer.tuple import (
tuple,
point,
vector,
magnitude,
normalize,
dot,
cross,
Color,
)
from raytracer.util import equal
from raytracer.matrices import Matrix, I
from raytracer.transformations import (
translation,
scaling,
rotation_x,
rotation_y,
rotation_z,
shearing,
)
from raytracer.rays import Ray
from raytracer.materials import Material
from raytracer.shapes import Shape
from raytracer.intersections import Intersection, intersections
class Sphere(Shape):
def local_intersect(self, ray: Ray) -> List[Intersection]:
# The vector from the sphere's center to the ray origin
# Remember: the sphere is centered at the world origin
# r2 = ray.transform(self.transform.inverse())
sphere_to_ray = ray.origin - point(0, 0, 0)
a = dot(ray.direction, ray.direction)
b = 2 * dot(ray.direction, sphere_to_ray)
c = dot(sphere_to_ray, sphere_to_ray) - 1
discriminant = b*b - 4*a*c
if discriminant < 0:
return []
else:
t1 = (-b - sqrt(discriminant)) / (2*a)
t2 = (-b + sqrt(discriminant)) / (2*a)
return intersections(Intersection(t1, self), Intersection(t2, self))
def local_normal_at(self, local_point):
# object_point = self.transform.inverse() * world_point
# world_normal = self.transform.inverse().transpose() * object_normal
# world_normal.w = 0
# return normalize(world_normal)
local_normal = local_point - point(0, 0, 0)
return local_normal
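if __name__ == '__main__':
    # Rough usage sketch, not part of the library. Assumptions (not shown in this
    # file): Sphere() constructs with no arguments, Ray takes (origin, direction),
    # and Intersection exposes a ``t`` attribute. A ray fired from z = -5 along +z
    # should hit the unit sphere at t = 4 and t = 6.
    s = Sphere()
    r = Ray(point(0, 0, -5), vector(0, 0, 1))
    xs = s.local_intersect(r)
    print([i.t for i in xs])  # expected: [4.0, 6.0]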
|
the-stack_106_24590 | # -*- coding: utf-8 -*-
USERS = [
{"id": 0, "first_name": "Palmira", "last_name": "Haig", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 1, "first_name": "Arlette", "last_name": "Lowell", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 2, "first_name": "Neta", "last_name": "Wojcik", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 3, "first_name": "Norberto", "last_name": "Blankenship", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 4, "first_name": "Chi", "last_name": "Channel", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 5, "first_name": "Tamisha", "last_name": "Guy", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 6, "first_name": "Carley", "last_name": "Duffel", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 7, "first_name": "Kai", "last_name": "Robinette", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 8, "first_name": "Hui", "last_name": "Makuch", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 9, "first_name": "Yahaira", "last_name": "Raymer", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 10, "first_name": "Kacy", "last_name": "Mires", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 11, "first_name": "Bertie", "last_name": "Dennard", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 12, "first_name": "Adriane", "last_name": "Cho", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 13, "first_name": "Leena", "last_name": "Minard", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 14, "first_name": "Brynn", "last_name": "Rheaume", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 15, "first_name": "Michelle", "last_name": "Pompey", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 16, "first_name": "Marvis", "last_name": "Mccarron", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 17, "first_name": "Randee", "last_name": "Phenix", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 18, "first_name": "Krystle", "last_name": "Corkery", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 19, "first_name": "Brianna", "last_name": "Crump", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 20, "first_name": "Matthew", "last_name": "Keo", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 21, "first_name": "Debbra", "last_name": "Grinstead", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 22, "first_name": "Dorathy", "last_name": "Deluna", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 23, "first_name": "Kay", "last_name": "Jeans", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 24, "first_name": "Star", "last_name": "Kantz", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 25, "first_name": "Latina", "last_name": "Pursel", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 26, "first_name": "Haywood", "last_name": "So", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 27, "first_name": "Ester", "last_name": "Greenawalt", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 28, "first_name": "Alicia", "last_name": "Nez", "email": "[email protected]", "is_active": 1,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 29, "first_name": "Wallace", "last_name": "Schacht", "email": "[email protected]", "is_active": 1,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 30, "first_name": "Verda", "last_name": "Phifer", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 31, "first_name": "Keiko", "last_name": "Peay", "email": "[email protected]", "is_active": 0,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 32, "first_name": "Lucas", "last_name": "Zoller", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 33, "first_name": "Treasa", "last_name": "Mcduffie", "email": "[email protected]", "is_active": 0,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 34, "first_name": "Demetrice", "last_name": "Arcuri", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 35, "first_name": "Eboni", "last_name": "Thielen", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 36, "first_name": "Corinne", "last_name": "Foy", "email": "[email protected]", "is_active": 0,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 37, "first_name": "Juana", "last_name": "Laney", "email": "[email protected]", "is_active": 0,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 38, "first_name": "Gerald", "last_name": "Egan", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 39, "first_name": "Nickolas", "last_name": "Beeler", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 40, "first_name": "Scarlet", "last_name": "Tannehill", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 41, "first_name": "Luvenia", "last_name": "Shunk", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 42, "first_name": "Marisela", "last_name": "Gormley", "email": "[email protected]", "is_active": 0,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 43, "first_name": "Adena", "last_name": "Trivett", "email": "[email protected]", "is_active": 0,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 44, "first_name": "Ginny", "last_name": "Cuomo", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 45, "first_name": "Dominique", "last_name": "Runion", "email": "[email protected]", "is_active": 0,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 46, "first_name": "Karisa", "last_name": "Flurry", "email": "[email protected]", "is_active": 0,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 47, "first_name": "Brittany", "last_name": "Montandon", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
{"id": 48, "first_name": "Marylynn", "last_name": "Corrie", "email": "[email protected]", "is_active": 0,
"is_vip": 0, "site": "https://raccoon.ninja"},
{"id": 49, "first_name": "Kathlyn", "last_name": "Littell", "email": "[email protected]", "is_active": 0,
"is_vip": 1, "site": "https://raccoon.ninja"},
]
CATALOG = {
"json": [
{
"product": {
"@description": "Cardigan Sweater",
"@product_image": "cardigan.jpg",
"catalog_item": [
{
"@gender": "Men's",
"item_number": "QWZ5671",
"price": "39.95",
"size": [
{
"@description": "Medium",
"color_swatch": [
{
"@image": "red_cardigan.jpg",
"#text": "Red"
},
{
"@image": "burgundy_cardigan.jpg",
"#text": "Burgundy"
}
]
},
{
"@description": "Large",
"color_swatch": [
{
"@image": "red_cardigan.jpg",
"#text": "Red"
},
{
"@image": "burgundy_cardigan.jpg",
"#text": "Burgundy"
}
]
}
]
},
{
"@gender": "Women's",
"item_number": "RRX9856",
"price": "42.50",
"size": [
{
"@description": "Small",
"color_swatch": [
{
"@image": "red_cardigan.jpg",
"#text": "Red"
},
{
"@image": "navy_cardigan.jpg",
"#text": "Navy"
},
{
"@image": "burgundy_cardigan.jpg",
"#text": "Burgundy"
}
]
},
{
"@description": "Medium",
"color_swatch": [
{
"@image": "red_cardigan.jpg",
"#text": "Red"
},
{
"@image": "navy_cardigan.jpg",
"#text": "Navy"
},
{
"@image": "burgundy_cardigan.jpg",
"#text": "Burgundy"
},
{
"@image": "black_cardigan_medium.jpg",
"#text": "Black"
}
]
},
{
"@description": "Large",
"color_swatch": [
{
"@image": "navy_cardigan.jpg",
"#text": "Navy"
},
{
"@image": "black_cardigan_large.jpg",
"#text": "Black"
}
]
},
{
"@description": "Extra Large",
"color_swatch": [
{
"@image": "burgundy_cardigan.jpg",
"#text": "Burgundy"
},
{
"@image": "black_cardigan_xlarge.jpg",
"#text": "Black"
}
]
}
]
}
]
}
}
],
"xml": '<?xml version="1.0"?><catalog><product description="Cardigan Sweater" product_image="cardigan.jpg">'
'<catalog_item gender="Men\'s"><item_number>QWZ5671</item_number><price>39.95</price>'
'<size description="Medium"><color_swatch image="red_cardigan.jpg">Red</color_swatch>'
'<color_swatch image="burgundy_cardigan.jpg">Burgundy</color_swatch></size><size description="Large">'
'<color_swatch image="red_cardigan.jpg">Red</color_swatch>'
'<color_swatch image="burgundy_cardigan.jpg">Burgundy</color_swatch></size></catalog_item>'
'<catalog_item gender="Women\'s"><item_number>RRX9856</item_number><price>42.50</price>'
'<size description="Small"><color_swatch image="red_cardigan.jpg">Red</color_swatch>'
'<color_swatch image="navy_cardigan.jpg">Navy</color_swatch>'
'<color_swatch image="burgundy_cardigan.jpg">Burgundy</color_swatch></size><size description="Medium">'
'<color_swatch image="red_cardigan.jpg">Red</color_swatch>'
'<color_swatch image="navy_cardigan.jpg">Navy</color_swatch>'
'<color_swatch image="burgundy_cardigan.jpg">Burgundy</color_swatch>'
'<color_swatch image="black_cardigan_medium.jpg">Black</color_swatch></size>'
'<size description="Large"><color_swatch image="navy_cardigan.jpg">Navy</color_swatch>'
'<color_swatch image="black_cardigan_large.jpg">Black</color_swatch></size>'
'<size description="Extra Large"><color_swatch image="burgundy_cardigan.jpg">Burgundy</color_swatch>'
'<color_swatch image="black_cardigan_xlarge.jpg">Black</color_swatch></size>'
'</catalog_item></product></catalog>'
}
CD_CATALOG = {
"json": [
{
"TITLE": "Empire Burlesque",
"ARTIST": "Bob Dylan",
"COUNTRY": "USA",
"COMPANY": "Columbia",
"PRICE": "10.90",
"YEAR": "1985"
},
{
"TITLE": "Hide your heart",
"ARTIST": "Bonnie Tyler",
"COUNTRY": "UK",
"COMPANY": "CBS Records",
"PRICE": "9.90",
"YEAR": "1988"
},
{
"TITLE": "Greatest Hits",
"ARTIST": "Dolly Parton",
"COUNTRY": "USA",
"COMPANY": "RCA",
"PRICE": "9.90",
"YEAR": "1982"
},
{
"TITLE": "Still got the blues",
"ARTIST": "Gary Moore",
"COUNTRY": "UK",
"COMPANY": "Virgin records",
"PRICE": "10.20",
"YEAR": "1990"
},
{
"TITLE": "Eros",
"ARTIST": "Eros Ramazzotti",
"COUNTRY": "EU",
"COMPANY": "BMG",
"PRICE": "9.90",
"YEAR": "1997"
},
{
"TITLE": "One night only",
"ARTIST": "Bee Gees",
"COUNTRY": "UK",
"COMPANY": "Polydor",
"PRICE": "10.90",
"YEAR": "1998"
},
{
"TITLE": "Sylvias Mother",
"ARTIST": "Dr.Hook",
"COUNTRY": "UK",
"COMPANY": "CBS",
"PRICE": "8.10",
"YEAR": "1973"
},
{
"TITLE": "Maggie May",
"ARTIST": "Rod Stewart",
"COUNTRY": "UK",
"COMPANY": "Pickwick",
"PRICE": "8.50",
"YEAR": "1990"
},
{
"TITLE": "Romanza",
"ARTIST": "Andrea Bocelli",
"COUNTRY": "EU",
"COMPANY": "Polydor",
"PRICE": "10.80",
"YEAR": "1996"
},
{
"TITLE": "When a man loves a woman",
"ARTIST": "Percy Sledge",
"COUNTRY": "USA",
"COMPANY": "Atlantic",
"PRICE": "8.70",
"YEAR": "1987"
},
{
"TITLE": "Black angel",
"ARTIST": "Savage Rose",
"COUNTRY": "EU",
"COMPANY": "Mega",
"PRICE": "10.90",
"YEAR": "1995"
},
{
"TITLE": "1999 Grammy Nominees",
"ARTIST": "Many",
"COUNTRY": "USA",
"COMPANY": "Grammy",
"PRICE": "10.20",
"YEAR": "1999"
},
{
"TITLE": "For the good times",
"ARTIST": "Kenny Rogers",
"COUNTRY": "UK",
"COMPANY": "Mucik Master",
"PRICE": "8.70",
"YEAR": "1995"
},
{
"TITLE": "Big Willie style",
"ARTIST": "Will Smith",
"COUNTRY": "USA",
"COMPANY": "Columbia",
"PRICE": "9.90",
"YEAR": "1997"
},
{
"TITLE": "Tupelo Honey",
"ARTIST": "Van Morrison",
"COUNTRY": "UK",
"COMPANY": "Polydor",
"PRICE": "8.20",
"YEAR": "1971"
},
{
"TITLE": "Soulsville",
"ARTIST": "Jorn Hoel",
"COUNTRY": "Norway",
"COMPANY": "WEA",
"PRICE": "7.90",
"YEAR": "1996"
},
{
"TITLE": "The very best of",
"ARTIST": "Cat Stevens",
"COUNTRY": "UK",
"COMPANY": "Island",
"PRICE": "8.90",
"YEAR": "1990"
},
{
"TITLE": "Stop",
"ARTIST": "Sam Brown",
"COUNTRY": "UK",
"COMPANY": "A and M",
"PRICE": "8.90",
"YEAR": "1988"
},
{
"TITLE": "Bridge of Spies",
"ARTIST": "T'Pau",
"COUNTRY": "UK",
"COMPANY": "Siren",
"PRICE": "7.90",
"YEAR": "1987"
},
{
"TITLE": "Private Dancer",
"ARTIST": "Tina Turner",
"COUNTRY": "UK",
"COMPANY": "Capitol",
"PRICE": "8.90",
"YEAR": "1983"
},
{
"TITLE": "Midt om natten",
"ARTIST": "Kim Larsen",
"COUNTRY": "EU",
"COMPANY": "Medley",
"PRICE": "7.80",
"YEAR": "1983"
},
{
"TITLE": "Pavarotti Gala Concert",
"ARTIST": "Luciano Pavarotti",
"COUNTRY": "UK",
"COMPANY": "DECCA",
"PRICE": "9.90",
"YEAR": "1991"
},
{
"TITLE": "The dock of the bay",
"ARTIST": "Otis Redding",
"COUNTRY": "USA",
"COMPANY": "Stax Records",
"PRICE": "7.90",
"YEAR": "1968"
},
{
"TITLE": "Picture book",
"ARTIST": "Simply Red",
"COUNTRY": "EU",
"COMPANY": "Elektra",
"PRICE": "7.20",
"YEAR": "1985"
},
{
"TITLE": "Red",
"ARTIST": "The Communards",
"COUNTRY": "UK",
"COMPANY": "London",
"PRICE": "7.80",
"YEAR": "1987"
},
{
"TITLE": "Unchain my heart",
"ARTIST": "Joe Cocker",
"COUNTRY": "USA",
"COMPANY": "EMI",
"PRICE": "8.20",
"YEAR": "1987"
}
],
"xml": '<?xml version="1.0" encoding="UTF-8"?><CATALOG><CD><TITLE>Empire Burlesque</TITLE>'
'<ARTIST>Bob Dylan</ARTIST><COUNTRY>USA</COUNTRY><COMPANY>Columbia</COMPANY><PRICE>10.90</PRICE>'
'<YEAR>1985</YEAR></CD><CD><TITLE>Hide your heart</TITLE><ARTIST>Bonnie Tyler</ARTIST><COUNTRY>UK</COUNTRY>'
'<COMPANY>CBS Records</COMPANY><PRICE>9.90</PRICE><YEAR>1988</YEAR></CD><CD><TITLE>Greatest Hits</TITLE>'
'<ARTIST>Dolly Parton</ARTIST><COUNTRY>USA</COUNTRY><COMPANY>RCA</COMPANY><PRICE>9.90</PRICE>'
'<YEAR>1982</YEAR></CD><CD><TITLE>Still got the blues</TITLE><ARTIST>Gary Moore</ARTIST>'
'<COUNTRY>UK</COUNTRY><COMPANY>Virgin records</COMPANY><PRICE>10.20</PRICE><YEAR>1990</YEAR></CD>'
'<CD><TITLE>Eros</TITLE><ARTIST>Eros Ramazzotti</ARTIST><COUNTRY>EU</COUNTRY><COMPANY>BMG</COMPANY>'
'<PRICE>9.90</PRICE><YEAR>1997</YEAR></CD><CD><TITLE>One night only</TITLE><ARTIST>Bee Gees</ARTIST>'
'<COUNTRY>UK</COUNTRY><COMPANY>Polydor</COMPANY><PRICE>10.90</PRICE><YEAR>1998</YEAR></CD>'
'<CD><TITLE>Sylvias Mother</TITLE><ARTIST>Dr.Hook</ARTIST><COUNTRY>UK</COUNTRY><COMPANY>CBS</COMPANY>'
'<PRICE>8.10</PRICE><YEAR>1973</YEAR></CD><CD><TITLE>Maggie May</TITLE><ARTIST>Rod Stewart</ARTIST>'
'<COUNTRY>UK</COUNTRY><COMPANY>Pickwick</COMPANY><PRICE>8.50</PRICE><YEAR>1990</YEAR></CD>'
'<CD><TITLE>Romanza</TITLE><ARTIST>Andrea Bocelli</ARTIST><COUNTRY>EU</COUNTRY><COMPANY>Polydor</COMPANY>'
'<PRICE>10.80</PRICE><YEAR>1996</YEAR></CD><CD><TITLE>When a man loves a woman</TITLE>'
'<ARTIST>Percy Sledge</ARTIST><COUNTRY>USA</COUNTRY><COMPANY>Atlantic</COMPANY><PRICE>8.70</PRICE>'
'<YEAR>1987</YEAR></CD><CD><TITLE>Black angel</TITLE><ARTIST>Savage Rose</ARTIST><COUNTRY>EU</COUNTRY>'
'<COMPANY>Mega</COMPANY><PRICE>10.90</PRICE><YEAR>1995</YEAR></CD><CD><TITLE>1999 Grammy Nominees</TITLE>'
'<ARTIST>Many</ARTIST><COUNTRY>USA</COUNTRY><COMPANY>Grammy</COMPANY><PRICE>10.20</PRICE><YEAR>1999</YEAR>'
'</CD><CD><TITLE>For the good times</TITLE><ARTIST>Kenny Rogers</ARTIST><COUNTRY>UK</COUNTRY>'
'<COMPANY>Mucik Master</COMPANY><PRICE>8.70</PRICE><YEAR>1995</YEAR></CD><CD><TITLE>Big Willie style</TITLE>'
'<ARTIST>Will Smith</ARTIST><COUNTRY>USA</COUNTRY><COMPANY>Columbia</COMPANY><PRICE>9.90</PRICE>'
'<YEAR>1997</YEAR></CD><CD><TITLE>Tupelo Honey</TITLE><ARTIST>Van Morrison</ARTIST><COUNTRY>UK</COUNTRY>'
'<COMPANY>Polydor</COMPANY><PRICE>8.20</PRICE><YEAR>1971</YEAR></CD><CD><TITLE>Soulsville</TITLE>'
'<ARTIST>Jorn Hoel</ARTIST><COUNTRY>Norway</COUNTRY><COMPANY>WEA</COMPANY><PRICE>7.90</PRICE>'
'<YEAR>1996</YEAR></CD><CD><TITLE>The very best of</TITLE><ARTIST>Cat Stevens</ARTIST><COUNTRY>UK</COUNTRY>'
'<COMPANY>Island</COMPANY><PRICE>8.90</PRICE><YEAR>1990</YEAR></CD><CD><TITLE>Stop</TITLE>'
'<ARTIST>Sam Brown</ARTIST><COUNTRY>UK</COUNTRY><COMPANY>A and M</COMPANY><PRICE>8.90</PRICE>'
'<YEAR>1988</YEAR></CD><CD><TITLE>Bridge of Spies</TITLE><ARTIST>T\'Pau</ARTIST><COUNTRY>UK</COUNTRY>'
'<COMPANY>Siren</COMPANY><PRICE>7.90</PRICE><YEAR>1987</YEAR></CD><CD><TITLE>Private Dancer</TITLE>'
'<ARTIST>Tina Turner</ARTIST><COUNTRY>UK</COUNTRY><COMPANY>Capitol</COMPANY><PRICE>8.90</PRICE>'
'<YEAR>1983</YEAR></CD><CD><TITLE>Midt om natten</TITLE><ARTIST>Kim Larsen</ARTIST><COUNTRY>EU</COUNTRY>'
'<COMPANY>Medley</COMPANY><PRICE>7.80</PRICE><YEAR>1983</YEAR></CD><CD><TITLE>Pavarotti Gala Concert</TITLE>'
'<ARTIST>Luciano Pavarotti</ARTIST><COUNTRY>UK</COUNTRY><COMPANY>DECCA</COMPANY><PRICE>9.90</PRICE>'
'<YEAR>1991</YEAR></CD><CD><TITLE>The dock of the bay</TITLE><ARTIST>Otis Redding</ARTIST>'
'<COUNTRY>USA</COUNTRY><COMPANY>Stax Records</COMPANY><PRICE>7.90</PRICE><YEAR>1968</YEAR></CD><CD>'
'<TITLE>Picture book</TITLE><ARTIST>Simply Red</ARTIST><COUNTRY>EU</COUNTRY><COMPANY>Elektra</COMPANY>'
'<PRICE>7.20</PRICE><YEAR>1985</YEAR></CD><CD><TITLE>Red</TITLE><ARTIST>The Communards</ARTIST>'
'<COUNTRY>UK</COUNTRY><COMPANY>London</COMPANY><PRICE>7.80</PRICE><YEAR>1987</YEAR></CD><CD>'
'<TITLE>Unchain my heart</TITLE><ARTIST>Joe Cocker</ARTIST><COUNTRY>USA</COUNTRY><COMPANY>EMI</COMPANY>'
'<PRICE>8.20</PRICE><YEAR>1987</YEAR></CD></CATALOG>'
}
def convert_user_to_xml(users):
"""
Convert a user (or a list of users) to a XML string.
Why a function to do this, since this is a dummy database with hard-coded values? Well, it must be fun for me too.
:param users: user or list of users.
:return: xml string
"""
base = '<user><id is_active="{is_active}" is_vip="{is_vip}">{id}</id><first_name>{first_name}</first_name>' \
           '<last_name>{last_name}</last_name><email>{email}</email><is_active>{is_active}</is_active>' \
'<is_vip>{is_vip}</is_vip><site>https://raccoon.ninja</site></user>'
if not isinstance(users, (list, tuple)):
users = [users,]
user_list = list()
for user in users:
user_list.append(base.format(
id=user.get("id"),
first_name=user.get("first_name"),
last_name=user.get("last_name"),
email=user.get("email"),
is_active=user.get("is_active"),
is_vip=user.get("is_vip")
))
return """<?xml version="1.0" encoding="UTF-8"?><users>{}</users>""".format("".join(user_list))
|
the-stack_106_24591 | # Copyright (c) 2012-2018, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
from .validators import boolean, integer, json_checker, double
try:
from awacs.aws import Policy
policytypes = (dict, Policy)
except ImportError:
policytypes = dict,
class RetentionPeriod(AWSProperty):
props = {
'NumberOfDays': (integer, False),
'Unlimited': (boolean, False),
}
class Channel(AWSObject):
resource_type = "AWS::IoTAnalytics::Channel"
props = {
'ChannelName': (basestring, False),
'RetentionPeriod': (RetentionPeriod, False),
'Tags': ((Tags, list), False),
}
class AddAttributes(AWSProperty):
props = {
'Attributes': (json_checker, False),
'Name': (basestring, False),
'Next': (basestring, False),
}
class ActivityChannel(AWSProperty):
props = {
'ChannelName': (basestring, False),
'Name': (basestring, False),
'Next': (basestring, False),
}
class Datastore(AWSProperty):
props = {
'DatastoreName': (basestring, False),
'Name': (basestring, False),
}
class DeviceRegistryEnrich(AWSProperty):
props = {
'Attribute': (basestring, False),
'Name': (basestring, False),
'Next': (basestring, False),
'RoleArn': (basestring, False),
'ThingName': (basestring, False),
}
class DeviceShadowEnrich(AWSProperty):
props = {
'Attribute': (basestring, False),
'Name': (basestring, False),
'Next': (basestring, False),
'RoleArn': (basestring, False),
'ThingName': (basestring, False),
}
class Filter(AWSProperty):
props = {
'Filter': (basestring, False),
'Name': (basestring, False),
'Next': (basestring, False),
}
class Lambda(AWSProperty):
props = {
'BatchSize': (integer, False),
'LambdaName': (basestring, False),
'Name': (basestring, False),
'Next': (basestring, False),
}
class Math(AWSProperty):
props = {
'Attribute': (basestring, False),
'Math': (basestring, False),
'Name': (basestring, False),
'Next': (basestring, False),
}
class RemoveAttributes(AWSProperty):
props = {
'Attributes': ([basestring], False),
'Name': (basestring, False),
'Next': (basestring, False),
}
class SelectAttributes(AWSProperty):
props = {
'Attributes': ([basestring], False),
'Name': (basestring, False),
'Next': (basestring, False),
}
class Activity(AWSProperty):
props = {
'AddAttributes': (AddAttributes, False),
'Channel': (ActivityChannel, False),
'Datastore': (Datastore, False),
'DeviceRegistryEnrich': (DeviceRegistryEnrich, False),
'DeviceShadowEnrich': (DeviceShadowEnrich, False),
'Filter': (Filter, False),
'Lambda': (Lambda, False),
'Math': (Math, False),
'RemoveAttributes': (RemoveAttributes, False),
'SelectAttributes': (SelectAttributes, False),
}
class Pipeline(AWSObject):
resource_type = "AWS::IoTAnalytics::Pipeline"
props = {
'PipelineActivities': ([Activity], True),
'PipelineName': (basestring, False),
'Tags': ((Tags, list), False),
}
class RetentionPeriod(AWSProperty):
props = {
'NumberOfDays': (integer, False),
'Unlimited': (boolean, False),
}
class Datastore(AWSObject):
resource_type = "AWS::IoTAnalytics::Datastore"
props = {
'DatastoreName': (basestring, False),
'RetentionPeriod': (RetentionPeriod, False),
'Tags': ((Tags, list), False),
}
class ResourceConfiguration(AWSProperty):
props = {
'ComputeType': (basestring, True),
'VolumeSizeInGB': (integer, True),
}
class DatasetContentVersionValue(AWSProperty):
props = {
'DatasetName': (basestring, False),
}
class OutputFileUriValue(AWSProperty):
props = {
'FileName': (basestring, False),
}
class Variable(AWSProperty):
props = {
'DatasetContentVersionValue': (DatasetContentVersionValue, False),
'DoubleValue': (double, False),
'OutputFileUriValue': (OutputFileUriValue, False),
'StringValue': (basestring, False),
'VariableName': (basestring, False)
}
class ContainerAction(AWSProperty):
props = {
'ExecutionRoleArn': (basestring, True),
'Image': (basestring, True),
'ResourceConfiguration': (ResourceConfiguration, False),
'Variables': ([Variable], False),
}
class DeltaTime(AWSProperty):
props = {
'TimeExpression': (basestring, True),
'OffsetSeconds': (integer, True),
}
class QueryActionFilter(AWSProperty):
props = {
'DeltaTime': (DeltaTime, False),
}
class QueryAction(AWSProperty):
props = {
'Filters': ([QueryActionFilter], False),
'SqlQuery': (basestring, False),
}
class Action(AWSProperty):
props = {
'ActionName': (basestring, True),
'ContainerAction': (ContainerAction, False),
'QueryAction': (QueryAction, False)
}
class IotEventsDestinationConfiguration(AWSProperty):
props = {
'InputName': (basestring, True),
'RoleArn': (basestring, True),
}
class GlueConfiguration(AWSProperty):
props = {
'DatabaseName': (basestring, True),
'TableName': (basestring, True),
}
class S3DestinationConfiguration(AWSProperty):
props = {
'Bucket': (basestring, True),
'GlueConfiguration': (GlueConfiguration, False),
'Key': (basestring, True),
'RoleArn': (basestring, True),
}
class DatasetContentDeliveryRuleDestination(AWSProperty):
props = {
'IotEventsDestinationConfiguration':
(IotEventsDestinationConfiguration, False),
'S3DestinationConfiguration': (S3DestinationConfiguration, False),
}
class DatasetContentDeliveryRule(AWSProperty):
props = {
'Destination': (DatasetContentDeliveryRuleDestination, True),
'EntryName': (basestring, False),
}
class Schedule(AWSProperty):
props = {
'ScheduleExpression': (basestring, True),
}
class TriggeringDataset(AWSProperty):
props = {
'DatasetName': (basestring, True),
}
class Trigger(AWSProperty):
props = {
'Schedule': (Schedule, False),
'TriggeringDataset': (TriggeringDataset, False),
}
class VersioningConfiguration(AWSProperty):
props = {
'MaxVersions': (integer, False),
'Unlimited': (boolean, False),
}
class Dataset(AWSObject):
resource_type = "AWS::IoTAnalytics::Dataset"
props = {
'Actions': ([Action], True),
'ContentDeliveryRules': ([DatasetContentDeliveryRule], False),
'DatasetName': (basestring, False),
'RetentionPeriod': (RetentionPeriod, False),
'Tags': (Tags, False),
'Triggers': ([Trigger], False),
'VersioningConfiguration': (VersioningConfiguration, False),
}
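if __name__ == '__main__':
    # Minimal usage sketch (runs under the Python 2 style this module uses, since
    # the props reference ``basestring``; the logical resource names are arbitrary):
    # build a template with a Channel and a Datastore and print the generated JSON.
    from troposphere import Template
    t = Template()
    t.add_resource(Channel('AnalyticsChannel', ChannelName='mychannel'))
    t.add_resource(Datastore('AnalyticsDatastore', DatastoreName='mydatastore'))
    print(t.to_json())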
|
the-stack_106_24592 | """Master server for lab-nanny
Collects data from the different nodes and makes it available to the
clients using websockets.
The functionality of the master server is to join the data from the
different nodes and make it available in two forms:
-- to clients, via websockets
-- to a database, for long-term storage
To do this, the master uses the MasterServer.tick method, which
-- submits updates to the clients
-- sends requests for data to the nodes
(in this order)
By centralizing the communications (that is, nodes send updates to
MasterServer, which then sends them to the clients), we reduce the
number of connections required from (#clients * #nodes) to
(#clients + #nodes)
The master server handles the connections both with the inside (nodes)
and the outside (clients) using two classes: NodeHandler and
ClientHandler, which are classes derived from the
tornado.websocket.WebSocketHandler class.
The server uses an auxiliary communications handler class (CommsHandler)
which keeps a list of nodes and clients, and the last data from the nodes.
"""
# Master server for lab-nanny
#
# Collects data from the different nodes and makes it available to the clients through websockets.
#!/usr/bin/python
#TODO: have a way to "disconnect" the nodes when they do disconnect.
import tornado.httpserver
import tornado.websocket
import tornado.ioloop as ioloop
import tornado.web
import tornado
from tornado.websocket import WebSocketClosedError
import signal
import argparse
import time
from database.DBHandler import DBHandler as DBHandler
from servers.header import MST_HEADER
import uuid
import socket
import json
from json2html import json2html
SOCKETPORT = 8001
SLAVE_SOCKETNAME = r'/nodes_ws'
CLIENT_SOCKETNAME = r'/client_ws'
STATUS_ADDR = r'/status'
DEFAULTMESSAGE = 'X,50,0'
DEFAULTDBNAME = 'example.db'
PERIODICITY = 100
DB_PERIODICITY = 30000 #Save data to db every 30 s (value in ms)
TFORMAT = '%y/%m/%d %H:%M:%S'
METAKEYWORD = 'meta'
CONNCLOSEDSTR = 'Connection closed'
condition_trap = {'name':'Trap unlock',
'obs_lab':'lab7',
'obs_ch':'ch4',
'obs_range':(1,1.5),
'target_lab':'lab7',
'target_ch':13,
'target_val':1,
'message':'Trap unlocked'}
condition_temp = {'name':'Temperature changes',
'obs_lab':'lab7',
'obs_ch':'ch2',
'obs_range':(19,23),
'target_lab':'lab7',
'target_ch':13,
'target_val':1,
'message':'Temperature outside of bounds'}
class MasterServer(object):
""" Class that runs the Master Server for lab-nanny.
It keeps a NodeHandler and a ClientHandler object to communicate with
the slave nodes and the clients, which in turn use an instance of the
CommsHandler class to do internal communications.
Periodically (every fraction of a second), the Master server polls the
nodes for data, and sends the results to the clients.
    Additionally, with a different periodicity (~30 s by default) the Master server
saves a copy of the data to a database.
"""
def __init__(self, slave_socketname = SLAVE_SOCKETNAME,
socketport=SOCKETPORT,
client_socketname=CLIENT_SOCKETNAME,
periodicity=PERIODICITY,
db_periodicity = DB_PERIODICITY,
status_addr = STATUS_ADDR,
verbose = True):
#Init parameters
self.socketport = socketport
self.slave_socketname = slave_socketname
self.client_socketname = client_socketname
self.status_addr = status_addr
self.callback_periodicity = periodicity
self.db_callback_periodicity = db_periodicity
self.verbose = verbose
self.callback = []
self.dbcallback = []
self.HTTPserver = []
self._conditions = [] # list of dictionaries
# Create instance of the CommsHandler to mediate communications between
# node and client handlers
self.comms_handler = CommsHandler()
# Add callback db_metadata_append upon change to the metadata in nodes
self.comms_handler.bind_to_metadata_change(self.db_metadata_append)
# Also, start communication with the database
self.db_handler = DBHandler(db_name=DEFAULTDBNAME)
# Init program
self._conditions.append(condition_trap)
self._conditions.append(condition_temp)
self.run()
def run(self):
""" Main function of the MasterServer class.
It creates a tornado web application with two websocket handlers and
one web RequestHandler: the two websockets are one for the nodes,
and one for the clients, listening on the same port
        (self.socketport), but using different names (the defaults are
        '/nodes_ws' and '/client_ws' respectively); the web request handler
shows information about the status of the master server using the same
socket and a different address ('/status').
Afterwards, this method initialises two periodic callbacks:
- One that manages the node/client communications, typically with a
sub-second periodicity
- Another one to store long-term traces of the data to a database
        (every ~30 s by default)
"""
self.application = tornado.web.Application([(self.slave_socketname,
NodeHandler,
{'comms_handler':self.comms_handler,
'verbose':self.verbose}),
(self.client_socketname,
ClientHandler,
{'comms_handler':self.comms_handler,
'verbose':self.verbose}),
(self.status_addr,
StatusHandler,
{'comms_handler':self.comms_handler})])
try:
self.HTTPserver = self.application.listen(self.socketport)
fqdn = socket.getfqdn()
alias = socket.gethostbyname(socket.gethostname())
print('Setting up connections:\n-----------------------------------')
print('Status page: @ {}:{}{}, ({})'.format(fqdn,
self.socketport,
self.status_addr,
alias))
print('Websockets opened:')
print('-Client WS EST @ {}:{}{}, ({})'.format(fqdn,
self.socketport,
self.client_socketname,
alias))
print('-Nodes WS EST @ {}:{}{}, ({})'.format(fqdn,
self.socketport,
self.slave_socketname,
alias))
print('-----------------------------------')
except socket.error as error:
#Catch the error if the connections are already present:
if error.errno == 10048:
pass
else:
raise
self.callback= ioloop.PeriodicCallback(self.tick,
self.callback_periodicity)
self.callback.start()
print('\nStarting ioloop')
# To save to DB:
self.dbcallback= ioloop.PeriodicCallback(self.db_tick,
self.db_callback_periodicity)
self.dbcallback.start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
ioloop.IOLoop.instance().stop()
print('(MST {}) Exiting gracefully... '.format(time.strftime(TFORMAT)))
finally:
self.on_close()
def tick(self):
""" Function called periodically to manage node/client communication
- First, the function sends the last data (obtained from the nodes)
to the clients
- Then, it requests more data to the nodes.
By first sending the data, and then asking for more, we make sure the
nodes have time to send the data back to the MasterServer before
sending that data to the clients; this comes at the expense of sending
"old data" (with a repetition period), which has no impact unless the
application is time-critical.
"""
# TODO: should only send data to the right client connection?, instead of relying on the nodes to check whether the message is for them?
try:
# If the NodeHandler decides to write messages to the clients upon
# reception of each message, comment this line
ClientHandler.broadcast(self.comms_handler.last_data)
# Write a command with no side consequences. The 'X' ensures that
# all nodes reply
msg = DEFAULTMESSAGE
broadcast(self.comms_handler.nodes,msg)
self.check_conditions()
except WebSocketClosedError:
print('Websocket closed')
#In case we want to exit, we send a KeyboardInterrupt
except KeyboardInterrupt:
raise
def db_tick(self):
""" Function called periodically to save data to the database
This function generates an entry in the database for each node ID
held in the CommsHandler.last_data instance.
The details of writing to the database are found in the
database.DBHandler module.
"""
# Write values to db (called every N seconds, probably 30-60)
# if self.verbose:
## CHECK HERE IF THE METADATA HAS BEEN ADDED
num_connected_devices = len(self.comms_handler.last_data)
if num_connected_devices>0:
print('(MST {}) Adding {} entries to DB '\
.format(time.strftime(TFORMAT),num_connected_devices))
for id in self.comms_handler.last_data:
datadict = self.comms_handler.last_data[id]
# Add data to observations table
# Check if table with name "id" exists
# Add data to specific table for ID
self.db_handler.add_database_entry(datadict)
self.db_handler.commit()
def db_metadata_append(self,idx):
""" Function called when a new node transmits its metadata
This function generates an entry in the database for each new node
The entry in the database is composed of a timestamp, a username, and the JSON string.
"""
print('(MST {}) Updating metadata'.format(time.strftime(TFORMAT)))
# Metadata can be updated upon (re)connection, or when the connection
# is closing. When (re)connecting, the metadata is a dictionary
# which contains, amongst others, a 'user' key. This is not the
# case upon closing the connection, thus we need the user from
# somewhere else.
if isinstance(self.comms_handler.metadata[idx],dict):
user = self.comms_handler.metadata[idx]['user']
else:
user = self.comms_handler.last_data[idx]['user']
self.db_handler.register_new_metadata(user,self.comms_handler.metadata[idx])
def on_close(self):
self.db_handler.close()
def check_conditions(self):
for condition in self._conditions:
# if obs_lab/obs_ch outside obs_range:
# send target_lab/target_ch the target_val
lab = condition['obs_lab']
observ_channel = condition['obs_ch']
range_boundary = condition['obs_range']
target_lab = condition['target_lab']
target_channel = condition['target_ch']
target_value = condition['target_val']
node_id = self.comms_handler.get_nodeID_by_user(lab)
if len(node_id)>0:
current_observed_val = self.comms_handler.last_data[node_id[0]][observ_channel]
if not range_boundary[0]<= current_observed_val <= range_boundary[1]:
# Add here entry to database when condition is not met
target_id = self.comms_handler.get_nodeID_by_user(target_lab)
msg = target_lab+','+str(target_channel)+','+str(target_value)
broadcast(self.comms_handler.nodes,msg)
print(condition['message'])
print('{} <= {} <= {}'.format(range_boundary[0],current_observed_val,range_boundary[1]))
else:
pass
class NodeHandler(tornado.websocket.WebSocketHandler):
""" Class that handles the communication via websockets with the slave nodes.
"""
node_dict = {}
def initialize(self, comms_handler,verbose=True):
"""Initialisation of an object of the NodeHandler class.
We provide a communications handler object which keeps a list of the nodes
and clients, and a list of the last messages from the nodes.
:param comms_handler:
:type comms_handler: CommsHandler
:param verbose: True for verbose output
:return:
"""
self.__comms_handler = comms_handler
self.verbose = verbose
def open(self):
""" Callback executed upon opening a new slave node connection.
This function adds the new connection to the class "nodes" list and
provides a unique id to the connection using the uuid.uuid4().hex
function.
:return:
"""
        # The node could be configured here, e.g. with a dictionary of the channels it exposes
#self.write_message('Init')
self.id = uuid.uuid4().hex
NodeHandler.node_dict[self.id] = self
ip = self.request.remote_ip
print('(NDH {}) New NODE {} ({}). (out of {}) ' \
.format(time.strftime(TFORMAT),
socket.getfqdn(ip),
ip,
len(NodeHandler.node_dict)))
print('(NDH) UUID: {}'.format(self.id))
def on_message(self, message):
""" Callback executed upon message reception from the master server.
The message is a JSON string, which is converted to a dictionary.
:param message:
:return:
"""
## TODO: maybe we can code here a case in which we configure
## For example, we can write a "configure" key in the dictionary
message_dict = json.loads(message)
if METAKEYWORD not in message_dict:
if self.verbose:
if not message_dict['error']:
print('(NDH) time: {0:.3f}, user: {1}, error: {2}, ch0: {3}'\
.format(message_dict["x"],
message_dict["user"],
message_dict["error"],
message_dict["ch0"]))
else:
print('(NDH) time: {0:.3f}, user: {1}, error: {2}'\
.format(message_dict["x"],
message_dict["user"],
message_dict["error"]))
#There are two ways in which we can pass the data to the clients:
# - Store the data in the self.__comms_handler.last_data dictionary
            # - Send the data to the clients every time a message is received
# The first one helps with synchronizing sending the data to the clients.
# The second one is more immediate, but it might impact the performance of the network,
# since we communicate with each of the clients on every message received.
            # To use the first method, keep the line below and make sure that the "tick()" function
            # in the master server calls ClientHandler.broadcast(self.comms_handler.last_data):
self.__comms_handler.last_data[self.id] = message_dict
else:
self.user = message_dict['user']
self.__comms_handler.add_metadata(self.id,message_dict)
# To use the second method, uncomment this other line
#for client in self.__comms_handler.clients:
# client.write_message(message)
def on_close(self):
# Add log to metadata table in database
self.__comms_handler.add_metadata(self.id,CONNCLOSEDSTR)
# Remove nodehandler from the comms_handler instance and the class'
# node_list.
self.__comms_handler.remove_key(self.id)
NodeHandler.node_dict.pop(self.id, None)
ip = self.request.remote_ip
user = self.user
print('(NDH {}) Connection with {} ({}) closed '\
.format(time.strftime(TFORMAT),
ip, user))
def check_origin(self, origin):
#TODO: change this to actually check the origin
return True
@classmethod
def broadcast_to_nodes(cls,msg=DEFAULTMESSAGE):
""" Function to send a message to all the nodes held in the self.__comms_handler nodes list.
:param msg: message to broadcast
:return:
"""
#In case we want to exit, we send a KeyboardInterrupt
try:
broadcast(cls.node_dict, msg)
except KeyboardInterrupt:
raise
class ClientHandler(tornado.websocket.WebSocketHandler):
""" Class that handles the communication via websockets with the
    clients.
"""
client_list = []
def initialize(self, comms_handler,verbose=False):
""" Initialisation of an object of the ClientHandler class.
We provide a communications handler object which keeps a list of the
nodes and clients, and a list of the last messages from the nodes.
:param comms_handler:
:type comms_handler: CommsHandler
:param verbose: True for verbose output
:return:
"""
self.__comms_handler = comms_handler
self.verbose = verbose
def open(self):
""" Callback executed upon opening a new client connection.
This function adds the new connection to the class "client" list.
:return:
"""
# We could do here the configuration of the node, like a dictionary with the channels exposed
ClientHandler.client_list.append(self)
print('(CLH {}) New connection from {}. Total of clients: {}'\
.format(time.strftime(TFORMAT),
self.request.remote_ip,
len(ClientHandler.client_list)))
def on_message(self, message):
""" Callback executed upon message reception from the client.
The message is a JSON string, which is then broadcasted to all the
nodes sequentially.
:param message:
:return:
"""
if self.verbose:
print('(CLH {}) Message received from client: {}'\
.format(time.strftime(TFORMAT),
message))
for node in self.__comms_handler.nodes:
self.__comms_handler.nodes[node].write_message(message)
def on_close(self):
print('(CLH {}) Connection closed'\
.format(time.strftime(TFORMAT)))
ClientHandler.client_list.remove(self)
print(ClientHandler.client_list)
def check_origin(self, origin):
#TODO: should actually check the origin
return True
@classmethod
def broadcast(cls, msg):
for client in cls.client_list:
client.write_message(msg)
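# Client-side sketch (illustrative only; assumes the server is reachable with the
# defaults above, i.e. ws://<host>:8001/client_ws): a Tornado coroutine can
# subscribe to the broadcast stream roughly as follows:
#
#   from tornado import gen
#   from tornado.websocket import websocket_connect
#
#   @gen.coroutine
#   def listen():
#       conn = yield websocket_connect('ws://localhost:8001/client_ws')
#       while True:
#           msg = yield conn.read_message()
#           if msg is None:   # connection closed by the server
#               break
#           print(msg)        # last data from every node, keyed by node UUID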
class StatusHandler(tornado.web.RequestHandler):
def initialize(self, comms_handler):
"""Initialisation of an object of the NodeHandler class.
We provide a communications handler object which keeps a list of the nodes
and clients, and a list of the last messages from the nodes.
:param comms_handler:
:type comms_handler: CommsHandler
:return:
"""
self.__comms_handler = comms_handler
def get(self):
# Time
fetch_time = time.strftime(TFORMAT)
self.write('<meta http-equiv="refresh" content="10">')
self.write(' <style> .wrapper {display:flex}</style>')
self.write('<p> TIME: {}</p>'.format(fetch_time))
# Nodes
num_nodes = len(self.__comms_handler.nodes)
self.write("<h3>Number of connected nodes: {}</h3><ul>".format(num_nodes))
for node_key in self.__comms_handler.nodes:
node = self.__comms_handler.nodes[node_key]
if 'user' in node.__dict__:
user = node.user
else:
user ='no ID'
self.write('<li>{} ({})</li>'.format(socket.getfqdn(node.request.remote_ip),
user))
# Clients
num_clients = len(self.__comms_handler.clients)
self.write("</ul><h3>Number of connected clients: {}</h3><ul style>".format(num_clients))
for client in self.__comms_handler.clients:
self.write('<li>{}</li>'.format(socket.getfqdn(client.request.remote_ip)))
self.write("</ul><h3>Last data: </h3>")
self.write("<div class=wrapper>")
for node_id in self.__comms_handler.last_data:
last_data = self.__comms_handler.last_data[node_id]
self.write('<p>{} {}</p>'.format(last_data['user'],
json2html.convert(json=last_data)))
self.write("</div>")
class CommsHandler(object):
""" Class that keeps references of the nodes and the clients for
communication purposes
It also keeps a dictionary with a reference to the last data sent
(self.last_data) with the keys being the ids of the NodeHandler
instances, and another one (self.metadata) which stores the metadata
(that is, the "contents" of each channel in the self.last_data
dictionaries).
Whenever the connection between the master and the node is (re)
established, the metadata corresponding to that id needs to be
recorded by an external class. To do this, we use an observer
pattern in which the external class observes changes to a property
(in this case, self.last_metadata_id) from the outside using the
self.bind_to function and perform some callback whenever this
value changes.
"""
def __init__(self):
        self.nodes = NodeHandler.node_dict  # dict: node id -> NodeHandler
        self.clients = ClientHandler.client_list  # list of ClientHandler
#Data dictionary
self.last_data = {} #dictionary
#Metadata dictionary
self.metadata = {} #dictionary
self._last_metadata_id = []
self._metadata_observers= []
def get_last_metadata_id(self):
return self._last_metadata_id
def set_last_metadata_id(self, value):
#print('setting new metadata id')
self._last_metadata_id = value
for callback in self._metadata_observers:
callback(value)
last_metadata_id = property(get_last_metadata_id,set_last_metadata_id)
def bind_to_metadata_change(self, callback):
''' Binds callbacks to changes in the values of self._last_metadata_id
This function is used to add metadata to the database upon (re)connection
of the server/client link.
:param callback:
:return:
'''
self._metadata_observers.append(callback)
def add_metadata(self, id, contents):
print(contents)
self.metadata[id] = contents
self.last_metadata_id = id # This triggers the callback
def remove_key(self,id):
"""
Removes the node with a given id from the comms_handler.
We need to make sure that both the last_data and the metadata
entries are removed
:param id: the UUID given by the MasterServer to the node
:type id: str
:return:
"""
self.last_data.pop(id,None)
self.metadata.pop(id,None)
def get_nodeID_by_user(self,user):
""" Returns the node.id of the node with a given user name
Since the user is first obtained after getting data, we infer
it from the information in the self.last_data dictionary.
:param user: The laboratory name
:type user: str
:return: Returns the UUID given by the master server to the node
with a given username
"""
return [key for key in self.last_data if self.last_data[key]['user'] == user]
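# Illustrative sketch (not part of the original module): how an external class
# is expected to use the observer pattern described in the CommsHandler
# docstring.  The function and callback names below are assumptions added for
# illustration only.
def _example_bind_metadata_observer(comms_handler):
    """ Bind a callback that fires whenever CommsHandler.add_metadata is
    called, i.e. whenever the master/node link is (re)established and new
    metadata arrives.
    :param comms_handler:
    :type comms_handler: CommsHandler
    :return:
    """
    def _on_new_metadata(node_id):
        # comms_handler.metadata[node_id] now holds the channel contents
        print('(EXAMPLE) New metadata for node {}: {}'
              .format(node_id, comms_handler.metadata.get(node_id)))
    comms_handler.bind_to_metadata_change(_on_new_metadata)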
########################################
def broadcast(dictionary_of_endpoints, msg):
""" Broadcasts a message to a list of endpoints using the "write_message"
method.
:param dictionary_of_endpoints:
:param msg:
:return:
"""
for endpoint in dictionary_of_endpoints:
dictionary_of_endpoints[endpoint].write_message(msg)
def main1(periodicity=100, verbose=0):
my_master_server = MasterServer(periodicity=periodicity,
verbose=verbose)
return my_master_server
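# Illustrative sketch (assumption, not part of the original module): the
# `periodicity` argument is typically wired to a tornado.ioloop.PeriodicCallback
# so that broadcast_to_nodes polls the connected nodes at a fixed interval.
def _example_schedule_node_polling(periodicity_ms=PERIODICITY):
    """ Returns a started PeriodicCallback that polls the nodes every
    `periodicity_ms` milliseconds.  The helper name is an assumption added
    for illustration only.
    """
    poller = tornado.ioloop.PeriodicCallback(NodeHandler.broadcast_to_nodes,
                                             periodicity_ms)
    poller.start()
    return poller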
def signal_handler(signum,frame):
tornado.ioloop.IOLoop.instance().stop()
if __name__ == "__main__":
print(MST_HEADER)
    print()
parser = argparse.ArgumentParser()
parser.add_argument("-pr","--periodicity",
help="periodicity to poll nodes",
type=int,default=PERIODICITY)
parser.add_argument("-dbpr","--database_periodicity",
help="periodicity of saving data to database",
type=int,default=DB_PERIODICITY)
parser.add_argument("-v","--verbose",help="Activate verbose",
type=int,default=0)
args = parser.parse_args()
signal.signal(signal.SIGINT,signal_handler)
main1(periodicity=args.periodicity,
verbose=args.verbose)
|
the-stack_106_24593 | """Tests for hermite module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.hermite as herm
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
H0 = np.array([1])
H1 = np.array([0, 2])
H2 = np.array([-2, 0, 4])
H3 = np.array([0, -12, 0, 8])
H4 = np.array([12, 0, -48, 0, 16])
H5 = np.array([0, 120, 0, -160, 0, 32])
H6 = np.array([-120, 0, 720, 0, -480, 0, 64])
H7 = np.array([0, -1680, 0, 3360, 0, -1344, 0, 128])
H8 = np.array([1680, 0, -13440, 0, 13440, 0, -3584, 0, 256])
H9 = np.array([0, 30240, 0, -80640, 0, 48384, 0, -9216, 0, 512])
Hlist = [H0, H1, H2, H3, H4, H5, H6, H7, H8, H9]
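# Hlist holds the power-series (ordinary polynomial basis) coefficients of the
# first ten "physicists'" Hermite polynomials, lowest degree first; e.g.
# H2 = [-2, 0, 4] encodes H_2(x) = 4*x**2 - 2.  The conversion tests below
# expect herm.herm2poly([0]*i + [1]) to reproduce Hlist[i].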
def trim(x):
return herm.hermtrim(x, tol=1e-6)
class TestConstants(TestCase):
def test_hermdomain(self):
assert_equal(herm.hermdomain, [-1, 1])
def test_hermzero(self):
assert_equal(herm.hermzero, [0])
def test_hermone(self):
assert_equal(herm.hermone, [1])
def test_hermx(self):
assert_equal(herm.hermx, [0, .5])
class TestArithmetic(TestCase):
x = np.linspace(-3, 3, 100)
def test_hermadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herm.hermadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermsub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herm.hermsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermmulx(self):
assert_equal(herm.hermmulx([0]), [0])
assert_equal(herm.hermmulx([1]), [0, .5])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i, 0, .5]
assert_equal(herm.hermmulx(ser), tgt)
def test_hermmul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = herm.hermval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = herm.hermval(self.x, pol2)
pol3 = herm.hermmul(pol1, pol2)
val3 = herm.hermval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_hermdiv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = herm.hermadd(ci, cj)
quo, rem = herm.hermdiv(tgt, ci)
res = herm.hermadd(herm.hermmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 1., .75])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
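    # c2d and c3d are separable (outer products of c1d), so the 2d/3d
    # evaluations below factorize, e.g.
    # hermval2d(x1, x2, c2d) == hermval(x1, c1d)*hermval(x2, c1d); y holds the
    # reference values of 1 + 2*x + 3*x**2 (the power-basis form of c1d) at x.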
def test_hermval(self):
#check empty input
assert_equal(herm.hermval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Hlist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = herm.hermval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(herm.hermval(x, [1]).shape, dims)
assert_equal(herm.hermval(x, [1, 0]).shape, dims)
assert_equal(herm.hermval(x, [1, 0, 0]).shape, dims)
def test_hermval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = herm.hermval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_hermval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = herm.hermval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_hermgrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herm.hermgrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermgrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_hermgrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herm.hermgrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herm.hermgrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_hermint(self):
# check exceptions
assert_raises(ValueError, herm.hermint, [0], .5)
assert_raises(ValueError, herm.hermint, [0], -1)
assert_raises(ValueError, herm.hermint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = herm.hermint([0], m=i, k=k)
assert_almost_equal(res, [0, .5])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
hermpol = herm.poly2herm(pol)
hermint = herm.hermint(hermpol, m=1, k=[i])
res = herm.herm2poly(hermint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
hermpol = herm.poly2herm(pol)
hermint = herm.hermint(hermpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(herm.hermval(-1, hermint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
hermpol = herm.poly2herm(pol)
hermint = herm.hermint(hermpol, m=1, k=[i], scl=2)
res = herm.herm2poly(hermint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1)
res = herm.hermint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1, k=[k])
res = herm.hermint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1, k=[k], lbnd=-1)
res = herm.hermint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herm.hermint(tgt, m=1, k=[k], scl=2)
res = herm.hermint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herm.hermint(c) for c in c2d.T]).T
res = herm.hermint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermint(c) for c in c2d])
res = herm.hermint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermint(c, k=3) for c in c2d])
res = herm.hermint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_hermder(self):
# check exceptions
assert_raises(ValueError, herm.hermder, [0], .5)
assert_raises(ValueError, herm.hermder, [0], -1)
        # check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = herm.hermder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herm.hermder(herm.hermint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herm.hermder(herm.hermint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herm.hermder(c) for c in c2d.T]).T
res = herm.hermder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herm.hermder(c) for c in c2d])
res = herm.hermder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_hermvander(self):
# check for 1d x
x = np.arange(3)
v = herm.hermvander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herm.hermval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = herm.hermvander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herm.hermval(x, coef))
def test_hermvander2d(self):
# also tests hermval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = herm.hermvander2d(x1, x2, [1, 2])
tgt = herm.hermval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herm.hermvander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_hermvander3d(self):
# also tests hermval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = herm.hermvander3d(x1, x2, x3, [1, 2, 3])
tgt = herm.hermval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herm.hermvander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_hermfit(self):
def f(x):
return x*(x - 1)*(x - 2)
def f2(x):
return x**4 + x**2 + 1
# Test exceptions
assert_raises(ValueError, herm.hermfit, [1], [1], -1)
assert_raises(TypeError, herm.hermfit, [[1]], [1], 0)
assert_raises(TypeError, herm.hermfit, [], [1], 0)
assert_raises(TypeError, herm.hermfit, [1], [[[1]]], 0)
assert_raises(TypeError, herm.hermfit, [1, 2], [1], 0)
assert_raises(TypeError, herm.hermfit, [1], [1, 2], 0)
assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, herm.hermfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, herm.hermfit, [1], [1], [-1,])
assert_raises(ValueError, herm.hermfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, herm.hermfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = herm.hermfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(herm.hermval(x, coef3), y)
coef3 = herm.hermfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(herm.hermval(x, coef3), y)
#
coef4 = herm.hermfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
coef4 = herm.hermfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
        # check things still work if deg is not in strictly increasing order
coef4 = herm.hermfit(x, y, [2, 3, 4, 1, 0])
assert_equal(len(coef4), 5)
assert_almost_equal(herm.hermval(x, coef4), y)
#
coef2d = herm.hermfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = herm.hermfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
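        # corrupt yw at the zero-weight (even-index) points; a correct
        # weighted fit has to ignore them and still recover coef3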
        yw[0::2] = 0
wcoef3 = herm.hermfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = herm.hermfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = herm.hermfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex x values whose squares
        # sum to zero.
x = [1, 1j, -1, -1j]
assert_almost_equal(herm.hermfit(x, x, 1), [0, .5])
assert_almost_equal(herm.hermfit(x, x, [0, 1]), [0, .5])
        # test fitting only even Hermite polynomials
x = np.linspace(-1, 1)
y = f2(x)
coef1 = herm.hermfit(x, y, 4)
assert_almost_equal(herm.hermval(x, coef1), y)
coef2 = herm.hermfit(x, y, [0, 2, 4])
assert_almost_equal(herm.hermval(x, coef2), y)
assert_almost_equal(coef1, coef2)
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, herm.hermcompanion, [])
assert_raises(ValueError, herm.hermcompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(herm.hermcompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
class TestGauss(TestCase):
def test_100(self):
x, w = herm.hermgauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = herm.hermvander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.sqrt(np.pi)
assert_almost_equal(w.sum(), tgt)
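# Illustrative sketch (not part of the original suite): hermgauss returns nodes
# and weights for integrating f(x)*exp(-x**2) over the real line, so e.g. the
# second moment integral of x**2*exp(-x**2), which equals sqrt(pi)/2, can be
# approximated as below.  The helper name is an assumption added for
# illustration only.
def _example_gauss_hermite_second_moment(deg=20):
    x, w = herm.hermgauss(deg)
    return np.sum(w * x**2)  # approximately np.sqrt(np.pi)/2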
class TestMisc(TestCase):
def test_hermfromroots(self):
res = herm.hermfromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = herm.hermfromroots(roots)
res = herm.hermval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herm.herm2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermroots(self):
assert_almost_equal(herm.hermroots([1]), [])
assert_almost_equal(herm.hermroots([1, 1]), [-.5])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = herm.hermroots(herm.hermfromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermtrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herm.hermtrim, coef, -1)
# Test results
assert_equal(herm.hermtrim(coef), coef[:-1])
assert_equal(herm.hermtrim(coef, 1), coef[:-3])
assert_equal(herm.hermtrim(coef, 2), [0])
def test_hermline(self):
assert_equal(herm.hermline(3, 4), [3, 2])
def test_herm2poly(self):
for i in range(10):
assert_almost_equal(herm.herm2poly([0]*i + [1]), Hlist[i])
def test_poly2herm(self):
for i in range(10):
assert_almost_equal(herm.poly2herm(Hlist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-x**2)
res = herm.hermweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()