#==========================================================================
#
# Copyright Insight Software Consortium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#==========================================================================*/
#
# Test basic properties of modified times
#
import itk
#image and transform are in the same module, but filters are in a different module.
imType = itk.Image[itk.F, 2]
imTypeB = itk.Image[itk.UC, 2]
im = imType.New()
transType = itk.Transform[itk.D, 3]
trans = transType.New()
filtType = itk.AndImageFilter[imTypeB, imTypeB, imTypeB]
filt = filtType.New()
metricType = itk.ImageToImageMetricv4[imType, imType]
met = metricType.New()
#We modify them in the order image, transform, filter
for _ in range(3000):
im.Modified()
trans.Modified()
met.Modified()
filt.Modified()
#and their Modified times should respect that order.
assert im.GetMTime() < trans.GetMTime()
assert trans.GetMTime() < met.GetMTime()
assert met.GetMTime() < filt.GetMTime()
|
import h5py
import numpy as np
import json
from collections import defaultdict
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy import stats
import matplotlib.pylab as pylab
import seaborn as sns
import dill as pkl
rel_cate_recall = pkl.load(open('./output/rel_cat_recall.npz','rb'))
rel_cate_recall_vis = rel_cate_recall[100]
del rel_cate_recall_vis['all_rel_cates']
rel_cate_dist = np.load(open('./output/rel_dis.npy','rb'))
rel_cate_dist= rel_cate_dist[1:]
rel_dict = json.load(open('/mnt/data1/guoyuyu/datasets/visual_genome/data/genome/VG-SGG-dicts.json','r'))
ind_rel = rel_dict['idx_to_predicate']
rel_ind = rel_dict['predicate_to_idx']
def dict2list(dic:dict,rel_cate_dist):
keys = dic.keys()
vals = dic.values()
lst = [(key, val, dist) for key, val, dist in zip(keys, vals, rel_cate_dist)]
return lst
def draw_hist_from_dic(items, name='None', step=5):
    fig_length = len(items)
params = {
'axes.labelsize': '25',
'xtick.labelsize': '45',
'ytick.labelsize': '20',
'lines.linewidth': '8',
'legend.fontsize': '25',
'figure.figsize': str(fig_length)+', 50' # set figure size
}
pylab.rcParams.update(params)
    x = np.arange(len(items))
x_labels = []
y_values = []
plt.title(name)
    for key, _, dist in items:
        y_values.append(dist)
        x_labels.append(key)
plt.bar(x, y_values)
plt.xticks(x, x_labels, rotation='vertical', weight=200)
plt.savefig('./misc/'+name+'.pdf', dpi=200)
#plt.legend(loc='best')
plt.close('all')
return 0
rel_dis_dic = sorted(dict2list(rel_cate_recall_vis,rel_cate_dist), key=lambda x:x[2], reverse=True)
draw_hist_from_dic(rel_dis_dic,'dist_of_labels') |
from django.shortcuts import render
from .models import *
# Create your views here.
def home(request):
context = {}
return render(request, "myprofile/home.html", context)
def about(request):
about = About.objects.all()
context = {
'about': about
}
return render(request, "myprofile/about.html", context)
def work(request):
projects = Work.objects.all()
context = {
'projects': projects
}
return render(request, "myprofile/work.html", context)
def contact(request):
    if request.method == 'POST':
        email = request.POST.get('email', '')
        if email:
            contact_us = Contact()
            contact_us.email = email
            contact_us.save()
    context = {}
    return render(request, "myprofile/contact.html", context)
|
import uuid
import graphql
import github
class HTTPClient(graphql.client.HTTPClient):
__slots__ = ("token", "user_agent", "uuid")
def __init__(self, token, session, user_agent):
super().__init__(session=session, url="https://api.github.com/graphql")
self.uuid = str(uuid.uuid4())
self.token = f"bearer {token}"
self.user_agent = user_agent or f"ShineyDev/github@{github.version}:{self.uuid}"
async def request(self, document_, operation_, variables_, **kwargs):
headers = kwargs.pop("headers", None) or dict()
headers["Authorization"] = self.token
headers["User-Agent"] = self.user_agent
try:
data = await super().request(document_, operation_, variables_, headers=headers, **kwargs)
except github.ClientError:
raise
except graphql.client.ClientResponseHTTPError as e:
try:
exc_type = github.errors._response_error_map[e.response.status]
except KeyError:
exc_type = github.ClientResponseHTTPError
raise exc_type(e.message, e.response, e.data) from e
except graphql.client.ClientResponseGraphQLError as e:
try:
exc_type = github.errors._response_error_map[e.data["errors"][0]["type"]]
except KeyError:
exc_type = github.ClientResponseGraphQLError
raise exc_type(e.message, e.response, e.data) from e
except graphql.client.ClientResponseError as e:
raise github.ClientResponseError(e.message) from e
except graphql.client.ClientError as e:
raise github.ClientError(e.message) from e
else:
return data
async def _fetch(self, document_, *path, _data_validate=None, **kwargs):
data = await self.request(document_, None, kwargs, _data_validate=_data_validate)
return github.utils._follow(data, path)
async def fetch_query_all_codes_of_conduct(self, *, fields=None):
fields = github.utils._get_merged_graphql_fields(github.CodeOfConduct, fields)
query = "{codesOfConduct{%s}}" % ",".join(fields)
path = ("codesOfConduct",)
def validate(response, data):
value = github.utils._follow(data["data"], path)
if any([c.get("body", False) is None for c in value]):
# NOTE: (body=null) 1240368
raise github.ClientResponseGraphQLInternalError("The GraphQL service failed to fetch a code of conduct body.", response, data)
return await self._fetch(query, *path, _data_validate=validate)
async def fetch_query_all_licenses(self, *, fields=None):
fields = github.utils._get_merged_graphql_fields(github.License, fields)
query = "{licenses{%s}}" % ",".join(fields)
path = ("licenses",)
def validate(response, data):
value = github.utils._follow(data["data"], path)
if any([l.get("body", False) == "" for l in value]):
# NOTE: (body="") 1240368
raise github.ClientResponseGraphQLInternalError("The GraphQL service failed to fetch a license body.", response, data)
return await self._fetch(query, *path, _data_validate=validate)
async def fetch_query_code_of_conduct(self, key, *, fields=None):
fields = github.utils._get_merged_graphql_fields(github.CodeOfConduct, fields)
query = "query($key:String!){codeOfConduct(key:$key){%s}}" % ",".join(fields)
path = ("codeOfConduct",)
def validate(response, data):
value = github.utils._follow(data["data"], path)
if value is None or key == "other":
# NOTE: (value=null) 1143102
# NOTE: (key="other") body=null
raise github.ClientResponseGraphQLNotFoundError(f"Could not resolve to a code of conduct with the key '{key}'.", response, data)
if value.get("body", False) is None:
# NOTE: (body=null) 1240368
raise github.ClientResponseGraphQLInternalError("The GraphQL service failed to fetch the code of conduct body.", response, data)
value = await self._fetch(query, *path, key=key, _data_validate=validate)
if "key" not in value.keys():
value["key"] = key
return value
async def fetch_query_license(self, key, *, fields=None):
fields = github.utils._get_merged_graphql_fields(github.License, fields)
query = "query($key:String!){license(key:$key){%s}}" % ",".join(fields)
path = ("license",)
def validate(response, data):
value = github.utils._follow(data["data"], path)
if value is None or key == "other":
# NOTE: (value=null) 1143102
# NOTE: (key="other") body=""
raise github.ClientResponseGraphQLNotFoundError(f"Could not resolve to a license with the key '{key}'.", response, data)
if value.get("body", False) == "":
# NOTE: (body="") 1240368
raise github.ClientResponseGraphQLInternalError("The GraphQL service failed to fetch the license body.", response, data)
value = await self._fetch(query, *path, key=key, _data_validate=validate)
if "key" not in value.keys():
value["key"] = key
return value
async def fetch_query_metadata(self, *, fields=None):
fields = github.utils._get_merged_graphql_fields(github.Metadata, fields)
query = "{meta{%s}}" % ",".join(fields)
path = ("meta",)
return await self._fetch(query, *path)
async def fetch_query_node(self, type, id, *, fields=None):
fields = github.utils._get_merged_graphql_fields(type, fields)
query = "query($id:ID!){node(id:$id){...on %s{%s}}}" % (github.utils._get_graphql_type(type), ",".join(fields))
path = ("node",)
value = await self._fetch(query, *path, id=id)
if "id" not in value.keys():
value["id"] = id
return value
async def fetch_query_rate_limit(self, *, fields=None):
fields = github.utils._get_merged_graphql_fields(github.RateLimit, fields)
query = "{rateLimit(dryRun:true){%s}}" % ",".join(fields)
path = ("rateLimit",)
return await self._fetch(query, *path)
async def fetch_query_resource(self, type, url, *, fields=None):
fields = github.utils._get_merged_graphql_fields(type, fields)
query = "query($url:URI!){resource(url:$url){...on %s{%s}}}" % (github.utils._get_graphql_type(type), ",".join(fields))
path = ("resource",)
value = await self._fetch(query, *path, url=url)
if "url" not in value.keys():
value["url"] = url
return value
async def fetch_query_topic(self, name, *, fields=None):
fields = github.utils._get_merged_graphql_fields(github.Topic, fields)
query = "query($name:String!){topic(name:$name){%s}}" % ",".join(fields)
path = ("topic",)
def validate(response, data):
value = github.utils._follow(data["data"], path)
if value is None:
# NOTE: (value=null) 1143102
raise github.ClientResponseGraphQLNotFoundError(f"Could not resolve to a topic with the name '{name}'.", response, data)
value = await self._fetch(query, *path, name=name, _data_validate=validate)
if "name" not in value.keys():
value["name"] = name
return value
async def fetch_topic_related_topics(self, topic_id, limit, *, fields=None):
fields = github.utils._get_merged_graphql_fields(github.Topic, fields)
query = "query($topic_id:ID!,$limit:Int){node(id:$topic_id){...on Topic{relatedTopics(first:$limit){%s}}}}" % ",".join(fields)
path = ("node", "relatedTopics")
return await self._fetch(query, *path, limit=limit, topic_id=topic_id)
__all__ = [
"HTTPClient",
]
|
import torch
from torch import nn
from torch.nn import functional as F
from torchvision.ops import nms
from collections import OrderedDict
from ...config import prepare_config
class LocMaxNMSPostprocessor(nn.Module):
"""A score local maxima + nms postprocessor. Keeps predictions that correspond
to local maxima of scores. Then applies standard nms.
Config:
kernel_size: max pooling kernel size for local maxima search. Default: 3
score_threshold: minimal score needed to keep prediction. Default: 0.5
nms_iou_threshold: parameter for nms. Default: 0.5
Shape:
Input: {
"scores_t": :math:`(B,H,W)`,
"deltas_t": :math:`(B,2,H,W)`,
"sizes_t": :math:`(B,2,H,W)`,
"landmarks_t": :math:`(B,n_landmarks,2,H,W)`, (optional),
"offsets": (int, int),
"stride": int,
}
        Output: [
            {
                "scores": :math:`(N_{i},)`,
                "bboxes": :math:`(N_{i},4)`,
                "landmarks": :math:`(N_{i},n_landmarks,2)`, (optional)
            },
            for i in range(B),
        ]
"""
@staticmethod
def get_default_config():
return OrderedDict([
("kernel_size", 3),
("score_threshold", 0.5),
("nms_iou_threshold", 0.5),
])
def __init__(self, config=None):
super().__init__()
config = prepare_config(self, config)
for k, v in config.items():
self.__dict__[k] = v
def forward(self, x, score_threshold=None, nms_iou_threshold=None):
offsets = x["offsets"]
stride = x["stride"]
score_threshold = self.score_threshold if score_threshold is None else score_threshold
nms_iou_threshold = self.nms_iou_threshold if nms_iou_threshold is None else nms_iou_threshold
scores = x["scores_t"]
deltas = x["deltas_t"]
sizes = x["sizes_t"]
id_b, id_h, id_w = torch.where(
torch.logical_and(
scores > score_threshold,
scores == F.max_pool2d(scores, self.kernel_size, 1, self.kernel_size//2)
)
)
batch_idx, results_per_batch_idx = torch.unique(id_b.cpu(),
sorted=True, return_counts=True)
batch_size = scores.shape[0]
splits = torch.zeros(batch_size).long()
splits[batch_idx] = results_per_batch_idx
splits = splits.tolist()
scores = scores[id_b, id_h, id_w]
deltas = deltas[id_b, :, id_h, id_w]
sizes = sizes[id_b, :, id_h, id_w]
y_coord = id_h*stride + offsets[0]
x_coord = id_w*stride + offsets[1]
pivots = torch.stack([x_coord, y_coord], dim=1)
centers = pivots + deltas
scores = scores.float().cpu()
scores = torch.split(scores, splits)
bboxes = torch.cat([centers - sizes/2, centers + sizes/2], dim=1).float().cpu()
bboxes = torch.split(bboxes, splits)
keep = [nms(_bboxes, _scores, nms_iou_threshold)\
for _bboxes, _scores in zip(bboxes, scores)]
scores = [_scores[_keep] for _scores, _keep in zip(scores, keep)]
bboxes = [_bboxes[_keep] for _bboxes, _keep in zip(bboxes, keep)]
        if "landmarks_t" not in x:
            result = [{"bboxes": _bboxes, "scores": _scores}
                      for _bboxes, _scores in zip(bboxes, scores)]
            return result
        else:
            landmarks = x["landmarks_t"]
landmarks = landmarks[id_b, ..., id_h, id_w]
landmarks = landmarks + pivots[:,None,:]
landmarks = landmarks.float().cpu()
landmarks = torch.split(landmarks, splits)
landmarks = [_landmarks[_keep] for _landmarks, _keep in zip(landmarks, keep)]
result = [{"bboxes": _bboxes, "scores": _scores, "landmarks": _landmarks}\
for _bboxes, _scores, _landmarks in zip(bboxes, scores, landmarks)]
return result
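# --- Editor's usage sketch (not part of the original module) ---
# A minimal, self-contained illustration of the local-maxima + NMS idea described
# in the class docstring above, using max_pool2d and torchvision's nms directly
# rather than the config machinery. All tensor contents are made-up example values;
# only the shapes follow the documented input format.
if __name__ == "__main__":
    B, H, W = 1, 8, 8
    stride, offsets = 8, (0, 0)
    scores = torch.zeros(B, H, W)
    scores[0, 2, 3] = 0.9  # a strong detection, local maximum of its 3x3 window
    scores[0, 2, 4] = 0.8  # a weaker neighbour, suppressed by the max-pool check
    deltas = torch.zeros(B, 2, H, W)        # center offsets from the pivot
    sizes = torch.full((B, 2, H, W), 16.0)  # box width/height
    keep_mask = torch.logical_and(
        scores > 0.5,
        scores == F.max_pool2d(scores, 3, 1, 1),
    )
    id_b, id_h, id_w = torch.where(keep_mask)
    pivots = torch.stack([id_w * stride + offsets[1], id_h * stride + offsets[0]], dim=1).float()
    centers = pivots + deltas[id_b, :, id_h, id_w]
    half = sizes[id_b, :, id_h, id_w] / 2
    bboxes = torch.cat([centers - half, centers + half], dim=1)
    kept = nms(bboxes, scores[id_b, id_h, id_w], 0.5)
    assert kept.numel() == 1  # only the local maximum survives
    print(bboxes[kept], scores[id_b, id_h, id_w][kept])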
|
from spaceone.core.manager import BaseManager
from spaceone.inventory.connector.identity_connector import IdentityConnector
class IdentityManager(BaseManager):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.identity_conn: IdentityConnector = self.locator.get_connector('IdentityConnector')
def get_user(self, user_id, domain_id):
return self.identity_conn.get_user(user_id, domain_id)
def list_users(self, query, domain_id):
return self.identity_conn.list_users(query, domain_id)
def get_project(self, project_id, domain_id):
return self.identity_conn.get_project(project_id, domain_id)
def list_projects(self, query, domain_id):
return self.identity_conn.list_projects(query, domain_id) |
from django.views.generic import TemplateView
class SubmissionsView(TemplateView):
template_name = 'management/submissions.html'
class UserManagementView(TemplateView):
template_name = 'management/user_management.html'
|
import struct
import random
import socket
import time
import io
import re
import collections
HEADER_SIZE = 12
Header = collections.namedtuple('Header', ['msg', 'len', 'sync'])
def unpack_header(buf):
msg, len_, sync = struct.unpack('<III', buf[:HEADER_SIZE])
return Header(msg, len_, sync), buf[HEADER_SIZE:]
def pack_header(header):
return struct.pack('<III', header.msg, header.len, header.sync)
def scanf(fmt, buf):
"""return <values: tuple>, <error: str>, <remain: bytes>"""
vals = []
for f in fmt:
if f == 'u':
if len(buf) < 4:
return tuple(vals), \
"decoding error (fmt:'u'): too small buffer", buf
vals.append(struct.unpack_from('<I', buf)[0])
buf = buf[4:]
elif f == 'w':
val, err, new_buf = _berint_decode(buf)
if err:
return tuple(vals), f"decoding error (fmt:'w'): {err}", buf
vals.append(val)
buf = new_buf
elif f == 'W':
val, err, new_buf = _berstr_decode(buf)
if err:
return tuple(vals), f"decoding error (fmt:'W'): {err}", buf
vals.append(val)
buf = new_buf
else:
return tuple(vals), "not implemented", buf
return tuple(vals), None, buf
def printf(fmt, vals):
    """return <blob: bytes>, <error: str>"""
    # encoding counterpart of scanf(); not implemented yet
    return None, "not implemented"
def _mp_fmt(key):
key >>= 5
if key == 0:
return 'w'
elif key == 1:
return 'W'
return None
def unpack_mp(buf):
vals = {}
while len(buf) > 0:
key, err, buf = scanf('w', buf)
if err:
return None, err
key = key[0]
val, err, buf = scanf(_mp_fmt(key), buf)
if err:
return None, err
vals[key] = val[0]
return vals, None
def _berint_decode(buf):
val = 0
for i in range(5):
if len(buf) < 1:
return None, "too small buffer", buf
val = (val << 7) | buf[0] & 0x7F
next = buf[0] & 0x80
buf = buf[1:]
if not next:
break
return val, None, buf
def _berstr_decode(buf):
len_, err, new_buf = _berint_decode(buf)
if err:
return None, err, buf
if len_ > len(new_buf):
return None, "too small buffer", buf
return new_buf[:len_], None, new_buf[len_:]
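# --- Editor's usage sketch (not part of the original module) ---
# Round-trips a header and decodes the 'u' (uint32), 'w' (BER-encoded int) and
# 'W' (BER-length-prefixed bytes) format characters handled by scanf() above.
# The sample byte strings are made up for the example.
if __name__ == '__main__':
    hdr, rest = unpack_header(pack_header(Header(msg=1, len=5, sync=0xABCD)) + b'payload')
    assert hdr == Header(1, 5, 0xABCD) and rest == b'payload'
    vals, err, remain = scanf('uwW', struct.pack('<I', 7) + b'\x81\x00' + b'\x03abc')
    assert err is None and vals == (7, 128, b'abc') and remain == b''
    # unpack_mp(): each key selects the format of its value via the top bits (see _mp_fmt)
    mp, err = unpack_mp(b'\x01\x05' + b'\x20\x03abc')
    assert err is None and mp == {1: 5, 32: b'abc'}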
|
# -*- coding: utf-8 -*-
"""chapter11.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1xiOWojlWDCiyw8Nt0-boNhmAo8tE_yFE
"""
# last chapter, chapter 11: virtual screening
# A Virtual Screening Workflow Example
# We use a set of molecules known to bind to a particular protein, as well as a set
# of molecules assumed not to bind, to train a convolutional neural network to
# identify new molecules with the potential to bind to the target.
# ERK2 and MAPK1 are the same protein, which is involved in signalling.
# We will train the model to distinguish a set of ERK2 active compounds from a set
# of decoy compounds. The active and decoy compounds are derived from the DUD-E
# database, which is designed for testing predictive models.
# Commented out IPython magic to ensure Python compatibility.
##setup tensorflow v1
# %tensorflow_version 1.x
!wget -c https://repo.anaconda.com/archive/Anaconda3-2019.10-Linux-x86_64.sh
!chmod +x Anaconda3-2019.10-Linux-x86_64.sh
!bash ./Anaconda3-2019.10-Linux-x86_64.sh -b -f -p /usr/local
!conda install -y -c deepchem -c rdkit -c conda-forge -c omnia deepchem-gpu=2.3.0
import sys
sys.path.append('/usr/local/lib/python3.7/site-packages/')
from rdkit import Chem
from rdkit.Chem import Draw
from rdkit.Chem.Draw import IPythonConsole
import pandas as pd
from rdkit.Chem import PandasTools
from rdkit.Chem import Descriptors
from rdkit.Chem import rdmolops
import seaborn as sns
active_df = pd.read_csv("actives_final.ism",header=None,sep=" ")
active_rows,active_cols = active_df.shape
active_df.columns = ["SMILES","ID","ChEMBL_ID"]
active_df["label"] = ["Active"]*active_rows
PandasTools.AddMoleculeColumnToFrame(active_df,"SMILES","Mol")
def add_property_columns_to_df(df_in):
df_in["mw"] = [Descriptors.MolWt(mol) for mol in df_in.Mol]
df_in["logP"] = [Descriptors.MolLogP(mol) for mol in df_in.Mol]
df_in["charge"] = [rdmolops.GetFormalCharge(mol) for mol in df_in.Mol]
add_property_columns_to_df(active_df)
active_df.head()
decoy_df = pd.read_csv("decoys_final.ism",header=None,sep=" ")
decoy_df.columns = ["SMILES","ID"]
decoy_rows, decoy_cols = decoy_df.shape
decoy_df["label"] = ["Decoy"]*decoy_rows
PandasTools.AddMoleculeColumnToFrame(decoy_df,"SMILES","Mol")
add_property_columns_to_df(decoy_df)
decoy_df.head()
tmp_df = active_df.append(decoy_df)
sns.violinplot(tmp_df["label"],tmp_df["mw"])
sns.violinplot(tmp_df["label"],tmp_df["logP"])
sns.violinplot(tmp_df["label"],tmp_df["charge"])
charged = decoy_df[decoy_df["charge"] != 0]
charged.shape[0]/decoy_df.shape[0]
from neutralize import NeutraliseCharges
revised_decoy_df = decoy_df[["SMILES","ID","label"]].copy()
revised_decoy_df["SMILES"] = [NeutraliseCharges(x)[0] for x in revised_decoy_df["SMILES"]]
PandasTools.AddMoleculeColumnToFrame(revised_decoy_df,"SMILES","Mol")
add_property_columns_to_df(revised_decoy_df)
new_tmp_df = active_df.append(revised_decoy_df)
sns.violinplot(new_tmp_df["label"],new_tmp_df["charge"])
charged = revised_decoy_df[revised_decoy_df["charge"] != 0]
charged.shape[0]/revised_decoy_df.shape[0]
active_df["is_active"] = [1] * active_df.shape[0]
revised_decoy_df["is_active"] = [0] * revised_decoy_df.shape[0]
combined_df = active_df.append(revised_decoy_df)[["SMILES","ID","is_active"]]
combined_df.head()
combined_df.to_csv("dude_erk2_mk01.csv")
#part1 over
#part 2 training a predictive model
import deepchem as dc
from deepchem.models import GraphConvModel
import numpy as np
import sys
import pandas as pd
import seaborn as sns
from rdkit.Chem import PandasTools
def generate_graph_conv_model():
batch_size = 128
model = GraphConvModel(1, batch_size=batch_size, mode='classification',model_dir="./model_dir")
return model
dataset_file = "dude_erk2_mk01.csv"
tasks = ["is_active"]
featurizer = dc.feat.ConvMolFeaturizer()
loader = dc.data.CSVLoader(tasks=tasks, smiles_field="SMILES", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
splitter = dc.splits.RandomSplitter()
metrics = [dc.metrics.Metric(dc.metrics.matthews_corrcoef, np.mean, mode="classification")]
training_score_list = []
validation_score_list = []
transformers = []
cv_folds = 10
for i in range(0,cv_folds):
model = generate_graph_conv_model()
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(dataset)
model.fit(train_dataset)
train_scores = model.evaluate(train_dataset, metrics, transformers)
training_score_list.append(train_scores["mean-matthews_corrcoef"])
validation_scores = model.evaluate(valid_dataset, metrics, transformers)
validation_score_list.append(validation_scores["mean-matthews_corrcoef"])
print(training_score_list)
print(validation_score_list)
sns.boxplot(["training"]*cv_folds+["validation"]*cv_folds,training_score_list+validation_score_list)
pred = [x.flatten() for x in model.predict(valid_dataset)]
pred_df = pd.DataFrame(pred,columns=["neg","pos"])
pred_df["active"] = [int(x) for x in valid_dataset.y]
pred_df["SMILES"] = valid_dataset.ids
pred_df.head()
pred_df.sort_values("pos",ascending=False).head(25)
sns.boxplot(pred_df.active,pred_df.pos)
false_negative_df = pred_df.query("active == 1 & pos < 0.5").copy()
PandasTools.AddMoleculeColumnToFrame(false_negative_df,"SMILES","Mol")
false_negative_df
false_positive_df = pred_df.query("active == 0 & pos > 0.5").copy()
PandasTools.AddMoleculeColumnToFrame(false_positive_df,"SMILES","Mol")
false_positive_df
model.fit(dataset)
#part 2 over part 3 begin
#install rd_filters for the 3rd part
!pip install git+https://github.com/PatWalters/rd_filters.git
!rd_filters -h
!rd_filters filter --in zinc_100k.smi --prefix zinc
df = pd.read_csv("zinc.csv")
df.head()
from collections import Counter
count_list = list(Counter(df.FILTER).items())
count_df = pd.DataFrame(count_list,columns=["Rule","Count"])
count_df.sort_values("Count",inplace=True,ascending=False)
count_df.head()
smiles_list = df[df.FILTER == "Filter41_12_dicarbonyl > 0"].SMILES[:10]
from rdkit import Chem
from rdkit.Chem import Draw
mol_list = [Chem.MolFromSmiles(x) for x in smiles_list]
dicarbonyl = Chem.MolFromSmarts('*C(=O)C(=O)*')
match_list = [mol.GetSubstructMatch(dicarbonyl) for mol in mol_list]
Draw.MolsToGridImage(mol_list,highlightAtomLists=match_list,molsPerRow=3)
#part 3 over , starting part 4
import deepchem as dc
import pandas as pd
from rdkit.Chem import PandasTools, Draw
from rdkit import DataStructs
from rdkit.ML.Cluster import Butina
from rdkit.Chem import rdMolDescriptors as rdmd
import seaborn as sns
model = dc.models.GraphConvModel(1, batch_size=128, mode='classification',model_dir="model_dir")
model.restore()
featurizer = dc.feat.ConvMolFeaturizer()
df = pd.read_csv("zinc.smi",sep=" ",header=None)
df.columns=["SMILES","Name"]
rows,cols = df.shape
df["Val"] = [0] * rows
df.head()
infile_name = "zinc_filtered.csv"
df.to_csv(infile_name,index=False)
loader = dc.data.CSVLoader(tasks=['Val'], smiles_field="SMILES", featurizer=featurizer)
dataset = loader.featurize(infile_name, shard_size=8192)
pred = model.predict(dataset)
pred_df = pd.DataFrame([x.flatten() for x in pred],columns=["Neg","Pos"])
sns.distplot(pred_df.Pos,rug=True)
combo_df = df.join(pred_df,how="outer")
combo_df.sort_values("Pos",inplace=True,ascending=False)
PandasTools.AddMoleculeColumnToFrame(combo_df,"SMILES","Mol")
combo_df.head()
Draw.MolsToGridImage(combo_df.Mol[:10],molsPerRow=5,legends=["%.2f" % x for x in combo_df.Pos[:10]])
def butina_cluster(mol_list,cutoff=0.35):
fp_list = [rdmd.GetMorganFingerprintAsBitVect(m, 3, nBits=2048) for m in mol_list]
dists = []
nfps = len(fp_list)
for i in range(1,nfps):
sims = DataStructs.BulkTanimotoSimilarity(fp_list[i],fp_list[:i])
dists.extend([1-x for x in sims])
mol_clusters = Butina.ClusterData(dists,nfps,cutoff,isDistData=True)
cluster_id_list = [0]*nfps
for idx,cluster in enumerate(mol_clusters,1):
for member in cluster:
cluster_id_list[member] = idx
return cluster_id_list
best_100_df = combo_df.head(100).copy()
best_100_df["Cluster"] = butina_cluster(best_100_df.Mol)
best_100_df.head()
len(best_100_df.Cluster.unique())
best_cluster_rep_df = best_100_df.drop_duplicates("Cluster")
best_cluster_rep_df.shape
best_cluster_rep_df.to_csv("best_cluster_representatives.csv")
#mols = best_cluster_rep_df.iloc[:,5]
#type(mols)
#chapter over
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from mistral.policies import base
EXECUTIONS = 'executions:%s'
rules = [
policy.DocumentedRuleDefault(
name=EXECUTIONS % 'create',
check_str=base.RULE_ADMIN_OR_OWNER,
description='Create a new execution.',
operations=[
{
'path': '/v2/executions',
'method': 'POST'
}
]
),
policy.DocumentedRuleDefault(
name=EXECUTIONS % 'delete',
check_str=base.RULE_ADMIN_OR_OWNER,
description='Delete the specified execution.',
operations=[
{
'path': '/v2/executions/{execution_id}',
'method': 'DELETE'
}
]
),
policy.DocumentedRuleDefault(
name=EXECUTIONS % 'get',
check_str=base.RULE_ADMIN_OR_OWNER,
description='Return the specified execution.',
operations=[
{
'path': '/v2/executions/{execution_id}',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name=EXECUTIONS % 'list',
check_str=base.RULE_ADMIN_OR_OWNER,
description='Return all executions.',
operations=[
{
'path': '/v2/executions',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name=EXECUTIONS % 'list:all_projects',
check_str=base.RULE_ADMIN_ONLY,
description='Return all executions from all projects.',
operations=[
{
'path': '/v2/executions',
'method': 'GET'
}
]
),
policy.DocumentedRuleDefault(
name=EXECUTIONS % 'update',
check_str=base.RULE_ADMIN_OR_OWNER,
description='Update an execution.',
operations=[
{
'path': '/v2/executions',
'method': 'PUT'
}
]
)
]
def list_rules():
return rules
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
def main(argv):
filename = argv[1]
data = []
with open(filename, 'rb') as f:
header1 = f.readline()
header2 = f.readline()
line = str(f.readline(), 'utf-8')
while line:
i = int('0x' + line[:-1], 16)
data.append(i)
line = str(f.readline(), 'utf-8')
# sys.stdout.buffer.write(bytes(header1, encoding='utf-8'))
# sys.stdout.buffer.write(bytes(header2, encoding='utf-8'))
sys.stdout.buffer.write(header1)
sys.stdout.buffer.write(header2)
sys.stdout.buffer.write(bytes(data))
if __name__ == '__main__':
main(sys.argv)
|
"""
Tests for simple se commands that just output to stdout and don't require
a book as input.
"""
import pytest
import se
from helpers import must_run
SIMPLE_CMDS = [
("dec2roman", "1 4 7 45 900", "I\nIV\nVII\nXLV\nCM"),
("dec2roman", "1867", "MDCCCLXVII"),
("roman2dec", "III XXV LXXVI CXLII DCCCLXIV", "3\n25\n76\n142\n864"),
("roman2dec", "MDCCLXXVI", "1776"),
("make-url-safe", "http://google.com", "http-google-com"),
("make-url-safe", "abc123.!-+d xyz ", "abc123-d\nxyz"),
("titlecase", "'the Mysterious Affair At styles'", "The Mysterious Affair at Styles"),
("titlecase", "heart of darkness", "Heart\nOf\nDarkness"),
]
@pytest.mark.parametrize("cmd_name, cmd_args, cmd_out", SIMPLE_CMDS)
def test_simple_cmds(cmd_name: str, cmd_args: str, cmd_out: str, capfd):
"""Execute command and check output"""
must_run(f"se {cmd_name} {cmd_args}")
out, _ = capfd.readouterr()
assert cmd_out == out.rstrip()
def test_version(capfd):
"""Verify that the version command returns the version"""
must_run("se version")
out, _ = capfd.readouterr()
assert out.startswith(se.VERSION)
def test_help(capfd):
"""Verify that the help command returns without an error"""
must_run("se help")
out, _ = capfd.readouterr()
assert out.splitlines()[0] == "The following commands are available:"
|
import json
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import AnonymousUser, User
from unittest.mock import patch
from .models import *
class dotdict(dict):
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
# Create your tests here.
class APITestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user(
username='jacob', email='jacob@…', password='top_secret')
@patch('yandex_checkout.Payment.create')
def test_create_payment(self, mocked_payment_create):
class PaymentMock:
id=1
status = 'pending'
            description = 'test description'
amount = dotdict({'value': 1, 'currency': 'RUB'})
confirmation = dotdict({'confirmation_url': 'http://confirmation_url'})
mocked_payment_create.return_value = PaymentMock()
data = {'payment_total_sum': 100.0}
self.client.login(username='jacob', password='top_secret')
self.assertEqual(YandexKassaPayment.objects.count(), 0)
resp = self.client.post(reverse('create_payment'), json.dumps(data), content_type="application/json")
        self.assertEqual(resp.status_code, 200)
self.assertEqual(YandexKassaPayment.objects.count(), 1) |
from collections import namedtuple
import pytest
from iroha import Iroha, ed25519_sha2
iroha = Iroha('ADMIN_ACCOUNT_ID')
command = [Iroha.command('CreateDomain', domain_id='domain', default_role='user')]
transaction = Iroha.transaction(iroha, command)
Test_data = namedtuple('Test_data', ['message', 'private_key', 'public_key'])
Test_data.__new__.__defaults__ = (transaction, None, None)
data_scope = ([Test_data(private_key="f101537e319568c765b2cc89698325604991dca57b9716b58016b253506cab70",
public_key=b'313a07e6384776ed95447710d15e59148473ccfc052a681317a72a69f2a49910'),
Test_data(
private_key=b'f101537e319568c765b2cc89698325604991dca57b9716b58016b253506cab70',
public_key=b'313a07e6384776ed95447710d15e59148473ccfc052a681317a72a69f2a49910'),
Test_data(
private_key=ed25519_sha2.SigningKey(b'\x99\xfe\x89i\xac\xda\xfb\t\xbf\xdd\x00F7\x0e/\xa2X\x0b\x0c%\x91\xa266%%\r\xa1Mw\x1bc'),
public_key='ed0120ca0d372c15b712b46fa1c6e4afc4fd7e23e91dbf869da497db898d884f45ac40')
])
data_ids = ['priv_key, pub_key({},{})'.format(t.private_key, t.public_key)
for t in data_scope]
@pytest.fixture(scope='session', params=data_scope, ids=data_ids)
def crypto_data(request):
return request.param
|
"""Current version of package keras_mixed_sequence."""
__version__ = "1.0.27"
|
from numpy.core.fromnumeric import reshape
import streamlit as st
import pickle
import numpy as np
import pandas as pd
st.title("Modeling Earthquake Damage")
st.header("How much damage did the building incur?")
st.markdown("----------------------------------------")
# load model
with open('saved-earthquake-model.pkl', 'rb') as file:
model = pickle.load(file)
# user input features
# age
st.markdown("Age of building:")
age = st.slider('', min_value = 0, max_value = 995, step = 25)
# count_families
st.markdown("Number of Families that Live in the building:")
count_families = st.slider('', min_value = 0, max_value = 6, step = 1)
# foundation_ type
st.markdown("Type of foundation used while building:")
foundation_type_choice = st.radio('', ['H', 'I', 'R', 'U', 'W'])
if foundation_type_choice == 'H':
foundation_type = 0
elif foundation_type_choice == 'I':
foundation_type = 1
elif foundation_type_choice == 'R':
foundation_type = 2
elif foundation_type_choice == 'U':
foundation_type = 3
else:
foundation_type = 4
# roof_type
st.markdown("Type of roof used while building:")
roof_type_choice = st.radio('', ['N', 'Q', 'X'])
if roof_type_choice == 'N':
roof_type = 0
elif roof_type_choice == 'Q':
roof_type = 1
else:
roof_type = 2
# has_superstructure_mud_mortar_stone
st.markdown("Is the building made out of Mud Mortar Stone?")
has_superstructure_mud_mortar_stone_choice = st.radio('', ['Yes', 'No'])
if has_superstructure_mud_mortar_stone_choice == 'Yes':
has_superstructure_mud_mortar_stone = 1
else:
has_superstructure_mud_mortar_stone = 0
# button click prediction
st.markdown("### Make a prediction! 🔮")
click = st.button("Click Here")
if click:
# model predictions
all_features = np.array([age, count_families, foundation_type, roof_type, has_superstructure_mud_mortar_stone])
prediction = model.predict(all_features.reshape(1, -1))
if prediction == 1:
st.header(f'The model predicts a damage grade of {prediction[0]} - low building damage 🏠 ✔️')
elif prediction == 2:
st.header(f'The model predicts a damage grade of {prediction[0]} - medium amount of building damage 🏠 🔨')
else:
st.header(f'The model predicts a damage grade of {prediction[0]} - almost complete building destruction 🏚 ❌')
# data dictionary source
st.write("##")
st.markdown("<a href='https://www.drivendata.org/competitions/57/nepal-earthquake/page/136/'>Data Dictionary / Data Source</a>", unsafe_allow_html=True)
|
from wagtail.documents.models import Document as WagtailDocument, get_document_model
from graphene_django.types import DjangoObjectType
import graphene
from ..registry import registry
from ..utils import resolve_queryset
from .structures import QuerySetList
class DocumentObjectType(DjangoObjectType):
"""
Base document type used if one isn't generated for the current model.
All other node types extend this.
"""
class Meta:
model = WagtailDocument
exclude_fields = ("tags",)
id = graphene.ID()
title = graphene.String()
file = graphene.String()
created_at = graphene.DateTime()
file_size = graphene.Int()
file_hash = graphene.String()
def DocumentsQuery():
registry.documents[WagtailDocument] = DocumentObjectType
mdl = get_document_model()
model_type = registry.documents[mdl]
class Mixin:
documents = QuerySetList(model_type, enable_search=True)
        # Return all documents, filtered via resolve_queryset (search, limit, etc.).
def resolve_documents(self, info, **kwargs):
return resolve_queryset(mdl.objects.all(), info, **kwargs)
return Mixin
def get_document_type():
registry.documents[WagtailDocument] = DocumentObjectType
mdl = get_document_model()
return registry.documents[mdl]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-05-02 19:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parcelhubPOS', '0008_auto_20180429_0142'),
]
operations = [
migrations.AddField(
model_name='invoiceitem',
name='totalgst',
field=models.DecimalField(decimal_places=2, default=0, max_digits=30, verbose_name='Total GST'),
preserve_default=False,
),
migrations.AddField(
model_name='invoiceitem',
name='totalprice',
field=models.DecimalField(decimal_places=2, default=0, max_digits=30, verbose_name='Total price'),
preserve_default=False,
),
migrations.AddField(
model_name='invoiceitem',
name='unit',
field=models.IntegerField(default=1),
),
]
|
import random
import time
import chainer.functions as F
import gym
import numpy as np
def reseed(env, pool_rank):
np.random.seed(pool_rank + get_time_seed())
random.seed(pool_rank + get_time_seed())
env.seed(pool_rank + get_time_seed())
def sym_mean(x):
return F.sum(x) / x.size
def gamma_expand(x, a):
x, a = np.asarray(x), np.asarray(a)
y = np.zeros_like(x)
for t in reversed(range(len(x))):
y[t] = x[t] + a[t] * (0 if t == len(x) - 1 else y[t + 1])
return y
def get_dims(env):
if isinstance(env.action_space, gym.spaces.Discrete):
if isinstance(env.observation_space, gym.spaces.Discrete):
env_dim = env.observation_space.n
elif isinstance(env.observation_space, gym.spaces.MultiDiscrete):
env_dim = env.observation_space.shape[0] * 3
else:
env_dim = env.observation_space.shape * 3
act_dim = env.action_space.n
n_output_params = 1
else:
env_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
n_output_params = 2
return env_dim, act_dim, n_output_params
def int_to_onehot(x, dim):
y = np.zeros(dim)
y[x] = 1
return y
def onehot_to_int(x):
x = x.astype(int)
return np.where(x == 1)[0][0]
def relative_ranks(x):
def ranks(x):
ranks = np.zeros(len(x), dtype=int)
ranks[x.argsort()] = np.arange(len(x))
return ranks
y = ranks(x.ravel()).reshape(x.shape).astype(np.float32)
return y / (x.size - 1.) - 0.5
class Adam(object):
def __init__(self, shape, stepsize, beta1=0.9, beta2=0.999, epsilon=1e-08, dtype=np.float32):
self.stepsize, self.beta1, self.beta2, self.epsilon = stepsize, beta1, beta2, epsilon
self.t = 0
self.m = np.zeros(shape, dtype=dtype)
self.v = np.zeros(shape, dtype=dtype)
def step(self, g):
self.t += 1
a = self.stepsize * np.sqrt(1 - self.beta2 ** self.t) / (1 - self.beta1 ** self.t)
self.m = self.beta1 * self.m + (1 - self.beta1) * g
self.v = self.beta2 * self.v + (1 - self.beta2) * (g * g)
return - a * self.m / (np.sqrt(self.v) + self.epsilon)
class Normalizer(object):
def __init__(self, shape, epsilon=1e-2):
self.shape = shape
self.sum = np.zeros(shape, dtype=np.float32)
self.sum2 = np.full(shape, epsilon, dtype=np.float32)
self.count = epsilon
def _get_mean_and_std(self):
mean = self.sum / self.count
std = np.sqrt(np.maximum(self.sum2 / self.count - np.square(mean), 0.01))
return mean, std
def update(self, x):
self.sum += np.sum(x, axis=0)
self.sum2 += np.sum(np.square(x), axis=0)
self.count += x.shape[0]
def norm(self, x):
mean, std = self._get_mean_and_std()
return (x - mean) / std
def unnorm(self, x):
mean, std = self._get_mean_and_std()
return mean + x * std
def gaussian_kl(params0, params1):
(mean0, logstd0), (mean1, logstd1) = params0, params1
assert mean0.shape == logstd0.shape == mean1.shape == logstd1.shape
return F.sum(
logstd1 - logstd0 + (F.square(F.exp(logstd0)) + F.square(mean0 - mean1)) / (
2.0 * F.square(F.exp(logstd1))) - 0.5,
axis=1
)
def categorical_kl(params0, params1):
params0 = params0[0]
params1 = params1[0]
assert params0.shape == params1.shape
a0 = params0 - F.tile(F.max(params0, axis=1, keepdims=True), (1, 4))
a1 = params1 - F.tile(F.max(params1, axis=1, keepdims=True), (1, 4))
ea0 = F.exp(a0)
ea1 = F.exp(a1)
z0 = F.tile(F.sum(ea0, axis=1, keepdims=True), (1, 4))
z1 = F.tile(F.sum(ea1, axis=1, keepdims=True), (1, 4))
p0 = ea0 / z0
return F.sum(p0 * (a0 - F.log(z0) - a1 + F.log(z1)), axis=1)
def log_misc_stats(key, logger, lst_value):
lst_value = np.asarray(lst_value)
logger.logkv(key + '~', np.mean(lst_value))
logger.logkv(key + 'Median', np.median(lst_value))
logger.logkv(key + 'Std', np.std(lst_value))
logger.logkv(key + '-', np.min(lst_value))
logger.logkv(key + '+', np.max(lst_value))
def get_time_seed():
return int(1000000 * time.time() % 100000) * 1000
def ret_to_obj(ret):
"""Objective function
"""
return np.mean(ret[-3:])
class Schedule(object):
def value(self, t):
raise NotImplementedError()
def linear_interpolation(l, r, alpha):
return l + alpha * (r - l)
class PiecewiseSchedule(object):
def __init__(self, endpoints, interpolation=linear_interpolation, outside_value=None):
idxes = [e[0] for e in endpoints]
assert idxes == sorted(idxes)
self._interpolation = interpolation
self._outside_value = outside_value
self._endpoints = endpoints
def value(self, t):
for (l_t, l), (r_t, r) in zip(self._endpoints[:-1], self._endpoints[1:]):
if l_t <= t < r_t:
alpha = float(t - l_t) / (r_t - l_t)
return self._interpolation(l, r, alpha)
assert self._outside_value is not None
return self._outside_value
|
# Network for AWS
import ipaddress
import json
import os
from libcloud.compute.drivers.ec2 import VPCInternetGateway, EC2Network
from bastion.component import Component
from bastion.libcloudbastion.ec2 import EC2VPNGateway
from bastion.networking.network.base import Network
from bastion.networking.private_dns.aws import AWSPrivateDns
from bastion.networking.route_table.aws import AWSRouteTable
from bastion.networking.security_group.aws import AWSSecurityGroup
from bastion.networking.subnet.aws import AWSSubnet
class AWSNetwork(Network, Component):
vpn_gateway_private_ip = None
def get_cloud_driver(self):
return self.networking.get_cloud_driver()
def list_route_tables(self):
cloud_driver = self.get_cloud_driver()
all_route_tables = cloud_driver.ex_list_route_tables()
vpc_route_tables = []
for lib_route_table in all_route_tables:
if lib_route_table.extra['vpc_id'] == self.id:
route_table = AWSRouteTable(lib_route_table.id, lib_route_table.name, lib_route_table.routes,
lib_route_table.subnet_associations,
lib_route_table.propagating_gateway_ids,
self, extra=lib_route_table.extra)
vpc_route_tables.append(route_table)
return vpc_route_tables
def list_security_groups(self):
cloud_driver = self.get_cloud_driver()
all_security_groups = cloud_driver.ex_get_security_groups()
vpc_security_groups = []
for lib_security_group in all_security_groups:
if lib_security_group.extra['vpc_id'] == self.id:
vpc_security_groups.append(AWSSecurityGroup(lib_security_group.id,
lib_security_group.name,
self))
return vpc_security_groups
def create_private_dns(self, domain):
# Create Private Zone
dns_server = self.networking.driver.dns_driver.ex_create_private_zone(domain=domain,
vpc_id=self.id,
region=self.networking.driver.cred.region)
self.private_dns = AWSPrivateDns(domain, dns_server, self)
return self.private_dns
def create_subnet(self, cidr, name=None):
        print("Creating subnet in EC2 virtual network {} ...".format(self.name))
cloud_driver = self.get_cloud_driver()
availability_zones = cloud_driver.ex_list_availability_zones()
zone = [az for az in availability_zones][0]
subnet = cloud_driver.ex_create_subnet(vpc_id=self.id,
name=name,
cidr_block=cidr,
availability_zone=zone.name)
        print("Subnet created with name='{0}', cidr='{1}'.".format(
            subnet.name,
            cidr))
return AWSSubnet(subnet.id,
subnet.name,
cidr,
self)
def create_vpn_subnet(self):
network_mask_length = ipaddress.ip_network(self.cidr).prefixlen
subnet_ip = ipaddress.ip_network(self.cidr).network_address + (2 ** (28 - network_mask_length) - 1) * 2 ** (
32 - 28)
subnet = ipaddress.ip_network((subnet_ip, 28))
vpn_subnet_name = "%s-vpn-subnet" % self.name
self.vpn_subnet = self.create_subnet(str(subnet), vpn_subnet_name)
def prepare_vpn_connection(self):
# Create VPN Terminal VM
self.vpn_gateway = self.networking.driver.baseCompute.create_vm(name=self.name + "-vpn",
image_id=self.networking.driver.prop.vpn_image_id,
subnet=self.vpn_subnet)
self.vpn_gateway.set_source_dest_check(False)
self.vpn_gateway_ip = self.vpn_gateway.public_ips[0]
self.vpn_gateway_private_ip = self.vpn_gateway.private_ips[0]
def get_vpn_connection_public_ip(self):
return self.vpn_gateway_ip
def connect_to_vpn_network(self, cidr, public_ip, shared_key):
vpn_parameters =\
{
'local_ip': self.vpn_gateway_private_ip,
'local_public_ip': self.vpn_gateway_ip,
'local_subnet': self.cidr,
'remote_public_ip': public_ip,
'remote_subnet': cidr,
'psk': shared_key
}
route_table = self.list_route_tables()[0]
route_table.create_route(cidr, internet_gateway=None, network_interface=self.vpn_gateway.primary_network_interface)
cloud_driver = self.get_cloud_driver()
default_security_group = self.list_security_groups()[0]
cloud_driver.ex_authorize_security_group_by_id(default_security_group.id, 0, 65535, public_ip + '/32',
protocol='-1')
this_file_path = os.path.abspath(os.path.dirname(__file__))
strongswan_playbook_path = os.path.join(this_file_path, "../../playbooks/torian-strongswan.yml")
self.vpn_gateway.provision(playbook_path=strongswan_playbook_path,
parameters=json.dumps(vpn_parameters),
user='ubuntu')
def start_vpn_connection(self):
this_file_path = os.path.abspath(os.path.dirname(__file__))
ipsec_restart_path = os.path.join(this_file_path, "../../playbooks/ipsec-restart.yml")
self.vpn_gateway.provision(playbook_path=ipsec_restart_path,
user='ubuntu')
def create_dns_subnet(self):
network_mask_length = ipaddress.ip_network(self.cidr).prefixlen
subnet_ip = ipaddress.ip_network(self.cidr).network_address + (2 ** (28 - network_mask_length) - 2) * 2 ** (
32 - 28)
subnet = ipaddress.ip_network((subnet_ip, 28))
dns_subnet_name = "%s-dns-subnet" % self.name
self.dns_subnet = self.create_subnet(str(subnet), dns_subnet_name)
def list_subnets(self):
cloud_driver = self.get_cloud_driver()
all_subnets = cloud_driver.ex_list_subnets()
vpc_subnets = []
for lib_subnet in all_subnets:
if lib_subnet.extra['vpc_id'] == self.id:
vpc_subnet = AWSSubnet(lib_subnet.id,
lib_subnet.name,
lib_subnet.extra['cidr_block'],
self)
vpc_subnets.append(vpc_subnet)
return vpc_subnets
def delete(self):
cloud_driver = self.get_cloud_driver()
networks = cloud_driver.ex_list_networks(network_ids=[self.id])
network = [n for n in networks][0]
cloud_driver.ex_delete_network(network)
def attach_gateway(self, gateway):
cloud_driver = self.get_cloud_driver()
cloud_gateway = VPCInternetGateway(id=gateway.id, name=None, vpc_id=None, state=None, driver=None)
cloud_network = EC2Network(id=self.id, name=None, cidr_block=None)
cloud_driver.ex_attach_internet_gateway(gateway=cloud_gateway, network=cloud_network)
def attach_vpn_gateway(self, vpn_gateway):
cloud_driver = self.get_cloud_driver()
cloud_vpn_gateway = EC2VPNGateway(id=vpn_gateway.id, name=None, state=None)
cloud_network = EC2Network(id=self.id, name=None, cidr_block=None)
cloud_driver.ex_attach_vpn_gateway(vpn_gateway=cloud_vpn_gateway, network=cloud_network)
|
import RPi.GPIO as GPIO
import os
import subprocess
import sys
def startsystem(channel):
print("Starting!")
subprocess.Popen(["python3", "index.py"])
sys.exit(0)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(13, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
GPIO.add_event_detect(13,GPIO.RISING,callback=startsystem)
message = input("")
GPIO.cleanup() |
#!/usr/bin/env python
from math import floor
class LaserSpeed:
"""
MIT License.
This is the standard library for converting to and from speed code information for LHYMICRO-GL.
    The units in the speed code have acceleration/deceleration factors which slightly modify the equations used
    to convert between values and speeds. The fundamental units within the speed code values are period-ticks.
    All values relate to a value in the counter that counts off the number of oscillations of the
    (typically 22.1184) MHz crystal. The max value here is 65535, with the addition of a diagonal delay.
For the M2 board, the original Chinese Software gave a slope of 12120. However, experiments with the actual
physical speed put this value at 11142, which properly reflects that all speeds tend to be at 91.98% of the
requested speed.
The board is ultimately controlling a stepper motor and the speed a stepper motor travels is the result of
the time between the ticks. Since the crystal oscillator is the same, the delay is controlled by the counted
oscillations subticks, which gives us the time between stepper motor pulses. Most of the devices we are
dealing with are 1000 dpi stepper motors, so, for example, to travel at 1 inch a second requires that the
device tick at 1 kHz. To do this it must delay 1 ms between ticks. This corresponds to a value of 48296 in
the M2 board. Which has an equation of 65536 - (5120 + 12120T) where T is the period requested in ms. This is
equal to 25.4 mm/s. If we want a 2 ms delay, which is half the speed (0.5kHz, 0.5 inches/second, 12.7 mm/s)
we do 65536 - (5120 + 24240) which gives us a value of 36176. This would be encoded as a 16 bit number
broken up into 2 ascii 3 digit strings between 0-255. 141 for the high bits and 80 for the low bits.
    So CV1410801 where the final character "1" is the acceleration factor since it's within that range.
    The speed in mm/s is also used for determining which acceleration to use and, for some boards
    (B2, M2), as a factor in the horizontal encoded value. Slowing the device down while traveling diagonally
    makes the diagonal and orthogonal take the same amount of time (thereby cutting to the same depth). These are the same
period-ticks units and is simply summed with the 65536 - (b + mT) value in cases that both stepper motors
are used.
"""
def __init__(self, *args, **kwargs):
self.board = "M2"
self.speed = 30
self.d_ratio = None
self.raster_step = 0
self.acceleration = None
self.suffix_c = None
self.raster_horizontal = True
self.fix_speeds = False
self.fix_lows = False
self.fix_limit = False
if "board" in kwargs:
self.board = kwargs["board"]
if "speed" in kwargs:
self.speed = float(kwargs["speed"])
if "d_ratio" in kwargs:
self.d_ratio = kwargs["d_ratio"]
if "raster_step" in kwargs:
self.raster_step = kwargs["raster_step"]
if "suffix_c" in kwargs:
self.suffix_c = kwargs["suffix_c"]
if "acceleration" in kwargs:
self.acceleration = kwargs["acceleration"]
if "fix_speeds" in kwargs:
self.fix_speeds = kwargs["fix_speeds"]
if "fix_lows" in kwargs:
self.fix_lows = kwargs["fix_lows"]
if "fix_limit" in kwargs:
self.fix_limit = kwargs["fix_limit"]
if "raster_horizontal" in kwargs:
self.raster_horizontal = kwargs["raster_horizontal"]
if len(args) >= 1:
self.board = args[0]
if len(args) >= 2:
if isinstance(args[1], (float, int)):
self.speed = float(args[1])
elif isinstance(args[1], str):
# this is a speedcode value.
(
code_value,
accel,
step_value,
diagonal,
raster_step,
suffix_c,
) = LaserSpeed.parse_speed_code(args[1])
b, m = LaserSpeed.get_equation(
self.board,
accel=accel,
suffix_c=suffix_c,
fix_speeds=self.fix_speeds,
)
self.speed = LaserSpeed.get_speed_from_value(code_value, b, m)
self.acceleration = accel
self.raster_step = raster_step
self.suffix_c = suffix_c
if len(args) >= 3:
self.raster_step = args[2]
def __str__(self):
return self.speedcode
def __repr__(self):
parts = list()
if self.board != "M2":
parts.append('board="%s"' % self.board)
if self.speed is not None:
parts.append("speed=%f" % self.speed)
if self.d_ratio is not None:
parts.append("d_ratio=%f" % self.d_ratio)
if self.raster_step != 0:
parts.append("raster_step=%d" % self.raster_step)
if self.suffix_c is not None:
parts.append("suffix_c=%s" % str(self.suffix_c))
if self.acceleration is not None:
parts.append("acceleration=%d" % self.acceleration)
if self.fix_speeds:
parts.append("fix_speeds=%s" % str(self.fix_speeds))
if self.fix_lows:
parts.append("fix_lows=%s" % str(self.fix_lows))
if self.fix_limit:
parts.append("fix_limit=%s" % str(self.fix_limit))
if not self.raster_horizontal:
parts.append("raster_horizontal=%s" % str(self.raster_horizontal))
return "LaserSpeed(%s)" % (", ".join(parts))
@property
def speedcode(self):
return LaserSpeed.get_code_from_speed(
self.speed,
self.raster_step,
self.board,
self.d_ratio,
self.acceleration,
self.suffix_c,
fix_limit=self.fix_limit,
fix_speeds=self.fix_speeds,
fix_lows=self.fix_lows,
raster_horizontal=self.raster_horizontal,
)
@staticmethod
def get_speed_from_code(speed_code, board="M2", fix_speeds=False):
"""
Gets the speed expected from a speedcode. Should calculate the expected speed from the data code given.
:param speed_code: The speedcode to check.
:param board: The board this speedcode was made for.
:param fix_speeds: Is this speedcode in a fixed_speed code?
:return:
"""
(
code_value,
accel,
step_value,
diagonal,
raster_step,
suffix_c,
) = LaserSpeed.parse_speed_code(speed_code)
b, m = LaserSpeed.get_equation(
board, accel=accel, suffix_c=suffix_c, fix_speeds=fix_speeds
)
return LaserSpeed.get_speed_from_value(code_value, b, m)
@staticmethod
def get_code_from_speed(
mm_per_second,
raster_step=0,
board="M2",
d_ratio=None,
acceleration=None,
suffix_c=None,
fix_limit=False,
fix_speeds=False,
fix_lows=False,
raster_horizontal=True,
):
"""
Get a speedcode from a given speed. The raster step appends the 'G' value and uses speed ranges.
The d_ratio uses the default/auto ratio. The accel is optional and forces the speedcode to work
for that particular acceleration.
:param mm_per_second: speed to convert to code.
:param raster_step: raster step mode to use. Use (g0,g1) tuple for unidirectional valuations.
:param board: Nano Board Model
:param d_ratio: M1, M2, B1, B2 have ratio of optional speed
:param acceleration: Optional force acceleration code rather than default for that speed.
:param suffix_c: Optional force suffix_c mode for the board. (True forces suffix_c on, False forces it off)
:param fix_limit: Removes max speed limit.
:param fix_speeds: Give corrected speed (faster by 8.9%)
:param fix_lows: Force low speeds into correct bounds.
:param raster_horizontal: is it rastering with the laser head, or the much heavier bar?
:return: speed code produced.
"""
if d_ratio is None:
d_ratio = 0.261199033289
if not fix_limit and mm_per_second > 240 and raster_step == 0:
            mm_per_second = 19.05  # Arbitrary default speed for an out-of-range value.
if acceleration is None:
acceleration = LaserSpeed.get_acceleration_for_speed(
mm_per_second,
raster_step != 0,
raster_horizontal=raster_horizontal,
fix_speeds=fix_speeds,
)
if suffix_c is None:
suffix_c = LaserSpeed.get_suffix_c(board, mm_per_second)
b, m = LaserSpeed.get_equation(
board, accel=acceleration, suffix_c=suffix_c, fix_speeds=fix_speeds
)
speed_value = LaserSpeed.get_value_from_speed(mm_per_second, b, m)
if fix_lows and speed_value < 0:
# produced a negative speed value, go ahead and set that to 0
speed_value = 0
encoded_speed = LaserSpeed.encode_16bit(speed_value)
if raster_step != 0:
# There is no C suffix notation for raster step.
if isinstance(raster_step, tuple):
return "V%s%1dG%03dG%03d" % (
encoded_speed,
acceleration,
raster_step[0],
raster_step[1],
)
else:
return "V%s%1dG%03d" % (encoded_speed, acceleration, raster_step)
if d_ratio == 0 or board in ("A", "B", "M"):
# We do not need the diagonal code.
if raster_step == 0:
if suffix_c:
return "CV%s1C" % encoded_speed
else:
return "CV%s%1d" % (encoded_speed, acceleration)
else:
step_value = min(int(floor(mm_per_second) + 1), 128)
frequency_kHz = float(mm_per_second) / 25.4
try:
period_in_ms = 1 / frequency_kHz
except ZeroDivisionError:
period_in_ms = 0
d_value = d_ratio * m * period_in_ms / float(step_value)
if fix_lows:
if d_value > 0xFFFF:
d_value = 0xFFFF
if d_value < 0:
d_value = 0
encoded_diagonal = LaserSpeed.encode_16bit(d_value)
if suffix_c:
return "CV%s1%03d%sC" % (encoded_speed, step_value, encoded_diagonal)
else:
return "CV%s%1d%03d%s" % (
encoded_speed,
acceleration,
step_value,
encoded_diagonal,
)
@staticmethod
def parse_speed_code(speed_code):
"""
        Parses a speedcode into its relevant parts. These are:
        A prefix of CV or V, then the code value, which is a string of digits that is
        either 7 or 16 characters long. Bugged versions are permitted to be 5 characters
        longer, i.e. either 12 or 21 characters long, since the initial 3 character
        string becomes an 8 character string when it falls out of the 000-255 range and
        becomes (16777216 - v).
        Codes with a suffix-c value are equal to 1/12th with different timings.
        Codes with G-values are raster stepped. Two G-values imply unidirectional
        rasters, but those are a specific (x, 0) step sequence.
:param speed_code: Speedcode to parse
:return: code_value, accel, step_value, diagonal, raster_step, suffix_c
"""
suffix_c = False
prefix_c = False
start = 0
end = len(speed_code)
if speed_code[start] == "C":
start += 1
prefix_c = True
if speed_code[end - 1] == "C":
end -= 1
suffix_c = True
if speed_code[start : start + 4] == "V167" and speed_code[start + 4] not in (
"0",
"1",
"2",
):
# The 4th character can only be 0,1,2 except for error speeds.
code_value = LaserSpeed.decode_16bit(speed_code[start + 1 : start + 12])
start += 12
# The value for this speed is so low, it's negative
# and bit-shifted in 24 bits of a negative number.
# These are produced by chinese software but are not valid.
else:
code_value = LaserSpeed.decode_16bit(speed_code[start + 1 : start + 7])
start += 7
code_value = 65536 - code_value
accel = int(speed_code[start])
start += 1
raster_step = 0
if speed_code[end - 4] == "G":
raster_step = int(speed_code[end - 3 : end])
end -= 4
# Removes Gxxx
if speed_code[end - 4] == "G":
raster_step = (int(speed_code[end - 3 : end]), raster_step)
end -= 4
# Removes Gxxx, means this is was GxxxGxxx.
step_value = 0
diagonal = 0
if (end + 1) - start >= 9:
step_value = int(speed_code[start : start + 3])
diagonal = LaserSpeed.decode_16bit(speed_code[start + 3 : end])
return code_value, accel, step_value, diagonal, raster_step, suffix_c
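    # Editor's worked example (a sketch tracing the simple code from the class
    # docstring above; "CV1410801" is assumed to be a plain, non-raster M2 code):
    #   parse_speed_code("CV1410801") -> (65536 - 36176, 1, 0, 0, 0, False),
    #   i.e. code_value 29360 with acceleration 1, no step/diagonal values,
    #   no raster step and no C suffix.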
@staticmethod
def get_value_from_speed(mm_per_second, b, m):
"""
Calculates speed value from a given speed.
"""
try:
frequency_kHz = float(mm_per_second) / 25.4
period_in_ms = 1.0 / frequency_kHz
return 65536 - LaserSpeed.get_value_from_period(period_in_ms, b, m)
except ZeroDivisionError:
return 65536 - b
@staticmethod
def get_value_from_period(x, b, m):
"""
Takes in period in ms and converts it to value.
This is a simple linear relationship.
"""
return m * x + b
@staticmethod
def get_speed_from_value(value, b, m):
try:
period_in_ms = LaserSpeed.get_period_from_value(value, b, m)
frequency_kHz = 1 / period_in_ms
return 25.4 * frequency_kHz
except ZeroDivisionError:
return 0
@staticmethod
def get_period_from_value(y, b, m):
try:
return (y - b) / m
except ZeroDivisionError:
return float("inf")
@staticmethod
def decode_16bit(code):
b1 = int(code[0:-3])
if b1 > 16000000:
b1 -= 16777216 # decode error negative numbers
if b1 > 0x7FFF:
b1 = b1 - 0xFFFF
b2 = int(code[-3:])
return (b1 << 8) + b2
@staticmethod
def encode_16bit(value):
value = int(value)
b0 = value & 255
b1 = (value >> 8) & 0xFFFFFF # unsigned shift, to emulate bugged form.
return "%03d%03d" % (b1, b0)
@staticmethod
def get_actual_speed(op_speed, fix_speeds=False):
"""Get the actual speed for a specified operation speed."""
return op_speed / 0.919493599053179 if fix_speeds else op_speed
@staticmethod
def get_acceleration_for_speed(
mm_per_second, raster=False, raster_horizontal=True, fix_speeds=False
):
"""
Gets the acceleration factor for a particular speed.
It is known that vertical rastering has different acceleration factors.
This is not fully mapped out but appeared more in line with non-rastering values.
:param mm_per_second: Speed to find acceleration value for.
:param raster: Whether this speed is for a rastering.
:param raster_horizontal: Whether this speed is for horizontal rastering (top-to-bottom, y-axis speed)
:param fix_speeds: is fixed speed mode on?
:return: 1-4: Value for the accel factor.
"""
mm_per_second = LaserSpeed.get_actual_speed(mm_per_second, fix_speeds)
if mm_per_second <= 25.4:
return 1
if 25.4 < mm_per_second <= 60:
return 2
if raster and raster_horizontal:
if 60 < mm_per_second < 127:
return 2
if 127 <= mm_per_second <= 320:
return 3
if 320 < mm_per_second:
return 4
else:
if 60 < mm_per_second < 127:
return 3
if 127 <= mm_per_second:
return 4
# With the m2 nano, raster acceleration is defined by the distance allowed and the speed
# To determine how to optimally split rasters which are far apart into separate images
# or to combine these separate images back into larger images where that makes sense
# we need this information to estimate when one option is more optimal than another
#
# We have only measured horizontal acceleration distances,
# but IMO from what we know about m2 nano simplicity,
# it is likely that vertical accel distances are likely the same
#
# However because these values are only used to determine
# raster groups and not for the burns themselves,
# if these are incorrect we will just get sub-optimal grouping
# and not quality issues
ACCELERATION_DISTANCES = [ # In mm
3.2512, # acceleration 1 distance 128mil
3.2512, # acceleration 2 distance 128mil
4.8768, # acceleration 3 distance 192mil
6.5024, # acceleration 4 distance 256mil
]
@staticmethod
def get_acceleration_time(speed, accel):
"""Calculate 1/2 sweep distance for speed / accel as raster margin"""
return (
375.0
* (LaserSpeed.ACCELERATION_DISTANCES[accel - 1] ** 1.36)
/ (speed ** 0.75)
)
@staticmethod
def get_suffix_c(board, mm_per_second=None):
"""
        Due to a bug in the Chinese software, the cutoff for the B2 machine is the same as the M2
        at 7, but because of the half-stepping the minimum achievable speed is 9.509,
        and the cutoff falls below it. Speeds between 7 and 9.509 will be invalid.
        Since the B2 board is intended to duplicate this behavior, it will error as well.
"""
if board == "B2":
if mm_per_second < 7:
return True
if board == "M2" and mm_per_second < 7:
return True
return False
@staticmethod
def get_equation(board, accel=1, suffix_c=False, fix_speeds=False):
"""
        The speed for the M2 was physically checked and found to be inaccurate.
        Without fix_speeds this strictly emulates the Chinese software; the
        physical device actually scaled with a different slope.
        The correct value has been established for the M2 board. For the B2 board it is
        guessed to be twice the M2 value. It is not known for the A, B, or B1 boards.
"""
b = 784.0
if accel == 3:
b = 896.0
if accel == 4:
b = 1024.0
if board in ("A", "B", "B1"):
# A, B, B1 have no known suffix-C equations.
return b, 2000.0
m = 12120.0
if fix_speeds:
m = 11148.0
if board == "B2":
m *= 2
if suffix_c:
return b, m / 12.0
else:
# Non-B2 b-values
if accel == 3:
b = 5632.0
elif accel == 4:
b = 6144.0
else:
b = 5120.0
if suffix_c:
return 8.0, m / 12.0
return b, m
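# Hedged usage sketch (not part of the original module): round-trip a speed
# through the value/period helpers using the equation returned by get_equation()
# for an M2 board, and round-trip a value through the 16-bit packing helpers.
# The 100 mm/s figure and the value 3305 are purely illustrative.
if __name__ == "__main__":
    b, m = LaserSpeed.get_equation("M2", accel=1)
    value = LaserSpeed.get_value_from_speed(100.0, b, m)
    speed = LaserSpeed.get_speed_from_value(65536 - value, b, m)
    print(value, round(speed, 3))  # speed should round-trip back to ~100.0
    packed = LaserSpeed.encode_16bit(3305)  # "012233": high byte 12, low byte 233
    assert LaserSpeed.decode_16bit(packed) == 3305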
|
import json
import os
from typing import Any
import spotipy
from spotipy.oauth2 import SpotifyOAuth
try:
CLIENT_ID = os.environ["SPOTIPY_CLIENT_ID"]
CLIENT_SECRET = os.environ["SPOTIPY_CLIENT_SECRET"]
CLIENT_REDIRECT_URI = os.environ["SPOTIPY_REDIRECT_URI"]
except Exception as e:
print("An Environment Variable for Spotify is incorrrect. Please verify you have the follow correct:\n")
print("CLIENT_ID\nCLIENT_SECRET\nREDIRECT_URI\n")
redirect = "http://localhost:3000"
class Spotify:
def __init__(self, deviceName:str, nospotify):
self._scope = "user-read-playback-state,user-modify-playback-state"
if not nospotify:
self._credentials = SpotifyOAuth(client_id=CLIENT_ID, client_secret=CLIENT_SECRET, redirect_uri=redirect, scope=self._scope, open_browser=True)
self.spotify = spotipy.Spotify(client_credentials_manager=self._credentials)
else:
self._credentials = None
self.spotify = None
self._deviceName = deviceName
self._deviceId = -1 if nospotify else self.establishConnection()
self._recentQuery = {}
self._queue = set()
def _findDeviceId(self) -> Any:
id = -1
devices = self.getDevices()
for device in devices["devices"]:
if device['name'] == self.getDeviceName():
id = device['id']
return id
def _setDeviceId(self, id) -> None:
self._deviceId = id
return None
def _addToLocalQueue(self, id) -> None:
knownQuery = self.getRecentQuery()
self._queue.add(knownQuery[id])
return None
def checkAndRemoveFromQueue(self, uri) -> None:
for item in self._queue:
if item[2] == uri:
self._queue.remove(item)
break
return None
def getCurrentPlayback(self):
return self.spotify.currently_playing()
    def establishConnection(self, maxAttempts=3) -> Any:
        currentRetries = 1
        deviceId = self._findDeviceId()
        while deviceId == -1 and currentRetries <= maxAttempts:
            print("Attempting to establish spotify device connection...")
            print(f"Retries remaining: {(maxAttempts - currentRetries)}")
            deviceId = self._findDeviceId()
            currentRetries += 1
        if deviceId != -1:
            self._setDeviceId(deviceId)
            print(f"Established connection to device id: {self.getDeviceId()}")
        else:
            print(f"Error! Could not establish connection to device named: {self.getDeviceName()}")
        # Return the id so __init__ can assign it to self._deviceId directly.
        return deviceId
    def getDevices(self) -> dict:
return self.spotify.devices()
def getDeviceId(self) -> Any:
return self._deviceId
def getDeviceName(self) -> str:
return self._deviceName
def getRecentQuery(self) -> dict:
return self._recentQuery
def getQueue(self) -> set:
return self._queue
def setRecentQuery(self, results:dict) -> None:
self._recentQuery = results
return None
def printSearchResults(self, results:dict) -> None:
for key, value in results.items():
print(f" ID: {key}")
print(f" TRACK: {value[0]}")
print(f"ARTIST: {value[1]}")
print(f" URI: {value[2]}")
print()
return None
def search(self, query) -> None:
formattedResults = {}
results = self.spotify.search(q=query)
id = 1
for item in results['tracks']['items']:
for artist in item['artists']:
formattedResults[id] = (item['name'], artist['name'], item['uri'])
id += 1
self.setRecentQuery(formattedResults)
return None
def play(self) -> None:
self.spotify.start_playback(device_id=self.getDeviceId())
return None
def pause(self) -> None:
self.spotify.pause_playback(device_id=self.getDeviceId())
return None
def next(self) -> None:
self.spotify.next_track(device_id=self.getDeviceId())
return None
def previous(self) -> None:
self.spotify.previous_track(device_id=self.getDeviceId())
return None
def requestTrack(self, id) -> None:
uri = self.getTrackUri(id, self.getRecentQuery())
if self.spotify.current_playback():
self.spotify.add_to_queue(uri=uri, device_id=self.getDeviceId())
else:
self.spotify.start_playback(uris=[uri], device_id=self.getDeviceId())
self._addToLocalQueue(id)
return None
def getTrackUri(self, trackId, recentQuery:dict) -> Any:
return recentQuery[trackId][2] #uri position in tuple |
"""
Escreva um programa que receba como entrada o valor do saque realizado pelo cliente de um banco e retorne quantas
notas de cada valor serão necessárias para atender ao saque com a menor quantidade de notas possível. Serão utilizadas
notas de 100, 50, 20, 10, 5, 2 e 1 real.
"""
n1 = int(input('Enter the withdrawal amount: '))
n2 = 0
lista = [100, 50, 20, 10, 5, 2, 1]
while True:
if n1 == 0:
break
else:
quant = int(n1 / lista[n2])
n1 -= (quant * lista[n2])
if quant > 0:
            print(f'You will need {quant} note(s) of R$ {lista[n2]}')
n2 += 1
|
#!/usr/bin/env python3
from collections import namedtuple, UserList
Step = namedtuple('Step', ['actor', 'options'])
class MultiStep(UserList):
def __init__(self, iterable=None):
# validation logic
if iterable:
[e.actor for e in iterable]
super().__init__(iterable)
    def __setitem__(self, i, elem):
        elem.actor  # validation: element must expose an 'actor' attribute
        self.data[i] = elem
def insert(self, i, elem):
elem.actor
super().insert(i, elem)
def append(self, elem):
elem.actor
super().append(elem)
    def extend(self, iterable):
        [e.actor for e in iterable]
        super().extend(iterable)
def __call__(self):
# attempt to return lazy data
lazy_data = None
for step in self:
if step.options:
options = step.options
else:
options = dict()
lazy_data = step.actor(lazy_data, **options)
return lazy_data
def compute(self, *args, **kwargs):
self.computation = self()
self.computation.compute()
def persist(self, *args, **kwargs):
self.computation = self()
return self.computation.persist()
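# Hedged usage sketch (not in the original file): each Step.actor is a callable
# that takes the previous step's result plus its keyword options; the names
# 'load' and 'double' below are illustrative only.
if __name__ == "__main__":
    def load(previous, count=3):
        # 'previous' is None for the first step
        return list(range(count))

    def double(previous):
        return [x * 2 for x in previous]

    pipeline = MultiStep([Step(load, {'count': 4}), Step(double, None)])
    print(pipeline())  # [0, 2, 4, 6]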
|
"""Description
"""
import sys, os, time, argparse
from collections import OrderedDict, deque
import tensorflow as tf
import numpy as np
from feeder import Feeder
from model import SptAudioGen, SptAudioGenParams
from pyutils.ambisonics.distance import ambix_emd
import myutils
from definitions import *
def parse_arguments():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('model_dir', help='Directory to store model.')
parser.add_argument('--subset_fn', default='')
parser.add_argument('--batch_size', default=16, type=int)
parser.add_argument('--overwrite', action='store_true')
parser.add_argument('--gpu', type=int, default=0, help="GPU id")
args = parser.parse_args(sys.argv[1:])
if len(args.subset_fn) == 0:
args.subset_fn = None
return args
def main(args):
eval_fn = os.path.join(args.model_dir, 'eval-detailed.txt')
assert os.path.exists(args.model_dir), 'Model dir does not exist.'
assert args.overwrite or not os.path.exists(eval_fn), 'Evaluation file already exists.'
os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % args.gpu
print ('\n' + '='*30 + ' ARGUMENTS ' + '='*30)
params = myutils.load_params(args.model_dir)
    for k, v in params.__dict__.items():
        print ('TRAIN | {}: {}'.format(k, v))
    for k, v in args.__dict__.items():
        print ('EVAL | {}: {}'.format(k, v))
sys.stdout.flush()
DURATION = 0.1
BATCH_SIZE = 16
with tf.device('/cpu:0'), tf.compat.v1.variable_scope('feeder'):
feeder = Feeder(params.db_dir,
subset_fn=args.subset_fn,
ambi_order=params.ambi_order,
audio_rate=params.audio_rate,
video_rate=params.video_rate,
context=params.context,
duration=DURATION,
return_video=VIDEO in params.encoders,
img_prep=myutils.img_prep_fcn(),
return_flow=FLOW in params.encoders,
frame_size=(224, 448),
queue_size=BATCH_SIZE*5,
n_threads=4,
for_eval=True)
batches = feeder.dequeue(BATCH_SIZE)
ambix_batch = batches['ambix']
video_batch = batches['video'] if VIDEO in params.encoders else None
flow_batch = batches['flow'] if FLOW in params.encoders else None
audio_mask_batch = batches['audio_mask']
        ss = int(params.audio_rate * params.context) // 2  # keep integer for slicing
t = int(params.audio_rate * DURATION)
audio_input = ambix_batch[:, :, :params.ambi_order**2]
audio_target = ambix_batch[:, ss:ss+t, params.ambi_order**2:]
print ('\n' + '=' * 20 + ' MODEL ' + '=' * 20)
sys.stdout.flush()
with tf.device('/gpu:0'):
# Model
num_sep = params.num_sep_tracks if params.separation != NO_SEPARATION else 1
net_params = SptAudioGenParams(sep_num_tracks=num_sep, ctx_feats_fc_units=params.context_units,
loc_fc_units=params.loc_units, sep_freq_mask_fc_units=params.freq_mask_units,
sep_fft_window=params.fft_window)
model = SptAudioGen(ambi_order=params.ambi_order,
audio_rate=params.audio_rate,
video_rate=params.video_rate,
context=params.context,
sample_duration=DURATION,
encoders=params.encoders,
separation=params.separation,
params=net_params)
# Inference
pred_t = model.inference_ops(audio=audio_input, video=video_batch, flow=flow_batch, is_training=False)
# Losses and evaluation metrics
with tf.compat.v1.variable_scope('metrics'):
w_t = audio_input[:, ss:ss+t]
_, stft_dist_ps, lsd_ps, mse_ps, snr_ps = model.evaluation_ops(pred_t, audio_target, w_t,
mask_channels=audio_mask_batch[:, params.ambi_order**2:])
# Loader
vars2save = [v for v in tf.global_variables() if not v.op.name.startswith('metrics')]
saver = tf.train.Saver(vars2save)
print ('\n' + '='*30 + ' VARIABLES ' + '='*30)
model_vars = tf.global_variables()
import numpy as np
for v in model_vars:
if 'Adam' in v.op.name.split('/')[-1]:
continue
print (' * {:50s} | {:20s} | {:7s} | {:10s}'.format(v.op.name, str(v.get_shape()), str(np.prod(v.get_shape())), str(v.dtype)))
print ('\n' + '='*30 + ' EVALUATION ' + '='*30)
sys.stdout.flush()
config = tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True)
)
with tf.Session(config=config) as sess:
print ('Loading model...')
sess.run(model.init_ops)
saver.restore(sess, tf.train.latest_checkpoint(args.model_dir))
print ('Initializing data feeders...')
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess, coord)
feeder.start_threads(sess)
all_metrics = ['amplitude/predicted', 'amplitude/gt',
'mse/avg', 'mse/X','mse/Y', 'mse/Z',
'stft/avg', 'stft/X','stft/Y', 'stft/Z',
'lsd/avg', 'lsd/X','lsd/Y', 'lsd/Z',
'mel_lsd/avg', 'mel_lsd/X','mel_lsd/Y', 'mel_lsd/Z',
'snr/avg', 'snr/X','snr/Y', 'snr/Z',
'env_mse/avg', 'env_mse/X','env_mse/Y', 'env_mse/Z',
'emd/dir', 'emd/dir2']
metrics = OrderedDict([(key, []) for key in all_metrics])
sample_ids = []
telapsed = deque(maxlen=20)
print ('Start evaluation...')
it = -1
# run_options = tf.RunOptions(timeout_in_ms=60*1000)
while True:
it += 1
if feeder.done(sess):
break
start_time = time.time()
outs = sess.run([batches['id'], audio_mask_batch, w_t, audio_target, pred_t, stft_dist_ps, lsd_ps, mse_ps, snr_ps])
video_id, layout, mono, gt, pred = outs[:5]
gt_m = np.concatenate((mono, gt), axis=2) * layout[:, np.newaxis, :]
pred_m = np.concatenate((mono, pred), axis=2) * layout[:, np.newaxis, :]
stft_dist, lsd, mse, snr = outs[5:]
_env_time = 0.
_emd_time = 0.
_pow_time = 0.
_lsd_time = 0.
for smp in range(BATCH_SIZE):
metrics['stft/avg'].append(np.mean(stft_dist[smp]))
for i, ch in zip(range(3), 'YZX'):
metrics['stft/'+ch].append(stft_dist[smp, i])
metrics['lsd/avg'].append(np.mean(lsd[smp]))
for i, ch in zip(range(3), 'YZX'):
metrics['lsd/'+ch].append(lsd[smp, i])
metrics['mse/avg'].append(np.mean(mse[smp]))
for i, ch in zip(range(3), 'YZX'):
metrics['mse/'+ch].append(mse[smp, i])
metrics['snr/avg'].append(np.nanmean(snr[smp]))
for i, ch in zip(range(3), 'YZX'):
metrics['snr/'+ch].append(snr[smp, i])
# Compute Mel LSD distance
_t = time.time()
mel_lsd = myutils.compute_lsd_dist(pred[smp], gt[smp], params.audio_rate)
metrics['mel_lsd/avg'].append(np.mean(mel_lsd))
for i, ch in zip(range(3), 'YZX'):
metrics['mel_lsd/'+ch].append(mel_lsd[i])
_lsd_time += (time.time() - _t)
# Compute envelope distances
_t = time.time()
env_mse = myutils.compute_envelope_dist(pred[smp], gt[smp])
metrics['env_mse/avg'].append(np.mean(env_mse))
for i, ch in zip(range(3), 'YZX'):
metrics['env_mse/'+ch].append(env_mse[i])
_env_time += (time.time() - _t)
# Compute EMD (for speed, only compute emd over first 0.1s of every 1sec)
_t = time.time()
emd_dir, emd_dir2 = ambix_emd(pred_m[smp], gt_m[smp], model.snd_rate, ang_res=30)
metrics['emd/dir'].append(emd_dir)
metrics['emd/dir2'].append(emd_dir2)
_emd_time += (time.time() - _t)
# Compute chunk power
_t = time.time()
metrics['amplitude/gt'].append(np.abs(gt[smp]).max())
metrics['amplitude/predicted'].append(np.abs(pred[smp]).max())
_pow_time += (time.time() - _t)
sample_ids.append(video_id[smp])
telapsed.append(time.time() - start_time)
#print '\nTotal:', telapsed[-1]
#print 'Env:', _env_time
#print 'LSD:', _lsd_time
#print 'EMD:', _emd_time
#print 'POW:', _pow_time
if it % 100 == 0:
# Store evaluation metrics
with open(eval_fn, 'w') as f:
f.write('SampleID | {}\n'.format(' '.join(metrics.keys())))
for smp in range(len(sample_ids)):
f.write('{} | {}\n'.format(sample_ids[smp], ' '.join([str(metrics[key][smp]) for key in metrics])))
if it % 5 == 0:
stats = OrderedDict([(m, np.mean(metrics[m])) for m in all_metrics])
myutils.print_stats(stats.values(), stats.keys(), BATCH_SIZE, telapsed, it, tag='EVAL')
sys.stdout.flush()
# Print progress
stats = OrderedDict([(m, np.mean(metrics[m])) for m in all_metrics])
myutils.print_stats(stats.values(), stats.keys(), BATCH_SIZE, telapsed, it, tag='EVAL')
sys.stdout.flush()
with open(eval_fn, 'w') as f:
f.write('SampleID | {}\n'.format(' '.join(metrics.keys())))
for smp in range(len(sample_ids)):
f.write('{} | {}\n'.format(sample_ids[smp], ' '.join([str(metrics[key][smp]) for key in metrics])))
print('\n'+'#'*60)
print('End of evaluation.')
if __name__ == '__main__':
main(parse_arguments())
|
# encoding: utf-8
# Author: Bingxin Ke
# Created: 2021/10/4
"""
Homogeneous coordinate transformation
"""
from typing import Union
import numpy as np
import open3d as o3d
import torch
def points_to_transform(p1, p2):
    raise NotImplementedError
def extent_transform_to_points(extents, transform):
# _p1 = np.array([0, 0, 0, 1]).reshape((4, 1))
_half_extents = extents / 2.0
_p1 = np.concatenate([_half_extents * -1, [1]]).reshape((4, 1)) * -1
_p2 = np.concatenate([_half_extents, [1]]).reshape((4, 1))
_p1 = np.matmul(np.array(transform), _p1).squeeze()
_p2 = np.matmul(np.array(transform), _p2).squeeze()
_p1 = _p1 / _p1[3]
_p2 = _p2 / _p2[3]
return _p1[:3], _p2[:3]
def normalize_pc(points: Union[np.ndarray, o3d.geometry.PointCloud], scales, center_shift):
"""
Normalize a point cloud: x_norm = (x_ori - center_shift) / scale
Args:
points: input point cloud
scales: scale of source data
center_shift: shift of original center (in original crs)
    Returns:
        Normalized points as an (N, 3) numpy array.
    """
if isinstance(points, o3d.geometry.PointCloud):
points = np.asarray(points.points)
norm_pc = (points - center_shift) / scales
return norm_pc
def invert_normalize_pc(points: Union[np.ndarray, o3d.geometry.PointCloud], scales, center_shift):
"""
Invert normalization of a point cloud: x_ori = scales * x_norm + center_shift
Args:
        points: normalized point cloud (np.ndarray or o3d.geometry.PointCloud)
        scales: scale of source data
        center_shift: shift of original center (in original crs)
    Returns:
        Points in the original coordinate system as an (N, 3) numpy array.
    """
if isinstance(points, o3d.geometry.PointCloud):
points = np.asarray(points.points)
ori_pc = points * scales + center_shift
return ori_pc
def apply_transform(p, M):
if isinstance(p, np.ndarray):
p = p.reshape((-1, 3))
p = np.concatenate([p, np.ones((p.shape[0], 1))], 1).transpose()
p2 = np.matmul(M, p).squeeze()
p2 = p2 / p2[3, :]
return p2[0:3, :].transpose()
elif isinstance(p, torch.Tensor):
p = p.reshape((-1, 3))
p = torch.cat([p, torch.ones((p.shape[0], 1)).to(p.device)], 1).transpose(0, 1)
p2 = torch.matmul(M.double(), p.double()).squeeze()
p2 = p2 / p2[3, :]
return p2[0:3, :].transpose(0, 1).to(p.dtype)
else:
raise TypeError
def invert_transform(M):
if isinstance(M, np.ndarray):
return np.linalg.inv(M)
elif isinstance(M, torch.Tensor):
return torch.inverse(M.double()).to(M.dtype)
else:
raise TypeError
def stack_transforms(M_ls):
"""
M_out = M_ls[0] * M_ls[1] * M_ls[2] * ...
Args:
        M_ls: list of 4x4 transforms (all np.ndarray or all torch.Tensor)
    Returns:
        The combined transform M_out.
    """
M_out = M_ls[0]
if isinstance(M_out, np.ndarray):
for M in M_ls[1:]:
M_out = np.matmul(M_out, M)
return M_out
elif isinstance(M_out, torch.Tensor):
for M in M_ls[1:]:
M_out = torch.matmul(M_out, M)
return M_out
else:
raise TypeError
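# Hedged usage sketch (not in the original file): apply a pure-translation
# homogeneous transform to a few points and undo it with its inverse; the
# translation and point values are illustrative only.
if __name__ == "__main__":
    M = np.eye(4)
    M[:3, 3] = [1.0, 2.0, 3.0]
    pts = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
    moved = apply_transform(pts, M)
    restored = apply_transform(moved, invert_transform(M))
    assert np.allclose(pts, restored)
    print(moved)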
|
def soma (x, y):
return x + y
def multiplica (x, y, z):
return x * y * z
def meu_nome():
return "Lucas Zarza"
|
# ipop-project
# Copyright 2016, University of Florida
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
import json
import signal
import argparse
import threading
import importlib
import uuid
import controller.framework.fxlib as fxlib
from collections import OrderedDict
from controller.framework.CFxHandle import CFxHandle
from controller.framework.CFxSubscription import CFxSubscription
class CFX(object):
def __init__(self):
self._config = OrderedDict()
self.parse_config()
"""
CFxHandleDict is a dict containing the references to CFxHandles of all CMs with key as the module name and
value as the CFxHandle reference
"""
self._cfx_handle_dict = {}
self.model = self._config["CFx"]["Model"]
self._event = None
self._subscriptions = {}
self._node_id = self.set_node_id()
self._load_order = []
def submit_cbt(self, cbt):
recipient = cbt.request.recipient
if cbt.op_type == "Response":
recipient = cbt.response.recipient
self._cfx_handle_dict[recipient]._cm_queue.put(cbt)
def initialize(self,):
# check for circular dependencies in the configuration file
dependency_graph = {}
for key in self._config:
if key != "CFx":
try:
dependency_graph[key] = self._config[key]["Dependencies"]
except Exception as error:
pass
if self.detect_cyclic_dependency(dependency_graph):
print("Circular dependency detected in config.json. Exiting")
sys.exit()
self.build_load_order()
# iterate and load the modules specified in the configuration file
for module_name in self._load_order:
self.load_module(module_name)
        # initialize all the CFxHandles, which in turn initialize the CMs
for module_name in self._load_order:
self._cfx_handle_dict[module_name].initialize()
# start all the worker and timer threads
for module_name in self._cfx_handle_dict:
self._cfx_handle_dict[module_name]._cm_thread.start()
if self._cfx_handle_dict[module_name]._timer_thread:
self._cfx_handle_dict[module_name]._timer_thread.start()
def load_module(self, module_name):
"""
Dynamically load the modules specified in the config file. Allow model
specific module implementations to override the default by attempting
to load them first.
"""
if len(self.model) > 0:
if os.path.isfile("controller/modules/{0}/{1}.py"
.format(self.model, module_name)):
module = importlib.import_module("controller.modules.{0}.{1}"
.format(self.model, module_name))
else:
module = importlib.import_module("controller.modules.{0}"
.format(module_name))
# get the class with name key from module
module_class = getattr(module, module_name)
# create a CFxHandle object for each module
handle = CFxHandle(self)
self._config[module_name]["NodeId"] = self._node_id
instance = module_class(handle, self._config[module_name], module_name)
handle._cm_instance = instance
handle._cm_config = self._config[module_name]
# store the CFxHandle object references in the
# dict with module name as the key
self._cfx_handle_dict[module_name] = handle
def add_dependencies(self, module_name):
dependencies = self._config[module_name].get("Dependencies", {})
for dep in dependencies:
if dep not in self._load_order:
self.add_dependencies(dep)
if module_name not in self._load_order:
self._load_order.append(module_name)
def build_load_order(self,):
# creates a module load order based on how they are listed in the
# config file and their dependency list
try:
for module_name in self._config:
module_enabled = self._config[module_name].get("Enabled", True)
if module_enabled and module_name != "CFx":
self.add_dependencies(module_name)
except KeyError:
pass
def detect_cyclic_dependency(self, g):
# test if the directed graph g has a cycle
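        # e.g. (illustrative): {"A": ["B"], "B": ["A"]} has a cycle,
        # while {"A": ["B"], "B": []} does not.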
path = set()
def visit(vertex):
path.add(vertex)
for neighbour in g.get(vertex, ()):
if (neighbour in path) or visit(neighbour):
return True
path.remove(vertex)
return False
return any(visit(v) for v in g)
def __handler(self, signum=None, frame=None):
print("Signal handler called with signal ", signum)
def parse_config(self):
for k in fxlib.MODULE_ORDER:
self._config[k] = fxlib.CONFIG.get(k)
parser = argparse.ArgumentParser()
parser.add_argument("-c", help="load configuration from a file",
dest="config_file", metavar="config_file")
parser.add_argument("-u", help="update configuration file if needed",
dest="update_config", action="store_true")
parser.add_argument("-p", help="load remote ip configuration file",
dest="ip_config", metavar="ip_config")
parser.add_argument("-s", help="configuration as json string"
" (overrides configuration from file)",
dest="config_string", metavar="config_string")
parser.add_argument("--pwdstdout", help="use stdout as "
"password stream",
dest="pwdstdout", action="store_true")
args = parser.parse_args()
if args.config_file:
# load the configuration file
with open(args.config_file) as f:
# load the configuration file into an OrderedDict with the
# modules in the order in which they appear
json_data = json.load(f, object_pairs_hook=OrderedDict)
for key in json_data:
if self._config.get(key, False):
self._config[key].update(json_data[key])
else:
self._config[key] = json_data[key]
if args.config_string:
loaded_config = json.loads(args.config_string)
for key in loaded_config:
if self._config.get(key, None):
self._config[key].update(loaded_config[key])
def set_node_id(self,):
config = self._config["CFx"]
# if NodeId is not specified in Config file, generate NodeId
nodeid = config.get("NodeId", None)
if nodeid is None or len(nodeid) == 0:
try:
with open("nid", "r") as f:
nodeid = f.read()
except IOError:
pass
if nodeid is None or len(nodeid) == 0:
nodeid = str(uuid.uuid4().hex)
with open("nid", "w") as f:
f.write(nodeid)
return nodeid
def wait_for_shutdown_event(self):
self._event = threading.Event()
        # Since signal.pause() is not available on Windows, use event.wait()
        # with a timeout to catch KeyboardInterrupt. Without a timeout it's
        # not possible to catch KeyboardInterrupt because event.wait() is
        # a blocking call. The if condition checks whether the OS
        # is Windows.
if os.name == "nt":
while True:
try:
self._event.wait(1)
except (KeyboardInterrupt, SystemExit) as e:
print("Controller shutdown event: {0}".format(str(e)))
break
else:
for sig in [signal.SIGINT]:
signal.signal(sig, self.__handler)
# signal.pause() sleeps until SIGINT is received
signal.pause()
def terminate(self):
for module_name in self._cfx_handle_dict:
if self._cfx_handle_dict[module_name]._timer_thread:
self._cfx_handle_dict[module_name]._exit_event.set()
self._cfx_handle_dict[module_name]._cm_queue.put(None)
# wait for the threads to process their current CBTs and exit
print("waiting for threads to exit ...")
for module_name in self._cfx_handle_dict:
self._cfx_handle_dict[module_name]._cm_thread.join()
print("{0} exited".format(self._cfx_handle_dict[module_name]._cm_thread.name))
if self._cfx_handle_dict[module_name]._timer_thread:
self._cfx_handle_dict[module_name]._timer_thread.join()
print("{0} exited".format(self._cfx_handle_dict[module_name]._timer_thread.name))
sys.exit(0)
def query_param(self, param_name=""):
try:
if param_name == "IpopVersion":
return self._config["CFx"]["IpopVersion"]
if param_name == "NodeId":
return self._node_id
if param_name == "Overlays":
return self._config["CFx"]["Overlays"]
if param_name == "Model":
return self.model
except Exception as error:
print("Exception occurred while querying data." + str(error))
return None
# Caller is the subscription source
def publish_subscription(self, owner_name, subscription_name, owner):
sub = CFxSubscription(owner_name, subscription_name)
sub._owner = owner
if sub._owner_name not in self._subscriptions:
self._subscriptions[sub._owner_name] = []
self._subscriptions[sub._owner_name].append(sub)
return sub
def remove_subscription(self, sub):
sub.post_update("SUBSCRIPTION_SOURCE_TERMINATED")
if sub._owner_name not in self._subscriptions:
raise NameError("Failed to remove subscription source \"{}\"."
" No such provider name exists."
.format(sub._owner_name))
self._subscriptions[sub._owner_name].remove(sub)
def find_subscription(self, owner_name, subscription_name):
sub = None
if owner_name not in self._subscriptions:
raise NameError("The specified subscription provider {} was not found.".format(owner_name))
for sub in self._subscriptions[owner_name]:
if sub._subscription_name == subscription_name:
return sub
return None
# Caller is the subscription sink
def start_subscription(self, owner_name, subscription_name, Sink):
sub = self.find_subscription(owner_name, subscription_name)
if sub is not None:
sub.add_subscriber(Sink)
else:
raise NameError("The specified subscription name was not found")
def end_subscription(self, owner_name, subscription_name, Sink):
sub = self.find_subscription(owner_name, subscription_name)
if sub is not None:
sub.remove_subscriber(Sink)
if __name__ == "__main__":
cf = CFX()
cf.initialize()
|
import random
from time import time
import warnings
warnings.filterwarnings('ignore')
from tqdm import tqdm
from tree_modules import kd_tree, quad_tree
def test_random_insertions(val_range=100, num_elements=1000, reps=10):
q_time = 0
k_time = 0
print("\nRandom element insertion")
print(f"{num_elements} points, x,y:[0,{val_range}] - Avg. of {reps} runs")
for _ in tqdm(range(reps)):
k = kd_tree()
q = quad_tree()
rand_elements = [(random.randrange(val_range), random.randrange(val_range)) for _ in range(num_elements)]
t_s = time()
for item in tqdm(rand_elements, position=1, leave=False):
x, y = item
q.add_element(x, y)
t_e = time()
q_time += t_e - t_s
t_s = time()
for item in tqdm(rand_elements, position=1, leave=False):
x, y = item
k.add_element(x, y)
t_e = time()
k_time += t_e - t_s
k_time /= reps
q_time /= reps
print(f"kD_Tree: {round(k_time, 4)}s \t\tQuadTree: {round(q_time, 4)}s")
# =============================================================================
def test_build(val_range=100, num_elements=1000, reps=10):
q_time = 0
k_time = 0
print("\nBuilding from given list of points")
print(f"{num_elements} points, x,y:[0,{val_range}] - Avg. of {reps} runs")
for _ in tqdm(range(reps)):
k = kd_tree()
q = quad_tree()
rand_elements = [(random.randrange(val_range), random.randrange(val_range)) for _ in range(num_elements)]
t_s = time()
q.build(point_list=rand_elements)
t_e = time()
q_time += t_e - t_s
t_s = time()
k.build(point_list=rand_elements)
t_e = time()
k_time += t_e - t_s
k_time /= reps
q_time /= reps
print(f"kD_Tree: {round(k_time, 4)}s \t\tQuadTree: {round(q_time, 4)}s")
# =============================================================================
def build_trees(val_range=100, num_elements=1000):
q = quad_tree()
k = kd_tree()
for _ in range(num_elements):
x = random.randrange(val_range)
y = random.randrange(val_range)
q.add_element(x, y)
k.add_element(x, y)
return q, k
def test_random_searches(num_searches=50, val_range=100, num_elements=1000, reps=10):
q_time = 0
k_time = 0
print("\nRandom point search")
print(f"{num_searches} points in popul. of {num_elements} - Avg. of {reps} runs")
q, k = build_trees(val_range=val_range, num_elements=num_elements)
for _ in tqdm(range(reps)):
rand_search_points = [(random.randrange(val_range), random.randrange(val_range)) for _ in range(num_searches)]
ts = time()
for item in tqdm(rand_search_points, position=1, leave=False):
x, y = item
_ = q.search(x, y)
te = time()
q_time += te - ts
ts = time()
for item in tqdm(rand_search_points, position=1, leave=False):
x, y = item
_ = k.search(x, y)
te = time()
k_time += te - ts
k_time /= reps
q_time /= reps
print(f"kD_Tree: {round(k_time, 4)}s \t\tQuadTree: {round(q_time, 4)}s")
# =============================================================================
def test_storage(val_range=100, num_elements=1000, reps=10):
q_time = 0
k_time = 0
print("\nStorage testing")
print(f"Trees of {num_elements} popul - Avg. of {reps} runs")
for _ in tqdm(range(reps)):
q, k = build_trees(val_range=val_range, num_elements=num_elements)
ts = time()
_ = q.storage()
te = time()
q_time += te - ts
ts = time()
_ = k.storage()
te = time()
k_time += te - ts
k_time /= reps
q_time /= reps
print(f"kD_Tree: {round(k_time, 4)}s \t\tQuadTree: {round(q_time, 4)}s")
# =============================================================================
def test_knn_search(num_searches=50, max_k=7, val_range=100, num_elements=1000, reps=10):
print("\nkNN-Search")
print(f"{num_searches} points in popul. of {num_elements} - Avg. of {reps} runs")
q, k = build_trees(val_range=val_range, num_elements=num_elements)
for ck in range(1, max_k):
q_time = 0
k_time = 0
txt = f"[k = {ck}]"
for _ in tqdm(range(reps), desc=txt):
rand_search_points = [(random.randrange(val_range), random.randrange(val_range)) for _ in range(num_searches)]
ts = time()
for item in tqdm(rand_search_points, position=1, leave=False):
x, y = item
_ = q.knn_search(x, y, ck)
te = time()
q_time += te - ts
ts = time()
for item in tqdm(rand_search_points, position=1, leave=False):
x, y = item
_ = k.knn_search(x, y, ck)
te = time()
k_time += te - ts
k_time /= reps
q_time /= reps
print(f"kD_Tree: {round(k_time, 4)}s \t\tQuadTree: {round(q_time, 4)}s")
# =============================================================================
def test_delete(num_deletions, val_range=100, num_elements=1000, reps=10):
q_time = 0
k_time = 0
print("\nDeletion testing")
print(f"{num_searches} points in popul. of {num_elements} - Avg. of {reps} runs")
for _ in tqdm(range(reps)):
k = kd_tree()
q = quad_tree()
rand_elements = [(random.randrange(val_range), random.randrange(val_range)) for _ in range(num_elements)]
for item in rand_elements:
x, y = item
q.add_element(x, y)
k.add_element(x, y)
deletion_points = []
for _ in range(num_deletions):
key = random.randrange(len(rand_elements))
deletion_points.append(rand_elements[key])
del rand_elements[key]
t_s = time()
for item in tqdm(deletion_points, position=1, leave=False):
x, y = item
_ = k.delete_element(x, y)
t_e = time()
k_time += t_e - t_s
t_s = time()
for item in tqdm(deletion_points, position=1, leave=False):
x, y = item
_ = q.delete_element(x, y)
t_e = time()
q_time += t_e - t_s
k_time /= reps
q_time /= reps
print(f"kD_Tree: {round(k_time, 4)}s \t\tQuadTree: {round(q_time, 4)}s")
# =============================================================================
if __name__ == "__main__":
print("Hi!")
reps = 10
val_range = 250
num_elements = 20000
num_searches = 1000
max_k = 7
reduction = 5
test_random_insertions(val_range=val_range, num_elements=num_elements, reps=reps)
test_build(val_range=int(val_range / reduction), num_elements=int(num_elements / (reduction ** 2)), reps=reps)
test_storage(val_range=val_range, num_elements=num_elements, reps=reps)
test_delete(num_deletions=num_searches, val_range=val_range, num_elements=num_elements, reps=reps)
test_random_searches(num_searches=num_searches,
val_range=val_range, num_elements=num_elements, reps=reps)
test_knn_search(num_searches=num_searches, max_k=max_k,
val_range=val_range, num_elements=num_elements, reps=reps) |
import tensorflow as tf
INPUT_HEIGHT = 200
INPUT_WIDTH = 200
class FineNet(tf.keras.Model):
def __init__(self, alpha, lmbda, d_latent):
super(FineNet, self).__init__()
self.alpha = alpha
self.lmbda = lmbda
self.d_latent = d_latent
self.embedder = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(16, 7, 2, "same"), # 100
tf.keras.layers.BatchNormalization(),
tf.keras.layers.MaxPool2D((2, 2), 2, padding="same"), # 50
ResidualBlock([32, 32, 64], [1, 3, 1], 2),
tf.keras.layers.MaxPool2D((2, 2), 2, padding="same"), # 25
ResidualBlock([64, 64, 128], [1, 3, 1], 3),
tf.keras.layers.MaxPool2D(
(2, 2),
2,
padding="same",
), # 13
ResidualBlock([128, 128, 256], [1, 3, 1], 4),
tf.keras.layers.MaxPool2D((2, 2), 2, padding="same"), # 7
ResidualBlock([256, 256, 512], [1, 3, 1], 3),
tf.keras.layers.AvgPool2D((7, 7), 7), # 1
tf.keras.layers.Reshape((512,)),
tf.keras.layers.Dense(d_latent),
]
)
def call(self, x, training=False):
z = self.embedder(x, training=training)
return tf.math.l2_normalize(z, axis=1)
def call_on_identities(self, identities_x, training=False):
n_identities = identities_x.shape[0]
n_prints_per_identity = identities_x.shape[1]
prints_x = tf.reshape(identities_x, [-1, INPUT_HEIGHT, INPUT_WIDTH, 1])
prints_z = self.call(prints_x, training=training)
identities_z = tf.reshape(
prints_z, [n_identities, n_prints_per_identity, self.d_latent]
)
return identities_z
def triplet_loss(self, z_a, z_p, z_n):
batch_sz = z_a.shape[0]
positive_dist = tf.norm(z_a - z_p, axis=1)
negative_dist = tf.norm(z_a - z_n, axis=1)
J = positive_dist - negative_dist + self.alpha
return tf.math.maximum(J, tf.zeros([batch_sz]))
def softmax_loss(self, z_a, z_p):
z_a_softmax = tf.nn.softmax(z_a, axis=1)
z_p_softmax = tf.nn.softmax(z_p, axis=1)
l = -tf.reduce_sum(z_a_softmax * tf.math.log(z_p_softmax), axis=1)
return l
def loss_function(self, z_a, z_p, z_n):
l = self.triplet_loss(z_a, z_p, z_n) # + self.lmbda * \
# self.softmax_loss(z_a, z_p)
s = tf.reduce_sum(l)
return s
class ResidualBlock(tf.keras.Model):
def __init__(self, filters, kernel_sizes, repetitions):
super(ResidualBlock, self).__init__()
filters = filters * repetitions
kernel_sizes = kernel_sizes * repetitions
n_conv = len(filters)
assert n_conv == len(kernel_sizes)
self.convolutions = tf.keras.Sequential()
for i in range(n_conv):
c = tf.keras.layers.Conv2D(filters[i], kernel_sizes[i], padding="same")
b = tf.keras.layers.BatchNormalization()
a = tf.keras.layers.ReLU()
self.convolutions.add(c)
self.convolutions.add(b)
self.convolutions.add(a)
def call(self, x, training=False):
out = self.convolutions(x, training=training)
return tf.pad(x, [[0, 0], [0, 0], [0, 0], [0, out.shape[3] - x.shape[3]]]) + out
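# Hedged usage sketch (not in the original file): embed a dummy batch of
# INPUT_HEIGHT x INPUT_WIDTH single-channel images; the alpha, lmbda and
# d_latent values below are illustrative only.
if __name__ == "__main__":
    net = FineNet(alpha=0.2, lmbda=0.01, d_latent=128)
    dummy = tf.zeros([2, INPUT_HEIGHT, INPUT_WIDTH, 1])
    embeddings = net(dummy, training=False)
    print(embeddings.shape)  # (2, 128)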
|
#!/usr/bin/env python
"""
Created on Wed Apr 8 15:19:52 2015
Author: Oren Freifeld
Email: freifeld@csail.mit.edu
"""
import numpy as np
from of.utils import *
from pyvision.essentials import Img
from pylab import plt
def colored_squares(dimy,dimx,nPixels_in_square_side):
"""
"""
M=nPixels_in_square_side
seg = np.zeros((dimy,dimx),dtype=np.int32)
yy,xx = np.mgrid[:dimy,:dimx]
xx = xx.astype(np.float)
yy = yy.astype(np.float)
dimx = float(dimx)
    dimy = float(dimy)
nTimesInX = np.floor(xx / M).max() + 1
seg = np.floor(yy / M) * nTimesInX + np.floor(xx / M)
seg = seg.astype(np.int32)
return seg
def random_permute_labels(seg):
p=np.random.permutation(seg.max()+1)
seg2 = np.zeros_like(seg)
for c in range(seg.max()+1):
seg2[seg==c]=p[c]
return seg2.astype(np.int32)
if __name__ == "__main__":
tic = time.clock()
seg= colored_squares(512*2, 512*2,64*4)
toc = time.clock()
    print(toc - tic)
plt.figure(1)
plt.clf()
plt.imshow(seg,interpolation="Nearest")
plt.axis('scaled') |
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from abc import ABCMeta
from dataclasses import dataclass
from pants.base.build_root import BuildRoot
from pants.core.util_rules.distdir import DistDir
from pants.engine.console import Console
from pants.engine.fs import Digest, DirectoriesToMerge, DirectoryToMaterialize, Workspace
from pants.engine.goal import Goal, GoalSubsystem, LineOriented
from pants.engine.rules import goal_rule
from pants.engine.selectors import Get, MultiGet
from pants.engine.target import (
Configuration,
TargetsToValidConfigurations,
TargetsToValidConfigurationsRequest,
)
from pants.engine.unions import union
class AWSLambdaError(Exception):
pass
@dataclass(frozen=True)
class CreatedAWSLambda:
digest: Digest
name: str
runtime: str
handler: str
@union
class AWSLambdaConfiguration(Configuration, metaclass=ABCMeta):
"""The fields necessary to create an AWS Lambda from a target."""
class AWSLambdaOptions(LineOriented, GoalSubsystem):
"""Generate an AWS Lambda."""
name = "awslambda"
class AWSLambdaGoal(Goal):
subsystem_cls = AWSLambdaOptions
@goal_rule
async def create_awslambda(
console: Console,
options: AWSLambdaOptions,
distdir: DistDir,
buildroot: BuildRoot,
workspace: Workspace,
) -> AWSLambdaGoal:
targets_to_valid_configs = await Get[TargetsToValidConfigurations](
TargetsToValidConfigurationsRequest(
AWSLambdaConfiguration,
goal_description=f"the `{options.name}` goal",
error_if_no_valid_targets=True,
)
)
awslambdas = await MultiGet(
Get[CreatedAWSLambda](AWSLambdaConfiguration, config)
for config in targets_to_valid_configs.configurations
)
merged_digest = await Get[Digest](
DirectoriesToMerge(tuple(awslambda.digest for awslambda in awslambdas))
)
result = workspace.materialize_directory(
DirectoryToMaterialize(merged_digest, path_prefix=str(distdir.relpath))
)
with options.line_oriented(console) as print_stdout:
for awslambda, path in zip(awslambdas, result.output_paths):
print_stdout(f"Wrote code bundle to {os.path.relpath(path, buildroot.path)}")
print_stdout(f" Runtime: {awslambda.runtime}")
print_stdout(f" Handler: {awslambda.handler}")
print_stdout("")
return AWSLambdaGoal(exit_code=0)
def rules():
return [create_awslambda]
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Everything needed to run classification and regression tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import tensorflow.compat.v1 as tf
import sys
from bam.bert import tokenization
from bam.data import feature_spec
from bam.data import task_weighting
from bam.helpers import utils
from bam.task_specific import task
from bam.task_specific.classification import classification_metrics
from bam.data.NERLoader import NERLoader
from bam.helpers.CRF import CustomCRF,distillation_loss
from bam.tf_crf.crf_helper import allowed_transitions
#from bam.helpers.crf_static_contraint_helper import allowed_transitions
import json
class InputExample(task.Example):
"""A single training/test example for simple sequence classification."""
def __init__(self, eid, task_name, text_a, text_b=None, label=None, mask=None):
super(InputExample, self).__init__(task_name)
self.eid = eid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.mask = mask
class SingleOutputTask(task.Task):
"""A task with a single label per input (e.g., text classification)."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, tokenizer):
super(SingleOutputTask, self).__init__(config, name)
self._tokenizer = tokenizer
self._distill_inputs = None
def featurize(self, example, is_training):
"""Turn an InputExample into a dict of features."""
if is_training and self.config.distill and self._distill_inputs is None:
self._distill_inputs = utils.load_pickle(
self.config.distill_inputs(self.name))
tokens_a = self._tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = self._tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, self.config.max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > self.config.max_seq_length - 2:
tokens_a = tokens_a[0:(self.config.max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it
# makes it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = self._tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < self.config.max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == self.config.max_seq_length
assert len(input_mask) == self.config.max_seq_length
assert len(segment_ids) == self.config.max_seq_length
eid = example.eid
features = {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
"task_id": self.config.task_names.index(self.name),
self.name + "_eid": eid,
}
self._add_features(features, example,
None if self._distill_inputs is None else
self._distill_inputs[eid])
return features
def _load_glue(self, lines, split, text_a_loc, text_b_loc, label_loc,
skip_first_line=False, eid_offset=0, swap=False):
examples = []
for (i, line) in enumerate(lines):
if i == 0 and skip_first_line:
continue
eid = i - (1 if skip_first_line else 0) + eid_offset
text_a = tokenization.convert_to_unicode(line[text_a_loc])
if text_b_loc is None:
text_b = None
else:
text_b = tokenization.convert_to_unicode(line[text_b_loc])
if "test" in split or "diagnostic" in split:
label = self._get_dummy_label()
else:
label = tokenization.convert_to_unicode(line[label_loc])
if swap:
text_a, text_b = text_b, text_a
examples.append(InputExample(eid=eid, task_name=self.name,
text_a=text_a, text_b=text_b, label=label))
return examples
@abc.abstractmethod
def _get_dummy_label(self):
pass
@abc.abstractmethod
def _add_features(self, features, example, distill_inputs):
pass
class NERTask(task.Task):
"""A task with a single label per token in input (e.g., NER)."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, tokenizer,label_list):
super(NERTask, self).__init__(config, name)
self._tokenizer = tokenizer
self._distill_inputs = None
self.label_list = label_list
def featurize(self, example, is_training):
"""Turn an InputExample into a dict of features."""
if is_training and self.config.distill and self._distill_inputs is None:
self._distill_inputs = utils.load_pickle(
self.config.distill_inputs(self.name))
input_ids = example.text_a
input_mask = example.mask
segment_ids = []
while len(segment_ids) < self.config.max_seq_length:
segment_ids.append(0)
assert len(input_ids) == self.config.max_seq_length
assert len(input_mask) == self.config.max_seq_length
assert len(segment_ids) == self.config.max_seq_length
eid = example.eid
features = {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
"task_id": self.config.task_names.index(self.name),
self.name + "_eid": eid,
}
self._add_features(features, example,
None if self._distill_inputs is None else
self._distill_inputs[eid])
return features
def _load_processed(self, filename):
examples = []
with open(filename) as file:
data = json.load(file)
temp_texts = data["Mixed"]["tweets"]
temp_labels = data["Mixed"]["labels"]
texts=[]
labels=[]
for text,label in zip(temp_texts,temp_labels):
if not len(text.strip()) == 0:
texts.append(text)
labels.append(label)
labels2idx = {v:k for k,v in enumerate(self.label_list)}
idx2labels = {v: k for k, v in labels2idx.items()}
loader = NERLoader()
text_ids,labels,masks = loader.load(texts, labels, labels2idx, tokenizer=self._tokenizer,max_position_embeddings=self.config.max_seq_length)
for (i,text_a) in enumerate(text_ids):
eid = i
text_b = None
label = labels[i]
mask = masks[i]
examples.append(InputExample(eid=eid, task_name=self.name,
text_a=text_a, text_b=text_b, label=label,mask=mask))
return examples
@abc.abstractmethod
def _get_dummy_label(self):
pass
@abc.abstractmethod
def _add_features(self, features, example, distill_inputs):
pass
class RegressionTask(SingleOutputTask):
"""A regression task (e.g., STS)."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, tokenizer,
min_value, max_value):
super(RegressionTask, self).__init__(config, name, tokenizer)
self._tokenizer = tokenizer
self._min_value = min_value
self._max_value = max_value
def _get_dummy_label(self):
return 0.0
def get_feature_specs(self):
feature_specs = [feature_spec.FeatureSpec(self.name + "_eid", []),
feature_spec.FeatureSpec(self.name + "_targets", [],
is_int_feature=False)]
if self.config.distill:
feature_specs.append(feature_spec.FeatureSpec(
self.name + "_distill_targets", [], is_int_feature=False))
return feature_specs
def _add_features(self, features, example, distill_inputs):
label = float(example.label)
assert self._min_value <= label <= self._max_value
label = (label - self._min_value) / self._max_value
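    # e.g. (illustrative) for STS with min_value=0.0 and max_value=5.0, a gold
    # score of 2.5 is normalized to 0.5 before being used as the target.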
features[example.task_name + "_targets"] = label
if distill_inputs is not None:
features[self.name + "_distill_targets"] = distill_inputs
def get_prediction_module(self, bert_model, features, is_training,
percent_done):
reprs = bert_model.get_pooled_output()
if is_training:
reprs = tf.nn.dropout(reprs, keep_prob=0.9)
predictions = tf.layers.dense(reprs, 1)
predictions = tf.squeeze(predictions, -1)
targets = features[self.name + "_targets"]
if self.config.distill:
distill_targets = features[self.name + "_distill_targets"]
if self.config.teacher_annealing:
targets = ((targets * percent_done) +
(distill_targets * (1 - percent_done)))
else:
targets = ((targets * (1 - self.config.distill_weight)) +
(distill_targets * self.config.distill_weight))
losses = tf.square(predictions - targets)
outputs = dict(
loss=losses,
predictions=predictions,
targets=features[self.name + "_targets"],
eid=features[self.name + "_eid"]
)
return losses, outputs
def get_scorer(self):
return classification_metrics.RegressionScorer()
class TokenClassificationTask(NERTask):
"""A classification task (e.g., MNLI)."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, tokenizer,
label_list):
super(TokenClassificationTask, self).__init__(config, name, tokenizer,label_list)
self._tokenizer = tokenizer
self._label_list = label_list
self.crf=None
self.T = config.T
def _get_dummy_label(self):
return self._label_list[0]
def get_feature_specs(self):
feature_specs = [feature_spec.FeatureSpec(self.name + "_eid", []),
feature_spec.FeatureSpec(self.name + "_label_ids", [self.config.max_seq_length], is_int_feature=True)] #,
#feature_spec.FeatureSpec(self.name + "_masks", [self.config.max_seq_length], is_int_feature=False)]
if self.config.distill:
feature_specs.append(feature_spec.FeatureSpec(
self.name + "_distill_targets", [self.config.max_seq_length], is_int_feature=False))
return feature_specs
def _add_features(self, features, example, distill_inputs):
label_id = example.label
features[example.task_name + "_label_ids"] = label_id
#features[example.task_name + "_masks"] = example.mask
if distill_inputs is not None:
features[self.name + "_distill_targets"] = distill_inputs
def get_prediction_module(self, bert_model, features, is_training,
percent_done):
num_labels = len(self._label_list)
#if self.crf is None:
constraints = allowed_transitions("BIO", dict(enumerate(self._label_list)))
self.crf = CustomCRF(units=num_labels,START_TAG = num_labels-2, STOP_TAG = num_labels-1, transition_constraint=constraints)
reprs = bert_model.get_sequence_output()
if is_training:
reprs = tf.nn.dropout(reprs, keep_prob=0.9)
#mask = features[self.name + "_masks"]
mask = features["input_mask"]
#mask2len = tf.reduce_sum(mask, axis=1)
#print_op = tf.print(mask, output_stream=sys.stderr)
#with tf.control_dependencies([print_op]):
decoded_sequence, best_score, forward_score, backward_score = self.crf(reprs,mask)#tf.layers.dense(reprs, num_labels)
posterior_score = forward_score + backward_score
#log_probs = tf.nn.log_softmax(posterior_score, axis=-1)
label_ids = features[self.name + "_label_ids"]
if self.config.distill:
teacher_labels = tf.nn.softmax(features[self.name + "_distill_targets"] / self.T,axis=-1)
true_labels = tf.one_hot(label_ids, depth=num_labels, dtype=tf.float32)
if self.config.teacher_annealing:
labels = ((true_labels * percent_done) +
(teacher_labels * (1 - percent_done)))
else:
labels = ((true_labels * (1 - self.config.distill_weight)) +
(teacher_labels * self.config.distill_weight))
else:
labels = tf.one_hot(label_ids, depth=num_labels, dtype=tf.float32)
#print(labels.shape,log_probs.shape)
#print(posterior_score,labels,mask)
losses = tf.repeat(tf.expand_dims(distillation_loss(posterior_score, labels, mask, self.T),axis=0),repeats=[label_ids.shape[0]])
#losses = -tf.reduce_sum(tf.reduce_sum(labels * log_probs, axis=-1),axis=-1)
#losses, trans = self.crf_loss(logits,labels * log_probs,mask,num_labels,mask2len)
#predict,viterbi_score = tf.contrib.crf.crf_decode(logits, trans, mask2len)
outputs = dict(
loss=losses,
logits=posterior_score,
predictions=decoded_sequence,
label_ids=label_ids,
eid=features[self.name + "_eid"],
)
return losses, outputs
def get_scorer(self):
return classification_metrics.BIOF1Scorer(self.label_list)
class ClassificationTask(SingleOutputTask):
"""A classification task (e.g., MNLI)."""
__metaclass__ = abc.ABCMeta
def __init__(self, config, name, tokenizer,
label_list):
super(ClassificationTask, self).__init__(config, name, tokenizer)
self._tokenizer = tokenizer
self._label_list = label_list
def _get_dummy_label(self):
return self._label_list[0]
def get_feature_specs(self):
feature_specs = [feature_spec.FeatureSpec(self.name + "_eid", []),
feature_spec.FeatureSpec(self.name + "_label_ids", [])]
if self.config.distill:
feature_specs.append(feature_spec.FeatureSpec(
self.name + "_logits", [len(self._label_list)], is_int_feature=False))
return feature_specs
def _add_features(self, features, example, distill_inputs):
label_map = {}
for (i, label) in enumerate(self._label_list):
label_map[label] = i
label_id = label_map[example.label]
features[example.task_name + "_label_ids"] = label_id
if distill_inputs is not None:
features[self.name + "_logits"] = distill_inputs
def get_prediction_module(self, bert_model, features, is_training,
percent_done):
num_labels = len(self._label_list)
reprs = bert_model.get_pooled_output()
if is_training:
reprs = tf.nn.dropout(reprs, keep_prob=0.9)
logits = tf.layers.dense(reprs, num_labels)
# probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = features[self.name + "_label_ids"]
if self.config.distill:
teacher_labels = tf.nn.softmax(features[self.name + "_logits"] / 1.0)
true_labels = tf.one_hot(label_ids, depth=num_labels, dtype=tf.float32)
if self.config.teacher_annealing:
labels = ((true_labels * percent_done) +
(teacher_labels * (1 - percent_done)))
else:
labels = ((true_labels * (1 - self.config.distill_weight)) +
(teacher_labels * self.config.distill_weight))
else:
labels = tf.one_hot(label_ids, depth=num_labels, dtype=tf.float32)
losses = -tf.reduce_sum(labels * log_probs, axis=-1)
outputs = dict(
loss=losses,
logits=logits,
predictions=tf.argmax(logits, axis=-1),
label_ids=label_ids,
eid=features[self.name + "_eid"],
)
return losses, outputs
def get_scorer(self):
return classification_metrics.AccuracyScorer()
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
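# Example (hypothetical token lists): with max_length=6 and
#   tokens_a = ["a", "b", "c", "d", "e"], tokens_b = ["x", "y", "z"]
# the longer list is popped one token at a time until the pair fits, giving
#   tokens_a == ["a", "b", "c"] and tokens_b == ["x", "y", "z"].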
class MNLI(ClassificationTask):
"""Multi-NLI."""
def __init__(self, config, tokenizer):
super(MNLI, self).__init__(config, "mnli", tokenizer,
["contradiction", "entailment", "neutral"])
def get_examples(self, split):
if split == "dev":
split += "_matched"
return self.load_data(split + ".tsv", split)
def _create_examples(self, lines, split):
examples = []
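    # The positional arguments to _load_glue (defined on the parent task class,
    # not shown here) appear to be: lines, split, text_a column index,
    # text_b column index, label column index, skip-header flag
    # (plus an optional eid offset and swap flag, used by MRPC/STS below).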
for _ in range(task_weighting.get_task_multiple(self, split)):
if split == "diagnostic":
examples += self._load_glue(lines, split, 1, 2, None, True)
else:
examples += self._load_glue(lines, split, 8, 9, -1, True)
return examples
def get_test_splits(self):
return ["test_matched", "test_mismatched", "diagnostic"]
class MRPC(ClassificationTask):
"""Microsoft Research Paraphrase Corpus."""
def __init__(self, config, tokenizer):
super(MRPC, self).__init__(config, "mrpc", tokenizer, ["0", "1"])
def _create_examples(self, lines, split):
examples = []
offset = 0
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(lines, split, 3, 4, 0, True)
if not offset:
offset = len(examples)
if self.config.double_unordered and split == "train":
examples += self._load_glue(lines, split, 3, 4, 0, True, offset, True)
return examples
class CoLA(ClassificationTask):
"""Corpus of Linguistic Acceptability."""
def __init__(self, config, tokenizer):
super(CoLA, self).__init__(config, "cola", tokenizer, ["0", "1"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(
lines, split, 1 if split == "test" else 3, None, 1, split == "test")
return examples
def get_scorer(self):
return classification_metrics.MCCScorer()
class SST(ClassificationTask):
"""Stanford Sentiment Treebank."""
def __init__(self, config, tokenizer):
super(SST, self).__init__(config, "sst", tokenizer, ["0", "1"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
if "test" in split:
examples += self._load_glue(lines, split, 1, None, None, True)
else:
examples += self._load_glue(lines, split, 0, None, 1, True)
return examples
class QQP(ClassificationTask):
"""Quora Question Pair."""
def __init__(self, config, tokenizer):
super(QQP, self).__init__(config, "qqp", tokenizer, ["0", "1"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(lines, split, 1 if split == "test" else 3,
2 if split == "test" else 4, 5, True)
return examples
class RTE(ClassificationTask):
"""Recognizing Textual Entailment."""
def __init__(self, config, tokenizer):
super(RTE, self).__init__(config, "rte", tokenizer,
["entailment", "not_entailment"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(lines, split, 1, 2, 3, True)
return examples
class QNLI(ClassificationTask):
"""Question NLI."""
def __init__(self, config, tokenizer):
super(QNLI, self).__init__(config, "qnli", tokenizer,
["entailment", "not_entailment"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(lines, split, 1, 2, 3, True)
return examples
class TREC(ClassificationTask):
"""Question Type Classification."""
def __init__(self, config, tokenizer):
super(TREC, self).__init__(config, "trec", tokenizer,
["num", "loc", "hum", "desc", "enty", "abbr"])
def _create_examples(self, lines, split):
examples = []
for _ in range(task_weighting.get_task_multiple(self, split)):
examples += self._load_glue(lines, split, 0, None, 1, False)
return examples
class STS(RegressionTask):
"""Semantic Textual Similarity."""
def __init__(self, config, tokenizer):
super(STS, self).__init__(config, "sts", tokenizer, 0.0, 5.0)
def _create_examples(self, lines, split):
examples = []
offset = 0
for _ in range(task_weighting.get_task_multiple(self, split)):
if split == "test":
examples += self._load_glue(lines, split, -2, -1, None, True)
else:
examples += self._load_glue(lines, split, -3, -2, -1, True)
if not offset:
offset = len(examples)
if self.config.double_unordered and split == "train":
examples += self._load_glue(
lines, split, -3, -2, -1, True, offset, True)
return examples
class Covid(TokenClassificationTask):
"""Question Type Classification."""
def __init__(self, config, tokenizer):
super(Covid, self).__init__(config, "covid", tokenizer,
['[PAD]', 'B-STA', 'I-STA', 'B-CONTR', 'I-CONTR','B-NCT', 'I-NCT', 'B-LB', 'I-LB', 'B-REG', 'I-REG', 'B-OTH', 'I-OTH', 'O','[CLS]','[SEP]'])
def get_examples(self, split):
if split == "dev":
split = "val"
path = self.config.json_data_dir(self.name+"/"+split + ".json")
return self._load_processed(path)
def get_test_splits(self):
return ["test"]
class Mixed(TokenClassificationTask):
"""Question Type Classification."""
def __init__(self, config, tokenizer):
super(Mixed, self).__init__(config, "mixed", tokenizer,
['[PAD]', 'B-STA', 'I-STA', 'B-CONTR', 'I-CONTR','B-NCT', 'I-NCT', 'B-LB', 'I-LB', 'B-REG', 'I-REG', 'B-OTH', 'I-OTH', 'O','[CLS]','[SEP]'])
def get_examples(self, split):
if split == "dev":
split = "val"
return self._load_processed(self.config.json_data_dir(self.name+"/"+split + ".json"))
def get_test_splits(self):
return ["test"]
class LocExp(TokenClassificationTask):
"""Question Type Classification."""
def __init__(self, config, tokenizer):
super(LocExp, self).__init__(config, "locexp", tokenizer,
['[PAD]', 'B-LOC', 'I-LOC','O','[CLS]','[SEP]'])
def get_examples(self, split):
if split == "dev":
split = "val"
return self._load_processed(self.config.json_data_dir(self.name+"/"+split + ".json"))
def get_test_splits(self):
return ["test"]
class GeoNY(TokenClassificationTask):
"""Question Type Classification."""
def __init__(self, config, tokenizer):
super(GeoNY, self).__init__(config, "geony", tokenizer,
['[PAD]', 'B-ADM', 'I-ADM', 'B-BUI', 'I-BUI','B-TRA', 'I-TRA', 'O','[CLS]','[SEP]'])
def get_examples(self, split):
if split == "dev":
split = "val"
return self._load_processed(self.config.json_data_dir(self.name + "/" + split + ".json"))
def get_test_splits(self):
return ["test"]
class GeoNZ(TokenClassificationTask):
"""Question Type Classification."""
def __init__(self, config, tokenizer):
super(GeoNZ, self).__init__(config, "geonz", tokenizer,
['[PAD]', 'B-ADM', 'I-ADM', 'B-BUI', 'I-BUI','B-TRA', 'I-TRA', 'O','[CLS]','[SEP]'])
def get_examples(self, split):
if split == "dev":
split = "val"
return self._load_processed(self.config.json_data_dir(self.name + "/" + split + ".json"))
def get_test_splits(self):
return ["test"]
|
# This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, The QuTiP Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import itertools
import numpy as np
import pytest
import qutip
from qutip.core import data as _data
def expected(qobj, sel):
if qobj.isbra or qobj.isket:
qobj = qobj.proj()
sel = sorted(sel)
dims = [[x for i, x in enumerate(qobj.dims[0]) if i in sel]]*2
new_shape = (np.prod(dims[0]),) * 2
out = qobj.full()
before, after = 1, qobj.shape[0]
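    # Trace out every subsystem not in `sel`: for each such subsystem of size
    # `dim`, view the matrix as (before, dim, after, before, dim, after) and
    # contract the two `dim` axes ('aibcid->abcd' sums over the repeated i).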
for i, dim in enumerate(qobj.dims[0]):
after //= dim
if i in sel:
before = before * dim
continue
tmp_dims = (before, dim, after) * 2
out = np.einsum('aibcid->abcd', out.reshape(tmp_dims))
return qutip.Qobj(out.reshape(new_shape), dims=dims)
@pytest.fixture(params=[_data.CSR, _data.Dense], ids=['CSR', 'Dense'])
def dtype(request):
return request.param
@pytest.fixture(params=[True, False], ids=['dm', 'ket'])
def dm(request):
return request.param
@pytest.fixture
def state(dtype, dm):
dims = [2, 3, 4]
state = qutip.rand_ket(np.prod(dims), dims=[dims, [1]*len(dims)])
if dm:
state = state.proj()
return state.to(dtype)
def test_ptrace_noncompound_rand(dtype, dm):
"""Test `A.ptrace(0) == A` when `A` is in a non-tensored Hilbert space."""
for _ in range(5):
state = qutip.rand_ket(5)
if dm:
state = state.proj()
state = state.to(dtype)
assert state.ptrace(0) == (state if dm else state.proj())
@pytest.mark.parametrize('pair', list(itertools.combinations(range(3), 2)))
def test_ptrace_unsorted_selection_subset(state, pair):
"""
Regression test for gh-1325. ptrace should work the same independently of
    the order of the input; no transposition is done in the trace operation.
"""
# pair is always sorted.
state_ordered = state.ptrace(pair)
state_reversed = state.ptrace(pair[::-1])
assert state_ordered.dims == state_reversed.dims
assert state_ordered == state_reversed
@pytest.mark.parametrize('permutation', list(itertools.permutations(range(3))))
def test_ptrace_unsorted_selection_all(state, permutation):
state_ptraced = state.ptrace(permutation)
if state.isket:
state = state.proj()
assert state.dims == state_ptraced.dims
assert state == state_ptraced
@pytest.mark.parametrize(['selection', 'exception'], [
pytest.param(4, IndexError, id='too big'),
pytest.param(-1, IndexError, id='too small'),
pytest.param([0, 0], ValueError, id='duplicate'),
# 'too many' may throw either from duplication or invalid index.
pytest.param([0, 1, 2, 3], Exception, id='too many'),
])
def test_ptrace_fails_on_invalid_input(state, selection, exception):
with pytest.raises(exception):
state.ptrace(selection)
def test_ptrace_rand(dtype):
'ptrace : randomized tests'
for _ in range(5):
A = qutip.tensor(
qutip.rand_ket(5), qutip.rand_ket(2), qutip.rand_ket(3),
).to(dtype)
for sel in ([2, 1], [0, 2], [0, 1]):
assert A.ptrace(sel) == expected(A, sel)
A = qutip.tensor(
qutip.rand_dm(2), qutip.thermal_dm(10, 1), qutip.rand_unitary(3),
).to(dtype)
for sel in ([1, 2], [0, 2], [0, 1]):
assert A.ptrace(sel) == expected(A, sel)
A = qutip.tensor(
qutip.rand_ket(2), qutip.rand_ket(2), qutip.rand_ket(2),
qutip.rand_ket(2), qutip.rand_ket(2), qutip.rand_ket(2),
).to(dtype)
for sel in ([3, 2], [0, 2], [0, 1]):
assert A.ptrace(sel) == expected(A, sel)
A = qutip.rand_dm(64, 0.5, dims=[[4, 4, 4], [4, 4, 4]]).to(dtype)
for sel in ([0], [1], [0, 2]):
assert A.ptrace(sel) == expected(A, sel)
|
#!/usr/bin/env python
import argparse
import re
import sys
def FixWhitespace(path):
lines = file(path, "rb").readlines()
should_rewrite = False
for i, line in enumerate(lines):
trailing_whitespace, = re.search("(\s*)$", line).groups()
if trailing_whitespace == "\n":
continue
print "%s(%d): incorrect line ending: %r" % (path, i, trailing_whitespace)
line = re.sub("(\s*)$", "\n", line)
lines[i] = line
should_rewrite = True
file(path, "wb").write("".join(lines))
def Main(args=sys.argv[1:]):
parser = argparse.ArgumentParser()
parser.add_argument("path", nargs="+")
options = parser.parse_args(args)
for path in options.path:
FixWhitespace(path)
if __name__ == "__main__":
Main()
|
#!/usr/bin/env python
# This fact finds the paths of compiled ardupilot firmwares
# otherwise compile attempts happen every run, which is unnecessary and very slow
import os,re
print "ardupilotfw_test=yes"
if os.path.isfile("/srv/maverick/code/ardupilot/ArduCopter/ArduCopter.elf"):
print "ardupilotfw_arducopter=yes"
else:
print "ardupilotfw_arducopter=no"
if os.path.isfile("/srv/maverick/code/ardupilot/ArduPlane/ArduPlane.elf"):
print "ardupilotfw_arduplane=yes"
else:
print "ardupilotfw_arduplane=no"
if os.path.isfile("/srv/maverick/code/ardupilot/APMrover2/APMrover2.elf"):
print "ardupilotfw_apmrover2=yes"
else:
print "ardupilotfw_apmrover2=no"
if os.path.isfile("/srv/maverick/code/ardupilot/AntennaTracker/AntennaTracker.elf"):
print "ardupilotfw_antennatracker=yes"
else:
print "ardupilotfw_antennatracker=no"
# Define main data container
waffiles = []
for root, dirs, files in os.walk("/srv/maverick/code/ardupilot/build"):
for file in files:
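            # Keep only the last two components of the directory (e.g.
            # "<board>/bin"), so waffiles entries are paths relative to the
            # build directory.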
dirs = root.split("/")
trpath = "/".join(dirs[-2::])
file = os.path.join(trpath, file)
if re.search("bin/", file):
waffiles.append(file)
# Finally, print the data out in the format expected of a fact provider
if waffiles:
print "waffiles="+str(",".join(waffiles))
else:
print "waffiles=false" |
from django.shortcuts import render, get_object_or_404
from django.views import View
from .models import Blog
from projects.models import AboutPerson, PersonSocialMedia
# View for all blog posts
class BlogView(View):
def get(self, request):
about = AboutPerson.objects.get(pk=1)
social_medias = PersonSocialMedia.objects.all()
blogs = Blog.objects
context = {'about': about, 'social_medias': social_medias, 'blogs': blogs}
return render(request, template_name='blog/blog.html', context=context)
# View for one blog post
class BlogPostView(View):
def get(self, request, blog_id, blog_slug):
about = AboutPerson.objects.get(pk=1)
social_medias = PersonSocialMedia.objects.all()
blog_post = get_object_or_404(Blog, pk=blog_id)
context = {'about': about, 'social_medias': social_medias, 'blog_post': blog_post}
return render(request, template_name='blog/post.html', context=context)
|
""" A Simple CLI to parse text file with League Results"""
__version__ = "0.0.2" |
from .load_docx import load_docx
from .load_chat import load_chat
from .creat_relation import creat_relation, creat_relations
|
# Project Quex (http://quex.sourceforge.net); License: MIT;
# (C) 2005-2020 Frank-Rene Schaefer;
#_______________________________________________________________________________
import quex.input.regular_expression.core as regular_expression
import quex.input.files.mode_option as mode_option
import quex.input.files.code_fragment as code_fragment
from quex.input.files.specifier.mode import ModeParsed
from quex.input.code.core import CodeUser
from quex.input.code.base import SourceRef
import quex.engine.misc.error as error
import quex.engine.misc.similarity as similarity
from quex.engine.misc.file_in import EndOfStreamException, \
check, \
check_or_die, \
check_end_of_file, \
read_identifier, \
read_until_letter, \
read_until_whitespace, \
is_identifier, \
skip_whitespace, \
optional_flags
from quex.output.token.id_generator import token_id_db_enter
import quex.blackboard as blackboard
from quex.blackboard import setup as Setup, \
Lng, \
standard_incidence_db
from collections import namedtuple
def parse(fh, mode_parsed_db):
# NOTE: Catching of EOF happens in caller: parse_section(...)
skip_whitespace(fh)
position = fh.tell()
mode_name = read_identifier(fh, OnMissingStr="Missing identifier at beginning of mode definition.")
error.insight("Mode '%s'" % mode_name)
# NOTE: constructor does register this mode in the mode_db
new_mode = ModeParsed(mode_name, SourceRef.from_FileHandle(fh))
if new_mode.name in mode_parsed_db:
error.log("Mode '%s' has been defined twice.\n" % new_mode.name,
new_mode.sr, DontExitF=True)
error.log("Earlier definition here.",
mode_parsed_db[new_mode.name].sr)
mode_parsed_db[new_mode.name] = new_mode
# (*) inherited modes / option_db
skip_whitespace(fh)
dummy = fh.read(1)
if dummy not in [":", "{"]:
error.log("missing ':' or '{' after mode '%s'" % mode_name, fh)
if dummy == ":":
new_mode.direct_base_mode_name_list = _parse_base_mode_list(fh)
_parse_option_list(fh, new_mode)
# (*) read in pattern-action pairs and events
while not check(fh, "}"):
if check_end_of_file(fh):
error.log("End of file reached while parsing mode '%s'." % mode_name, fh, position)
_parse_pattern_action_pair(new_mode, fh)
def _parse_base_mode_list(fh):
"""RETURNS: List of names of direct base modes.
       Deeper base modes need to be determined by inspecting the mode
       hierarchy.
"""
skip_whitespace(fh)
result_list = []
trailing_comma_f = False
while 1 + 1 == 2:
pos = fh.tell()
if check(fh, "{"): fh.seek(pos); break
elif check(fh, "<"): fh.seek(pos); break
skip_whitespace(fh)
identifier = read_identifier(fh)
if not identifier: break
result_list.append(identifier)
trailing_comma_f = False
if not check(fh, ","): break
trailing_comma_f = True
if trailing_comma_f:
error.warning("Trailing ',' after base mode '%s'." % result_list[-1], fh)
_check_against_old_syntax_of_base_mode_definitions(fh, result_list)
return result_list
def _parse_option_list(fh, new_mode):
while 1 + 1 == 2:
sr = SourceRef.from_FileHandle(fh)
identifier, setting = mode_option.parse(fh, new_mode)
if identifier is None: break
new_mode.option_db.enter(identifier, setting, sr, new_mode.name)
def _parse_pattern_action_pair(new_mode, fh):
skip_whitespace(fh)
if __parse_keyword_list_and_action(new_mode, fh):
return
elif __parse_brief_and_action(new_mode, fh):
return
elif __parse_event_and_action(new_mode, fh):
return
else:
__parse_pattern_and_action(new_mode, fh)
def __parse_pattern_and_action(new_mode, fh):
pattern_list = regular_expression.parse_multiple_result(fh)
for pattern in pattern_list:
sr = SourceRef.from_FileHandle(fh, new_mode.name)
pattern.set_source_reference(sr)
__parse_action(new_mode, fh, pattern_list)
def __parse_action(new_mode, fh, pattern_list):
position = fh.tell()
try:
skip_whitespace(fh)
position = fh.tell()
code = code_fragment.parse(fh, "regular expression", ErrorOnFailureF=False)
if code is not None:
assert isinstance(code, CodeUser), "Found: %s" % code.__class__
for pattern in pattern_list:
new_mode.add_pattern_action_pair(pattern, code, fh)
return
fh.seek(position)
word, dummy, position_before_marker = read_until_letter(fh, [";"], Verbose=True)
if word == "PRIORITY-MARK":
error.log("PRIORITY-MARK is has been renamed to 'DEMOTION'.", fh)
elif word == "DEMOTION":
# This mark 'lowers' the priority of a pattern to the priority of the current
# pattern index (important for inherited patterns, that have higher precedence).
# The parser already constructed a state machine for the pattern that is to
# be assigned a new priority. Since, this machine is not used, let us just
# use its id.
fh.seek(position_before_marker)
check_or_die(fh, ";")
for pattern in pattern_list:
new_mode.add_match_priority(pattern, fh)
elif word == "DELETION":
# This mark deletes any pattern that was inherited with the same 'name'
fh.seek(position_before_marker)
check_or_die(fh, ";", ". Since quex version 0.33.5 this is required.")
for pattern in pattern_list:
new_mode.add_match_deletion(pattern, fh)
else:
error.log("Missing token '=>', '{', 'DEMOTION', or 'DELETION' after '%s'.\n" % pattern_list[0].pattern_string() + \
"found: '%s'. Note, that since quex version 0.33.5 it is required to add a ';'\n" % word + \
"to the commands DEMOTION and DELETION.", fh)
except EndOfStreamException:
error.error_eof("pattern action", fh, position)
def __parse_event_and_action(new_mode, fh):
pos = fh.tell()
word = read_until_whitespace(fh)
# Allow '<<EOF>>' and '<<FAIL>>' out of respect for classical tools like 'lex'
if word == "<<EOF>>": word = "on_end_of_stream"
elif word == "<<FAIL>>": word = "on_failure"
elif word in blackboard.all_section_title_list:
error.log("Pattern '%s' is a quex section title. Has the closing '}' of mode %s \n" % (word, new_mode.name) \
+ "been forgotten? Else use quotes, i.e. \"%s\"." % word, fh)
elif len(word) < 3 or word[:3] != "on_": fh.seek(pos); return False
if word == "on_indentation":
fh.seek(pos)
error.log("Definition of 'on_indentation' is no longer supported since version 0.51.1.\n"
"Please, use 'on_indent' for the event of an opening indentation, 'on_dedent'\n"
"for closing indentation, and 'on_nodent' for no change in indentation.\n"
"If you want to match 'on_indentation' as a string, use quotes.", fh)
comment = "Unknown event handler '%s'. \n" % word + \
"Note, that any pattern starting with 'on_' is considered an event handler.\n" + \
"use double quotes to bracket patterns that start with 'on_'."
error.verify_word_in_list(word, list(standard_incidence_db.keys()) + ["keyword_list"], comment,
fh)
code = code_fragment.parse(fh, "%s::%s event handler" % (new_mode.name, word))
incidence_id = standard_incidence_db[word][0]
if Lng.suspicious_RETURN_in_event_handler(incidence_id, code.get_text()):
error.warning("Suspicious 'FLUSH' in event handler '%s'.\n" % incidence_id \
+ "This statement will trigger 'on_after_match' handler.\n" \
+ "May be, use plain return instead.", code.sr)
new_mode.incidence_db[word] = code
return True
def __parse_brief_and_action(new_mode, fh):
"""ADAPTS: new_mode.pattern_action_list where new pattern action pairs
are entered.
RETURNS: True, in case of success.
EXITS: in case of syntax errors.
"""
position = fh.tell()
identifier = read_identifier(fh)
if identifier != "brief":
if similarity.get(identifier, ["brief", "briefing", "briefly"]) != -1:
error.warning("'%s' is similar to keyword 'brief'.\n"
"For clarity, use quotes." % identifier, fh)
fh.seek(position)
return False
flags = optional_flags(fh, "brief pattern action pair list", "",
{"N": "pass LexemeNull to token contructor.",
"L": "pass Lexeme to token constructor.",
"i": "implicit token identifier definition."},
BadCombinationList=["NL"])
skip_whitespace(fh)
prefix = read_identifier(fh)
skip_whitespace(fh)
lexeme_null_f = "N" in flags
lexeme_f = "L" in flags
implicit_tid_f = "i" in flags
check_or_die(fh, "{", "Opening bracket required after 'brief'.")
while not check(fh, "}"):
skip_whitespace(fh)
pattern = regular_expression.parse(fh)
skip_whitespace(fh)
position = fh.tell()
identifier = read_identifier(fh)
if not identifier:
error.log("Missing identifier after regular expression.", fh)
identifier = "%s%s" % (prefix, identifier)
check_or_die(fh, ";",
"Semincolon required after brief token identifier '%s'." % identifier)
if implicit_tid_f: token_id_db_enter(fh, identifier)
code = code_fragment.get_CodeUser_for_token_sending(fh, identifier, position,
LexemeNullF = lexeme_null_f,
LexemeF = lexeme_f)
new_mode.add_pattern_action_pair(pattern, code, fh)
return True
def __parse_keyword_list_and_action(new_mode, fh):
"""ADAPTS: new_mode.pattern_action_list where new pattern action pairs
are entered.
RETURNS: True, in case of success.
EXITS: in case of syntax errors.
"""
position = fh.tell()
identifier = read_identifier(fh)
if identifier != "keyword_list":
if similarity.get(identifier, ["keyword_list", "key words"]) != -1:
error.warning("'%s' is similar to keyword 'keyword_list'.\n"
"For clarity, use quotes." % identifier, fh)
fh.seek(position)
return False
def to_identifier(PatternCarryingIdentifier, fh):
"""RETURNS: Path in 'PatternCarryingIdentifier' given as string if
there is single path on single characters that comply
the requirements to be part of an identifier.
None, else.
"""
sm = PatternCarryingIdentifier.borrow_sm()
if not sm: return None
code_point_sequence = sm.get_sequence()
if not code_point_sequence: return None
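        # Build a unicode string from the code point sequence (equivalent to
        # "".join(chr(x) for x in code_point_sequence) on Python 3).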
candidate = "".join(eval("u'\\U%08X'" % x) for x in code_point_sequence)
if not is_identifier(candidate): return None
else: return candidate
def error_exit(fh, position):
current_position = fh.tell()
fh.seek(position)
text = fh.read(current_position - position)
for suspicious in ";.:,|":
if suspicious in text:
error.log("keywords in 'keyword_list' are must be white space separated. Found '%s'." % suspicious, fh)
else:
error.log("Cannot convert regular expression into identifier.", fh)
flags = optional_flags(fh, "keyword_list", "u",
{"u": "(default) make correspondent token identifiers uppercase.",
"l": "make correspondent token identifiers lowercase.",
"N": "(default) pass LexemeNull to token contructor.",
"L": "pass Lexeme to token constructor.",
"i": "implicit token identifier definition."},
BadCombinationList=["ul", "NL"])
lexeme_null_f = "N" in flags
lexeme_f = "L" in flags
implicit_tid_f = "i" in flags
lowercase_f = "l" in flags
uppercase_f = "u" in flags
skip_whitespace(fh)
prefix = read_identifier(fh)
skip_whitespace(fh)
check_or_die(fh, "{", "Opening bracket required after 'keyword_list'.")
while not check(fh, "}"):
skip_whitespace(fh)
position = fh.tell()
pattern = regular_expression.parse(fh)
identifier = to_identifier(pattern, fh)
if identifier is None: error_exit(fh, position)
elif uppercase_f: identifier = identifier.upper()
elif lowercase_f: identifier = identifier.lower()
identifier = "%s%s" % (prefix, identifier)
if implicit_tid_f: token_id_db_enter(fh, identifier)
code = code_fragment.get_CodeUser_for_token_sending(fh, identifier, position,
LexemeNullF = lexeme_null_f,
LexemeF = lexeme_f)
new_mode.add_pattern_action_pair(pattern, code, fh)
return True
def _check_against_old_syntax_of_base_mode_definitions(fh, direct_base_mode_name_list):
if not direct_base_mode_name_list: return
pos = fh.tell()
skip_whitespace(fh)
dummy_identifier = read_identifier(fh)
if dummy_identifier:
error.log("Missing separating ',' between base modes '%s' and '%s'.\n" \
% (direct_base_mode_name_list[-1], dummy_identifier) + \
"(The comma separator is mandatory since quex 0.53.1)", fh)
fh.seek(pos)
|
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.IN)
print GPIO.input(7)
|
# A company has 30 employees and decided to offer a family
# allowance of R$ 150.00 per child. The program must ask for
# the number of children and report the total bonus value
# for each employee.
numFuncionarios = 30
contador = 1
while contador<=numFuncionarios:
numFilhos = int(input('\nDigite quantidade de Filhos: '))
bonus = numFilhos*150
print(f'Para {numFilhos} filhos, um BONUS de: {bonus}')
contador+=1 |
# The MIT License (MIT)
#
# Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# base stuff
import os
import sys
import gc
# numpy
import numpy as np
# torch
import torch
import torch.cuda.amp as amp
import torch.distributed as dist
from torch.nn.parallel.distributed import DistributedDataParallel
# custom stuff
from utils import metric
# import wandb
try:
import wandb
except ImportError:
pass
class Trainer(object):
def __init__(self, pargs, model, criterion, optimizer, grad_scaler, scheduler, device):
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self.gscaler = grad_scaler
self.scheduler = scheduler
self.device = device
self.enable_dali = (not pargs.data_format == "hdf5")
# check for distributed lamb:
have_distributed_lamb = True
try:
from apex.contrib.optimizers.distributed_fused_lamb import DistributedFusedLAMB
except:
have_distributed_lamb = False
if have_distributed_lamb and isinstance(self.optimizer, DistributedFusedLAMB):
self.enable_distributed_lamb = True
self.optimizer.set_is_accumulation_step(False)
else:
self.enable_distributed_lamb = False
# we need this for distlamb
if self.enable_distributed_lamb:
# we need that in order for it to work with async graph capture
self.lr_cpu = torch.tensor([0.], dtype=torch.float32, device='cpu').pin_memory()
# extract relevant parameters
self.batch_size = pargs.local_batch_size
self.enable_jit = pargs.enable_jit
self.enable_amp = (pargs.precision_mode == "amp")
self.force_fp16 = (pargs.precision_mode == "fp16")
self.enable_nhwc = pargs.enable_nhwc
self.enable_graph = pargs.enable_graph
# set that to None
self.graph = None
# check if model is scriptable
self.jit_scriptable = True
for m in self.model.modules():
if hasattr(m, "jit_scriptable"):
self.jit_scriptable = self.jit_scriptable and m.jit_scriptable
if not self.jit_scriptable:
break
def _compile(self, input_shape):
# exit if we do not compile
if not self.enable_jit:
return
# set model to train just to be sure
self.model.train()
# input example
input_example = torch.zeros((self.batch_size, *input_shape), dtype=torch.float32, device=self.device)
input_example.normal_()
# convert to half if requested
if self.force_fp16:
input_example = input_example.half()
# we need to convert to NHWC if necessary
if self.enable_nhwc:
input_example = input_example.contiguous(memory_format = torch.channels_last)
# compile the model
with amp.autocast(enabled = self.enable_amp):
# extract the right thing to jit
model_handle = self.model if not isinstance(self.model, DistributedDataParallel) else self.model.module
# GBN is not scriptable, we need to workaround here
if self.jit_scriptable:
model_handle = torch.jit.script(model_handle)
else:
model_handle = torch.jit.trace(model_handle, input_example, check_trace = False)
# the criterion is always scriptable
self.criterion = torch.jit.script(self.criterion)
def _warmup(self, input_shape, label_shape, warmup_stream = None, num_warmup = 20):
# set model to train just to be sure
self.model.train()
# extract or create stream
stream = torch.cuda.Stream() if warmup_stream is None else warmup_stream
# create input:
input_example = torch.zeros((self.batch_size, *input_shape), dtype=torch.float32, device=self.device)
input_example.normal_()
label_example = torch.zeros((self.batch_size, *label_shape), dtype=torch.int64, device=self.device)
# convert to half if requested
if self.force_fp16:
input_example = input_example.half()
# we need to convert to NHWC if necessary
if self.enable_nhwc:
input_example = input_example.contiguous(memory_format = torch.channels_last)
# wait for ambient stream before starting capture
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
# warmup:
for _ in range(num_warmup):
self.optimizer.zero_grad()
with amp.autocast(enabled = self.enable_amp):
output = self.model(input_example)
loss = self.criterion(output, label_example)
# distributed lamb init
if self.enable_distributed_lamb:
self.optimizer._lazy_init_stage1()
self.gscaler.scale(loss).backward()
# distributed lamb finalize
if self.enable_distributed_lamb:
self.optimizer._lazy_init_stage2()
self.optimizer.complete_reductions()
torch.cuda.current_stream().wait_stream(stream)
def _capture(self, input_shape, label_shape, graph_stream = None, num_warmup = 20, graph_pool = None):
# exit if we do not capture
if not self.enable_graph:
return
# set model to train just to be sure
self.model.train()
# extract or create capture stream
capture_stream = torch.cuda.Stream() if graph_stream is None else graph_stream
# create input:
self.static_input = torch.zeros((self.batch_size, *input_shape), dtype=torch.float32, device=self.device)
self.static_input.normal_()
self.static_label = torch.zeros((self.batch_size, *label_shape), dtype=torch.int64, device=self.device)
# convert to half if requested
if self.force_fp16:
self.static_input = self.static_input.half()
# we need to convert to NHWC if necessary
if self.enable_nhwc:
self.static_input = self.static_input.contiguous(memory_format = torch.channels_last)
# wait for ambient stream before starting capture
capture_stream.wait_stream(torch.cuda.current_stream())
# enter stream context
with torch.cuda.stream(capture_stream):
# warmup:
for _ in range(num_warmup):
self.optimizer.zero_grad()
# FW pass
with amp.autocast(enabled = self.enable_amp):
output = self.model(self.static_input)
loss = self.criterion(output, self.static_label)
# distributed lamb work here
if self.enable_distributed_lamb:
self.optimizer._lazy_init_stage1()
# BW pass
self.gscaler.scale(loss).backward()
# distributed lamb postprocessing
if self.enable_distributed_lamb:
self.optimizer._lazy_init_stage2()
self.optimizer.set_global_scale(self.gscaler._get_scale_async())
self.optimizer.complete_reductions()
self.gscaler.step(self.optimizer)
self.gscaler.update()
# sync streams
capture_stream.synchronize()
# clean up
if num_warmup > 0:
del output,loss
gc.collect()
torch.cuda.empty_cache()
# create graph
self.graph = torch.cuda._Graph()
# zero grads before capture:
self.model.zero_grad(set_to_none=True)
# start capture
if graph_pool is not None:
self.graph.capture_begin(pool = graph_pool)
else:
self.graph.capture_begin()
# preprocessing
#self.optimizer.zero_grad() # not necessary according to Michael
#self.static_scale = self.gscaler._scale
# FW pass
with amp.autocast(enabled = self.enable_amp):
self.static_output = self.model(self.static_input)
self.static_loss = self.criterion(self.static_output, self.static_label)
# BW pass
self.gscaler.scale(self.static_loss).backward()
# should also be done
# distributed lamb postprocessing
if self.enable_distributed_lamb:
self.optimizer.set_global_scale(self.gscaler._get_scale_async())
self.optimizer.complete_reductions()
self.gscaler.step(self.optimizer)
self.gscaler.update()
# end capture
self.graph.capture_end()
torch.cuda.current_stream().wait_stream(capture_stream)
def preprocess(self, input_shape, label_shape, scaffolding_stream = None, graph_pool = None):
# compile
self._compile(input_shape)
# warmup
self._warmup(input_shape, label_shape, warmup_stream = scaffolding_stream, num_warmup = 10)
# capture
self._capture(input_shape, label_shape, graph_stream = scaffolding_stream, num_warmup = 0, graph_pool = graph_pool)
def step(self, inputs, label):
# set model to train to be sure
self.model.train()
# convert input if requested
if self.force_fp16:
inputs = inputs.half()
# to NHWC
if self.enable_nhwc:
N, H, W, C = (self.batch_size, 768, 1152, 16)
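            # Reinterpret the NHWC-packed buffer as an NCHW-shaped tensor by
            # adjusting strides only (no copy); H, W and C are the fixed input
            # dimensions assumed by this training script.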
inputs = torch.as_strided(inputs, size=[N, C, H, W], stride=[C*H*W, 1, W*C, C])
if self.graph is None:
with amp.autocast(enabled = self.enable_amp):
outputs = self.model.forward(inputs)
loss = self.criterion(outputs, label)
# prepare optimizer
self.optimizer.zero_grad()
# backward pass
self.gscaler.scale(loss).backward()
# postprocess
if self.enable_distributed_lamb:
self.optimizer.set_global_scale(self.gscaler._get_scale_async())
self.optimizer.complete_reductions()
# update scaler
self.gscaler.step(self.optimizer)
self.gscaler.update()
else:
# run graph
self.static_input.copy_(inputs)
self.static_label.copy_(label)
#self.static_scale.copy_(self.gscaler._scale)
self.graph.replay()
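            # Replaying the captured graph re-runs the recorded forward,
            # backward and (for distributed LAMB) optimizer work on the static
            # buffers filled above; fresh results are read back from
            # static_loss / static_output below.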
# DEBUG
## postprocess
#if self.enable_distributed_lamb:
# self.optimizer.complete_reductions()
# self.optimizer.set_global_scale(self.gscaler._get_scale_async())
# DEBUG
if not self.enable_distributed_lamb:
self.gscaler.step(self.optimizer)
self.gscaler.update()
# copy variables
loss = self.static_loss.clone()
outputs = self.static_output.clone()
# get current learning rate
current_lr = self.optimizer.param_groups[0]['lr']
# scheduler step if requested:
if self.scheduler is not None:
self.scheduler.step()
if self.enable_distributed_lamb:
self.lr_cpu[0] = current_lr
self.optimizer._lr.copy_(self.lr_cpu[0])
return loss, outputs, current_lr
def train_step(pargs, comm_rank, comm_size,
step, epoch, trainer,
train_loader,
logger, have_wandb, max_num_steps_per_epoch=None):
# epoch loop
for step_in_epoch, (inputs, label, filename) in enumerate(train_loader):
if step_in_epoch==max_num_steps_per_epoch:
break
if not trainer.enable_dali:
# send to device
inputs = inputs.to(trainer.device)
label = label.to(trainer.device)
loss, outputs, current_lr = trainer.step(inputs, label)
# step counter
step += 1
#log if requested
if (step % pargs.logging_frequency == 0):
# allreduce for loss
loss_avg = loss.detach()
if dist.is_initialized():
dist.reduce(loss_avg, dst=0, op=dist.ReduceOp.SUM)
loss_avg_train = loss_avg.item() / float(comm_size)
# Compute score
outputs = outputs.detach()
if pargs.enable_nhwc:
outputs = outputs.contiguous(memory_format = torch.contiguous_format)
predictions = torch.argmax(torch.softmax(outputs, 1), 1)
iou = metric.compute_score_new(predictions, label, num_classes=3)
iou_avg = iou.detach()
if dist.is_initialized():
dist.reduce(iou_avg, dst=0, op=dist.ReduceOp.SUM)
iou_avg_train = iou_avg.item() / float(comm_size)
# log values
logger.log_event(key = "learning_rate", value = current_lr, metadata = {'epoch_num': epoch+1, 'step_num': step})
logger.log_event(key = "train_accuracy", value = iou_avg_train, metadata = {'epoch_num': epoch+1, 'step_num': step})
logger.log_event(key = "train_loss", value = loss_avg_train, metadata = {'epoch_num': epoch+1, 'step_num': step})
if have_wandb and (comm_rank == 0):
wandb.log({"train_loss": loss_avg_train}, step = step)
wandb.log({"train_accuracy": iou_avg_train}, step = step)
wandb.log({"learning_rate": current_lr}, step = step)
return step
def train_step_profile(pargs, comm_rank, comm_size,
step, epoch, trainer,
train_loader,
start_profiler, stop_profiler):
# enable profiling
with torch.autograd.profiler.emit_nvtx(enabled = True):
# epoch loop
train_iter = iter(train_loader)
epoch_done = False
while(True):
if step == pargs.capture_range_start:
start_profiler()
# step region
torch.cuda.synchronize()
torch.cuda.nvtx.range_push(f"step_{step}")
# IO region
torch.cuda.nvtx.range_push(f"data_loading")
try:
inputs, label, filename = next(train_iter)
except StopIteration:
epoch_done = True
torch.cuda.nvtx.range_pop()
if epoch_done:
break
if pargs.data_format == "hdf5":
# send to device
inputs = inputs.to(trainer.device)
label = label.to(trainer.device)
if not pargs.io_only:
loss, outputs, current_lr = trainer.step(inputs, label)
# step counter
step += 1
torch.cuda.synchronize()
torch.cuda.nvtx.range_pop()
if step >= pargs.capture_range_stop:
stop_profiler()
break
return step
|
from django import forms


class LoginForm(forms.Form):
    # Error messages are kept in the original Chinese:
    # "用户名不能为空" = "username must not be empty", "密码不能为空" = "password must not be empty".
    username = forms.CharField(required=True, error_messages={'required': "用户名不能为空"})
    password = forms.CharField(widget=forms.PasswordInput, max_length=120, min_length=6,
                               required=True, error_messages={'required': "密码不能为空"})
|
"""The core session used as default when no backend is connected."""
import logging
from .session import Session
from .sparql_backend import SparqlResult, SparqlBindingSet, SPARQLBackend
logger = logging.getLogger(__name__)
class CoreSession(Session, SPARQLBackend):
"""Core default session for all objects."""
_warned_sparql_slow = False
def __str__(self):
"""Convert the core session object to string."""
return "<CoreSession object>"
# OVERRIDE
def _notify_update(self, cuds_object):
pass
# OVERRIDE
def _notify_delete(self, cuds_object):
pass
# OVERRIDE
def _notify_read(self, cuds_object):
pass
def _get_full_graph(self):
"""Get the triples in the core session."""
return self.graph
def _sparql(self, query_string):
"""Execute the given SPARQL query on the graph of the core session.
Args:
query_string (str): The SPARQL query as a string.
"""
if not CoreSession._warned_sparql_slow:
logger.warning('At the moment, SPARQL queries on the default '
'session of OSP-core (the core session) are '
'supported, but slow. For better performance, '
'please perform the query on another session with '
'SPARQL support (e.g. a triple store wrapper).')
CoreSession._warned_sparql_slow = True
result = self.graph.query(query_string)
return CoreSession.CoreSessionSparqlResult(result, self)
class CoreSessionSparqlResult(SparqlResult):
"""The result of a SPARQL query on the core session."""
def __init__(self, query_result, session):
"""Initialize the result."""
self.result = query_result
super().__init__(session)
def close(self):
"""Close the connection."""
pass
def __iter__(self):
"""Iterate the result."""
for row in self.result:
yield CoreSession.CoreSessionSparqlBindingSet(row,
self.session)
def __len__(self):
"""Compute the number of elements in the result."""
return len(self.result)
class CoreSessionSparqlBindingSet(SparqlBindingSet):
"""A row in the result. Mapping from variable to value."""
def __init__(self, row, session):
"""Initialize the row."""
self.binding_set = row
super().__init__(session)
def _get(self, variable_name):
return self.binding_set[variable_name]
core_session = CoreSession()
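# Illustrative sketch of how the pieces above fit together (uses only methods
# defined in this module; the query string is a made-up example):
#   result = core_session._sparql("SELECT ?s ?p ?o WHERE { ?s ?p ?o }")
#   print(len(result))            # CoreSessionSparqlResult supports len()
#   for row in result:            # ...and iteration over binding sets
#       value = row._get("s")     # CoreSessionSparqlBindingSet lookup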
|
from app.api import callback_api, operator_api, task_api, response_api, services_api, database_api, crypto_api
from app.api import payloads_api, analytics_api, c2profiles_api, file_api, operation_api, payloadtype_api
from app.api import command_api, reporting_api, credential_api, keylog_api, transform_api, mitre_api, artifacts_api
from app.api import rabbitmq_api, apitokens_api, browserscript_api
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
# root_path = os.path.abspath(os.path.join(root_path,os.path.pardir))
graphs_path = root_path+'/results_analysis/graphs/'
print("root path:{}".format(root_path))
# plt.rcParams['figure.figsize']=(10,8)
plt.rcParams['font.size']=6
# plt.rcParams["figure.figsize"] = [7.48, 5.61]
plt.rcParams['image.cmap']='plasma'
# plt.rcParams['axes.linewidth']=0.8
vmd_train = pd.read_csv(root_path+"/Huaxian_vmd/data/VMD_TRAIN.csv")
eemd_train = pd.read_csv(root_path+"/Huaxian_eemd/data/EEMD_TRAIN.csv")
ssa_train = pd.read_csv(root_path+"/Huaxian_ssa/data/SSA_TRAIN.csv")
dwt_train = pd.read_csv(root_path+"/Huaxian_dwt/data/db10-2/DWT_TRAIN.csv")
vmd_train=vmd_train.drop("ORIG",axis=1)
eemd_train=eemd_train.drop("ORIG",axis=1)
ssa_train=ssa_train.drop("ORIG",axis=1)
dwt_train=dwt_train.drop("ORIG",axis=1)
vmd_corrs = vmd_train.corr(method="pearson")
eemd_corrs = eemd_train.corr(method="pearson")
ssa_corrs = ssa_train.corr(method="pearson")
dwt_corrs = dwt_train.corr(method="pearson")
print(vmd_corrs)
plt.figure(figsize=(3.54,3.54))
plt.title("Pearson-Correlation for subsignals of VMD at Huaxian station",fontsize=6)
ax=plt.imshow(vmd_corrs)
plt.xlabel(r"${S}_i$")
plt.ylabel(r"${S}_j$")
plt.colorbar(ax.colorbar, fraction=0.045)
ax.colorbar.set_label("$Corr_{i,j}$")
plt.clim(0,1)
plt.tight_layout()
# plt.show()
plt.figure(figsize=(3.54,3.54))
plt.title("Pearson-Correlation for subsignals of SSA at Huaxian station",fontsize=6)
ax=plt.imshow(ssa_corrs)
plt.xlabel(r"${S}_i$")
plt.ylabel(r"${S}_j$")
plt.colorbar(ax.colorbar, fraction=0.045)
ax.colorbar.set_label("$Corr_{i,j}$")
plt.clim(0,1)
plt.tight_layout()
# plt.show()
plt.figure(figsize=(3.54,3.54))
plt.title("Pearson-Correlation for subsignals of EEMD at Huaxian station",fontsize=6)
ax=plt.imshow(eemd_corrs)
plt.xlabel(r"${S}_i$")
plt.ylabel(r"${S}_j$")
plt.colorbar(ax.colorbar, fraction=0.045)
ax.colorbar.set_label("$Corr_{i,j}$")
plt.clim(0,1)
plt.tight_layout()
# plt.show()
plt.figure(figsize=(3.54,3.54))
plt.title("Pearson-Correlation for subsignals of DWT at Huaxian station",fontsize=6)
ax=plt.imshow(dwt_corrs)
plt.xlabel(r"${S}_i$")
plt.ylabel(r"${S}_j$")
plt.colorbar(ax.colorbar, fraction=0.045)
ax.colorbar.set_label("$Corr_{i,j}$")
plt.clim(0,1)
plt.tight_layout()
# plt.show()
corrs=[eemd_corrs,ssa_corrs,vmd_corrs,dwt_corrs]
titles=["EEMD","SSA","VMD","DWT",]
plt.figure(figsize=(3.54,3.4))
for i in range(len(corrs)):
plt.subplot(2,2,i+1)
plt.title(titles[i],fontsize=6)
ax1=plt.imshow(corrs[i])
plt.xlabel(r"${S}_i$")
plt.ylabel(r"${S}_j$")
plt.colorbar(ax1.colorbar, fraction=0.045)
ax1.colorbar.set_label("$Corr_{i,j}$")
plt.clim(0,1)
plt.tight_layout()
# plt.show()
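# Number of subsignals produced by each decomposition, in the same order as
# `corrs`/`titles` above (EEMD, SSA, VMD, DWT); used to set the tick labels.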
series_len=[9,12,8,3]
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(3.54,3.3))
for (ax,i) in zip(axes.flat,list(range(len(corrs)))):
ax.set_title(titles[i],fontsize=6)
ax.set_xlabel(r"${S}_i$")
ax.set_ylabel(r"${S}_j$")
im = ax.imshow(corrs[i], cmap='viridis',vmin=0, vmax=1)
if i==1:
ax.set_xticks(np.arange(0, series_len[i], 2))
ax.set_yticks(np.arange(0, series_len[i], 2))
ax.set_xticklabels(np.arange(1, series_len[i]+1, 2))
ax.set_yticklabels(np.arange(1, series_len[i]+1, 2))
else:
ax.set_xticks(np.arange(0, series_len[i], 1))
ax.set_yticks(np.arange(0, series_len[i], 1))
ax.set_xticklabels(np.arange(1, series_len[i]+1, 1))
ax.set_yticklabels(np.arange(1, series_len[i]+1, 1))
fig.subplots_adjust(bottom=0.08, top=0.96, left=0.1, right=0.8,wspace=0.5, hspace=0.3)
# add a colorbar axes: lower-left corner at [0.83, 0.12] in figure coordinates, width 0.04, height 0.805
cb_ax = fig.add_axes([0.83, 0.12, 0.04, 0.805])
cbar = fig.colorbar(im, cax=cb_ax)
cbar.set_ticks(np.arange(0, 1.1, 0.5))
cbar.set_label(r"$Corr_{i,j}$")
# cbar.set_ticklabels(['low', 'medium', 'high'])
# plt.savefig(graphs_path+"Pearson_corr_huaxian.eps",format="EPS",dpi=2000)
# plt.savefig(graphs_path+"Pearson_corr_huaxian.tif",format="TIFF",dpi=1200)
plt.show()
|
# Script for sending the second mail
#
# Configuration
# - config.yaml.example for the smtp connection
# - text.dat.example for the e-mail's text
# The tags inside < > are replaced with the values
# of the corresponding attributes in the bibtex
# file
#
# @author Open Data in Experimental Mechanics
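#
# Expected layout of config.yaml.example, inferred from the keys read below
# (all values here are placeholders):
#
#   Mail:
#     user: "sender@example.org"
#     server: "smtp.example.org"
#     pw: "secret"
#     port: 587
#     address: "sender@example.org"
#     subject: "Data for <year>: <title>"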
# Packages
import csv
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
import yaml, codecs
import sys
import bibtexparser
####
# Activate utf8 encoding
reload(sys)
sys.setdefaultencoding('utf8')
# Open the template text of the e-mail
file = open("text.dat.example",'r')
plainText = file.read()
# Configuraton of the smtp connection
username= ""
server= ""
password= ""
port = -1
address = ""
subject = ""
print "Starting e-mail sending script"
# Loading the configuration from the yaml file
with open("config.yaml.example",'r') as f:
doc = yaml.load(f)
username = doc["Mail"]["user"]
server = doc["Mail"]["server"]
password = doc["Mail"]["pw"]
port = int(doc["Mail"]["port"])
address = doc["Mail"]["address"]
subject = doc["Mail"]["subject"]
print "Loading config data for", username, server, port
# Start the connection to the server
server = smtplib.SMTP(server, port)
server.starttls()
server.login(username, password)
# Send mails
with open('second.csv', 'rb') as csvfile:
spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')
for row in spamreader:
if len(row[3]) == 0 or len(row[4]) == 0:
name = ""
with open("../data/"+row[0]+".bib") as bibtex_file:
bibtex_str = bibtex_file.read()
bib_database = bibtexparser.loads(bibtex_str)
for k, entry in enumerate(bib_database.entries):
if entry['author-email'] == row[1]:
name = entry['author-name']
customText = plainText.replace("<author>",name.encode("utf8"))
customText = customText.replace("<title>",row[2].encode("utf8"))
customText = customText.replace("<year>",row[0].encode("utf8"))
customSubject = subject.replace("<year>",row[0].encode("utf8"))
customSubject = customSubject.replace("<title>",row[2].encode("utf8"))
print customText
to = row[1]
cc = ""
bcc = "patrick.diehl@polymtl.ca"
rcpt = cc.split(",") + bcc.split(",") + [to]
msg = MIMEMultipart("alternative")
msg['From'] = address
msg['To'] = to
msg['Subject'] = customSubject
msg['Bcc'] = bcc
msg.attach(MIMEText(customText.encode("utf-8"), 'plain', "utf8"))
text = msg.as_string()
server.sendmail(address, rcpt, text)
print "E-mail sent to " , to
# Close the connection
server.quit()
|
#!/usr/bin/env python
import os
import sys
import unittest
from client import TestClient
from server import TestServer
from tracecontext import Traceparent, Tracestate
client = None
server = None
def environ(name, default = None):
if not name in os.environ:
if default:
os.environ[name] = default
else:
raise EnvironmentError('environment variable {} is not defined'.format(name))
return os.environ[name]
STRICT_LEVEL = int(environ('STRICT_LEVEL', '2'))
print('STRICT_LEVEL: {}'.format(STRICT_LEVEL))
def setUpModule():
global client
global server
environ('SERVICE_ENDPOINT')
client = client or TestClient(host = '127.0.0.1', port = 7777, timeout = 5)
server = server or TestServer(host = '127.0.0.1', port = 7777, timeout = 3)
server.start()
with client.scope() as scope:
response = scope.send_request()
def tearDownModule():
server.stop()
class TestBase(unittest.TestCase):
import re
traceparent_name_re = re.compile(r'^traceparent$', re.IGNORECASE)
traceparent_format = r'^([0-9a-f]{2})-([0-9a-f]{32})-([0-9a-f]{16})-([0-9a-f]{2})$'
traceparent_format_re = re.compile(traceparent_format)
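    # traceparent wire format: "<version>-<trace-id>-<parent-id>-<trace-flags>",
    # e.g. "00-12345678901234567890123456789012-1234567890123456-01".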
tracestate_name_re = re.compile(r'^tracestate$', re.IGNORECASE)
def make_request(self, headers, count = 1):
import pprint
with client.scope() as scope:
arguments = {
'url': environ('SERVICE_ENDPOINT'),
'headers': headers,
'arguments': [],
}
for idx in range(count):
arguments['arguments'].append({'url': scope.url(str(idx)), 'arguments': []})
response = scope.send_request(arguments = arguments)
verbose = ['', '']
verbose.append('Harness trying to send the following request to your service {0}'.format(arguments['url']))
verbose.append('')
verbose.append('POST {} HTTP/1.1'.format(arguments['url']))
for key, value in arguments['headers']:
verbose.append('{}: {}'.format(key, value))
verbose.append('')
verbose.append(pprint.pformat(arguments['arguments']))
verbose.append('')
results = response['results'][0]
if 'exception' in results:
verbose.append('Harness got an exception {}'.format(results['exception']))
verbose.append('')
verbose.append(results['msg'])
else:
verbose.append('Your service {} responded with HTTP status {}'.format(arguments['url'], results['status']))
verbose.append('')
for key, value in results['headers']:
verbose.append('{}: {}'.format(key, value))
verbose.append('')
if isinstance(results['body'], str):
verbose.append(results['body'])
else:
verbose.append(pprint.pformat(results['body']))
for idx in range(count):
if str(idx) in response:
verbose.append('Your service {} made the following callback to harness'.format(arguments['url']))
verbose.append('')
for key, value in response[str(idx)]['headers']:
verbose.append('{}: {}'.format(key, value))
verbose.append('')
verbose.append('')
verbose = os.linesep.join(verbose)
if 'HARNESS_DEBUG' in os.environ:
print(verbose)
result = []
for idx in range(count):
self.assertTrue(str(idx) in response, 'your test service failed to make a callback to the test harness {}'.format(verbose))
result.append(response[str(idx)])
return result
def get_traceparent(self, headers):
retval = []
for key, value in headers:
if self.traceparent_name_re.match(key):
retval.append((key, value))
self.assertEqual(len(retval), 1, 'expect one traceparent header, got {} {!r}'.format('more' if retval else 'zero', retval))
return Traceparent.from_string(retval[0][1])
def get_tracestate(self, headers):
tracestate = Tracestate()
for key, value in headers:
if self.tracestate_name_re.match(key):
tracestate.from_string(value)
return tracestate
def make_single_request_and_get_tracecontext(self, headers):
headers = self.make_request(headers)[0]['headers']
return (self.get_traceparent(headers), self.get_tracestate(headers))
class TraceContextTest(TestBase):
def test_both_traceparent_and_tracestate_missing(self):
'''
harness sends a request without traceparent or tracestate
expects a valid traceparent from the output header
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([])
def test_traceparent_included_tracestate_missing(self):
'''
harness sends a request with traceparent but without tracestate
expects a valid traceparent from the output header, with the same trace_id but different parent_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertNotEqual(traceparent.parent_id.hex(), '1234567890123456')
def test_traceparent_duplicated(self):
'''
harness sends a request with two traceparent headers
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789011-1234567890123456-01'],
['traceparent', '00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789011')
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_header_name(self):
'''
harness sends an invalid traceparent using wrong names
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['trace-parent', '00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['trace.parent', '00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_header_name_valid_casing(self):
'''
harness sends a valid traceparent using different combination of casing
expects a valid traceparent from the output header
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['TraceParent', '00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['TrAcEpArEnT', '00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['TRACEPARENT', '00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_version_0x00(self):
'''
harness sends an invalid traceparent with extra trailing characters
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-01.'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-01-what-the-future-will-be-like'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_version_0xcc(self):
'''
        harness sends a valid traceparent with future version 204 (0xcc)
expects a valid traceparent from the output header with the same trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', 'cc-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', 'cc-12345678901234567890123456789012-1234567890123456-01-what-the-future-will-be-like'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', 'cc-12345678901234567890123456789012-1234567890123456-01.what-the-future-will-be-like'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_version_0xff(self):
'''
harness sends an invalid traceparent with version 255 (0xff)
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', 'ff-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_version_illegal_characters(self):
'''
harness sends an invalid traceparent with illegal characters in version
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '.0-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '0.-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_version_too_long(self):
'''
harness sends an invalid traceparent with version more than 2 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '000-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '0000-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_version_too_short(self):
'''
harness sends an invalid traceparent with version less than 2 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '0-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_trace_id_all_zero(self):
'''
harness sends an invalid traceparent with trace_id = 00000000000000000000000000000000
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-00000000000000000000000000000000-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '00000000000000000000000000000000')
def test_traceparent_trace_id_illegal_characters(self):
'''
harness sends an invalid traceparent with illegal characters in trace_id
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-.2345678901234567890123456789012-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '.2345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-1234567890123456789012345678901.-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '1234567890123456789012345678901.')
def test_traceparent_trace_id_too_long(self):
'''
harness sends an invalid traceparent with trace_id more than 32 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-123456789012345678901234567890123-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '123456789012345678901234567890123')
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertNotEqual(traceparent.trace_id.hex(), '23456789012345678901234567890123')
def test_traceparent_trace_id_too_short(self):
'''
harness sends an invalid traceparent with trace_id less than 32 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-1234567890123456789012345678901-1234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '1234567890123456789012345678901')
def test_traceparent_parent_id_all_zero(self):
'''
harness sends an invalid traceparent with parent_id = 0000000000000000
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-0000000000000000-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_parent_id_illegal_characters(self):
'''
harness sends an invalid traceparent with illegal characters in parent_id
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-.234567890123456-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-123456789012345.-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_parent_id_too_long(self):
'''
harness sends an invalid traceparent with parent_id more than 16 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-12345678901234567-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_parent_id_too_short(self):
'''
harness sends an invalid traceparent with parent_id less than 16 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-123456789012345-01'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_trace_flags_illegal_characters(self):
'''
harness sends an invalid traceparent with illegal characters in trace_flags
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-.0'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-0.'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_trace_flags_too_long(self):
'''
harness sends an invalid traceparent with trace_flags more than 2 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-001'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_trace_flags_too_short(self):
'''
harness sends an invalid traceparent with trace_flags less than 2 HEXDIG
expects a valid traceparent from the output header, with a newly generated trace_id
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-1'],
])
self.assertNotEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_traceparent_ows_handling(self):
'''
        harness sends a valid traceparent with leading and trailing OWS
expects a valid traceparent from the output header
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', ' 00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '\t00-12345678901234567890123456789012-1234567890123456-01'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-01 '],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-01\t'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '\t 00-12345678901234567890123456789012-1234567890123456-01 \t'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
def test_tracestate_included_traceparent_missing(self):
'''
harness sends a request with tracestate but without traceparent
expects a valid traceparent from the output header
expects the tracestate to be discarded
'''
traceparent, tracestate1 = self.make_single_request_and_get_tracecontext([
['tracestate', 'foo=1'],
])
traceparent, tracestate2 = self.make_single_request_and_get_tracecontext([
['tracestate', 'foo=1,bar=2'],
])
self.assertEqual(len(tracestate1), len(tracestate2))
def test_tracestate_included_traceparent_included(self):
'''
harness sends a request with both tracestate and traceparent
expects a valid traceparent from the output header with the same trace_id
expects the tracestate to be inherited
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1,bar=2'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn("foo", tracestate)
self.assertIn("bar", tracestate)
self.assertEqual(tracestate['foo'], '1')
self.assertEqual(tracestate['bar'], '2')
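    # tracestate is a comma-separated list of key=value members; multiple
    # tracestate headers are combined in order. The tests below cover header
    # naming and casing, empty headers, duplicated keys, the allowed character
    # set, OWS handling, the 32-member limit, and the key length limits
    # (256 chars for a plain key, 241 tenant chars + '@' + 14 vendor chars).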
def test_tracestate_header_name(self):
'''
harness sends an invalid tracestate using wrong names
expects the tracestate to be discarded
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['trace-state', 'foo=1'],
])
self.assertRaises(KeyError, lambda: tracestate['foo'])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['trace.state', 'foo=1'],
])
self.assertRaises(KeyError, lambda: tracestate['foo'])
def test_tracestate_header_name_valid_casing(self):
'''
        harness sends a valid tracestate using different combinations of casing
expects the tracestate to be inherited
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['TraceState', 'foo=1'],
])
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['TrAcEsTaTe', 'foo=1'],
])
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['TRACESTATE', 'foo=1'],
])
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
def test_tracestate_empty_header(self):
'''
harness sends a request with empty tracestate header
expects the empty tracestate to be discarded
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', ''],
])
self.assertTrue(not tracestate or tracestate != '')
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', ''],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', ''],
['tracestate', 'foo=1'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
def test_tracestate_multiple_headers_different_keys(self):
'''
harness sends a request with multiple tracestate headers, each contains different set of keys
expects a combined tracestate
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1,bar=2'],
['tracestate', 'rojo=1,congo=2'],
['tracestate', 'baz=3'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertTrue('foo=1' in str(tracestate))
self.assertTrue('bar=2' in str(tracestate))
self.assertTrue('rojo=1' in str(tracestate))
self.assertTrue('congo=2' in str(tracestate))
self.assertTrue('baz=3' in str(tracestate))
self.assertTrue(str(tracestate).index('foo=1') < str(tracestate).index('bar=2'))
self.assertTrue(str(tracestate).index('bar=2') < str(tracestate).index('rojo=1'))
self.assertTrue(str(tracestate).index('rojo=1') < str(tracestate).index('congo=2'))
self.assertTrue(str(tracestate).index('congo=2') < str(tracestate).index('baz=3'))
@unittest.skipIf(STRICT_LEVEL < 2, "strict")
def test_tracestate_duplicated_keys(self):
'''
harness sends a request with an invalid tracestate header with duplicated keys
expects the tracestate to be inherited, and the duplicated keys to be either kept as-is or one of them
to be discarded
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1,foo=1'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertTrue('foo=1' in str(tracestate))
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1,foo=2'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertTrue('foo=1' in str(tracestate) or 'foo=2' in str(tracestate))
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', 'foo=1'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertTrue('foo=1' in str(tracestate))
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', 'foo=2'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertTrue('foo=1' in str(tracestate) or 'foo=2' in str(tracestate))
def test_tracestate_all_allowed_characters(self):
'''
harness sends a request with a valid tracestate header with all legal characters
expects the tracestate to be inherited
'''
key_without_vendor = ''.join([
''.join(map(chr, range(0x61, 0x7A + 1))), # lcalpha
'0123456789', # DIGIT
'_',
'-',
'*',
'/',
])
key_with_vendor = key_without_vendor + '@a-z0-9_-*/'
value = ''.join([
''.join(map(chr, range(0x20, 0x2B + 1))),
''.join(map(chr, range(0x2D, 0x3C + 1))),
''.join(map(chr, range(0x3E, 0x7E + 1))),
])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', key_without_vendor + '=' + value],
])
self.assertIn(key_without_vendor, tracestate)
self.assertEqual(tracestate[key_without_vendor], value)
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', key_with_vendor + '=' + value],
])
self.assertIn(key_with_vendor, tracestate)
self.assertEqual(tracestate[key_with_vendor], value)
def test_tracestate_ows_handling(self):
'''
harness sends a request with a valid tracestate header with OWS
expects the tracestate to be inherited
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1 \t , \t bar=2, \t baz=3'],
])
self.assertIn('foo', tracestate)
self.assertIn('bar', tracestate)
self.assertIn('baz', tracestate)
self.assertEqual(tracestate['foo'], '1')
self.assertEqual(tracestate['bar'], '2')
self.assertEqual(tracestate['baz'], '3')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1\t \t,\t \tbar=2,\t \tbaz=3'],
])
self.assertIn('foo', tracestate)
self.assertIn('bar', tracestate)
self.assertIn('baz', tracestate)
self.assertEqual(tracestate['foo'], '1')
self.assertEqual(tracestate['bar'], '2')
self.assertEqual(tracestate['baz'], '3')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', ' foo=1'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', '\tfoo=1'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1 '],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1\t'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', '\t foo=1 \t'],
])
self.assertEqual(traceparent.trace_id.hex(), '12345678901234567890123456789012')
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
@unittest.skipIf(STRICT_LEVEL < 2, "strict")
def test_tracestate_key_illegal_characters(self):
'''
harness sends a request with an invalid tracestate header with illegal key
expects the tracestate to be discarded
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo =1'],
])
self.assertRaises(KeyError, lambda: tracestate['foo '])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'FOO=1'],
])
self.assertRaises(KeyError, lambda: tracestate['FOO'])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo.bar=1'],
])
self.assertRaises(KeyError, lambda: tracestate['foo.bar'])
@unittest.skipIf(STRICT_LEVEL < 2, "strict")
def test_tracestate_key_illegal_vendor_format(self):
'''
harness sends a request with an invalid tracestate header with illegal vendor format
expects the tracestate to be discarded
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo@=1,bar=2'],
])
self.assertRaises(KeyError, lambda: tracestate['bar'])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', '@foo=1,bar=2'],
])
self.assertRaises(KeyError, lambda: tracestate['bar'])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo@@bar=1,bar=2'],
])
self.assertRaises(KeyError, lambda: tracestate['bar'])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo@bar@baz=1,bar=2'],
])
self.assertRaises(KeyError, lambda: tracestate['bar'])
@unittest.skipIf(STRICT_LEVEL < 2, "strict")
def test_tracestate_member_count_limit(self):
'''
harness sends a request with a valid tracestate header with 32 list members
expects the tracestate to be inherited
harness sends a request with an invalid tracestate header with 33 list members
expects the tracestate to be discarded
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'bar01=01,bar02=02,bar03=03,bar04=04,bar05=05,bar06=06,bar07=07,bar08=08,bar09=09,bar10=10'],
['tracestate', 'bar11=11,bar12=12,bar13=13,bar14=14,bar15=15,bar16=16,bar17=17,bar18=18,bar19=19,bar20=20'],
['tracestate', 'bar21=21,bar22=22,bar23=23,bar24=24,bar25=25,bar26=26,bar27=27,bar28=28,bar29=29,bar30=30'],
['tracestate', 'bar31=31,bar32=32'],
])
self.assertIn('bar01', tracestate)
self.assertEqual(tracestate['bar01'], '01')
self.assertEqual(len(tracestate), 32)
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'bar01=01,bar02=02,bar03=03,bar04=04,bar05=05,bar06=06,bar07=07,bar08=08,bar09=09,bar10=10'],
['tracestate', 'bar11=11,bar12=12,bar13=13,bar14=14,bar15=15,bar16=16,bar17=17,bar18=18,bar19=19,bar20=20'],
['tracestate', 'bar21=21,bar22=22,bar23=23,bar24=24,bar25=25,bar26=26,bar27=27,bar28=28,bar29=29,bar30=30'],
['tracestate', 'bar31=31,bar32=32,bar33=33'],
])
self.assertRaises(KeyError, lambda: tracestate['bar01'])
@unittest.skipIf(STRICT_LEVEL < 2, "strict")
def test_tracestate_key_length_limit(self):
'''
harness sends tracestate header with a key of 256 and 257 characters
harness sends tracestate header with a key of 14 and 15 characters in the vendor section
harness sends tracestate header with a key of 241 and 242 characters in the tenant section
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', 'z' * 256 + '=1'],
])
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', 'z' * 257 + '=1'],
])
self.assertRaises(KeyError, lambda: tracestate['foo'])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', 't' * 241 + '@' + 'v' * 14 + '=1'],
])
self.assertIn('foo', tracestate)
self.assertEqual(tracestate['foo'], '1')
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', 't' * 242 + '@v=1'],
])
self.assertRaises(KeyError, lambda: tracestate['foo'])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=1'],
['tracestate', 't@' + 'v' * 15 + '=1'],
])
self.assertRaises(KeyError, lambda: tracestate['foo'])
@unittest.skipIf(STRICT_LEVEL < 2, "strict")
def test_tracestate_value_illegal_characters(self):
'''
harness sends a request with an invalid tracestate header with illegal value format
expects the tracestate to be discarded
'''
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=bar=baz'],
])
self.assertRaises(KeyError, lambda: tracestate['foo'])
traceparent, tracestate = self.make_single_request_and_get_tracecontext([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-00'],
['tracestate', 'foo=,bar=3'],
])
self.assertRaises(KeyError, lambda: tracestate['foo'])
self.assertRaises(KeyError, lambda: tracestate['bar'])
class AdvancedTest(TestBase):
def test_multiple_requests_with_valid_traceparent(self):
'''
harness sends a valid traceparent and asks vendor service to callback multiple times
expects the trace_id to be inherited by all the callbacks
'''
trace_ids = set()
parent_ids = set()
for response in self.make_request([
['traceparent', '00-12345678901234567890123456789012-1234567890123456-01'],
], 3):
traceparent = self.get_traceparent(response['headers'])
trace_ids.add(traceparent.trace_id.hex())
parent_ids.add(traceparent.parent_id.hex())
self.assertEqual(len(trace_ids), 1)
self.assertTrue('12345678901234567890123456789012' in trace_ids)
self.assertEqual(len(parent_ids), 3)
def test_multiple_requests_without_traceparent(self):
'''
harness asks vendor service to callback multiple times
expects a different parent_id each time
'''
trace_ids = set()
parent_ids = set()
for response in self.make_request([], 3):
traceparent = self.get_traceparent(response['headers'])
trace_ids.add(traceparent.trace_id.hex())
parent_ids.add(traceparent.parent_id.hex())
self.assertEqual(len(parent_ids), 3)
def test_multiple_requests_with_illegal_traceparent(self):
'''
harness sends an invalid traceparent and asks vendor service to callback multiple times
expects new trace_id(s) generated
'''
trace_ids = set()
parent_ids = set()
for response in self.make_request([
['traceparent', '00-00000000000000000000000000000000-1234567890123456-01'],
], 3):
traceparent = self.get_traceparent(response['headers'])
trace_ids.add(traceparent.trace_id.hex())
parent_ids.add(traceparent.parent_id.hex())
self.assertFalse('00000000000000000000000000000000' in trace_ids)
self.assertEqual(len(parent_ids), 3)
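# Entry point: reads the service endpoint from argv[1] or SERVICE_ENDPOINT,
# builds the in-process harness client/server pair, and runs either the test
# patterns named on the command line or the full suite (see the usage text
# printed below for the environment variables and example invocations).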
if __name__ == '__main__':
if len(sys.argv) >= 2:
os.environ['SERVICE_ENDPOINT'] = sys.argv[1]
if not 'SERVICE_ENDPOINT' in os.environ:
print('''
Usage: python {0} <service endpoint> [patterns]
Environment Variables:
HARNESS_DEBUG when set, debug mode will be enabled (default to disabled)
HARNESS_HOST the public host/address of the test harness (default 127.0.0.1)
HARNESS_PORT the public port of the test harness (default 7777)
HARNESS_TIMEOUT the timeout (in seconds) used for each test case (default 5)
HARNESS_BIND_HOST the host/address which the test harness binds to (default to HARNESS_HOST)
HARNESS_BIND_PORT the port which the test harness binds to (default to HARNESS_PORT)
SERVICE_ENDPOINT your test service endpoint (no default value)
STRICT_LEVEL the level of test strictness (default 2)
Example:
python {0} http://127.0.0.1:5000/test
python {0} http://127.0.0.1:5000/test TraceContextTest.test_both_traceparent_and_tracestate_missing
python {0} http://127.0.0.1:5000/test AdvancedTest
python {0} http://127.0.0.1:5000/test AdvancedTest TraceContextTest.test_both_traceparent_and_tracestate_missing
'''.strip().format(sys.argv[0]), file = sys.stderr)
exit(-1)
host = environ('HARNESS_HOST', '127.0.0.1')
port = environ('HARNESS_PORT', '7777')
timeout = environ('HARNESS_TIMEOUT', '5')
bind_host = environ('HARNESS_BIND_HOST', host)
bind_port = environ('HARNESS_BIND_PORT', port)
client = TestClient(host = host, port = int(port), timeout = int(timeout) + 1)
server = TestServer(host = bind_host, port = int(bind_port), timeout = int(timeout))
suite = unittest.TestSuite()
loader = unittest.TestLoader()
if len(sys.argv) > 2:
for name in sys.argv[2:]:
suite.addTests(loader.loadTestsFromName(name, module = sys.modules[__name__]))
else:
suite.addTests(loader.loadTestsFromModule(sys.modules[__name__]))
result = unittest.TextTestRunner(verbosity = 2).run(suite)
sys.exit(len(result.errors) + len(result.failures))
|
# %%
import torch
from UnarySim.kernel.add import FSUAdd
from UnarySim.stream.gen import RNG, SourceGen, BSGen
from UnarySim.metric.metric import ProgError
import matplotlib.pyplot as plt
import time
# %%
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# %%
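# add_test() sweeps unipolar/bipolar data with scaled and non-scaled FSUAdd,
# streams 2**bitwidth unary cycles through BSGen, and reports the input/output
# error range plus the output RMSE tracked by ProgError. A minimal call
# (mirroring the defaults) would be, for example:
#   add_test(rng="Sobol", row=128, col=10000, bitwidth=8, plot_en=True)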
def add_test(rng="Sobol", row=128, col=10000, bitwidth=8, plot_en=False):
modes = ["bipolar", "unipolar"]
scaled = [True, False]
result_pe = []
stype = torch.float
btype = torch.float
rtype = torch.float
scale_mod = row
for mode in modes:
for scale in scaled:
run_time = 0
acc_dim = 0
result_pe_cycle = []
uadd = FSUAdd(mode=mode, scaled=scale, scale=scale_mod, dim=acc_dim).to(device)
if mode == "unipolar":
iVec = torch.rand(row, col).mul(2**bitwidth).round().div(2**bitwidth).to(device)
elif mode == "bipolar":
iVec = torch.rand(row, col).mul(2).sub(1).mul(2**bitwidth).round().div(2**bitwidth).to(device)
oVec = torch.sum(iVec, acc_dim).to(device)
iVecSource = SourceGen(iVec, bitwidth=bitwidth, mode=mode, rtype=rtype)().to(device)
iVecRNG = RNG(bitwidth, 1, rng, rtype)().to(device)
iVecBS = BSGen(iVecSource, iVecRNG, stype).to(device)
iVecPE = ProgError(iVec, scale=1, mode=mode).to(device)
if scale is True:
if acc_dim == 0:
oVecPE = ProgError(oVec, scale=scale_mod, mode=mode).to(device)
elif acc_dim ==1:
oVecPE = ProgError(oVec, scale=scale_mod, mode=mode).to(device)
else:
oVecPE = ProgError(oVec, scale=1, mode=mode).to(device)
with torch.no_grad():
idx = torch.zeros(iVecSource.size()).type(torch.long).to(device)
for i in range(2**bitwidth):
iBS = iVecBS(idx + i)
iVecPE.Monitor(iBS)
start_time = time.time()
oVecU = uadd(iBS)
run_time = time.time() - start_time + run_time
oVecPE.Monitor(oVecU)
rmse = torch.sqrt(torch.mean(torch.mul(oVecPE()[1], oVecPE()[1])))
result_pe_cycle.append(1-rmse.item())
print("--- %s seconds ---" % (time.time() - start_time))
print("RNG: "+rng+", data: "+mode+", scaled: "+str(scale))
print("input error: ", "min: ", torch.min(iVecPE()[1]).item(), "max: ", torch.max(iVecPE()[1]).item())
print("output error: ", "min: ", torch.min(oVecPE()[1]).item(), "max: ", torch.max(oVecPE()[1]).item(), "RMSE: ", rmse.item())
if plot_en is True:
result_pe = oVecPE()[1].cpu().numpy()
print("error distribution=========>")
plt.figure(figsize=(3,1.5))
fig = plt.hist(result_pe.flatten(), bins='auto') # arguments are passed to np.histogram
plt.show()
print("progressive accuracy=========>")
plt.figure(figsize=(3,1.5))
                    fig = plt.plot(result_pe_cycle) # progressive accuracy (1 - RMSE) per unary cycle
plt.show()
# %%
rng = "Sobol"
row = 128
col = 10000
bitwidth = 12
add_test(rng, row, col, bitwidth)
# # %%
# rng = "Race"
# row = 128
# col = 10000
# add_test(rng, row, col)
# # %%
# rng = "LFSR"
# row = 128
# col = 10000
# add_test(rng, row, col)
# # %%
# rng = "SYS"
# row = 128
# col = 10000
# add_test(rng, row, col)
# # %%
|
#!/usr/bin/env python3
# SPDX-License-Identifier: MIT
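# For each blank-line-separated group of answers, the first count tallies the
# questions answered "yes" by anyone in the group (set union) and the second
# the questions answered "yes" by everyone (set intersection). For example, a
# group "ab\nac" contributes 3 to any_count ({a, b, c}) and 1 to all_count ({a}).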
rawdata = []
with open("input") as f:
rawdata = f.read()
data = rawdata.split("\n\n")
any_count = 0
all_count = 0
for group in data:
# probably a bit overkill lol
any_yes = {c for c in group.replace("\n", "")}
any_count += len(any_yes)
all_yes = any_yes
for line in group.split():
all_yes = all_yes.intersection({c for c in line})
all_count += len(all_yes)
print(any_count)
print(all_count)
|
# -*- coding: utf-8 -*-
import scrapy
from ..items import QuotesItem
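# CSS-selector variant of the quotes spider: parse() yields one QuotesItem per
# .quote block (text, author, tags) and then follows the ".next" pagination
# link. scrapy.Request without an explicit callback falls back to self.parse,
# so every results page is handled by the same method.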
class QuotesSpiderCssSpider(scrapy.Spider):
name = 'quotes_spider_css'
allowed_domains = ['quotes.toscrape.com']
start_urls = ['https://quotes.toscrape.com/']
def parse(self, response):
items = QuotesItem()
all_quotes = response.css('.quote')
for quote in all_quotes:
items['text'] = quote.css('.text::text').extract_first()
items['author'] = quote.css('.author::text').extract_first()
items['tags'] = quote.css('div.tags > a.tag::text').extract()
# tags = quote.css('.tag::text').extract()
yield items
next_page = response.css('.next > a::attr(href)').extract_first()
if next_page:
yield scrapy.Request(response.urljoin(next_page))
|
import time
from cube2common.constants import mastermodes, disconnect_types, privileges
from cipolla.game.server_message_formatter import error
from cipolla.punitive_effects.punitive_effect_info import TimedExpiryInfo, EffectInfo
from cipolla.game.client.exceptions import InsufficientPermissions
from cipolla.game.gamemode.gamemodes import get_mode_name_from_num # type: ignore
from cipolla.game.map.resolve_map_name import resolve_map_name
from cipolla.game.client.exceptions import InsufficientPermissions, StateError, UnknownPlayer
from cipolla.game.room.exceptions import UnknownEvent
from cipolla.game.server_message_formatter import *
from cipolla.game.client.exceptions import *
from cube2common.vec import vec
from cube2common.constants import *
from cipolla.protocol import swh
from cipolla.utils.filtertext import filtertext
from cipolla.utils.dictionary_get import dictget
from cipolla.game.edit.selection import Selection
from cipolla.utils.tracing import *
from cipolla.game.client.client import Client
from cipolla.game.room.room import Room
from typing import Any, Dict, List, Union
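# Role-based dispatch: BaseRole maps high-level event names ('kick',
# 'map_vote', ...) and raw network message types ('N_SHOOT', 'N_TEXT', ...) to
# handler methods in self.actions; handle_event()/handle_message() simply look
# the name up and call the handler. MasterRole and AdminRole subclass BaseRole
# and overwrite selected entries to grant the corresponding privileges.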
class BaseRole(object):
def __init__(self) -> None:
self.privilege = privileges.PRIV_NONE
self.actions = {
'set_bot_limit': self.on_disabled,
'check_maps': self.on_disabled,
'edit_remip': self.on_disabled,
'edit_new_map': self.on_disabled,
'edit_get_map': self.on_disabled,
'get_demo': self.on_disabled,
'list_demos': self.on_disabled,
'clear_demo': self.on_disabled,
'stop_demo_recording': self.on_disabled,
'set_demo_recording': self.on_disabled,
'add_bot': self.on_disabled,
'delete_bot': self.on_disabled,
'set_bot_balance': self.on_disabled,
'set_game_speed': self.on_disabled,
'give_master': self.on_not_allowed,
'set_master_mode': self.on_not_allowed,
'set_team': self.on_not_allowed,
'kick': self.on_not_allowed,
'clear_bans': self.on_not_allowed,
'map_vote': self.on_not_allowed,
'pause_game': self.on_not_allowed,
'set_master': self.on_set_master,
'set_spectator': self.on_set_spectator,
'item_list': self.on_item_list,
'flag_list': self.on_flag_list,
'base_list': self.on_base_list,
'map_crc': self.on_map_crc,
'command': self.on_command,
'N_SHOOT': self.on_shoot,
'N_ADDBOT': self.on_addbot,
'N_DELBOT': self.on_delbot,
'N_AUTHANS': self.on_nauthans,
'N_AUTHKICK': self.on_authkick,
'N_AUTHTRY': self.on_authtry,
'N_BASES': self.on_bases,
'N_BOTBALANCE': self.on_botbalance,
'N_BOTLIMIT': self.on_botlimit,
'N_CHECKMAPS': self.on_checkmaps,
'N_CLEARBANS': self.on_clearbans,
'N_CLEARDEMOS': self.on_disabled,
'N_CLIENTPING': self.on_clientping,
'N_CLIPBOARD': self.on_clipboard,
'N_CONNECT': self.on_connect,
'N_COPY': self.on_copy,
'N_DELCUBE': self.on_delcube,
'N_EDITENT': self.on_editent,
'N_EDITF': self.on_editf,
'N_EDITMODE': self.on_editmode,
'N_EDITM': self.on_editm,
'N_EDITT': self.on_editt,
'N_EDITVAR': self.on_editvar,
'N_EXPLODE': self.on_explode,
'N_FORCEINTERMISSION': self.on_forceintermission,
'N_GAMESPEED': self.on_gamespeed,
'N_GETDEMO': self.on_disabled,
'N_GETMAP': self.on_getmap,
'N_GUNSELECT': self.on_gunselect,
'N_INITFLAGS': self.on_initflags,
'N_ONFLIP': self.on_flip,
'N_ITEMLIST': self.on_itemlist,
'N_ITEMPICKUP': self.on_itempickup,
'N_JUMPPAD': self.on_jumppad,
'N_KICK': self.on_kick,
'N_LISTDEMOS': self.on_disabled,
'N_MAPCHANGE': self.on_mapchange,
'N_MAPCRC': self.on_mapcrc,
'N_MAPVOTE': self.on_mapvote,
'N_NEWMAP': self.on_newmap,
'N_MASTERMODE': self.on_not_allowed,
'N_PASTE': self.on_paste,
'N_PAUSEGAME': self.on_not_allowed,
'N_PING': self.on_ping,
            'N_POS': self.on_pos,
'N_RECORDDEMO': self.on_disabled,
'N_REMIP': self.on_remip,
'N_REPAMMO': self.on_repammo,
'N_REPLACE': self.on_replace,
'N_SAYTEAM': self.on_sayteam,
'N_SERVCMD': self.on_servcmd,
'N_ROTATE': self.on_rotate,
'N_SOUND': self.on_sound,
'N_SPAWN': self.on_spawn,
'N_SETMASTER': self.on_setmaster,
'N_SPECTATOR': self.on_spectator,
'N_STOPDEMO': self.on_disabled,
'N_SETTEAM': self.on_setteam,
'N_SUICIDE': self.on_suicide,
'N_SWITCHMODEL': self.on_switchmodel,
'N_SWITCHNAME': self.on_switchname,
'N_SWITCHTEAM': self.on_switchteam,
'N_TAKEFLAG': self.on_takeflag,
'N_TAUNT': self.on_taunt,
'N_TELEPORT': self.on_teleport,
'N_TEXT': self.on_text,
'N_TRYDROPFLAG': self.on_trydropflag,
'N_TRYSPAWN': self.on_tryspawn,
}
def handle_event(self, event_name: str, room: Room, *args, **kwargs) -> None:
action = self.actions.get(event_name, self.on_unknown_event)
return action(room, *args, **kwargs) # type: ignore
# TODO: mypy
def handle_message(self, client: Client, room: Room, message_type: str, message: Dict[str, Any]) -> None:
action = self.actions.get(message_type, self.on_unknown_message)
return action(client, room, message) # type: ignore
# TODO: mypy
def on_unknown_message(self, client, room, message):
print("===ERROR UnknownMessage:", message)
raise UnknownMessage(message)
# from game_event_handler
def handle_text_event(self, text, room, player):
def parse(text):
text = text[1:].split(' ')
return text[0], text[1:]
cmd, args = parse(text)
if not cmd in self.text_actions:
player.client.send_server_message(error('Command does not exist. Type #commands for more info'))
return self.text_actions[cmd](room, player, cmd, args)
def on_unknown_event(self, ev_name, *args, **kwargs):
print("===ERROR UnknownEvent:", *args, **kwargs)
raise UnknownEvent('Event: '+ev_name+' Arguments: '+str(args) + str(kwargs))
def on_disabled(self, room, client, *a, **kw):
client.send_server_message(red('Command disabled'))
pass
def on_set_master(self, room, client, target_pn, *args):
client_target = room.get_player(target_pn).client
if client == client_target:
if not room.admins_present() and not room.masters_present():
room.change_privilege(client, privileges.PRIV_MASTER)
else:
client.send_server_message(info('master is already claimed'))
def on_check_maps(self, room, client):
pass
def on_set_bot_limit(self, room, client, limit):
pass
def on_item_list(self, room, client, item_list):
room.gamemode.on_client_item_list(client, item_list)
def on_flag_list(self, room: Room, client: Client, flag_list: Union[List, List]) -> None:
room.gamemode.on_client_flag_list(client, flag_list)
# def on_give_master(self, room, client, client_target):
# room._client_change_privilege(client, client_target, 1)
def on_delete_bot(self, room, client):
pass
def on_edit_new_map(self, room, client, size):
pass
def on_add_bot(self, room, client, skill):
pass
def on_base_list(self, room, client, base_list):
room.gamemode.on_client_base_list(client, base_list)
def on_map_crc(self, room: Room, client: Client, crc: int) -> None:
# TODO: Implement optional spectating of clients without valid map CRC's
pass
def on_map_vote(self, room, client, map_name, mode_num):
mode_name = get_mode_name_from_num(mode_num)
        map_name = resolve_map_name(room, map_name)
room.change_map_mode(map_name, mode_name)
def on_clear_bans(self, room, client):
# TODO: Permissions checks
client._punitive_model.clear_effects('ban')
def on_command(self, room, client, command):
pass
def on_set_bot_balance(self, room, client, balance):
pass
def on_set_game_speed(self, room, client, speed):
pass
def on_edit_get_map(self, room, client):
pass
def on_not_allowed(self, *args, **kwargs):
from cipolla.game.player.player import Player
from cipolla.game.client.client import Client
message = 'you are unable to use this command'
for cc in args:
if isinstance(cc, Player):
cc.client.send_server_message(red(message))
return
elif isinstance(cc, Client):
cc.send_server_message(red(message))
return
def on_commands(self, room, player, *args, **kwargs):
available_commands = self.text_actions.keys()
formatted_command_list = list(map(lambda s: '#'+s, available_commands))
player.client.send_server_message("\f7Commands: " + " | ".join(formatted_command_list))
def on_info(self, *args, **kwargs):
#TODO get info server
# client.send_server_message(info(cipolla_server.server_info_model.value))
pass
def on_stats(self, *args, **kwargs):
#TODO statsss
pass
def on_list_mods(self, room, player, *args, **kw):
mods = ModsManager().list_mods()
player.client.send_server_message(info("Available mods: " + " | ".join(mods)))
def on_shoot(self, client: Client, room: Room, message: Dict[str, int]) -> None:
player = client.get_player(message['aiclientnum'])
shot_id = message['shot_id']
gun = message['gun']
from_pos = vec(*dictget(message, 'fx', 'fy', 'fz')) # type: ignore
to_pos = vec(*dictget(message, 'tx', 'ty', 'tz')) # type: ignore
hits = message['hits']
room.handle_player_event('shoot', player, shot_id, gun, from_pos, to_pos, hits)
def on_addbot(self, client, room, message):
# room.handle_client_event('add_bot', client, message['skill'])
# TODO: not implemented
pass
def on_nauthans(self, client, room, message):
authdomain = message['authdomain']
authid = message['authid']
answer = message['answer']
client.answer_auth_challenge(authdomain, authid, answer)
def on_authkick(self, client, room, message):
# authdomain = message['authdomain']
# authname = message['authname']
# target_pn = message['target_cn']
# reason = message['reason']
# TODO: not implemented
# deferred = client.auth(authdomain, authname)
# callback = lambda r: room.handle_client_event('kick', client, target_pn, reason)
# deferred.addCallbacks(callback, callback)
pass
def on_authtry(self, client, room, message):
# authdomain = message['authdomain']
# authname = message['authname']
# deferred = client.auth(authdomain, authname)
# TODO: not implemented
pass
def on_bases(self, client, room, message):
# TODO: not implemented
# room.handle_client_event('base_list', client, message['bases'])
pass
def on_botbalance(self, client, room, message):
# TODO: not implemented
# room.handle_client_event('set_bot_balance', client, message['balance'])
pass
def on_botlimit(self, client, room, message):
# TODO: not implemented
# room.handle_client_event('set_bot_limit', client, message['limit'])
pass
def on_checkmaps(self, client, room, message):
# TODO: not implemented
# room.handle_client_event('check_maps', client)
pass
def on_clearbans(self, client, room, message):
# TODO: not implemented
# room.handle_client_event('clear_bans', client)
pass
def on_clientping(self, client: Client, room: Room, message: Dict[str, int]) -> None:
ping = message['ping']
client.ping_buffer.add(ping)
player = client.get_player()
swh.put_clientping(player.state.messages, ping)
def on_clipboard(self, client, room, message):
pass
def on_connect(self, client: Client, room: Room, message: Dict[str, Union[str, int]]) -> None:
if not client.is_connected:
client.connect_received(message)
def on_copy(self, client, room, message):
del message['aiclientnum']
player = client.get_player()
selection = Selection.from_message(message)
room.handle_player_event('edit_copy', player, selection)
def on_delbot(self, client, room, message):
# room.handle_client_event('delete_bot', client)
# TODO: not implemented
pass
def on_delcube(self, client, room, message):
del message['aiclientnum']
player = client.get_player()
selection = Selection.from_message(message)
room.handle_player_event('edit_delete_cubes', player, selection)
def on_editent(self, client, room, message):
player = client.get_player()
entity_id = message['entid']
entity_type = message['type']
x, y, z = dictget(message, 'x', 'y', 'z')
attrs = message['attrs']
room.handle_player_event('edit_entity', player, entity_id, entity_type, x, y, z, attrs)
    def on_editf(self, client, room, message):
        del message['aiclientnum']
        player = client.get_player()
        selection = Selection.from_message(message)
        direction = message['direction']
        mode = message['mode']
        room.handle_player_event('edit_face', player, selection, direction, mode)
def on_editmode(self, client, room, message):
player = client.get_player()
room.handle_player_event('edit_mode', player, message['value'])
def on_editm(self, client, room, message):
del message['aiclientnum']
player = client.get_player()
selection = Selection.from_message(message)
material = message['material']
material_filter = message['material_filter']
room.handle_player_event('edit_material', player, selection, material, material_filter)
def on_editt(self, client, room, message):
del message['aiclientnum']
player = client.get_player()
selection = Selection.from_message(message)
texture = message['texture']
all_faces = message['all_faces']
room.handle_player_event('edit_texture', player, selection, texture, all_faces)
def on_editvar(self, client, room, message):
pass
def on_explode(self, client, room, message):
player = client.get_player(message['aiclientnum'])
cmillis = message['cmillis']
gun = message['gun']
explode_id = message['explode_id']
hits = message['hits']
room.handle_player_event('explode', player, cmillis, gun, explode_id, hits)
def on_forceintermission(self, client, room, message):
pass
def on_gamespeed(self, client, room, message):
# room.handle_client_event('set_game_speed', client, message['value'])
# TODO: not implemented
pass
def on_gunselect(self, client: Client, room: Room, message: Dict[str, int]) -> None:
player = client.get_player(message['aiclientnum'])
room.handle_player_event('gunselect', player, message['gunselect'])
def on_initflags(self, client: Client, room: Room, message: Dict[str, Union[List, int, List]]) -> None:
# room.handle_client_event('flag_list', client, message['flags'])
# TODO: not implemented
pass
def on_itemlist(self, client, room, message):
# room.handle_client_event('item_list', client, message['items'])
# TODO: not implemented
pass
def on_itempickup(self, client, room, message):
# player = client.get_player(message['aiclientnum'])
# room.handle_player_event('pickup_item', player, message['item_index'])
# # TODO: not implemented
pass
def on_jumppad(self, client, room, message):
player = client.get_player(message['aiclientnum'])
room.handle_player_event('jumppad', player, message['jumppad'])
def on_kick(self, client, room, message):
# room.handle_client_event('kick', client, message['target_cn'], message['reason'])
# TODO: not implemented
pass
def on_mapchange(self, client, room, message):
# TODO: never called?
# room.handle_client_event('map_vote', client, message['map_name'], message['mode_num'])
pass
def on_mapcrc(self, client: Client, room: Room, message: Dict[str, Union[str, int]]) -> None:
# room.handle_client_event('map_crc', client, message['mapcrc'])
# TODO: not implemented
pass
def on_mapvote(self, client: Client, room: Room, message: Dict[str, Union[str, int]]) -> None:
client.role.handle_event('map_vote', room, client, message['map_name'], message['mode_num'])
def on_newmap(self, client, room, message):
# room.handle_client_event('edit_new_map', client, message['size'])
# TODO: not implemented
pass
def on_paste(self, client, room, message):
del message['aiclientnum']
player = client.get_player()
selection = Selection.from_message(message)
room.handle_player_event('edit_paste', player, selection)
def on_ping(self, client, room, message):
with client.sendbuffer(1, False) as cds:
swh.put_pong(cds, message['cmillis'])
def on_pos(self, client: Client, room: Room, message: Dict[str, Union[int, List[int], bytes]]) -> None:
assert isinstance(message['clientnum'], int)
player = client.get_player(message['clientnum'])
player.state.update_position(message['position'], message['raw_position'])
def on_remip(self, client, room, message):
# room.handle_client_event('edit_remip', client)
# TODO: not implemented
pass
def on_repammo(self, client, room, message):
player = client.get_player(message['aiclientnum'])
room.handle_player_event('replenish_ammo', player)
def on_replace(self, client, room, message):
del message['aiclientnum']
player = client.get_player()
selection = Selection.from_message(message)
texture = message['texture']
new_texture = message['new_texture']
in_selection = message['in_selection']
room.handle_player_event('edit_replace', player, selection, texture, new_texture, in_selection)
def on_rotate(self, client, room, message):
del message['aiclientnum']
player = client.get_player()
selection = Selection.from_message(message)
axis = message['axis']
room.handle_player_event('edit_rotate', player, selection, axis)
def on_sayteam(self, client: Client, room: Room, message: Dict[str, Union[str, int]]) -> None:
player = client.get_player()
room.handle_player_event('team_chat', player, message['text'])
def on_servcmd(self, client, room, message):
# room.handle_client_event('command', client, message['command'])
# TODO: implement
pass
def on_setmaster(self, client, room, message):
# room.handle_client_event('set_master', client, message['target_cn'], message['pwdhash'], message['value'])
# TODO: not implemented
pass
def on_set_spectator(self, room, client, target_pn, spectate):
player = room.get_player(target_pn)
if player is None:
raise UnknownPlayer(cn=target_pn)
elif client == player.client:
room._set_player_spectator(player, spectate)
else:
client.send_server_message(error("you can't spectate other players")) # TODO: make usage function
def on_setteam(self, client, room, message):
# team_name = filtertext(message['team'], False, MAXTEAMLEN)
# room.handle_client_event('set_team', client, message['target_cn'], team_name)
# TODO: not implemented
pass
def on_sound(self, client: Client, room: Room, message: Dict[str, int]) -> None:
player = client.get_player(message['aiclientnum'])
room.handle_player_event('sound', player, message['sound'])
def on_spawn(self, client: Client, room: Room, message: Dict[str, int]) -> None:
player = client.get_player(message['aiclientnum'])
room.handle_player_event('spawn', player, message['lifesequence'], message['gunselect'])
def on_spectator(self, client, room, message):
# room.handle_client_event('set_spectator', client, message['target_cn'], bool(message['value']))
# TODO: is it implemented???
pass
def on_suicide(self, client, room, message):
player = client.get_player(message['aiclientnum'])
room.handle_player_event('suicide', player)
def on_switchmodel(self, client, room, message):
player = client.get_player(message['aiclientnum'])
room.handle_player_event('switch_model', player, message['playermodel'])
def on_switchname(self, client, room, message):
player = client.get_player(-1)
name = filtertext(message['name'], False, MAXNAMELEN)
if len(name) <= 0:
name = "unnamed"
room.handle_player_event('switch_name', player, name)
def on_switchteam(self, client, room, message):
player = client.get_player(-1)
team_name = filtertext(message['team'], False, MAXTEAMLEN)
room.handle_player_event('switch_team', player, team_name)
def on_takeflag(self, client: Client, room: Room, message: Dict[str, int]) -> None:
player = client.get_player(message['aiclientnum'])
room.handle_player_event('take_flag', player, message['flag'], message['version'])
def on_taunt(self, client, room, message):
player = client.get_player()
room.handle_player_event('taunt', player)
def on_teleport(self, client, room, message):
player = client.get_player(message['aiclientnum'])
room.handle_player_event('teleport', player, message['teleport'], message['teledest'])
def on_text(self, client: Client, room: Room, message: Dict[str, Union[str, int]]) -> None:
player = client.get_player()
room.handle_player_event('game_chat', player, message['text'])
def on_trydropflag(self, client, room, message):
player = client.get_player(message['aiclientnum'])
room.handle_player_event('try_drop_flag', player)
def on_tryspawn(self, client, room, message):
player = client.get_player(message['aiclientnum'])
room.handle_player_event('request_spawn', player)
    def on_getmap(self, client, room, message):
# room.handle_client_event('edit_get_map', client)
# TODO: not implemented
pass
    def on_flip(self, client, room, message):
del message['aiclientnum']
player = client.get_player()
selection = Selection.from_message(message)
room.handle_player_event('edit_flip', player, selection)
class MasterRole(BaseRole):
def __init__(self) -> None:
super().__init__()
self.privilege = privileges.PRIV_MASTER
self.actions.update({
'map_vote': self.on_map_vote,
'set_spectator': self.on_set_spectator,
'set_master': self.on_set_master,
'set_master_mode': self.on_set_master_mode,
'set_team': self.on_set_team,
'pause_game': self.on_pause_game,
'N_MASTERMODE': self.on_mastermode,
'N_PAUSEGAME': self.on_pause_game,
})
def on_map_vote(self, room: Room, client: Client, map_name: str, mode_num: int) -> None:
mode_name = get_mode_name_from_num(mode_num)
map_name = resolve_map_name(room, map_name)
room.change_map_mode(map_name, mode_name)
def on_kick(self, room, player, args):
target_client = room.get_target_client(args[0])
if target_client is None:
player.client.send_server_message(usage_error('Invalid client number'))
else:
            expiry_time = time.time() + (4 * SECONDS_PER_HOUR)
            player.client._punitive_model.add_effect('ban', target_client.host, EffectInfo(TimedExpiryInfo(expiry_time)))
            target_client.disconnect(disconnect_types.DISC_KICK, error("You were kicked by {name#kicker}", kicker=player))
def on_mastermode(self, client, room, message):
self.handle_event('set_master_mode', room, client, message['mastermode'])
client.send_server_message(info('Master mode changed'))
def on_set_master(self, room, client, target_pn, *args):
client_target = room.get_player(target_pn).client
# if client_target is the same as client, the client is trying to relinquish his master
if client_target == client:
room.change_privilege(client, privileges.PRIV_NONE)
else:
room.change_privilege(client_target, privileges.PRIV_MASTER)
def on_set_spectator(self, room, client, target_pn, spectate):
print(target_pn)
print(spectate)
target_player = room.get_player(target_pn)
if target_player is None:
raise UnknownPlayer(cn=target_pn)
elif isinstance(target_player.client.role, AdminRole):
client.send_server_message(error('you can\'t spectate the masta admin!'))
else:
room._set_player_spectator(target_player, spectate)
def on_set_master_mode(self, room, client, mastermode):
if mastermode == mastermodes.MM_PRIVATE:
raise GenericError("Mastermode private not allowed")
if mastermode < mastermodes.MM_OPEN or mastermode > mastermodes.MM_PRIVATE:
raise GenericError("Mastermode out of allowed range.")
room.set_mastermode(mastermode)
def on_set_team(self, room, client, target_pn, team_name):
target_player = room.get_player(target_pn)
if target_player is None:
raise UnknownPlayer(cn=target_pn)
elif isinstance(target_player.client.role, AdminRole):
client.send_server_message(error('you can\'t change the team of the masta admin!'))
else:
room.gamemode.on_player_try_set_team(client.get_player(), target_player, target_player.teamname, team_name)
def on_pause_game(self, room, client, pause):
if pause:
if room.is_paused and not room.is_resuming: raise StateError('The game is already paused.')
room.pause()
room._broadcaster.server_message(info(f"{client.get_player().name} has paused the game."))
elif not pause:
if not room.is_paused: raise StateError('The game is already resumed.')
room.resume()
room._broadcaster.server_message(info(f"{client.get_player().name} has resumed the game."))
class AdminRole(MasterRole):
def __init__(self) -> None:
super().__init__()
self.privilege = privileges.PRIV_ADMIN
self.actions.update({
'set_master': self.on_set_master,
'set_spectator': self.on_set_spectator,
})
def on_set_master(self, room, client, target_pn, *args):
client_target = room.get_player(target_pn).client
if client_target != client:
if isinstance(client_target.role, MasterRole):
room.change_privilege(client_target, privileges.PRIV_NONE)
else:
room.change_privilege(client_target, privileges.PRIV_MASTER)
def on_set_spectator(self, room, client, target_pn, spectate):
target_player = room.get_player(target_pn)
if target_player is None:
raise UnknownPlayer(cn=target_pn)
room._set_player_spectator(target_player, spectate)
|
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
from tqdm import tqdm
import torch
import pickle
from model.utils.config import cfg, cfg_from_file, cfg_from_list
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.faster_rcnn.generic_extractor import generic_extractor
from easydict import EasyDict as edict
import glob
from scipy.misc import imread
import cv2
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
from torch.utils.data import Dataset
def prep_im_for_blob(im, pixel_means, target_size, max_size):
"""Mean subtract and scale an image for use in a blob."""
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
return im, im_scale
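# Note: `max_size` is accepted above but never enforced, so very elongated images can still
# exceed cfg.TRAIN.MAX_SIZE after resizing. A minimal sketch of the scaling logic, assuming a
# 480x640 input and target_size=600: im_scale = 600 / 480 = 1.25, so the image is resized to
# 600x800 and `im_scale` is later used to map predicted boxes back to original coordinates.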
class roibatchLoader(Dataset):
def __init__(self, image_path, image_urls, image_extension):
self._image_urls = image_urls
self._image_path = image_path
self._image_extension = image_extension
def __getitem__(self, index):
im = imread(os.path.join(\
self._image_path, self._image_urls[index] + self._image_extension))
im = im[:, :, ::-1] # rgb -> bgr
target_size = 600
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
data = torch.from_numpy(im)
data_height, data_width = data.size(0), data.size(1)
data = data.permute(2, 0, 1)
return (data, im_scale)
def __len__(self):
return len(self._image_urls)
def formalize_bbox(_im_summary):
"""
Extract bboxes from all classes and return a list of bbox.
Each element of the list is in the form: [x1, y1, x2, y2, class_id, score].
    The returned list is sorted in descending order of score.
"""
    boxes = [] # each element: x1, y1, x2, y2, class_id, score
probs = [] # prob distribution for each bounding box
feats = [] # pooled features
for class_id, items in enumerate(_im_summary.pred.boxes):
for bbox in items:
x1, y1, x2, y2, score = bbox
boxes.append([x1, y1, x2, y2, class_id, score])
for class_id, items in enumerate(_im_summary.pred.cls_prob):
for cls_prob in items:
probs.append(cls_prob)
assert len(boxes) == len(probs)
for class_id, items in enumerate(_im_summary.pred.pooled_feat):
for f in items:
feats.append(f)
assert len(boxes) == len(feats)
bundles = list(zip(boxes, probs, feats))
    bundles = sorted(bundles, key=lambda x: x[0][-1], reverse = True) # sort by confidence in descending order
boxes, probs, feats = zip(*bundles)
return (list(boxes), list(probs), list(feats))
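# A short sketch of what formalize_bbox returns: `boxes` is a flat list of
# [x1, y1, x2, y2, class_id, score] entries, `probs` holds the matching per-class probability
# vectors, and `feats` the matching pooled features, all three sorted together by descending score.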
def package_image_summary(im_summary, _feature_path):
boxes, probs, feats = formalize_bbox(im_summary)
im_summary_out = {}
im_summary_out['boxes'] = boxes
im_summary_out['scale'] = im_summary.info.dim_scale[2]
curr_im_path = im_summary.info.image_idx + ".pkl"
pickle.dump(im_summary_out, open(os.path.join(_feature_path, curr_im_path), 'wb'))
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset', help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/vgg16.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models', default="models",
type=str)
parser.add_argument('--cuda', dest='cuda',
                        help='whether to use CUDA',
action='store_true')
parser.add_argument('--ls', dest='large_scale',
                        help='whether to use a large image scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
                        help='whether to use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
                        help='whether to perform class-agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
                        help='which part of the model to parallelize, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=10021, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
args = parser.parse_args()
return args
def filter_small_box(boxes, min_area):
boxes_index = []
for i, box in enumerate(boxes):
x1, y1, x2, y2, _ = box
area = (x2-x1)*(y2-y1)
if(area >= min_area):
boxes_index.append(i)
return boxes_index
if __name__ == '__main__':
device = torch.device('cuda:0')
class_labels = ['__background__', 'bush', 'kite', 'laptop', 'bear', 'paper', 'shoe', 'chair', 'ground', 'flowers', 'tire',
'cup', 'sky', 'bench', 'window', 'bike', 'board', 'hat', 'plate', 'woman', 'handle', 'food', 'trees', 'wave',
'giraffe', 'background', 'foot', 'shadow', 'clouds', 'button', 'shelf', 'bag', 'sand', 'nose', 'rock', 'sidewalk',
'glasses', 'fence', 'people', 'house', 'sign', 'hair', 'street', 'zebra', 'mirror', 'logo', 'girl', 'arm', 'flower',
'leaf', 'clock', 'dirt', 'lights', 'boat', 'bird', 'pants', 'umbrella', 'bed', 'leg', 'reflection', 'water', 'tracks',
'sink', 'trunk', 'post', 'box', 'boy', 'cow', 'shoes', 'leaves', 'skateboard', 'pillow', 'road', 'letters', 'wall',
'jeans', 'number', 'pole', 'table', 'writing', 'cloud', 'sheep', 'horse', 'eye', 'top', 'seat', 'tail', 'vehicle', 'brick',
'legs', 'banana', 'head', 'door', 'shorts', 'bus', 'motorcycle', 'glass', 'flag', 'train', 'child', 'line', 'ear', 'neck',
'car', 'cap', 'tree', 'roof', 'cat', 'coat', 'grass', 'toilet', 'player', 'airplane', 'glove', 'helmet', 'shirt', 'floor', 'bowl',
'snow', 'field', 'lamp', 'elephant', 'tile', 'beach', 'pizza', 'wheel', 'picture', 'plant', 'ball', 'spot', 'hand', 'plane', 'mouth',
'stripe', 'letter', 'vase', 'man', 'building', 'surfboard', 'windows', 'light', 'counter', 'lines', 'dog', 'face', 'jacket',
'person', 'part', 'truck', 'bottle', 'wing']
assert len(class_labels) == 151
num_classes = len(class_labels)
image_path = os.path.join('/home/alex/faster-rcnn.pytorch/data/flickr30k_alex/')
image_extension = ".jpg"
image_index = glob.glob(os.path.join(image_path, "*" + image_extension))
image_index = [os.path.basename(x)[:-len(image_extension)] for x in image_index]
feature_path = os.path.join(image_path, 'features')
if not os.path.exists(feature_path):
os.makedirs(feature_path)
dataset = roibatchLoader(image_path, image_index, image_extension)
num_images = len(dataset)
max_per_image = 100
metaInfo = edict()
args = parse_args()
print('Called with args:')
print(args)
np.random.seed(cfg.RNG_SEED)
assert args.dataset == 'vg'
assert args.net == 'vgg16'
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
metaInfo.imdb_image_index = image_index
meta_file = os.path.join(feature_path, "meta.pkl")
with open(meta_file, 'wb') as f:
pickle.dump(metaInfo, f, pickle.HIGHEST_PROTOCOL)
input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
    # initialize the network here.
fasterRCNN = generic_extractor(class_labels, pretrained=False, class_agnostic=args.class_agnostic)
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
    # initialize the tensor holder here.
im_data = torch.FloatTensor(1).to(device)
gt_boxes = torch.FloatTensor([[ 1., 1., 1., 1., 1.]]).to(device)
num_boxes = torch.LongTensor([0]).to(device)
if args.cuda:
cfg.CUDA = True
with torch.no_grad():
fasterRCNN.to(device)
fasterRCNN.eval()
thresh = 0.0 # default value when vis=False
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=0, pin_memory=True)
data_iter = iter(dataloader)
empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
for i in tqdm(range(num_images)):
all_feat_class = [[] for _ in xrange(num_classes)]
all_probs_class = [[] for _ in xrange(num_classes)]
all_boxes_class = [[] for _ in xrange(num_classes)]
data = next(data_iter)
scale = data[1].item()
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info = torch.FloatTensor([[im_data.size(2), im_data.size(3), scale]]).to(device)
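        # im_info carries (height, width, scale) of the resized blob; clip_boxes below uses it
        # to keep the regressed boxes inside the image.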
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label, image_summary = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
###### assume: order does not change
image_summary.info.image_idx = image_index[i]
image_summary.info.data = generic_extractor._detach2numpy(im_data).squeeze()
# phase 0
scores = cls_prob.data
boxes = rois.data[:, :, 1:5] # (x1, y1, x2, y2)
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
box_deltas = box_deltas.view(-1, 4) \
* torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4 * len(class_labels))
# adjust boxes by deltas; output in (x1, y1, x2, y2)
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
# avoid boxes go out of image
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
pred_boxes /= scale # (x1, y1, x2, y2)
scores = scores.squeeze() # torch.Size([300, 151])
pooled_feat_backup = image_summary.pred.pooled_feat
pred_boxes = pred_boxes.squeeze() # torch.Size([300, 604]), 604=4*151
for j in xrange(1, num_classes):
inds = torch.nonzero(scores[:,j]>thresh).view(-1)
# if there is det
if inds.numel() > 0:
                # restrict probabilities and pooled features to the thresholded boxes so the
                # `order`/`keep` indexing below keeps them aligned with cls_boxes
                curr_prob = scores[inds]  # N x 151
                curr_feat = pooled_feat_backup[inds]  # N x 512 x 7 x 7
cls_scores = scores[:,j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = pred_boxes[inds, :]
else:
cls_boxes = pred_boxes[inds][:, j * 4:(j + 1) * 4]
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
cls_dets = cls_dets[order]
curr_prob = curr_prob[order]
curr_feat = curr_feat[order]
keep = nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
curr_prob = curr_prob[keep.view(-1).long()]
curr_feat = curr_feat[keep.view(-1).long()]
all_boxes_class[j] = cls_dets.cpu().numpy()
all_probs_class[j] = curr_prob.cpu().numpy()
all_feat_class[j] = curr_feat
else:
all_boxes_class[j] = empty_array
all_probs_class[j] = empty_array
all_feat_class[j] = empty_array
min_area = 2000
for j in xrange(1, num_classes):
filter_index = filter_small_box(all_boxes_class[j], min_area)
all_boxes_class[j] = all_boxes_class[j][filter_index]
all_probs_class[j] = all_probs_class[j][filter_index]
all_feat_class[j] = all_feat_class[j][filter_index]
# Limit to max_per_image detections *over all classes*
# phase 3
curr_boxes = []
curr_scores = []
if max_per_image > 0:
# flatten scores for all boxes of this image
image_scores = np.hstack([all_boxes_class[j][:, -1] for j in xrange(1, num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, num_classes):
keep = np.where(all_boxes_class[j][:, -1] >= image_thresh)[0]
all_boxes_class[j] = all_boxes_class[j][keep, :]
all_probs_class[j] = all_probs_class[j][keep, :]
all_feat_class[j] = all_feat_class[j][keep, :]
if(i % 1000 == 0):
print("Cleaning CUDA cache")
torch.cuda.empty_cache()
image_summary.pred.cls_prob = [all_probs_class[j] for j in range(num_classes)]
image_summary.pred.boxes= [all_boxes_class[j] for j in range(num_classes)]
image_summary.pred.pooled_feat = [all_feat_class[j] for j in range(num_classes)]
feature_file = os.path.join(feature_path, image_summary.info.image_idx+".pkl")
package_image_summary(image_summary, feature_path)
|
#!/usr/bin/python3
import sys
import rclpy
import math
import random
from rclpy.node import Node
from rclpy.time import CONVERSION_CONSTANT, Duration
from geometry_msgs.msg import Vector3Stamped
class PublishAsyncStddev(Node):
def __init__(self):
super().__init__('publish_async_stddev')
self._pub_value = self.create_publisher(Vector3Stamped, 'value', 1)
self._pub_stddev = self.create_publisher(Vector3Stamped, 'stddev', 1)
self._timer = self.create_timer(0.1, self._on_timer)
def _on_timer(self):
msg = Vector3Stamped()
t = self.get_clock().now()
        t += Duration(nanoseconds=random.randint(0, CONVERSION_CONSTANT // 1000))  # up to 1 ms of jitter; randint needs integer bounds
msg.header.stamp = t.to_msg()
msg.vector.x = math.sin(t.nanoseconds / CONVERSION_CONSTANT)
self._pub_value.publish(msg)
msg.vector.x = 1.0
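        # getrandbits(3) is zero with probability 1/8, so the stddev message is skipped on
        # roughly one out of every eight timer ticks.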
if bool(random.getrandbits(3)):
print('publishing')
self._pub_stddev.publish(msg)
def main(args=sys.argv):
rclpy.init(args=args)
rclpy.spin(PublishAsyncStddev())
if __name__ == '__main__':
main()
|
from django.conf.urls import patterns, include, url
from .views import ReferralListView, ReferralUpdateView
urlpatterns = patterns('',
url(r'^$', ReferralListView.as_view(), name='referral_list'),
url(r'(?P<referral_id>[\w-]+)/$', ReferralUpdateView.as_view(), name='referral_update'),
)
|
from __future__ import print_function
import logging as log
import sys
import os
from argparse import ArgumentParser
import cv2
from openvino.inference_engine import IENetwork, IEPlugin
from lpr.trainer import decode_ie_output
from utils.helpers import load_module
def build_argparser():
parser = ArgumentParser()
parser.add_argument("--model", help="Path to an .xml file with a trained model.", required=True, type=str)
parser.add_argument("--cpu_extension",
help="MKLDNN (CPU)-targeted custom layers. "
"Absolute path to a shared library with the kernels implementation", type=str, default=None)
parser.add_argument("--plugin_dir", help="Path to a plugin folder", type=str, default=None)
parser.add_argument("--device",
help="Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
"will look for a suitable plugin for device specified (CPU by default)", default="CPU",
type=str)
parser.add_argument('path_to_config', help='Path to a config.py')
parser.add_argument('input_image', help='Image with license plate')
return parser
def display_license_plate(number, license_plate_img):
size = cv2.getTextSize(number, cv2.FONT_HERSHEY_SIMPLEX, 0.55, 2)
text_width = size[0][0]
text_height = size[0][1]
height, width, _ = license_plate_img.shape
license_plate_img = cv2.copyMakeBorder(license_plate_img, 0, text_height + 10, 0,
0 if text_width < width else text_width - width,
cv2.BORDER_CONSTANT, value=(255, 255, 255))
cv2.putText(license_plate_img, number, (0, height + text_height + 5), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 0, 0), 2)
return license_plate_img
def load_ir_model(model_xml, device, plugin_dir, cpu_extension):
model_bin = os.path.splitext(model_xml)[0] + ".bin"
# initialize plugin
log.info("Initializing plugin for %s device...", device)
plugin = IEPlugin(device=device, plugin_dirs=plugin_dir)
if cpu_extension and 'CPU' in device:
plugin.add_cpu_extension(cpu_extension)
# read IR
log.info("Reading IR...")
net = IENetwork.from_ir(model=model_xml, weights=model_bin)
if "CPU" in plugin.device:
supported_layers = plugin.get_supported_layers(net)
not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
if not_supported_layers:
log.error("Following layers are not supported by the plugin for specified device %s:\n %s",
plugin.device, ', '.join(not_supported_layers))
log.error("Please try to specify cpu extensions library path in sample's command line parameters using "
"--cpu_extension command line argument")
sys.exit(1)
# input / output check
    assert len(net.inputs.keys()) == 1, "LPRNet must have only a single input"
    assert len(net.outputs) == 1, "LPRNet must have only a single output"
input_blob = next(iter(net.inputs))
out_blob = next(iter(net.outputs))
log.info("Loading IR to the plugin...")
exec_net = plugin.load(network=net, num_requests=2)
shape = net.inputs[input_blob].shape
del net
return exec_net, plugin, input_blob, out_blob, shape
# pylint: disable=too-many-locals
def main():
log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
cfg = load_module(args.path_to_config)
exec_net, plugin, input_blob, out_blob, shape = load_ir_model(args.model, args.device,
args.plugin_dir, args.cpu_extension)
n_batch, channels, height, width = shape
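    # the input blob shape reported by the IR is assumed to be NCHW here, which is why it
    # unpacks into (batch, channels, height, width)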
cur_request_id = 0
while 1:
frame = cv2.imread(args.input_image)
img_to_display = frame.copy()
in_frame = cv2.resize(frame, (width, height))
in_frame = in_frame.transpose((2, 0, 1)) # Change data layout from HWC to CHW
in_frame = in_frame.reshape((n_batch, channels, height, width))
exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
if exec_net.requests[cur_request_id].wait(-1) == 0:
# Parse detection results of the current request
lp_code = exec_net.requests[cur_request_id].outputs[out_blob]
lp_number = decode_ie_output(lp_code, cfg.r_vocab)
img_to_display = display_license_plate(lp_number, img_to_display)
cv2.imshow('License Plate', img_to_display)
key = cv2.waitKey(0)
if key == 27:
break
del exec_net
del plugin
if __name__ == '__main__':
sys.exit(main() or 0)
|
import logging
import os
import time
from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.python import PythonOperator
from astronomer.providers.microsoft.azure.operators.data_factory import (
AzureDataFactoryRunPipelineOperatorAsync,
)
from astronomer.providers.microsoft.azure.sensors.data_factory import (
AzureDataFactoryPipelineRunStatusSensorAsync,
)
EXECUTION_TIMEOUT = int(os.getenv("EXECUTION_TIMEOUT", 6))
default_args = {
"execution_timeout": timedelta(hours=EXECUTION_TIMEOUT),
"azure_data_factory_conn_id": "azure_data_factory_default",
"factory_name": "ADFProvidersTeamDataFactoryTest", # This can also be specified in the ADF connection.
"resource_group_name": "team_provider_resource_group_test_1", # This can also be specified in the ADF connection.
"retries": int(os.getenv("DEFAULT_TASK_RETRIES", 2)),
"retry_delay": timedelta(seconds=int(os.getenv("DEFAULT_RETRY_DELAY_SECONDS", 60))),
}
CLIENT_ID = os.getenv("CLIENT_ID", "")
CLIENT_SECRET = os.getenv("CLIENT_SECRET", "")
TENANT_ID = os.getenv("TENANT_ID", "")
SUBSCRIPTION_ID = os.getenv("SUBSCRIPTION_ID", "")
RESOURCE_GROUP_NAME = os.getenv("RESOURCE_GROUP_NAME", "")
DATAFACTORY_NAME = os.getenv("DATAFACTORY_NAME", "")
LOCATION = os.getenv("LOCATION", "eastus")
CONNECTION_STRING = os.getenv("CONNECTION_STRING", "")
PIPELINE_NAME = os.getenv("PIPELINE_NAME", "pipeline1")
ACTIVITY_NAME = os.getenv("ACTIVITY_NAME", "copyBlobtoBlob")
DATASET_INPUT_NAME = os.getenv("DATASET_INPUT_NAME", "ds_in")
DATASET_OUTPUT_NAME = os.getenv("DATASET_OUTPUT_NAME", "ds_out")
BLOB_FILE_NAME = os.getenv("BLOB_FILE_NAME", "test.txt")
OUTPUT_BLOB_PATH = os.getenv("OUTPUT_BLOB_PATH", "container1/output")
BLOB_PATH = os.getenv("BLOB_PATH", "container1/input")
STORAGE_LINKED_SERVICE_NAME = os.getenv("STORAGE_LINKED_SERVICE_NAME", "storageLinkedService001")
rg_params = {"location": LOCATION}
df_params = {"location": LOCATION}
def create_adf_storage_pipeline() -> None:
"""
    Create the Azure resource group if it is not present, then the Azure Data Factory,
    the Azure Storage linked service, the input and output Azure Blob datasets, and the Data Factory pipeline.
"""
from azure.core.exceptions import HttpResponseError
from azure.identity import ClientSecretCredential
from azure.mgmt.datafactory import DataFactoryManagementClient
from azure.mgmt.datafactory.models import (
AzureBlobDataset,
AzureStorageLinkedService,
BlobSink,
BlobSource,
CopyActivity,
DatasetReference,
DatasetResource,
Factory,
LinkedServiceReference,
LinkedServiceResource,
PipelineResource,
SecureString,
)
from azure.mgmt.resource import ResourceManagementClient
credentials = ClientSecretCredential(
client_id=CLIENT_ID, client_secret=CLIENT_SECRET, tenant_id=TENANT_ID
)
resource_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID)
resource_group_exist = None
try:
resource_group_exist = resource_client.resource_groups.get(RESOURCE_GROUP_NAME)
except HttpResponseError as e:
logging.exception("Resource group not found, so creating one %s", e.__str__())
if not resource_group_exist:
resource_client.resource_groups.create_or_update(RESOURCE_GROUP_NAME, rg_params)
# Create a data factory
adf_client = DataFactoryManagementClient(credentials, SUBSCRIPTION_ID)
df_resource = Factory(location=LOCATION)
df = adf_client.factories.create_or_update(RESOURCE_GROUP_NAME, DATAFACTORY_NAME, df_resource)
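    # poll once per second until the new factory reports provisioning_state "Succeeded"
    # before creating linked services and datasets on it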
while df.provisioning_state != "Succeeded":
df = adf_client.factories.get(RESOURCE_GROUP_NAME, DATAFACTORY_NAME)
time.sleep(1)
# Create an Azure Storage linked service
# IMPORTANT: specify the name and key of your Azure Storage account.
storage_string = SecureString(value=CONNECTION_STRING)
ls_azure_storage = LinkedServiceResource(
properties=AzureStorageLinkedService(connection_string=storage_string)
)
adf_client.linked_services.create_or_update(
RESOURCE_GROUP_NAME, DATAFACTORY_NAME, STORAGE_LINKED_SERVICE_NAME, ls_azure_storage
)
# Create an Azure blob dataset (input)
ds_ls = LinkedServiceReference(reference_name=STORAGE_LINKED_SERVICE_NAME)
ds_azure_blob = DatasetResource(
properties=AzureBlobDataset(
linked_service_name=ds_ls, folder_path=BLOB_PATH, file_name=BLOB_FILE_NAME
)
)
adf_client.datasets.create_or_update(
RESOURCE_GROUP_NAME, DATAFACTORY_NAME, DATASET_INPUT_NAME, ds_azure_blob
)
# Create an Azure blob dataset (output)
ds_out_azure_blob = DatasetResource(
properties=AzureBlobDataset(linked_service_name=ds_ls, folder_path=OUTPUT_BLOB_PATH)
)
adf_client.datasets.create_or_update(
RESOURCE_GROUP_NAME, DATAFACTORY_NAME, DATASET_OUTPUT_NAME, ds_out_azure_blob
)
# Create a copy activity
blob_source = BlobSource()
blob_sink = BlobSink()
ds_in_ref = DatasetReference(reference_name=DATASET_INPUT_NAME)
ds_out_ref = DatasetReference(reference_name=DATASET_OUTPUT_NAME)
copy_activity = CopyActivity(
name=ACTIVITY_NAME, inputs=[ds_in_ref], outputs=[ds_out_ref], source=blob_source, sink=blob_sink
)
# Create a pipeline with the copy activity
p_obj = PipelineResource(activities=[copy_activity], parameters={})
adf_client.pipelines.create_or_update(RESOURCE_GROUP_NAME, DATAFACTORY_NAME, PIPELINE_NAME, p_obj)
def delete_azure_data_factory_storage_pipeline() -> None:
"""Delete data factory, storage linked service pipeline, dataset"""
from azure.identity import ClientSecretCredential
from azure.mgmt.datafactory import DataFactoryManagementClient
from azure.mgmt.resource import ResourceManagementClient
credentials = ClientSecretCredential(
client_id=CLIENT_ID, client_secret=CLIENT_SECRET, tenant_id=TENANT_ID
)
# create resource client
resource_client = ResourceManagementClient(credentials, SUBSCRIPTION_ID)
# create Data factory client
adf_client = DataFactoryManagementClient(credentials, SUBSCRIPTION_ID)
# Delete pipeline
adf_client.pipelines.delete(RESOURCE_GROUP_NAME, DATAFACTORY_NAME, PIPELINE_NAME)
# Delete input dataset
adf_client.datasets.delete(RESOURCE_GROUP_NAME, DATAFACTORY_NAME, DATASET_INPUT_NAME)
# Delete output dataset
adf_client.datasets.delete(RESOURCE_GROUP_NAME, DATAFACTORY_NAME, DATASET_OUTPUT_NAME)
# Delete Linked services
adf_client.linked_services.delete(
RESOURCE_GROUP_NAME, DATAFACTORY_NAME, linked_service_name=STORAGE_LINKED_SERVICE_NAME
)
# Delete Data factory
adf_client.factories.delete(RESOURCE_GROUP_NAME, DATAFACTORY_NAME)
# Delete Resource Group
resource_client.resource_groups.begin_delete(RESOURCE_GROUP_NAME)
with DAG(
dag_id="example_async_adf_run_pipeline",
start_date=datetime(2021, 8, 13),
schedule_interval=None,
catchup=False,
default_args=default_args,
tags=["example", "async", "Azure Pipeline"],
) as dag:
# [START howto_create_resource_group]
create_azure_data_factory_storage_pipeline = PythonOperator(
task_id="create_azure_data_factory_storage_pipeline",
python_callable=create_adf_storage_pipeline,
)
# [END howto_create_resource_group]
# [START howto_operator_adf_run_pipeline]
run_pipeline_wait = AzureDataFactoryRunPipelineOperatorAsync(
task_id="run_pipeline_wait",
pipeline_name=PIPELINE_NAME,
)
# [END howto_operator_adf_run_pipeline]
# [START howto_operator_adf_run_pipeline]
run_pipeline_no_wait = AzureDataFactoryRunPipelineOperatorAsync(
task_id="run_pipeline_no_wait",
pipeline_name=PIPELINE_NAME,
wait_for_termination=False,
)
# [END howto_operator_adf_run_pipeline]
# [START howto_sensor_pipeline_run_sensor_async]
pipeline_run_sensor_async = AzureDataFactoryPipelineRunStatusSensorAsync(
task_id="pipeline_run_sensor_async",
run_id=run_pipeline_wait.output["run_id"],
)
# [END howto_sensor_pipeline_run_sensor_async]
remove_azure_data_factory_storage_pipeline = PythonOperator(
task_id="remove_azure_data_factory_storage_pipeline",
python_callable=delete_azure_data_factory_storage_pipeline,
trigger_rule="all_done",
)
(
create_azure_data_factory_storage_pipeline
>> run_pipeline_wait
>> run_pipeline_no_wait
>> pipeline_run_sensor_async
>> remove_azure_data_factory_storage_pipeline
)
|
from flask import request
from flask_restful import Resource
from sqlalchemy import exc
from api import db
from api.models import User
class Users(Resource):
def get(self):
return (
{
"status": "success",
"data": {
"users": [user.to_json() for user in User.query.all()]
},
},
200,
)
def post(self):
post_data = request.get_json()
if (
not post_data
or "username" not in post_data
or "email" not in post_data
or "password" not in post_data
):
return {"message": "Invalid payload", "status": "fail"}, 400
username = post_data.get("username")
email = post_data.get("email")
password = post_data.get("password")
try:
user_with_username = User.query.filter_by(
username=username
).first()
user_with_mail = User.query.filter_by(email=email).first()
if not (user_with_username or user_with_mail):
db.session.add(
User(username=username, email=email, password=password)
)
db.session.commit()
return (
{"status": "success", "message": f"{email} was added!"},
201,
)
else:
what = 'Username' if user_with_username else 'Email'
return (
{
"status": "fail",
"message": f"{what} already exists.",
},
400,
)
except exc.IntegrityError:
db.session.rollback()
return {"message": "Database error", "status": "fail"}, 400
class UsersId(Resource):
def get(self, user_id):
error = {"status": "fail", "message": "User does not exist."}
if not str(user_id).isdigit():
return error, 404
try:
user = User.query.filter_by(id=user_id).first()
if not user:
return error, 404
else:
return ({"status": "success", "data": user.to_json()}, 200)
except ValueError:
return error, 404
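# A minimal usage sketch (hypothetical host and payload, assuming these resources are
# registered under /users and /users/<user_id>):
#   POST /users with {"username": "ada", "email": "ada@example.com", "password": "pw"}
#   -> 201 {"status": "success", "message": "ada@example.com was added!"}
#   GET /users/1 -> 200 {"status": "success", "data": {...}}, or 404 if the id is unknown.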
|
from typing import Union
import jax.numpy as np
import numpy as onp
import pandas as pd
ArrayLikes = [pd.DataFrame, pd.Series, np.ndarray, np.DeviceArray, onp.ndarray]
ArrayLikeType = Union[pd.DataFrame, pd.Series, np.ndarray, np.DeviceArray, onp.ndarray]
|
__version__ = '1.0.0'
from .corrlib import *
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\My_Codes\easylearn-fmri\eslearn\GUI\easylearn_main_gui.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(439, 703)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(300, 400))
MainWindow.setMaximumSize(QtCore.QSize(100000, 100000))
MainWindow.setMouseTracking(False)
self.centralwidget = QtWidgets.QWidget(MainWindow)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(100)
sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
self.centralwidget.setSizePolicy(sizePolicy)
self.centralwidget.setAcceptDrops(False)
self.centralwidget.setAutoFillBackground(False)
self.centralwidget.setObjectName("centralwidget")
self.gridLayout = QtWidgets.QGridLayout(self.centralwidget)
self.gridLayout.setObjectName("gridLayout")
self.logo = QtWidgets.QLabel(self.centralwidget)
self.logo.setMinimumSize(QtCore.QSize(0, 100))
self.logo.setText("")
self.logo.setObjectName("logo")
self.gridLayout.addWidget(self.logo, 0, 0, 1, 1)
self.data_loading = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.data_loading.sizePolicy().hasHeightForWidth())
self.data_loading.setSizePolicy(sizePolicy)
self.data_loading.setStyleSheet("")
self.data_loading.setIconSize(QtCore.QSize(30, 30))
self.data_loading.setObjectName("data_loading")
self.gridLayout.addWidget(self.data_loading, 1, 0, 1, 1)
self.feature_engineering = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.feature_engineering.sizePolicy().hasHeightForWidth())
self.feature_engineering.setSizePolicy(sizePolicy)
self.feature_engineering.setIconSize(QtCore.QSize(30, 30))
self.feature_engineering.setObjectName("feature_engineering")
self.gridLayout.addWidget(self.feature_engineering, 2, 0, 1, 1)
self.machine_learning = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.machine_learning.sizePolicy().hasHeightForWidth())
self.machine_learning.setSizePolicy(sizePolicy)
self.machine_learning.setIconSize(QtCore.QSize(30, 30))
self.machine_learning.setObjectName("machine_learning")
self.gridLayout.addWidget(self.machine_learning, 3, 0, 1, 1)
self.model_evaluation = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.model_evaluation.sizePolicy().hasHeightForWidth())
self.model_evaluation.setSizePolicy(sizePolicy)
self.model_evaluation.setObjectName("model_evaluation")
self.gridLayout.addWidget(self.model_evaluation, 4, 0, 1, 1)
self.statistical_analysis = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.statistical_analysis.sizePolicy().hasHeightForWidth())
self.statistical_analysis.setSizePolicy(sizePolicy)
self.statistical_analysis.setIconSize(QtCore.QSize(30, 30))
self.statistical_analysis.setObjectName("statistical_analysis")
self.gridLayout.addWidget(self.statistical_analysis, 5, 0, 1, 1)
self.textBrowser = QtWidgets.QTextBrowser(self.centralwidget)
self.textBrowser.setMinimumSize(QtCore.QSize(0, 20))
self.textBrowser.setMaximumSize(QtCore.QSize(10000000, 100))
self.textBrowser.setMidLineWidth(30)
self.textBrowser.setObjectName("textBrowser")
self.gridLayout.addWidget(self.textBrowser, 6, 0, 1, 1)
self.save_load = QtWidgets.QHBoxLayout()
self.save_load.setObjectName("save_load")
self.quit = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.quit.sizePolicy().hasHeightForWidth())
self.quit.setSizePolicy(sizePolicy)
self.quit.setObjectName("quit")
self.save_load.addWidget(self.quit)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.save_load.addItem(spacerItem)
self.run = QtWidgets.QPushButton(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.run.sizePolicy().hasHeightForWidth())
self.run.setSizePolicy(sizePolicy)
self.run.setIconSize(QtCore.QSize(30, 30))
self.run.setObjectName("run")
self.save_load.addWidget(self.run)
self.gridLayout.addLayout(self.save_load, 7, 0, 1, 1)
self.progressBar = QtWidgets.QProgressBar(self.centralwidget)
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName("progressBar")
self.gridLayout.addWidget(self.progressBar, 8, 0, 1, 1)
MainWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 439, 26))
self.menubar.setObjectName("menubar")
self.menueasylearn = QtWidgets.QMenu(self.menubar)
self.menueasylearn.setObjectName("menueasylearn")
self.menuHelp_H = QtWidgets.QMenu(self.menubar)
self.menuHelp_H.setObjectName("menuHelp_H")
self.menuSkin = QtWidgets.QMenu(self.menubar)
self.menuSkin.setObjectName("menuSkin")
MainWindow.setMenuBar(self.menubar)
self.current_working_directory = QtWidgets.QAction(MainWindow)
self.current_working_directory.setObjectName("current_working_directory")
self.select_working_directory = QtWidgets.QAction(MainWindow)
self.select_working_directory.setObjectName("select_working_directory")
self.create_configuration_file = QtWidgets.QAction(MainWindow)
self.create_configuration_file.setObjectName("create_configuration_file")
self.choose_configuration_file = QtWidgets.QAction(MainWindow)
self.choose_configuration_file.setObjectName("choose_configuration_file")
self.actionDark = QtWidgets.QAction(MainWindow)
self.actionDark.setObjectName("actionDark")
self.actionBlack = QtWidgets.QAction(MainWindow)
self.actionBlack.setObjectName("actionBlack")
self.actionDarkOrange = QtWidgets.QAction(MainWindow)
self.actionDarkOrange.setObjectName("actionDarkOrange")
self.actionGray = QtWidgets.QAction(MainWindow)
self.actionGray.setObjectName("actionGray")
self.actionBlue = QtWidgets.QAction(MainWindow)
self.actionBlue.setObjectName("actionBlue")
self.actionNavy = QtWidgets.QAction(MainWindow)
self.actionNavy.setObjectName("actionNavy")
self.actionClassic = QtWidgets.QAction(MainWindow)
self.actionClassic.setObjectName("actionClassic")
self.actionLight = QtWidgets.QAction(MainWindow)
self.actionLight.setObjectName("actionLight")
self.menueasylearn.addSeparator()
self.menueasylearn.addAction(self.select_working_directory)
self.menueasylearn.addAction(self.create_configuration_file)
self.menueasylearn.addAction(self.choose_configuration_file)
self.menuSkin.addAction(self.actionDark)
self.menuSkin.addAction(self.actionBlack)
self.menuSkin.addAction(self.actionDarkOrange)
self.menuSkin.addAction(self.actionGray)
self.menuSkin.addAction(self.actionBlue)
self.menuSkin.addAction(self.actionNavy)
self.menuSkin.addAction(self.actionClassic)
self.menuSkin.addAction(self.actionLight)
self.menubar.addAction(self.menueasylearn.menuAction())
self.menubar.addAction(self.menuHelp_H.menuAction())
self.menubar.addAction(self.menuSkin.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.data_loading.setText(_translate("MainWindow", "Data Loading"))
self.feature_engineering.setText(_translate("MainWindow", "Feature Engineering"))
self.machine_learning.setText(_translate("MainWindow", "Machine Learning"))
self.model_evaluation.setText(_translate("MainWindow", "Model Evaluation"))
self.statistical_analysis.setText(_translate("MainWindow", "Statistical Analysis"))
self.quit.setText(_translate("MainWindow", "Quit"))
self.run.setText(_translate("MainWindow", "Run"))
self.menueasylearn.setTitle(_translate("MainWindow", "Project initialization(&I)"))
self.menuHelp_H.setTitle(_translate("MainWindow", "Help(&H)"))
self.menuSkin.setTitle(_translate("MainWindow", "Skin"))
self.current_working_directory.setText(_translate("MainWindow", "Current working directory"))
self.select_working_directory.setText(_translate("MainWindow", "Select working directory"))
self.create_configuration_file.setText(_translate("MainWindow", "Create configuration file"))
self.choose_configuration_file.setText(_translate("MainWindow", "Load configuration file"))
self.actionDark.setText(_translate("MainWindow", "Dark"))
self.actionBlack.setText(_translate("MainWindow", "Black"))
self.actionDarkOrange.setText(_translate("MainWindow", "DarkOrange"))
self.actionGray.setText(_translate("MainWindow", "Gray"))
self.actionBlue.setText(_translate("MainWindow", "Blue"))
self.actionNavy.setText(_translate("MainWindow", "Navy"))
self.actionClassic.setText(_translate("MainWindow", "Classic"))
self.actionLight.setText(_translate("MainWindow", "Light"))
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.camera_image = None
self.lights = []
#self.count = 0 # record freq enter detection
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
# TODO: Implement
        # base_waypoints is received only once since the base waypoints
        # do not change, so they are stored on the class
self.waypoints = waypoints
if not self.waypoints_2d:
# just to get the coordinates of the waypoints (x,y)
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] \
for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d) # construct a KDTree from the 2D waypoints
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state: # if state change we start the counter
self.state_count = 0
self.state = state
        # since the classifier could be unstable and keep changing all the time,
        # we only act once the classification stays unchanged for a certain
        # number of consecutive loops (STATE_COUNT_THRESHOLD)
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state # record the last state
            light_wp = light_wp if state == TrafficLight.RED else -1 # we are only interested in red lights
self.last_wp = light_wp # record the previous traffic light state
self.upcoming_red_light_pub.publish(Int32(light_wp)) # publish the confident traffic light state
else:
# if we are not confident just publish the previous traffic light state
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x,y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
            x, y (float): position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
#TODO implement
closest_idx = self.waypoint_tree.query([x,y],1)[1]
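        # KDTree.query returns (distance, index); only the index of the nearest waypoint is needed here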
return closest_idx
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
#return light.state # get the light state provided by the simulator
if(not self.has_image):
self.prev_light_loc = None
return False
# change from ros image message to cv rgb image
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "rgb8")
status = self.light_classifier.get_classification(cv_image)
#rospy.loginfo("[traffic] ",tl_color," traffic light detected")
#Get classification
return status
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
            int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_light = None
line_wp_idx = None
light = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if(self.pose):
car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x,
self.pose.pose.position.y)
#TODO find the closest visible traffic light (if one exists)
diff = len(self.waypoints.waypoints) # number of visible points ahead of the car
# loop through all possible stop line and find the one closest visible stopline
for i , light in enumerate(self.lights):
                line = stop_line_positions[i] # get the stop line coordinates for this light
# get the closest waypoint index of this traffic light coordinates
temp_wp_idx = self.get_closest_waypoint(line[0],line[1])
d = temp_wp_idx - car_wp_idx
if d >= 0 and d < diff: # check to see if stop line is ahead and visible infront of the car
#rospy.loginfo("[debug] light: {}, car_wp_indx: {}, wp_indx: {}, d: {}".format(
# i, car_wp_idx, temp_wp_idx, d))
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
break
        # only run detection and classification when the closest stop line is within
        # 80 waypoints ahead of the car
#rospy.loginfo('[outside] state count is {}'.format(self.state_count))
if closest_light and diff <80:
#rospy.loginfo('[inside] count is {}'.format(self.state_count))
state = self.get_light_state(closest_light)
            return line_wp_idx, state # return the stop line waypoint index and light state when a light is visible
        return -1, TrafficLight.UNKNOWN # return -1 if there is no visible traffic light
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/contrib/mpi_collectives/mpi_message.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/contrib/mpi_collectives/mpi_message.proto',
package='tensorflow.contrib.mpi',
syntax='proto3',
serialized_pb=_b('\n4tensorflow/contrib/mpi_collectives/mpi_message.proto\x12\x16tensorflow.contrib.mpi\x1a,tensorflow/core/framework/tensor_shape.proto\x1a%tensorflow/core/framework/types.proto\"\x89\x02\n\nMPIRequest\x12\x14\n\x0crequest_rank\x18\x01 \x01(\x05\x12\x44\n\x0crequest_type\x18\x02 \x01(\x0e\x32..tensorflow.contrib.mpi.MPIRequest.RequestType\x12)\n\x0btensor_type\x18\x03 \x01(\x0e\x32\x14.tensorflow.DataType\x12\x13\n\x0btensor_name\x18\x04 \x01(\t\x12\x32\n\x0ctensor_shape\x18\x05 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\"+\n\x0bRequestType\x12\r\n\tALLREDUCE\x10\x00\x12\r\n\tALLGATHER\x10\x01\"\xd3\x01\n\x0bMPIResponse\x12G\n\rresponse_type\x18\x01 \x01(\x0e\x32\x30.tensorflow.contrib.mpi.MPIResponse.ResponseType\x12\x13\n\x0btensor_name\x18\x02 \x01(\t\x12\x15\n\rerror_message\x18\x03 \x01(\t\"O\n\x0cResponseType\x12\r\n\tALLREDUCE\x10\x00\x12\r\n\tALLGATHER\x10\x01\x12\t\n\x05\x45RROR\x10\x02\x12\x08\n\x04\x44ONE\x10\x03\x12\x0c\n\x08SHUTDOWN\x10\x04\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,])
_MPIREQUEST_REQUESTTYPE = _descriptor.EnumDescriptor(
name='RequestType',
full_name='tensorflow.contrib.mpi.MPIRequest.RequestType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ALLREDUCE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALLGATHER', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=388,
serialized_end=431,
)
_sym_db.RegisterEnumDescriptor(_MPIREQUEST_REQUESTTYPE)
_MPIRESPONSE_RESPONSETYPE = _descriptor.EnumDescriptor(
name='ResponseType',
full_name='tensorflow.contrib.mpi.MPIResponse.ResponseType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ALLREDUCE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALLGATHER', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DONE', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SHUTDOWN', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=566,
serialized_end=645,
)
_sym_db.RegisterEnumDescriptor(_MPIRESPONSE_RESPONSETYPE)
_MPIREQUEST = _descriptor.Descriptor(
name='MPIRequest',
full_name='tensorflow.contrib.mpi.MPIRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='request_rank', full_name='tensorflow.contrib.mpi.MPIRequest.request_rank', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='request_type', full_name='tensorflow.contrib.mpi.MPIRequest.request_type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_type', full_name='tensorflow.contrib.mpi.MPIRequest.tensor_type', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_name', full_name='tensorflow.contrib.mpi.MPIRequest.tensor_name', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_shape', full_name='tensorflow.contrib.mpi.MPIRequest.tensor_shape', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MPIREQUEST_REQUESTTYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=166,
serialized_end=431,
)
_MPIRESPONSE = _descriptor.Descriptor(
name='MPIResponse',
full_name='tensorflow.contrib.mpi.MPIResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='response_type', full_name='tensorflow.contrib.mpi.MPIResponse.response_type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='tensor_name', full_name='tensorflow.contrib.mpi.MPIResponse.tensor_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='error_message', full_name='tensorflow.contrib.mpi.MPIResponse.error_message', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_MPIRESPONSE_RESPONSETYPE,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=434,
serialized_end=645,
)
_MPIREQUEST.fields_by_name['request_type'].enum_type = _MPIREQUEST_REQUESTTYPE
_MPIREQUEST.fields_by_name['tensor_type'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_MPIREQUEST.fields_by_name['tensor_shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_MPIREQUEST_REQUESTTYPE.containing_type = _MPIREQUEST
_MPIRESPONSE.fields_by_name['response_type'].enum_type = _MPIRESPONSE_RESPONSETYPE
_MPIRESPONSE_RESPONSETYPE.containing_type = _MPIRESPONSE
DESCRIPTOR.message_types_by_name['MPIRequest'] = _MPIREQUEST
DESCRIPTOR.message_types_by_name['MPIResponse'] = _MPIRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
MPIRequest = _reflection.GeneratedProtocolMessageType('MPIRequest', (_message.Message,), dict(
DESCRIPTOR = _MPIREQUEST,
__module__ = 'tensorflow.contrib.mpi_collectives.mpi_message_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.contrib.mpi.MPIRequest)
))
_sym_db.RegisterMessage(MPIRequest)
MPIResponse = _reflection.GeneratedProtocolMessageType('MPIResponse', (_message.Message,), dict(
DESCRIPTOR = _MPIRESPONSE,
__module__ = 'tensorflow.contrib.mpi_collectives.mpi_message_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.contrib.mpi.MPIResponse)
))
_sym_db.RegisterMessage(MPIResponse)
# @@protoc_insertion_point(module_scope)
|
""" Plot functions. """
# pylint: disable=too-many-statements
from numbers import Number
from copy import copy
import colorsys
import cv2
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patheffects
from matplotlib.cm import get_cmap, register_cmap
from matplotlib.patches import Patch
from matplotlib.colors import ColorConverter, ListedColormap, LinearSegmentedColormap, is_color_like
from mpl_toolkits import axes_grid1
import plotly.figure_factory as ff
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from .utils import to_list
from ..batchflow import deprecated
def plot_image(data=None, mode='imshow', backend='matplotlib', **kwargs):
""" Overall plotter function, converting kwarg-names to match chosen backend and redirecting
plotting task to one of the methods of backend-classes.
"""
if backend in ('matplotlib', 'plt'):
return MatplotlibPlotter.plot(data=data, mode=mode, **kwargs)
if backend in ('plotly', 'go'):
return getattr(PlotlyPlotter, mode)(data, **kwargs)
raise ValueError(f'{backend} backend is not supported!')
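# A hedged usage sketch (array names are hypothetical): plot_image(seismic_slice) renders a
# single image with the matplotlib backend, while plot_image([slice_a, slice_b], separate=True)
# goes through the same dispatch but draws the two arrays on separate axes.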
def plot_loss(data, title=None, **kwargs):
""" Shorthand for loss plotting. """
kwargs = {
'xlabel': 'Iterations',
'ylabel': 'Loss',
'label': title or 'Loss graph',
'xlim': (0, None),
'rolling_mean': 10,
'final_mean': 100,
**kwargs
}
return plot_image(data, mode='curve', backend='matplotlib', **kwargs)
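# plot_loss is a thin wrapper: plot_loss(history, title='Training loss'), with `history` being
# any sequence of per-iteration loss values, forwards to plot_image in 'curve' mode with the
# axis labels and rolling/final mean windows preset above.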
def filter_parameters(params, keys=None, prefix='', index=None, index_condition=None):
""" Make a subdictionary of parameters with required keys.
    A parameter is retrieved if:
    a. It is explicitly requested (via `keys` arg).
    b. Its name starts with the given prefix (defined by `prefix` arg).
Parameters
----------
params : dict
Arguments to filter.
keys : sequence
Keys to retrieve.
prefix : str, optional
Arguments with keys starting with given prefix will also be retrieved.
Defaults to `''`, i.e. no prefix used.
index : int
Index of argument value to retrieve.
If none provided, get whole argument value.
If value is non-indexable, get it without indexing.
index_condition : callable
        Function that takes the indexed argument value and returns a bool specifying whether it should really be indexed.
"""
result = {}
keys = keys or list(params.keys())
if prefix:
keys += [key.split(prefix)[1] for key in params if key.startswith(prefix)]
for key in keys:
value = params.get(prefix + key, params.get(key))
if value is None:
continue
# check if parameter value indexing is requested and possible
if index is not None and isinstance(value, list):
# check if there is no index condition or there is one and it is satisfied
if index_condition is None or index_condition(value[index]):
value = value[index]
result[key] = value
return result
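# A small illustrative example (hypothetical values): with params = {'title_fontsize': 30,
# 'cmap': 'gray'}, calling filter_parameters(params, keys=['cmap'], prefix='title_') returns
# {'cmap': 'gray', 'fontsize': 30}, since keys are collected both directly and by stripping
# the 'title_' prefix.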
class MatplotlibPlotter:
""" Plotting backend for matplotlib.
Basic class logic
-----------------
Simply provide data, plot mode and parameters to the `plot_image` call.
`MatplotlibPlotter` takes care of redirecting params to methods they are meant for.
The logic behind the process is the following:
1. Convert some provided parameters from 'plotly' to 'matplotlib' naming convention.
2. Obtain default params for chosen mode and merge them with provided params.
3. Put data into a double-nested list (via `make_nested_data`).
Nestedness levels define subplot and layer data order correspondingly.
4. Parse axes or create them if none provided via `make_or_parse_axes`.
5. For every axis-data pair:
a. Filter params relevant for ax (via `filter_parameters`).
b. Call chosen plot method (one of `imshow`, `wiggle`, `hist` or `curve`) with ax params.
c. Apply all annotations with ax params (via `annotate_axis`).
    6. Show and save figure (via `save_and_show`).
Data display scenarios
----------------------
    1. The simplest one is when one provides a single data array.
    2. A more advanced use case is when one provides a list of arrays:
       a. Images are put on the same axis and overlaid: data=[image_1, image_2];
       b. Images are put on separate axes: data=[image_1, image_2], separate=True;
    3. The most complex scenario is when one wishes to display images in a 'mixed' manner.
       For example, to overlay the first two images but display the third one separately, one must use:
       data = [[image_1, image_2], image_3];
    The order of arrays inside the double-nested structure declares which of them belong to the same axis
    and therefore should be rendered one over another, and which must be displayed separately.
    Note that in general parameters should resemble the data nestedness level.
    That allows binding axes and parameters that correspond to each other.
    However, it's possible for a parameter to be a single item; in that case it's shared across all subplots and layers.
Advanced parameters managing
----------------------------
The list of parameters expected by specific plot method is rather short.
But there is a way to provide parameter to a plot method, even if it's not hard-coded.
One must use specific prefix for that.
Address docs of `imshow`, `wiggle`, `hist`, `curve` and `annotate_axis` for details.
This also allows one to pass arguments of the same name for different plotting steps.
E.g. `plt.set_title` and `plt.set_xlabel` both require `fontsize` argument.
Providing `{'fontsize': 30}` in kwargs will affect both title and x-axis labels.
To change parameter for title only, one can provide {'title_fontsize': 30}` instead.
"""
@classmethod
def plot(cls, data, mode='imshow', separate=False, **kwargs):
""" Plot manager.
Parses axes from kwargs if provided, else creates them.
Filters parameters and calls chosen plot method for every axis-data pair.
Parameters
----------
data : np.ndarray or a list of np.ndarray objects (possibly nested)
If list has level 1 nestedness, 'overlaid/separate' logic is handled via `separate` parameter.
If list has level 2 nestedness, outer level defines subplots order while inner one defines layers order.
Shape of data items depends on chosen mode (see below).
mode : 'imshow', 'wiggle', 'hist', 'curve'
If 'imshow' plot given arrays as images.
If 'wiggle' plot 1d subarrays of given array as signals.
Subarrays are extracted from given data with fixed step along vertical axis.
If 'hist' plot histogram of flattened array.
If 'curve' plot given arrays as curves.
separate : bool
            Whether to plot images on separate axes instead of putting them all together on a single one.
Incompatible with 'wiggle' mode.
kwargs :
            - For one of `imshow`, `wiggle`, `hist` or `curve` (depending on chosen mode).
Parameters and data nestedness levels must match.
Every param with 'imshow_', 'wiggle_', 'hist_' or 'curve_' prefix is redirected to corresponding method.
- For `annotate_axis`.
Every param with 'title_', 'suptitle_', 'xlabel_', 'ylabel_', 'xticks_', 'yticks_', 'ticks_', 'xlim_',
            'ylim_', 'colorbar_', 'legend_' or 'grid_' prefix is redirected to corresponding matplotlib method.
Also 'facecolor', 'set_axisbelow', 'disable_axes' arguments are accepted.
"""
if mode == 'wiggle' and separate:
raise ValueError("Can't use `separate` option with `wiggle` mode.")
PLOTLY_TO_PYPLOT = {'zmin': 'vmin', 'zmax': 'vmax', 'xaxis': 'xlabel', 'yaxis': 'ylabel'}
# pylint: disable=expression-not-assigned
[kwargs.update({new: kwargs[old]}) for old, new in PLOTLY_TO_PYPLOT.items() if old in kwargs]
mode_defaults = getattr(cls, f"{mode.upper()}_DEFAULTS")
all_params = {**mode_defaults, **kwargs}
data = cls.make_nested_data(data=data, separate=separate)
axes = cls.make_or_parse_axes(mode=mode, n_subplots=len(data), all_params=all_params)
for ax_num, (ax_data, ax) in enumerate(zip(data, axes)):
index_condition = None if separate else lambda x: isinstance(x, list)
ax_params = filter_parameters(all_params, index=ax_num, index_condition=index_condition)
ax_params = getattr(cls, mode)(ax=ax, data=ax_data, **ax_params)
cls.annotate_axis(ax=ax, ax_num=ax_num, ax_params=ax_params, all_params=all_params, mode=mode)
[ax.set_axis_off() for ax in axes[len(data):]] # pylint: disable=expression-not-assigned
return cls.save_and_show(fig=axes[0].figure, **kwargs)
# Action methods
@staticmethod
def make_nested_data(data, separate):
""" Construct nested list of data arrays for plotting. """
if data is None:
return []
if isinstance(data, np.ndarray):
return [[data]]
if isinstance(data[0], Number):
return [[np.array(data)]]
if all(isinstance(item, np.ndarray) for item in data):
return [[item] for item in data] if separate else [data]
if separate:
raise ValueError("Arrays list must be flat, when `separate` option is True.")
return [[item] if isinstance(item, np.ndarray) else item for item in data]
@classmethod
def make_or_parse_axes(cls, mode, n_subplots, all_params):
""" Create figure and axes if needed, else use provided. """
MODE_TO_FIGSIZE = {'imshow' : (12, 12),
'hist' : (8, 5),
'wiggle' : (12, 7),
'curve': (15, 5)}
axes = all_params.pop('axis', None)
axes = all_params.pop('axes', axes)
axes = all_params.pop('ax', axes)
if axes is None:
FIGURE_KEYS = ['figsize', 'facecolor', 'dpi', 'ncols', 'nrows', 'constrained_layout']
params = filter_parameters(all_params, FIGURE_KEYS, prefix='figure_')
params['figsize'] = params.get('figsize', MODE_TO_FIGSIZE[mode])
if ('ncols' not in params) and ('nrows' not in params):
params['ncols'] = n_subplots
_, axes = plt.subplots(**params)
axes = to_list(axes)
n_axes = len(axes)
if n_axes < n_subplots:
raise ValueError(f"Not enough axes provided ({n_axes}) for {n_subplots} subplots.")
return axes
@classmethod
def annotate_axis(cls, ax, ax_num, ax_params, all_params, mode):
""" Apply requested annotation functions to given axis with chosen parameters. """
# pylint: disable=too-many-branches
TEXT_KEYS = ['fontsize', 'family', 'color']
# title
keys = ['title', 'label', 'y'] + TEXT_KEYS
params = filter_parameters(ax_params, keys, prefix='title_', index=ax_num)
params['label'] = params.pop('title', None) or params.get('label')
if params:
ax.set_title(**params)
# suptitle
keys = ['t', 'y'] + TEXT_KEYS
params = filter_parameters(ax_params, keys, prefix='suptitle_')
params['t'] = params.get('t') or params.get('suptitle') or params.get('label')
if params:
ax.figure.suptitle(**params)
# xlabel
keys = ['xlabel'] + TEXT_KEYS
params = filter_parameters(ax_params, keys, prefix='xlabel_', index=ax_num)
if params:
ax.set_xlabel(**params)
# ylabel
keys = ['ylabel'] + TEXT_KEYS
params = filter_parameters(ax_params, keys, prefix='ylabel_', index=ax_num)
if params:
ax.set_ylabel(**params)
# aspect
params = filter_parameters(ax_params, ['aspect'], prefix='aspect_', index=ax_num)
if params:
ax.set_aspect(**params)
# xticks
params = filter_parameters(ax_params, ['xticks'], prefix='xticks_', index=ax_num)
if 'xticks' in params:
params['ticks'] = params.get('ticks', params.pop('xticks'))
if params:
ax.set_xticks(**params)
# yticks
params = filter_parameters(ax_params, ['yticks'], prefix='yticks_', index=ax_num)
if 'yticks' in params:
params['ticks'] = params.get('ticks', params.pop('yticks'))
if params:
ax.set_yticks(**params)
# ticks
keys = ['labeltop', 'labelright', 'labelcolor', 'direction']
params = filter_parameters(ax_params, keys, prefix='tick_', index=ax_num)
if params:
ax.tick_params(**params)
# xlim
params = filter_parameters(ax_params, ['xlim'], prefix='xlim_', index=ax_num)
if 'xlim' in params:
params['left'] = params.get('left', params.pop('xlim'))
if params:
ax.set_xlim(**params)
# ylim
params = filter_parameters(ax_params, ['ylim'], prefix='ylim_', index=ax_num)
if 'ylim' in params:
params['bottom'] = params.get('bottom', params.pop('ylim'))
if params:
ax.set_ylim(**params)
# colorbar
if all_params.get('colorbar', False) and mode == 'imshow':
keys = ['colorbar', 'fraction', 'aspect', 'fake', 'ax_image']
params = filter_parameters(ax_params, keys, prefix='colorbar_', index=ax_num)
# if colorbar is disabled for subplot, add param to plot fake axis instead to keep proportions
params['fake'] = not params.pop('colorbar', True)
cls.add_colorbar(**params)
# legend
keys = ['label', 'size', 'cmap', 'color', 'loc']
params = filter_parameters(ax_params, keys, prefix='legend_')
params['color'] = params.pop('cmap', None) or params.get('color')
if params.get('label') is not None:
cls.add_legend(ax, **params)
# grid
keys = ['grid', 'b', 'which', 'axis']
params = filter_parameters(ax_params, keys, prefix='grid_', index=ax_num)
        params['b'] = params.pop('grid', params.pop('b', False))
if params:
ax.grid(**params)
if ax_params.get('facecolor'):
ax.set_facecolor(ax_params['facecolor'])
ax.set_axisbelow(ax_params.get('set_axisbelow', False))
if ax_params.get('disable_axes'):
ax.set_axis_off()
elif not ax.axison:
ax.set_axis_on()
@staticmethod
def save_and_show(fig, show=True, savepath=None, return_figure=False, pyqt=False, **kwargs):
""" Save and show plot if needed. """
if pyqt:
return None
save_kwargs = dict(bbox_inches='tight', pad_inches=0, dpi=100)
save_kwargs.update(kwargs.get('save') or {})
# save if necessary and render
if savepath is not None:
fig.savefig(savepath, **save_kwargs)
if show:
fig.show()
else:
plt.close()
plot_image.last_figure = fig
if return_figure:
return fig
return None
# Rendering methods
IMSHOW_DEFAULTS = {
# image
'cmap': ['Greys_r', 'firebrick', 'mediumseagreen', 'thistle', 'darkorange', 'navy', 'gold',
'red', 'turquoise', 'darkorchid', 'darkkhaki', 'royalblue', 'yellow',
'chocolate', 'forestgreen', 'lightpink', 'darkslategray', 'deepskyblue', 'wheat'],
'facecolor': 'white',
# axis labels
'xlabel': '', 'ylabel': '',
# colorbar
'colorbar_fraction': 3.0,
'colorbar_aspect': 30,
# ticks
'labeltop': True,
'labelright': True,
'direction': 'inout',
# legend
'legend_loc': 0,
'legend_size': 10,
'legend_label': None,
# common
'fontsize': 20,
# grid
'grid': False,
# other
'order_axes': (1, 0, 2),
'bad_color': (.0,.0,.0,.0),
'transparize_masks': None,
}
@classmethod
def imshow(cls, ax, data, **kwargs):
""" Plot arrays as images one over another on given axis.
Parameters
----------
ax : matplotlib axis
Axis to plot images on.
data : list of np.ndarray
Every item must be a valid matplotlib image.
kwargs :
order_axes : tuple of ints
Order of image axes.
bad_values : list of numbers
Data values that should be displayed with 'bad_color'.
transparize_masks : bool, optional
                Whether to treat zeros in binary masks as bad values.
If True, make zero values in all binary masks transparent on display.
If False, do not make zero values in any binary masks transparent on display.
If not provided, make zero values transparent in all masks that overlay an image.
params for images drawn by `plt.imshow`:
- 'cmap', 'vmin', 'vmax', 'interpolation', 'alpha', 'extent'
- params with 'imshow_' prefix
Notes
-----
See class docs for details on prefixes usage.
See class and method defaults for arguments examples.
"""
for image_num, image in enumerate(data):
image = np.transpose(image, axes=kwargs['order_axes'][:image.ndim]).astype(np.float32)
keys = ['cmap', 'vmin', 'vmax', 'interpolation', 'alpha', 'extent']
params = filter_parameters(kwargs, keys, prefix='imshow_', index=image_num)
params['cmap'] = cls.make_cmap(params.pop('cmap'), kwargs['bad_color'])
params['extent'] = params.get('extent') or [0, image.shape[1], image.shape[0], 0]
# fill some values with nans to display them with `bad_color`
bad_values = filter_parameters(kwargs, ['bad_values'], index=image_num).get('bad_values', [])
transparize_masks = kwargs.get('transparize_masks')
transparize_masks = transparize_masks if transparize_masks is not None else image_num > 0
if transparize_masks:
unique_values = tuple(np.unique(image))
if unique_values == (0,) or unique_values == (0, 1): # pylint: disable=consider-using-in
params['vmin'] = params.get('vmin', 0)
bad_values = [0]
for bad_value in bad_values:
image[image == bad_value] = np.nan
ax_image = ax.imshow(image, **params)
if image_num == 0:
kwargs['ax_image'] = ax_image
return kwargs
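    # Sketch of a typical overlay call handled by `imshow` (argument values are illustrative):
    #   plot_image([seismic_slice, binary_mask], cmap=['Greys_r', 'red'],
    #              alpha=[1.0, 0.7], colorbar=True)
    # The first array is drawn as a background image; zeros of the binary mask layer are turned
    # into NaNs (see `transparize_masks` above), so only its non-zero pixels are rendered.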
WIGGLE_DEFAULTS = {
# main
'step': 15,
'width_multiplier': 1,
'curve_width': 1,
# wiggle
'wiggle_color': 'k',
'wiggle_linestyle': '-',
# curve
'color': 'r',
'marker': 'o',
'linestyle': '',
# suptitle
'suptitle_color': 'k',
# title
'title_color': 'k',
# axis labels
'xlabel': '', 'ylabel': '',
'xlabel_color': 'k', 'ylabel_color': 'k',
# ticks
'labeltop': True,
'labelright': True,
'direction': 'inout',
# legend
'legend_loc': 1,
'legend_size': 15,
# grid
'grid_axis': 'y',
# common
'set_axisbelow': True,
'fontsize': 20, 'label': ''
}
@classmethod
def wiggle(cls, ax, data, **kwargs):
""" Make wiggle plot of signals array. Optionally overlap it with a curve.
Parameters
----------
ax : matplotlib axis
Axis to plot images on.
data : np.ndarray or list of np.ndarray
If array, must be 2d.
If list, must contain image and curve arrays.
            A curve, in turn, must be either a 1d array of heights or a 2d array mask.
            If 1d heights, its shape must match the corresponding image dimension.
            If a 2d mask, its shape must match the image shape.
            In both cases it is expected that values are `np.nan` where the curve is not defined.
kwargs :
step : int, optional
Step to take signals from the array with.
width_multiplier : float, optional
Scale factor for signals amplitudes.
color : matplotlib color
Wiggle lines color.
fill_color : matplotlib color
Wiggle fill color.
xlim, ylims : tuples of int
Define displayed data limits.
params for overlaid curves drawn by `plt.plot`:
- 'color', 'linestyle', 'marker', 'markersize'
- params with 'curve_' prefix
Notes
-----
See class docs for details on prefixes usage.
See class and method defaults for arguments examples.
"""
image, *curves = data
offsets = np.arange(0, image.shape[0], kwargs['step'])
y_range = np.arange(0, image.shape[1])
x_range = [] # accumulate traces to draw curve above them if needed
for offset in offsets:
x = offset + kwargs['width_multiplier'] * image[offset] / np.std(image)
params = filter_parameters(kwargs, ['color'], prefix='wiggle_')
ax.plot(x, y_range, **params)
fill_color = kwargs.get('fill_color') or params['color']
ax.fill_betweenx(y_range, offset, x, where=(x > offset), color=fill_color)
x_range.append(x)
x_range = np.r_[x_range]
if 'xlim' not in kwargs:
kwargs['xlim'] = (x_range[0].min(), x_range[-1].max())
if 'ylim' not in kwargs:
kwargs['ylim'] = (y_range.max(), y_range.min())
for curve_num, curve in enumerate(curves):
keys = ['color', 'linestyle', 'marker', 'markersize']
params = filter_parameters(kwargs, keys, prefix='curve_', index=curve_num)
width = params.pop('width', 1)
curve = curve[offsets]
if curve.ndim == 1:
curve_x = (~np.isnan(curve)).nonzero()[0]
curve_y = curve[curve_x]
# transform height-mask to heights if needed
elif curve.ndim == 2:
curve = (~np.isnan(curve)).nonzero()
curve_x = curve[0][(width // 2)::width]
curve_y = curve[1][(width // 2)::width]
ax.plot(x_range[curve_x, curve_y], curve_y, **params)
return kwargs
HIST_DEFAULTS = {
# hist
'bins': 50,
'color': ['firebrick', 'mediumseagreen', 'thistle', 'darkorange', 'navy', 'gold',
'red', 'turquoise', 'darkorchid', 'darkkhaki', 'royalblue', 'yellow',
'chocolate', 'forestgreen', 'lightpink', 'darkslategray', 'deepskyblue', 'wheat'],
'alpha': 0.8,
'facecolor': 'white',
# suptitle
'suptitle_color': 'k',
'suptitle_y': 1.01,
# title
'title_color' : 'k',
# axis labels
'xlabel': '', 'ylabel': '',
'xlabel_color' : 'k', 'ylabel_color' : 'k',
# legend
'legend_size': 10,
'legend_label': None,
'legend_loc': 0,
# grid
'grid': True,
# common
'set_axisbelow': True,
'fontsize': 20
}
@classmethod
def hist(cls, ax, data, **kwargs):
""" Plot histograms on given axis.
Parameters
----------
ax : matplotlib axis
Axis to plot images on.
data : np.ndarray or list of np.ndarray
Arrays to build histograms. Can be of any shape since they are flattened.
kwargs :
params for overlaid histograms drawn by `plt.hist`:
- 'bins', 'color', 'alpha'
- params with 'hist_' prefix
Notes
-----
See class docs for details on prefixes usage.
See class and method defaults for arguments examples.
"""
for image_num, array in enumerate(data):
array = array.flatten()
params = filter_parameters(kwargs, ['bins', 'color', 'alpha'], prefix='hist_', index=image_num)
ax.hist(array, **params)
return kwargs
CURVE_DEFAULTS = {
# main
'rolling_mean': None,
        'final_mean': None,
# curve
'color': ['skyblue', 'sandybrown', 'lightpink', 'mediumseagreen', 'thistle', 'firebrick',
'forestgreen', 'navy', 'gold', 'red', 'turquoise', 'darkorchid',
'darkkhaki', 'royalblue', 'yellow', 'chocolate', 'darkslategray', 'wheat'],
'facecolor': 'white',
# suptitle
'suptitle_color': 'k',
# title
'title_color': 'k',
# axis labels
'xlabel': 'x', 'ylabel': 'y',
'xlabel_color': 'k', 'ylabel_color': 'k',
# legend
'legend_loc': 0,
'legend_size': 10,
'legend_label': None,
# common
'fontsize': 20,
'grid': True
}
@classmethod
def curve(cls, ax, data, **kwargs):
""" Plot curves on given axis.
Parameters
----------
ax : matplotlib axis
Axis to plot images on.
data : np.ndarray or list of np.ndarray
Arrays to plot. Must be 1d.
kwargs :
rolling_mean : int or None
If int, calculate and display rolling mean with window `rolling_mean` size.
            final_mean : int or None
                If int, calculate and display the mean over the last `final_mean` array elements.
params for overlaid curves drawn by `plt.plot`:
- 'color', 'linestyle', 'alpha'
- params with 'curve_' prefix
Notes
-----
See class docs for details on prefixes usage.
See class and method defaults for arguments examples.
"""
for image_num, array in enumerate(data):
keys = ['color', 'linestyle', 'alpha']
params = filter_parameters(kwargs, keys, prefix='curve_', index=image_num)
ax.plot(array, **params)
mean_color = cls.scale_lightness(params['color'], scale=.5)
rolling_mean = kwargs.get('rolling_mean')
if rolling_mean:
averaged = array.copy()
window = min(10 if rolling_mean is True else rolling_mean, len(array))
                if window * 2 > len(averaged):
break
averaged[(window // 2):(-window // 2 + 1)] = np.convolve(array, np.ones(window) / window, mode='valid')
ax.plot(averaged, color=mean_color, linestyle='--')
final_mean = kwargs.get('final_mean')
if final_mean:
window = 100 if final_mean is True else final_mean
mean = np.mean(array[-window:])
line_len = len(array) // 20
curve_len = len(array)
line_x = np.arange(line_len) + curve_len
line_y = [mean] * line_len
ax.plot(line_x, line_y, linestyle='--', linewidth=1.2, color=mean_color)
fontsize = 10
text_x = curve_len + line_len
text_y = mean - fontsize / 300
text = ax.text(text_x, text_y, f"{mean:.3f}", fontsize=fontsize)
text.set_path_effects([patheffects.Stroke(linewidth=3, foreground='white'), patheffects.Normal()])
kwargs['xlim'] = (0, text_x)
return kwargs
# Predefined colormaps
METRIC_CDICT = {
'red': [[0.0, None, 1.0], [0.33, 1.0, 1.0], [0.66, 1.0, 1.0], [1.0, 0.0, None]],
'green': [[0.0, None, 0.0], [0.33, 0.0, 0.0], [0.66, 1.0, 1.0], [1.0, 0.5, None]],
'blue': [[0.0, None, 0.0], [0.33, 0.0, 0.0], [0.66, 0.0, 0.0], [1.0, 0.0, None]]
}
METRIC_CMAP = LinearSegmentedColormap('Metric', METRIC_CDICT)
METRIC_CMAP.set_bad(color='black')
register_cmap(name='Metric', cmap=METRIC_CMAP)
DEPTHS_CMAP = ListedColormap(get_cmap('viridis_r')(np.linspace(0.0, 0.5, 100)))
register_cmap(name='Depths', cmap=DEPTHS_CMAP)
SAMPLER_CMAP = ListedColormap([ColorConverter().to_rgb('blue'),
ColorConverter().to_rgb('red'),
ColorConverter().to_rgb('purple')])
register_cmap(name='Sampler', cmap=SAMPLER_CMAP)
# Supplementary methods
@staticmethod
def make_cmap(color, bad_color=None):
""" Make listed colormap from 'white' and provided color. """
try:
cmap = copy(plt.get_cmap(color))
except ValueError: # if not a valid cmap name, expect it to be a matplotlib color
if isinstance(color, str):
color = ColorConverter().to_rgb(color)
cmap = ListedColormap([(1, 1, 1, 1), color])
if bad_color is not None:
cmap.set_bad(color=bad_color)
return cmap
@staticmethod
def scale_lightness(color, scale):
""" Make new color with modified lightness from existing. """
if isinstance(color, str):
color = ColorConverter.to_rgb(color)
h, l, s = colorsys.rgb_to_hls(*color)
return colorsys.hls_to_rgb(h, min(1, l * scale), s = s)
@staticmethod
def add_colorbar(ax_image, aspect=30, fraction=0.5, color='black', fake=False):
""" Append colorbar to the image on the right. """
divider = axes_grid1.make_axes_locatable(ax_image.axes)
width = axes_grid1.axes_size.AxesY(ax_image.axes, aspect=1./aspect)
pad = axes_grid1.axes_size.Fraction(fraction, width)
cax = divider.append_axes("right", size=width, pad=pad)
if fake:
cax.set_axis_off()
else:
colorbar = ax_image.axes.figure.colorbar(ax_image, cax=cax)
colorbar.ax.yaxis.set_tick_params(color=color)
ax_image.axes.created_colorbar = colorbar
@staticmethod
def add_legend(ax, color, label, size, loc):
""" Add patches to legend. All invalid colors are filtered. """
handles = getattr(ax.get_legend(), 'legendHandles', [])
colors = [color for color in to_list(color) if is_color_like(color)]
labels = to_list(label)
new_patches = [Patch(color=color, label=label) for color, label in zip(colors, labels) if label]
handles += new_patches
if handles:
ax.legend(handles=handles, loc=loc, prop={'size': size})
class PlotlyPlotter:
""" Plotting backend for plotly. """
DEPRECATION_MESSAGE = "Plotly backend is deprecated."
@staticmethod
def convert_kwargs(mode, kwargs):
""" Update kwargs-dict to match plotly-conventions: update keys of the dict and
values in some cases.
"""
# make conversion-dict for kwargs-keys
keys_converter = {
'label': 'title', 't': 'title',
'xlabel': 'xaxis', 'ylabel': 'yaxis',
'vmin': 'zmin', 'vmax': 'zmax',
}
# make new dict updating keys and values
converted = {}
for key, value in kwargs.items():
if key in keys_converter:
new_key = keys_converter[key]
if key == 'xlabel':
converted[new_key] = {'title_text': value,
'automargin': True,
'titlefont': {'size': kwargs.get('fontsize', 30)}}
if key == 'ylabel':
converted[new_key] = {'title_text': value,
'titlefont': {'size': kwargs.get('fontsize', 30)},
'automargin': True,
'autorange': 'reversed'}
else:
converted[new_key] = value
else:
converted[key] = value
return converted
@staticmethod
def channelize_image(image, total_channels, color=None, greyscale=False, opacity=None):
""" Channelize an image. Can be used to make an opaque rgb or grayscale image.
"""
# case of a partially channelized image
if image.ndim == 3:
if image.shape[-1] == total_channels:
return image
background = np.zeros((*image.shape[:-1], total_channels))
background[:, :, :image.shape[-1]] = image
if opacity is not None:
background[:, :, -1] = opacity
return background
# case of non-channelized image
if isinstance(color, str):
color = ColorConverter().to_rgb(color)
background = np.zeros((*image.shape, total_channels))
for i, value in enumerate(color):
background[:, :, i] = image * value
# in case of greyscale make all 3 channels equal to supplied image
if greyscale:
for i in range(3):
background[:, :, i] = image
# add opacity if needed
if opacity is not None:
background[:, :, -1] = opacity * (image != 0).astype(int)
return background
@staticmethod
def save_and_show(fig, show=True, savepath=None, **kwargs):
""" Save and show plot if needed.
"""
save_kwargs = kwargs.get('save', {})
# save if necessary and render
if savepath is not None:
fig.write_image(savepath, **save_kwargs)
if show:
fig.show()
else:
fig.close()
@classmethod
@deprecated(DEPRECATION_MESSAGE)
def single(cls, image, **kwargs):
""" Plot single image/heatmap using plotly.
Parameters
----------
image : np.ndarray
2d-array for plotting.
kwargs : dict
max_size : int
maximum size of a rendered image.
title : str
title of rendered image.
zmin : float
the lowest brightness-level to be rendered.
zmax : float
the highest brightness-level to be rendered.
opacity : float
transparency-level of the rendered image
xaxis : dict
controls the properties of xaxis-labels; uses plotly-format.
yaxis : dict
controls the properties of yaxis-labels; uses plotly-format.
slice : tuple
sequence of slice-objects for slicing the image to a lesser one.
order_axes : tuple
tuple of ints; defines the order of axes for transposition operation
applied to the image.
other
"""
kwargs = cls.convert_kwargs('single', kwargs)
# update defaults to make total dict of kwargs
defaults = {'reversescale': True,
'colorscale': 'viridis',
'opacity' : 1.0,
'max_size' : 600,
'order_axes': (1, 0),
'slice': (slice(None, None), slice(None, None))}
ax_params = {**defaults, **kwargs}
# form different groups of kwargs
render_kwargs = filter_parameters(ax_params, ['reversescale', 'colorscale', 'opacity', 'showscale'])
label_kwargs = filter_parameters(ax_params, ['xaxis', 'yaxis', 'coloraxis_colorbar', 'title'])
slc = ax_params['slice']
# calculate canvas sizes
width, height = image.shape[1], image.shape[0]
coeff = ax_params['max_size'] / max(width, height)
width = coeff * width
height = coeff * height
# plot the image and set titles
plot_data = go.Heatmap(z=np.transpose(image, axes=ax_params['order_axes'])[slc], **render_kwargs)
fig = go.Figure(data=plot_data)
fig.update_layout(width=width, height=height, **label_kwargs)
cls.save_and_show(fig, **ax_params)
@classmethod
@deprecated(DEPRECATION_MESSAGE)
def overlap(cls, images, **kwargs):
""" Plot several images on one canvas using plotly: render the first one in greyscale
        and the rest in opaque 'rgb' channels, one channel for each image.
Supports up to four images in total.
Parameters
----------
images : list/tuple
sequence of 2d-arrays for plotting. Can store up to four images.
kwargs : dict
max_size : int
maximum size of a rendered image.
title : str
title of rendered image.
opacity : float
opacity of 'rgb' channels.
xaxis : dict
controls the properties of xaxis-labels; uses plotly-format.
yaxis : dict
controls the properties of yaxis-labels; uses plotly-format.
slice : tuple
sequence of slice-objects for slicing the image to a lesser one.
order_axes : tuple
tuple of ints; defines the order of axes for transposition operation
applied to the image.
other
"""
kwargs = cls.convert_kwargs('overlap', kwargs)
# update defaults to make total dict of kwargs
defaults = {'coloraxis_colorbar': {'title': 'amplitude'},
'colors': ('red', 'green', 'blue'),
'opacity' : 1.0,
'title': 'Seismic inline',
'max_size' : 600,
'order_axes': (1, 0),
'slice': (slice(None, None), slice(None, None))}
ax_params = {**defaults, **kwargs}
# form different groups of kwargs
render_kwargs = filter_parameters(ax_params, ['zmin', 'zmax'])
label_kwargs = filter_parameters(ax_params, ['xaxis', 'yaxis', 'coloraxis_colorbar', 'title'])
slc = ax_params['slice']
# calculate canvas sizes
width, height = images[0].shape[1], images[0].shape[0]
coeff = ax_params['max_size'] / max(width, height)
width = coeff * width
height = coeff * height
# manually combine first image in greyscale and the rest ones colored differently
combined = cls.channelize_image(255 * np.transpose(images[0], axes=ax_params['order_axes']),
total_channels=4, greyscale=True)
for i, img in enumerate(images[1:]):
color = ax_params['colors'][i]
combined += cls.channelize_image(255 * np.transpose(img, axes=ax_params['order_axes']),
total_channels=4, color=color, opacity=ax_params['opacity'])
plot_data = go.Image(z=combined[slc], **render_kwargs) # plot manually combined image
# plot the figure
fig = go.Figure(data=plot_data)
fig.update_layout(width=width, height=height, **label_kwargs)
cls.save_and_show(fig, **ax_params)
@classmethod
@deprecated(DEPRECATION_MESSAGE)
def rgb(cls, image, **kwargs):
""" Plot one image in 'rgb' using plotly.
Parameters
----------
image : np.ndarray
3d-array containing channeled rgb-image.
kwargs : dict
max_size : int
maximum size of a rendered image.
title : str
title of the rendered image.
xaxis : dict
controls the properties of xaxis-labels; uses plotly-format.
yaxis : dict
controls the properties of yaxis-labels; uses plotly-format.
slice : tuple
sequence of slice-objects for slicing the image to a lesser one.
order_axes : tuple
tuple of ints; defines the order of axes for transposition operation
applied to the image.
other
"""
kwargs = cls.convert_kwargs('rgb', kwargs)
# update defaults to make total dict of kwargs
defaults = {'coloraxis_colorbar': {'title': 'depth'},
'max_size' : 600,
'order_axes': (1, 0, 2),
'slice': (slice(None, None), slice(None, None))}
ax_params = {**defaults, **kwargs}
# form different groups of kwargs
render_kwargs = filter_parameters(ax_params, [])
label_kwargs = filter_parameters(ax_params, ['xaxis', 'yaxis', 'coloraxis_colorbar', 'title'])
slc = ax_params['slice']
# calculate canvas sizes
width, height = image.shape[1], image.shape[0]
coeff = ax_params['max_size'] / max(width, height)
width = coeff * width
height = coeff * height
# plot the image and set titles
plot_data = go.Image(z=np.transpose(image, axes=ax_params['order_axes'])[slc], **render_kwargs)
fig = go.Figure(data=plot_data)
fig.update_layout(width=width, height=height, **label_kwargs)
cls.save_and_show(fig, **ax_params)
@classmethod
@deprecated(DEPRECATION_MESSAGE)
def separate(cls, images, **kwargs):
""" Plot several images on a row of canvases using plotly.
TODO: add grid support.
Parameters
----------
images : list/tuple
sequence of 2d-arrays for plotting.
kwargs : dict
max_size : int
maximum size of a rendered image.
title : str
title of rendered image.
xaxis : dict
controls the properties of xaxis-labels; uses plotly-format.
yaxis : dict
controls the properties of yaxis-labels; uses plotly-format.
slice : tuple
sequence of slice-objects for slicing the image to a lesser one.
order_axes : tuple
tuple of ints; defines the order of axes for transposition operation
applied to the image.
other
"""
kwargs = cls.convert_kwargs('separate', kwargs)
# defaults
defaults = {'max_size' : 600,
'order_axes': (1, 0),
'slice': (slice(None, None), slice(None, None))}
grid = (1, len(images))
ax_params = {**defaults, **kwargs}
# form different groups of kwargs
render_kwargs = filter_parameters(ax_params, [])
label_kwargs = filter_parameters(ax_params, ['title'])
xaxis_kwargs = filter_parameters(ax_params, ['xaxis'])
yaxis_kwargs = filter_parameters(ax_params, ['yaxis'])
slc = ax_params['slice']
# make sure that the images are greyscale and put them each on separate canvas
fig = make_subplots(rows=grid[0], cols=grid[1])
for i in range(grid[1]):
img = cls.channelize_image(255 * np.transpose(images[i], axes=ax_params['order_axes']),
total_channels=4, greyscale=True, opacity=1)
fig.add_trace(go.Image(z=img[slc], **render_kwargs), row=1, col=i + 1)
fig.update_xaxes(row=1, col=i + 1, **xaxis_kwargs['xaxis'])
fig.update_yaxes(row=1, col=i + 1, **yaxis_kwargs['yaxis'])
fig.update_layout(**label_kwargs)
cls.save_and_show(fig, **ax_params)
def show_3d(x, y, z, simplices, title, zoom_slice, colors=None, show_axes=True, aspect_ratio=(1, 1, 1),
axis_labels=None, width=1200, height=1200, margin=(0, 0, 20), savepath=None,
images=None, resize_factor=2, colorscale='Greys', **kwargs):
""" Interactive 3D plot for some elements of cube.
Parameters
----------
x, y, z : numpy.ndarrays
Triangle vertices.
simplices : numpy.ndarray
(N, 3) array where each row represent triangle. Elements of row are indices of points
that are vertices of triangle.
title : str
Title of plot.
zoom_slice : tuple of slices
Crop from cube to show.
colors : list or None
List of colors for each simplex.
show_axes : bool
Whether to show axes and their labels.
aspect_ratio : tuple of floats.
Aspect ratio for each axis.
axis_labels : tuple
        Title for each axis.
width, height : number
Size of the image.
margin : tuple of ints
Added margin for each axis, by default, (0, 0, 20).
savepath : str
Path to save interactive html to.
images : list of tuples
Each tuple is triplet of image, location and axis to load slide from seismic cube.
resize_factor : float
        Resize factor for seismic slides. Used to speed up loading and plotting of seismic slices.
colorscale : str
Colormap for seismic slides.
kwargs : dict
Other arguments of plot creation.
"""
#pylint: disable=too-many-arguments
# Arguments of graph creation
kwargs = {
'title': title,
'colormap': [plt.get_cmap('Depths')(x) for x in np.linspace(0, 1, 10)],
'edges_color': 'rgb(70, 40, 50)',
'show_colorbar': False,
'width': width,
'height': height,
'aspectratio': {'x': aspect_ratio[0], 'y': aspect_ratio[1], 'z': aspect_ratio[2]},
**kwargs
}
if colors is not None:
fig = ff.create_trisurf(x=x, y=y, z=z, color_func=colors, simplices=simplices, **kwargs)
else:
fig = ff.create_trisurf(x=x, y=y, z=z, simplices=simplices, **kwargs)
if images is not None:
for image, loc, axis in images:
shape = image.shape
image = cv2.resize(image, tuple(np.array(shape) // resize_factor))[::-1]
grid = np.meshgrid(
np.linspace(0, shape[0], image.shape[0]),
np.linspace(0, shape[1], image.shape[1])
)
if axis == 0:
x, y, z = loc * np.ones_like(image), grid[0].T + zoom_slice[1].start, grid[1].T + zoom_slice[2].start
elif axis == 1:
y, x, z = loc * np.ones_like(image), grid[0].T + zoom_slice[0].start, grid[1].T + zoom_slice[2].start
else:
z, x, y = loc * np.ones_like(image), grid[0].T + zoom_slice[0].start, grid[1].T + zoom_slice[1].start
fig.add_surface(x=x, y=y, z=z, surfacecolor=np.flipud(image),
showscale=False, colorscale='Greys')
# Update scene with title, labels and axes
fig.update_layout(
{
'scene': {
'xaxis': {
'title': axis_labels[0] if show_axes else '',
'showticklabels': show_axes,
'range': [zoom_slice[0].stop + margin[0], zoom_slice[0].start - margin[0]]
},
'yaxis': {
'title': axis_labels[1] if show_axes else '',
'showticklabels': show_axes,
'range': [zoom_slice[1].start + margin[1], zoom_slice[1].stop - margin[1]]
},
'zaxis': {
'title': axis_labels[2] if show_axes else '',
'showticklabels': show_axes,
'range': [zoom_slice[2].stop + margin[2], zoom_slice[2].start - margin[2]]
},
'camera_eye': {
"x": 1.25, "y": 1.5, "z": 1.5
},
}
}
)
fig.show()
if savepath:
fig.write_html(savepath)
|
# -*- coding: utf-8 -*-
host_reduction_timer = 0
host_quantize_timer = 0
host_unquantize_timer = 0
host_average_timer = 0
host_c_timer = 0
alloc_dealloc_timer = 0
def reset_timers():
    global host_reduction_timer, host_quantize_timer, host_unquantize_timer, host_average_timer, host_c_timer, alloc_dealloc_timer
    host_reduction_timer = 0
    host_quantize_timer = 0
host_unquantize_timer = 0
host_average_timer = 0
host_c_timer = 0
alloc_dealloc_timer = 0
|
# alexnet.py COPYRIGHT Fujitsu Limited 2021
#!/usr/bin/env python
# coding: utf-8
##### Reference #####
# https://github.com/sh-tatsuno/pytorch/blob/master/tutorials/Pytorch_Tutorials.ipynb
# https://github.com/sh-tatsuno/pytorch/blob/master/tutorials/Learning_PyTorch_with_Examples.ipynb
# https://pytorch.org/tutorials/beginner/examples_autograd/two_layer_net_custom_function.html
# how to check intermediate gradient
# https://tutorialmore.com/questions-1905405.htm
# https://discuss.pytorch.org/t/why-cant-i-see-grad-of-an-intermediate-variable/94/5
# AlexNet for cifar10
# http://cedro3.com/ai/pytorch-alexnet/
# ====================================
# how to run DNN training with pytorch
# 1. import library
# 2. load dataset
# 3. define network model
# - network structure
# - loss function
# - optimizer
# 4. run training
# 5. run test
# ====================================
# import library
import torch
import torch.nn as nn
import torch.nn.functional as F
# ====================================
## To change "channels for conv layer" & "nodes for fc layer" by pruning, a custom model is defined.
# for CIFAR-10
class AlexNet(nn.Module):
def __init__(
self,
num_classes=10,
out_ch_conv1=64,
out_ch_conv2=256,
out_ch_conv3=384,
out_ch_conv4=256,
out_ch_conv5=256,
out_ch_fc1=4096,
out_ch_fc2=4096
):
super(AlexNet, self).__init__()
self.conv1 = nn.Conv2d(3, out_ch_conv1, kernel_size=3, stride=1, padding=1)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(out_ch_conv1, out_ch_conv2, kernel_size=5, padding=2)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv3 = nn.Conv2d(out_ch_conv2, out_ch_conv3, kernel_size=3, padding=1)
self.conv4 = nn.Conv2d(out_ch_conv3, out_ch_conv4, kernel_size=3, padding=1)
self.conv5 = nn.Conv2d(out_ch_conv4, out_ch_conv5, kernel_size=3, padding=1)
self.pool5 = nn.MaxPool2d(kernel_size=2, stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((4, 4))
self.drop1 = nn.Dropout()
self.fc1 = nn.Linear(out_ch_conv5 * 4 * 4, out_ch_fc1)
self.drop2 = nn.Dropout()
self.fc2 = nn.Linear(out_ch_fc1, out_ch_fc2)
self.fc3 = nn.Linear(out_ch_fc2, num_classes)
def forward(self, x):
x = self.conv1(x)
x = F.relu(x)
x = self.pool1(x)
x = self.conv2(x)
x = F.relu(x)
x = self.pool2(x)
x = self.conv3(x)
x = F.relu(x)
x = self.conv4(x)
x = F.relu(x)
x = self.conv5(x)
x = F.relu(x)
x = self.pool5(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.drop1(x)
x = self.fc1(x)
x = F.relu(x)
x = self.drop2(x)
x = self.fc2(x)
x = F.relu(x)
x = self.fc3(x)
return x
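# Minimal smoke-test sketch, assuming CIFAR-10 sized 3x32x32 inputs as in the comments above;
# it only checks that a forward pass through the default configuration produces the expected shape.
if __name__ == "__main__":
    model = AlexNet(num_classes=10)
    dummy = torch.randn(2, 3, 32, 32)  # batch of 2 random CIFAR-10 sized images
    out = model(dummy)
    print(out.shape)  # expected: torch.Size([2, 10])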
|
from subprocess import call
from datetime import datetime
import os
import pandas as pd
from sty import fg, rs
import time
import csv
import json
import re
import sys
import requests
import shutil
start_time = time.time()
headers_Get = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate',
'DNT': '1',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1'
}
html_tags = {
'knowledge_panel': 'kp-blk knowledge-panel',
'claimed': "Own this business?",
'name': "kno-ecr-pt PZPZlf gsmt",
'summary': "kc:/local:one line summary",
'stars': "kc:/collection/knowledge_panels/local_reviewable:star_score",
'comments': "t-h6pVaOIWfNg",
'web_review': "kc:/location/location:third_party_aggregator_ratings",
'phone': 'LrzXr zdqRlf kno-fv',
# 'days': "kc:/location/location:hours",
'address': "kc:/location/location:address",
'website': "IzNS7c duf-h",
'gmap': "rhsl4 rhsmap3col",
'visiting': "kc:/local:plan your visit"
}
html_regexes = {
'name': '<span>(.*)</span>',
'summary': '<span class="YhemCb">(.*?)</span>',
'stars': 'aria-label="(.*?)"',
'comments': '<span>(.*)</span>',
# 'web_review': 'aria-label="(.*?)"',
# 'web_review': 'href="(.*?)"',
'web_review': '(.*)',
'phone': '<span>(.*?)</span>',
'hours': '<td>(.*)</td>',
'address': '<span class="LrzXr">(.*)</span>',
'website': 'href="(.*?)"',
'gmap': 'data-url="(.*?)"',
'visiting': '<b>(.*)</b>'
}
# days = ["Sunday", "Monday", "Tuesday",
# "Wednesday", "Thursday", "Friday", "Saturday"]
csv_data = 'results.csv'
csv_data_true = 'results_true.csv'
csv_data_false = 'results_false.csv'
good_res = 0
bad_res = 0
EDITOR = os.environ.get('EDITOR') if os.environ.get('EDITOR') else 'vim'
def current_time():
return datetime.now().strftime('%Y-%m-%d-%H-%M')
def google(q):
s = requests.Session()
q = '+'.join(q.casefold().replace(
'&', ' and ').replace("'", ' ').replace('!', '').replace('é', 'e').split())
url = 'https://www.google.com/search?q=' + q + '&ie=utf-8&oe=utf-8'
r = s.get(url, headers=headers_Get)
return r.text
def get_string_after_tag(string, tag, regex, distance):
if(tag not in string):
return None
index = string.find(tag)
substr = string[index: index+distance]
if re.search(regex, substr):
return re.search(regex, substr).group(1)
else:
return None
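# Illustrative sketch of `get_string_after_tag` (the HTML snippet is hypothetical):
# it finds `tag` in the string, keeps the next `distance` characters and applies `regex`
# to that window, e.g.
#   get_string_after_tag('<td class="phone"><span>555-1234</span>', 'phone', '<span>(.*?)</span>', 200)
#   # -> '555-1234'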
def get_details(query):
html_results = google(query)
results = {'query': query}
has_knowledge_panel = html_tags['knowledge_panel'] in html_results
# print(html_results)
if(has_knowledge_panel):
results['query'] = query.replace(
'&', ' and ').replace("'", ' ').replace('!', '')
results['exists'] = True
results['name'] = get_string_after_tag(
html_results, html_tags['name'], html_regexes['name'], 500)
results['claimed'] = html_tags['claimed'] not in html_results
summary = get_string_after_tag(
html_results, html_tags['summary'], html_regexes['summary'], 600)
if(summary):
results['summary'] = summary
stars = get_string_after_tag(
html_results, html_tags['stars'], html_regexes['stars'], 500)
if(stars):
results['stars'] = stars.split(":")[1].split(" sur")[0]
comments = get_string_after_tag(
html_results, html_tags['comments'], html_regexes['comments'], 500)
if(comments):
results['comments'] = comments.split("\xa0avis")[0]
web_review = get_string_after_tag(
html_results, html_tags['web_review'], html_regexes['web_review'], 2500)
if(web_review):
web_review_all = re.findall(
                r'(?:href=[\'"])([:/.A-z?<_&\s=>0-9;-]+)', web_review)
web_review_1 = web_review_all[0]
results['web_review_1'] = web_review_1
if len(web_review_all) > 1:
web_review_2 = web_review_all[1]
results['web_review_2'] = web_review_2
phone_number = get_string_after_tag(
html_results, html_tags['phone'], html_regexes['phone'], 200)
if(phone_number):
results['phone_number'] = phone_number
address = get_string_after_tag(
html_results, html_tags['address'], html_regexes['address'], 1000)
if(address):
results['address'] = address
website = get_string_after_tag(
html_results, html_tags['website'], html_regexes['website'], 200)
if(website):
results['website'] = website.split("/?")[0]
gmap = get_string_after_tag(
html_results, html_tags['gmap'], html_regexes['gmap'], 1000)
if(gmap):
# results['gmap'] = gmap
        gmap_lat = re.findall(r"/@(-?[\d\.]*)", gmap)
        gmap_lng = re.findall(r"/@[-?\d\.]*\,([-?\d\.]*)", gmap)
results['gmap_lat'] = gmap_lat[0]
results['gmap_lng'] = gmap_lng[0]
visiting = get_string_after_tag(
html_results, html_tags['visiting'], html_regexes['visiting'], 500)
if(visiting):
results['visiting'] = visiting
# if html_tags['days'] in html_results:
# hours_index = html_results.find(html_tags['days'])
# hours_substr = html_results[hours_index: hours_index+2000]
# for day in days:
# results['{}_hours'.format(day)] = get_string_after_tag(
# hours_substr, day, html_regexes['hours'], 50)
else:
results['exists'] = False
return results
if __name__ == "__main__":
with open(sys.argv[1], newline='') as csvfile:
with open(csv_data, 'w', newline='') as results:
reader = csv.reader(csvfile)
fieldnames = [
'query',
'exists',
'name',
'summary',
'phone_number',
'address',
'website',
'web_review_1',
'web_review_2',
'claimed',
'stars',
'comments',
'visiting',
'gmap_lat',
'gmap_lng',
# "Friday_hours", "Saturday_hours", "Sunday_hours", "Monday_hours", "Tuesday_hours", "Wednesday_hours", "Thursday_hours"
]
writer = csv.DictWriter(results, fieldnames=fieldnames)
writer.writeheader()
for row in reader:
fetch = get_details(u" ".join(row))
if(fetch['exists'] == True):
writer.writerow(fetch)
print(fg.green, reader.line_num,
row[0], fetch['exists'], fg.rs)
else:
fetch = get_details(u" ".join(row))
writer.writerow(fetch)
print(fg.li_cyan, "AGAIN!", reader.line_num,
row[0], fetch['exists'], fg.rs)
if(fetch['exists'] == False):
print(fg.red, "... NOPE!!!", fg.rs)
# CLEAN FILES!
with open(csv_data, 'r') as inp, open(csv_data_false, 'w') as out:
writer = csv.writer(out)
for row in csv.reader(inp):
if row[1] != "True":
writer.writerow(row)
with open(csv_data, 'r') as inp, open(csv_data_true, 'w') as out:
writer = csv.writer(out)
for row in csv.reader(inp):
if row[1] != "False":
writer.writerow(row)
df = pd.read_csv(csv_data_false)
# df = df.drop(df.loc[:, 'exists':'gmap_lng'].columns, axis=1)
df = df.drop(df.iloc[:, 1:14].columns, axis=1)
df.to_csv(csv_data_false, header=False, index=False)
# GET THE COUNT!
print('')
print(" 🌈 🦄 💨")
print('')
with open(csv_data_true) as f:
total = sum(1 for line in f)
good_res = total-1
print(fg.li_green, "😎 total good data: ", total-1, fg.rs)
with open(csv_data_false) as f:
total = sum(1 for line in f)
bad_res = total
print(fg.li_red, "😭 total bad data: ", total, fg.rs)
print('')
# COPY FILES INTO TIMESTAMPS FOLDER IF NEEDED
if(good_res > 0):
os.mkdir(os.path.join('./', str(current_time())))
shutil.copy(csv_data, str(current_time()))
shutil.copy(csv_data_false, str(current_time()))
shutil.copy(csv_data_true, str(current_time()))
# REPORT
mybad = (bad_res * 100)/(good_res + bad_res)
elapsed_time = time.time() - start_time
print(fg.li_yellow, "🤖 BTW! Done in: ", time.strftime(
"%H:%M:%S", time.gmtime(elapsed_time)), " with ", "{0:.2f}".format(round(mybad, 2)), "% ", "errors", fg.rs)
try:
input_ = raw_input
except NameError:
input_ = input
def query_yes_no(question, default=False):
yes_list = ["yes", "y"]
no_list = ["no", "n"]
default_dict = {
None: "[y/n]",
True: "[Y/n]",
False: "[y/N]",
}
default_str = default_dict[default]
prompt_str = "%s %s " % (question, default_str)
while True:
choice = input_(prompt_str).lower()
if not choice and default is not None:
return default
if choice in yes_list:
return True
if choice in no_list:
return False
notification_str = "Please respond with 'y' or 'n'"
print(notification_str)
q1 = fg.li_yellow + " 🤖 Do you want to open " + \
csv_data_false + " inside " + EDITOR + " ?" + fg.rs
qq = fg.li_yellow + " 🤖 Bye..." + fg.rs
print('')
edit_false_data = query_yes_no(q1)
if edit_false_data == True:
call([EDITOR, csv_data_false])
elif edit_false_data == False:
print(qq)
    quit()
|
"""Test FOLIO Operators and functions."""
import pytest
import requests
from pytest_mock import MockerFixture
from ils_middleware.tasks.folio.login import FolioLogin
@pytest.fixture
def mock_request(monkeypatch, mocker: MockerFixture):
def mock_post(*args, **kwargs):
post_response = mocker.stub(name="post_result")
post_response.status_code = 201
post_response.headers = {"x-okapi-token": "some_jwt_token"}
return post_response
def mock_raise_for_status(*args, **kwargs):
error_response = mocker.stub(name="post_error")
error_response.status_code = 500
error_response.text = "Internal server error"
monkeypatch.setattr(requests, "post", mock_post)
monkeypatch.setattr(requests.Response, "raise_for_status", mock_raise_for_status)
# <Response [201]>
def test_valid_login(mock_request):
assert (
FolioLogin(
url="https://okapi-folio.dev.sul.stanford.edu/authn/login",
username="DEVSYS",
password="APASSWord",
tenant="sul",
)
== "some_jwt_token"
)
def test_missing_url():
with pytest.raises(KeyError, match="url"):
FolioLogin()
def test_missing_username():
with pytest.raises(KeyError, match="username"):
FolioLogin(url="https://test-login.com")
def test_missing_password():
with pytest.raises(KeyError, match="password"):
FolioLogin(url="https://test-login.com", username="DEVSYS")
def test_missing_tenant():
with pytest.raises(KeyError, match="tenant"):
FolioLogin(url="https://test-login.com", username="DEVSYS", password="PASS")
|
import numpy as np
import pandas as pd
import threading
from sklearn.externals import joblib
from sklearn.ensemble import RandomForestRegressor
from pprint import pprint
df = pd.read_csv('https://drive.google.com/uc?export=download&id=1XoV8SfvHmzaxRuDRe81OWSQu10dYTbO5',sep=',')
print("Number of data points: %d \n" % df.shape[0])
print(df.columns.values)
# Drop string field and id field
# print("The", df.shape[1], "features (and their data types) are: \n ", df.dtypes, "\n")
# Partition the features from the class to predict
df_X = df.iloc[:, 2:12].copy()
df_X = pd.get_dummies(df_X)
print(df_X.head())
df_y = df[df.columns[16]].copy()
print(df_y.head())
from sklearn.model_selection import train_test_split
# (random_state): we use a fixed random seed so we get the same results every time.
X_train, X_test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.2, random_state=0)
print ("\nNumber of training instances: ", len(X_train), "\nNumber of test instances: ", len(X_test))
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
#Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 1300, stop = 1700, num = 50)]
#Number of features to consider at every split
max_features = ['auto', 'sqrt']
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(30, 80, num = 10)]
max_depth.append(None)
# Method of selecting samples for training each tree
bootstrap = [True, False]
min_samples_split = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
# Create the random grid
random_grid = {
'min_samples_split': min_samples_split,
'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'bootstrap': bootstrap
}
# print("\nDataset description: \n", X_train.describe())
print("Creating model")
model = RandomForestRegressor(n_jobs = -1)
#pprint(model.get_params())
model = RandomizedSearchCV(estimator = model, param_distributions = random_grid, cv = 4, verbose=2, n_jobs = -1, n_iter = 150)
print("Fitting model")
model.fit(X_train, y_train)
pprint(model.best_params_)
model = model.best_estimator_
print("Saving model")
joblib.dump(model, "../pklfiles/RandomForestRegressorRandomizedSearchCV2Repayment.pkl")
#print("Loading model")
#model = joblib.load("RandomForestRegressor(1000)forcolnofortuning.pkl")
print("Predicting model")
predictions = model.predict(X_test)
print("Scoring model (R^2)")
print(model.score(X_test, y_test))
errors = abs(predictions - y_test)
print('Mean Absolute Error:', round(np.mean(errors), 2), 'degrees.')
mape = 100 * (errors / y_test)
# Calculate and display accuracy
accuracy = 100 - np.mean(mape)
print('Accuracy:', round(accuracy, 2), '%.')
#print(str(X_train[0:1]))
#print(str(y_train[0:1]))
#print(model.predict(X_train[0:1]))
#print(str(X_test[0:1]))
#print(str(y_test[0:1]))
#print(model.predict(X_test[0:1]))
|
from django.contrib import admin
from .models import SearchQuery
# Register your models here.
admin.site.register(SearchQuery)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__title__ = ''
__author__ = 'xuzhao'
__email__ = 'xuzhao@zhique.design'
from rest_framework.routers import DefaultRouter
from .viewsets import CarouselViewSet, SocialAccountViewSet
router = DefaultRouter(trailing_slash=False)
router.register(r'carousels', CarouselViewSet)
router.register(r'social-accounts', SocialAccountViewSet)
app_name = 'customize'
urlpatterns = [
] + router.urls
|
#!/usr/bin/env python3
# @author: marcelf
'''
Utility functions to generate secure passwords and set up a GnuPG home directory
'''
ciphers256 = "TWOFISH CAMELLIA256 AES256"
ciphers192 = "CAMELLIA192 AES192"
ciphers128 = "CAMELLIA128 AES"
ciphersBad = "BLOWFISH IDEA CAST5 3DES"
digests = "SHA512 SHA384 SHA256 SHA224 RIPEMD160 SHA1"
compress = "ZLIB BZIP2 ZIP Uncompressed"
gpgconf = """# gpg.conf settings for key generation:
expert
allow-freeform-uid
allow-secret-key-import
trust-model tofu+pgp
tofu-default-policy unknown
enable-large-rsa
enable-dsa2
cert-digest-algo SHA512
default-preference-list {0} {1} {2} {3} {4} {5}
personal-cipher-preferences {0} {1} {2} {3}
personal-digest-preferences {4}
personal-compress-preferences {5}
""".format(ciphers256, ciphers192, ciphers128, ciphersBad, digests, compress)
agentconf = """# gpg-agent.conf settings for key generation:
default-cache-ttl 300
"""
import os
import sys
import string
from random import SystemRandom
from subprocess import check_output, CalledProcessError
# ================================================================
# public: flatten
# ================================================================
def flatten(list_of_lists):
'''
    Flattens a list of lists
    @param list_of_lists list of lists
    @return flattened list
    [[1,2,3],[4,5,6]]
    becomes
    [1,2,3,4,5,6]
'''
return [item for sublist in list_of_lists for item in sublist]
# ================================================================
# public: password_generator
# ================================================================
def password_generator(size=20, chars=string.ascii_letters + string.digits):
'''
generates random password with digits lower- and uppercase ascii
@param size length of password
@param chars chars to be select by random
@return password contains the generated password
'''
secrets = SystemRandom()
# Use secrets instead of random, cause random is very predictable
return ''.join(secrets.choice(chars) for _ in range(size))
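# Hedged usage sketch: both calls draw from SystemRandom, so output differs on every run.
#   password_generator()                                         # 20 chars, letters and digits
#   password_generator(size=32, chars=string.ascii_lowercase)    # 32 lowercase letters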
def create_gnupghome(path):
'''
creates a gnupg home with the configurations above
the home is only created if the path does not exist
@param path specifies the path of the GPG_HOME directory
'''
    if not os.path.exists(path):
print("Creating GPG_HOME under {0}.".format(path))
os.mkdir(path)
os.chmod(path, 0o700)
with open("{0}/{1}".format(path, "gpg.conf"), "w") as conf:
conf.write(gpgconf)
os.chmod("{0}/{1}".format(path, "gpg.conf"), 0o600)
with open("{0}/{1}".format(path, "gpg-agent.conf"), "w") as conf:
conf.write(agentconf)
os.chmod("{0}/{1}".format(path, "gpg-agent.conf"), 0o600) |
def test_distinct(man):
errors = []
G = man.setGraph("swapi")
count = 0
for i in G.query().V().distinct():
count += 1
if count != 39:
errors.append("Distinct %s != %s" % (count, 39))
count = 0
for i in G.query().V().distinct("_gid"):
count += 1
if count != 39:
errors.append("Distinct %s != %s" % (count, 39))
count = 0
for i in G.query().V().distinct("eye_color"):
count += 1
if count != 8:
errors.append("Distinct %s != %s" % (count, 8))
count = 0
for i in G.query().V().distinct("gender"):
count += 1
if count != 4:
errors.append("Distinct %s != %s" % (count, 4))
count = 0
for i in G.query().V().distinct("non-existent-field"):
count += 1
if count != 0:
errors.append("Distinct %s != %s" % (count, 0))
count = 0
for i in G.query().V().hasLabel("Character").as_("person").out().distinct("$person.name"):
count += 1
if count != 18:
errors.append("Distinct G.query().V().hasLabel(\"Person\").as_(\"person\").out().distinct(\"$person.name\") %s != %s" % (count, 18))
count = 0
for i in G.query().V().hasLabel("Character").as_("person").out().distinct("$person.eye_color"):
count += 1
if count != 8:
errors.append("Distinct G.query().V().hasLabel(\"Person\").as_(\"person\").out().distinct(\"$person.eye_color\") %s != %s" % (count, 8))
return errors
def test_distinct_multi(man):
errors = []
G = man.setGraph("swapi")
count = 0
o = {}
for i in G.query().V().as_("a").out().distinct(["$a.eye_color", "_gid"]).render(["$a.eye_color", "_gid"]):
if i[0] in o and o[i[0]] != i[1]:
errors.append("Non-unique pair returned: %s" % (i))
count += 1
if count != 29:
errors.append("Distinct multi %s != %s" % (count, 29))
return errors
|
#!/usr/bin/env python3
import sys
import os
from bioblend import galaxy
gi = galaxy.GalaxyInstance(url='https://usegalaxy.eu')
wfs = gi.workflows.get_workflows()
owners = ['wolfgang-maier', 'bgruening']
output_dir = 'workflows'
if not os.path.isdir(output_dir):
os.mkdir( output_dir )
for wf in wfs:
# if 'covid' in ",".join(wf['tags']).lower():
if 'covid' in wf['name'].lower():
if wf['deleted'] or not wf['published']:
continue
if wf['owner'] not in owners:
continue
# print( f"{wf['name']} {wf['tags']} {wf['owner']} {wf['update_time']}"
print( f"{wf['name']}" )
gi.workflows.export_workflow_to_local_path(wf['id'], output_dir)
|
## @file
# Hardware feature class definition.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
from ucollections import OrderedDict as SETTINGS
from register import REG
from cpuid import CPUID
from msr import MSR
PROCESSOR_FEATURES = {
"VMX" : {
"Description" : "Virtual-Machine Extentions",
"{Support}" : SETTINGS((
("CPUID[01].ECX.VMX", 1),
)),
"{Enable}" : SETTINGS((
("REG.CR4.VMXE", 1),
)),
"{Disable}" : SETTINGS((
("REG.CR4.VMXE", 0),
)),
"(Capabilities)" : [],
"[RelatedSettings]" : ["MSR[0x3A].Lock_bit",
"MSR[0x3A].Enable_VMX_inside_SMX_operation",
"MSR[0x3A].Enable_VMX_outside_SMX_operation"],
},
"SMX" : {
"Description" : "Safer Mode Extentions",
"{Support}" : SETTINGS((
("CPUID[01].ECX.SMX", 1),
)),
"{Enable}" : SETTINGS((
("REG.CR4.SMXE", 1),
)),
"{Disable}" : SETTINGS((
("REG.CR4.SMXE", 0),
)),
"(Capabilities)" : [],
"[RelatedSettings]" : ["MSR[0x3A].Lock_bit",
"MSR[0x3A].Enable_VMX_inside_SMX_operation",
"MSR[0x3A].Enable_VMX_outside_SMX_operation",
"MSR[0x3A].SENTER_Local_Function_Enables",
"MSR[0x3A].SENTER_Global_Enable"],
},
"SGX" : {
"Description" : "Software Guard Extentions",
"{Support}" : SETTINGS((
("CPUID[07].EBX.SGX", 1),
)),
"{Enable}" : SETTINGS((
("REG.CR4.SMXE", 1),
)),
"{Disable}" : SETTINGS((
("REG.CR4.SMXE", 0),
)),
"(Capabilities)" : ["CPUID[0x12,0].EAX",
"CPUID[0x12,0].EBX",
"CPUID[0x12,0].EDX",
"CPUID[0x12,1].EAX",
"CPUID[0x12,1].EBX",
"CPUID[0x12,1].ECX",
"CPUID[0x12,1].EDX",
"CPUID[0x12,2].EAX",
"CPUID[0x12,2].EBX",
"CPUID[0x12,2].ECX",
"CPUID[0x12,2].EDX",
],
"[RelatedSettings]" : ["MSR[0x3A].Lock_bit",
"MSR[0x3A].SGX_Launch_Control_Enable",
"MSR[0x3A].SGX_Global_Enable"],
},
"APIC" : {
"Description" : "Local APIC",
"{Support}" : SETTINGS((
("CPUID[01].EDX.APIC", 1),
)),
"{Enable}" : SETTINGS((
("MSR[0x1B].APIC_Global_Enable", 1),
)),
"{Disable}" : SETTINGS((
("MSR[0x1B].APIC_Global_Enable", 0),
("MSR[0x80F].APIC_Software_Enable", 0),
)),
"(Capabilities)" : [],
"[RelatedSettings]" : ["MSR[0x3A].Lock_bit",
"MSR[0x3A].SGX_Launch_Control_Enable",
"MSR[0x3A].SGX_Global_Enable"],
},
"X2APIC" : {
"Description" : "Extended XAPIC",
"{Support}" : SETTINGS((
("CPUID[01].ECX.x2APIC", 1),
)),
"{Enable}" : SETTINGS((
("MSR[0x1B].APIC_Global_Enable", 1),
("MSR[0x1B].Enable_x2APIC_mode", 1),
)),
"{Disable}" : SETTINGS((
("MSR[0x1B].Enable_x2APIC_mode", 0),
)),
"(Capabilities)" : [],
"[RelatedSettings]" : ["MSR[0x802]",
"MSR[0x803]",
"MSR[0x808]",
"MSR[0x80A]",
"MSR[0x80B]",
"MSR[0x80D]",
"MSR[0x80F]",
"MSR[0x810]",
"MSR[0x811]",
"MSR[0x812]",
"MSR[0x813]",
"MSR[0x814]",
"MSR[0x815]",
"MSR[0x816]",
"MSR[0x817]",
"MSR[0x818]",
"MSR[0x819]",
"MSR[0x81A]",
"MSR[0x81B]",
"MSR[0x81C]",
"MSR[0x81D]",
"MSR[0x81E]",
"MSR[0x81F]",
"MSR[0x820]",
"MSR[0x821]",
"MSR[0x822]",
"MSR[0x823]",
"MSR[0x824]",
"MSR[0x825]",
"MSR[0x826]",
"MSR[0x827]",
"MSR[0x828]",
"MSR[0x82F]",
"MSR[0x830]",
"MSR[0x832]",
"MSR[0x833]",
"MSR[0x834]",
"MSR[0x835]",
"MSR[0x836]",
"MSR[0x837]",
"MSR[0x838]",
"MSR[0x839]",
"MSR[0x83E]",
"MSR[0x83F]",
],
},
}
class FeatureClass(object):
DESC = {}
def __new__(Class, FeatureDesc):
for Obj in Class.DESC:
if Class.DESC[Obj] == FeatureDesc:
return Obj
    return super(FeatureClass, Class).__new__(Class)
def __init__(self, FeatureDesc):
FeatureClass.DESC[self] = FeatureDesc
def __getattr__(self, Name):
Desc = FeatureClass.DESC[self]
if Name in Desc:
return Desc[Name]
ActName = "{%s}" % Name
if ActName in Desc:
for Cond in Desc[ActName]:
if eval("%s != %s" % (Cond, Desc[ActName][Cond])):
return False
return True
ActName = "[%s]" % Name
if ActName in Desc:
Result = {}
for Cond in Desc[ActName]:
try:
Result[Cond] = eval(Cond)
except:
Result[Cond] = None
return Result
return None
def __setattr__(self, Name, Settings):
Desc = FeatureClass.DESC[self]
ActName = "{%s}" % Name
if ActName in Desc:
for Reg in Desc[ActName]:
if Reg in Settings:
Data = Settings[Reg]
else:
Data = Desc[ActName][Reg]
eval("%s = %s" % (Reg, Data))
class FeatureHelperClass(object):
def __getattr__(self, Name):
if Name in PROCESSOR_FEATURES:
return FeatureClass(PROCESSOR_FEATURES[Name])
return None
FEATURE = FeatureHelperClass()
if __name__ == "__main__":
import sys
  feature = getattr(FEATURE, sys.argv[1])  # first CLI argument names the feature
print (feature.Description)
for item in ["Support", "Enable", "RelatedSettings"]:
result = getattr(feature, item)
if isinstance(result, dict) or isinstance(result, SETTINGS):
print(" %s:" % item)
for n in result:
print(" %s:" % n, result[n])
else:
print(" %s:" % item, result)
|
from selenium.webdriver.common.by import By
class HomeLocators(object):
FIRST_ENTRY_TITLE = (By.CSS_SELECTOR, ".entries > li > h2 ")
LOGIN = (By.CSS_SELECTOR, '.metanav > a')
class LoginLocators(object):
TITLE = (By.CSS_SELECTOR, "h2")
SUBMIT = (By.CSS_SELECTOR, "#login")
ERROR = (By.CSS_SELECTOR, ".error")
|
from requirement import db, bcrypt, login_manager
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
class User(db.Model, UserMixin):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(length=30), nullable=False, unique=False)
username = db.Column(db.String(length=30), nullable=False, unique=True)
password_hash = db.Column(db.String(length=60), nullable=False, unique=True)
email = db.Column(db.String(length=50), nullable=False, unique=True)
projects = db.relationship('Project', backref='owned_user', lazy=True)
@property
def password(self):
        return self.password_hash  # return the stored hash; returning self.password would recurse forever
@password.setter
def password(self, plain_text_password):
self.password_hash = bcrypt.generate_password_hash(plain_text_password).decode('utf-8')
def check_password_correction(self, attempted_password):
return bcrypt.check_password_hash(self.password_hash, attempted_password)
class Project(db.Model):
id = db.Column(db.Integer(), primary_key=True)
name = db.Column(db.String(length=100), nullable=False)
description = db.Column(db.String(length=1024))
owner = db.Column(db.Integer, db.ForeignKey('user.id'))
project = db.relationship('Requirement', backref='requirement_owner', cascade="all,delete", lazy=True)
class Requirement(db.Model):
id = db.Column(db.Integer(), primary_key=True)
title = db.Column(db.String(length=400), nullable=False)
level = db.Column(db.Integer())
priority = db.Column(db.Integer())
req_type = db.Column(db.Integer())
changes = db.Column(db.Integer())
review = db.Column(db.Integer())
evaluation = db.Column(db.Integer())
evaluation_method = db.Column(db.Integer())
quality_factor = db.Column(db.Integer())
description = db.Column(db.String(length=1024))
project = db.Column(db.Integer(), db.ForeignKey('project.id'))
parent_id = db.Column(db.Integer, db.ForeignKey('requirement.id'))
parent = db.relationship('Requirement', remote_side='Requirement.id', back_populates='children', lazy=True)
children = db.relationship('Requirement', back_populates='parent', lazy=True)
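# Usage sketch (hypothetical values, for illustration only): assigning to the
# `password` property stores a bcrypt hash rather than the plain text, e.g.
#   user = User(name='Ada', username='ada', email='ada@example.com', password='plain-text')
#   user.check_password_correction('plain-text')  # -> True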
|
from django.urls import path
from chatchannels import consumers
app_name = 'chatchannels'
websocket_urlpatterns = [
path('connect/<chat_channel_id>', consumers.ChatChannelConsumer, name="connect")
]
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import re
import shutil
import sys
import pathlib
from qiime2.core import transform
from .base import FormatBase, ValidationError, _check_validation_level
class PathMakerDescriptor:
def __init__(self, file):
self.file = file
def __get__(self, obj, cls=None):
if obj is None:
raise Exception()
return getattr(obj, self.file.name).path_maker
class File:
def __init__(self, pathspec, *, format=None):
if format is None:
raise TypeError("Must provide a format.")
self.pathspec = pathspec
self.format = format
def __get__(self, obj, cls=None):
if obj is None:
return self
return BoundFile(self.name, self.pathspec, self.format, obj)
class FileCollection(File):
def __init__(self, pathspec, *, format=None):
super().__init__(pathspec, format=format)
self._path_maker = None
def set_path_maker(self, function):
self._path_maker = function
return PathMakerDescriptor(self)
def __get__(self, obj, cls=None):
if obj is None:
return self
if self._path_maker is None:
raise NotImplementedError()
return BoundFileCollection(self.name, self.pathspec, self.format,
obj, path_maker=self._path_maker)
class BoundFile:
@property
def mode(self):
return self._directory_format._mode
def __init__(self, name, pathspec, format, directory_format):
self.name = name
self.pathspec = pathspec
self.format = format
self._directory_format = directory_format
self._path_maker = lambda s: pathspec
def view(self, view_type):
from_type = transform.ModelType.from_view_type(self.format)
to_type = transform.ModelType.from_view_type(view_type)
transformation = from_type.make_transformation(to_type)
return transformation(self.path_maker())
def write_data(self, view, view_type, **kwargs):
# TODO: make `view_type` optional like in `Artifact.import_data`
if self.mode != 'w':
raise TypeError("Cannot use `set`/`add` when mode=%r" % self.mode)
from_type = transform.ModelType.from_view_type(view_type)
to_type = transform.ModelType.from_view_type(self.format)
transformation = from_type.make_transformation(to_type)
result = transformation(view)
result.path._move_or_copy(self.path_maker(**kwargs))
def _validate_members(self, collected_paths, level):
found_members = False
root = pathlib.Path(self._directory_format.path)
for path in collected_paths:
if re.fullmatch(self.pathspec, str(path.relative_to(root))):
if collected_paths[path]:
# Not a ValidationError, this just shouldn't happen.
raise ValueError("%r was already validated by another"
" field, the pathspecs (regexes) must"
" overlap." % path)
collected_paths[path] = True
found_members = True
self.format(path, mode='r').validate(level)
if not found_members:
raise ValidationError(
"Missing one or more files for %s: %r"
% (self._directory_format.__class__.__name__, self.pathspec))
@property
def path_maker(self):
def bound_path_maker(**kwargs):
# Must wrap in a naive Path, otherwise an OutPath would be summoned
# into this world, and would destroy everything in its path.
path = (pathlib.Path(self._directory_format.path) /
self._path_maker(self._directory_format, **kwargs))
# NOTE: path makers are bound to the directory format, so must be
# provided as the first argument which will look like `self` to
# the plugin-dev.
path.parent.mkdir(parents=True, exist_ok=True)
return path
return bound_path_maker
class BoundFileCollection(BoundFile):
def __init__(self, name, pathspec, format, directory_format, path_maker):
super().__init__(name, pathspec, format, directory_format)
self._path_maker = path_maker
def view(self, view_type):
raise NotImplementedError("Use `iter_views` instead.")
def iter_views(self, view_type):
# Don't want an OutPath, just a Path
root = pathlib.Path(self._directory_format.path)
paths = [fp for fp in sorted(root.glob('**/*'))
if re.match(self.pathspec, str(fp.relative_to(root)))]
from_type = transform.ModelType.from_view_type(self.format)
to_type = transform.ModelType.from_view_type(view_type)
transformation = from_type.make_transformation(to_type)
for fp in paths:
# TODO: include capture?
yield fp.relative_to(root), transformation(fp)
class _DirectoryMeta(type):
def __init__(self, name, bases, dct):
super().__init__(name, bases, dct)
if hasattr(self, '_fields'):
fields = self._fields.copy()
else:
fields = []
for key, value in dct.items():
if isinstance(value, File):
# TODO: validate that the paths described by `value` are unique
# within a DirectoryFormat
value.name = key
fields.append(key)
self._fields = fields
class DirectoryFormat(FormatBase, metaclass=_DirectoryMeta):
def validate(self, level='max'):
_check_validation_level(level)
if not self.path.is_dir():
raise ValidationError("%s is not a directory." % self.path)
collected_paths = {p: None for p in self.path.glob('**/*')
if not p.name.startswith('.') and
p.is_file()}
for field in self._fields:
getattr(self, field)._validate_members(collected_paths, level)
for path, value in collected_paths.items():
if value:
continue
if value is None:
raise ValidationError("Unrecognized file (%s) for %s."
% (path, self.__class__.__name__))
if hasattr(self, '_validate_'):
try:
self._validate_(level)
except ValidationError as e:
raise ValidationError(
"%s is not a(n) %s:\n\n%s"
% (self.path, self.__class__.__name__, str(e))
) from e
def save(self, path, ext=None):
path = str(path) # in case of pathlib.Path
path = path.rstrip('.')
# ignore the extension when saving a directory
shutil.copytree(self.path, path)
return path
class SingleFileDirectoryFormatBase(DirectoryFormat):
pass
def SingleFileDirectoryFormat(name, pathspec, format):
# TODO: do the same hack namedtuple does so we don't mangle globals
# (arguably the code is going to be broken if defined dynamically anyways,
# but better to find that out later than writing in the module namespace
    # even if it isn't called module-level [which it must be!])
df = type(name, (SingleFileDirectoryFormatBase,),
{'file': File(pathspec, format=format)})
df.__module__ = sys._getframe(1).f_globals.get('__name__', '__main__')
return df
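# Usage sketch (hypothetical names, following the factory above):
#   MyDirFmt = SingleFileDirectoryFormat('MyDirFmt', 'data.tsv', MyFileFormat)
# produces a DirectoryFormat subclass whose single member file 'data.tsv' is
# validated and transformed through MyFileFormat.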
|
import sys
import os
import platform
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import (QCoreApplication, QPropertyAnimation, QDate, QDateTime, QMetaObject, QPoint, QRect, QSize,
QTime, QUrl, QEvent)
from PySide2.QtGui import (QBrush, QColor, QConicalGradient, QCursor, QFont, QFontDatabase, QIcon, QKeySequence,
QLinearGradient, QPainter, QPixmap, QRadialGradient)
from PySide2.QtWidgets import *
from PySide2.QtCore import QFileInfo
from PySide2.QtPrintSupport import QPrinter, QPrintPreviewDialog
from ui_interface import Ui_MainWindow
class HYSTON(QMainWindow):
def __init__(self):
QMainWindow.__init__(self)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.setWindowTitle("Mthandizi")
self.setIcon()
self.create_menu()
self.ui.start_btn.clicked.connect(lambda: self.ui.textEdit.setText("Hello am hyston kayange from blantyre "
"i know am not weathly but Lord i have you , "
"you make awesome life for me and my family "
"you "
"you receive all the glory Lord i need you "
"more everyday every hour you deseverve "
"everything oh God "
"be there for me as am doing this project "
"amen father God "))
self.ui.printbutton.clicked.connect(self.clear)
self.show()
def create_menu(self):
mainMenu = self.menuBar()
fileMenu = mainMenu.addMenu('File')
viewMenu = mainMenu.addMenu('View')
editMenu = mainMenu.addMenu('Edit')
fontMenu = mainMenu.addMenu('Font')
helpMenu = mainMenu.addMenu('Help')
openAction = QAction(QIcon('open.png'), "Open", self)
openAction.setShortcut('Ctrl+O')
saveAction = QAction(QIcon('save.png'), "Save", self)
saveAction.setShortcut('Ctrl+S')
exitAction = QAction(QIcon('exit.png'), "Exit", self)
exitAction.setShortcut('Ctrl+X')
previewAction = QAction(QIcon('printpreview.png'), "Print Preview", self)
exitAction.triggered.connect(self.exit_app)
previewAction.triggered.connect(self.print_preview_dialog)
fileMenu.addAction(openAction)
fileMenu.addAction(saveAction)
fileMenu.addAction(exitAction)
viewMenu.addAction(previewAction)
def setIcon(self):
appIcon = QIcon("icon.png")
self.setWindowIcon(appIcon)
def exit_app(self):
self.close()
def print_preview_dialog(self):
printer = QPrinter(QPrinter.HighResolution)
previewDialog = QPrintPreviewDialog(printer, self)
previewDialog.paintRequested.connect(self.print_preview)
previewDialog.exec_()
def print_preview(self, printer):
self.ui.textEdit.print_(printer)
def clear(self):
self.ui.textEdit.clear()
if __name__ == '__main__':
app = QApplication(sys.argv)
window = HYSTON()
sys.exit(app.exec_())
|
import sys
def A():
n = sys.stdin.readline().rstrip()
if "3" in n:
print("YES")
elif int(n) % 3 == 0:
print("YES")
else:
print("NO")
def B():
mod = 10007
t = [0, 0, 1]
for _ in range(1001001):
t.append(t[-1] + t[-2] + t[-3])
t[-1] %= mod
n = int(sys.stdin.readline().rstrip())
print(t[n - 1])
def C():
pass
def D():
pass
if __name__ == "__main__":
# A()
B()
C()
D()
|
"""
This module have all the web elements in the
home page
"""
class HomePageLocators:
""" Home Page Class """
"""Xpaths for Home Page """
select_location_text_box = "//input[@placeholder='Select Location']"
select_location_list = "//label[@class = 'localityName']"
select_date_and_time_button = "//button[contains(@id, 'laterButton')]"
select_time_button = "//input[text()='Select Time']"
calender_table = "ui-datepicker-calendar"
dates_list = "//tbody/tr/td/a"
hours_list = "//tbody/tr/td/button[contains(@id, 'h') and contains(@class, 'validhour')]"
minutes_list = "//tbody/tr/td/button[contains(@id, 'm') and contains(@class, 'validminute')]"
start_ordering_button = "//button[text() = 'Start Ordering']"
place_order_dialog_box_button = "//button[text()='Ok! Place Order']"
|
# T12 fluorescent tubes (introduced in 1939) are no longer manufactured since T8 became widespread
import bpy
bpy.context.object.data.type = 'AREA'
lampdata = bpy.context.object.data
lampdata.size = 0.038
lampdata.size_y = 1.2192
lampdata.shadow_ray_samples_x = 1
lampdata.shadow_ray_samples_y = 2
lampdata.color = (0.901, 1.0, 0.979)
lampdata.energy = 2.14492#2300lm/21.446(=lux)*0.004*2.5(distance) *2 for distance is the point of half strength
lampdata.distance = 1.0 #dist values multiplied by 10 for area lights for same power as bulb/spot/...
#lampdata.falloff_type = 'INVERSE_SQUARE'
|
# @Author: chunyang.xu
# @Email: 398745129@qq.com
# @Date: 2020-06-08 14:02:33
# @Last Modified time: 2021-03-08 15:09:19
# @github: https://github.com/longfengpili
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from .redshift import RedshiftDB
from .sqlite import SqliteDB
from .mysql import MysqlDB
# from .snowflake import SnowflakeDB
__doc__ = "数据库接口"
__all__ = ['RedshiftDB', 'SqliteDB', 'MysqlDB']
|
#!/usr/bin/env python
# --------------------------------------------------
# Filename: Calculate_BBs.py
# Revision: 1.0
# Data: 2018/9/13
# Author: Yue Cao
# Email: ycao009@cs.ucr.edu
# Description: Merge new flipping branches (Basic block form) with target value to the branch set, and print out all new ones.
# Purpose: Check if there are new flipping targets. If there are no new flipping targets, no need to run Dynamic Taint Analysis.
# --------------------------------------------------
#Version 1.0
#Distinct variables are considered new
import sys
#file0 = "../Branches_BB_set.log"
#file2 = "../Branches_BB_set_bak.log"
file1 = sys.argv[1]
if len(sys.argv) == 6:
file0 = sys.argv[2]
file2 = sys.argv[3]
Model1_offset = int(sys.argv[4])
Model0_end = int(sys.argv[5])
f0 = file(file0, "r")
f = file(file1, "r")
f2 = file(file2, "w")
new = 0
Branches_set = set()
for line in f0: #Branches_BB_set
line = line.strip()
keys = line.split("#")
Branches_set.add(keys[0]) # currently only has one distinct BBOffset; which may miss cases if its min/max value is different
f2.write(line+'\n')
#keys = line.split(",")
#Edge_dict[(keys[0], keys[1])] = int(keys[2])
for line in f:
line = line.strip()
# compare the whole
# or compare the variable only
keys = line.split("#")
if keys[0] not in Branches_set:
#sys.stderr.write("New added: " + keys[0]+"\n")
f2.write(line+'\n')
# If branch is flipped on Model0 or Model1, we don't flip the branch from the other model
p0 = keys[0].find(",")
p1 = keys[0].find(":")
Model_index = keys[0][0:p0-1] #Model 0,b
BB_index = keys[0][p0+2:p1] #bb_index
Rest = keys[0][p1:]
Branches_set.add(keys[0])
#sys.stderr.write("Branches added: " + keys[0]+"\n")
index = int(BB_index)
if index <= Model0_end:
index = index+Model1_offset
new_BB_index = str(index)
new_str = Model_index+"1,b"+new_BB_index+Rest+"#"+keys[1]+"#"+keys[2]+"#"+keys[3]
f2.write(new_str+'\n')
Branches_set.add(Model_index+"1,b"+new_BB_index+Rest)
#sys.stderr.write("Branches added: " + Model_index+"1,b"+new_BB_index+Rest+"\n")
else:
index = index-Model1_offset
new_BB_index = str(index)
new_str = Model_index+"0,b"+new_BB_index+Rest+"#"+keys[1]+"#"+keys[2]+"#"+keys[3]
f2.write(new_str+'\n')
Branches_set.add(Model_index+"0,b"+new_BB_index+Rest)
#sys.stderr.write("Branches added: " + Model_index+"0,b"+new_BB_index+Rest+"\n")
new = new+1
# remove operands that have huge values
if len(keys[1]) > 10 or len(keys[2]) > 10:
continue
    print(line)
|
# -*- coding: utf-8 -*-
import urllib.request
import json
import requests
import os
path = 'result\\'
#id = '2093492691'
id = '2089576957'
proxy_addr = "122.241.72.191:808"
pic_num = 0
weibo_name = "-樱群-"
def use_proxy(url, proxy_addr):
req = urllib.request.Request(url)
req.add_header("User-Agent",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0")
proxy = urllib.request.ProxyHandler({'http': proxy_addr})
opener = urllib.request.build_opener(proxy, urllib.request.HTTPHandler)
urllib.request.install_opener(opener)
data = urllib.request.urlopen(req).read().decode('utf-8', 'ignore')
return data
def get_containerid(url):
data = use_proxy(url, proxy_addr)
content = json.loads(data).get('data')
for data in content.get('tabsInfo').get('tabs'):
if (data.get('tab_type') == 'weibo'):
containerid = data.get('containerid')
return containerid
def get_userInfo(id):
url = 'https://m.weibo.cn/api/container/getIndex?type=uid&value=' + id
data = use_proxy(url, proxy_addr)
content = json.loads(data).get('data')
profile_image_url = content.get('userInfo').get('profile_image_url')
description = content.get('userInfo').get('description')
profile_url = content.get('userInfo').get('profile_url')
verified = content.get('userInfo').get('verified')
guanzhu = content.get('userInfo').get('follow_count')
name = content.get('userInfo').get('screen_name')
fensi = content.get('userInfo').get('followers_count')
gender = content.get('userInfo').get('gender')
urank = content.get('userInfo').get('urank')
print("微博昵称:" + name + "\n" + "微博主页地址:" + profile_url + "\n" + "微博头像地址:" + profile_image_url + "\n" + "是否认证:" + str(
verified) + "\n" + "微博说明:" + description + "\n" + "关注人数:" + str(guanzhu) + "\n" + "粉丝数:" + str(
fensi) + "\n" + "性别:" + gender + "\n" + "微博等级:" + str(urank) + "\n")
def get_weibo(id, file):
global pic_num
i = 1
while True:
url = 'https://m.weibo.cn/api/container/getIndex?type=uid&value=' + id
weibo_url = 'https://m.weibo.cn/api/container/getIndex?type=uid&value=' + id + '&containerid=' + get_containerid(
url) + '&page=' + str(i)
try:
data = use_proxy(weibo_url, proxy_addr)
content = json.loads(data).get('data')
cards = content.get('cards')
if (len(cards) > 0):
for j in range(len(cards)):
print("-----正在爬取第" + str(i) + "页,第" + str(j) + "条微博------")
card_type = cards[j].get('card_type')
if (card_type == 9):
mblog = cards[j].get('mblog')
attitudes_count = mblog.get('attitudes_count')
comments_count = mblog.get('comments_count')
created_at = mblog.get('created_at')
reposts_count = mblog.get('reposts_count')
scheme = cards[j].get('scheme')
text = mblog.get('text')
if mblog.get('pics') != None:
# print(mblog.get('original_pic'))
# print(mblog.get('pics'))
pic_archive = mblog.get('pics')
for _ in range(len(pic_archive)):
pic_num += 1
print(pic_archive[_]['large']['url'])
imgurl = pic_archive[_]['large']['url']
img = requests.get(imgurl)
f = open(path + weibo_name + '\\' + str(pic_num) + str(imgurl[-4:]),
                                         'ab')  # save the image; media files must be opened in binary mode ('b')
                                f.write(img.content)  # write the binary content of the media file
f.close()
with open(file, 'a', encoding='utf-8') as fh:
fh.write("----第" + str(i) + "页,第" + str(j) + "条微博----" + "\n")
fh.write("微博地址:" + str(scheme) + "\n" + "发布时间:" + str(
created_at) + "\n" + "微博内容:" + text + "\n" + "点赞数:" + str(
attitudes_count) + "\n" + "评论数:" + str(comments_count) + "\n" + "转发数:" + str(
reposts_count) + "\n")
i += 1
else:
break
except Exception as e:
print(e)
pass
if __name__ == "__main__":
if os.path.isdir(path + weibo_name):
pass
else:
os.mkdir(path + weibo_name)
file = path + weibo_name + '\\' + weibo_name + ".txt"
get_userInfo(id)
get_weibo(id, file) |
# Copyright (C) 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
# pylint: disable=C0301,W0622,R0914
import argparse
import hashlib
import json
import os
import subprocess
import tempfile
import yaml
from mmcv.utils import Config
MMDETECTION_TOOLS = f'{os.path.dirname(__file__)}/../../../../external/mmdetection/tools'
FACE_DETECTION_TOOLS = os.path.dirname(__file__)
def parse_args():
""" Parses input args. """
args = argparse.ArgumentParser()
args.add_argument('config',
help='A path to model training configuration file (.py).')
args.add_argument('snapshot',
help='A path to pre-trained snapshot (.pth).')
args.add_argument('out',
help='A path to output file where models metrics will be saved (.yml).')
args.add_argument('--wider_dir',
help='Specify this path if you would like to test your model on WiderFace dataset.')
return args.parse_args()
def replace_text_in_file(path, replace_what, replace_by):
""" Replaces text in file. """
with open(path) as read_file:
content = '\n'.join([line.rstrip() for line in read_file.readlines()])
if content.find(replace_what) == -1:
return False
content = content.replace(replace_what, replace_by)
with open(path, 'w') as write_file:
write_file.write(content)
return True
def collect_ap(path):
""" Collects average precision values in log file. """
average_precisions = []
beginning = 'Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = '
with open(path) as read_file:
content = [line.strip() for line in read_file.readlines()]
for line in content:
if line.startswith(beginning):
average_precisions.append(float(line.replace(beginning, '')))
return average_precisions
def sha256sum(filename):
""" Computes sha256sum. """
h = hashlib.sha256()
b = bytearray(128*1024)
mv = memoryview(b)
with open(filename, 'rb', buffering=0) as f:
for n in iter(lambda: f.readinto(mv), 0):
h.update(mv[:n])
return h.hexdigest()
def compute_wider_metrics(config_path, snapshot, work_dir, wider_dir, outputs):
""" Computes WiderFace metrics on easy, medium, hard subsets. """
wider_data_folder = wider_dir
os.makedirs(wider_data_folder, exist_ok=True)
wider_data_zip = os.path.join(wider_data_folder, 'WIDER_val.zip')
assert os.path.exists(wider_data_zip), f'failed to find WIDER_val.zip here: {wider_data_zip}'
subprocess.run(f'unzip -q -o {wider_data_zip} -d {wider_data_folder}'.split(' '), check=True)
eval_tools_zip = os.path.join(wider_data_folder, 'eval_tools.zip')
if not os.path.exists(eval_tools_zip):
subprocess.run(
f'wget http://shuoyang1213.me/WIDERFACE/support/eval_script/eval_tools.zip'
f' -O {eval_tools_zip}'.split(' '), check=True)
subprocess.run(f'unzip -q -o {eval_tools_zip} -d {wider_data_folder}'.split(' '), check=True)
    wider_annotation_zip = os.path.join(wider_data_folder, 'wider_face_split.zip')
if not os.path.exists(wider_annotation_zip):
subprocess.run(
f'wget http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/bbx_annotation/wider_face_split.zip'
f' -O {wider_annotation_zip}'.split(' '), check=True)
subprocess.run(f'unzip -q -o {wider_annotation_zip} -d {wider_data_folder}'.split(' '), check=True)
wider_annotation = os.path.join(wider_dir, 'wider_face_split', 'wider_face_val_bbx_gt.txt')
wider_images = os.path.join(wider_dir, 'WIDER_val', 'images')
wider_coco_annotation = os.path.join(wider_dir, 'instances_val.json')
subprocess.run(
f'python {FACE_DETECTION_TOOLS}/wider_to_coco.py'
f' {wider_annotation} {wider_images} {wider_coco_annotation}'.split(' '), check=True)
res_pkl = os.path.join(work_dir, 'wider_face_res.pkl')
with open(os.path.join(work_dir, 'test_py_on_wider_stdout_'), 'w') as test_py_stdout:
subprocess.run(
f'python {MMDETECTION_TOOLS}/test.py'
f' {config_path} {snapshot}'
f' --out {res_pkl}'
f' --update_config data.test.ann_file={wider_coco_annotation} data.test.img_prefix={wider_dir}'.split(' '),
stdout=test_py_stdout, check=True)
wider_face_predictions = tempfile.mkdtemp()
subprocess.run(
f'python {FACE_DETECTION_TOOLS}/test_out_to_wider_predictions.py'
f' {config_path} {res_pkl} {wider_face_predictions}'.split(' '), check=True)
print(wider_face_predictions)
res_wider_metrics = os.path.join(work_dir, "wider_metrics.json")
subprocess.run(
f'python {FACE_DETECTION_TOOLS}/wider_face_eval.py'
f' -g {wider_data_folder}/eval_tools/ground_truth/'
f' -p {wider_face_predictions}'
f' --out {res_wider_metrics}'.split(' '), check=True)
with open(res_wider_metrics) as read_file:
content = json.load(read_file)
outputs.extend(content)
return outputs
def coco_ap_eval(config_path, work_dir, snapshot, res_pkl, outputs):
""" Computes COCO AP. """
with open(os.path.join(work_dir, 'test_py_stdout'), 'w') as test_py_stdout:
subprocess.run(
f'python {MMDETECTION_TOOLS}/test.py'
f' {config_path} {snapshot}'
f' --out {res_pkl} --eval bbox'.split(' '), stdout=test_py_stdout, check=True)
average_precision = collect_ap(os.path.join(work_dir, 'test_py_stdout'))[0]
outputs.append({'key': 'ap', 'value': average_precision * 100, 'unit': '%', 'display_name': 'AP @ [IoU=0.50:0.95]'})
return outputs
def custom_ap_eval(config_path, work_dir, res_pkl, outputs):
""" Computes AP on faces that are greater than 64x64. """
res_custom_metrics = os.path.join(work_dir, "custom_metrics.json")
subprocess.run(
f'python {FACE_DETECTION_TOOLS}/wider_custom_eval.py'
f' {config_path} {res_pkl} --out {res_custom_metrics}'.split(' '), check=True)
with open(res_custom_metrics) as read_file:
ap_64x64 = [x['average_precision'] for x in json.load(read_file) if x['object_size'][0] == 64][0]
outputs.append({'key': 'ap_64x64', 'value': ap_64x64, 'display_name': 'AP for faces > 64x64', 'unit': '%'})
return outputs
def get_complexity_and_size(cfg, config_path, work_dir, outputs):
""" Gets complexity and size of a model. """
image_shape = [x['img_scale'] for x in cfg.test_pipeline if 'img_scale' in x][0][::-1]
image_shape = " ".join([str(x) for x in image_shape])
res_complexity = os.path.join(work_dir, "complexity.json")
subprocess.run(
f'python {MMDETECTION_TOOLS}/get_flops.py'
f' {config_path}'
f' --shape {image_shape}'
f' --out {res_complexity}'.split(' '), check=True)
with open(res_complexity) as read_file:
content = json.load(read_file)
outputs.extend(content)
return outputs
def get_file_size_and_sha256(snapshot):
""" Gets size and sha256 of a file. """
return {
'sha256': sha256sum(snapshot),
'size': os.path.getsize(snapshot),
'name': os.path.basename(snapshot),
'source': snapshot
}
def eval(config_path, snapshot, wider_dir, out):
""" Main evaluation procedure. """
cfg = Config.fromfile(config_path)
work_dir = tempfile.mkdtemp()
print('results are stored in:', work_dir)
if os.path.islink(snapshot):
snapshot = os.path.join(os.path.dirname(snapshot), os.readlink(snapshot))
files = get_file_size_and_sha256(snapshot)
metrics = []
res_pkl = os.path.join(work_dir, "res.pkl")
metrics = coco_ap_eval(config_path, work_dir, snapshot, res_pkl, metrics)
metrics = custom_ap_eval(config_path, work_dir, res_pkl, metrics)
if wider_dir:
metrics = compute_wider_metrics(config_path, snapshot, work_dir, wider_dir, metrics)
metrics = get_complexity_and_size(cfg, config_path, work_dir, metrics)
for metric in metrics:
metric['value'] = round(metric['value'], 3)
outputs = {
'files': [files],
'metrics': metrics
}
if os.path.exists(out):
with open(out) as read_file:
content = yaml.load(read_file)
content.update(outputs)
outputs = content
with open(out, 'w') as write_file:
yaml.dump(outputs, write_file)
def main():
""" Main function. """
args = parse_args()
eval(args.config, args.snapshot, args.wider_dir, args.out)
if __name__ == '__main__':
main()
|
# Adapted from the files used in lectures and exercises.
import csv
import json
import os
import requests
default_headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
# Downloading a web page
def url_v_html(url, mapa, ime_datoteke, headers=default_headers):
    '''Takes a URL and saves the HTML file to the given destination.'''
try:
page_content = requests.get(url, headers=headers)
except requests.exceptions.ConnectionError:
print(f"Napaka pri povezovanju na {url}")
return None
if page_content.status_code == requests.codes.ok:
os.makedirs(mapa, exist_ok=True)
path = os.path.join(mapa, ime_datoteke)
with open(path, 'w', encoding='utf-8') as file_out:
file_out.write(page_content.text)
else:
print(f"Napaka pri prenosu strani {url}")
return None
# Writing and reading files
def odpri_html(mapa, ime_datoteke):
    '''Returns a string with the contents of the given file.'''
with open(os.path.join(mapa, ime_datoteke), encoding='utf-8') as datoteka:
return datoteka.read()
def zapisi_csv(slovarji, imena_polj, ime_datoteke):
    '''Creates a CSV file with a header row from a list of dictionaries.'''
with open(ime_datoteke, 'w', encoding='utf-8') as csv_datoteka:
writer = csv.DictWriter(csv_datoteka, fieldnames=imena_polj)
writer.writeheader()
for slovar in slovarji:
writer.writerow(slovar)
def zapisi_json(objekt, ime_datoteke):
    '''Creates a JSON file from the given object.'''
with open(ime_datoteke, 'w', encoding='utf-8') as json_datoteka:
json.dump(objekt, json_datoteka, indent=4, ensure_ascii=False)
def odpri_json(ime_datoteke):
    '''Opens the given JSON file.'''
with open(ime_datoteke, 'r', encoding='utf-8') as json_datoteka:
return json.load(json_datoteka) |
from .StupidArtnet import StupidArtnet
from stupidArtnet.StupidArtnetServer import StupidArtnetServer
from stupidArtnet.ArtnetUtils import shift_this, put_in_range, make_address_mask
|
import tkinter as tk
def convert():
input_value = float(var_input.get())
grams_value = input_value * 1000
var_grams.set('{}g'.format(grams_value))
pounds_value = input_value * 2.20462
var_pounds.set('{}lbs'.format(pounds_value))
ounces_value = input_value * 35.274
var_ounces.set('{}oz'.format(ounces_value))
window = tk.Tk()
window.rowconfigure([0, 1], minsize=30, weight=1)
window.columnconfigure([0, 1, 2], minsize=30, weight=1)
var_input = tk.StringVar()
var_grams = tk.StringVar()
var_pounds = tk.StringVar()
var_ounces = tk.StringVar()
lbl_kg = tk.Label(window, text="kg")
txt_input = tk.Entry(window, textvariable=var_input)
btn_convert = tk.Button(window, text="Convert", command=convert)
lbl_grams = tk.Label(window, textvariable=var_grams)
lbl_pounds = tk.Label(window, textvariable=var_pounds)
lbl_ounces = tk.Label(window, textvariable=var_ounces)
lbl_kg.grid(row=0, column=0)
txt_input.grid(row=0, column=1)
btn_convert.grid(row=0, column=2)
lbl_grams.grid(row=1, column=0)
lbl_pounds.grid(row=1, column=1)
lbl_ounces.grid(row=1, column=2)
window.mainloop()
|
import logging
import multiprocessing
import time
from c4.messaging import (Envelope,
PeerRouter,
RouterClient)
log = logging.getLogger(__name__)
class TestRouterClient(object):
MESSAGES = 100
def test_sendMessage(self, clusterInfo):
counter = multiprocessing.Value("i", 0)
peer1 = PeerRouter("peer1", clusterInfo)
def peer1Handler(message, envelope):
with counter.get_lock():
counter.value += 1
peer1.addHandler(peer1Handler)
peer1.start(timeout=1)
client = RouterClient("peer1")
for _ in range(self.MESSAGES):
testEnvelope = Envelope("client", "peer1", Action="test", isRequest=False, includeTime=True)
client.sendMessage(testEnvelope)
# wait with timeout for messages
        end = time.monotonic() + 10
        while counter.value < self.MESSAGES and time.monotonic() < end:
time.sleep(0.01)
peer1.stop(timeout=1)
assert counter.value == self.MESSAGES
def test_sendRequest(self, clusterInfo):
counter = multiprocessing.Value("i", 0)
peer1 = PeerRouter("peer1", clusterInfo)
def peer1Handler(message, envelope):
with counter.get_lock():
counter.value += 1
return message
peer1.addHandler(peer1Handler)
peer1.start(timeout=1)
client = RouterClient("peer1")
for _ in range(self.MESSAGES):
testEnvelope = Envelope("client", "peer1", Action="test", includeTime=True)
message = client.sendRequest(testEnvelope)
assert message is not None
peer1.stop(timeout=1)
assert counter.value == self.MESSAGES |
import numpy as np
import cv2
import tensorflow as tf
class Evaluator(object):
def __init__(self, config):
self.mutual_check = False
self.err_thld = config['err_thld']
self.matches = self.bf_matcher_graph()
self.stats = {
'i_avg_recall': 0,
'v_avg_recall': 0,
'all_avg_recall': 0,
}
def homo_trans(self, coord, H):
kpt_num = coord.shape[0]
homo_coord = np.concatenate((coord, np.ones((kpt_num, 1))), axis=-1)
proj_coord = np.matmul(H, homo_coord.T).T
proj_coord = proj_coord / proj_coord[:, 2][..., None]
proj_coord = proj_coord[:, 0:2]
return proj_coord
def bf_matcher_graph(self):
descriptors_a = tf.compat.v1.placeholder(tf.float32, (None, None), 'descriptor_a')
descriptors_b = tf.compat.v1.placeholder(tf.float32, (None, None), 'descriptor_b')
sim = tf.linalg.matmul(descriptors_a, descriptors_b, transpose_b=True)
ids1 = tf.range(0, tf.shape(sim)[0])
nn12 = tf.math.argmax(sim, axis=1, output_type=tf.int32)
if self.mutual_check:
nn21 = tf.math.argmax(sim, axis=0, output_type=tf.int32)
mask = tf.equal(ids1, tf.gather(nn21, nn12))
matches = tf.stack([tf.boolean_mask(ids1, mask), tf.boolean_mask(nn12, mask)])
else:
matches = tf.stack([ids1, nn12])
return matches
def bf_matcher(self, sess, descriptors_a, descriptors_b):
input_dict = {
"descriptor_a:0": descriptors_a,
"descriptor_b:0": descriptors_b
}
matches = sess.run(self.matches, input_dict)
return matches.T
def feature_matcher(self, sess, ref_feat, test_feat):
matches = self.bf_matcher(sess, ref_feat, test_feat)
matches = [cv2.DMatch(matches[i][0], matches[i][1], 0) for i in range(matches.shape[0])]
return matches
def get_inlier_matches(self, ref_coord, test_coord, putative_matches, gt_homo, scaling=1.):
p_ref_coord = np.float32([ref_coord[m.queryIdx] for m in putative_matches]) / scaling
p_test_coord = np.float32([test_coord[m.trainIdx] for m in putative_matches]) / scaling
proj_p_ref_coord = self.homo_trans(p_ref_coord, gt_homo)
dist = np.sqrt(np.sum(np.square(proj_p_ref_coord - p_test_coord[:, 0:2]), axis=-1))
inlier_mask = dist <= self.err_thld
inlier_matches = [putative_matches[z] for z in np.nonzero(inlier_mask)[0]]
return inlier_matches
def get_gt_matches(self, ref_coord, test_coord, gt_homo, scaling=1.):
ref_coord = ref_coord / scaling
test_coord = test_coord / scaling
proj_ref_coord = self.homo_trans(ref_coord, gt_homo)
pt0 = np.expand_dims(proj_ref_coord, axis=1)
pt1 = np.expand_dims(test_coord, axis=0)
norm = np.linalg.norm(pt0 - pt1, ord=None, axis=2)
min_dist = np.min(norm, axis=1)
gt_num = np.sum(min_dist <= self.err_thld)
return gt_num
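# Usage sketch (assumes TF1-style graph/session execution; names are illustrative):
#   evaluator = Evaluator({'err_thld': 3})
#   with tf.compat.v1.Session() as sess:
#       putative = evaluator.feature_matcher(sess, ref_descriptors, test_descriptors)
#       inliers = evaluator.get_inlier_matches(ref_coords, test_coords, putative, gt_homography)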
|
class ObservableProperty:
def __init__(self, signal_name: str):
self.attr_name = ''
self.signal_name = signal_name
def __set_name__(self, owner, name):
self.attr_name = '_' + name
def __get__(self, instance, owner):
return getattr(instance, self.attr_name)
def __set__(self, instance, value):
setattr(instance, self.attr_name, value)
getattr(instance, self.signal_name).emit(value)
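# Minimal usage sketch (assumption: the signal attribute is a Qt-style object
# exposing .emit(); the _Signal stand-in below exists only for illustration).
if __name__ == '__main__':
    class _Signal:
        def emit(self, value):
            print('changed ->', value)

    class Model:
        value = ObservableProperty('value_changed')

        def __init__(self):
            self.value_changed = _Signal()
            self.value = 0  # emits "changed -> 0"

    m = Model()
    m.value = 42    # emits "changed -> 42"
    print(m.value)  # 42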
|
# LICENSE: PSF.
import asyncio
import concurrent.futures
import re
import sys
import threading
import unittest
import uvloop
from asyncio import test_utils
from uvloop import _testbase as tb
from unittest import mock
from test import support
# Most of the tests are copied from asyncio
def _fakefunc(f):
return f
def first_cb():
pass
def last_cb():
pass
class _TestFutures:
def create_future(self):
raise NotImplementedError
def test_future_initial_state(self):
f = self.create_future()
self.assertFalse(f.cancelled())
self.assertFalse(f.done())
f.cancel()
self.assertTrue(f.cancelled())
def test_future_cancel(self):
f = self.create_future()
self.assertTrue(f.cancel())
self.assertTrue(f.cancelled())
self.assertTrue(f.done())
self.assertRaises(asyncio.CancelledError, f.result)
self.assertRaises(asyncio.CancelledError, f.exception)
self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
self.assertFalse(f.cancel())
def test_future_result(self):
f = self.create_future()
self.assertRaises(asyncio.InvalidStateError, f.result)
f.set_result(42)
self.assertFalse(f.cancelled())
self.assertTrue(f.done())
self.assertEqual(f.result(), 42)
self.assertEqual(f.exception(), None)
self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
self.assertFalse(f.cancel())
def test_future_exception(self):
exc = RuntimeError()
f = self.create_future()
self.assertRaises(asyncio.InvalidStateError, f.exception)
if sys.version_info[:3] > (3, 5, 1):
# StopIteration cannot be raised into a Future - CPython issue26221
self.assertRaisesRegex(TypeError,
"StopIteration .* cannot be raised",
f.set_exception, StopIteration)
f.set_exception(exc)
self.assertFalse(f.cancelled())
self.assertTrue(f.done())
self.assertRaises(RuntimeError, f.result)
self.assertEqual(f.exception(), exc)
self.assertRaises(asyncio.InvalidStateError, f.set_result, None)
self.assertRaises(asyncio.InvalidStateError, f.set_exception, None)
self.assertFalse(f.cancel())
def test_future_exception_class(self):
f = self.create_future()
f.set_exception(RuntimeError)
self.assertIsInstance(f.exception(), RuntimeError)
def test_future_yield_from_twice(self):
f = self.create_future()
def fixture():
yield 'A'
x = yield from f
yield 'B', x
y = yield from f
yield 'C', y
g = fixture()
self.assertEqual(next(g), 'A') # yield 'A'.
self.assertEqual(next(g), f) # First yield from f.
f.set_result(42)
self.assertEqual(next(g), ('B', 42)) # yield 'B', x.
# The second "yield from f" does not yield f.
self.assertEqual(next(g), ('C', 42)) # yield 'C', y.
def test_future_repr(self):
self.loop.set_debug(True)
f_pending_debug = self.create_future()
frame = f_pending_debug._source_traceback[-1]
self.assertEqual(repr(f_pending_debug),
'<Future pending created at %s:%s>'
% (frame[0], frame[1]))
f_pending_debug.cancel()
self.loop.set_debug(False)
f_pending = self.create_future()
self.assertEqual(repr(f_pending), '<Future pending>')
f_pending.cancel()
f_cancelled = self.create_future()
f_cancelled.cancel()
self.assertEqual(repr(f_cancelled), '<Future cancelled>')
f_result = self.create_future()
f_result.set_result(4)
self.assertEqual(repr(f_result), '<Future finished result=4>')
self.assertEqual(f_result.result(), 4)
exc = RuntimeError()
f_exception = self.create_future()
f_exception.set_exception(exc)
self.assertEqual(repr(f_exception),
'<Future finished exception=RuntimeError()>')
self.assertIs(f_exception.exception(), exc)
def func_repr(func):
filename, lineno = test_utils.get_function_source(func)
text = '%s() at %s:%s' % (func.__qualname__, filename, lineno)
return re.escape(text)
f_one_callbacks = self.create_future()
f_one_callbacks.add_done_callback(_fakefunc)
fake_repr = func_repr(_fakefunc)
self.assertRegex(repr(f_one_callbacks),
r'<Future pending cb=\[%s\]>' % fake_repr)
f_one_callbacks.cancel()
self.assertEqual(repr(f_one_callbacks),
'<Future cancelled>')
f_two_callbacks = self.create_future()
f_two_callbacks.add_done_callback(first_cb)
f_two_callbacks.add_done_callback(last_cb)
first_repr = func_repr(first_cb)
last_repr = func_repr(last_cb)
self.assertRegex(repr(f_two_callbacks),
r'<Future pending cb=\[%s, %s\]>'
% (first_repr, last_repr))
f_many_callbacks = self.create_future()
f_many_callbacks.add_done_callback(first_cb)
for i in range(8):
f_many_callbacks.add_done_callback(_fakefunc)
f_many_callbacks.add_done_callback(last_cb)
cb_regex = r'%s, <8 more>, %s' % (first_repr, last_repr)
self.assertRegex(repr(f_many_callbacks),
r'<Future pending cb=\[%s\]>' % cb_regex)
f_many_callbacks.cancel()
self.assertEqual(repr(f_many_callbacks),
'<Future cancelled>')
def test_future_copy_state(self):
if sys.version_info[:3] < (3, 5, 1):
raise unittest.SkipTest()
from asyncio.futures import _copy_future_state
f = self.create_future()
f.set_result(10)
newf = self.create_future()
_copy_future_state(f, newf)
self.assertTrue(newf.done())
self.assertEqual(newf.result(), 10)
f_exception = self.create_future()
f_exception.set_exception(RuntimeError())
newf_exception = self.create_future()
_copy_future_state(f_exception, newf_exception)
self.assertTrue(newf_exception.done())
self.assertRaises(RuntimeError, newf_exception.result)
f_cancelled = self.create_future()
f_cancelled.cancel()
newf_cancelled = self.create_future()
_copy_future_state(f_cancelled, newf_cancelled)
self.assertTrue(newf_cancelled.cancelled())
@mock.patch('asyncio.base_events.logger')
def test_future_tb_logger_abandoned(self, m_log):
fut = self.create_future()
del fut
self.assertFalse(m_log.error.called)
@mock.patch('asyncio.base_events.logger')
def test_future_tb_logger_result_unretrieved(self, m_log):
fut = self.create_future()
fut.set_result(42)
del fut
self.assertFalse(m_log.error.called)
@mock.patch('asyncio.base_events.logger')
def test_future_tb_logger_result_retrieved(self, m_log):
fut = self.create_future()
fut.set_result(42)
fut.result()
del fut
self.assertFalse(m_log.error.called)
def test_future_wrap_future(self):
def run(arg):
return (arg, threading.get_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = asyncio.wrap_future(f1, loop=self.loop)
res, ident = self.loop.run_until_complete(f2)
self.assertIsInstance(f2, asyncio.Future)
self.assertEqual(res, 'oi')
self.assertNotEqual(ident, threading.get_ident())
def test_future_wrap_future_future(self):
f1 = self.create_future()
f2 = asyncio.wrap_future(f1)
self.assertIs(f1, f2)
def test_future_wrap_future_use_global_loop(self):
with mock.patch('asyncio.futures.events') as events:
events.get_event_loop = lambda: self.loop
def run(arg):
return (arg, threading.get_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = asyncio.wrap_future(f1)
self.assertIs(self.loop, f2._loop)
def test_future_wrap_future_cancel(self):
f1 = concurrent.futures.Future()
f2 = asyncio.wrap_future(f1, loop=self.loop)
f2.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(f1.cancelled())
self.assertTrue(f2.cancelled())
def test_future_wrap_future_cancel2(self):
f1 = concurrent.futures.Future()
f2 = asyncio.wrap_future(f1, loop=self.loop)
f1.set_result(42)
f2.cancel()
test_utils.run_briefly(self.loop)
self.assertFalse(f1.cancelled())
self.assertEqual(f1.result(), 42)
self.assertTrue(f2.cancelled())
def test_future_source_traceback(self):
self.loop.set_debug(True)
future = self.create_future()
lineno = sys._getframe().f_lineno - 1
self.assertIsInstance(future._source_traceback, list)
self.assertEqual(future._source_traceback[-2][:3],
(__file__,
lineno,
'test_future_source_traceback'))
def check_future_exception_never_retrieved(self, debug):
last_ctx = None
def handler(loop, context):
nonlocal last_ctx
last_ctx = context
self.loop.set_debug(debug)
self.loop.set_exception_handler(handler)
def memory_error():
try:
raise MemoryError()
except BaseException as exc:
return exc
exc = memory_error()
future = self.create_future()
if debug:
source_traceback = future._source_traceback
future.set_exception(exc)
future = None
support.gc_collect()
test_utils.run_briefly(self.loop)
self.assertIsNotNone(last_ctx)
self.assertIs(last_ctx['exception'], exc)
self.assertEqual(last_ctx['message'],
'Future exception was never retrieved')
if debug:
tb = last_ctx['source_traceback']
self.assertEqual(tb[-2].name,
'check_future_exception_never_retrieved')
def test_future_exception_never_retrieved(self):
self.check_future_exception_never_retrieved(False)
def test_future_exception_never_retrieved_debug(self):
self.check_future_exception_never_retrieved(True)
def test_future_wrap_future(self):
from uvloop.loop import _wrap_future
def run(arg):
return (arg, threading.get_ident())
ex = concurrent.futures.ThreadPoolExecutor(1)
f1 = ex.submit(run, 'oi')
f2 = _wrap_future(f1, loop=self.loop)
res, ident = self.loop.run_until_complete(f2)
self.assertIsInstance(f2, asyncio.Future)
self.assertEqual(res, 'oi')
self.assertNotEqual(ident, threading.get_ident())
def test_future_wrap_future_future(self):
from uvloop.loop import _wrap_future
f1 = self.create_future()
f2 = _wrap_future(f1)
self.assertIs(f1, f2)
def test_future_wrap_future_cancel(self):
from uvloop.loop import _wrap_future
f1 = concurrent.futures.Future()
f2 = _wrap_future(f1, loop=self.loop)
f2.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(f1.cancelled())
self.assertTrue(f2.cancelled())
def test_future_wrap_future_cancel2(self):
from uvloop.loop import _wrap_future
f1 = concurrent.futures.Future()
f2 = _wrap_future(f1, loop=self.loop)
f1.set_result(42)
f2.cancel()
test_utils.run_briefly(self.loop)
self.assertFalse(f1.cancelled())
self.assertEqual(f1.result(), 42)
self.assertTrue(f2.cancelled())
class _TestFuturesDoneCallbacks:
def run_briefly(self):
test_utils.run_briefly(self.loop)
def _make_callback(self, bag, thing):
# Create a callback function that appends thing to bag.
def bag_appender(future):
bag.append(thing)
return bag_appender
def _new_future(self):
raise NotImplementedError
def test_future_callbacks_invoked_on_set_result(self):
bag = []
f = self._new_future()
f.add_done_callback(self._make_callback(bag, 42))
f.add_done_callback(self._make_callback(bag, 17))
self.assertEqual(bag, [])
f.set_result('foo')
self.run_briefly()
self.assertEqual(bag, [42, 17])
self.assertEqual(f.result(), 'foo')
def test_future_callbacks_invoked_on_set_exception(self):
bag = []
f = self._new_future()
f.add_done_callback(self._make_callback(bag, 100))
self.assertEqual(bag, [])
exc = RuntimeError()
f.set_exception(exc)
self.run_briefly()
self.assertEqual(bag, [100])
self.assertEqual(f.exception(), exc)
def test_future_remove_done_callback(self):
bag = []
f = self._new_future()
cb1 = self._make_callback(bag, 1)
cb2 = self._make_callback(bag, 2)
cb3 = self._make_callback(bag, 3)
# Add one cb1 and one cb2.
f.add_done_callback(cb1)
f.add_done_callback(cb2)
# One instance of cb2 removed. Now there's only one cb1.
self.assertEqual(f.remove_done_callback(cb2), 1)
# Never had any cb3 in there.
self.assertEqual(f.remove_done_callback(cb3), 0)
# After this there will be 6 instances of cb1 and one of cb2.
f.add_done_callback(cb2)
for i in range(5):
f.add_done_callback(cb1)
# Remove all instances of cb1. One cb2 remains.
self.assertEqual(f.remove_done_callback(cb1), 6)
self.assertEqual(bag, [])
f.set_result('foo')
self.run_briefly()
self.assertEqual(bag, [2])
self.assertEqual(f.result(), 'foo')
###############################################################################
# Tests Matrix
###############################################################################
class Test_UV_UV_create_future(_TestFutures, tb.UVTestCase):
# Test uvloop.Loop.create_future
def create_future(self):
return self.loop.create_future()
class Test_UV_UV_Future(_TestFutures, tb.UVTestCase):
# Test that uvloop.Future can be instantiated directly
def create_future(self):
return uvloop.Future(loop=self.loop)
class Test_UV_AIO_Futures(_TestFutures, tb.UVTestCase):
def create_future(self):
return asyncio.Future(loop=self.loop)
class Test_AIO_Futures(_TestFutures, tb.AIOTestCase):
def create_future(self):
return asyncio.Future(loop=self.loop)
class Test_UV_UV_FuturesCallbacks(_TestFuturesDoneCallbacks, tb.UVTestCase):
def _new_future(self):
return self.loop.create_future()
class Test_UV_AIO_FuturesCallbacks(_TestFuturesDoneCallbacks, tb.UVTestCase):
def _new_future(self):
return asyncio.Future(loop=self.loop)
class Test_AIO_FuturesCallbacks(_TestFuturesDoneCallbacks, tb.AIOTestCase):
def _new_future(self):
return asyncio.Future(loop=self.loop)
|
import cv2
import numpy as np
from matplotlib import pyplot as plt
img = cv2.imread('lowContrast.png',0)
plt.hist(img.ravel(),256,[0,256])
plt.savefig('hist.png')  # save before show(), otherwise an empty figure is written
plt.show()
equ = cv2.equalizeHist(img)
res = np.hstack((img,equ))
cv2.imshow('Equalized Image',res)
cv2.imwrite('Equalized Image.png',res)
plt.hist(res.ravel(),256,[0,256])
plt.savefig('equal-hist.png')  # save before show(), otherwise an empty figure is written
plt.show()
import asyncio
import platform
from dask.distributed import Client
def square(x):
return x ** 2
async def f():
client = await Client("localhost:8786", processes=False, asynchronous=True)
A = client.map(square, range(10000))
result = await client.submit(sum, A)
print(result)
await client.close()
return result
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(f())
|
from tests.GreetingExtension import GreetingExtension
class AfternoonColorExtension(GreetingExtension):
_name = 'afternoon'
def greet(self, person: str) -> None:
print(f'{self._name} {person}')
|
# Generated by Django 3.0.11 on 2020-12-25 16:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('styles', '0004_auto_20201225_1051'),
('users', '0007_auto_20201222_0922'),
]
operations = [
migrations.AddField(
model_name='user',
name='favorite_style',
field=models.ForeignKey(blank=True, help_text='Select your default TaiChi Style.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='styles.Style'),
),
]
|