max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
ros/niryo_one_ros/niryo_one_tcp_server/clients/python/examples/vision_demonstrators/2_multiple_reference_conditioning.py | paubrunet97/astrocytes | 5 | 12796451 | <filename>ros/niryo_one_ros/niryo_one_tcp_server/clients/python/examples/vision_demonstrators/2_multiple_reference_conditioning.py
"""
This script shows an example of how to use Niryo One's vision to
condition (sort) objects according to their color.
The objects will be conditioned in a grid of dimension grid_dimension. The Y axis corresponds
to the color: BLUE / RED / GREEN, so it has 3 positions.
The X axis corresponds to how many objects can be put on the same line before increasing
the conditioning height.
Once a line is completed, objects will be packed on top of the lower level.
"""
from niryo_one_tcp_client import *
# -- MUST Change these variables
robot_ip_address = "192.168.1.202" # IP address of the Niryo One
tool_used = RobotTool.GRIPPER_1 # Tool used for picking
workspace_name = "workspace_1" # Robot's Workspace Name
# -- Can change these variables
grid_dimension = (3, 3)
# -- Should Change these variables
# The pose from where the image processing happens
observation_pose = PoseObject(
x=0.20, y=0., z=0.3,
roll=0.0, pitch=1.57, yaw=0.0,
)
# Center of the conditioning area
center_conditioning_pose = PoseObject(
x=0.0, y=-0.25, z=0.12,
roll=-0., pitch=1.57, yaw=-1.57
)
# Joints where the robot goes at the end of its process
sleep_joints = [0.0, 0.55, -1.2, 0.0, 0.0, 0.0]
# -- MAIN PROGRAM
def process(niryo_one_client):
try_without_success = 0
count_dict = {
"BLUE": 0,
"RED": 0,
"GREEN": 0,
}
    # Loop until there have been too many consecutive failures
while try_without_success < 3:
# Moving to observation pose
niryo_one_client.move_pose(*observation_pose.to_list())
# Trying to get object from Niryo One API
ret = niryo_one_client.vision_pick(workspace_name,
height_offset=0.0,
shape=Shape.ANY,
color=Color.ANY)
# Unpacking return result
status, obj_found, shape, color = ret
if not status or not obj_found:
try_without_success += 1
continue
# Choose Y position according to Color
color_val = color.value
if color_val == "BLUE":
offset_y = -1
elif color_val == "RED":
offset_y = 0
else:
offset_y = 1
# Choose X & Z position according to how the color line is filled
offset_x = count_dict[color_val] % grid_dimension[0] - grid_dimension[0] // 2
offset_z = count_dict[color_val] // grid_dimension[0]
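        # e.g. with grid_dimension == (3, 3): counts 0-2 fill one row (offset_x = -1, 0, 1 on level 0),
        # count 3 wraps to the next level (offset_z = 1), and so on for each color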
# Going to place the object
place_pose = center_conditioning_pose.copy_with_offsets(0.05 * offset_x, 0.05 * offset_y, 0.025 * offset_z)
niryo_one_client.place_from_pose(*niryo_one_client.pose_to_list(place_pose))
# Increment count
count_dict[color_val] += 1
try_without_success = 0
if __name__ == '__main__':
# Connect to robot
client = NiryoOneClient()
client.connect(robot_ip_address)
# Changing tool
client.change_tool(tool_used)
# Calibrate robot if robot needs calibration
client.calibrate(CalibrateMode.AUTO)
# Launching main process
process(client)
# Ending
client.move_joints(*sleep_joints)
client.set_learning_mode(True)
# Releasing connection
client.quit()
| 2.921875 | 3 |
Visualization.py | Hoeiriis/ConvForTimeSeries-Mirror | 0 | 12796452 | import matplotlib.pyplot as plt
from CuteFlower2.data_loading import cd
import os
def save_hist_plot(history, name="test", path=None):
train_errors = history.history['loss']
val_errors = history.history['val_loss']
plt.style.use('bmh')
plt.plot(range(len(train_errors)), train_errors, 'g-', label="Train")
plt.plot(range(len(val_errors)), val_errors, 'r-', label="Val")
plt.legend()
if path is None:
path = os.getcwd()+"/Data"
with cd(path):
plt.savefig("Train_val_graph_{}".format(name))
plt.clf()
def intermediate_drawer(name, path=None, draw=False):
train_loss = []
val_loss = []
plt.style.use('bmh')
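    # drawer() is intended to be hooked up as a per-epoch training callback (a Keras-style LambdaCallback is assumed here);
    # it accumulates the losses reported in `logs` and redraws the training/validation curves.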
def drawer(logs):
train_loss.append(logs['loss'])
val_loss.append(logs['val_loss'])
loss_range = range(len(train_loss))
        plt.ion()  # Not sure whether this needs to be called in every loop; it is probably fine to have it outside, but whatever
train_loss_plot, = plt.plot(
loss_range, train_loss, label='Training Loss')
val_loss_plot, = plt.plot(
loss_range, val_loss, label='Validation loss')
plt.legend(handles=[train_loss_plot, val_loss_plot])
if not draw:
plt.show()
plt.pause(0.001)
if path is not None:
with cd(path):
plt.savefig("Train_val_graph_{}".format(name))
plt.clf()
return drawer
| 2.640625 | 3 |
web/test/test_client.py | davisyoshida/qb-api | 11 | 12796453 | <filename>web/test/test_client.py
from client import QbApi
import unittest
USER_ID = 0
BASE_URL = 'http://127.0.0.1:5000/qb-api/v1'
API_KEY = 0
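# Note: these tests assume a qb-api server is already running and reachable at BASE_URL with the credentials above.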
class QuizBowlClientTests(unittest.TestCase):
def test_get_num_questions(self):
client = QbApi(BASE_URL, USER_ID, API_KEY)
num_qs = client.get_num_questions()
self.assertIsInstance(num_qs, int)
self.assertTrue(num_qs > 0)
def test_get_question_length(self):
client = QbApi(BASE_URL, USER_ID, API_KEY)
q_length = client.get_question_length(0)
self.assertIsInstance(q_length, int)
self.assertTrue(q_length > 0)
def test_get_word(self):
client = QbApi(BASE_URL, USER_ID, API_KEY)
word = client.get_word(0, 0)
        self.assertIsInstance(word, str)
        self.assertTrue(word)
def test_submit_answer_success(self):
client = QbApi(BASE_URL, USER_ID, API_KEY)
res = client.submit_answer(0, 'abcasdfasdfasdf')
self.assertEqual(res, False)
def test_submit_answer_duplicate(self):
client = QbApi(BASE_URL, USER_ID, API_KEY)
client.submit_answer(0, 'answer1')
        self.assertRaises(ValueError, client.submit_answer, 0, 'answer2')
| 2.828125 | 3 |
train.py | DeepanshKhurana/udacityproject-ml-imageclassifier | 0 | 12796454 | # Imports
import pandas as pd
import numpy as np
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
import torch.utils.data
from torchvision import datasets, models, transforms
from collections import OrderedDict
import os
import argparse
# Functions
def arg_parser():
'''
Takes in command-line arguments and parses them for usage of our Python functions.
'''
parser = argparse.ArgumentParser(description='ImageClassifier Params')
parser.add_argument('--architecture',
type=str,
help='Architecture and model from torchvision.models as strings: vgg16 and densenet121 supported.')
parser.add_argument('--learning_rate',
type=float,
help='Learning Rate for our Neural Network. Default is 0.001.')
parser.add_argument('--hidden',
type=int,
help='Hidden Units for our Neural Network. Default is 1024.')
parser.add_argument('--dropout',
type=float,
help='Dropout value for our Dropout layers. Default is 0.05.')
parser.add_argument('--epochs',
type=int,
help='Epochs for Neural Network training. Default is 1.')
parser.add_argument('--gpu',
type=str,
help='Use GPU (Y for Yes; N for No). Default is Y.')
args = parser.parse_args()
return(args)
def load(data_dir='./flowers'):
'''
Loads data for train, test and validation.
Also loads dataloaders for all three in the same order.
Returns all six datasets and loaders, in the same order.
'''
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
data_transforms = {'train': transforms.Compose([transforms.RandomResizedCrop(224),
transforms.RandomVerticalFlip(0.5),
transforms.RandomRotation(75),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])]),
'valid': transforms.Compose([transforms.RandomResizedCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])]),
'test': transforms.Compose([transforms.RandomResizedCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
}
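    # The mean/std values above are the standard ImageNet normalization constants expected by torchvision's pre-trained models.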
image_datasets = {
'train': datasets.ImageFolder(train_dir, transform=data_transforms['train']),
'test': datasets.ImageFolder(test_dir, transform=data_transforms['test']),
'valid': datasets.ImageFolder(valid_dir, transform=data_transforms['valid'])
}
dataloaders = {
'train': torch.utils.data.DataLoader(image_datasets['train'], 64, shuffle=True),
'test': torch.utils.data.DataLoader(image_datasets['test'], 32, shuffle=True),
'valid': torch.utils.data.DataLoader(image_datasets['valid'], 32, shuffle=True)
}
return(dataloaders['train'], dataloaders['test'], dataloaders['valid'], image_datasets['train'], image_datasets['test'], image_datasets['valid'])
def set_device(gpu):
'''
Sets the device based on the parameter. Also handles most edge-cases.
Returns the device variable to be used later.
'''
if gpu=='Y':
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device=='cpu':
print('CUDA not available; using CPU')
else:
print('Using GPU')
elif gpu=='N':
device = 'cpu'
print('Using CPU')
else:
print('Incorrect Value for GPU entered.')
        print('Fallback to default: use GPU if available')
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
if device=='cpu':
print('CUDA not available; using CPU')
else:
print('Using GPU')
return(device)
def build(device, architecture='vgg16', dropout=0.05, hidden=1024, learning_rate=0.001):
'''
    Takes in device, architecture, dropout, hidden and learning_rate.
Returns a torch model.
'''
if architecture:
if architecture=='vgg16':
model = models.vgg16(pretrained=True)
model.name = architecture
input_ = 25088
elif architecture=='densenet121':
model = models.densenet121(pretrained=True)
model.name = architecture
input_ = 1024
else:
print('Invalid input: Please use \'vgg16\' or \'densenet121\'')
else:
print('No architecture given. Fallback to default architecture: \'vgg16\'')
model = models.vgg16(pretrained=True)
model.name = architecture
input_ = 25088
if hidden:
hidden = hidden
else:
print('No number of hidden inputs specified. Fallback to default inputs: 1024')
hidden = 1024
if learning_rate:
learning_rate = learning_rate
else:
print('No learning_rate specified. Fallback to default learning_rate: 0.001')
learning_rate = 0.001
if dropout:
dropout = dropout
else:
print('No dropout specified. Fallback to default dropout: 0.05')
dropout = 0.05
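    # Freeze the pre-trained feature extractor; only the newly attached classifier layers will be trained.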
for parameter in model.parameters():
parameter.requires_grad = False
classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(input_, hidden)),
('relu', nn.ReLU()),
('dropout1', nn.Dropout(dropout)),
('fc2', nn.Linear(hidden, 102, bias=True)),
('output', nn.LogSoftmax(dim=1))
]))
model.classifier = classifier
model.to(device)
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
return(model, criterion, optimizer)
def validation(model, valid_loader, criterion, device):
'''
Validation function for our model.
Returns validation loss and accuracy.
'''
valid_loss = 0
valid_acc = 0
for ii, (inputs, labels) in enumerate(valid_loader):
inputs, labels = inputs.to(device), labels.to(device)
output = model.forward(inputs)
valid_loss += criterion(output, labels).item()
ps = torch.exp(output)
equality = (labels.data == ps.max(dim=1)[1])
valid_acc += equality.type(torch.FloatTensor).mean()
return valid_loss, valid_acc
def train(model, criterion, optimizer, train_loader, valid_loader, device, epochs=1, print_every=50):
'''
Trains our Neural Network model
'''
steps = 0
if epochs:
epochs = epochs
else:
print('No epochs specified. Fallback to default epochs: 1')
epochs = 1
print('Training Model for {} epochs'.format(epochs))
for e in range(epochs):
running_loss = 0
for ii, (inputs, labels) in enumerate(train_loader):
steps += 1
inputs, labels = inputs.to(device), labels.to(device)
model.zero_grad()
# Forward and backward passes
outputs = model.forward(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
model.eval()
with torch.no_grad():
valid_loss, valid_acc = validation(model, valid_loader, criterion, device)
training_loss = round(float(running_loss/print_every), 3)
valid_loss = round(float(valid_loss/len(valid_loader)), 3)
valid_acc = round(float(valid_acc/len(valid_loader)), 3)
print('Epoch: {}/{} :: Training Loss: {} :: Validation Loss: {} :: Validation Accuracy: {}'
.format(e+1, epochs, training_loss, valid_loss, valid_acc))
running_loss = 0
model.train()
print('Model training complete!')
return(model)
def validate(model, test_loader, device):
'''
    Prints the accuracy of the model on the test set
'''
correct = 0
total = 0
with torch.no_grad():
model.eval()
for data in test_loader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
accuracy = round(100 * correct / total, 2)
print('Accuracy: {}'.format(accuracy))
def save(model, train_data, epochs, architecture):
'''
Saves the model to the given path.
'''
model.class_to_idx = train_data.class_to_idx
if epochs:
epochs = epochs
else:
epochs = 1
checkpoint = {'state_dict': model.state_dict(),
'classifier': model.classifier,
'class_to_idx': model.class_to_idx,
'epochs': epochs,
'architecture': architecture}
file = 'checkpoint.pth'
torch.save(checkpoint, file)
print('Model saved to {}!'.format(file))
# Main
def main():
args = arg_parser()
if args.gpu:
gpu=args.gpu
else:
        print('No GPU option given. Fallback to default: Y (use GPU if available)')
gpu='Y'
device = set_device(gpu)
train_loader, test_loader, valid_loader, train_data, test_data, valid_data = load()
model, criterion, optimizer = build(device, architecture=args.architecture, dropout=args.dropout, hidden=args.hidden, learning_rate=args.learning_rate)
model = train(model=model, train_loader=train_loader, valid_loader=valid_loader, device=device, criterion=criterion, optimizer=optimizer, epochs=args.epochs)
validate(model=model, test_loader=test_loader, device=device)
save(model=model, train_data=train_data, epochs=args.epochs, architecture = args.architecture)
if __name__ == '__main__':
main()
| 2.578125 | 3 |
contrib/python/cuuid/base_x.py | Kronuz/Xapiand | 370 | 12796455 | <gh_stars>100-1000
#
# Copyright (C) 2015-2018 Dubalu LLC. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""
BaseX encoding
"""
__version__ = '0.0.1'
class BaseX(object):
def __init__(self, alphabet, translate):
self.alphabet = alphabet
self.translate = translate
self.base = len(self.alphabet)
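        # Reverse-lookup table: each alphabet character maps to its index, every other byte value stays at self.base ("invalid").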
self.decoder = [self.base] * 256
for i, a in enumerate(self.alphabet):
o = ord(a)
self.decoder[o] = i
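        # Characters listed in `translate` are aliased to the most recently seen alphabet character (look-alikes such as '1'/'l'),
        # while those appearing before any alphabet character (e.g. '~') map to -1 and are simply skipped when decoding.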
x = -1
for a in self.translate:
o = ord(a)
i = self.decoder[o]
if i < self.base:
x = i
else:
self.decoder[o] = x
def encode_int(self, i, default_one=True):
"""Encode an integer using BaseX"""
if not i and default_one:
return self.alphabet[0]
string = ""
sum_chk = 0
while i:
i, idx = divmod(i, self.base)
string = self.alphabet[idx] + string
sum_chk += idx
sumsz = len(string)
        sum_chk += sumsz + sumsz // self.base  # integer division keeps the checksum an int on Python 3
return string, sum_chk % self.base
def encode(self, v):
"""Encode a string using BaseX"""
if not isinstance(v, bytes):
raise TypeError("a bytes-like object is required, not '%s'" % type(v).__name__)
p, acc = 1, 0
for c in map(ord, reversed(v)):
acc += p * c
p = p << 8
result, sum_chk = self.encode_int(acc, default_one=False)
sum_chk = (self.base - (sum_chk % self.base)) % self.base
return result + self.alphabet[sum_chk]
def decode_int(self, v):
"""Decode a BaseX encoded string as an integer"""
if not isinstance(v, str):
v = v.decode('ascii')
decimal = 0
sum_chk = 0
sumsz = 0
for char in v:
o = ord(char)
i = self.decoder[o]
if i < 0:
continue
if i >= self.base:
raise ValueError("Invalid character")
decimal = decimal * self.base + i
sum_chk += i
sumsz += 1
        sum_chk += sumsz + sumsz // self.base  # integer division keeps the checksum an int on Python 3
return decimal, sum_chk % self.base
def decode(self, v):
"""Decode a BaseX encoded string"""
if not isinstance(v, str):
v = v.decode('ascii')
while True:
chk = self.decoder[ord(v[-1:])]
v = v[:-1]
if chk < 0:
continue
if chk >= self.base:
raise ValueError("Invalid character")
break
acc, sum_chk = self.decode_int(v)
sum_chk += chk
if sum_chk % self.base:
raise ValueError("Invalid checksum")
result = []
while acc:
result.append(acc & 0xff)
acc >>= 8
return ''.join(map(chr, reversed(result)))
def chksum(self, v):
"""Get checksum character for BaseX encoded string"""
if not isinstance(v, str):
v = v.decode('ascii')
acc, sum_chk = self.decode_int(v)
sum_chk = (self.base - (sum_chk % self.base)) % self.base
return self.alphabet[sum_chk]
b59 = BaseX('zGLUAC2EwdDRrkWBatmscxyYlg6jhP7K53TibenZpMVuvoO9H4XSQq8FfJN', '~l1IO0')
b59decode = b59.decode
b59encode = b59.encode
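# Minimal usage sketch (illustrative only):
#   token = b59encode(b'\x01\x02\x03')  # encode bytes and append a checksum character
#   data = b59decode(token)             # verify the checksum; raises ValueError on mismatch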
def main():
"""BaseX encode or decode FILE, or standard input, to standard output."""
import sys
import argparse
stdout = sys.stdout
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument(
'file',
metavar='FILE',
nargs='?',
type=argparse.FileType('r'),
default='-')
parser.add_argument(
'-d', '--decode',
action='store_true',
help='decode data')
parser.add_argument(
'-c', '--check',
action='store_true',
help='append a checksum before encoding')
args = parser.parse_args()
fun = {
(False, False): b59encode,
(True, False): b59decode,
}[(args.decode, args.check)]
    data = args.file.read().rstrip('\n').encode('ascii')  # file is opened in text mode; convert to bytes for encoding
try:
result = fun(data)
except Exception as e:
sys.exit(e)
    if isinstance(result, bytes):
        result = result.decode('ascii')
    stdout.write(result)
if __name__ == '__main__':
main()
| 2.125 | 2 |
tuba/run_disc.py | korhanpolat/phoenix_term_discovery | 0 | 12796456 | import argparse
import sys
from os.path import join
from os import chdir
import subprocess
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--sge', type=str, default='nosge')
parser.add_argument('-l', '--filelist', type=str, default='')
parser.add_argument('-zr_root', '--zr_root', type=str, default='/home/korhan/Desktop/zerospeech2017/track2/src/ZRTools')
args = parser.parse_args()
chdir(args.zr_root)
command = './run_disc {} {}'.format(args.sge,args.filelist)
print(command)
subprocess.call(command.split())
| 2.375 | 2 |
models/__init__.py | lmycross/segmentation | 6 | 12796457 | from .deconvnetmodel import *
from .deeplab_v2 import *
from .deeplab_v2_multiscale import *
from .deeplab_vggmodel import *
from .dilatedmodel import *
from .ducmodel import *
from .fcnmodel import *
from .gcnmodel import *
from .linknetmodel import *
from .segnetmodel import *
from .tiramisu import *
from .tiramisu_nobias import * | 1.054688 | 1 |
danmu/predictRecentWords.py | ShakexIngwu/crackwords | 0 | 12796458 | #!/usr/bin/env python3
import re
from gensim.models import word2vec
from gensim.models import KeyedVectors
from operator import itemgetter
filePath = '/home/ubuntu/danmu/corpusSegRecentWords.txt'
fileTrainRead = []
#read the file by line
with open(filePath) as fileTrainRaw:
for line in fileTrainRaw:
fileTrainRead.append(line)
#load the pre-trained word2vec vector set
model = KeyedVectors.load_word2vec_format('/home/ubuntu/danmu/corpusWord2Vec.bin', binary=True)
#predict for each word and then calculate the most frequent topic word set
wordFreq = {}
for i in range(len(fileTrainRead)):
    words = fileTrainRead[i].split()  # each entry of fileTrainRead is one full line of the segmented corpus
for j, word in enumerate(words):
# word = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()]+".decode("utf8"), "",word)
# word = re.sub("[【】╮╯▽╰╭★→「」]+".decode("utf8"),"",word)
# word = re.sub("!,❤。~《》:()【】「」?”“;:、".decode("utf8"),"",word)
if not re.match(r"[【】╮╯▽╰╭★→「」\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()!,❤。~《》:()【】「」?”“;:、0-9a-zA-Z]+", word):
try:
similarWords = model.most_similar(word, topn=10)
for idx, similarWord in enumerate(similarWords):
if similarWord[0] not in wordFreq:
wordFreq[similarWord[0]] = 1
else:
wordFreq[similarWord[0]] += 1
except:
pass
top10Words = [k for k in sorted(wordFreq.items(), key=itemgetter(1), reverse=True)[:10]]
for _, word in enumerate(top10Words):
print (word[0])
| 3 | 3 |
OneEncoder_MultiDecoders/models.py | Ali-Sahili/Background-Subtraction-Unsupervised-Learning | 5 | 12796459 |
import torch
import torch.nn as nn
from torch.autograd import Variable
from Param import nc, nz, device
class Model512(nn.Module):
def __init__(self,nz=nz,nef=8,ngf=8,nc=nc):
super(Model512, self).__init__()
self.nz=nz
self.nc=nc
## Encoder Part ##
self.encode = nn.Sequential(
# input is (nc) x 512 x 512
nn.Conv2d(nc, nef, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef),
nn.LeakyReLU(0.2, inplace=True),
# state size is (nef) x 256 x 256
nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*2) x 128 x 128
nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*4) x 64 x 64
nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*8) x 32 x 32
nn.Conv2d(nef*8, nef * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 16),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*16) x 16 x 16
nn.Conv2d(nef * 16, nef * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 32),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*32) x 8 x 8
nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 64),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*64) x 4 x 4
nn.Conv2d(nef * 64, nef * 128, 4, 1, 0, bias=False),
nn.BatchNorm2d(nef * 128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(nef * 128, nz, 1, 1, 0, bias=True),
nn.Sigmoid()
)
## #####
## Decoder Part ##
self.decode3 = nn.Sequential(
nn.ConvTranspose2d(nz, ngf *128 , 2, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 128),
nn.ReLU(True),
# size ngf*128 x2 x2
nn.ConvTranspose2d(ngf * 128, ngf * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 64),
nn.ReLU(True),
# size ngf*64 x4 x4
nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 32),
nn.ReLU(True),
# size ngf*32 x8 x8
nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(True),
# state size. (ngf*16) x 16 x16
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 32 x 32
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True))
# state size. (ngf*4) x 64 x 64
self.conv_layer128 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 2, ngf, 3, 1, 1, bias=False),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode2 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True))
# state size. (ngf*2) x 128 x 128
self.conv_layer256 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode1 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 256 x 256
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
#nn.Sigmoid() # for VAE
# state size. (nc) x 512 x 512
)
self.output_layer = nn.Tanh() #nn.Sigmoid()
def forward(self, input):
x = self.encode(input)
x = self.decode3(x)
out128 = self.output_layer(self.conv_layer128(x))
x = self.decode2(x)
out256 = self.output_layer(self.conv_layer256(x))
out512 = self.decode1(x)
return out128, out256, out512
""" VAE with three losses at three scales of the decoder """
class VAE_Model512(nn.Module):
def __init__(self,nz=nz,ngf=8,nef=8,nc=3):
super(VAE_Model512, self).__init__()
self.nz=nz
self.nc=nc
## Encoder Part ##
self.encode = nn.Sequential(
# input is (nc) x 512 x 512
nn.Conv2d(nc, nef, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef),
nn.LeakyReLU(0.2, inplace=True),
# state size is (nef) x 256 x 256
nn.Conv2d(nef, nef * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*2) x 128 x 128
nn.Conv2d(nef*2, nef * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*4) x 64 x 64
nn.Conv2d(nef * 4, nef * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*8) x 32 x 32
nn.Conv2d(nef*8, nef * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 16),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*16) x 16 x 16
nn.Conv2d(nef * 16, nef * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 32),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*32) x 8 x 8
nn.Conv2d(nef * 32, nef * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(nef * 64),
nn.LeakyReLU(0.2, inplace=True),
# state size. (nef*64) x 4 x 4
nn.Conv2d(nef * 64, nef * 128, 4, 1, 0, bias=False),
nn.BatchNorm2d(nef * 128),
nn.LeakyReLU(0.2, inplace=True),
nn.Conv2d(nef * 128, nz, 1, 1, 0, bias=True),
nn.Sigmoid()
)
## #####
## Decoder Part ##
self.decode3 = nn.Sequential(
nn.ConvTranspose2d(nz, ngf *128 , 2, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 128),
nn.ReLU(True),
# size ngf*128 x2 x2
nn.ConvTranspose2d(ngf * 128, ngf * 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 64),
nn.ReLU(True),
# size ngf*64 x4 x4
nn.ConvTranspose2d(ngf * 64, ngf * 32, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 32),
nn.ReLU(True),
# size ngf*32 x8 x8
nn.ConvTranspose2d(ngf*32, ngf * 16, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 16),
nn.ReLU(True),
# state size. (ngf*16) x 16 x16
nn.ConvTranspose2d(ngf * 16, ngf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 32 x 32
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True))
# state size. (ngf*4) x 64 x 64
self.conv_layer128 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
nn.ConvTranspose2d(ngf * 2, ngf, 3, 1, 1, bias=False),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode2 = nn.Sequential(
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True))
# state size. (ngf*2) x 128 x 128
self.conv_layer256 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
nn.ConvTranspose2d(ngf, nc, 3, 1, 1, bias=False))
self.decode1 = nn.Sequential(
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 256 x 256
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
#nn.Sigmoid() # for VAE
# state size. (nc) x 512 x 512
)
self.output_layer = nn.Tanh() #nn.Sigmoid()
self.fc1 = nn.Linear(nz, 64)
self.fc2 = nn.Linear(nz, 64)
self.fc3 = nn.Linear(64, nz)
def reparametrize(self, mu, logvar):
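        # Reparameterization trick: z = mu + std * eps with eps ~ N(0, I), so sampling stays differentiable w.r.t. mu and logvar.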
std = logvar.mul(0.5).exp_()
eps = torch.FloatTensor(std.size()).normal_().to(device)
eps = Variable(eps)
return eps.mul(std).add_(mu)
def forward(self, input):
b_size = input.shape[0]
x = self.encode(input).view(b_size, nz)
mu = self.fc1(x) #fc1
logvar = self.fc2(x) #fc2
z = self.reparametrize(mu, logvar)
z = self.fc3(z).reshape(-1, self.nz, 1, 1) #fc3
#del x
x = self.decode3(z)
out128 = self.output_layer(self.conv_layer128(x))
x = self.decode2(x)
out256 = self.output_layer(self.conv_layer256(x))
out512 = self.decode1(x)
return out128, out256, out512, mu, logvar
| 2.53125 | 3 |
retail store management system.py | smartinternz02/SPS-9311-Internet-of-Things | 0 | 12796460 |
import time
import sys
import ibmiotf.application
import ibmiotf.device
import random
import json
#Provide your IBM Watson Device Credentials
organization = "1tzgh7"
deviceType = "iotdevice"
deviceId = "0000"
authMethod = "token"
authToken = "<PASSWORD>"
# Initialize the device client
def myCommandCallback(cmd):
print("Command received: %s" % cmd.data['command'])
if cmd.data['command']=='EXPIRED':
print("PRODUCT EXPIRED IS RECIEVED")
elif cmd.data['command']=='lightoff':
print("PRODUCT NOT EXPIRED IS RECIEVED")
if cmd.command == "setInterval":
if 'interval' not in cmd.data:
print("Error - command is missing required information: 'interval'")
else:
interval = cmd.data['interval']
elif cmd.command == "print":
if 'message' not in cmd.data:
print("Error - command is missing required information: 'message'")
else:
print(cmd.data['message'])
try:
deviceOptions = {"org": organization, "type": deviceType, "id": deviceId, "auth-method": authMethod, "auth-token": authToken}
deviceCli = ibmiotf.device.Client(deviceOptions)
#..............................................
except Exception as e:
print("Caught exception connecting device: %s" % str(e))
sys.exit()
# Connect and continuously publish product data into the cloud as events
deviceCli.connect()
while True:
products = "Pasta","bread","butter","panner"
product_ids = 12345,3413,2341,4501
expiry_dates = "20-02-2021","22-02-2021","12-05-2021","12-05-2021"
data = {"prod_name":products, "pro_id":product_ids, "expiry_date":expiry_dates}
#print data
def myOnPublishCallback():
print ("Published Data to IBM Watson")
success = deviceCli.publishEvent("Data", "json", data, qos=0, on_publish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
time.sleep(1)
deviceCli.commandCallback = myCommandCallback
# Disconnect the device and application from the cloud
deviceCli.disconnect()
| 2.890625 | 3 |
__init__.py | yogi-poet/dance_furby | 1 | 12796461 | <filename>__init__.py
from mycroft import MycroftSkill, intent_file_handler
import subprocess
class ControlFurby(MycroftSkill):
def __init__(self):
MycroftSkill.__init__(self)
@intent_file_handler('furby.tell.intent')
def handle_furby_tell(self, message):
self.speak_dialog('furby.tell')
@intent_file_handler('furby.dance.intent')
def handle_furby_dance(self, message):
self.speak_dialog('furby.dance')
@intent_file_handler('furby.sleep.intent')
def handle_furby_sleep(self, message):
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "820"])
self.speak_dialog('furby.sleep')
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "862"])
@intent_file_handler('furby.laugh.intent')
def handle_furby_laugh(self, message):
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "820"])
self.speak_dialog('furby.laugh')
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "863"])
@intent_file_handler('furby.burp.intent')
def handle_furby_burp(self, message):
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "820"])
self.speak_dialog('furby.burp')
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "864"])
@intent_file_handler('furby.fart.intent')
def handle_furby_fart(self, message):
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "820"])
self.speak_dialog('furby.fart')
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "865"])
@intent_file_handler('furby.purr.intent')
def handle_furby_purr(self, message):
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "820"])
self.speak_dialog('furby.purr')
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "866"])
@intent_file_handler('furby.sneeze.intent')
def handle_furby_sneeze(self, message):
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "820"])
self.speak_dialog('furby.sneeze')
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "867"])
@intent_file_handler('furby.sing.intent')
def handle_furby_sing(self, message):
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "820"])
self.speak_dialog('furby.snide_remark')
self.speak_dialog('furby.sing')
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "868"])
@intent_file_handler('furby.talk.intent')
def handle_furby_talk(self, message):
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "820"])
self.speak_dialog('furby.talk')
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "869"])
@intent_file_handler('furby.feed.intent')
def handle_furby_feed(self, message):
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "820"])
self.speak_dialog('furby.feed')
subprocess.call(["perl", "/home/pi/Hacksby/bin/furby-send.pl", "853"])
def stop(self):
pass
def create_skill():
return ControlFurby()
| 2.46875 | 2 |
src/denzel_cli/scripts/cli.py | eliorc/denzel | 17 | 12796462 | <reponame>eliorc/denzel
from .. import commands
from .. import config
import click
@click.group()
def cli():
pass
# -------- startproject --------
@cli.command()
@click.argument('name', type=str)
@click.option('--gpu/--no-gpu', default=False, help="Support for NVIDIA GPU", show_default=True)
def startproject(name, gpu):
"""Builds the denzel project skeleton"""
commands.create_project(project_name=name, use_gpu=gpu)
# -------- launch --------
@cli.command()
@click.option('--api-port', default=config.API_PORT, type=int, help="API endpoints port", show_default=True)
@click.option('--monitor-port', default=config.MONITOR_PORT, type=int, help="Monitor UI port", show_default=True)
def launch(api_port, monitor_port):
"""Builds and starts all services"""
commands.launch(api_port, monitor_port)
# -------- shutdown --------
@cli.command()
@click.option('--purge/--no-purge', default=False, help="Discard the docker images", show_default=True)
def shutdown(purge):
"""Stops and deletes all services"""
commands.shutdown(purge)
# -------- start --------
@cli.command()
def start():
"""Start services"""
commands.start()
# -------- stop --------
@cli.command()
def stop():
"""Stop services"""
commands.stop()
# -------- restart --------
@cli.command()
def restart():
"""Restart services"""
commands.restart()
# -------- status --------
@cli.command()
@click.option('--live/--no-live', default=False,
help='Live status view', show_default=True)
def status(live):
"""Examine status of services and worker"""
commands.status(live)
# -------- updateosreqs --------
@cli.command()
def updateosreqs():
"""Run shell commands from requirements.sh on all services"""
commands.updateosreqs()
# -------- updatepipreqs --------
@cli.command()
def updatepipreqs():
"""Update services according to requirements.txt"""
commands.updatepipreqs()
# -------- updatepipreqs --------
@cli.command()
def updatereqs():
"""Update services using requirements.txt and requirements.sh"""
commands.updatereqs()
# -------- logs --------
@cli.command()
@click.option('--service', default='all', type=click.Choice(config.SERVICES + ['all']),
help='Target service', show_default=True)
@click.option('--live/--no-live', default=False,
help='Follow logs output', show_default=True)
def logs(service, live):
"""Show service logs"""
commands.logs(service, live)
# -------- logworker --------
@cli.command()
@click.option('--live/--no-live', default=False,
help='Follow logs output', show_default=True)
def logworker(live):
"""Show worker log"""
commands.logworker(live)
# -------- shell --------
@cli.command()
@click.option('--service', default='denzel', type=click.Choice(config.SERVICES),
help='Target service', show_default=True)
def shell(service):
"""Connect to service bash shell"""
commands.shell(service)
# -------- response --------
@cli.command()
@click.option('--sync/--async', required=True, default=True, help='Responses synchronicity')
@click.option('--timeout', default=5., type=float, show_default=True, help='Sync response timeout in seconds')
def response(sync, timeout):
"""Set response manner (sync/async) and sync timeout"""
if sync is None:
raise click.ClickException('Must pass --sync or --async')
if sync and timeout <= 0:
raise click.ClickException('Sync timeout must be greater than 0')
commands.response(sync, timeout)
| 2.25 | 2 |
netbox_paloalto/migrations/0001_initial.py | rodvand/netbox-paloalto | 26 | 12796463 | <reponame>rodvand/netbox-paloalto<gh_stars>10-100
# Generated by Django 3.0.5 on 2020-04-18 20:25
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='FirewallConfig',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('hostname', models.CharField(max_length=50)),
('api_key', models.CharField(max_length=128)),
('panorama', models.BooleanField()),
],
),
]
| 1.765625 | 2 |
nsd1806/python/day08/tcpserv2.py | MrWangwf/nsd1806 | 0 | 12796464 | <gh_stars>0
import socket
host = ''
port = 12345
addr = (host, port)
s = socket.socket()
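# SO_REUSEADDR lets the server rebind the same port immediately after a restart (avoids "Address already in use").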
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(addr)
s.listen(1)
while True:
try:
cli_sock, cli_addr = s.accept()
except KeyboardInterrupt:
break
print('Hello,', cli_addr)
while True:
        data = cli_sock.recv(1024).decode()  # decode the received bytes into a str
if data.strip() == 'quit':
break
print(data)
sdata = input('> ') + '\r\n'
        cli_sock.send(sdata.encode())  # encode the str into bytes before sending
cli_sock.close()
s.close()
| 2.75 | 3 |
kaminskySilent.py | filipdavidovic/kaminsky_vulnerability | 5 | 12796465 | <reponame>filipdavidovic/kaminsky_vulnerability
import argparse
from scapy.all import ARP, Ether, sniff, sendp, send, IP, UDP, DNS, DNSQR, DNSRR
from vars import ccolors
import utils
from arppoison import arppoison
import sys
import socket
def silent(args):
    # ARP poison the victims (two-way ARP poisoning)
for ip in args.soaIP:
arppoison(args.victim, ip)
# send query request to the victim
args.randomSubdomain = utils.getRandomSubdomain() + args.targetDomain
reqPkt = IP(dst=args.victim) / UDP(sport=123) / DNS(qr=0, qd=DNSQR(qname=args.randomSubdomain))
send(reqPkt, verbose=False)
global globalargs
globalargs = args
# listen for packets on all interfaces (expect query request from victim to authoritative DNS)
sniff(prn=dnsSpoof)
def dnsSpoof(pkt):
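    # sniff() callback: unrelated packets are re-emitted, while queries for the poisoned subdomain get a forged response
    # carrying an NS referral plus a glue A record that points the victim at the attacker-controlled address.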
if not pkt.haslayer(DNSQR) or not pkt.haslayer(UDP):
sendp(pkt, verbose=False)
else:
if (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst in globalargs.soaIP):
# return the response to the victim (it will think its from the authoritative DNS)
spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \
UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \
DNS(id=pkt[DNS].id, qr=1, aa=1, qd=pkt[DNS].qd, \
ns=DNSRR(rrname=globalargs.targetDomain, type='NS', rdata=globalargs.soaDomain[0], ttl=globalargs.ttl), \
ar=DNSRR(rrname=globalargs.soaDomain[0], type='A', rdata=globalargs.addressToForge, ttl=globalargs.ttl))
send(spoof_pkt, verbose=False)
print ccolors.OKGREEN + "Victim DNS poisoned...\n" + ccolors.NC
elif (globalargs.randomSubdomain in pkt[DNS].qd.qname) and (pkt[IP].dst == globalargs.addressToForge):
spoof_pkt = IP(dst=pkt[IP].src, src=pkt[IP].dst) / \
UDP(dport=pkt[UDP].sport, sport=pkt[UDP].dport) / \
DNS(id=pkt[DNS].id, qr=1, aa=1, rcode=0, qd=pkt[DNS].qd, \
an=DNSRR(rrname=pkt[DNS].qd.qname, type="A", rdata="172.16.58.3"))
send(spoof_pkt, verbose=False)
print ccolors.OKGREEN + "Attack successful!\n" + ccolors.NC + ccolors.WARNING + "Terminating..." + ccolors.NC
sys.exit()
| 2.609375 | 3 |
satella/instrumentation/metrics/structures/cache_dict.py | piotrmaslanka/satella | 12 | 12796466 | <reponame>piotrmaslanka/satella
import logging
import time
import typing as tp
from satella.coding.structures import CacheDict, LRUCacheDict, ExclusiveWritebackCache
from satella.coding.typing import K, V
from .. import Metric
from ..metric_types.callable import CallableMetric
from ..metric_types.counter import CounterMetric
from ..metric_types.measurable_mixin import MeasurableMixin
logger = logging.getLogger(__name__)
class MetrifiedCacheDict(CacheDict[K, V]):
"""
A CacheDict with metrics!
:param cache_hits: a counter metric that will be updated with +1 each time there's a cache hit
:param cache_miss: a counter metric that will be updated with +1 each time there's a cache miss
:param refreshes: a metric that will be updated with +1 each time there's a cache refresh
:param how_long_refresh_takes: a metric that will be ticked with time value_getter took
"""
def __init__(self, stale_interval, expiration_interval, value_getter,
value_getter_executor=None, cache_failures_interval=None,
time_getter=time.monotonic,
default_value_factory=None,
cache_hits: tp.Optional[CounterMetric] = None,
cache_miss: tp.Optional[CounterMetric] = None,
refreshes: tp.Optional[CounterMetric] = None,
how_long_refresh_takes: tp.Optional[MeasurableMixin] = None):
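        # Wrap the supplied value_getter so that every cache refresh ticks the `refreshes` counter
        # and is timed by `how_long_refresh_takes` (when those metrics are provided).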
if refreshes:
old_value_getter = value_getter
def value_getter_replacement(item):
try:
return old_value_getter(item)
finally:
if self.refreshes:
self.refreshes.runtime(+1)
value_getter = value_getter_replacement
if how_long_refresh_takes:
value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter)
super().__init__(stale_interval, expiration_interval, value_getter,
value_getter_executor, cache_failures_interval, time_getter,
default_value_factory)
self.cache_hits = cache_hits
self.cache_miss = cache_miss
self.refreshes = refreshes
self.how_long_refresh_takes = how_long_refresh_takes
def __getitem__(self, item):
if self.has_info_about(item):
if self.cache_hits:
self.cache_hits.runtime(+1)
else:
if self.cache_miss:
self.cache_miss.runtime(+1)
return super().__getitem__(item)
class MetrifiedLRUCacheDict(LRUCacheDict[K, V]):
"""
A LRUCacheDict with metrics!
:param cache_hits: a counter metric that will be updated with +1 each time there's a cache hit
:param cache_miss: a counter metric that will be updated with +1 each time there's a cache miss
:param refreshes: a metric that will be updated with +1 each time there's a cache refresh
:param how_long_refresh_takes: a metric that will be ticked with time value_getter took
"""
def __init__(self, stale_interval: float, expiration_interval: float,
value_getter, value_getter_executor=None,
cache_failures_interval=None,
time_getter=time.monotonic,
default_value_factory=None,
max_size: int = 100,
cache_hits: tp.Optional[Metric] = None,
cache_miss: tp.Optional[Metric] = None,
refreshes: tp.Optional[Metric] = None,
how_long_refresh_takes: tp.Optional[MeasurableMixin] = None,
evictions: tp.Optional[Metric] = None,
**kwargs):
if refreshes:
old_value_getter = value_getter
def value_getter_replacement(item):
try:
return old_value_getter(item)
finally:
if self.refreshes:
self.refreshes.runtime(+1)
value_getter = value_getter_replacement
if how_long_refresh_takes:
value_getter = how_long_refresh_takes.measure(value_getter=time_getter)(value_getter)
super().__init__(stale_interval, expiration_interval, value_getter,
value_getter_executor, cache_failures_interval, time_getter,
default_value_factory, max_size=max_size)
self.cache_hits = cache_hits
self.cache_miss = cache_miss
self.refreshes = refreshes
self.evictions = evictions
self.how_long_refresh_takes = how_long_refresh_takes
def evict(self):
if self.evictions is not None:
self.evictions.runtime(+1)
super().evict()
def __getitem__(self, item):
if self.has_info_about(item):
if self.cache_hits:
self.cache_hits.runtime(+1)
else:
if self.cache_miss:
self.cache_miss.runtime(+1)
return super().__getitem__(item)
class MetrifiedExclusiveWritebackCache(ExclusiveWritebackCache[K, V]):
__slots__ = ('cache_miss', 'cache_hits')
def __init__(self, *args,
cache_hits: tp.Optional[CounterMetric] = None,
cache_miss: tp.Optional[CounterMetric] = None,
entries_waiting: tp.Optional[CallableMetric] = None,
**kwargs):
super().__init__(*args, **kwargs)
self.cache_miss = cache_miss
self.cache_hits = cache_hits
if entries_waiting is not None:
entries_waiting.callable = self.get_queue_length()
def __getitem__(self, item):
if item in self.in_cache:
if self.cache_hits:
self.cache_hits.runtime(+1)
else:
if self.cache_miss:
self.cache_miss.runtime(+1)
return super().__getitem__(item)
| 2.25 | 2 |
factorial.py | CrownCrafter/School | 0 | 12796467 | import math
print(math.factorial(int(input("Enter number "))))
| 3.4375 | 3 |
comlocal/interface/ComIFace.py | jtsiva/ComLocAL | 0 | 12796468 | from twisted.spread import pb
from twisted.internet import reactor
from comlocal.core.Com import Com
class ComIFace (object):
def __init__(self, name, port):
self.readCB = None
self._comiface = None
self.name = name
self.port = port
def start(self):
self._comiface = _ComIFace(self)
self.tcpPort = reactor.listenTCP(self.port, pb.PBServerFactory(self._comiface), interface='127.0.0.1')
d = self._comiface.unregister()
d.addCallback(lambda res: self._comiface.register())
return d
def stop(self):
d = self._comiface.unregister()
port, self.tcpPort = self.tcpPort, None
d.addCallback(lambda res: port.stopListening())
return d
def write(self, msg, dest):
message = {'msg':msg,'dest':dest}
return self._comiface.doWrite(message)
def cmd(self, cmd, **kwargs):
command = {'cmd':cmd}
for key in kwargs:
command[key] = kwargs[key]
return self._comiface.doCmd(command)
class _ComIFace(pb.Root):
def __init__(self, iface):
self.iface = iface
self.port = self.iface.port
self.obj = None
def register(self):
def regAck(result):
assert 'success' in result['result']
self.iface.registered = True
def failed(reason):
print reason
def connected(obj):
self.obj = obj
regPacket = {'cmd': 'reg_app', 'name':self.iface.name,'port':self.port}
d = obj.callRemote('cmd', regPacket)
d.addCallbacks(regAck,failed)
#d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed)
return d
if self.obj is None:
factory = pb.PBClientFactory()
reactor.connectTCP("127.0.0.1", Com.myPort, factory)
d = factory.getRootObject()
d.addCallbacks(connected, failed)
else:
d = connected(self.obj)
return d
def unregister(self):
def regAck(result):
#assert 'success' in result['result']
self.iface.registered = False
def failed(reason):
print reason
def connected(obj):
self.obj = obj
regPacket = {'cmd': 'unreg_app', 'name':self.iface.name,'port':self.iface.port}
d = obj.callRemote('cmd', regPacket)
d.addCallbacks(regAck,failed)
#d.addCallbacks(lambda result: obj.broker.transport.loseConnection(), failed)
return d
if self.obj is None:
factory = pb.PBClientFactory()
reactor.connectTCP("127.0.0.1", Com.myPort, factory)
d = factory.getRootObject()
d.addCallbacks(connected, failed)
else:
d = connected(self.obj)
return d
def doWrite(self, msg):
def writeAck(result):
return result
def failed(reason):
print reason
reason.printTraceback()
def connected(obj):
self.obj = obj
# def closeAndReturn (result):
# obj.broker.transport.loseConnection()
# return result
d = obj.callRemote('write', msg)
d.addCallbacks(writeAck, failed)
#d.addCallbacks(closeAndReturn, failed)
return d
if self.obj is None:
factory = pb.PBClientFactory()
reactor.connectTCP("127.0.0.1", Com.myPort, factory)
d = factory.getRootObject()
d.addCallbacks(connected, failed)
else:
d = connected(self.obj)
return d
def doCmd(self,cmd):
def writeAck(result):
#print self.success(str(result))
return result
def failed(reason):
log.msg(self.failure (str(reason)))
reason.printTraceback()
def connected(obj):
self.obj = obj
def closeAndReturn (result):
#obj.broker.transport.loseConnection()
return result
d = obj.callRemote('cmd', cmd)
d.addCallbacks(writeAck, failed)
d.addCallbacks(closeAndReturn, failed)
return d
if self.obj is None:
factory = pb.PBClientFactory()
reactor.connectTCP("127.0.0.1", Com.myPort, factory)
d = factory.getRootObject()
d.addCallbacks(connected, failed)
else:
d = connected(self.obj)
return d
def remote_read(self, message):
self.iface.readCB(message)
| 2.296875 | 2 |
test/unit/__init__.py | Pezmc/bank2ynab | 39 | 12796469 | <reponame>Pezmc/bank2ynab<gh_stars>10-100
import sys
from os.path import dirname, join, realpath
project_dirname = dirname(dirname(realpath(__file__)))
path = join(project_dirname, "bank2ynab")
if path not in sys.path:
sys.path.append(path)
| 2.21875 | 2 |
flaskapp/blueprints/views.py | luyisimiger/proyecto_sl_pruebas_de_carga | 0 | 12796470 | <gh_stars>0
from flask import current_app, Blueprint, render_template, redirect, url_for, request
from flaskapp.logic.ab import runab
views = Blueprint('views', __name__)
@views.route('/')
def home():
return render_template("base.html")
@views.route('/ab', methods=["GET", "POST"])
def ab_get():
url = "http://www.github.com/"
n = "20"
c = "5"
if request.json:
print(request.json)
c = str(request.json['c'])
n = str(request.json['n'])
url = str(request.json['url'])
args = ["ab", "-c", c, "-n", n, url]
print(args)
result = runab(args)
# return render_template("ab.html")
return result
| 2.578125 | 3 |
gfzs/views/header.py | yukihirop/gfzs | 0 | 12796471 | <gh_stars>0
import curses
import os, sys
GOOGLE = "Google"
FUZZY = "Fuzzy"
SEARCH = "Search"
try:
# need when 「python3 gfzs/views/header.py」
if __name__ == "__main__":
# https://codechacha.com/ja/how-to-import-python-files/
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import info
import utils.logger as logger
from base import Base
if os.environ.get("DEBUG"):
import utils.debug as debug
# need when 「cat fixtures/rust.json | python -m gfzs」
# need when 「cat fixtures/rust.json | bin/gfzs」
else:
from gfzs import info
from gfzs.views.base import Base
import gfzs.utils.logger as logger
if os.environ.get("DEBUG"):
import gfzs.utils.debug as debug
# need when 「python3 gfzs/controller.py」
except ModuleNotFoundError:
# https://codechacha.com/ja/how-to-import-python-files/
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname("../"))))
import info
from views.base import Base
import utils.logger as logger
if os.environ.get("DEBUG"):
import utils.debug as debug
class Header(Base):
def __init__(self, stdscr):
super().__init__(stdscr, None, "header")
self.version = "(%s)" % info.__version__
self.copyright = info.__copyright__
def create(self):
logger.debug("[Header] create")
self._init_layout()
self._make_header()
self.window.refresh()
def reset(self):
logger.debug("[Header] reset")
self.window.erase()
self._init_layout()
self._make_header()
self.window.refresh()
def _init_layout(self):
self.parent_height, self.parent_width = self.stdscr.getmaxyx()
self.window = curses.newwin(2, self.parent_width, 0, 0)
# https://stackoverflow.com/a/53016371/9434894
def _make_header(self):
start_index = 0
# Write Google
google = list(GOOGLE)
first_o = True
for i in range(len(google)):
c = google[i]
if c == "o":
if first_o:
first_o = False
self.window.addstr(0, i, c, self.color.google("o"))
else:
self.window.addstr(0, i, c, self.color.google("o2"))
else:
self.window.addstr(0, i, c, self.color.google(c))
# Write Fuzzy
start_index += len(GOOGLE) + 1
self.window.addstr(0, start_index, FUZZY, self.color.fuzzy())
# Write Search
start_index += len(FUZZY) + 1
self.window.addstr(0, start_index, SEARCH, self.color.search())
# Write verion
start_index += len(SEARCH) + 1
self.window.addstr(0, start_index, self.version, self.color.version())
# Write Copyright
self.window.addstr(
0,
self.parent_width - len(self.copyright),
self.copyright,
self.color.copy_right(),
)
self.window.hline(
1, 0, curses.ACS_HLINE | self.colors["hline"], self.parent_width
)
if __name__ == "__main__":
class TestHeader(Header):
def run(self):
self._loop()
def _end_curses(self):
""" Terminates the curses application. """
logger.debug("[TestHeader] end curses")
curses.nocbreak()
self.window.keypad(0)
curses.echo()
curses.endwin()
def _loop(self):
self.create()
while True:
try:
user_input = self.window.getch()
except curses.error:
continue
except KeyboardInterrupt:
break
if user_input == curses.KEY_RESIZE:
self.reset()
if __name__ == "__main__":
import signal
# local
# https://codechacha.com/ja/how-to-import-python-files/
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from model import Model
import runtime.config as runtime_config
import utils.color as color
progname = "gfzs.views.header"
properties = {"progname": progname, "severity": 0, "log_path": "./tmp/gfzs.log"}
logger.init_properties(**properties)
logger.debug("start %s" % progname)
def handle_sigint(signum, frame):
logger.debug("detect SIGINT (Ctrl-c)")
logger.debug("exit 0")
sys.exit(0)
signal.signal(signal.SIGINT, handle_sigint)
runtime_config.init()
if not runtime_config.valid():
logger.debug("[print] 'Config is invalid.'")
print("Config is invalid.")
for error in runtime_config.errors:
logger.error(error)
print("Error: %s" % error)
logger.debug("exit 1")
sys.exit(1)
# initscr() returns a window object representing the entire screen.
logger.debug("init curses")
stdscr = curses.initscr()
color.init()
# turn off automatic echoing of keys to the screen
curses.noecho()
# Buffering off
# https://docs.python.org/ja/3/library/curses.html#curses.cbreak
curses.cbreak()
# Disable the mouse cursor.
curses.curs_set(0)
target = TestHeader(stdscr)
error = None
try:
target.run()
except curses.error as e:
error = str(e)
finally:
target._end_curses()
if error != None:
logger.error(error)
print(error)
logger.debug("end %s" % progname, new_line=True)
| 2.203125 | 2 |
app_modules/widgets/numpad.py | l337quez/Aplicaci-n-ANDROID-para-control-del-suministro-de-energia- | 14 | 12796472 | <reponame>l337quez/Aplicaci-n-ANDROID-para-control-del-suministro-de-energia-
from kivy.uix.gridlayout import GridLayout
from kivy.lang import Builder
Builder.load_string('''
<NumpadBlueButton@BlueButton>:
on_release: self.parent.select_btn(self.text)
<Numpad>:
cols: 3
rows: 4
spacing: 3, 3
NumpadBlueButton:
text: '1'
NumpadBlueButton:
text: '2'
NumpadBlueButton:
text: '3'
NumpadBlueButton:
text: '4'
NumpadBlueButton:
text: '5'
NumpadBlueButton:
text: '6'
NumpadBlueButton:
text: '7'
NumpadBlueButton:
text: '8'
NumpadBlueButton:
text: '9'
<ActivityNumpad>:
cols: 1
rows: 2
spacing: 3, 3
BoxLayout:
size_hint_y: 0.5
orientation: 'horizontal'
BlueButton:
size_hint: 0.2, 1
text: '1'
on_release: root.select_btn(1)
BlueButton:
size_hint: 0.8, 1
text: 'App'
on_release: root.select_btn(1)
BoxLayout:
size_hint_y: 0.5
orientation: 'horizontal'
BlueButton:
size_hint: 0.2, 1
text: '2'
on_release: root.select_btn(2)
BlueButton:
size_hint: 0.8, 1
text: 'Service'
on_release: root.select_btn(2)
''')
class Numpad(GridLayout):
selected_btn = 1
callback = None
def select_btn(self, num):
self.selected_btn = num
if self.callback:
self.callback(num)
class ActivityNumpad(GridLayout):
selected_btn = 1
callback = None
def select_btn(self, num):
self.selected_btn = num
if self.callback:
self.callback(num)
| 2.375 | 2 |
trained_traffic_model/image_regression.py | christnp/e6889-project | 0 | 12796473 |
import numpy as np
import pandas as pd
from sklearn import model_selection
import tensorflow as tf
from pathlib import Path
"""
<NAME>, WAK2116, ELEN-E6889, Spring 2019
Final Project
This python file trains a neural network that predicts an activity level
based on a jpg image from a traffic camera
This is an initial attempt at doing regression based on image data.
It is loosely based on TF image classification examples and
"Deep Leaning: Image Recognition" on Lynda.com
"""
# paths to the labeled traffic-camera images and their labels file
img_path = "./labeled_data/"
df = pd.read_csv('./labeled_data/labels.txt')
#print(df)
df_train, df_valid = model_selection.train_test_split(df, test_size=.1)
#print(df_train)
#print("---")
#print(df_valid)
def keras_data(data):
# Output arrays
x = np.empty([0, 160, 160, 3], dtype=np.float32)
y = np.empty([data.datetime.count()], dtype=np.float32)
#print(x.shape)
#print(y.shape)
# Read in and preprocess a batch of images
sess = tf.Session()
for i in range(0, data.datetime.count()):
#print(data.people[data.index[i]])
y_value = data.vehicles[data.index[i]] + data.people[data.index[i]]
#print(y_value)
#y = np.append(y, [y_value])
y[i] = y_value
# convert image to a tensor
# img_raw = tf.read_file(sample_img_path)
image_file = img_path + data.datetime[data.index[i]]
img_raw = tf.read_file(image_file)
#print(repr(img_raw)[:200]+"...")
img_tensor = tf.image.decode_image(img_raw)
#print(img_tensor.shape)
cropped_tensor = tf.image.crop_to_bounding_box(img_tensor,80, 80, 160, 220)
#print(cropped_tensor.shape)
#output_image = tf.image.encode_png(cropped_tensor)
#file = tf.write_file("text.png",output_image)
img_tensor = tf.image.resize(cropped_tensor, [160, 160])
#img_tensor = tf.image.resize(img_tensor, [240, 240]) # squish it down a bit
img_tensor /= 255.0 # normalize to [0,1] range
# print(img_tensor)
#print(img_tensor.shape)
# print(img_tensor.dtype)
sess = tf.Session()
with sess.as_default():
np_array = img_tensor.eval()
#print("np from tensor", np_array.shape)
indexed_array = np.expand_dims(np_array, axis=0)
#print("np from tensor with index",indexed_array.shape)
x = np.append(x, indexed_array, axis=0)
#print("x shape", x.shape)
#print(y.shape)
return x, y
x_train, y_train = keras_data(df_train)
x_test, y_test = keras_data(df_valid)
#y_train = tf.keras.utils.to_categorical(y_train, 16)
#y_test = tf.keras.utils.to_categorical(y_test, 16)
y_train = y_train / 16
y_test = y_test / 16
#(x_train, y_train), (x_test,y_test) = tf.keras.datasets.cifar10.load_data()
#x_train = x_train.astype("float32")
#x_test = x_test.astype("float32")
#x_train = x_train / 255
#x_test = x_test / 255
#y_train = tf.keras.utils.to_categorical(y_train, 10)
#y_test = tf.keras.utils.to_categorical(y_test, 10)
model = tf.keras.Sequential()
#model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(32, 32, 3)))
model.add(tf.keras.layers.Conv2D(32,(3,3), padding='same', activation='relu', input_shape=(160, 160, 3)))
model.add(tf.keras.layers.Conv2D(32,(3,3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2,2))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Conv2D(64,(3,3), padding='same', activation='relu'))
model.add(tf.keras.layers.Conv2D(64,(3,3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(2,2))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(512, activation="relu"))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(100, activation='relu'))
model.add(tf.keras.layers.Dropout(.25))
#model.add(tf.keras.layers.Dense(10, activation="softmax"))
model.add(tf.keras.layers.Dense(10, activation="relu"))
model.add(tf.keras.layers.Dense(1))
model.compile(
#loss='categorical_crossentropy',
loss='mse',
optimizer='adam',
metrics=["accuracy", "mae"]
)
model.summary()
model.fit(
x_train,
y_train,
batch_size=10,
epochs=30,
validation_data=[x_test, y_test],
shuffle=True #,
#steps_per_epoch=1000
)
# save structure
model_structure = model.to_json()
f = Path("model_structure.json")
f.write_text(model_structure)
# save weights
model.save_weights("model_weights.h5")
| 3.171875 | 3 |
src/utils.py | sheepy0125/some-platformer-game | 0 | 12796474 | """
Tools for Some Platformer Game
Created by sheepy0125
02/10/2021
"""
import pygame
from pathlib import Path
###############
### Globals ###
###############
ROOT_PATH: Path = Path(__file__).parent.parent
####################
### Logger class ###
####################
class Logger:
"""Log messages with ease"""
colors: dict = {
"log": "\033[92m",
"warn": "\033[93m",
"fatal": "\033[91m",
"normal": "\033[0m",
}
@staticmethod
def log(message: str):
print(f"{Logger.colors['log']}[INFO] {message}{Logger.colors['normal']}")
@staticmethod
def warn(message: str):
print(f"{Logger.colors['warn']}[WARN] {message}{Logger.colors['normal']}")
@staticmethod
def fatal(message: str):
print(f"{Logger.colors['fatal']}[FAIL] {message}{Logger.colors['normal']}")
@staticmethod
def log_error(error: Exception):
Logger.fatal(
f"{type(error).__name__}: {str(error)} (line {error.__traceback__.tb_lineno})"
)
#############################
### Scroll handling class ###
#############################
class Scrolling:
scroll_x: float = 0
scroll_y: float = 0
max_scroll_x: float = 0
max_scroll_y: float = 0
@staticmethod
def setup_scrolling(map_size, tile_size, screen_size):
"""Setup scrolling"""
Scrolling.max_scroll_x = (map_size[0] * tile_size) - (screen_size[0])
Scrolling.max_scroll_y = (map_size[1] * tile_size) - (screen_size[1]) - 50
Logger.log(
f"Max scrolling: ({Scrolling.max_scroll_x}, {Scrolling.max_scroll_y})"
)
@staticmethod
def update_scrolling(player_pos, scroll_offset):
"""Update scrolling"""
# Center player
Scrolling.scroll_x += (
player_pos[0] - Scrolling.scroll_x - scroll_offset[0]
) / 10
Scrolling.scroll_y += (
player_pos[1] - Scrolling.scroll_y - scroll_offset[1]
) / 10
# Don't allow scrolling off the map
# X axis
if Scrolling.scroll_x <= 0:
Scrolling.scroll_x = 0
elif Scrolling.scroll_x >= Scrolling.max_scroll_x:
Scrolling.scroll_x = Scrolling.max_scroll_x
# Y axis
if Scrolling.scroll_y <= 0:
Scrolling.scroll_y = 0
elif Scrolling.scroll_y >= Scrolling.max_scroll_y:
Scrolling.scroll_y = Scrolling.max_scroll_y
class Animations:
def __init__(self,image_path,cols,rows,dict_names):
self.sprites = self.get_images_from_spritesheet(image_path,cols,rows)
self.dict = self.load_dict(dict_names)
    def load_dict(self, dict_names):
        sprite_dict = {}
        for i in range(len(self.sprites)):
            sprite_dict[dict_names[i]] = self.sprites[i]
        return sprite_dict
    def add_extra_sprites(self):
        # Build squashed and stretched variants of every animation frame. The
        # scale factors below are assumptions; the original pygame.transform.scale()
        # calls were left unfinished, and the variants are not stored anywhere yet.
        for name in self.dict:
            copied_sprites = self.dict[name].copy()
            squashed_sprites = []
            stretched_sprites = []
            for sprite in copied_sprites:
                width, height = sprite.get_size()
                squashed_sprites.append(pygame.transform.scale(sprite, (width, int(height * 0.75))))
                stretched_sprites.append(pygame.transform.scale(sprite, (width, int(height * 1.25))))
    def get_images_from_spritesheet(self, image_path, cols, rows):
"""
get the images from the spritesheet
cols is number of columns
rows is number of rows
"""
spritesheet = pygame.image.load(image_path)
        sprite_width = spritesheet.get_width() // cols
        sprite_height = spritesheet.get_height() // rows
        empty_image = pygame.Surface((sprite_width, sprite_height)).get_buffer().raw
        columns = []  # one list of row images per spritesheet column
# loop through the number of columns
for col_num in range(cols):
# get the x position of the sprite by multiplying
# the column that its on by the width
x_pos = col_num * sprite_width
row_images = []
for row_num in range(rows):
# loop through the number of rows
y_pos = row_num * sprite_height
sprite_rect = (x_pos, y_pos, sprite_width, sprite_height)
sprite = spritesheet.subsurface(sprite_rect)
if sprite.get_buffer().raw == empty_image:
continue
row_images.append(sprite)
            columns.append(row_images)
        return columns
| 2.671875 | 3 |
Interface.py | BhasherBEL/LittleFather | 5 | 12796475 | <gh_stars>1-10
#!/usr/local/bin/python
# coding: utf-8
import Coms
import sys
path = ['~']
def run() -> None:
while True:
input_value = input('LittleFather(' + '/'.join(path) + ') $ ').split(' ')
if len(input_value) == 0:
continue
if not interpret(input_value):
break
def custom_input():
return input('> ')
def interpret(args: list) -> bool:
command = args[0]
args = [] if len(args) == 1 else args[1:]
if command in Coms.commands:
Coms.commands[command].execute(args)
elif command == 'exit':
return False
else:
print('unknown command')
return True
| 2.9375 | 3 |
dialogentail/util/math.py | nouhadziri/DialogEntailment | 56 | 12796476 |
def safe_div(a, b):
return (a / b) if b else 0
| 1.859375 | 2 |
server.py | magj3k/swiftpage | 0 | 12796477 | import os
import time
import webbrowser
import threading
from addons.addons_server import *
from page import *
import http.server
import socketserver
last_modified_time = 0
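# dev_server.html wraps the generated site in an iframe and polls
# .swiftpage_commands every 30 ms; when the token written to that file changes,
# the iframe is reloaded so the browser always shows the latest build.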
dev_page_prefix = '''
<html>
<title>SwiftPage Development Server</title>
<script src='https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js'></script>
<script>
var lastRefreshToken = "none";
function loadCommands(filename) {
$.get(filename, function(data, textStatus) {
if (textStatus == 'success') {
var lines = data.match(/^.*(('''
dev_page_middle = '\\'+'r'+'\\'+'n'+'|'+'\\'+'n'+'|'+'\\'+'r'
dev_page_suffix = ''')|$)/gm)
for (var i = 0; i < lines.length; i++) {
//console.log(lines[i]); // TODO: remove
if (lines[i] !== '' && lines[i] !== ' ') {
if (lastRefreshToken !== lines[i]) {
lastRefreshToken = lines[i];
var iframe = document.getElementsByName('content_window')[0];
iframe.src = iframe.src
iframe.contentWindow.location.reload(true);
//iframe.location.reload(true);
}
}
}
} else {
console.log('Commands file does not exist.');
}
});
}
function checkForCommands() {
var filename = '.swiftpage_commands';
loadCommands(filename);
}
setInterval(checkForCommands, 30);
</script>
<body style='padding: 0px; margin: 0px;'>
<iframe src='./site/index.html' name='content_window' style='width: 100%; height: 100%; outline: none;' frameborder='0'></iframe>
</body>
</html>
'''
dev_page = dev_page_prefix+dev_page_middle+dev_page_suffix
addons_server = AddonsServer(page)
def main_loop():
while True:
global last_modified_time
# checks to see if files have been updated
modified_time = os.path.getmtime("page.py")
# if necessary, saves new copy of swiftpage
if last_modified_time != modified_time:
last_modified_time = modified_time
os.system('python create_page.py')
print("Page modified, new SwiftPage generated: "+str(last_modified_time))
# refreshes web browser and writes other commands
commands = open(".swiftpage_commands","w")
commands.write(str(last_modified_time))
commands.close()
else:
# empties commands
commands = open(".swiftpage_commands","w")
commands.write("")
commands.close()
time.sleep(0.6)
def addons_loop():
global addons_server
addons_server.on_update()
# removes any leftover commands file from a previous run
if os.path.exists(".swiftpage_commands"):
    os.remove(".swiftpage_commands")
# creates dev_server.html
dev_server_page = open("dev_server.html","w")
dev_server_page.write(dev_page)
dev_server_page.close()
# defines custom HTTP handler
class customHandler(http.server.SimpleHTTPRequestHandler):
def log_message(self, format, *args):
return
# starts web server
port = 8080
handler = customHandler # http.server.SimpleHTTPRequestHandler
t1 = threading.Thread(target=main_loop)
t2 = threading.Thread(target=addons_loop)
with socketserver.TCPServer(("", port), handler) as httpd:
# opens web browser of local server
webbrowser.open('http://127.0.0.1:8080/dev_server.html', new=0, autoraise=True)
print("SwiftPage server running, your site will now be automatically regenerated when changes are made")
# starts loops
t1.start()
t2.start()
# serves html server
httpd.serve_forever()
| 2.21875 | 2 |
lm1b/model/model_nodes.py | samiraabnar/lm_1b_fullgraph | 0 | 12796478 | # todo: add dropout to trainer
# todo: add GPU support to trainer
# todo: reset lstm hidden state for inference
# todo: cleanup batch_sizing inconsistencies
import tensorflow as tf
import re
import os
import lm1b.model.char_embedding_nodes as char_embedding_nodes
from lm1b.utils.util import merge
from lm1b.utils.model import sharded_linear, create_sharded_weights
NUM_SHARDS = 8
def _attach_cached_lstm_nodes(input, hparams=None):
"""
LSTM with cached / preserved hidden state
see: https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html
see: https://stackoverflow.com/questions/37969065/tensorflow-best-way-to-save-state-in-rnns
:param input: tensor of word embeddings
:param hparams:
:return: lstm output and state
"""
# LSTM with cached / preserved hidden state
# https://r2rt.com/non-zero-initial-states-for-recurrent-neural-networks.html
cell = tf.contrib.rnn.LSTMCell(num_units=NUM_SHARDS * hparams.word_embedding_size,
num_proj=hparams.word_embedding_size,
num_unit_shards=NUM_SHARDS, num_proj_shards=NUM_SHARDS,
forget_bias=1.0, use_peepholes=True)
state_c = tf.get_variable(name="state_c",
shape=(hparams.batch_size * hparams.sequence_length, 8192),
initializer=tf.zeros_initializer,
trainable=False)
state_h = tf.get_variable(name="state_h",
shape=(hparams.batch_size * hparams.sequence_length, 1024),
initializer=tf.zeros_initializer,
trainable=False)
out_0, state_0 = cell(input, tf.nn.rnn_cell.LSTMStateTuple(state_c, state_h))
ass_c = tf.assign(state_c, state_0[0])
ass_h = tf.assign(state_h, state_0[1])
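    # Tie the state-update assign ops to the LSTM output so that evaluating out_0
    # also writes the new hidden state back into the cached variables.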
with tf.control_dependencies([ass_c, ass_h]):
out_0 = tf.identity(out_0)
return out_0, state_0
def _attach_projection_nodes(input, hparams=None):
"""
Project LSTM outputs to sparse vectors / word predictions
:param input: lstm outputs
:param hparams:
:return: tensor shaped [?,vocab_size]
"""
softmax_w = create_sharded_weights((hparams.vocab_size / NUM_SHARDS, hparams.word_embedding_size),
num_shards=NUM_SHARDS,
concat_dim=1)
softmax_w = tf.reshape(softmax_w, shape=(-1, hparams.word_embedding_size))
softmax_b = tf.get_variable('b', shape=(hparams.vocab_size))
logits = tf.nn.bias_add(tf.matmul(input, softmax_w, transpose_b=True), softmax_b, data_format="NHWC")
softmax = tf.nn.softmax(logits)
return logits, softmax
def _attach_log_perplexity_nodes(logits, targets, target_weights, hparams=None):
"""
:param logits:
:param targets:
:param target_weights:
:param hparams:
:return:
"""
target_list = tf.reshape(targets, [-1])
target_weights_list = tf.to_float(tf.reshape(target_weights, [-1]))
# hrmm
word_count = tf.add(tf.reduce_sum(target_weights_list), 0.0000999999974738)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=target_list)
cross_entropy = tf.multiply(cross_entropy, tf.to_float(target_weights))
return {"log_perplexity": tf.reduce_sum(cross_entropy) / word_count,
"cross_entropy": cross_entropy}
CHAR_EMBEDDING_SCOPE = "char_embedding"
LSTM_SCOPE_PREFIX = "lstm/lstm_"
SOFTMAX_SCOPE = "softmax"
def attach_inference_nodes(input_seqs, hparams=None):
"""
Predict next word for each sequence / timestep in input_seqs
:param input_seqs: tensor of character encoded words
:param hparams:
:return: dict of inference nodes
"""
with tf.variable_scope(CHAR_EMBEDDING_SCOPE):
word_embeddings = char_embedding_nodes.attach_char_embedding_nodes(input_seqs, num_shards=NUM_SHARDS,
hparams=hparams)
word_embeddings = tf.reshape(word_embeddings, (-1, hparams.word_embedding_size))
cell_out = word_embeddings
cell_state_all_layers = []
cell_out_all_layers = []
for layer_num in range(0, 2):
with tf.variable_scope(LSTM_SCOPE_PREFIX + str(layer_num)):
cell_out, cell_state = _attach_cached_lstm_nodes(cell_out, hparams=hparams)
cell_state_all_layers.append(cell_state)
cell_out_all_layers.append(cell_out)
lstm_outputs = tf.reshape(cell_out, shape=(-1, hparams.word_embedding_size))
with tf.variable_scope(SOFTMAX_SCOPE):
logits, softmax = _attach_projection_nodes(lstm_outputs, hparams=hparams)
return {
"word_embeddings": word_embeddings,
"lstm_outputs": lstm_outputs,
"lstm_state": cell_state,
"logits": logits,
"softmax": softmax,
"cell_state_all_layers": cell_state_all_layers,
"cell_out_all_layers": cell_out_all_layers
}
def attach_predicted_word_nodes(logits, id_to_word_lookup_table, k=5, hparams=None):
"""
Helper to pull out the most likely words
:param logits:
:param id_to_word_lookup_table:
:param k:
:param hparams:
:return:
"""
top_k = tf.nn.top_k(logits, k)
top_word_ids = top_k.indices
word_predictions = tf.reshape(id_to_word_lookup_table.lookup(tf.to_int64(tf.reshape(top_word_ids, [-1]))), [-1, k])
return {"predicted_words": word_predictions,
"top_k": top_k}
def attach_training_nodes(loss, hparams=None):
"""
Attach nodes for training. Work in progress...
:param loss:
:param hparams:
:return:
"""
trainable_vars = tf.trainable_variables()
tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope="")
tf.global_variables()
all_gradients = tf.gradients(loss, trainable_vars)
    lstm_gradients = list(filter(lambda x: -1 < x.op.name.find("lstm"), all_gradients))
non_lstm_gradients = set(all_gradients).difference(lstm_gradients)
lstm_gradients, global_norm = tf.clip_by_global_norm(lstm_gradients, hparams.lstm_clip_grad_norm)
all_gradients = non_lstm_gradients.union(lstm_gradients)
optimizer = tf.train.AdagradOptimizer(hparams.learning_rate)
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = optimizer.apply_gradients(zip(all_gradients, trainable_vars), global_step=global_step)
return {"train_op": train_op, "global_step": global_step}
def restore_original_lm1b(sess, run_config):
"""
Var mapping shenanigans to restore the pre-trained model to the current graph
:param sess:
:param run_config:
:return:
"""
def create_lm1b_restoration_var_map(char_embedding_vars, lstm_vars, softmax_vars):
var_map = {}
# Map char embedding vars
var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), char_embedding_vars)))
# Map lstm embedding vars
var_map_regexes = {r"^(" + LSTM_SCOPE_PREFIX + "\d)/lstm_cell/projection/kernel/part_(\d).*": r"\1/W_P_\2",
r"^(" + LSTM_SCOPE_PREFIX + "\d)/lstm_cell/kernel/part_(\d).*": r"\1/W_\2",
r"^(" + LSTM_SCOPE_PREFIX + "\d)/lstm_cell/bias.*": r"\1/B",
r"^(" + LSTM_SCOPE_PREFIX + "\d)/lstm_cell/w_([fio])_diag.*":
lambda match: match.group(1) + "/W_" + match.group(
2).upper() + "_diag",
}
for r_match, r_replace in var_map_regexes.items():
matching_variables = filter(lambda x: re.match(r_match, x.name), lstm_vars)
for v in matching_variables:
var_map[re.sub(r_match, r_replace, v.name)] = v
# Map softmax embedding vars
var_map = merge(var_map, dict(map(lambda x: (x.op.name, x), softmax_vars)))
return var_map
var_map = create_lm1b_restoration_var_map(
char_embedding_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=CHAR_EMBEDDING_SCOPE),
lstm_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=LSTM_SCOPE_PREFIX),
softmax_vars=tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=SOFTMAX_SCOPE)
)
saver = tf.train.Saver(var_list=var_map)
saver.restore(sess, os.path.join(run_config['model_dir_path_original'], "ckpt-*"))
| 2.296875 | 2 |
test/test.py | simulency/Robot_Executor_Demo | 0 | 12796479 | <reponame>simulency/Robot_Executor_Demo
# -*- coding: utf-8 -*-
from requests import post
r = post(url='http://127.0.0.1:5000/testsuit',data={'userId':'Robert'})
print(r.status_code)
print(r.json()) | 2.109375 | 2 |
frame/snake.py | Rosikobu/snake-reloaded | 0 | 12796480 | import pygame, sys
import time
from pygame.math import Vector2
from .config import FPS, xSize, ySize, cell_size, cell_number, CUTTING
from .eatable.saw import Saw
from .eatable.cake import Cake
class Snake(object):
is_moving = False
def __init__(self, screen: pygame.Surface) -> None:
self.load_snake_texture()
self.body = [Vector2(5,10),Vector2(4,10),Vector2(3,10)]
self.pyScreen = screen
self.direction = Vector2(1,0)
self.new_block = False
self.slowed = False
def draw_snake_object(self) -> None:
for index, block in enumerate(self.body):
# rect for positioning
x_pos = int(block.x * cell_size)
y_pos = int(block.y * cell_size)
block_rect = pygame.Rect(x_pos, y_pos, cell_size, cell_size)
# what direction is tha face
if index == 0:
self.pyScreen.blit(self.head,block_rect)
elif index == len(self.body) - 1:
self.pyScreen.blit(self.tail,block_rect)
else:
previous_block = self.body[index + 1] - block
next_block = self.body[index - 1] - block
if previous_block.x == next_block.x:
self.pyScreen.blit(self.body_vertical, block_rect)
elif previous_block.y == next_block.y:
self.pyScreen.blit(self.body_horizontal, block_rect)
else:
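                    # Corner pieces: the directions of the neighbouring segments
                    # (previous_block / next_block) decide which bend sprite to draw,
                    # e.g. neighbours to the left and above mean a top-left corner.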
if previous_block.x == -1 and next_block.y == -1 or previous_block.y == -1 and next_block.x == -1:
self.pyScreen.blit(self.body_tl, block_rect)
elif previous_block.x == -1 and next_block.y == 1 or previous_block.y == 1 and next_block.x == -1:
self.pyScreen.blit(self.body_bl, block_rect)
elif previous_block.x == 1 and next_block.y == -1 or previous_block.y == -1 and next_block.x == 1:
self.pyScreen.blit(self.body_tr, block_rect)
elif previous_block.x == 1 and next_block.y == 1 or previous_block.y == 1 and next_block.x == 1:
self.pyScreen.blit(self.body_br, block_rect)
def draw_snake(self) -> None:
# Update Snake-Model
self.update_head_graphics()
self.update_tail_graphics()
self.draw_snake_object()
    def update_tail_graphics(self) -> None:
tail_relation = self.body[-2] - self.body[-1]
if tail_relation == Vector2(-1,0): self.tail = self.tail_left
elif tail_relation == Vector2(1,0): self.tail = self.tail_right
elif tail_relation == Vector2(0,-1): self.tail = self.tail_up
elif tail_relation == Vector2(0,1): self.tail = self.tail_down
    def update_head_graphics(self) -> None:
head_relation = self.body[1] - self.body[0]
if head_relation == Vector2(-1,0): self.head = self.head_left
elif head_relation == Vector2(1,0): self.head = self.head_right
elif head_relation == Vector2(0,-1): self.head = self.head_up
elif head_relation == Vector2(0,1): self.head = self.head_down
def move_snake(self) -> None:
if Saw.get_cutted() == False or len(self.body) < (abs(CUTTING)+1):
if self.new_block == True:
body_copy = self.body[:]
body_copy.insert(0, body_copy[0] + self.direction)
self.body = body_copy[:]
if Cake.eated_the_cake():
if Cake.get_cake_countdown() != 0:
Cake.decrase_cake_countdown()
else:
Cake.remove_cake()
self.new_block = False
else:
self.new_block = False
else:
body_copy = self.body[:-1]
body_copy.insert(0, body_copy[0] + self.direction)
self.body = body_copy[:]
else:
self.new_block = False
body_copy = self.body[:CUTTING]
body_copy.insert(0, body_copy[0] + self.direction)
self.body = body_copy[:]
Saw.cutting_done()
Snake.is_moving = False
    def set_direction(self, vec) -> None:
#Snake.is_moving = True
self.direction = vec
def add_block(self) -> None:
self.new_block = True
    def load_snake_texture(self) -> None:
# Kopf
self.head_up = pygame.image.load('assets/Schlange/Schlange_Kopf_oben.png')
self.head_right = pygame.image.load('assets/Schlange/Schlange_Kopf_rechts.png')
self.head_left = pygame.image.load('assets/Schlange/Schlange_Kopf_links.png')
self.head_down = pygame.image.load('assets/Schlange/Schlange_Kopf_unten.png')
# Schwanz
self.tail_up = pygame.image.load('assets/Schlange/Schlange_Schwanz_oben.png')
self.tail_down = pygame.image.load('assets/Schlange/Schlange_Schwanz_unten.png')
self.tail_right = pygame.image.load('assets/Schlange/Schlange_Schwanz_rechts.png')
self.tail_left = pygame.image.load('assets/Schlange/Schlange_Schwanz_links.png')
# Körper
self.body_vertical = pygame.image.load('assets/Schlange/Schlange_vertikal.png')
self.body_horizontal = pygame.image.load('assets/Schlange/Schlange_horizontal.png')
# Directions
self.body_tr = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_oben.png')
self.body_tl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_oben.png')
self.body_br = pygame.image.load('assets/Schlange/Schlange_Ecke_rechts_unten.png')
self.body_bl = pygame.image.load('assets/Schlange/Schlange_Ecke_links_unten.png') | 2.96875 | 3 |
menu/management/commands/send_notification.py | ben174/lunch-bot | 0 | 12796481 | <filename>menu/management/commands/send_notification.py
from django.core.management.base import BaseCommand
from util.mailer import send_menu_email
class Command(BaseCommand):
help = 'Sends today\'s menu to lunch-bot mailing list. If this menu has already been notified, it will' \
'skip sending.'
def handle(self, *args, **options):
send_menu_email()
| 2.109375 | 2 |
Vitis-AI-Profiler/xatAnalyzer/parser/cuEdge.py | dendisuhubdy/Vitis-AI | 1 | 12796482 |
# Copyright 2019 Xilinx Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import parser.parserBase
import parser.ftraceUtil
CUs = []
class cuEdgeParser(parser.parserBase.Parser):
def __init__(self):
super().__init__('cuEdge')
def parse(self, data, options):
#cuMap = options['cuMap']
cuEvents = {}
cuRetData = {}
for l in data:
event = parser.ftraceUtil.parse(l, options)
idx = event.infoDetail['cu_idx']
coreID = "TIMELINE-CU_%s" % hex(idx)
if coreID not in cuEvents.keys():
cuEvents[coreID] = []
cuEvents[coreID].append(event)
"""return {"CU-dpu_1": [[st, et], ...]}"""
for core in cuEvents.keys():
totalT = cuEvents[core][-1].timeStamp - cuEvents[core][0].timeStamp
if totalT == 0:
continue
cuRetData[core] = []
runT = 0
start = 0
for e in cuEvents[core]:
eventType = e.func
if eventType == 'cu_start':
start = e.timeStamp
elif eventType == 'cu_done':
if start == 0:
continue
# prefix, i.pid, i.startTime, i.endTime, color
cuRetData[core].append(["thread", 88, start, e.timeStamp, "#ee0000"])
runT += (e.timeStamp - start)
else:
continue
print("##### Util of %s: %.2f" % (core, runT * 100 / totalT))
return cuRetData
parser.parserBase.register(cuEdgeParser())
| 2.046875 | 2 |
send_alerts.py | zestedesavoir/zds-antispam | 2 | 12796483 | from bs4 import BeautifulSoup
import requests
def send_alerts(website, bot_username, bot_password, suspected_usernames):
login_page_url = "/membres/connexion/"
login_form_url = "/membres/connexion/?next=/"
login_form_data = {
"username": bot_username,
"password": <PASSWORD>,
}
profile_page_url = "/@"
profile_form_id = "report-profile"
profile_form_data = {
"reason": "Spam potentiel"
}
s = requests.session()
# Bot login
login_page_req = s.get(website+login_page_url)
login_page_req.raise_for_status()
referer = login_page_req.url
login_page_soup = BeautifulSoup(login_page_req.content, "lxml")
login_form = login_page_soup.find("form", action=login_form_url)
for el in login_form.find_all(name=True):
if "name" in el.attrs:
if not el.attrs.get("name") in login_form_data:
login_form_data[el.attrs.get("name")] = el.attrs.get("value")
s.headers.update({"referer": referer})
login_auth_req = s.post(website+login_form_url, data=login_form_data)
login_auth_req.raise_for_status()
referer = login_auth_req.url
# Sending alerts
for username in suspected_usernames:
s.headers.update({"referer": referer})
profile_page_req = s.get(website+profile_page_url+username)
profile_page_req.raise_for_status()
referer = profile_page_req.url
profile_page_soup = BeautifulSoup(profile_page_req.content, "lxml")
profile_form = profile_page_soup.find("form", id=profile_form_id)
profile_form_url = profile_form.attrs.get("action")
for el in profile_form.find_all(name=True):
if "name" in el.attrs:
if not el.attrs.get("name") in profile_form_data:
profile_form_data[el.attrs.get("name")] = el.attrs.get("value")
s.headers.update({"referer": referer})
profile_alert_req = s.post(website+profile_form_url, data=profile_form_data)
profile_alert_req.raise_for_status()
referer = profile_alert_req.url
| 2.6875 | 3 |
SqrMelon/audioLibs/qtwav.py | trevorvanhoof/sqrmelon | 93 | 12796484 | from qtutil import *
from audioLibs.base import Song
from PyQt4.QtMultimedia import *
import struct
class QtWavSong(Song):
def __init__(self, path):
super(QtWavSong, self).__init__(path)
# parse the file header
# http://soundfile.sapp.org/doc/WaveFormat/
with open(path, 'rb') as fh:
assert fh.read(4) == 'RIFF', 'This is not a wave file'
fh.read(4) # file size in bytes, ignore
assert fh.read(4) == 'WAVE', 'This is not a wave file'
assert fh.read(4) == 'fmt ', 'This is not a wave file'
assert struct.unpack('<i', fh.read(4))[0] == 16, 'This is not a PCM wave file, not supported'
assert struct.unpack('<h', fh.read(2))[0] == 1, 'This is not a PCM wave file, not supported'
numChannels = struct.unpack('<h', fh.read(2))[0]
sampleRate = struct.unpack('<i', fh.read(4))[0]
fh.read(4) # byteRate
fh.read(2) # blockAlign
bitsPerSample = struct.unpack('<h', fh.read(2))[0]
assert bitsPerSample in (8, 16)
assert fh.read(4) == 'data', 'Additional bytes found in PCM wave file header.'
fh.read(4) # sample data size
self.waveDataOffset = fh.tell() # sample data start
# store info for seeking
self.chunkSize = numChannels * bitsPerSample / 8
self.sampleRate = sampleRate
# convert to format
format = QAudioFormat()
format.setSampleRate(sampleRate)
format.setChannels(numChannels)
format.setSampleSize(bitsPerSample)
format.setCodec("audio/pcm")
format.setByteOrder(QAudioFormat.LittleEndian)
# According to the wave format spec the bitsPerSample determins if data is UInt8 or Int16
format.setSampleType({8: QAudioFormat.UnSignedInt, 16: QAudioFormat.SignedInt}[bitsPerSample])
# ensure we can play this data
device = QAudioDeviceInfo.defaultOutputDevice()
assert device.isFormatSupported(format)
self.output = QAudioOutput(format, None)
self.audioFile = QFile(path)
self.audioFile.open(QIODevice.ReadOnly)
self.output.start(self.audioFile)
def seekAndPlay(self, seconds):
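        # Byte offset = PCM frame index (seconds * sampleRate) times bytes per frame,
        # plus the data-chunk offset; e.g. 2.5 s of 44.1 kHz 16-bit stereo starts
        # int(2.5 * 44100) * 4 = 441000 bytes into the sample data.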
self.audioFile.seek(int(seconds * self.sampleRate) * self.chunkSize + self.waveDataOffset)
self.output.start(self.audioFile)
def stop(self):
self.output.stop()
| 2.828125 | 3 |
prev_ob_models/McTavish2012/analysis.py | fameshpatel/olfactorybulb | 5 | 12796485 | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 6 18:22:04 2011
@author: -
"""
import os
import numpy
from matplotlib import pyplot
from neuronpy.graphics import spikeplot
from bulbspikes import *
from neuronpy.util import spiketrain
from params import sim_var
homedir = os.path.join(os.path.relpath('..'))
analysis_path = homedir
def format_axes(ax, dt=1, ylim=(0.,4.)):
#ax.set_xticks(numpy.arange(0,num_intervals,(num_intervals-1)/4.))
#ax.set_xticklabels(['$-\pi$','$-\pi/2$','$0$','$\pi/2$','$\pi$'], fontsize=18)
xlim = ax.get_xlim()
timesteps=int((xlim[1]*dt-xlim[0]*dt)/2.)
ax.set_xticks(numpy.linspace(xlim[0],xlim[1],5))
ax.set_xticklabels(numpy.asarray(numpy.linspace(-timesteps,timesteps,5), dtype=int))
ax.set_xlabel('lag (ms)')
ax.set_ylim(ylim)
ax.set_ylabel('Synchronization magnitude')
def draw_cell(cellid, ax, color='black'):
xloc = 10+cellid*20
# Lateral dends
y = numpy.abs(numpy.subtract(range(101), xloc))
yvec = numpy.log(numpy.add(y,1))
ax.plot(range(101), yvec, color=color)
# Soma
ax.fill_between(range(101), numpy.ones(101), yvec, \
where=numpy.ma.masked_where(yvec < 1., yvec).mask, \
color=color, linewidth=0.)
# Glom
ax.plot([xloc], [9], color=color, marker='o', markersize=10, markerfacecolor='white', markeredgecolor=color)
ax.plot([xloc], [9], color=color, marker='o', markersize=9, alpha=0.25)
ax.plot([xloc], [9], color=color, marker='1', markersize=7, markeredgewidth=2)
# Primary dendrite
ax.plot([xloc, xloc], [0,8], color=color, linewidth=2)
format_schematic_axis(ax)
def draw_weights(cellids, ax, color='black',scale=1.):
"""Draw granule cells"""
import synweightsnapshot
sws = synweightsnapshot.SynWeightSnapshot( \
nummit=sim_var['num_mitral'], \
numgran=sim_var['num_granule'])
raw=sws.read_file(sim_var['wt_input_file'],
os.path.join(homedir, sim_var['weight_dir']))
sws.parse_data(raw)
for cellid in cellids:
wts = sws.m2g[cellid,:,0]
wts = wts/numpy.max(wts)
for i in range(len(wts)):
if wts[i] > 0.0001:
cellloc = 10+cellid*20
y = numpy.abs(i - cellloc)
yloc = numpy.log(numpy.add(y,1))
gloc = -3.5+((i%2)*1.5)
ax.plot([i],[yloc], marker='o', markerfacecolor=color, markersize=4.*scale, markeredgecolor=color)
ax.plot([i,i],[yloc, gloc], color=color)
ax.plot([i],[gloc], marker='^', markerfacecolor=color, markersize=6.*scale, markeredgecolor=color)
format_schematic_axis(ax)
def format_schematic_axis(ax):
ax.set_xlim((0,100))
xticks = [10,30,50,70,90]
ax.set_xticks(xticks)
ax.set_xticklabels(numpy.multiply(xticks,10))
ax.set_xlabel('distance in microns')
ax.set_ylim((-5,11))
ax.spines['left'].set_color('none')
ax.spines['right'].set_color('none')
ax.set_yticks([])
ax.spines['top'].set_color('none')
ax.spines['bottom'].set_color('black')
ax.xaxis.set_ticks_position('bottom')
def read_weightevents():
M = numpy.loadtxt(os.path.join(analysis_path, 'stimweightevents.txt'))
data = []
for i in range(5):
data.append([])
for m in M:
data[int(m[0])].append(m[1])
return data
def read_delayevents():
M = numpy.loadtxt(os.path.join(analysis_path, 'stimdelayevents.txt'))
data = []
for i in range(5):
data.append([])
for m in M:
data[int(m[0])].append(m[1])
return data
def raster(pair=[0,4], cluster_width=5, fi=.005, xlim=(1000,2000)):
# pos1 = (10+pair[0]*20, cluster_width, 1, pair)
# pos2 = (10+pair[1]*20, cluster_width, 1, pair)
# stim_odor_mags = numpy.ones(5)*.55
fig = pyplot.figure(figsize=(9.5,5.7))
raster_ax = fig.add_axes([.1,.1,.8,.27])
schematic_ax = fig.add_axes([.1,.85,.8,.1])
syn_ax = fig.add_axes([.1,.45,.8,.225])
draw_cell(pair[0], schematic_ax, color='red')
draw_cell(pair[1], schematic_ax, color='blue')
draw_weights(pair, schematic_ax, color='black')
# Analyze an output file in some_dir
bulb_spikes = BulbSpikes(sim_time=sim_var['tstop'])
bulb_spikes.read_file(os.path.join(homedir,'spikeout.spk'))
breath_events = numpy.loadtxt(os.path.join(homedir, 'breathevents.txt'))
wts = read_weightevents()
delays = read_delayevents()
dt = 1
tstop = xlim[1]
x = numpy.arange(0,tstop,dt)
    y0 = numpy.zeros(int(tstop/dt))
    y1 = numpy.zeros(int(tstop/dt))
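    # Difference-of-exponentials synaptic kernel (~20 ms rise, ~200 ms decay) used to
    # build the EPSC waveform evoked at each sniff event.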
EXP = numpy.exp(numpy.multiply(x,-1./200.))-numpy.exp( \
numpy.multiply(x,-1./20.))
idx = 0
for b in breath_events:
if b >= tstop:
break
else:
dtidx = int((b+delays[pair[0]][idx])/dt)
y0[dtidx:] += EXP[:-dtidx]*wts[pair[0]][idx]
dtidx = int((b+delays[pair[1]][idx])/dt)
y1[dtidx:] += EXP[:-dtidx]*wts[pair[1]][idx]
idx += 1
redplt = syn_ax.plot(x,y0, color='red')
blueplt = syn_ax.plot(x,y1, color='blue')
for breath in breath_events:
breathplt = syn_ax.plot([breath, breath], [0,2], linestyle='--', \
color='gray', linewidth=2)
syn_ax.set_xlim(xlim)
syn_ax.set_ylim(0,1.6)
syn_ax.set_yticks([])
syn_ax.set_xticks([])
syn_ax.set_ylabel('EPSC onto tuft')
leg = syn_ax.legend([breathplt, redplt, blueplt], \
['sniff event', 'input onto red', 'input onto blue'], \
bbox_to_anchor=(0, 1.15, 1., .102), loc=1, ncol=3, mode="expand", \
borderaxespad=0., handletextpad=.2)
# Mark sniff interval
for i in range(len(breath_events)):
if breath_events[i] > xlim[0]:
span = syn_ax.annotate('', xy=(breath_events[i], .28), xycoords='data',
xytext=(breath_events[i+1], .28), \
textcoords='data', \
arrowprops=dict(arrowstyle="|-|", linewidth=2)
)
syn_ax.text((breath_events[i]+breath_events[i+1])/2., .53, \
'sniff every\n150 - 250 ms', \
horizontalalignment='center', verticalalignment='top', \
backgroundcolor='white')
break
# Mark amplitude interval
span = syn_ax.annotate('', xy=(1190, 1.28), xycoords='data',
xytext=(1190, 1.12), \
textcoords='data', \
arrowprops=dict(arrowstyle="|-|", linewidth=2)
)
syn_ax.text(1215, 1.21, \
'+/- 5%', \
horizontalalignment='left', verticalalignment='center')
# Mark delay interval
for i in range(len(breath_events)):
if breath_events[i] > 1400:
span = syn_ax.annotate('', xy=(breath_events[i]-2, .5), xycoords='data',
xytext=(breath_events[i]+17, .5), \
textcoords='data', \
arrowprops=dict(arrowstyle="|-|", linewidth=2)
)
syn_ax.text(breath_events[i]+7.5, .28, \
'delay 0-15 ms', \
horizontalalignment='center', verticalalignment='top', \
backgroundcolor='white')
break
spikes = bulb_spikes.get_mitral_spikes()
ref=spikes[pair[0]]
comp=spikes[pair[1]]
gcspikes = bulb_spikes.get_granule_spikes()
mididx = 10+pair[0]*20
gcleft = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1]
mididx = 10+pair[1]*20
gcright = gcspikes[mididx-int(cluster_width/2.):mididx+int(cluster_width/2.)+1]
sp = spikeplot.SpikePlot(fig=fig, savefig=False)
sp.set_markercolor('blue')
sp.set_markeredgewidth(2.)
sp.set_markerscale(4)
sp.plot_spikes([comp], label='comp', cell_offset=cluster_width*2+5, \
draw=False )
sp.set_markercolor('red')
sp.plot_spikes([ref], label='ref', cell_offset=cluster_width*2+2, \
draw=False)
sp.set_markerscale(1.3)
sp.set_markeredgewidth(1.5)
sp.set_markercolor('blue')
sp.plot_spikes(gcright, label='gcright', cell_offset=cluster_width, \
draw=False)
sp.set_markercolor('red')
sp.plot_spikes(gcleft, label='gcleft', cell_offset=0, \
draw=False)
coincidences, mask_a, mask_b, ratio = \
spiketrain.get_sync_traits(ref, comp, window=5)
# idx = 0
# for i in mask_a:
# if i == 1:
# raster_ax.plot([ref[idx]],[cluster_width*2+1.9], marker='o', color='red')
# idx += 1
idx = 0
for i in mask_b:
if i == 1:
if comp[idx] >= xlim[0] and comp[idx] < xlim[1]:
raster_ax.text(comp[idx],cluster_width*2+8.5, '*', \
color='purple', fontweight='bold', \
horizontalalignment='center', verticalalignment='center')
#raster_ax.plot([comp[idx]],[cluster_width*2+7], marker='o', color='blue')
idx += 1
raster_ax.text(2000,cluster_width*2+8.5, '(synchronized)', color='purple', \
horizontalalignment='center', verticalalignment='center',
fontsize=11)
raster_ax.set_yticks([])
ylim = (0.5, cluster_width*2+7.5)
for breath in breath_events:
raster_ax.plot([breath, breath], [ylim[0], ylim[1]], linestyle='--', color='gray', linewidth=2)
sp.update_xlim(xlim)
raster_ax.set_ylim(ylim)
raster_ax.set_xlabel('time (ms)')
raster_ax.set_ylabel('spike output\n granule mitral\n\n', horizontalalignment='center')
pos = schematic_ax.get_position()
schematic_ax.text(.025, pos.ymax+.02, 'A)', transform=fig.transFigure,
verticalalignment='baseline')
pos = syn_ax.get_position()
syn_ax.text(.025, pos.ymax+.07, 'B)', transform=fig.transFigure,
verticalalignment='baseline')
pos = raster_ax.get_position()
raster_ax.text(.025, pos.ymax+.02, 'C)', transform=fig.transFigure,
verticalalignment='baseline')
# fig.savefig(os.path.join(analysis_path, 'raster_w%d_(%d-%d)_%.3f.pdf') %(cluster_width, pair[0], pair[1], fi))
fig.savefig(os.path.join(analysis_path, 'fig1.pdf'))
raster()
| 2.28125 | 2 |
d3status/tasks/email_tasks.py | nicozhang/startUp | 124 | 12796486 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012 feilong.me. All rights reserved.
#
# @author: <NAME> <<EMAIL>>
# Created on Jun 30, 2012
#
from celery.task import task
from d3status.mail import send_email
@task
def send_email_task(fr, to, subject, body, html=None, attachments=[]):
send_email(fr, to, subject, body, html, attachments)
| 1.84375 | 2 |
api/tacticalrmm/agents/tasks.py | meyerje/tacticalrmm | 0 | 12796487 | import os
import subprocess
from loguru import logger
from time import sleep
import random
import requests
from packaging import version as pyver
from django.conf import settings
from tacticalrmm.celery import app
from agents.models import Agent, AgentOutage
from core.models import CoreSettings
logger.configure(**settings.LOG_CONFIG)
@app.task
def send_agent_update_task(pks, version):
assert isinstance(pks, list)
q = Agent.objects.filter(pk__in=pks)
agents = [i.pk for i in q if pyver.parse(i.version) < pyver.parse(version)]
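    # Update in chunks of 30 agents with a pause between chunks so a single run
    # does not flood the Salt master; e.g. 70 outdated agents yields chunks of 30, 30 and 10.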
chunks = (agents[i : i + 30] for i in range(0, len(agents), 30))
for chunk in chunks:
for pk in chunk:
agent = Agent.objects.get(pk=pk)
if agent.operating_system is not None:
if "64bit" in agent.operating_system:
arch = "64"
elif "32bit" in agent.operating_system:
arch = "32"
else:
arch = "64"
url = settings.DL_64 if arch == "64" else settings.DL_32
inno = (
f"winagent-v{version}.exe"
if arch == "64"
else f"winagent-v{version}-x86.exe"
)
r = agent.salt_api_async(
func="win_agent.do_agent_update_v2",
kwargs={
"inno": inno,
"url": url,
},
)
sleep(10)
@app.task
def auto_self_agent_update_task():
core = CoreSettings.objects.first()
if not core.agent_auto_update:
return
q = Agent.objects.all()
agents = [
i.pk
for i in q
if pyver.parse(i.version) < pyver.parse(settings.LATEST_AGENT_VER)
]
chunks = (agents[i : i + 30] for i in range(0, len(agents), 30))
for chunk in chunks:
for pk in chunk:
agent = Agent.objects.get(pk=pk)
if agent.operating_system is not None:
if "64bit" in agent.operating_system:
arch = "64"
elif "32bit" in agent.operating_system:
arch = "32"
else:
arch = "64"
url = settings.DL_64 if arch == "64" else settings.DL_32
inno = (
f"winagent-v{settings.LATEST_AGENT_VER}.exe"
if arch == "64"
else f"winagent-v{settings.LATEST_AGENT_VER}-x86.exe"
)
r = agent.salt_api_async(
func="win_agent.do_agent_update_v2",
kwargs={
"inno": inno,
"url": url,
},
)
sleep(10)
@app.task
def update_salt_minion_task():
q = Agent.objects.all()
agents = [
i.pk
for i in q
if pyver.parse(i.version) >= pyver.parse("0.11.0")
and pyver.parse(i.salt_ver) < pyver.parse(settings.LATEST_SALT_VER)
]
chunks = (agents[i : i + 50] for i in range(0, len(agents), 50))
for chunk in chunks:
for pk in chunk:
agent = Agent.objects.get(pk=pk)
r = agent.salt_api_async(func="win_agent.update_salt")
sleep(20)
@app.task
def get_wmi_detail_task(pk):
agent = Agent.objects.get(pk=pk)
r = agent.salt_api_cmd(timeout=30, func="win_agent.system_info")
if r == "timeout" or r == "error":
return "failed"
agent.wmi_detail = r
agent.save(update_fields=["wmi_detail"])
return "ok"
@app.task
def sync_salt_modules_task(pk):
agent = Agent.objects.get(pk=pk)
r = agent.salt_api_cmd(timeout=35, func="saltutil.sync_modules")
# successful sync if new/charnged files: {'return': [{'MINION-15': ['modules.get_eventlog', 'modules.win_agent', 'etc...']}]}
# successful sync with no new/changed files: {'return': [{'MINION-15': []}]}
if r == "timeout" or r == "error":
logger.error(f"Unable to sync modules {agent.salt_id}")
return
logger.info(f"Successfully synced salt modules on {agent.hostname}")
return "ok"
@app.task
def batch_sync_modules_task():
# sync modules, split into chunks of 50 agents to not overload salt
agents = Agent.objects.all()
online = [i.salt_id for i in agents if i.status == "online"]
chunks = (online[i : i + 50] for i in range(0, len(online), 50))
for chunk in chunks:
Agent.salt_batch_async(minions=chunk, func="saltutil.sync_modules")
sleep(10)
@app.task
def batch_sysinfo_task():
# update system info using WMI
agents = Agent.objects.all()
online = [
i.salt_id
for i in agents
if not i.not_supported("0.11.0") and i.status == "online"
]
chunks = (online[i : i + 30] for i in range(0, len(online), 30))
for chunk in chunks:
Agent.salt_batch_async(minions=chunk, func="win_agent.local_sys_info")
sleep(10)
@app.task
def uninstall_agent_task(salt_id):
attempts = 0
error = False
while 1:
try:
r = requests.post(
f"http://{settings.SALT_HOST}:8123/run",
json=[
{
"client": "local",
"tgt": salt_id,
"fun": "win_agent.uninstall_agent",
"timeout": 8,
"username": settings.SALT_USERNAME,
"password": settings.SALT_PASSWORD,
"eauth": "<PASSWORD>",
}
],
timeout=10,
)
ret = r.json()["return"][0][salt_id]
except Exception:
attempts += 1
else:
if ret != "ok":
attempts += 1
else:
attempts = 0
if attempts >= 10:
error = True
break
elif attempts == 0:
break
if error:
logger.error(f"{salt_id} uninstall failed")
else:
logger.info(f"{salt_id} was successfully uninstalled")
try:
r = requests.post(
f"http://{settings.SALT_HOST}:8123/run",
json=[
{
"client": "wheel",
"fun": "key.delete",
"match": salt_id,
"username": settings.SALT_USERNAME,
"password": settings.SALT_PASSWORD,
"eauth": "<PASSWORD>",
}
],
timeout=30,
)
except Exception:
logger.error(f"{salt_id} unable to remove salt-key")
return "ok"
@app.task
def agent_outage_email_task(pk):
sleep(random.randint(1, 15))
outage = AgentOutage.objects.get(pk=pk)
outage.send_outage_email()
outage.outage_email_sent = True
outage.save(update_fields=["outage_email_sent"])
@app.task
def agent_recovery_email_task(pk):
sleep(random.randint(1, 15))
outage = AgentOutage.objects.get(pk=pk)
outage.send_recovery_email()
outage.recovery_email_sent = True
outage.save(update_fields=["recovery_email_sent"])
@app.task
def agent_outages_task():
agents = Agent.objects.only("pk")
for agent in agents:
if agent.status == "overdue":
outages = AgentOutage.objects.filter(agent=agent)
if outages and outages.last().is_active:
continue
outage = AgentOutage(agent=agent)
outage.save()
if agent.overdue_email_alert:
agent_outage_email_task.delay(pk=outage.pk)
if agent.overdue_text_alert:
# TODO
pass
| 2.078125 | 2 |
nitro/resource/stat/snmp/snmp_stats.py | HanseMerkur/nitro-python | 2 | 12796488 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nitro.resource.base.base_resource import base_resource
from nitro.resource.base.base_resource import base_response
from nitro.service.options import options
from nitro.exception.nitro_exception import nitro_exception
from nitro.util.nitro_util import nitro_util
class snmp_stats(base_resource) :
""" """
def __init__(self) :
self._clearstats = ""
self._snmptotrxpkts = 0
self._snmprxpktsrate = 0
self._snmptottxpkts = 0
self._snmptxpktsrate = 0
self._snmptotgetreqs = 0
self._snmpgetreqsrate = 0
self._snmptotgetnextreqs = 0
self._snmpgetnextreqsrate = 0
self._snmptotgetbulkreqs = 0
self._snmpgetbulkreqsrate = 0
self._snmptotresponses = 0
self._snmpresponsesrate = 0
self._snmptottraps = 0
self._snmptoterrreqdropped = 0
self._snmptotparseerrs = 0
self._snmptotbadversions = 0
self._snmptotbadcommname = 0
self._snmptotbadcommuse = 0
self._snmpunsupportedsecuritylevel = 0
self._snmpnotintimewindow = 0
self._snmpunknownusername = 0
self._snmpunknownengineids = 0
self._snmpwrongdigests = 0
self._snmpdecryptionerrors = 0
@property
def clearstats(self) :
"""Clear the statsistics / counters.<br/>Possible values = basic, full."""
try :
return self._clearstats
except Exception as e:
raise e
@clearstats.setter
def clearstats(self, clearstats) :
"""Clear the statsistics / counters
:param clearstats:
"""
try :
self._clearstats = clearstats
except Exception as e:
raise e
@property
def snmpdecryptionerrors(self) :
"""SNMP packets that were dropped because they could not be decrypted."""
try :
return self._snmpdecryptionerrors
except Exception as e:
raise e
@property
def snmptotresponses(self) :
"""SNMP Get-Response PDUs that have been generated by the NetScaler."""
try :
return self._snmptotresponses
except Exception as e:
raise e
@property
def snmptotbadcommuse(self) :
"""The total number of SNMP Messages received that represented an SNMP operation which was not allowed by the SNMP community named in the Message."""
try :
return self._snmptotbadcommuse
except Exception as e:
raise e
@property
def snmptoterrreqdropped(self) :
"""SNMP requests dropped."""
try :
return self._snmptoterrreqdropped
except Exception as e:
raise e
@property
def snmpgetnextreqsrate(self) :
"""Rate (/s) counter for snmptotgetnextreqs."""
try :
return self._snmpgetnextreqsrate
except Exception as e:
raise e
@property
def snmptotrxpkts(self) :
"""SNMP packets received."""
try :
return self._snmptotrxpkts
except Exception as e:
raise e
@property
def snmptottxpkts(self) :
"""SNMP packets transmitted."""
try :
return self._snmptottxpkts
except Exception as e:
raise e
@property
def snmptotparseerrs(self) :
"""Number of ASN.1 or BER errors encountered when decoding received SNMP Messages."""
try :
return self._snmptotparseerrs
except Exception as e:
raise e
@property
def snmptottraps(self) :
"""SNMP Trap PDUs that have been generated by the NetScaler."""
try :
return self._snmptottraps
except Exception as e:
raise e
@property
def snmptotbadversions(self) :
"""Number of SNMP messages received, which were for an unsupported SNMP version."""
try :
return self._snmptotbadversions
except Exception as e:
raise e
@property
def snmptxpktsrate(self) :
"""Rate (/s) counter for snmptottxpkts."""
try :
return self._snmptxpktsrate
except Exception as e:
raise e
@property
def snmpresponsesrate(self) :
"""Rate (/s) counter for snmptotresponses."""
try :
return self._snmpresponsesrate
except Exception as e:
raise e
@property
def snmpgetreqsrate(self) :
"""Rate (/s) counter for snmptotgetreqs."""
try :
return self._snmpgetreqsrate
except Exception as e:
raise e
@property
def snmptotbadcommname(self) :
"""SNMP messages received, which used an SNMP community name not known to the NetScaler."""
try :
return self._snmptotbadcommname
except Exception as e:
raise e
@property
def snmptotgetnextreqs(self) :
"""SNMP Get-Next PDUs that have been accepted and processed."""
try :
return self._snmptotgetnextreqs
except Exception as e:
raise e
@property
def snmpunknownengineids(self) :
"""SNMP packets that were dropped because they referenced an SNMP engine ID that was not known to the NetScaler."""
try :
return self._snmpunknownengineids
except Exception as e:
raise e
@property
def snmpwrongdigests(self) :
"""SNMP packets that were dropped because they did not contain the expected digest value."""
try :
return self._snmpwrongdigests
except Exception as e:
raise e
@property
def snmpgetbulkreqsrate(self) :
"""Rate (/s) counter for snmptotgetbulkreqs."""
try :
return self._snmpgetbulkreqsrate
except Exception as e:
raise e
@property
def snmpnotintimewindow(self) :
"""SNMP packets that were dropped because they appeared outside of the authoritative SNMP engine's window."""
try :
return self._snmpnotintimewindow
except Exception as e:
raise e
@property
def snmptotgetbulkreqs(self) :
"""SNMP Get-Bulk PDUs that have been accepted and proZcessed."""
try :
return self._snmptotgetbulkreqs
except Exception as e:
raise e
@property
def snmpunknownusername(self) :
"""SNMP packets that were dropped because they referenced a user that was not known to the SNMP engine."""
try :
return self._snmpunknownusername
except Exception as e:
raise e
@property
def snmpunsupportedsecuritylevel(self) :
"""SNMP packets that were dropped because they requested a security level that was
unknown to the NetScaler or otherwise unavailable.
"""
try :
return self._snmpunsupportedsecuritylevel
except Exception as e:
raise e
@property
def snmptotgetreqs(self) :
"""SNMP Get-Request PDUs that have been accepted and processed."""
try :
return self._snmptotgetreqs
except Exception as e:
raise e
@property
def snmprxpktsrate(self) :
"""Rate (/s) counter for snmptotrxpkts."""
try :
return self._snmprxpktsrate
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
"""converts nitro response into object and returns the object array in case of get request.
:param service:
:param response:
"""
try :
result = service.payload_formatter.string_to_resource(snmp_response, response, self.__class__.__name__.replace('_stats',''))
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.snmp
except Exception as e :
raise e
def _get_object_name(self) :
"""Returns the value of object identifier argument"""
try :
return 0
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
"""Use this API to fetch the statistics of all snmp_stats resources that are configured on netscaler.
:param service:
:param name: (Default value = "")
:param option_: (Default value = "")
"""
try :
obj = snmp_stats()
if not name :
response = obj.stat_resources(service, option_)
return response
except Exception as e:
raise e
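    # A hypothetical usage sketch (the `session` object is assumed to be an
    # authenticated nitro service handle; the names here are illustrative only):
    #     stats = snmp_stats.get(session)
    #     print(stats[0].snmptotrxpkts, stats[0].snmprxpktsrate)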
class Clearstats:
""" """
basic = "basic"
full = "full"
class snmp_response(base_response) :
""" """
def __init__(self, length=1) :
self.snmp = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.snmp = [snmp_stats() for _ in range(length)]
| 1.71875 | 2 |
doctable/parallel2/workerresource.py | devincornell/sqlitedocuments | 1 | 12796489 | import collections
import dataclasses
import gc
import multiprocessing
import os
from multiprocessing import Lock, Pipe, Pool, Process, Value
from typing import Any, Callable, Dict, Iterable, List, NewType, Tuple, Union
from .exceptions import (UserFuncRaisedException, WorkerDiedError,
WorkerIsAliveError, WorkerIsDeadError,
WorkerResourceReceivedUnidentifiedMessage)
from .messaging import (BaseMessage, DataPayload, SigClose, StatusRequest,
UserFunc, UserFuncException, WorkerError, WorkerStatus)
from .workerprocess import WorkerProcess
class WorkerResource:
'''Manages a worker process and pipe to it.'''
__slots__ = ['pipe', 'proc', 'verbose']
def __init__(self, target: Callable = None, start: bool = False, args=None, kwargs=None, logging: bool = True, verbose: bool = False, method: str = 'forkserver'):
'''Open Process and pipe to it.
'''
self.verbose = verbose
# set up userfunc
if target is not None:
args = args if args is not None else tuple()
kwargs = kwargs if kwargs is not None else dict()
userfunc = UserFunc(target, *args, **kwargs)
else:
userfunc = None
ctx = multiprocessing.get_context(method)
self.pipe, worker_pipe = Pipe(duplex=True)
self.proc = ctx.Process(
target=WorkerProcess(worker_pipe, userfunc=userfunc, verbose=verbose, logging=logging),
)
# start worker if requested
if start:
self.start()
def __repr__(self):
return f'{self.__class__.__name__}[{self.pid}]'
def __enter__(self):
if not self.is_alive():
self.start()
return self
def __exit__(self, exc_type, exc_value, exc_tb):
self.join()
def __del__(self):
if self.verbose: print(f'{self}.__del__ was called!')
self.terminate(check_alive=False)
############### Main interface methods ###############
def poll(self) -> bool:
'''Check if worker sent anything.
'''
return self.pipe.poll()
def execute(self, data: Any):
'''Send data to worker and blocking return result upon reception.
'''
self.send_data(data)
return self.recv_data()
def recv_data(self) -> Any:
'''Receive raw data from user function.'''
return self.recv().data
def send_data(self, data: Any, **kwargs) -> None:
'''Send any data to worker process to be handled by user function.'''
return self.send_payload(DataPayload(data, **kwargs))
def update_userfunc(self, func: Callable, *args, **kwargs):
'''Send a new UserFunc to worker process.
'''
return self.send_payload(UserFunc(func, *args, **kwargs))
def get_status(self):
'''Blocking request status update from worker.
'''
self.send_payload(StatusRequest())
return self.recv()
############### Pipe interface ###############
def send_payload(self, payload: BaseMessage) -> None:
'''Send a Message (DataPayload or otherwise) to worker process.
'''
if not self.proc.is_alive():
raise WorkerIsDeadError('.send_payload()', self.proc.pid)
if self.verbose: print(f'{self} sending: {payload}')
try:
return self.pipe.send(payload)
except BrokenPipeError:
raise WorkerDiedError(self.proc.pid)
def recv(self) -> DataPayload:
'''Return received DataPayload or raise exception.
'''
try:
payload = self.pipe.recv()
if self.verbose: print(f'{self} received: {payload}')
except (BrokenPipeError, EOFError, ConnectionResetError):
if self.verbose: print('caught one of (BrokenPipeError, EOFError, ConnectionResetError)')
raise WorkerDiedError(self.proc.pid)
# handle incoming data
if isinstance(payload, DataPayload) or isinstance(payload, WorkerStatus):
return payload
elif isinstance(payload, WorkerError):
#self.terminate(check_alive=True)
raise payload.e
elif isinstance(payload, UserFuncException):
raise UserFuncRaisedException(payload.e)
else:
raise WorkerResourceReceivedUnidentifiedMessage()
############### Process interface ###############
@property
def pid(self):
'''Get process id from worker.'''
return self.proc.pid
    def is_alive(self, *args, **kwargs):
        '''Get status of process.'''
        return self.proc.is_alive(*args, **kwargs)
def start(self):
'''Start the process, throws WorkerIsAliveError if already alive.'''
if self.proc.is_alive():
raise WorkerIsAliveError('.start()', self.proc.pid)
return self.proc.start()
def join(self, check_alive=True):
'''Send SigClose() to Worker and then wait for it to die.'''
if check_alive and not self.proc.is_alive():
raise WorkerIsDeadError('.join()', self.proc.pid)
try:
self.pipe.send(SigClose())
except BrokenPipeError:
pass
return self.proc.join()
def terminate(self, check_alive=True):
'''Send terminate signal to worker.'''
if check_alive and not self.proc.is_alive():
raise WorkerIsDeadError('.terminate()', self.proc.pid)
return self.proc.terminate()
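    # A minimal usage sketch (illustrative only; assumes `square` is a module-level,
    # picklable function such as `def square(x): return x * x`):
    #
    #     with WorkerResource(target=square, start=True) as worker:
    #         result = worker.execute(3)  # blocking round-trip through the pipe
    #
    # The worker process is started on construction (or on __enter__) and joined
    # when the with-block exits.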
#class WorkerPool(list):
#
# ############### Worker Creation ###############
# def is_alive(self):
# return len(self) > 0 and all([w.is_alive() for w in self])
#
# def start(self, num_workers: int, *args, func: Callable = None, **kwargs):
# if self.is_alive():
# raise ValueError('This WorkerPool already has running workers.')
#
# # start each worker
# for ind in range(num_workers):
# self.append(WorkerResource(ind, *args, func=func, **kwargs))
#
# return self
#
# def update_userfunc(self, userfunc: Callable):
# return [w.update_userfunc(userfunc) for w in self]
#
# ############### Low-Level Process Operations ###############
# def join(self):
# [w.join() for w in self]
# self.clear()
#
# def terminate(self):
# [w.terminate() for w in self]
# self.clear()
| 2.453125 | 2 |
stl_rules/tr_left_turn.py | luigiberducci/mon-road | 0 | 12796490 | <filename>stl_rules/tr_left_turn.py
from typing import Dict, List
import numpy as np
from stl_rules.stl_rule import STLRule
class TrafficRuleLeftTurn(STLRule):
"""
This rule implement the Traffic Rule for two cars approaching a junction in opposite directions:
<<[...] They may only proceed if they can see that they will neither endanger nor substantially impede a road user who
has the right of way.
[...] Nor must a road user who is obliged to give way substantially impede a road user who has the right of way when
the latter turns into the other road.>>
Intuition behind formalization:
premise = "ego is approaching but not occupying the junction j, other car has no time to brake"
proper response = "ego brakes until reach zero-velocity or other car crossed the intersection"
Formalization in STL:
premise = (ego_can_brake AND NOT(next(ego_can_brake))) AND (car_can_brake) AND (is_in_junction(ego, j)<=0)
ego_can_brake = dist(ego,j) > d_lon_safe(ego,j)
car_can_brake = dist(car,j) > d_lon_safe(car,j)
plan = plan_react AND plan_brake
plan_react = (release_condition) R^ns_{0:rho} (a_lon(ego) <= a_lon_maxacc)
plan_brake = (release_condition) R^ns_{rho:inf} (a_lon(ego) <= -a_lon_minbr)
release_condition = (v_lon(ego) <= 0) OR (dist(car, j) <= 0) OR (car_can_brake)
Note: this condition takes into account 2 possible implementation of the distance metric (only-positive or pos-neg)
If pos-neg, when car crosses junction, d(car,j)<0 and then release_condition is true (the ego can cross)
If only-pos, when car crosses junction, d(car,j)=inf, then car_can_brake and release_condition is true
*Rewriting*: some operators have been rewritten to match the rtamt spec language (e.g. non-strict release)
- Def. Release: phi_1 R_I phi_2 = not(not(phi_1) Until_i not(phi_2))
- Def. Non-Strict Release: phi_1 R^ns_I phi_2 = phi_1 R_I (phi_1 or phi_2)
"""
@property
def variables(self):
return ["time", "d_lon_ej", "d_lon_cj", "d_lon_min_ej", "d_lon_min_cj",
"is_e_in_junc", "v_lon_e", "a_lon_e"]
@property
def types(self):
return ["int", "float", "float", "float", "float", "float", "float", "float"]
def __init__(self, rss_params):
"""
:param rss_params: static parameters for rss monitoring
`a_lon_minbr`, `a_lon_maxbr` : min, max longitudinal acceleration when breaking
`a_lon_minacc`, `a_lon_maxacc` : min, max longitudinal acceleration
`rho`: reaction time in seconds
`rho_dt`: reaction time in number of steps (note: we use `next` operator, we need discrete-time stl)
`max_steps`: overestimation of the episode length, used to monitor open intervals
"""
required_parameters = ["a_lon_minbr", "a_lon_maxacc", "rho", "rho_dt", "sim_dt", "max_steps"]
assert all([p in rss_params for p in required_parameters])
self._p = {p: rss_params[p] for p in required_parameters}
@property
def spec(self):
# predicates
E_canbrake = "(d_lon_ej > d_lon_min_ej)"
C_canbrake = "(d_lon_cj > d_lon_min_cj)"
E_not_injunc = "(is_e_in_junc <= 0)"
V_lon_e_stop = "(v_lon_e <= 0)"
C_react_or_crossed = f"({C_canbrake} or (d_lon_cj<0))" # the check on d_lon_cj in case d has pos-neg interpret.
A_lon_e_maxacc = f"(a_lon_e <= {self._p['a_lon_maxacc']})"
A_lon_e_minbr = f"(a_lon_e <= -{self._p['a_lon_minbr']})"
release_cond = f"({V_lon_e_stop} or {C_react_or_crossed})"
# specification
# note: non-strict release operator is written using not and until
S = f"(({E_canbrake} and not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))"
P_react = f"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))"
P_brake = f"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))"
P_leftturn = f"({P_react} and {P_brake})"
# resulting specification
phi_lt_resp = f"always (({S} and (next (not {S}))) -> (next {P_leftturn}))"
return phi_lt_resp
@property
def demo_spec(self):
# predicates
E_canbrake = "(d_lon_ej > d_lon_min_ej)"
C_canbrake = "(d_lon_cj > d_lon_min_cj)"
E_not_injunc = "(is_e_in_junc <= 0)"
V_lon_e_stop = "(v_lon_e <= 0)"
C_react_or_crossed = f"({C_canbrake} or (d_lon_cj<0))" # the check on d_lon_cj in case d has pos-neg interpret.
A_lon_e_maxacc = f"(a_lon_e <= {self._p['a_lon_maxacc']})"
A_lon_e_minbr = f"(a_lon_e <= -{self._p['a_lon_minbr']})"
release_cond = f"({V_lon_e_stop} or {C_react_or_crossed})"
# specification
# note: non-strict release operator is written using not and until
S = f"(({E_canbrake} and not(next({E_canbrake}))) and (not({C_canbrake})) and ({E_not_injunc}))"
P_react = f"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_maxacc})))"
P_brake = f"(not(not({release_cond}) until[0:{self._p['rho_dt']}] not({release_cond} or {A_lon_e_minbr})))"
P_leftturn = f"({P_react} and {P_brake})"
# resulting specification
phi_lt_resp = f"(next (not {S})) -> (next {P_leftturn})"
return phi_lt_resp
def _compute_dynamic_safe_long_dist_to_junction(self, data: Dict[str, np.ndarray], v_field: str) -> np.ndarray:
# note: the only change is the assumption that v_front = 0, because a junction is stationary
# then, we just remove the `d_f_brake` term from the calculation
d_b_prebr = data[v_field] * self._p['rho'] + 1 / 2 * self._p['a_lon_maxacc'] * self._p['rho'] ** 2
d_b_brake_num = ((data[v_field] + self._p['rho'] * self._p['a_lon_maxacc']) ** 2)
d_b_brake_den = 2 * self._p['a_lon_minbr']
d_b_brake = d_b_brake_num / d_b_brake_den
d_diff = d_b_prebr + d_b_brake
d_lon_min = np.maximum(d_diff, np.zeros_like(d_diff))
return d_lon_min
def generate_signals_for_demo(self, data: Dict[str, np.ndarray], begin:int=5, end:int=1000) -> Dict[str, List]:
# check input
obs_signals = ["elapsed_time", "v_lon_ego", "v_lon_car", "a_lon_ego", "d_ego_j", "is_e_in_j", "d_car_j"]
assert all([s in data for s in obs_signals]), f"missing in signals ({obs_signals} not in {data.keys()})"
# generate output signals from input signals
out_signals = {}
out_signals["elapsed_time"] = data["elapsed_time"] - data["elapsed_time"][0]
out_signals["time"] = np.floor((data["elapsed_time"] - data["elapsed_time"][0]) / self._p["sim_dt"]).astype(int)
out_signals["a_lon_e"] = data["a_lon_ego"]
out_signals["v_lon_e"] = data["v_lon_ego"]
out_signals["d_lon_ej"] = data["d_ego_j"]
out_signals["d_lon_cj"] = data["d_car_j"]
out_signals["is_e_in_junc"] = data["is_e_in_j"]
out_signals["d_lon_min_ej"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field="v_lon_ego")
out_signals["d_lon_min_cj"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field="v_lon_car")
out_signals = {k: list(v[begin:end]) for k, v in out_signals.items()}
# check output
assert all([s in out_signals for s in
self.variables]), f"missing out signals ({self.variables} not in {out_signals.keys()})"
return out_signals
def generate_signals(self, data: Dict[str, np.ndarray]) -> Dict[str, List]:
# check input
obs_signals = ["time", "a_lon_e", "v_lon_e", "v_lon_c", "d_lon_ej", "d_lon_cj", "is_e_in_junc"]
assert all([s in data for s in obs_signals]), f"missing in signals ({obs_signals} not in {data.keys()})"
# generate output signals from input signals
out_signals = {}
out_signals["time"] = data["time"]
out_signals["a_lon_e"] = data["a_lon_e"]
out_signals["v_lon_e"] = data["v_lon_e"]
out_signals["d_lon_ej"] = data["d_lon_ej"]
out_signals["d_lon_cj"] = data["d_lon_cj"]
out_signals["is_e_in_junc"] = data["is_e_in_junc"]
out_signals["d_lon_min_ej"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field="v_lon_e")
out_signals["d_lon_min_cj"] = self._compute_dynamic_safe_long_dist_to_junction(data, v_field="v_lon_c")
out_signals = {k: list(v) for k, v in out_signals.items()}
# check output
assert all([s in out_signals for s in
self.variables]), f"missing out signals ({self.variables} not in {out_signals.keys()})"
return out_signals
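# Illustrative usage sketch added for clarity; the parameter values and toy
# signals below are assumptions for demonstration, not values taken from the
# original project.
if __name__ == "__main__":
    rss_params = {
        "a_lon_minbr": 4.0,   # assumed minimum braking deceleration [m/s^2]
        "a_lon_maxacc": 2.0,  # assumed maximum acceleration [m/s^2]
        "rho": 0.5,           # assumed reaction time [s]
        "rho_dt": 5,          # assumed reaction time in time steps
        "sim_dt": 0.1,        # assumed simulation step [s]
        "max_steps": 1000,
    }
    rule = TrafficRuleLeftTurn(rss_params)
    n = 50
    toy_data = {
        "time": np.arange(n),
        "a_lon_e": np.zeros(n),
        "v_lon_e": np.full(n, 8.0),
        "v_lon_c": np.full(n, 6.0),
        "d_lon_ej": np.linspace(40.0, 5.0, n),
        "d_lon_cj": np.linspace(30.0, 2.0, n),
        "is_e_in_junc": np.zeros(n),
    }
    signals = rule.generate_signals(toy_data)
    print(rule.spec)             # the STL formula assembled from the parameters
    print(len(signals["time"]))  # one entry per time step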
| 3.515625 | 4 |
guniflask_cli/commands/version.py | jadbin/guniflask-cli | 2 | 12796491 | import click
from guniflask_cli import __version__
@click.group()
def cli_version():
pass
@cli_version.command('version')
def main():
"""
Print the version.
"""
Version().run()
class Version:
def run(self):
print(f" guniflask-cli: v{__version__}")
import guniflask
print(f" guniflask: v{guniflask.__version__}")
| 2.34375 | 2 |
environment_model/mini_pacman/model/env_copy_model.py | FlorianKlemt/pytorch-latent-i2a | 3 | 12796492 | import torch
# the copy model returns the identity,
# this is its own class so we dont have to change the code to use the copymodel
class CopyEnvModel(torch.nn.Module):
def __init__(self):
super(CopyEnvModel, self).__init__()
def forward(self, input_frame, input_action):
return input_frame, torch.zeros(input_frame.shape[0]).cuda() | 2.671875 | 3 |
netpbmfile/__init__.py | cgohlke/netpbmfile | 4 | 12796493 | # netpbmfile/__init__.py
from .netpbmfile import __doc__, __all__, __version__
from .netpbmfile import *
| 1.039063 | 1 |
src/players/management/commands/fetch_report.py | codacy-badger/hbscorez | 0 | 12796494 | <filename>src/players/management/commands/fetch_report.py
import logging
import time
from datetime import timedelta
from typing import List
import requests
from games.models import Game
MAX_RETRY_DURATION: timedelta = timedelta(hours=3)
RETRY_DURATIONS: List[timedelta] = [
timedelta(seconds=10),
timedelta(seconds=30),
timedelta(minutes=1),
timedelta(minutes=5),
timedelta(minutes=30),
timedelta(hours=1),
MAX_RETRY_DURATION
]
LOGGER = logging.getLogger('hbscorez')
def fetch_report(game: Game):
for retry_duration in RETRY_DURATIONS:
try:
return requests.get(game.report_source_url(), stream=True)
except requests.exceptions.ConnectionError as ex:
LOGGER.warning('Could not fetch report %s', game)
if retry_duration == MAX_RETRY_DURATION:
raise ex
            LOGGER.debug('Now waiting for %s', retry_duration)
time.sleep(retry_duration.total_seconds())
| 2.640625 | 3 |
wikiquote/__init__.py | guilhembn/python-wikiquotes | 0 | 12796495 | from .quotes import quotes, random_titles, search, quotes_and_authors
from .qotd import quote_of_the_day
from . import langs
def supported_languages():
l = langs.SUPPORTED_LANGUAGES[:]
l.sort()
return l
| 1.632813 | 2 |
test/qe/compression-filter/test_compression.py | sharwell/repose | 0 | 12796496 | #!/usr/bin/env python
from narwhal import repose
import unittest
from narwhal import conf
from narwhal import pathutil
import xmlrunner as _xmlrunner
import logging
import time
import argparse
import os
import deproxy
logger = logging.getLogger(__name__)
config_dir = pathutil.join(os.getcwd(), 'etc/repose')
deployment_dir = pathutil.join(os.getcwd(), 'var/repose')
artifact_dir = pathutil.join(os.getcwd(), 'usr/share/repose/filters')
log_file = pathutil.join(os.getcwd(), 'var/log/repose/current.log')
repose_port = 8888
stop_port = 7777
deproxy_port = 9999
headers = {}
startup_wait_time = 15
def setUpModule():
# Set up folder hierarchy
logger.debug('setUpModule')
pathutil.create_folder(config_dir)
pathutil.create_folder(deployment_dir)
pathutil.create_folder(os.path.dirname(log_file))
config_verbose = False
def apply_config_set(config_set_name, params=None):
if params is None:
params = {}
conf.process_config_set(config_set_name, verbose=config_verbose,
destination_path=config_dir, params=params)
class TestCompression(unittest.TestCase):
def setUp(self):
logger.debug('setUp')
self.deproxy = deproxy.Deproxy()
self.end_point = self.deproxy.add_endpoint(('localhost', deproxy_port))
pathutil.clear_folder(config_dir)
params = {
'port': repose_port,
'target_hostname': 'localhost',
'target_port': deproxy_port,
'deployment_dir': deployment_dir,
'artifact_dir': artifact_dir,
'log_file': log_file
}
apply_config_set('configs/.config-set.xml', params=params)
self.valve = repose.ReposeValve(config_dir=config_dir,
stop_port=stop_port)
time.sleep(startup_wait_time)
def test_compression_with_gzip(self):
logger.debug('test_compression_with_gzip')
url = 'http://localhost:%i/' % repose_port
logger.debug('url = %s' % url)
time.sleep(1)
mc = self.deproxy.make_request(method='GET', url=url,
headers=headers)
self.assertEqual(mc.received_response.code, '200', msg=mc)
self.assertEqual(len(mc.handlings), 1, msg=mc)
def tearDown(self):
logger.debug('tearDown')
if self.valve is not None:
self.valve.stop()
if self.deproxy is not None:
self.deproxy.shutdown_all_endpoints()
available_test_cases = [
TestCompression
]
def run():
test_case_map = dict()
for tc_class in available_test_cases:
test_case_map[tc_class.__name__] = tc_class
parser = argparse.ArgumentParser()
parser.add_argument('--print-log', help="Print the log to STDERR.",
action='store_true')
parser.add_argument('--test-case', action='append',
help="Which test case to run. Can be specififed "
"multiple times. 'all' is the default, and runs all "
"available test cases",
                        choices=['all'] + list(test_case_map.keys()),
type=str)
args = parser.parse_args()
if args.print_log:
logging.basicConfig(level=logging.DEBUG,
format=('%(asctime)s %(levelname)s:%(name)s:'
'%(funcName)s:'
'%(filename)s(%(lineno)d):'
'%(threadName)s(%(thread)d):%(message)s'))
global config_verbose
config_verbose = True
if args.test_case is None:
args.test_case = ['all']
test_cases = []
test_cases_set = set()
for tc in args.test_case:
if tc == 'all':
test_cases = available_test_cases
break
if tc not in test_cases_set:
test_cases_set.add(tc)
test_cases.append(test_case_map[tc])
logger.debug('run')
setUpModule()
suite = unittest.TestSuite()
loader = unittest.TestLoader()
load_tests = loader.loadTestsFromTestCase
for test_case in test_cases:
suite.addTest(load_tests(test_case))
testRunner = _xmlrunner.XMLTestRunner(output='test-reports')
result = testRunner.run(suite)
if __name__ == '__main__':
run()
| 1.992188 | 2 |
django_stripe/templatetags/stripe.py | itsnamgyu/api-demo | 1 | 12796497 | <reponame>itsnamgyu/api-demo
from django import template
from django.utils.safestring import mark_safe
from django_stripe import settings
from django_stripe.models import *
OPEN_TAG = "<script>"
CLOSE_TAG = "</script>"
INIT_TEMPLATE = """
var stripe = Stripe('{STRIPE_PUBLIC_KEY}');
"""
BUTTON_TEMPLATE = """
$('{button_selector}').click(function() {{
stripe.redirectToCheckout({{
sessionId: '{checkout_session_id}'
}}).then(function (result) {{
$('{error_selector}').html(result.error.message);
}});
}});
"""
register = template.Library()
def _get_stripe_import_element():
element = '<script src="https://js.stripe.com/v3/"></script>'
return element
def _get_stripe_init_script():
script = INIT_TEMPLATE.format(STRIPE_PUBLIC_KEY=settings.PUBLIC_KEY)
return script
def _get_stripe_button_script(checkout_session, button_selector, error_selector):
script = BUTTON_TEMPLATE.format(
button_selector=button_selector,
checkout_session_id=checkout_session.stripe_session_id,
error_selector=error_selector,
)
return script
@register.simple_tag()
def stripe_import():
return mark_safe(_get_stripe_import_element())
@register.simple_tag()
def stripe_init():
script = _get_stripe_init_script()
element = "\n".join((OPEN_TAG, script, CLOSE_TAG))
return mark_safe(element)
@register.simple_tag()
def stripe_button(checkout_session, button_selector, error_selector):
script = _get_stripe_button_script(
checkout_session, button_selector, error_selector
)
element = "\n".join((OPEN_TAG, script, CLOSE_TAG))
return mark_safe(element)
@register.simple_tag()
def stripe_standalone(checkout_session, button_selector, error_selector):
import_element = _get_stripe_import_element()
init_script = _get_stripe_init_script()
button_script = _get_stripe_button_script(
checkout_session, button_selector, error_selector
)
element = "\n".join((OPEN_TAG, init_script, button_script, CLOSE_TAG))
html = "\n".join((import_element, element))
return mark_safe(html)
| 2.265625 | 2 |
subtract.py | Rj-Aman/tails | 0 | 12796498 | a = 3
b = 2
sub = a-b
print(sub)
| 2.734375 | 3 |
clist/migrations/0012_auto_20191123_0953.py | horacexd/clist | 166 | 12796499 | <gh_stars>100-1000
# Generated by Django 2.1.7 on 2019-11-23 09:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('clist', '0011_auto_20190818_1125'),
]
operations = [
migrations.AddIndex(
model_name='contest',
index=models.Index(fields=['start_time'], name='clist_conte_start_t_9eec7a_idx'),
),
migrations.AddIndex(
model_name='contest',
index=models.Index(fields=['end_time'], name='clist_conte_end_tim_341782_idx'),
),
]
| 1.523438 | 2 |
Packages/Dead/demo/Script/tutorials/orientation_and_output.py | xylar/cdat | 62 | 12796500 | <filename>Packages/Dead/demo/Script/tutorials/orientation_and_output.py
# Adapted for numpy/ma/cdms2 by convertcdms.py
# Import the modules needed for the tuturial
# cdms - Climate Data Management system accesses gridded data.
# vcs - Visualization and control System 1D and 2D plotting routines.
# cdutil - Climate utilities that contain miscellaneous routines for
# manipulating variables.
# time - This module provides various functions to manipulate time values.
# os - Operating System routines for Mac, DOS, NT, or Posix depending on
# the system you're on.
# sys - This module provides access to some objects used or maintained by
# the interpreter and to functions that interact strongly with the interpreter.
import vcs, cdms2 as cdms, cdutil, time, os, sys
# Open data file:
filepath = os.path.join(vcs.sample_data, 'clt.nc')
cdmsfile = cdms.open( filepath )
# Extract a 3 dimensional data set
data = cdmsfile('clt')
# Initialize VCS:
v = vcs.init()
# Opening a VCS Canvas - not necessary to do this
# before issuing the plot command!
v.open()
# A quick plot of the data
v.plot( data )
# Changing plot orientation to "Portrait"
v.portrait()
# To change the orientation back to "Landscape"
v.landscape()
print "Generating Landscape output..."
##############################################################
#
# Saving "Landscape" orientation graphics to file
# "Landscape" is the default output orientation.
#
##############################################################
# Append to a postscript file
v.postscript('test.ps')
# Overwrite the existing postscript file
v.postscript('test.ps')
# GIF format - append landscape orientation gif image
v.gif('test.gif', merge='a', orientation='l', geometry='800x600')
# CGM format - append to an existing cgm file
v.cgm('test.cgm', 'a')
# Encapsulated Postscript - overwrite an existing eps file
v.eps('test.eps', 'r')
# PDF format
v.pdf('test.pdf')
##############################################################
# GhostScript (gs) format
# This routine allows the user to save the VCS canvas in one of the many
# GhostScript (gs) file types (also known as devices). To view other
# GhostScript devices, issue the command "gs --help" at the terminal
# prompt.
##############################################################
v.gs('example') # defaults: device='png256', orientation='l' and resolution='792x612'
v.gs(filename='example.tif', device='tiffpack', orientation='l', resolution='800x600')
v.gs(filename='example.pdf', device='pdfwrite', orientation='l', resolution='200x200')
# Changing plot orientation to "Portrait"
v.portrait()
print "Generating Portrait output..."
##############################################################
#
# Saving "Portrait" orientation graphics to file
#
##############################################################
# Append postscript output to an existing file
v.postscript('test.ps','a','p')
# Overwrite existing postscript file with a new postscript file
v.postscript('test.ps','r','p')
# GIF format - overwrite gif image(s) output with portriat gif image
v.gif('test.gif', merge='r', orientation='p', geometry='800x600')
# CGM format - overwrite existing cgm file
v.cgm('test.cgm', 'r')
# Encapsulated Postscript - append portrait output to an existing eps file.
v.eps('test.eps', 'a', 'p')
# PDF format
v.pdf ('test.pdf', 'p')
###############################################################
# GhostScript (gs) format
# This routine allows the user to save the VCS canvas in one of the many
# GhostScript (gs) file types (also known as devices). To view other
# GhostScript devices, issue the command "gs --help" at the terminal
# prompt.
###############################################################
v.gs(filename='example.jpg', device='jpeg', orientation='p', resolution='1000x1000')
| 2.96875 | 3 |
tests/sympc/protocol/beaver/beaver_test.py | hershd23/SyMPC | 64 | 12796501 | <gh_stars>10-100
# third party
# third party
import pytest
from sympc.protocol import Falcon
from sympc.session import Session
from sympc.session import SessionManager
from sympc.store import CryptoPrimitiveProvider
def test_rst_invalid_triple(get_clients) -> None:
parties = get_clients(3)
falcon = Falcon("malicious")
session = Session(parties, protocol=falcon)
SessionManager.setup_mpc(session)
shape_x = (1,)
shape_y = (1,)
# create an inconsistent sharing,invoke a prrs first
session.session_ptrs[0].prrs_generate_random_share(shape_x)
with pytest.raises(ValueError):
CryptoPrimitiveProvider.generate_primitives(
"beaver_mul",
session=session,
g_kwargs={
"session": session,
"a_shape": shape_x,
"b_shape": shape_y,
"nr_parties": session.nr_parties,
},
p_kwargs={"a_shape": shape_x, "b_shape": shape_y},
)
| 1.976563 | 2 |
ezotv/cache_tools/__init__.py | marcsello/ezotv-frontend | 0 | 12796502 | #!/usr/bin/env python3
from .redis_client import redis_client
from .cached_base_http_session import CachedBaseHttpSession | 1.046875 | 1 |
reddit2telegram/channels/ani_bm/app.py | AkhzarFarhan/reddit2telegram | 0 | 12796503 | <filename>reddit2telegram/channels/ani_bm/app.py
#encoding:utf-8
subreddit = 'ani_bm'
t_channel = '@ani_bm'
def send_post(submission, r2t):
return r2t.send_simple(submission)
| 1.59375 | 2 |
porters/QQbot/nonebot_plugins/nonebot_porter.py | lizard1998myx/MultiBot | 3 | 12796504 | from nonebot import CommandSession, on_command
from nonebot import on_natural_language, NLPSession, IntentCommand
from ....requests import Request
from ....responses import *
from ....distributor import Distributor
from ....utils import image_url_to_path
from ....paths import PATHS
import os, logging, traceback
# BLACKLIST = [3288849221]
BLACKLIST = []
@on_natural_language(only_to_me=False, only_short_message=False, allow_empty_message=True)
async def _(session: NLPSession):
return IntentCommand(100.0, 'porter', args={'message': session.msg_text})
@on_command('porter')
async def porter(session: CommandSession):
logging.debug('=========== [MultiBot] Entered nonebot porter ==========')
# 在任何情况下,把所有消息打包成Request交给分拣中心(Distributor),然后处理分拣中心发回的Response序列
# Resqust打包
request = Request()
request.platform = 'CQ'
request.user_id = str(session.ctx['user_id'])
self_id = str(session.self_id)
self_names = ['韩大佬', 'lzy', '林子逸', '子兔', 'xsx', '小石像']
bot_called = False
if request.user_id == self_id:
logging.debug('=========== [MultiBot] Left nonebot porter ==========')
return
elif request.user_id in BLACKLIST:
logging.debug('=========== [MultiBot] Left nonebot porter ==========')
return
if '[CQ:at,qq={}]'.format(self_id) in session.ctx['raw_message']:
# 被at时
bot_called = True
if 'group_id' in session.ctx.keys():
request.group_id = str(session.ctx['group_id'])
else:
# 私聊时
bot_called = True
for message in session.ctx['message']:
if message['type'] == 'text' and request.msg is None:
text = message['data']['text'].strip()
# 呼叫检测
for name in self_names:
if name in text:
# 被叫到时
bot_called = True
text = text.strip()
while text[:len(name)] == name:
text = text[len(name):]
while text[-len(name):] == name:
text = text[:-len(name)]
for sign in [None, ',', ',', None]:
text = text.strip(sign)
# 消息段检测
if '请使用' in text and '新版手机QQ' in text:
request.echo = True
request.msg = '【NonebotPorter】不支持的消息段:"%s"' % text
continue
# 空文本检测
if text != '':
request.msg = text
elif message['type'] == 'image' and request.img is None:
# 先不下载图片,获取response时下载
request.img = message['data']['url']
# request.img = image_url_to_path(message['data']['url'], header='QQBot')
elif message['type'] == 'record' and request.aud is None:
request.aud = os.path.join(PATHS['cqhttp'], 'data', 'voices', message['data']['file'])
elif message['type'] == 'location':
request.loc = {'longitude': float(message['data']['lon']),
'latitude': float(message['data']['lat'])}
elif message['type'] not in ['face', 'at', 'anonymous', 'share', 'reply']:
request.echo = True
request.msg = f"【NonebotPorter】不支持的消息段[{message['type']}]:" \
f"{str(message).replace('CQ:', '$CQ$:')}"
continue
# 初始化分拣中心
distributor = Distributor()
# 获取Response序列,同时下载图片,若出错则返回错误信息
def get_responses():
if request.img:
request.img = image_url_to_path(request.img, header='QQBot')
response_list = distributor.handle(request=request)
return response_list
# 用于执行Response序列
async def execute(response_list: list):
for response in response_list:
try:
if isinstance(response, ResponseMsg) or isinstance(response, ResponseGrpMsg):
msg = response.text
for at_id in response.at_list:
msg += '[CQ:at,qq=%s]' % str(at_id)
# 过长文本多次发送
max_length = 2000
while len(msg) > 0:
msg_left = msg[max_length:] # msg超出maxL的部分
msg = msg[:max_length] # msg只保留maxL内的部分
if isinstance(response, ResponseMsg): # 私聊
await session.send(message=msg)
else: # 群消息
await session.bot.send_group_msg(group_id=response.group_id, message=msg)
if msg_left != '': # 这轮超出部分为0时
msg = msg_left
else:
msg = ''
elif isinstance(response, ResponseMusic):
await session.send(message=f'[CQ:music,type={response.platform},id={response.music_id}]')
elif isinstance(response, ResponseImg) or isinstance(response, ResponseGrpImg):
# 需要在盘符之后加入一个反斜杠,并且不使用双引号
img_msg = '[CQ:image,file=file:///%s]' % os.path.abspath(response.file).replace(':', ':\\')
if isinstance(response, ResponseImg):
await session.send(message=img_msg)
else:
await session.bot.send_group_msg(group_id=response.group_id, message=img_msg)
elif isinstance(response, ResponseCQFunc):
try:
output = await eval('session.bot.%s' % response.func_name)(**response.kwargs)
except AttributeError:
await session.send('【NonebotPorter】不支持的函数:%s' % response.func_name)
except TypeError:
await session.send('【NonebotPorter】不支持的参数:%s' % str(response.kwargs))
except SyntaxError:
await session.send('【NonebotPorter】语法错误')
else:
await execute(distributor.process_output(output=output)) # 递归处理新的Response序列
except:
# 诸如发送失败等问题
logging.error(traceback.format_exc())
# 在筛选后,把Request交给分拣中心,执行返回的Response序列
if bot_called:
# 符合呼出条件的,直接执行
await execute(response_list=get_responses())
elif distributor.use_active(request=request, save=False):
# 不符合呼出条件的,若有活动Session对应,也可以执行
await execute(response_list=get_responses())
else:
logging.debug('=========== [MultiBot] Left nonebot porter ==========')
return
# 刷新并保存最新的session信息
distributor.refresh_and_save()
logging.debug('=========== [MultiBot] Completed nonebot porter ==========')
| 2.203125 | 2 |
20180530_struct_hyperreal.py | randompirate/algostructures-and-datarithms | 0 | 12796505 | <filename>20180530_struct_hyperreal.py
"""
Hyperreal numbers as an extension of float
https://en.wikipedia.org/wiki/Hyperreal_number
"""
class HFloat(float):
"""docstring for HFloat"""
mul_map = {
'one' : {'one' : 'one' , 'omega' : 'omega', 'epsilon' : 'epsilon'},
'omega' : {'one' : 'omega' , 'omega' : 'omega', 'epsilon' : 'one'},
'epsilon': {'one' : 'epsilon', 'omega' : 'one' , 'epsilon' : 'epsilon'},
}
    def __new__(cls, value, unit='one'):  # float is immutable, so override __new__ as well
        obj = float.__new__(cls, value)
        obj.unit = unit  # store the unit on the instance, not on the class
        return obj
    def __init__(self, value, unit='one'):
        super().__init__()
def __mul__(self, other):
if type(other) != HFloat:
ounit = 'one'
else:
ounit = other.unit
newval = super().__mul__(other)
newunit = HFloat.mul_map[self.unit][ounit]
return HFloat(newval, newunit)
def __truediv__(self, other):
return super().__truediv__(other)
    def __add__(self, other):
        # Only same-unit addition is representable by a single HFloat.
        ounit = other.unit if isinstance(other, HFloat) else 'one'
        if self.unit == ounit:
            return HFloat(super().__add__(other), self.unit)
        return NotImplemented
def __str__(self):
        return super().__str__() + {'omega': 'ω', 'epsilon': 'ε', 'one': ''}.get(self.unit, '')
f = HFloat(1, unit = 'epsilon')
# print(f.unit)
# print(f)
print(f + HFloat(2, unit='epsilon'))  # same-unit addition keeps the unit
# print(f/2)
# print(f/0)
# print(dir(f)) | 3.21875 | 3 |
tests/util.py | ProstoMaxim/hupper | 0 | 12796506 | import os
import subprocess
import sys
import tempfile
import threading
import time
here = os.path.abspath(os.path.dirname(__file__))
class TestApp(threading.Thread):
name = None
args = None
stdin = None
daemon = True
def __init__(self):
super(TestApp, self).__init__()
self.exitcode = None
self.process = None
self.tmpfile = None
self.tmpsize = 0
self.response = None
self.stdout, self.stderr = b'', b''
def start(self, name, args):
self.name = name
self.args = args or []
fd, self.tmpfile = tempfile.mkstemp()
os.close(fd)
touch(self.tmpfile)
self.tmpsize = os.path.getsize(self.tmpfile)
self.response = readfile(self.tmpfile)
super(TestApp, self).start()
def run(self):
cmd = [sys.executable, '-m', 'tests.' + self.name]
if self.tmpfile:
cmd += ['--callback-file', self.tmpfile]
cmd += self.args
env = os.environ.copy()
env['PYTHONUNBUFFERED'] = '1'
self.process = subprocess.Popen(
cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
universal_newlines=True,
)
try:
self.stdout, self.stderr = self.process.communicate(self.stdin)
finally:
self.exitcode = self.process.wait()
def is_alive(self):
return self.process is not None and self.exitcode is None
def stop(self):
if self.is_alive():
self.process.terminate()
self.join()
if self.tmpfile:
os.unlink(self.tmpfile)
self.tmpfile = None
def wait_for_response(self, timeout=5, interval=0.1):
self.tmpsize = wait_for_change(
self.tmpfile,
last_size=self.tmpsize,
timeout=timeout,
interval=interval,
)
self.response = readfile(self.tmpfile)
def touch(fname, times=None):
with open(fname, 'a'):
os.utime(fname, times)
def readfile(path):
with open(path, 'rb') as fp:
return fp.readlines()
def wait_for_change(path, last_size=0, timeout=5, interval=0.1):
start = time.time()
size = os.path.getsize(path)
while size == last_size:
duration = time.time() - start
sleepfor = interval
if timeout is not None: # pragma: no cover
if duration >= timeout:
raise RuntimeError(
'timeout waiting for change to file=%s' % (path,))
sleepfor = min(timeout - duration, sleepfor)
time.sleep(sleepfor)
size = os.path.getsize(path)
return size
| 2.40625 | 2 |
cli_fun/classes.py | e4r7hbug/cli-fun | 0 | 12796507 | <reponame>e4r7hbug/cli-fun<filename>cli_fun/classes.py<gh_stars>0
"""FunCLI classes."""
import importlib
import logging
import os
import sys
import click
class Context(object):
"""Shared context object for passing information between commands."""
def __init__(self):
self.verbose = False
def log(self, msg, *args):
"""Logs a message to stderr."""
if args:
msg %= args
click.echo(msg, file=sys.stderr)
def vlog(self, msg, *args):
"""Logs a message to stderr only if verbose is enabled."""
if self.verbose:
self.log(msg, *args)
class FunCLI(click.MultiCommand):
"""Click class for gathering commands."""
log = logging.getLogger(__name__)
def list_commands(self, ctx):
"""Search through the _commands_ directory for modules to use."""
command_list = []
cmd_folder = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'commands'))
for filename in os.listdir(cmd_folder):
self.log.debug('Found file in command directory: %s', filename)
if filename.endswith('.py'):
if not filename.startswith('__'):
command_name = filename[0:-3]
self.log.debug('Adding command to list: %s', command_name)
command_list.append(command_name)
command_list.sort()
self.log.debug('Sorted command list: %s', command_list)
return command_list
def get_command(self, ctx, name):
"""Dynamically import modules in the _commands_ directory."""
try:
if sys.version_info[0] == 2:
self.log.debug('Python 2 detected, encoding "%s" in ascii.',
name)
name = name.encode('ascii', 'replace')
mod = importlib.import_module('.commands.{0}'.format(name),
__package__)
self.log.debug('Imported module: %s', mod)
return mod.cli
except ImportError as error:
self.log.warning('Failed to import: %s', name)
self.log.warning('Error information:\n%s', error)
return
except SyntaxError:
self.log.warning('Failed to import: %s', name)
self.log.warning('Might be a Python %s incompatible module.',
sys.version_info[0])
return
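# Illustration only (not part of the original repo): a minimal command module
# that this loader would discover. A file such as cli_fun/commands/hello.py with
# a module-level `cli` object makes `hello` show up in list_commands(), and
# get_command() returns its `cli` attribute:
#
#     import click
#
#     @click.command()
#     def cli():
#         """Say hello."""
#         click.echo('hello')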
| 2.53125 | 3 |
UnityEngine/Rendering/SphericalHarmonicsL2Array/__init__.py | Grim-es/udon-pie-auto-completion | 0 | 12796508 | <reponame>Grim-es/udon-pie-auto-completion
from UdonPie import System
from UdonPie import UnityEngine
from UdonPie.Undefined import *
class SphericalHarmonicsL2Array:
def __new__(cls, arg1=None):
'''
:returns: SphericalHarmonicsL2Array
:rtype: UnityEngine.SphericalHarmonicsL2Array
'''
pass
def __setitem__(self, key, value):
'''
:param key: Int32
:type key: System.Int32 or int
:param value: SphericalHarmonicsL2
:type value: UnityEngine.SphericalHarmonicsL2
'''
pass
def __getitem__(self, key):
'''
:param key: Int32
:type key: System.Int32 or int
:returns: SphericalHarmonicsL2
:rtype: UnityEngine.SphericalHarmonicsL2
'''
pass
| 2.390625 | 2 |
lnt/__init__.py | flotwig/lnt | 7 | 12796509 | name = "lnt"
| 1.046875 | 1 |
keymaster/common/service/randomizer.py | shiroyuki/spymaster | 0 | 12796510 | <filename>keymaster/common/service/randomizer.py<gh_stars>0
import subprocess
from functools import lru_cache
from typing import Callable, Dict, List
from imagination.decorator.service import registered
@registered()
class Randomizer:
def __init__(self):
self.__known_generator_map: Dict[str, Callable] = {
RandomizerMethod.OPENSSL_BASE64: self._use_openssl_base64,
RandomizerMethod.OPENSSL_HEX: self._use_openssl_hex,
}
@property
@lru_cache(maxsize=1)
def known_methods(self) -> List[str]:
return sorted(self.__known_generator_map.keys())
def randomize(self, method: str, length: int):
if method not in self.__known_generator_map:
raise UnknownRandomizationMethodError(method)
return self.__known_generator_map[method](length)
@staticmethod
def _use_openssl_base64(length: int) -> str:
return subprocess.check_output(['openssl', 'rand', '-base64', str(length)]).decode().strip()
@staticmethod
def _use_openssl_hex(length: int) -> str:
return subprocess.check_output(['openssl', 'rand', '-hex', str(length)]).decode().strip()
class RandomizerMethod:
OPENSSL_BASE64 = 'openssl:base64'
OPENSSL_HEX = 'openssl:hex'
class UnknownRandomizationMethodError(RuntimeError):
pass
| 2.578125 | 3 |
main_scripts/main_parameter_search.py | JiaHe-yogurt/GNN | 0 | 12796511 | import os
from data_loader.data_generator import DataGenerator
from models.invariant_basic import invariant_basic
from trainers.trainer import Trainer
from Utils.config import process_config
from Utils.dirs import create_dirs
from Utils import doc_utils
from Utils.utils import get_args
from data_loader import data_helper as helper
# capture the config path from the run arguments
# then process the json configuration file
config = process_config('/Users/jiahe/PycharmProjects/gnn multiple inputs/configs/parameter_search_config.json')
os.environ["CUDA_VISIBLE_DEVICES"] = config.gpu
import tensorflow.compat.v1 as tf
import numpy as np
tf.set_random_seed(1)
base_summary_folder = config.summary_dir
base_exp_name = config.exp_name
# create the experiments dirs
create_dirs([config.summary_dir, config.checkpoint_dir])
data = DataGenerator(config)
for lr in [0.00008*(2**i) for i in range(2,8)]:
for a1d in [[5],[10]]:
for a3d in [[5], [10],[15]]:
for fully in [[50,50],[20,20]]:
config.learning_rate = lr
config.architecture2d = a1d
config.architecture = a3d
config.fc = fully
config.exp_name = base_exp_name + " lr={0}_a2d={1}_a3d = {2}_fc = {3}".format(lr, a1d,a3d,fully)
curr_dir = os.path.join(base_summary_folder, "lr={0}_a2d={1}_a3d = {2}_fc = {3}".format(lr, a1d, a3d, fully))
config.summary_dir = curr_dir
create_dirs([curr_dir])
# create your data generator
data.config.learning_rate=lr
data.config.architecture2d = a1d
data.config.architecture3d = a3d
data.config.fc = fully
gpuconfig = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
gpuconfig.gpu_options.visible_device_list = config.gpus_list
gpuconfig.gpu_options.allow_growth = True
sess = tf.Session(config=gpuconfig)
# create an instance of the model you want
model = invariant_basic(config, data)
# create trainer and pass all the previous components to it
trainer = Trainer(sess, model, data, config)
# here you train your model
acc, loss, _ = trainer.train()
sess.close()
tf.reset_default_graph()
import pandas as pd
def summary_10fold_results(summary_dir):
df = pd.read_csv(summary_dir+"/per_epoch_stats.csv")
acc = np.array(df["val_accuracy"])
print("Results")
print("Mean Accuracy = {0}".format(np.mean(acc)))
# print("Mean std = {0}".format(np.std(acc)))
return np.mean(acc)
| 1.96875 | 2 |
chb/models/DllFunctionAPI.py | psifertex/CodeHawk-Binary | 0 | 12796512 | <reponame>psifertex/CodeHawk-Binary
# ------------------------------------------------------------------------------
# Access to the CodeHawk Binary Analyzer Analysis Results
# Author: <NAME>
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from chb.models.DllFunctionParameter import DllFunctionParameter
class DllFunctionAPI(object):
def __init__(self,summary,xnode):
self.summary = summary
self.xnode = xnode
    def get_calling_convention(self): return self.xnode.get('cc')
    def get_adjustment(self): return int(self.xnode.get('adj'))
def get_parameters(self):
return [ DllFunctionParameter(self,p) for p in self.xnode.findall('par') ]
def get_stack_parameters(self):
stackparams = [ p for p in self.get_parameters() if p.is_stack_parameter() ]
return sorted(stackparams,key=lambda p:p.get_stack_nr())
def get_stack_parameter_names(self):
stackparams = self.get_stack_parameters()
return [ p.name for p in stackparams ]
| 1.195313 | 1 |
app/admin.py | naritotakizawa/django-genericforeignkey-sample | 0 | 12796513 | <reponame>naritotakizawa/django-genericforeignkey-sample
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericTabularInline
from .models import Post, File, Comment
class FileInline(GenericTabularInline):
model = File
class PostAdmin(admin.ModelAdmin):
inlines = [FileInline]
class CommentAdmin(admin.ModelAdmin):
inlines = [FileInline]
admin.site.register(Comment, CommentAdmin)
admin.site.register(Post, PostAdmin)
admin.site.register(File)
| 2.015625 | 2 |
tests/seleniumwire/proxy/test_utils.py | SilverFruity/selenium-wire | 0 | 12796514 | import contextlib
import os
from unittest import TestCase
from seleniumwire.proxy.utils import get_upstream_proxy
class GetUpstreamProxyTest(TestCase):
def test_get_config(self):
options = {
'proxy': {
'http': 'http://username1:password1@server1:8888',
'https': 'https://username2:password2@server2:8888',
'no_proxy': 'localhost'
}
}
proxy = get_upstream_proxy(options)
http = proxy['http']
self.assertEqual('http', http.scheme)
self.assertEqual('username1', http.username)
self.assertEqual('password1', http.password)
self.assertEqual('server1:8888', http.hostport)
https = proxy['https']
self.assertEqual('https', https.scheme)
self.assertEqual('username2', https.username)
self.assertEqual('password2', https.password)
self.assertEqual('server2:8888', https.hostport)
self.assertEqual('localhost', proxy['no_proxy'])
def test_get_from_env(self):
with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888',
HTTPS_PROXY='https://username2:password2@server2:8888',
NO_PROXY='localhost'):
proxy = get_upstream_proxy({})
http = proxy['http']
self.assertEqual('http', http.scheme)
self.assertEqual('username1', http.username)
self.assertEqual('password1', http.password)
self.assertEqual('server1:8888', http.hostport)
https = proxy['https']
self.assertEqual('https', https.scheme)
self.assertEqual('username2', https.username)
self.assertEqual('password2', https.password)
self.assertEqual('server2:8888', https.hostport)
self.assertEqual('localhost', proxy['no_proxy'])
def test_merge(self):
options = {
'proxy': {
'https': 'https://username3:password3@server3:8888',
'no_proxy': 'localhost'
}
}
with self.set_env(HTTP_PROXY='http://username1:password1@server1:8888',
HTTPS_PROXY='https://username2:password2@server2:8888',
NO_PROXY='127.0.0.1'):
proxy = get_upstream_proxy(options)
http = proxy['http']
self.assertEqual('http', http.scheme)
self.assertEqual('username1', http.username)
self.assertEqual('password1', http.password)
self.assertEqual('server1:8888', http.hostport)
# The dict config overrides that defined in env variables
https = proxy['https']
self.assertEqual('https', https.scheme)
self.assertEqual('username3', https.username)
self.assertEqual('password3', https.password)
self.assertEqual('server3:8888', https.hostport)
self.assertEqual('localhost', proxy['no_proxy'])
def test_none(self):
options = None
proxy = get_upstream_proxy(options)
self.assertEqual({}, proxy)
@contextlib.contextmanager
def set_env(self, **environ):
"""Context manager used to temporarily set environment vars."""
old_environ = dict(os.environ)
os.environ.update(environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(old_environ)
| 2.640625 | 3 |
pys/classes/exceptions.py | Xithrius/Examples | 0 | 12796515 | <reponame>Xithrius/Examples
class BaseErrorInheretence(Exception):
pass
class SpecialError(BaseErrorInheretence):
    def __init__(self, *args, error=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.error = error  # keyword-only extra context, e.g. error='something happened'
        # More code here if needed
raise SpecialError('stuff and things', error='something happened')
| 2.671875 | 3 |
dltk/core/residual_unit.py | themantalope/DLTK | 1,397 | 12796516 | <reponame>themantalope/DLTK<filename>dltk/core/residual_unit.py
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import tensorflow as tf
import numpy as np
def vanilla_residual_unit_3d(inputs,
out_filters,
kernel_size=(3, 3, 3),
strides=(1, 1, 1),
mode=tf.estimator.ModeKeys.EVAL,
use_bias=False,
activation=tf.nn.relu6,
kernel_initializer=tf.initializers.variance_scaling(distribution='uniform'),
bias_initializer=tf.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None):
"""Implementation of a 3D residual unit according to [1]. This
implementation supports strided convolutions and automatically
handles different input and output filters.
[1] <NAME> et al. Identity Mappings in Deep Residual Networks. ECCV 2016.
Args:
inputs (tf.Tensor): Input tensor to the residual unit. Is required to
have a rank of 5 (i.e. [batch, x, y, z, channels]).
out_filters (int): Number of convolutional filters used in
the sub units.
kernel_size (tuple, optional): Size of the convoltional kernels
used in the sub units
strides (tuple, optional): Convolution strides in (x,y,z) of sub
unit 0. Allows downsampling of the input tensor via strides
convolutions.
mode (str, optional): One of the tf.estimator.ModeKeys: TRAIN, EVAL or
PREDICT
activation (optional): A function to use as activation function.
use_bias (bool, optional): Train a bias with each convolution.
kernel_initializer (TYPE, optional): Initialisation of convolution kernels
bias_initializer (TYPE, optional): Initialisation of bias
kernel_regularizer (None, optional): Additional regularisation op
bias_regularizer (None, optional): Additional regularisation op
Returns:
tf.Tensor: Output of the residual unit
"""
pool_op = tf.layers.max_pooling3d
conv_params = {'padding': 'same',
'use_bias': use_bias,
'kernel_initializer': kernel_initializer,
'bias_initializer': bias_initializer,
'kernel_regularizer': kernel_regularizer,
'bias_regularizer': bias_regularizer}
in_filters = inputs.get_shape().as_list()[-1]
assert in_filters == inputs.get_shape().as_list()[-1], \
'Module was initialised for a different input shape'
x = inputs
orig_x = x
# Handle strided convolutions
if np.prod(strides) != 1:
orig_x = pool_op(inputs=orig_x,
pool_size=strides,
strides=strides,
padding='valid')
# Sub unit 0
with tf.variable_scope('sub_unit0'):
# Adjust the strided conv kernel size to prevent losing information
k = [s * 2 if s > 1 else k for k, s in zip(kernel_size, strides)]
x = tf.layers.batch_normalization(
x, training=mode == tf.estimator.ModeKeys.TRAIN)
x = activation(x)
x = tf.layers.conv3d(
inputs=x,
filters=out_filters,
kernel_size=k, strides=strides,
**conv_params)
# Sub unit 1
with tf.variable_scope('sub_unit1'):
x = tf.layers.batch_normalization(
x, training=mode == tf.estimator.ModeKeys.TRAIN)
x = activation(x)
x = tf.layers.conv3d(
inputs=x,
filters=out_filters,
kernel_size=kernel_size,
strides=(1, 1, 1),
**conv_params)
# Add the residual
with tf.variable_scope('sub_unit_add'):
# Handle differences in input and output filter sizes
if in_filters < out_filters:
orig_x = tf.pad(
tensor=orig_x,
paddings=[[0, 0]] * (len(x.get_shape().as_list()) - 1) + [[
int(np.floor((out_filters - in_filters) / 2.)),
int(np.ceil((out_filters - in_filters) / 2.))]])
elif in_filters > out_filters:
orig_x = tf.layers.conv3d(
inputs=orig_x,
filters=out_filters,
kernel_size=kernel_size,
strides=(1, 1, 1),
**conv_params)
x += orig_x
return x
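# Illustrative shape check added for clarity; it assumes the TF1-style
# graph-mode API that the implementation above already relies on.
if __name__ == '__main__':
    x_in = tf.placeholder(tf.float32, shape=[2, 16, 16, 16, 8])
    y = vanilla_residual_unit_3d(x_in,
                                 out_filters=16,
                                 strides=(2, 2, 2),
                                 mode=tf.estimator.ModeKeys.TRAIN)
    # The strided sub unit halves each spatial dimension and the channel
    # padding branch grows the 8 input filters to 16 output filters.
    print(y.get_shape().as_list())  # expected: [2, 8, 8, 8, 16]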
| 2.453125 | 2 |
fitness.py | IPA-HD/ldaf_classification | 0 | 12796517 | import torch
import torch.nn as nn
class DenseOmega(nn.Module):
"""
Dense (+symmetric) Omega matrix
which applies to vectorized state with shape (batch, c, n, 1).
"""
def __init__(self, n, c):
super(DenseOmega, self).__init__()
self.n = n
self.c = c
# self.fc should have bias=False
# but this does not have an effect due to the custom forward pass below
# the bug was not fixed to maintain compatibility with older model checkpoints
self.fc = nn.Linear(n*c, n*c)
def forward(self, v):
batch = v.shape[0]
x = v.reshape((batch, self.c*self.n, 1))
y1 = torch.matmul(self.fc.weight, x)
y2 = torch.matmul(self.fc.weight.t(), x)
return 0.5*(y1+y2).reshape((batch, self.c, self.n, 1))
def dense_matrix(self):
return 0.5*(self.fc.weight + self.fc.weight.t()) | 3.09375 | 3 |
models/model_redeem_card.py | RapDoodle8080/mooli-milk-tea-management-system | 1 | 12796518 | from models.DAO import DAO
from utils.exception import ValidationError
from utils.validation import is_money
from models.shared import find_user
import string
from random import randint
# Prepare the char set for the coupon code
# Modify the char set according to your needs
# The char set contains all upper case letters and 0 to 9
char_set = list(string.ascii_uppercase)
[char_set.append(n) for n in range(0, 10)]
def generate_random_coupon_code():
# Generate a coupon code of length 16
return ''.join([str(char_set[randint(0, len(char_set)-1)]) for n in range(0, 16)])
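# Note: char_set holds the uppercase letters plus the integers 0-9, which is why
# the join above wraps each element in str(). Illustrative check (not from the
# original code): len(generate_random_coupon_code()) == 16.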
def add_redeem_cards(value, batch = 1):
# Clean the input data
value = str(value).strip()
batch = str(batch).strip()
    # Check if the input is valid
if not is_money(value) or not batch.isdecimal():
raise ValidationError('Invalid input type.')
# Establish db connection
dao = DAO()
cursor = dao.cursor()
sql = """INSERT INTO redeem_card (
redeem_code,
value
) VALUES (
%(redeem_code)s,
%(value)s
)"""
for i in range(int(batch)):
cursor.execute(sql, {'redeem_code': generate_random_coupon_code(), 'value': value})
# Commit every 10 writes
if (i + 1) % 10 == 0:
dao.commit()
dao.commit()
def delete_redeem_card(redeem_code):
# Clean the input data
redeem_code = str(redeem_code).strip()
# Establish db connection
dao = DAO()
cursor = dao.cursor()
# Check if the redeem card exists
if find_redeem_card(redeem_code) is None:
        raise ValidationError('The redeem card does not exist.')
sql = """DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s"""
cursor.execute(sql, {'redeem_code': redeem_code})
dao.commit()
def find_redeem_card(redeem_code):
# Clean the input data
param = str(redeem_code).strip()
# Establish db connection
dao = DAO()
cursor = dao.cursor()
# Query database
sql = """SELECT * FROM redeem_card WHERE redeem_code = %(redeem_code)s"""
cursor.execute(sql, {'redeem_code': redeem_code})
result = cursor.fetchone()
return result
def get_redeem_cards(limit = 0, offset = 0):
# Clean the input data
limit = str(limit).strip()
offset = str(offset).strip()
if not limit.isdecimal() or not offset.isdecimal():
        raise ValidationError('Invalid pagination parameters.')
# Establish db connection
dao = DAO()
cursor = dao.cursor()
# Query database
sql = """SELECT * FROM redeem_card ORDER BY redeem_code ASC"""
if not int(limit) == 0:
sql += ' LIMIT ' + limit + ' OFFSET ' + offset
cursor.execute(sql)
result = cursor.fetchall()
return result
def redeem(user_id, redeem_code):
# Clean the input data
user_id = str(user_id).strip()
redeem_code = str(redeem_code).strip()
# Find redeem card
redeem_card = find_redeem_card(redeem_code)
if redeem_card is None:
raise ValidationError('Invalid redeen code.')
# Find user
user = find_user(method = 'id', param = user_id)
if user is None:
        raise ValidationError('User not found.')
# Establish db connection
dao = DAO()
cursor = dao.cursor()
sql = """UPDATE user SET balance = %(new_balance)s WHERE user_id = %(user_id)s"""
new_balance = user['balance'] + redeem_card['value']
cursor.execute(sql, {'new_balance': new_balance, 'user_id': user_id})
sql = """DELETE FROM redeem_card WHERE redeem_code = %(redeem_code)s"""
cursor.execute(sql, {'redeem_code': redeem_code})
dao.commit()
def count_records_length():
# Establish db connection
dao = DAO()
cursor = dao.cursor()
# Query database
sql = """SELECT count(redeem_code) as len FROM redeem_card"""
cursor.execute(sql)
length = cursor.fetchone()['len']
return length | 2.640625 | 3 |
eventex/subscriptions/tests/test_view_detail.py | gustavo7lagoas/eventex_wttd | 0 | 12796519 | import uuid
from django.test import TestCase
from django.shortcuts import resolve_url as r
from eventex.subscriptions.models import Subscription
class SubscriptionDetailGet(TestCase):
def setUp(self):
self.obj = Subscription.objects.create(
name='<NAME>',
cpf='12345678901',
email='<EMAIL>',
phone='938654321'
)
self.response = self.client.get(r('subscriptions:detail', self.obj.uid))
def test_get(self):
self.assertEqual(200, self.response.status_code)
def test_template_used(self):
self.assertTemplateUsed(self.response, 'subscriptions/subscription_detail.html')
def test_context(self):
subscription = self.response.context['subscription']
self.assertIsInstance(subscription, Subscription)
def test_html(self):
contents = (
self.obj.name,
self.obj.cpf,
self.obj.email,
self.obj.phone
)
for content in contents:
with self.subTest():
self.assertContains(self.response, content)
class SubscriptionDetailNotFound(TestCase):
def setUp(self):
uid = uuid.uuid4()
self.response = self.client.get(r('subscriptions:detail', uid))
def test_not_found(self):
self.assertEqual(404, self.response.status_code) | 2.140625 | 2 |
NetworkConstants.py | LostMyAccount/Game-Maker-Server | 3 | 12796520 | from enum import Enum
receive_codes = {
"PING": 0,
"HANDSHAKE": 1,
"DISCONNECT": 2,
}
handshake_codes = {
"UNKNOWN": 0,
"WAITING_ACK": 1,
"COMPLETED": 2
}
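# The `Enum` imported above is not used by the plain dicts; purely as an
# illustrative sketch (an assumption about the intended direction), an
# Enum-based equivalent would look like:
#
#     class ReceiveCode(Enum):
#         PING = 0
#         HANDSHAKE = 1
#         DISCONNECT = 2
#
#     class HandshakeCode(Enum):
#         UNKNOWN = 0
#         WAITING_ACK = 1
#         COMPLETED = 2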
| 2.171875 | 2 |
index/urls.py | woyuanbingbuyuan/LinkedDataQuery | 0 | 12796521 | from django.urls import path
from . import views
urlpatterns = [
path('', views.index),
path('expand', views.expand),
path('upload', views.upload),
path('comment', views.add_comment),
path('public_data', views.get_public_data),
] | 1.671875 | 2 |
tools/pr_spec2raw.py | PearCoding/PearRay | 19 | 12796522 | <gh_stars>10-100
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Use:
# pr_spec2xyz INPUT OUTPUT
import sys
import struct
import pypearray as pr
# Main
if __name__ == '__main__':
if len(sys.argv) != 3:
print("Not enough arguments given. Need an input and output file")
exit()
input = sys.argv[1]
output = sys.argv[2]
inputFile = pr.SpectralFile.open(input)
with open(output, 'wb') as f:
for i in range(1, inputFile.height):
for j in range(1, inputFile.width):
spec = inputFile[i-1, j-1]
for k in range(1, pr.Spectrum.SAMPLING_COUNT):
f.write(struct.pack("!f", spec[k-1]))
| 2.75 | 3 |
DIMCREATOR/public_function.py | MINT1TINM/DIM-CREATOR | 0 | 12796523 | import MySQLdb
def antisql(content):
antistr=u"'|and|exec|insert|select|delete|update|count|*|%|chr|mid|master|truncate|char|declare|;|or|-|+|,".split(u"|")
for i in range (len(antistr)):
if antistr[i] in content:
return 1
return 0
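# Illustrative behaviour (examples added for clarity, not from the original code):
#   antisql(u"1; select * from users")  -> 1  (contains ";", "select", "*")
#   antisql(u"hello")                   -> 0  (no blacklisted token)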
def sql_select(text):
conn=MySQLdb.connect(host='localhost',user="root",passwd="<PASSWORD>",db="dimcreator",port=3306,charset='utf8')
cur = conn.cursor()
cur.execute(text)
res=cur.fetchall()
cur.close()
conn.commit()
conn.close()
return res
def sql_write(text):
conn=MySQLdb.connect(host='localhost',user="root",passwd="<PASSWORD>",db="dimcreator",port=3306,charset='utf8')
cur = conn.cursor()
cur.execute(text)
cur.close()
conn.commit()
conn.close()
| 2.953125 | 3 |
py/levels/captured.py | triffid/kiki | 2 | 12796524 | <reponame>triffid/kiki<gh_stars>1-10
# .................................................................................................................
# Level design of 'captured' by <NAME>
# .................................................................................................................
level_dict["captured"] = {
"scheme": "default_scheme",
"size": (9,9,9),
"intro": "captured",
"help": (
"$scale(1.5)mission:\nget to the exit!\n\n" + \
"to get to the exit,\nmove the stones",
),
"player": { "position": (0,-3,0),
},
"exits": [
{
"name": "exit",
"active": 1,
"position": (0,0,0),
},
],
"create":
"""
s = world.getSize()
for i in [-2, 2]:
world.addObjectPoly (KikiStone, [world.decenter (1, 1, i), world.decenter(1, -1, i),
world.decenter (-1, -1, i), world.decenter(-1, 1, i)])
world.addObjectPoly (KikiStone, [world.decenter (1, i, 1), world.decenter(1, i, -1),\
world.decenter (-1, i, -1), world.decenter(-1, i, 1)])
world.addObjectPoly (KikiStone, [world.decenter (i, 1, 1), world.decenter(i, 1, -1),\
world.decenter (i, -1, -1), world.decenter(i, -1, 1)])
for i in [-4, -2, 2, 4]:
world.addObjectAtPos (KikiStone(), world.decenter(i, 0, 0))
world.addObjectAtPos (KikiStone(), world.decenter(0, i, 0))
world.addObjectAtPos (KikiStone(), world.decenter(0, 0, i))
""",
}
| 1.898438 | 2 |
foldit/collab_viz.py | awb-carleton/pattern-analysis | 0 | 12796525 | <filename>foldit/collab_viz.py<gh_stars>0
import colorsys
import subprocess
import argparse
import os
import csv
import json
import logging
from itertools import groupby
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from graphviz import Digraph
from concurrent.futures import ProcessPoolExecutor
from foldit.foldit_data import get_relevant_sids
from util import PDB_Info, get_data_value
from typing import NamedTuple, Tuple, List, Dict
import matplotlib
matplotlib.use("Agg")
def get_source(e):
cands = e.pdl[-2::-1]
i = 0
while cands[i]['header']['score'] == 9999.99 or cands[i]['actions'] == {} or cands[i]['header']['uid'] == e.uid:
i += 1
return cands[i]
def col_to_str(col):
return '#' + ("%0.2X%0.2X%0.2X" % (int(col[0] * 0xff), int(col[1] * 0xff), int(col[2] * 0xff)))
def is_corrupted(pdl, uid):
return all(p['actions'] == {} or p['header']['uid'] == uid or p['header']['uid'] == '0' or p['header']['score'] == 9999.99 for p in pdl)
def remove_corrupted(pdb_infos):
return [x for x in pdb_infos if not is_corrupted(x.pdl, x.uid)]
def render_collab(data):
user_colors = {}
for pid in data.pid.unique():
print(pid)
df = data[data.pid == pid]
start = df.timestamps.apply(min).min()
end = df.timestamps.apply(max).max()
pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines for p in l.pdb_infos] if r.lines else []) +
([p for l in r.evol_target_lines for p in l.pdb_infos] if r.evol_lines else []),
key=lambda p: p.timestamp), axis=1)
shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map(
lambda d: [x for x in d if int(x.sharing_gid) > 1]), []), key=lambda p: p.uid), lambda p: p.uid)}
active_evolves = remove_corrupted(sum(pdb_infos.map(
lambda d: [x for x in d if int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions']]), []))
passive_evolves = remove_corrupted(sum(pdb_infos.map(
lambda d: [x for x in d if int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1]['actions'] == {}]), []))
# map gids to colors
cdict = {}
# gids = set([xs[0].gid for xs in shared.values()] + [get_source(e)['header']['gid'] for e in active_evolves + passive_evolves])
# cm = plt.get_cmap('tab10' if len(gids) <= 10 else 'tab20')
# for gid, c in zip(gids, cm.colors):
# cdict[gid] = col_to_str(c)
groups_uids = {gid: [uid for _, uid in g] for gid, g in groupby(
sorted(set(sum(pdb_infos.map(lambda d: [(x.gid, x.uid) for x in d if int(x.sharing_gid) > 1 or
(int(x.sharing_gid) == 0 and x.scoretype == '2' and x.pdl[-1][
'actions'])]), []))), lambda p: p[0])}
for gid, uids in groups_uids.items():
cdict[gid] = {}
group_colors = user_colors.setdefault(gid, {})
colors = [col_to_str(c) for c in list(plt.get_cmap('tab20').colors) + list(plt.get_cmap('tab20b').colors)]
new_uids = uids[:]
for prev_uid, color in group_colors.items():
colors.remove(color)
if prev_uid in new_uids:
new_uids.remove(prev_uid)
cdict[gid][prev_uid] = color
assert len(colors) >= len(new_uids)
for uid, c in zip(new_uids, colors):
cdict[gid][uid] = c
group_colors[uid] = c
        dot = Digraph(name="parent", graph_attr={'forcelabels': 'true', 'K': '0.6', 'repulsiveforce': '2'},
node_attr={'style': 'filled'}, edge_attr={'color': '#00000055'})
group_clusters = {gid: Digraph(name="cluster_{}".format(gid),
graph_attr={'label': "group_{}".format(gid)}) for gid in groups_uids}
evolved = {}
# create evolver nodes & edges
uid_grouped = groupby(sorted(active_evolves, key=lambda e: e.uid), lambda e: e.uid) # group by evolver
uid_source_grouped = {
uid: {k: list(g) for k, g in
groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])),
lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in
uid_grouped} # further group by source
active_uids = list(uid_source_grouped.keys())
# evolver_clusters = {gid: Digraph(name="cluster_active_evolve_{}".format(gid),
# node_attr={'shape': 'oval'},
# graph_attr={'label': '{}_active_evolvers'.format(gid)})
# for gid in gids}
for uid, evolved_targets in uid_source_grouped.items():
gid = list(evolved_targets.values())[0][0].gid
# evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid)
for target, evolves in evolved_targets.items():
group_clusters[gid].node("{} on {}@{:.2f}".format(uid, *target), fillcolor=cdict[gid][uid], shape="oval",
label="{:.2f}".format(min(e.energy for e in evolves)))
# evoling_start = min(e.timestamp for e in evolves)
# edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (evoling_start - start) / (end - start), 0.7)
# evolving_time = sum(get_sessions([e.timestamp for e in evolves])
group_clusters[gid].edge("{} on {}@{:.2f}".format(uid, *target), "{}@{:.2f}".format(*target),
penwidth=str(0.2 + np.log10(len(evolves))),
style="dashed" if min(e.energy for e in evolves) >= target[1] else "solid")
evolved["{}@{:.2f}".format(*target)] = True
# for sg in evolver_clusters.values():
# dot.subgraph(sg)
# do it again, this time for people who just loaded in a shared solution but didn't do anything
# uid_grouped = groupby(sorted(passive_evolves, key=lambda e: e.uid), lambda e: e.uid)
# uid_source_grouped = {
# uid: {k: min(g, key=lambda p: p.energy) for k, g in
# groupby(sorted(g, key=lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score'])),
# lambda e: (get_source(e)['header']['uid'], get_source(e)['header']['score']))} for uid, g in
# uid_grouped if uid not in active_uids} # screen out anyone who later actively evolved
#
# evolver_clusters = {gid: Digraph(name="cluster_passive_ evolve_{}".format(gid),
# node_attr={'shape': 'oval'},
# graph_attr={'label': '{}_passive_evolvers'.format(gid)})
# for gid in gids}
# for uid, evolved_targets in uid_source_grouped.items():
# gid = list(evolved_targets.values())[0].gid
# evolver_clusters[gid].node(uid, fillcolor=cdict[gid], label=uid)
# for target, evolve in evolved_targets.items():
# dot.edge(uid, "{}@{:.2f}".format(*target), penwidth='3', style='dashed')
# evolved["{}@{:.2f}".format(*target)] = True
# for sg in evolver_clusters.values():
# dot.subgraph(sg)
# nodes and edges for shared solutions
for uid, pdbs in shared.items():
gid = pdbs[0].gid
for p in pdbs:
if p.scoretype == '2' and not is_corrupted(p.pdl, p.uid):
source = get_source(p)
# edge_color = colorsys.hsv_to_rgb(0.28, 0.1 + 0.9 * (p.timestamp - start) / (end - start), 0.7)
group_clusters[gid].edge("{}@{:.2f}".format(uid, p.energy),
"{}@{:.2f}".format(source['header']['uid'], source['header']['score']),
penwidth='3')
evolved.setdefault("{}@{:.2f}".format(uid, p.energy), False)
evolved["{}@{:.2f}".format(source['header']['uid'], source['header']['score'])] = True
for uid, pdbs in shared.items():
gid = pdbs[0].gid
num_ignored = len([p for p in pdbs if "{}@{:.2f}".format(uid, p.energy) not in evolved])
# with dot.subgraph(name="cluster_{}".format(uid),
# graph_attr={'label': "{}_shared ({} ignored)".format(uid, num_ignored), 'forcelabels': 'true',
# 'style': 'filled', 'fillcolor': cdict[pdbs[0].gid]},
# node_attr={'style': 'filled'}) as c:
for p in pdbs:
if "{}@{:.2f}".format(uid, p.energy) in evolved:
shape = "box" if p.scoretype == '1' or is_corrupted(p.pdl, p.uid) else "diamond"
# c.node("{}@{:.2f}".format(uid, p.energy), label="{:.2f}".format(p.energy), shape=shape,
group_clusters[gid].node("{}@{:.2f}".format(uid, p.energy), label="{:.2f}".format(p.energy), shape=shape,
style='filled,solid' if evolved["{}@{:.2f}".format(uid, p.energy)] else 'filled,dashed',
fillcolor=cdict[gid][uid])
# color="#ffffff")
for cluster in group_clusters.values():
dot.subgraph(cluster)
# output raw source, then use command line graphviz tools to fix cluster layout
outname = "collab_viz/collab_{}".format(pid)
with open(outname, 'w') as out:
out.write(dot.source)
subprocess.run(
"ccomps -xC {} | dot | gvpack -array_c{} | neato -Tpng -n2 -o {}.png".format(outname, len(groups_uids)+1, outname),
shell=True, check=True)
class ShareTag(NamedTuple):
uid: str
energy: float
class Collaborator(NamedTuple):
uid: str
gid: str
pdbs: List[PDB_Info]
energy_comps: Dict[str, float]
tag: ShareTag
parent: "Collaborator"
source: ShareTag
children: List["Collaborator"]
def get_tag(s: PDB_Info) -> ShareTag:
return ShareTag(s.uid, round(s.energy, 4))
def get_source_tag(s: PDB_Info) -> ShareTag:
source = get_source(s)
return ShareTag(source['header']['uid'], round(source['header']['score'], 4))
def get_evolver_uid(s: PDB_Info, lines: list) -> str:
if s.scoretype == "1":
return s.uid
for i, line in enumerate(lines):
sids = [p.sid for p in line.pdb_infos]
if s.sid in sids:
return s.uid + "evol" + str(i)
raise ValueError("evolver pdb {} not found in any evolver lines for {}".format(s.sid, (s.uid, s.pid)))
def get_collab_children(root_tag: ShareTag, collab: Collaborator, evolves_by_source: dict) -> List[Collaborator]:
children = []
for uid, pdbs in evolves_by_source[root_tag].items():
best = min(pdbs, key=lambda p: p.energy)
child = Collaborator(uid, collab.gid, pdbs, {c.name: c.energy * c.weight for c in best.energy_components},
ShareTag(uid, round(best.energy, 4)), collab, root_tag, [])
children.append(child)
for pdb in pdbs:
if get_tag(pdb) in evolves_by_source and int(pdb.sharing_gid) > 1:
child.children.extend(get_collab_children(get_tag(pdb), child, evolves_by_source))
return children
def get_team_structures(data, soln_lookup, child_lookup):
collabs = {}
for pid in data.pid.unique():
logging.debug("getting team structures for {}".format(pid))
df = data[data.pid == pid]
pdb_infos = df.apply(lambda r: sorted(([p for l in r.lines for p in l.pdb_infos] if r.lines else []) +
([p for l in r.evol_target_lines for p in l.pdb_infos] if r.evol_lines else []),
key=lambda p: p.timestamp), axis=1)
evol_lines_lookup = {uid: get_data_value(uid, pid, "evol_target_lines", df) for uid in df.uid}
euid_lookup = {pdb.sid: get_evolver_uid(pdb, evol_lines_lookup[pdb.uid]) for pdb in sum(pdb_infos.values, [])}
shared = {uid: list(pdbs) for uid, pdbs in groupby(sorted(sum(pdb_infos.map(
lambda d: [x for x in d if int(x.sharing_gid) > 1]), []), key=lambda p: euid_lookup[p.sid]),
lambda p: euid_lookup[p.sid])}
active_evolves = remove_corrupted(sum(pdb_infos.map(
lambda d: [x for x in d if x.scoretype == '2' and x.pdl[-1]['actions']]), []))
uid_grouped = groupby(sorted(active_evolves, key=lambda e: euid_lookup[e.sid]),
lambda e: euid_lookup[e.sid]) # group by evolver
uid_source_grouped = {
uid: {k: list(g) for k, g in
groupby(sorted(g, key=lambda e: get_source_tag(e)), lambda e: get_source_tag(e))} for uid, g in
uid_grouped} # further group by source
evolved_targets = {target for targets in uid_source_grouped.values() for target in targets}
roots = sum(([p for p in pdbs if p.scoretype == '1' and get_tag(p) in evolved_targets] for pdbs in shared.values()), [])
evolves_by_source = {tag: list(pdbs) for tag, pdbs in groupby(sorted(active_evolves, key=lambda s: get_source_tag(s)),
lambda s: get_source_tag(s))}
evolves_by_source = {tag: {uid: list(pdbs) for uid, pdbs in groupby(sorted(xs, key=lambda x: euid_lookup[x.sid]),
lambda x: euid_lookup[x.sid])}
for tag, xs in evolves_by_source.items()}
collabs[pid] = []
for root in roots:
tag = get_tag(root)
sids = get_relevant_sids(root, soln_lookup, child_lookup)
collab = Collaborator(tag.uid, root.gid, [soln_lookup[sid] for sid in sids] if sids else [],
{c.name: c.energy * c.weight for c in root.energy_components}, tag, None, None, [])
collab.children.extend(get_collab_children(tag, collab, evolves_by_source))
collabs[pid].append(collab)
return collabs
# for collab in sorted(collabs['2003642'], key=lambda c: c.pdbs[0].gid):
# print(collab.pdbs[0].gid)
# print(collab.tag)
# front = [(1, c) for c in collab.children]
# while len(front) > 0:
# ntab, cur = front.pop()
# print(" "*ntab, cur.tag)
# front.extend([(ntab + 1, c) for c in cur.children])
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='collab_viz.py')
parser.add_argument("--debug", action='store_true')
parser.add_argument('pids', nargs='+')
args = parser.parse_args()
if args.debug:
for pid in args.pids:
render_collab(pid)
else:
with ProcessPoolExecutor(30) as pool:
pool.map(render_collab, args.pids, chunksize=1)
| 2.265625 | 2 |
bin/output_mover.py | ndeporzio/cosmicfish | 0 | 12796526 | import os
import shutil
prepath_in='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/EUCLID/EUCLID_GridPlot_'
prepath_out='/n/home02/ndeporzio/projects/cosmicfish/cfworkspace/results/FINAL_RESULTS/Parsed/EUCLID/EUCLID_GridPlot_'
for idx in range(120):
tidx=idx//10
midx=idx%10
path=prepath_out+str(idx)+'/'
os.mkdir(path)
filename= ('gp_'+str(tidx)+'_'+str(midx)+'.db')
shutil.copyfile(prepath_in+str(idx)+'/'+filename, prepath_out+str(idx)+'/'+filename)
print(path)
# os.rename(prepath_in+str(idx)+'.err', prepath_out+str(idx)+'/EUCLID_GridPlot_56234229_'+str(idx)+'.err')
| 2.140625 | 2 |
reinforce.py | kitfactory/python_test | 0 | 12796527 | <reponame>kitfactory/python_test<filename>reinforce.py
import gym
import numpy as np
import matplotlib.pyplot as plt
# Declaration of the video rendering function
# Reference URL: http://nbviewer.jupyter.org/github/patrickmineault
# /xcorr-notebooks/blob/master/Render%20OpenAI%20gym%20as%20GIF.ipynb
# from JSAnimation.IPython_display import display_animation
from matplotlib import animation
# from IPython.display import display
def display_frames_as_gif(frames):
"""
Displays a list of frames as a gif, with controls
"""
plt.figure(figsize=(frames[0].shape[1]/72.0, frames[0].shape[0]/72.0),
dpi=72)
patch = plt.imshow(frames[0])
plt.axis('off')
def animate(i):
patch.set_data(frames[i])
anim = animation.FuncAnimation(plt.gcf(), animate, frames=len(frames),
interval=50)
    anim.save('movie_cartpole.mp4')  # Note: saves the animation to a video file
# display(display_animation(anim, default_mode='loop'))
# Move the CartPole with random actions
frames = []
env = gym.make('CartPole-v0')
env.reset()
for step in range(0, 200):
    frames.append(env.render(mode='rgb_array'))  # Append the rendered image at each timestep to frames
    action = np.random.choice(2)  # Randomly choose 0 (push cart left) or 1 (push cart right)
    observation, reward, done, info = env.step(action)  # Execute the action
display_frames_as_gif(frames) | 2.875 | 3 |
tests/settings_test.py | uploadcare/intercom-rank | 12 | 12796528 | from app.settings import *
DEBUG = False
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///:memory:'
SERVER_NAME = 'localhost'
WTF_CSRF_ENABLED = False
WTF_CSRF_CHECK_DEFAULT = False
WTF_CSRF_METHODS = []
| 1.28125 | 1 |
Desafio/ex097.py | NathanMuniz/Exercises-Python | 0 | 12796529 | <reponame>NathanMuniz/Exercises-Python
def tran_titulo(tit):
x = len(tit)
print((x + 5) * "~")
print(f" {tit}")
print((x + 5) * "~")
titulo_principal = "<NAME>"
titulo_secundario = "Curso de Python no Youtube"
paragrafo = "Cev"
tran_titulo(titulo_principal)
tran_titulo(titulo_secundario)
tran_titulo(paragrafo) | 3.453125 | 3 |
users/migrations/0002_shifts_per_roster.py | gregcowell/roster-wizard | 18 | 12796530 | # Generated by Django 2.2 on 2019-04-24 03:05
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='customuser',
name='ward_name',
),
migrations.AddField(
model_name='customuser',
name='shifts_per_roster',
field=models.IntegerField(default=10),
preserve_default=False,
),
]
| 1.5625 | 2 |
mongoDB/connections.py | Atharva-Gundawar/PyDB | 0 | 12796531 | <filename>mongoDB/connections.py
# Install pymongo using 'pip install pymongo'
import urllib
from pymongo import MongoClient
client = MongoClient()
# Connect to localhost :
# Format :
# client = MongoClient('<host>', port_number)
client = MongoClient('localhost', 27017)
# or use the entire URL
# client = MongoClient('mongodb://<host>:<portnum>/')
client = MongoClient('mongodb://localhost:27017/')
# Connect via URI :
# Use pip3 install pymongo[tls] for connecting to atlas
# Format :
# client = pymongo.MongoClient(<Atlas connection string>)
client = MongoClient(
'mongodb+srv://admin:<password>@cluster0-pm5vp.mongodb.net/test?retryWrites=true&w=majority')
# Percent-Escaping Username and Password
username = "Atharva"
password = r"<PASSWORD>"
client = MongoClient('mongodb://%s:%s@<host>' %
(urllib.parse.quote_plus(username), urllib.parse.quote_plus(password)))
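# For example (illustrative value): urllib.parse.quote_plus('p@ss/word') returns
# 'p%40ss%2Fword', which is safe to embed in the connection URI above.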
# With encryption
# Here authSource overrides the default database name which is admin
# SCRAM-SHA-1/SCRAM-SHA-256
client = MongoClient('localhost', username='Atharva', password='password',
authSource='some_db', authMechanism='SCRAM-SHA-256')
# TLS/SSL connections with certificate validation
# The MONGODB-X509 mechanism authenticates a username derived from the
# distinguished subject name of the X.509 certificate presented by the driver during TLS/SSL negotiation.
client = MongoClient('localhost', username="<X.509 derived username>", authMechanism="MONGODB-X509",
tls=True, tlsCertificateKeyFile='/path/to/client.pem', tlsCAFile='/path/to/ca.pem')
# AWS Connections
# Authenticate using AWS IAM credentials
# The access_key_id and secret_access_key passed into the URI MUST be percent escaped.
client = MongoClient(
"mongodb://<access_key_id>:<secret_access_key>@localhost/?authMechanism=MONGODB-AWS")
# Check connectivity with a round-trip ping against the admin database
# (attribute access such as client.stats would only return a Database named "stats")
print(client.admin.command('ping'))
| 2.8125 | 3 |
myenv/Scripts/alembic-script.py | tcsnszh97/Software-engineering | 6 | 12796532 | <filename>myenv/Scripts/alembic-script.py
#!e:\python\szuprojects\flasky\myenv\scripts\python3.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'alembic==0.8.10','console_scripts','alembic'
__requires__ = 'alembic==0.8.10'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point('alembic==0.8.10', 'console_scripts', 'alembic')()
)
| 1.921875 | 2 |
interact/roc.py | drugilsberg/interact | 43 | 12796533 | """Methods used to build ROC."""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_curve, auc
# seaborn settings
sns.set_style("white")
sns.set_context("paper")
color_palette = sns.color_palette("colorblind")
sns.set_palette(color_palette)
def _get_total_undirected_interactions(n):
return n * (n - 1) / 2
def _check_index(index, labels_set, interaction_symbol='<->'):
e1, e2 = index.split(interaction_symbol)
return (e1 in labels_set and e2 in labels_set)
def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'):
labels_set = set(labels)
filtering = pd.Series([
_check_index(index, labels_set, interaction_symbol)
for index in indexes
])
return indexes[filtering]
def _is_index_diagonal(index, interaction_indices='<->'):
a_node, another_node = index.split(interaction_indices)
return a_node == another_node
def _get_evaluation_on_given_labels(
labels, true_interactions, predicted_interactions, no_self_loops=True
):
total_interactions = _get_total_undirected_interactions(len(labels))
interaction_indices = list(
set(
_filter_indices_with_labels(predicted_interactions.index, labels) |
_filter_indices_with_labels(true_interactions.index, labels)
)
)
if no_self_loops:
interaction_indices = [
index
for index in interaction_indices
if not _is_index_diagonal(index)
]
predicted_interactions = predicted_interactions.reindex(
interaction_indices
).fillna(0.0)
true_interactions = true_interactions.reindex(
interaction_indices
).fillna(0.0)
zero_interactions = int(total_interactions) - len(interaction_indices)
y = np.append(true_interactions.values, np.zeros((zero_interactions)))
scores = np.append(
predicted_interactions.values, np.zeros((zero_interactions))
)
return y, scores
def get_roc_df(
pathway_name, method_name, true_interactions, predicted_interactions,
number_of_roc_points=100
):
"""Return dataframe that can be used to plot a ROC curve."""
labels = {
gene
for genes in [
true_interactions.e1, predicted_interactions.e1,
true_interactions.e2, predicted_interactions.e2
]
for gene in genes
}
y, scores = _get_evaluation_on_given_labels(
labels, true_interactions.intensity,
predicted_interactions.intensity
)
# print(method_name, y, scores)
reference_xx = np.linspace(0, 1, number_of_roc_points)
if sum(y) > 0:
xx, yy, threshold = roc_curve(y, scores)
print(method_name, y, scores, threshold, xx, yy)
area_under_curve = auc(xx, yy)
yy = np.interp(reference_xx, xx, yy)
else:
yy = reference_xx
area_under_curve = 0.5 # worst
roc_df = pd.DataFrame({
'pathway': number_of_roc_points * [pathway_name],
'method': (
number_of_roc_points * [method_name]
),
'YY': yy,
'XX': reference_xx.tolist()
})
return roc_df, area_under_curve
def plot_roc_curve_from_df(
df, auc_dict_list=None, output_filepath=None, figsize=(6, 6)
):
"""From a df with multiple methods plot a roc curve using sns.tspot."""
xlabel = 'False Discovery Rate'
ylabel = 'True Positive Rate'
title = 'Receiver Operating Characteristic'
# rename method name to include AUC to show it in legend
if auc_dict_list:
for method in auc_dict_list.keys():
mean_auc = np.mean(auc_dict_list[method])
method_indices = df['method'] == method
df['mean_auc'] = mean_auc
df.loc[method_indices, 'method'] = (
'{} '.format(
method.capitalize()
if method != 'INtERAcT'
else method
) +
'AUC=%0.2f' % mean_auc
)
df = df.sort_values(by='method')
df.rename(columns={'method': ''}, inplace=True) # to avoid legend title
plt.figure(figsize=figsize)
sns.set_style("whitegrid", {'axes.grid': False})
sns.tsplot(
data=df, time='XX', value='YY',
condition='', unit='pathway', legend=True
)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if output_filepath:
plt.savefig(output_filepath, bbox_inches='tight')
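

# Minimal usage sketch (illustrative only; the e1/e2/intensity columns, the
# "<->" index convention and the method/AUC bookkeeping follow the functions
# above, but the interaction values themselves are made up):
#
#     true_df = pd.DataFrame(
#         {'e1': ['A', 'B'], 'e2': ['B', 'C'], 'intensity': [1.0, 1.0]},
#         index=['A<->B', 'B<->C'])
#     pred_df = pd.DataFrame(
#         {'e1': ['A', 'B'], 'e2': ['B', 'C'], 'intensity': [0.9, 0.1]},
#         index=['A<->B', 'B<->C'])
#     roc_df, auc_value = get_roc_df('toy_pathway', 'INtERAcT', true_df, pred_df)
#     plot_roc_curve_from_df(roc_df, auc_dict_list={'INtERAcT': [auc_value]},
#                            output_filepath='toy_roc.png')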
| 2.875 | 3 |
frappe-bench/apps/erpnext/erpnext/hr/doctype/attendance/attendance.py | Semicheche/foa_frappe_docker | 0 | 12796534 | <reponame>Semicheche/foa_frappe_docker<filename>frappe-bench/apps/erpnext/erpnext/hr/doctype/attendance/attendance.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import getdate, nowdate
from frappe import _
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
class Attendance(Document):
def validate_duplicate_record(self):
res = frappe.db.sql("""select name from `tabAttendance` where employee = %s and attendance_date = %s
and name != %s and docstatus = 1""",
(self.employee, self.attendance_date, self.name))
if res:
frappe.throw(_("Attendance for employee {0} is already marked").format(self.employee))
set_employee_name(self)
def check_leave_record(self):
leave_record = frappe.db.sql("""select leave_type, half_day from `tabLeave Application`
where employee = %s and %s between from_date and to_date
and docstatus = 1""", (self.employee, self.attendance_date), as_dict=True)
if leave_record:
if leave_record[0].half_day:
self.status = 'Half Day'
frappe.msgprint(_("Employee {0} on Half day on {1}").format(self.employee, self.attendance_date))
else:
self.status = 'On Leave'
self.leave_type = leave_record[0].leave_type
frappe.msgprint(_("Employee {0} on Leave on {1}").format(self.employee, self.attendance_date))
if self.status == "On Leave" and not leave_record:
frappe.throw(_("No leave record found for employee {0} for {1}").format(self.employee, self.attendance_date))
def validate_attendance_date(self):
date_of_joining = frappe.db.get_value("Employee", self.employee, "date_of_joining")
if getdate(self.attendance_date) > getdate(nowdate()):
frappe.throw(_("Attendance can not be marked for future dates"))
elif date_of_joining and getdate(self.attendance_date) < getdate(date_of_joining):
frappe.throw(_("Attendance date can not be less than employee's joining date"))
def validate_employee(self):
emp = frappe.db.sql("select name from `tabEmployee` where name = %s and status = 'Active'",
self.employee)
if not emp:
frappe.throw(_("Employee {0} is not active or does not exist").format(self.employee))
def validate(self):
from erpnext.controllers.status_updater import validate_status
validate_status(self.status, ["Present", "Absent", "On Leave", "Half Day"])
self.validate_attendance_date()
self.validate_duplicate_record()
self.check_leave_record()
| 2.078125 | 2 |
sdk/ml/azure-ai-ml/azure/ai/ml/_restclient/aio/_azure_machine_learning_workspaces.py | dubiety/azure-sdk-for-python | 1 | 12796535 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import AsyncARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from msrest import Deserializer, Serializer
from ._configuration import AzureMachineLearningWorkspacesConfiguration
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
from azure.core.credentials_async import AsyncTokenCredential
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class AzureMachineLearningWorkspaces(MultiApiClientMixin, _SDKClient):
"""These APIs allow end users to operate on Azure Machine Learning Workspace resources.
    This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2022-05-01'
_PROFILE_TAG = "azure.mgmt.machinelearningservices.AzureMachineLearningWorkspaces"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
'assets': '1.0.0',
'async_operations': 'v1.0',
'batch_job_deployment': '2020-09-01-dataplanepreview',
'batch_job_endpoint': '2020-09-01-dataplanepreview',
'data_call': '1.5.0',
'data_container': '1.5.0',
'data_version': '1.5.0',
'dataset_containers': '2021-10-01',
'dataset_controller_v2': '1.5.0',
'dataset_v2': '1.5.0',
'dataset_versions': '2021-10-01',
'datasets_v1': '1.5.0',
'delete': 'v1.0',
'events': 'v1.0',
'experiments': 'v1.0',
'extensive_model': '1.0.0',
'get_operation_status': '1.5.0',
'metric': 'v1.0',
'migration': '1.0.0',
'models': '1.0.0',
'registry_management_non_workspace': 'v1.0',
'run': 'v1.0',
'run_artifacts': 'v1.0',
'runs': 'v1.0',
'spans': 'v1.0',
'temporary_data_references': '2021-10-01-dataplanepreview',
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
api_version: Optional[str] = None,
base_url: str = "https://management.azure.com",
profile: KnownProfiles = KnownProfiles.default,
**kwargs # type: Any
) -> None:
self._config = AzureMachineLearningWorkspacesConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(AzureMachineLearningWorkspaces, self).__init__(
api_version=api_version,
profile=profile
)
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 1.5.0: :mod:`dataset_dataplane.models<azure.mgmt.machinelearningservices.dataset_dataplane.models>`
* 1.0.0: :mod:`model_dataplane.models<azure.mgmt.machinelearningservices.model_dataplane.models>`
* v1.0: :mod:`registry_discovery.models<azure.mgmt.machinelearningservices.registry_discovery.models>`
* v1.0: :mod:`runhistory.models<azure.mgmt.machinelearningservices.runhistory.models>`
* 2020-09-01-dataplanepreview: :mod:`v2020_09_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.models>`
* 2021-10-01: :mod:`v2021_10_01.models<azure.mgmt.machinelearningservices.v2021_10_01.models>`
* 2021-10-01-dataplanepreview: :mod:`v2021_10_01_dataplanepreview.models<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.models>`
* 2022-01-01-preview: :mod:`v2022_01_01_preview.models<azure.mgmt.machinelearningservices.v2022_01_01_preview.models>`
* 2022-02-01-preview: :mod:`v2022_02_01_preview.models<azure.mgmt.machinelearningservices.v2022_02_01_preview.models>`
* 2022-05-01: :mod:`v2022_05_01.models<azure.mgmt.machinelearningservices.v2022_05_01.models>`
"""
if api_version == '1.5.0':
from ..dataset_dataplane import models
return models
elif api_version == '1.0.0':
from ..model_dataplane import models
return models
elif api_version == 'v1.0':
from ..registry_discovery import models
return models
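        # NOTE: the following second 'v1.0' branch duplicates the check above and is
        # unreachable as written, so the runhistory models can never be returned
        # from this method.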
elif api_version == 'v1.0':
from ..runhistory import models
return models
elif api_version == '2020-09-01-dataplanepreview':
from ..v2020_09_01_dataplanepreview import models
return models
elif api_version == '2021-10-01':
from ..v2021_10_01 import models
return models
elif api_version == '2021-10-01-dataplanepreview':
from ..v2021_10_01_dataplanepreview import models
return models
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview import models
return models
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview import models
return models
elif api_version == '2022-05-01':
from ..v2022_05_01 import models
return models
raise ValueError("API version {} is not available".format(api_version))
@property
def assets(self):
"""Instance depends on the API version:
* 1.0.0: :class:`AssetsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.AssetsOperations>`
"""
api_version = self._get_api_version('assets')
if api_version == '1.0.0':
from ..model_dataplane.aio.operations import AssetsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'assets'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def async_operations(self):
"""Instance depends on the API version:
* v1.0: :class:`AsyncOperationsOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.AsyncOperationsOperations>`
"""
api_version = self._get_api_version('async_operations')
if api_version == 'v1.0':
from ..registry_discovery.aio.operations import AsyncOperationsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'async_operations'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_deployments(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchDeploymentsOperations>`
* 2022-02-01-preview: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchDeploymentsOperations>`
* 2022-05-01: :class:`BatchDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchDeploymentsOperations>`
"""
api_version = self._get_api_version('batch_deployments')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import BatchDeploymentsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import BatchDeploymentsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import BatchDeploymentsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_deployments'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_endpoints(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.BatchEndpointsOperations>`
* 2022-02-01-preview: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.BatchEndpointsOperations>`
* 2022-05-01: :class:`BatchEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.BatchEndpointsOperations>`
"""
api_version = self._get_api_version('batch_endpoints')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import BatchEndpointsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import BatchEndpointsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import BatchEndpointsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_endpoints'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_job_deployment(self):
"""Instance depends on the API version:
* 2020-09-01-dataplanepreview: :class:`BatchJobDeploymentOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobDeploymentOperations>`
"""
api_version = self._get_api_version('batch_job_deployment')
if api_version == '2020-09-01-dataplanepreview':
from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobDeploymentOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_job_deployment'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def batch_job_endpoint(self):
"""Instance depends on the API version:
* 2020-09-01-dataplanepreview: :class:`BatchJobEndpointOperations<azure.mgmt.machinelearningservices.v2020_09_01_dataplanepreview.aio.operations.BatchJobEndpointOperations>`
"""
api_version = self._get_api_version('batch_job_endpoint')
if api_version == '2020-09-01-dataplanepreview':
from ..v2020_09_01_dataplanepreview.aio.operations import BatchJobEndpointOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'batch_job_endpoint'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def code_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeContainersOperations>`
* 2022-02-01-preview: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeContainersOperations>`
* 2022-05-01: :class:`CodeContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeContainersOperations>`
"""
api_version = self._get_api_version('code_containers')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import CodeContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from ..v2021_10_01_dataplanepreview.aio.operations import CodeContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import CodeContainersOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import CodeContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'code_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def code_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.CodeVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.CodeVersionsOperations>`
* 2022-02-01-preview: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.CodeVersionsOperations>`
* 2022-05-01: :class:`CodeVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.CodeVersionsOperations>`
"""
api_version = self._get_api_version('code_versions')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import CodeVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from ..v2021_10_01_dataplanepreview.aio.operations import CodeVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import CodeVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import CodeVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'code_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def component_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentContainersOperations>`
* 2022-02-01-preview: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentContainersOperations>`
* 2022-05-01: :class:`ComponentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentContainersOperations>`
"""
api_version = self._get_api_version('component_containers')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import ComponentContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from ..v2021_10_01_dataplanepreview.aio.operations import ComponentContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import ComponentContainersOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import ComponentContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'component_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def component_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComponentVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ComponentVersionsOperations>`
* 2022-02-01-preview: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ComponentVersionsOperations>`
* 2022-05-01: :class:`ComponentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComponentVersionsOperations>`
"""
api_version = self._get_api_version('component_versions')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import ComponentVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from ..v2021_10_01_dataplanepreview.aio.operations import ComponentVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import ComponentVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import ComponentVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'component_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def compute(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ComputeOperations>`
* 2022-01-01-preview: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.ComputeOperations>`
* 2022-05-01: :class:`ComputeOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ComputeOperations>`
"""
api_version = self._get_api_version('compute')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import ComputeOperations as OperationClass
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview.aio.operations import ComputeOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import ComputeOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'compute'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_call(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DataCallOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataCallOperations>`
"""
api_version = self._get_api_version('data_call')
if api_version == '1.5.0':
from ..dataset_dataplane.aio.operations import DataCallOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_call'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_container(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DataContainerOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataContainerOperations>`
"""
api_version = self._get_api_version('data_container')
if api_version == '1.5.0':
from ..dataset_dataplane.aio.operations import DataContainerOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_container'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_containers(self):
"""Instance depends on the API version:
* 2022-02-01-preview: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataContainersOperations>`
* 2022-05-01: :class:`DataContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataContainersOperations>`
"""
api_version = self._get_api_version('data_containers')
if api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import DataContainersOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import DataContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_version(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DataVersionOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DataVersionOperations>`
"""
api_version = self._get_api_version('data_version')
if api_version == '1.5.0':
from ..dataset_dataplane.aio.operations import DataVersionOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_version'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def data_versions(self):
"""Instance depends on the API version:
* 2022-02-01-preview: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DataVersionsOperations>`
* 2022-05-01: :class:`DataVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DataVersionsOperations>`
"""
api_version = self._get_api_version('data_versions')
if api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import DataVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import DataVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'data_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`DatasetContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetContainersOperations>`
"""
api_version = self._get_api_version('dataset_containers')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import DatasetContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_controller_v2(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DatasetControllerV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetControllerV2Operations>`
"""
api_version = self._get_api_version('dataset_controller_v2')
if api_version == '1.5.0':
from ..dataset_dataplane.aio.operations import DatasetControllerV2Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_controller_v2'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_v2(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DatasetV2Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetV2Operations>`
"""
api_version = self._get_api_version('dataset_v2')
if api_version == '1.5.0':
from ..dataset_dataplane.aio.operations import DatasetV2Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_v2'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def dataset_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`DatasetVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatasetVersionsOperations>`
"""
api_version = self._get_api_version('dataset_versions')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import DatasetVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'dataset_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def datasets_v1(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DatasetsV1Operations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DatasetsV1Operations>`
"""
api_version = self._get_api_version('datasets_v1')
if api_version == '1.5.0':
from ..dataset_dataplane.aio.operations import DatasetsV1Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'datasets_v1'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def datastores(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.DatastoresOperations>`
* 2022-02-01-preview: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.DatastoresOperations>`
* 2022-05-01: :class:`DatastoresOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.DatastoresOperations>`
"""
api_version = self._get_api_version('datastores')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import DatastoresOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import DatastoresOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import DatastoresOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'datastores'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def delete(self):
"""Instance depends on the API version:
* 1.5.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.DeleteOperations>`
* v1.0: :class:`DeleteOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.DeleteOperations>`
"""
api_version = self._get_api_version('delete')
if api_version == '1.5.0':
from ..dataset_dataplane.aio.operations import DeleteOperations as OperationClass
elif api_version == 'v1.0':
from ..runhistory.aio.operations import DeleteOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'delete'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def environment_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentContainersOperations>`
* 2022-02-01-preview: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentContainersOperations>`
* 2022-05-01: :class:`EnvironmentContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentContainersOperations>`
"""
api_version = self._get_api_version('environment_containers')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import EnvironmentContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import EnvironmentContainersOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import EnvironmentContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'environment_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def environment_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.EnvironmentVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.EnvironmentVersionsOperations>`
* 2022-02-01-preview: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.EnvironmentVersionsOperations>`
* 2022-05-01: :class:`EnvironmentVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.EnvironmentVersionsOperations>`
"""
api_version = self._get_api_version('environment_versions')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import EnvironmentVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from ..v2021_10_01_dataplanepreview.aio.operations import EnvironmentVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import EnvironmentVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import EnvironmentVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'environment_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def events(self):
"""Instance depends on the API version:
* v1.0: :class:`EventsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.EventsOperations>`
"""
api_version = self._get_api_version('events')
if api_version == 'v1.0':
from ..runhistory.aio.operations import EventsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'events'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def experiments(self):
"""Instance depends on the API version:
* v1.0: :class:`ExperimentsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.ExperimentsOperations>`
"""
api_version = self._get_api_version('experiments')
if api_version == 'v1.0':
from ..runhistory.aio.operations import ExperimentsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'experiments'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def extensive_model(self):
"""Instance depends on the API version:
* 1.0.0: :class:`ExtensiveModelOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ExtensiveModelOperations>`
"""
api_version = self._get_api_version('extensive_model')
if api_version == '1.0.0':
from ..model_dataplane.aio.operations import ExtensiveModelOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'extensive_model'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def get_operation_status(self):
"""Instance depends on the API version:
* 1.5.0: :class:`GetOperationStatusOperations<azure.mgmt.machinelearningservices.dataset_dataplane.aio.operations.GetOperationStatusOperations>`
"""
api_version = self._get_api_version('get_operation_status')
if api_version == '1.5.0':
from ..dataset_dataplane.aio.operations import GetOperationStatusOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'get_operation_status'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def jobs(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.JobsOperations>`
* 2022-02-01-preview: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.JobsOperations>`
* 2022-05-01: :class:`JobsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.JobsOperations>`
"""
api_version = self._get_api_version('jobs')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import JobsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import JobsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import JobsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'jobs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def metric(self):
"""Instance depends on the API version:
* v1.0: :class:`MetricOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.MetricOperations>`
"""
api_version = self._get_api_version('metric')
if api_version == 'v1.0':
from ..runhistory.aio.operations import MetricOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'metric'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def migration(self):
"""Instance depends on the API version:
* 1.0.0: :class:`MigrationOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.MigrationOperations>`
"""
api_version = self._get_api_version('migration')
if api_version == '1.0.0':
from ..model_dataplane.aio.operations import MigrationOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'migration'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def model_containers(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelContainersOperations>`
* 2021-10-01-dataplanepreview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelContainersOperations>`
* 2022-02-01-preview: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelContainersOperations>`
* 2022-05-01: :class:`ModelContainersOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelContainersOperations>`
"""
api_version = self._get_api_version('model_containers')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import ModelContainersOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from ..v2021_10_01_dataplanepreview.aio.operations import ModelContainersOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import ModelContainersOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import ModelContainersOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'model_containers'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def model_versions(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.ModelVersionsOperations>`
* 2021-10-01-dataplanepreview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.ModelVersionsOperations>`
* 2022-02-01-preview: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.ModelVersionsOperations>`
* 2022-05-01: :class:`ModelVersionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.ModelVersionsOperations>`
"""
api_version = self._get_api_version('model_versions')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import ModelVersionsOperations as OperationClass
elif api_version == '2021-10-01-dataplanepreview':
from ..v2021_10_01_dataplanepreview.aio.operations import ModelVersionsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import ModelVersionsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import ModelVersionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'model_versions'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def models(self):
"""Instance depends on the API version:
* 1.0.0: :class:`ModelsOperations<azure.mgmt.machinelearningservices.model_dataplane.aio.operations.ModelsOperations>`
"""
api_version = self._get_api_version('models')
if api_version == '1.0.0':
from ..model_dataplane.aio.operations import ModelsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'models'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def online_deployments(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineDeploymentsOperations>`
* 2022-02-01-preview: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineDeploymentsOperations>`
* 2022-05-01: :class:`OnlineDeploymentsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineDeploymentsOperations>`
"""
api_version = self._get_api_version('online_deployments')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import OnlineDeploymentsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import OnlineDeploymentsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import OnlineDeploymentsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'online_deployments'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def online_endpoints(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.OnlineEndpointsOperations>`
* 2022-02-01-preview: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_02_01_preview.aio.operations.OnlineEndpointsOperations>`
* 2022-05-01: :class:`OnlineEndpointsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.OnlineEndpointsOperations>`
"""
api_version = self._get_api_version('online_endpoints')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import OnlineEndpointsOperations as OperationClass
elif api_version == '2022-02-01-preview':
from ..v2022_02_01_preview.aio.operations import OnlineEndpointsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import OnlineEndpointsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'online_endpoints'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def operations(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`Operations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.Operations>`
* 2022-01-01-preview: :class:`Operations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.Operations>`
* 2022-05-01: :class:`Operations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import Operations as OperationClass
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview.aio.operations import Operations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import Operations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operations'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_endpoint_connections(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateEndpointConnectionsOperations>`
* 2022-01-01-preview: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateEndpointConnectionsOperations>`
* 2022-05-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateEndpointConnectionsOperations>`
"""
api_version = self._get_api_version('private_endpoint_connections')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview.aio.operations import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import PrivateEndpointConnectionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_endpoint_connections'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_link_resources(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.PrivateLinkResourcesOperations>`
* 2022-01-01-preview: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.PrivateLinkResourcesOperations>`
* 2022-05-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.PrivateLinkResourcesOperations>`
"""
api_version = self._get_api_version('private_link_resources')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview.aio.operations import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import PrivateLinkResourcesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'private_link_resources'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def quotas(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.QuotasOperations>`
* 2022-01-01-preview: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.QuotasOperations>`
* 2022-05-01: :class:`QuotasOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.QuotasOperations>`
"""
api_version = self._get_api_version('quotas')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import QuotasOperations as OperationClass
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview.aio.operations import QuotasOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import QuotasOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'quotas'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def registry_management_non_workspace(self):
"""Instance depends on the API version:
* v1.0: :class:`RegistryManagementNonWorkspaceOperations<azure.mgmt.machinelearningservices.registry_discovery.aio.operations.RegistryManagementNonWorkspaceOperations>`
"""
api_version = self._get_api_version('registry_management_non_workspace')
if api_version == 'v1.0':
from ..registry_discovery.aio.operations import RegistryManagementNonWorkspaceOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'registry_management_non_workspace'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def run(self):
"""Instance depends on the API version:
* v1.0: :class:`RunOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunOperations>`
"""
api_version = self._get_api_version('run')
if api_version == 'v1.0':
from ..runhistory.aio.operations import RunOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'run'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def run_artifacts(self):
"""Instance depends on the API version:
* v1.0: :class:`RunArtifactsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunArtifactsOperations>`
"""
api_version = self._get_api_version('run_artifacts')
if api_version == 'v1.0':
from ..runhistory.aio.operations import RunArtifactsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'run_artifacts'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def runs(self):
"""Instance depends on the API version:
* v1.0: :class:`RunsOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.RunsOperations>`
"""
api_version = self._get_api_version('runs')
if api_version == 'v1.0':
from ..runhistory.aio.operations import RunsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'runs'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def spans(self):
"""Instance depends on the API version:
* v1.0: :class:`SpansOperations<azure.mgmt.machinelearningservices.runhistory.aio.operations.SpansOperations>`
"""
api_version = self._get_api_version('spans')
if api_version == 'v1.0':
from ..runhistory.aio.operations import SpansOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'spans'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def temporary_data_references(self):
"""Instance depends on the API version:
* 2021-10-01-dataplanepreview: :class:`TemporaryDataReferencesOperations<azure.mgmt.machinelearningservices.v2021_10_01_dataplanepreview.aio.operations.TemporaryDataReferencesOperations>`
"""
api_version = self._get_api_version('temporary_data_references')
if api_version == '2021-10-01-dataplanepreview':
from ..v2021_10_01_dataplanepreview.aio.operations import TemporaryDataReferencesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'temporary_data_references'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def usages(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.UsagesOperations>`
* 2022-01-01-preview: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.UsagesOperations>`
* 2022-05-01: :class:`UsagesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.UsagesOperations>`
"""
api_version = self._get_api_version('usages')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import UsagesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview.aio.operations import UsagesOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import UsagesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'usages'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def virtual_machine_sizes(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.VirtualMachineSizesOperations>`
* 2022-01-01-preview: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.VirtualMachineSizesOperations>`
* 2022-05-01: :class:`VirtualMachineSizesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.VirtualMachineSizesOperations>`
"""
api_version = self._get_api_version('virtual_machine_sizes')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview.aio.operations import VirtualMachineSizesOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import VirtualMachineSizesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'virtual_machine_sizes'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def workspace_connections(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceConnectionsOperations>`
* 2022-01-01-preview: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceConnectionsOperations>`
* 2022-05-01: :class:`WorkspaceConnectionsOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceConnectionsOperations>`
"""
api_version = self._get_api_version('workspace_connections')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import WorkspaceConnectionsOperations as OperationClass
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview.aio.operations import WorkspaceConnectionsOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import WorkspaceConnectionsOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'workspace_connections'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def workspace_features(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspaceFeaturesOperations>`
* 2022-01-01-preview: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspaceFeaturesOperations>`
* 2022-05-01: :class:`WorkspaceFeaturesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspaceFeaturesOperations>`
"""
api_version = self._get_api_version('workspace_features')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import WorkspaceFeaturesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview.aio.operations import WorkspaceFeaturesOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import WorkspaceFeaturesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'workspace_features'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def workspaces(self):
"""Instance depends on the API version:
* 2021-10-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2021_10_01.aio.operations.WorkspacesOperations>`
* 2022-01-01-preview: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_01_01_preview.aio.operations.WorkspacesOperations>`
* 2022-05-01: :class:`WorkspacesOperations<azure.mgmt.machinelearningservices.v2022_05_01.aio.operations.WorkspacesOperations>`
"""
api_version = self._get_api_version('workspaces')
if api_version == '2021-10-01':
from ..v2021_10_01.aio.operations import WorkspacesOperations as OperationClass
elif api_version == '2022-01-01-preview':
from ..v2022_01_01_preview.aio.operations import WorkspacesOperations as OperationClass
elif api_version == '2022-05-01':
from ..v2022_05_01.aio.operations import WorkspacesOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'workspaces'".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
async def close(self):
await self._client.close()
async def __aenter__(self):
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details):
await self._client.__aexit__(*exc_details)
| 1.695313 | 2 |
tests/operators/test_op/test_csr_mul.py | KnowingNothing/akg-test | 0 | 12796536 | import numpy as np
import scipy.sparse
import akg
from akg import tvm
from akg import topi
from tests.common.base import get_rtol_atol
from tests.common.gen_random import random_gaussian
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from akg.utils.result_analysis import target_profiling
from akg.utils.format_transform import to_tvm_nd_array, get_shape
from akg.utils.dsl_create import get_broadcast_shape
def csr_mul(dense, sparse_data, col_idx, row_idx, shape):
assert len(shape) == 2, "only supports 2-dim sparse tensor"
assert len(dense.shape) <= 2
assert dense.dtype == sparse_data.dtype, "data and weight must have the same dtype"
num_rows = row_idx.shape[0] - 1
dense_shape = get_shape(dense.shape)
sparse_shape = get_shape(shape)
broadcast_shape = get_broadcast_shape(dense_shape, sparse_shape)
need_expand = tvm.const(len(dense_shape) < len(broadcast_shape))
need_broadcast_first_dim = tvm.const(
len(dense_shape) == len(broadcast_shape) and dense_shape[0] < broadcast_shape[0])
need_broadcast_last_dim = tvm.const(
len(dense_shape) == len(broadcast_shape) and dense_shape[1] < broadcast_shape[1])
def gen_ir(dense, sparse_data, col_idx, row_idx, output):
ib = tvm.ir_builder.create()
with ib.for_range(0, num_rows, name='i') as i:
start = ib.load(row_idx, i)
end = ib.load(row_idx, i + 1)
with ib.for_range(0, end - start, name='j') as j:
pos = start + j
with ib.if_scope(pos < end):
val = ib.load(sparse_data, pos)
col = ib.load(col_idx, pos)
with ib.if_scope(need_expand):
ib.store(output, pos, val * ib.load(dense, [col]))
with ib.else_scope():
with ib.if_scope(need_broadcast_first_dim):
ib.store(output, pos, val * ib.load(dense, [0, col]))
with ib.else_scope():
with ib.if_scope(need_broadcast_last_dim):
ib.store(output, pos, val * ib.load(dense, [i, 0]))
with ib.else_scope():
ib.store(output, pos, val * ib.load(dense, [i, col]))
return ib.get()
output_name = "T_csr_mul_" + dense.op.name + "_" + sparse_data.op.name
out_buf = tvm.decl_buffer(sparse_data.shape, sparse_data.dtype, output_name)
return tvm.extern([shape],
[dense, sparse_data, col_idx, row_idx],
lambda ins, outs: gen_ir(ins[0], ins[1], ins[2], ins[3], outs[0]),
dtype=sparse_data.dtype, out_buffers=[out_buf], name=output_name)
def gen_data(shape1, shape2, dtype1, dtype2):
dense = random_gaussian(shape1).astype(dtype1)
sparse_data = scipy.sparse.rand(shape2[0], shape2[1], density=0.2, format='csr', dtype=dtype1)
expect = sparse_data.multiply(np.broadcast_to(dense, shape2))
return dense, sparse_data.data, sparse_data.indices.astype(dtype2), sparse_data.indptr.astype(dtype2), expect.data
def test_csr_mul(shape1, shape2, dtype1, dtype2, poly_sch=False, attrs=None):
if not attrs:
attrs = {"target": "cuda"}
# gen data
op_attrs = [shape2]
dense, sparse_data, col_idx, row_idx, expect = gen_data(shape1, shape2, dtype1, dtype2)
output_shape = expect.shape
attrs["csr_avg_row"] = sparse_data.shape[0] // shape1[0]
mod = utils.op_build_test(csr_mul, [shape1, sparse_data.shape, col_idx.shape, row_idx.shape],
[dtype1, dtype1, dtype2, dtype2], op_attrs=op_attrs, polyhedral=poly_sch,
attrs=attrs, kernel_name="csr_mul")
if len(expect.shape) == 0:
output_shape = (1, )
output = np.zeros(output_shape, expect.dtype)
output = utils.mod_launch(mod, (dense, sparse_data, col_idx, row_idx, output), expect=expect)
atol, rtol = get_rtol_atol("csr_mul", dtype1)
res = compare_tensor(output, expect, rtol=rtol, atol=atol)
print("Test {}".format("Pass" if res else "Failed"))
target_name = attrs["target"].split()[0]
if not res:
mod_source = mod
if target_name != "llvm":
mod_source = mod.imported_modules[0]
print("Error {}:========================".format(target_name))
print(mod_source.get_source())
raise AssertionError("Test fail")
if attrs["profiling"]:
args_list = to_tvm_nd_array(
[dense, sparse_data, col_idx, row_idx, output, expect], akg.tvm.context(target_name, 0))
target_profiling(mod, *args_list, target=target_name, repeat_time=attrs["repeat_time"])
| 1.90625 | 2 |
pywebfaction/exceptions.py | dominicrodger/pywebfaction | 0 | 12796537 | import ast
EXCEPTION_TYPE_PREFIX = "<class 'webfaction_api.exceptions."
EXCEPTION_TYPE_SUFFIX = "'>"
def _parse_exc_type(exc_type):
# This is horribly hacky, but there's not a particularly elegant
# way to go from the exception type to a string representing that
# exception.
if not exc_type.startswith(EXCEPTION_TYPE_PREFIX):
return None
if not exc_type.endswith(EXCEPTION_TYPE_SUFFIX):
return None
return exc_type[len(EXCEPTION_TYPE_PREFIX):len(EXCEPTION_TYPE_SUFFIX) * -1]
def _parse_exc_message(exc_message):
if not exc_message:
return None
message = ast.literal_eval(exc_message)
if isinstance(message, list):
if not message:
return None
return message[0]
return None
class WebFactionFault(Exception):
def __init__(self, underlying):
self.underlying_fault = underlying
try:
exc_type, exc_message = underlying.faultString.split(':', 1)
self.exception_type = _parse_exc_type(exc_type)
self.exception_message = _parse_exc_message(exc_message)
except ValueError:
self.exception_type = None
self.exception_message = None
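# Illustrative sketch (not part of the original module): how WebFactionFault is
# expected to unpack a fault raised by Python 3's xmlrpc client. The fault string
# below is an assumed example shaped to match the parsing logic above.
def _demo_webfaction_fault():
    import xmlrpc.client
    fault = xmlrpc.client.Fault(
        1, "<class 'webfaction_api.exceptions.DatabaseError'>:['name already in use']")
    wrapped = WebFactionFault(fault)
    # wrapped.exception_type    -> 'DatabaseError'
    # wrapped.exception_message -> 'name already in use'
    return wrapped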
| 2.546875 | 3 |
forms.py | sarar0sa/PCAPinator | 0 | 12796538 | <filename>forms.py
from flask_wtf import FlaskForm
from wtforms import (StringField, IntegerField, SubmitField)
from wtforms.validators import InputRequired, Length, Regexp
valid_search_time = "^(19[0-9][0-9]|20[0-9][0-9])(\/)(0[1-9]|1[0-2])(\/)(0[1-9]|1[0-9]|2[0-9]|3[0-1])\+(0[0-9]|1[0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])$"
# The form rendered on the main page, with the corresponding validators
class PcapForm(FlaskForm):
pcap_id = IntegerField('PCAP ID', validators=[InputRequired(message = "The PCAP ID must only consist of numbers")])
device_name = StringField('Device Name',validators=[InputRequired(), Length(max=20)])
session_id = IntegerField('Session ID', validators=[InputRequired(message = "The Session ID must only consist of numbers")])
search_time = StringField('Search Time',validators=[InputRequired(), Length(max=30), Regexp(valid_search_time, message="The Search Time must have the format: yyyy/mm/dd+hr:min:sec")])
submit = SubmitField('Submit')
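# Quick sanity-check helper (sketch, not used by the form itself): the Search Time
# pattern is meant to accept values like "2021/05/03+14:30:00"; that value is an
# assumed example, not taken from application data.
def _search_time_matches(value):
    import re
    return re.match(valid_search_time, value) is not None
# _search_time_matches("2021/05/03+14:30:00") -> True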
| 2.953125 | 3 |
download/bookmarks_spreadsheet.py | quihi/fanfiction | 0 | 12796539 | <reponame>quihi/fanfiction
#!/usr/bin/env python3
'''
This downloads metadata about the bookmarks in a collection on
Archive of Our Own and creates a spreadsheet.
It will likely work for a user's bookmark's but I haven't tried it.
To use: replace BOOKMARKS with the URL that you want to summarize
(be sure to include the ?page= part)
replace [user] in HEADERS with your username
replace PAGES with the number of pages of bookmarks there are.
Word counts are not added for external bookmarks.
This prints to standard output and should be redirected to a file.
Lines 54-57 check whether a particular series is linked
(i.e. the fic is in that series) and add a note based on that.
You can delete that check or add your own extra notes.
'''
from bs4 import BeautifulSoup
import requests
import time
BOOKMARKS = "https://archiveofourown.org/collections/RigelBlackComprehensive/bookmarks?page="
HEADERS = { "User-Agent": "[user]-bot" }
PAGES = 15
DEBUG = False
def main():
for p in range(1, PAGES+1):
link = BOOKMARKS + str(p)
r = requests.get(link, headers=HEADERS)
soup = BeautifulSoup(r.text, "html5lib")
fics = soup.find_all(class_="bookmark blurb group")
for blurb in fics:
if DEBUG:
print("BLURB")
print("\n\n")
print(blurb)
print("\n\n\n\n\n")
number = blurb["id"].strip("bookmark_")
title = blurb.h4.a.string
# Author is None for non-AO3 fics, since they're text and not links
author = blurb.find(rel="author")
            if author is None:
author = blurb.h4.a.next_sibling.replace("by", "").strip()
else:
author = author.string
words = blurb.find(name="dd", class_="words")
if words is None:
words = ""
else:
words = words.string
if "/series/1722145" in str(blurb):
notes = "Rev Arc"
else:
notes = ""
print("{}\t{}\t{}\t{}\t{}".format(number, title, author, words, notes))
time.sleep(10)
if __name__ == '__main__':
main()
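# Typical invocation (sketch; the output file name is arbitrary):
#   python3 bookmarks_spreadsheet.py > bookmarks.tsv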
| 3.484375 | 3 |
baselines/ddpg/src/simulation/Simulate_training.py | hzm2016/Peg_in_hole_assembly | 25 | 12796540 | <filename>baselines/ddpg/src/simulation/Simulate_training.py<gh_stars>10-100
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: Simulate_main
Description :
Author : <NAME>
date: 18-1-12
-------------------------------------------------
Change Activity:
18-1-12
-------------------------------------------------
"""
# -*- coding: utf-8 -*-
import os
import time
from collections import deque
import pickle
import sys
from baselines import logger
from simulation_ddpg import DDPG
from util import mpi_mean, mpi_std, mpi_max, mpi_sum
import baselines.common.tf_util as U
import tensorflow as tf
from mpi4py import MPI
import numpy as np
import pandas as pd
"""First the path should be added."""
sys.path.append("/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines")
def train(env, nb_epochs, nb_epoch_cycles, render_eval, reward_scale, render, param_noise, actor, critic,
normalize_returns, normalize_observations, critic_l2_reg, actor_lr, critic_lr, action_noise,
popart, gamma, clip_norm, nb_train_steps, nb_rollout_steps, nb_eval_steps, batch_size, memory,
tau=0.01, eval_env=None, param_noise_adaption_interval=50, restore=False):
rank = MPI.COMM_WORLD.Get_rank()
max_action = np.array([0.2, 0.2, 0.2, 0.2, 0.2, 0.2])
# min_action = np.array([-0.2, -0.2, -0.2, -0.2, -0.2, -0.2])
logger.info('scaling actions by {} before executing in env'.format(max_action))
    model_directory = '/home/zhimin/PycharmProjects/RL_UA/Peg_in_Hole/1-baselines/baselines/ddpg/simulation_data/'  # trailing slash so the save/restore paths below join correctly
agent = DDPG(actor, critic, memory, env.state_dim, env.action_dim,
gamma=gamma, tau=tau, normalize_returns=normalize_returns,
normalize_observations=normalize_observations,
batch_size=batch_size, action_noise=action_noise, param_noise=param_noise, critic_l2_reg=critic_l2_reg,
actor_lr=actor_lr, critic_lr=critic_lr, enable_popart=popart, clip_norm=clip_norm,
reward_scale=reward_scale, restore=restore)
logger.info('Using agent with the following configuration:')
logger.info(str(agent.__dict__.items()))
saver = tf.train.Saver()
"""Set up logging stuff only for a single worker"""
# if rank == 0:
# saver = tf.train.Saver()
# else:
# saver = None
# eval_episode_rewards_history = deque(maxlen=100)
episode_rewards_history = deque(maxlen=100)
with U.single_threaded_session() as sess:
"""Prepare everything"""
if restore:
saver = tf.train.import_meta_graph(model_directory + 'model.meta')
agent.restore_model(model_directory, saver, sess)
else:
agent.initialize(sess)
sess.graph.finalize()
"""Agent Reset"""
agent.reset()
# episode_step = 0
# episodes = 0
# t = 0
"""Force calibration"""
# if env.robot_control.CalibFCforce() is False:
# exit()
delay_rate = np.power(10, 1 / nb_epochs)
epoch_start_time = time.time()
epoch_episode_rewards = []
epoch_episode_steps = []
epoch_adaptive_distances = []
epoch_episodes_discount_reward = []
epoch_episodes_average_reward = []
epoch_actions = []
epoch_qs = []
Force_moments = []
epoch_episodes = 0
Long_term_reward = - 0.10
for epoch in range(nb_epochs):
"""Show the result for cycle 20 times and Save the model"""
epoch_actor_losses = []
epoch_critic_losses = []
"""Delay the learning rate"""
epoch_actor_lr = actor_lr / delay_rate
epoch_critic_lr = critic_lr / delay_rate
for cycle in range(nb_epoch_cycles):
"""environment reset """
agent.reset()
obs = env.reset()
episode_reward = 0.
episode_discount_reward = 0.
q_value = 0.
done = False
forcement = []
Last_average_reward = 0.
Number_episodes = 0.
for t_rollout in range(nb_rollout_steps):
"""Predict next action"""
action, q = agent.pi(obs, apply_noise=True, compute_Q=True)
assert action.shape[0] == env.action_dim
q_value += q
"""scale for execution in env"""
new_obs, r, done, info, expert_action = env.step(action, t_rollout)
episode_discount_reward += gamma * r
"""adapt_action_noise"""
agent.feed_back_explore(action, expert_action)
logger.info("The maximum force:" + str(max(abs(new_obs[0:3]))) + " The maximum moments:" + str(max(abs(new_obs[3:6]))))
episode_reward += r
delta = r - Long_term_reward
# if memory.nb_entries >= batch_size and param_noise is not None:
# agent.feed_back_explore(delta)
Number_episodes = gamma + gamma*Number_episodes
Last_average_reward = r + gamma*Last_average_reward
"""Plot the force and moments"""
# if render:
# forcement.append(new_obs[0:6])
# # print(forcement)
# Force_moments.append(new_obs[0:6])
# env.plot_force(forcement, t_rollout+1)
if epoch == 0 and cycle == 0:
forcement.append(new_obs[0:6])
Force_moments.append(new_obs[0:6])
# env.plot_force(forcement, t_rollout + 1)
if epoch == nb_epoch_cycles - 1 and cycle == nb_epoch_cycles - 1:
forcement.append(new_obs[0:6])
Force_moments.append(new_obs[0:6])
# env.plot_force(forcement, t_rollout + 1)
epoch_actions.append(action)
agent.store_transition(obs, action, r, new_obs, done)
obs = new_obs
"""Episode done and start pull the pegs step by step"""
if done:
logger.info('Peg-in-hole assembly done!!!')
epoch_episode_rewards.append(episode_reward)
epoch_episodes_discount_reward.append(Last_average_reward)
episode_rewards_history.append(episode_reward)
epoch_episode_steps.append(t_rollout)
epoch_episodes += 1
# pull_done = False
# while pull_done is False and info:
# pull_done, pull_safe = env.step_up() #Simulation env
# pull_done, pull_safe = env.pull_up() #True env
#
# if pull_safe is False:
# logger.info('Pull up the pegs failed for the exceed force!!!')
# exit()
break
"""Episode failed and start pull the pegs step by step"""
if info is False:
logger.info('Peg-in-hole assembly failed for the exceed force!!!')
# pull_done = False
# while pull_done is False and info:
# pull_done, pull_safe = env.step_up()
# pull_done, pull_safe = env.pull_up() # True env
#
# if pull_safe is False:
# logger.info('Peg-in-hole assembly failed for the exceed force!!!')
# exit()
break
Long_term_reward = Last_average_reward/Number_episodes
epoch_qs.append(q_value)
env.save_figure('force_moment')
epoch_episodes_average_reward.append(Long_term_reward)
agent.feedback_adptive_explore()
if t_rollout == nb_rollout_steps - 1:
logger.info('Peg-in-hole assembly failed for exceed steps!!!')
                    logger.info('The deepest position: {}'.format(obs[8]))
"""train model for nb_train_steps times"""
for t_train in range(nb_train_steps):
cl, al = agent.train(epoch_actor_lr, epoch_critic_lr)
epoch_critic_losses.append(cl)
epoch_actor_losses.append(al)
agent.update_target_net()
"""Adapt param noise, if necessary"""
if memory.nb_entries >= batch_size and param_noise is not None:
distance = agent.adapt_param_noise()
epoch_adaptive_distances.append(distance)
"""write the result into the summary"""
agent.log_scalar("actor_loss", mpi_mean(epoch_actor_losses), epoch_episodes)
agent.log_scalar("critic_loss", mpi_mean(epoch_critic_losses), epoch_episodes)
agent.log_scalar("episode_score", mpi_mean(epoch_episode_rewards), epoch_episodes)
agent.log_scalar("episode_steps", mpi_mean(epoch_episode_steps), epoch_episodes)
agent.log_scalar("episode_average_reward", mpi_mean(epoch_episodes_average_reward), epoch_episodes)
agent.log_scalar("episode_discount_score", mpi_mean(epoch_episodes_discount_reward), epoch_episodes)
"""Log stats."""
epoch_train_duration = time.time() - epoch_start_time
stats = agent.get_stats()
combined_stats = {}
for key in sorted(stats.keys()):
combined_stats[key] = mpi_mean(stats[key])
"""Rollout statistics. compute the mean of the total nb_epoch_cycles"""
combined_stats['rollout/return'] = mpi_mean(epoch_episode_rewards)
combined_stats['rollout/return_history'] = mpi_mean(np.mean(episode_rewards_history))
combined_stats['rollout/episode_steps'] = mpi_mean(epoch_episode_steps)
combined_stats['rollout/episodes'] = mpi_sum(epoch_episodes)
combined_stats['rollout/actions_mean'] = mpi_mean(epoch_actions)
combined_stats['rollout/actions_std'] = mpi_std(epoch_actions)
combined_stats['rollout/Q_mean'] = mpi_mean(epoch_qs)
"""Train statistics"""
combined_stats['train/loss_actor'] = mpi_mean(epoch_actor_losses)
combined_stats['train/loss_critic'] = mpi_mean(epoch_critic_losses)
combined_stats['train/param_noise_distance'] = mpi_mean(epoch_adaptive_distances)
"""save the model and the result"""
saver.save(sess, model_directory + 'simulation_model')
# re_rewards = pd.DataFrame(epoch_episode_rewards)
# re_rewards.to_csv("re_rewards.csv", sep=',', header=False, index=False)
re_forcement = pd.DataFrame(Force_moments)
re_forcement.to_csv(model_directory + 'simulation_forcement', sep=',', header=False, index=False)
# re_steps = pd.DataFrame(epoch_episode_steps)
# re_steps.to_csv("re_steps.csv", sep=',', header=False, index=False)
# nf = pd.read_csv("data.csv", sep=',', header=None)
for key in sorted(combined_stats.keys()):
logger.record_tabular(key, combined_stats[key])
logger.dump_tabular()
logger.info('')
logdir = logger.get_dir()
if rank == 0 and logdir:
if hasattr(env, 'get_state'):
with open(os.path.join(logdir, 'env_state.pkl'), 'wb') as f:
pickle.dump(env.get_state(), f)
if eval_env and hasattr(eval_env, 'get_state'):
with open(os.path.join(logdir, 'eval_env_state.pkl'), 'wb') as f:
pickle.dump(eval_env.get_state(), f)
| 1.890625 | 2 |
secret/migrations/0002_auto_20191230_1047.py | sgilissen/PulseSecretServer | 0 | 12796541 | # Generated by Django 2.2.9 on 2019-12-30 10:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('secret', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='secret',
name='expiry_date',
field=models.DateTimeField(blank=True, null=True),
),
]
| 1.421875 | 1 |
github_terminal/github_terminal.py | amritghimire/github-terminal | 0 | 12796542 | <gh_stars>0
"""Main module."""
from .issue import issue
from .pr import pr
def handle_category_action(args):
"""Handle the category specific action."""
category = args.category
return {'issue': issue, 'pr': pr}.get(category, issue)
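# Usage sketch (assumption: the real CLI parses `args` with argparse and each
# category module exposes the matching command handlers). Not part of the
# original file.
def _demo_dispatch():
    from argparse import Namespace
    handler = handle_category_action(Namespace(category='pr'))
    return handler  # resolves to the `pr` module; unknown categories fall back to `issue`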
| 2.09375 | 2 |
CursoemVideo/Desafio009.py | davihonorato/Curso-python | 0 | 12796543 | # Write a program that reads any integer and displays its multiplication table on the screen.
n1 = int(input('Digite um número qualquer: '))
print('-'*13)
print(f'1 x {n1} = {1*n1}')
print(f'2 x {n1} = {2*n1}')
print(f'3 x {n1} = {3*n1}')
print(f'4 x {n1} = {4*n1}')
print(f'5 x {n1} = {5*n1}')
print(f'6 x {n1} = {6*n1}')
print(f'7 x {n1} = {7*n1}')
print(f'8 x {n1} = {8*n1}')
print(f'9 x {n1} = {9*n1}')
print(f'10 x {n1} = {10*n1}')
print('-'*13)
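# A more compact equivalent using a loop (sketch for comparison; not called above):
def tabuada(n):
    print('-' * 13)
    for i in range(1, 11):
        print(f'{i} x {n} = {i * n}')
    print('-' * 13)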
| 3.9375 | 4 |
venv/lib/python3.6/site-packages/madmom/io/__init__.py | metu-sparg/higrid | 8 | 12796544 | # encoding: utf-8
"""
Input/output package.
"""
from __future__ import absolute_import, division, print_function
import io as _io
import contextlib
import numpy as np
from .audio import load_audio_file
from .midi import load_midi, write_midi
from ..utils import suppress_warnings, string_types
ENCODING = 'utf8'
# dtype for numpy structured arrays that contain labelled segments
# 'label' needs to be castable to str
SEGMENT_DTYPE = [('start', float), ('end', float), ('label', object)]
# overwrite the built-in open() to transparently apply some magic file handling
@contextlib.contextmanager
def open_file(filename, mode='r'):
"""
Context manager which yields an open file or handle with the given mode
and closes it if needed afterwards.
Parameters
----------
filename : str or file handle
File (handle) to open.
mode: {'r', 'w'}
Specifies the mode in which the file is opened.
Yields
------
Open file (handle).
"""
# check if we need to open the file
if isinstance(filename, string_types):
f = fid = _io.open(filename, mode)
else:
f = filename
fid = None
# yield an open file handle
yield f
# close the file if needed
if fid:
fid.close()
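# Usage sketch for open_file (illustrative only; 'events.txt' is a placeholder path).
# It accepts either a filename or an already-open handle and only closes handles
# it opened itself.
def _open_file_demo(path='events.txt'):
    with open_file(path, 'w') as f:
        f.write('0.100\n0.500\n')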
@suppress_warnings
def load_events(filename):
"""
    Load events from a text file, one floating point number per line.
Parameters
----------
filename : str or file handle
File to load the events from.
Returns
-------
numpy array
Events.
Notes
-----
Comments (lines starting with '#') and additional columns are ignored,
i.e. only the first column is returned.
"""
# read in the events, one per line
events = np.loadtxt(filename, ndmin=2)
# 1st column is the event's time, the rest is ignored
return events[:, 0]
def write_events(events, filename, fmt='%.3f', delimiter='\t', header=None):
"""
Write the events to a file, one event per line.
Parameters
----------
events : numpy array
Events to be written to file.
filename : str or file handle
File to write the events to.
fmt : str or sequence of strs, optional
A single format (e.g. '%.3f'), a sequence of formats, or a multi-format
string (e.g. '%.3f %.3f'), in which case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
"""
events = np.array(events)
# reformat fmt to be a single string if needed
if isinstance(fmt, (list, tuple)):
fmt = delimiter.join(fmt)
# write output
with open_file(filename, 'wb') as f:
# write header
if header is not None:
f.write(bytes(('# ' + header + '\n').encode(ENCODING)))
# write events
for e in events:
try:
string = fmt % tuple(e.tolist())
except AttributeError:
string = e
except TypeError:
string = fmt % e
f.write(bytes((string + '\n').encode(ENCODING)))
f.flush()
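# Round-trip sketch for write_events/load_events (illustrative; the file name is a
# placeholder). Only the first column is read back, as documented above.
def _events_round_trip_demo(path='onsets.txt'):
    write_events(np.array([0.1, 0.5, 1.25]), path, header='onset times in seconds')
    return load_events(path)  # -> array([0.1, 0.5, 1.25])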
load_onsets = load_events
write_onsets = write_events
@suppress_warnings
def load_beats(filename, downbeats=False):
"""
Load the beats from the given file, one beat per line of format
'beat_time' ['beat_number'].
Parameters
----------
filename : str or file handle
File to load the beats from.
downbeats : bool, optional
Load only downbeats instead of beats.
Returns
-------
numpy array
Beats.
"""
values = np.loadtxt(filename, ndmin=1)
if values.ndim > 1:
if downbeats:
# rows with a "1" in the 2nd column are downbeats
return values[values[:, 1] == 1][:, 0]
else:
# 1st column is the beat time, the rest is ignored
return values[:, 0]
return values
def write_beats(beats, filename, fmt=None, delimiter='\t', header=None):
"""
Write the beats to a file.
Parameters
----------
beats : numpy array
Beats to be written to file.
filename : str or file handle
File to write the beats to.
fmt : str or sequence of strs, optional
A single format (e.g. '%.3f'), a sequence of formats (e.g.
['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'), in which
case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
"""
if fmt is None and beats.ndim == 2:
fmt = ['%.3f', '%d']
elif fmt is None:
fmt = '%.3f'
write_events(beats, filename, fmt, delimiter, header)
def load_downbeats(filename):
"""
Load the downbeats from the given file.
Parameters
----------
filename : str or file handle
File to load the downbeats from.
Returns
-------
numpy array
Downbeats.
"""
return load_beats(filename, downbeats=True)
def write_downbeats(beats, filename, fmt=None, delimiter='\t', header=None):
"""
Write the downbeats to a file.
Parameters
----------
beats : numpy array
Beats or downbeats to be written to file.
filename : str or file handle
File to write the beats to.
fmt : str or sequence of strs, optional
A single format (e.g. '%.3f'), a sequence of formats (e.g.
['%.3f', '%d']), or a multi-format string (e.g. '%.3f %d'), in which
case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
Notes
-----
If `beats` contains both time and number of the beats, they are filtered
to contain only the downbeats (i.e. only the times of those beats with a
beat number of 1).
"""
if beats.ndim == 2:
beats = beats[beats[:, 1] == 1][:, 0]
if fmt is None:
fmt = '%.3f'
write_events(beats, filename, fmt, delimiter, header)
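# Sketch: writing downbeats from a beats array whose second column holds beat
# numbers (illustrative values; the file name is a placeholder). Only the times
# of beats numbered 1 end up in the file, as described above.
def _downbeats_demo(path='downbeats.txt'):
    beats = np.array([[0.5, 1], [1.0, 2], [1.5, 3], [2.0, 1]])
    write_downbeats(beats, path)  # file contains 0.500 and 2.000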
@suppress_warnings
def load_notes(filename):
"""
Load the notes from the given file, one note per line of format
'onset_time' 'note_number' ['duration' ['velocity']].
Parameters
----------
filename: str or file handle
File to load the notes from.
Returns
-------
numpy array
Notes.
"""
return np.loadtxt(filename, ndmin=2)
def write_notes(notes, filename, fmt=None, delimiter='\t', header=None):
"""
Write the notes to a file.
Parameters
----------
notes : numpy array, shape (num_notes, 2)
Notes, row format 'onset_time' 'note_number' ['duration' ['velocity']].
filename : str or file handle
File to write the notes to.
fmt : str or sequence of strs, optional
A sequence of formats (e.g. ['%.3f', '%d', '%.3f', '%d']), or a
multi-format string, e.g. '%.3f %d %.3f %d', in which case `delimiter`
is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
Returns
-------
numpy array
Notes.
"""
# set default format
if fmt is None:
fmt = ['%.3f', '%d', '%.3f', '%d']
if not notes.ndim == 2:
raise ValueError('unknown format for `notes`')
    # truncate the format to the number of columns given
fmt = delimiter.join(fmt[:notes.shape[1]])
# write the notes
write_events(notes, filename, fmt=fmt, delimiter=delimiter, header=header)
def load_segments(filename):
"""
Load labelled segments from file, one segment per line. Each segment is of
form <start> <end> <label>, where <start> and <end> are floating point
numbers, and <label> is a string.
Parameters
----------
filename : str or file handle
File to read the labelled segments from.
Returns
-------
segments : numpy structured array
Structured array with columns 'start', 'end', and 'label',
containing the beginning, end, and label of segments.
"""
start, end, label = [], [], []
with open_file(filename) as f:
for line in f:
s, e, l = line.split()
start.append(float(s))
end.append(float(e))
label.append(l)
segments = np.zeros(len(start), dtype=SEGMENT_DTYPE)
segments['start'] = start
segments['end'] = end
segments['label'] = label
return segments
def write_segments(segments, filename, fmt=None, delimiter='\t', header=None):
"""
Write labelled segments to a file.
Parameters
----------
segments : numpy structured array
Labelled segments, one per row (column definition see SEGMENT_DTYPE).
filename : str or file handle
Output filename or handle.
fmt : str or sequence of strs, optional
A sequence of formats (e.g. ['%.3f', '%.3f', '%s']), or a multi-format
string (e.g. '%.3f %.3f %s'), in which case `delimiter` is ignored.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
Returns
-------
numpy structured array
Labelled segments
Notes
-----
Labelled segments are represented as numpy structured array with three
named columns: 'start' contains the start position (e.g. seconds),
'end' the end position, and 'label' the segment label.
"""
if fmt is None:
fmt = ['%.3f', '%.3f', '%s']
write_events(segments, filename, fmt=fmt, delimiter=delimiter,
header=header)
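# Sketch: building a labelled-segment array with SEGMENT_DTYPE and writing it out
# (illustrative labels; the file name is a placeholder).
def _segments_demo(path='segments.txt'):
    segments = np.zeros(2, dtype=SEGMENT_DTYPE)
    segments['start'] = [0.0, 1.5]
    segments['end'] = [1.5, 3.0]
    segments['label'] = ['C:maj', 'G:maj']
    write_segments(segments, path)
    return load_segments(path)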
load_chords = load_segments
write_chords = write_segments
def load_key(filename):
"""
Load the key from the given file.
Parameters
----------
filename : str or file handle
File to read key information from.
Returns
-------
str
Key.
"""
with open_file(filename) as f:
return f.read().strip()
def write_key(key, filename, header=None):
"""
Write key string to a file.
Parameters
----------
key : str
Key name.
filename : str or file handle
Output file.
header : str, optional
String that will be written at the beginning of the file as comment.
Returns
-------
key : str
Key name.
"""
write_events([key], filename, fmt='%s', header=header)
def load_tempo(filename, split_value=1., sort=None, norm_strengths=None,
max_len=None):
"""
Load tempo information from the given file.
Tempo information must have the following format:
'main tempo' ['secondary tempo' ['relative_strength']]
Parameters
----------
filename : str or file handle
File to load the tempo from.
split_value : float, optional
Value to distinguish between tempi and strengths.
`values` > `split_value` are interpreted as tempi [bpm],
`values` <= `split_value` are interpreted as strengths.
sort : bool, deprecated
Sort the tempi by their strength.
norm_strengths : bool, deprecated
Normalize the strengths to sum 1.
max_len : int, deprecated
Return at most `max_len` tempi.
Returns
-------
tempi : numpy array, shape (num_tempi[, 2])
Array with tempi. If no strength is parsed, a 1-dimensional array of
length 'num_tempi' is returned. If strengths are given, a 2D array
with tempi (first column) and their relative strengths (second column)
is returned.
"""
# try to load the data from file
values = np.loadtxt(filename, ndmin=1)
    # split the values into tempi and strengths according to split_value
# TODO: this is kind of hack-ish, find a better solution
tempi = values[values > split_value]
strengths = values[values <= split_value]
# make the strengths behave properly
strength_sum = np.sum(strengths)
# relative strengths are given (one less than tempi)
if len(tempi) - len(strengths) == 1:
strengths = np.append(strengths, 1. - strength_sum)
if np.any(strengths < 0):
raise AssertionError('strengths must be positive')
# no strength is given, assume an evenly distributed one
if strength_sum == 0:
strengths = np.ones_like(tempi) / float(len(tempi))
# normalize the strengths
if norm_strengths is not None:
import warnings
warnings.warn('`norm_strengths` is deprecated as of version 0.16 and '
'will be removed in 0.18. Please normalize strengths '
'separately.')
strengths /= float(strength_sum)
# tempi and strengths must have same length
if len(tempi) != len(strengths):
raise AssertionError('tempi and strengths must have same length')
# order the tempi according to their strengths
if sort:
import warnings
warnings.warn('`sort` is deprecated as of version 0.16 and will be '
'removed in 0.18. Please sort the returned array '
'separately.')
# Note: use 'mergesort', because we want a stable sorting algorithm
# which keeps the order of the keys in case of duplicate keys
# but we need to apply this '(-strengths)' trick because we want
# tempi with uniformly distributed strengths to keep their order
sort_idx = (-strengths).argsort(kind='mergesort')
tempi = tempi[sort_idx]
strengths = strengths[sort_idx]
# return at most 'max_len' tempi and their relative strength
if max_len is not None:
import warnings
warnings.warn('`max_len` is deprecated as of version 0.16 and will be '
'removed in 0.18. Please truncate the returned array '
'separately.')
return np.vstack((tempi[:max_len], strengths[:max_len])).T
def write_tempo(tempi, filename, delimiter='\t', header=None, mirex=None):
"""
Write the most dominant tempi and the relative strength to a file.
Parameters
----------
tempi : numpy array
Array with the detected tempi (first column) and their strengths
(second column).
filename : str or file handle
Output file.
delimiter : str, optional
String or character separating columns.
header : str, optional
String that will be written at the beginning of the file as comment.
mirex : bool, deprecated
Report the lower tempo first (as required by MIREX).
Returns
-------
tempo_1 : float
The most dominant tempo.
tempo_2 : float
The second most dominant tempo.
strength : float
Their relative strength.
"""
# make the given tempi a 2d array
tempi = np.array(tempi, ndmin=2)
# default values
t1 = t2 = strength = np.nan
# only one tempo was detected
if len(tempi) == 1:
t1 = tempi[0][0]
strength = 1.
# consider only the two strongest tempi and strengths
elif len(tempi) > 1:
t1, t2 = tempi[:2, 0]
strength = tempi[0, 1] / sum(tempi[:2, 1])
# for MIREX, the lower tempo must be given first
if mirex is not None:
import warnings
warnings.warn('`mirex` argument is deprecated as of version 0.16 '
'and will be removed in version 0.17. Please sort the '
'tempi manually')
if t1 > t2:
t1, t2, strength = t2, t1, 1. - strength
# format as a numpy array and write to output
out = np.array([t1, t2, strength], ndmin=2)
write_events(out, filename, fmt=['%.2f', '%.2f', '%.2f'],
delimiter=delimiter, header=header)
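# Sketch: writing the two strongest tempi and their relative strength
# (illustrative values; the file name is a placeholder). The file then contains a
# single tab-separated line: 120.00 60.00 0.70.
def _tempo_demo(path='tempo.txt'):
    tempi = np.array([[120., 0.7], [60., 0.3]])
    write_tempo(tempi, path)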
| 2.40625 | 2 |
Plumber/Side.py | paldynaagata/plumber | 0 | 12796545 | <reponame>paldynaagata/plumber
from enum import Enum
class Side(Enum):
Left = 0
Up = 1
Right = 2
Down = 3 | 2.53125 | 3 |
typhoon/examples/airflow_docker/src/tests/integration/exchange_rates_api_test.py | typhoon-data-org/typhoon-orchestrator | 21 | 12796546 | <filename>typhoon/examples/airflow_docker/src/tests/integration/exchange_rates_api_test.py<gh_stars>10-100
from datetime import date
from functions import exchange_rates_api
def test_xr_get():
symbols = ['EUR', 'PHP', 'HKD']
response = exchange_rates_api.get(date(2020, 1, 1), base='USD', symbols=symbols)
assert set(response.keys()) == {'rates', 'date', 'base'}
assert set(response['rates'].keys()) == set(symbols)
def test_xr_get_history():
symbols = ['EUR', 'PHP', 'HKD']
start_at = date(2020, 1, 2)
end_at = date(2020, 1, 3)
response = exchange_rates_api.get_history(
start_at=start_at,
end_at=end_at,
base='USD',
symbols=symbols,
)
print(response)
assert set(response.keys()) == {'rates', 'start_at', 'end_at', 'base'}
assert set(response['rates'].keys()) == {start_at.isoformat(), end_at.isoformat()}
for k, v in response['rates'].items():
assert set(v.keys()) == set(symbols)
| 2.375 | 2 |
tests/models/test_example_add.py | TaiSakuma/acondbs | 0 | 12796547 | from acondbs.db.sa import sa
from acondbs.models import Map, Beam
# These tests are written primarily for the developer to understand
# how models in flask_sqlalchemy work.
# __________________________________________________________________||
def test_simple(app):
'''A simple test of adding an object
'''
with app.app_context():
# save the initial number of the maps to compare later
nmaps = len(Map.query.all())
# this instantiation doesn't need be within a app context
map1 = Map(name="map1")
with app.app_context():
sa.session.add(map1)
sa.session.commit()
with app.app_context():
# test the number of the maps is increased by one
assert (nmaps + 1) == len(Map.query.all())
# the new map can be retrieved in a different app context
map1_ = Map.query.filter_by(name='map1').first()
assert isinstance(map1_, Map)
# __________________________________________________________________||
def test_python_object(app):
'''A simple test about Python object
'''
map1 = Map(name="map1")
with app.app_context():
sa.session.add(map1)
sa.session.commit()
map1_ = Map.query.filter_by(name='map1').first()
# the query returns the same Python object
assert map1 is map1_
with app.app_context():
map1_ = Map.query.filter_by(name='map1').first()
# In a different app context, no longer the same Python object
assert map1 is not map1_
# __________________________________________________________________||
def test_primary_key(app):
'''A simple test about the primary key
'''
map1 = Map(name="map1")
# The primary key (map_id) is None at this point
assert map1.map_id is None
with app.app_context():
sa.session.add(map1)
sa.session.commit()
# After the commit, map_id is automatically assigned
map_id = map1.map_id
assert map_id is not None
with app.app_context():
# The object can be retrived by the map_id in another context
map1 = Map.query.filter_by(map_id=map_id).first()
assert 'map1' == map1.name
# __________________________________________________________________||
def test_relation(app):
'''A simple test of adding an object with relation
'''
map1 = Map(name="map1")
beam1 = Beam(name="beam1", map=map1)
# The relation has been already established
assert map1 is beam1.map
assert [beam1] == map1.beams
# The primary and foreign keys are still None
assert map1.map_id is None
assert beam1.beam_id is None
assert beam1.input_map_id is None
with app.app_context():
sa.session.add(map1)
sa.session.commit()
# The primary keys are assigned
assert map1.map_id is not None
assert beam1.beam_id is not None
# The foreign key is correctly set
assert map1.map_id == beam1.input_map_id
with app.app_context():
map1 = Map.query.filter_by(name='map1').first()
beam1 = Beam.query.filter_by(name='beam1').first()
# The relation is preserved in a different app context
assert map1 is beam1.map
assert beam1 is map1.beams[0]
assert map1.map_id == beam1.input_map_id
# __________________________________________________________________||
| 2.59375 | 3 |
UTAT_Matlab/Rocket Optimizer/reg_fuel.py | coursekevin/AerospikeDesign | 1 | 12796548 | <gh_stars>1-10
import numpy as np

# NOTE: this file is a fragment of the surrounding rocket-optimizer loop; the
# quantities w (fuel web thickness), m_dot_ox, m_f, dn, dt and the loop index i
# are expected to be supplied by the calling code.
rho_fuel = 950 # fuel density (kg/m^3)
L = 0.3048 # fuel core length CHECK
r_fo = 0.068/2 # inner combustion chamber radius (m)
a0 = 0.000155 # regression rate coeff (m/s**2)
n_reg = 0.5 # regression rate exponent (flux exponent?)
MW_ox = 44.013 # molecular weight (molar mass) of N2O (kg/kmol)
m_ox = 0.0 # initial liquid ox mass in tank (value missing in source; placeholder)
n = m_ox/MW_ox # moles of liquid ox
r_port = r_fo - w # fuel port radius; r_fo = inner combustion chamber radius
G_ox = m_dot_ox[i]/(np.pi*r_port**2) # ox mass flux
reg_rate = a0*G_ox**n_reg # regression rate; n_reg = reg. rate exp., a0 = reg. rate coeff.
w = w - reg_rate*dt # regress the fuel web thickness
n = n + dn*dt # consume moles of ox
m_f[i+1] = rho_fuel*L*np.pi*w*(2*r_fo-w) # remaining fuel mass | 1.9375 | 2 |
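The block above applies the hybrid-rocket regression-rate law reg_rate = a0 * G_ox**n_reg inside an optimizer loop. The self-contained sketch below runs one time step of that update with made-up numbers; the web thickness, oxidizer mass flow, and time step are illustrative assumptions, not design values from this repository.

# Stand-alone illustration of one regression-rate update step.
# All numerical inputs below are assumed for illustration only.
import numpy as np

a0, n_reg = 0.000155, 0.5          # regression-rate coefficient and exponent
r_fo = 0.068 / 2                   # chamber inner radius (m)
w = 0.020                          # assumed current fuel web thickness (m)
m_dot_ox = 1.2                     # assumed oxidizer mass flow (kg/s)
dt = 0.01                          # time step (s)

r_port = r_fo - w                          # fuel port radius (m)
G_ox = m_dot_ox / (np.pi * r_port ** 2)    # oxidizer mass flux (kg/m^2/s)
reg_rate = a0 * G_ox ** n_reg              # surface regression rate (m/s)
w_next = w - reg_rate * dt                 # web thickness after one step
print(f"G_ox={G_ox:.1f} kg/m^2/s, reg_rate={reg_rate:.2e} m/s, w_next={w_next:.4f} m")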
tools/view_traces.py | PHILAE-PROJECT/agilkia | 1 | 12796549 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
View the traces within an Agilkia TraceSet.
It prints a one-line summary of each trace, plus some general statistics.
If the TraceSet is clustered, traces will be displayed in clusters, by default.
The status value that counts as 'Ok' can be set with --ok (e.g. --ok=200 for HTTP results).
@author: <EMAIL>
"""
import pandas as pd
import argparse
from pathlib import Path
import textwrap
import agilkia
INDENT = " " # prefix for each trace (plus one extra space)
def make_action_status_table(df: pd.DataFrame, ok=0) -> pd.DataFrame:
"""From TraceSet DataFrame, creates a table of Actions showing how many got Ok vs Error."""
good = df[df.Status == ok].groupby("Action").size()
err = df[df.Status != ok].groupby("Action").size()
data = pd.DataFrame({"Ok": good, "Err": err})
data.fillna(0, inplace=True, downcast="infer")
data["Total"] = data.Ok + data.Err
totals = pd.DataFrame([data.sum().rename("Total")])
# add Totals row at bottom
return pd.concat([data, totals])
def main():
"""A command line program that prints a set of traces, plus some summary statistics."""
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("-v", "--verbose", help="show more information, such as the event-to-char map.",
action="store_true")
parser.add_argument("-e", "--eventchars", help="a csv file containing an event-to-char map.")
parser.add_argument("-n", "--noclusters", help="view traces in-order, without clusters.",
action="store_true")
parser.add_argument("-r", "--repeats", help="compress repeated events with action=REPEATS")
parser.add_argument("-s", "--status", help="color events with non-zero status red, to highlight errors",
action="store_true")
parser.add_argument("--ok", help="specify the status value that represents success (default 0).",
default="0", metavar="NUM", type=int)
parser.add_argument("traceset", help="an Agilkia traceset file (*.json)")
args = parser.parse_args()
# print(f"Args are:", args)
traceset = agilkia.TraceSet.load_from_json(Path(args.traceset))
actions = agilkia.all_action_names(traceset.traces)
if args.eventchars:
mapfile = pd.read_csv(args.eventchars, header=None)
# we assume this has just two columns: 0=action_name and 1=char.
char_map = dict(zip(mapfile.iloc[:, 0], mapfile.iloc[:, 1]))
# print("given map=", char_map)
traceset.set_event_chars(char_map)
# print("final map=", char_map)
repeats = [] if args.repeats is None else [args.repeats]
if traceset.is_clustered() and not args.noclusters:
clusters = traceset.get_clusters()
for c in range(traceset.get_num_clusters()):
print(f"Cluster {c}:")
for (i, tr) in enumerate(traceset.traces):
if clusters[i] == c:
print(INDENT, tr.to_string(compress=repeats, color_status=args.status))
else:
for tr in traceset.traces:
print(INDENT, tr.to_string(compress=repeats, color_status=args.status))
if args.verbose:
print("==== event chars ====")
ev_chars = traceset.get_event_chars()
for action, ch in ev_chars.items():
print(f" {action:20s} {ch}")
print("==== statistics ====")
df = traceset.to_pandas()
statuses = df.Status.value_counts()
percent_ok = 100.0 * statuses.get(args.ok, 0) / df.shape[0]
# print(df.head())
print(f"Number of traces : {len(traceset.traces)}")
print(f"Average trace length : {df.groupby('Trace').count().Action.mean():.2f}")
print(f"Number of clusters : {traceset.get_num_clusters()}")
print(f"Number of events : {df.shape[0]}")
print(f"Number of event kinds: {len(actions)}")
print(textwrap.indent(str(make_action_status_table(df, ok=args.ok)), " "))
print(f"Percent of status=ok : {percent_ok:.2f}%")
error_counts = df.groupby("Error").Action.count()
if len(error_counts) > 1:
print(f"Categories of errors : ({100.0 - percent_ok:.2f}% total)")
print(textwrap.indent(str(error_counts), " "))
if __name__ == "__main__":
main()
| 2.75 | 3 |
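The script above already demonstrates the agilkia calls it needs; the short sketch below strips that down to a programmatic summary using only calls that appear in the script itself (TraceSet.load_from_json, to_pandas, Trace.to_string). The file name traces.json is a hypothetical input, not a file shipped with the tool.

# Minimal programmatic use of the same agilkia calls exercised by view_traces.py.
# "traces.json" is a hypothetical input file.
from pathlib import Path
import agilkia

traceset = agilkia.TraceSet.load_from_json(Path("traces.json"))
df = traceset.to_pandas()
print(f"traces: {len(traceset.traces)}, events: {df.shape[0]}")
for tr in traceset.traces:
    print("  ", tr.to_string(compress=[], color_status=False))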
src/i3_battery_block_vgg/font_awesome_glyphs.py | vgoehler/python-i3-battery-block | 0 | 12796550 | FA_QUESTION = '\uf128'
FA_LAPTOP = '\uf109'
FA_PLUG = '\uf1e6'
FA_BUG = '\uf188'
FA_BATTERY_LIST = [
"\uf244", # empty
"\uf243", # 1 quarter
"\uf242", # half
"\uf241", # 3 quarters
"\uf240", # full
]
FA_NO_BATTERY = "\uf00d " + FA_BATTERY_LIST[4]
| 1.40625 | 1 |
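A typical consumer of this glyph table maps a battery percentage onto one of the five FA_BATTERY_LIST entries. The helper below is a hypothetical usage sketch, not code from the i3 battery block itself; the function name, the threshold scheme, and the import path (taken from the file location shown above) are assumptions.

# Hypothetical usage sketch; battery_glyph() and its thresholds are assumptions.
from i3_battery_block_vgg.font_awesome_glyphs import FA_BATTERY_LIST

def battery_glyph(percent: float) -> str:
    # round the charge to the nearest quarter and clamp to the list bounds
    index = min(int(percent / 25 + 0.5), len(FA_BATTERY_LIST) - 1)
    return FA_BATTERY_LIST[max(index, 0)]

assert battery_glyph(0) == FA_BATTERY_LIST[0]     # empty
assert battery_glyph(50) == FA_BATTERY_LIST[2]    # half full
assert battery_glyph(100) == FA_BATTERY_LIST[4]   # full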