id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses, 1 value) |
---|---|---|
1775226
|
<filename>Jan2019/DataTypesDemo/TuplesDemo.py
# ----------------------------------
class DataTypesDemo:
Instances = 0
def __init__(self, tupleObject):
self.tupleObject = tupleObject
DataTypesDemo.Instances += 1
def displayDetails(self):
print("----- DataTypesDemo Details -----")
print('DataTypesDemo.Instances: ', self.Instances)
print('Tuple Data: ', self.tupleObject)
print()
# ----------------------------------
print("----- Tuples Demo -----")
# Defining a tuple without any element
tupleEmpty = ()
dataTypeObject = DataTypesDemo(tupleEmpty)
dataTypeObject.displayDetails()
# Person Tuples
tuplePerson = (1, '<NAME>', 24, 567.90)
dataTypeObject = DataTypesDemo(tuplePerson)
dataTypeObject.displayDetails()
# Nested Tuples
tupleEmployee = (1, 'A1001', 24)
dataTypeObject = DataTypesDemo((tuplePerson, tupleEmployee))
dataTypeObject.displayDetails()
# Repetition Tuple
repeatTuple = ('Python 3',) * 4
dataTypeObject = DataTypesDemo(repeatTuple)
dataTypeObject.displayDetails()
# Slicing with tuples
sample_tuple = (0, 1, 2, 3, 4)
withoutFirstItem = sample_tuple[1:]
dataTypeObject = DataTypesDemo(withoutFirstItem)
dataTypeObject.displayDetails()
tupleReverse = sample_tuple[::-1]
dataTypeObject = DataTypesDemo(tupleReverse)
dataTypeObject.displayDetails()
from3to5Tuple = sample_tuple[2:5]
dataTypeObject = DataTypesDemo(from3to5Tuple)
dataTypeObject.displayDetails()
|
StarcoderdataPython
|
3357046
|
<gh_stars>1-10
import torch
import cv2
import numpy as np
from core.inference import Inference
from core.yolo_v4 import YOLOv4
from configuration import Config
from utils.visualization import draw_boxes_on_image
def detect_one_picture(model, picture_dir, device):
inference = Inference(picture_dir, device)
with torch.no_grad():
boxes, scores, classes = inference(model)
boxes = boxes.cpu().numpy().astype(np.int32)
scores = scores.cpu().numpy().astype(np.float32)
classes = classes.cpu().numpy().astype(np.int32)
image = draw_boxes_on_image(cv2.imread(filename=picture_dir), boxes, scores, classes)
return image
def detect_multiple_pictures(model, pictures, epoch, device):
index = 0
for picture in pictures:
index += 1
result = detect_one_picture(model=model, picture_dir=picture, device=device)
cv2.imwrite(filename=Config.training_results_save_dir + "epoch-{}-picture-{}.jpg".format(epoch, index), img=result)
if __name__ == '__main__':
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device: ", device)
yolo_v4 = YOLOv4()
if Config.detect_on_cpu:
yolo_v4.load_state_dict(torch.load(Config.save_model_dir + "saved_model.pth", map_location=torch.device('cpu')))
else:
yolo_v4.load_state_dict(torch.load(Config.save_model_dir + "saved_model.pth"))
yolo_v4.to(device)
yolo_v4.eval()
image = detect_one_picture(yolo_v4, Config.test_single_image_dir, device)
cv2.namedWindow("detect result", flags=cv2.WINDOW_NORMAL)
cv2.imshow("detect result", image)
cv2.waitKey(0)
|
StarcoderdataPython
|
3268728
|
# -*- coding:utf-8 -*-
"""
File Name: model.py
Description: model definition
Author: steven.yi
date: 2019/04/17
"""
from keras.models import Model
from keras.layers import Conv2D, MaxPooling2D, Input, Concatenate, Dropout
def MCNN(input_shape=None):
inputs = Input(shape=input_shape)
# column 1
column_1 = Conv2D(16, (9, 9), padding='same', activation='relu', name='col1_conv1')(inputs)
column_1 = MaxPooling2D(2)(column_1)
column_1 = Conv2D(32, (7, 7), padding='same', activation='relu', name='col1_conv2')(column_1)
column_1 = MaxPooling2D(2)(column_1)
column_1 = Conv2D(16, (7, 7), padding='same', activation='relu', name='col1_conv3')(column_1)
column_1 = Conv2D(8, (7, 7), padding='same', activation='relu', name='col1_conv4')(column_1)
# column 2
column_2 = Conv2D(20, (7, 7), padding='same', activation='relu', name='col2_conv1')(inputs)
column_2 = MaxPooling2D(2)(column_2)
column_2 = Conv2D(40, (5, 5), padding='same', activation='relu', name='col2_conv2')(column_2)
column_2 = MaxPooling2D(2)(column_2)
column_2 = Conv2D(20, (5, 5), padding='same', activation='relu', name='col2_conv3')(column_2)
column_2 = Conv2D(10, (5, 5), padding='same', activation='relu', name='col2_conv4')(column_2)
# column 3
column_3 = Conv2D(24, (5, 5), padding='same', activation='relu', name='col3_conv1')(inputs)
column_3 = MaxPooling2D(2)(column_3)
column_3 = Conv2D(48, (3, 3), padding='same', activation='relu', name='col3_conv2')(column_3)
column_3 = MaxPooling2D(2)(column_3)
column_3 = Conv2D(24, (3, 3), padding='same', activation='relu', name='col3_conv3')(column_3)
column_3 = Conv2D(12, (3, 3), padding='same', activation='relu', name='col3_conv4')(column_3)
# merge feature map of 3 columns in last dimension
merges = Concatenate(axis=-1)([column_1, column_2, column_3])
# density map
density_map = Conv2D(1, (1, 1), padding='same', activation='linear', name='density_conv')(merges)
model = Model(inputs=inputs, outputs=density_map)
return model
|
StarcoderdataPython
|
1653183
|
import logging
from programy.clients.clients import BotClient
class ConsoleBotClient(BotClient):
def __init__(self):
BotClient.__init__(self)
self.clientid = "Console"
def set_environment(self):
self.bot.brain.predicates.pairs.append(["env", "Console"])
def run(self):
if self.arguments.noloop is False:
logging.info("Entering conversation loop...")
running = True
self.display_response(self.bot.get_version_string)
self.display_response(self.bot.brain.post_process_response(self.bot, self.clientid, self.bot.initial_question))
while running is True:
try:
question = self.get_question()
response = self.bot.ask_question(self.clientid, question)
if response is None:
self.display_response(self.bot.default_response)
self.log_unknown_response(question)
else:
self.display_response(response)
self.log_response(question, response)
except KeyboardInterrupt:
running = False
self.display_response(self.bot.exit_response)
except Exception as excep:
logging.exception(excep)
logging.error("Oops something bad happened !")
def get_question(self):
ask = "%s "%self.bot.prompt
return input(ask)
def display_response(self, response):
print(response)
if __name__ == '__main__':
def run():
print("Loading, please wait...")
console_app = ConsoleBotClient()
console_app.run()
run()
|
StarcoderdataPython
|
3208868
|
# Class that is used to get the audio stream from Loomo.
# For further use, the input gets played via connected speakers.
# Requires setting up the microphone with pulseaudio so that the output gets redirected to it.
# It is recommended to connect something to the aux output of the device, otherwise a loopback input will be created.
from threading import Thread
import socket
import pyaudio
import sys
# TODO: set IP to your own IP in the network
myHOST = "192.168.43.138"
myPORT = 65432
class socketServer(Thread):
# creates the class and opens a new thread for listening by calling the createListener function
def __init__(self):
super(socketServer, self).__init__()
thread = Thread(target=self.createListener)
thread.daemon = True
thread.start()
# function opens the socket and plays the received audio
def createListener(self):
self.mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.mysocket.bind((myHOST, myPORT))
p = pyaudio.PyAudio()
audiostream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, output=True)
print("created socket at: ", socket.gethostname(), " ", myPORT)
self.mysocket.listen(1)
print("now listening...")
while True:
conn, addr = self.mysocket.accept()
print("Connected to: ", addr)
self.isStreaming = True
while True:
data = conn.recv(1024)
audiostream.write(data)
if not data:
print("Bye")
self.isStreaming = False
break
elif data == 'killsrv':
conn.close()
sys.exit()
|
StarcoderdataPython
|
3238468
|
def __read_lst(dat):
"""
lst形式のデータ(文字列)の内容を読み込む
"""
dat_list = dat.split('\t')
index = int(dat_list[0])
header_size = int(dat_list[1])
    assert header_size == 2, 'header_size is expected to be 2: '+str(header_size)
label_width = int(dat_list[2])
    assert label_width == 5, 'label_width is expected to be 5: '+str(label_width)
label_data = dat_list[3:-1]
    assert (len(label_data) % label_width) == 0, 'length of label_data should be a multiple of label_width: '
file_path = dat_list[-1]
return (index, header_size, label_width, label_data, file_path)
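# Illustrative example (not from the original source): one tab-separated lst line that
# __read_lst() can parse, assuming header_size == 2, label_width == 5 and a single
# bounding box stored as (class, x1, y1, x2, y2) with coordinates as image-relative fractions:
#   "0\t2\t5\t1\t0.1\t0.2\t0.5\t0.6\timages/img_0001.jpg"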
def create_bb_img(input_lst_path, input_img_root_path, output_img_path, class_list=[]):
"""
画像データにlstファイルに基づくバウンディングボックスを加工した画像をつうる
"""
import random
import copy
import os
from os import path
import shutil
from PIL import Image
import numpy as np
import imgaug as ia
from tqdm import tqdm_notebook as tqdm
    # Reset the output directory
if path.isdir(output_img_path):
shutil.rmtree(output_img_path)
os.makedirs(output_img_path)
with open(input_lst_path) as lst_f:
for line in tqdm(lst_f.readlines()):
line = line.strip()
if not line: continue
            # Read the lst-format data and store it in variables
origin_img_index, header_size, label_width, label_data, img_path = __read_lst(line)
img_path = path.join(input_img_root_path, img_path)
            # Load the image
origin_img = Image.open(img_path).convert('RGB')
img_height = origin_img.height
img_width = origin_img.width
max_edge = max(img_height, img_width)
            # Convert the image
target_img = np.array(origin_img)
            # Generate the bounding boxes
bbs = []
for bb_index in range(len(label_data)//label_width):
bb = ia.BoundingBox(
x1 = float(label_data[bb_index * label_width + 1]) * img_width,
y1 = float(label_data[bb_index * label_width + 2]) * img_height,
x2 = float(label_data[bb_index * label_width + 3]) * img_width,
y2 = float(label_data[bb_index * label_width + 4]) * img_height
)
class_val = int(label_data[bb_index * label_width])
                assert 0 <= class_val and class_val < len(class_list), 'invalid class value: '+str(class_val)
class_name = class_list[class_val] if class_list[class_val] else str(class_val)
target_img = ia.draw_text(target_img, bb.y1, bb.x1, class_name)
bbs.append(bb)
bbs_on_img = ia.BoundingBoxesOnImage(bbs, shape = target_img.shape)
after_bb_img = bbs_on_img.draw_on_image(target_img)
output_img_name = path.basename(img_path)
Image.fromarray(after_bb_img).save(path.join(output_img_path, output_img_name))
|
StarcoderdataPython
|
64602
|
# users/forms.py
# Django modules
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django import forms
class RegisterForm(UserCreationForm):
username = forms.CharField(max_length=50)
email = forms.EmailField(max_length=50)
password1 = forms.CharField()
password2 = forms.CharField()
class Meta(UserCreationForm):
model = User
        fields = ('username','email','password1','password2')
|
StarcoderdataPython
|
37705
|
<gh_stars>0
"""
This tutorial shows how to download and render neurons from the MouseLight project
using the MouseLightAPI class.
You can also download data manually from the neuronbrowser website and render them by
passing the downloaded files to `scene.add_neurons`.
"""
import brainrender
brainrender.USE_MORPHOLOGY_CACHE = True
from brainrender.scene import Scene
from brainrender.Utils.MouseLightAPI.mouselight_api import MouseLightAPI
from brainrender.Utils.MouseLightAPI.mouselight_info import mouselight_api_info, mouselight_fetch_neurons_metadata
# Fetch metadata for neurons with somas in the secondary motor cortex
neurons_metadata = mouselight_fetch_neurons_metadata(filterby='soma', filter_regions=['MOs'])
# Then we can download the files and save them as a .json file
ml_api = MouseLightAPI()
neurons_files = ml_api.download_neurons(neurons_metadata[:2]) # just saving the first couple neurons to speed things up
# Show neurons and ZI in the same scene:
scene = Scene()
scene.add_neurons(neurons_files, soma_color='orangered', dendrites_color='orangered',
axon_color='darkseagreen', neurite_radius=8) # add_neurons takes a lot of arguments to specify how the neurons should look
# make sure to check the source code to see all available options
scene.add_brain_regions(['MOs'], alpha=0.15)
scene.render(camera='coronal')
|
StarcoderdataPython
|
1728186
|
from flask import Blueprint, render_template
error = Blueprint("error", __name__)
@error.app_errorhandler(403)
def error_403(error):
return render_template("error/404.html"), 403
@error.app_errorhandler(404)
def error_404(error):
return render_template("error/404.html"), 404
@error.app_errorhandler(500)
def error_500(error):
return render_template("error/500.html"), 500
|
StarcoderdataPython
|
1605162
|
<reponame>CrispyHarder/deep-weight-prior<gh_stars>0
import os
import torch
from models.tvae.grouper import Chi_Squared_from_Gaussian_2d
import torchvision
class TVAE(torch.nn.Module):
def __init__(self, z_encoder, u_encoder, decoder, grouper):
super(TVAE, self).__init__()
self.z_encoder = z_encoder
self.u_encoder = u_encoder
self.decoder = decoder
self.grouper = grouper
self.device = grouper.device
self.to(self.device)
def forward(self, x):
z, kl_z, _, _ = self.z_encoder(x)
u, kl_u, _, _ = self.u_encoder(x)
s = self.grouper(z, u)
x_recon, recon_loss = self.decoder(s, x)
return z, u, s, x_recon, kl_z, kl_u, recon_loss
def generate(self, batch_size, device=torch.device('cpu')):
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (Tensor)
"""
n_caps = self.grouper.n_caps
cap_dim = self.grouper.cap_dim
s_dim = n_caps*cap_dim
z,u = torch.randn((batch_size, 2*s_dim, 1, 1)).to(device).chunk(2,dim=1)
s = self.grouper(z,u)
samples = self.decoder.only_decode(s)
return samples
# sampled_indices = torch.randint(0,self.num_embeddings,(num_samples,9))
# if device:
# sampled_indices = sampled_indices.to(device)
# codebook_vecs = self._vq_vae._embedding(sampled_indices)
# codebook_vecs = codebook_vecs.view(-1,self.embedding_dim,3,3)
# if device:
# codebook_vecs = codebook_vecs.to(device)
# samples = self.decode(codebook_vecs)
# return samples
def get_IS_estimate(self, x, n_samples=100):
log_likelihoods = []
for n in range(n_samples):
z, kl_z, log_q_z, log_p_z = self.z_encoder(x)
u, kl_u, log_q_u, log_p_u = self.u_encoder(x)
s = self.grouper(z, u)
probs_x, neg_logpx_z = self.decoder(s, x)
ll = (-1 * neg_logpx_z.flatten(start_dim=1).sum(-1, keepdim=True)
+ log_p_z.flatten(start_dim=1).sum(-1, keepdim=True)
+ log_p_u.flatten(start_dim=1).sum(-1, keepdim=True)
- log_q_z.flatten(start_dim=1).sum(-1, keepdim=True)
- log_q_u.flatten(start_dim=1).sum(-1, keepdim=True))
log_likelihoods.append(ll)
ll = torch.cat(log_likelihoods, dim=-1)
is_estimate = torch.logsumexp(ll, -1)
return is_estimate
class VAE(TVAE):
def get_IS_estimate(self, x, n_samples=100):
log_likelihoods = []
for n in range(n_samples):
z, kl_z, log_q_z, log_p_z = self.z_encoder(x)
s = self.grouper(z, torch.zeros_like(z))
probs_x, neg_logpx_z = self.decoder(s, x)
ll = (-1 * neg_logpx_z.flatten(start_dim=1).sum(-1, keepdim=True)
+ log_p_z.flatten(start_dim=1).sum(-1, keepdim=True)
- log_q_z.flatten(start_dim=1).sum(-1, keepdim=True))
log_likelihoods.append(ll)
ll = torch.cat(log_likelihoods, dim=-1)
is_estimate = torch.logsumexp(ll, -1)
return is_estimate
def forward(self, x):
z, kl_z, _, _ = self.z_encoder(x)
u = torch.zeros_like(z)
kl_u = torch.zeros_like(kl_z)
s = self.grouper(z, u)
probs_x, neg_logpx_z = self.decoder(s, x)
return z, u, s, probs_x, kl_z, kl_u, neg_logpx_z
|
StarcoderdataPython
|
176877
|
<reponame>rendinam/crds
"""This module defines replacement functionality for the CDBS "certify" program
used to check parameter values in .fits reference files. It verifies that FITS
files define required parameters and that they have legal values.
"""
from crds.core import log, utils
from . import core as core_validators
from . import synphot as synphot_validators
__all__ = [
"validator",
"get_validators",
]
_VALIDATOR_MODULES = [
core_validators,
synphot_validators
]
def validator(info, context=None):
"""Given TpnInfo object `info`, construct and return a Validator for it."""
if len(info.values) == 1 and info.values[0].startswith("&"):
# This block handles &-types like &PEDIGREE and &SYBDATE
# only called on static TPN infos.
class_name = "".join([v.capitalize() for v in info.values[0][1:].split("_")]) + "Validator"
module = next((m for m in _VALIDATOR_MODULES if hasattr(m, class_name)), None)
if module is None:
raise ValueError("Unrecognized validator {}, expected class {}".format(info.values[0], class_name))
rval = getattr(module, class_name)(info, context=context)
elif info.datatype == "C":
rval = core_validators.CharacterValidator(info, context=context)
elif info.datatype == "R":
rval = core_validators.RealValidator(info, context=context)
elif info.datatype == "D":
rval = core_validators.DoubleValidator(info, context=context)
elif info.datatype == "I":
rval = core_validators.IntValidator(info, context=context)
elif info.datatype == "L":
rval = core_validators.LogicalValidator(info, context=context)
elif info.datatype == "X":
if info.keytype == "C":
rval = core_validators.ColumnExpressionValidator(info, context=context)
else:
rval = core_validators.ExpressionValidator(info, context=context)
else:
raise ValueError("Unimplemented datatype " + repr(info.datatype))
return rval
def get_validators(observatory, refpath, context=None):
"""Given `observatory` and a path to a reference file `refpath`, load the
corresponding validators that define individual constraints that reference
should satisfy.
"""
tpns = _get_reffile_tpninfos(observatory, refpath)
checkers = [validator(x, context=context) for x in tpns]
log.verbose("Validators for", repr(refpath), "("+str(len(checkers))+"):\n", log.PP(checkers), verbosity=65)
return checkers
def _get_reffile_tpninfos(observatory, refpath):
"""Load just the TpnInfo objects for `observatory` and the given `refpath`.
This entails both "class" TpnInfo's from CDBS as well as TpnInfo objects
derived from the JWST data models.
"""
locator = utils.get_locator_module(observatory)
instrument, filekind = locator.get_file_properties(refpath)
tpns = list(locator.get_all_tpninfos(instrument, filekind, "tpn"))
tpns.extend(locator.get_extra_tpninfos(refpath))
return tpns
|
StarcoderdataPython
|
4810146
|
<filename>fpga/test_separable_conv2d.py<gh_stars>0
import tensorflow as tf
import sys
sys.path.append('../../../src')
import processMif as mif
#in_x=np.reshape(np.array(x).transpose(),[1,size,size,1])
img1 = tf.constant(value=[[[[1],[2],[3],[4]],[[1],[2],[3],[4]],[[1],[2],[3],[4]],[[1],[2],[3],[4]]]],dtype=tf.float32)
img2 = tf.constant(value=[[[[1],[1],[1],[1]],[[1],[1],[1],[1]],[[1],[1],[1],[1]],[[1],[1],[1],[1]]]],dtype=tf.float32)
img = tf.concat(values=[img1,img2],axis=3)
filter1 = tf.constant(value=0, shape=[3,3,1,1],dtype=tf.float32)
filter2 = tf.constant(value=1, shape=[3,3,1,1],dtype=tf.float32)
filter3 = tf.constant(value=2, shape=[3,3,1,1],dtype=tf.float32)
filter4 = tf.constant(value=3, shape=[3,3,1,1],dtype=tf.float32)
filter_out1 = tf.concat(values=[filter1,filter2],axis=2)
filter_out2 = tf.concat(values=[filter3,filter4],axis=2)
filter = tf.concat(values=[filter_out1,filter_out2],axis=3)
point_filter = tf.constant(value=1, shape=[1,1,4,4],dtype=tf.float32)
#out_img = tf.nn.depthwise_conv2d(input=img, filter=filter, strides=[1,1,1,1],rate=[1,1], padding='VALID')
#out_img = tf.nn.conv2d(input=out_img, filter=point_filter, strides=[1,1,1,1], padding='VALID')
'''also can be used'''
#out_img = tf.nn.separable_conv2d(input=img, depthwise_filter=filter, pointwise_filter=point_filter, strides=[1,1,1,1], rate=[1,1], padding='VALID')
def separable_conv2d(input, depthwise_filter, pointwise_filter):
net = tf.nn.depthwise_conv2d(input=input, filter=depthwise_filter, strides=[1,1,1,1],rate=[1,1], padding='SAME')
net = tf.nn.conv2d(input=net, filter=pointwise_filter, strides=[1,1,1,1], padding='SAME')
return net
with tf.Session() as sess:
x = img.eval()
print img
print filter
print point_filter
with tf.device("device:XLA_CPU:0"):
#out_img1 = tf.nn.depthwise_conv2d(img, filter, strides=[1,1,1,1],rate=[1,1], padding='VALID')
out_img1 = tf.nn.conv2d(img, filter, strides=[1,1,1,1], padding='SAME')
out_img = tf.nn.conv2d(out_img1, tf.reshape(point_filter,[1,2,2,4]), strides=[1,1,1,1], padding='VALID')
#out_img = tf.nn.conv2d(out_img1, point_filter, strides=[1,1,1,1], padding='SAME')
print 'result:'
print(sess.run(out_img, feed_dict={img: x}))
mif.createMem([x,filter.eval(), point_filter.eval()])
|
StarcoderdataPython
|
3274856
|
from __future__ import print_function
from subprocess import Popen, PIPE
import os
import sys
import shlex
def run_cmd(cmd, verbose=False):
if verbose:
print("Executing :",cmd)
p = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
o,e = p.communicate()
return o,e
if sys.platform == "darwin":
conda_os = "osx-64"
else:
conda_os = "linux-64"
conda_pkgs = os.path.abspath(os.path.join(os.environ.get("CONDA_EXE"),"..","..","pkgs"))
# Get list of packages we are using
pkgs, err = run_cmd("conda list", verbose=True)
missing = []
for l in pkgs.decode("utf8").split("\n")[2:-1]:
sp = l.split()
name = sp[0]
version = sp[1]
build = sp[2]
tarname = "{}-{}-{}.tar.bz2".format(name,version,build)
tarball = os.path.join(conda_pkgs,tarname)
print("looking at:",tarball,os.path.exists(tarball))
if os.path.exists(tarball):
o,e = run_cmd("anaconda upload {} -u cdat-forge".format(tarball), verbose=True)
print("OUT:",o.decode("utf8"))
print("Err:",e.decode("utf8"))
else:
missing.append(tarball)
print(sys.prefix)
print(conda_pkgs)
print("Error on:",missing)
|
StarcoderdataPython
|
1617527
|
<filename>mtnlpmodel/__init__.py
__version__ = "0.9.1"
# for custom keras object auto discover
from seq2annotation import tf_contrib
import mtnlpmodel
|
StarcoderdataPython
|
52700
|
<reponame>chenjian158978/chenjian.github.io
# -*- coding:utf8 -*-
"""
@author: <EMAIL>
@date: Tue, May 23 2017
@time: 19:05:20 GMT+8
"""
import matplotlib.pyplot as plt
import numpy as np
# Convert both to column vectors
X = np.array([[0, 1, 2, 4]]).T
Y = np.array([[0, 1, 2, 4]]).T
# Three different values of theta_1
theta1 = np.array([[0, 0]]).T
theta2 = np.array([[0, 0.5]]).T
theta3 = np.array([[0, 1]]).T
# The shape (m, n) of matrix X
X_size = X.shape
# Create a (4, 1) array of ones
X_0 = np.ones((X_size[0], 1))
# Form the coordinates of the points
X_with_x0 = np.concatenate((X_0, X), axis=1)
# Dot product of the two arrays
h1 = np.dot(X_with_x0, theta1)
h2 = np.dot(X_with_x0, theta2)
h3 = np.dot(X_with_x0, theta3)
# r:red x: x marker
plt.plot(X, Y, 'rx', label='y')
plt.title("Cost Function Example")
plt.grid(True)
plt.plot(X, h1, color='b', label='h1, theta_1=0')
plt.plot(X, h2, color='m', label='h2, theta_1=0.5')
plt.plot(X, h3, color='g', label='h3, theta_1=1')
# Axis labels
plt.xlabel('X')
plt.ylabel('y/h')
# Axis limits
plt.axis([-0.1, 4.5, -0.1, 4.5])
# plt.legend(loc='upper left')
plt.legend(loc='best')
plt.savefig('liner_gression_error.png', dpi=200)
plt.show()
|
StarcoderdataPython
|
127321
|
<reponame>michielkauwatjoe/Meta
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# https://github.com/michielkauwatjoe/Meta
class CubicBezier:
def __init__(self, bezierId=None, points=None, parent=None, isClosed=False):
u"""
Stores points of the cubic Bézier curve.
"""
self.bezierId = bezierId
self.points = points
self.parent = parent
self.isClosed = isClosed
|
StarcoderdataPython
|
1605590
|
<gh_stars>0
from .corrcal import *
|
StarcoderdataPython
|
3334086
|
from lexical.greibach_converter import greibach_converter
from lexical.alpha_to_var import alpha_to_var
from lexical.useless_variable_terminator import useless_variable_terminator
from lexical.unitary_rule_terminator import unitary_rule_terminator
from lexical.lambda_terminator import lambda_terminator
from structure.tree import Tree
from structure.stack import Stack
from file_manager import loader, output
from structure.GLC import GLC
from structure.greibach_path import GreibachPaths
from structure.stack import Stack
from structure.constants import LAMBDA
from structure.word_keeper import WordKeeper
from lexical.reviewer import variable_and_alpha_review
import sys
def main(args):
# command line: python3 interpreter.py ex.json 4
if len(args) < 3:
print('Usage: python3 interpreter.py json_file_name word_size_number')
else:
file_name = args[1]
word_size_limit = int(args[2])
artefact = loader.read_json(file_name)
if artefact != None:
# create the language based on json artefact
language = GLC()
language.set_variable_list(artefact['glc'][0])
language.set_alpha_list(artefact['glc'][1])
language.set_transitions_list(artefact['glc'][2])
language.set_initial_variable(artefact['glc'][3])
variable_l = language.get_variable_list()
alpha_l = language.get_alpha_list()
transition_l = language.get_transitions_list()
ist = language.get_initial_variable()
# just a check-up
variable_and_alpha_review(variable_l, alpha_l, transition_l, ist)
# 1st: removing lambda rules
lambda_terminator(language)
# 2nd: removing unitary rules
unitary_rule_terminator(language)
# 3rd: removing useless variable
useless_variable_terminator(language)
# 4th: swap alphas with new variables
alpha_to_var(language)
# 5th: some swaps can create useless variables
            '''
            Happens when a candidate variable ends up unused
            '''
useless_variable_terminator(language)
# 6th: to greibach format
greibach_converter(language)
# 7th: the outlaw
            '''
            In some languages, unitary rules can reappear after
            the conversion. This happens when the start variable
            had a left call and the language contains lambda.
            Imagine a new variable 'Z' introduced to turn the left call
            into a right call; when Z is combined with lambda, a new
            transition #Z = Z emerges. The solution: call the
            unitary-rule remover again.
            '''
unitary_rule_terminator(language)
# to file
output.file_generator("output_language.txt", str(language))
            # let's see every transition as: ALPHA. TOP | STACK
paths = GreibachPaths(language)
# starts with the start variable
stack = Stack(ist)
            # Greibach machine derivation tree
tree = Tree(stack)
            # holds all found words
keeper = WordKeeper()
# in search for lambda
paths_dict = paths.get_paths_dict()
key = LAMBDA + ist
extend_alpha_l = alpha_l
if key in paths_dict.keys():
keeper.insert_word(LAMBDA)
extend_alpha_l += [LAMBDA]
            # finding next nodes
tree.get_root().call_next_node(keeper, extend_alpha_l, paths_dict, word_size_limit)
# to files
output.file_generator("output_words.txt", str(keeper))
# to command line
print(str(keeper))
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
StarcoderdataPython
|
187740
|
<filename>tests/utils/test_compare.py
import pytest
from copy import deepcopy
from varg.utils.compare import Comparison
def test_basic(truth_set_path, vcf_record, compare_fields):
    # Given two cyvcf2 records, vcf keys to be compared and a sample-to-sample
# index map
record_1 = vcf_record(truth_set_path, variant_type="SNV")
record_2 = vcf_record(truth_set_path, variant_type="SV")
sample_idx_map = ((0, 1, 2), (0, 1, 2))
# WHEN making a comparison between records
comp = Comparison(
record_1, record_2, vcf_keys=compare_fields, sample_idx_map=sample_idx_map
)
# Then check that the records have been compared using the fields specified
assert set(comp.comparison.keys()).issubset(set(compare_fields.keys()))
def test_nonexisting_format_id(truth_set_path, vcf_record, compare_fields):
modified_keys = deepcopy(compare_fields)
modified_keys["SVLEN"] = {
"column": "FORMAT",
"ID": "SVLEN",
"type_conv": lambda x: x,
}
record_1 = vcf_record(truth_set_path, variant_type="SNV")
record_2 = vcf_record(truth_set_path, variant_type="SV")
sample_idx_map = ((0, 1, 2), (0, 1, 2))
# WHEN making a comparison between records
comp = Comparison(
record_1, record_2, vcf_keys=modified_keys, sample_idx_map=sample_idx_map
)
assert "SVLEN" not in comp.comparison.keys()
|
StarcoderdataPython
|
48405
|
from django.shortcuts import render, redirect
from django.views import View
from django import http
import re
from .models import User
from django.contrib.auth import login
from meiduo_mall.utils.response_code import RETCODE
class RegisterView(View):
"""用户注册"""
def get(self, request):
return render(request, 'register.html')
def post(self, request):
"""注册业务逻辑"""
# 接收请求体中的表单数据
query_dict = request.POST
username = query_dict.get('username')
password = query_dict.get('password')
password2 = query_dict.get('password2')
mobile = query_dict.get('mobile')
sms_code = query_dict.get('sms_code')
allow = query_dict.get('allow')
        # Validate the data
        if all([username, password, mobile, sms_code, allow]) is False:
            return http.HttpResponseForbidden('Missing required parameters')
if not re.match(r'^[a-zA-Z0-9_-]{5,20}$', username):
            return http.HttpResponseForbidden('Please enter a 5-20 character username')
if not re.match(r'^[0-9A-Za-z]{8,20}$', password):
            return http.HttpResponseForbidden('Please enter an 8-20 character password')
        if password != password2:
            return http.HttpResponseForbidden('The two passwords do not match')
if not re.match(r'^1[345789]\d{9}$', mobile):
            return http.HttpResponseForbidden('Please enter a valid mobile phone number')
        # Business logic
user = User.objects.create_user(username=username, password=password, mobile=mobile)
        # Keep the login state
login(request, user)
        # Respond
        return redirect('/')  # Redirect to the home page
class UsernameCountView(View):
"""判断用户名是否重复注册"""
def get(self, request, username):
        # Query the user table by username and get the count
count = User.objects.filter(username=username).count()
        # Respond
        content = {'count': count, 'code': RETCODE.OK, 'errmsg': 'OK'}  # Response body data
return http.JsonResponse(content)
class MobileCountView(View):
"""判断手机号是否重复注册"""
def get(self, request, mobile):
        # Query the user table by mobile and get the count
count = User.objects.filter(mobile=mobile).count()
        # Respond
        content = {'count': count, 'code': RETCODE.OK, 'errmsg': 'OK'}  # Response body data
return http.JsonResponse(content)
|
StarcoderdataPython
|
3289943
|
<reponame>aditya-agrawal-30502/vformer
class BaseTrainer: # pragma: no cover
pass
|
StarcoderdataPython
|
84218
|
<reponame>yc19890920/Learn
#!/usr/bin/python
#coding=utf8
__author__ = 'leo'
|
StarcoderdataPython
|
3276188
|
<reponame>sbraz/txamqp
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from twisted.internet.protocol import Factory
from txamqp.protocol import AMQClient
from txamqp.spec import DEFAULT_SPEC, load
from txamqp.client import TwistedDelegate
class AMQFactory(Factory):
"""A factory building AMQClient instances."""
protocol = AMQClient
def __init__(self, spec=None, clock=None):
"""
@param spec: Path to the spec file. Defaults to the standard AMQP 0.9.
@type spec: L{str} (native string)
"""
if spec is None:
spec = DEFAULT_SPEC
self._spec = load(spec)
self._clock = clock
self._vhost = "/"
self._heartbeat = 0
def set_vhost(self, vhost):
"""Set a custom vhost."""
self._vhost = vhost
def set_heartbeat(self, heartbeat):
"""Set a custom heartbeat."""
self._heartbeat = heartbeat
def buildProtocol(self, addr):
delegate = TwistedDelegate()
protocol = self.protocol(
delegate, vhost=self._vhost, spec=self._spec,
heartbeat=self._heartbeat, clock=self._clock)
return protocol
|
StarcoderdataPython
|
1797336
|
<reponame>Random1992/irspack
import warnings
from typing import List, Type
from ..optimizers.base_optimizer import BaseOptimizer, BaseOptimizerWithEarlyStopping
from ..parameter_tuning import (
CategoricalSuggestion,
IntegerSuggestion,
LogUniformSuggestion,
Suggestion,
UniformSuggestion,
)
from ..recommenders import (
AsymmetricCosineKNNRecommender,
AsymmetricCosineUserKNNRecommender,
CosineKNNRecommender,
CosineUserKNNRecommender,
DenseSLIMRecommender,
IALSRecommender,
JaccardKNNRecommender,
NMFRecommender,
P3alphaRecommender,
RP3betaRecommender,
SLIMRecommender,
TopPopRecommender,
TruncatedSVDRecommender,
TverskyIndexKNNRecommender,
)
default_tune_range_knn = [
IntegerSuggestion("top_k", 4, 1000),
UniformSuggestion("shrinkage", 0, 1000),
]
default_tune_range_knn_with_weighting = [
IntegerSuggestion("top_k", 4, 1000),
UniformSuggestion("shrinkage", 0, 1000),
CategoricalSuggestion("feature_weighting", ["NONE", "TF_IDF", "BM_25"]),
]
_BaseOptimizerArgsString = """Args:
data (Union[scipy.sparse.csr_matrix, scipy.sparse.csc_matrix]):
The train data.
val_evaluator (Evaluator):
The validation evaluator which measures the performance of the recommenders.
logger (Optional[logging.Logger], optional) :
The logger used during the optimization steps. Defaults to None.
If ``None``, the default logger of irspack will be used.
suggest_overwrite (List[Suggestion], optional) :
Customizes (e.g. enlarging the parameter region or adding new parameters to be tuned)
the default parameter search space defined by ``default_tune_range``
Defaults to list().
fixed_params (Dict[str, Any], optional):
Fixed parameters passed to recommenders during the optimization procedure.
If such a parameter exists in ``default_tune_range``, it will not be tuned.
Defaults to dict().
"""
_BaseOptimizerWithEarlyStoppingArgsString = """Args:
data (Union[scipy.sparse.csr_matrix, scipy.sparse.csc_matrix]):
The train data.
val_evaluator (Evaluator):
The validation evaluator which measures the performance of the recommenders.
logger (Optional[logging.Logger], optional):
The logger used during the optimization steps. Defaults to None.
If ``None``, the default logger of irspack will be used.
suggest_overwrite (List[Suggestion], optional):
Customizes (e.g. enlarging the parameter region or adding new parameters to be tuned)
the default parameter search space defined by ``default_tune_range``
Defaults to list().
fixed_params (Dict[str, Any], optional):
Fixed parameters passed to recommenders during the optimization procedure.
If such a parameter exists in ``default_tune_range``, it will not be tuned.
Defaults to dict().
max_epoch (int, optional):
The maximal number of epochs for the training. Defaults to 512.
validate_epoch (int, optional):
The frequency of validation score measurement. Defaults to 5.
score_degradation_max (int, optional):
        Maximal number of allowed score degradations. Defaults to 5.
"""
def _add_docstring(
cls: Type[BaseOptimizer], args: str = _BaseOptimizerArgsString
) -> None:
if cls.default_tune_range:
ranges = ""
for suggest in cls.default_tune_range:
ranges += f" - ``{suggest!r}``\n"
ranges += "\n"
tune_range = f"""The default tune range is
{ranges}"""
else:
tune_range = " There is no tunable parameters."
docs = f"""Optimizer class for :class:`irspack.recommenders.{cls.recommender_class.__name__}`.
{tune_range}
{args}
"""
cls.__doc__ = docs
class TopPopOptimizer(BaseOptimizer):
default_tune_range: List[Suggestion] = []
recommender_class = TopPopRecommender
_add_docstring(TopPopOptimizer)
class IALSOptimizer(BaseOptimizerWithEarlyStopping):
default_tune_range = [
IntegerSuggestion("n_components", 4, 300),
LogUniformSuggestion("alpha", 1, 100),
LogUniformSuggestion("reg", 1e-10, 1e-2),
]
recommender_class = IALSRecommender
_add_docstring(IALSOptimizer, _BaseOptimizerWithEarlyStoppingArgsString)
class P3alphaOptimizer(BaseOptimizer):
default_tune_range = [
IntegerSuggestion("top_k", low=10, high=1000),
CategoricalSuggestion("normalize_weight", [True, False]),
]
recommender_class = P3alphaRecommender
_add_docstring(P3alphaOptimizer)
class DenseSLIMOptimizer(BaseOptimizer):
default_tune_range = [LogUniformSuggestion("reg", 1, 1e4)]
recommender_class = DenseSLIMRecommender
_add_docstring(DenseSLIMOptimizer)
class RP3betaOptimizer(BaseOptimizer):
default_tune_range = [
IntegerSuggestion("top_k", 2, 1000),
LogUniformSuggestion("beta", 1e-5, 5e-1),
CategoricalSuggestion("normalize_weight", [True, False]),
]
recommender_class = RP3betaRecommender
_add_docstring(RP3betaOptimizer)
class TruncatedSVDOptimizer(BaseOptimizer):
default_tune_range = [IntegerSuggestion("n_components", 4, 512)]
recommender_class = TruncatedSVDRecommender
_add_docstring(TruncatedSVDOptimizer)
class SLIMOptimizer(BaseOptimizer):
default_tune_range = [
LogUniformSuggestion("alpha", 1e-5, 1),
UniformSuggestion("l1_ratio", 0, 1),
]
recommender_class = SLIMRecommender
_add_docstring(SLIMOptimizer)
class NMFOptimizer(BaseOptimizer):
default_tune_range = [
IntegerSuggestion("n_components", 4, 512),
LogUniformSuggestion("alpha", 1e-10, 1e-1),
UniformSuggestion("l1_ratio", 0, 1),
CategoricalSuggestion("beta_loss", ["frobenius", "kullback-leibler"]),
]
recommender_class = NMFRecommender
_add_docstring(NMFOptimizer)
class CosineKNNOptimizer(BaseOptimizer):
default_tune_range = default_tune_range_knn_with_weighting.copy() + [
CategoricalSuggestion("normalize", [False, True])
]
recommender_class = CosineKNNRecommender
_add_docstring(CosineKNNOptimizer)
class AsymmetricCosineKNNOptimizer(BaseOptimizer):
default_tune_range = default_tune_range_knn_with_weighting + [
UniformSuggestion("alpha", 0, 1)
]
recommender_class = AsymmetricCosineKNNRecommender
_add_docstring(AsymmetricCosineKNNOptimizer)
class JaccardKNNOptimizer(BaseOptimizer):
default_tune_range = default_tune_range_knn.copy()
recommender_class = JaccardKNNRecommender
_add_docstring(JaccardKNNOptimizer)
class TverskyIndexKNNOptimizer(BaseOptimizer):
default_tune_range = default_tune_range_knn.copy() + [
UniformSuggestion("alpha", 0, 2),
UniformSuggestion("beta", 0, 2),
]
recommender_class = TverskyIndexKNNRecommender
_add_docstring(TverskyIndexKNNOptimizer)
class CosineUserKNNOptimizer(BaseOptimizer):
default_tune_range = default_tune_range_knn_with_weighting.copy() + [
CategoricalSuggestion("normalize", [False, True])
]
recommender_class = CosineUserKNNRecommender
_add_docstring(CosineUserKNNOptimizer)
class AsymmetricCosineUserKNNOptimizer(BaseOptimizer):
default_tune_range = default_tune_range_knn_with_weighting + [
UniformSuggestion("alpha", 0, 1)
]
recommender_class = AsymmetricCosineUserKNNRecommender
_add_docstring(AsymmetricCosineUserKNNOptimizer)
try:
from ..recommenders.bpr import BPRFMRecommender
class BPRFMOptimizer(BaseOptimizerWithEarlyStopping):
default_tune_range = [
IntegerSuggestion("n_components", 4, 256),
LogUniformSuggestion("item_alpha", 1e-9, 1e-2),
LogUniformSuggestion("user_alpha", 1e-9, 1e-2),
CategoricalSuggestion("loss", ["bpr", "warp"]),
]
recommender_class = BPRFMRecommender
_add_docstring(BPRFMOptimizer, _BaseOptimizerWithEarlyStoppingArgsString)
except:
pass
try:
from ..recommenders.multvae import MultVAERecommender
class MultVAEOptimizer(BaseOptimizerWithEarlyStopping):
default_tune_range = [
CategoricalSuggestion("dim_z", [32, 64, 128, 256]),
CategoricalSuggestion("enc_hidden_dims", [128, 256, 512]),
CategoricalSuggestion("kl_anneal_goal", [0.1, 0.2, 0.4]),
]
recommender_class = MultVAERecommender
_add_docstring(MultVAEOptimizer, _BaseOptimizerWithEarlyStoppingArgsString)
except:
warnings.warn("MultVAEOptimizer is not available.")
pass
|
StarcoderdataPython
|
3379364
|
from django.db import models
import datetime as dt
from django.contrib.auth.models import User, AbstractUser
from django.core.validators import MaxValueValidator,MinValueValidator
from django.db.models.signals import post_save
from django.db.models import Q
class School(models.Model):
user = models.OneToOneField(User,null=True)
name = models.CharField(max_length=30)
location = models.CharField(max_length=30)
username = models.CharField(max_length=30,null=True)
password = models.CharField(max_length=30,null=True)
def __str__(self):
return self.name
def save_school(self):
self.save()
def delete_school(self):
self.delete()
class Level(models.Model):
name = models.CharField(max_length=30)
user = models.ForeignKey(User,on_delete=models.CASCADE,null=True)
school_key = models.ForeignKey(School,on_delete=models.CASCADE,null=True)
def __str__(self):
return self.name
def save_level(self):
self.save()
def delete_level(self):
self.delete()
class Guide(models.Model):
user = models.ForeignKey(User,on_delete=models.CASCADE,null=True)
school_key = models.ForeignKey(School,on_delete=models.CASCADE,null=True)
fname = models.CharField(max_length=30)
lname = models.CharField(max_length=30)
username = models.CharField(max_length=30,null=True)
password = models.CharField(max_length=30,null=True)
def __str__(self):
return self.username
def save_guide(self):
self.save()
def delete_guide(self):
self.delete()
class Student(models.Model):
level = models.ForeignKey(Level,on_delete=models.CASCADE,null=True)
fname = models.CharField(max_length=30)
lname = models.CharField(max_length=30)
email = models.EmailField()
ID = models.CharField(max_length=30,null=True)
user = models.ForeignKey(User,on_delete=models.CASCADE,null=True)
school_key = models.ForeignKey(School,on_delete=models.CASCADE,null=True)
# def __str__(self):
# return self.fname
def save_student(self):
self.save()
def delete_student(self):
self.delete()
@classmethod
def search_student(cls,fname,lname):
student = cls.objects.filter(
Q(fname__icontains=fname) |
Q(lname__icontains=lname)
)
return student
class Marks(models.Model):
student = models.ForeignKey(Student,on_delete=models.CASCADE,null=True)
subject = models.CharField(max_length=30)
points = models.CharField(max_length=30)
comment = models.CharField(max_length=100)
pub_date = models.DateTimeField(auto_now_add=True, null=True, blank=True)
guide = models.ForeignKey(Guide,on_delete=models.CASCADE,null=True)
class Discipline(models.Model):
student = models.ForeignKey(Student,on_delete=models.CASCADE,null=True)
case = models.CharField(max_length=30)
comment = models.CharField(max_length=100)
pub_date = models.DateTimeField(auto_now_add=True, null=True, blank=True)
guide = models.ForeignKey(Guide,on_delete=models.CASCADE,null=True)
class Role(models.Model):
'''
'''
STUDENT = 1
GUIDE = 2
SCHOOL = 3
ADMIN = 4
ROLE_CHOICES = (
(STUDENT, 'student'),
(GUIDE, 'guide'),
(SCHOOL, 'school'),
(ADMIN, 'admin'),
)
id = models.PositiveSmallIntegerField(choices=ROLE_CHOICES, primary_key=True)
def __str__(self):
return self.get_id_display()
|
StarcoderdataPython
|
1631129
|
<filename>DataStructuresInPython/queue/Queue.py
'''
Created on Jun 4, 2018
@author: nishant.sethi
'''
class Queue:
def __init__(self):
self.queue = list()
# Insert method to add element
def addtoq(self,dataval):
if dataval not in self.queue:
self.queue.insert(0,dataval)
return True
return False
def size(self):
return len(self.queue)
# Pop method to remove element
def removefromq(self):
if len(self.queue)>0:
return self.queue.pop()
return ("No elements in Queue!")
|
StarcoderdataPython
|
121898
|
<reponame>semanticinsight/yetl-framework
from abc import ABC, abstractmethod
class DataSet(ABC):
pass
|
StarcoderdataPython
|
177928
|
import datetime
import random
import statistics
from typing import Dict, List, Any, Union, Set, Tuple
import sys
from sqlalchemy.ext.declarative import declarative_base, declared_attr
from app import db
from werkzeug.security import generate_password_hash, check_password_hash
from time import time
from flask import current_app, json
class Role(db.Model):
id = db.Column(db.String, primary_key=True)
title = db.Column(db.Text)
description = db.Column(db.Text)
deliverables = db.Column(db.Text)
specialism = db.Column(db.String)
family = db.Column(db.String)
organisation = db.Column(db.Text) # this should be linked to another table
address = db.Column(db.Text) # this should be linked to another table
def generate_key(self, prospective_key):
existing_keys = self.query(Role.id).all()
while prospective_key in existing_keys:
pass
|
StarcoderdataPython
|
4830216
|
import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.sections.brain.file import BrainFileConfiguration
class BasicTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_configuration(self, arguments):
super(BasicTestClient, self).load_configuration(arguments)
        self.configuration.brain_configuration.files.aiml_files._files = os.path.dirname(__file__)
self.configuration.brain_configuration.files._normal = os.path.dirname(__file__)+"/normal.txt"
class NormalizeAIMLTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
NormalizeAIMLTests.test_client = BasicTestClient()
def test_normalize(self):
response = NormalizeAIMLTests.test_client.bot.ask_question("test", "TEST NORMALIZE")
self.assertIsNotNone(response)
self.assertEqual(response, "keithsterling dot com")
def test_normalize_star(self):
response = NormalizeAIMLTests.test_client.bot.ask_question("test", "NORMALIZE test.org", srai=True)
self.assertIsNotNone(response)
self.assertEqual(response, "test dot org")
|
StarcoderdataPython
|
3308993
|
<reponame>tylerbenson/integrations-core
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from datadog_checks.utils.common import get_docker_hostname
HERE = os.path.dirname(os.path.abspath(__file__))
# Networking
HOST = get_docker_hostname()
PORT = '8091'
QUERY_PORT = '8093'
# Tags and common bucket name
CUSTOM_TAGS = ['optional:tag1']
CHECK_TAGS = CUSTOM_TAGS + ['instance:http://{}:{}'.format(HOST, PORT)]
BUCKET_NAME = 'cb_bucket'
|
StarcoderdataPython
|
1787556
|
<filename>utils/logging.py
import logging
import os
import sys
def init_logger(log_path, log_file, print_log=True, level=logging.INFO):
if not os.path.isdir(log_path):
os.makedirs(log_path)
fileHandler = logging.FileHandler("{0}/{1}.log".format(log_path, log_file))
handlers = [fileHandler]
if print_log:
consoleHandler = logging.StreamHandler(sys.stdout)
handlers.append(consoleHandler)
logging.basicConfig(
level=level,
format="%(asctime)s [%(process)d] [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s",
handlers=handlers)
|
StarcoderdataPython
|
74022
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as la
bear_black = (0.141, 0.11, 0.11)
bear_white = (0.89, 0.856, 0.856)
magenta = (0xfc / 255, 0x75 / 255, 0xdb / 255) # Brighter magenta
orange = (218 / 255, 171 / 255, 115 / 255)
green = (175 / 255, 219 / 255, 133 / 255)
white = (240 / 255, 245 / 255, 250 / 255)
blue1 = (70 / 255, 101 / 255, 137 / 255)
blue2 = (122 / 255, 174 / 255, 215 / 255)
def gsBasis(A):
B = np.array(A, dtype=np.float_)
B[:, 0] = B[:, 0] / la.norm(B[:, 0])
B[:, 1] = B[:, 1] - B[:, 1] @ B[:, 0] * B[:, 0]
if la.norm(B[:, 1]) > 1e-14:
B[:, 1] = B[:, 1] / la.norm(B[:, 1])
else:
B[:, 1] = np.zeros_like(B[:, 1])
return B
def draw_mirror(bearVectors):
fig, ax = plt.subplots(figsize=(12, 12), dpi=80)
ax.set_xlim([-3.50, 3.50])
ax.set_ylim([-3.50, 3.50])
ax.set_aspect(1)
# ax.set_axis_bgcolor(blue1)
ax.set_facecolor(blue1)
gs = gsBasis(bearVectors)
ax.plot([gs[0, 0] * -5, gs[0, 0] * 5], [gs[1, 0] * -5, gs[1, 0] * 5], lw=2, color=green, zorder=4)
ax.fill([
-5 * gs[0, 0], -5 * gs[0, 0] - 5 * gs[0, 1], 5 * gs[0, 0] - 5 * gs[0, 1], 5 * gs[0, 0]
], [
-5 * gs[1, 0], -5 * gs[1, 0] - 5 * gs[1, 1], 5 * gs[1, 0] - 5 * gs[1, 1], 5 * gs[1, 0]
], color=blue2, zorder=0)
ax.arrow(0, 0, bearVectors[0, 0], bearVectors[1, 0], lw=3, color=orange, zorder=5, head_width=0.1)
ax.arrow(0, 0, bearVectors[0, 1], bearVectors[1, 1], lw=3, color=orange, zorder=5, head_width=0.1)
ax.arrow(0, 0, gs[0, 0], gs[1, 0], lw=3, color=magenta, zorder=6, head_width=0.1)
ax.arrow(0, 0, gs[0, 1], gs[1, 1], lw=3, color=magenta, zorder=6, head_width=0.1)
return ax
bear_black_fur = np.array(
[[2.0030351, 2.229253, 2.1639012, 2.0809546, 1.9728726,
1.8974666, 1.8924396, 2.0030351, np.nan, 2.7017972,
2.8500957, 2.9707453, 3.0159889, 2.94561, 2.8299874,
2.7017972, np.nan, 2.1639012, 2.2317666, 2.3147132,
2.299632, 2.2493613, 2.1890365, 2.1211711, 2.1337387,
2.1639012, np.nan, 2.4982011, 2.5610936, 2.6213642,
2.633986, 2.5536071, 2.5057417, 2.4982011, np.nan,
2.2468478, 2.3247673, 2.4429034, 2.4303357, 2.3448755,
2.2820372, 2.2468478, np.nan, 2.1966706, 2.2722074,
2.4055076, 2.481933, 2.449941, 2.4001756, 2.3237501,
2.222442, 2.1984479, 2.1966706, np.nan, 1.847196,
1.7818441, 1.7290599, 1.6310321, 1.4575984, 1.3369488,
1.2791375, 1.3671112, 1.8044659, 1.9577914, 2.2367936,
2.5962289, 2.7520679, 2.9028799, 3.4005595, 3.3150993,
3.0511783, 2.9531506, 2.8676905, 2.7746897, 2.4052003,
2.2795237, 2.1639012, 1.847196, np.nan, 2.0491517,
2.5112591, 2.3175294, 2.1326865, 2.0491517],
[-1.3186252, -1.0902537, -0.99238015, -0.96477475, -0.99488975,
-1.1153494, -1.2408283, -1.3186252, np.nan, -1.1881273,
-1.0852346, -1.1454645, -1.3286636, -1.4666904, -1.4641808,
-1.1881273, np.nan, -1.5545256, -1.5219011, -1.4014413,
-1.3512497, -1.3412115, -1.3989317, -1.4917862, -1.5419777,
-1.5545256, np.nan, -1.4265371, -1.3964222, -1.4968054,
-1.6097363, -1.64738, -1.5545256, -1.4265371, np.nan,
-1.6423608, -1.6699662, -1.677495, -1.7176483, -1.7477632,
-1.7176483, -1.6423608, np.nan, -1.7223509, -1.7622781,
-1.7764744, -1.7613908, -1.8767359, -1.9805465, -1.9991791,
-1.9672374, -1.913114, -1.7223509, np.nan, -1.5043341,
-1.5444873, -1.486767, -1.1504836, -1.0626484, -1.11284,
-1.2558858, -1.7452537, -2.3902152, -2.4378972, -2.3575907,
-2.1467861, -2.2446597, -2.5527822, -2.5527822, -2.1919586,
-1.7828973, -1.6850238, -1.677495, -1.8431272, -2.028836,
-2.0363647, -1.9485295, -1.5043341, np.nan, -2.5527822,
-2.5527822, -2.4570104, -2.4463632, -2.5527822]])
bear_white_fur = np.array(
[[2.229253, 2.4680387, 2.7017972, 2.8299874, 2.8676905,
2.7746897, 2.4052003, 2.2795237, 2.1639012, 1.847196,
2.0030351, 2.229253, np.nan, 1.8044659, 1.8974666,
2.0491517, 2.1326865, 2.3175294, 2.5112591, 2.9028799,
2.7520679, 2.5962289, 2.2367936, 1.9577914, 1.8044659],
[-1.0902537, -1.0601388, -1.1881273, -1.4641809, -1.677495,
-1.8431272, -2.028836, -2.0363647, -1.9485295, -1.5043341,
-1.3186252, -1.0902537, np.nan, -2.3902152, -2.5527822,
-2.5527822, -2.4463632, -2.4570104, -2.5527822, -2.5527822,
-2.2446597, -2.1467861, -2.3575907, -2.4378972, -2.3902152]])
bear_face = np.array(
[[2.2419927, 2.2526567, 2.3015334, 2.3477442, 2.441943,
np.nan, 2.5258499, 2.5113971, 2.5327621, 2.5632387,
2.5780058, 2.5726645, 2.5475292, 2.5258499, np.nan,
2.2858075, 2.2704121, 2.2402497, 2.2283105, 2.2484187,
2.273554, 2.2858075],
[-1.7605035, -1.9432811, -1.9707865, -1.9654629, -1.781798,
np.nan, -1.4688862, -1.4942957, -1.5099806, -1.5112354,
-1.4877081, -1.466063, -1.4588479, -1.4688862, np.nan,
-1.4346933, -1.4506918, -1.4463002, -1.418381, -1.4055194,
-1.4083427, -1.4346933]])
|
StarcoderdataPython
|
194378
|
from symcollab.theories.listing import Listing
from symcollab.theories.nat import Nat
# Empty list is of length 0
result = Listing.simplify(
Listing.length(Listing.nil)
)
print("length(nil) is", result, flush=True)
assert result == Nat.zero
# Tail of a three-element list
result = Listing.simplify(
Listing.cons(Nat.zero, Listing.cons(Nat.zero, Listing.cons(Nat.zero, Listing.nil)))
)
print("tail([0, 0, 0]) is", result, flush=True)
assert result == Listing.cons(Nat.zero, Listing.cons(Nat.zero, Listing.nil))
|
StarcoderdataPython
|
1750168
|
# -*- coding: utf-8 -*-
"""
:copyright: (c) 2015-2017 by <NAME>
:license: CC0 1.0 Universal, see LICENSE for more details.
"""
from tenki import create_app
class TestConfig:
def test_dev_config(self):
"""Test if the development config loads correctly
"""
app = create_app('tenki.settings.DevConfig')
assert app.config['DEBUG'] is True
assert app.config['REDIS_URL'] == "redis://127.0.0.1:6379/0"
assert app.config['CACHE_TYPE'] == 'null'
def test_test_config(self):
"""Test if the test config loads correctly
"""
app = create_app('tenki.settings.TestConfig')
assert app.config['DEBUG'] is True
assert app.config['REDIS_URL'] == "redis://127.0.0.1:6379/0"
assert app.config['CACHE_TYPE'] == 'null'
|
StarcoderdataPython
|
3239545
|
<gh_stars>10-100
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent.parent))
import datetime
from robotidy.version import __version__
project = 'Robotidy'
copyright = f'{datetime.datetime.now().year}, <NAME>'
author = '<NAME>'
release = __version__
version = __version__
master_doc = 'index'
extensions = [
'sphinx_tabs.tabs',
'sphinx_copybutton'
]
templates_path = ['_templates']
exclude_patterns = []
html_theme = 'alabaster'
html_theme_options = {
"description": "Robot Framework code formatter",
"logo": "robotidy_logo_small.png",
"logo_name": True,
"logo_text_align": "center",
"show_powered_by": False,
"github_user": "MarketSquare",
"github_repo": "robotframework-tidy",
"github_banner": False,
"github_button": True,
"show_related": False,
"note_bg": "#FFF59C",
"github_type": "star"
}
html_static_path = ['_static']
html_favicon = "_static/robotidy.ico"
|
StarcoderdataPython
|
3212660
|
<reponame>wzy9607/Anno1800CalculatorDataParser
# coding:utf-8
import bs4
from data_parser.template import ProductFilter
def parse_product_filters(tags: bs4.Tag, assets_map: dict) -> list:
product_filters = []
for tag in tags:
if tag.Template.string == "ItemFilter":
continue
product_filters.append(ProductFilter(tag, assets_map = assets_map).get_values())
return product_filters
|
StarcoderdataPython
|
4800204
|
class BinaryTreeNode:
def __init__(self, value, left=None, right=None):
self.value = value
self.left = left
self.right = right
def insertleft(self, left):
self.left = left
def insertright(self, right):
self.right = right
def preOrder(self):
yield self.value
if self.left is not None:
yield from self.left.preOrder()
if self.right is not None:
yield from self.right.preOrder()
def posOrder(self):
if self.left is not None:
yield from self.left.posOrder()
if self.right is not None:
yield from self.right.posOrder()
yield self.value
def inOrder(self):
if self.left is not None:
yield from self.left.inOrder()
yield self.value
if self.right is not None:
yield from self.right.inOrder()
def removeleft(self):
self.left = None
def removeright(self):
self.right = None
def search(self, value):
for node in self.inOrder():
if value == node:
return True
return False
|
StarcoderdataPython
|
1743705
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def memoize(fn):
'''Decorates |fn| to memoize.
'''
memory = {}
def impl(*args, **optargs):
full_args = args + tuple(optargs.iteritems())
if full_args not in memory:
memory[full_args] = fn(*args, **optargs)
return memory[full_args]
return impl
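# Illustrative usage (not part of the original Chromium file): repeated calls with the
# same arguments return the cached result instead of recomputing it.
# @memoize
# def fib(n):
#   return n if n < 2 else fib(n - 1) + fib(n - 2)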
|
StarcoderdataPython
|
1744696
|
<filename>neodroidagent/utilities/exploration/sampling/random_process/random_process.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from abc import ABC
__author__ = "<NAME>"
__all__ = ["RandomProcess"]
class RandomProcess(ABC):
def __init__(self, **kwargs):
pass
def reset(self):
raise NotImplementedError
def sample(self, size):
raise NotImplementedError
|
StarcoderdataPython
|
74326
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-17 13:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('munigeo', '0003_add_modified_time_to_address_and_street'),
('stories', '0014_story_type'),
]
operations = [
migrations.RemoveField(
model_name='story',
name='location',
),
migrations.AddField(
model_name='story',
name='locations',
field=models.ManyToManyField(blank=True, related_name='stories', to='munigeo.AdministrativeDivision'),
),
]
|
StarcoderdataPython
|
3346933
|
<filename>zvt/recorders/baostock/quotes/bao_china_stock_kdata_recorder.py
# -*- coding: utf-8 -*-
import argparse
import pandas as pd
from zvt import init_log, zvt_config
from zvt.api.data_type import Region, Provider, EntityType
from zvt.api.quote import get_kdata, get_kdata_schema
from zvt.domain import Stock, StockKdataCommon, Stock1dHfqKdata
from zvt.contract import IntervalLevel, AdjustType
from zvt.contract.recorder import FixedCycleDataRecorder
from zvt.recorders.baostock.common import to_bao_trading_level, to_bao_entity_id, \
to_bao_trading_field, to_bao_adjust_flag
from zvt.networking.request import bao_get_bars
from zvt.utils.pd_utils import pd_is_not_null
from zvt.utils.time_utils import to_time_str, PD_TIME_FORMAT_DAY, PD_TIME_FORMAT_ISO8601
class BaoChinaStockKdataRecorder(FixedCycleDataRecorder):
    # data comes from jq
region = Region.CHN
provider = Provider.BaoStock
entity_schema = Stock
    # just to register the recorder to data_schema
data_schema = StockKdataCommon
def __init__(self,
exchanges=['sh', 'sz'],
entity_ids=None,
codes=None,
batch_size=10,
force_update=True,
sleeping_time=0,
default_size=zvt_config['batch_size'],
real_time=False,
fix_duplicate_way='ignore',
start_timestamp=None,
end_timestamp=None,
level=IntervalLevel.LEVEL_1WEEK,
kdata_use_begin_time=False,
close_hour=15,
close_minute=0,
one_day_trading_minutes=4 * 60,
adjust_type=AdjustType.qfq,
share_para=None) -> None:
level = IntervalLevel(level)
adjust_type = AdjustType(adjust_type)
self.data_schema = get_kdata_schema(entity_type=EntityType.Stock, level=level, adjust_type=adjust_type)
self.bao_trading_level = to_bao_trading_level(level)
super().__init__(EntityType.Stock, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, real_time, fix_duplicate_way, start_timestamp, end_timestamp, close_hour,
close_minute, level, kdata_use_begin_time, one_day_trading_minutes, share_para=share_para)
self.adjust_type = adjust_type
def generate_domain_id(self, entity, df, time_fmt=PD_TIME_FORMAT_DAY):
format = PD_TIME_FORMAT_DAY if self.level >= IntervalLevel.LEVEL_1DAY else PD_TIME_FORMAT_ISO8601
return df['entity_id'] + '_' + df[self.get_evaluated_time_field()].dt.strftime(format)
def record(self, entity, start, end, size, timestamps, http_session):
start = to_time_str(start)
if self.bao_trading_level in ['d', 'w', 'm']:
start = start if start > "1990-12-19" else "1990-12-19"
else:
start = start if start > "1999-07-26" else "1999-07-26"
return bao_get_bars(to_bao_entity_id(entity),
start=start,
end=end if end is None else to_time_str(end),
frequency=self.bao_trading_level,
fields=to_bao_trading_field(self.bao_trading_level),
adjustflag=to_bao_adjust_flag(self.adjust_type))
def format(self, entity, df):
if self.bao_trading_level == 'd':
df.rename(columns={'turn': 'turnover', 'date': 'timestamp', 'preclose': 'pre_close', 'pctChg': 'change_pct',
'peTTM': 'pe_ttm', 'psTTM': 'ps_ttm', 'pcfNcfTTM': 'pcf_ncf_ttm', 'pbMRQ': 'pb_mrq', 'isST': 'is_st'}, inplace=True)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['is_st'] = df['is_st'].astype(int)
elif self.bao_trading_level == 'w' or self.bao_trading_level == 'm':
df.rename(columns={'turn': 'turnover', 'date': 'timestamp', 'pctChg': 'change_pct'}, inplace=True)
df['timestamp'] = pd.to_datetime(df['timestamp'])
else:
df.rename(columns={'time': 'timestamp'}, inplace=True)
df['timestamp'] = pd.to_datetime(df['timestamp'], format='%Y%m%d%H%M%S%f')
cols = df.select_dtypes('object').columns.to_list()
cols.remove('adjustflag')
df.replace(r'^\s*$', 0.0, regex=True, inplace=True)
df[cols] = df[cols].astype(float)
df['entity_id'] = entity.id
df['provider'] = self.provider.value
df['name'] = entity.name
df['code'] = entity.code
df['level'] = self.level.value
df.replace({'adjustflag': {'1': 'hfq', '2': 'qfq', '3': 'normal'}}, inplace=True)
df['id'] = self.generate_domain_id(entity, df)
return df
def on_finish(self):
pass
__all__ = ['BaoChinaStockKdataRecorder']
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--level', help='trading level', default='1d', choices=[item.value for item in IntervalLevel])
parser.add_argument('--codes', help='codes', default=['000001'], nargs='+')
args = parser.parse_args()
level = IntervalLevel(args.level)
codes = args.codes
init_log('bao_china_stock_{}_kdata.log'.format(args.level))
BaoChinaStockKdataRecorder(level=level, sleeping_time=0, codes=codes,
real_time=False, adjust_type=AdjustType.hfq).run()
print(get_kdata(region=Region.CHN, entity_id='stock_sz_000001', limit=10,
order=Stock1dHfqKdata.timestamp.desc(),
adjust_type=AdjustType.hfq))
|
StarcoderdataPython
|
3224953
|
from math import cos, sin
n, w = map(int, input().split())
sushi = [tuple(map(int, input().split())) for _ in range(n)]
def solve(t, sushi):
for x, y, r, v, a in sushi:
|
StarcoderdataPython
|
1756276
|
import asyncio
import logging
from aiogram import Bot, types
from aiogram.contrib.fsm_storage.memory import MemoryStorage
from aiogram.dispatcher import Dispatcher
from aiogram.utils import exceptions, executor
from aiogram.utils.markdown import text
import config
from medicines import Medicines
loop = asyncio.get_event_loop()
bot = Bot(token=config.API_TOKEN, loop=loop)
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
medicines = Medicines()
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
@dp.message_handler(commands=['start'])
async def cmd_start(message: types.Message):
"""
Conversation's entry point
"""
logging.info('Старт работы бота у пользователя ' +
str(message.from_user.id))
line1 = 'Привет, этот бот ищет присылаемые ему названия лекарств ' +\
'в списке "Расстрельный список препаратов" сайта encyclopatia.ru.'
instructions = text(line1)
await bot.send_message(message.chat.id,
instructions)
@dp.message_handler()
async def process_text(message: types.Message):
medicine = message.text.strip()
logging.info('Пользователь ' + str(message.from_user.id) +
' cпросил ' + medicine + '.')
descriptions = medicines.get_descriptions(medicine)
for descr in descriptions:
await bot.send_message(message.chat.id, descr)
async def startup(dispatcher: Dispatcher):
logging.info('Старт бота.')
await medicines.load_medicine_list()
async def shutdown(dispatcher: Dispatcher):
logging.info('Убиваем бота.')
await dispatcher.storage.close()
await dispatcher.storage.wait_closed()
def main():
executor.start_polling(dp,
loop=loop,
skip_updates=True,
on_startup=startup,
on_shutdown=shutdown)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
170962
|
import asyncio
import time
from collections import defaultdict
from models.proxy import Proxy
class Saver(object):
RESULT_SAVE_NUM = 100
pattern_lock_map = defaultdict(asyncio.Lock)
success_count = 0
total_count = 0
def __init__(self, redis):
self.redis = redis
async def _save(self, key, response):
key += '_result'
if hasattr(response, 'info_json'):
info = await response.info_json()
await asyncio.gather(*[self.redis.lpush(key, info),
self.redis.ltrim(key, 0, self.RESULT_SAVE_NUM - 1)])
async def _score_counter(self, pattern_str, proxy_str, valid):
async with self.pattern_lock_map[pattern_str]:
proxy = await Proxy.discard(pattern_str, proxy_str, self.redis)
if proxy is None:
return
self.total_count += 1
if valid:
if proxy.score < 0:
proxy.score = 0
elif 0 <= proxy.score < 5:
proxy.score += 1
self.success_count += 1
else:
proxy.score -= 1
remain_time = proxy.insert_time + proxy.valid_time - int(time.time())
if (proxy.score <= -3 or (remain_time < 0 < proxy.valid_time)) and pattern_str != 'public_proxies':
await self._del_proxy_in_pattern(pattern_str, proxy)
return
proxy.used = True
await proxy.store(pattern_str, self.redis)
async def save_result(self, pattern_str, proxy_str, response):
tasks = [
self._score_counter(pattern_str, proxy_str, response.valid),
]
if not response.valid:
tasks.append(self._save(proxy_str, response))
tasks.append(self._save(pattern_str, response))
await asyncio.gather(*tasks)
async def _del_proxy_in_pattern(self, pattern_str, proxy):
fail_key = pattern_str + '_fail'
proxy.delete_time = int(time.time())
await asyncio.gather(*[self.redis.hdel(pattern_str, str(proxy)),
proxy.store(fail_key, self.redis)])
|
StarcoderdataPython
|
3436
|
<reponame>andreakropp/datarobot-user-models
#!/usr/bin/env python
# coding: utf-8
# pylint: disable-all
from __future__ import absolute_import
from sklearn.preprocessing import LabelEncoder
from pathlib import Path
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
class BinModel(nn.Module):
expected_target_type = torch.FloatTensor
def __init__(self, input_size):
super(BinModel, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.relu1 = nn.ReLU()
self.dout = nn.Dropout(0.2)
self.fc2 = nn.Linear(50, 100)
self.prelu = nn.PReLU(1)
self.out = nn.Linear(100, 1)
self.out_act = nn.Sigmoid()
def forward(self, input_):
a1 = self.fc1(input_)
h1 = self.relu1(a1)
dout = self.dout(h1)
a2 = self.fc2(dout)
h2 = self.prelu(a2)
a3 = self.out(h2)
y = self.out_act(a3)
return y
class RegModel(nn.Module):
def __init__(self, input_size):
super(RegModel, self).__init__()
self.fc1 = nn.Linear(input_size, 50)
self.relu1 = nn.ReLU()
self.dout = nn.Dropout(0.2)
self.fc2 = nn.Linear(50, 100)
self.prelu = nn.PReLU(1)
self.out = nn.Linear(100, 1)
def forward(self, input_):
a1 = self.fc1(input_)
h1 = self.relu1(a1)
dout = self.dout(h1)
a2 = self.fc2(dout)
h2 = self.prelu(a2)
y = self.out(h2)
return y
class MultiModel(nn.Module):
expected_target_type = torch.LongTensor
def __init__(self, input_size, output_size):
super(MultiModel, self).__init__()
self.layer1 = nn.Linear(input_size, 8)
self.relu = nn.ReLU()
self.layer2 = nn.Linear(8, output_size)
self.out = nn.Softmax()
def forward(self, input_):
out = self.layer1(input_)
out = self.relu(out)
out = self.layer2(out)
out = self.out(out)
return out
def train_epoch(model, opt, criterion, X, y, batch_size=50):
model.train()
losses = []
for beg_i in range(0, X.size(0), batch_size):
x_batch = X[beg_i : beg_i + batch_size, :]
# y_hat will be (batch_size, 1) dim, so coerce target to look the same
y_batch = y[beg_i : beg_i + batch_size].reshape(-1, 1)
x_batch = Variable(x_batch)
y_batch = Variable(y_batch)
opt.zero_grad()
# (1) Forward
y_hat = model(x_batch)
# (2) Compute diff
loss = criterion(y_hat, y_batch)
# (3) Compute gradients
loss.backward()
# (4) update weights
opt.step()
losses.append(loss.data.numpy())
return losses
def build_classifier(X, num_labels):
class_model = BinModel(X.shape[1]) if num_labels == 2 else MultiModel(X.shape[1], num_labels)
class_opt = optim.Adam(class_model.parameters(), lr=0.001)
class_criterion = nn.BCELoss() if num_labels == 2 else nn.CrossEntropyLoss()
return class_model, class_opt, class_criterion
def build_regressor(X):
reg_model = RegModel(X.shape[1])
reg_opt = optim.Adam(reg_model.parameters(), lr=0.001)
reg_criterion = nn.MSELoss()
return reg_model, reg_opt, reg_criterion
def train_classifier(X, y, class_model, class_opt, class_criterion, n_epochs=5):
target_encoder = LabelEncoder()
target_encoder.fit(y)
transformed_y = target_encoder.transform(y)
bin_t_X = torch.from_numpy(X.values).type(torch.FloatTensor)
bin_t_y = torch.from_numpy(transformed_y).type(class_model.expected_target_type)
for e in range(n_epochs):
train_epoch(class_model, class_opt, class_criterion, bin_t_X, bin_t_y)
def train_regressor(X, y, reg_model, reg_opt, reg_criterion, n_epochs=5):
reg_t_X = torch.from_numpy(X.values).type(torch.FloatTensor)
reg_t_y = torch.from_numpy(y.values).type(torch.FloatTensor)
for e in range(n_epochs):
train_epoch(reg_model, reg_opt, reg_criterion, reg_t_X, reg_t_y)
def save_torch_model(model, output_dir_path, filename="torch_bin.pth"):
output_file_path = Path(output_dir_path) / filename
torch.save(model, output_file_path)
def subset_data(X):
numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
# exclude any completely-missing columns when checking for numerics
num_features = list(X.dropna(axis=1, how="all").select_dtypes(include=numerics).columns)
# keep numeric features, zero-impute any missing values
# obviously this is a very rudimentary approach to handling missing values
# a more sophisticated imputer can be implemented by making use of custom transform, load, and predict hooks
return X[num_features].fillna(0)
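# A minimal end-to-end sketch (illustrative only) wiring subset_data,
# build_classifier, train_classifier and save_torch_model together on synthetic
# data; the column names, sizes and output file name below are assumptions.
if __name__ == "__main__":
    import numpy as np
    import pandas as pd

    X = pd.DataFrame(np.random.rand(100, 4), columns=["f1", "f2", "f3", "f4"])
    y = pd.Series(np.random.randint(0, 2, size=100))

    X_num = subset_data(X)
    model, opt, criterion = build_classifier(X_num, num_labels=2)
    train_classifier(X_num, y, model, opt, criterion, n_epochs=2)
    save_torch_model(model, output_dir_path=".", filename="torch_bin_example.pth")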
|
StarcoderdataPython
|
3256012
|
<reponame>ChriPiv/stinespring-algo-paper
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
import sys
import autograd
import autograd.numpy as np
from scipy.optimize import minimize
import qiskit
from qiskit import *
from qiskit.quantum_info import *
from qiskit.aqua.components.variational_forms import *
from qiskit.providers.aer.noise import NoiseModel
from qiskit.providers.aer.utils import insert_noise
sys.path.append("..")
from json_tools import *
from channels import *
from variational_approximation import error_mean, get_approx_circuit, get_varform_circuit
from diamond_norm import *
n_qubits = 3
full_connectivity = False
U = random_unitary(2**n_qubits, seed=1234).data
noise_model = NoiseModel.from_dict(json_from_file("2020_04_08.json"))
noise_model.add_quantum_error(noise_model._local_quantum_errors['cx']['2,3'], 'cx', [0,2])
noise_model.add_quantum_error(noise_model._local_quantum_errors['cx']['3,2'], 'cx', [2,0])
def dilation_channel(data, is_unitary=True, ideal=False):
exp = channel_expand(n_qubits-1,1)
if is_unitary:
qc = QuantumCircuit(n_qubits)
qc.unitary(data, list(range(n_qubits)))
else:
qc = data
if not ideal:
if not full_connectivity:
qc = qiskit.compiler.transpile(qc, basis_gates=noise_model.basis_gates,
coupling_map=[[0,1],[1,2]])
qc = insert_noise(qc, noise_model, transpile=True)
qc = SuperOp(qc)
tr = channel_trace(n_qubits-1,1)
channel = exp.compose(qc.compose(tr))
return Choi(channel).data
ch_ideal = dilation_channel(U, ideal=True)
ch_ref = dilation_channel(U)
assert Choi(ch_ideal).is_tp()
assert Choi(ch_ideal).is_cp()
assert Choi(ch_ref).is_tp()
assert Choi(ch_ref).is_cp()
print("Ref:", dnorm(ch_ideal - ch_ref))
if full_connectivity:
depth_list = [1,2,3,4,5,6,7,8,9,10,15]
else:
depth_list = [1,3,4,5,6,7,8,9,10,15,20,30,40]
for depth in depth_list:
U_approx,params = get_approx_circuit(U, n_qubits, depth, full_connectivity)
qc = get_varform_circuit(params, n_qubits, depth, full_connectivity)
ch = dilation_channel(qc, is_unitary=False)
print(depth, error_mean(U, U_approx, 2), dnorm(ch - ch_ideal))
|
StarcoderdataPython
|
1770736
|
<reponame>peihaowang/nerf-pytorch
import os, sys
import math, random, time
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import imageio
import lpips
import utils.ssim as ssim_utils
lpips_alex = lpips.LPIPS(net='alex') # best forward scores
lpips_vgg = lpips.LPIPS(net='vgg') # closer to "traditional" perceptual loss, when used for optimization
# Misc
def img2mse(x, y, reduction='mean'):
diff = torch.mean((x - y) ** 2, -1)
if reduction == 'mean':
return torch.mean(diff)
elif reduction == 'sum':
return torch.sum(diff)
elif reduction == 'none':
return diff
def mse2psnr(x):
if isinstance(x, float):
x = torch.tensor([x])
return -10. * torch.log(x) / torch.log(torch.tensor([10.], device=x.device))
def ssim(img1, img2, window_size = 11, size_average = True, format='NCHW'):
if format == 'HWC':
img1 = img1.permute([2, 0, 1])[None, ...]
img2 = img2.permute([2, 0, 1])[None, ...]
elif format == 'NHWC':
img1 = img1.permute([0, 3, 1, 2])
img2 = img2.permute([0, 3, 1, 2])
return ssim_utils.ssim(img1, img2, window_size, size_average)
def lpips(img1, img2, net='alex', format='NCHW'):
if format == 'HWC':
img1 = img1.permute([2, 0, 1])[None, ...]
img2 = img2.permute([2, 0, 1])[None, ...]
elif format == 'NHWC':
img1 = img1.permute([0, 3, 1, 2])
img2 = img2.permute([0, 3, 1, 2])
if net == 'alex':
return lpips_alex(img1, img2)
elif net == 'vgg':
return lpips_vgg(img1, img2)
def to8b(x):
return (255*(x-x.min())/(x.max()-x.min())).astype(np.uint8)
def export_images(rgbs, save_dir, H=0, W=0):
rgb8s = []
for i, rgb in enumerate(rgbs):
# Resize
if H > 0 and W > 0:
rgb = rgb.reshape([H, W])
filename = os.path.join(save_dir, '{:03d}.npy'.format(i))
np.save(filename, rgb)
# Convert to image
rgb8 = to8b(rgb)
filename = os.path.join(save_dir, '{:03d}.png'.format(i))
imageio.imwrite(filename, rgb8)
rgb8s.append(rgb8)
return np.stack(rgb8s, 0)
def export_video(rgbs, save_path, fps=30, quality=8):
imageio.mimwrite(save_path, to8b(rgbs), fps=fps, quality=quality)
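# A minimal usage sketch (illustrative only): random NHWC tensors stand in for a
# rendered batch and its ground truth; the shapes below are assumptions.
if __name__ == "__main__":
    pred = torch.rand(2, 32, 32, 3)
    gt = torch.rand(2, 32, 32, 3)
    mse = img2mse(pred, gt)
    print("PSNR:", mse2psnr(mse).item())
    print("SSIM:", ssim(pred, gt, format='NHWC').item())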
|
StarcoderdataPython
|
4801593
|
import time
import pytest
import gevent
from eth_utils import int_to_big_endian, keccak
from raidex.raidex_node.offer_book import OfferDeprecated, OfferBook, OfferType, OfferView
from raidex.raidex_node.listener_tasks import OfferBookTask, SwapCompletedTask, OfferTakenTask
from raidex.utils import timestamp
from raidex.utils import get_market_from_asset_pair
from raidex.message_broker.message_broker import MessageBroker
from raidex.raidex_node.market import TokenPair
from raidex.raidex_node.commitment_service.mock import CommitmentServiceClientMock
from raidex.raidex_node.trades import TradesView
from raidex.signing import Signer
@pytest.fixture()
def market(assets):
return TokenPair(assets[0], assets[1])
@pytest.fixture()
def message_broker():
return MessageBroker()
@pytest.fixture()
def commitment_service(market, message_broker):
return CommitmentServiceClientMock(Signer.random(), market, message_broker)
def test_offer_comparison():
timeouts = [int(time.time() + i) for i in range(0, 4)]
offer_ids = list(range(0, 4))
offer1 = OfferDeprecated(OfferType.BUY, 50, 5, timeout=timeouts[0], offer_id=offer_ids[0])
offer2 = OfferDeprecated(OfferType.BUY, 100, 1, timeout=timeouts[1], offer_id=offer_ids[1])
offer3 = OfferDeprecated(OfferType.BUY, 100, 2, timeout=timeouts[2], offer_id=offer_ids[2])
offer4 = OfferDeprecated(OfferType.BUY, 100, 1, timeout=timeouts[3], offer_id=offer_ids[3])
offers = OfferView()
for offer in [offer1, offer2, offer3, offer4]:
offers.add_offer(offer)
assert list(offers.values()) == [offer2, offer4, offer3, offer1]
def test_offer_book_task(message_broker, commitment_service, market):
offer_book = OfferBook()
OfferBookTask(offer_book, market, message_broker).start()
gevent.sleep(0.001)
offer = OfferDeprecated(OfferType.SELL, 100, 1000, offer_id=123, timeout=timestamp.time_plus(20))
proof = commitment_service.maker_commit_async(offer).get()
message_broker.broadcast(proof)
gevent.sleep(0.001)
assert len(offer_book.sells) == 1
def test_taken_task(message_broker, commitment_service):
offer_book = OfferBook()
trades = TradesView()
OfferTakenTask(offer_book, trades, message_broker).start()
gevent.sleep(0.001)
offer = OfferDeprecated(OfferType.SELL, 100, 1000, offer_id=123, timeout=timestamp.time_plus(2))
# insert manually for the first time
offer_book.insert_offer(offer)
assert len(offer_book.sells) == 1
offer_taken = commitment_service.create_taken(offer.offer_id)
# send offer_taken
message_broker.broadcast(offer_taken)
gevent.sleep(0.001)
assert len(offer_book.sells) == 0
assert len(trades.pending_offer_by_id) == 1
def test_swap_completed_task(message_broker, commitment_service):
trades = TradesView()
SwapCompletedTask(trades, message_broker).start()
gevent.sleep(0.001)
offer = OfferDeprecated(OfferType.SELL, 100, 1000, offer_id=123, timeout=timestamp.time_plus(2))
# set it to pending, as it was taken
trades.add_pending(offer)
assert len(trades.pending_offer_by_id) == 1
swap_completed = commitment_service.create_swap_completed(offer.offer_id)
# send swap_completed
message_broker.broadcast(swap_completed)
gevent.sleep(0.001)
assert len(trades) == 1
|
StarcoderdataPython
|
1742885
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
sys.path.append('utils')
import json
import numpy as np
from .utils.box import *
from .utils.draw import *
from .utils.infrastructure import *
from .utils.detbox import *
def save_results(records,fpath):
with open(fpath,'w') as fid:
for record in records:
line = json.dumps(record)+'\n'
fid.write(line)
return fpath
def load_func(fpath):
assert os.path.exists(fpath)
with open(fpath,'r') as fid:
lines = fid.readlines()
records =[json.loads(line.strip('\n')) for line in lines]
return records
def clip_boundary(dtboxes,height,width):
num = dtboxes.shape[0]
dtboxes[:,0] = np.maximum(dtboxes[:,0], 0)
dtboxes[:,1] = np.maximum(dtboxes[:,1], 0)
dtboxes[:,2] = np.minimum(dtboxes[:,2], width)
dtboxes[:,3] = np.minimum(dtboxes[:,3], height)
return dtboxes
def recover_func(dtboxes):
assert dtboxes.shape[1]>=4
dtboxes[:,2] += dtboxes[:,0]
dtboxes[:,3] += dtboxes[:,1]
return dtboxes
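def _demo_recover_and_clip():
    # A minimal sketch (illustrative only): one dummy detection in (x, y, w, h)
    # format is converted to corner coordinates and clipped to a 35x50 image.
    # The helper name and the numbers are assumptions, not part of the original.
    dtboxes = np.array([[10., 20., 30., 40.]])
    dtboxes = recover_func(dtboxes)                # -> [[10., 20., 40., 60.]]
    dtboxes = clip_boundary(dtboxes, height=50, width=35)
    return dtboxes                                 # -> [[10., 20., 35., 50.]]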
|
StarcoderdataPython
|
3255349
|
from setuptools import setup
setup(
name='d3rlpy-addons',
version='0.1',
packages=[
'd3rlpy_addons',
'd3rlpy_addons.fitters',
'd3rlpy_addons.wrappers',
"d3rlpy_addons.models"
],
url='',
license='MIT',
author='<NAME>.',
author_email='<EMAIL>',
description='Addons for d3rpy RL library',
install_requires=[
"torch", "scikit-learn", "tqdm", "h5py", "gym", "d3rlpy"
],
)
|
StarcoderdataPython
|
1732394
|
import numpy as np
from . import tools
class IntervalTestData(object):
functions = [tools.f]
first_derivs = [tools.fd]
domains = [(1,2),(0,2),(-1,0),(-.2*np.pi,.2*np.e),(-1,1)]
integrals = [
[ 0.032346217980525, 0.030893429600387, -0.014887469493652,
-0.033389463703032, -0.016340257873789, ]
]
roots = [
[
np.array([
1.004742754531498, 1.038773298601836, 1.073913103930722,
1.115303578807479, 1.138876334576409, 1.186037005063195,
1.200100773491540, 1.251812490296546, 1.257982114030372,
1.312857486088040, 1.313296484543653, 1.365016316032836,
1.371027655848883, 1.414708808202124, 1.425447888640173,
1.462152640981920, 1.476924360913394, 1.507538306301423,
1.525765627652155, 1.551033406767893, 1.572233571395834,
1.592786143530423, 1.616552437657155, 1.632928169757349,
1.658915772490721, 1.671576942342459, 1.699491823230094,
1.708837673403015, 1.738427795274605, 1.744804960074507,
1.775853245044121, 1.779564153811983, 1.811882812082608,
1.813192517312102, 1.845760207165999, 1.846618439572035,
1.877331112646444, 1.880151194495009, 1.907963575049332,
1.912562771369236, 1.937711007329229, 1.943926743585850,
1.966622430081970, 1.974309611716701, 1.994742937003962,
]),
np.array([
0.038699154393837, 0.170621357069026, 0.196642349303247,
0.335710810755860, 0.360022217617733, 0.459687243605995,
0.515107092342894, 0.571365105600701, 0.646902333813374,
0.672854750953472, 0.761751991347867, 0.765783134619707,
0.851427319155724, 0.863669737544800, 0.930805860269712,
0.955368374256150,
1.004742754531498, 1.038773298601836, 1.073913103930722,
1.115303578807479, 1.138876334576409, 1.186037005063195,
1.200100773491540, 1.251812490296546, 1.257982114030372,
1.312857486088040, 1.313296484543653, 1.365016316032836,
1.371027655848883, 1.414708808202124, 1.425447888640173,
1.462152640981920, 1.476924360913394, 1.507538306301423,
1.525765627652155, 1.551033406767893, 1.572233571395834,
1.592786143530423, 1.616552437657155, 1.632928169757349,
1.658915772490721, 1.671576942342459, 1.699491823230094,
1.708837673403015, 1.738427795274605, 1.744804960074507,
1.775853245044121, 1.779564153811983, 1.811882812082608,
1.813192517312102, 1.845760207165999, 1.846618439572035,
1.877331112646444, 1.880151194495009, 1.907963575049332,
1.912562771369236, 1.937711007329229, 1.943926743585850,
1.966622430081970, 1.974309611716701, 1.994742937003962,
]),
np.array([
-0.928510879374692, -0.613329324979852, -0.437747415493617,
-0.357059979912156, -0.143371301774133, -0.075365172766102,
]),
np.array([
-0.613329324979852, -0.437747415493618, -0.357059979912156,
-0.143371301774133, -0.075365172766103, 0.038699154393837,
0.170621357069026, 0.196642349303248, 0.335710810755860,
0.360022217617734, 0.459687243605995, 0.515107092342894,
]),
np.array([
-0.928510879374692, -0.613329324979852, -0.437747415493617,
-0.357059979912156, -0.143371301774133, -0.075365172766102,
0.038699154393837, 0.170621357069026, 0.196642349303247,
0.335710810755860, 0.360022217617733, 0.459687243605995,
0.515107092342894, 0.571365105600701, 0.646902333813374,
0.672854750953472, 0.761751991347867, 0.765783134619707,
0.851427319155724, 0.863669737544800, 0.930805860269712,
0.955368374256150,
])
]
]
#------------------------------------------------------------------------------
# Variables utilised in the unit-tests
#------------------------------------------------------------------------------
flat_chebfun_vals = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.999999997, 0.999999986,
0.999999941, 0.999999789, 0.999999393, 0.999998563, 0.999997184,
0.999995348, 0.999993366, 0.99999154, 0.999989915, 0.999988271,
0.999986403, 0.999984359, 0.999982364, 0.999980474, 0.999978307,
0.999975142, 0.999970324, 0.999963643, 0.999955361, 0.999945898,
0.999935363, 0.999923205, 0.999908262, 0.999889202, 0.999864879,
0.999834246, 0.999796326, 0.99975088, 0.99969903, 0.999642342,
0.999580651, 0.999510781, 0.999427251, 0.999323793, 0.999194567,
0.999035416, 0.998844745, 0.998622138, 0.998364734, 0.998065025,
0.997713245, 0.997302283, 0.996829434, 0.996292659, 0.995685794,
0.994998503, 0.994220762, 0.993346077, 0.992368917, 0.99127826,
0.990054121, 0.988671759, 0.98710944, 0.985350209, 0.983374834,
0.981155291, 0.978659102, 0.975861583, 0.972752611, 0.969329423,
0.965579958, 0.961469433, 0.956940511, 0.95192781, 0.94637807,
0.940262855, 0.93357411, 0.926305629, 0.918439616, 0.909957562,
0.90086621, 0.891201576, 0.880993483, 0.870226297, 0.858845216,
0.84680415, 0.834100554, 0.820763048, 0.806814451, 0.792250772,
0.777051706, 0.761210269, 0.744753854, 0.727735506, 0.710206105,
0.692195444, 0.673708895, 0.65473279, 0.635254799, 0.615298016,
0.594939271, 0.574286603, 0.553432126, 0.532428562, 0.511317921,
0.490178078, 0.46912582, 0.448268747, 0.427662956, 0.40732279,
0.387259893, 0.367495569, 0.348043802, 0.328916616, 0.310158617,
0.291852411, 0.274075761, 0.256866646, 0.240236008, 0.22419954,
0.208787517, 0.19403154, 0.179950519, 0.166547846, 0.15381606,
0.14174234, 0.130312476, 0.1195121, 0.109325601, 0.099737909,
0.090739786, 0.082329246, 0.074508439, 0.067280706, 0.060641503,
0.054562011, 0.048986606, 0.043855339, 0.039128943, 0.034791594,
0.030835863, 0.027249885, 0.024013122, 0.021095398, 0.018461179,
0.01608212, 0.013947278, 0.012058915, 0.010417872, 0.009010476,
0.00780443, 0.006757475, 0.00583398, 0.005014532, 0.004291684,
0.003660989, 0.003116582, 0.002650172, 0.002250345, 0.001904272,
0.001602206, 0.00133936, 0.001113409, 0.000922497, 0.000765091,
0.000638606, 0.000537747, 0.000455515, 0.000385761, 0.000324789,
0.000271423, 0.000225886, 0.000188349, 0.000158104, 0.000133678,
0.000113573, 9.67E-05, 8.23E-05, 6.91E-05, 5.67E-05, 4.53E-05,
3.54E-05, 2.74E-05, 2.12E-05, 1.65E-05, 1.32E-05, 1.12E-05, 9.83E-06,
8.52E-06, 6.74E-06, 4.57E-06, 2.55E-06, 1.13E-06, 3.89E-07, 1.03E-07,
2.06E-08, 3.12E-09, 3.54E-10, 3.00E-11, 1.88E-12, 7.43E-14, -1.68E-14,
-1.49E-14, -1.99E-14, -1.38E-14, -2.05E-14, -1.37E-14, -2.15E-14,
-1.30E-14, -2.04E-14, -1.35E-14, -2.01E-14, -1.29E-14, -2.09E-14,
-1.35E-14, -2.11E-14, -1.25E-14, -2.13E-14, -1.25E-14, -2.16E-14,
-1.26E-14, -2.11E-14, -1.25E-14, -2.12E-14, -1.18E-14, -2.18E-14,
-1.16E-14, -2.23E-14, -1.03E-14, -2.15E-14, -1.07E-14, -2.25E-14,
-1.08E-14, -2.24E-14, -1.01E-14, -2.27E-14, -1.02E-14, -2.21E-14,
-1.06E-14, -2.25E-14, -9.85E-15, -2.33E-14, -1.01E-14, -2.37E-14,
-9.41E-15, -2.39E-14, -9.85E-15, -2.38E-14, -9.94E-15, -2.41E-14,
-9.74E-15, -2.41E-14, -9.62E-15, -2.45E-14, -8.83E-15, -2.45E-14,
-9.26E-15, -2.45E-14, -9.08E-15, -2.51E-14, -9.16E-15, -2.52E-14,
-8.80E-15, -2.51E-14, -7.80E-15, -2.57E-14, -8.15E-15, -2.55E-14,
-7.97E-15, -2.52E-14, -7.80E-15, -2.54E-14, -7.54E-15, -2.59E-14,
-6.99E-15, -2.63E-14, -7.51E-15, -2.65E-14, -6.88E-15, -2.65E-14,
-6.24E-15, -2.68E-14, -7.35E-15, -2.71E-14, -7.19E-15, -2.68E-14,
-6.97E-15, -2.64E-14, -7.16E-15, -2.68E-14, -7.66E-15, -2.72E-14,
-6.41E-15, -2.69E-14, -6.84E-15, -2.70E-14, -5.26E-15, -2.68E-14,
-6.39E-15, -2.72E-14, -5.83E-15, -2.88E-14, -6.13E-15, -2.74E-14,
-6.60E-15, -2.68E-14, -6.57E-15, -2.75E-14, -6.24E-15, -2.82E-14,
-6.09E-15, -2.65E-14, -5.79E-15, -2.74E-14, -5.21E-15, -2.85E-14,
-4.83E-15, -2.88E-14, -5.58E-15, -2.77E-14, -5.77E-15, -2.74E-14,
-5.66E-15, -2.83E-14, -5.44E-15, -2.80E-14, -5.87E-15, -2.81E-14,
-5.15E-15, -2.80E-14, -4.67E-15, -2.80E-14, -4.50E-15, -2.85E-14,
-5.25E-15, -2.83E-14, -4.83E-15, -2.79E-14, -4.88E-15, -2.86E-14,
-4.65E-15, -2.87E-14, -4.29E-15, -2.90E-14, -4.09E-15, -2.84E-14,
-5.16E-15, -2.87E-14, -4.55E-15, -2.93E-14, -4.36E-15, -2.93E-14,
-3.92E-15, -2.84E-14, -4.34E-15, -2.89E-14, -4.40E-15, -2.89E-14,
-4.51E-15, -2.90E-14, -3.67E-15, -2.86E-14, -5.12E-15, -2.92E-14,
-4.05E-15, -2.93E-14, -5.19E-15, -2.89E-14, -4.23E-15, -2.89E-14,
-4.74E-15, -2.88E-14, -4.28E-15, -2.94E-14, -4.26E-15, -2.95E-14,
-4.12E-15, -2.90E-14, -4.26E-15, -2.99E-14, -4.54E-15, -2.99E-14,
-4.64E-15, -3.00E-14, -3.64E-15, -2.90E-14, -4.40E-15, -3.03E-14,
-3.89E-15, -2.98E-14, -5.00E-15, -2.94E-14, -3.97E-15, -3.12E-14,
-4.19E-15, -3.01E-14, -4.36E-15, -3.01E-14, -4.58E-15, -2.94E-14,
-4.72E-15, -2.98E-14, -4.25E-15, -3.04E-14, -4.33E-15, -3.05E-14,
-4.22E-15, -2.99E-14, -4.39E-15, -3.00E-14, -4.44E-15, -2.98E-14,
-4.11E-15, -2.98E-14, -3.89E-15, -2.96E-14, -3.83E-15, -2.91E-14,
-5.05E-15, -2.99E-14, -3.72E-15, -2.96E-14, -3.94E-15, -3.04E-14,
-3.66E-15, -2.90E-14, -2.72E-15, -2.90E-14, -3.77E-15, -3.09E-14,
-3.16E-15, -2.88E-14, -2.28E-15, -3.09E-14, -3.44E-15, -2.93E-14,
-2.78E-15, -2.95E-14, -3.00E-15, -2.95E-14, -2.22E-15, -2.99E-14,
-4.27E-15, -2.89E-14, -2.28E-15, -2.93E-14, -4.16E-15, -2.98E-14,
-2.00E-15, -2.91E-14, -2.33E-15, -3.03E-14, -3.00E-15, -2.98E-14,
-3.05E-15, -2.96E-14, -2.61E-15, -3.00E-14, -2.33E-15, -3.00E-14,
-2.22E-15, -2.95E-14, -2.66E-15, -2.94E-14, -2.66E-15, -2.94E-14,
-2.39E-15, -3.03E-14, -2.22E-15, -2.97E-14, -2.66E-15, -2.93E-14,
-2.83E-15, -2.95E-14, -2.61E-15, -2.94E-14, -2.16E-15, -2.99E-14,
-2.28E-15, -2.94E-14, -2.00E-15, -2.92E-14, -3.66E-15, -2.90E-14,
-2.28E-15, -3.04E-14, -2.33E-15, -2.91E-14, -1.50E-15, -2.91E-14,
-1.39E-15, -3.01E-14, -1.67E-15, -2.88E-14, -2.28E-15, -2.80E-14,
-2.44E-15, -2.87E-14, -9.99E-16, -2.84E-14, -1.83E-15, -2.90E-14,
-1.33E-15, -2.87E-14, -2.78E-16, -2.86E-14, -1.33E-15, -2.85E-14,
-9.44E-16, -2.99E-14, -8.88E-16, -2.77E-14, -9.99E-16, -2.91E-14,
-1.17E-15, -2.82E-14, -2.78E-16, -2.83E-14, -1.55E-15, -2.76E-14,
-1.05E-15, -2.75E-14, -8.88E-16, -2.81E-14, -3.33E-16, -2.79E-14,
-1.11E-15, -2.93E-14, -4.44E-16, -2.81E-14, -2.78E-16, -2.85E-14,
-1.33E-15, -2.79E-14, -1.67E-16, -2.79E-14, -7.77E-16, -2.78E-14,
-5.55E-17, -2.84E-14, -6.66E-16, -2.85E-14, -7.77E-16, -2.74E-14,
-7.77E-16, -2.76E-14, -2.22E-16, -2.81E-14, 4.44E-16, -2.84E-14,
-7.22E-16, -2.75E-14, -6.66E-16, -2.75E-14, -6.66E-16, -2.78E-14,
2.78E-16, -2.77E-14, -8.88E-16, -2.70E-14, -3.89E-16, -2.69E-14,
-4.44E-16, -2.84E-14, 1.50E-15, -2.75E-14, -3.33E-16, -2.76E-14,
1.11E-16, -2.79E-14, 0, -2.82E-14, 2.78E-16, -2.82E-14, -7.22E-16,
-2.73E-14, 3.33E-16, -2.74E-14, 1.11E-16, -2.79E-14, -1.11E-16,
-2.80E-14, -2.22E-16, -2.79E-14, -7.77E-16, -2.74E-14, -1.11E-16,
-2.80E-14, 4.44E-16, -2.79E-14, 9.99E-16, -2.71E-14, -3.33E-16,
-2.65E-14, -3.33E-16, -2.73E-14, 1.44E-15, -2.70E-14, 0, -2.69E-14,
3.33E-16, -2.65E-14, 5.55E-16, -2.80E-14, 1.78E-15, -2.74E-14, 7.77E-16,
-2.74E-14, -5.55E-16, -2.76E-14, 8.88E-16, -2.80E-14, 1.22E-15, -2.76E-14,
6.66E-16, -2.81E-14, 3.33E-16, -2.80E-14, 1.11E-15, -2.93E-14, 7.77E-16,
-2.74E-14, 4.44E-16, -2.87E-14, 9.99E-16, -2.80E-14, 6.66E-16, -2.88E-14,
9.99E-16, -2.79E-14, 8.88E-16, -2.77E-14, 7.77E-16, -2.78E-14, -2.22E-16,
-2.83E-14, 1.11E-15, -2.71E-14, 1.22E-15, -2.78E-14, 5.55E-16, -2.66E-14,
4.44E-16, -2.76E-14, 1.11E-16, -2.73E-14, 7.77E-16, -2.70E-14, -1.11E-16,
-2.70E-14, -3.33E-16, -2.78E-14, 9.99E-16, -2.82E-14, 6.66E-16, -2.79E-14,
4.44E-16, -2.82E-14, -1.11E-16, -2.84E-14, 6.66E-16, -2.83E-14, 1.11E-16,
-2.69E-14, 2.22E-16, -2.72E-14, 8.88E-16, -2.80E-14, 1.11E-16, -2.81E-14,
2.22E-16, -2.84E-14, 7.77E-16, -2.79E-14, 8.88E-16, -2.83E-14, 4.44E-16,
-2.86E-14, 1.44E-15, -2.84E-14, 4.44E-16, -2.76E-14, 0, -2.76E-14,
-1.11E-16, -2.83E-14, 9.99E-16, -2.75E-14, 7.77E-16, -2.77E-14, 7.77E-16,
-2.86E-14, 2.22E-16, -2.90E-14, 2.22E-16, -2.81E-14, 3.33E-16, -2.76E-14,
-3.33E-16, -2.81E-14, 5.55E-16, -2.82E-14, -5.55E-16, -2.89E-14, 1.11E-15,
-2.80E-14, -1.11E-16, -2.82E-14, 6.66E-16, -2.84E-14, -6.66E-16, -2.83E-14,
-1.11E-16, -2.79E-14, 0, -2.98E-14, -3.33E-16, -2.86E-14, 1.11E-16,
-2.81E-14, -3.33E-16, -2.79E-14, -7.77E-16, -2.83E-14, -7.77E-16,
-2.90E-14, 2.22E-16, -2.83E-14, -5.55E-16, -2.88E-14, -3.33E-16, -2.84E-14,
-1.11E-16, -3.06E-14, -5.55E-16, -2.88E-14, -3.33E-16, -2.93E-14, 5.55E-16,
-2.91E-14, -1.11E-16, -2.96E-14, -1.22E-15, -2.83E-14, -5.55E-16,
-2.88E-14, -9.99E-16, -2.87E-14, -1.11E-15, -2.94E-14, -6.66E-16,
-3.01E-14, -3.33E-16, -2.90E-14, -6.66E-16, -3.01E-14, -8.88E-16,
-3.14E-14, -8.88E-16, -2.88E-14, -2.78E-15, -2.96E-14, -1.11E-15,
-3.01E-14, -9.99E-16, -3.05E-14, -9.99E-16, -2.99E-14, -1.67E-15,
-2.95E-14, -1.44E-15, -3.06E-14, -1.67E-15, -3.05E-14, -1.55E-15,
-3.12E-14, -1.55E-15, -2.96E-14, -1.55E-15, -3.11E-14, -1.55E-15,
-3.04E-14, -5.00E-16, -2.99E-14, -5.55E-16, -3.10E-14, -1.39E-15,
-3.14E-14, -1.72E-15, -3.10E-14, -2.22E-15, -3.06E-14, -7.22E-16,
-2.87E-14, -7.77E-16, -3.10E-14, -3.61E-15, -3.13E-14, -7.77E-16,
-3.05E-14, -3.83E-15, -3.08E-14, -1.72E-15, -3.18E-14, -2.05E-15,
-3.13E-14, -2.44E-15, -3.05E-14, -2.55E-15, -3.27E-14, -2.11E-15,
-3.04E-14, -2.00E-15, -3.29E-14, -2.05E-15, -2.98E-14, -6.66E-16,
-2.96E-14, -2.44E-15, -3.20E-14, -2.33E-15, -3.14E-14, -2.50E-15,
-3.05E-14, -2.94E-15, -3.02E-14, -2.05E-15, -3.03E-14, -1.78E-15,
-3.13E-14, -1.94E-15, -3.11E-14, -1.72E-15, -3.03E-14, -2.61E-15,
-3.14E-14, -2.28E-15, -3.14E-14, -2.16E-15, -3.06E-14, -1.78E-15,
-3.08E-14, -1.72E-15, -3.08E-14, -2.50E-15, -3.08E-14, -2.66E-15,
-3.04E-14, -2.44E-15, -3.21E-14, -1.22E-15, -3.13E-14, -3.61E-15,
-3.08E-14, -2.16E-15, -2.99E-14, -2.05E-15, -2.99E-14, -2.89E-15,
-3.09E-14, -1.83E-15, -3.16E-14, -5.55E-16, -3.08E-14, -1.50E-15,
3.02E-14, -9.99E-16, -3.23E-14, -2.72E-15, -3.04E-14, -1.39E-15,
-2.90E-14, -2.33E-15, -3.00E-14, -1.50E-15, -3.08E-14, -3.33E-15,
-3.12E-14, -9.99E-16, -3.11E-14, -2.66E-15, -3.06E-14, -1.44E-15,
-3.02E-14, -1.67E-15, -3.16E-14, -7.77E-16, -3.06E-14, -9.99E-16,
-3.05E-14, -1.50E-15, -3.11E-14, -1.33E-15, -3.14E-14, -3.33E-15,
3.09E-14, -1.39E-15, -3.08E-14, -2.00E-15, -3.16E-14, -1.61E-15,
-3.02E-14, -1.55E-15, -3.10E-14, -1.78E-15, -2.95E-14, -2.16E-15,
-3.09E-14, -1.33E-15, -3.06E-14, -9.44E-16, -3.10E-14, -1.11E-15,
-3.02E-14, -1.83E-15, -3.06E-14, -1.94E-15, -3.13E-14, -1.61E-15,
-3.00E-14, -1.11E-15, -3.00E-14, -9.99E-16, -3.00E-14, -1.61E-15,
-3.11E-14, -1.11E-15, -3.05E-14, -7.77E-16, -3.00E-14, -1.94E-15,
-2.90E-14, -1.28E-15, -3.11E-14, -1.39E-15, -2.95E-14, -8.33E-16,
-2.98E-14, -1.67E-15, -3.02E-14, -2.05E-15, -3.08E-14, -5.55E-16,
-2.96E-14, -9.44E-16, -2.95E-14, -6.66E-16, -3.06E-14, -1.39E-15,
-3.04E-14, -1.17E-15, -3.05E-14, -6.11E-16, -2.94E-14, -1.17E-15,
-2.96E-14, -7.22E-16, -2.96E-14, -5.00E-16, -3.03E-14, 2.78E-16,
-3.02E-14, -5.55E-17, -2.94E-14, -3.33E-16, -3.06E-14, -1.67E-16,
-2.89E-14, 8.33E-16, -2.93E-14, -1.05E-15, -2.88E-14, -6.11E-16,
-3.00E-14, -1.22E-15, -3.04E-14, -9.44E-16, -2.91E-14, -1.28E-15,
-2.93E-14, -5.55E-16, -2.99E-14, -1.33E-15, -2.97E-14, -1.17E-15,
-2.90E-14, -1.89E-15, -3.00E-14, -5.55E-17,
]
|
StarcoderdataPython
|
3379169
|
<filename>app_view_data/apps.py
from django.apps import AppConfig
class AppViewDataConfig(AppConfig):
name = 'app_view_data'
|
StarcoderdataPython
|
4822600
|
# Nuix Worker side script for Virus Total lookup
# v1.0
# updated 2021-01-28
import urllib2
import json
import time
# APIKEY must be set. Get one from Virus Total
# Please note Virus Total's requirements for the Public API below
####
# The Public API is limited to 500 requests per day and a rate of 4 requests per minute.
# The Public API must not be used in commercial products or services.
# The Public API must not be used in business workflows that do not contribute new files.
APIKEY = ""
# For PoCs using a Public API key, there is a rate limit of 4 requests/minute.
# It is therefore advisable to set a sleep time here of 15 (seconds)
# When using a Premium Key this can be set to 0
SLEEP_TIME = 15
# Virus Total API url for file ID check
FILEURL = "https://www.virustotal.com/api/v3/files/"
# List mime types to INCLUDE here. This can avoid processing files that are not of interest.
# To run against every item with an md5, set MIME_INCLUSIONS = None
MIME_INCLUSIONS = [
"application/exe",
"application/java-class",
"application/octet-stream",
"application/pdf"
]
# Define which properties you wish to be set on the item here.
# Items set to True will be added as a property / tag (if available)
# Change to False if you do not wish to add a particular property / tag
SET_VHASH = True
SET_IMPHASH = True
SET_AUTHENTIHASH = True
SET_TAGS = True
def nuixWorkerItemCallback(worker_item):
source_item = worker_item.getSourceItem()
mime_type = source_item.getType().getName()
if not MIME_INCLUSIONS or mime_type in MIME_INCLUSIONS:
# Get this item's MD5
md5 = worker_item.digests.md5
if md5 is not None:
fullUrl = FILEURL + str(md5)
try:
req = urllib2.Request(fullUrl)
req.add_header('x-apikey', APIKEY)
response = urllib2.urlopen(req)
data = json.load(response)
properties = source_item.getProperties()
# The count of AVs identifying the file as Malicious
worker_item.addCustomMetadata("AVs identifying item as malicious", data["data"]["attributes"]["last_analysis_stats"]["malicious"],'text','user')
# vHash
if SET_VHASH and data["data"]["attributes"].has_key("vhash"):
properties["vHash"] = data["data"]["attributes"]["vhash"]
# Import Hash
if SET_IMPHASH and data["data"]["attributes"].has_key("pe_info") and data["data"]["attributes"]["pe_info"].has_key("imphash"):
properties["Import Hash"] = data["data"]["attributes"]["pe_info"]["imphash"]
# Authentihash
if SET_AUTHENTIHASH and data["data"]["attributes"].has_key("authentihash"):
properties["Authentihash"] = data["data"]["attributes"]["authentihash"]
# Virus Total defined tags. Often this can be a list that needs to be looped through
if SET_TAGS and data["data"]["attributes"].has_key("tags"):
for tag in data["data"]["attributes"]["tags"]:
worker_item.addTag("VirusTotal|" + tag)
# Finally the analysis results provide the details from each AV, so loop through them
for scanner, res in data["data"]["attributes"]["last_analysis_results"].iteritems():
if res["result"] is not None:
worker_item.addCustomMetadata("VirusTotal " + scanner,res["result"],'text','user')
worker_item.setItemProperties(properties)
except urllib2.HTTPError, e:
# 404 returned when the md5 doesn't exist on VT
if str(e.code) == "404":
worker_item.addCustomMetadata("VirusTotal","Item md5 not matched in database",'text','user')
# 401 Auth error, likely API key issue
elif str(e.code) == "401":
worker_item.addCustomMetadata("VirusTotal","Unauthorised. Invalid API key?",'text','user')
else:
worker_item.addCustomMetadata('Processing Error','HTTPError = ' + str(e.code),'text','user')
except urllib2.URLError, e:
worker_item.addCustomMetadata('Processing Error','URLError = ' + str(e.reason),'text','user')
except Exception:
import traceback
worker_item.addCustomMetadata('Processing Error','exception: ' + traceback.format_exc(),'text','user')
time.sleep(SLEEP_TIME)
|
StarcoderdataPython
|
176878
|
from titrationFitter.titrationFitter import Component, System, Titration, loadModel
|
StarcoderdataPython
|
3308691
|
<reponame>SpeagleYao/IP_Final_Project<gh_stars>0
from img_aug import data_generator
from models import *
from loss import *
import numpy as np
import cv2
import torch
model = CENet_My()
model.load_state_dict(torch.load('./pth/CENet_My.pth'))
model.eval()
criterion = DiceLoss()
g_val = data_generator('./data/img_val.npy', './data/tar_val.npy', 10, train=False)
img, tar = g_val.gen()
out = model(img)
loss_val = criterion(1-out, 1-tar)
print("Loss_val:{0}".format(format(loss_val, ".4f")))
out = torch.where(out>=0.5, 1, 0)
out = out.numpy().reshape(10, 224, 224)*255
tar = tar.detach().numpy().reshape(10, 224, 224)*255
for i in range(out.shape[0]):
a = np.hstack((tar[i], out[i]))
cv2.imwrite('./prdimg/prdimg'+str(i)+'.png', a)
|
StarcoderdataPython
|
1741322
|
<filename>examples/neighborhood-2.py
from streamsvg import Drawing
s = Drawing()
s.addNode("a")
s.addNode("b")
s.addNode("c")
s.addNode("d")
s.addLink("a", "b", 0, 4,color="#BBBBBB",width=2)
s.addLink("a", "b", 6, 9,color="#BBBBBB",width=2)
s.addLink("a", "c", 2, 5, height=0.4,width=3)
s.addLink("b", "c", 1, 8,width=3)
s.addLink("b", "d", 7, 10, height=0.4,color="#BBBBBB",width=2)
s.addLink("c", "d", 6, 9,width=3)
s.addNodeCluster("a",[(2,5)],color="blue",width=3)
s.addNodeCluster("b",[(1,8)],color="blue",width=3)
s.addNodeCluster("d",[(6,9)],color="blue",width=3)
s.addTimeLine(ticks=2)
|
StarcoderdataPython
|
1624607
|
<reponame>ketsu8/prettycode
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from widgets.codeedit import QCodeEdit
from windows.settings import PreferencesWindow
from windows.projects import ProjectCreationWindow
from resources import __resourcesDirectory__
from settings import *
_ = returnLanguage(language)
class MainWindow(QMainWindow):
def __init__(self):
super(MainWindow, self).__init__()
self.setupUI()
def setupEditor(self):
self.editor = QCodeEdit()
self.editor.cursorPositionChanged.connect(lambda: self.lineStatusLabel.setText(_('Ln {line}, Col {column}').format(column='<b>' + str(self.editor.textCursor().columnNumber() + 1) + '</b>', line='<b>' + str(self.editor.textCursor().blockNumber() + 1) + '</b>')))
def setupCompleter(self):
self.statusBar().showMessage(_('Setting-up completer...'))
self.completer = QCompleter(self)
self.completer.setModelSorting(QCompleter.CaseInsensitivelySortedModel)
self.completer.setCaseSensitivity(Qt.CaseInsensitive)
self.completer.setWrapAround(False)
self.completer.setModel(QStringListModel([], self.completer))
self.editor.setCompleter(self.completer)
def setupToolbar(self):
self.statusBar().showMessage(_('Setting-up toolbar...'))
self.toolbar = QToolBar('Toolbar')
self.toolbar.setVisible(toolBarEnable)
self.toolbar.setStyleSheet('padding: 8px; background: #333333; border-radius: 0px; spacing: 15px;')
self.addToolBar(Qt.LeftToolBarArea, self.toolbar)
self.toolbar.setMovable(False)
from os.path import join
self.toolbar.addAction(QIcon(join(__resourcesDirectory__, 'icons', 'run.png')), _('Build and Run'))
self.toolbar.addAction(QIcon(join(__resourcesDirectory__, 'icons', 'package.png')), _('Build in package'))
self.toolbar.addAction(QIcon(join(__resourcesDirectory__, 'icons', 'settings.png')), _('Project Settings'))
self.toolbar.addAction(QIcon(join(__resourcesDirectory__, 'icons', 'open.png')), _('Open Project'))
self.toolbar.addAction(QIcon(join(__resourcesDirectory__, 'icons', 'save.png')), _('Save Project'))
def setupButtomPanel(self):
self.statusBar().showMessage(_('Setting-up buttom panel...'))
self.buttomPanel = QListView()
self.buttomPanel.setVisible(buttomPanelEnable)
self.buttomPanel.setStyleSheet('color: white; padding: 10px; selection-background-color: #37373D; background: #252526; border-radius: 0px;')
model = QStandardItemModel()
self.buttomPanel.setModel(model)
def setupMenubar(self):
self.statusBar().showMessage(_('Setting-up menubar...'))
styleSheet = 'color: white; background: #3A3935; border-radius: 0px; min-height: 25px; spacing: 18px'
self.menuBar().setStyleSheet(styleSheet)
fileMenu = self.menuBar().addMenu(_('File'))
newMenu = fileMenu.addMenu(_('New...'))
newMenu.addAction(_('Project'), lambda: ProjectCreationWindow(self).showNormal())
fileMenu.addAction(_('Preferences'), lambda: PreferencesWindow(self).showNormal())
editMenu = self.menuBar().addMenu(_('Edit'))
editMenu.addAction(_('Undo'), lambda: self.editor.undo(), 'Ctrl+Z')
editMenu.addAction(_('Redo'), lambda: self.editor.redo(), 'Ctrl+Y')
editMenu.addSeparator()
editMenu.addAction(_('Cut'), lambda: self.editor.cut(), 'Ctrl+X')
editMenu.addAction(_('Copy'), lambda: self.editor.copy(), 'Ctrl+C')
editMenu.addAction(_('Paste'), lambda: self.editor.paste(), 'Ctrl+V')
selectionMenu = self.menuBar().addMenu(_('Selection'))
selectionMenu.addAction(_('Select All'), lambda: self.editor.selectAll(), 'Ctrl+A')
formatMenu = self.menuBar().addMenu(_('Format'))
fontFormatMenu = formatMenu.addMenu(_('Font'))
fontFormatMenu.addAction(_('Zoom In'), lambda: self.editor.zoomIn(), 'Ctrl++')
fontFormatMenu.addAction(_('Zoom Out'), lambda: self.editor.zoomOut(), 'Ctrl+-')
fontFormatMenu.addAction(_('Restore Defaults'), lambda: self.editor.setFont(self.editor.getFont()), 'Ctrl+0')
windowMenu = self.menuBar().addMenu(_('Window'))
windowMenu.addAction(_('Minimize'), lambda: self.showMinimized(), 'Ctrl+M')
windowMenu.addAction(_('Zoom'), lambda: self.showMaximized())
helpMenu = self.menuBar().addMenu(_('Help'))
helpMenu.addAction(_('About {productName}').format(productName=QCoreApplication.applicationName()), lambda: QMessageBox.about(self, _('About {productName}').format(productName=QCoreApplication.applicationName()), 'Pretty development IDE.\nVersion: {productVersion}'.format(productVersion=QCoreApplication.applicationVersion())))
helpMenu.addAction(_('About Qt'), lambda: QMessageBox.aboutQt(self, _('About Qt')))
def setupSplitter(self):
self.statusBar().showMessage(_('Setting-up splitter...'))
self.splitter = QSplitter()
self.splitter.setStyleSheet('background: #252526; border-radius: 0px;')
self.splitter.setOrientation(Qt.Orientation.Vertical)
self.splitter.addWidget(self.editor)
self.splitter.addWidget(self.buttomPanel)
def setupStatusbar(self):
self.statusBar().setStyleSheet('color: white; spacing: 15px; background: #A700C5; border-radius: 0px;')
self.statusBar().setVisible(statusBarEnable)
self.lineStatusLabel = QLabel(_('Ln {line}, Col {column}').format(column='<b>1</b>', line='<b>1</b>'))
self.statusBar().addPermanentWidget(self.lineStatusLabel)
def setupUI(self):
self.setupStatusbar()
self.setupEditor()
self.setupToolbar()
self.setupButtomPanel()
self.setupMenubar()
self.setupCompleter()
self.setupSplitter()
self.setCentralWidget(self.splitter)
self.statusBar().showMessage(_('All set!'))
self.setWindowTitle(QCoreApplication.applicationName())
self.setUnifiedTitleAndToolBarOnMac(True)
|
StarcoderdataPython
|
1625181
|
class basicdspalgorithm:
    # default constructor: takes no parameters; holds the two operand sequences
def __init__(self):
self.first = 0
self.second= 0
def conv(self,x,h):
self.first=x
self.second=h
N=len(self.first)+len(self.second)-1
x1=[0]*N
h1=[0]*N
m=len(self.first)
n=len(self.second)
self.answer=[0]*N
for i in range(m):
x1[i]=self.first[i]
for i in range(n):
h1[i]=self.second[i]
for i in range(N):
for j in range(i+1):
self.answer[i]=self.answer[i]+ x1[j]*h1[i-j]
return self.answer
def circonv(self,x,h):
import operator as op
#self.first=x
#self.second=h
N=max(len(x),len(h))
y=[0]*N
x1=[0]*N
h1=[0]*N
for i in range(len(x)):
x1[i]=x[i]
for i in range(len(h)):
h1[i]=h[i]
for i in range(N):
for j in range(N):
y[i]=y[i]+x1[j]*h1[op.mod((i-j),N)]
return y
def fft(self,x):
import cmath as mt
N=len(x)
X=[0]*N
for k in range(N):
for n in range(N):
X[k]=X[k] + x[n]*mt.exp(-1j*2*mt.pi*k*n/N)
return X
def auto(self,x):
x1=x[::-1]
N=len(x)+len(x1)-1
x11=[0]*N
h1=[0]*N
m=len(x)
n=len(x1)
y=[0]*N
for i in range(m):
x11[i]=x[i]
for i in range(n):
h1[i]=x1[i]
for i in range(N):
for j in range(i+1):
y[i]=y[i]+ x11[j]*h1[i-j]
return y
def cross(self,x,h):
h1=h[::-1]
N=len(x)+len(h)-1
x11=[0]*N
h11=[0]*N
m=len(x)
n=len(h)
y=[0]*N
for i in range(m):
x11[i]=x[i]
for i in range(n):
h11[i]=h1[i]
for i in range(N):
for j in range(i+1):
y[i]=y[i]+ x11[j]*h11[i-j]
return y
|
StarcoderdataPython
|
78330
|
# Modify Program 7.2, the hangman game
# Use a file in which one word is stored per line
# Use a text editor to generate the file
# When the program starts, use this file to load (read) the list of words
# Also try asking for the player's name and generating a file with the number of hits of the five best players
import sys
import random
FILE_SCOREBOARD = 'placar.txt'
FILE_WORDS_LIST = 'palavras.txt'
wordsList = []
scoreboardDict = {}
def load_words():
try:
file = open(FILE_WORDS_LIST, 'r', encoding='utf-8')
except FileNotFoundError:
print(f'\n\nArquivo "{FILE_WORDS_LIST}" não encontrado!')
print(f'Para jogar, crie um arquivo de palavras com nome "{FILE_WORDS_LIST}", contendo uma palavra por linha.\n\n')
sys.exit(1)
for word in file.readlines():
word = word.strip().lower()
if word != '':
wordsList.append(word)
file.close()
def load_scoreboard():
try:
file = open(FILE_SCOREBOARD, 'r+')
except FileNotFoundError:
file = open(FILE_SCOREBOARD, 'w+')
for line in file.readlines():
line = line.strip()
if line != '':
user, counter = line.split(';')
scoreboardDict[user] = int(counter)
file.close()
def save_scoreboard():
file = open(FILE_SCOREBOARD, 'w', encoding='utf-8')
for user in scoreboardDict.keys():
file.write(f'{user};{scoreboardDict[user]}\n')
file.close()
def update_scoreboard(user):
if user in scoreboardDict:
scoreboardDict[user] += 1
else:
scoreboardDict[user] = 1
save_scoreboard()
def show_scoreboard():
scoreboardOrdered = []
for user, score in scoreboardDict.items():
scoreboardOrdered.append([user, score])
scoreboardOrdered.sort(key=lambda score: score[1])
print('\n\nMelhores jogadores por número de acertos:')
scoreboardOrdered.reverse()
for up in scoreboardOrdered:
print(f'{up[0]:30s} {up[1]:10d}')
load_words()
load_scoreboard()
word = wordsList[random.randint(0, len(wordsList)-1)]
typed = []
hits = []
errors = 0
while True:
password = ''
for letter in word:
password += letter if letter in hits else '_'
print(password)
if password == word:
print('Você acertou!')
name = input('Digite seu nome: ')
update_scoreboard(name)
break
attempt = input('\nDigite uma letra: ').lower().strip()
if attempt in typed:
print('Você já tentou esta letra!')
continue
else:
typed += attempt
if attempt in word:
hits += attempt
else:
errors += 1
print('Você errou!')
print('X==:==\nX : ')
print('X O ' if errors >= 1 else 'X')
line2 = ''
if errors == 2:
line2 = r' | '
elif errors == 3:
line2 = r' \| '
elif errors >= 4:
line2 = r' \|/ '
print(f'X{line2}')
line3 = ''
if errors == 5:
line3 += r' / '
elif errors >= 6:
line3 += r' / \ '
print(f'X{line3}')
print('X\n===========')
if errors == 6:
print('Enforcado!')
break
show_scoreboard()
|
StarcoderdataPython
|
1658403
|
<gh_stars>10-100
# coding: utf-8
import setuptools
setuptools.setup(
name='cloudkeeper',
packages=setuptools.find_packages(),
install_requires=[
'requests',
'websocket-client',
],
)
|
StarcoderdataPython
|
1714722
|
from .dynpaper import main as dpmain
from sys import argv
def main():
dpmain(argv)
|
StarcoderdataPython
|
3229007
|
<gh_stars>0
'''
Capsules for Object Segmentation (SegCaps)
Original Paper by <NAME> and <NAME> (https://arxiv.org/abs/1804.04241)
Code written by: <NAME>
If you use significant portions of this code or the ideas from our paper, please cite it :)
If you have any questions, please email me at <EMAIL>.
This file is used for loading training, validation, and testing data into the models.
It is specifically designed to handle 3D single-channel medical data.
Modifications will be needed to train/test on normal 3-channel images.
=====
This program includes all of the 3D image-processing functions for UNet, Tiramisu, Capsule Nets (capsbasic) or SegCaps (segcapsr1 or segcapsr3).
@author: <NAME> a.k.a. Clark
@copyright: 2018 Cheng-Lin Li@Insight AI. All rights reserved.
@license: Licensed under the Apache License v2.0. http://www.apache.org/licenses/
@contact: <EMAIL>
Tasks:
    Based on the parameters passed in from main.py, the program loads 3D image files from folders.
    It converts all image files into numpy format, then stores the training/testing images into
    ./data/np_files and the training (and testing) file lists under the ./data/split_list folder.
    You need to remove these two folders every time you want to replace your training image and mask files.
    The program only reads data from the np_files folder.
Data:
    This package was tested on MS COCO 2017 and LUNA 2016.
    You can leverage your own data set, but the mask images should follow the format of MS COCO or have background color = 0 on each channel.
Enhancement:
    1. Ported to Python version 3.6
    2. Program code cleaning
'''
from __future__ import print_function
import logging
from os.path import join, basename
from os import makedirs
import numpy as np
from numpy.random import rand, shuffle
import SimpleITK as sitk
import matplotlib.pyplot as plt
plt.ioff()
from utils.custom_data_aug import augmentImages
from utils.threadsafe import threadsafe_generator
debug = 0
mean = np.array([18.426106306720985, 24.430354760142666, 24.29803657467962, 19.420110564555472])
std = np.array([104.02684046042094, 136.06477850668273, 137.4833895418739, 109.29833288911334])
def convert_data_to_numpy(root_path, img_name, no_masks=False, overwrite=False):
fname = img_name[:-7]
numpy_path = join(root_path, 'np_files')
img_path = join(root_path, 'imgs')
mask_path = join(root_path, 'masks')
fig_path = join(root_path, 'figs')
try:
makedirs(numpy_path)
except:
pass
try:
makedirs(fig_path)
except:
pass
    # The min and max pixel values to clip to in a (normalised) BraTS image volume
brats_min = -0.18
brats_max = 10
if not overwrite:
try:
with np.load(join(numpy_path, fname + '.npz')) as data:
return data['img'], data['mask']
except:
pass
try:
itk_img = sitk.ReadImage(join(img_path, img_name))
img = sitk.GetArrayFromImage(itk_img)
img = img.astype(np.float32)
img = np.rollaxis(img, 0, 4)
img = np.rollaxis(img, 0, 3)
img -= mean
img /= std
img = np.clip(img, + brats_min, brats_max)
img = (img - brats_min) / (brats_max - brats_min)
img = img[:, :, :, 0] # Select only flair during initial testing
if not no_masks:
itk_mask = sitk.ReadImage(join(mask_path, img_name))
mask = sitk.GetArrayFromImage(itk_mask)
mask = np.rollaxis(mask, 0, 3)
mask[mask < 0.5] = 0 # Background
mask[mask > 0.5] = 1 # Edema, Enhancing and Non enhancing tumor
mask = mask.astype(np.uint8)
try:
f, ax = plt.subplots(1, 3, figsize=(15, 5))
ax[0].imshow(img[:, :, img.shape[2] // 3], cmap='gray')
if not no_masks:
ax[0].imshow(mask[:, :, img.shape[2] // 3], alpha=0.15)
ax[0].set_title('Slice {}/{}'.format(img.shape[2] // 3, img.shape[2]))
ax[0].axis('off')
ax[1].imshow(img[:, :, img.shape[2] // 2], cmap='gray')
if not no_masks:
ax[1].imshow(mask[:, :, img.shape[2] // 2], alpha=0.15)
ax[1].set_title('Slice {}/{}'.format(img.shape[2] // 2, img.shape[2]))
ax[1].axis('off')
ax[2].imshow(img[:, :, img.shape[2] // 2 + img.shape[2] // 4], cmap='gray')
if not no_masks:
ax[2].imshow(mask[:, :, img.shape[2] // 2 + img.shape[2] // 4], alpha=0.15)
ax[2].set_title('Slice {}/{}'.format(img.shape[2] // 2 + img.shape[2] // 4, img.shape[2]))
ax[2].axis('off')
fig = plt.gcf()
fig.suptitle(fname)
plt.savefig(join(fig_path, fname + '.png'), format='png', bbox_inches='tight')
plt.close(fig)
except Exception as e:
logging.error('\n'+'-'*100)
logging.error('Error creating qualitative figure for {}'.format(fname))
logging.error(e)
logging.error('-'*100+'\n')
if not no_masks:
np.savez_compressed(join(numpy_path, fname + '.npz'), img=img, mask=mask)
else:
np.savez_compressed(join(numpy_path, fname + '.npz'), img=img)
if not no_masks:
return img, mask
else:
return img
except Exception as e:
logging.error('\n'+'-'*100)
logging.error('Unable to load img or masks for {}'.format(fname))
logging.error(e)
logging.error('Skipping file')
logging.error('-'*100+'\n')
return np.zeros(1), np.zeros(1)
@threadsafe_generator
def generate_train_batches(root_path, train_list, net_input_shape, net, batchSize=1, numSlices=1, subSampAmt=-1,
stride=1, downSampAmt=1, shuff=1, aug_data=1):
# Create placeholders for training
# (img_shape[1], img_shape[2], args.slices)
img_batch = np.zeros((np.concatenate(((batchSize,), net_input_shape))), dtype=np.float32)
mask_batch = np.zeros((np.concatenate(((batchSize,), net_input_shape))), dtype=np.uint8)
while True:
if shuff:
shuffle(train_list)
count = 0
for i, scan_name in enumerate(train_list):
try:
scan_name = scan_name[0]
path_to_np = join(root_path,'np_files',basename(scan_name)[:-6]+'npz')
logging.info('\npath_to_np=%s'%(path_to_np))
with np.load(path_to_np) as data:
train_img = data['img']
train_mask = data['mask']
except:
logging.info('\nPre-made numpy array not found for {}.\nCreating now...'.format(scan_name[:-7]))
train_img, train_mask = convert_data_to_numpy(root_path, scan_name)
if np.array_equal(train_img,np.zeros(1)):
continue
else:
logging.info('\nFinished making npz file.')
if numSlices == 1:
subSampAmt = 0
elif subSampAmt == -1 and numSlices > 1:
np.random.seed(None)
subSampAmt = int(rand(1)*(train_img.shape[2]*0.05))
indicies = np.arange(0, train_img.shape[2] - numSlices * (subSampAmt + 1) + 1, stride)
if shuff:
shuffle(indicies)
for j in indicies:
if not np.any(train_mask[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]):
continue
if img_batch.ndim == 4:
img_batch[count, :, :, :] = train_img[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]
mask_batch[count, :, :, :] = train_mask[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]
elif img_batch.ndim == 5:
# Assumes img and mask are single channel. Replace 0 with : if multi-channel.
img_batch[count, :, :, :, 0] = train_img[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]
mask_batch[count, :, :, :, 0] = train_mask[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]
else:
logging.error('\nError this function currently only supports 2D and 3D data.')
exit(0)
count += 1
if count % batchSize == 0:
count = 0
if aug_data:
img_batch, mask_batch = augmentImages(img_batch, mask_batch)
if debug:
if img_batch.ndim == 4:
plt.imshow(np.squeeze(img_batch[0, :, :, 0]), cmap='gray')
plt.imshow(np.squeeze(mask_batch[0, :, :, 0]), alpha=0.15)
elif img_batch.ndim == 5:
plt.imshow(np.squeeze(img_batch[0, :, :, 0, 0]), cmap='gray')
plt.imshow(np.squeeze(mask_batch[0, :, :, 0, 0]), alpha=0.15)
plt.savefig(join(root_path, 'logs', 'ex_train.png'), format='png', bbox_inches='tight')
plt.close()
if net.find('caps') != -1: # if the network is capsule/segcaps structure
yield ([img_batch, mask_batch], [mask_batch, mask_batch*img_batch])
else:
yield (img_batch, mask_batch)
if count != 0:
if aug_data:
img_batch[:count,...], mask_batch[:count,...] = augmentImages(img_batch[:count,...],
mask_batch[:count,...])
if net.find('caps') != -1:
yield ([img_batch[:count, ...], mask_batch[:count, ...]],
[mask_batch[:count, ...], mask_batch[:count, ...] * img_batch[:count, ...]])
else:
yield (img_batch[:count,...], mask_batch[:count,...])
@threadsafe_generator
def generate_val_batches(root_path, val_list, net_input_shape, net, batchSize=1, numSlices=1, subSampAmt=-1,
stride=1, downSampAmt=1, shuff=1):
# Create placeholders for validation
img_batch = np.zeros((np.concatenate(((batchSize,), net_input_shape))), dtype=np.float32)
mask_batch = np.zeros((np.concatenate(((batchSize,), net_input_shape))), dtype=np.uint8)
while True:
if shuff:
shuffle(val_list)
count = 0
for i, scan_name in enumerate(val_list):
try:
scan_name = scan_name[0]
path_to_np = join(root_path,'np_files',basename(scan_name)[:-6]+'npz')
with np.load(path_to_np) as data:
val_img = data['img']
val_mask = data['mask']
except:
logging.info('\nPre-made numpy array not found for {}.\nCreating now...'.format(scan_name[:-7]))
val_img, val_mask = convert_data_to_numpy(root_path, scan_name)
if np.array_equal(val_img,np.zeros(1)):
continue
else:
logging.info('\nFinished making npz file.')
if numSlices == 1:
subSampAmt = 0
elif subSampAmt == -1 and numSlices > 1:
np.random.seed(None)
subSampAmt = int(rand(1)*(val_img.shape[2]*0.05))
indicies = np.arange(0, val_img.shape[2] - numSlices * (subSampAmt + 1) + 1, stride)
if shuff:
shuffle(indicies)
for j in indicies:
if not np.any(val_mask[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]):
continue
if img_batch.ndim == 4:
img_batch[count, :, :, :] = val_img[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]
mask_batch[count, :, :, :] = val_mask[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]
elif img_batch.ndim == 5:
# Assumes img and mask are single channel. Replace 0 with : if multi-channel.
img_batch[count, :, :, :, 0] = val_img[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]
mask_batch[count, :, :, :, 0] = val_mask[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]
else:
logging.error('\nError this function currently only supports 2D and 3D data.')
exit(0)
count += 1
if count % batchSize == 0:
count = 0
if net.find('caps') != -1:
yield ([img_batch, mask_batch], [mask_batch, mask_batch * img_batch])
else:
yield (img_batch, mask_batch)
if count != 0:
if net.find('caps') != -1:
yield ([img_batch[:count, ...], mask_batch[:count, ...]],
[mask_batch[:count, ...], mask_batch[:count, ...] * img_batch[:count, ...]])
else:
yield (img_batch[:count,...], mask_batch[:count,...])
@threadsafe_generator
def generate_test_batches(root_path, test_list, net_input_shape, batchSize=1, numSlices=1, subSampAmt=0,
stride=1, downSampAmt=1):
# Create placeholders for testing
logging.info('\nload_3D_data.generate_test_batches')
print("Batch size {}".format(batchSize))
img_batch = np.zeros((np.concatenate(((batchSize,), net_input_shape))), dtype=np.float32)
count = 0
logging.info('\nload_3D_data.generate_test_batches: test_list=%s'%(test_list))
for i, scan_name in enumerate(test_list):
try:
scan_name = scan_name[0]
path_to_np = join(root_path,'np_files',basename(scan_name)[:-6]+'npz')
with np.load(path_to_np) as data:
test_img = data['img']
except:
logging.info('\nPre-made numpy array not found for {}.\nCreating now...'.format(scan_name[:-7]))
test_img = convert_data_to_numpy(root_path, scan_name, no_masks=True)
if np.array_equal(test_img,np.zeros(1)):
continue
else:
logging.info('\nFinished making npz file.')
if numSlices == 1:
subSampAmt = 0
elif subSampAmt == -1 and numSlices > 1:
np.random.seed(None)
subSampAmt = int(rand(1)*(test_img.shape[2]*0.05))
print(test_img.shape)
indicies = np.arange(0, test_img.shape[2] - numSlices * (subSampAmt + 1) + 1, stride)
for j in indicies:
if img_batch.ndim == 4:
img_batch[count, :, :, :] = test_img[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]
elif img_batch.ndim == 5:
# Assumes img and mask are single channel. Replace 0 with : if multi-channel.
img_batch[count, :, :, :, 0] = test_img[:, :, j:j + numSlices * (subSampAmt+1):subSampAmt+1]
else:
logging.error('Error this function currently only supports 2D and 3D data.')
exit(0)
count += 1
if count % batchSize == 0:
count = 0
yield (img_batch)
if count != 0:
yield (img_batch[:count,:,:,:])
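# --------------------------------------------------------------------------
# Minimal usage sketch (an assumption, not part of the original file): the
# generators above are written for a Keras-style training loop. `model`,
# `data_root`, `train_list`, `val_list` and `net_input_shape` below are
# illustrative placeholders only.
#
#   train_gen = generate_train_batches(data_root, train_list, net_input_shape,
#                                      net='segcapsr3', batchSize=4)
#   val_gen = generate_val_batches(data_root, val_list, net_input_shape,
#                                  net='segcapsr3', batchSize=4)
#   model.fit_generator(train_gen, steps_per_epoch=1000, epochs=20,
#                       validation_data=val_gen, validation_steps=100)
# --------------------------------------------------------------------------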
|
StarcoderdataPython
|
47538
|
class Solution:
def numJewelsInStones(self, J: str, S: str) -> int:
#map = {}
#for i in range(len(J)):
# map[J[i]] = 0
count = 0
for i in range(len(S)):
            if S[i] in J:
                count += 1
return count
J = "aAB"
S = "aAAbbbb"
print(Solution().numJewelsInStones(J, S))
|
StarcoderdataPython
|
1627012
|
import os
import redis
rdb = redis.StrictRedis(host = os.getenv('REDISTOGO_URL', 'redis'))
from bson.json_util import dumps
from utils.logger import log
class ConfigCls(object) :
def __init__(self) :
self.keys = {}
def __getattr__(self, attr) :
old_val = rdb.get(f'config-{attr}')
if old_val :
return old_val.decode('utf-8')
else :
return None
def ListAttrs(self) :
ret = {}
for k in self.keys.keys() :
ret[k] = self.__getattr__(k)
return ret
def SetValue(self, attr, value) :
old_val = self.__getattr__(attr)
rdb.set(f'config-{attr}', value)
self.keys[attr] = value
#log(obj = {'old_val': old_val, 'new_val': value})
Config = ConfigCls()
def _config(attr, default = '') :
Config.SetValue(attr, default)
def _config_env(attr, envvar, default = '') :
default = os.getenv(envvar, default)
_config(attr, default)
_config_env("BILICOOKIE_SESSDATA", "bilicookie_SESSDATA")
_config_env("BILICOOKIE_bili_jct", "bilicookie_bili_jct")
_config_env("YOUTUBE_API_KEYS", "GOOGLE_API_KEYs")
_config_env("DEFAULT_BLACKLIST", "DEFAULT_BLACKLIST")
_config_env("DEFAULT_BLACKLIST_POPULAR_TAG", "DEFAULT_BLACKLIST_POPULAR_TAG")
_config_env("MMDOCR_VERSION", "MMDOCR_VERSION")
|
StarcoderdataPython
|
52842
|
<reponame>youqad/oxford-hack-2020<gh_stars>0
from dataclasses import dataclass
import torch
import numpy as np
import pyro
import matplotlib.pyplot as plt
from pyro.infer import MCMC, NUTS
# import pyro.infer
# import pyro.optim
from pyro.distributions import Normal
# def model(data):
# """
# Explanation
# """
# coefs_mean = torch.zeros(dim)
# coefs = pyro.sample('beta', dist.Normal(coefs_mean, torch.ones(3)))
# y = pyro.sample('y', Bernoulli(logits=(coefs * data).sum(-1)), obs=labels)
# return y
# nuts_kernel = NUTS(model, adapt_step_size=True)
# mcmc = MCMC(nuts_kernel, num_samples=500, warmup_steps=300)
# mcmc.run(data)
# print(mcmc.get_samples()['beta'].mean(0))
# mcmc.summary(prob=0.5)
# def conditioned_model(model, sigma, y):
# return poutine.condition(model, data={"obs": y})(sigma)
# pyro.sample("obs_{}".format(i), dist.Bernoulli(f), obs=data[i])
# conditioned_scale = pyro.condition(scale, data={"measurement": 9.5})
# pyro.sample("measurement", dist.Normal(weight, 0.75), obs=9.5)
# def deferred_conditioned_scale(measurement, guess):
# return pyro.condition(scale, data={"measurement": measurement})(guess)
# svi = pyro.infer.SVI(model=conditioned_scale,
# guide=scale_parametrized_guide,
# optim=pyro.optim.SGD({"lr": 0.001, "momentum":0.1}),
# loss=pyro.infer.Trace_ELBO())
class Alternative:
"""
An alternative is a potential outcome for a decision making problem.
Example: Tesla is an alternative for the decision problem of choosing a car to buy.
"""
def __init__(self,name):
self.name=name
class Criterion:
"""
    A criterion is a parameter in a decision making problem.
    It is given by
    - a name 'name'
    - an optional boolean 'positive' to indicate whether the criterion has a positive or negative impact on the alternatives
Example: manoeuvrability might be a criterion when the alternatives are car brands.
"""
def __init__(self,name,positive=True):
self.name=name
self.positive=positive
class Weight:
"""
A weight represents how much a person values a certain criterion in a decision making problem.
A weight is given by
- a name 'name'
    - an optional distribution name 'dist' for modelling its uncertainty
- a value 'value' for the weight
- a criterion 'criterion'
Example: a weight of 21 can be given for the criterion manoeuvrability when car brands is the decision making problem.
"""
    def __init__(self, name, value, criterion, dist="Unif", variance=0):
        self.name = name
        self.dist = dist
        self.value = value
        self.variance = variance
        self.criterion = criterion.name
class AlternativeCriterionMatrix:
"""
TODO:write
"""
    def __init__(self):
        pass
class DecisionProblem:
"""
A decision problem consist of a choice of possible outcomes: alternatives
These alternatives depend on parameters: criteria
A person values certain criteria more than others. This is reflected in weights.
The weights and criteria for each alternative are fuzzy and are modelled with distributions.
These distributions may reflect a lack of knowledge, a lack of objective measure,
a true randomness in the process, etc.
Following the SMAA method, a person is guided to take a decision with three indicators.
- acceptabilityIndex: represents the approximate probability that a certain alternative is ranked first.
- centralWeightVector: represents a typical value for the weights that make a certain alternative ranked first.
- confidenceFactor: represents the probability of an alternative being ranked for weights given by centralWeightVector.
"""
def __init__(self,name,weights,criteria,alternatives):
self.name=name
self.weights=weights
self.criteria=criteria
self.alternatives=alternatives
def criteriaList(self):
return True
def alternativesList(self):
return True
def weightsSampler(self):
return True
def criteriaSampler(self):
return True
def rank(self,alternative_number,sample_crit_vector,sample_weight_vector):
return True
def rankAcceptabilityIndex(self,alternative_number,rank):
return True
def acceptabilityIndex(self,alternative_number):
"""
Test
"""
return self.rankAcceptabilityIndex(alternative_number,1)
def centralWeightVector(self,alternative_number):
"""
Test
"""
return True
def confidenceFactor(self,alternative_number):
"""
Test
"""
return True
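# --------------------------------------------------------------------------
# Illustrative usage sketch (an assumption; the methods above are still stubs
# that return True, so this only shows the intended wiring):
#
#   manoeuvrability = Criterion("manoeuvrability")
#   price = Criterion("price", positive=False)
#   w1 = Weight("w_manoeuvrability", 21, manoeuvrability)
#   w2 = Weight("w_price", 10, price)
#   tesla = Alternative("Tesla")
#   problem = DecisionProblem("car choice", [w1, w2],
#                             [manoeuvrability, price], [tesla])
#   problem.acceptabilityIndex(0)   # probability that Tesla is ranked first
# --------------------------------------------------------------------------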
|
StarcoderdataPython
|
3324148
|
from gpiozero import OutputDevice
from time import sleep
import sentry_sdk
# TODO: Add code comments to make it easier for a user to add additional pumps
sentry_sdk.init("https://[email protected]/1492001")
# Assign the pump based on what pin the Raspberry Pi is using
pump1 = OutputDevice(4)
# Set the pump toggle as off and toggle the pump on and running
def pump_on():
pump1.active_high = False
pump1.toggle()
# Pump is now running - define how long the pump should run (in seconds) in the on position set by pump_on above:
def pump_time():
seconds = 5
sleep(seconds)
# Toggle the pump into the off position and pause (in seconds):
def pump_stop():
pump1.toggle()
pause_seconds = 2
sleep(pause_seconds)
# Define how many times (cycles) the pump should turn on, then pause, and then turn off again:
cycles = 4
# This function will turn the pump on and off based on the number of cycles above. Do not change this code:
def pump_series():
for cycle in range(cycles):
pump_on()
pump_time()
pump_stop()
print("Cycle " f"{cycle} " "completed.")
if __name__ == '__main__':
pump_series()
print("All done! Good-bye!")
|
StarcoderdataPython
|
4837911
|
from setuptools import setup
setup(
name='oboe',
version='0.2',
description='Converts an Obsidian vault into HTML',
url='https://github.com/kmaasrud/oboe',
author='kmaasrud',
author_email='<EMAIL>',
license='MIT',
packages=['oboe'],
install_requires=[
'markdown2',
'regex',
'pypandoc'
],
zip_safe=False,
entry_points={
'console_scripts': [
'oboe=oboe:main'
]
}
)
|
StarcoderdataPython
|
183975
|
from graphene_django import DjangoObjectType
from graphene_django.forms.mutation import DjangoModelFormMutation
from graphene_django import DjangoListField
from graphql_jwt.decorators import login_required
from .models import *
from .forms import MemberCreationForm
import graphene
##################################
################################## OBJECTS TYPES
class MembersType(DjangoObjectType):
class Meta:
model = CustomUser
fields = '__all__'
class TagType(DjangoObjectType):
class Meta:
model = Tag
fields = '__all__'
########################################
########################################
######################################## Forms Mutations
class MembersMutation(DjangoModelFormMutation):
member = graphene.Field(MembersType)
class Meta:
form_class = MemberCreationForm
### main mutation
class Mutation(graphene.ObjectType):
add_member = MembersMutation.Field()
### main query
class Query(graphene.ObjectType):
all_members = graphene.List(MembersType)
get_current_member = graphene.Field(MembersType)
def resolve_all_members(root, info):
return CustomUser.objects.all()
@login_required
def resolve_get_current_member(root, info):
print(info.context.user)
return CustomUser.objects.get(pk=3)
|
StarcoderdataPython
|
1703991
|
# Generated by Django 3.1 on 2021-02-03 21:39
from django.db import migrations, models
from django.utils.text import slugify
import websites.models
COURSE_STARTER_SLUG = "course"
COURSE_STARTER_REPO_URL = "https://github.com/mitodl/ocw-course-hugo-starter"
COURSE_STARTER_REPO_NAME = "OCW Course Hugo Starter"
STARTER_SOURCE_GITHUB = "github"
STARTER_CONFIG = """
collections:
- label: "Page"
name: "page"
fields:
- {label: "Title", name: "title", widget: "string"}
- {label: "Content", name: "content", widget: "markdown"}
- label: "Resource"
name: "resource"
fields:
- {label: "Title", name: "title", widget: "string"}
- {label: "Description", name: "description", widget: "markdown"}
"""
def add_first_starter_repo(apps, schema_editor):
WebsiteStarter = apps.get_model("websites", "WebsiteStarter")
starter, created = WebsiteStarter.objects.get_or_create(
path=COURSE_STARTER_REPO_URL,
defaults=dict(
slug=COURSE_STARTER_SLUG,
name=COURSE_STARTER_REPO_NAME,
source=STARTER_SOURCE_GITHUB,
commit=None,
config=STARTER_CONFIG,
),
)
if created is False and starter.slug is None:
starter.slug = COURSE_STARTER_SLUG
starter.save()
def fill_in_slug_values(apps, schema_editor):
WebsiteStarter = apps.get_model("websites", "WebsiteStarter")
starters = WebsiteStarter.objects.filter(slug=None)
for starter in starters:
starter.slug = slugify(starter.name)[0:30]
starter.save()
class Migration(migrations.Migration):
dependencies = [
("websites", "0003_add_website_starter_model"),
]
operations = [
migrations.AddField(
model_name="websitestarter",
name="slug",
field=models.CharField(
help_text="Short string that can be used to identify this starter.",
max_length=30,
null=True,
),
),
migrations.RunPython(add_first_starter_repo, migrations.RunPython.noop),
migrations.RunPython(fill_in_slug_values, migrations.RunPython.noop),
migrations.AlterField(
model_name="websitestarter",
name="slug",
field=models.CharField(
help_text="Short string that can be used to identify this starter.",
max_length=30,
unique=True,
validators=[websites.models.validate_slug],
),
),
]
|
StarcoderdataPython
|
1698507
|
MOCK_USERS = [{"email": "<EMAIL>", "salt": "8Fb23mMNHD5Zb8pr2qWA3PE9bH0=", "hashed":
"1736f83698df3f8153c1fbd6ce2840f8aace4f200771a46672635374073cc876cf0aa6a31f780e576578f791b5555b50df46303f0c3a7f2d21f91aa1429ac22e"}]
class MockDBHelper:
def get_user(self, email):
user = [x for x in MOCK_USERS if x.get("email") == email]
if user:
return user[0]
return None
def add_user(self, email, salt, hashed):
MOCK_USERS.append({"email": email, "salt": salt, "hashed": hashed})
|
StarcoderdataPython
|
60462
|
"""
Write a program that asks the user for the time and, based on the hour given, prints the appropriate greeting.
"""
hora = input("Que horas são aí? ")
if hora.isnumeric():
hora = int(hora)
else:
print("Por favor, digite somente números.")
if hora < 0 or hora > 23:
print("Horário inválido")
elif hora <= 5:
print(f"Ainda é de madrugada, são {hora} horas, então podemos considerar boa noite!")
elif hora >= 6 and hora <= 11:
print(f"Bom dia! Agora são {hora} horas.")
elif hora >= 12 and hora <= 17:
print(f"Boa tarde! Agora são {hora} horas da tarde.")
else:
print(f"Boa noite! Agora são {hora} horas da noite.")
|
StarcoderdataPython
|
1669581
|
from abc import ABC, abstractmethod
from aiogram import Bot
class AbstractTelegramAPI(ABC):
@abstractmethod
async def send_message(self, to_chat_id: int, text: str):
raise NotImplementedError
@abstractmethod
async def forward_message(
self, from_chat_id: int, to_chat_id: int, message_id: int
) -> int:
raise NotImplementedError
@abstractmethod
async def copy_message(
self, from_chat_id: int, to_chat_id: int, message_id: int
):
raise NotImplementedError
class TelegramAPI(AbstractTelegramAPI):
def __init__(self, bot: Bot):
self._bot = bot
async def send_message(self, to_chat_id: int, text: str):
await self._bot.send_message(chat_id=to_chat_id, text=text)
async def forward_message(
self, from_chat_id: int, to_chat_id: int, message_id: int
):
tg_forwarded_message = await self._bot.forward_message(
chat_id=to_chat_id, from_chat_id=from_chat_id, message_id=message_id
)
return tg_forwarded_message.message_id
async def copy_message(
self, from_chat_id: int, to_chat_id: int, message_id: int
):
return await self._bot.copy_message(
chat_id=to_chat_id, from_chat_id=from_chat_id, message_id=message_id
)
|
StarcoderdataPython
|
1660926
|
<reponame>madvid/42_Gomoku
from metrics import *
def test_row1():
g = np.array([
[1, 0, 0],
[1, 0, 0],
[1, 1, 0]
])
assert measure_row(g, 1) == [Row(2, Position(2,0), 1, g)]
def test_row2():
g = np.array([
[1, 0, 0],
[1, 0, 0],
[1, 1, 1]
])
assert measure_row(g, 1) == [Row(3, Position(2,0), 1, g)]
def test_row3():
g = np.array([
[1, 0, 0],
[1, 0, 0],
[0, 1, 1]
])
assert measure_row(g, 1) == [Row(2, Position(2,1), 1, g)]
def test_row4():
g = np.array([
[0, 1, 1],
[1, 1, 0],
[1, 1, 1]
])
assert measure_row(g, 1) == [Row(2, Position(0, 1), 1, g), Row(2, Position(1, 0), 1, g), Row(3, Position(2,0), 1, g)]
def test_row5():
g = np.array([
[0, 1, 0],
[1, 0, 1],
[0, 1, 0]
])
assert measure_row(g, 1) == []
def test_row6():
g = np.array([
[0, 0, 1],
[1, 0, 1],
[0, 0, 1]
])
assert measure_row(g, 1) == []
def test_row7():
g = np.array([
[0, 0, 1, 0, 1],
[0, 0, 0, 0, 1],
[1, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[1, 1, 0, 1, 1],
])
assert measure_row(g, 1) == [Row(2, Position(4, 0), 1, g), Row(2, Position(4,3), 1, g)]
def test_row8():
g = np.array([
[0, 1, 0, 0, 0],
[1, 0, 1, 1, 1],
[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
])
assert measure_row(g, 1) == [Row(3, Position(1, 2), 1, g)] #, Row(2, (0, 4), 1, g), Row(2, (3,4), 1, g)]
|
StarcoderdataPython
|
3244990
|
<filename>problems/test_0169_boyer_moore_vote.py
import unittest
class Solution:
def majorityElement(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
major = count = 0
for num in nums:
if num == major:
count += 1
elif count > 0:
count -= 1
else:
major = num
count = 1
return major
class Test(unittest.TestCase):
def test(self):
self._test([1, 2, 2, 2], 2)
self._test([1, 2, 2, 2, 1], 2)
self._test([-1], -1)
def _test(self, nums, expected):
actual = Solution().majorityElement(nums)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
111968
|
<filename>exquiro/tests/test_ea_activity_diagram_parser.py
import unittest
from exquiro.parsers.enterprise_architect.ea_activity_diagram_parser import EAActivityDiagramParser
from exquiro.models.activity_diagram.activity_diagram_model import ActivityDiagramModel
from exquiro.models.activity_diagram.activity_relation import ActivityRelation
from exquiro.models.activity_diagram.activity_node import ActivityNode
class TestEAActivityDiagramParser(unittest.TestCase):
def setUp(self):
self.test_file = "exquiro/tests/test_models/activity/ea_OrderPayment.xml"
self.parser = EAActivityDiagramParser()
self.namespaces = self.parser.get_namespaces(self.test_file)
self.model = self.parser.get_model(self.test_file, self.namespaces)
def test_get_namespaces(self):
namespaces = self.parser.get_namespaces(self.test_file)
self.assertGreaterEqual(len(namespaces), 2)
self.assertTrue("xmi" in namespaces)
self.assertTrue("uml" in namespaces)
def test_get_model(self):
model = self.parser.get_model(self.test_file, self.namespaces)
self.assertIsNotNone(model)
self.assertEqual(model.attrib["name"], "EA_Model")
self.assertEqual(model.attrib["{" + self.namespaces['xmi'] + "}" + "type"], "uml:Model")
self.assertFalse('{' + self.namespaces['xmi'] + '}' + 'id' in model.attrib)
def test_parse_model_type(self):
model = self.parser.parse_model(self.model, self.namespaces)
self.assertEqual(type(model), ActivityDiagramModel)
def test_parse_nodes_count(self):
nodes = self.parser.parse_nodes(self.model, self.namespaces)
self.assertEqual(len(nodes), 17)
def test_parse_nodes_type(self):
nodes = self.parser.parse_nodes(self.model, self.namespaces)
for node in nodes:
self.assertEqual(type(node), ActivityNode)
def test_parse_nodes_no_model(self):
with self.assertRaises(AttributeError):
self.parser.parse_nodes(None, self.namespaces)
def test_parse_relations_count(self):
relations = self.parser.parse_relations(self.model, self.namespaces)
self.assertEqual(len(relations), 27)
def test_parse_relations_type(self):
relations = self.parser.parse_relations(self.model, self.namespaces)
for relation in relations:
self.assertEqual(type(relation), ActivityRelation)
def test_parse_relations_no_model(self):
with self.assertRaises(AttributeError):
self.parser.parse_relations(None, self.namespaces)
def test_parse_activity_node_id(self):
input_pins = self.model.findall('.//input[@xmi:type="uml:InputPin"]', self.namespaces)
node = self.parser.parse_activity_node(input_pins[0], self.namespaces, "InputPin")
self.assertEqual(node.id, "EAID_704D11B7_23DF_40c8_B974_350C4398D30B")
def test_parse_activity_node_name_empty(self):
input_pins = self.model.findall('.//input[@xmi:type="uml:InputPin"]', self.namespaces)
node = self.parser.parse_activity_node(input_pins[0], self.namespaces, "InputPin")
self.assertEqual(node.name, None)
def test_parse_activity_node_name_missing(self):
fork_join = self.model.findall('.//node[@xmi:type="uml:ForkNode"]', self.namespaces)
node = self.parser.parse_activity_node(fork_join[0], self.namespaces, "ForkJoin")
self.assertEqual(node.name, None)
def test_parse_activity_node_name_exists(self):
actions = self.model.findall('.//node[@xmi:type="uml:Action"]', self.namespaces)
node = self.parser.parse_activity_node(actions[0], self.namespaces, "Action")
self.assertNotEqual(node.name, None)
def test_parse_activity_node_name_data(self):
stores = self.model.findall('.//node[@xmi:type="uml:DataStoreNode"]', self.namespaces)
node = self.parser.parse_activity_node(stores[0], self.namespaces, "DataStore")
self.assertEqual(node.name, "Invoice Data store")
def test_parse_activity_node_type(self):
input_pins = self.model.findall('.//input[@xmi:type="uml:InputPin"]', self.namespaces)
node_type = "InputPin"
node = self.parser.parse_activity_node(input_pins[0], self.namespaces, node_type)
self.assertEqual(node.node_type, node_type)
def test_parse_activity_node_visibility(self):
input_pins = self.model.findall('.//input[@xmi:type="uml:InputPin"]', self.namespaces)
node = self.parser.parse_activity_node(input_pins[0], self.namespaces, "InputPin")
self.assertEqual(node.visibility, "public")
def test_parse_activity_node_ordering_data(self):
input_pins = self.model.findall('.//input[@xmi:type="uml:InputPin"]', self.namespaces)
node = self.parser.parse_activity_node(input_pins[0], self.namespaces, "InputPin")
self.assertEqual(node.ordering, "FIFO")
def test_parse_activity_node_ordering_missing(self):
actions = self.model.findall('.//node[@xmi:type="uml:Action"]', self.namespaces)
node = self.parser.parse_activity_node(actions[0], self.namespaces, "Action")
self.assertEqual(node.ordering, None)
def test_parse_activity_node_empty(self):
with self.assertRaises(AttributeError):
self.parser.parse_activity_node(None, self.namespaces, "Action")
def test_parse_actions_count(self):
actions = self.parser.parse_actions(self.model, self.namespaces)
self.assertEqual(len(actions), 6)
def test_parse_actions_type(self):
actions = self.parser.parse_actions(self.model, self.namespaces)
for action in actions:
self.assertEqual(action.node_type, "Action")
def test_parse_initial_nodes_count(self):
initials = self.parser.parse_initial_nodes(self.model, self.namespaces)
self.assertEqual(len(initials), 1)
def test_parse_initial_nodes_type(self):
initials = self.parser.parse_initial_nodes(self.model, self.namespaces)
for init in initials:
self.assertEqual(init.node_type, "Initial")
def test_parse_activity_finals_count(self):
finals = self.parser.parse_activity_finals(self.model, self.namespaces)
self.assertEqual(len(finals), 1)
def test_parse_activity_finals_type(self):
finals = self.parser.parse_activity_finals(self.model, self.namespaces)
for final in finals:
self.assertEqual(final.node_type, "ActivityFinal")
def test_parse_flow_finals_count_zero(self):
flow_finals = self.parser.parse_flow_finals(self.model, self.namespaces)
self.assertEqual(len(flow_finals), 0)
def test_parse_flow_finals_count(self):
model = self.parser.get_model("exquiro/tests/test_models/activity/ea_FlowFinal.xml", self.namespaces)
flow_finals = self.parser.parse_flow_finals(model, self.namespaces)
self.assertEqual(len(flow_finals), 1)
def test_parse_flow_finals_type(self):
model = self.parser.get_model("exquiro/tests/test_models/activity/ea_FlowFinal.xml", self.namespaces)
flow_finals = self.parser.parse_flow_finals(model, self.namespaces)
for flow in flow_finals:
self.assertEqual(flow.node_type, "FlowFinal")
def test_parse_forks_joins_count(self):
forks_joins = self.parser.parse_forks_joins(self.model, self.namespaces)
self.assertEqual(len(forks_joins), 2)
def test_parse_forks_joins_type(self):
forks_joins = self.parser.parse_forks_joins(self.model, self.namespaces)
for node in forks_joins:
self.assertEqual(node.node_type, "ForkJoin")
def test_parse_decisions_merges_count(self):
decisions_merges = self.parser.parse_decisions_merges(self.model, self.namespaces)
self.assertEqual(len(decisions_merges), 2)
def test_parse_decisions_merges_type(self):
decisions_merges = self.parser.parse_decisions_merges(self.model, self.namespaces)
for node in decisions_merges:
self.assertEqual(node.node_type, "DecisionMerge")
def test_parse_central_buffers_count_zero(self):
buffers = self.parser.parse_central_buffers(self.model, self.namespaces)
self.assertEqual(len(buffers), 0)
def test_parse_central_buffers_count(self):
model = self.parser.get_model(
"exquiro/tests/test_models/activity/ea_CentralBufferNode.xml",
self.namespaces)
buffers = self.parser.parse_central_buffers(model, self.namespaces)
self.assertEqual(len(buffers), 1)
def test_parse_central_buffers_type(self):
model = self.parser.get_model(
"exquiro/tests/test_models/activity/ea_CentralBufferNode.xml",
self.namespaces)
buffers = self.parser.parse_central_buffers(model, self.namespaces)
for buffer in buffers:
self.assertEqual(buffer.node_type, "CentralBuffer")
def test_parse_flow_relation_id(self):
c_flow = self.model.find('.//edge[@xmi:type="uml:ControlFlow"]', self.namespaces)
flow = self.parser.parse_flow_relation(c_flow, self.namespaces, "ControlFlow")
self.assertEqual(flow.id, "EAID_3D2AE4C1_536A_41cc_BE4D_348AFA0DA376")
def test_parse_flow_relation_type(self):
c_flow = self.model.find('.//edge[@xmi:type="uml:ControlFlow"]', self.namespaces)
c_type = "ControlFlow"
flow = self.parser.parse_flow_relation(c_flow, self.namespaces, c_type)
self.assertEqual(flow.relation_type, c_type)
def test_parse_flow_relation_target(self):
c_flow = self.model.find('.//edge[@xmi:type="uml:ControlFlow"]', self.namespaces)
flow = self.parser.parse_flow_relation(c_flow, self.namespaces, "ControlFlow")
self.assertEqual(flow.target, "EAID_F2D3CAD9_E086_4cb9_851B_6BF37D8BDD34")
def test_parse_flow_relation_source(self):
c_flow = self.model.find('.//edge[@xmi:type="uml:ControlFlow"]', self.namespaces)
flow = self.parser.parse_flow_relation(c_flow, self.namespaces, "ControlFlow")
self.assertEqual(flow.source, "EAID_8408A357_401F_4622_802B_0B9F7DB0E884")
def test_parse_flow_relation_guard_empty(self):
c_flow = self.model.find('.//edge[@xmi:type="uml:ControlFlow"]', self.namespaces)
flow = self.parser.parse_flow_relation(c_flow, self.namespaces, "ControlFlow")
self.assertEqual(flow.guard, None)
def test_parse_flow_relation_guard_exists(self):
c_flow = self.model.find('.//edge[@xmi:type="uml:ControlFlow"][@xmi:id="EAID_5125A0AC_4912_45f9_AC92_2D335FE6B382"]', self.namespaces)
flow = self.parser.parse_flow_relation(c_flow, self.namespaces, "ControlFlow")
self.assertEqual(flow.guard, "No")
def test_parse_flow_relation_error(self):
with self.assertRaises(AttributeError):
self.parser.parse_flow_relation(None, self.namespaces, "ControlFlow")
def test_parse_control_flows_count(self):
controls = self.parser.parse_control_flows(self.model, self.namespaces)
self.assertEqual(len(controls), 12)
def test_parse_control_flows_type(self):
controls = self.parser.parse_control_flows(self.model, self.namespaces)
for control in controls:
self.assertEqual(control.relation_type, "ControlFlow")
def test_parse_object_flows_count(self):
objects = self.parser.parse_object_flows(self.model, self.namespaces)
self.assertEqual(len(objects), 2)
def test_parse_object_flows_type(self):
objects = self.parser.parse_object_flows(self.model, self.namespaces)
for o in objects:
self.assertEqual(o.relation_type, "ObjectFlow")
def test_parse_partitions_count(self):
partitions = self.parser.parse_partitions(self.model, self.namespaces)
self.assertEqual(len(partitions), 2)
def test_parse_partitions_type(self):
partitions = self.parser.parse_partitions(self.model, self.namespaces)
for partition in partitions:
self.assertEqual(partition.node_type, "Partition")
def test_parse_partition_relations_count(self):
rels = self.parser.parse_partition_relations(self.model, self.namespaces)
self.assertEqual(len(rels), 11)
def test_parse_partition_relations_type(self):
rels = self.parser.parse_partition_relations(self.model, self.namespaces)
for rel in rels:
self.assertEqual(rel.relation_type, "PartitionMember")
def test_parse_partition_relation(self):
pass
def test_parse_pins_count(self):
pins = self.parser.parse_pins(self.model, self.namespaces)
self.assertEqual(len(pins), 2)
def test_parse_pins_type(self):
pins = self.parser.parse_pins(self.model, self.namespaces)
for pin in pins:
self.assertTrue("Pin" in pin.node_type)
def test_parse_input_pins_count(self):
input_pins = self.parser.parse_input_pins(self.model, self.namespaces)
self.assertEqual(len(input_pins), 1)
def test_parse_input_pins_type(self):
input_pins = self.parser.parse_input_pins(self.model, self.namespaces)
for pin in input_pins:
self.assertEqual(pin.node_type, "InputPin")
def test_parse_output_pins_count(self):
output_pins = self.parser.parse_output_pins(self.model, self.namespaces)
self.assertEqual(len(output_pins), 1)
def test_parse_output_pins_type(self):
output_pins = self.parser.parse_output_pins(self.model, self.namespaces)
for pin in output_pins:
self.assertEqual(pin.node_type, "OutputPin")
def test_parse_data_stores_count(self):
stores = self.parser.parse_data_stores(self.model, self.namespaces)
self.assertEqual(len(stores), 1)
def test_parse_data_stores_type(self):
stores = self.parser.parse_data_stores(self.model, self.namespaces)
for store in stores:
self.assertEqual(store.node_type, "DataStore")
def test_parse_pin_relations_count(self):
rels = self.parser.parse_pin_relations(self.model, self.namespaces)
self.assertEqual(len(rels), 2)
def test_parse_pin_relations_type(self):
rels = self.parser.parse_pin_relations(self.model, self.namespaces)
for rel in rels:
self.assertEqual(rel.relation_type, "HasPin")
def test_parse_relations_unique_id(self):
relations = self.parser.parse_relations(self.model, self.namespaces)
ids = set()
for relation in relations:
ids.add(relation.id)
self.assertEqual(len(ids), len(relations))
def test_parse_nodes_unique_id(self):
nodes = self.parser.parse_nodes(self.model, self.namespaces)
ids = set()
for node in nodes:
ids.add(node.id)
self.assertEqual(len(ids), len(nodes))
def test_unique_ids(self):
relations = self.parser.parse_relations(self.model, self.namespaces)
nodes = self.parser.parse_nodes(self.model, self.namespaces)
ids = set()
for relation in relations:
ids.add(relation.id)
for node in nodes:
ids.add(node.id)
self.assertEqual(len(ids), len(relations) + len(nodes))
|
StarcoderdataPython
|
110549
|
<reponame>runzezhang/Data-Structure-and-Algorithm-Notebook
# Description
# Count how many nodes in a linked list.
# Example
# Example 1:
# Input: 1->3->5->null
# Output: 3
# Explanation:
# return the length of the list.
# Example 2:
# Input: null
# Output: 0
# Explanation:
# return the length of list.
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
"""
@param head: the first node of linked list.
@return: An integer
"""
def countNodes(self, head):
# write your code here
counter = 0
current = head
while current != None:
counter = counter + 1
current = current.next
return counter
|
StarcoderdataPython
|
3206262
|
import dns.resolver
import json
import known_tlds
def get_a(domain, server=None):
try:
if server:
my_resolver = dns.resolver.Resolver()
my_resolver.nameservers = [server]
answers = my_resolver.resolve(domain, 'A')
else:
answers = dns.resolver.resolve(domain, 'A')
first_level_dns = []
for rdata in answers:
first_level_dns.append(str(rdata.to_text()))
return first_level_dns
except:
return []
def get_ns(domain):
first_level_dns = []
try:
answers = dns.resolver.resolve(domain, 'NS')
first_level_dns = []
for rdata in answers:
first_level_dns.append(str(rdata.to_text()))
return first_level_dns
except dns.resolver.NXDOMAIN as e:
for r in e.response(e.qnames()[0]).authority:
for rr in r:
first_level_dns.append(str(rr.mname.to_text()))
return first_level_dns
except:
return []
def get_soa(domain, server=None):
parent_domain = None
try:
if server:
my_resolver = dns.resolver.Resolver()
my_resolver.nameservers = [server]
answers = my_resolver.resolve(domain, 'SOA', raise_on_no_answer=False)
else:
answers = dns.resolver.resolve(domain, 'SOA', raise_on_no_answer=False)
for rdata in answers:
return parent_domain, rdata.mname.to_text()
for r in answers.response.authority:
parent_domain = r.name.to_text()
for rr in r:
return parent_domain, rr.mname.to_text()
except dns.resolver.NXDOMAIN as e:
for r in e.response(e.qnames()[0]).authority:
parent_domain = r.name.to_text()
for rr in r:
return parent_domain, rr.mname.to_text()
except Exception as e:
print(e)
pass
return None, None
def check_domain(domain):
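    # Descriptive note (added): look up the SOA of `domain` to find its
    # primary (master) name server and that server's A records, then query
    # each NS of the parent zone (or of `domain` itself if no parent was
    # found) for its own view of the SOA, and return everything as a JSON
    # string.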
results = {
'master_server': {
'name': '',
'ips': []
},
'inner_master_servers': [],
'parent_domain': known_tlds.get_root_domain(domain)
}
parent_domain, master_server = get_soa(domain)
if not master_server:
return None
master_server_ips = get_a(master_server)
results['master_server']['ips'] = master_server_ips
results['master_server']['name'] = master_server
# if master_server.lower().endswith(parent_domain.lower()):
# return json.dumps(results)
target_domain = parent_domain if parent_domain else domain
for nameserver in get_ns(target_domain):
maybe_soa = get_soa(target_domain, get_a(nameserver)[0])[1]
maybe_soa_ips = get_a(maybe_soa, get_a(nameserver)[0])
results['inner_master_servers'].append((nameserver, maybe_soa, maybe_soa_ips))
return json.dumps(results)
def lambda_handler(event, context):
try:
data = event.get('body')
if not data:
return {'statusCode': 500, 'body': '500 is for grown ups'}
return {
'statusCode': 200,
'headers': {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': True,
},
'body': check_domain(data.strip())
}
except Exception as e:
return {
'statusCode': 200,
'headers': {
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': True,
},
'body': str(e)
}
if __name__ == "__main__":
test_domain = input("Domain: ").strip()
print(test_domain)
print(lambda_handler({'body': test_domain.strip()}, None))
|
StarcoderdataPython
|
3227248
|
<reponame>akutkin/SACA<gh_stars>0
import math
#from model import Model
import glob
import numpy as np
import scipy as sp
from utils import is_sorted
# FIXME: For ``average_freq=True`` got shitty results
class LnLikelihood(object):
def __init__(self, uvdata, model, average_freq=True, amp_only=False,
use_V=False, use_weights=False):
error = uvdata.error(average_freq=average_freq, use_V=use_V)
self.amp_only = amp_only
self.model = model
self.data = uvdata
stokes = model.stokes
self.stokes = stokes
self.average_freq = average_freq
if average_freq:
if stokes == 'I':
self.uvdata = 0.5 * (uvdata.uvdata_freq_averaged[:, 0] +
uvdata.uvdata_freq_averaged[:, 1])
# self.error = 0.5 * np.sqrt(error[:, 0] ** 2. +
# error[:, 1] ** 2.)
self.error = 0.5 * (error[:, 0] +
error[:, 1])
if use_weights:
self.error = uvdata.errors_from_weights_masked_freq_averaged
elif stokes == 'RR':
self.uvdata = uvdata.uvdata_freq_averaged[:, 0]
self.error = error[:, 0]
elif stokes == 'LL':
self.uvdata = uvdata.uvdata_freq_averaged[:, 1]
self.error = error[:, 1]
else:
raise Exception("Working with only I, RR or LL!")
else:
if stokes == 'I':
# (#, #IF)
self.uvdata = 0.5 * (uvdata.uvdata[..., 0] + uvdata.uvdata[..., 1])
# (#, #IF)
# self.error = 0.5 * np.sqrt(error[..., 0] ** 2. +
# error[..., 1] ** 2.)
self.error = 0.5 * (error[..., 0] +
error[..., 1])
elif stokes == 'RR':
self.uvdata = uvdata.uvdata[..., 0]
self.error = error[..., 0]
elif stokes == 'LL':
self.uvdata = uvdata.uvdata[..., 1]
self.error = error[..., 1]
else:
raise Exception("Working with only I, RR or LL!")
def __call__(self, p):
"""
Returns ln of likelihood for data and model with parameters ``p``.
:param p:
:return:
"""
# Data visibilities and noise
data = self.uvdata
error = self.error
self.model.p = p[:self.model.size]
model_data = self.model.ft(self.data.uv)
k = 1.
if self.stokes == 'I':
k = 2.
lnlik = k * (-np.log(2. * math.pi * (p[-1] + error ** 2.)) -
(data - model_data) * (data - model_data).conj() /
(2. * (p[-1] + error ** 2.)))
lnlik = lnlik.real
return np.ma.sum(lnlik)
class LnPrior(object):
def __init__(self, model):
self.model = model
def __call__(self, p):
self.model.p = p[:-1]
distances = list()
for component in self.model._components:
distances.append(np.sqrt(component.p[1] ** 2. +
component.p[2] ** 2.))
if not is_sorted(distances):
print "Components are not sorted:("
return -np.inf
lnpr = list()
for component in self.model._components:
lnpr.append(component.lnpr)
lnpr.append(sp.stats.uniform.logpdf(p[-1], 0, 2))
return sum(lnpr)
class LnPost(object):
def __init__(self, uvdata, model, average_freq=True, use_V=False,
use_weights=False):
self.lnlik = LnLikelihood(uvdata, model, average_freq=average_freq,
use_V=use_V, use_weights=use_weights)
self.lnpr = LnPrior(model)
def __call__(self, p):
lnpr = self.lnpr(p[:])
if not np.isfinite(lnpr):
return -np.inf
return self.lnlik(p[:]) + lnpr
if __name__ == '__main__':
from spydiff import import_difmap_model
from uv_data import UVData
from model import Model, Jitter
uv_fits = '/home/ilya/code/vlbi_errors/pet/0235+164_X.uvf_difmap'
uvdata = UVData(uv_fits)
# Create model
mdl = Model(stokes='RR')
comps = import_difmap_model('0235+164_X.mdl',
'/home/ilya/code/vlbi_errors/pet')
comps[0].add_prior(flux=(sp.stats.uniform.logpdf, [0., 10], dict(),),
bmaj=(sp.stats.uniform.logpdf, [0, 1], dict(),),
e=(sp.stats.uniform.logpdf, [0, 1.], dict(),),
bpa=(sp.stats.uniform.logpdf, [0, np.pi], dict(),))
comps[1].add_prior(flux=(sp.stats.uniform.logpdf, [0., 3], dict(),),
bmaj=(sp.stats.uniform.logpdf, [0, 5], dict(),))
mdl.add_components(*comps)
# Create log of likelihood function
lnlik = LnLikelihood(uvdata, mdl)
lnpr = LnPrior(mdl)
lnpost = LnPost(uvdata, mdl)
p = mdl.p + [0.04]
print lnpr(p)
print lnlik(p)
print lnpost(p)
import emcee
sampler = emcee.EnsembleSampler(100, len(p), lnpost)
p0 = emcee.utils.sample_ball(p, [0.1, 0.01, 0.01, 0.01, 0.03, 0.01, 0.1, 0.01, 0.01, 0.1] + [0.001],
size=100)
pos, lnp, _ = sampler.run_mcmc(p0, 100)
print "Acceptance fraction for initial burning: ", sampler.acceptance_fraction
sampler.reset()
# Run second burning
pos, lnp, _ = sampler.run_mcmc(pos, 500)
print "Acceptance fraction for second burning: ", sampler.acceptance_fraction
sampler.reset()
pos, lnp, _ = sampler.run_mcmc(pos, 1000)
print "Acceptance fraction for production: ", sampler.acceptance_fraction
|
StarcoderdataPython
|
3303055
|
<reponame>marcelabbc07/TrabalhosPython
import sys
sys.path.append('')
from model.endereco import Endereco
from dao.endereco_dao import EnderecoDao
class EnderecoController:
dao=EnderecoDao()
def listar_todos(self):
        return self.dao.listar_todos()
def buscar_id(self,id):
return self.dao.buscar_id(id)
def salvar(self,endereco:Endereco):
self.dao.salvar(endereco)
def alterar(self,endereco:Endereco):
self.dao.alterar(endereco)
def deletar(self,id):
self.dao.deletar(id)
controller=EnderecoController()
e=controller.buscar_id(1)
e=controller.listar_todos()
print(e)
|
StarcoderdataPython
|
1625266
|
<gh_stars>0
import datetime, itertools
from django.views.generic import ListView
from django.shortcuts import get_object_or_404, render
from .models import ProductCategory, Vendor, Product
from .forms import OrderForm
def get_current_time_and_hour():
'''Returns a dictionary containing current_time and current_hour.'''
current_time = datetime.datetime.now()
current_hour = current_time.timetuple().tm_hour
return {'current_time': current_time, 'current_hour': current_hour}
class ProductCategoriesView(ListView):
'''A view that displays all product categories.'''
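    # itertools.chain places the 'Others' category after every other category
    # so it is always listed last (VendorsView does the same for 'Unknown').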
queryset = list(itertools.chain(
ProductCategory.objects.exclude(prodcat_name='Others'),
ProductCategory.objects.filter(prodcat_name='Others')
))
template_name = 'shop/product_categories.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(get_current_time_and_hour())
return context
class ProductCategoryProductsView(ListView):
'''A view that displays all products of a specific product category.'''
template_name = 'shop/product_category_products.html'
def get_queryset(self):
self.product_category = get_object_or_404(ProductCategory, id=self.kwargs['pk'])
return Product.objects.filter(prodcat=self.product_category)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['product_category'] = self.product_category
context.update(get_current_time_and_hour())
return context
class VendorsView(ListView):
'''A view that displays all vendors.'''
queryset = list(itertools.chain(
Vendor.objects.exclude(vend_name='Unknown'),
Vendor.objects.filter(vend_name='Unknown')
))
template_name = 'shop/vendors.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(get_current_time_and_hour())
return context
class VendorProductsView(ListView):
'''A view that displays all products of a specific vendor.'''
template_name = 'shop/vendor_products.html'
def get_queryset(self):
self.vendor = get_object_or_404(Vendor, id=self.kwargs['pk'])
return Product.objects.filter(vend=self.vendor)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['vendor'] = self.vendor
context.update(get_current_time_and_hour())
return context
class ProductsView(ListView):
'''A view that displays all products.'''
model = Product
template_name = 'shop/products.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(get_current_time_and_hour())
return context
def product_view(request, pk):
'''
A view that displays details of a specific product.
If an user is authenticated, he or she can order the product.
'''
product = get_object_or_404(Product, id=pk)
form, order, purchased = None, None, False
if request.user.is_authenticated:
if request.method == 'POST':
form = OrderForm(request.POST)
if form.is_valid():
order = form.save(commit=False)
order.cust = request.user
order.prod = product
order.order_totalprice = product.prod_price * form.cleaned_data['order_quantity']
order.save()
purchased = True
else:
form = OrderForm()
context = {
'product': product,
'form': form,
'order': order,
'purchased': purchased,
**get_current_time_and_hour(),
}
return render(request, 'shop/product.html', context=context)
|
StarcoderdataPython
|
4810327
|
<filename>mini/migrations/0002_auto_20210729_1316.py
# Generated by Django 3.2.5 on 2021-07-29 13:16
from django.db import migrations, models
import mini.validators
class Migration(migrations.Migration):
dependencies = [
('mini', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='music',
options={'ordering': ['id']},
),
migrations.AlterField(
model_name='music',
name='audio_file',
field=models.FileField(upload_to='musics', validators=[mini.validators.validate_audio]),
),
]
|
StarcoderdataPython
|
3268267
|
<gh_stars>10-100
#!/usr/bin/env python
###############################################################################
# Copyright (c) 2009-2014, <NAME>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
###############################################################################
__version__ = "0.1"
# Small advice:
# given that this script is meant to be used launched by a shell, pay the
# utmost attention to the characters you'll be use, be sure that they aren't
# going to be interpreted by the shell or find a way to escape them. For bash
# it's best to use double quotes (") at the start and end of your message and
# not to use the exclamation mark. Other characters like the period, the
# semi-period, the comma and the interrogative point should be ok. For quoting
# inside the message obviously make use of the single quotes
from urllib.parse import urlencode
import urllib.request, urllib.error, urllib.parse
from base64 import b64encode
import sys
import re
from argparse import ArgumentParser
# regexp to match any whitespace character
RWhitespaces = re.compile(r"\s")
def msgtoolong():
"""Complain if the message is too long and exit"""
print("Are you going to post the entire Divine Comedy? Keep it short please")
exit(1)
def truncate(string, target):
"""Truncate a string to respect the twitter 140 characters limit and
preserve word boundaries"""
if len(string) < target:
# string is shorter than target
return string
elif len(string) >= target*2:
# string is equal or bigger than double the target, too much
msgtoolong()
else:
# string is bigger than target but shorter than 280 characters. It's ok
lastchar = string[140]
if RWhitespaces.match(lastchar):
# last character is a space, good, split the string there
msg1 = string[:140]
msg2 = string[141:]
return msg1, msg2
else:
# loop to catch latest whitespace to split the message there
num = target
for char in reversed(string[:140]):
num = num-1
if RWhitespaces.match(char):
# if found it, split the two messages there but only if the
# whitespace is at least at the 137th character so to have
# room for the three suspension dots; if not, search the
# second to last one instead and move the rest to the
# second message
if num >= target-3:
continue
else:
msg1 = string[:num]
msg2 = string[num:]
if len(msg2) > target:
# if the second message is also longer than target
# just quit, not going to split in 3 parts...
msgtoolong()
else:
return msg1, msg2
def twitterpost(username, password, message):
"""Just a post to twitter function with basic authentication"""
auth_header = username + ':' + password
req = urllib.request.Request('https://twitter.com/statuses/update.json')
    req.add_header('Authorization', 'Basic %s' % b64encode(auth_header.encode()).decode())
req.data = message.encode()
urllib.request.urlopen(req)
def argument_parser():
"""Argument parser"""
usage = "usage: clitwitter.py -u [username] -p [password]"
arguments = ArgumentParser(usage=usage)
arguments.add_argument("-v", "--version",
action="version",
version=__version__)
arguments.add_argument("-u", "--user",
help="the twitter username",
action="store",
type=str,
dest='username')
arguments.add_argument("-p", "--password",
help="the twitter password",
action="store",
type=str,
dest='password')
args = arguments.parse_args()
return args
def main():
"""Main loop"""
# get twitter login data
args = argument_parser()
if not args.username or not args.password:
# we need both!
print("Please insert both username and password for your twitter account")
print("See -h for more help")
exit(1)
target = 140 # twitter messages limit
# catch the arguments list and make it a string
arguments = sys.argv[5:]
str_arguments = " ".join(arguments)
if len(str_arguments) <= target:
# the message is already shorter than 140 characters? Post it then
message1 = str_arguments
twitterpost(args.username, args.password, message1)
return 0
else:
# longer than 140? Truncate it in two
msg1, msg2 = truncate(str_arguments, target)
message1 = msg1 + '...'
message2 = msg2
# post both messages then
for msg in message1, message2:
twitterpost(args.username, args.password, msg)
return 0
if __name__ == '__main__':
status = main()
sys.exit(status)
|
StarcoderdataPython
|
188401
|
<reponame>nziokaivy/instagram
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Profile, Comments, Image
from django import forms
class UserCreateForm(UserCreationForm):
email = forms.EmailField(required=True)
class Meta:
model = User
fields = ("username", "email", "<PASSWORD>", "<PASSWORD>")
class CommentForm(forms.ModelForm):
class Meta:
model = Comments
fields = ('comment',)
class ImageForm(forms.ModelForm):
class Meta:
model = Image
exclude=['likes','poster']
|
StarcoderdataPython
|
3289977
|
<reponame>VirtualL/home-assistant
"""Support for Rflink Cover devices."""
import logging
import voluptuous as vol
from homeassistant.components.cover import PLATFORM_SCHEMA, CoverDevice
from homeassistant.const import CONF_NAME, STATE_OPEN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import (
CONF_ALIASES, CONF_DEVICE_DEFAULTS, CONF_DEVICES, CONF_FIRE_EVENT,
CONF_GROUP, CONF_GROUP_ALIASES, CONF_NOGROUP_ALIASES,
CONF_SIGNAL_REPETITIONS, DEVICE_DEFAULTS_SCHEMA, RflinkCommand)
DEPENDENCIES = ['rflink']
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DEVICE_DEFAULTS, default=DEVICE_DEFAULTS_SCHEMA({})):
DEVICE_DEFAULTS_SCHEMA,
vol.Optional(CONF_DEVICES, default={}): vol.Schema({
cv.string: {
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ALIASES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_GROUP_ALIASES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_NOGROUP_ALIASES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_FIRE_EVENT, default=False): cv.boolean,
vol.Optional(CONF_SIGNAL_REPETITIONS): vol.Coerce(int),
vol.Optional(CONF_GROUP, default=True): cv.boolean,
},
}),
})
def devices_from_config(domain_config):
"""Parse configuration and add Rflink cover devices."""
devices = []
for device_id, config in domain_config[CONF_DEVICES].items():
device_config = dict(domain_config[CONF_DEVICE_DEFAULTS], **config)
device = RflinkCover(device_id, **device_config)
devices.append(device)
return devices
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the Rflink cover platform."""
async_add_entities(devices_from_config(config))
class RflinkCover(RflinkCommand, CoverDevice, RestoreEntity):
"""Rflink entity which can switch on/stop/off (eg: cover)."""
async def async_added_to_hass(self):
"""Restore RFLink cover state (OPEN/CLOSE)."""
await super().async_added_to_hass()
old_state = await self.async_get_last_state()
if old_state is not None:
self._state = old_state.state == STATE_OPEN
def _handle_event(self, event):
"""Adjust state if Rflink picks up a remote command for this device."""
self.cancel_queued_send_commands()
command = event['command']
if command in ['on', 'allon', 'up']:
self._state = True
elif command in ['off', 'alloff', 'down']:
self._state = False
@property
def should_poll(self):
"""No polling available in RFlink cover."""
return False
@property
def is_closed(self):
"""Return if the cover is closed."""
return not self._state
@property
def assumed_state(self):
"""Return True because covers can be stopped midway."""
return True
def async_close_cover(self, **kwargs):
"""Turn the device close."""
return self._async_handle_command("close_cover")
def async_open_cover(self, **kwargs):
"""Turn the device open."""
return self._async_handle_command("open_cover")
def async_stop_cover(self, **kwargs):
"""Turn the device stop."""
return self._async_handle_command("stop_cover")
|
StarcoderdataPython
|
188714
|
# Copyright (c) 2020 Graphcore Ltd. All rights reserved
import datetime
import sys
from popgen import registry, transform
# emit_handlers(namespace, aten, handlers, f=sys.stdout)
#
# Emits the C++ handlers for one operator.
# Parameters:
# namespace - namespace the operator is in
# aten - name of the operator
# handlers - list of handlers. must differ in arity.
# f - output stream
def emit_handlers(namespace, aten, handlers, f=sys.stdout):
values = dict()
opname = get_op_name(aten)
emit_arity_check = len(handlers) > 1
decl = "torch::jit::Node *" + opname + "Handler(" + \
"torch::jit::Graph *graph, " + "torch::jit::Node *node) {"
if len(decl) <= 80:
f.write(decl + "\n")
else:
decl = "torch::jit::Node *" + opname + "Handler("
f.write(decl + "torch::jit::Graph *graph,\n")
f.write(" " * len(decl))
f.write("torch::jit::Node *node) {\n")
arities = set()
for handler in handlers:
assert handler.graph_arity not in arities, \
aten + " has multiple handlers with the same arity"
arities.add(handler.graph_arity)
values.clear()
handler = transform.generate_complex_ops(handler)
handler = transform.value_numbering(handler)
handler = transform.generate_typed_constants(handler)
handler.annotate("// " + handler.render())
if emit_arity_check:
f.write(" if (node->inputs().size() == " +
str(handler.graph_arity) + ") {\n")
handler.emit(values, 0, " ", f, True)
f.write(" }\n")
else:
handler.emit(values, 0, " ", f, True)
if emit_arity_check:
arity_list = sorted(list(arities))
expect_str = "Expecting " + str(arity_list[0])
for i in range(1, len(arity_list) - 1):
expect_str += ', ' + str(arity_list[i])
if len(arity_list) > 1:
expect_str += ' or ' + str(arity_list[-1])
if len(arity_list) > 1 or arity_list[0] > 1:
expect_str += " operands, "
else:
expect_str += " operand, "
f.write('\n std::stringstream errmsg;\n')
f.write(' errmsg << "Incorrect number of arguments for operator ";\n')
f.write(' errmsg << "' + namespace + '::' + aten + '. ";\n')
f.write(' errmsg << "' + expect_str + '";\n')
f.write(
' errmsg << "got " << node->inputs().size() << " operand(s).";\n')
f.write(" ERROR(&errmsg);\n")
f.write(" return nullptr;\n")
f.write("}\n\n")
# generate(script, namespace, filename, global_symbols)
#
# Generate a file containing the C++ implementation of handlers
# Parameters:
# script - name of top-level script
# namespace - the namespace the operators are in
# filename - the output file
# global_symbols - dictionary of globals from top-level
def generate(script, namespace, filename, global_symbols):
f = open(filename, 'w')
now = datetime.datetime.now()
f.write('// DO NOT EDIT! Generated by ' + script + '\n')
f.write('// Copyright (c) ' + str(now.year) +
' Graphcore Ltd. All rights reserved.\n\n')
f.write('#include "../PoptorchStaticInit.hpp"\n')
f.write('#include "../PoptorchSymbols.hpp"\n')
f.write('#include "PopartCanonicalizationUtils.hpp"\n')
f.write('#include "poptorch/OpBuilder.hpp"\n')
f.write('#include "poptorch/Utils.hpp"\n')
f.write('#include "poptorch_logging/Error.hpp"\n')
f.write('#include "poptorch_logging/Logging.hpp"\n')
f.write("\nnamespace poptorch {\n")
f.write("\nnamespace {\n\n")
registry.add_implicit_handlers(global_symbols)
for (aten, handler) in sorted(registry.handlers.items()):
emit_handlers(namespace, aten, handler, f)
f.write("} // namespace\n")
f.write("\n__attribute__((constructor(HANDLER_INIT_PRIORITY))) ")
f.write("static void registration() {\n")
for (source, _) in registry.forwardings.items():
transform.validate_forwarding(source)
to_register = sorted(
list(registry.handlers.keys()) + list(registry.forwardings.keys()))
for aten in to_register:
opname = get_op_name(registry.forwardings.get(aten, aten))
reg_handler_line = (" registerHandler(" + namespace + "::" + aten +
", " + opname + "Handler);\n")
if len(reg_handler_line) > 81:
reg_handler_line = reg_handler_line.replace(
", ", ",\n ")
f.write(reg_handler_line)
f.write("}\n\n")
f.write("} // namespace poptorch\n")
f.close()
registry.clear()
# get_op_name(aten)
#
# Returns the name of the C++ handler function for an operator
# Parameters:
# aten - the name of the operator
def get_op_name(aten):
opname = aten.split(':')[-1]
return opname
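# Minimal illustrative check (not part of the generator): get_op_name strips
# any namespace prefix, e.g. the hypothetical input "aten::add" -> "add".
if __name__ == "__main__":
    assert get_op_name("aten::add") == "add"
    assert get_op_name("plain_name") == "plain_name"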
|
StarcoderdataPython
|
123294
|
#!/usr/bin/python3.7
from std_msgs.msg import String
import opensim as osim
from basic_example.srv import *
import rospy
import sys
import os
# ----------------------------------------------------------------------
# Load the musculoskeletal model from a file.
# ----------------------------------------------------------------------
path = os.path.dirname(os.path.abspath(__file__))
model = osim.Model(path + "/../model/UPAT_Eye_Model_Passive_Pulleys_v2.osim")
# ----------------------------------------------------------------------
# Add a controller to the model's muscles.
# ----------------------------------------------------------------------
actuator_set = model.getActuators()
lateral_rectus = actuator_set.get("r_Lateral_Rectus")
medial_rectus = actuator_set.get("r_Medial_Rectus")
brain = osim.PrescribedController()
brain.addActuator(lateral_rectus)
brain.addActuator(medial_rectus)
brain.prescribeControlForActuator("r_Lateral_Rectus", osim.Constant(0.0))
brain.prescribeControlForActuator("r_Medial_Rectus", osim.Constant(0.0))
model.addController(brain)
# ----------------------------------------------------------------------
# Add a console reporter to print the following values:
# Position and speed of the adduction-abduction rotational Degree of
# Freedom (y-axis).
# Current fiber force applied to the Lateral Rectus and the
# Medial Rectus tendons.
# ----------------------------------------------------------------------
coordinate_set = model.getCoordinateSet()
eye_add_abd = coordinate_set.get("r_eye_add_abd")
reporter = osim.ConsoleReporter()
reporter.set_report_time_interval(0.002)
reporter.addToReport(eye_add_abd.getOutput("value"), "position")
reporter.addToReport(eye_add_abd.getOutput("speed"), "speed")
reporter.addToReport(lateral_rectus.getOutput("fiber_force"), "lateral_force")
reporter.addToReport(medial_rectus.getOutput("fiber_force"), "medial_force")
model.addComponent(reporter)
# --------------------------------------------------------------------------
# Configure the simulation of the model
# --------------------------------------------------------------------------
state = model.initSystem()
model.equilibrateMuscles(state)
manager = osim.Manager(model)
state.setTime(0)
manager.initialize(state)
# --------------------------------------------------------------------------
# Get the control signals of the Lateral Rectus and the Medial Rectus
# --------------------------------------------------------------------------
def getControlSignal(current_pos, time):
rospy.wait_for_service("get_control_signal")
try:
get_control_signal = rospy.ServiceProxy("get_control_signal", GetControlSignal)
response = get_control_signal(current_pos, time)
return response
except rospy.ServiceException as e:
print("Service call failed: %s"%e)
if __name__ == '__main__':
time = 0.0 # in seconds
sim_time = 0.002 # in seconds
while time < 5.0:
current_pos = eye_add_abd.getValue(state)
res = getControlSignal(current_pos, time)
brain.prescribeControlForActuator("r_Lateral_Rectus", osim.Constant(res.lateral_control))
brain.prescribeControlForActuator("r_Medial_Rectus", osim.Constant(res.medial_control))
time += sim_time
state = manager.integrate(time)
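# Note (illustrative summary, not part of the original script): each iteration
# asks the external "get_control_signal" ROS service for muscle excitations,
# applies them to the PrescribedController, and advances the OpenSim simulation
# by sim_time (2 ms) until t = 5 s.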
|
StarcoderdataPython
|
3309494
|
import abc
class PayloadFormatter(metaclass=abc.ABCMeta):
@staticmethod
@abc.abstractmethod
def generate(instance):
pass
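# Hedged example (an assumption, not from the original source): a hypothetical
# concrete formatter showing how the abstract interface is meant to be used.
class DictPayloadFormatter(PayloadFormatter):
    @staticmethod
    def generate(instance):
        # Purely illustrative: serialise the instance's attribute dict.
        return dict(vars(instance))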
|
StarcoderdataPython
|
3281081
|
<reponame>isabella232/onefuzz<gh_stars>1-10
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from enum import Enum
from typing import List
class OS(Enum):
windows = "windows"
linux = "linux"
class DashboardEvent(Enum):
heartbeat = "heartbeat"
new_file = "new_file"
repro_state = "repro_state"
task_state = "task_state"
job_state = "job_state"
proxy_state = "proxy_state"
pool_state = "pool_state"
node_state = "node_state"
scaleset_state = "scaleset_state"
class TelemetryEvent(Enum):
task = "task"
state_changed = "state_changed"
@classmethod
def can_share(cls) -> List["TelemetryEvent"]:
""" only these events will be shared to the central telemetry """
return [cls.task, cls.state_changed]
class TelemetryData(Enum):
component_type = "component_type"
current_state = "current_state"
job_id = "job_id"
task_id = "task_id"
task_type = "task_type"
vm_id = "vm_id"
@classmethod
def can_share(cls) -> List["TelemetryData"]:
""" only these types of data will be shared to the central telemetry """
return [cls.current_state, cls.vm_id, cls.job_id, cls.task_id, cls.task_type]
class TaskFeature(Enum):
input_queue_from_container = "input_queue_from_container"
supervisor_exe = "supervisor_exe"
supervisor_env = "supervisor_env"
supervisor_options = "supervisor_options"
supervisor_input_marker = "supervisor_input_marker"
stats_file = "stats_file"
stats_format = "stats_format"
target_exe = "target_exe"
target_exe_optional = "target_exe_optional"
target_env = "target_env"
target_options = "target_options"
analyzer_exe = "analyzer_exe"
analyzer_env = "analyzer_env"
analyzer_options = "analyzer_options"
rename_output = "rename_output"
target_options_merge = "target_options_merge"
target_workers = "target_workers"
generator_exe = "generator_exe"
generator_env = "generator_env"
generator_options = "generator_options"
wait_for_files = "wait_for_files"
target_timeout = "target_timeout"
check_asan_log = "check_asan_log"
check_debugger = "check_debugger"
check_retry_count = "check_retry_count"
ensemble_sync_delay = "ensemble_sync_delay"
preserve_existing_outputs = "preserve_existing_outputs"
check_fuzzer_help = "check_fuzzer_help"
expect_crash_on_failure = "expect_crash_on_failure"
# Permissions for an Azure Blob Storage Container.
#
# See: https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas#permissions-for-a-container # noqa: E501
class ContainerPermission(Enum):
Read = "Read"
Write = "Write"
List = "List"
Delete = "Delete"
class JobState(Enum):
init = "init"
enabled = "enabled"
stopping = "stopping"
stopped = "stopped"
@classmethod
def available(cls) -> List["JobState"]:
""" set of states that indicate if tasks can be added to it """
return [x for x in cls if x not in [cls.stopping, cls.stopped]]
@classmethod
def needs_work(cls) -> List["JobState"]:
"""
set of states that indicate work is needed during eventing
"""
return [cls.init, cls.stopping]
@classmethod
def shutting_down(cls) -> List["JobState"]:
return [cls.stopping, cls.stopped]
class TaskState(Enum):
init = "init"
waiting = "waiting"
scheduled = "scheduled"
setting_up = "setting_up"
running = "running"
stopping = "stopping"
stopped = "stopped"
wait_job = "wait_job"
@classmethod
def has_started(cls) -> List["TaskState"]:
return [cls.running, cls.stopping, cls.stopped]
@classmethod
def needs_work(cls) -> List["TaskState"]:
"""
set of states that indicate work is needed during eventing
"""
return [cls.init, cls.stopping]
@classmethod
def available(cls) -> List["TaskState"]:
""" set of states that indicate if the task isn't stopping """
return [x for x in cls if x not in [TaskState.stopping, TaskState.stopped]]
@classmethod
def shutting_down(cls) -> List["TaskState"]:
return [TaskState.stopping, TaskState.stopped]
class TaskType(Enum):
libfuzzer_fuzz = "libfuzzer_fuzz"
libfuzzer_coverage = "libfuzzer_coverage"
libfuzzer_crash_report = "libfuzzer_crash_report"
libfuzzer_merge = "libfuzzer_merge"
generic_analysis = "generic_analysis"
generic_supervisor = "generic_supervisor"
generic_merge = "generic_merge"
generic_generator = "generic_generator"
generic_crash_report = "generic_crash_report"
class VmState(Enum):
init = "init"
extensions_launch = "extensions_launch"
extensions_failed = "extensions_failed"
vm_allocation_failed = "vm_allocation_failed"
running = "running"
stopping = "stopping"
stopped = "stopped"
@classmethod
def needs_work(cls) -> List["VmState"]:
"""
set of states that indicate work is needed during eventing
"""
return [cls.init, cls.extensions_launch, cls.stopping]
@classmethod
def available(cls) -> List["VmState"]:
""" set of states that indicate if the repro vm isn't stopping """
return [x for x in cls if x not in [cls.stopping, cls.stopped]]
class UpdateType(Enum):
Task = "Task"
Job = "Job"
Repro = "Repro"
Proxy = "Proxy"
Pool = "Pool"
Node = "Node"
Scaleset = "Scaleset"
TaskScheduler = "TaskScheduler"
class Compare(Enum):
Equal = "Equal"
AtLeast = "AtLeast"
AtMost = "AtMost"
class ContainerType(Enum):
analysis = "analysis"
coverage = "coverage"
crashes = "crashes"
inputs = "inputs"
no_repro = "no_repro"
readonly_inputs = "readonly_inputs"
reports = "reports"
setup = "setup"
tools = "tools"
unique_inputs = "unique_inputs"
unique_reports = "unique_reports"
@classmethod
def reset_defaults(cls) -> List["ContainerType"]:
return [
cls.analysis,
cls.coverage,
cls.crashes,
cls.inputs,
cls.no_repro,
cls.readonly_inputs,
cls.reports,
cls.setup,
cls.unique_reports,
cls.unique_inputs,
]
@classmethod
def user_config(cls) -> List["ContainerType"]:
return [cls.setup, cls.inputs, cls.readonly_inputs]
class StatsFormat(Enum):
AFL = "AFL"
class ErrorCode(Enum):
INVALID_REQUEST = 450
INVALID_PERMISSION = 451
MISSING_EULA_AGREEMENT = 452
INVALID_JOB = 453
INVALID_TASK = 453
UNABLE_TO_ADD_TASK_TO_JOB = 454
INVALID_CONTAINER = 455
UNABLE_TO_RESIZE = 456
UNAUTHORIZED = 457
UNABLE_TO_USE_STOPPED_JOB = 458
UNABLE_TO_CHANGE_JOB_DURATION = 459
UNABLE_TO_CREATE_NETWORK = 460
VM_CREATE_FAILED = 461
MISSING_NOTIFICATION = 462
INVALID_IMAGE = 463
UNABLE_TO_CREATE = 464
UNABLE_TO_PORT_FORWARD = 465
UNABLE_TO_FIND = 467
TASK_FAILED = 468
INVALID_NODE = 469
NOTIFICATION_FAILURE = 470
UNABLE_TO_UPDATE = 471
PROXY_FAILED = 472
class HeartbeatType(Enum):
MachineAlive = "MachineAlive"
TaskAlive = "TaskAlive"
class PoolType(Enum):
managed = "managed"
unmanaged = "unmanaged"
class PoolState(Enum):
init = "init"
running = "running"
shutdown = "shutdown"
halt = "halt"
@classmethod
def needs_work(cls) -> List["PoolState"]:
"""
set of states that indicate work is needed during eventing
"""
return [cls.init, cls.shutdown, cls.halt]
@classmethod
def available(cls) -> List["PoolState"]:
""" set of states that indicate if it's available for work """
return [cls.running]
class ScalesetState(Enum):
init = "init"
setup = "setup"
resize = "resize"
running = "running"
shutdown = "shutdown"
halt = "halt"
creation_failed = "creation_failed"
@classmethod
def needs_work(cls) -> List["ScalesetState"]:
"""
set of states that indicate work is needed during eventing
"""
return [cls.init, cls.setup, cls.resize, cls.shutdown, cls.halt]
@classmethod
def available(cls) -> List["ScalesetState"]:
""" set of states that indicate if it's available for work """
unavailable = [cls.shutdown, cls.halt, cls.creation_failed]
return [x for x in cls if x not in unavailable]
@classmethod
def modifying(cls) -> List["ScalesetState"]:
""" set of states that indicate scaleset is resizing """
return [
cls.halt,
cls.init,
cls.setup,
]
class Architecture(Enum):
x86_64 = "x86_64"
class NodeTaskState(Enum):
init = "init"
setting_up = "setting_up"
running = "running"
class AgentMode(Enum):
fuzz = "fuzz"
repro = "repro"
proxy = "proxy"
class NodeState(Enum):
init = "init"
free = "free"
setting_up = "setting_up"
rebooting = "rebooting"
ready = "ready"
busy = "busy"
done = "done"
shutdown = "shutdown"
halt = "halt"
@classmethod
def needs_work(cls) -> List["NodeState"]:
return [cls.done, cls.shutdown, cls.halt]
@classmethod
def ready_for_reset(cls) -> List["NodeState"]:
# If Node is in one of these states, ignore updates
# from the agent.
return [cls.done, cls.shutdown, cls.halt]
class GithubIssueState(Enum):
open = "open"
closed = "closed"
class GithubIssueSearchMatch(Enum):
title = "title"
body = "body"
class TaskDebugFlag(Enum):
keep_node_on_failure = "keep_node_on_failure"
keep_node_on_completion = "keep_node_on_completion"
class WebhookMessageState(Enum):
queued = "queued"
retrying = "retrying"
succeeded = "succeeded"
failed = "failed"
class UserFieldOperation(Enum):
add = "add"
replace = "replace"
class UserFieldType(Enum):
Bool = "Bool"
Int = "Int"
Str = "Str"
DictStr = "DictStr"
ListStr = "ListStr"
|
StarcoderdataPython
|
4825644
|
<filename>rpython/translator/platform/openbsd.py
"""Support for OpenBSD."""
import os
from rpython.translator.platform.bsd import BSD
class OpenBSD(BSD):
DEFAULT_CC = "cc"
name = "openbsd"
link_flags = os.environ.get("LDFLAGS", "").split() + ['-pthread']
cflags = ['-O3', '-pthread', '-fomit-frame-pointer', '-D_BSD_SOURCE'
] + os.environ.get("CFLAGS", "").split()
def _libs(self, libraries):
        libraries = set(libraries + ("intl", "iconv"))
return ['-l%s' % lib for lib in libraries if lib not in ["crypt", "dl", "rt"]]
class OpenBSD_64(OpenBSD):
shared_only = ('-fPIC',)
|
StarcoderdataPython
|
92026
|
import json
import os
import re
import subprocess
from functools import cached_property
import requests
import yaml
# Changelog types
PULL_REQUEST = 'pull_request'
COMMIT = 'commit_message'
class ChangelogCIBase:
"""Base Class for Changelog CI"""
github_api_url = 'https://api.github.com'
def __init__(
self,
repository,
event_path,
config,
pull_request_branch,
filename='CHANGELOG.md',
token=None
):
self.repository = repository
self.filename = filename
self.config = config
self.pull_request_branch = pull_request_branch
self.token = token
title, number = self._get_pull_request_title_and_number(event_path)
self.pull_request_title = title
self.pull_request_number = number
@staticmethod
def _get_pull_request_title_and_number(event_path):
"""Gets pull request title from `GITHUB_EVENT_PATH`"""
with open(event_path, 'r') as json_file:
# This is just a webhook payload available to the Action
data = json.load(json_file)
title = data["pull_request"]['title']
number = data['number']
return title, number
@cached_property
def _get_request_headers(self):
"""Get headers for GitHub API request"""
headers = {
'Accept': 'application/vnd.github.v3+json'
}
# if the user adds `GITHUB_TOKEN` add it to API Request
# required for `private` repositories
if self.token:
headers.update({
'authorization': 'Bearer {token}'.format(token=self.token)
})
return headers
def get_changes_after_last_release(self):
return NotImplemented
def parse_changelog(self, version, changes):
return NotImplemented
def _validate_pull_request(self):
"""Check if changelog should be generated for this pull request"""
pattern = re.compile(self.config.pull_request_title_regex)
match = pattern.search(self.pull_request_title)
if match:
return True
return
def _get_version_number(self):
"""Get version number from the pull request title"""
pattern = re.compile(self.config.version_regex)
match = pattern.search(self.pull_request_title)
if match:
return match.group()
return
def _get_file_mode(self):
"""Gets the mode that the changelog file should be opened in"""
if os.path.exists(self.filename):
# if the changelog file exists
# opens it in read-write mode
file_mode = 'r+'
else:
            # if the changelog file does not exist
# opens it in read-write mode
# but creates the file first also
file_mode = 'w+'
return file_mode
def _get_latest_release_date(self):
"""Using GitHub API gets latest release date"""
url = (
'{base_url}/repos/{repo_name}/releases/latest'
).format(
base_url=self.github_api_url,
repo_name=self.repository
)
response = requests.get(url, headers=self._get_request_headers)
published_date = ''
if response.status_code == 200:
response_data = response.json()
# get the published date of the latest release
published_date = response_data['published_at']
else:
            # if there is no previous release, the API will return 404 Not Found
msg = (
f'Could not find any previous release for '
f'{self.repository}, status code: {response.status_code}'
)
print_message(msg, message_type='warning')
return published_date
def _commit_changelog(self, string_data):
"""Write changelog to the changelog file"""
file_mode = self._get_file_mode()
with open(self.filename, file_mode) as f:
# read the existing data and store it in a variable
body = f.read()
# write at the top of the file
f.seek(0, 0)
f.write(string_data)
if body:
# re-write the existing data
f.write('\n\n')
f.write(body)
subprocess.run(['git', 'add', self.filename])
subprocess.run(
['git', 'commit', '-m', '(Changelog CI) Added Changelog']
)
subprocess.run(
['git', 'push', '-u', 'origin', self.pull_request_branch]
)
def _comment_changelog(self, string_data):
"""Comments Changelog to the pull request"""
if not self.token:
# Token is required by the GitHub API to create a Comment
# if not provided exit with error message
msg = (
"Could not add a comment. "
"`GITHUB_TOKEN` is required for this operation. "
"If you want to enable Changelog comment, please add "
"`GITHUB_TOKEN` to your workflow yaml file. "
"Look at Changelog CI's documentation for more information."
)
print_message(msg, message_type='error')
return
owner, repo = self.repository.split('/')
payload = {
'owner': owner,
'repo': repo,
'issue_number': self.pull_request_number,
'body': string_data
}
url = (
'{base_url}/repos/{repo}/issues/{number}/comments'
).format(
base_url=self.github_api_url,
repo=self.repository,
number=self.pull_request_number
)
response = requests.post(
url, headers=self._get_request_headers, json=payload
)
if response.status_code != 201:
# API should return 201, otherwise show error message
msg = (
f'Error while trying to create a comment. '
f'GitHub API returned error response for '
f'{self.repository}, status code: {response.status_code}'
)
print_message(msg, message_type='error')
def run(self):
"""Entrypoint to the Changelog CI"""
if (
not self.config.commit_changelog and
not self.config.comment_changelog
):
# if both commit_changelog and comment_changelog is set to false
# then exit with warning and don't generate Changelog
msg = (
'Skipping Changelog generation as both `commit_changelog` '
'and `comment_changelog` is set to False. '
'If you did not intend to do this please set '
'one or both of them to True.'
)
print_message(msg, message_type='error')
return
is_valid_pull_request = self._validate_pull_request()
if not is_valid_pull_request:
# if pull request regex doesn't match then exit
# and don't generate changelog
msg = (
f'The title of the pull request did not match. '
f'Regex tried: "{self.config.pull_request_title_regex}", '
f'Aborting Changelog Generation.'
)
print_message(msg, message_type='error')
return
version = self._get_version_number()
if not version:
# if the pull request title is not valid, exit the method
            # It might happen if the pull request is not meant to be a release
# or the title was not accurate.
msg = (
f'Could not find matching version number. '
f'Regex tried: {self.config.version_regex} '
f'Aborting Changelog Generation'
)
print_message(msg, message_type='error')
return
changes = self.get_changes_after_last_release()
# exit the method if there is no changes found
if not changes:
return
string_data = self.parse_changelog(version, changes)
if self.config.commit_changelog:
print_message('Commit Changelog', message_type='group')
self._commit_changelog(string_data)
print_message('', message_type='endgroup')
# Not needed in our Case
#if self.config.comment_changelog:
#print_message('Comment Changelog', message_type='group')
#self._comment_changelog(string_data)
#print_message('', message_type='endgroup')
class ChangelogCIPullRequest(ChangelogCIBase):
"""The class that generates, commits and/or comments changelog using pull requests"""
github_api_url = 'https://api.github.com'
@staticmethod
def _get_changelog_line(item):
"""Generate each line of changelog"""
return "* [#{number}]({url}): {title}\n".format(
number=item['number'],
url=item['url'],
title=item['title']
)
def get_changes_after_last_release(self):
"""Get all the merged pull request after latest release"""
previous_release_date = self._get_latest_release_date()
if previous_release_date:
merged_date_filter = 'merged:>=' + previous_release_date
else:
# if there is no release for the repo then
# do not filter by merged date
merged_date_filter = ''
url = (
'{base_url}/search/issues'
'?q=repo:{repo_name}+'
'is:pr+'
'is:merged+'
'sort:author-date-asc+'
'{merged_date_filter}'
'&sort=merged'
).format(
base_url=self.github_api_url,
repo_name=self.repository,
merged_date_filter=merged_date_filter
)
items = []
response = requests.get(url, headers=self._get_request_headers)
if response.status_code == 200:
response_data = response.json()
# `total_count` represents the number of
# pull requests returned by the API call
if response_data['total_count'] > 0:
for item in response_data['items']:
data = {
'title': item['title'],
'number': item['number'],
'url': item['html_url'],
'labels': [label['name'] for label in item['labels']]
}
items.append(data)
else:
msg = (
f'There was no pull request '
f'made on {self.repository} after last release.'
)
print_message(msg, message_type='error')
else:
msg = (
f'Could not get pull requests for '
f'{self.repository} from GitHub API. '
f'response status code: {response.status_code}'
)
print_message(msg, message_type='error')
return items
def parse_changelog(self, version, changes):
"""Parse the pull requests data and return a string"""
string_data = (
'# ' + self.config.header_prefix + ' ' + version + '\n\n'
)
group_config = self.config.group_config
if group_config:
for config in group_config:
if len(changes) == 0:
break
items_string = ''
for pull_request in changes:
# check if the pull request label matches with
# any label of the config
if (
any(
label in pull_request['labels']
for label in config['labels']
)
):
items_string += self._get_changelog_line(pull_request)
# remove the item so that one item
# does not match multiple groups
changes.remove(pull_request)
if items_string:
string_data += '\n#### ' + config['title'] + '\n\n'
string_data += '\n' + items_string
else:
            # If group config does not exist then append changes without any groups
string_data += ''.join(
map(self._get_changelog_line, changes)
)
return string_data
class ChangelogCICommitMessage(ChangelogCIBase):
"""The class that generates, commits and/or comments changelog using commit messages"""
@staticmethod
def _get_changelog_line(item):
"""Generate each line of changelog"""
return "* [{sha}]({url}): {message}\n".format(
sha=item['sha'][:6],
url=item['url'],
message=item['message']
)
def get_changes_after_last_release(self):
"""Get all the merged pull request after latest release"""
previous_release_date = self._get_latest_release_date()
url = '{base_url}/repos/{repo_name}/commits?since={date}'.format(
base_url=self.github_api_url,
repo_name=self.repository,
date=previous_release_date or ''
)
items = []
response = requests.get(url, headers=self._get_request_headers)
if response.status_code == 200:
response_data = response.json()
if len(response_data) > 0:
for item in response_data:
message = item['commit']['message']
# Exclude merge commit
if not (
message.startswith('Merge pull request #') or
message.startswith('Merge branch')
):
data = {
'sha': item['sha'],
'message': message,
'url': item['html_url']
}
items.append(data)
else:
print_message(f'Skipping Merge Commit "{message}"')
else:
msg = (
f'There was no commit '
f'made on {self.repository} after last release.'
)
print_message(msg, message_type='error')
else:
msg = (
f'Could not get commits for '
f'{self.repository} from GitHub API. '
f'response status code: {response.status_code}'
)
print_message(msg, message_type='error')
return items
def parse_changelog(self, version, changes):
"""Parse the commit data and return a string"""
string_data = (
'# ' + self.config.header_prefix + ' ' + version + '\n\n'
)
string_data += ''.join(map(self._get_changelog_line, changes))
return string_data
class ChangelogCIConfiguration:
"""Configuration class for Changelog CI"""
# The regular expression used to extract semantic versioning is a
# slightly less restrictive modification of the following regular expression
# https://semver.org/#is-there-a-suggested-regular-expression-regex-to-check-a-semver-string
DEFAULT_SEMVER_REGEX = (
r"v?(0|[1-9]\d*)\.(0|[1-9]\d*)\.?(0|[1-9]\d*)?(?:-(("
r"?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|["
r"1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+(["
r"0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?"
)
DEFAULT_PULL_REQUEST_TITLE_REGEX = r"^(?i:release)"
DEFAULT_VERSION_PREFIX = "Version:"
DEFAULT_GROUP_CONFIG = []
COMMIT_CHANGELOG = True
COMMENT_CHANGELOG = False
def __init__(self, config_file):
# Initialize with default configuration
self.header_prefix = self.DEFAULT_VERSION_PREFIX
self.commit_changelog = self.COMMIT_CHANGELOG
self.comment_changelog = self.COMMENT_CHANGELOG
self.pull_request_title_regex = self.DEFAULT_PULL_REQUEST_TITLE_REGEX
self.version_regex = self.DEFAULT_SEMVER_REGEX
self.changelog_type = PULL_REQUEST
self.group_config = self.DEFAULT_GROUP_CONFIG
self.user_raw_config = self.get_user_config(config_file)
self.validate_configuration()
@staticmethod
def get_user_config(config_file):
"""Read user provided configuration file and return user configuration"""
if not config_file:
print_message(
'No Configuration file found, '
'falling back to default configuration to parse changelog',
message_type='warning'
)
return
try:
# parse config files with the extension .yml and .yaml
# using YAML syntax
if config_file.endswith('yml') or config_file.endswith('yaml'):
loader = yaml.safe_load
# parse config files with the extension .json
# using JSON syntax
elif config_file.endswith('json'):
loader = json.load
else:
print_message(
'We only support `JSON` or `YAML` file for configuration '
'falling back to default configuration to parse changelog',
message_type='error'
)
return
with open(config_file, 'r') as file:
config = loader(file)
return config
except Exception as e:
msg = (
f'Invalid Configuration file, error: {e}, '
'falling back to default configuration to parse changelog'
)
print_message(msg, message_type='error')
return
def validate_configuration(self):
"""Validate all the configuration options and update configuration attributes"""
if not self.user_raw_config:
return
if not isinstance(self.user_raw_config, dict):
print_message(
'Configuration does not contain required mapping '
'falling back to default configuration to parse changelog',
message_type='error'
)
return
self.validate_header_prefix()
self.validate_commit_changelog()
self.validate_comment_changelog()
self.validate_pull_request_title_regex()
self.validate_version_regex()
self.validate_changelog_type()
self.validate_group_config()
def validate_header_prefix(self):
"""Validate and set header_prefix configuration option"""
header_prefix = self.user_raw_config.get('header_prefix')
if not header_prefix or not isinstance(header_prefix, str):
msg = (
'`header_prefix` was not provided or not valid, '
f'falling back to `{self.header_prefix}`.'
)
print_message(msg, message_type='warning')
else:
self.header_prefix = header_prefix
def validate_commit_changelog(self):
"""Validate and set commit_changelog configuration option"""
commit_changelog = self.user_raw_config.get('commit_changelog')
if commit_changelog not in [0, 1, False, True]:
msg = (
'`commit_changelog` was not provided or not valid, '
f'falling back to `{self.commit_changelog}`.'
)
print_message(msg, message_type='warning')
else:
self.commit_changelog = bool(commit_changelog)
def validate_comment_changelog(self):
"""Validate and set comment_changelog configuration option"""
comment_changelog = self.user_raw_config.get('comment_changelog')
if comment_changelog not in [0, 1, False, True]:
msg = (
'`comment_changelog` was not provided or not valid, '
f'falling back to `{self.comment_changelog}`.'
)
print_message(msg, message_type='warning')
else:
self.comment_changelog = bool(comment_changelog)
def validate_pull_request_title_regex(self):
"""Validate and set pull_request_title_regex configuration option"""
pull_request_title_regex = self.user_raw_config.get('pull_request_title_regex')
if not pull_request_title_regex:
msg = (
'`pull_request_title_regex` is not provided, '
f'Falling back to {self.pull_request_title_regex}.'
)
print_message(msg, message_type='warning')
return
try:
# This will raise an error if the provided regex is not valid
re.compile(pull_request_title_regex)
self.pull_request_title_regex = pull_request_title_regex
except Exception:
msg = (
'`pull_request_title_regex` is not valid, '
f'Falling back to {self.pull_request_title_regex}.'
)
print_message(msg, message_type='error')
def validate_version_regex(self):
"""Validate and set validate_version_regex configuration option"""
version_regex = self.user_raw_config.get('version_regex')
if not version_regex:
msg = (
'`version_regex` is not provided, '
f'Falling back to {self.version_regex}.'
)
print_message(msg, message_type='warning')
return
try:
# This will raise an error if the provided regex is not valid
re.compile(version_regex)
self.version_regex = version_regex
except Exception:
msg = (
'`version_regex` is not valid, '
f'Falling back to {self.version_regex}.'
)
print_message(msg, message_type='warning')
def validate_changelog_type(self):
"""Validate and set changelog_type configuration option"""
changelog_type = self.user_raw_config.get('changelog_type')
if not (
changelog_type and
isinstance(changelog_type, str) and
changelog_type in [PULL_REQUEST, COMMIT]
):
msg = (
'`changelog_type` was not provided or not valid, '
f'the options are "{PULL_REQUEST}" or "{COMMIT}", '
f'falling back to default value of "{self.changelog_type}".'
)
print_message(msg, message_type='warning')
else:
self.changelog_type = changelog_type
def validate_group_config(self):
"""Validate and set group_config configuration option"""
group_config = self.user_raw_config.get('group_config')
if not group_config:
msg = '`group_config` was not provided'
print_message(msg, message_type='warning')
return
if not isinstance(group_config, list):
msg = '`group_config` is not valid, It must be an Array/List.'
print_message(msg, message_type='error')
return
for item in group_config:
self.validate_group_config_item(item)
def validate_group_config_item(self, item):
"""Validate and set group_config item configuration option"""
if not isinstance(item, dict):
msg = (
'`group_config` items must have key, '
'value pairs of `title` and `labels`'
)
print_message(msg, message_type='error')
return
title = item.get('title')
labels = item.get('labels')
if not title or not isinstance(title, str):
msg = (
'`group_config` item must contain string title, '
f'but got `{title}`'
)
print_message(msg, message_type='error')
return
if not labels or not isinstance(labels, list):
msg = (
'`group_config` item must contain array of labels, '
f'but got `{labels}`'
)
print_message(msg, message_type='error')
return
if not all(isinstance(label, str) for label in labels):
msg = (
'`group_config` labels array must be string type, '
f'but got `{labels}`'
)
print_message(msg, message_type='error')
return
self.group_config.append(item)
def print_message(message, message_type=None):
"""Helper function to print colorful outputs in GitHub Actions shell"""
# docs: https://docs.github.com/en/actions/reference/workflow-commands-for-github-actions
if not message_type:
return subprocess.run(['echo', f'{message}'])
if message_type == 'endgroup':
return subprocess.run(['echo', '::endgroup::'])
return subprocess.run(['echo', f'::{message_type}::{message}'])
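# Behaviour sketch (illustrative only): print_message wraps GitHub Actions
# workflow commands, e.g.
#   print_message('Generate Changelog', message_type='group')  # echoes "::group::Generate Changelog"
#   print_message('bad config', message_type='error')          # echoes "::error::bad config"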
CI_CLASSES = {
PULL_REQUEST: ChangelogCIPullRequest,
COMMIT: ChangelogCICommitMessage
}
if __name__ == '__main__':
# Default environment variable from GitHub
# https://docs.github.com/en/actions/configuring-and-managing-workflows/using-environment-variables
event_path = os.environ['GITHUB_EVENT_PATH']
repository = os.environ['GITHUB_REPOSITORY']
pull_request_branch = os.environ['GITHUB_HEAD_REF']
# User inputs from workflow
filename = os.environ['INPUT_CHANGELOG_FILENAME']
config_file = os.environ['INPUT_CONFIG_FILE']
# Token provided from the workflow
token = os.environ.get('GITHUB_TOKEN')
# Committer username and email address
username = os.environ['INPUT_COMMITTER_USERNAME']
email = os.environ['INPUT_COMMITTER_EMAIL']
# Group: Checkout git repository
print_message('Checkout git repository', message_type='group')
subprocess.run(['git', 'fetch', '--prune', '--unshallow', 'origin', pull_request_branch])
subprocess.run(['git', 'checkout', pull_request_branch])
print_message('', message_type='endgroup')
# Group: Configure Git
print_message('Configure Git', message_type='group')
subprocess.run(['git', 'config', 'user.name', username])
subprocess.run(['git', 'config', 'user.email', email])
print_message('', message_type='endgroup')
print_message('Parse Configuration', message_type='group')
config = ChangelogCIConfiguration(config_file)
print_message('', message_type='endgroup')
# Group: Generate Changelog
print_message('Generate Changelog', message_type='group')
# Get CI class using configuration
changelog_ci_class = CI_CLASSES.get(
config.changelog_type
)
# Initialize the Changelog CI
ci = changelog_ci_class(
repository,
event_path,
config,
pull_request_branch,
filename=filename,
token=token
)
# Run Changelog CI
ci.run()
print_message('', message_type='endgroup')
|
StarcoderdataPython
|
1678984
|
import os.path as path
import logging
import sqlite3
import pickle
from collections import deque
from ipaddress import ip_address
from threading import Lock
from time import time, sleep
from urllib.parse import urlparse
from tracker import Tracker
max_input_length = 20000
submitted_trackers = deque(maxlen=10000)
if path.exists('raw_data.pickle'):
raw_data = pickle.load(open('raw_data.pickle', 'rb'))
else:
raw_data = deque(maxlen=300)
if path.exists('submitted_data.pickle'):
submitted_data = pickle.load(open('submitted_data.pickle', 'rb'))
else:
submitted_data = deque(maxlen=300)
deque_lock = Lock()
list_lock = Lock()
trackers_list = []
processing_trackers = False
logger = logging.getLogger('trackon_logger')
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
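# Illustrative usage sketch (not part of the original module): attaching
# dict_factory as a row_factory makes every fetched row a plain dict, e.g.
#   conn = sqlite3.connect('trackon.db')
#   conn.row_factory = dict_factory
#   row = conn.execute("SELECT * FROM STATUS LIMIT 1").fetchone()  # dict or None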
def get_all_data_from_db():
conn = sqlite3.connect('trackon.db')
conn.row_factory = dict_factory
c = conn.cursor()
trackers_from_db = []
for row in c.execute("SELECT * FROM STATUS ORDER BY uptime DESC"):
tracker_in_db = Tracker(url=row.get('url'),
host=row.get('host'),
ip=eval(row.get('ip')),
latency=row.get('latency'),
last_checked=row.get('last_checked'),
interval=row.get('interval'),
status=row.get('status'),
uptime=row.get('uptime'),
country=eval(row.get('country')),
country_code=eval(row.get('country_code')),
historic=eval(row.get('historic')),
added=row.get('added'),
network=eval(row.get('network')),
last_downtime=row.get('last_downtime'),
last_uptime=row.get('last_uptime'))
trackers_from_db.append(tracker_in_db)
conn.close()
return trackers_from_db
def process_uptime_and_downtime_time(trackers_unprocessed):
for tracker in trackers_unprocessed:
if tracker.status == 1:
if not tracker.last_downtime:
tracker.status_string = "Working"
else:
time_string = calculate_time_ago(tracker.last_downtime)
tracker.status_string = "Working for " + time_string
elif tracker.status == 0:
if not tracker.last_uptime:
tracker.status_string = "Down"
else:
time_string = calculate_time_ago(tracker.last_uptime)
tracker.status_string = "Down for " + time_string
return trackers_unprocessed
def calculate_time_ago(last_time):
now = int(time())
relative = now - int(last_time)
if relative < 60:
if relative == 1:
return str(int(round(relative))) + " second"
else:
return str(int(round(relative))) + " seconds"
minutes = round(relative / 60)
if minutes < 60:
if minutes == 1:
return str(minutes) + " minute"
else:
return str(minutes) + " minutes"
hours = round(relative / 3600)
if hours < 24:
if hours == 1:
return str(hours) + " hour"
else:
return str(hours) + " hours"
days = round(relative / 86400)
if days < 31:
if days == 1:
return str(days) + " day"
else:
return str(days) + " days"
months = round(relative / 2592000)
if months < 12:
if months == 1:
return str(months) + " month"
else:
return str(months) + " months"
years = round(relative / 31536000)
if years == 1:
return str(years) + " year"
else:
return str(years) + " years"
def enqueue_new_trackers(input_string):
global trackers_list
trackers_list = get_all_data_from_db()
if len(input_string) > max_input_length:
return
new_trackers_list = input_string.split()
for url in new_trackers_list:
print("SUBMITTED " + url)
add_one_tracker_to_submitted_deque(url)
if processing_trackers is False:
process_submitted_deque()
def add_one_tracker_to_submitted_deque(url):
try:
ip_address(urlparse(url).hostname)
print("ADDRESS IS IP")
return
except ValueError:
pass
with deque_lock:
for tracker_in_deque in submitted_trackers:
if urlparse(tracker_in_deque.url).netloc == urlparse(url).netloc:
print("Tracker already in the queue.")
return
with list_lock:
for tracker_in_list in trackers_list:
if tracker_in_list.host == urlparse(url).hostname:
print("Tracker already being tracked.")
return
try:
tracker_candidate = Tracker.from_url(url)
except (RuntimeError, ValueError) as e:
print(e)
return
all_ips_tracked = get_all_ips_tracked()
exists_ip = set(tracker_candidate.ip).intersection(all_ips_tracked)
if exists_ip:
print("IP of the tracker already in the list.")
return
with deque_lock:
submitted_trackers.append(tracker_candidate)
print("Tracker added to the submitted queue")
def process_submitted_deque():
global processing_trackers
processing_trackers = True
while submitted_trackers:
with deque_lock:
tracker = submitted_trackers.popleft()
print("Size of deque: ", len(submitted_trackers))
process_new_tracker(tracker)
pickle.dump(submitted_data, open('submitted_data.pickle', 'wb'))
print("Finished processing new trackers")
processing_trackers = False
def process_new_tracker(tracker_candidate):
print('New tracker: ' + tracker_candidate.url)
all_ips_tracked = get_all_ips_tracked()
exists_ip = set(tracker_candidate.ip).intersection(all_ips_tracked)
if exists_ip:
print("IP of the tracker already in the list.")
return
with list_lock:
for tracker_in_list in trackers_list:
if tracker_in_list.host == urlparse(tracker_candidate.url).hostname:
print("Tracker already being tracked.")
return
logger.info('Contact new tracker ' + tracker_candidate.url)
tracker_candidate.last_checked = int(time())
try:
tracker_candidate.latency, tracker_candidate.interval, tracker_candidate.url = tracker_candidate.scrape()
except (RuntimeError, ValueError):
return
    if 300 > tracker_candidate.interval or tracker_candidate.interval > 10800:  # reject trackers with an update
        # interval shorter than 5 minutes or longer than 3 hours
debug = submitted_data.popleft()
info = debug['info']
debug.update({'status': 0,
'info': info + '<br>Tracker rejected for having an interval shorter than 5 minutes or longer than 3 hours'})
submitted_data.appendleft(debug)
return
tracker_candidate.update_ipapi_data()
tracker_candidate.is_up()
tracker_candidate.update_uptime()
insert_in_db(tracker_candidate)
logger.info('TRACKER ADDED TO LIST: ' + tracker_candidate.url)
def update_outdated_trackers():
while True:
now = int(time())
trackers_outdated = []
for tracker in get_all_data_from_db():
if (now - tracker.last_checked) > tracker.interval:
trackers_outdated.append(tracker)
for tracker in trackers_outdated:
print("GONNA UPDATE " + tracker.url)
tracker.update_status()
pickle.dump(raw_data, open('raw_data.pickle', 'wb'))
detect_new_ip_duplicates()
sleep(5)
def detect_new_ip_duplicates():
all_ips = get_all_ips_tracked()
non_duplicates = set()
for ip in all_ips:
if ip not in non_duplicates:
non_duplicates.add(ip)
else:
            logger.info('IP ' + ip + ' is duplicated, manual action required')
print("IP DUPLICATED: " + ip)
def insert_in_db(tracker):
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute('INSERT INTO status VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)',
(tracker.url, tracker.host, str(tracker.ip), tracker.latency, tracker.last_checked, tracker.interval,
tracker.status, tracker.uptime, str(tracker.country), str(tracker.country_code), str(tracker.network),
tracker.added, str(tracker.historic), tracker.last_downtime, tracker.last_uptime,))
conn.commit()
conn.close()
def update_in_db(tracker):
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute(
"UPDATE status SET ip=?, latency=?, last_checked=?, status=?, interval=?, uptime=?,"
" historic=?, country=?, country_code=?, network=?, last_downtime=?, last_uptime=? WHERE url=?",
(str(tracker.ip), tracker.latency, tracker.last_checked, tracker.status, tracker.interval, tracker.uptime,
str(tracker.historic), str(tracker.country), str(tracker.country_code), str(tracker.network),
tracker.last_downtime, tracker.last_uptime, tracker.url)).fetchone()
conn.commit()
conn.close()
def get_all_ips_tracked():
all_ips_of_all_trackers = []
all_data = get_all_data_from_db()
for tracker_in_list in all_data:
for ip in tracker_in_list.ip:
all_ips_of_all_trackers.append(ip)
return all_ips_of_all_trackers
def list_live():
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute('SELECT URL FROM STATUS WHERE STATUS = 1 ORDER BY UPTIME DESC')
raw_list = c.fetchall()
conn.close()
return format_list(raw_list)
def list_uptime(uptime):
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute('SELECT URL FROM STATUS WHERE UPTIME >= ? ORDER BY UPTIME DESC', (uptime,))
raw_list = c.fetchall()
conn.close()
return format_list(raw_list), len(raw_list)
def api_udp():
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute('SELECT URL FROM STATUS WHERE URL LIKE "udp://%" AND UPTIME >= 95 ORDER BY UPTIME DESC')
raw_list = c.fetchall()
conn.close()
return format_list(raw_list)
def api_http():
conn = sqlite3.connect('trackon.db')
c = conn.cursor()
c.execute('SELECT URL FROM STATUS WHERE URL LIKE "http%" AND UPTIME >= 95 ORDER BY UPTIME DESC')
raw_list = c.fetchall()
conn.close()
return format_list(raw_list)
def format_list(raw_list):
formatted_list = ''
for url in raw_list:
url_string = url[0]
formatted_list += url_string + '\n' + '\n'
return formatted_list
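# Illustrative sketch (not part of the module): format_list flattens the
# single-column rows returned by sqlite3 into a blank-line separated string,
# e.g. [("udp://tracker.example:1337/announce",)] ->
# "udp://tracker.example:1337/announce\n\n"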
|
StarcoderdataPython
|
4211
|
<reponame>Aditya239233/MDP
import matplotlib.pyplot as plt
import numpy as np
import math
from algorithm.planner.utils.car_utils import Car_C
PI = np.pi
class Arrow:
def __init__(self, x, y, theta, L, c):
angle = np.deg2rad(30)
d = 0.3 * L
w = 2
x_start = x
y_start = y
x_end = x + L * np.cos(theta)
y_end = y + L * np.sin(theta)
theta_hat_L = theta + PI - angle
theta_hat_R = theta + PI + angle
x_hat_start = x_end
x_hat_end_L = x_hat_start + d * np.cos(theta_hat_L)
x_hat_end_R = x_hat_start + d * np.cos(theta_hat_R)
y_hat_start = y_end
y_hat_end_L = y_hat_start + d * np.sin(theta_hat_L)
y_hat_end_R = y_hat_start + d * np.sin(theta_hat_R)
plt.plot([x_start, x_end], [y_start, y_end], color=c, linewidth=w)
plt.plot([x_hat_start, x_hat_end_L],
[y_hat_start, y_hat_end_L], color=c, linewidth=w)
plt.plot([x_hat_start, x_hat_end_R],
[y_hat_start, y_hat_end_R], color=c, linewidth=w)
class Car:
def __init__(self, x, y, yaw, w, L):
theta_B = PI + yaw
xB = x + L / 4 * np.cos(theta_B)
yB = y + L / 4 * np.sin(theta_B)
theta_BL = theta_B + PI / 2
theta_BR = theta_B - PI / 2
x_BL = xB + w / 2 * np.cos(theta_BL) # Bottom-Left vertex
y_BL = yB + w / 2 * np.sin(theta_BL)
x_BR = xB + w / 2 * np.cos(theta_BR) # Bottom-Right vertex
y_BR = yB + w / 2 * np.sin(theta_BR)
x_FL = x_BL + L * np.cos(yaw) # Front-Left vertex
y_FL = y_BL + L * np.sin(yaw)
x_FR = x_BR + L * np.cos(yaw) # Front-Right vertex
y_FR = y_BR + L * np.sin(yaw)
plt.plot([x_BL, x_BR, x_FR, x_FL, x_BL],
[y_BL, y_BR, y_FR, y_FL, y_BL],
linewidth=1, color='black')
Arrow(x, y, yaw, L / 2, 'black')
def draw_car(x, y, yaw, steer, color='black', extended_car=True):
if extended_car:
car = np.array([[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB, Car_C.ACTUAL_RF, Car_C.ACTUAL_RF, -Car_C.ACTUAL_RB, -Car_C.ACTUAL_RB],
[Car_C.W / 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2, Car_C.W/2, -Car_C.W/2, -Car_C.W/2, Car_C.W/2]])
else:
car = np.array([[-Car_C.RB, -Car_C.RB, Car_C.RF, Car_C.RF, -Car_C.RB],
[Car_C.W / 2, -Car_C.W / 2, -Car_C.W / 2, Car_C.W / 2, Car_C.W / 2]])
wheel = np.array([[-Car_C.TR, -Car_C.TR, Car_C.TR, Car_C.TR, -Car_C.TR],
[Car_C.TW / 4, -Car_C.TW / 4, -Car_C.TW / 4, Car_C.TW / 4, Car_C.TW / 4]])
rlWheel = wheel.copy()
rrWheel = wheel.copy()
frWheel = wheel.copy()
flWheel = wheel.copy()
Rot1 = np.array([[math.cos(yaw), -math.sin(yaw)],
[math.sin(yaw), math.cos(yaw)]])
Rot2 = np.array([[math.cos(steer), math.sin(steer)],
[-math.sin(steer), math.cos(steer)]])
frWheel = np.dot(Rot2, frWheel)
flWheel = np.dot(Rot2, flWheel)
frWheel += np.array([[Car_C.WB], [-Car_C.WD / 2]])
flWheel += np.array([[Car_C.WB], [Car_C.WD / 2]])
rrWheel[1, :] -= Car_C.WD / 2
rlWheel[1, :] += Car_C.WD / 2
frWheel = np.dot(Rot1, frWheel)
flWheel = np.dot(Rot1, flWheel)
rrWheel = np.dot(Rot1, rrWheel)
rlWheel = np.dot(Rot1, rlWheel)
car = np.dot(Rot1, car)
frWheel += np.array([[x], [y]])
flWheel += np.array([[x], [y]])
rrWheel += np.array([[x], [y]])
rlWheel += np.array([[x], [y]])
car += np.array([[x], [y]])
plt.plot(car[0, :], car[1, :], color)
plt.plot(frWheel[0, :], frWheel[1, :], color)
plt.plot(rrWheel[0, :], rrWheel[1, :], color)
plt.plot(flWheel[0, :], flWheel[1, :], color)
plt.plot(rlWheel[0, :], rlWheel[1, :], color)
Arrow(x, y, yaw, Car_C.WB * 0.8, color)
|
StarcoderdataPython
|
1637972
|
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starlark test rules for matching text in (non-bundle) rule outputs."""
load(
"@bazel_skylib//lib:dicts.bzl",
"dicts",
)
load(
"@bazel_skylib//lib:paths.bzl",
"paths",
)
def _output_text_match_test_impl(ctx):
"""Implementation of the `output_text_match_test` rule."""
target_under_test = ctx.attr.target_under_test
path_suffixes = dicts.add(
ctx.attr.files_match,
ctx.attr.files_not_match,
).keys()
# Map the path suffixes to files output by the target. If multiple outputs
# match, fail the build.
path_suffix_to_output = {}
for path_suffix in path_suffixes:
for output in target_under_test[DefaultInfo].files.to_list():
if output.short_path.endswith(path_suffix):
if path_suffix in path_suffix_to_output:
fail(("Target {} had multiple outputs whose paths end in " +
"'{}'; use additional path segments to distinguish " +
"them.").format(target_under_test.label, path_suffix))
path_suffix_to_output[path_suffix] = output
# If a path suffix did not match any of the outputs, fail.
for path_suffix in path_suffixes:
if path_suffix not in path_suffix_to_output:
fail(("Target {} did not output a file whose path ends in " +
"'{}'.").format(target_under_test.label, path_suffix))
# Generate a script that uses the regex matching assertions from
# unittest.bash to verify the matches (or not-matches) in the outputs.
unittest_bash_path = "test/unittest.bash"
workspace = target_under_test.label.workspace_name
if workspace != "":
unittest_bash_path = paths.join("..", workspace, unittest_bash_path)
generated_script = [
"#!/usr/bin/env bash",
"set -euo pipefail",
"source {}".format(unittest_bash_path),
]
for path_suffix, patterns in ctx.attr.files_match.items():
for pattern in patterns:
generated_script.append("assert_contains '{}' \"{}\"".format(
pattern,
path_suffix_to_output[path_suffix].short_path,
))
for path_suffix, patterns in ctx.attr.files_not_match.items():
for pattern in patterns:
generated_script.append("assert_not_contains '{}' \"{}\"".format(
pattern,
path_suffix_to_output[path_suffix].short_path,
))
output_script = ctx.actions.declare_file(
"{}_test_script".format(ctx.label.name),
)
ctx.actions.write(
output = output_script,
content = "\n".join(generated_script),
is_executable = True,
)
return [
DefaultInfo(
executable = output_script,
runfiles = ctx.runfiles(
files = (
path_suffix_to_output.values() +
ctx.attr._test_deps.files.to_list()
),
),
),
]
output_text_match_test = rule(
attrs = {
"files_match": attr.string_list_dict(
mandatory = False,
doc = """\
A dictionary where each key is the path suffix of a file output by the target
under test, and the corresponding value is a list of regular expressions that
are expected to be found somewhere in that file.
""",
),
"files_not_match": attr.string_list_dict(
mandatory = False,
doc = """\
A dictionary where each key is the path suffix of a file output by the target
under test, and the corresponding value is a list of regular expressions that
are expected to **not** be found somewhere in that file.
""",
),
"target_under_test": attr.label(
mandatory = True,
doc = "The target whose outputs are to be verified.",
),
"_test_deps": attr.label(
default = "@build_bazel_rules_apple//test:apple_verification_test_deps",
),
},
implementation = _output_text_match_test_impl,
test = True,
)
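# Hypothetical BUILD usage (illustrative only; target and file names are assumptions):
#
#   output_text_match_test(
#       name = "my_output_match_test",
#       target_under_test = ":my_target",
#       files_match = {"my_output.txt": ["expected pattern"]},
#       files_not_match = {"my_output.txt": ["forbidden pattern"]},
#   )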
|
StarcoderdataPython
|
139116
|
<filename>openmdao/devtools/docs_experiment/experimental_source/core/experimental_driver.py
"""Define a base class for all Drivers in OpenMDAO."""
from collections import OrderedDict
import warnings
import numpy as np
from openmdao.recorders.recording_manager import RecordingManager
from openmdao.recorders.recording_iteration_stack import Recording
from openmdao.utils.record_util import create_local_meta, check_path
from openmdao.utils.mpi import MPI
from openmdao.utils.options_dictionary import OptionsDictionary
class ExperimentalDriver(object):
"""
A fake driver class used for doc generation testing.
Attributes
----------
fail : bool
Reports whether the driver ran successfully.
iter_count : int
Keep track of iterations for case recording.
    options : <OptionsDictionary>
        Dictionary with general pyoptsparse options.
recording_options : <OptionsDictionary>
Dictionary with driver recording options.
cite : str
Listing of relevant citations that should be referenced when
publishing work that uses this class.
_problem : <Problem>
Pointer to the containing problem.
supports : <OptionsDictionary>
        Provides a consistent way for drivers to declare what features they support.
_designvars : dict
Contains all design variable info.
_cons : dict
Contains all constraint info.
_objs : dict
Contains all objective info.
_responses : dict
Contains all response info.
_rec_mgr : <RecordingManager>
Object that manages all recorders added to this driver.
_vars_to_record : dict
Dict of lists of var names indicating what to record
_model_viewer_data : dict
Structure of model, used to make n2 diagram.
_remote_dvs : dict
Dict of design variables that are remote on at least one proc. Values are
(owning rank, size).
_remote_cons : dict
Dict of constraints that are remote on at least one proc. Values are
(owning rank, size).
_remote_objs : dict
Dict of objectives that are remote on at least one proc. Values are
(owning rank, size).
_remote_responses : dict
A combined dict containing entries from _remote_cons and _remote_objs.
_total_coloring : tuple of dicts
A data structure describing coloring for simultaneous derivs.
_res_jacs : dict
Dict of sparse subjacobians for use with certain optimizers, e.g. pyOptSparseDriver.
"""
def __init__(self):
"""
Initialize the driver.
"""
self._rec_mgr = RecordingManager()
self._vars_to_record = {
'desvarnames': set(),
'responsenames': set(),
'objectivenames': set(),
'constraintnames': set(),
'sysinclnames': set(),
}
self._problem = None
self._designvars = None
self._cons = None
self._objs = None
self._responses = None
self.options = OptionsDictionary()
self.recording_options = OptionsDictionary()
###########################
self.recording_options.declare('record_desvars', types=bool, default=True,
desc='Set to True to record design variables at the \
driver level')
self.recording_options.declare('record_responses', types=bool, default=False,
desc='Set to True to record responses at the driver level')
self.recording_options.declare('record_objectives', types=bool, default=True,
desc='Set to True to record objectives at the \
driver level')
self.recording_options.declare('record_constraints', types=bool, default=True,
desc='Set to True to record constraints at the \
driver level')
self.recording_options.declare('includes', types=list, default=[],
desc='Patterns for variables to include in recording. \
Uses fnmatch wildcards')
self.recording_options.declare('excludes', types=list, default=[],
desc='Patterns for vars to exclude in recording '
'(processed post-includes). Uses fnmatch wildcards')
self.recording_options.declare('record_derivatives', types=bool, default=False,
desc='Set to True to record derivatives at the driver \
level')
###########################
# What the driver supports.
self.supports = OptionsDictionary()
self.supports.declare('inequality_constraints', types=bool, default=False)
self.supports.declare('equality_constraints', types=bool, default=False)
self.supports.declare('linear_constraints', types=bool, default=False)
self.supports.declare('two_sided_constraints', types=bool, default=False)
self.supports.declare('multiple_objectives', types=bool, default=False)
self.supports.declare('integer_design_vars', types=bool, default=False)
self.supports.declare('gradients', types=bool, default=False)
self.supports.declare('active_set', types=bool, default=False)
self.supports.declare('simultaneous_derivatives', types=bool, default=False)
self.supports.declare('distributed_design_vars', types=bool, default=False)
self.iter_count = 0
self.options = None
self._model_viewer_data = None
self.cite = ""
# TODO, support these in OpenMDAO
self.supports.declare('integer_design_vars', types=bool, default=False)
self._res_jacs = {}
self.fail = False
def add_recorder(self, recorder):
"""
Add a recorder to the driver.
Parameters
----------
recorder : CaseRecorder
A recorder instance.
"""
self._rec_mgr.append(recorder)
def cleanup(self):
"""
Clean up resources prior to exit.
"""
self._rec_mgr.close()
def _setup_driver(self, problem):
"""
Prepare the driver for execution.
This is the final thing to run during setup.
Parameters
----------
problem : <Problem>
Pointer to the containing problem.
"""
pass
def _get_voi_val(self, name, meta, remote_vois):
"""
Get the value of a variable of interest (objective, constraint, or design var).
This will retrieve the value if the VOI is remote.
Parameters
----------
name : str
Name of the variable of interest.
meta : dict
Metadata for the variable of interest.
remote_vois : dict
Dict containing (owning_rank, size) for all remote vois of a particular
type (design var, constraint, or objective).
Returns
-------
float or ndarray
The value of the named variable of interest.
"""
model = self._problem.model
comm = model.comm
vec = model._outputs._views_flat
indices = meta['indices']
if name in remote_vois:
owner, size = remote_vois[name]
if owner == comm.rank:
if indices is None:
val = vec[name].copy()
else:
val = vec[name][indices]
else:
if indices is not None:
size = len(indices)
val = np.empty(size)
comm.Bcast(val, root=owner)
else:
if indices is None:
val = vec[name].copy()
else:
val = vec[name][indices]
if self._has_scaling:
# Scale design variable values
adder = meta['adder']
if adder is not None:
val += adder
scaler = meta['scaler']
if scaler is not None:
val *= scaler
return val
def get_design_var_values(self, filter=None):
"""
Return the design variable values.
This is called to gather the initial design variable state.
Parameters
----------
filter : list
List of desvar names used by recorders.
Returns
-------
dict
Dictionary containing values of each design variable.
"""
if filter:
dvs = filter
else:
# use all the designvars
dvs = self._designvars
return {n: self._get_voi_val(n, self._designvars[n], self._remote_dvs) for n in dvs}
def set_design_var(self, name, value):
"""
Set the value of a design variable.
Parameters
----------
name : str
Global pathname of the design variable.
value : float or ndarray
Value for the design variable.
"""
if (name in self._remote_dvs and
self._problem.model._owning_rank['output'][name] != self._problem.comm.rank):
return
meta = self._designvars[name]
indices = meta['indices']
if indices is None:
indices = slice(None)
desvar = self._problem.model._outputs._views_flat[name]
desvar[indices] = value
if self._has_scaling:
# Scale design variable values
scaler = meta['scaler']
if scaler is not None:
desvar[indices] *= 1.0 / scaler
adder = meta['adder']
if adder is not None:
desvar[indices] -= adder
def get_response_values(self, filter=None):
"""
Return response values.
Parameters
----------
filter : list
List of response names used by recorders.
Returns
-------
dict
Dictionary containing values of each response.
"""
if filter:
resps = filter
else:
resps = self._responses
return {n: self._get_voi_val(n, self._responses[n], self._remote_objs) for n in resps}
def get_objective_values(self, filter=None):
"""
Return objective values.
Parameters
----------
filter : list
List of objective names used by recorders.
Returns
-------
dict
Dictionary containing values of each objective.
"""
if filter:
objs = filter
else:
objs = self._objs
return {n: self._get_voi_val(n, self._objs[n], self._remote_objs) for n in objs}
def get_constraint_values(self, ctype='all', lintype='all', filter=None):
"""
Return constraint values.
Parameters
----------
ctype : str
Default is 'all'. Optionally return just the inequality constraints
with 'ineq' or the equality constraints with 'eq'.
lintype : str
Default is 'all'. Optionally return just the linear constraints
with 'linear' or the nonlinear constraints with 'nonlinear'.
filter : list
List of constraint names used by recorders.
Returns
-------
dict
Dictionary containing values of each constraint.
"""
if filter is not None:
cons = filter
else:
cons = self._cons
con_dict = {}
for name in cons:
meta = self._cons[name]
if lintype == 'linear' and not meta['linear']:
continue
if lintype == 'nonlinear' and meta['linear']:
continue
if ctype == 'eq' and meta['equals'] is None:
continue
if ctype == 'ineq' and meta['equals'] is not None:
continue
con_dict[name] = self._get_voi_val(name, meta, self._remote_cons)
return con_dict
def run(self):
"""
Execute this driver.
The base `Driver` just runs the model. All other drivers overload
this method.
Returns
-------
bool
            Failure flag; True if failed to converge, False if successful.
"""
with Recording(self._get_name(), self.iter_count, self) as rec:
self._problem.model.run_solve_nonlinear()
self.iter_count += 1
return False
def _dict2array_jac(self, derivs):
osize = 0
isize = 0
do_wrt = True
islices = {}
oslices = {}
for okey, oval in derivs.items():
if do_wrt:
for ikey, val in oval.items():
istart = isize
isize += val.shape[1]
islices[ikey] = slice(istart, isize)
do_wrt = False
ostart = osize
osize += oval[ikey].shape[0]
oslices[okey] = slice(ostart, osize)
new_derivs = np.zeros((osize, isize))
relevant = self._problem.model._relevant
for okey, odict in derivs.items():
for ikey, val in odict.items():
if okey in relevant[ikey] or ikey in relevant[okey]:
new_derivs[oslices[okey], islices[ikey]] = val
return new_derivs
def _compute_totals(self, of=None, wrt=None, return_format='flat_dict', use_abs_names=True):
"""
Compute derivatives of desired quantities with respect to desired inputs.
All derivatives are returned using driver scaling.
Parameters
----------
of : list of variable name str or None
Variables whose derivatives will be computed. Default is None, which
uses the driver's objectives and constraints.
wrt : list of variable name str or None
Variables with respect to which the derivatives will be computed.
Default is None, which uses the driver's desvars.
return_format : str
Format to return the derivatives. Default is a 'flat_dict', which
returns them in a dictionary whose keys are tuples of form (of, wrt). For
the scipy optimizer, 'array' is also supported.
use_abs_names : bool
Set to True when passing in global names to skip some translation steps.
Returns
-------
derivs : object
Derivatives in form requested by 'return_format'.
"""
prob = self._problem
# Compute the derivatives in dict format...
if prob.model._owns_approx_jac:
derivs = prob._compute_totals_approx(of=of, wrt=wrt, return_format='dict',
use_abs_names=use_abs_names)
else:
derivs = prob._compute_totals(of=of, wrt=wrt, return_format='dict',
use_abs_names=use_abs_names)
# ... then convert to whatever the driver needs.
if return_format in ('dict', 'array'):
if self._has_scaling:
for okey, odict in derivs.items():
for ikey, val in odict.items():
iscaler = self._designvars[ikey]['scaler']
oscaler = self._responses[okey]['scaler']
# Scale response side
if oscaler is not None:
val[:] = (oscaler * val.T).T
# Scale design var side
if iscaler is not None:
val *= 1.0 / iscaler
else:
raise RuntimeError("Derivative scaling by the driver only supports the 'dict' and "
"'array' formats at present.")
if return_format == 'array':
derivs = self._dict2array_jac(derivs)
return derivs
def record_iteration(self):
"""
Record an iteration of the current Driver.
"""
if not self._rec_mgr._recorders:
return
metadata = create_local_meta(self._get_name())
# Get the data to record
data = {}
if self.recording_options['record_desvars']:
# collective call that gets across all ranks
desvars = self.get_design_var_values()
else:
desvars = {}
if self.recording_options['record_responses']:
# responses = self.get_response_values() # not really working yet
responses = {}
else:
responses = {}
if self.recording_options['record_objectives']:
objectives = self.get_objective_values()
else:
objectives = {}
if self.recording_options['record_constraints']:
constraints = self.get_constraint_values()
else:
constraints = {}
desvars = {name: desvars[name]
for name in self._filtered_vars_to_record['des']}
# responses not working yet
# responses = {name: responses[name] for name in self._filtered_vars_to_record['res']}
objectives = {name: objectives[name]
for name in self._filtered_vars_to_record['obj']}
constraints = {name: constraints[name]
for name in self._filtered_vars_to_record['con']}
if self.recording_options['includes']:
root = self._problem.model
outputs = root._outputs
            # inputs, outputs, residuals = root.get_nonlinear_vectors()
sysvars = {}
for name, value in outputs._names.items():
if name in self._filtered_vars_to_record['sys']:
sysvars[name] = value
else:
sysvars = {}
if MPI:
root = self._problem.model
desvars = self._gather_vars(root, desvars)
responses = self._gather_vars(root, responses)
objectives = self._gather_vars(root, objectives)
constraints = self._gather_vars(root, constraints)
sysvars = self._gather_vars(root, sysvars)
data['des'] = desvars
data['res'] = responses
data['obj'] = objectives
data['con'] = constraints
data['sys'] = sysvars
self._rec_mgr.record_iteration(self, data, metadata)
def _gather_vars(self, root, local_vars):
"""
Gather and return only variables listed in `local_vars` from the `root` System.
Parameters
----------
root : <System>
the root System for the Problem
local_vars : dict
local variable names and values
Returns
-------
dct : dict
variable names and values.
"""
# if trace:
# debug("gathering vars for recording in %s" % root.pathname)
all_vars = root.comm.gather(local_vars, root=0)
# if trace:
# debug("DONE gathering rec vars for %s" % root.pathname)
if root.comm.rank == 0:
dct = all_vars[-1]
for d in all_vars[:-1]:
dct.update(d)
return dct
def _get_name(self):
"""
Get name of current Driver.
Returns
-------
str
Name of current Driver.
"""
return "Driver"
|
StarcoderdataPython
|
3280172
|
from . import angles
from . import waveforms
from . import harmonics
from . import qnms
from . import utils
from . import gwmemory
from .gwmemory import time_domain_memory, frequency_domain_memory
name = "gwmemory"
|
StarcoderdataPython
|
1604383
|
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import mars.dataframe as md
from mars.tensor.core import CHUNK_TYPE as TENSOR_CHUNK_TYPE
from mars.tests.core import TestBase
from mars.dataframe.core import SERIES_CHUNK_TYPE, Series, DataFrame, DATAFRAME_CHUNK_TYPE
from mars.dataframe.indexing.iloc import DataFrameIlocGetItem, DataFrameIlocSetItem
class Test(TestBase):
def testSetIndex(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
df3 = df2.set_index('y', drop=True)
df3.tiles()
self.assertEqual(df3.chunk_shape, (2, 2))
pd.testing.assert_index_equal(df3.chunks[0].columns.to_pandas(), pd.Index(['x']))
pd.testing.assert_index_equal(df3.chunks[1].columns.to_pandas(), pd.Index(['z']))
df4 = df2.set_index('y', drop=False)
df4.tiles()
self.assertEqual(df4.chunk_shape, (2, 2))
pd.testing.assert_index_equal(df4.chunks[0].columns.to_pandas(), pd.Index(['x', 'y']))
pd.testing.assert_index_equal(df4.chunks[1].columns.to_pandas(), pd.Index(['z']))
def testILocGetItem(self):
df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
# plain index
df3 = df2.iloc[1]
df3.tiles()
self.assertIsInstance(df3, Series)
self.assertIsInstance(df3.op, DataFrameIlocGetItem)
self.assertEqual(df3.shape, (3,))
self.assertEqual(df3.chunk_shape, (2,))
self.assertEqual(df3.chunks[0].shape, (2,))
self.assertEqual(df3.chunks[1].shape, (1,))
self.assertEqual(df3.chunks[0].op.indexes, (1, slice(None, None, None)))
self.assertEqual(df3.chunks[1].op.indexes, (1, slice(None, None, None)))
self.assertEqual(df3.chunks[0].inputs[0].index, (0, 0))
self.assertEqual(df3.chunks[0].inputs[0].shape, (2, 2))
self.assertEqual(df3.chunks[1].inputs[0].index, (0, 1))
self.assertEqual(df3.chunks[1].inputs[0].shape, (2, 1))
# slice index
df4 = df2.iloc[:, 2:4]
df4.tiles()
self.assertIsInstance(df4, DataFrame)
self.assertIsInstance(df4.op, DataFrameIlocGetItem)
self.assertEqual(df4.shape, (3, 1))
self.assertEqual(df4.chunk_shape, (2, 1))
self.assertEqual(df4.chunks[0].shape, (2, 1))
self.assertEqual(df4.chunks[1].shape, (1, 1))
self.assertEqual(df4.chunks[0].op.indexes, (slice(None, None, None), slice(None, None, None)))
self.assertEqual(df4.chunks[1].op.indexes, (slice(None, None, None), slice(None, None, None)))
self.assertEqual(df4.chunks[0].inputs[0].index, (0, 1))
self.assertEqual(df4.chunks[0].inputs[0].shape, (2, 1))
self.assertEqual(df4.chunks[1].inputs[0].index, (1, 1))
self.assertEqual(df4.chunks[1].inputs[0].shape, (1, 1))
# plain fancy index
df5 = df2.iloc[[0], [0, 1, 2]]
df5.tiles()
self.assertIsInstance(df5, DataFrame)
self.assertIsInstance(df5.op, DataFrameIlocGetItem)
self.assertEqual(df5.shape, (1, 3))
self.assertEqual(df5.chunk_shape, (1, 2))
self.assertEqual(df5.chunks[0].shape, (1, 2))
self.assertEqual(df5.chunks[1].shape, (1, 1))
np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])
self.assertEqual(df5.chunks[0].inputs[0].index, (0, 0))
self.assertEqual(df5.chunks[0].inputs[0].shape, (2, 2))
self.assertEqual(df5.chunks[1].inputs[0].index, (0, 1))
self.assertEqual(df5.chunks[1].inputs[0].shape, (2, 1))
# fancy index
df6 = df2.iloc[[1, 2], [0, 1, 2]]
df6.tiles()
self.assertIsInstance(df6, DataFrame)
self.assertIsInstance(df6.op, DataFrameIlocGetItem)
self.assertEqual(df6.shape, (2, 3))
self.assertEqual(df6.chunk_shape, (2, 2))
self.assertEqual(df6.chunks[0].shape, (1, 2))
self.assertEqual(df6.chunks[1].shape, (1, 1))
self.assertEqual(df6.chunks[2].shape, (1, 2))
self.assertEqual(df6.chunks[3].shape, (1, 1))
np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])
self.assertEqual(df6.chunks[0].inputs[0].index, (0, 0))
self.assertEqual(df6.chunks[0].inputs[0].shape, (2, 2))
self.assertEqual(df6.chunks[1].inputs[0].index, (0, 1))
self.assertEqual(df6.chunks[1].inputs[0].shape, (2, 1))
self.assertEqual(df6.chunks[2].inputs[0].index, (1, 0))
self.assertEqual(df6.chunks[2].inputs[0].shape, (1, 2))
self.assertEqual(df6.chunks[3].inputs[0].index, (1, 1))
self.assertEqual(df6.chunks[3].inputs[0].shape, (1, 1))
# plain index
df7 = df2.iloc[1, 2]
df7.tiles()
self.assertIsInstance(df7, Series)
self.assertIsInstance(df7.op, DataFrameIlocGetItem)
self.assertEqual(df7.shape, ())
self.assertEqual(df7.chunk_shape, ())
self.assertEqual(df7.chunks[0].dtype, df7.dtype)
self.assertEqual(df7.chunks[0].shape, ())
self.assertEqual(df7.chunks[0].op.indexes, (1, 0))
self.assertEqual(df7.chunks[0].inputs[0].index, (0, 1))
self.assertEqual(df7.chunks[0].inputs[0].shape, (2, 1))
def testILocSetItem(self):
        df1 = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
df2 = md.DataFrame(df1, chunk_size=2)
df2.tiles()
# plain index
df3 = md.DataFrame(df1, chunk_size=2)
df3.iloc[1] = 100
df3.tiles()
self.assertIsInstance(df3.op, DataFrameIlocSetItem)
self.assertEqual(df3.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df3.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns.to_pandas(), df3.columns.to_pandas())
for c1, c2 in zip(df2.chunks, df3.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns.to_pandas(), c2.columns.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
self.assertEqual(df3.chunks[0].op.indexes, (1, slice(None, None, None)))
self.assertEqual(df3.chunks[1].op.indexes, (1, slice(None, None, None)))
        # slice index
df4 = md.DataFrame(df1, chunk_size=2)
df4.iloc[:, 2:4] = 1111
df4.tiles()
self.assertIsInstance(df4.op, DataFrameIlocSetItem)
self.assertEqual(df4.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df4.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns.to_pandas(), df4.columns.to_pandas())
for c1, c2 in zip(df2.chunks, df4.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns.to_pandas(), c2.columns.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
self.assertEqual(df4.chunks[1].op.indexes, (slice(None, None, None), slice(None, None, None)))
self.assertEqual(df4.chunks[3].op.indexes, (slice(None, None, None), slice(None, None, None)))
# plain fancy index
df5 = md.DataFrame(df1, chunk_size=2)
df5.iloc[[0], [0, 1, 2]] = 2222
df5.tiles()
self.assertIsInstance(df5.op, DataFrameIlocSetItem)
self.assertEqual(df5.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df5.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns.to_pandas(), df5.columns.to_pandas())
for c1, c2 in zip(df2.chunks, df5.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns.to_pandas(), c2.columns.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
np.testing.assert_array_equal(df5.chunks[0].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[0], [0])
np.testing.assert_array_equal(df5.chunks[1].op.indexes[1], [0])
# fancy index
df6 = md.DataFrame(df1, chunk_size=2)
df6.iloc[[1, 2], [0, 1, 2]] = 3333
df6.tiles()
self.assertIsInstance(df6.op, DataFrameIlocSetItem)
self.assertEqual(df6.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df6.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns.to_pandas(), df6.columns.to_pandas())
for c1, c2 in zip(df2.chunks, df6.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns.to_pandas(), c2.columns.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
np.testing.assert_array_equal(df6.chunks[0].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[0].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[0], [1])
np.testing.assert_array_equal(df6.chunks[1].op.indexes[1], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[2].op.indexes[1], [0, 1])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[0], [0])
np.testing.assert_array_equal(df6.chunks[3].op.indexes[1], [0])
# plain index
df7 = md.DataFrame(df1, chunk_size=2)
df7.iloc[1, 2] = 4444
df7.tiles()
self.assertIsInstance(df7.op, DataFrameIlocSetItem)
self.assertEqual(df7.chunk_shape, df2.chunk_shape)
pd.testing.assert_index_equal(df2.index_value.to_pandas(), df7.index_value.to_pandas())
pd.testing.assert_index_equal(df2.columns.to_pandas(), df7.columns.to_pandas())
for c1, c2 in zip(df2.chunks, df7.chunks):
self.assertEqual(c1.shape, c2.shape)
pd.testing.assert_index_equal(c1.index_value.to_pandas(), c2.index_value.to_pandas())
pd.testing.assert_index_equal(c1.columns.to_pandas(), c2.columns.to_pandas())
if isinstance(c2.op, DataFrameIlocSetItem):
self.assertEqual(c1.key, c2.inputs[0].key)
else:
self.assertEqual(c1.key, c2.key)
self.assertEqual(df7.chunks[1].op.indexes, (1, 0))
def testDataFrameGetitem(self):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
series = df['c3']
self.assertIsInstance(series, Series)
self.assertEqual(series.shape, (10,))
self.assertEqual(series.name, 'c3')
self.assertEqual(series.dtype, data['c3'].dtype)
self.assertEqual(series.index_value, df.index_value)
series.tiles()
self.assertEqual(series.nsplits, ((2, 2, 2, 2, 2),))
self.assertEqual(len(series.chunks), 5)
for i, c in enumerate(series.chunks):
self.assertIsInstance(c, SERIES_CHUNK_TYPE)
self.assertEqual(c.index, (i,))
self.assertEqual(c.shape, (2,))
df1 = df[['c1', 'c2', 'c3']]
self.assertIsInstance(df1, DataFrame)
self.assertEqual(df1.shape, (10, 3))
self.assertEqual(df1.index_value, df.index_value)
pd.testing.assert_index_equal(df1.columns.to_pandas(), data[['c1', 'c2', 'c3']].columns)
pd.testing.assert_series_equal(df1.dtypes, data[['c1', 'c2', 'c3']].dtypes)
df1.tiles()
self.assertEqual(df1.nsplits, ((2, 2, 2, 2, 2), (2, 1)))
self.assertEqual(len(df1.chunks), 10)
for i, c in enumerate(df1.chunks[slice(0, 10, 2)]):
self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
self.assertEqual(c.index, (i, 0))
self.assertEqual(c.shape, (2, 2))
for i, c in enumerate(df1.chunks[slice(1, 10, 2)]):
self.assertIsInstance(c, DATAFRAME_CHUNK_TYPE)
self.assertEqual(c.index, (i, 1))
self.assertEqual(c.shape, (2, 1))
def testDataFrameGetitemBool(self):
data = pd.DataFrame(np.random.rand(10, 5), columns=['c1', 'c2', 'c3', 'c4', 'c5'])
df = md.DataFrame(data, chunk_size=2)
mask_data1 = data.c1 > 0.5
mask_data2 = data.c1 < 0.5
mask1 = md.Series(mask_data1, chunk_size=2)
mask2 = md.Series(mask_data2, chunk_size=2)
r1 = df[mask1]
r2 = df[mask2]
r3 = df[mask1]
self.assertNotEqual(r1.index_value.key, df.index_value.key)
self.assertNotEqual(r1.index_value.key, mask1.index_value.key)
self.assertEqual(r1.columns.key, df.columns.key)
self.assertIs(r1.columns, df.columns)
self.assertNotEqual(r1.index_value.key, r2.index_value.key)
self.assertEqual(r1.columns.key, r2.columns.key)
self.assertIs(r1.columns, r2.columns)
self.assertEqual(r1.index_value.key, r3.index_value.key)
self.assertEqual(r1.columns.key, r3.columns.key)
self.assertIs(r1.columns, r3.columns)
def testSeriesGetitem(self):
data = pd.Series(np.random.rand(10,), name='a')
series = md.Series(data, chunk_size=3)
result1 = series[2]
self.assertEqual(result1.shape, ())
result1.tiles()
self.assertEqual(result1.nsplits, ())
self.assertEqual(len(result1.chunks), 1)
self.assertIsInstance(result1.chunks[0], TENSOR_CHUNK_TYPE)
self.assertEqual(result1.chunks[0].shape, ())
self.assertEqual(result1.chunks[0].dtype, data.dtype)
result2 = series[[4, 5, 1, 2, 3]]
self.assertEqual(result2.shape, (5,))
result2.tiles()
self.assertEqual(result2.nsplits, ((2, 2, 1),))
self.assertEqual(len(result2.chunks), 3)
self.assertEqual(result2.chunks[0].op.labels, [4, 5])
self.assertEqual(result2.chunks[1].op.labels, [1, 2])
self.assertEqual(result2.chunks[2].op.labels, [3])
data = pd.Series(np.random.rand(10), index=['i' + str(i) for i in range(10)])
series = md.Series(data, chunk_size=3)
result1 = series['i2']
self.assertEqual(result1.shape, ())
result1.tiles()
self.assertEqual(result1.nsplits, ())
self.assertEqual(result1.chunks[0].dtype, data.dtype)
self.assertTrue(result1.chunks[0].op.labels, ['i2'])
result2 = series[['i2', 'i4']]
self.assertEqual(result2.shape, (2,))
result2.tiles()
self.assertEqual(result2.nsplits, ((2,),))
self.assertEqual(result2.chunks[0].dtype, data.dtype)
self.assertTrue(result2.chunks[0].op.labels, [['i2', 'i4']])
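# Hedged sketch, not part of the original test module: the iloc tiling pattern
# exercised above, written as plain usage. The helper name is an assumption;
# the chunk counts follow from chunk_size=2 on a 3x3 frame, matching the
# assertions in testILocGetItem.
def _iloc_tiling_example():
    raw = pd.DataFrame([[1, 3, 3], [4, 2, 6], [7, 8, 9]],
                       index=['a1', 'a2', 'a3'], columns=['x', 'y', 'z'])
    df = md.DataFrame(raw, chunk_size=2)
    row = df.iloc[1]                    # Series of length 3
    block = df.iloc[[1, 2], [0, 1, 2]]  # 2x3 DataFrame
    row.tiles()
    block.tiles()
    return len(row.chunks), len(block.chunks)  # expected (2, 4)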
|
StarcoderdataPython
|
108142
|
# -*- coding: utf-8 -*-
__author__ = 'raek'
__updated__ = 'kmu'
import requests
# import datetime
# import getdangers as gd
# import makelogs as md
# import types
def get_warnings_as_json(region_ids, start_date, end_date, lang_key=1, simple=False, recursive_count=5):
"""Selects warnings and returns the json structured result as given on the api.
    :param region_ids: [int or list of ints] RegionID as given in the forecast api [1-99] or in regObs [101-199]
    :param start_date: [date or string as yyyy-mm-dd]
    :param end_date: [date or string as yyyy-mm-dd]
    :param lang_key: [int] language of the text fields; 1 gives Norwegian, 2 gives English
    :param simple: [bool] default "False" - returns a minimum of data when True (used for speed-up)
    :param recursive_count: [int] by default attempt the same request this many times before giving up
    :return warnings: [list] warnings as a list of dicts parsed from the api json
Eg. http://api01.nve.no/hydrology/forecast/avalanche/v2.0.2/api/AvalancheWarningByRegion/Detail/10/1/2013-01-10/2013-01-20
"""
# If input isn't a list, make it so
if not isinstance(region_ids, list):
region_ids = [region_ids]
warnings = []
recursive_count_default = recursive_count # need the default for later
for region_id in region_ids:
if len(region_ids) > 1:
# if we are looping the initial list make sure each item gets the recursive count default
recursive_count = recursive_count_default
# if region_id > 100:
# region_id = region_id - 100
if simple:
api_type = 'Simple'
else:
api_type = 'Detail'
# md.log_and_print("getForecastApi -> get_warnings_as_json: Getting AvalancheWarnings for {0} from {1} til {2}"\
# .format(region_id, start_date, end_date))
url = "https://api01.nve.no/hydrology/forecast/avalanche/v4.0.0/api/AvalancheWarningByRegion/{4}/{0}/{3}/{1}/{2}"\
.format(region_id, start_date, end_date, lang_key, api_type)
# If at first you don't succeed, try and try again.
try:
warnings += requests.get(url).json()
# md.log_and_print("getForecastApi -> get_warnings_as_json: {0} warnings found for {1}.".format(len(warnings), region_id))
except:
# md.log_and_print("getForecastApi -> get_warnings_as_json: EXCEPTION. RECURSIVE COUNT {0}".format(recursive_count))
if recursive_count > 1:
recursive_count -= 1 # count down
warnings += get_warnings_as_json(region_id, start_date, end_date, lang_key, recursive_count=recursive_count)
# TODO: remove line below and use proper logging
print("Rec", recursive_count)
    return warnings
'''
def get_warnings(region_ids, start_date, end_date, lang_key=1):
"""Selects warnings and returns a list of AvalancheDanger Objects. This method does NOT add the
avalanche problems to the warning.
:param region_id: [int or list of ints] RegionID as given in the forecast api [1-99] or in regObs [101-199]
:param start_date: [date or string as yyyy-mm-dd]
:param end_date: [date or string as yyyy-mm-dd]
:return avalanche_danger_list: List of AvalancheDanger objects
"""
warnings = get_warnings_as_json(region_ids,start_date, end_date, lang_key=lang_key)
avalanche_danger_list = []
for w in warnings:
region_id = int(w['RegionId'])
region_name = w['RegionName']
date = datetime.datetime.strptime(w['ValidFrom'][0:10], '%Y-%m-%d').date()
danger_level = int(w['DangerLevel'])
danger_level_name = w['DangerLevelName']
author = w['Author']
avalanche_forecast = w['AvalancheDanger']
avalanche_nowcast = w['AvalancheWarning']
danger = gd.AvalancheDanger(region_id, region_name, 'Forecast API', date, danger_level, danger_level_name)
danger.set_source('Varsel')
danger.set_nick(author)
danger.set_avalanche_nowcast(avalanche_nowcast)
danger.set_avalanche_forecast(avalanche_forecast)
if lang_key == 1:
danger.set_main_message_no(w['MainText'])
if lang_key == 2:
danger.set_main_message_en(w['MainText'])
avalanche_danger_list.append(danger)
# Sort by date
avalanche_danger_list = sorted(avalanche_danger_list, key=lambda AvalancheDanger: AvalancheDanger.date)
return avalanche_danger_list
'''
def get_valid_regids(region_id, start_date, end_date):
"""Method looks up all forecasts for a region and selects and returns the RegIDs used in regObs. Thus, the list of
RegIDs are for published forecasts.
:param region_id: [int] RegionID as given in the forecast api [1-99] or in regObs [101-199]
:param start_date: [string] date as yyyy-mm-dd
:param end_date: [string] date as yyyy-mm-dd
:return: {RegID:date, RegID:date, ...}
"""
warnings = get_warnings_as_json(region_id, start_date, end_date)
valid_regids = {}
for w in warnings:
danger_level = int(w["DangerLevel"])
if danger_level > 0:
valid_regids[w["RegId"]] = w["ValidFrom"]
return valid_regids
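# Hedged sketch, not in the original module: one way to address the TODO in the
# __main__ block below by splitting danger levels and dates into two parallel
# lists. The field names 'RegionId', 'ValidFrom' and 'DangerLevel' are taken
# from the usage elsewhere in this file; the helper name is an assumption.
def split_danger_levels(warnings_json, region_id):
    dates = [w['ValidFrom'] for w in warnings_json if w['RegionId'] == region_id]
    levels = [int(w['DangerLevel']) for w in warnings_json if w['RegionId'] == region_id]
    return dates, levels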
if __name__ == "__main__":
import datetime as dt
import pandas as pd
# get data for Bardu (112) and Tamokdalen (129)
# warnings_for_129 = get_warnings([129, 118, 131], dt.date(2016, 4, 1), dt.date(2016, 4, 2))
warns_json = get_warnings_as_json([129, 118, 131], dt.date(2016, 4, 1), dt.date(2016, 4, 2),
simple=True, lang_key=1, recursive_count=5)
# p = get_valid_regids(10, "2013-03-01", "2013-03-09")
# Retrieve danger level for a specific region
# TODO: make the output two separate lists - one containing DL the other date
dl = [{warns['ValidFrom']: warns['DangerLevel']} for warns in warns_json if warns['RegionId']==29]
df = pd.DataFrame(warns_json[0]) # all elements get doubled when converting to DataFrame
# conversion to dataframe does not work with api_type='Simple' - but maybe I don't need the dataframe at all.
print(warns_json[0]['DangerLevel'], warns_json[0]['AvalancheWarning'])
print('---')
print(df['DangerLevel'], df['AvalancheWarning'])
a = 1
|
StarcoderdataPython
|
3285125
|
<gh_stars>1-10
##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Common utilities needed for writing WebDAV functional tests.
XXX - This really needs some tidying up, also the setup should be moved to
a global setup method so that individual tests can call it if they need to.
"""
from cStringIO import StringIO
from BTrees.OOBTree import OOBTree
import transaction
from zope import interface
from zope import component
from zope import schema
from zope.security.proxy import removeSecurityProxy
from zope.app.folder.folder import Folder
from zope.app.publication.http import HTTPPublication
from zope.security.management import newInteraction, endInteraction
from zope.security.testing import Principal, Participation
from zope.dublincore.interfaces import IWriteZopeDublinCore
from z3c.dav.publisher import WebDAVRequest
from z3c.dav.properties import DAVProperty
import z3c.dav.testing
import z3c.dav.ftests
class IExamplePropertyStorage(interface.Interface):
exampleintprop = schema.Int(
title = u"Example Integer Property",
description = u"")
exampletextprop = schema.Text(
title = u"Example Text Property",
description = u"")
exampleIntProperty = DAVProperty("{DAVtest:}exampleintprop",
IExamplePropertyStorage)
exampleTextProperty = DAVProperty("{DAVtest:}exampletextprop",
IExamplePropertyStorage)
exampleTextProperty.restricted = True
ANNOT_KEY = "EXAMPLE_PROPERTY"
class ExamplePropertyStorage(object):
interface.implements(IExamplePropertyStorage)
def __init__(self, context, request):
self.context = context
self.request = request
def _getproperty(name, default = None):
def get(self):
annots = getattr(removeSecurityProxy(self.context),
"exampleannots", {})
return annots.get("%s_%s" %(ANNOT_KEY, name), default)
def set(self, value):
annots = getattr(removeSecurityProxy(self.context),
"exampleannots", None)
if annots is None:
annots = removeSecurityProxy(
self.context).exampleannots = OOBTree()
annots["%s_%s" %(ANNOT_KEY, name)] = value
return property(get, set)
exampleintprop = _getproperty("exampleintprop", default = 0)
exampletextprop = _getproperty("exampletextprop", default = u"")
class TestWebDAVRequest(WebDAVRequest):
"""."""
def __init__(self, elem = None):
if elem is not None:
body = """<?xml version="1.0" encoding="utf-8" ?>
<D:propertyupdate xmlns:D="DAV:">
<D:set>
<D:prop />
</D:set>
</D:propertyupdate>
"""
f = StringIO(body)
else:
f = StringIO('')
super(TestWebDAVRequest, self).__init__(
f, {'CONTENT_TYPE': 'text/xml',
'CONTENT_LENGTH': len(f.getvalue()),
})
# processInputs to test request
self.processInputs()
# if elem is given insert it into the proppatch request.
if elem is not None:
self.xmlDataSource[0][0].append(elem)
class IResource(interface.Interface):
title = interface.Attribute("Title of resource")
class Resource(object):
interface.implements(IResource)
def __init__(self, data, contentType, title = None):
self.data = data
self.contentType = contentType
self.title = title
class DAVTestCase(z3c.dav.testing.WebDAVTestCase):
layer = z3c.dav.testing.WebDAVLayer(z3c.dav.ftests)
def login(self, principalid = "mgr"):
"""Some locking methods new an interaction in order to lock a resource
"""
principal = Principal(principalid)
participation = Participation(principal)
newInteraction(participation)
def logout(self):
"""End the current interaction so we run the publish method.
"""
endInteraction()
#
# Some methods for creating dummy content.
#
def createCollections(self, path):
collection = self.getRootFolder()
if path[0] == '/':
path = path[1:]
path = path.split('/')
for id in path[:-1]:
try:
collection = collection[id]
except KeyError:
collection[id] = Folder()
collection = collection[id]
return collection, path[-1]
def createObject(self, path, obj):
collection, id = self.createCollections(path)
collection[id] = obj
transaction.commit()
return collection[id]
def addResource(self, path, content, title = None, contentType = ''):
resource = Resource(data = content, contentType = contentType,
title = title)
return self.createObject(path, resource)
def addCollection(self, path, title = None):
coll = Folder()
if title is not None:
IWriteZopeDublinCore(coll).title = title
return self.createObject(path, coll)
def createCollectionResourceStructure(self):
""" _____ rootFolder/ _____
/ \ \
r1 __ a/ __ b/
/ \
r2 r3
"""
self.addResource("/r1", "first resource")
self.addResource("/a/r2", "second resource")
self.addResource("/a/r3", "third resource")
self.addCollection("/b")
def createFolderFileStructure(self):
""" _____ rootFolder/ _____
/ \ \
r1 __ a/ __ b/
/ \
r2 r3
"""
self.addResource("/r1", "first resource", contentType = "test/plain")
self.addResource("/a/r2", "second resource", contentType = "text/plain")
self.addResource("/a/r3", "third resource", contentType = "text/plain")
self.createObject("/b", Folder())
def checkPropfind(self, path = "/", basic = None, env = {},
properties = None, handle_errors = True):
# - properties if set is a string containing the contents of the
        # propfind XML element as specified in the WebDAV spec.
if properties is not None:
body = """<?xml version="1.0" encoding="utf-8" ?>
<propfind xmlns:D="DAV:" xmlns="DAV:">
%s
</propfind>
""" % properties
if not env.has_key("CONTENT_TYPE"):
env["CONTENT_TYPE"] = "application/xml"
env["CONTENT_LENGTH"] = len(body)
else:
body = ""
env["CONTENT_LENGTH"] = 0
if not env.has_key("REQUEST_METHOD"):
env["REQUEST_METHOD"] = "PROPFIND"
response = self.publish(path, basic = basic, env = env,
request_body = body,
handle_errors = handle_errors)
self.assertEqual(response.getStatus(), 207)
self.assertEqual(response.getHeader("content-type"), "application/xml")
return response
def checkProppatch(self, path = '/', basic = None, env = {},
set_properties = None, remove_properties = None,
handle_errors = True):
# - set_properties is None or a string that is the XML fragment
# that should be included within the <D:set><D:prop> section of
# a PROPPATCH request.
# - remove_properties is None or a string that is the XML fragment
# that should be included within the <D:remove><D:prop> section of
# a PROPPATCH request.
set_body = ""
if set_properties:
set_body = "<D:set><D:prop>%s</D:prop></D:set>" % set_properties
remove_body = ""
if remove_properties:
remove_body = "<D:remove><D:prop>%s</D:prop></D:remove>" % \
remove_properties
body = """<?xml version="1.0" encoding="utf-8" ?>
<D:propertyupdate xmlns:D="DAV:" xmlns="DAV:">
%s %s
</D:propertyupdate>
""" %(set_body, remove_body)
body = body.encode("utf-8")
if not env.has_key("CONTENT_TYPE"):
env["CONTENT_TYPE"] = "application/xml"
env["CONTENT_LENGTH"] = len(body)
if not env.has_key("REQUEST_METHOD"):
env["REQUEST_METHOD"] = "PROPPATCH"
response = self.publish(path, basic = basic, env = env,
request_body = body,
handle_errors = handle_errors)
self.assertEqual(response.getStatus(), 207)
self.assertEqual(response.getHeader("content-type"), "application/xml")
return response
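# Hedged sketch, not part of the original module: how a functional test might
# combine the helpers above. The resource path, manager credentials and the
# requested property are illustrative assumptions; the "DAVtest:" namespace
# matches the properties declared at the top of this file, and checkPropfind
# already asserts the 207 multi-status response.
class ExamplePropfindTest(DAVTestCase):
    def test_propfind_example_property(self):
        self.addResource("/example", "some content", title = u"Example resource")
        self.checkPropfind(
            "/example", basic = "mgr:mgrpw",
            properties = '<prop><exampletextprop xmlns="DAVtest:" /></prop>')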
|
StarcoderdataPython
|
3240548
|
<gh_stars>1-10
#!/usr/bin/env python
import sys
import json
import yaml # need to 'pip install pyyaml' for this to work; 'brew install libyaml && sudo python -m easy_install pyyaml' on Mac
print(yaml.dump(yaml.load(json.dumps(json.loads(open(sys.argv[1]).read()))), default_flow_style=False))
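# Hedged usage note, not in the original script: assuming it is saved as
# json2yaml.py, a typical invocation would be
#     python json2yaml.py input.json > output.yaml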
|
StarcoderdataPython
|
13744
|
from zzcore import StdAns, mysakuya
import requests
class Ans(StdAns):
def GETMSG(self):
msg=''
try:
msg += xs()
except:
            msg += 'Maybe the bot laughed itself to death!'
return msg
def xs():
url = "http://api-x.aya1.xyz:6/"
text = requests.get(url=url).text
return text
|
StarcoderdataPython
|