id | text | dataset_id
---|---|---
4816389
|
<reponame>crocs-muni/cert-validataion-stats
"""This package contains analytical functions and tools for quantitative analysis of certificate datasets."""
__version__ = '1.1'
__author__ = '<NAME>'
__all__ = (
'CertAnalyser',
'ChainValidator',
)
from .cert_analyser import CertAnalyser
from .chain_validator import ChainValidator
|
StarcoderdataPython
|
119595
|
<reponame>zerforschung/Covid32Counter<filename>firmware/captive_bvg.py<gh_stars>10-100
import gc
import util
import uuurequests
# collect all hidden form fields from HTML
def parseFormValues(text: str) -> str:
startIndex = 0
postFields = []
postFields.append("termsOK=1")
postFields.append("button=kostenlos+einloggen")
while True:
nameFindStr = '<input type="hidden" name="'
valueFindStr = 'value="'
findStartIndex = text.find(nameFindStr, startIndex)
if findStartIndex == -1:
break
nameStartIndex = findStartIndex + len(nameFindStr)
nameEndIndex = text.find('"', nameStartIndex)
name = text[nameStartIndex:nameEndIndex]
valueStartIndex = text.find(valueFindStr, nameEndIndex) + len(valueFindStr)
valueEndIndex = text.find('"', valueStartIndex)
value = text[valueStartIndex:valueEndIndex]
postFields.append(name + "=" + value)
startIndex = valueEndIndex
return "&".join(postFields)
def accept_captive_portal() -> bool:
try:
# portalDetectResponse = uuurequests.get(
# "https://www.hotsplots.de/auth/login.php?res=notyet&uamip=10.0.160.1&uamport=80&challenge=8638ce7ac8088c170ae0076b0d4932cb&called=F6-F0-3E-40-07-DE&mac=E8-80-2E-EA-2A-2D&ip=10.0.175.201&nasid=BVG-Bahnhoefe&sessionid=5f93869200000463&userurl=http%3a%2f%2finit-p01st.push.apple.com%2fbag%3fv%3d1"
# )
# portalDetectResponse = uuurequests.get(
# "http://clients1.google.com/generate_204"
# )
portalDetectResponse = uuurequests.get("http://captive.apple.com")
except Exception:
return False
# if portalDetectResponse.status_code == 204:
if (
portalDetectResponse.text.find(
"<HTML><HEAD><TITLE>Success</TITLE></HEAD><BODY>Success</BODY></HTML>"
)
== -1
):
headers = {
"Content-Type": "application/x-www-form-urlencoded",
"Origin": "https://www.hotsplots.de",
"Cookie": "div=1",
"Connection": "keep-alive",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"User-Agent": "Mozilla/5.0 (iPhone; CPU OS 12_4_8 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/29.0 Mobile/15E148 Safari/605.1.15",
}
util.syslog("Captive Portal BVG", "Portal detected")
gc.collect()
        loginRequest = uuurequests.request(
"POST",
"https://www.hotsplots.de/auth/login.php",
headers=headers,
data=parseFormValues(portalDetectResponse.text),
)
        loginResponse = loginRequest.text
gc.collect()
util.syslog("Captive Portal BVG", "Submitted first stage of captive portal")
redirectSearch = '<meta http-equiv="refresh" content="0;url='
redirectStartIndex = loginResponse.find(redirectSearch) + len(redirectSearch)
redirectEndIndex = loginResponse.find('"', redirectStartIndex)
redirectUrl = loginResponse[redirectStartIndex:redirectEndIndex]
        redirectUrl = redirectUrl.replace("&amp;", "&")  # decode HTML-escaped ampersands in the redirect URL
gc.collect()
util.syslog(
"Captive Portal BVG",
"Detected URL for second stage. Submitting request (probably to local router)",
)
try:
uuurequests.get(redirectUrl)
gc.collect()
except Exception:
util.syslog(
"Captive Portal BVG",
"Problem open second stage of captive portal login",
)
return False
util.syslog("Captive Portal BVG", "Successfully logged into captive portal")
return True
else:
util.syslog("Captive Portal BVG", "No captive portal in place")
return True
|
StarcoderdataPython
|
102120
|
import pexpect
from pexpect import exceptions
import time
from rassh.managers.expect_manager import ExpectManager
from rassh.managers.expect_commands import ExpectCommands
from rassh.managers.blackhole_telnet_commands import BlackholeTelnetCommands
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class BlackholeTelnetManager(ExpectManager):
@staticmethod
def get_new_expect_connection(controller_ip):
try:
new_ssh_connection = pexpect.spawn("telnet " + controller_ip)
# PROMPT is a regular expression which will match the prompt on the remote system.
# This matches the Bonaire "Enable" prompt, and the "conf t" prompt.
            new_ssh_connection.PROMPT = r"\S+#"
new_ssh_connection.expect(":")
time.sleep(1)
new_ssh_connection.sendline(ExpectManager.config['api_ssh_password'] + "\r")
new_ssh_connection.expect(">")
time.sleep(1)
new_ssh_connection.sendline("enable \r")
new_ssh_connection.expect("#")
time.sleep(1)
return new_ssh_connection
except exceptions.EOF as e:
logger.error("EOF Exception when getting a new telnet connection." +
" Is the destination host rejecting connections?" +
" Was this a login with bad username / password?")
logger.error(e.get_trace())
return None
except exceptions.TIMEOUT as e:
logger.error("Timeout Exception when getting a new telnet connection. Is the destination host accessible?")
logger.error(e.get_trace())
return None
except pexpect.ExceptionPexpect as e:
logger.error("Unknown exception when getting a new telnet connection.")
logger.error(e.get_trace())
return None
def _my_commands(self) -> ExpectCommands:
return BlackholeTelnetCommands(self)
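# Connection usage sketch (192.0.2.10 is an illustrative address; assumes
# ExpectManager.config already holds 'api_ssh_password'):
#   session = BlackholeTelnetManager.get_new_expect_connection("192.0.2.10")
#   if session is not None:
#       session.sendline("show version\r")
#       session.expect(session.PROMPT)
#       print(session.before.decode())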
|
StarcoderdataPython
|
3242166
|
#!/usr/bin/env python3
'''
Load data files from the game's "data" directory.
'''
# TODO: Add caching.
import os
import sys
base_dir = os.path.dirname(sys.executable if hasattr(sys, "frozen") else sys.argv[0])
data_dir = os.path.normpath(os.path.join(base_dir, 'data'))
if not os.path.isdir(data_dir):
try_base_dir = os.path.dirname(os.path.abspath(base_dir))
try_data_dir = os.path.join(try_base_dir, 'data')
if os.path.isdir(try_data_dir):
base_dir = try_base_dir
data_dir = try_data_dir
print("* The data dir was detected as \"{}\"".format(data_dir))
else:
print("Error: data dir was not found in base_dir \"{}\""
" nor its parent directory.".format(base_dir))
def get_data_dir():
return data_dir
def filepath(filename):
'''Determine the path to a file in the data directory.
'''
path = os.path.join(data_dir, filename)
# if not os.path.isfile(path):
return path
def basepath(filename):
'''Determine the path to a file in the base directory.
'''
return os.path.join(base_dir, filename)
def load(filename, mode='rb'):
'''Open a file in the data directory.
"mode" is passed as the second arg to open().
'''
return open(os.path.join(data_dir, filename), mode)
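if __name__ == "__main__":
    # Minimal sketch of the helpers above; "example.txt" is a hypothetical file
    # name used purely for illustration (filepath() only joins paths, it does
    # not check that the file exists).
    print("* data dir:", get_data_dir())
    print("* would load from:", filepath("example.txt"))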
|
StarcoderdataPython
|
1726711
|
<reponame>Rexarrior/ALT
import json
import pickle
import os
from typing import Dict, Iterable, TypeVar, Type, List, Union, Any
if __package__:
from link_analysis.models import Header, DocumentHeader, CleanLink
else:
from models import Header, DocumentHeader, CleanLink # type: ignore
# Don't forget to add here any new classes that provide the implementation
# required by convert_to_class_format()
classname = Union[Type[DocumentHeader], Type[CleanLink]]
classobjects = Union[DocumentHeader, CleanLink]
def convert_to_class_format(
data: Iterable[Dict[str, str]],
className: classname) -> Union[Dict[str, classobjects],
List[classobjects]]:
'''
    argument data: an iterable standard Python object, such as a dictionary or a
    list, whose elements are dictionaries for which a class format exists.\n
    If data is a dictionary, its keys must be standard Python objects.\n
    argument className: the class that provides the static method
    'convert_from_dict'; it defines the format for each element of data.\n
    returns a dictionary or list with instances of the class className
'''
if not hasattr(data, '__iter__'):
raise ValueError("'data' is not iterable object")
if isinstance(data, dict):
convertedDataDict = {} # type: Dict[str, classobjects]
for key in data:
convertedDataDict[key] = \
className.convert_from_dict(key, data[key]) # type: ignore
return convertedDataDict
else:
convertedDataList = [] # type: List[classobjects]
for el in data:
convertedDataList.append(
className.convert_from_dict(el)) # type: ignore
return convertedDataList
def convert_to_json_serializable_format(
data: Iterable[classobjects]) -> Union[Dict[str, Dict[str, str]],
List[Dict[str, str]]]:
'''
    argument data: an iterable standard Python object, such as a dictionary or a
    list, whose elements are instances of classes that have the method
    'convert_to_dict'.\n
    If data is a dictionary, its keys must be standard Python objects.\n
    returns a dictionary or list with dictionary elements
'''
if not hasattr(data, '__iter__'):
raise ValueError("'data' is not iterable object")
if isinstance(data, dict):
convertedDataDict = {} # type: Dict[str, Dict[str, str]]
for key in data:
convertedDataDict[key] = data[key].convert_to_dict()
return convertedDataDict
else:
convertedDataList = [] # type: List[Dict[str, str]]
for el in data:
convertedDataList.append(el.convert_to_dict()) # type: ignore
return convertedDataList
def convert_dict_list_cls_to_json_serializable_format(data):
dataLists = list(data[key] for key in data if data[key])
resultList = []
for L in dataLists:
resultList.extend(L)
JSONcleanLinks = convert_to_json_serializable_format(resultList)
return JSONcleanLinks
def save_json(jsonSerializableData: object, pathToFile: str) -> bool:
try:
dirname = os.path.dirname(pathToFile)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(pathToFile, 'w', encoding='utf-8') as jsonFile:
json.dump(jsonSerializableData, jsonFile, ensure_ascii=False)
except FileExistsError:
return False
return True
def load_json(pathToFile: str) -> Union[object, None]:
try:
with open(pathToFile, encoding='utf-8') as jsonFile:
data = json.load(jsonFile)
except FileNotFoundError:
return None
return data
def save_pickle(anyData: Any, pathToFile: str) -> bool:
try:
dirname = os.path.dirname(pathToFile)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(pathToFile, 'wb') as pickleFile:
pickle.dump(anyData, pickleFile)
except FileExistsError:
return False
return True
def load_pickle(pathToFile: str) -> Any:
try:
with open(pathToFile, 'rb') as pickleFile:
data = pickle.load(pickleFile, encoding='UTF-8')
except FileNotFoundError:
return None
return data
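if __name__ == "__main__":
    # Round-trip sketch for the JSON helpers above; 'tmp_example.json' is a
    # throwaway file name used only for illustration.
    payload = {'answer': 42}
    if save_json(payload, 'tmp_example.json'):
        print(load_json('tmp_example.json'))  # -> {'answer': 42}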
|
StarcoderdataPython
|
1676135
|
#!/usr/bin/env python
# coding: utf-8
# this code is a modification of:
# notes:
# todo: I cancelled randomizing the weights of the last layer and froze the weights of all layers (some weights were trained anyway).
# todo: maybe save the outputs to a file during the evaluate function
# **Outline of Steps**
# + Initialization
# + Download COCO detection data from http://cocodataset.org/#download
# + http://images.cocodataset.org/zips/train2014.zip <= train images
# + http://images.cocodataset.org/zips/val2014.zip <= validation images
# + http://images.cocodataset.org/annotations/annotations_trainval2014.zip <= train and validation annotations
# + Run this script to convert annotations in COCO format to VOC format
# + https://gist.github.com/chicham/6ed3842d0d2014987186#file-coco2pascal-py
# + Download pre-trained weights from https://pjreddie.com/darknet/yolo/
# + https://pjreddie.com/media/files/yolo.weights
# + Specify the directory of train annotations (train_annot_folder) and train images (train_image_folder)
# + Specify the directory of validation annotations (valid_annot_folder) and validation images (valid_image_folder)
# + Specify the path of pre-trained weights by setting variable *wt_path*
# + Construct equivalent network in Keras
# + Network arch from https://github.com/pjreddie/darknet/blob/master/cfg/yolo-voc.cfg
# + Load the pretrained weights
# + Perform training
# + Perform detection on an image with newly trained weights
# + Perform detection on an video with newly trained weights
# # Initialization
# In[51]:
#from IPython import get_ipython
from keras.models import Sequential, Model
from keras.layers import Reshape, Activation, Conv2D, Input, MaxPooling2D, BatchNormalization, Flatten, Dense, Lambda, \
UpSampling2D, TimeDistributed, LSTM
from keras.layers.advanced_activations import LeakyReLU
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
from keras.optimizers import SGD, Adam, RMSprop
from keras.layers.merge import concatenate
import matplotlib.pyplot as plt
import keras.backend as K
import tensorflow as tf
import imgaug as ia
from tqdm import tqdm
from imgaug import augmenters as iaa
import numpy as np
import pickle
import os, cv2
from preprocessing import parse_annotation, BatchGenerator, LSTMBatchGenerator
from utils import WeightReader, decode_netout, draw_boxes
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# get_ipython().run_line_magic('matplotlib', 'inline')
# In[52]:
SUP_NUM_IMAGES = 3
UNSUP_NUM_IMAGES = 3
EVAL_NUM_IMAGES = 3
LABELS = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
'hair drier', 'toothbrush']
IMAGE_H, IMAGE_W = 416, 416
GRID_H, GRID_W = 13, 13
BOX = 5
CLASS = len(LABELS)
CLASS_WEIGHTS = np.ones(CLASS, dtype='float32')
OBJ_THRESHOLD = 0.3 # 0.5
NMS_THRESHOLD = 0.3 # 0.45
ANCHORS = [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828]
NO_OBJECT_SCALE = 1.0
OBJECT_SCALE = 5.0
COORD_SCALE = 1.0
CLASS_SCALE = 1.0
BATCH_SIZE = 16
WARM_UP_BATCHES = 0
TRUE_BOX_BUFFER = 50
MAX_BOX_PER_IMAGE = 10
# In[53]:
wt_path = 'yolov2.weights'
train_image_folder = './data/images/train2014/'
train_annot_folder = './data/train_converted/'
valid_image_folder = './data/images/val2014/'
valid_annot_folder = './data/val_converted/'
# # Construct the network
# the function to implement the organization layer (thanks to github.com/allanzelener/YAD2K)
def space_to_depth_x2(x):
return tf.space_to_depth(x, block_size=2)
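# Shape note: with block_size=2 the spatial dimensions halve and the channel
# count grows 4x, e.g. the (batch, 26, 26, 64) skip connection below becomes
# (batch, 13, 13, 256) before being concatenated with the 13x13 main branch.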
import frontend
""" creates a new dir names coco_x with the results, weights, and all the relevant files"""
# TB_COUNT = len([d for d in os.listdir(os.path.expanduser('./results_lstm/')) if 'coco_' in d]) + 1
# PATH = os.path.expanduser('./results_lstm/') + 'coco_' + '_' + str(TB_COUNT)
# os.makedirs(PATH)
PATH = './lstm/'
print("=================== Directory " , PATH , " Created ")
# PATH = "./results/coco__25"
class ToharGenerator2(BatchGenerator):
def __getitem__(self, item):
        # t = ([x_batch, b_batch], y_batch)
        # i.e. ([input, ground truth boxes], desired network output)
t = super().__getitem__(item)
x_batch = t[0][0] #the input
GT = t[0][1]
y_batch = t[1]
        new_x_batch = predict(model, x_batch)  # instead of the input image vector we want YOLO's output vector
t[0][0]= new_x_batch
return [new_x_batch, GT], y_batch
input_image = Input(shape=(IMAGE_H, IMAGE_W, 3))
true_boxes = Input(shape=(1, 1, 1, TRUE_BOX_BUFFER, 4))
# Layer 1
x = Conv2D(32, (3, 3), strides=(1, 1), padding='same', name='conv_1', use_bias=False)(input_image)
x = BatchNormalization(name='norm_1')(x)
x = LeakyReLU(alpha=0.1)(x)
encoded = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 2
x = Conv2D(64, (3, 3), strides=(1, 1), padding='same', name='conv_2', use_bias=False, trainable=False)(encoded)
x = BatchNormalization(name='norm_2', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 3
x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv_3', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_3', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 4
x = Conv2D(64, (1, 1), strides=(1, 1), padding='same', name='conv_4', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_4', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 5
x = Conv2D(128, (3, 3), strides=(1, 1), padding='same', name='conv_5', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_5', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 6
x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv_6', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_6', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 7
x = Conv2D(128, (1, 1), strides=(1, 1), padding='same', name='conv_7', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_7', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 8
x = Conv2D(256, (3, 3), strides=(1, 1), padding='same', name='conv_8', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_8', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 9
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_9', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_9', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 10
x = Conv2D(256, (1, 1), strides=(1, 1), padding='same', name='conv_10', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_10', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 11
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_11', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_11', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 12
x = Conv2D(256, (1, 1), strides=(1, 1), padding='same', name='conv_12', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_12', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 13
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_13', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_13', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
skip_connection = x
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 14
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_14', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_14', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 15
x = Conv2D(512, (1, 1), strides=(1, 1), padding='same', name='conv_15', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_15', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 16
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_16', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_16', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 17
x = Conv2D(512, (1, 1), strides=(1, 1), padding='same', name='conv_17', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_17', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 18
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_18', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_18', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 19
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_19', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_19', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 20
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_20', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_20', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 21
skip_connection = Conv2D(64, (1, 1), strides=(1, 1), padding='same', name='conv_21', use_bias=False, trainable=False)(
skip_connection)
skip_connection = BatchNormalization(name='norm_21', trainable=False)(skip_connection)
skip_connection = LeakyReLU(alpha=0.1)(skip_connection)
skip_connection = Lambda(space_to_depth_x2)(skip_connection)
x = concatenate([skip_connection, x])
# Layer 22
x = Conv2D(1024, (3, 3), strides=(1, 1), padding='same', name='conv_22', use_bias=False, trainable=False)(x)
x = BatchNormalization(name='norm_22', trainable=False)(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 23
x = Conv2D(BOX * (4 + 1 + CLASS), (1, 1), strides=(1, 1), padding='same', name='conv_23')(x)
output = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x)
# small hack to allow true_boxes to be registered when Keras builds the model
# for more information: https://github.com/fchollet/keras/issues/2790
output = Lambda(lambda args: args[0])([output, true_boxes])
model = Model([input_image, true_boxes], output)
# model.summary()
print("output=====")
print(output.shape)
'''build lstm model: '''
lstm_input = Input(shape=(GRID_H, GRID_W, BOX, 4 + 1 + CLASS))
input_dim = GRID_H * GRID_W * BOX * (4 + 1 + CLASS)
# input_dim=(GRID_H,GRID_W, BOX, 4 + 1 + CLASS, 1, 1, 1, TRUE_BOX_BUFFER, 4)
print(input_dim)
timesteps = EVAL_NUM_IMAGES
# lstm.add(units= Dense(input_shape=(GRID_H, GRID_W, BOX, 4 + 1 + CLASS)))
# l=Lambda(lambda x: K.batch_flatten(x))(lstm_input)
# l=LSTM(input_dim, batch_input_shape= (None, timesteps, input_dim), activation='sigmoid',recurrent_activation='hard_sigmoid',return_sequences=True)(l)
# # l = (Dense(output_dim=input_dim, activation="relu"))(lstm)
# #
# # # l = LSTM(input_dim)(l)
# # # # hidden_layer = Dense(output_dim=input_shape, activation="relu")(x)
# # # # outputs = Dense(output_dim=input_shape, activation="softmax")(hidden_layer)
# #
# loutput = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(l)
# #
# # # small hack to allow true_boxes to be registered when Keras build the model
# # # for more information: https://github.com/fchollet/keras/issues/2790
# out = Lambda(lambda args: args[0])([loutput, true_boxes])
#
#
#
# lstm = Model([lstm_input, true_boxes], out)
# lstm.summary()
input_dim = GRID_H * GRID_W * BOX * (4 + 1 + CLASS)
#take 5 frames every time
frames = Input(shape=(5, IMAGE_H, IMAGE_W, 3))
x = TimeDistributed(model)(frames)
x = TimeDistributed(Flatten())(x)
# now timesteps = 5
x = LSTM(input_dim, name='lstm')(x)
out = Dense(input_dim, name='out')(x)
lstm = Model(inputs=frames, outputs=out)
exit()
# # Load pretrained weights
# **Load the weights originally provided by YOLO**
print("**Load the weights originally provided by YOLO**")
weight_reader = WeightReader(wt_path)
weight_reader.reset() # don't worry! it doesn't delete the weights.
nb_conv = 23
for i in range(1, nb_conv + 1):
conv_layer = model.get_layer('conv_' + str(i))
if i < nb_conv:
norm_layer = model.get_layer('norm_' + str(i))
size = np.prod(norm_layer.get_weights()[0].shape)
beta = weight_reader.read_bytes(size)
gamma = weight_reader.read_bytes(size)
mean = weight_reader.read_bytes(size)
var = weight_reader.read_bytes(size)
weights = norm_layer.set_weights([gamma, beta, mean, var])
if len(conv_layer.get_weights()) > 1:
bias = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[1].shape))
kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2, 3, 1, 0])
conv_layer.set_weights([kernel, bias])
else:
kernel = weight_reader.read_bytes(np.prod(conv_layer.get_weights()[0].shape))
kernel = kernel.reshape(list(reversed(conv_layer.get_weights()[0].shape)))
kernel = kernel.transpose([2, 3, 1, 0])
conv_layer.set_weights([kernel])
# model_t = model #model that trained but not pre-trained
# model_un = model #model without training at all
# **Randomize weights of the last layer**
# In[ ]:
# print("========randomize last layer")
# layer = model.layers[-4] # the last convolutional layer
# weights = layer.get_weights()
#
# new_kernel = np.random.normal(size=weights[0].shape)/(GRID_H*GRID_W)
# new_bias = np.random.normal(size=weights[1].shape)/(GRID_H*GRID_W)
#
# layer.set_weights([new_kernel, new_bias])
# # Perform training
# **Loss function**
# $$\begin{multline}
# \lambda_\textbf{coord}
# \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{obj}}
# \left[
# \left(
# x_i - \hat{x}_i
# \right)^2 +
# \left(
# y_i - \hat{y}_i
# \right)^2
# \right]
# \\
# + \lambda_\textbf{coord}
# \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{obj}}
# \left[
# \left(
# \sqrt{w_i} - \sqrt{\hat{w}_i}
# \right)^2 +
# \left(
# \sqrt{h_i} - \sqrt{\hat{h}_i}
# \right)^2
# \right]
# \\
# + \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{obj}}
# \left(
# C_i - \hat{C}_i
# \right)^2
# \\
# + \lambda_\textrm{noobj}
# \sum_{i = 0}^{S^2}
# \sum_{j = 0}^{B}
# L_{ij}^{\text{noobj}}
# \left(
# C_i - \hat{C}_i
# \right)^2
# \\
# + \sum_{i = 0}^{S^2}
# L_i^{\text{obj}}
# \sum_{c \in \textrm{classes}}
# \left(
# p_i(c) - \hat{p}_i(c)
# \right)^2
# \end{multline}$$
# In[ ]:
import backend
def predict(model, image, i, img_name, path=""):
"""
input_size = IMAGE_H
image_h, image_w, _ = image.shape
feature_extractor = backend.FullYoloFeature()
image = cv2.resize(image, (input_size, input_size))
image =feature_extractor.normalize(image)
input_image = image[:,:,::-1]
input_image = np.expand_dims(input_image, 0)
dummy_array = np.zeros((1,1,1,1, MAX_BOX_PER_IMAGE,4))
netout = model.predict([input_image, dummy_array])[0]
boxes = decode_netout(netout, ANCHORS, len(LABELS))
"""
dummy_array = np.zeros((1, 1, 1, 1, TRUE_BOX_BUFFER, 4))
# print("dummy array:", dummy_array)
plt.figure(figsize=(10, 10))
input_image = cv2.resize(image, (416, 416))
input_image = input_image / 255.
input_image = input_image[:, :, ::-1]
input_image = np.expand_dims(input_image, 0)
netout = model.predict([input_image, dummy_array])
boxes = decode_netout(netout[0],
obj_threshold=OBJ_THRESHOLD,
nms_threshold=NMS_THRESHOLD,
anchors=ANCHORS,
nb_class=CLASS)
image = draw_boxes(image, boxes, labels=LABELS)
plt.imshow(image[:, :, ::-1])
path = str(path)
if i <= 100:
        # Create the target directory & all intermediate directories if they don't exist
if not os.path.exists(path):
os.makedirs(path)
print("Directory ", path, " Created ")
else:
pass
# print("Directory ", path, " already exists")
            # os.makedirs(path)  # creates the directory at the given path, including any missing intermediate-level directories
plt.savefig(path+ "/" + img_name)
return boxes
from utils import decode_netout, compute_overlap, compute_ap
from os.path import normpath, basename
def evaluate(model, generator,
iou_threshold=0.3,
score_threshold=0.3,
max_detections=100,
save_path=None):
""" Evaluate a given dataset using a given model.
code originally from https://github.com/fizyr/keras-retinanet
# Arguments
generator : The generator that represents the dataset to evaluate.
model : The model to evaluate.
iou_threshold : The threshold used to consider when a detection is positive or negative.
score_threshold : The score confidence threshold to use for detections.
max_detections : The maximum number of detections to use per image.
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
"""
# gather all detections and annotations
all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
for i in range(generator.size()):
raw_image = generator.load_image(i)
path = generator.images[i]['filename']
img_name = basename(normpath(path))
raw_height, raw_width, raw_channels = raw_image.shape
# make the boxes and the labels
pred_boxes = predict(model, raw_image, i, img_name, path=save_path)
score = np.array([box.score for box in pred_boxes])
pred_labels = np.array([box.label for box in pred_boxes])
if len(pred_boxes) > 0:
pred_boxes = np.array([[box.xmin * raw_width, box.ymin * raw_height, box.xmax * raw_width,
box.ymax * raw_height, box.score] for box in pred_boxes])
else:
pred_boxes = np.array([[]])
# sort the boxes and the labels according to scores
score_sort = np.argsort(-score)
pred_labels = pred_labels[score_sort]
pred_boxes = pred_boxes[score_sort]
# copy detections to all_detections
for label in range(generator.num_classes()):
all_detections[i][label] = pred_boxes[pred_labels == label, :]
annotations = generator.load_annotation(i)
# copy detections to all_annotations
for label in range(generator.num_classes()):
all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
# compute mAP by comparing all detections and all annotations
average_precisions = {}
for label in range(generator.num_classes()):
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(generator.size()):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
# no annotations -> AP for this class is 0 (is this correct?)
if num_annotations == 0:
average_precisions[label] = 0
continue
# sort by score
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# compute average precision
average_precision = compute_ap(recall, precision)
average_precisions[label] = average_precision
import pickle
f = open(save_path+"/mAP.pkl", "wb")
pickle.dump(average_precisions, f)
f.close()
return average_precisions
def custom_loss(y_true, y_pred):
mask_shape = tf.shape(y_true)[:4]
cell_x = tf.to_float(tf.reshape(tf.tile(tf.range(GRID_W), [GRID_H]), (1, GRID_H, GRID_W, 1, 1)))
cell_y = tf.transpose(cell_x, (0, 2, 1, 3, 4))
cell_grid = tf.tile(tf.concat([cell_x, cell_y], -1), [BATCH_SIZE, 1, 1, 5, 1])
coord_mask = tf.zeros(mask_shape)
conf_mask = tf.zeros(mask_shape)
class_mask = tf.zeros(mask_shape)
seen = tf.Variable(0.)
total_recall = tf.Variable(0.)
"""
Adjust prediction
"""
### adjust x and y
pred_box_xy = tf.sigmoid(y_pred[..., :2]) + cell_grid
### adjust w and h
pred_box_wh = tf.exp(y_pred[..., 2:4]) * np.reshape(ANCHORS, [1, 1, 1, BOX, 2])
### adjust confidence
pred_box_conf = tf.sigmoid(y_pred[..., 4])
### adjust class probabilities
pred_box_class = y_pred[..., 5:]
"""
Adjust ground truth
"""
### adjust x and y
true_box_xy = y_true[..., 0:2] # relative position to the containing cell
### adjust w and h
    true_box_wh = y_true[..., 2:4]  # number of cells across, horizontally and vertically
### adjust confidence
true_wh_half = true_box_wh / 2.
true_mins = true_box_xy - true_wh_half
true_maxes = true_box_xy + true_wh_half
pred_wh_half = pred_box_wh / 2.
pred_mins = pred_box_xy - pred_wh_half
pred_maxes = pred_box_xy + pred_wh_half
intersect_mins = tf.maximum(pred_mins, true_mins)
intersect_maxes = tf.minimum(pred_maxes, true_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
true_areas = true_box_wh[..., 0] * true_box_wh[..., 1]
pred_areas = pred_box_wh[..., 0] * pred_box_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = tf.truediv(intersect_areas, union_areas)
true_box_conf = iou_scores * y_true[..., 4]
### adjust class probabilities
true_box_class = tf.argmax(y_true[..., 5:], -1)
"""
Determine the masks
"""
### coordinate mask: simply the position of the ground truth boxes (the predictors)
coord_mask = tf.expand_dims(y_true[..., 4], axis=-1) * COORD_SCALE
    ### confidence mask: penalize predictors + penalize boxes with low IOU
    # penalize the confidence of boxes whose best IOU with any ground truth box is < 0.6
true_xy = true_boxes[..., 0:2]
true_wh = true_boxes[..., 2:4]
true_wh_half = true_wh / 2.
true_mins = true_xy - true_wh_half
true_maxes = true_xy + true_wh_half
pred_xy = tf.expand_dims(pred_box_xy, 4)
pred_wh = tf.expand_dims(pred_box_wh, 4)
pred_wh_half = pred_wh / 2.
pred_mins = pred_xy - pred_wh_half
pred_maxes = pred_xy + pred_wh_half
intersect_mins = tf.maximum(pred_mins, true_mins)
intersect_maxes = tf.minimum(pred_maxes, true_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
true_areas = true_wh[..., 0] * true_wh[..., 1]
pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = tf.truediv(intersect_areas, union_areas)
best_ious = tf.reduce_max(iou_scores, axis=4)
conf_mask = conf_mask + tf.to_float(best_ious < 0.6) * (1 - y_true[..., 4]) * NO_OBJECT_SCALE
    # penalize the confidence of the boxes which are responsible for the corresponding ground truth box
conf_mask = conf_mask + y_true[..., 4] * OBJECT_SCALE
### class mask: simply the position of the ground truth boxes (the predictors)
class_mask = y_true[..., 4] * tf.gather(CLASS_WEIGHTS, true_box_class) * CLASS_SCALE
"""
Warm-up training
"""
no_boxes_mask = tf.to_float(coord_mask < COORD_SCALE / 2.)
seen = tf.assign_add(seen, 1.)
true_box_xy, true_box_wh, coord_mask = tf.cond(tf.less(seen, WARM_UP_BATCHES),
lambda: [true_box_xy + (0.5 + cell_grid) * no_boxes_mask,
true_box_wh + tf.ones_like(true_box_wh) * np.reshape(
ANCHORS, [1, 1, 1, BOX, 2]) * no_boxes_mask,
tf.ones_like(coord_mask)],
lambda: [true_box_xy,
true_box_wh,
coord_mask])
"""
Finalize the loss
"""
nb_coord_box = tf.reduce_sum(tf.to_float(coord_mask > 0.0))
nb_conf_box = tf.reduce_sum(tf.to_float(conf_mask > 0.0))
nb_class_box = tf.reduce_sum(tf.to_float(class_mask > 0.0))
loss_xy = tf.reduce_sum(tf.square(true_box_xy - pred_box_xy) * coord_mask) / (nb_coord_box + 1e-6) / 2.
loss_wh = tf.reduce_sum(tf.square(true_box_wh - pred_box_wh) * coord_mask) / (nb_coord_box + 1e-6) / 2.
loss_conf = tf.reduce_sum(tf.square(true_box_conf - pred_box_conf) * conf_mask) / (nb_conf_box + 1e-6) / 2.
loss_class = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class)
loss_class = tf.reduce_sum(loss_class * class_mask) / (nb_class_box + 1e-6)
loss = loss_xy + loss_wh + loss_conf + loss_class
nb_true_box = tf.reduce_sum(y_true[..., 4])
nb_pred_box = tf.reduce_sum(tf.to_float(true_box_conf > 0.5) * tf.to_float(pred_box_conf > 0.3))
"""
Debugging code
"""
current_recall = nb_pred_box / (nb_true_box + 1e-6)
total_recall = tf.assign_add(total_recall, current_recall)
loss = tf.Print(loss, [tf.zeros((1))], message='Dummy Line \t', summarize=1000)
loss = tf.Print(loss, [loss_xy], message='Loss XY \t', summarize=1000)
loss = tf.Print(loss, [loss_wh], message='Loss WH \t', summarize=1000)
loss = tf.Print(loss, [loss_conf], message='Loss Conf \t', summarize=1000)
loss = tf.Print(loss, [loss_class], message='Loss Class \t', summarize=1000)
loss = tf.Print(loss, [loss], message='Total Loss \t', summarize=1000)
loss = tf.Print(loss, [current_recall], message='Current Recall \t', summarize=1000)
loss = tf.Print(loss, [total_recall / seen], message='Average Recall \t', summarize=1000)
return loss
# **Parse the annotations to construct train generator and validation generator**
generator_config = {
'IMAGE_H': IMAGE_H,
'IMAGE_W': IMAGE_W,
'GRID_H': GRID_H,
'GRID_W': GRID_W,
'BOX': BOX,
'LABELS': LABELS,
'CLASS': len(LABELS),
'ANCHORS': ANCHORS,
'BATCH_SIZE': BATCH_SIZE,
'TRUE_BOX_BUFFER': 50,
}
def normalize(image):
return image / 255.
# train_imgs, seen_train_labels = parse_annotation(train_annot_folder, train_image_folder, labels=LABELS)
# ## write parsed annotations to pickle for fast retrieval next time
# with open('train_imgs', 'wb') as fp:
# pickle.dump(train_imgs, fp)
# ## read saved pickle of parsed annotations
# with open('train_imgs', 'rb') as fp:
# train_imgs = pickle.load(fp)
#
# from random import shuffle
# shuffle(train_imgs)
#
# with open('train_imgs_shuffled', 'wb') as fp:
# pickle.dump(train_imgs, fp)
with open('train_imgs_shuffled', 'rb') as fp:
train_imgs = pickle.load(fp)
# valid_imgs, seen_valid_labels = parse_annotation(valid_annot_folder, valid_image_folder, labels=LABELS)
# ## write parsed annotations to pickle for fast retrieval next time
# with open('valid_imgs', 'wb') as fp:
# pickle.dump(valid_imgs, fp)
## read saved pickle of parsed annotations
with open('valid_imgs', 'rb') as fp:
valid_imgs = pickle.load(fp)
sup_train_imgs = train_imgs[:SUP_NUM_IMAGES]
# split the training set (supervised data) into train and validation, 80% / 20% respectively:
train = sup_train_imgs[:int(SUP_NUM_IMAGES*0.8)]
val = sup_train_imgs[-int(SUP_NUM_IMAGES*0.2):]  # takes the last 20% of images from the training set
train_batch = BatchGenerator(train, generator_config, norm=normalize)
eval_imgs = valid_imgs[:EVAL_NUM_IMAGES]  # we use valid_imgs as our evaluation (test) set, while 20% of the training set is used for validation
valid_batch = BatchGenerator(val, generator_config, norm=normalize, jitter=False)
"""we evaluate the model on the validation set"""
tohar_eval_batch = BatchGenerator(eval_imgs, generator_config, norm=normalize, jitter=False,
shuffle=False)
# **Setup a few callbacks and start the training**
early_stop = EarlyStopping(monitor='val_loss',
min_delta=0.001,
patience=3,
mode='min',
verbose=1)
checkpoint = ModelCheckpoint(PATH+'/LSTM_weights_coco.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
period=1)
org_checkpoint = ModelCheckpoint(PATH+'/original_weights_coco.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min',
period=1)
# In[ ]:
tb_counter = len([log for log in os.listdir(os.path.expanduser('./lstm/')) if 'coco_' in log]) + 1
tensorboard = TensorBoard(log_dir=os.path.expanduser('./lstm/') + 'coco_' + '_' + str(tb_counter),
histogram_freq=0,
write_graph=True,
write_images=False)
optimizer = Adam(lr=0.5e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
# optimizer = SGD(lr=1e-4, decay=0.0005, momentum=0.9)
# optimizer = RMSprop(lr=1e-4, rho=0.9, epsilon=1e-08, decay=0.0)
model.compile(loss=custom_loss, optimizer=optimizer)
# model_t.compile(loss=custom_loss, optimizer=optimizer)
from keras.callbacks import TensorBoard
"""evaluating on original YOLO (no training at all)"""
model.load_weights("yolo.h5")
# YOLO = evaluate(model, tohar_eval_batch, save_path=PATH+"/YOLO")
# print("YOLO:\n", YOLO)
# print(np.average(list(YOLO.values())))
'''creating a modified batch to the lstm:'''
# [x_batch, GT], y_batch
# [x_batch, GT], \
lstm_batch = LSTMBatchGenerator(eval_imgs, generator_config, model, norm=None, jitter=False, shuffle=False)
print(len(lstm_batch))
exit()
"""X_train2 should be YOLO's output vectors
y_train2 should be the ground truth in the exact same format of YOLO's output
"""
# autoencoder.fit_generator(generator=train_batch_lstm, #(input, input)
# steps_per_epoch=len(train_batch_lstm),
# epochs=100,
# verbose=1,
# # validation_data=tohar_valid_batch,
# # validation_steps=len(tohar_valid_batch),
# callbacks=[early_stop, ae_checkpoint, tensorboard],
# max_queue_size=3)
# print("===================== Done training AE")
# print("===================== Save weights to AE_weights_coco.h5")
# autoencoder.save_weights(PATH+"/AE_weights_coco.h5") # save the autoencoder's weights in this file
# print("===================== Load weights from AE_weights_coco.h5")
# model.load_weights(PATH+'/AE_weights_coco.h5',
# by_name=True) # copy the AE's weights to the "YOLO model" weights, only to layers with the same name as the AE
## end ae
##uncomment for training:
# Perform detection on image
# print("===================== load YOLO model's weights to weights_coco.h5")
# evaluate:
# train_batch_lstm = ToharGenerator2(train, generator_config, norm=normalize)
""" Add lstm on top of the trained YOLO model. the lstm should have many to many sturcture. each latm cell predict 1 output . help:"""
# https://stackoverflow.com/questions/49535488/lstm-on-top-of-a-pre-trained-cnn
# https://github.com/keras-team/keras/issues/5527
''' Freeze previous layers '''
for layer in model.layers:
layer.trainable = False
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense, Input
from keras.layers.pooling import GlobalAveragePooling2D
from keras.layers.recurrent import LSTM
from keras.layers.wrappers import TimeDistributed
from keras.optimizers import Nadam
frames = len(tohar_eval_batch)
print(frames)
units = GRID_H * GRID_W * BOX * (4 + 1 + CLASS)
print("==========",units)
length=5 #todo:batch size
#todo: input dim is problematic.
# input_images = Input(shape=( None, frames ,IMAGE_H, IMAGE_W, 3))
#https://riptutorial.com/keras/example/29812/vgg-16-cnn-and-lstm-for-video-classification
# frames, rows, columns, channels = 10, IMAGE_H, IMAGE_W, 3
# video = Input(shape=(frames,
# rows,
# columns,
# channels))
#
# # cnn_base = VGG16(input_shape=(rows, columns, channels),
# # weights="imagenet",
# # include_top=False)
# # cnn_out = GlobalAveragePooling2D()(cnn_base.output)
# # cnn = Model(input=cnn_base.input, output=cnn_out)
#
# model.trainable = False
#
# encoded_frames = TimeDistributed(model)(video)
# encoded_sequence = LSTM(256)(encoded_frames)
# hidden_layer = Dense(output_dim=1024, activation="relu")(encoded_sequence)
# outputs = Dense(output_dim=units, activation="softmax")(hidden_layer)
# lstm = Model([video], outputs)
#
# # x = Reshape((len(train_batch)*10 ,IMAGE_H, IMAGE_W, 3))(input_images)
# x = TimeDistributed(model)(x)
# x = TimeDistributed(Flatten())(x)
# x = LSTM(units, name='lstm')(x) # This has the effect of each LSTM unit returning a sequence of 1 output, one for each time step in the input data
# # x = Dense( n_output,name='lstm_out')(x)
# # x = Conv2D(BOX * (4 + 1 + CLASS), (1, 1), strides=(1, 1), padding='same', name='lstm_conv')(x)
# out = Reshape((GRID_H, GRID_W, BOX, 4 + 1 + CLASS))(x)
print("======== lstm:")
lstm.summary()
lstm.compile(loss=custom_loss, optimizer=optimizer)
exit()
lstm.fit_generator(generator=train_batch, # train_batch #(input, ground_truth)
steps_per_epoch=len(train_batch),
epochs=3,
verbose=1,
validation_data=valid_batch,
validation_steps=len(valid_batch),
callbacks=[early_stop, checkpoint, tensorboard],
max_queue_size=3)
"""evaluating on LSTM YOLO """
LSTM = evaluate(model, tohar_eval_batch, save_path=PATH+"/LSTM")
print("LSTM:\n",LSTM)
print(np.average(list(LSTM.values())))
# """evaluating on original YOLO (no training at all)"""
# model.load_weights("yolo.h5")
# YOLO = evaluate(model, tohar_eval_batch, save_path=PATH+"/YOLO")
# print("YOLO:\n", YOLO)
# print(np.average(list(YOLO.values())))
#
#
# """evaluating on original YOLO (no training at all) """
# model_t.load_weights(PATH+"/T_weights_coco.h5")
# NO_AE = evaluate(model_t, tohar_eval_batch, save_path=PATH+"/NO_AE")
# print("NO_AE:\n", NO_AE)
# print(np.average(list(NO_AE.values())))
params={"SUP_NUM_IMAGES:": SUP_NUM_IMAGES,
"UNSUP_NUM_IMAGES:":UNSUP_NUM_IMAGES,
"EVAL_NUM_IMAGES:":EVAL_NUM_IMAGES}
f = open(PATH + "/mAP.txt", "w")
f.write("LSTM:\n")
f.write(str(LSTM)+"\n")
f.write("NO_AE:\n")
# f.write(str(NO_AE)+"\n")
f.write("YOLO:\n")
# f.write(str(YOLO)+"\n")
f.write("AVG:"+"\n")
f.write(str(np.average(list(LSTM.values())))+"\n")
# f.write(str(np.average(list(NO_AE.values())))+"\n")
# f.write(str(np.average(list(YOLO.values())))+"\n")
f.write("LOG:"+"\n")
f.write(str(params) )
f.close()
# image = cv2.imread('images/giraffe.jpg')
# dummy_array = np.zeros((1,1,1,1,TRUE_BOX_BUFFER,4))
# plt.figure(figsize=(10,10))
#
# input_image = cv2.resize(image, (416, 416))
# input_image = input_image / 255.
# input_image = input_image[:,:,::-1]
# input_image = np.expand_dims(input_image, 0)
#
# netout = model.predict([input_image, dummy_array])
#
# boxes = decode_netout(netout[0],
# obj_threshold=OBJ_THRESHOLD,
# nms_threshold=NMS_THRESHOLD,
# anchors=ANCHORS,
# nb_class=CLASS)
#
# image = draw_boxes(image, boxes, labels=LABELS)
#
# plt.imshow(image[:,:,::-1]); #plt.show()
# i=0
# plt.savefig("./predictions/figure"+str(i))
print('\a')
print('\a')
exit()
# # Perform detection on video
# In[ ]:
model.load_weights("weights_coco.h5")
dummy_array = np.zeros((1, 1, 1, 1, TRUE_BOX_BUFFER, 4))
# In[ ]:
video_inp = '../basic-yolo-keras/images/phnom_penh.mp4'
video_out = '../basic-yolo-keras/images/phnom_penh_bbox.mp4'
video_reader = cv2.VideoCapture(video_inp)
nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
video_writer = cv2.VideoWriter(video_out,
cv2.VideoWriter_fourcc(*'XVID'),
50.0,
(frame_w, frame_h))
for i in tqdm(range(nb_frames)):
ret, image = video_reader.read()
input_image = cv2.resize(image, (416, 416))
input_image = input_image / 255.
input_image = input_image[:, :, ::-1]
input_image = np.expand_dims(input_image, 0)
netout = model.predict([input_image, dummy_array])
boxes = decode_netout(netout[0],
obj_threshold=0.3,
nms_threshold=NMS_THRESHOLD,
anchors=ANCHORS,
nb_class=CLASS)
image = draw_boxes(image, boxes, labels=LABELS)
video_writer.write(np.uint8(image))
video_reader.release()
video_writer.release()
|
StarcoderdataPython
|
3272746
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
GWCS - Generalized World Coordinate System
==========================================
Generalized World Coordinate System (GWCS) is an Astropy affiliated package
providing tools for managing the World Coordinate System of astronomical data.
GWCS takes a general approach to the problem of expressing transformations between
pixel and world coordinates. It supports a data model which includes the entire
transformation pipeline from input coordinates (detector by default) to world
coordinates. It is tightly integrated with Astropy.
- Transforms are instances of ``astropy.Model``. They can be chained, joined or
combined with arithmetic operators using the flexible framework of
compound models in ``astropy.modeling``.
- Celestial coordinates are instances of ``astropy.SkyCoord`` and are transformed
to other standard celestial frames using ``astropy.coordinates``.
- Time coordinates are represented by ``astropy.Time`` and can be further manipulated
using the tools in ``astropy.time``
- Spectral coordinates are ``astropy.Quantity`` objects and can be converted to other
units using the tools in ``astropy.units``.
For complete features and usage examples see the documentation site:
http://gwcs.readthedocs.org
Note
----
GWCS supports only Python 3.
Installation
------------
To install::
pip install gwcs
To clone from github and install the master branch::
git clone https://github.com/spacetelescope/gwcs.git
cd gwcs
python setup.py install
Contributing Code, Documentation, or Feedback
---------------------------------------------
GWCS is developed on github. We welcome feedback and contributions to the project.
Contributions of code, documentation, or general feedback are all appreciated. More
information about contributing is in the github repository.
"""
import sys
from pkg_resources import get_distribution, DistributionNotFound
if sys.version_info < (3, 6):
raise ImportError("GWCS supports Python versions 3.6 and above.") # pragma: no cover
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound: # pragma: no cover
# package is not installed
pass # pragma: no cover
from .wcs import * # noqa
from .wcstools import * # noqa
from .coordinate_frames import * # noqa
from .selector import * # noqa
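# A minimal sketch of the compound-model composition mentioned in the docstring
# above, using plain astropy.modeling (illustrative only, not a GWCS pipeline):
#   from astropy.modeling import models
#   pix2sky = (models.Shift(-1024.5) & models.Shift(-1024.5) |
#              models.Scale(0.05) & models.Scale(0.05))
#   # "&" joins models on separate inputs, "|" chains them into a pipeline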
|
StarcoderdataPython
|
3338895
|
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
import numpy as np
import sys
sys.path.append("..")
from utils import *
from funcs import *
from sklearn.metrics import log_loss
from sklearn.metrics import confusion_matrix
from sklearn.linear_model import LogisticRegression
import pandas as pd
import time
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.compat.v1.InteractiveSession(config=config)
# Define a helper function to evaluate the TFlite model using test dataset.
def evaluate_model(interpreter, test_images, num_class):
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
# Run predictions on every image in the test dataset.
prediction_digits = []
pred_output_all = np.empty([1,num_class])
# time_start = time.time()
for test_image in test_images:
test_image = np.expand_dims(test_image, axis=0).astype(np.float32)
interpreter.set_tensor(input_index, test_image)
interpreter.invoke()
output = interpreter.get_tensor(output_index)
pred_output = output[0]
        pred_output = pred_output.reshape([1, num_class])
pred_output_all = np.vstack((pred_output_all, pred_output))
if (len(pred_output_all) - 1) % 1000 == 0:
print('%d testing samples have been processed' % (len(pred_output_all) - 1))
digit = np.argmax(output[0])
prediction_digits.append(digit)
pred_output_all = pred_output_all[1:,:]
return pred_output_all, prediction_digits
num_freq_bin = 128 # number of the frequency bins
num_classes = 3 # number of the classes, for DCASE2020 1b, num_classes is 3
num_samples = 8640 # number of the evaluation samples
eval_csv = 'data_2020/evaluate_setup/fold1_evaluate.csv' # path to the evaluation set
feat_path = 'features/logmel128_scaled_d_dd/' # path to the extracted features of the evaluation set
test_output_path = 'saved-model-fusion.csv' # path to dump the results
model_path_A_quantized = '../pretrained_models/smallfcnn-model-0.9618-quantized.tflite' # path to the quantized model A, which is trained with the whole development set
model_path_B_quantized = '../pretrained_models/mobnet-model-0.9517-quantized.tflite' # path to the quantized model B, which is trained with the whole development set
print('Loading the evaluation set ...')
LM_eval = load_data_2020_eval(feat_path, eval_csv, num_freq_bin, 'logmel')
# Load the model A into an interpreter
print('Loading the pre-trained quantized models ...')
interpreter_quant_A = tf.lite.Interpreter(model_path=model_path_A_quantized)
interpreter_quant_A.allocate_tensors()
# Load the model B into an interpreter
interpreter_quant_B = tf.lite.Interpreter(model_path=model_path_B_quantized)
interpreter_quant_B.allocate_tensors()
# get the results of the quantized sfcnn model
print('Predicting the evaluation set with loaded sfcnn model...')
preds_A, preds_class_idx = evaluate_model(interpreter_quant_A,
LM_eval,
num_class=num_classes)
# get the results of the quantized mobnetv2 model
print('Predicting the evaluation set with loaded mobnetv2 model...')
preds_B, preds_class_idx = evaluate_model(interpreter_quant_B,
LM_eval,
num_class=num_classes)
# get the results of the quantized model by fusion
print('Starting the fusion of the predictions from two quantized models...')
preds = 0.5 * preds_A + 0.5 * preds_B
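# The two class-probability matrices are fused with an equal-weight average
# before being formatted into the output table below.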
print('The following are the results of fusing the two models by averaging their predictions')
test_output_df = get_output_dic(preds, num_samples)
print('Saving the output csv file......')
test_output_df.to_csv(test_output_path, index=False, float_format='%.2f', sep=' ')
print('Output csv file has been saved successfully.')
print('ALL DONE !!!')
|
StarcoderdataPython
|
87973
|
from wpilib.command import Command
import robotmap
import subsystems
import oi
class TankLiftTeleopDefault(Command):
def __init__(self):
super().__init__('TankLiftTeleopDefault')
self.requires(subsystems.drivelift)
self.setInterruptible(True)
self.setRunWhenDisabled(False)
def execute(self):
#if subsystems.drivelift.allLiftToggle == True:
# subsystems.drivelift.extendAll()
#else:
# subsystems.drivelift.retractAll()
#if subsystems.drivelift.frontLiftToggle == True:
# subsystems.drivelift.extendFront()
#else:
# subsystems.drivelift.retractFront()
#if subsystems.drivelift.backLiftToggle == True:
# subsystems.drivelift.extendBack()
#else:
# subsystems.drivelift.retractBack()
subsystems.drivelift.backIRToBool()
subsystems.drivelift.frontIRToBool()
def isFinished(self):
# default commands never "finish", they're just interrupted by other commands
return False
|
StarcoderdataPython
|
3275452
|
from scriptcore.testing.testcase import TestCase
from scriptcore.filesystem.mimetype import MimeType
class TestMimeType(TestCase):
def test_guess_type(self):
"""
Test guess type
:return: void
"""
data = [
('.pdf', 'application/pdf'),
('.gdoc', 'application/vnd.google-apps.document'),
('.gdraw', 'application/vnd.google-apps.drawing'),
('.gsheet', 'application/vnd.google-apps.spreadsheet'),
('.gform', 'application/vnd.google-apps.form'),
('.gsite', 'application/vnd.google-apps.site'),
('.gmap', 'application/vnd.google-apps.map'),
('.gslides', 'application/vnd.google-apps.presentation'),
('.docx', 'application/vnd.openxmlformats-officedocument.wordprocessingml.document'),
('.xslx', 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'),
('.pptx', 'application/vnd.openxmlformats-officedocument.presentationml.presentation'),
('.kml', 'application/vnd.google-earth.kml+xml')
]
for extension, exp_mime_type in data:
mime_type, encoding = MimeType.guess_type(extension)
self.assert_equal(exp_mime_type, mime_type)
mime_type, encoding = MimeType.guess_type('dummy.%s' % extension)
self.assert_equal(exp_mime_type, mime_type)
|
StarcoderdataPython
|
3350391
|
<gh_stars>0
"""
These are tests for the flow builder, the code which builds flows
from Operators using the greater-than operator (>).
"""
import pytest
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], ".."))
from mabel.logging import get_logger
from mabel.operators import FilterOperator, EndOperator, NoOpOperator
from mabel.flows import Flow
from rich import traceback
traceback.install()
def test_flow_builder_valid():
"""
Test the flow builder
"""
e = EndOperator()
f = FilterOperator()
n = NoOpOperator()
flow = f > n > e
assert isinstance(flow, Flow)
assert f"EndOperator-{id(e)}" in flow.nodes.keys()
assert f"FilterOperator-{id(f)}" in flow.nodes.keys()
assert f"NoOpOperator-{id(n)}" in flow.nodes.keys()
assert len(flow.edges) == 2
def test_flow_builder_invalid_uninstantiated():
"""
Test the flow builder doesn't succeed with an invalid Operator
"""
e = EndOperator # <- this should fail as it's not initialized
n = NoOpOperator()
with pytest.raises(TypeError):
flow = n > e
def test_flow_builder_invalid_wrong_type():
"""
Test the flow builder doesn't succeed with an invalid Operator
"""
e = get_logger() # <- this should fail as it's not an Operator
n = NoOpOperator()
with pytest.raises(TypeError):
flow = n > e
class TestOperator(NoOpOperator):
def execute(self, data={}, context={}):
return data, context
class OperatorA(TestOperator):
pass
class OperatorB(TestOperator):
pass
class OperatorC(TestOperator):
pass
class OperatorD(TestOperator):
pass
def test_branching():
z = EndOperator()
a = OperatorA()
b = OperatorB()
c = OperatorC()
d = OperatorD()
flow = a > [b > z, c > d > z]
RESULTS = {
"OperatorA": ["OperatorB", "OperatorC"],
"OperatorB": ["EndOperator"],
"OperatorC": ["OperatorD"],
"OperatorD": ["EndOperator"],
}
for source, target in RESULTS.items():
assert (
sorted(
[t.split("-")[0] for s, t in flow.edges if str(s).startswith(source)]
)
== target
)
with flow as runner:
runner(">", {})
def test_context_manager():
z = EndOperator()
a = OperatorA()
b = OperatorB()
c = OperatorC()
d = OperatorD()
flow = a > [b > z, c > d > z]
RESULTS = {
"OperatorA": ["OperatorB", "OperatorC"],
"OperatorB": ["EndOperator"],
"OperatorC": ["OperatorD"],
"OperatorD": ["EndOperator"],
}
for source, target in RESULTS.items():
assert (
sorted(
[t.split("-")[0] for s, t in flow.edges if str(s).startswith(source)]
)
== target
)
payloads = ["a", "b", "c", "d", "e"]
with flow as runner:
for payload in payloads:
runner(payload, {})
if __name__ == "__main__": # pragma: no cover
test_flow_builder_valid()
test_flow_builder_invalid_uninstantiated()
test_flow_builder_invalid_wrong_type()
test_branching()
test_context_manager()
print("okay")
|
StarcoderdataPython
|
1693979
|
<filename>mokapapp/lib.py
"""lib.py
General class and function library for updating Moka panels.
"""
import argparse
import configparser
import itertools
import logging
import requests
logger = logging.getLogger(__name__)
def _config_reader(config_file):
config = configparser.ConfigParser()
config.read(config_file)
return config
def cli(args):
"""Parse command line arguments.
Args:
args (List): Command line arguments e.g. ['-c', 'config.ini']
"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help="A mokapapp config.ini file", type=_config_reader)
parser.add_argument('--logfile', help="A file to write application log outputs", default=None)
parser.add_argument(
'--head', help="An integer limit for the number of PanelApp panels to process", default=None, type=int
)
return parser.parse_args(args)
def get_hgnc_set(panels):
"""Returns a set of all HGNC ids from a list of MokaPanel objects"""
# Create a list of all unique HGNCID-GeneSymbol combinations in the MokaPanels
genes_nest = [panel.genes for panel in panels]
genes_list = itertools.chain.from_iterable(genes_nest) # Flatten list of lists
# Return the set of unique HGNCIDs
hgnc_list, _ = zip(*genes_list)
hgnc_set = set(hgnc_list)
logger.debug('Pulled {} hgnc ids from {} MokaPanel objects'.format(len(hgnc_set), len(panels)))
return hgnc_set
class MokaPanel():
"""Convert PanelApp data into Moka readable panels.
Args:
panel_hash(str): A panelapp panel hash e.g. 58346b8b8f62036225ca8a7d
name(str): Panel name e.g. Congenital disorders of glycosylation
version(str): Panel version e.g. 1.6
genes(List[Tuple,]): A list of (HGNC, SYMBOL) tuples from PanelApp
colour(str): Human readable panel colour converted from PanelApp
gene confidence level scores. e.g. 'Amber'.
"""
def __init__(self, panel_hash, name, version, genes, colour):
self.hash = panel_hash
self.name = name
self.version = version
self.genes = genes
self.colour = colour
def __str__(self):
return f"{self.hash}, {self.name}. No Genes: {len(self.genes)}"
def as_dict(self):
return {
"hash": self.hash,
"name": self.name,
"version": self.version,
"genes": self.genes,
"colour": self.colour
}
@staticmethod
def from_dict(data):
"""Returns a MokaPanel object from a dictionary of MokaPanel key-value pairs.
@staticmethod allows panels to be built from the class rather than from instances e.g.:
MokaPanel.from_dict(data)
"""
genes = [tuple(hgnc_symbol) for hgnc_symbol in data['genes']]
return MokaPanel(
data['hash'], data['name'], data['version'], genes,
data['colour']
)
class MokaPanelFactory():
"""Build Moka Panels from PanelApp data and separate panels by gene colours.
Args:
colours(List): Gene colours to filter for each panel
panels(List[dict,]): List of dictionary objects containing PanelApp
/panels API endpoint responses."""
def __init__(self, panels, colours=None):
self.colours = colours
self.panels = panels
self.logger = logging.getLogger(__name__)
self.logger.debug(f'Building Moka Panels with {self.colours}')
def build(self):
"""Returns a list of MokaPanel objects for each colour-panel combination.
Args:
panels(dict): Panel response from Panel App /panels endpoint
"""
# All panels and colours
all_panels = itertools.product(self.colours, self.panels)
# Build MokaPanels
for colour, panel in all_panels:
self.logger.debug('Getting {}, {}, {}'.format(colour, panel['id'], panel['name']))
# Get MokaPanel object.
moka_panel = self._get_moka_panel(colour, panel)
self.logger.debug(moka_panel)
# moka_panel is None if the panel has 0 genes or no hash present in the API
if moka_panel:
# Yield panel. The generator can be chained for more efficient processing
yield(moka_panel)
def _get_moka_panel(self, colour, panel):
"""Returns a MokaPanel object for the colour and panel provided"""
# To create accurate Moka Panel name and key binding, colour must be
# capitalized. E.g. "Amber"
_colour = colour.capitalize()
# Get genes in panel filtered to the colour
genes = self._get_panel_genes(_colour, panel['id'])
# Return none if panel has no genes or hash
if len(genes) == 0 or panel['hash_id'] is None:
self.logger.debug(
f'{panel["name"], panel["id"]} Skipping MokaPanel build: HashID {panel["hash_id"]}, gene_count {len(genes)})'
)
return None
else: # Return MokaPanel
mp = MokaPanel(
"{}_{}".format(panel['hash_id'], _colour),
self._get_moka_name(panel['name'], _colour, panel['version']),
panel['version'],
genes,
_colour
)
self.logger.debug(f"Returning {mp}")
return mp
def _get_moka_name(self, name, colour, version):
"""Return a string containing the human-readable panel name for Moka"""
clean_name = name.replace('_', '-')
return "{} (Panel App {} v{})".format(clean_name, colour, version)
def _get_panel_genes(self, colour, panel_id):
"""Query PanelApp API for panel genes filtered by colour.
Args:
colour (str): Filter for genes returned for input panel. Options: Green, Amber, Red
panel_id (str): ID for a PanelApp panel. E.g. 67 (id) or 595ce30f8f62036352471f39 (hash)
"""
endpoint = "https://panelapp.genomicsengland.co.uk/api/v1/panels"
# PanelApp genes API contains a confidence_level field with values (0-4). The PanelApp
# handbook describes the mapping of these values to gene colours. We represent this as a
# dictionary for lookups.
panel_colour_map = {
"4": "Green", "3": "Green", "2": "Amber", "1": "Red", "0": "Red"
}
# Call PanelApp API for panel data using ID
response = requests.get(f"{endpoint}/{panel_id}")
response.raise_for_status()
r_json = response.json()
self.logger.debug(f"Found genes for {panel_id}")
# Return a list of (HGNCID, GeneSymbol) tuples for each gene in the API response,
# filtered by colour
genes = [
(record['gene_data']['hgnc_id'], record['gene_data']['hgnc_symbol'])
for record in r_json['genes']
if panel_colour_map[record['confidence_level']] == colour
]
self.logger.debug(f'{len(genes)} {colour} present in panel {panel_id}')
return genes
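# --- Illustrative usage sketch (added for clarity; not part of the original module, and the
# example panel data below is made up) ---
# It shows how MokaPanel.from_dict rebuilds a panel and how get_hgnc_set collects HGNC ids.
if __name__ == "__main__":  # pragma: no cover
    example = {
        "hash": "58346b8b8f62036225ca8a7d_Green",
        "name": "Example panel (Panel App Green v1.6)",
        "version": "1.6",
        "genes": [["HGNC:5", "A1BG"], ["HGNC:37133", "A1BG-AS1"]],
        "colour": "Green",
    }
    panel = MokaPanel.from_dict(example)
    print(panel)                   # "<hash>, <name>. No Genes: 2"
    print(get_hgnc_set([panel]))   # {'HGNC:5', 'HGNC:37133'} (order may vary)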
|
StarcoderdataPython
|
161740
|
"""
Data loader for TUM RGBD benchmark
@author: <NAME>
@date: March 2019
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys, os, random
import pickle
import numpy as np
import os.path as osp
import torch.utils.data as data
from scipy.misc import imread
from tqdm import tqdm
from transforms3d import quaternions
from cv2 import resize, INTER_NEAREST
"""
The following scripts use the directory structure as:
root
---- fr1
-------- rgbd_dataset_freiburg1_360
-------- rgbd_dataset_freiburg1_desk
-------- rgbd_dataset_freiburg1_desk2
-------- ...
---- fr2
-------- rgbd_dataset_freiburg2_360_hemisphere
-------- rgbd_dataset_freiburg2_360_kidnap
-------- rgbd_dataset_freiburg2_coke
-------- ....
---- fr3
-------- rgbd_dataset_freiburg3_cabinet
-------- rgbd_dataset_freiburg3_long_office_household
-------- rgbd_dataset_freiburg3_nostructure_notexture_far
-------- ....
"""
def tum_trainval_dict():
""" the sequence dictionary of TUM dataset
https://vision.in.tum.de/data/datasets/rgbd-dataset/download
The calibration parameters refers to:
https://vision.in.tum.de/data/datasets/rgbd-dataset/file_formats
"""
return {
'fr1': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': ['rgbd_dataset_freiburg1_desk2',
'rgbd_dataset_freiburg1_floor',
'rgbd_dataset_freiburg1_room',
'rgbd_dataset_freiburg1_xyz',
'rgbd_dataset_freiburg1_rpy',
'rgbd_dataset_freiburg1_plant',
'rgbd_dataset_freiburg1_teddy']
},
'fr2': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': ['rgbd_dataset_freiburg2_360_hemisphere',
'rgbd_dataset_freiburg2_large_no_loop',
'rgbd_dataset_freiburg2_large_with_loop',
'rgbd_dataset_freiburg2_pioneer_slam',
'rgbd_dataset_freiburg2_pioneer_slam2',
'rgbd_dataset_freiburg2_pioneer_slam3',
'rgbd_dataset_freiburg2_xyz',
'rgbd_dataset_freiburg2_360_kidnap',
'rgbd_dataset_freiburg2_rpy',
'rgbd_dataset_freiburg2_coke',
'rgbd_dataset_freiburg2_desk_with_person',
'rgbd_dataset_freiburg2_dishes',
'rgbd_dataset_freiburg2_flowerbouquet_brownbackground',
'rgbd_dataset_freiburg2_metallic_sphere2',
'rgbd_dataset_freiburg2_flowerbouquet'
]
},
'fr3': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': [
'rgbd_dataset_freiburg3_walking_halfsphere',
'rgbd_dataset_freiburg3_walking_rpy',
'rgbd_dataset_freiburg3_cabinet',
'rgbd_dataset_freiburg3_nostructure_notexture_far',
'rgbd_dataset_freiburg3_nostructure_notexture_near_withloop',
'rgbd_dataset_freiburg3_nostructure_texture_far',
'rgbd_dataset_freiburg3_nostructure_texture_near_withloop',
'rgbd_dataset_freiburg3_sitting_rpy',
'rgbd_dataset_freiburg3_sitting_static',
'rgbd_dataset_freiburg3_sitting_xyz',
'rgbd_dataset_freiburg3_structure_notexture_near',
'rgbd_dataset_freiburg3_structure_texture_far',
'rgbd_dataset_freiburg3_structure_texture_near',
'rgbd_dataset_freiburg3_teddy']
}
}
def tum_test_dict():
""" the trajectorys held out for testing TUM dataset
"""
return {
'fr1': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': ['rgbd_dataset_freiburg1_360',
'rgbd_dataset_freiburg1_desk']
},
'fr2': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': ['rgbd_dataset_freiburg2_desk',
'rgbd_dataset_freiburg2_pioneer_360']
},
'fr3': {
'calib': [525.0, 525.0, 319.5, 239.5],
'seq': ['rgbd_dataset_freiburg3_walking_static', # dynamic scene
'rgbd_dataset_freiburg3_walking_xyz', # dynamic scene
'rgbd_dataset_freiburg3_long_office_household']
}
}
class TUM(data.Dataset):
base = 'data'
def __init__(self, root = '', category='train',
keyframes=[1], data_transform=None, select_traj=None):
"""
:param root: the root directory of the data
:param category: the dataset split to select (train, validation, test)
:param keyframes: the keyframe intervals to use.
Test data only supports one keyframe at a time;
train/validation data supports mixing different keyframes.
:param select_traj: select one particular trajectory at runtime.
Only supported for testing.
"""
super(TUM, self).__init__()
self.image_seq = []
self.depth_seq = []
self.invalid_seq = []
self.cam_pose_seq= []
self.calib = []
self.seq_names = []
self.ids = 0
self.seq_acc_ids = [0]
self.keyframes = keyframes
self.transforms = data_transform
if category == 'test':
self.__load_test(root+'/data_tum', select_traj)
else: # train and validation
self.__load_train_val(root+'/data_tum', category)
# downscale the input image to a quarter
self.fx_s = 0.25
self.fy_s = 0.25
print('TUM dataloader for {:} using keyframe {:}: \
{:} valid frames'.format(category, keyframes, self.ids))
def __load_train_val(self, root, category):
tum_data = tum_trainval_dict()
for ks, scene in tum_data.items():
for seq_name in scene['seq']:
seq_path = osp.join(ks, seq_name)
self.calib.append(scene['calib'])
# synchronized trajectory file
sync_traj_file = osp.join(root, seq_path, 'sync_trajectory.pkl')
if not osp.isfile(sync_traj_file):
print("The synchronized trajectory file {:} has not been generated.".format(seq_path))
print("Generate it now...")
write_sync_trajectory(root, ks, seq_name)
with open(sync_traj_file, 'rb') as p:
trainval = pickle.load(p)
total_num = len(trainval)
# the ratio to split the train & validation set
if category == 'train':
start_idx, end_idx = 0, int(0.95*total_num)
else:
start_idx, end_idx = int(0.95*total_num), total_num
images = [trainval[idx][1] for idx in range(start_idx, end_idx)]
depths = [trainval[idx][2] for idx in range(start_idx, end_idx)]
extrin = [tq2mat(trainval[idx][0]) for idx in range(start_idx, end_idx)]
self.image_seq.append(images)
self.depth_seq.append(depths)
self.cam_pose_seq.append(extrin)
self.seq_names.append(seq_path)
self.ids += max(0, len(images) - max(self.keyframes))
self.seq_acc_ids.append(self.ids)
def __load_test(self, root, select_traj=None):
""" Note:
The test trajectory is loaded slightly different from the train/validation trajectory.
We only select keyframes from the entire trajectory, rather than use every individual frame.
For a given trajectory of length N, using key-frame 2, the train/validation set will use
[[1, 3], [2, 4], [3, 5],...[N-1, N]],
while test set will use pair
[[1, 3], [3, 5], [5, 7],...[N-1, N]]
This difference results in a change in the trajectory length when using different keyframes.
The benefit of sampling keyframes for the test set is that the output is a more reasonable trajectory,
while in training/validation we fully leverage every pair of images.
"""
tum_data = tum_test_dict()
assert(len(self.keyframes) == 1)
kf = self.keyframes[0]
self.keyframes = [1]
for ks, scene in tum_data.items():
for seq_name in scene['seq']:
seq_path = osp.join(ks, seq_name)
if select_traj is not None:
if seq_path != select_traj: continue
self.calib.append(scene['calib'])
# synchronized trajectory file
sync_traj_file = osp.join(root, seq_path, 'sync_trajectory.pkl')
if not osp.isfile(sync_traj_file):
print("The synchronized trajectory file {:} has not been generated.".format(seq_path))
print("Generate it now...")
write_sync_trajectory(root, ks, seq_name)
with open(sync_traj_file, 'rb') as p:
frames = pickle.load(p)
total_num = len(frames)
images = [frames[idx][1] for idx in range(0, total_num, kf)]
depths = [frames[idx][2] for idx in range(0, total_num, kf)]
extrin = [tq2mat(frames[idx][0]) for idx in range(0, total_num, kf)]
self.image_seq.append(images)
self.depth_seq.append(depths)
self.cam_pose_seq.append(extrin)
self.seq_names.append(seq_path)
self.ids += max(0, len(images)-1)
self.seq_acc_ids.append(self.ids)
if len(self.image_seq) == 0:
raise Exception("The specified trajectory is not in the test set.")
def __getitem__(self, index):
seq_idx = max(np.searchsorted(self.seq_acc_ids, index+1) - 1, 0)
frame_idx = index - self.seq_acc_ids[seq_idx]
this_idx = frame_idx
next_idx = frame_idx + random.choice(self.keyframes)
color0 = self.__load_rgb_tensor(self.image_seq[seq_idx][this_idx])
color1 = self.__load_rgb_tensor(self.image_seq[seq_idx][next_idx])
depth0 = self.__load_depth_tensor(self.depth_seq[seq_idx][this_idx])
depth1 = self.__load_depth_tensor(self.depth_seq[seq_idx][next_idx])
if self.transforms:
color0, color1 = self.transforms([color0, color1])
# normalize the coordinate
calib = np.asarray(self.calib[seq_idx], dtype=np.float32)
calib[0] *= self.fx_s
calib[1] *= self.fy_s
calib[2] *= self.fx_s
calib[3] *= self.fy_s
cam_pose0 = self.cam_pose_seq[seq_idx][this_idx]
cam_pose1 = self.cam_pose_seq[seq_idx][next_idx]
transform = np.dot(np.linalg.inv(cam_pose1), cam_pose0).astype(np.float32)
name = '{:}_{:06d}to{:06d}'.format(self.seq_names[seq_idx],
this_idx, next_idx)
return color0, color1, depth0, depth1, transform, calib, name
def __len__(self):
return self.ids
def __load_rgb_tensor(self, path):
""" Load the rgb image
"""
image = imread(path)[:, :, :3]
image = image.astype(np.float32) / 255.0
image = resize(image, None, fx=self.fx_s, fy=self.fy_s)
return image
def __load_depth_tensor(self, path):
""" Load the depth:
The depth images are scaled by a factor of 5000, i.e., a pixel
value of 5000 in the depth image corresponds to a distance of
1 meter from the camera, 10000 to 2 meter distance, etc.
A pixel value of 0 means missing value/no data.
"""
depth = imread(path).astype(np.float32) / 5e3
depth = resize(depth, None, fx=self.fx_s, fy=self.fy_s, interpolation=INTER_NEAREST)
depth = np.clip(depth, a_min=0.5, a_max=5.0) # the accurate range of kinect depth
return depth[np.newaxis, :]
"""
Some utility files to work with the data
"""
def tq2mat(tq):
""" transform translation-quaternion (tq) to (4x4) matrix
"""
tq = np.array(tq)
T = np.eye(4)
T[:3,:3] = quaternions.quat2mat(np.roll(tq[3:], 1))
T[:3, 3] = tq[:3]
return T
def write_sync_trajectory(local_dir, dataset, subject_name):
"""
:param local_dir: the root of the data directory
:param dataset: the dataset category 'fr1', 'fr2' or 'fr3'
:param subject_name: the sequence name, e.g. 'rgbd_dataset_freiburg1_desk'
"""
rgb_file = osp.join(local_dir, dataset, subject_name, 'rgb.txt')
depth_file= osp.join(local_dir, dataset, subject_name, 'depth.txt')
pose_file = osp.join(local_dir, dataset, subject_name, 'groundtruth.txt')
rgb_list = read_file_list(rgb_file)
depth_list=read_file_list(depth_file)
pose_list = read_file_list(pose_file)
matches = associate_three(rgb_list, depth_list, pose_list, offset=0.0, max_difference=0.02)
trajectory_info = []
for (a,b,c) in matches:
pose = [float(x) for x in pose_list[c]]
rgb_file = osp.join(local_dir, dataset, subject_name, rgb_list[a][0])
depth_file = osp.join(local_dir, dataset, subject_name, depth_list[b][0])
trajectory_info.append([pose, rgb_file, depth_file])
dataset_path = osp.join(local_dir, dataset, subject_name, 'sync_trajectory.pkl')
with open(dataset_path, 'wb') as output:
pickle.dump(trajectory_info, output)
txt_path = osp.join(local_dir, dataset, subject_name, 'sync_trajectory.txt')
pickle2txts(dataset_path, txt_path)
def pickle2txts(pickle_file, txt_file):
'''
write the pickle_file into a txt_file
'''
with open(pickle_file, 'rb') as pkl_file:
traj = pickle.load(pkl_file)
with open(txt_file, 'w') as f:
for frame in traj:
f.write(' '.join(['%f ' % x for x in frame[0]]))
f.write(frame[1] + ' ')
f.write(frame[2] + '\n')
"""
The following utility files are provided by TUM RGBD dataset benchmark
Refer: https://vision.in.tum.de/data/datasets/rgbd-dataset/tools
"""
def read_file_list(filename):
"""
Reads a trajectory from a text file.
File format:
The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)
and "d1 d2 d3.." is arbitary data (e.g., a 3D position and 3D orientation) associated to this timestamp.
Input:
filename -- File name
Output:
dict -- dictionary of (stamp,data) tuples
"""
file = open(filename)
data = file.read()
lines = data.replace(","," ").replace("\t"," ").split("\n")
list = [[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
list = [(float(l[0]),l[1:]) for l in list if len(l)>1]
return dict(list)
def associate(first_list, second_list,offset,max_difference):
"""
Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim
to find the closest match for every input tuple.
Input:
first_list -- first dictionary of (stamp,data) tuples
second_list -- second dictionary of (stamp,data) tuples
offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
max_difference -- search radius for candidate generation
Output:
matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
"""
# Copy the keys into lists so matched entries can be removed below
# (dict .keys() views do not support .remove() in Python 3).
first_keys = list(first_list)
second_keys = list(second_list)
potential_matches = [(abs(a - (b + offset)), a, b)
for a in first_keys
for b in second_keys
if abs(a - (b + offset)) < max_difference]
potential_matches.sort()
matches = []
for diff, a, b in potential_matches:
if a in first_keys and b in second_keys:
first_keys.remove(a)
second_keys.remove(b)
matches.append((a, b))
matches.sort()
return matches
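# Illustrative example (added comment): with rgb stamps {1.00: [...]} and depth stamps
# {1.01: [...]}, offset=0.0 and max_difference=0.02, the only candidate pair has a time
# difference of 0.01 < 0.02, so associate(...) returns [(1.00, 1.01)].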
def associate_three(first_list, second_list, third_list, offset, max_difference):
"""
Associate three dictionaries of (stamp,data). As the time stamps never match exactly, we aim to find the closest match for every input tuple.
Input:
first_list -- first dictionary of (stamp,data) tuples (default to be rgb)
second_list -- second dictionary of (stamp,data) tuples (default to be depth)
third_list -- third dictionary of (stamp,data) tuples (default to be pose)
offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
max_difference -- search radius for candidate generation
Output:
matches -- list of matched tuples ((stamp1,data1),(stamp2,data2),(stamp3,data3))
"""
first_keys = list(first_list)
second_keys = list(second_list)
third_keys = list(third_list)
# find the potential matches in (rgb, depth)
potential_matches_ab = [(abs(a - (b + offset)), a, b)
for a in first_keys
for b in second_keys
if abs(a - (b + offset)) < max_difference]
potential_matches_ab.sort()
matches_ab = []
for diff, a, b in potential_matches_ab:
if a in first_keys and b in second_keys:
matches_ab.append((a, b))
matches_ab.sort()
# find the potential matches in (rgb, depth, pose)
potential_matches = [(abs(a - (c + offset)), abs(b - (c + offset)), a,b,c)
for (a,b) in matches_ab
for c in third_keys
if abs(a - (c + offset)) < max_difference and
abs(b - (c + offset)) < max_difference]
potential_matches.sort()
matches_abc = []
for diff_rgb, diff_depth, a, b, c in potential_matches:
if a in first_keys and b in second_keys and c in third_keys:
first_keys.remove(a)
second_keys.remove(b)
third_keys.remove(c)
matches_abc.append((a,b,c))
matches_abc.sort()
return matches_abc
if __name__ == '__main__':
loader = TUM(category='test', keyframes=[1])
import torchvision.utils as torch_utils
torch_loader = data.DataLoader(loader, batch_size=16,
shuffle=False, num_workers=4)
for batch in torch_loader:
color0, color1, depth0, depth1, transform, K, name = batch
B, C, H, W = color0.shape
bcolor0_img = torch_utils.make_grid(color0, nrow=4)
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(bcolor0_img.numpy().transpose(1,2,0))
plt.show()
|
StarcoderdataPython
|
1765717
|
"""Reward processors
Each method takes the metadata with the following keys:
- env_reward: MiniWoB official reward
- raw_reward: Raw task reward without time penalty
- done: Whether the task is done
Then it returns a reward (float).
"""
def get_original_reward(metadata):
return float(metadata['env_reward'])
def get_raw_reward(metadata):
"""Get the raw reward without time penalty.
This is usually 1 for success and -1 for failure, but not always.
"""
return float(metadata['raw_reward'])
def get_click_checkboxes_hard(metadata):
"""(click-checkboxes task) Reward without partial credits.
Give 1 if the raw reward is 1. Otherwise, give -1.
"""
if not metadata['done']:
return 0.
return 1. if metadata['raw_reward'] == 1. else -1.
def raw_reward_threshold(threshold):
"""Return a reward processor that cut off at a threshold."""
def fn(metadata):
if metadata['raw_reward'] > threshold:
return 1.
elif metadata['raw_reward'] > 0:
return -1
return metadata['raw_reward']
return fn
def get_reward_processor(config):
if config.type == 'augmented':
return get_raw_reward
elif config.type == 'hard':
return get_click_checkboxes_hard
elif config.type == "time_independent":
return get_raw_reward
elif config.type == "time_discounted":
return get_original_reward
elif config.type == "click_checkboxes_hard":
return get_click_checkboxes_hard
else:
raise ValueError(
"{} not a valid reward processor type".format(config.type))
|
StarcoderdataPython
|
136225
|
from setuptools import setup, find_packages
setup(name='gym_gridworld',
version='0.0.1',
install_requires=['gym'],
author="<NAME>",
author_email="<EMAIL>",
description="A Gym environment representing a 2d rectangular grid world",
packages=find_packages(),
)
|
StarcoderdataPython
|
72688
|
#!/bin/python3
from contextlib import contextmanager
# pip3 install datetime
import datetime
import errno
import time
import shutil
import sys
import tempfile
from os import listdir, sep as os_sep
from os.path import isdir, isfile, join
# local imports
import consts
from args import Arguments, UpdateType
from github import Github
from globals import CRATES_VERSION, PULL_REQUESTS, SEARCH_INDEX, SEARCH_INDEX_BEFORE
from globals import SEARCH_INDEX_AFTER
from my_toml import TomlHandler
from utils import add_to_commit, clone_repo, exec_command_and_print_error, get_features
from utils import checkout_target_branch, get_file_content, write_error, write_into_file
from utils import commit, commit_and_push, create_pull_request, push, revert_changes, write_msg
from utils import create_tag_and_push, get_last_commit_date, merging_branches, publish_crate
from utils import check_rustdoc_is_nightly, check_if_up_to_date
@contextmanager
def temporary_directory():
name = tempfile.mkdtemp()
try:
yield name
finally:
try:
shutil.rmtree(name)
except OSError as err:
# if the directory has already been removed, no need to raise an error
if err.errno != errno.ENOENT:
raise
# Doesn't handle version numbers containing anything other than digits and '.'!
def update_version(version, update_type, section_name, place_type="section"):
version_split = version.replace('"', '').split('.')
if len(version_split) != 3:
# houston, we've got a problem!
write_error('Invalid version in {} "{}": {}'.format(place_type, section_name, version))
return None
if update_type == UpdateType.MINOR:
version_split[update_type] = str(int(version_split[update_type]) + 1)
elif update_type == UpdateType.MEDIUM:
version_split[update_type] = str(int(version_split[update_type]) + 1)
version_split[UpdateType.MINOR] = '0'
else:
version_split[update_type] = str(int(version_split[update_type]) + 1)
version_split[UpdateType.MEDIUM] = '0'
version_split[UpdateType.MINOR] = '0'
return '"{}"'.format('.'.join(version_split))
def check_and_update_version(entry, update_type, dependency_name, versions_update):
if entry.startswith('"') or entry.startswith("'"):
return update_version(entry, update_type, dependency_name, place_type="dependency")
# get version and update it
entry = [e.strip() for e in entry.split(',')]
dic = {}
for part in entry:
if part.startswith('{'):
part = part[1:].strip()
if part.endswith('}'):
part = part[:-1].strip()
part = [p.strip() for p in part.split('=')]
dic[part[0]] = part[1]
if part[0] == 'version':
old_version = part[1]
new_version = update_version(old_version, update_type, dependency_name,
place_type="dependency")
if new_version is None:
return None
# Mostly for debugging, not really useful otherwise...
versions_update.append({'dependency_name': dependency_name,
'old_version': old_version,
'new_version': new_version})
dic[part[0]] = '"{}"'.format(new_version)
return '{{{}}}'.format(', '.join(['{} = {}'.format(entry, dic[entry]) for entry in dic]))
def find_crate(crate_name):
for entry in consts.CRATE_LIST:
if entry['crate'] == crate_name:
return True
return False
def update_crate_version(repo_name, crate_name, crate_dir_path, temp_dir, specified_crate):
file_path = join(join(join(temp_dir, repo_name), crate_dir_path), "Cargo.toml")
output = file_path.replace(temp_dir, "")
if output.startswith('/'):
output = output[1:]
write_msg('=> Updating crate versions for {}'.format(file_path))
content = get_file_content(file_path)
if content is None:
return False
toml = TomlHandler(content)
for section in toml.sections:
if section.name == 'package':
section.set('version', CRATES_VERSION[crate_name])
elif specified_crate is not None:
continue
elif section.name.startswith('dependencies.') and find_crate(section.name[13:]):
if specified_crate is None and section.name[13:] not in CRATES_VERSION:
input('"{}" dependency not found in versions for crate "{}"...'
.format(section.name[13:], crate_name))
continue
section.set('version', CRATES_VERSION[section.name[13:]])
elif section.name == 'dependencies':
for entry in section.entries:
if find_crate(entry['key']):
section.set(entry['key'], CRATES_VERSION[entry['key']])
result = write_into_file(file_path, str(toml))
write_msg('=> {}: {}'.format(output.split(os_sep)[-2],
'Failure' if result is False else 'Success'))
return result
def update_repo_version(repo_name, crate_name, crate_dir_path, temp_dir, update_type, no_update):
# pylint: disable=too-many-branches,too-many-locals
file_path = join(join(join(temp_dir, repo_name), crate_dir_path), "Cargo.toml")
output = file_path.replace(temp_dir, "")
if output.startswith('/'):
output = output[1:]
write_msg('=> Updating versions for {}'.format(file_path))
content = get_file_content(file_path)
if content is None:
return False
toml = TomlHandler(content)
versions_update = []
for section in toml.sections:
if (section.name == 'package' or
(section.name.startswith('dependencies.') and find_crate(section.name[13:]))):
version = section.get('version', None)
if version is None:
continue
new_version = None
if no_update is False:
new_version = update_version(version, update_type, section.name)
else:
new_version = version
if new_version is None:
return False
# Print the status directly if it's the crate's version.
if section.name == 'package':
write_msg('\t{}: {} => {}'.format(output.split(os_sep)[-2], version, new_version))
CRATES_VERSION[crate_name] = new_version
else: # Otherwise add it to the list to print later.
versions_update.append({'dependency_name': section.name[13:],
'old_version': version,
'new_version': new_version})
section.set('version', new_version)
elif section.name == 'dependencies':
for entry in section.entries:
if find_crate(entry):
new_version = check_and_update_version(section.entries[entry],
update_type,
entry,
[])
section.set(entry, new_version)
for update in versions_update:
write_msg('\t{}: {} => {}'.format(update['dependency_name'],
update['old_version'],
update['new_version']))
out = str(toml)
if not out.endswith("\n"):
out += '\n'
result = True
if no_update is False:
# We only write into the file if we're not just getting the crates version.
result = write_into_file(file_path, out)
write_msg('=> {}: {}'.format(output.split(os_sep)[-2],
'Failure' if result is False else 'Success'))
return result
def update_badges(repo_name, temp_dir, specified_crate):
path = join(join(temp_dir, repo_name), "_data/crates.json")
content = get_file_content(path)
current = None
out = []
for line in content.split("\n"):
if line.strip().startswith('"name": "'):
current = line.split('"name": "')[-1].replace('",', '')
if specified_crate is not None and current != specified_crate:
current = None
elif line.strip().startswith('"max_version": "') and current is not None:
version = line.split('"max_version": "')[-1].replace('"', '').replace(',', '')
out.append(line.replace('": "{}"'.format(version),
'": {}'.format(CRATES_VERSION[current])) + '\n')
current = None
continue
out.append(line + '\n')
return write_into_file(path, ''.join(out).replace('\n\n', '\n'))
def cleanup_doc_repo(temp_dir):
path = join(temp_dir, consts.DOC_REPO)
dirs = ' '.join(['"{}"'.format(join(path, f)) for f in listdir(path)
if isdir(join(path, f)) and f.startswith('.') is False])
command = ['bash', '-c', 'cd {} && rm -rf {}'.format(path, dirs)]
if not exec_command_and_print_error(command):
input("Couldn't clean up docs! Try to fix it and then press ENTER to continue...")
def build_docs(repo_name, temp_dir, extra_path, crate_name):
# pylint: disable=too-many-locals
path = join(join(temp_dir, repo_name), extra_path)
features = get_features(join(path, 'Cargo.toml'))
# We can't add "--no-deps" argument to cargo doc, otherwise we lose links to items of
# other crates...
#
# Also, we run "cargo update" in case the lgpl-docs repository has been updated (so we get the
# last version).
command = ['bash', '-c',
('cd {} && cargo update && cargo rustdoc --no-default-features '
'--features "{}"').format(path, features)]
if not exec_command_and_print_error(command):
input("Couldn't generate docs! Try to fix it and then press ENTER to continue...")
doc_folder = join(path, 'target/doc')
try:
file_list = ' '.join(['"{}"'.format(f) for f in listdir(doc_folder)
if isfile(join(doc_folder, f))])
except Exception as err:
write_error('Error occurred in build_docs: {}'.format(err))
input("It seems like the \"{}\" folder doesn't exist. Try to fix it then press ENTER..."
.format(doc_folder))
# Copy documentation files
command = ['bash', '-c',
'cd {} && cp -r "{}" {} "{}"'
.format(doc_folder,
crate_name.replace('-', '_'),
file_list,
join(temp_dir, consts.DOC_REPO))]
if not exec_command_and_print_error(command):
input("Couldn't copy docs! Try to fix it and then press ENTER to continue...")
# Copy source files
destination = "{}/src".format(join(temp_dir, consts.DOC_REPO))
command = ['bash', '-c',
'cd {0} && mkdir -p "{1}" && cp -r "src/{2}" "{1}/"'
.format(doc_folder,
destination,
crate_name.replace('-', '_'))]
if not exec_command_and_print_error(command):
input("Couldn't copy doc source files! Try to fix it and then press ENTER to continue...")
search_index = join(path, 'target/doc/search-index.js')
lines = get_file_content(search_index).split('\n')
before = True
fill_extras = len(SEARCH_INDEX_BEFORE) == 0
found = False
for line in lines:
if line.startswith('searchIndex['):
before = False
# We need to be careful in here if we're in a sys repository (which should never be the
# case!).
if line.startswith('searchIndex["{}"]'.format(crate_name.replace('-', '_'))):
SEARCH_INDEX.append(line)
found = True
elif fill_extras is True:
if before is True:
SEARCH_INDEX_BEFORE.append(line)
else:
SEARCH_INDEX_AFTER.append(line)
if found is False:
input("Couldn't find \"{}\" in `{}`!\nTry to fix it and then press ENTER to continue..."
.format(crate_name.replace('-', '_'), search_index))
def end_docs_build(temp_dir):
path = join(temp_dir, consts.DOC_REPO)
revert_changes(consts.DOC_REPO, temp_dir,
['COPYRIGHT.txt', 'LICENSE-APACHE.txt', 'LICENSE-MIT.txt'])
try:
with open(join(path, 'search-index.js'), 'w') as file:
file.write('\n'.join(SEARCH_INDEX_BEFORE))
file.write('\n'.join(SEARCH_INDEX))
file.write('\n'.join(SEARCH_INDEX_AFTER))
command = ['bash', '-c',
'cd minifier && cargo run --release -- "{}"'.format(path)]
if not exec_command_and_print_error(command):
input("Couldn't run minifier! Try to fix it and then press ENTER to continue...")
add_to_commit(consts.DOC_REPO, temp_dir, ['.'])
except Exception as err:
write_error('An exception occurred in "end_docs_build": {}'.format(err))
input("Press ENTER to continue...")
input('If you want to prevent "{}" from being updated, now is the time! Press ENTER to '
'continue...'.format(join(path, "main.js")))
def write_merged_prs(merged_prs, contributors, repo_url):
content = ''
for merged_pr in reversed(merged_prs):
if merged_pr.title.startswith('[release] '):
continue
if merged_pr.author not in contributors:
contributors.append(merged_pr.author)
md_content = (merged_pr.title.replace('<', '<')
.replace('>', '>')
.replace('[', '\\[')
.replace(']', '\\]')
.replace('*', '\\*')
.replace('_', '\\_'))
content += ' * [{}]({}/pull/{})\n'.format(md_content, repo_url, merged_pr.number)
return content + '\n'
def build_blog_post(repositories, temp_dir, token):
# pylint: disable=too-many-locals
write_msg('=> Building blog post...')
content = '''---
layout: post
author: {}
title: {}
categories: [front, crates]
date: {}
---
* Write intro here *
### Changes
For the interested ones, here is the list of the merged pull requests:
'''.format(input('Enter author name: '), input('Enter title: '),
time.strftime("%Y-%m-%d %H:00:00 +0000"))
contributors = []
git = Github(token)
oldest_date = None
for repo in repositories:
checkout_target_branch(repo, temp_dir, "crate")
success, out, err = get_last_commit_date(repo, temp_dir)
if not success:
write_msg("Couldn't get PRs for '{}': {}".format(repo, err))
continue
max_date = datetime.date.fromtimestamp(int(out))
if oldest_date is None or max_date < oldest_date:
oldest_date = max_date
write_msg("Gettings merged PRs from {}...".format(repo))
merged_prs = git.get_pulls(repo, consts.ORGANIZATION, 'closed', max_date, only_merged=True)
write_msg("=> Got {} merged PRs".format(len(merged_prs)))
if len(merged_prs) < 1:
continue
repo_url = '{}/{}/{}'.format(consts.GITHUB_URL, consts.ORGANIZATION, repo)
content += '[{}]({}):\n\n'.format(repo, repo_url)
content += write_merged_prs(merged_prs, contributors, repo_url)
write_msg("Gettings merged PRs from gir...")
merged_prs = git.get_pulls('gir', consts.ORGANIZATION, 'closed', oldest_date, only_merged=True)
write_msg("=> Got {} merged PRs".format(len(merged_prs)))
if len(merged_prs) > 0:
repo_url = '{}/{}/{}'.format(consts.GITHUB_URL, consts.ORGANIZATION, 'gir')
content += ('All this was possible thanks to the [gtk-rs/gir]({}) project as well:\n\n'
.format(repo_url))
content += write_merged_prs(merged_prs, contributors, repo_url)
content += 'Thanks to all of our contributors for their (awesome!) work on this release:\n\n'
# Sort contributors list alphabetically with case insensitive.
contributors = sorted(contributors, key=lambda s: s.casefold())
content += '\n'.join([' * [@{}]({}/{})'.format(contributor, consts.GITHUB_URL, contributor)
for contributor in contributors])
content += '\n'
file_name = join(join(temp_dir, consts.BLOG_REPO),
'_posts/{}-new-release.md'.format(time.strftime("%Y-%m-%d")))
try:
with open(file_name, 'w') as outfile:
outfile.write(content)
write_msg('New blog post written into "{}".'.format(file_name))
add_to_commit(consts.BLOG_REPO, temp_dir, [file_name])
commit(consts.BLOG_REPO, temp_dir, "Add new blog post")
except Exception as err:
write_error('build_blog_post failed: {}'.format(err))
write_msg('\n=> Here is the blog post content:\n{}\n<='.format(content))
write_msg('Done!')
def generate_new_tag(repository, temp_dir, specified_crate, args):
# We make a new tag for every crate:
#
# * If it is a "sys" crate, then we add its name to the tag
# * If not, then we just keep its version number
for crate in args.crates:
crate = crate['crate']
if crate['repository'] == repository:
if specified_crate is not None and crate['crate'] != specified_crate:
continue
tag_name = CRATES_VERSION[crate['crate']]
if crate['crate'].endswith('-sys') or crate['crate'].endswith('-sys-rs'):
tag_name = '{}-{}'.format(crate['crate'], tag_name)
write_msg('==> Creating new tag "{}" for repository "{}"...'.format(tag_name,
repository))
create_tag_and_push(tag_name, repository, temp_dir)
def generate_new_branches(repository, temp_dir, specified_crate, args):
# We make a new branch for every crate based on the current "crate" branch:
#
# * If it is a "sys" crate, then we ignore it.
# * If not, then we create a new branch
for crate in args.crates:
crate = crate['crate']
if crate['repository'] == repository:
if specified_crate is not None and crate['crate'] != specified_crate:
continue
if crate['crate'].endswith('-sys') or crate['crate'].endswith('-sys-rs'):
continue
branch_name = CRATES_VERSION[crate['crate']]
write_msg('==> Creating new branch "{}" for repository "{}"...'.format(branch_name,
repository))
push(repository, temp_dir, branch_name)
def update_doc_content_repository(repositories, temp_dir, token, no_push, args):
if clone_repo(consts.DOC_CONTENT_REPO, temp_dir) is False:
input('Try to fix the problem then press ENTER to continue...')
write_msg("Done!")
repo_path = join(temp_dir, consts.DOC_CONTENT_REPO)
write_msg("=> Generating documentation for crates...")
for repo in repositories:
current = None
for crate in args.crates:
crate = crate['crate']
if crate['repository'] == repo:
current = crate
break
if current is None:
input('No repository matches "{}", something is weird. (Press ENTER TO CONTINUE)'.format(repo))
continue
if current.get("doc", True) is False:
continue
write_msg('==> Generating documentation for "{}"'.format(current))
path = join(temp_dir, current['repository'])
command = ['bash', '-c',
'cd {} && make doc && mv vendor.md {}'.format(path,
join(repo_path,
current['crate']))]
if not exec_command_and_print_error(command):
input("Fix the error and then press ENTER")
write_msg('Done!')
write_msg('Committing "{}" changes...'.format(consts.DOC_CONTENT_REPO))
commit(consts.DOC_CONTENT_REPO, temp_dir, "Update vendor files")
if no_push is False:
push(consts.DOC_CONTENT_REPO, temp_dir, consts.MASTER_TMP_BRANCH)
# We always make minor releases in here, no need for a more important one considering we don't
# change the API.
if update_repo_version(consts.DOC_CONTENT_REPO, consts.DOC_CONTENT_REPO, "",
temp_dir, UpdateType.MINOR, False) is False:
write_error('The update for the "{}" crate failed...'.format(consts.DOC_CONTENT_REPO))
input('Fix the error and then press ENTER')
commit(consts.DOC_CONTENT_REPO, temp_dir, "Update version")
if no_push is False:
push(consts.DOC_CONTENT_REPO, temp_dir, consts.MASTER_TMP_BRANCH)
create_pull_request(consts.DOC_CONTENT_REPO, consts.MASTER_TMP_BRANCH, "master", token,
False)
input(('All done with the "{}" update: please merge the PR then press ENTER so the '
'publication can be performed...').format(consts.DOC_CONTENT_REPO))
publish_crate(consts.DOC_CONTENT_REPO, "", temp_dir, consts.DOC_CONTENT_REPO,
checkout_branch='master')
write_msg('Ok all done! We can move forward now!')
else:
write_msg(('All with "{}", you still need to publish a new version if you want the changes '
'to be taken into account').format(consts.DOC_CONTENT_REPO))
def clone_repositories(args, temp_dir):
write_msg('=> Cloning the repositories...')
repositories = []
for crate in args.crates:
crate = crate['crate']
if args.specified_crate is not None and crate['crate'] != args.specified_crate:
continue
if crate["repository"] not in repositories:
repositories.append(crate["repository"])
if clone_repo(crate["repository"], temp_dir) is False:
write_error('Cannot clone the "{}" repository...'.format(crate["repository"]))
return []
if len(repositories) < 1:
write_msg('No crate "{}" found. Aborting...'.format(args.specified_crate))
return []
if args.doc_only is False:
if clone_repo(consts.BLOG_REPO, temp_dir, depth=1) is False:
write_error('Cannot clone the "{}" repository...'.format(consts.BLOG_REPO))
return []
if clone_repo(consts.DOC_REPO, temp_dir, depth=1) is False:
write_error('Cannot clone the "{}" repository...'.format(consts.DOC_REPO))
return []
write_msg('Done!')
return repositories
def update_crates_versions(args, temp_dir, repositories):
write_msg('=> Updating [master] crates version...')
for crate in args.crates:
update_type = crate['up-type']
crate = crate['crate']
if args.specified_crate is not None and crate['crate'] != args.specified_crate:
continue
if update_repo_version(crate["repository"], crate["crate"], crate["path"],
temp_dir, update_type,
args.badges_only or args.tags_only) is False:
write_error('The update for the "{}" crate failed...'.format(crate["crate"]))
return False
write_msg('Done!')
if args.badges_only is False and args.tags_only is False:
write_msg('=> Committing{} to the "{}" branch...'
.format(" and pushing" if args.no_push is False else "",
consts.MASTER_TMP_BRANCH))
for repo in repositories:
commit(repo, temp_dir, "Update versions [ci skip]")
if args.no_push is False:
push(repo, temp_dir, consts.MASTER_TMP_BRANCH)
write_msg('Done!')
if args.no_push is False:
write_msg('=> Creating PRs on master branch...')
for repo in repositories:
create_pull_request(repo, consts.MASTER_TMP_BRANCH, "master", args.token)
write_msg('Done!')
return True
def update_crate_repositories_branches(args, temp_dir, repositories):
write_msg('=> Merging "master" branches into "crate" branches...')
for repo in repositories:
merging_branches(repo, temp_dir, "master")
write_msg('Done!')
write_msg('=> Updating [crate] crates version...')
for crate in args.crates:
crate = crate['crate']
if args.specified_crate is not None and crate['crate'] != args.specified_crate:
continue
if update_crate_version(crate["repository"], crate["crate"], crate["path"],
temp_dir, args.specified_crate) is False:
write_error('The update for the "{}" crate failed...'.format(crate["crate"]))
return False
write_msg('Done!')
write_msg('=> Committing{} to the "{}" branch...'
.format(" and pushing" if args.no_push is False else "",
consts.CRATE_TMP_BRANCH))
for repo in repositories:
commit(repo, temp_dir, "Update versions [ci skip]")
if args.no_push is False:
push(repo, temp_dir, consts.CRATE_TMP_BRANCH)
write_msg('Done!')
if args.no_push is False:
write_msg('=> Creating PRs on crate branch...')
for repo in repositories:
create_pull_request(repo, consts.CRATE_TMP_BRANCH, "crate", args.token)
write_msg('Done!')
return True
def publish_crates(args, temp_dir):
write_msg('+++++++++++++++')
write_msg('++ IMPORTANT ++')
write_msg('+++++++++++++++')
write_msg('Almost everything has been done. Take a deep breath, check for opened '
'pull requests and once done, we can move forward!')
write_msg("\n{}\n".format('\n'.join(PULL_REQUESTS)))
PULL_REQUESTS.append('=============')
input('Press ENTER to continue...')
write_msg('=> Publishing crates...')
for crate in args.crates:
crate = crate['crate']
if args.specified_crate is not None and crate['crate'] != args.specified_crate:
continue
publish_crate(crate["repository"], crate["path"], temp_dir, crate['crate'])
write_msg('Done!')
def create_example_repository_pull_request(args):
write_msg('=> Creating PR for examples repository')
create_pull_request("examples", "pending", "master", args.token)
write_msg('Done!')
def generate_tags_and_version_branches(args, temp_dir, repositories):
if args.no_push is True or args.doc_only is True or args.badges_only is True:
return
write_msg("=> Generating tags and branches...")
for repo in repositories:
generate_new_tag(repo, temp_dir, args.specified_crate, args)
generate_new_branches(repo, temp_dir, args.specified_crate, args)
write_msg('Done!')
def regenerate_documentation(args, temp_dir, repositories):
if args.badges_only is True or args.tags_only is True:
return
input("About to regenerate documentation. Are you sure you want to continue? " +
"(Press ENTER to continue)")
update_doc_content_repository(repositories, temp_dir, args.token, args.no_push, args)
write_msg('=> Preparing doc repo (too much dark magic in here urg)...')
cleanup_doc_repo(temp_dir)
write_msg('Done!')
write_msg('=> Building docs...')
for crate in args.crates:
crate = crate['crate']
if crate['crate'] == 'gtk-test':
continue
write_msg('-> Building docs for {}...'.format(crate['crate']))
build_docs(crate['repository'], temp_dir, crate['path'],
crate.get('doc_name', crate['crate']))
end_docs_build(temp_dir)
write_msg('Done!')
write_msg('=> Committing{} docs to the "{}" branch...'
.format(" and pushing" if args.no_push is False else "",
consts.CRATE_TMP_BRANCH))
commit(consts.DOC_REPO, temp_dir, "Regen docs")
if args.no_push is False:
push(consts.DOC_REPO, temp_dir, consts.CRATE_TMP_BRANCH)
create_pull_request(
consts.DOC_REPO,
consts.CRATE_TMP_BRANCH,
"gh-pages",
args.token)
write_msg("New pull request(s):\n\n{}\n".format('\n'.join(PULL_REQUESTS)))
write_msg('Done!')
def update_gtk_rs_blog(args, temp_dir):
if args.doc_only is True or args.tags_only is True:
return
write_msg('=> Updating blog...')
if update_badges(consts.BLOG_REPO, temp_dir, args.specified_crate) is False:
write_error("Error when trying to update badges...")
elif args.no_push is False:
commit_and_push(consts.BLOG_REPO, temp_dir, "Update versions",
consts.MASTER_TMP_BRANCH)
create_pull_request(
consts.BLOG_REPO,
consts.MASTER_TMP_BRANCH,
"master",
args.token)
write_msg('Done!')
def checkout_crate_branches(temp_dir, repositories):
write_msg('=> Checking out "crate" branches')
for repo in repositories:
checkout_target_branch(repo, temp_dir, "crate")
write_msg('Done!')
def start(args, temp_dir):
repositories = clone_repositories(args, temp_dir)
if len(repositories) < 1:
return
if args.doc_only is False:
if update_crates_versions(args, temp_dir, repositories) is False:
return
if args.badges_only is False and args.tags_only is False:
build_blog_post(repositories, temp_dir, args.token)
checkout_crate_branches(temp_dir, repositories)
if args.doc_only is False and args.badges_only is False and args.tags_only is False:
if update_crate_repositories_branches(args, temp_dir, repositories) is False:
return
if args.no_push is False:
publish_crates(args, temp_dir)
create_example_repository_pull_request(args)
generate_tags_and_version_branches(args, temp_dir, repositories)
regenerate_documentation(args, temp_dir, repositories)
update_gtk_rs_blog(args, temp_dir)
write_msg('Seems like most things are done! Now remains:')
write_msg(" * Check generated docs for all crates (don't forget to enable features!).")
input('Press ENTER to leave (once done, the temporary directory "{}" will be destroyed)'
.format(temp_dir))
def main(argv):
args = Arguments.parse_arguments(argv)
if args is None:
sys.exit(1)
if check_rustdoc_is_nightly() is False:
return
if check_if_up_to_date() is False:
return
write_msg('=> Creating temporary directory...')
with temporary_directory() as temp_dir:
write_msg('Temporary directory created in "{}"'.format(temp_dir))
start(args, temp_dir)
# Beginning of the script
if __name__ == "__main__":
main(sys.argv[1:])
|
StarcoderdataPython
|
3263848
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 15:02:55 2020
@author: posch
"""
import numpy as np
class model():
# HOOKE (linear elastic, rate form)
def hook(SIG_old,dEPS,E, DT):
dSIG = E * dEPS
SIG = SIG_old + dSIG * DT
return SIG
# KELVIN VOIGT
def kelvinvoigt(EPS, dEPS, E, ETA, DT):
SIG = E * EPS + ETA * dEPS
return SIG
# MAXWELL
def maxwell(SIG, dEPS, E, ETA, DT):
dSIG = E*dEPS-E/ETA*SIG
SIG = SIG + dSIG * DT
return SIG
# ZENER
def zener(SIG,dEPS,EPS_D,EPS,E_1,E_2,ETA,DT):
dEPS_D=E_2/ETA*(EPS-EPS_D)
dSIG=E_1*dEPS+E_2*(dEPS-dEPS_D)
SIG=SIG+dSIG*DT
EPS_D=EPS_D+dEPS_D*DT
return SIG,EPS_D
# ELASTIC PLASTIC ISO
def elsatic_plastic_iso(SIG,dEPS_PL,E,SIG_Y,DT,K,ALPHA):  # isotropic hardening
SIG_TRIAL= SIG+E*dEPS_PL*DT
if (np.abs(SIG_TRIAL)-(SIG_Y+K*ALPHA))<0:
SIG=SIG_TRIAL
else:
dGAMMA=E/(E+K)*dEPS_PL*np.sign(SIG)
dEPS_P=dGAMMA*np.sign(SIG)
dALPHA=np.abs(dEPS_P)
dSIG=E*(dEPS_PL-dEPS_P)
ALPHA=ALPHA+dALPHA*DT
SIG=SIG+dSIG*DT
return SIG,ALPHA
# ELASTIC PLASTIC KIN
def elsatic_plastic_kin(SIG,dEPS_PL,E,SIG_Y,DT,Q,H):  # kinematic hardening
SIG_TRIAL= SIG+E*dEPS_PL*DT
if (np.abs(SIG_TRIAL-Q)-SIG_Y)<0:
SIG=SIG_TRIAL
else:
dGAMMA=(E/(E+H))*dEPS_PL*np.sign(SIG-Q)
dEPS_P=dGAMMA*np.sign(SIG-Q)
dQ=H*dEPS_P
dSIG=E*(dEPS_PL-dEPS_P)
Q=Q+dQ*DT
SIG=SIG+dSIG*DT
#EPS_P=EPS_P+dEPS_P*DT
return SIG,Q
# PERZYNA
def perzyna(SIG,dEPS,E,ETA,SIG_Y,DT):
SIG_TRIAL= SIG+E*dEPS*DT
if (np.abs(SIG_TRIAL)-SIG_Y)<0:
SIG=SIG_TRIAL
else:
dEPS_VP =(np.abs(SIG)-SIG_Y)*np.sign(SIG)/ETA
dSIG=E*(dEPS-dEPS_VP)
SIG=SIG+dSIG*DT
return SIG
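# --- Illustrative usage sketch (added for clarity; the parameter values below are made up) ---
# The methods of `model` take no self argument, so they can be called directly on the class.
if __name__ == "__main__":
    E, ETA, DT = 100.0, 50.0, 0.01
    sig = 0.0
    # Stress build-up of a Maxwell element under a constant strain rate of 1e-3 per unit time.
    for _ in range(100):
        sig = model.maxwell(sig, 1e-3, E, ETA, DT)
    print(sig)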
|
StarcoderdataPython
|
1713238
|
# -*- coding: utf-8 -*-
content = {
'综述':
{
'综述型简介':
[
"唐三彩以三彩统称,年代另述,而非以“唐三彩”一言以蔽之。所谓唐三彩,“唐”为时代,“三彩”指工艺,仅指公元618年-公元907年所烧造的三彩器物,距今有1300多年的历史。",
],
'唐三彩的历史':
[
"1899年勘探某铁路的时候,古器物学家罗振宇和王国维发现了之前未命名过的唐代陶制品。在鉴定后,他们将其命名为唐三彩。在洛阳出土的唐三彩以“洛阳唐三彩”更得美名。",
],
'风俗':
[
"《旧唐书· 舆服志》中有一段简明清楚的概括:“近来王公百官竟为厚葬,偶人像马,雕饰如生。徒以眩耀路人,本不用心致札。更相扇慕,破产倾资,风俗流行,遂下兼士庶”。这说明,当时厚葬之风并不在于对亡灵的悼念,而是在于相互攀比眩耀,以致于倾家荡产也在所不惜。唐三彩在一定程度上,可能是适应这种厚葬风气而兴起的,所以生活用器极少。",
],
'艺术价值':
[
"唐三彩的艺术价值在于精准的动态结构和传神的雕刻技巧:以马和骆驼为例,艺人必须十分熟悉马和骆驼在休息或运动状态下的全部身体结构。",
"唐三彩不仅展示出传统中国画的写意韵味,更表现出古代雕塑技艺的精湛。它的艺术价值在中国传统工艺美术品类中具有重要的地位。",
'异域风格,体现民族交流',
],
},
'大类分析和介绍':
{
'大类介绍':
[
"目前发现的唐三彩大致分为三大类:人物、动物和日常器皿。目前洛阳地区出土的唐三彩没有发现生活用具,基本上都是作为明器使用的马、骆驼、镇墓兽、胡人佣、天王俑等,造型规矩大气、雍容华贵,且均成对成批量的出现,因此可以看出当时有资格随葬唐三彩的人基本都是身份显贵的钱权大家之人。",
"目前发现的唐三彩实物除用于随葬的人物、动物等明器外,还有日用器皿、建筑构件等。特别是人物、动物的造型,比例适度,形态自然,生动逼真。武士肌肉发达,怒目圆睁;仕女佣高髻广袖,亭亭玉立,神态雍容娴雅。大凡盘、碟多采用阳文印花,琢器多采用划花。",
"唐三彩的品种很多,大致有人物俑(包括镇墓兽)、动物俑(包括飞禽走兽)、生活用品和模型等类型。林林总总的唐三彩各式器物,折射出唐代各阶层的社会生活。唐三彩的器物造型丰富多样,诸如猪、羊、犬、兔、鸡、鸭、鸟动物等,以及盘、洗、碗、壶、罐、尊、钵和化妆用器、储藏用器应有尽有。",
"这些唐三彩根据用途可大致分为两类,一类是专供随葬用的明器,如动物俑(马、骆驼等)、仪仗俑、镇墓俑等;另一类为日常生活用品,如瓶、钵、杯、盘、罐等。",
],
'马':
{
'综述':
"马在唐朝备受重视,与李唐王朝本身具有游牧民族血统有关,"
"李唐王朝具有西北民族血统,骑马狩猎为其民族习俗。"
"如唐太宗所言:”大丈夫在世,乐事有三:天下太平,家给人足,一乐也;"
"草浅兽肥, 以礼畋狩,弓不虚发,箭不妄中,二乐也;"
"六合大同,万方咸庆,张乐高宴,上下欢洽,三乐也。“在生活中,"
"老百姓把它作为交通工具来协助运货和出行,"
"而在战场上,兵将们必须借助它来克敌制胜、驱逐贼寇。"
"它们或奔走,或仰啸,或静立,或饮水,或负重,或漫步"
"同时三彩马的釉色富丽堂皇,神态生气盎然,釉光莹润明亮,神完气足,体现了大唐磅礴的时代风貌"
"马俑的独特性、复杂性能多方位折射出唐三彩的绚丽光彩,为人们认识唐文化历史价值提供了宝贵实物资料",
'颜色':
[
],
'名贵程度':
[
"唐三彩马中,最为珍贵的是白釉、黑釉和白斑益釉三彩马,而黄、绿、褐三彩为主的马则为平常器物。比如早在1989年12月12日,国际苏富比拍卖行拍卖的一件唐三彩黑马在英国伦敦以4955万英镑成交。以黄、绿、褐为主的马则价值略低, 又如1991年6月10日,佳士得拍卖行有一件黄、褐彩为主的“三花饰马”仅拍得6.05万英镑。"
],
'艺术特征':
[
"",
"此马为立马俑,伸颈低头,釉色鲜艳,马鬃和鞍鞯等细节处理一丝不苟,工艺精湛。",
"此马两耳上耸,双目圆睁,直立于托板上,表现出伫立时宁静的神态。头戴络头,身披攀胸和秋带,马背配鞍,外包鞍袱,下衬雕花垫和障泥,尾系花结。",
"此马的造型遵循着圆一方一圆的构成法则, 强调了其圆圆的胸脯, 方方的腰, 圆圆的后臀, 再加上一个近乎长方形的头, 就更加强了其雄强的气势。",
"此马的每一处体积和线条都经过提炼加工, 达到了炉火纯青的完美程度。从侧面看去, 仿佛是强烈的视觉符号, 充满了内在的生命张力。",
"此马三彩马头小颈长,膘肥体壮,眼睛炯炯有神,富于艺术的概括力。",
"此马是头小颈短,膘肥体壮,骨肉停匀,比例协调、准确,眼睛炯炯有神。",
"此马昂首挺立、英气勃发,大有奔腾万里之势,其英武之姿正体现了盛唐昂扬向上的精神。",
"此马马首略低,戴辔头,物饰于额前,短鬃,前有攀胸,后有鞅鞦,配有马镫。鞍鞯色彩丰富,鲜艳夺目。",
"此马马首微倾,戴辔头,双耳竖立,眼睛圆睁,张口,马鬃短齐,前有攀胸,后有鞅鞦,鞍鞯上铺绒毯类鞍袱。鞍袱又称鞍帕,覆在马鞍上防止尘污。杜甫《骢马行》:“银鞍却覆香罗帕”即指此。",
],
'马鞍':
[
"",
"马鞍采用贴花饰法,施以浅釉与彩绘,庄重华贵,更显此马体态矫健骨肉匀称"
"使得观者过目难忘。",
"马鞍庄重厚实,加于马背之上,更显此马筋肉坚实、"
"气力旺盛,隐隐有背负万斤之力、日行千里之能。",
],
'花纹':
[
"",
"条形花纹似为彩带装饰法,即利用釉的流动性在此马底釉一定部位施以浓厚釉汁,"
"任其自然流淌成型,极具自然之美。",
"此马具有明显点彩装饰的特色,在其身上具有毛笔点染的异色釉汁斑点,"
"疏密有致,浑然天成。",
],
'骑者':
[
"",
"此骑俑骑者戴四方形帷帽,"
"头部用软帽紧裹至颈部,神态自若、端庄华贵,双足蹬于马镫之上,悠然典雅。",
"此俑骑者横跨于马背之上,身体微倾,目光凝视,胯下马匹神态安详,驻足直立。",
"此俑骑者横跨马背,身体略转,头戴方形帷帽,软帛及肩,面若桃花,神态悠闲。",
"此俑马身直立,微向前倾,作欲发之势。骑马者高鼻深目,络腮胡,双手持缰,凝视前方。完美展现了苏轼《江城子》描述的:”老夫聊发少年狂,左牵黄,右擎苍。巾帽貂裘,千骑卷平冈。“的胡服骑射形象。",
"此俑狩猎者身穿窄袖衣,足着靴,踏马蹬端坐于马鞍之上。马昂首直立。体现了唐朝在皇室贵戚的带动下狩猎盛行的风貌。",
"此俑骑者头戴风帽,身着圆领窄袖袍,足蹬靴,双脚登踏马蹬。马眼圆睁,双耳前竖,未佩戴笼套等物。",
],
'白马':
[
"三彩白马:雕工精细, 装饰华丽,马鬃梳剪整齐,留有三花。 马头辔饰齐全, 笼套上饰黄色的八瓣花朵, 两耳下和鼻上革带系有杏叶形垂饰。 鞍具羁辔齐全, 鞍下垫绿色障泥,褶纹简洁。 白马的筋骨、 肌肉、 眼睛、 累毛、鞍饰等细部雕刻精细,刀痕清楚。",
],
'黑马':
[
"三彩黑马:马身为黑色,脸部、娱毛、尾和四蹄均为白 色。鞍鞘为绿、黄、白三色。 马身膘肥雄浑,剪娱挽尾,双耳后拫,颈似弯弓,四蹄抓地,状若飞奔启程",
],
'四花马':
[
"造型完美罕见的蓝釉唐三彩四花马----马秀银:四花马极显珍贵、少见和难得。笔者在中国国家博物馆工作及从事文物鉴定几十年来见到 “四花马”尚属首次,所见唐三彩马颜色多以绿、白、黄三色为主,且是“三花马”。在等级制度非常严格的唐朝,能够拥有这种大型蓝色三彩釉陶器马作为明器的人都是些名望贵族、高官显爵,生前不是皇亲国戚就是宰相重臣。可想而知,以蓝彩釉作为主要色调而烧造的这匹马是多么珍贵,它的历史价值、艺术价值、经济价值以及稀有程度也就不难理解了。当前在全世界范围内,无论是馆藏唐代三彩釉陶器还是私人收藏的三彩釉陶器,蓝釉三彩器的确少之又少,而大型的、完整的唐代蓝釉三彩釉陶器存世量则更少,这件大型的蓝釉唐三彩四花马可谓是凤毛麟角。",
],
'工艺':
"马蹄部位皆不施以釉彩,以应“龙种神驹,四季踏雪”一说。且毛毡部分大多也不施釉彩,有时会以大红色做装饰,以竹签刻印以做出拉毛效果,质感逼真。",
},
'骆驼':
{
'综述':
"在唐代,骆驼是丝绸之路上中外经济、文化交流的使者,其造型有立式、跪式、载人载物式等,均结构端庄,外形饱满,表现出温厚而伟岸、稳健与高亢的特征。",
'颜色':
[
"",
'唐三骆驼多为黄褐、棕褐釉三彩,也即颜色多为黄褐色和棕褐色。',
],
},
'镇墓兽':
{
'综述':
[
"镇墓是唐三彩的最大功用。长安、洛阳发现的镇墓物大多是神人塑像(天王俑)————人面兽身、兽面兽身。一般来说,他们都是成对出现在门前,作为门卫镇守着墓,寓意着主人生后的生活安康,也寓意着唐代的自信、活力和勇敢的文化性格。",
"镇墓兽作为守护墓主人的器物,均成对出现在墓门两边,一个是人面兽身,另一个时兽面兽身,两两对称出现。唐代镇墓兽十分华丽,一般为多种动物组合,蹄子多为牛蹄或兽爪,表现出力量或速度,肩部带有翅膀,寓意可以翱翔,兽头均有角伸出。",
],
'工艺':
[
"兽面通体施釉而人面不施釉,原因如下:其一由于开相工艺,即烧制之后再进行脸部描摹更显栩栩如生;其二,由于唐三彩釉色极易流淌,若人面施釉,则釉彩流淌后面部表情显得较为狰狞,无法呈现出人面的特征,所以迄今为止,牵扯到人面的唐三彩均无施釉。",
],
},
'天王俑':
{
'综述':
[
"天王俑在墓中位置逐渐替代以前的武士佣,其形象受佛教文化的影响,作用是镇墓辟邪。天王俑一般呈现出唐代时身穿华丽光亮“明光铠甲”的将军形象,脚踩地鬼或青牛青羊以彰显力量,尽享威武阳刚之气。",
"天王俑夸张其双目, 以显威武; 文吏俑则与其形成鲜明对比——他们的形象则是微收双唇, 以露其亲和。",
],
'艺术特征':
[
"值得一提的是,唐代时期的天王造像身材开始凸凹有致,符合人体工学比例。而唐朝之前,天王造像则是以写意的艺术手法表现,以线条勾勒人物,身材曲线均隐藏在衣纹下面。",
],
},
'人':
{
'综述':
"盛唐时期,无论男女,都讲究体态的健壮丰腴。这一时期女性的社会地位也得到提高人们豁达自信."
"对女性的审美也有了新标准,雍容华贵、富态健康成了主导标准。"
"人物俑中分类较为复杂,有男佣、女佣、文官佣、武士佣、乐舞俏、天王佣、骑驼佣以及胡俑等多种分类。",
'艺术特征':
[
"",
"从陶瓷艺术的角度看,唐三彩人物俑最重要的特点就是神形兼备,由于主要是用写实的手法来塑造人物,因此人物表情细腻而传神。",
"唐三彩人俑继承了周、汉以来陶俑制作的写实传统,简明、明快、概括、凝练的风格以及忠实、健康、丰满、自由舒展的情调。",
'唐三彩人俑确立了新的艺术法则和规范',
'唐三彩的人俑造型丰满而对于饱满、圆润形体的追求',
"唐三彩侍女俑都显得年轻,她们眉清目秀、五官墙正,面部稍显丰腴,但身体却没有贵妇那般富态.",
"唐三彩侍女俑打扮简单, 但身段窈窕, 柔顺的线条凸显了她们体态的修长、优美。",
"唐三彩侍女俑虽然她们为奴脾, 但举止落落大方、态度谦恭, 并不卑下。她们高高的发髻显了少女特有的活泼。",
],
'袖子':
[
"",
"此俑服饰宽大,长裙曳地,宽袖过膝,披帛绕肩,尽显雍容华贵。",
"此女身着儒服宽袖,隐隐有及地之势,宽袖儒服正是唐之特色。"
"如唐文宗之令“每遇对日,不得广插钗梳,不须著短窄衣服”",
],
'女佣':
[
"西安鲜于庭诲墓出土的唐三彩女佣,身披蓝色宽衣,内着窄袖绿衣,下着黄色长裙,袒胸, 肌肤丰腴, 神态安详, 是盛唐宫廷妇女的典型形象。",
"侍女俑都显得年轻,她们眉清目秀、五官墙正,面部稍显丰腴,但身体却没有贵妇那般富态.她们打扮简单, 但身段窈窕, 柔顺的线条凸显了她们体态的修长、优美。她们神态各异, 或高兴, 或开朗, 或愁苦,或郁闷。她们姿态各不相同, 或双手抱于胸前, 或捧物侍立, 或俯首帖耳为主人梳妆。她们为奴脾, 态度谦恭, 但举止落落大方, 并不卑下。她们高高的发髻显 了少女特有的活泼。",
"唐俑塑造的各种人物, 即使身份低微如侍女、乐伎、马夫等, 从他们的表情中也看不出丝毫的怯懦, 没有半点卑微与奴气。相反, 他们大都和达官贵人一样, 呈现出乐观、自信的精神风貌。这些形象已越过审美范畴的需要, 变成了对人性、人格独立平等的热烈追求。",
],
'骆驼载乐俑':
[
'同墓出土的骆驼载乐佣更为生动,高大雄健的骆驼,昂首站立在长方形的底板上, 骆驼之间平台 上四个乐俏分坐两侧, 中间为一舞佣, 五佣 中两佣深目高鼻多须, 一看就是胡人。 他们有的弹奏琵琶, 有的吹笛击鼓, 有的翩贬起舞, 生动地反映了盛唐时期, 各民族亲密交往, 经济文化交流的欢快场面。',
],
'黑人陶俑':
[
'除此之外,还塑造有黑人陶佣,例如,西安南郊裴氏墓中出土的黑人陶佣,形象十分真实,卷曲的头发成细螺旋状,肤色蚴黑,红红的嘴唇,低鼻梁宽鼻翼,这些特征,都是典型的非洲黑人的形象,唐代工匠如此熟枣自己创作的对象, 是现实生活提供了 “模特儿” ,说明 唐代中国和非洲的经济文化已有往来。 唐代 的人物佣艺术, 也受到佛教造型艺术的影',
]
},
'器皿':
{
'综述':
'此品属于唐三彩器皿。唐代经济发达,唐人自然特别注重日常生活,这一点明显地反映到唐三彩的制造中。'
'唐三彩生活用具在造型上借鉴了我国唐代金银器、铜器、漆器、藤编器物的式样,'
'特别是与我国邻近的波斯和阿拉伯国家中生活用具的造型特点以及宗教神器中的一些式样。'
'无论是盘碗盅碟还是瓶罐壶钵,都是以浑圆和丰满为主流,端庄而规整,浑厚而饱满。'
'这些器皿大多造型浑圆厚实、庄重大气,其油彩更是灿烂夺目、富丽堂皇,反映出当时社会多姿多彩的生活内容,'
'具有浓郁的时代气息和独特的艺术风格。与唐三彩其他类型的器物一样,'
'生活器具同样体现出唐人崇尚完美、大胆追求、兼收并蓄、意气飞扬的时代精神。',
'艺术特征':
[
"",
"此件器物浑厚圆润,端庄典雅,雍容华贵,精巧而有气势。具有明快、洗练、生动、形象的表现手法。",
"此件器物具有丰富、生动的装饰纹样与斑驳陆离的釉彩,为实用与美观高度结合的典范。",
"此件器物表达了我国装饰艺术写实主义传统的艺术风格,在思想性和艺术性方面都开创了新的境界,大大丰富了陶瓷制品的装饰手段。",
"它器表装饰匀器体造型、釉彩的运用紧密结合,强讽整体的完美性。",
"它装饰纹样的运用不拘常法以器饰纹,显得洒脱自如,得心应手;",
"此件器物蕴含着从过去的古朴、浑浊,到现在的浑厚、饱满、端庄而丰腴,体现了海纳百川的气魄。",
"它造型直线中有曲线,曲线中有直线,曲线又根据需要而采用不同的弧线,使壶体的线条对比强烈,生动而挺拔。",
"它晶莹明亮的颜色,柔和的色彩变化,装饰效果是活泼的,尤其是各种颜色釉过程中的热流,",
"釉颜色混合在热环境中,彩釉形成自然艳丽多彩的变化,以独特的魅力的影响着唐代。",
],
'花纹':
[
"",
"此器皿采用了滴蜡留白技法,边线若隐若现,十分柔和,"
"所留白点的四周也被其周边釉料所晕染,从而形成丰富的渐变色彩,增加了装饰纹样的色彩层次感。",
"此器皿采用了泼釉溅彩技法,色彩层次非常丰富,朦胧中透漏出流动的美感。"
"朦胧而又鲜艳的色彩,流动的肌理效果展现了唐三彩特有的美感。",
"此器皿采用了搅釉技法,采用同一色系的明度差异大的两种釉彩,"
"视觉上色调统一却又对比强烈。",
],
},
},
'釉色分析':
{
'综述': "唐三彩是一种生产于唐代(618一907年)的彩色低温铅釉"
"。它用白色粘土作胎,用含铜、铁、钻、猛等元素的矿物作釉料的着色剂,"
"井在釉里加入很多的炼铅熔渣和铅灰做助熔剂,经过约800度的高温烧制而成。"
"釉面呈现深绿、浅绿、翠绿及蓝、黄、白、赫、褐等多种色彩,但以黄、绿、白三彩为主,"
"后来釉色日渐丰富,但汉语中“三”也有“多”之意,因此三彩一名被沿用下来。"
"唐三彩以铅的氧化物作为助熔剂,目的是降低釉料的熔融温度,在窑里烧制时,"
"各种着色金属氧化物熔化后向四方扩散和流动,各种颜色互相浸润,形成斑驳灿烂的彩色釉"
"除此之外,铅的另一个作用是增加釉面的光亮度,使色彩更加艳丽,更加富丽堂皇。"
,
0:"古代三彩釉色深沉、柔和、微暗,尤其是其中的绿釉,色泽较深、柔和。造型正确、雕塑细致, 神态逼真, "
"显示了唐三彩保持了秦汉以来写实的传统,又利用釉色的绚丽, 烘托出富丽堂皇和浪漫的风采, 从而表现了盛唐的气氛。",
1:"在唐朝之前,人们所崇尚的基本都是素色主义,陶器大多数都是以单色釉的形式呈现在人们的面前,"
"然而在唐朝以后,受到了诸多外来风俗文化的冲击与影响,人们也开始接纳和喜爱斑斓绚丽的陶瓷制品。",
2: ".古代三彩中的黄釉或红釉则多呈现出像蚕吐出的细丝一样的流纹。"
"造型正确、雕塑细致, 神态逼真, 显示了唐三彩保持了秦汉以来写实的传统,又利用釉色的绚丽, 烘托出富丽堂皇和浪漫的风采, 从而表现了盛唐的气氛。",
3: "初唐的唐三彩以黄和褐色釉为大宗,间或以白、绿釉彩,在素胎的印花上,施釉草率,釉色黜暗,釉层厚实。",
4:".唐三彩的釉色除了常用的红、绿、白以外,"
"还有蓝、紫、黑, 在当时视蓝釉为贵, 故有“蓝三彩” 之说。"
"蓝釉色莹亮、斑斓绚丽而倍受时人喜爱。"
"在等级制度非常严格的唐朝,能够拥有这种大型蓝色三彩釉陶器作为明器的人都是些名望贵族、"
"高官显爵,生前不是皇亲国戚就是宰相重臣。没有气泡,褐、黑等色的杂质带入釉里。"
"釉面沁蚀自然,有返铅银斑,蓝釉中带有黑斑才是真正的进口料蓝釉唐三彩马,"
"是鉴定唐代进口钴釉料蓝釉的根据痕迹。",
5:"黑色是多种氧化物的堆积,科技含量很高。而且三彩黑釉陶马的出土震惊了世界,奠定了中国陶瓷在世界上的地位。",
'釉色颜色分析':
[
"",
'唐三彩有黄、绿、褐、蓝、黑等色, 而以黄、绿、白三种颜色为主调。其中,黄、褐、绿为釉色的基本组成。',
'在唐朝之前,人们所崇尚的基本都是素色主义,陶器大多数都是以单色釉的形式呈现在人们的面前,然而在唐朝以后,受到了诸多外来风俗文化的冲击与影响,人们也开始接纳和喜爱斑斓绚丽的陶瓷制品。',
'受土壤环境和土壤中化学物质如酸、碱的侵蚀,夺目的光泽已变得柔和莹润,釉面的开片均匀细小,有规律,开片四边微微上翘,用放大镜仔细观察片与片之间的间隙有锈蚀的土沁、银沁,类似于汉代绿釉陶壶的泛铅现象',
'洛阳唐三彩有一部分器物的釉色相当鲜亮华丽,但这种亮并非贼亮而是润亮。一部分七五的釉色由于受到酸碱度不同的土侵蚀或明或暗,甚至时整体釉色都暗淡无光,或釉面出碱形成一层厚厚的碱釉非常不漂亮。更有甚者由于胎的文体而导致釉脱落,露出斑驳的胎体。洛阳唐三彩的釉面多有细小的开片,开片开列出总有上翘的感觉,开大片者极少。',
'凡烧成色为褐红、浅黄、赫黄的,是铁元素的呈色;各种绿色的,是铜元素的呈色;蓝色的,是铜或钻元素的呈色;紫色的,是锰元素的呈色;呈白色的,则是以铅的化合物与 含铁量低的白色粘土的配成。',
],
'艺术效果':
{
'关系色效果':
[
'1.关系色效果。唐三彩使用有限的几种色釉, 其色彩运用, 既没有自然的固有色, 也不可能去关注条件色( 光源色、环境色、固有色、空气色) , 而是以数种色釉并置与交融,或同类色调, 或邻近色调, 或清浊对比, 或互补, 利用色彩与色彩之间所形成的美感效应, 自然而巧妙地构成了优美的“ 关系色” , 产生流光溢彩的美妙效果',
],
'肌理效果':
[
'2. 肌理效果。唐三彩之用色不去注重客观抽象的质感表现, 而是充分利用其色釉厚度大和烧窑过程中因流动而形成垂滴、混合、晕开等现象, 追求形象的肌理效果。',
],
'质材互衬效果':
[
'3. 质材互衬效果。就物质材料而言, 唐三彩当属陶瓷美术的范畴, 唐三彩是以具体的陶土、色釉等质材构成、这些质材本身的审美性能, 直接影响作品的艺术形式和艺术效果。故其格外显示了其质地上独特的工艺审美价值。',
],
},
'制釉工艺':
{
'综述':
[
'唐三彩工艺从诞生到成熟只用了50年,这在世界工艺的历史上都是罕见的。',
'可以总结出散点配彩、瓷土堆花、贴花涂彩、刻花填彩、印花加彩、点描线描、开相勾画等十余种装饰技法,为历代之大全。',
'浇釉、浸釉、泼釉、点釉、出釉等施釉技法,创造出或数色相互浸润,或两彩相间形成条带,或以一种釉彩为主调加缀其它釉彩,或运用当时染织品流行的绞缬技术从而形成朵花、散点、斑块、串珠网格、条纹等纹样',
'在汉代低温铅釉的基础上发展起来的,是一种低温釉陶器',
'胎料为白色粘土,用含铁、铜、钴、锰等元素的矿物作釉料的着色剂',
'釉料中配以大量的铅化合物来降低釉料的熔融温度,并增加色泽的光亮',
],
'二次烧制法':
[
'唐三彩可能是用二次烧成法制成的。第一次是烧胎, 称为素烧, 素烧温度约1000°C到1100°C。第二次是烧釉, 烧成温度比第一次低一些, 约850°C—950°C左右。采用二次烧成法, 是因胎与釉所要求的烧成温度不一致, 同时可以提高成品率, 变形和烧坏的素烧坯件, 不再上釉烘烧。',
],
'土沁':
[
'土沁:相当一部分真器除底外,周围均不沾土,由于植物的作用,常常在一些三彩器物上有一些植物根系枯干后,留下的根爬行的痕迹。一些三彩器物的釉面上经常看到很多不易去掉的土锈,这些东西往往是釉中碱性物质和土质中酸性物质相互作用留下的,也是鉴定一些三彩器真假较好的证据。',
],
'流窜':
[
'烧制唐三彩的艺人们在釉彩的使用上独具匠心地创造了我国特有的“流串”工艺。即在锻烧的过程中,让釉彩自然流动,融会渗化,互相调配,使之色彩浓丽、线条柔美。',
'三彩的釉成分主要是铅元素, 还可以增加锰、钴、铁等金属组成。在窑中烧制时, 釉在高温下容易出现流窜, 形成自然的五彩缤纷的色彩及形状。这是唐代独特的艺术特色和吸引力。此外, 三彩花卉的风格, 被世人认为是异国情调的追求, 流行为一种“时尚”元素, 三彩釉色结合外国文化以及自己的艺术特点已经成为著名的遗产, 并经河北省文物专家组鉴定为二级文物。',
'色釉在烧制过程中具有熔点降低而自然流出的特性',
],
'分区施釉法':
[
'在不同部位涂上不同的釉色,几种釉色同施在一件器物上,釉色之间的组合和交融会产生一种全新的效果,即窑变',
],
'点彩施釉法':
[
'用毛笔在器物的一些部位点上大小一致或不一致、或点状、或块状的点,或带状线条',
],
'胎体':
[
'洛阳唐三彩中的胎体有相当部分白中泛红,有些胎体甚至呈肉红色。',
'洛阳与西安出土的有很大相似度。但洛阳唐三彩逐步形成了独自的特点。如“胎质上,是由瓷土烧成的白胎,而西安则是一般陶土烧成的红胎;在装饰方面,洛阳的三彩器皿装饰花纹艳丽繁缛,多见几何形,点彩和垂带状花纹,而西安则比较清淡素雅;在种类上,洛阳唐三彩不及西安唐三彩丰富;在造型上,洛阳唐三彩女俑秀丽飘逸,而西安出土的唐三彩女俑较为圆润丰满。”',
]
}
},
'鉴别':
{
'艺术水准':
[
'色彩、造型极具艺术价值的自然是极品。在具体造型中,又以人物俑、马俑和骆驼俑的塑造最为有名。就陶马而言,则以白色三彩马和黑色三彩马最为名贵。',
],
'年代和产地':
[
'国际市场能见到的有产自中国、日本和朝鲜的三彩陶器,分别叫作唐三彩、奈良三彩和新罗三彩,因为价值不同,所以要区分清楚。其中就中国产的而言,产自唐代的要比“辽三彩”和“金三彩”的价高;产自中原地区的要比产自偏远地区的品质好。',
],
'真伪鉴别':
[
'先看胎质,真品质地细腻、疏松,有腐蚀痕迹,往往还有钙一类的锈斑,清理后露胎部分表层多有沙质小坑呈现;仿造的则胎质坚硬,呈色或细腻洁白或者发灰。',
'再看色彩,古代三彩釉色深沉、柔和、微暗,尤其是其中的绿釉,色泽较深、柔和;而新的三彩则色泽光亮、鲜艳、刺眼,绿釉部分仿品则是色泽较淡且浮在表层,缺乏润泽感。古代三彩中的黄釉或红釉则多呈现出像蚕吐出的细丝一样的流纹,而新烧制的三彩陶器却一般没有这种现象。',
'其釉色是否与质地同一光泽,若质地很旧,而彩绘光泽独强者,显然有假。',
'造型:真品以古朴典雅、庄重丰满为特色,而仿品则有较多灵巧潇洒、富丽清新的气氛。',
'制作:真品多捏塑手制或轮制兼用, 同类产品大小不一,而仿品大多采用模制注浆而成,产品规格比较规范统一。',
'综合考量其外观成色、辨识气味、辨别造型等。原品表层附着土锈,有特异的土腥气味,尤其遇水后气味更加明显。造型方面也自由、娴熟,胎体起伏、转折自然。胎壁有的有厚薄不均的现象。新品则多有烟熏味,造型规整、古板。',
'真器胎面上,留下很多当时工艺上略显粗糙的痕迹。除唐三彩中少数精品处理得较为精细以外,大多数处理得并非干净利索,在胎的表面可看出一些高低不平和不精细处。唐朝的制瓷工艺非常成熟,凡轮制拉坯器,多周正饱满,丰满而圆浑是唐三彩圆器共同特征。此时对拉坯技术的熟练掌握,轮转进度均匀,但胎体上往往留下拉坯轮制痕。真器的贴花较薄,且贴花与器身接合处过渡较自然平滑。',
],
'釉色鉴别':
[
'受土壤环境和土壤中化学物质如酸、碱的侵蚀,夺目的光泽已变得柔和莹润,釉面的开片均匀细小,有规律,开片四边微微上翘,用放大镜仔细观察片与片之间的间隙有锈蚀的土沁、银沁,类似于汉代绿釉陶壶的泛铅现象',
'包浆:肉眼直接观察,从侧面看釉面的表层,生成一种像蝇翅一样的薄层,开片之间像是鼓起的线条',
'仿制品用氢氟酸去光,再用碱中和,因此看上去很生涩',
'一些瓶、罐、壶、碗等器物真品的器底和圈足往往不施釉,使器物部分露胎,底足很干净。而仿品全部施釉,底足往往黑而脏',
'真品釉色自然柔和,釉中开片细而均匀,釉面的突起感、翘起感如苍蝇翅膀上的纹路一样,釉面很干很薄,无积釉或少积釉现象;而仿品釉色过于艳丽、刺目。釉中开片大小不均,有生硬感,釉面常有积釉现象,釉面干涩',
],
},
'墓穴分析':
{
'文案': ["昭陵是唐太宗李世民与文德皇后长孙氏的合葬陵墓,"
"位于陕西省咸阳市礼泉县城西北22.5千米的九嵕山上,"
"是关中“唐十八陵”,也是中国历代帝王陵园中规模最大、陪葬墓最多的一座。"
"昭陵是初唐走向盛唐的实物见证,是了解、研究唐代政治、经济、文化难得的文物宝库。",
"在《旧唐书》卷九十五中记载:“惠庄太子撝,睿宗第二子也。"
"本名成义,母柳氏,掖庭宫人。撝之初生,则天尝以示僧万回,万回曰:"
"‘此儿是西域大树之精,养之宜兄弟。’则天甚悦,始令列于兄弟之次。”",
"中国唐代中宗长子李重润的墓。位于乾陵东南隅,在今乾县城西北约3公里的乾陵乡韩家堡村北面。1971年发掘 。"
"李重润于大足元年(701)与其妹永泰公主被武则天杖杀。神龙二年(706),中宗复位后陪葬乾陵,并号墓为陵。",
],
},
'窑址':
{
'综述': ['迄今发现烧造唐三彩的窑址有河南巩县、陕西铜川、河北内邱等,此外在陕西西安及附近地区也发现有零星的烧造唐三彩的窑。'
'窑址主要集中在陕西省、河南省的西安和洛阳周围,这两地出土的唐三彩是唐代三彩的代表。'
'这与古时长安(今称西安)、洛阳所处的政治、经济地位,以及当时崇尚的厚葬之风有着必然的联系。'
'长安是唐朝的国都,唐墓理应十分集中,目前已发掘的就有2000多座,上至皇室贵族,下至平民百姓,'
'类型多样。洛阳作为陪都,唐墓也相当集中,已发现了数百座唐代墓葬。唐代厚葬之风盛行,古人入葬,伴随有大量的物品。唐三彩作为明器,是重要的随葬品。'
'除陕西、河南两省之外,中国的湖北、江苏、山西、安徽、山东、甘肃、辽宁等省以及伊朗、'
'叙利亚、伊拉克、苏丹、埃及、印尼、日本等国也发现了唐三彩的器物或残片,但数量和品种都无法与陕西、河南两省相比。', ],
'唐河南省巩义市黄冶窑': ['目前发现最早烧造唐三彩制品而且规模大、延续时间长、'
'产品数量最多的一个窑址,是当时宫廷、豪门贵族乃至民间三彩制品的主要供应地。'
'作为国际贵重贸易商品远销东亚、东南亚、中亚、西亚乃至非洲、欧洲等地',
'日常生活用品:炉、罐、钵等;专供随葬用的冥器:人物俑、动物俑和建筑模型;玩具类小型器物',
'突破了常见的圆形格式而呈现多样化的发展趋势,如扁体、椭圆体。',
'出现将造型与装饰融为一体的象生性器皿。',
'装饰技法主要采取贴花、印花、刻画、刻花、划花、堆塑、雕镂、施釉、绘彩、绞胎、搅釉。',
],
'唐长安醴泉坊三彩窑址': [
'窑址中心位于西安市西门外西关正街——丰镐路以南、草阳村及劳动南路以西、'
'原西安民航机场跑道北端偏东处,所在范围就是当年隋唐长安城醴泉坊。'
'1999年5月至7月,陕西省考古研究所对这一遗址进行了抢救性发掘。'
'共发掘出唐代残窑4座、灰坑10个。发掘面积140平方米,发现大量窑业堆积,'
'出土包括唐三彩、素烧器、模具、窑具等残片在内的各类陶制品残片近万片,并出土部分玻璃残块及骨器边角料等物品。',
],
},
'朝代':
{
'综述': '唐三彩起源于唐高宗时期,在公元713至741年间达到鼎盛,在平定安史之乱后逐渐没落下来',
'初唐': '初唐之末期,始兴厚葬之风,其后自盛唐至中、晚唐,厚葬之风益甚,'
'舁明器而行街衢,陈墓所,奏歌舞音乐,张帷幕,设盘牀,以造花、人形、饮食施路人,殆如祭祀。',
'盛唐': '唐玄宗开元年间,“政治清明,经济昌隆,军事强大,文艺繁荣,三教并弘,盛唐气象,'
'无与伦比。”产生在这一时期的唐三彩,在写实的基础上充分发挥想象力,'
'以概括和夸张的手法塑造出品类繁多的形象,形成了独具魅力的风格,'
'体现出唐代各个方面的社会生活,充分体现了“盛唐气象”蕴涵的丰富文化气息。',
},
}
def get_content(title, topic, index1, index2):
if index1 == 0 :
return content[title][topic]
elif index2 == 0:
return content[title][topic][index1]
else:
return content[title][topic][index1][index2]
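# Illustrative usage only (added note, not part of the original data module):
# get_content() indexes the nested dictionary above, e.g.
#   get_content('朝代', '综述', 0, 0)        -> the whole summary string
#   get_content('鉴别', '真伪鉴别', 1, 0)    -> the second entry of that list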
|
StarcoderdataPython
|
3291095
|
from featuretools.primitives import AggregationPrimitive
from featuretools.variable_types import Numeric
from tsfresh.feature_extraction.feature_calculators import abs_energy
class AbsEnergy(AggregationPrimitive):
"""Returns the absolute energy of the time series
which is the sum over the squared values.
Docstring source:
https://tsfresh.readthedocs.io/en/latest/api/tsfresh.feature_extraction.html#tsfresh.feature_extraction.feature_calculators.abs_energy
"""
name = "abs_energy"
input_types = [Numeric]
return_type = Numeric
stack_on_self = False
def get_function(self):
return abs_energy
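# Hedged usage sketch (not in the original file): an aggregation primitive like
# this is normally handed to featuretools' dfs via `agg_primitives`. The
# entityset `es` and the target name are placeholders, and the exact dfs
# keyword names vary between featuretools versions.
#   import featuretools as ft
#   feature_matrix, feature_defs = ft.dfs(entityset=es,
#                                         target_entity="sessions",
#                                         agg_primitives=[AbsEnergy])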
|
StarcoderdataPython
|
161744
|
<filename>examples/similarity_conf.py
"""
This module gives an example of how to configure similarity measures
computation.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from amaze import KNNBasic
from amaze import Dataset
from amaze.model_selection import cross_validate
# Load the movielens-100k dataset.
data = Dataset.load_builtin('ml-100k')
# Example using cosine similarity
sim_options = {'name': 'cosine',
'user_based': False # compute similarities between items
}
algo = KNNBasic(sim_options=sim_options)
cross_validate(algo, data, verbose=True)
# Example using pearson_baseline similarity
sim_options = {'name': 'pearson_baseline',
'shrinkage': 0 # no shrinkage
}
algo = KNNBasic(sim_options=sim_options)
cross_validate(algo, data, verbose=True)
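# Additional sketch (not part of the original example): assuming amaze mirrors
# the Surprise similarity options, other names such as 'msd' or 'pearson'
# can be configured the same way.
#   sim_options = {'name': 'msd', 'user_based': True}
#   algo = KNNBasic(sim_options=sim_options)
#   cross_validate(algo, data, verbose=True)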
|
StarcoderdataPython
|
47153
|
<filename>tests/test_admin.py<gh_stars>0
"""Test Admin interface provided by Improved User"""
import os
import re
from django import VERSION as DjangoVersion
from django.contrib.admin.models import LogEntry
from django.contrib.auth import SESSION_KEY
from django.test import TestCase, override_settings
from django.test.utils import patch_logger
from django.utils.encoding import force_text
from improved_user.admin import UserAdmin
from improved_user.forms import UserChangeForm, UserCreationForm
from improved_user.models import User
# TODO: remove conditional import when Dj 1.8 dropped
# pylint: disable=ungrouped-imports
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
# pylint: enable=ungrouped-imports
# Redirect in test_user_change_password will fail if session auth hash
# isn't updated after password change (#21649)
@override_settings(ROOT_URLCONF='tests.urls')
class UserAdminTests(TestCase):
"""Based off django.tests.auth_tests.test_views.ChangelistTests"""
@classmethod
def setUpTestData(cls):
"""Called by TestCase during initialization; creates users"""
cls.user1 = User.objects.create_user(
email='<EMAIL>',
password='password',
)
cls.user2 = User.objects.create_user(
email='<EMAIL>',
password='password',
)
def setUp(self):
"""Make user1 a superuser before logging in."""
User.objects\
.filter(email='<EMAIL>')\
.update(is_staff=True, is_superuser=True)
self.login()
self.admin = User.objects.get(pk=self.user1.pk)
def login(self, username='<EMAIL>', password='password'):
"""Helper function to login the user (specified or default)"""
response = self.client.post('/login/', {
'username': username,
'password': password,
})
self.assertIn(SESSION_KEY, self.client.session)
return response
def logout(self):
"""Helper function to logout the user"""
response = self.client.get('/admin/logout/')
self.assertEqual(response.status_code, 200)
self.assertNotIn(SESSION_KEY, self.client.session)
def get_user_data(self, user): # pylint: disable=no-self-use
"""Generate dictionary of values to compare against"""
return {
'email': user.email,
'password': <PASSWORD>,
'is_active': user.is_active,
'is_staff': user.is_staff,
'is_superuser': user.is_superuser,
'last_login_0': user.last_login.strftime('%Y-%m-%d'),
'last_login_1': user.last_login.strftime('%H:%M:%S'),
'initial-last_login_0': user.last_login.strftime('%Y-%m-%d'),
'initial-last_login_1': user.last_login.strftime('%H:%M:%S'),
'date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'initial-date_joined_0': user.date_joined.strftime('%Y-%m-%d'),
'initial-date_joined_1': user.date_joined.strftime('%H:%M:%S'),
'full_name': user.full_name,
'short_name': user.short_name,
}
def test_display_fields(self):
"""Test that admin shows all user fields"""
excluded_model_fields = ['id', 'logentry']
model_fields = set(
field.name for field in User._meta.get_fields()
if field.name not in excluded_model_fields
)
admin_fieldset_fields = set(
fieldname
for name, fieldset in UserAdmin.fieldsets
for fieldname in fieldset['fields']
)
self.assertEqual(model_fields, admin_fieldset_fields)
def test_add_has_required_fields(self):
"""Test all required fields in Admin Add view"""
excluded_model_fields = [
'date_joined', 'is_active', 'is_staff', 'is_superuser', 'password',
]
required_model_fields = [
field.name
for field in User._meta.get_fields()
if (field.name not in excluded_model_fields
and hasattr(field, 'null') and field.null is False
and hasattr(field, 'blank') and field.blank is False)
]
extra_form_fields = [
field_name
for field_name in list(
UserCreationForm.declared_fields, # pylint: disable=no-member
)
]
admin_add_fields = [
fieldname
for name, fieldset in UserAdmin.add_fieldsets
for fieldname in fieldset['fields']
]
for field in required_model_fields+extra_form_fields:
with self.subTest(field=field):
self.assertIn(field, admin_add_fields)
def test_correct_forms_used(self):
"""Test that UserAdmin uses the right forms"""
self.assertIs(UserAdmin.add_form, UserCreationForm)
self.assertIs(UserAdmin.form, UserChangeForm)
def test_user_add(self):
"""Ensure the admin add view works correctly"""
# we can get the form view
get_response = self.client.get(
reverse('auth_test_admin:improved_user_user_add'))
self.assertEqual(get_response.status_code, 200)
# we can create new users in the form view
post_response = self.client.post(
reverse('auth_test_admin:improved_user_user_add'),
{
'email': '<EMAIL>',
'password1': '<PASSWORD>!',
'password2': '<PASSWORD>!',
},
follow=True,
)
self.assertEqual(post_response.status_code, 200)
self.assertTrue(
User.objects.filter(email='<EMAIL>').exists())
new_user = User.objects.get(email='<EMAIL>')
self.assertTrue(new_user.check_password('<PASSWORD>!'))
def test_user_change_email(self):
"""Test that user can change email in Admin"""
data = self.get_user_data(self.admin)
data['email'] = 'new_' + data['email']
response = self.client.post(
reverse(
'auth_test_admin:improved_user_user_change',
args=(self.admin.pk,),
),
data,
)
self.assertRedirects(
response,
reverse('auth_test_admin:improved_user_user_changelist'))
row = LogEntry.objects.latest('id')
if DjangoVersion >= (1, 9):
self.assertEqual(row.get_change_message(), 'Changed email.')
else:
self.assertEqual(row.change_message, 'Changed email.')
def test_user_not_change(self):
"""Test that message is raised when form submitted unchanged"""
response = self.client.post(
reverse(
'auth_test_admin:improved_user_user_change',
args=(self.admin.pk,),
),
self.get_user_data(self.admin),
)
self.assertRedirects(
response,
reverse('auth_test_admin:improved_user_user_changelist'))
row = LogEntry.objects.latest('id')
if DjangoVersion >= (1, 9):
self.assertEqual(row.get_change_message(), 'No fields changed.')
else:
self.assertEqual(row.change_message, 'No fields changed.')
def test_user_change_password(self):
"""Test that URL to change password form is correct"""
user_change_url = reverse(
'auth_test_admin:improved_user_user_change', args=(self.admin.pk,))
password_change_url = reverse(
'auth_test_admin:auth_user_password_change',
args=(self.admin.pk,))
response = self.client.get(user_change_url)
# Test the link inside password field help_text.
rel_link = re.search(
r'you can change the password using '
r'<a href="([^"]*)">this form</a>',
force_text(response.content),
).groups()[0]
self.assertEqual(
os.path.normpath(user_change_url + rel_link),
os.path.normpath(password_change_url),
)
response = self.client.post(
password_change_url,
{
'password1': '<PASSWORD>',
'password2': '<PASSWORD>',
},
)
self.assertRedirects(response, user_change_url)
row = LogEntry.objects.latest('id')
if DjangoVersion >= (1, 9):
self.assertEqual(row.get_change_message(), 'Changed password.')
else:
self.assertEqual(row.change_message, 'Changed password.')
self.logout()
self.login(password='<PASSWORD>')
def test_user_change_password_subclass_path(self):
"""Test subclasses can override password URL"""
class CustomChangeForm(UserChangeForm):
"""Subclass of UserChangeForm; uses rel_password_url"""
rel_password_url = 'moOps'
form = CustomChangeForm()
self.assertEqual(form.rel_password_url, 'moOps')
rel_link = re.search(
r'you can change the password using '
r'<a href="([^"]*)">this form</a>',
form.fields['password'].help_text,
).groups()[0]
self.assertEqual(rel_link, 'moOps')
def test_user_change_different_user_password(self):
"""Test that administrator can update other Users' passwords"""
user = User.objects.get(email='<EMAIL>')
response = self.client.post(
reverse(
'auth_test_admin:auth_user_password_change',
args=(user.pk,),
),
{
'password1': '<PASSWORD>',
'password2': '<PASSWORD>',
},
)
self.assertRedirects(
response,
reverse(
'auth_test_admin:improved_user_user_change',
args=(user.pk,)))
row = LogEntry.objects.latest('id')
self.assertEqual(row.user_id, self.admin.pk)
self.assertEqual(row.object_id, str(user.pk))
if DjangoVersion >= (1, 9):
self.assertEqual(row.get_change_message(), 'Changed password.')
else:
self.assertEqual(row.change_message, 'Changed password.')
def test_changelist_disallows_password_lookups(self):
"""Users shouldn't be allowed to guess password
Checks against repeated password__startswith queries
https://code.djangoproject.com/ticket/20078
"""
# A lookup that tries to filter on password isn't OK
with patch_logger(
'django.security.DisallowedModelAdminLookup', 'error',
) as logger_calls:
response = self.client.get(
reverse('auth_test_admin:improved_user_user_changelist')
+ '?password__startswith=<PASSWORD>$')
self.assertEqual(response.status_code, 400)
self.assertEqual(len(logger_calls), 1)
|
StarcoderdataPython
|
1716564
|
#!/usr/bin/env python
import yaml
from netmiko import ConnectHandler
from netmiko import Netmiko
from pprint import pprint
from ciscoconfparse import CiscoConfParse
filename = "/home/dcarrasco/.netmiko.yml"
with open(filename) as f:
yaml_dict = yaml.safe_load(f)
device = yaml_dict['cisco4']
Node = {
"host": device['host'],
"username": device['username'],
"password": device['password'],
"device_type": device['device_type']
}
net_connect = ConnectHandler(**Node)
CONFIG = net_connect.send_command("show run")
net_connect.disconnect()
parse = CiscoConfParse(CONFIG.splitlines())
interfaces = parse.find_objects_w_child(parentspec=r'^interface', childspec=r'ip address [0-9]')
for intf in interfaces:
print(intf.text)
ip_list = intf.re_search_children(r"ip address")
for ip in ip_list:
print(ip.text)
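# Optional follow-up sketch (not in the original script): CiscoConfParse can
# also list the interfaces *without* an IP address via find_objects_wo_child.
#   no_ip_intfs = parse.find_objects_wo_child(parentspec=r'^interface',
#                                             childspec=r'ip address')
#   for intf in no_ip_intfs:
#       print(intf.text)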
|
StarcoderdataPython
|
1774234
|
<gh_stars>0
import sounddevice
import pydub
import time
import numpy
class Audio():
def __init__(self, filepath=None):
if filepath is not None:
self.openfile(filepath)
def openfile(self, filepath):
if ".mp3" in filepath:
self.segment = pydub.AudioSegment.from_file(filepath,codec="mp3")
elif ".wav" in filepath:
self.segment = pydub.AudioSegment.from_file(filepath,codec="wav")
elif ".mp4" in filepath:
self.segment = pydub.AudioSegment.from_file(filepath)
else:
self.segment = pydub.AudioSegment.from_file(filepath)
def play(self, place=0):
if self.segment.channels != 1:
self.samples = numpy.array(self.segment.get_array_of_samples().tolist(),dtype="int16").reshape(-1,self.segment.channels)
else:
self.samples = numpy.array(self.segment.get_array_of_samples().tolist(),dtype='int16')
sounddevice.play(self.samples, self.segment.frame_rate)
def stop(self):
sounddevice.stop()
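# Minimal usage sketch (not part of the original module); "example.mp3" is a
# placeholder file name and playback runs asynchronously via sounddevice.
#   player = Audio("example.mp3")
#   player.play()
#   time.sleep(5)  # keep the script alive while the audio plays
#   player.stop()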
|
StarcoderdataPython
|
107800
|
import re
from emoji.unicode_codes import UNICODE_EMOJI
from nonebot import on_regex
from nonebot.params import RegexDict
from nonebot.plugin import PluginMetadata
from nonebot.adapters.onebot.v11 import MessageSegment
from .config import Config
from .data_source import mix_emoji
__plugin_meta__ = PluginMetadata(
name="emoji合成",
description="将两个emoji合成为一张图片",
usage="{emoji1}+{emoji2},如:😎+😁",
config=Config,
extra={
"unique_name": "emojimix",
"example": "😎+😁",
"author": "meetwq <<EMAIL>>",
"version": "0.1.7",
},
)
emojis = filter(lambda e: len(e) == 1, UNICODE_EMOJI["en"])
pattern = "(" + "|".join(re.escape(e) for e in emojis) + ")"
emojimix = on_regex(
rf"^\s*(?P<code1>{pattern})\s*\+\s*(?P<code2>{pattern})\s*$",
block=True,
priority=13,
)
@emojimix.handle()
async def _(msg: dict = RegexDict()):
emoji_code1 = msg["code1"]
emoji_code2 = msg["code2"]
result = await mix_emoji(emoji_code1, emoji_code2)
if isinstance(result, str):
await emojimix.finish(result)
else:
await emojimix.finish(MessageSegment.image(result))
|
StarcoderdataPython
|
1733084
|
<gh_stars>0
import cloudinary.uploader
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.views import generic as generic_views
from mytravelblog.main_app.forms.travel_picture import *
from mytravelblog.main_app.models import *
class TravelPictureRegisterView(LoginRequiredMixin, generic_views.CreateView):
model = TravelPicture
template_name = 'main_app/travel_picture/travel_picture_create.html'
success_url = reverse_lazy('travel pictures view')
form_class = TravelPictureRegistrationForm
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
kwargs['located_city'] = VisitedCity.objects.filter(user=self.request.user).all()
return kwargs
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
result = VisitedCity.objects.filter(user=self.request.user).exists()
if result:
return super().dispatch(request, *args, **kwargs)
return redirect('register city')
return super().dispatch(request, *args, **kwargs)
class TravelPicturesView(LoginRequiredMixin, generic_views.ListView):
model = TravelPicture
template_name = 'main_app/generic/travel_pictures.html'
context_object_name = 'travel_pictures'
ordering = ('located_city', '-uploaded_on',)
def get_queryset(self):
return super().get_queryset().filter(user=self.request.user).all()
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
result = super().get_queryset().filter(user=self.request.user).exists()
if result:
return super().dispatch(request, *args, **kwargs)
return redirect('show dashboard')
return super().dispatch(request, *args, **kwargs)
class EditTravelPictureView(LoginRequiredMixin, generic_views.UpdateView):
model = TravelPicture
template_name = 'main_app/travel_picture/edit_travel_picture.html'
success_url = reverse_lazy('travel pictures view')
form_class = TravelPictureEditForm
context_object_name = 'travel_picture'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['user'] = self.request.user
kwargs['located_city'] = VisitedCity.objects.filter(user=self.request.user).all()
return kwargs
def form_valid(self, form, *args, **kwargs):
if 'travel_picture' in form.changed_data:
# GET THE EXISTING OBJECT ( NOT IN-MEMORY )
tp_to_update = TravelPicture.objects.get(pk=self.object.pk)
if form.cleaned_data['travel_picture'] and tp_to_update.travel_picture:
# IF UPDATED REMOVE THE EXISTING ONE
cloudinary.uploader.destroy(tp_to_update.travel_picture.public_id,
invalidate=True, )
elif not self.object.travel_picture:
# IF CLEARED REMOVE THE EXISTING ONE
cloudinary.uploader.destroy(tp_to_update.travel_picture.public_id,
invalidate=True, )
result = super().form_valid(form)
return result
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
result = super().get_queryset().filter(user=self.request.user, pk=kwargs['pk']).exists()
if result:
return super().dispatch(request, *args, **kwargs)
return redirect('travel pictures view')
return super().dispatch(request, *args, **kwargs)
class DeleteTravelPictureView(LoginRequiredMixin, generic_views.DeleteView):
model = TravelPicture
template_name = 'main_app/travel_picture/delete_travel_picture.html'
success_url = reverse_lazy('travel pictures view')
form_class = TravelPictureDeleteForm
context_object_name = 'travel_picture'
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
result = super().get_queryset().filter(user=self.request.user, pk=kwargs['pk']).exists()
if result:
return super().dispatch(request, *args, **kwargs)
return redirect('travel pictures view')
return super().dispatch(request, *args, **kwargs)
|
StarcoderdataPython
|
39410
|
<reponame>popravich/rdbtools3
from .parser import parse_rdb_stream, RDBItem
from .exceptions import FileFormatError, RDBValueError
__version__ = '0.1.2'
(RDBItem, parse_rdb_stream,
FileFormatError, RDBValueError) # pragma: no cover
|
StarcoderdataPython
|
167346
|
<reponame>opimentel-github/astro-lightcurves-classifier
from __future__ import print_function
from __future__ import division
from . import _C
import torch
import torch.nn as nn
from fuzzytorch.models.basics import MLP, Linear
###################################################################################################################################################
class SimpleClassifier(nn.Module):
def __init__(self, **kwargs):
super().__init__()
### ATTRIBUTES
for name, val in kwargs.items():
setattr(self, name, val)
self.reset()
def reset(self):
### MLP
self.k = 1
#self.classifiers_mlp_ft = nn.ModuleList([MLP(self.input_dims, 1, [self.input_dims*self.k]*self.layers, **mlp_kwargs) for _ in range(0, self.output_dims)])
#print('classifiers_mlp_ft:', self.classifiers_mlp_ft)
self.classifier_mlp_ft = MLP(self.input_dims, self.output_dims, [self.input_dims*self.k]*self.layers,
activation='relu',
last_activation='linear',
in_dropout=self.dropout['p'],
dropout=self.dropout['p'],
)
print('classifier_mlp_ft:', self.classifier_mlp_ft)
self.reset_parameters()
def get_finetuning_parameters(self): # fixme
return [self]
def reset_parameters(self):
self.classifier_mlp_ft.reset_parameters()
def get_output_dims(self):
return self.output_dims
def forward(self, tdict:dict, **kwargs):
encz_last = tdict[f'model/encz_last']
#encz_last = torch.cat([classifier_mlp_ft(encz_last) for classifier_mlp_ft in self.classifiers_mlp_ft], dim=-1)
# print(encz_last[0])
# print(encz_last[0].mean(), encz_last[0].std())
encz_last = self.classifier_mlp_ft(encz_last)
#print(encz_last.shape)
tdict[f'model/y'] = encz_last
return tdict
|
StarcoderdataPython
|
3334947
|
import matplotlib.pyplot as plt
import seaborn as sns
def hist(series, rotate_labels_by=None, **kwargs):
fig, ax = plt.subplots()
sns.distplot(series, ax=ax, **kwargs)
if rotate_labels_by:
plt.setp(ax.get_xticklabels(), rotation=rotate_labels_by)
return fig
def bars(series, rotate_labels_by=None, **kwargs):
fig, ax = plt.subplots()
sns.countplot(series, ax=ax, **kwargs)
if rotate_labels_by:
plt.setp(ax.get_xticklabels(), rotation=rotate_labels_by)
return fig
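# Usage sketch (not part of the original module); `df` is a hypothetical
# pandas DataFrame with the named columns, extra kwargs go to seaborn.
#   fig1 = hist(df["age"], bins=20)
#   fig2 = bars(df["category"], rotate_labels_by=45)
#   fig1.savefig("age_hist.png")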
|
StarcoderdataPython
|
1773864
|
<gh_stars>1-10
import os
f = [i for i in os.listdir("test_images")]
for alg in ["RO","DE","PSO","GWO","JAYA","GA"]:
for n in [4,6,8]:
for t in f:
cmd = "python3 segment.py test_images/%s %d 20 2000 %s RI segmentations/%s_20_2000_%s_%d" % \
(t,n,alg,t[:-4],alg,n)
print(cmd, flush=True)
os.system(cmd)
|
StarcoderdataPython
|
3263711
|
<filename>py2markdown/__init__.py
'''
# py2markdown
Py2markdown converts a python file to a markdown file where all top level
level `""" comments """` are rendered as markdown, and everything else
is rendered as code blocks.
The `README.md` for this repo was generated by running:
$ py2markdown py2markdown/__init__.py > README.md
To install:
1. Git clone this repository
2. Run `pip3 install .`
Here's the source code!
'''
import ast
import parser
import re
from pathlib import Path
def process_source(text):
""" Convert and print some text """
# Get the start/end line of each comment
comment_lines = _get_comment_lines(text)
lines = text.split("\n")
# Add a 'comment' which starts on the last line and has no end
comment_lines.append((len(lines), None))
last = 0
output = []
# Render alternating code and comment blocks
for (firstline, lastline) in comment_lines:
code = "\n".join(lines[last:firstline])
if code.strip():
output.append(f"```py\n{code.strip()}\n```")
if lastline:
output.append(_extract_comment("\n".join(lines[firstline:lastline])))
last = lastline
return "\n".join(output)
def _extract_comment(comment):
""" Extract the content of a comment from a quoted comment string """
return re.match(
r"^\s*(?:(?:\"(?:\"\")?)|(?:\'(?:\'\')?))(.*?)(?:(?:\"(?:\"\")?)|(?:\'(?:\'\')?))\s*$", comment, flags=re.DOTALL
).group(1)
"""
Py2markdown uses the _AST_ module to parse the file and get the start and end line
numbers for all top level string expressions, making it fairly robust to odd comments
such as lines ending with \"""
"""
def _get_comment_lines(text):
""" Get the start and end line numbers of all top-level string nodes """
st = ast.parse(text, "source.py")
comment_lines = []
# Loop over top-level nodes
for node in st.body:
# Find Expression -> String nodes
if isinstance(node, ast.Expr) and isinstance(node.value, ast.Str):
comment = node.value.s
# Add the first/last line of the comment to the results list
lastline = node.lineno
firstline = lastline - len(comment.split("\n"))
comment_lines.append((firstline, lastline))
return comment_lines
|
StarcoderdataPython
|
1696602
|
#!/usr/bin/env python3.6
import pyperclip
import unittest
from credential import Credential
class TestCredential(unittest.TestCase):
"""
Test class that defines test cases for the Credential class behaviours
"""
def setUp(self):
"""
Set up method to run before each test case
"""
self.new_credential = Credential("instagram","UmutoniRita","<PASSWORD>")
def test_init(self):
'''
test_init test case to test if the object is initialized properly
'''
self.assertEqual(self.new_credential.accountName,"instagram")
self.assertEqual(self.new_credential.username,"UmutoniRita")
self.assertEqual(self.new_credential.password,"<PASSWORD>")
def test_save_credential(self):
"""
test save_credential method to check if the credential is saved
"""
self.new_credential.save_credential()
self.assertEqual(len(Credential.credential_list),1)
def tearDown(self):
"""
tearDown method that does clean up after each test case has run.
"""
Credential.credential_list = []
def test_save_multiple_credential(self):
"""
To test if you can save multiple credential
"""
self.new_credential.save_credential()
test_credential = Credential("gmail","marie","marie123")
test_credential.save_credential()
self.assertEqual(len(Credential.credential_list),2)
def test_delete_credential(self):
"""
To test if you can delete credential on the list
"""
self.new_credential.save_credential()
test_credential = Credential("gmail","marie","marie123")
test_credential.save_credential()
self.new_credential.delete_credential()
self.assertEqual(len(Credential.credential_list),1)
def test_password_generate(self):
"""
Test case to check whether a user is able to generate a password
"""
password_generate = self.new_credential.password_generate()
self.assertEqual(len(password_generate),8)
def test_find_credential_by_username(self):
"""
test to check if we can find a credential by username and display information
"""
self.new_credential.save_credential()
test_credential = Credential("gmail","marie","marie123")
test_credential.save_credential()
found_credential = Credential.find_by_username("marie")
self.assertEqual(found_credential.accountName,test_credential.accountName)
def test_copy_password(self):
"""
Test to confirm that we are copying the password from a found credential
"""
self.new_credential.save_credential()
Credential.copy_password("<PASSWORD>")
self.assertEqual(self.new_credential.password,pyperclip.paste())
def test_credential_exists(self):
"""
test to check that a Boolean is returned when searching for an existing credential
"""
self.new_credential.save_credential()
test_credential = Credential("gmail","marie","<PASSWORD>ie123")
test_credential.save_credential()
credential_exists = Credential.credential_exist("marie")
self.assertTrue(credential_exists)
def test_display_credentials(self):
"""
method that tests the display of all credentials saved by the user
"""
self.assertEqual(Credential.display_credentials(),Credential.credential_list)
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
28884
|
import os
file=open("C:/Users/michael.duran\OneDrive - <NAME>/Documents/Audisoft/Thomas/Inmofianza/TeamQA/SeleniumInmofianza/src/classes/datos.txt","w")
file.write("Primera línea" + os.linesep)
file.write("Segunda línea")
file.close()
|
StarcoderdataPython
|
1717666
|
<filename>Lectures/tex/codes/lecture20.py
import numpy
import matplotlib
from matplotlib import pyplot
from scipy.optimize import fsolve
matplotlib.rcParams.update({'font.size':18, 'figure.figsize':(10,6)})
def relax_dirichlet(p, q, f, interval, bcs, N):
x, dx = numpy.linspace(interval[0], interval[1], N+2, retstep=True)
x_interior = x[1:-1]
A = numpy.zeros((N,N))
F = numpy.zeros((N,))
y = numpy.zeros((N+2,))
for i in range(N):
A[i,i] = dx**2 * q(x_interior[i]) - 2
if i>0:
A[i,i-1] = 1 - dx/2 * p(x_interior[i])
if i<N-1:
A[i,i+1] = 1 + dx/2 * p(x_interior[i])
F[i] = dx**2 * f(x_interior[i])
F[0] = F[0] - bcs[0] * (1 - dx/2 * p(x_interior[0]))
F[-1] = F[-1] - bcs[-1] * (1 + dx/2 * p(x_interior[-1]))
y_interior = numpy.linalg.solve(A, F)
y[0] = bcs[0]
y[1:-1] = y_interior
y[-1] = bcs[-1]
return x, y
def relax_blackbox(f, bcs, N):
x, dx = numpy.linspace(0, 1, N+2, retstep=True)
def residual(y):
y[0] = bcs[0]
y[-1] = bcs[-1]
dy = (y[2:] - y[:-2]) / (2*dx)
res = numpy.zeros_like(y)
res[1:-1] = y[:-2] + y[2:] - 2*y[1:-1] - dx**2 * f(x[1:-1], y[1:-1], dy)
return res
y_initial = numpy.zeros_like(x)
y = fsolve(residual, y_initial)
return x, y
def relax_newton(f, dfdy, dfddy, bcs, N):
x, dx = numpy.linspace(0, 1, N+2, retstep=True)
y = numpy.zeros_like(x)
y_old = numpy.ones_like(x)
step = 0
while numpy.linalg.norm(y-y_old) > 1e-10 and step < 100:
y_old = y.copy()
step = step + 1
y[0] = bcs[0]
y[-1] = bcs[-1]
x_interior = x[1:-1]
y_interior = y[1:-1]
dy = (y[2:] - y[:-2]) / (2*dx)
residual = y[:-2] + y[2:] - 2*y[1:-1] - dx**2 * f(x[1:-1], y[1:-1], dy)
J = numpy.zeros((N,N))
for i in range(N):
J[i,i] = -2 - dx**2*dfdy(x_interior[i], y_interior[i], dy[i])
if i>0:
J[i,i-1] = 1 + dx/2*dfddy(x_interior[i], y_interior[i], dy[i])
if i<N-1:
J[i,i+1] = 1 - dx/2*dfddy(x_interior[i], y_interior[i], dy[i])
y_new_interior = y_interior + numpy.linalg.solve(J, -residual)
y[1:-1] = y_new_interior
return x, y
if __name__=="__main__":
def p(x):
return numpy.ones_like(x)
def q(x):
return numpy.zeros_like(x)
def f(x):
return -numpy.ones_like(x)
x_exact = numpy.linspace(0, 1, 1000)
x, y = relax_dirichlet(p, q, f, [0, 1], [0, 1], 5)
pyplot.plot(x, y, 'kx', mew=2)
pyplot.plot(x_exact, 2*numpy.exp(1)/(numpy.exp(1)-1)*(1-numpy.exp(-x_exact))-x_exact)
pyplot.xlabel(r"$x$")
pyplot.show()
pyplot.plot(x, y-(2*numpy.exp(1)/(numpy.exp(1)-1)*(1-numpy.exp(-x))-x))
pyplot.xlabel(r"$x$")
pyplot.show()
def f_nonlinear(x, y, dy):
return -1/(1+y**2)
def dfdy_nonlinear(x, y, dy):
return 2*y/(1+y**2)**2
def dfddy_nonlinear(x, y, dy):
return numpy.zeros_like(x)
x, y = relax_blackbox(f_nonlinear, [0, 0], 50)
pyplot.plot(x, y, 'k--')
pyplot.show()
x, y = relax_newton(f_nonlinear, dfdy_nonlinear, dfddy_nonlinear,
[0, 0], 50)
pyplot.plot(x, y, 'k--')
pyplot.show()
|
StarcoderdataPython
|
1618909
|
from django.shortcuts import render_to_response
from django.template import RequestContext
# Create your views here.
def index(request,):
return render_to_response('blogs/index.html', context_instance=RequestContext(request))
|
StarcoderdataPython
|
33984
|
# -*- coding: utf-8 -*-
"""
Microsoft-Windows-UAC-FileVirtualization
GUID : c02afc2b-e24e-4449-ad76-bcc2c2575ead
"""
from construct import Int8sl, Int8ul, Int16ul, Int16sl, Int32sl, Int32ul, Int64sl, Int64ul, Bytes, Double, Float32l, Struct
from etl.utils import WString, CString, SystemTime, Guid
from etl.dtyp import Sid
from etl.parsers.etw.core import Etw, declare, guid
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2000_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2001, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2001_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2002_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2003, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2003_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2004, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2004_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2005, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2005_0(Etw):
pattern = Struct(
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2006, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2006_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2007, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2007_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2008, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2008_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2009, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2009_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2010, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2010_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2011, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2011_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2012, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2012_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2013, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2013_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2014, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2014_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2015, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2015_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2016, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2016_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2017, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2017_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2018, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2018_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=2019, version=0)
class Microsoft_Windows_UAC_FileVirtualization_2019_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"Error" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4000_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul,
"IrpMajorFunction" / Int8ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4001, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4001_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"TargetFileNameLength" / Int16ul,
"TargetFileNameBuffer" / Bytes(lambda this: this.TargetFileNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=4002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_4002_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5000, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5000_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul,
"IrpMajorFunction" / Int8ul,
"Exclusions" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5002, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5002_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength),
"CreateOptions" / Int32ul,
"DesiredAccess" / Int32ul
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5003, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5003_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
@declare(guid=guid("c02afc2b-e24e-4449-ad76-bcc2c2575ead"), event_id=5004, version=0)
class Microsoft_Windows_UAC_FileVirtualization_5004_0(Etw):
pattern = Struct(
"Flags" / Int32ul,
"SidLength" / Int32ul,
"Sid" / Bytes(lambda this: this.SidLength),
"FileNameLength" / Int16ul,
"FileNameBuffer" / Bytes(lambda this: this.FileNameLength),
"ProcessImageNameLength" / Int16ul,
"ProcessImageNameBuffer" / Bytes(lambda this: this.ProcessImageNameLength)
)
|
StarcoderdataPython
|
1740898
|
import matplotlib.pyplot as plt
def pie_chart():
numbers = [40, 35, 15, 10]
labels = ['Python', 'Ruby', 'C++', 'PHP']
fig1, ax1 = plt.subplots()
ax1.pie(numbers, labels=labels)
plt.show()
if __name__ == '__main__':
pie_chart()
|
StarcoderdataPython
|
52675
|
# tests/test_provider_MissionCriticalCloud_cosmic.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:14:40 UTC)
def test_provider_import():
import terrascript.provider.MissionCriticalCloud.cosmic
def test_resource_import():
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_affinity_group
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_disk
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_instance
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_ipaddress
from terrascript.resource.MissionCriticalCloud.cosmic import (
cosmic_loadbalancer_rule,
)
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_network
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_network_acl
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_network_acl_rule
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_nic
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_port_forward
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_private_gateway
from terrascript.resource.MissionCriticalCloud.cosmic import (
cosmic_secondary_ipaddress,
)
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_ssh_keypair
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_static_nat
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_static_route
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_template
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_vpc
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_vpn_connection
from terrascript.resource.MissionCriticalCloud.cosmic import (
cosmic_vpn_customer_gateway,
)
from terrascript.resource.MissionCriticalCloud.cosmic import cosmic_vpn_gateway
def test_datasource_import():
from terrascript.data.MissionCriticalCloud.cosmic import cosmic_network_acl
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.MissionCriticalCloud.cosmic
#
# t = terrascript.provider.MissionCriticalCloud.cosmic.cosmic()
# s = str(t)
#
# assert 'https://github.com/MissionCriticalCloud/terraform-provider-cosmic' in s
# assert '0.5.0' in s
|
StarcoderdataPython
|
4803856
|
<filename>midi_transformer/modules/emb.py
import numpy as np
import torch
from torch import nn
import math
class Embeddings(nn.Module):
def __init__(self, n_token, d_emb):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(n_token, d_emb)
self.d_emb = d_emb
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_emb)
class PositionalEncoding(nn.Module):
def __init__(self, d_model, max_len=512):
super(PositionalEncoding, self).__init__()
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
return self.pe[:, :x.size(1), :]
class CPEmbedding(nn.Module):
def __init__(self, config):
super().__init__()
emb_layers = []
for k in config['attributes']:
emb_layers += [[k, Embeddings(n_token=config['n_tokens'][k], d_emb=config['emb_dims'][k])]]
self.emb_layers = nn.ModuleDict(emb_layers)
sum_emb_dims = sum(config['emb_dims'].values())
self.proj = nn.Linear(sum_emb_dims, config['d_model']) if config['d_model'] != sum_emb_dims else None
self.pos_emb = PositionalEncoding(d_model=config['d_model'], max_len=config['max_len'])
self.dropout = nn.Dropout(p=config['dropout'])
def forward(self, x):
embs = []
for i, k in enumerate(self.emb_layers):
embs += [self.emb_layers[k](x[..., i])]
embs = torch.cat(embs, dim=-1)
if self.proj is not None:
embs = self.proj(embs)
pos_emb = self.pos_emb(embs)
return self.dropout(embs + pos_emb)
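# Hedged configuration sketch (not part of the original module); every key and
# size below is a made-up example of the fields CPEmbedding reads from `config`.
#   config = {
#       'attributes': ['type', 'pitch'],
#       'n_tokens': {'type': 4, 'pitch': 88},
#       'emb_dims': {'type': 16, 'pitch': 64},
#       'd_model': 128,
#       'max_len': 512,
#       'dropout': 0.1,
#   }
#   emb = CPEmbedding(config)
#   out = emb(torch.zeros(2, 100, 2, dtype=torch.long))  # (batch, seq, n_attributes)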
|
StarcoderdataPython
|
163519
|
<gh_stars>1-10
import re
import os
import pandas as pd
import csv
import shutil
from circmimi.reference import gendb
from circmimi.reference.species import species_list
from circmimi.seq import parse_fasta
from circmimi.reference import resource as rs
from circmimi.reference.utils import cwd
from circmimi.reference.mirbase import MatureMiRNAUpdater
class RefFile:
def __init__(self, src_name):
self.src_name = src_name
self.filename = None
def generate(self):
return self.filename
class AnnoRef(RefFile):
def generate(self):
self.filename = re.sub(r'\.gtf(?:\.gz)?$', '.db', self.src_name)
gendb.generate(self.src_name, self.filename)
return self.filename
class MirRef(RefFile):
def generate(self, species_key):
self.filename = re.sub(r'\.fa$', '.{}.fa'.format(species_key), self.src_name)
with open(self.src_name) as fa_in:
fa_txt = fa_in.read()
with open(self.filename, 'w') as out:
for fa_id, fa_seq in parse_fasta(fa_txt):
m = re.search(r'^{}-'.format(species_key), fa_id)
if m:
print(">{}\n{}".format(fa_id, fa_seq), file=out)
return self.filename
class MiRTarBaseRef(RefFile):
def generate(self, species):
df = pd.read_excel(self.src_name, engine='openpyxl')
formatted_data = df[
df['Species (miRNA)'] == species.fullname
][[
'miRNA',
'Target Gene',
'References (PMID)'
]].groupby([
'miRNA',
'Target Gene'
]).agg(
'count'
).reset_index(
).rename(
{
'miRNA': 'mirna',
'Target Gene': 'target_gene',
'References (PMID)': 'ref_count'
},
axis=1
)
self.filename = "miRTarBase.{}.tsv".format(species.key)
formatted_data.to_csv(
self.filename,
sep='\t',
index=False,
quoting=csv.QUOTE_NONE
)
return self.filename
class EnsemblTranscriptsFile(RefFile):
def generate(self, biotype):
self.filename = re.sub(r'\.fa$', '.{}.fa'.format(biotype), self.src_name)
with open(self.src_name) as fa_in:
fa_txt = fa_in.read()
with open(self.filename, 'w') as out:
for fa_id, fa_seq in parse_fasta(fa_txt):
m = re.search(r'{}'.format(biotype), fa_id)
if m:
print(">{}\n{}".format(fa_id, fa_seq), file=out)
return self.filename
class RepChrM(RefFile):
def generate(self):
file_path = os.path.dirname(self.src_name)
self.filename = os.path.join(file_path, "RepChrM.fa")
with open(self.src_name) as fa_in:
fa_txt = fa_in.read()
with open(self.filename, 'w') as out:
for fa_id, fa_seq in parse_fasta(fa_txt):
m = re.search(r'^(?:chr)?MT?', fa_id)
if m:
print(">{}\n{}\n{}".format(fa_id, fa_seq, fa_seq),
file=out)
return self.filename
class OtherTranscripts:
def __init__(self, src_type, pc_src, lncRNA_src, repChrM_src):
self.src_type = src_type
self.pc_src = pc_src
self.lncRNA_src = lncRNA_src
self.repChrM_src = repChrM_src
self.filename = 'others.fa'
def generate(self):
if self.src_type == 'ensembl':
pc_ref = EnsemblTranscriptsFile(self.pc_src.filename)
pc_ref.generate('protein_coding')
lncRNA_ref = EnsemblTranscriptsFile(self.lncRNA_src.filename)
lncRNA_ref.generate('lncRNA')
elif self.src_type == 'gencode':
pc_ref = self.pc_src
lncRNA_ref = self.lncRNA_src
repChrM_ref = RepChrM(self.repChrM_src.filename)
repChrM_ref.generate()
with open(self.filename, 'wb') as out:
for ref in [pc_ref, lncRNA_ref, repChrM_ref]:
with open(ref.filename, 'rb') as f_in:
shutil.copyfileobj(f_in, out)
return self.filename
class Files:
def __init__(self, files):
self.files = files
def download(self):
for file_ in self.files:
file_.download()
def unzip(self):
for file_ in self.files:
file_.unzip()
def __getitem__(self, n):
return self.files[n]
class EnsemblFiles(Files):
source = "ensembl"
class GencodeFiles(Files):
source = "gencode"
class MirTargetRef:
def __init__(self, ref_files, ref_names, species):
self.ref_files = ref_files
self.ref_names = ref_names
self.species = species
self.filename = "mir_target.{}.tsv".format(self.species.key)
self.remove_unavailable_files()
def generate(self):
merged_df = pd.DataFrame([], columns=['mirna', 'target_gene'])
for ref_file, ref_name in zip(self.ref_files, self.ref_names):
ref_df = pd.read_csv(ref_file, sep='\t', dtype='object')
ref_df = ref_df.pipe(self.add_ref_name, ref_name).pipe(self.add_ref_col, ref_name)
merged_df = merged_df.merge(ref_df, on=['mirna', 'target_gene'], how="outer")
for ref_name in self.ref_names:
merged_df[ref_name].fillna('0', inplace=True)
# promote the refname columns
col_names = list(merged_df.columns)
col_names = col_names[:2] + self.promote_items(col_names[2:], self.ref_names)
merged_df = merged_df[col_names]
merged_df.to_csv(self.filename, sep='\t', index=False)
@staticmethod
def add_ref_name(ref_df, ref_name, sep='__'):
data_cols = ref_df.columns[2:]
new_col_names = dict(map(lambda col: (col, "{}{}{}".format(ref_name, sep, col)), data_cols))
return ref_df.rename(new_col_names, axis=1)
@staticmethod
def add_ref_col(ref_df, ref_name):
return ref_df.assign(**{ref_name: '1'})
@staticmethod
def promote_items(all_items, to_be_promoted):
result_items = all_items.copy()
for item in to_be_promoted[::-1]:
if item in result_items:
item_idx = result_items.index(item)
result_items = [result_items[item_idx]] + result_items[:item_idx] + result_items[(item_idx + 1):]
return result_items
def remove_unavailable_files(self):
ref_files = []
ref_names = []
for ref_file, ref_name in zip(self.ref_files, self.ref_names):
if ref_file:
ref_files.append(ref_file)
ref_names.append(ref_name)
self.ref_files = ref_files
self.ref_names = ref_names
def generate(species, source, version, ref_dir):
with cwd(ref_dir):
species = species_list[species]
if source == "ensembl":
anno_file = rs.EnsemblAnnotation(species.name, version)
genome_file = rs.EnsemblGenome(species.name, version)
other_transcripts_files = EnsemblFiles(
[
rs.EnsemblCDna(species.name, version),
rs.EnsemblNCRna(species.name, version)
]
)
elif source == "gencode":
if species.key == 'hsa':
species_key = 'human'
elif species.key == 'mmu':
species_key = 'mouse'
anno_file = rs.GencodeAnnotation(species_key, version)
genome_file = rs.GencodeGenome(species_key, version)
other_transcripts_files = GencodeFiles(
[
rs.GencodeProteinCodingTranscripts(species_key, version),
rs.GencodeLongNonCodingTranscripts(species_key, version)
]
)
elif source.startswith("ensembl_"):
field = source.split('_')[1]
anno_file = rs.EnsemblSisterAnnotation(field, species.name, version)
genome_file = rs.EnsemblSisterGenome(field, species.name, version)
other_transcripts_files = EnsemblFiles(
[
rs.EnsemblSisterCDna(field, species.name, version),
rs.EnsemblSisterNCRna(field, species.name, version)
]
)
else:
raise rs.SourceNotSupportError(source)
mir_seq_file = rs.MiRBaseMiRNA(None, "22")
mir_target_files = Files(
[
rs.MiRTarBaseResource(None, "7.0"),
rs.MiRDBData(species.key, "6.0"),
rs.EncoriMiRNATargetData(species.key)
]
)
ENCORI_RBP_files = Files(
[
rs.EncoriRBPData(species.key, source, version, only_AGO=True),
# rs.EncoriRBPData(species.key, source, version),
# rs.EncoriRBPTargetData(species.key)
]
)
# download
anno_file.download()
genome_file.download()
mir_seq_file.download()
mir_target_files.download()
other_transcripts_files.download()
ENCORI_RBP_files.download()
# unzip
genome_file.unzip()
mir_seq_file.unzip()
mir_target_files.unzip()
other_transcripts_files.unzip()
ENCORI_RBP_files.unzip()
# genref
anno_ref = AnnoRef(anno_file.filename)
anno_ref.generate()
mir_ref = MirRef(mir_seq_file.filename)
mir_ref.generate(species.key)
# miRTarBase
miRTarBase_ref = MiRTarBaseRef(mir_target_files[0].filename)
miRTarBase_ref.generate(species)
# miRNAs updater
updater = MatureMiRNAUpdater("21", "22", species.key)
updater.create()
updated_miRTarBase_ref_filename = "miRTarBase.{}.miRBase_v22.tsv".format(species.key)
updater.update_file(
miRTarBase_ref.filename,
updated_miRTarBase_ref_filename,
col_key=0,
inplace=True,
remove_deleted=True
)
mir_target_refs = [
updated_miRTarBase_ref_filename,
mir_target_files[1].filename,
mir_target_files[2].filename
]
mir_target_ref = MirTargetRef(
mir_target_refs,
[
"miRTarBase",
"miRDB",
"ENCORI"
],
species
)
mir_target_ref.generate()
others_ref = OtherTranscripts(
other_transcripts_files.source,
*other_transcripts_files.files,
genome_file
)
others_ref.generate()
# config
info = {
'species': species.key,
'source': source,
'version': anno_file.version
}
ref_files = {
'anno_db': anno_ref.filename,
'ref_file': genome_file.filename,
'mir_ref': mir_ref.filename,
'mir_target': mir_target_ref.filename,
'other_transcripts': others_ref.filename,
'AGO_data': ENCORI_RBP_files[0].filename,
# 'RBP_data': ENCORI_RBP_files[1].filename,
# 'RBP_target': ENCORI_RBP_files[2].filename
}
return info, ref_files
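# Hedged usage sketch (not part of the original module); the Ensembl version
# string and the output directory below are placeholders.
#   info, ref_files = generate('hsa', 'ensembl', '98', '/path/to/refs')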
|
StarcoderdataPython
|
65868
|
from __future__ import annotations
from typing import Optional
from .node import Node
from .terms import Field, Star
from .utils import copy_if_immutable, ignore_copy
class Selectable(Node):
def __init__(self, alias: Optional[str]) -> None:
self.alias = alias
def as_(self, alias: str) -> Selectable:
with copy_if_immutable(self) as this:
this.alias = alias
return this
def field(self, name: str) -> Field:
return Field(name, table=self)
@property
def star(self) -> Star:
return Star(self)
@ignore_copy
def __getattr__(self, name: str) -> Field:
return self.field(name)
@ignore_copy
def __getitem__(self, name: str) -> Field:
return self.field(name)
def get_table_name(self) -> Optional[str]:
return self.alias
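# Hedged usage note (added): Selectable is meant to be subclassed by table- or
# subquery-like objects elsewhere in the package; assuming such a subclass named
# Table exists, field access would look roughly like:
#
#   t = Table("users").as_("u")   # hypothetical subclass
#   t.field("id")                 # explicit Field
#   t.id                          # same Field via __getattr__
#   t.star                        # Star covering all columns of t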
|
StarcoderdataPython
|
3358630
|
import numpy as np
data = [1, 2, 3]
arr = np.array(data)
data2 = arr * 10
print(data2)
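# Note (added): the scalar multiplication broadcasts element-wise, so data2 is
# array([10, 20, 30]); the original Python list `data` is left unchanged.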
|
StarcoderdataPython
|
4815497
|
<reponame>KXXH/SCU_JWC_assist
TEST_URL = "http://zhjw.scu.edu.cn/main/showPyfaInfo"
|
StarcoderdataPython
|
3201533
|
<filename>macro/merge-hdf5.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import argparse
import warnings
from itertools import chain
import glob
from collections import defaultdict
import numpy as np
import h5py
def _check_pathes(pathes, strict=True):
filtered = []
for path in pathes:
if h5py.is_hdf5(path):
filtered.append(path)
else:
if strict:
raise ValueError("{} is not an HDF5 file".format(path))
else:
warnings.warn("Dropping {} since it isn't HDF5 file".format(path))
return filtered
class Glob_HDF5(argparse.Action):
def __call__(self, parser, namespace, values, option_string):
pathes = []
for path in values:
pathes.extend(glob.glob(path))
if not len(pathes):
raise ValueError("The glob expansion result is empty")
filtered = _check_pathes(pathes, strict=False)
final = list(set(filtered))
setattr(namespace, self.dest, final)
def get_all_keys(obj, keys=None):
    if keys is None:
keys=[]
keys.append(obj.name)
if isinstance(obj, h5py.Group):
for item in obj:
if isinstance(obj[item], h5py.Group):
get_all_keys(obj[item], keys)
else: # isinstance(obj[item], h5py.Dataset):
keys.append(obj[item].name)
return keys
def _depth(hdf_key):
return hdf_key.count('/')
class Merger():
'''Simple CLI utility to merge HDF5 files with chi-square maps
after likelihood profiling in different segments of the grid
produced by scan module. Top-level attributes are assumed to be identical for all
merged files'''
def __init__(self, opts):
with h5py.File(opts.output, 'w') as f:
for path in opts.input:
input_file = h5py.File(path, 'r')
for key in input_file:
try:
# easy case: recursively copy entire group
input_file.copy(key, f)
except ValueError as e:
                        # hard case: the group was split across files and simply
                        # copying won't work; identify which groups are already in
                        # the output, update them, and then copy the rest
keys_in_input = set(get_all_keys(input_file[key]))
                        keys_in_output = set(get_all_keys(f[key]))
                        missing_keys = list(keys_in_input.difference(keys_in_output))
# sort keys so groups come before datasets
missing_keys.sort(key=_depth)
# make sure each missing group is created, attributes
# and datasets are copied
for missed_key in missing_keys:
input_object = input_file[missed_key]
if isinstance(input_object, h5py.Group):
f.require_group(missed_key)
for name, val in input_object.attrs.items():
f[missed_key].attrs.create(name, val)
if isinstance(input_object, h5py.Dataset):
f.create_dataset(missed_key, data=input_object[:])
for attr, value in input_file['/'].attrs.items():
f['/'].attrs[attr] = value
input_file.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='''Simple tool to merge HDF5 after
likelihood profiling by scan module''')
parser.add_argument('input', nargs='*', type=os.path.abspath,
action=Glob_HDF5, help='List of HDF5 files to merge with possible globbing, duplicates are removed')
parser.add_argument('--output', type=os.path.abspath, required=True,
help='Path to merged output file')
opts = parser.parse_args()
merger = Merger(opts)
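    # Hedged CLI sketch (added): with the argparse options defined above, a typical
    # invocation might look like
    #
    #   python merge-hdf5.py results_part*.hdf5 --output merged.hdf5
    #
    # where the glob is expanded (and de-duplicated) by Glob_HDF5.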
|
StarcoderdataPython
|
105106
|
<reponame>pauldicarlo/PySailocus<gh_stars>0
'''
@author: <NAME>
@copyright: 2018 <NAME>
@license: MIT
@contact: https://github.com/sailocus/PySailocus
'''
from pysailocus.geometry.Point import Point
from pysailocus.geometry.Line import newPointOnLine, getSlope
#############################################################################################
# For pointA, return a point weighted perpendicular to the line segment defined by pointB
#############################################################################################
def getPerpendicularLineSegmentPoint(pointA, pointB, weight):
print("__________________________________________________")
theSlope = getSlope(pointA, pointB)
#y_offset = -1* COE.COEMath.getOffsetForY(center_of_effort_1, center_of_effort_2)
perpendicularSlope = float(-1/theSlope)
x = pointA.getX()+weight
newPoint = newPointOnLine( perpendicularSlope, x, pointA )
if True:
print("==>original slope=" + str(theSlope))
#print("==>perpendicular slope=" + str(perpendicularSlope))
#print("==>calculated y=" + str(y))
print(str(pointA))
print(str(pointB))
print("x=" + str(x))
#self.drawLine(center_of_effort_1, Point(center_of_effort_1.getX()+24, y), fill='Orange')
#newPoint = Point(x, y) # create new point to create perpendicular line of weight based on surface area
return newPoint
################################################################
#
################################################################
class LineSegment(object):
################################################################
def __init__(self, point_a, point_b):
if not isinstance(point_a, Point):
raise ValueError("point_a is not a point: " + str(point_a))
if not isinstance(point_b, Point):
raise ValueError("point_b is not a point: " + str(point_b))
self.point_a = point_a
self.point_b = point_b
self.validate()
################################################################
def __str__(self):
return "LineSegement=[" + str(self.point_a) + ", " + str(self.point_b) + "]"
################################################################
def validate(self):
if self.point_a is None or self.point_b is None:
raise ValueError("LineSegement: both points must be non-null. " + str(self))
################################################################
def getMidpoint(self):
self.validate()
return Point(int((self.point_a.x+self.point_b.x)/2), int((self.point_a.y+self.point_b.y)/2))
################################################################
# M A I N
################################################################
if __name__ == "__main__":
print(str(LineSegment(Point(0,2), Point(0,4)).getMidpoint().isEqual(Point(0,3))))
#assert( False == LineSegment(Point(0,2), Point(0,4)).getMidpoint().isEqual(Point(0,3))), print("Yahoo")
|
StarcoderdataPython
|
3205196
|
<filename>Desafio063.py<gh_stars>0
#Write a program that reads an integer n and shows the first n elements of the Fibonacci sequence. Ex: 0->1->1->2->3->5->8
#The sequence always starts with 0 and 1; after that come 1 2 3 5 8
print('{:@^40}'.format(' Sequência de Fibonacci '))
n = int(input('Quantos valores da sequência você quer ver? '))
c = ant = soma = 0
atual = 1
while c < n:
print(soma, end='')
c += 1
print(' - ' if c < n else '', end='')
if soma == 0:
soma += 1
else:
soma = ant + atual
ant = atual
atual = soma
|
StarcoderdataPython
|
1728487
|
data = zip('1234', [1, 2, 3, 4, 5, 6])
print(data)
# converting to a list consumes every element of the zip object, so the zip object is left empty
print(list(data))
# to access the elements again, a new zip object must be created
data = zip('1234', [1, 2, 3, 4, 5, 6])
print(tuple(data))
data = zip('1234', [1, 2, 3, 4, 5, 6])
# a zip object is iterable, so a for loop can visit its elements one by one
for item in data:
print(item)
|
StarcoderdataPython
|
2217
|
<gh_stars>0
#!/usr/bin/env python3
"""
cidr_enum.py is a very simple tool to enumerate IP ranges for use with other tools
"""
import argparse
import netaddr
def enum_ranges(ranges, do_sort):
cidrs=[]
for r in ranges:
try:
cidrs.append(netaddr.IPNetwork(r))
except Exception as e:
print("Error:", e)
return
if(do_sort):
cidrs = sorted(cidrs)
#print(cidrs)
for cidr in cidrs:
for ip in cidr:
print(ip)
def main():
    parser = argparse.ArgumentParser(description='Enumerate CIDR ranges')
parser.add_argument('ranges', metavar='range', type=str, nargs='*',
help='List of CIDR ranges to enumerate')
parser.add_argument('-f', '--files', metavar='file', type=str, nargs='*',
help='List of files to retrieve CIDR ranges to enumerate')
parser.add_argument('-s', '--sort', action='store_true', help='Sort CIDR ranges')
args = parser.parse_args()
if args.files:
files = list(args.files)
else:
files = []
ranges = list(args.ranges)
if not (files or ranges):
print ("Please give a list or ranges or input files")
parser.print_help()
return
for f in files:
with open(f, "r") as fd:
for l in fd.readlines():
ranges.append(l.strip())
enum_ranges(ranges, do_sort=args.sort)
if __name__ == '__main__':
main()
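# Hedged usage sketch (added): given the arguments defined in main(), the tool
# could be run roughly like
#
#   python cidr_enum.py 192.168.0.0/30 10.0.0.0/30 --sort
#   python cidr_enum.py -f ranges.txt
#
# where ranges.txt is a hypothetical file with one CIDR range per line.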
|
StarcoderdataPython
|
3395607
|
from django.conf.urls import patterns, url
import views
urlpatterns = patterns('',
url(r'^(?P<contest_id>[0-9]+)/$', views.contest, name='contest'),
url(r'^(?P<contest_id>[0-9]+)/feedback$', views.feedback, name='feedback'),
url(r'^(?P<contest_id>[0-9]+)/signup$', views.signup, name='signup'),
url(r'^(?P<contest_id>[0-9]+)/clarification$', views.clarification, name='clarification'),
url(r'^(?P<contest_id>[0-9]+)/ISeaTeLContestSite/rank/$', views.rank),
url(r'^(?P<contest_id>[0-9]+)/ISeaTeLContestSite/competitor/$', views.competitor),
)
|
StarcoderdataPython
|
1716774
|
<reponame>mlcommons/peoples-speech
# Adapted from the second half of this answer: https://stackoverflow.com/a/44084038
class SparkListener:
def onApplicationEnd(self, applicationEnd):
pass
def onApplicationStart(self, applicationStart):
pass
def onBlockManagerRemoved(self, blockManagerRemoved):
pass
def onBlockUpdated(self, blockUpdated):
pass
def onEnvironmentUpdate(self, environmentUpdate):
pass
def onExecutorAdded(self, executorAdded):
pass
def onExecutorMetricsUpdate(self, executorMetricsUpdate):
pass
def onExecutorRemoved(self, executorRemoved):
pass
def onJobEnd(self, jobEnd):
pass
def onJobStart(self, jobStart):
pass
def onOtherEvent(self, event):
pass
def onStageCompleted(self, stageCompleted):
pass
def onStageSubmitted(self, stageSubmitted):
pass
def onTaskEnd(self, taskEnd):
pass
def onTaskGettingResult(self, taskGettingResult):
pass
def onTaskStart(self, taskStart):
pass
def onUnpersistRDD(self, unpersistRDD):
pass
def onBlockManagerAdded(self, _):
pass
class Java:
implements = ["org.apache.spark.scheduler.SparkListenerInterface"]
class WriteTaskEndListener(SparkListener):
def __init__(self):
self._value = 0
def onJobStart(self, _jobEnd):
self._value = 0
def onTaskEnd(self, taskEnd):
self._value += taskEnd.taskMetrics().outputMetrics().recordsWritten()
@property
def value(self):
return self._value
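# Hedged registration sketch (added, not from the original answer): wiring the
# listener into PySpark goes through py4j callbacks, so the exact calls below are
# assumptions that may need adjusting for your Spark/py4j versions.
#
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   sc = spark.sparkContext
#   sc._gateway.start_callback_server()        # assumed py4j setup
#   listener = WriteTaskEndListener()
#   sc._jsc.sc().addSparkListener(listener)    # assumed JVM-side hook
#   ...                                        # run a write job here
#   print(listener.value)                      # records written by the last job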
|
StarcoderdataPython
|
3359175
|
<reponame>normthenord/Discord_Bot
import discord
import random
import os
import dotenv
from dotenv.compat import to_env
dotenv.load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
GUILD = '''NormTheNord's Bot Server'''
client = discord.Client()
@client.event
async def on_ready():
for guild in client.guilds:
print(guild.name)
print(f'{client.user} has connected to Discord!')
@client.event
async def on_message(message):
print("Receiving message")
if message.author == client.user:
print('Message from me')
return
if message.content == "!roll":
await message.channel.send(f'Rolling between 1 and 10: {random.randint(1,10)}')
return
split = message.content.split(" ")
if "NormTheBot" in message.content:
await message.channel.send(f"Hi, {message.author.name}! So Nice to meet you on this grand day!")
if split[0] == "!roll":
if split[1].isdigit():
await message.channel.send(f'Rolling between 1 and {int(split[1])}: {random.randint(1,int(split[1]))}')
client.run(TOKEN)
|
StarcoderdataPython
|
127008
|
<gh_stars>10-100
from django.core.management.base import BaseCommand
from atlas import factories
class Command(BaseCommand):
help = "Load mock data"
def handle(self, *args, **options):
raise NotImplementedError("TODO")
ceo = factories.ProfileFactory.create(ceo=True)
print(f"Created {ceo}")
cmo = factories.ProfileFactory.create(cmo=True, reports_to=ceo.user)
print(f"Created {cmo}")
cto = factories.ProfileFactory.create(cto=True, reports_to=ceo.user)
print(f"Created {cto}")
cpo = factories.ProfileFactory.create(cpo=True, reports_to=ceo.user)
print(f"Created {cpo}")
cfo = factories.ProfileFactory.create(cfo=True, reports_to=ceo.user)
print(f"Created {cfo}")
cro = factories.ProfileFactory.create(cfo=True, reports_to=ceo.user)
print(f"Created {cro}")
factories.ProfileFactory.create(reports_to=cmo.user, marketing=True)
|
StarcoderdataPython
|
158768
|
<filename>tests/utils.py
import torch
from time import time
# convert dense matrix with explicit zeros to sparse matrix
def dense_to_sparse(w, mask, block):
Z = w.size(0)
ret = torch.empty((Z, mask.sum(), block, block), dtype=w.dtype, device=w.device)
nnz = mask.nonzero()
h, i, j = nnz[:, 0], nnz[:, 1], nnz[:, 2]
for zz in range(Z):
for idx, (hh, ii, jj) in enumerate(zip(h, i, j)):
ret[zz, idx, :, :] = w[zz, hh, ii*block: (ii+1)*block, jj*block: (jj+1)*block]
return ret
# convert sparse matrix to dense matrix with explicit zeros
def sparse_to_dense(w, mask, block, zero = 0):
maskedw = w.clone()
for bz, wz in enumerate(range(0, w.size(0))):
for bh, wh in enumerate(range(0, w.size(1))):
for bi, wi in enumerate(range(0, w.size(2), block)):
for bj, wj in enumerate(range(0, w.size(3), block)):
if mask[bh, bi, bj] == 0:
maskedw[wz, wh, wi : wi+block, wj:wj+block] = zero
#maskedw[wz, wh, wi : wi+block, wj : wj+block] *= mask[bh, bi, bj]
return maskedw
def relerr(x, y, eps=1e-7):
x = x.data.clone()
y = y.data.clone()
if x.shape != y.shape:
return 1
diff = x - y + eps
ewmax = torch.max(x.abs(), y.abs())
return (diff.abs() / (ewmax + eps)).max().item()
def mempad(x, shape, strides, pad_size=1024*1024):
pad = float('nan') * torch.ones(pad_size, device=x.device, dtype=x.dtype)
chunk = torch.cat((pad, x.flatten(), pad))
ret = chunk[pad_size:-pad_size].as_strided(shape, strides)
return ret
def mask_weights(w, layout, block):
repeat_k = block*torch.ones(layout.shape[0], dtype=torch.int64)
repeat_c = block*torch.ones(layout.shape[1], dtype=torch.int64)
mask = layout.repeat_interleave(repeat_k, dim=0)\
.repeat_interleave(repeat_c, dim=1).cuda().type(w.dtype)
return w * mask
def bench(fn, repeat, hook = None):
torch.cuda.synchronize()
# estimate hook time
hook_time = 0
if hook is not None:
start = time()
for i in range(repeat):
hook()
torch.cuda.synchronize()
end = time()
hook_time = end - start
# run bench
fn()
torch.cuda.synchronize()
start = time()
for i in range(repeat):
if hook is not None:
hook()
fn()
torch.cuda.synchronize()
end = time()
return (end - start - hook_time) / repeat
def compress_weights(w, layout, block):
blocks = torch.empty((layout.sum(), block, block), dtype=w.dtype, device=w.device)
current = 0
for k in range(layout.shape[0]):
for r in range(layout.shape[2]):
for s in range(layout.shape[3]):
for c in range(layout.shape[1]):
if layout[k, c, r, s] == 0:
continue
blocks[current, :] = w[k*block : (k+1)*block,
c*block : (c+1)*block,
r, s]
current += 1
return blocks
def allclose(x, y):
assert x.dtype == y.dtype
rtol, atol = {torch.float32: (1e-4, 1e-5),
torch.float16: (1e-2, 1e-3)}[x.dtype]
return torch.allclose(x, y, rtol=rtol, atol=atol)
def make_layout(rho, shape):
probs = torch.Tensor([rho, 1-rho])
generator = torch.distributions.categorical.Categorical(probs)
layout = generator.sample(shape)
return layout
def nbytes(x):
return x.nelement() * x.element_size()
def prettyprint(x, y, L, x_name = ' '):
L = [x_name] + list(map(str, L))
pad = max([len(x) for x in L]) + 2
frmt = (f'{{:>{pad}}}')*len(L)
print(frmt.format(*L))
for i in range(y.shape[0]):
row = [x[i]] + y[i,:].tolist()
frmt = f'{{:>{pad}}}' + f'{{:{pad}.2f}}'*(len(L)-1)
print(frmt.format(*row))
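if __name__ == "__main__":
    # Hedged self-check (added): a tiny, CPU-only demo of two helpers above.
    # All labels and values are made up purely for illustration.
    a = torch.randn(4, 4)
    b = a + 1e-6 * torch.randn(4, 4)
    print("relerr:", relerr(a, b))  # should be a small relative error
    rows = ["fp16", "fp32"]
    cols = [128, 256]
    vals = torch.tensor([[1.23, 4.56], [7.89, 0.12]])
    prettyprint(rows, vals, cols, x_name="dtype")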
|
StarcoderdataPython
|
3348805
|
<reponame>mananeau/GPT2
import glob
import os
import time
from multiprocessing import Pool
import ftfy
import numpy as np
import tensorflow as tf
from tqdm import tqdm
import encoder
base_dir = "gs://pre-training-bucket/german_gpt2/pretraining_data/raw/shards_wiki" # Path to where your .txt files are located
files_per = 175000 # 175000 ~ 200-300MB
name = "wiki" # Name of output files will be name_i.tfrecords where i is the number of the file
output_dir = "gs://pre-training-bucket/german_gpt2/pretraining_data/wiki_tfrecords"
log_dir = "logs"
files = glob.glob(os.path.join(base_dir, "**/*.txt"))
processes = 64 # Number of encoding processes to run
encoder_path = "gs://openwebtext/stuff/encoder" # Path to encoder files
minimum_size = 128
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# Divides a list into chunks
def chunks(l, n):
out = []
for i in range(0, len(l), n):
out.append(l[i:i + n])
return out
if not os.path.exists(log_dir):
os.mkdir(log_dir)
enc = encoder.get_encoder(encoder_path)
file_chunks = chunks(files, files_per)
print("Got {} files, divided into {} chunks.".format(str(len(files)), str(len(file_chunks))))
def create_file(args):
i, chunk = args
s = name + "_" + str(i) + ".tfrecords"
    if os.path.exists(os.path.join(log_dir, s)): # Hacky: a marker file of the same name in log_dir signals the chunk is complete, so skip
return
if os.path.exists(os.path.join(output_dir, s)): # Unfinished file, remove
os.remove(os.path.join(output_dir, s))
with tf.python_io.TFRecordWriter(os.path.join(output_dir, s)) as writer:
good_files = 0
current = None
for fn in chunk:
with tf.gfile.Open(fn, "r") as f:
d = f.read()
d = ftfy.fix_text(d, normalization='NFKC')
data = np.array(enc.encode(d), np.int32)
            if data.shape[0] < minimum_size or (data == 0).all(): # If text is shorter than minimum_size tokens, or all tokens are 0, ignore
continue
hash = fn.split("/")[-1].split(".")[0]
feature = {
"hash": _bytes_feature(hash.encode()),
"text": _int64_feature(data)
}
tf_example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(tf_example.SerializeToString())
good_files += 1
# File complete
with open(os.path.join(log_dir, s), "w") as f: # Create mark that file is finished in logdir
f.write("{} / {}".format(str(good_files), str(len(chunk))))
with open(os.path.join(log_dir, "good_files.log"), "a") as f:
f.write("{}: {} / {}".format(str(i), str(good_files), str(len(chunk))))
return good_files
start = time.time()
pool = Pool(processes=processes)
good = 0
for g in tqdm(pool.imap(create_file, enumerate(file_chunks)), total=len(file_chunks)):
good += g
end = time.time()
print("Done! In {:.2f}s, {} / {} good files.".format(end-start, str(good), str(len(files))))
|
StarcoderdataPython
|
1704087
|
import pygame
from sprites.scenario import Asset
from extras.util import load_image, Image
from sprites.trucker import State
class Obstacle(Asset):
def fetch_image(self):
img = load_image(Image.TRASH)
return pygame.transform.scale(img, (64, 102))
def __init__(self, fy, sy):
Asset.__init__(self, fy, sy)
self.update_image()
def check(self, player):
if self.rect.colliderect(player.rect) and player.is_alive():
selftop = self.rect.centery - self.rect.height / 2
playerbottom = player.rect.centery + player.rect.height / 2
playerbottom -= 5
if playerbottom < selftop:
player.state = State.ALIVE
return
player.die()
|
StarcoderdataPython
|
3328129
|
<gh_stars>1-10
import os
import pandas as pd
from datetime import datetime
def stripTags(string):
if "<" in string and ">" in string and string.find("<") < string.find(">"):
iFirst = string.find("<")
iEnd = string.find(">")
strOut = string[:iFirst] + string[iEnd + 1:]
return stripTags(strOut)
else:
return string
def removeDayTag(strIn):
first2 = strIn[1:3].isdigit()
first3 = strIn[1:4].isdigit()
first4 = strIn[1:5].isdigit()
if first4:
return strIn[3:]
elif first3:
return strIn[2:]
elif first2:
return strIn[3:]
else:
return strIn[2:]
def getDay(strIn):
first1 = strIn[1:2].isdigit()
first2 = strIn[1:3].isdigit()
first3 = strIn[1:4].isdigit()
first4 = strIn[1:5].isdigit()
if first4:
return int(strIn[1:3])
elif first3:
return int(strIn[1:2])
elif first2:
return int(strIn[1:3])
else:
return int(strIn[1:2])
def checkReferenceBranching(strIn):
conditions = strIn.split("or")
dictOut = {}
for i in range(len(conditions)):
iOpenBracket = conditions[i].find("[")
iCloseBracket = conditions[i].find("]")
varField = removeDayTag(conditions[i][iOpenBracket + 1:iCloseBracket])
varAns = conditions[i][iCloseBracket + 1:]
if "=" in varAns:
if "'" in varAns:
iOpenStr = varAns.find("'")
iCloseStr = varAns.rfind("'")
varAnsInt = int(varAns[iOpenStr + 1:iCloseStr])
elif '"' in varAns:
iOpenStr = varAns.find('"')
iCloseStr = varAns.rfind('"')
varAnsInt = int(varAns[iOpenStr + 1:iCloseStr])
dictOut[varField] = varAnsInt
else:
dictOut[varField] = "<>"
return dictOut
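# Worked example (added comment, values traced by hand): for a REDCap-style
# branching string such as '[t2q1] = "1" or [t2q2] = "2"', checkReferenceBranching
# returns {'q1': 1, 'q2': 2} -- the day tag is stripped from each field name and the
# quoted answer is kept as an int; conditions without "=" map to "<>".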
def checkSelfBranching(strIn, expDay):
conditions = strIn.split("or")
lstOut = []
for i in range(len(conditions)):
if "=" in conditions[i]:
iOpenBracket = conditions[i].find("[")
iCloseBracket = conditions[i].find("]")
varField = getDay(conditions[i][iOpenBracket + 1:iCloseBracket])
lstOut.append(varField)
else:
lstOut.append(expDay)
return [i for i, x in enumerate(lstOut) if x == expDay]
def normalizeEntry(strIn):
strIn = str(strIn)
strIn = strIn.strip()
return strIn
if __name__ == "__main__":
while True:
usrIn = input("Enter a data dictionary path or 'quit' to quit program: ")
if usrIn == 'quit':
break
else:
try:
baseDF = pd.read_csv(usrIn)
except Exception as e:
print(e)
continue
else:
# Generating file name
currDateTime = datetime.now().strftime("%Y-%m-%d_%I-%M-%S-%p")
absFilepath = os.path.join(os.getcwd(), f"W4_DataDictionary_Check_{currDateTime}.txt")
print(f"Collecting values from {usrIn}")
baseDF = pd.read_csv(usrIn, low_memory=False)
day1 = pd.DataFrame(columns=baseDF.columns)
day2 = pd.DataFrame(columns=baseDF.columns)
day3 = pd.DataFrame(columns=baseDF.columns)
day4 = pd.DataFrame(columns=baseDF.columns)
day5 = pd.DataFrame(columns=baseDF.columns)
day6 = pd.DataFrame(columns=baseDF.columns)
day7 = pd.DataFrame(columns=baseDF.columns)
day8 = pd.DataFrame(columns=baseDF.columns)
day9 = pd.DataFrame(columns=baseDF.columns)
day10 = pd.DataFrame(columns=baseDF.columns)
day11 = pd.DataFrame(columns=baseDF.columns)
day12 = pd.DataFrame(columns=baseDF.columns)
day13 = pd.DataFrame(columns=baseDF.columns)
day14 = pd.DataFrame(columns=baseDF.columns)
for index, row in baseDF.iterrows():
if row["Form Name"] == "day_1_monday_daily_survey":
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day1 = day1.append(row)
if "day_2" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day2 = day2.append(row)
if "day_3" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day3 = day3.append(row)
if "day_4" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day4 = day4.append(row)
if "day_5" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day5 = day5.append(row)
if "day_6" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day6 = day6.append(row)
if "day_7" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day7 = day7.append(row)
if "day_8" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day8 = day8.append(row)
if "day_9" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][2:]
day9 = day9.append(row)
if "day_10" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][3:]
day10 = day10.append(row)
if "day_11" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][3:]
day11 = day11.append(row)
if "day_12" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][3:]
day12 = day12.append(row)
if "day_13" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][3:]
day13 = day13.append(row)
if "day_14" in row["Form Name"]:
row['Variable / Field Name'] = row['Variable / Field Name'][3:]
day14 = day14.append(row)
allDays = [day1, day2, day3, day4, day5, day6, day7, day8, day9, day10, day11, day12, day13, day14]
for i in range(len(allDays)):
newIndex = allDays[i].index - allDays[i].index[0]
allDays[i] = allDays[i].set_index(newIndex)
# Collecting days, putting in otherDays dictionary
otherDays = {}
for i in range(1, len(allDays)):
otherDays[i + 1] = allDays[i]
# Process list of variable field names - Day 1 (Referential)
day1Vars = allDays[0]['Variable / Field Name'].to_list()
for i in range(len(day1Vars)):
day1Vars[i] = str(day1Vars[i])
with open(absFilepath, "w") as fileOut:
# For each day (days 2-14)
for day in otherDays:
dayHeaderString = "------------------------------------------------------------\n" + '###\n'.join(['' for _ in range(3)]) + f"### Day {day}:\n" + '##\n'.join(['' for _ in range(3)]) + "------------------------------------------------------------ "
print(dayHeaderString)
fileOut.write("\n" + dayHeaderString)
# Process list of variable field names - Day x (Comparison)
dayVars = otherDays[day]['Variable / Field Name'].to_list()
# Checking for variables in day 1 but not in day x
[print(f"Day {day}: Missing t{day}{day1Vars[i]}") for i in range(len(day1Vars)) if
not (day1Vars[i] in dayVars)]
[fileOut.write(f"\nDay {day}: Missing t{day}{day1Vars[i]}") for i in range(len(day1Vars)) if
not (day1Vars[i] in dayVars)]
# Checking for variables in day x but not in day 1
[print(f"Day 1: Missing t1{dayVars[i]} (referencing t{day}{dayVars[i]})") for i in
range(len(dayVars)) if not (dayVars[i] in day1Vars)]
[fileOut.write(f"\nDay 1: Missing t1{dayVars[i]} (referencing t{day}{dayVars[i]})") for i in
range(len(dayVars)) if not (dayVars[i] in day1Vars)]
# Check the order of variables
for index, row in otherDays[day].iterrows():
if row['Variable / Field Name'] != allDays[0].loc[index, 'Variable / Field Name']:
print(
f"MISMATCHED ORDER: Day 1 {allDays[0].loc[index, 'Variable / Field Name']}; Day {day} - {row['Variable / Field Name']} ")
fileOut.write(
f"\nMISMATCHED ORDER: Day 1 {allDays[0].loc[index, 'Variable / Field Name']}; Day {day} - {row['Variable / Field Name']} ")
# for each variable (field) of a single day
for index, row in otherDays[day].iterrows():
if pd.Series.all(row == allDays[0].columns):
continue
currRefVar = pd.Series()
# determine the corresponding variable (field) in day 1
for _, row2 in allDays[0].iterrows():
if row2['Variable / Field Name'] == row['Variable / Field Name']:
currRefVar = row2
break
# checking branching logic
if pd.isna(row['Branching Logic (Show field only if...)']):
if len(currRefVar) > 0 and not pd.isna(
currRefVar['Branching Logic (Show field only if...)']):
print(
f"\n## BRANCHING: Day {day} - {row['Variable / Field Name']} is empty (expected branching)")
fileOut.write(
f"\n\n## BRANCHING: Day {day} - {row['Variable / Field Name']} is empty (expected branching)")
elif not len(currRefVar):
print(
f"\n## BRANCHING: DAY {day} - {row['Variable / Field Name']} does not have a matching variable in day 1")
fileOut.write(
f"\n\n## BRANCHING: DAY {day} - {row['Variable / Field Name']} does not have a matching variable in day 1")
else:
currDict = checkReferenceBranching(row["Branching Logic (Show field only if...)"])
refDict = checkReferenceBranching(currRefVar["Branching Logic (Show field only if...)"])
if currDict != refDict:
print(
f"\n## BRANCHING: Day {day} - {row['Variable / Field Name']} Branching Logic does not match day 1")
print(f"\n\tDay {day} - {currDict}")
print(f"\n\tDay 1 - {refDict}")
fileOut.write(
f"\n\n## BRANCHING: Day {day} - {row['Variable / Field Name']} Branching Logic does not match day 1")
fileOut.write(f"\n\n\tDay {day} - {currDict}")
fileOut.write(f"\n\n\tDay 1 - {refDict}")
if not checkSelfBranching(row["Branching Logic (Show field only if...)"], day):
print(
f"\n## BRANCHING: Day {day} - {row['Variable / Field Name']} Inconsistent branching requirements (not contained within day):\n\t{row['Branching Logic (Show field only if...)']}")
fileOut.write(
f"\n\n## BRANCHING: Day {day} - {row['Variable / Field Name']} Inconsistent branching requirements (not contained within day):\n\t{row['Branching Logic (Show field only if...)']}")
currField = row['Field Label']
if not pd.isna(currField):
currField = normalizeEntry(currField)
if "tuesday" in currField and day % 7 == 3:
currField = currField.replace("tuesday", "sunday")
elif "wednesday" in currField and day % 7 == 4:
currField = currField.replace("wednesday", "sunday")
elif "thursday" in currField and day % 7 == 5:
currField = currField.replace("thursday", "sunday")
elif "friday" in currField and day % 7 == 6:
currField = currField.replace("friday", "sunday")
elif "saturday" in currField and day % 7 == 0:
currField = currField.replace("saturday", "sunday")
elif "monday" in currField and day % 7 == 2:
currField = currField.replace("monday", "sunday")
if "martes" in currField and day % 7 == 3:
currField = currField.replace("martes", "domingo")
elif "miercoles" in currField and day % 7 == 4:
currField = currField.replace("miercoles", "domingo")
elif "jueves" in currField and day % 7 == 5:
currField = currField.replace("jueves", "domingo")
elif "viernes" in currField and day % 7 == 6:
currField = currField.replace("viernes", "domingo")
elif "sabado" in currField and day % 7 == 0:
currField = currField.replace("sabado", "domingo")
elif "lunes" in currField and day % 7 == 2:
currField = currField.replace("lunes", "domingo")
if "miércoles" in currField and day % 7 == 4:
currField = currField.replace("miércoles", "domingo")
elif "sábado" in currField and day % 7 == 0:
currField = currField.replace("sábado", "domingo")
if "Tuesday" in currField and day % 7 == 3:
currField = currField.replace("Tuesday", "Sunday")
elif "Wednesday" in currField and day % 7 == 4:
currField = currField.replace("Wednesday", "Sunday")
elif "Thursday" in currField and day % 7 == 5:
currField = currField.replace("Thursday", "Sunday")
elif "Friday" in currField and day % 7 == 6:
currField = currField.replace("Friday", "Sunday")
elif "Saturday" in currField and day % 7 == 0:
currField = currField.replace("Saturday", "Sunday")
elif "Monday" in currField and day % 7 == 2:
currField = currField.replace("Monday", "Sunday")
if "Martes" in currField and day % 7 == 3:
currField = currField.replace("Martes", "Domingo")
elif "Miercoles" in currField and day % 7 == 4:
currField = currField.replace("Miercoles", "Domingo")
elif "Jueves" in currField and day % 7 == 5:
currField = currField.replace("Jueves", "Domingo")
elif "Viernes" in currField and day % 7 == 6:
currField = currField.replace("Viernes", "Domingo")
elif "Sabado" in currField and day % 7 == 0:
currField = currField.replace("Sabado", "Domingo")
elif "Lunes" in currField and day % 7 == 2:
currField = currField.replace("Lunes", "Domingo")
elif "Miércoles" in currField and day % 7 == 4:
currField = currField.replace("Miércoles", "Domingo")
elif "Sábado" in currField and day % 7 == 0:
currField = currField.replace("Sábado", "Domingo")
refField = currRefVar['Field Label']
if not pd.isna(refField):
refField = normalizeEntry(refField)
if pd.isna(currField) and not pd.isna(refField):
print(
f"\n## FIELD LABEL: Field label for Day {day} - t{day}{currRefVar['Variable / Field Name']} is empty (expected value)")
fileOut.write(
f"\n\n## FIELD LABEL: Day {day} - {currRefVar['Field Label']} is empty (expected value)")
elif currField != refField:
print(
f"\n## FIELD LABEL: Day {day} - t{day}{currRefVar['Variable / Field Name']} does not match expected field label from Day 1 t1{currRefVar['Variable / Field Name']}")
print(f"\n# Day 1 t1{currRefVar['Variable / Field Name']} Field Label:\n{refField}\n# Day {day} t{day}{currRefVar['Variable / Field Name']} Field Label:\n{currField}")
fileOut.write(
f"\n\n## FIELD LABEL: Day {day} - t{day}{currRefVar['Variable / Field Name']} does not match expected field label from Day 1 t1{currRefVar['Variable / Field Name']}")
fileOut.write(f"\n\n# Day 1 t1{currRefVar['Variable / Field Name']} Field Label:\n{refField}\n# Day {day} t{day}{currRefVar['Variable / Field Name']} Field Label:\n{currField}")
currChoice = row["Choices, Calculations, OR Slider Labels"]
refChoice = currRefVar["Choices, Calculations, OR Slider Labels"]
if not pd.isna(currChoice):
currChoice = normalizeEntry(currChoice)
if not pd.isna(refChoice):
refChoice = normalizeEntry(refChoice)
if pd.isna(currChoice) and not pd.isna(refChoice):
print(f"\n## CHOICES: Choices for Day {day} - {row['Variable / Field Name']} is empty (expected choices)")
fileOut.write(
f"\n\n## CHOICES: Choices for Day {day} - {row['Variable / Field Name']} is empty (expected choices)")
elif pd.isna(currChoice) and pd.isna(refChoice):
continue
elif currChoice != refChoice:
print(f"\n## CHOICES: Day {day} - {row['Variable / Field Name']} does not match expected choices from Day 1 t1{currRefVar['Variable / Field Name']}")
print(f"\n# Day 1 t1{row['Variable / Field Name']} choices:\n{refChoice}\n# Day {day} t{day}{row['Variable / Field Name']} choices:\n{currChoice}")
fileOut.write(
f"\n\n## CHOICES: Day {day} - {row['Variable / Field Name']} does not match expected choices from Day 1 t1{currRefVar['Variable / Field Name']}")
fileOut.write(
f"\n\n# Day 1 t1{row['Variable / Field Name']} choices:\n{refChoice}\n# Day {day} t{day}{row['Variable / Field Name']} choices:\n{currChoice}")
print("------------------------------------------------------------")
fileOut.write("\n------------------------------------------------------------")
print(f"\nOutput written to {absFilepath}\n")
|
StarcoderdataPython
|
54816
|
<filename>app.py
import logging
import os
from datetime import datetime, timedelta
import json
import sys
from flask import Flask, request, make_response, jsonify
from raven.contrib.flask import Sentry
import app_config
import hmac
from hashlib import sha1
from rq import Queue
from rq.job import Job
from worker import conn
from tasks import run_scripts
from handlers import parse_post
app = Flask(__name__)
app.config.from_object(app_config)
app.url_map.strict_slashes = False
sentry = Sentry(app, dsn=app_config.SENTRY_DSN)
q = Queue(connection=conn)
# expects GeoJSON object as a string
# client will need to use JSON.stringify() or similar
class AppError(Exception):
def __init__(self, message, status_code=None, payload=None):
Exception.__init__(self)
self.message = message
if status_code is not None:
self.status_code = status_code
self.payload = payload
def to_dict(self):
rv = dict(self.payload or ())
rv['message'] = self.message
return rv
class InvalidUsage(AppError):
status_code = 400
pass
class ServerError(AppError):
status_code = 500
pass
class PayloadException(InvalidUsage):
pass
@app.errorhandler(AppError)
def handle_payload_exception(error):
response = jsonify(error.to_dict())
response.status_code = error.status_code
return response
@app.route('/hooks/<site_type>/<branch_name>', methods=['POST'])
def execute(site_type, branch_name):
post = request.get_json()
content_type = request.headers.get('Content-Type')
if content_type != 'application/json':
raise ServerError('handling {content_type} is not implemented'.format(content_type=content_type),
status_code=501)
if app_config.SECRET:
try:
gh_signature = request.headers['X_HUB_SIGNATURE']
except KeyError as e:
raise InvalidUsage('HTTP header X-Hub-Signature is missing', status_code=403)
sha_name, signature = gh_signature.split('=')
if sha_name != 'sha1':
raise ServerError('{sha_name} not implemented'.format(sha_name=sha_name), 501)
mac = hmac.new(bytes(app_config.SECRET, 'latin1'), msg=request.data, digestmod='sha1')
if not hmac.compare_digest(str(mac.hexdigest()), str(signature)):
raise InvalidUsage('hash digests don\'t match', 403)
resp = {'status': 'ok'}
post, hostname = parse_post(post, branch_name)
giturl = 'git@{server}:{owner}/{repo}.git'\
.format(server=app_config.GH_SERVER,
owner=post['owner'],
repo=post['repo'])
# source
source = '{temp}/{owner}/{repo}/{branch}/code'\
.format(temp=app_config.TEMP,
owner=post['owner'],
repo=post['repo'],
branch=post['branch'])
build = '{temp}/{owner}/{repo}/{branch}/site'\
.format(temp=app_config.TEMP,
owner=post['owner'],
repo=post['repo'],
branch=post['branch'])
venv_bin_dir = os.path.dirname(sys.executable)
if hostname and app_config.CONFIG_NGINX:
q.enqueue_call(func=run_scripts, args = (app_config.NGINX_SCRIPT, [hostname, post['repo'], venv_bin_dir]))
if post:
script_args = [post['repo'], post['branch'], post['owner'], giturl, source, build]
try:
scripts = app_config.SCRIPTS[site_type]
except KeyError:
raise ServerError("No script file defined for '{0}' in config.".format(site_type),
status_code=501)
else:
job = q.enqueue_call(
func=run_scripts, args = (scripts, script_args), result_ttl = 5000
)
response = make_response(json.dumps(resp), 202)
response.headers['Content-Type'] = 'application/json'
return response
# INIT
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5003))
app.run(host='0.0.0.0', port=port, debug=True)
if __name__ != "__main__":
gunicorn_logger = logging.getLogger("gunicorn.error")
app.logger.handlers = gunicorn_logger.handlers
app.logger.setLevel(gunicorn_logger.level)
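# Hedged testing note (added): when SECRET is set, a caller has to sign the raw
# request body the same way the handler verifies it, e.g. (illustrative only):
#
#   import hmac, json
#   body = json.dumps(payload).encode()
#   sig = 'sha1=' + hmac.new(SECRET.encode('latin1'), body, 'sha1').hexdigest()
#   # send with headers {'Content-Type': 'application/json', 'X-Hub-Signature': sig}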
|
StarcoderdataPython
|
3361176
|
global glb_dict
glb_dict = {}
def addItem(key, value):
glb_dict[key]=value
def getItem(key):
return glb_dict[key]
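# Usage (added): e.g. addItem('answer', 42) followed by getItem('answer') -> 42.
# Note that the module-level `global glb_dict` statement is redundant but harmless.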
|
StarcoderdataPython
|
3361027
|
# Copyright 2014 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
# Copyright 2017 - Brocade Communications Systems, Inc.
# Copyright 2018 - Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from osprofiler import profiler
import threading
from mistral import context as auth_ctx
from mistral.engine import base as eng
from mistral.event_engine import base as evt_eng
from mistral.executors import base as exe
from mistral.notifiers import base as notif
from mistral.rpc import base
LOG = logging.getLogger(__name__)
_ENGINE_CLIENT = None
_ENGINE_CLIENT_LOCK = threading.Lock()
_EXECUTOR_CLIENT = None
_EXECUTOR_CLIENT_LOCK = threading.Lock()
_EVENT_ENGINE_CLIENT = None
_EVENT_ENGINE_CLIENT_LOCK = threading.Lock()
_NOTIFIER_CLIENT = None
_NOTIFIER_CLIENT_LOCK = threading.Lock()
def cleanup():
"""Clean all the RPC clients.
Intended to be used by tests to recreate all RPC related objects.
Another usage is forking a child API process. In this case we must
recreate all RPC objects so that they function properly.
"""
global _ENGINE_CLIENT
global _EXECUTOR_CLIENT
global _EVENT_ENGINE_CLIENT
global _NOTIFIER_CLIENT
_ENGINE_CLIENT = None
_EXECUTOR_CLIENT = None
_EVENT_ENGINE_CLIENT = None
_NOTIFIER_CLIENT = None
base.cleanup()
def get_engine_client():
global _ENGINE_CLIENT
global _ENGINE_CLIENT_LOCK
with _ENGINE_CLIENT_LOCK:
if not _ENGINE_CLIENT:
_ENGINE_CLIENT = EngineClient(cfg.CONF.engine)
return _ENGINE_CLIENT
def get_executor_client():
global _EXECUTOR_CLIENT
global _EXECUTOR_CLIENT_LOCK
with _EXECUTOR_CLIENT_LOCK:
if not _EXECUTOR_CLIENT:
_EXECUTOR_CLIENT = ExecutorClient(cfg.CONF.executor)
return _EXECUTOR_CLIENT
def get_event_engine_client():
global _EVENT_ENGINE_CLIENT
global _EVENT_ENGINE_CLIENT_LOCK
with _EVENT_ENGINE_CLIENT_LOCK:
if not _EVENT_ENGINE_CLIENT:
_EVENT_ENGINE_CLIENT = EventEngineClient(cfg.CONF.event_engine)
return _EVENT_ENGINE_CLIENT
def get_notifier_client():
global _NOTIFIER_CLIENT
global _NOTIFIER_CLIENT_LOCK
with _NOTIFIER_CLIENT_LOCK:
if not _NOTIFIER_CLIENT:
_NOTIFIER_CLIENT = NotifierClient(cfg.CONF.notifier)
return _NOTIFIER_CLIENT
class EngineClient(eng.Engine):
"""RPC Engine client."""
def __init__(self, rpc_conf_dict):
"""Constructs an RPC client for engine.
:param rpc_conf_dict: Dict containing RPC configuration.
"""
self._client = base.get_rpc_client_driver()(rpc_conf_dict)
@base.wrap_messaging_exception
def start_workflow(self, wf_identifier, wf_namespace='', wf_ex_id=None,
wf_input=None, description='', **params):
"""Starts workflow sending a request to engine over RPC.
:param wf_identifier: Workflow identifier.
:param wf_namespace: Workflow namespace.
:param wf_input: Workflow input data as a dictionary.
:param wf_ex_id: Workflow execution id. If passed, it will be set
in the new execution object.
:param description: Execution description.
:param params: Additional workflow type specific parameters.
:return: Workflow execution.
"""
return self._client.sync_call(
auth_ctx.ctx(),
'start_workflow',
wf_identifier=wf_identifier,
wf_namespace=wf_namespace,
wf_ex_id=wf_ex_id,
wf_input=wf_input or {},
description=description,
params=params
)
@base.wrap_messaging_exception
def start_action(self, action_name, action_input,
description=None, **params):
"""Starts action sending a request to engine over RPC.
:param action_name: Action name.
:param action_input: Action input data as a dictionary.
:param description: Execution description.
:param params: Additional options for action running.
:return: Action execution.
"""
return self._client.sync_call(
auth_ctx.ctx(),
'start_action',
action_name=action_name,
action_input=action_input or {},
description=description,
params=params
)
@base.wrap_messaging_exception
@profiler.trace('engine-client-on-action-complete', hide_args=True)
def on_action_complete(self, action_ex_id, result, wf_action=False,
async_=False):
"""Conveys action result to Mistral Engine.
This method should be used by clients of Mistral Engine to update
the state of an action execution once action has executed. One of
the clients of this method is Mistral REST API server that receives
action result from the outside action handlers.
Note: calling this method serves an event notifying Mistral that
it possibly needs to move the workflow on, i.e. run other workflow
tasks for which all dependencies are satisfied.
:param action_ex_id: Action execution id.
:param result: Action execution result.
:param wf_action: If True it means that the given id points to
a workflow execution rather than action execution. It happens
when a nested workflow execution sends its result to a parent
workflow.
:param async_: If True, run action in asynchronous mode (w/o waiting
for completion).
:return: Action(or workflow if wf_action=True) execution object.
"""
call = self._client.async_call if async_ else self._client.sync_call
return call(
auth_ctx.ctx(),
'on_action_complete',
action_ex_id=action_ex_id,
result=result,
wf_action=wf_action
)
@base.wrap_messaging_exception
@profiler.trace('engine-client-on-action-update', hide_args=True)
def on_action_update(self, action_ex_id, state, wf_action=False,
async_=False):
"""Conveys update of action state to Mistral Engine.
This method should be used by clients of Mistral Engine to update
the state of an action execution once action has executed.
Note: calling this method serves an event notifying Mistral that it
may need to change the state of the parent task and workflow. Use
on_action_complete if the action execution reached completion state.
:param action_ex_id: Action execution id.
        :param state: Updated state.
:param wf_action: If True it means that the given id points to
a workflow execution rather than action execution. It happens
when a nested workflow execution sends its result to a parent
workflow.
:param async_: If True, run action in asynchronous mode (w/o waiting
for completion).
:return: Action(or workflow if wf_action=True) execution object.
"""
call = self._client.async_call if async_ else self._client.sync_call
return call(
auth_ctx.ctx(),
'on_action_update',
action_ex_id=action_ex_id,
state=state,
wf_action=wf_action
)
@base.wrap_messaging_exception
def pause_workflow(self, wf_ex_id):
"""Stops the workflow with the given execution id.
:param wf_ex_id: Workflow execution id.
:return: Workflow execution.
"""
return self._client.sync_call(
auth_ctx.ctx(),
'pause_workflow',
wf_ex_id=wf_ex_id
)
@base.wrap_messaging_exception
def rerun_workflow(self, task_ex_id, reset=True, env=None):
"""Rerun the workflow.
This method reruns workflow with the given execution id
at the specific task execution id.
:param task_ex_id: Task execution id.
:param reset: If true, then reset task execution state and purge
action execution for the task.
:param env: Environment variables to update.
:return: Workflow execution.
"""
return self._client.sync_call(
auth_ctx.ctx(),
'rerun_workflow',
task_ex_id=task_ex_id,
reset=reset,
env=env
)
@base.wrap_messaging_exception
def resume_workflow(self, wf_ex_id, env=None):
"""Resumes the workflow with the given execution id.
:param wf_ex_id: Workflow execution id.
:param env: Environment variables to update.
:return: Workflow execution.
"""
return self._client.sync_call(
auth_ctx.ctx(),
'resume_workflow',
wf_ex_id=wf_ex_id,
env=env
)
@base.wrap_messaging_exception
def stop_workflow(self, wf_ex_id, state, message=None):
"""Stops workflow execution with given status.
Once stopped, the workflow is complete with SUCCESS or ERROR,
and can not be resumed.
:param wf_ex_id: Workflow execution id
:param state: State assigned to the workflow: SUCCESS or ERROR
:param message: Optional information string
:return: Workflow execution, model.Execution
"""
return self._client.sync_call(
auth_ctx.ctx(),
'stop_workflow',
wf_ex_id=wf_ex_id,
state=state,
message=message
)
@base.wrap_messaging_exception
def rollback_workflow(self, wf_ex_id):
"""Rolls back the workflow with the given execution id.
:param wf_ex_id: Workflow execution id.
:return: Workflow execution.
"""
return self._client.sync_call(
auth_ctx.ctx(),
'rollback_workflow',
wf_ex_id=wf_ex_id
)
@base.wrap_messaging_exception
def report_running_actions(self, action_ex_ids):
"""Receives action execution heartbeats.
:param action_ex_ids: Action execution ids.
"""
return self._client.async_call(
auth_ctx.ctx(),
'report_running_actions',
action_ex_ids=action_ex_ids
)
class ExecutorClient(exe.Executor):
"""RPC Executor client."""
def __init__(self, rpc_conf_dict):
"""Constructs an RPC client for the Executor."""
self.topic = cfg.CONF.executor.topic
self._client = base.get_rpc_client_driver()(rpc_conf_dict)
@profiler.trace('executor-client-run-action')
def run_action(self, action_ex_id, action_cls_str, action_cls_attrs,
params, safe_rerun, execution_context, redelivered=False,
target=None, async_=True, timeout=None):
"""Sends a request to run action to executor.
:param action_ex_id: Action execution id.
:param action_cls_str: Action class name.
:param action_cls_attrs: Action class attributes.
:param params: Action input parameters.
:param safe_rerun: If true, action would be re-run if executor dies
during execution.
:param execution_context: A dict of values providing information about
the current execution.
:param redelivered: Tells if given action was run before on another
executor.
:param target: Target (group of action executors).
:param async_: If True, run action in asynchronous mode (w/o waiting
for completion).
:param timeout: a period of time in seconds after which execution of
action will be interrupted
:return: Action result.
"""
rpc_kwargs = {
'action_ex_id': action_ex_id,
'action_cls_str': action_cls_str,
'action_cls_attrs': action_cls_attrs,
'params': params,
'safe_rerun': safe_rerun,
'execution_context': execution_context,
'timeout': timeout
}
rpc_client_method = (self._client.async_call
if async_ else self._client.sync_call)
LOG.debug(
"Sending an action to executor [action_ex_id=%s, action_cls=%s]",
action_ex_id, action_cls_str
)
return rpc_client_method(auth_ctx.ctx(), 'run_action', **rpc_kwargs)
class EventEngineClient(evt_eng.EventEngine):
"""RPC EventEngine client."""
def __init__(self, rpc_conf_dict):
"""Constructs an RPC client for the EventEngine service."""
self._client = base.get_rpc_client_driver()(rpc_conf_dict)
def create_event_trigger(self, trigger, events):
return self._client.async_call(
auth_ctx.ctx(),
'create_event_trigger',
trigger=trigger,
events=events,
fanout=True,
)
def delete_event_trigger(self, trigger, events):
return self._client.async_call(
auth_ctx.ctx(),
'delete_event_trigger',
trigger=trigger,
events=events,
fanout=True,
)
def update_event_trigger(self, trigger):
return self._client.async_call(
auth_ctx.ctx(),
'update_event_trigger',
trigger=trigger,
fanout=True,
)
class NotifierClient(notif.Notifier):
"""RPC Notifier client."""
def __init__(self, rpc_conf_dict):
"""Constructs an RPC client for the Notifier service."""
self._client = base.get_rpc_client_driver()(rpc_conf_dict)
def notify(self, ex_id, data, event, timestamp, publishers):
try:
return self._client.async_call(
auth_ctx.ctx(),
'notify',
ex_id=ex_id,
data=data,
event=event,
timestamp=timestamp,
publishers=publishers
)
except Exception:
LOG.exception('Unable to send notification.')
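# Hedged usage sketch (added): inside a Mistral service that has already loaded its
# configuration, callers typically go through the module-level accessors above,
# e.g. (workflow name and inputs are illustrative only):
#
#   engine = get_engine_client()
#   wf_ex = engine.start_workflow('my_workflow', wf_input={'x': 1},
#                                 description='demo run')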
|
StarcoderdataPython
|
3380927
|
import sys
import numpy as np
from scipy.special import erfc, erfcinv, expm1
def trandn(l,u):
## truncated normal generator
# * efficient generator of a vector of length(l)=length(u)
# from the standard multivariate normal distribution,
# truncated over the region [l,u];
# infinite values for 'u' and 'l' are accepted;
# * Remark:
# If you wish to simulate a random variable
# 'Z' from the non-standard Gaussian N(m,s^2)
# conditional on l<Z<u, then first simulate
# X=trandn((l-m)/s,(u-m)/s) and set Z=m+s*X;
# Reference:
# Botev, <NAME>. (2016). "The normal law under linear restrictions:
# simulation and estimation via minimax tilting". Journal of the
# Royal Statistical Society: Series B (Statistical Methodology).
# doi:10.1111/rssb.12162
l = np.asarray(l)
u = np.asarray(u)
l = l.ravel()
u = u.ravel() # make 'l' and 'u' column vectors
if len(l) != len(u):
print('Truncation limits have to be vectors of the same length')
sys.exit()
x = np.empty(len(l))
    a = .66 # threshold for switching between methods
# three cases to consider:
# case 1: a<l<u
I = l>a
if np.any(I):
tl=l[I]
tu=u[I]
x[I]=ntail(tl,tu)
# case 2: l<u<-a
J = u<-a
if np.any(J):
tl=-u[J]
tu=-l[J]
x[J] = -ntail(tl,tu)
# case 3: otherwise use inverse transform or accept-reject
I=~(I|J);
if np.any(I):
tl=l[I]
tu=u[I]
x[I]=tn(tl,tu)
return x
#################################################################
def ntail(l,u):
# samples a column vector of length=length(l)=length(u)
# from the standard multivariate normal distribution,
# truncated over the region [l,u], where l>0 and
# l and u are column vectors;
# uses acceptance-rejection from Rayleigh distr.
# similar to Marsaglia (1964);
c = l**2/2
n = len(l)
f = expm1(c-u**2/2)
x = c - np.log(1+np.random.uniform(size=n)*f); # sample using Rayleigh
# keep list of rejected
I = np.random.uniform(size=n)**2*x > c
while np.any(I): # while there are rejections
cy = c[I] # find the thresholds of rejected
y = cy - np.log(1+np.random.uniform(size=len(cy))*f[I])
idx = (np.random.uniform(size=len(cy))**2)*y < cy # accepted
tmp = I.copy()
I[tmp] = idx # make the list of elements in x to update
x[I] = y[idx] # store the accepted
I[tmp] = np.logical_not(idx) # remove accepted from list
# while d>0: # while there are rejections
# cy = c[I] # find the thresholds of rejected
# y = cy - np.log(1+np.random.uniform(size=d)*f[I])
# idx = (np.random.uniform(size=d)**2)*y < cy # accepted
# x[I[idx]] = y[idx] # store the accepted
# I = I[~idx] # remove accepted from list
# d = len(I) # number of rejected
x = np.sqrt(2*x); # this Rayleigh transform can be delayed till the end
return x
##################################################################
def tn(l,u):
# samples a column vector of length=length(l)=length(u)
# from the standard multivariate normal distribution,
# truncated over the region [l,u], where -a<l<u<a for some
# 'a' and l and u are column vectors;
# uses acceptance rejection and inverse-transform method;
tol = 2 # controls switch between methods
# threshold can be tuned for maximum speed for each platform
# case: abs(u-l)>tol, uses accept-reject from randn
I = np.abs(u-l)>tol
x = l
if np.any(I):
tl=l[I]
tu=u[I]
x[I]=trnd(tl,tu)
# case: abs(u-l)<tol, uses inverse-transform
I=~I
if np.any(I):
tl=l[I]
tu=u[I]
pl = erfc(tl/np.sqrt(2))/2
pu = erfc(tu/np.sqrt(2))/2
x[I] = np.sqrt(2)*erfcinv(2*(pl-(pl-pu)
*np.random.uniform(size=len(tl))))
return x
#############################################################
def trnd(l,u):
# uses acceptance rejection to simulate from truncated normal
x=np.random.randn(len(l)) # sample normal
# keep list of rejected
I = np.logical_or(x<l ,x>u)
while np.any(I): # while there are rejections
ly = l[I] # find the thresholds of rejected
uy = u[I]
y = np.random.randn(len(ly))
idx = np.logical_and(y>ly,y<uy) # accepted
tmp = I.copy()
I[tmp] = idx # make the list of elements in x to update
x[I] = y[idx] # store the accepted
I[tmp] = np.logical_not(idx) # remove accepted from list
# d = len(I)
# while d>0: # while there are rejections
# ly = l[I] # find the thresholds of rejected
# uy = u[I]
# y = np.random.randn(len(ly))
# idx = np.logical_and(y>ly,y<uy) # accepted
# x[I[idx]] = y[idx] # store the accepted
# I = I[~idx] # remove accepted from list
# d = len(I) # number of rejected
return x
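if __name__ == "__main__":
    # Hedged demo (added): following the remark in trandn's header, a non-standard
    # Gaussian N(m, s^2) truncated to [l, u] is sampled by rescaling the bounds.
    m, s = 1.0, 2.0
    l = np.zeros(5)
    u = np.full(5, 3.0)
    Z = m + s * trandn((l - m) / s, (u - m) / s)
    print(Z)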
|
StarcoderdataPython
|
1747381
|
<reponame>nicthib/pyanthem
import os, random, sys, time, csv, pickle, re, pkg_resources, mido, h5py
os.environ['PYGAME_HIDE_SUPPORT_PROMPT']="hide"
from tkinter import *
from tkinter.ttk import *
from tkinter import filedialog as fd
from tkinter import simpledialog as sd
from ttkthemes import ThemedTk
from scipy.io import loadmat, savemat, whosmat
from scipy.optimize import nnls
from scipy.interpolate import interp1d
from scipy.io.wavfile import write as wavwrite
from sklearn.cluster import KMeans
from pygame.mixer import init, quit, get_init, set_num_channels, pre_init, music
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.ticker as tkr
import matplotlib.cm as cmaps # https://matplotlib.org/gallery/color/colormap_reference.html
import numpy as np
from numpy.matlib import repmat
try:
from pyanthem.pyanthem_vars import *
except:
from pyanthem_vars import *
from google_drive_downloader import GoogleDriveDownloader as gdd
import subprocess as sp
import PIL.Image as Image
def init_entry(fn):
'''
Generalized version of StringVar/DoubleVar followed by set()
'''
if isinstance(fn, str) or fn is None:
entry=StringVar()
else:
entry=DoubleVar()
entry.set(fn)
return entry
def stack_files(files,fmts,fn):
'''
Stacks .mp4 videos horizontally, and merges wav files
the fmts argument is a list of three possible formats: 'a', 'v', or 'av'
Videos must match in height.
'''
nv=len([fmt for fmt in fmts if 'v' in fmt])
na=len([fmt for fmt in fmts if 'a' in fmt])
vf=[i for i,fmt in enumerate(fmts) if 'v' in fmt]
af=[i for i,fmt in enumerate(fmts) if 'a' in fmt]
# 3 cases: 1) Videos and audio, 2) Only videos, 3) Only audio
filter_command='"'
map_command=''
if nv > 1:
for v in vf:
filter_command+='[{}:v]'.format(v)
filter_command+='hstack=inputs={}[v]'.format(nv)
map_command+=' -map "[v]" '
elif nv == 1:
map_command += '-c:v copy -map {}:v:0'.format(vf[0])
if na > 1:
# Seperate complex filters if video is being merged
if nv > 1:
filter_command+=';'
for a in af:
filter_command+='[{}:a]'.format(a)
filter_command+='amerge=inputs={}[a]'.format(na)
map_command+=' -map "[a]" '
elif na == 1:
map_command += ' -c:a aac -map {}:a:0 '.format(af[0])
filter_command+='"'
in_command=''
for i in range(len(files)):
in_command += ' -i '+files[i]
full_command = 'ffmpeg -y '+in_command+' -filter_complex '+filter_command+' '+map_command+' -ac 2 -vsync 0 '+fn
print(full_command)
os.system(full_command)
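# Hedged example call (added): horizontally stack two equally-tall videos and merge
# one audio track into the result, e.g.
#   stack_files(['left.mp4', 'right.mp4', 'mix.wav'], ['v', 'v', 'a'], 'combined.mp4')
# (all filenames above are placeholders).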
def uiopen(title,filetypes):
root=Tk()
root.withdraw()
file_in=os.path.normpath(fd.askopenfilename(title=title,filetypes=filetypes))
root.update()
root.destroy()
return file_in
def run(display=True):
'''
Main command to run GUI or CLI
'''
root=GUI(display=display)
sys.ps1='♫ '
if display:
root.mainloop()
else:
print('Welcome to pyanthem v{}!'.format(pkg_resources.require('pyanthem')[0].version))
return root
class GUI(ThemedTk):
def __init__(self,display=True):
'''
Initializes the GUI instance. display=True runs the Tk.__init__(self)
command, while display=False skips that and visual initialization, keeping
the GUI 'hidden'
'''
self.display=display
self.sf_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),'anthem_soundfonts')
if not os.path.isdir(self.sf_path):
print('Initializing soundfont library...')
os.mkdir(self.sf_path)
gdd.download_file_from_google_drive(file_id=sound_font,dest_path=\
os.path.join(os.path.join(os.path.dirname(os.path.abspath(__file__)),'anthem_soundfonts'),'font.sf2'),showsize=True)
if self.display:
ThemedTk.__init__(self)
self.set_theme('clearlooks')
self.initGUI()
def quit(self,event=None):
'''
		Quits the GUI instance. In notebook/IPython environments the window is destroyed instead of calling sys.exit()
'''
try:
# This raises a NameError exception in a notebook env, since
# sys.exit() is not an appropriate method
get_ipython().__class__.__name__
self.destroy()
except NameError:
sys.exit()
def message(self,msg):
'''
Sends message through print if no GUI, through self.status if GUI is running
'''
if self.display:
self.status['text']=msg
else:
print(msg)
def check_data(self):
'''
Checks to make sure data is loaded.
'''
if hasattr(self,'data') and self.cfg['save_path'] is not None:
return True
else:
self.message('Error: No dataset has been loaded or save_path is empty.')
return False
def self_to_cfg(self):
'''
This function is necessary to allow command-line access of the GUI functions.
StringVar() and IntVar() allow for dynamic, quick field updating and access,
		but cannot be used outside of a mainloop or be pickled. For this reason, I convert
		all StringVars and IntVars to a new dict called 'self.cfg', which can be accessed
		outside the GUI and dumped to a pickle file, which essentially "saves" the GUI.
'''
		self.cfg={k: getattr(self,k).get() if self_fns[k] == 'entry' else getattr(self,k) for k in self_fns}
def dump_cfg(self):
'''
Saves config file.
'''
file_out=os.path.join(self.cfg['save_path'],self.cfg['file_out'])+'_cfg.p'
pickle.dump(self.cfg,open(file_out, "wb"))
self.message(f'cfg file saved to {file_out}')
def load_data(self,filein=None):
'''
		Loads dataset from filein. Supports .mat and HDF5 (.h5/.hdf5) files.
'''
if filein is None:
filein=uiopen(title='Select mat or hdf5 file for import',filetypes=[('.mat files','*.mat'),('hdf5 files','*.h5'),('hdf5 files','*.hdf5')])
if filein=='.':
return
if filein.endswith('.mat'):
data_unk,var=loadmat(filein),whosmat(filein)
var = [v[0] for v in var]
elif filein.endswith('.hdf5') or filein.endswith('.h5'):
data_unk=h5py.File(filein, 'r')
			var = list(data_unk.keys())
data = {}
for k in var:
tmp_var = np.asarray(data_unk[k])
if k in ('__header__', '__version__', '__globals__'):
continue
elif len(tmp_var.flatten())==1:
data['fr']=float(tmp_var)
elif tmp_var.ndim==2:
data['H']=tmp_var
elif tmp_var.ndim==3:
data['W']=tmp_var
# Checks inner dimension match if both H and W present in file.
if ('H' in data and 'W' in data) and data['H'].shape[0] != data['W'].shape[-1]:
# try flipping dims
if ('H' in data and 'W' in data) and data['H'].T.shape[0] == data['W'].T.shape[-1]:
data['H'] = data['H'].T
data['W'] = data['W'].T
else:
self.message('Error: Inner or outer dimensions of W [shape={}] and H [shape={}] do not match!'.format(data['H'].shape, data['W'].shape))
return
if 'H' in data:
if 'W' in data:
data['W_shape']=data['W'].shape
data['W']=data['W'].reshape(data['W'].shape[0]*data['W'].shape[1],data['W'].shape[2])
if 'fr' not in data:
data['fr']=data['H'].shape[1]/60 # Forces length to be 1 minute!
self.data=data
if not self.display:
return self
else:
self.message('Error: .mat file incompatible. Please select a .mat file with three variables: W (3D), H (2D), and fr (1-element float)')
def load_GUI(self):
'''
GUI-addons for load_data. Prompts user with filedialog, assigns defaults and sets GUI fields.
'''
filein=uiopen(title='Select mat or hdf5 file for import',filetypes=[('.mat files','*.mat'),('hdf5 files','*.h5'),('hdf5 files','*.hdf5')])
if filein=='.':
return
self.load_data(filein)
if not hasattr(self,'data'):
return
self.data['H_pp']=self.data['H']
self.data['H_fp']=self.data['H']
self.fr.set(self.data['fr'])
if 'W' in self.data:
self.data['W_pp']=self.data['W']
self.file_in.set(os.path.splitext(os.path.split(filein)[1])[0])
# Set some defaults
self.file_out.set(self.file_in.get())
self.save_path.set(os.path.split(filein)[0])
Hstr='H' # for whatever reason, can't double nest quotations in an f-string :/
self.brightness.set(f'{float(f"{np.mean(self.data[Hstr])+np.std(self.data[Hstr]):.3g}"):g}')
self.threshold.set(f'{float(f"{np.mean(self.data[Hstr])+np.std(self.data[Hstr]):.3g}"):g}')
self.comps_to_show_arr=list(range(len(self.data['H'])))
self.init_plots()
self.process_H_W()
def load_config(self,filein=None):
'''
Loads .p file containing dict of parameters needed to create outputs. If display=True, sets GUI fields.
'''
if filein is None:
filein=uiopen(title='Select pickle file for import',filetypes=[('pickle file','*.p'),('pickle file','*.pkl'),('pickle file','*.pickle')])
if filein=='.':
return
with open(filein, "rb") as f:
self.cfg=pickle.load(f)
if self.display:
for key,value in self.cfg.items():
				if self_fns[key] == 'entry':
getattr(self,key).set(value)
else:
setattr(self,key,value)
self.refresh_GUI()
else:
return self
def refresh_GUI(self,event=None):
#self.init_plots()
if 'W_pp' in self.data:
if self.frameslider.get() > len(self.data['H_pp'].T): # This (usually) occurs when the user crops the dataset
self.frameslider.set(1)
self.frameslider['to']=int(len(self.data['H_pp'].T)-1)
self.imWH=self.Wax1.imshow((self.data['W_pp']@np.diag(self.data['H_pp'][:,int(self.frameslider.get())])@\
self.cmap[:,:-1]*(255/self.cfg['brightness'])).reshape(self.data['W_shape'][0],self.data['W_shape'][1],3)\
.clip(min=0,max=255).astype('uint8'))
self.imW=self.Wax2.imshow((self.data['W_pp']@self.cmap[:,:-1]*255/np.max(self.data['W_pp'])).\
reshape(self.data['W_shape'][0],self.data['W_shape'][1],3).clip(min=0,max=255).astype('uint8'))
self.imW.axes.set_aspect('equal')
self.imWH.axes.set_aspect('equal')
self.canvas_W.draw()
self.refresh_slider([])
if 'H_pp' in self.data:
Hstd=self.data['H_pp'].std()*3
if self.offsetH.get():
tmpH=self.data['H_pp'].T - repmat([w*Hstd for w in list(range(len(self.comps_to_show_arr)))],len(self.data['H_pp'].T),1)
else:
tmpH=self.data['H_pp'].T
self.Hax1.cla()
self.Hax1.set_title('Temporal Data (H)')
self.Hax1.plot(tmpH,linewidth=.5)
for i,j in enumerate(self.Hax1.lines):
j.set_color(self.cmap[i])
if not self.offsetH.get():
thresh_line=self.Hax1.plot(np.ones((len(self.data['H_pp'].T,)))*self.cfg['threshold'],linestyle='dashed',color='0',linewidth=1)
zero_line=self.Hax1.plot(np.zeros((len(self.data['H_pp'].T,))),linestyle='dashed',color='.5',linewidth=1)
self.legend=self.Hax1.legend((thresh_line[0],), ('Threshold',))
self.Hax1.set_xlim(0, len(self.data['H_pp'].T))
self.Hax1.set_ylim(np.min(tmpH), np.max(tmpH))
if self.offsetH.get():
self.Hax1.set(ylabel='Component #')
else:
self.Hax1.set(ylabel='Magnitude')
if self.audio_analog.get():
if len(self.data['H_pp'])>16:
self.audio_analog.set(0)
self.Hax2.imshow(self.data['H_fp'],interpolation='none',cmap=plt.get_cmap('gray'))
self.message('Error: Analog audio is currently limited to 16 components.')
else:
# I do not understand why I have to do it this way
tmp=self.Hax2.imshow(self.data['H_pp'],interpolation='none',cmap=plt.get_cmap('gray'))
tmp.set_clim(0, np.max(self.data['H_pp']))
else:
self.Hax2.imshow(self.data['H_fp'],interpolation='none',cmap=plt.get_cmap('gray'))
self.Hax2.xaxis.set_major_formatter(tkr.FuncFormatter(lambda x, pos: '{:.2g}'.format(x/self.cfg['fr'])))
if len(self.comps_to_show_arr) > 12:
yticks=np.arange(4,len(self.data['H_pp']),5)
yticklabels=np.arange(4,len(self.data['H_pp']),5)
else:
yticks=np.arange(0,len(self.data['H_pp']),1)
yticklabels=np.arange(0,len(self.data['H_pp']),1)
if self.offsetH.get():
self.Hax1.set(yticks=-yticks*Hstd,yticklabels=yticklabels)
self.Hax2.set(yticks=yticks,yticklabels=yticklabels)
self.Hax2.axes.set_aspect('auto')
self.canvas_H.draw()
def process_H_W(self):
'''
Core function of pyanthem. Applies all cfg settings to dataset, and
creates the note dict used for synthesis. Automatically calls
refresh_GUI() if display=True
'''
if self.display:
self.self_to_cfg()
self.message('Updating...')
self.update()
if self.cfg['comps_to_show']=='all':
self.comps_to_show_arr=list(range(len(self.data['H'])))
# regex expression which lazily checks for a bracketed expression containing numbers, colons and commas.
		elif re.match(r'^\[[0-9,: ]*\]$',self.cfg['comps_to_show']) is not None:
# This is a magic function which transforms bracketed string arrays to actual numpy arrays.
# Example: '[1,3,5:8]' --> array([1,3,5,6,7])
self.comps_to_show_arr=eval('np.r_'+self.cfg['comps_to_show'])
else:
self.message('For \'components to show\', please input indices with commas and colons enclosed by square brackets, or \'all\' for all components.')
return
self.data['H_pp']=self.data['H'][self.comps_to_show_arr,int(len(self.data['H'].T)*self.cfg['start_percent']/100):int(len(self.data['H'].T)*self.cfg['end_percent']/100)]
self.data['H_pp']=self.data['H_pp']+self.cfg['baseline']
if 'W' in self.data:
self.data['W_pp']=self.data['W'][:,self.comps_to_show_arr]
# make_keys()
self.keys,i=[],0
while len(self.keys) < len(self.data['H_pp']):
self.keys.extend([k+i+key_opts[self.cfg['key']]+octave_add_opts[self.cfg['octave_add']] for k in scale_keys[self.cfg['scale_type']]])
i+=12
self.keys=self.keys[:len(self.data['H_pp'])]
self.keys=[min(k,127) for k in self.keys] # Notes cannot be higher than 127
# Making note dict
true_fr=self.cfg['fr']*self.cfg['speed']/100
upsample=1000
ns=int(len(self.data['H_pp'].T)*upsample/true_fr)
t1=np.linspace(0,len(self.data['H_pp'].T)/self.cfg['fr'],len(self.data['H_pp'].T))
t2=np.linspace(0,len(self.data['H_pp'].T)/self.cfg['fr'],ns)
nchan=len(self.data['H_pp'])
Hmax=np.max(self.data['H_pp'])
self.data['H_fp']=np.zeros(np.shape(self.data['H_pp']))
self.data['H_pp'][self.data['H_pp'] < 0]=0
self.data['H_pp'][:,-1]=0
self.data['H_pp'][:,0]=0
self.nd=[]
for i in range(nchan):
H_rs=interp1d(t1,self.data['H_pp'][i,:])(t2)
H_b=H_rs.copy()
H_b[H_b<self.cfg['threshold']]=0
H_b[H_b>=self.cfg['threshold']]=1
TC=np.diff(H_b)
st=np.argwhere(TC==1)
en=np.argwhere(TC==-1)
st=np.ndarray.flatten(st).tolist()
en=np.ndarray.flatten(en).tolist()
for j in range(len(st)):
mag=np.max(H_rs[st[j]:en[j]])
self.data['H_fp'][i,int(st[j]*true_fr/upsample):int(en[j]*true_fr/upsample)]=mag
# Type, time, note, velocity
self.nd.append(['note_on',st[j],self.keys[i],int(mag * 127 / Hmax)])
self.nd.append(['note_off',en[j],self.keys[i],int(mag * 127 / Hmax)])
self.nd=sorted(self.nd, key=lambda x: x[1])
# Colormap
cmap=getattr(cmaps,self.cfg['cmapchoice'])
self.cmap=cmap(np.linspace(0,1,len(self.data['H_pp'])))
if self.display:
self.refresh_GUI()
def refresh_slider(self,event):
'''
Updates bottom left W plot with slider movement
'''
self.imWH.set_data((self.data['W_pp']@np.diag(self.data['H_pp'][:,int(self.frameslider.get())])@self.cmap[:,:-1]*(255/self.cfg['brightness'])).reshape(self.data['W_shape'][0],self.data['W_shape'][1],3).clip(min=0,max=255).astype('uint8'))
self.canvas_W.draw()
def preview_notes(self):
'''
Previews the self.keys list audibly and visually simultaneously.
'''
self.process_H_W()
self.message('Previewing notes...')
fn_font=os.path.join(os.path.dirname(os.path.abspath(__file__)),'anthem_soundfonts','font.sf2')
fn_midi=os.path.join(os.path.dirname(os.path.abspath(__file__)),'preview.mid')
fn_wav=os.path.join(os.path.dirname(os.path.abspath(__file__)),'preview.wav')
if get_init() is None: # Checks if pygame has initialized audio engine. Only needs to be run once per instance
pre_init(fs, -16, 2, 1024)
init()
set_num_channels(128) # We will never need more than 128...
mid=mido.MidiFile()
track=mido.MidiTrack()
mid.tracks.append(track)
mid.ticks_per_beat=1000
track.append(mido.MetaMessage('set_tempo', tempo=int(1e6)))
track.append(mido.Message('program_change', program=sound_presets[self.cfg['sound_preset']], time=0))
for i in range(len(self.keys)):
track.append(mido.Message('note_on', note=self.keys[i], velocity=100, time=250))
track.append(mido.Message('note_off', note=self.keys[i], time=250))
track.append(mido.Message('note_off', note=self.keys[i], time=500))
mid.save(fn_midi)
cmd='fluidsynth -ni {} -F {} -r {} {} {} '.format(self.cfg['fluidsynthextracommand'],fn_wav,fs,fn_font,fn_midi)
os.system(cmd)
music.load(fn_wav)
for i in range(len(self.keys)):
t=time.time()
self.imW.remove()
Wtmp=self.data['W_pp'][:,i]
cmaptmp=self.cmap[i,:-1]
self.imW=self.Wax2.imshow((Wtmp[:,None]@cmaptmp[None,:]*255/np.max(self.data['W_pp'])).reshape(self.data['W_shape'][0],self.data['W_shape'][1],3).clip(min=0,max=255).astype('uint8'))
self.canvas_W.draw()
self.update()
if i==0:
music.play(0)
time.sleep(.5-np.min(((time.time()-t),.5)))
time.sleep(.5)
music.unload()
try:
os.remove(fn_midi)
os.remove(fn_wav)
except OSError as e:
print("Failed with:", e.strerror)
self.refresh_GUI()
def write_audio(self):
'''
Writes audio either from .sf2 using fluidsynth or into raw file.
'''
if not self.check_data():
return
self.process_H_W()
fn_midi=os.path.join(self.cfg['save_path'],self.cfg['file_out'])+'.mid'
fn_wav=os.path.join(self.cfg['save_path'],self.cfg['file_out'])+'.wav'
fn_font=os.path.join(os.path.dirname(os.path.abspath(__file__)),'anthem_soundfonts','font.sf2')
mid=mido.MidiFile()
track=mido.MidiTrack()
mid.tracks.append(track)
mid.ticks_per_beat=1000
track.append(mido.MetaMessage('set_tempo', tempo=int(1e6)))
if not self.cfg['audio_analog']:
track.append(mido.Message('program_change', program=sound_presets[self.cfg['sound_preset']], time=0))
current_time=0
for i in range(len(self.nd)):
track.append(mido.Message(self.nd[i][0], time=int(self.nd[i][1]-current_time), note=self.nd[i][2], velocity=self.nd[i][3]))
current_time=self.nd[i][1]
mid.save(fn_midi)
else:
true_fr=self.cfg['fr']*self.cfg['speed']/100
upsample=100
ns=int(len(self.data['H_pp'].T)*upsample/true_fr)
t1=np.linspace(0,len(self.data['H_pp'].T)/self.cfg['fr'],len(self.data['H_pp'].T))
t2=np.linspace(0,len(self.data['H_pp'].T)/self.cfg['fr'],ns)
nchan=len(self.data['H_pp'])
Hmax=np.max(self.data['H_pp'])
H_rs=np.zeros((nchan,ns))
ticks=int(mid.ticks_per_beat/upsample)
for i in range(nchan):
H_rs[i,:]=interp1d(t1,self.data['H_pp'][i,:])(t2)
for chan in range(nchan):
track.append(mido.Message('program_change', channel=chan, program=sound_presets[self.cfg['sound_preset']], time=0))
track.append(mido.Message('note_on', channel=chan, note=self.keys[chan], time=0, velocity=127))
track.append(mido.Message('control_change', channel=chan, control=7, value=0, time=0))
for i in range(ns):
for chan in range(nchan):
if chan==0:
realticks=ticks
else:
realticks=0
track.append(mido.Message('control_change', channel=chan, control=7, value=int(H_rs[chan,i]*127/Hmax), time=realticks))
for chan in range(nchan):
track.append(mido.Message('note_off', channel=chan, note=self.keys[chan], time=0))
mid.save(fn_midi)
os.system('fluidsynth -ni {} -F {} -r {} {} {}'.format(self.cfg['fluidsynthextracommand'],fn_wav,fs,fn_font,fn_midi))
self.message(f'Audio file written to {self.cfg["save_path"]}')
return self
def write_video(self):
'''
Writes video file using self.data['H_pp'] and self.data['W_pp'] using ffmpeg.
We avoid using opencv because it is very slow in a conda environment
http://zulko.github.io/blog/2013/09/27/read-and-write-video-frames-in-python-using-ffmpeg/
'''
if not self.check_data():
return
self.process_H_W()
fn_vid=os.path.join(self.cfg['save_path'],self.cfg['file_out'])+'.mp4'
v_shape=self.data['W_shape'][::-1][1:] # Reverse because ffmpeg does hxw
command=['ffmpeg',
'-loglevel', 'warning', # Prevents excessive messages
'-hide_banner',
'-y', # Auto overwrite
'-f', 'image2pipe',
'-vcodec','png',
'-s', '{}x{}'.format(v_shape[0],v_shape[1]),
'-r', str(self.cfg['fr']*self.cfg['speed']/100),
'-i', '-', # The input comes from a pipe
'-an', # Tells FFMPEG not to expect any audio
'-q:v','2', # Quality
'-vcodec', 'mpeg4',
'-preset', 'ultrafast',
fn_vid]
pipe=sp.Popen(command, stdin=sp.PIPE)
nframes=len(self.data['H_pp'].T)
t0 = time.time()
for i in range(nframes):
frame=(self.data['W_pp']@np.diag(self.data['H_pp'][:,i])@self.cmap[:,:-1]*(255/self.cfg['brightness'])).reshape(self.data['W_shape'][0],self.data['W_shape'][1],3).clip(min=0,max=255).astype('uint8')
im=Image.fromarray(frame)
im.save(pipe.stdin, 'PNG')
if self.display and i%20==1:
self.status['text']=f'Writing video file, {i} out of {nframes} frames written, avg fps: {i/(time.time()-t0)}'
self.update()
elif not self.display and i%20==1:
print(f'Writing video file, {i} out of {nframes} frames written, avg fps: {i/(time.time()-t0)}',end='\r')
pipe.stdin.close()
pipe.wait()
self.message(f'Video file written to {self.cfg["save_path"]}')
return self
def merge(self):
'''
Merges video and audio with ffmpeg
'''
fn=os.path.join(self.cfg['save_path'],self.cfg['file_out'])
cmd='ffmpeg -hide_banner -loglevel warning -y -i {} -i {} -c:a aac -map 0:v:0 -map 1:a:0 {}'.format(fn+'.mp4',fn+'.wav',fn+'_AV.mp4')
os.system(cmd)
self.message(f'A/V file written to {self.cfg["save_path"]}')
return self
def write_AV(self):
'''
Runs full write and merge
'''
self.write_video()
self.write_audio()
self.merge()
if not self.display:
return self
def cleanup(self):
'''
Tries to remove any files that are video or audio only.
'''
fn=os.path.join(self.cfg['save_path'],self.cfg['file_out'])
try:
os.remove(fn+'.mp4')
except OSError:
pass
try:
os.remove(fn+'.wav')
except OSError:
pass
try:
os.remove(fn+'.mid')
except OSError:
pass
self.message(f'A/V only videos removed')
return self
def edit_save_path(self):
self.save_path.set(fd.askdirectory(title='Select a directory to save output files',initialdir=self.cfg['save_path']))
def initGUI(self):
'''
Initialize GUI fields, labels, dropdowns, etc.
'''
self.winfo_toplevel().title('pyanthem v{}'.format(pkg_resources.require("pyanthem")[0].version))
#photo = PhotoImage(file = os.path.join(os.path.dirname(os.path.abspath(__file__)),'logo.png'))
#self.iconphoto(False, photo)
self.protocol('WM_DELETE_WINDOW', self.quit)
# StringVars
self.file_in=init_entry(None)
self.file_out=init_entry(None)
self.save_path=init_entry(None)
self.speed=init_entry(100)
self.fr=init_entry(0)
self.start_percent=init_entry(0)
self.end_percent=init_entry(100)
self.baseline=init_entry(0)
self.brightness=init_entry(0)
self.threshold=init_entry(0)
self.octave_add=init_entry('2')
self.scale_type=init_entry('Maj. 7 (4/oct)')
self.key=init_entry('C')
self.sound_preset=init_entry('Piano')
self.comps_to_show=init_entry('all')
self.cmapchoice=init_entry('jet')
self.fluidsynthextracommand = ''
#self.ffmpegextracommand = ''
# Labels
		Label(text='',font='Helvetica 1 bold').grid(row=0,column=0) # Just to give a border around Separators
Label(text='File Parameters',font='Helvetica 14 bold').grid(row=1,column=1,columnspan=2,sticky='WE')
Label(text='Movie Parameters',font='Helvetica 14 bold').grid(row=1,column=3,columnspan=2,sticky='WE')
Label(text='Audio Parameters',font='Helvetica 14 bold').grid(row=1,column=5,columnspan=2,sticky='WE')
Label(text='Input Filename').grid(row=2, column=1,columnspan=2,sticky='W')
Label(text='Output Filename').grid(row=4, column=1,columnspan=2,sticky='W')
Label(text='Save Path').grid(row=6, column=1,columnspan=1,sticky='W')
Label(text='Speed (%)').grid(row=2, column=3, sticky='E')
Label(text='Start (%)').grid(row=3, column=3, sticky='E')
Label(text='End (%)').grid(row=4, column=3, sticky='E')
Label(text='Baseline').grid(row=5, column=3, sticky='E')
Label(text='Max brightness').grid(row=6, column=3, sticky='E')
Label(text='Colormap').grid(row=7, column=3, sticky='E')
Label(text='Threshold').grid(row=2, column=5, sticky='E')
Label(text='Octave').grid(row=3, column=5, sticky='E')
Label(text='Scale Type').grid(row=4, column=5, sticky='E')
Label(text='Key').grid(row=5, column=5, sticky='E')
Label(text='Audio format').grid(row=6, column=5, sticky='E')
# Messages
self.status=Message(text='Welcome to pyanthem v{}!'.\
format(pkg_resources.require("pyanthem")[0].version),bg='white',fg='black',width=450)
self.status.grid(row=9, column=2, columnspan=5, sticky='NESW')
self.status['anchor']='nw'
# Entries
Entry(textvariable=self.file_in).grid(row=3, column=1,columnspan=2,sticky='W')
Entry(textvariable=self.file_out).grid(row=5, column=1,columnspan=2,sticky='W')
Entry(textvariable=self.save_path,width=17).grid(row=7, column=1,columnspan=2,sticky='EW')
Entry(textvariable=self.speed,width=7).grid(row=2, column=4, sticky='W')
Entry(textvariable=self.start_percent,width=7).grid(row=3, column=4, sticky='W')
Entry(textvariable=self.end_percent,width=7).grid(row=4, column=4, sticky='W')
Entry(textvariable=self.baseline,width=7).grid(row=5, column=4, sticky='W')
Entry(textvariable=self.brightness,width=7).grid(row=6, column=4, sticky='W')
self.threshold_entry=Entry(textvariable=self.threshold,width=7)
self.threshold_entry.grid(row=2, column=6, sticky='W')
# Styles
s = Style()
s.configure('my.TButton', font=('Helvetica', 14))
# Buttons
Button(text='Edit',command=self.edit_save_path,width=5).grid(row=6, column=2)
Button(text='Preview',width=11,command=self.preview_notes).grid(row=7, column=5,columnspan=1)
self.update_button=Button(text='Update',width=7,command=self.process_H_W,style='my.TButton')
self.update_button.grid(row=9,column=1,columnspan=1)
# Combo box
self.cmapchooser=Combobox(self,textvariable=self.cmapchoice,width=5)
self.cmapchooser['values']=cmaps_opts
self.cmapchooser['state']='readonly'
self.cmapchooser.grid(row=7, column=4, sticky='W')
self.cmapchooser.current()
self.cmap=[]
self.octave_add_menu=Combobox(self,textvariable=self.octave_add,width=7)
self.octave_add_menu['values']=tuple(octave_add_opts.keys())
self.octave_add_menu['state']='readonly'
self.octave_add_menu.grid(row=3, column=6, sticky='W')
self.octave_add_menu.current()
self.scale_type_menu=Combobox(self,textvariable=self.scale_type,width=11)
self.scale_type_menu['values']=tuple(scale_keys.keys())
self.scale_type_menu['state']='readonly'
self.scale_type_menu.grid(row=4, column=6, sticky='W')
self.scale_type_menu.current()
self.key_menu=Combobox(self,textvariable=self.key,width=7)
self.key_menu['values']=tuple(key_opts.keys())
self.key_menu['state']='readonly'
self.key_menu.grid(row=5, column=6, sticky='W')
self.key_menu.current()
self.sound_preset_menu=Combobox(self,textvariable=self.sound_preset,width=7)
self.sound_preset_menu['values']=tuple(sound_presets.keys())
self.sound_preset_menu['state']='readonly'
self.sound_preset_menu.grid(row=6, column=6, sticky='W')
self.sound_preset_menu.current()
# Checkbox
self.audio_analog=IntVar()
self.audio_analog.set(0)
Checkbutton(self, text="Analog",command=self.process_H_W,variable=self.audio_analog).grid(row=7, column=6,columnspan=1)
# Menu bar
menubar=Menu(self)
filemenu=Menu(menubar, tearoff=0)
filemenu.add_command(label="Load data...", command=self.load_GUI)
filemenu.add_command(label="Load config...", command=self.load_config)
filemenu.add_command(label="Quit",command=self.quit,accelerator="Ctrl+Q")
savemenu=Menu(menubar, tearoff=0)
savemenu.add_command(label="Audio", command=self.write_audio)
savemenu.add_command(label="Video", command=self.write_video)
savemenu.add_command(label="Config", command=self.dump_cfg)
savemenu.add_command(label="Merge A/V", command=self.merge)
savemenu.add_command(label="Write A/V then merge", command=self.write_AV)
savemenu.add_command(label="Cleanup", command=self.cleanup)
advancedmenu=Menu(menubar, tearoff=0)
advancedmenu.add_command(label="Custom fluidsynth params", command=self.fluidsynthextra)
#advancedmenu.add_command(label="Custom ffmpeg params", command=self.ffmpegextra)
advancedmenu.add_command(label="View config", command=self.view_cfg)
menubar.add_cascade(label="File", menu=filemenu)
menubar.add_cascade(label="Save", menu=savemenu)
menubar.add_cascade(label="Advanced", menu=advancedmenu)
self.config(menu=menubar)
		# Separators
s_v=[[0,1,9],[2,1,8],[4,1,8],[6,1,9]]
s_h=[[1,1,6],[1,2,6],[1,9,6],[1,10,6],[1,4,2],[1,6,2]]
for sv in s_v:
Separator(self, orient='vertical').grid(column=sv[0], row=sv[1], rowspan=sv[2], sticky='nse')
for sh in s_h:
Separator(self, orient='horizontal').grid(column=sh[0], row=sh[1], columnspan=sh[2], sticky='nwe')
# Offset
self.offsetH=IntVar()
self.offsetH.set(1)
# Bind shortcuts
self.bind_all("<Control-q>", self.quit)
self.bind_all("<Control-a>", lambda:[self.process_H_W(),self.refresh_GUI()])
def init_plots(self):
'''
		Initializes the plot areas. Called each time a new dataset is loaded.
'''
# H
if 'H_pp' in self.data:
self.figH=plt.Figure(figsize=(7,6), dpi=100, tight_layout=True)
self.Hax1=self.figH.add_subplot(211)
self.Hax2=self.figH.add_subplot(212)
self.Hax2.set_title('Audio Preview')
self.canvas_H=FigureCanvasTkAgg(self.figH, master=self)
self.canvas_H.get_tk_widget().grid(row=1,column=7,rowspan=30,columnspan=10)
bg=self.status.winfo_rgb(self['bg'])
self.figH.set_facecolor([(x>>8)/255 for x in bg])
self.Hax1.spines['left'].set_visible(False)
self.Hax1.spines['top'].set_visible(False)
self.Hax1.spines['bottom'].set_visible(False)
self.Hax1.spines['right'].set_visible(False)
self.Hax1.yaxis.tick_right()
self.Hax1.yaxis.set_label_position('right')
self.Hax1.tick_params(axis='x',which='both',bottom=False, top=False, labelbottom=False, right=False)
self.Hax2.set(xlabel='time (sec)',ylabel='Component #')
self.Hax2.spines['left'].set_visible(False)
self.Hax2.spines['top'].set_visible(False)
self.Hax2.spines['bottom'].set_visible(False)
self.Hax2.spines['right'].set_visible(False)
self.Hax2.yaxis.tick_right()
self.Hax2.yaxis.set_label_position('right')
# Checkbox
Checkbutton(self, text="Offset H",command=self.refresh_GUI,variable=self.offsetH).grid(row=1,rowspan=1,column=16)
try:
# If user previously loaded dataset with W, clear those
# plots and remove associated GUI elements.
self.figW.clear()
self.canvas_W.draw()
self.frameslider.grid_forget()
self.comps_to_show_label.grid_forget()
self.comps_to_show_entry.grid_forget()
except:
pass
if 'W_pp' in self.data:
self.figW=plt.Figure(figsize=(6,3), dpi=100, constrained_layout=True)
self.Wax1=self.figW.add_subplot(121)
self.Wax2=self.figW.add_subplot(122)
self.Wax1.set_title('Video Preview')
self.Wax2.set_title('Spatial Data (W)')
self.Wax1.axis('off')
self.Wax2.axis('off')
self.canvas_W=FigureCanvasTkAgg(self.figW, master=self)
self.canvas_W.get_tk_widget().grid(row=11,column=1,rowspan=19,columnspan=6)
self.figW.set_facecolor([(x>>8)/255 for x in bg])
			# Frame slider
self.frameslider=Scale(self, from_=1, to=2, orient=HORIZONTAL)
self.frameslider.set(1)
self.frameslider['command']=self.refresh_slider
self.frameslider.grid(row=30, column=1, columnspan=3,sticky='EW')
# comps_to_show
self.comps_to_show_label=Label(text='Components:')
self.comps_to_show_label.grid(row=30, column=5, columnspan=1, sticky='E')
self.comps_to_show_entry=Entry(textvariable=self.comps_to_show,width=15,justify='center')
self.comps_to_show_entry.grid(row=30, column=6, columnspan=1,sticky='W')
def process_raw(self,data=None,n_clusters=None,frame_rate=None,save=False,file_in=None):
'''
Decomposes raw dataset. Can be used in two ways: as a part of the
GUI class for immediate processing (e.g. process_raw().write_AV()),
or as a method to save a new dataset.
'''
sh=data.shape
if len(sh) != 3:
self.message('ERROR: input dataset is not 3D.')
return
data=data.reshape(sh[0]*sh[1],sh[2])
# Ignore rows with any nans
nanidx=np.any(np.isnan(data), axis=1)
data_nn=data[~nanidx] # nn=non-nan
# k-means
print('Performing k-means...',end='')
if n_clusters is None:
# Default k is the 4th root of the number of samples per frame (for 256x256, this would be 16)
n_clusters=int(len(data)**.25)
print(f'No num_clusters given. Defaulting to {n_clusters}...',end='')
idx_nn=KMeans(n_clusters=n_clusters, random_state=0).fit(data_nn).labels_
idx=np.zeros((len(data),))
idx[nanidx==False]=idx_nn
# TCs
H=np.zeros((n_clusters,len(data.T)))
for i in range(n_clusters):
H[i,:]=np.nanmean(data[idx==i,:],axis=0)
print('done.')
# NNLS
nnidx=np.where(~nanidx)[0]
W=np.zeros((len(data),n_clusters))
print('Performing NNLS...',end='')
for i in range(len(nnidx)):
W[nnidx[i],:]=nnls(H.T,data_nn[i,:])[0]
# Sort bottom to top
xc,yc=[],[]
(X,Y)=np.meshgrid(range(sh[1]),range(sh[0]))
for i in range(len(W.T)):
Wtmp=W[:,i].reshape(sh[0],sh[1])
xc.append((X*Wtmp).sum() / Wtmp.sum().astype("float"))
yc.append((Y*Wtmp).sum() / Wtmp.sum().astype("float"))
I=np.argsort(yc[::-1]) # Reverse orders from bottom to top
W,H=W[:,I],H[I,:]
print('done.')
# Assign variables and save
self.data={}
self.data['H']=H
self.data['W']=W.reshape(sh[0],sh[1],n_clusters)
#self.data['W_shape']=self.data['W'].shape.flatten()
		if frame_rate is None:
self.data['fr']=10
print('No fr given. Defaulting to 10')
else:
self.data['fr']=frame_rate
if save:
if file_in is None:
file_in='data.mat'
fn=file_in.replace('.mat','_decomp.mat')
savemat(fn,self.data)
self.message(f'Decomposed data file saved to {fn}')
# Reshape W here, since any use of self from here would require a flattened W
self.data['W']=self.data['W'].reshape(self.data['W'].shape[0]*self.data['W'].shape[1],self.data['W'].shape[2])
return self
def fluidsynthextra(self):
self.fluidsynthextracommand=sd.askstring('Input custom fluidsynth parameters here','',initialvalue=self.fluidsynthextracommand,parent=self)
#def ffmpegextra(self):
# self.ffmpegextracommand=sd.askstring('Input custom ffmpeg parameters here','',initialvalue=self.ffmpegextracommand,parent=self)
def view_cfg(self):
'''
Prints cfg info to command line
'''
try:
for key in self.cfg:
print(str(key)+': '+str(self.cfg[key]))
		except AttributeError:
pass
def help(self):
print('To load a dataset:\npyanthem.load_data()\n\nTo load a cfg file:\npyanthem.load_config()\n\nTo write video:\npyanthem.write_video()\n\nTo write audio:\npyanthem.write_audio()')
if __name__=="__main__":
run()
# self\.([a-z_]{1,14})\.get\(\)
# self\.cfg\[$1\]
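# --- Headless end-to-end sketch (illustrative; file names are hypothetical) ---
# Mirrors the workflow described in GUI.help(): load a decomposed dataset and a
# saved cfg pickle, then render audio, video and the merged A/V file.
# Assumes ffmpeg and fluidsynth are installed and on the PATH.
#
#     g = run(display=False)
#     g.load_data('demo_decomp.mat')       # .mat containing W (3D), H (2D), fr
#     g.load_config('demo_decomp_cfg.p')   # cfg pickle produced by dump_cfg()
#     g.write_AV()                         # write audio + video, then merge
#     g.cleanup()                          # drop the audio-only/video-only files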
|
StarcoderdataPython
|
197204
|
<reponame>Juna2/grasp_detection
#!/usr/bin/env python
'''Converts Cornell Grasping Dataset data into TFRecords data format using Example protos.
The raw data set resides in png and txt files located in the following structure:
dataset/03/pcd0302r.png
dataset/03/pcd0302cpos.txt
'''
'''
1. Check variable "dataset" if dataset path correctly set
2. Check "for name in glob.glob(os.path.join(dataset, i+'_one_direction', 'pcd'+i+'*r.png')):"
and make sure folders and files are in the right place in path "dataset"
3. Check that the train-cgd and validation-cgd files are in the "dataset" path and move those files to another directory so that they are not overwritten
4. Check the percentage of validation set
'''
import tensorflow as tf
import os
import glob
import numpy as np
dataset = '/home/irobot2/Documents/custom_dataset' # '/root/dataset/cornell_grasping_dataset''
class ImageCoder(object):
def __init__(self):
self._sess = tf.Session()
self._decode_png_data = tf.placeholder(dtype=tf.string)
self._decode_png = tf.image.decode_png(self._decode_png_data, channels=3)
def decode_png(self, image_data):
return self._sess.run(self._decode_png,
feed_dict={self._decode_png_data: image_data})
def _process_image(filename, coder):
# Decode the image
    with open(filename, 'rb') as f:
image_data = f.read()
image = coder.decode_png(image_data)
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_bboxes(name):
'''Create a list with the coordinates of the grasping rectangles. Every
element is either x or y of a vertex.'''
with open(name, 'r') as f:
bboxes = list(map(
lambda coordinate: float(coordinate), f.read().strip().split()))
return bboxes
def _int64_feature(v):
if not isinstance(v, list):
v = [v]
return tf.train.Feature(int64_list=tf.train.Int64List(value=v))
def _floats_feature(v):
if not isinstance(v, list):
v = [v]
return tf.train.Feature(float_list=tf.train.FloatList(value=v))
def _bytes_feature(v):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[v]))
def _convert_to_example(filename, bboxes, image_buffer, height, width):
# Build an Example proto for an example
example = tf.train.Example(features=tf.train.Features(feature={
'image/filename': _bytes_feature(filename),
'image/encoded': _bytes_feature(image_buffer),
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'bboxes': _floats_feature(bboxes)}))
return example
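# Hypothetical helper (not part of the original script): a sketch of how the
# Example protos written above could be decoded again with the same TF1-style
# API. Feature keys match those used in _convert_to_example().
def _parse_example(serialized_example):
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/height': tf.FixedLenFeature([], tf.int64),
            'image/width': tf.FixedLenFeature([], tf.int64),
            'bboxes': tf.VarLenFeature(tf.float32),  # variable-length vertex list
        })
    image = tf.image.decode_png(features['image/encoded'], channels=3)
    return image, features['bboxes']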
def main():
train_file = os.path.join(dataset, 'train-cgd')
validation_file = os.path.join(dataset, 'validation-cgd')
print(train_file)
print(validation_file)
writer_train = tf.python_io.TFRecordWriter(train_file)
writer_validation = tf.python_io.TFRecordWriter(validation_file)
# Creating a list with all the image paths
folders = range(0,11)
folders = ['0'+str(i) if i<10 else '10' for i in folders]
print(len(folders))
filenames = []
for i in folders:
for name in glob.glob(os.path.join(dataset, i, 'pcd'+i+'*r.png')):
filenames.append(name)
# Shuffle the list of image paths
np.random.shuffle(filenames)
count = 0
valid_img = 0
train_img = 0
coder = ImageCoder()
# print(len(filenames))
for filename in filenames:
bbox = filename[:-5]+'cpos.txt'
bboxes = _process_bboxes(bbox)
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, bboxes, image_buffer, height, width)
        # Optional: split the dataset 80% training / 20% validation (currently disabled; all images go to the train file)
# if count % 5 == 0:
# writer_validation.write(example.SerializeToString())
# valid_img +=1
# else:
writer_train.write(example.SerializeToString())
train_img +=1
count +=1
print('Done converting %d images in TFRecords with %d train images and %d validation images' % (count, train_img, valid_img))
writer_train.close()
writer_validation.close()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
3297955
|
<gh_stars>1-10
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import soundfile as sf
import torch
import torchaudio.compliance.kaldi as kaldi
class LogMelFeatureReader:
"""
    Wrapper class to extract log-mel filterbank (Kaldi-compliant fbank) features
    for a given audio file.
"""
def __init__(self, *args, **kwargs):
self.num_mel_bins = kwargs.get("num_mel_bins", 80)
self.frame_length = kwargs.get("frame_length", 25.0)
def get_feats(self, file_path):
wav, sr = sf.read(file_path)
feats = torch.from_numpy(wav).float()
feats = kaldi.fbank(
feats.unsqueeze(0),
num_mel_bins=self.num_mel_bins,
frame_length=self.frame_length,
sample_frequency=sr,
)
return feats
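# Usage sketch (the wav path is a placeholder, not part of the original module):
#
#     reader = LogMelFeatureReader(num_mel_bins=80, frame_length=25.0)
#     feats = reader.get_feats("/path/to/utterance.wav")
#     # feats is a (num_frames, num_mel_bins) float tensor of log-mel energies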
|
StarcoderdataPython
|
158901
|
"""In memory storage classes."""
from __future__ import absolute_import
import logging
import threading
from collections import Counter
from six.moves import queue
from splitio.models.segments import Segment
from splitio.storage import SplitStorage, SegmentStorage, ImpressionStorage, EventStorage, \
TelemetryStorage
MAX_SIZE_BYTES = 5 * 1024 * 1024
class InMemorySplitStorage(SplitStorage):
"""InMemory implementation of a split storage."""
def __init__(self):
"""Constructor."""
self._logger = logging.getLogger(self.__class__.__name__)
self._lock = threading.RLock()
self._splits = {}
self._change_number = -1
self._traffic_types = Counter()
def get(self, split_name):
"""
Retrieve a split.
:param split_name: Name of the feature to fetch.
:type split_name: str
:rtype: splitio.models.splits.Split
"""
with self._lock:
return self._splits.get(split_name)
def fetch_many(self, split_names):
"""
Retrieve splits.
:param split_names: Names of the features to fetch.
:type split_name: list(str)
        :return: A dict mapping each requested split name to its Split object (or None).
:rtype: dict(split_name, splitio.models.splits.Split)
"""
return {split_name: self.get(split_name) for split_name in split_names}
def put(self, split):
"""
Store a split.
:param split: Split object.
:type split: splitio.models.split.Split
"""
with self._lock:
if split.name in self._splits:
self._decrease_traffic_type_count(self._splits[split.name].traffic_type_name)
self._splits[split.name] = split
self._increase_traffic_type_count(split.traffic_type_name)
def remove(self, split_name):
"""
Remove a split from storage.
:param split_name: Name of the feature to remove.
:type split_name: str
:return: True if the split was found and removed. False otherwise.
:rtype: bool
"""
with self._lock:
split = self._splits.get(split_name)
if not split:
self._logger.warning("Tried to delete nonexistant split %s. Skipping", split_name)
return False
self._splits.pop(split_name)
self._decrease_traffic_type_count(split.traffic_type_name)
return True
def get_change_number(self):
"""
Retrieve latest split change number.
:rtype: int
"""
with self._lock:
return self._change_number
def set_change_number(self, new_change_number):
"""
Set the latest change number.
:param new_change_number: New change number.
:type new_change_number: int
"""
with self._lock:
self._change_number = new_change_number
def get_split_names(self):
"""
Retrieve a list of all split names.
:return: List of split names.
:rtype: list(str)
"""
with self._lock:
return list(self._splits.keys())
def get_all_splits(self):
"""
Return all the splits.
:return: List of all the splits.
:rtype: list
"""
with self._lock:
return list(self._splits.values())
def is_valid_traffic_type(self, traffic_type_name):
"""
Return whether the traffic type exists in at least one split in cache.
:param traffic_type_name: Traffic type to validate.
:type traffic_type_name: str
:return: True if the traffic type is valid. False otherwise.
:rtype: bool
"""
with self._lock:
return traffic_type_name in self._traffic_types
def _increase_traffic_type_count(self, traffic_type_name):
"""
Increase by one the count for a specific traffic type name.
:param traffic_type_name: Traffic type to increase the count.
:type traffic_type_name: str
"""
self._traffic_types.update([traffic_type_name])
def _decrease_traffic_type_count(self, traffic_type_name):
"""
Decrease by one the count for a specific traffic type name.
:param traffic_type_name: Traffic type to decrease the count.
:type traffic_type_name: str
"""
self._traffic_types.subtract([traffic_type_name])
self._traffic_types += Counter()
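# Usage sketch (illustrative; `some_split` stands for an already-parsed
# splitio.models.splits.Split object, e.g. built from an API payload):
#
#     storage = InMemorySplitStorage()
#     storage.put(some_split)                 # indexed under some_split.name
#     storage.set_change_number(123)
#     storage.get_change_number()             # -> 123
#     storage.is_valid_traffic_type(some_split.traffic_type_name)  # -> True
#     storage.remove(some_split.name)         # -> True, traffic type count drops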
class InMemorySegmentStorage(SegmentStorage):
"""In-memory implementation of a segment storage."""
def __init__(self):
"""Constructor."""
self._logger = logging.getLogger(self.__class__.__name__)
self._segments = {}
self._change_numbers = {}
self._lock = threading.RLock()
def get(self, segment_name):
"""
Retrieve a segment.
:param segment_name: Name of the segment to fetch.
:type segment_name: str
:rtype: str
"""
with self._lock:
fetched = self._segments.get(segment_name)
if fetched is None:
self._logger.warning(
"Tried to retrieve nonexistant segment %s. Skipping",
segment_name
)
return fetched
def put(self, segment):
"""
Store a segment.
:param segment: Segment to store.
:type segment: splitio.models.segment.Segment
"""
with self._lock:
self._segments[segment.name] = segment
def update(self, segment_name, to_add, to_remove, change_number=None):
"""
        Update a segment. Create it if it doesn't exist.
:param segment_name: Name of the segment to update.
:type segment_name: str
:param to_add: Set of members to add to the segment.
:type to_add: set
        :param to_remove: Set of members to remove from the segment.
        :type to_remove: set
"""
with self._lock:
if segment_name not in self._segments:
self._segments[segment_name] = Segment(segment_name, to_add, change_number)
return
self._segments[segment_name].update(to_add, to_remove)
if change_number is not None:
self._segments[segment_name].change_number = change_number
def get_change_number(self, segment_name):
"""
Retrieve latest change number for a segment.
:param segment_name: Name of the segment.
:type segment_name: str
:rtype: int
"""
with self._lock:
if segment_name not in self._segments:
return None
return self._segments[segment_name].change_number
def set_change_number(self, segment_name, new_change_number):
"""
Set the latest change number.
:param segment_name: Name of the segment.
:type segment_name: str
:param new_change_number: New change number.
:type new_change_number: int
"""
with self._lock:
if segment_name not in self._segments:
return
self._segments[segment_name].change_number = new_change_number
def segment_contains(self, segment_name, key):
"""
Check whether a specific key belongs to a segment in storage.
:param segment_name: Name of the segment to search in.
:type segment_name: str
:param key: Key to search for.
:type key: str
:return: True if the segment contains the key. False otherwise.
:rtype: bool
"""
with self._lock:
if segment_name not in self._segments:
self._logger.warning(
"Tried to query members for nonexistant segment %s. Returning False",
segment_name
)
return False
return self._segments[segment_name].contains(key)
class InMemoryImpressionStorage(ImpressionStorage):
"""In memory implementation of an impressions storage."""
def __init__(self, queue_size):
"""
Construct an instance.
        :param queue_size: How many impressions to queue before forcing a submission
"""
self._logger = logging.getLogger(self.__class__.__name__)
self._impressions = queue.Queue(maxsize=queue_size)
self._lock = threading.Lock()
self._queue_full_hook = None
def set_queue_full_hook(self, hook):
"""
Set a hook to be called when the queue is full.
:param h: Hook to be called when the queue is full
"""
if callable(hook):
self._queue_full_hook = hook
def put(self, impressions):
"""
Put one or more impressions in storage.
:param impressions: List of one or more impressions to store.
:type impressions: list
"""
try:
with self._lock:
for impression in impressions:
self._impressions.put(impression, False)
return True
except queue.Full:
if self._queue_full_hook is not None and callable(self._queue_full_hook):
self._queue_full_hook()
self._logger.warning(
                'Impressions queue is full, failing to add more impressions. \n'
                'Consider increasing the impressions queue size in configuration'
)
return False
def pop_many(self, count):
"""
Pop the oldest N impressions from storage.
:param count: Number of impressions to pop.
:type count: int
"""
impressions = []
with self._lock:
while not self._impressions.empty() and count > 0:
impressions.append(self._impressions.get(False))
count -= 1
return impressions
class InMemoryEventStorage(EventStorage):
"""
In memory storage for events.
Supports adding and popping events.
"""
def __init__(self, eventsQueueSize):
"""
Construct an instance.
:param eventsQueueSize: How many events to queue before forcing a submission
"""
self._logger = logging.getLogger(self.__class__.__name__)
self._lock = threading.Lock()
self._events = queue.Queue(maxsize=eventsQueueSize)
self._queue_full_hook = None
self._size = 0
def set_queue_full_hook(self, hook):
"""
Set a hook to be called when the queue is full.
:param h: Hook to be called when the queue is full
"""
if callable(hook):
self._queue_full_hook = hook
def put(self, events):
"""
Add an event to storage.
        :param events: List of events to be added to the storage
"""
try:
with self._lock:
for event in events:
self._size += event.size
if self._size >= MAX_SIZE_BYTES:
self._queue_full_hook()
return False
self._events.put(event.event, False)
return True
except queue.Full:
if self._queue_full_hook is not None and callable(self._queue_full_hook):
self._queue_full_hook()
self._logger.warning(
'Events queue is full, failing to add more events. \n'
'Consider increasing parameter `eventsQueueSize` in configuration'
)
return False
def pop_many(self, count):
"""
Pop multiple items from the storage.
:param count: number of items to be retrieved and removed from the queue.
"""
events = []
with self._lock:
while not self._events.empty() and count > 0:
events.append(self._events.get(False))
count -= 1
self._size = 0
return events
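# Usage sketch for the queue-full hook (illustrative; `wrapped_event` stands for
# an object with `.event` and `.size` attributes, as expected by put()):
#
#     events = InMemoryEventStorage(eventsQueueSize=500)
#     events.set_queue_full_hook(lambda: print('events queue full, flush now'))
#     accepted = events.put([wrapped_event])  # False once the count/size limit is hit
#     batch = events.pop_many(100)            # drain up to 100 events for submission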
class InMemoryTelemetryStorage(TelemetryStorage):
"""In-Memory implementation of telemetry storage interface."""
def __init__(self):
"""Constructor."""
self._logger = logging.getLogger(self.__class__.__name__)
self._latencies = {}
self._gauges = {}
self._counters = {}
self._latencies_lock = threading.Lock()
self._gauges_lock = threading.Lock()
self._counters_lock = threading.Lock()
def inc_latency(self, name, bucket):
"""
Add a latency.
:param name: Name of the latency metric.
:type name: str
        :param bucket: Latency bucket index (0-21).
        :type bucket: int
"""
if not 0 <= bucket <= 21:
            self._logger.warning('Incorrect bucket "%d" for latency "%s". Ignoring.', bucket, name)
return
with self._latencies_lock:
latencies = self._latencies.get(name, [0] * 22)
latencies[bucket] += 1
self._latencies[name] = latencies
def inc_counter(self, name):
"""
Increment a counter.
:param name: Name of the counter metric.
:type name: str
"""
with self._counters_lock:
counter = self._counters.get(name, 0)
counter += 1
self._counters[name] = counter
def put_gauge(self, name, value):
"""
Add a gauge metric.
:param name: Name of the gauge metric.
:type name: str
:param value: Value of the gauge metric.
:type value: int
"""
with self._gauges_lock:
self._gauges[name] = value
def pop_counters(self):
"""
Get all the counters.
        :rtype: dict
"""
with self._counters_lock:
try:
return self._counters
finally:
self._counters = {}
def pop_gauges(self):
"""
Get all the gauges.
        :rtype: dict
"""
with self._gauges_lock:
try:
return self._gauges
finally:
self._gauges = {}
def pop_latencies(self):
"""
Get all latencies.
        :rtype: dict
"""
with self._latencies_lock:
try:
return self._latencies
finally:
self._latencies = {}
|
StarcoderdataPython
|
174358
|
# python3
# Copyright 2018 DeepMind Technologies Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Episode adders.
This implements full episode adders, potentially with padding.
"""
from typing import Optional
from acme import types
from acme.adders.reverb import base
from acme.adders.reverb import utils
import dm_env
import reverb
class EpisodeAdder(base.ReverbAdder):
"""Adder which adds entire episodes as trajectories."""
def __init__(
self,
client: reverb.Client,
max_sequence_length: int,
delta_encoded: bool = False,
chunk_length: Optional[int] = None,
priority_fns: Optional[base.PriorityFnMapping] = None,
):
super().__init__(
client=client,
buffer_size=max_sequence_length - 1,
max_sequence_length=max_sequence_length,
delta_encoded=delta_encoded,
chunk_length=chunk_length,
priority_fns=priority_fns,
)
def add(
self,
action: types.NestedArray,
next_timestep: dm_env.TimeStep,
extras: types.NestedArray = (),
):
if len(self._buffer) == self._buffer.maxlen:
# If the buffer is full that means we've buffered max_sequence_length-1
# steps, one dangling observation, and are trying to add one more (which
# will overflow the buffer).
raise ValueError(
'The number of observations within the same episode exceeds '
'max_sequence_length')
super().add(action, next_timestep, extras)
def _write(self):
# Append the previous step.
self._writer.append(self._buffer[-1])
def _write_last(self):
# Append a zero-filled final step.
final_step = utils.final_step_like(self._buffer[0], self._next_observation)
self._writer.append(final_step)
# The length of the sequence we will be adding is the size of the buffer
# plus one due to the final step.
steps = list(self._buffer) + [final_step]
num_steps = len(steps)
# Calculate the priority for this episode.
table_priorities = utils.calculate_priorities(self._priority_fns, steps)
# Create a prioritized item for each table.
for table_name, priority in table_priorities.items():
self._writer.create_item(table_name, num_steps, priority)
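# Usage sketch (server address and table configuration are assumptions, not part
# of this module); the adder follows the usual acme adder protocol:
#
#     client = reverb.Client('localhost:8000')
#     adder = EpisodeAdder(client=client, max_sequence_length=1001)
#     adder.add_first(env.reset())          # first timestep of the episode
#     adder.add(action, env.step(action))   # each subsequent action/timestep pair
#     # when the terminal timestep arrives, the whole episode is written as one item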
|
StarcoderdataPython
|
125212
|
<filename>src/dataset/__init__.py
from .backend import Backend
from .dataset_ycb import YCB
from .dataset_generic import GenericDataset
__all__ = (
'GenericDataset',
'YCB',
'Backend'
)
|
StarcoderdataPython
|
108102
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: Inverted_index_cn.py
   Description : Crawl daily listed-company announcements from the Sina Finance site
Author : charl
date: 2018/9/5
-------------------------------------------------
Change Activity: 2018/9/5:
-------------------------------------------------
"""
# Crawl daily announcements of listed companies from Sina Finance
# Just provide a date, e.g. 2017-02-21
import os
import math
import time
import datetime
import requests
import threading
from lxml import etree
# Crawl a single announcement and save it
def spiderOnePiece(iurl,headers,datetime,filename):
    # strip characters that are not allowed in file names
invaild=['*','\\','/',':','\"','<','>','|','?']
for c in invaild:
if c in filename:
filename=filename.replace(c,'')
response=requests.get(iurl,headers=headers).content
page=etree.HTML(response)
content=page.xpath('//*[@id="content"]/pre')
if len(content)==0:
return
content=content[0].text
with open(datetime+os.sep+filename,'w') as f:
f.write(content.encode('utf-8'))
# Crawl one listing page
def spiderOnePage(url,headers,datetime):
website='http://vip.stock.finance.sina.com.cn'
response=requests.get(url,headers=headers).content
page=etree.HTML(response)
trList=page.xpath(r'//*[@id="wrap"]/div[@class="Container"]/table/tbody/tr')
print(len(trList))
    if len(trList)==1: # crawling is done: the only row is the "sorry, no matching records" notice
return 0
    if not os.path.exists(datetime): # create a folder for this date
os.mkdir(datetime)
for item in trList:
aUrl=item.xpath('th/a[1]')
        title=aUrl[0].text # announcement title
        href=aUrl[0].attrib['href'] # announcement URI (relative)
        href=website+href # full announcement URL
        atype=item.xpath('td[1]')[0].text # announcement type
spiderOnePiece(href,headers,datetime,title+'_'+atype+'.txt')
return 1
# Crawl one day of announcements
def spiderOneDay(url,headers,datetime,log_path='log'):
    url=url.replace('#datetime#',datetime) # fill the date into the url template
    flag=1 # success flag for page crawling
    index=1 # starting page number
f=open(log_path+os.sep+datetime+'.txt','a')
while flag:
t_url=url+str(index)
try:
flag=spiderOnePage(t_url,headers,datetime)
except Exception as e:
print('err:',e)
flag=0
finally:
if flag:
print('%s page_%d load success,continue.' %(datetime,index))
f.write('%s_page_%d load success.\n' %(datetime,index))
f.flush()
else:
print('%s page_%d load fail,end.' %(datetime,index))
f.write('%s_page_%d load failed.\n' %(datetime,index))
f.flush()
index+=1
f.close()
# Crawl announcement data for a group of dates
def spiderOneGroupDays(url,headers,date_group,log_path):
for idate in date_group:
try:
spiderOneDay(url,headers,idate,log_path)
print('%s has load success.over.' %idate)
except Exception as e:
print('err:',e)
continue
# Get all dates between begin_date (inclusive) and end_date (inclusive)
def getBetweenDay(begin_date,end_date):
date_list=[]
begin_date=datetime.datetime.strptime(begin_date,'%Y-%m-%d')
    # today's date
now_date=datetime.datetime.strptime(time.strftime('%Y-%m-%d',time.localtime(time.time())),'%Y-%m-%d')
end_date=datetime.datetime.strptime(end_date,'%Y-%m-%d')
    # if the given end date is later than today, use today as the end date
if end_date>now_date:
end_date=now_date
while begin_date<=end_date:
date_str=begin_date.strftime('%Y-%m-%d')
date_list.append(date_str)
begin_date+=datetime.timedelta(days=1)
return date_list
# Split date_list evenly into threadNum groups; the last group may be smaller
def split_date_list(date_list,threadNum):
# length=(len(date_list)/threadNum if len(date_list)%threadNum==0 else len(date_list)/threadNum+1)
length=int(math.ceil(len(date_list)*1.0/threadNum))
return [date_list[m:m+length] for m in range(0,len(date_list),length)]
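# Example (illustrative): splitting 5 dates across 2 threads
#   split_date_list(['2017-01-01','2017-01-02','2017-01-03','2017-01-04','2017-01-05'], 2)
#   -> [['2017-01-01','2017-01-02','2017-01-03'], ['2017-01-04','2017-01-05']]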
def main():
headers = {
"Accept-Language": "zh-CN,zh;q=0.8",
"Accept-Encoding": "gzip, deflate, sdch",
"Host": "vip.stock.finance.sina.com.cn",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
"Upgrade-Insecure-Requests": "1",
"Connection": "keep-alive",
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36"
}
url='http://vip.stock.finance.sina.com.cn/corp/view/vCB_BulletinGather.php?gg_date=#datetime#&page='
log_path='log'
if not os.path.exists(log_path):
os.mkdir(log_path)
# datetime='2017-02-19'
# spiderOneDay(url,headers,datetime,log_path)
begin_date='2017-01-01'
end_date='2017-01-31'
    # all dates from begin_date (inclusive) to end_date (inclusive)
date_list=getBetweenDay(begin_date,end_date)
print('%s-%s:%d days.' %(begin_date,end_date,len(date_list)))
cut_date_list=split_date_list(date_list,4)
print(cut_date_list)
threads=[]
for dgroup in cut_date_list:
t=threading.Thread(target=spiderOneGroupDays,args=(url,headers,dgroup,log_path,))
threads.append(t)
    # start the threads
for t in threads:
t.start()
    # wait for all threads to finish (blocks the main thread)
for t in threads:
t.join()
print('all load success...')
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
95069
|
<filename>vnpy/gateway/comstar/comstar_gateway.py
from datetime import datetime
from typing import Optional, Sequence, Dict
from enum import Enum
import pytz
from vnpy.event import EventEngine
from vnpy.trader.gateway import BaseGateway
from vnpy.trader.constant import (
Exchange,
Product,
Offset,
OrderType,
Direction,
Status
)
from vnpy.trader.object import (
SubscribeRequest,
CancelRequest,
OrderRequest,
ContractData,
TickData,
OrderData,
TradeData,
LogData
)
from .comstar_api import TdApi
VN_ENUMS = {
"Exchange": Exchange,
"Product": Product,
"Offset": Offset,
"OrderType": OrderType,
"Direction": Direction,
"Status": Status
}
CHINA_TZ = pytz.timezone("Asia/Shanghai")
class ComstarGateway(BaseGateway):
"""
VN Trader Gateway for Comstar service.
"""
default_setting = {
"交易服务器": "",
"用户名": "",
"密码": "",
"Key": ""
}
exchanges = [Exchange.CFETS]
def __init__(self, event_engine: EventEngine):
"""Constructor"""
super().__init__(event_engine, "COMSTAR")
self.api = UserApi(self)
def connect(self, setting: dict):
""""""
td_address = setting["交易服务器"]
username = setting["用户名"]
password = setting["密码"]
key = setting["Key"]
self.api.connect(username, password, key, td_address)
def subscribe(self, req: SubscribeRequest):
""""""
# Symbol format: 180406_T0 or 180406_T1
symbol, settle_type, *_ = req.symbol.split("_") + [""]
if settle_type not in {"T0", "T1"}:
self.write_log("请输入清算速度T0或T1")
return ""
data = vn_encode(req)
data["symbol"] = symbol
data["settle_type"] = settle_type
self.api.subscribe(data, self.gateway_name)
def send_order(self, req: OrderRequest):
""""""
        # Offset is not supported by the Comstar gateway
req.offset = Offset.NONE
if req.type not in {OrderType.LIMIT, OrderType.FAK}:
self.write_log("仅支持限价单和FAK单")
return ""
symbol, settle_type, *_ = req.symbol.split("_") + [""]
if settle_type not in {"T0", "T1"}:
self.write_log("请输入清算速度T0或T1")
return ""
data = vn_encode(req)
data["symbol"] = symbol
data["settle_type"] = settle_type
data["strategy_name"] = data.pop("reference")
order_id = self.api.send_order(data, self.gateway_name)
# convert to vt_orderid
return f"{self.gateway_name}.{order_id}"
def cancel_order(self, req: CancelRequest):
""""""
data = vn_encode(req)
symbol, settle_type, *_ = req.symbol.split("_") + [""]
data["symbol"] = symbol
data["settle_type"] = settle_type
self.api.cancel_order(data, self.gateway_name)
def query_account(self):
""""""
pass
def query_position(self):
""""""
pass
def query_all(self):
""""""
self.api.get_all_contracts()
self.api.get_all_orders()
self.api.get_all_trades()
def close(self):
""""""
self.api.close()
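# Usage sketch (credentials and server address are placeholders, not part of
# this module):
#
#     event_engine = EventEngine()
#     gateway = ComstarGateway(event_engine)
#     gateway.connect({"交易服务器": "tcp://127.0.0.1:9402",
#                      "用户名": "user", "密码": "pass", "Key": "key"})
#     req = SubscribeRequest(symbol="180406_T1", exchange=Exchange.CFETS)
#     gateway.subscribe(req)   # the symbol suffix carries the settle speed (T0/T1)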
class UserApi(TdApi):
"""
Implements Comstar API.
"""
def __init__(self, gateway: ComstarGateway):
"""Constructor"""
super().__init__()
self.gateway = gateway
self.gateway_name = gateway.gateway_name
self.trades: Dict[str, TradeData] = {}
self.orders: Dict[str, OrderData] = {}
def on_tick(self, tick: dict):
""""""
data = parse_tick(tick)
self.gateway.on_tick(data)
def on_order(self, order: dict):
""""""
data = parse_order(order)
# Filter duplicated order data push after reconnect
last_order = self.orders.get(data.vt_orderid, None)
if (
last_order
and data.traded == last_order.traded
and data.status == last_order.status
):
return
self.orders[data.vt_orderid] = data
self.gateway.on_order(data)
def on_trade(self, trade: dict):
""""""
data = parse_trade(trade)
# Filter duplicated trade data push after reconnect
if data.vt_tradeid in self.trades:
return
self.trades[data.vt_tradeid] = data
self.gateway.on_trade(data)
def on_log(self, log: dict):
data = parse_log(log)
self.gateway.on_log(data)
def on_login(self, data: dict):
""""""
if data["status"]:
self.gateway.query_all()
self.gateway.write_log("服务器登录成功")
else:
self.gateway.write_log("服务器登录失败")
def on_disconnected(self, reason: str):
""""""
self.gateway.write_log(reason)
def on_all_contracts(self, contracts: Sequence[dict]):
""""""
for data in contracts:
for settle_type in ("T0", "T1"):
contract = parse_contract(data, settle_type)
contract.gateway_name = self.gateway_name
self.gateway.on_contract(contract)
self.gateway.write_log("合约信息查询成功")
def on_all_orders(self, orders: Sequence[dict]):
""""""
for data in orders:
order = parse_order(data)
order.gateway_name = self.gateway_name
self.gateway.on_order(order)
self.gateway.write_log("委托信息查询成功")
def on_all_trades(self, trades: Sequence[dict]):
""""""
for data in trades:
trade = parse_trade(data)
trade.gateway_name = self.gateway_name
self.gateway.on_trade(trade)
self.gateway.write_log("成交信息查询成功")
def on_auth(self, status: bool):
""""""
if status:
self.gateway.write_log("服务器授权验证成功")
else:
self.gateway.write_log("服务器授权验证失败")
def parse_tick(data: dict) -> TickData:
"""
Convert json received from API to TickData object.
XBond Depth Data Notice:
1. Bid/Ask1 are public best price.
2. Bid/Ask2-6 are private price data.
"""
tick = TickData(
symbol=f"{data['symbol']}_{data['settle_type']}",
exchange=enum_decode(data["exchange"]),
datetime=parse_datetime(data["datetime"]),
name=data["name"],
volume=float(data["volume"]),
last_price=float(data["last_price"]),
open_price=float(data["open_price"]),
high_price=float(data["high_price"]),
low_price=float(data["low_price"]),
pre_close=float(data["pre_close"]),
bid_price_1=float(data["bid_price_2"]),
bid_price_2=float(data["bid_price_3"]),
bid_price_3=float(data["bid_price_4"]),
bid_price_4=float(data["bid_price_5"]),
bid_price_5=float(data["bid_price_6"]),
ask_price_1=float(data["ask_price_2"]),
ask_price_2=float(data["ask_price_3"]),
ask_price_3=float(data["ask_price_4"]),
ask_price_4=float(data["ask_price_5"]),
ask_price_5=float(data["ask_price_6"]),
bid_volume_1=float(data["bid_volume_2"]),
bid_volume_2=float(data["bid_volume_3"]),
bid_volume_3=float(data["bid_volume_4"]),
bid_volume_4=float(data["bid_volume_5"]),
bid_volume_5=float(data["bid_volume_6"]),
ask_volume_1=float(data["ask_volume_2"]),
ask_volume_2=float(data["ask_volume_3"]),
ask_volume_3=float(data["ask_volume_4"]),
ask_volume_4=float(data["ask_volume_5"]),
ask_volume_5=float(data["ask_volume_6"]),
gateway_name=data["gateway_name"]
)
tick.public_bid_price = float(data["bid_price_1"])
tick.public_ask_price = float(data["ask_price_1"])
tick.public_bid_volume = float(data["bid_volume_1"])
tick.public_ask_volume = float(data["ask_volume_1"])
return tick
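# Illustrative note (not from the original source): given the mapping above, an
# incoming payload whose "bid_price_1" is the public best bid and whose
# "bid_price_2".."bid_price_6" are the private depth levels ends up as:
#   tick.public_bid_price == float(data["bid_price_1"])
#   tick.bid_price_1      == float(data["bid_price_2"])   # first private level
# so strategies read the private book from bid/ask_price_1..5 and the public
# best quote from the public_* attributes.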
def parse_order(data: dict) -> OrderData:
"""
Convert json received from API to OrderData object.
"""
order = OrderData(
symbol=f"{data['symbol']}_{data['settle_type']}",
exchange=enum_decode(data["exchange"]),
orderid=data["orderid"],
type=enum_decode(data["type"]),
direction=enum_decode(data["direction"]),
offset=Offset.NONE,
price=float(data["price"]),
volume=float(data["volume"]),
traded=float(data["traded"]),
status=enum_decode(data["status"]),
datetime=generate_datetime(data["time"]),
gateway_name=data["gateway_name"]
)
return order
def parse_trade(data: dict) -> TradeData:
"""
Convert json received from API to TradeData object.
"""
trade = TradeData(
symbol=f"{data['symbol']}_{data['settle_type']}",
exchange=enum_decode(data["exchange"]),
orderid=data["orderid"],
tradeid=data["tradeid"],
direction=enum_decode(data["direction"]),
offset=Offset.NONE,
price=float(data["price"]),
volume=float(data["volume"]),
datetime=generate_datetime(data["time"]),
gateway_name=data["gateway_name"]
)
return trade
def parse_contract(data: dict, settle_type: str) -> ContractData:
"""
Convert json received from API to ContractData object.
"""
contract = ContractData(
symbol=f"{data['symbol']}_{settle_type}",
exchange=enum_decode(data["exchange"]),
name=data["name"],
product=enum_decode(data["product"]),
size=int(data["size"]),
pricetick=float(data["pricetick"]),
min_volume=float(data["min_volume"]),
gateway_name=data["gateway_name"]
)
return contract
def parse_log(data: dict) -> LogData:
"""
Convert json received from API to LogData object.
"""
log = LogData(
msg=data["msg"],
level=data["level"],
gateway_name=data["gateway_name"]
)
log.time = parse_datetime(data["time"])
return log
def parse_datetime(s: str) -> datetime:
if "." in s:
dt = datetime.strptime(s, "%Y%m%d %H:%M:%S.%f")
elif len(s) > 0:
dt = datetime.strptime(s, "%Y%m%d %H:%M:%S")
else:
dt = datetime.now()
dt = CHINA_TZ.localize(dt)
return dt
def enum_decode(s: str) -> Optional[Enum]:
"""
Convert string into vn.py constant enum.
"""
if "." in s:
name, member = s.split(".")
return getattr(VN_ENUMS[name], member)
else:
return None
def vn_encode(obj: object) -> str or dict:
"""
Convert vn.py object into json format.
"""
if type(obj) in VN_ENUMS.values():
return str(obj)
else:
s = {}
for (k, v) in obj.__dict__.items():
if type(v) in VN_ENUMS.values():
s[k] = vn_encode(v)
else:
s[k] = str(v)
return s
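# Hedged example (assumes vn.py defines Direction.LONG as usual): these two
# helpers are meant to round-trip between vn.py enums and plain strings, e.g.
#   vn_encode(Direction.LONG)      -> "Direction.LONG"
#   enum_decode("Direction.LONG")  -> Direction.LONG
#   enum_decode("LONG")            -> None   (no dot, so not treated as an enum)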
def generate_datetime(time: str) -> datetime:
""""""
today = datetime.now().strftime("%Y%m%d")
timestamp = f"{today} {time}"
dt = parse_datetime(timestamp)
return dt
|
StarcoderdataPython
|
23958
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Jemalloc(AutotoolsPackage):
"""jemalloc is a general purpose malloc(3) implementation that emphasizes
fragmentation avoidance and scalable concurrency support."""
homepage = "http://jemalloc.net/"
url = "https://github.com/jemalloc/jemalloc/releases/download/4.0.4/jemalloc-4.0.4.tar.bz2"
version('5.2.1', sha256='34330e5ce276099e2e8950d9335db5a875689a4c6a56751ef3b1d8c537f887f6')
version('5.2.0', sha256='74be9f44a60d2a99398e706baa921e4efde82bf8fd16e5c0643c375c5851e3b4')
version('4.5.0', sha256='9409d85664b4f135b77518b0b118c549009dc10f6cba14557d170476611f6780')
version('4.4.0', sha256='a7aea63e9718d2f1adf81d87e3df3cb1b58deb86fc77bad5d702c4c59687b033')
version('4.3.1', sha256='f7bb183ad8056941791e0f075b802e8ff10bd6e2d904e682f87c8f6a510c278b')
version('4.2.1', sha256='5630650d5c1caab95d2f0898de4fe5ab8519dc680b04963b38bb425ef6a42d57')
version('4.2.0', sha256='b216ddaeb901697fe38bd30ea02d7505a4b60e8979092009f95cfda860d46acb')
version('4.1.0', sha256='fad06d714f72adb4265783bc169c6d98eeb032d57ba02d87d1dcb4a2d933ec8e')
version('4.0.4', sha256='3fda8d8d7fcd041aa0bebbecd45c46b28873cf37bd36c56bf44961b36d0f42d0')
variant('stats', default=False, description='Enable heap statistics')
variant('prof', default=False, description='Enable heap profiling')
variant(
'jemalloc_prefix', default='none',
description='Prefix to prepend to all public APIs',
values=None,
multi=False
)
def configure_args(self):
spec = self.spec
args = []
if '+stats' in spec:
args.append('--enable-stats')
if '+prof' in spec:
args.append('--enable-prof')
je_prefix = spec.variants['jemalloc_prefix'].value
if je_prefix != 'none':
args.append('--with-jemalloc-prefix={0}'.format(je_prefix))
return args
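# Illustrative only (not part of the upstream package): a spec such as
#   spack install jemalloc@5.2.1 +stats jemalloc_prefix=je_
# would make configure_args() return
#   ['--enable-stats', '--with-jemalloc-prefix=je_']
# while the default spec yields an empty list.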
|
StarcoderdataPython
|
50918
|
<reponame>yasuotakei/torrent_parser<filename>tests/test_create.py
from __future__ import unicode_literals
import collections
import hashlib
import io
import os.path
import unittest
from torrent_parser import TorrentFileParser, TorrentFileCreator
class TestCreate(unittest.TestCase):
TEST_FILES_DIR = os.path.join(os.path.dirname(__file__), 'test_files')
REAL_FILE = os.path.join(TEST_FILES_DIR, 'real.torrent')
def test_simple_create(self):
data = collections.OrderedDict()
data['a'] = 1
data['b'] = 2
self.assertEqual(TorrentFileCreator(data).encode(), b'd1:ai1e1:bi2ee')
def test_same_output_if_no_edit(self):
with open(self.REAL_FILE, 'rb') as fp:
in_data = fp.read()
data = TorrentFileParser(io.BytesIO(in_data), True).parse()
out_data = TorrentFileCreator(data).encode()
m1 = hashlib.md5()
m1.update(in_data)
m2 = hashlib.md5()
m2.update(out_data)
self.assertEqual(m1.digest(), m2.digest())
|
StarcoderdataPython
|
3397423
|
# -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2019 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
User-specified notes for Circuit, Part, Pin, Net, Bus, Interface objects.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import str
from future import standard_library
standard_library.install_aliases()
class Note(list):
def __init__(self, *notes):
super(Note, self).__init__()
self.__iadd__(*notes)
def __iadd__(self, *notes):
"""Add new notes."""
for note in notes:
if isinstance(note, (tuple, list, set)):
self.extend(note)
else:
self.append(note)
return self
def __str__(self):
return "\n".join(self)
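# Hedged usage sketch (not from the original module): list/tuple/set arguments
# are extended element-wise and each note is rendered on its own line, e.g.
#   n = Note("first note", ["second", "third"])
#   list(n)  -> ["first note", "second", "third"]
#   str(n)   -> "first note\nsecond\nthird"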
|
StarcoderdataPython
|
171964
|
import numpy as np
from .Layer import Layer
class Sign(Layer):
"""Sign Layer
f(x) = 1 for x > 0
f(x) = 0 for x = 0
f(x) = -1 for x < 0
Attributes:
input_shape = [N, C, H, W]: The shape of the input tensor
output_shape = [N, C, H, W]: The shape of the resulting output tensor, must match the input shape
"""
def __init__(self, graph, node, input_shape):
super().__init__(input_shape, input_shape, "sign")
# self.input_shape = get_tensor_shape(graph, node.output[0])
# self.output_shape = input_shape
# self.name = "sign"
class Sigmoid(Layer):
"""Sigmoid activation
f(x) = 1 / (1 + exp(-x))
Attributes:
input_shape = [N, C, H, W]: The shape of the input tensor
output_shape = [N, C, H, W]: The shape of the resulting output tensor, must match the input shape
"""
def __init__(self, graph, node, input_shape):
super().__init__(input_shape, input_shape, "sigmoid")
class Relu(Layer):
"""Rectified Linear Unit
f(x) = max(0, x)
Attributes:
input_shape = [N, C, H, W]: The shape of the input tensor
output_shape = [N, C, H, W]: The shape of the resulting output tensor, must match the input shape
"""
def __init__(self, graph, node, input_shape):
super().__init__(input_shape, input_shape, "relu")
class LeakyRelu(Layer):
"""Leaky Rectified Linear Unit
f(x) = alpha * x for x < 0
f(x) = x for x >= 0
Attributes:
input_shape = [N, C, H, W]: The shape of the input tensor
output_shape = [N, C, H, W]: The shape of the resulting output tensor, must match the input shape
alpha: Coefficient of leakage
"""
def __init__(self, graph, node, input_shape):
super().__init__(input_shape, input_shape, "leakyrelu")
class Step(Layer):
"""Step Layer
f(x) = high for x > threshold
f(x) = high for x = threshold and threshold_is_high
f(x) = low for x = threshold and not threshold_is_high
f(x) = low for x < threshold
This is the Activation Layer in a binary neural net as it has only two distinct outputs (in comparison
to the three outputs of Sign Layers). There is no official support for Step Layers in ONNX.
To generate a net with Step Layers, use the following ONNX structure:
Greater + Where or
Less + Where
The code generator will convert this into a Step Layer if the binary argument is passed.
Example in PyTorch:
x = torch.where(x > 0, torch.tensor([1.0]), torch.tensor([-1.0]))
When a BatchNormalization Layer follows directly afterwards, the scales and biases are embedded as thresholds
of the Step Layer. The following holds since x is an integer:
x * s - b > 0
x > int(b / s)
The output is directly packed into ints of size binary_word_size. This is done by setting each bit individually.
The following sets the c'th leftmost bit to 1 or 0:
output |= (1U << ((binary_word_size-1) - c % binary_word_size))
output &= ~(1U << ((binary_word_size-1) - c % binary_word_size))
Attributes:
input_shape = [N, C, H, W]: The shape of the input tensor
output_shape = [N, C, H, W]: The shape of the resulting output tensor, must match the input shape
threshold: The threshold, can be scalar or numpy array
low: Value selected at indices where x < threshold
high: Value selected at indices where x > threshold
threshold_is_high: Whether high value is selected where x = threshold
"""
def __init__(self, input_shape, threshold, low, high):
super().__init__(input_shape, input_shape, "step")
self.threshold = threshold
self.low = low
self.high = high
self.threshold_is_high = True
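# Sketch of the threshold folding described in the docstring above (illustrative
# numbers; it only holds for a positive BatchNorm scale s with b / s >= 0):
# with s = 0.5, b = 1.25 and integer x,
#   x * 0.5 - 1.25 > 0   <=>   x > 2.5   <=>   x > int(1.25 / 0.5) == 2
# i.e. the float comparison collapses to a plain integer threshold of 2.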
# class Softmax(Layer):
# """Softmax (normalized exponential)
# To combat numerical issues when doing softmax computation, a common trick is used that shifts
# the input vector by subtracting the maximum element in it from all elements.
# z = x - max(x)
# numerator = np.exp(z)
# denominator = np.sum(numerator)
# softmax = numerator/denominator
# Attributes:
# output_shape = [N, D]: The dimension of the output tensor
# """
# def __init__(self, output_shape):
# self.input_shape = self.output_shape = output_shape
# def render(self, backend, **kwargs):
# code_init = ''
# code_alloc = super(Softmax, self).render('alloc', output_shape=self.output_shape, backend=backend, **kwargs)
# code_predict = super(Softmax, self).render('softmax', output_size=self.output_shape[1], backend=backend,**kwargs)
# return code_init, code_alloc, code_predict
# def output_type(self, input_type, backend):
# return 'float'
class LogSoftmax(Layer):
"""Log of Softmax
To combat numerical issues when doing softmax computation, a common trick is used that shifts
the input vector by subtracting the maximum element in it from all elements.
z = x - max(x)
numerator = np.exp(z)
denominator = np.sum(numerator)
softmax = numerator/denominator
logsoftmax = np.log(softmax)
Attributes:
output_shape = [N, D]: The dimension of the output tensor
"""
def __init__(self, graph, node, input_shape):
super().__init__(input_shape, input_shape, "logsoftmax")
|
StarcoderdataPython
|
1750797
|
# Write a function called "show_excitement" where the string
# "I am super excited for this course!" is returned exactly
# 5 times, where each sentence is separated by a single space.
# Return the string with "return".
# You can only have the string once in your code.
# Don't just copy/paste it 5 times into a single variable!
def show_excitement():
# Your code goes here!
here = ''
for i in range(5):
here += "I am super excited for this course! "
return here[:-1]
print show_excitement()
|
StarcoderdataPython
|
3267929
|
# -*- coding: utf-8 -*-
import unittest
import pytest
from calvin.utilities.attribute_resolver import AttributeResolver
class AttributeResolverTester(unittest.TestCase):
def test_cpu_resources(self):
"""
Tests valid cpu resources in the indexed_public field
"""
att = AttributeResolver({"indexed_public": {"cpuAvail": "100"}})
att_list = att.get_indexed_public(as_list=True)
self.assertEqual(att_list[0][2], 'cpuAvail')
self.assertEqual(att_list[0][3], '0')
self.assertEqual(att_list[0][4], '25')
self.assertEqual(att_list[0][5], '50')
self.assertEqual(att_list[0][6], '75')
self.assertEqual(att_list[0][7], '100')
self.assertEqual(att.get_indexed_public()[0], '/node/resource/cpuAvail/0/25/50/75/100')
def test_cpu_invalid_value(self):
"""
Tests invalid cpu resources in the indexed_public field
"""
att = AttributeResolver({"indexed_public": {"cpuAvail": "1"}})
att_list = att.get_indexed_public(as_list=True)
self.assertEqual(att_list[0][2], 'cpuAvail')
self.assertEqual(att.get_indexed_public()[0], '/node/resource/cpuAvail')
def test_cpu_total(self):
"""
Tests valid CPU power in the indexed_public field
"""
att = AttributeResolver({"indexed_public": {"cpuTotal": "10000000"}})
att_list = att.get_indexed_public(as_list=True)
self.assertEqual(att_list[0][2], 'cpuTotal')
self.assertEqual(att_list[0][3], '1')
self.assertEqual(att_list[0][4], '1000')
self.assertEqual(att_list[0][5], '100000')
self.assertEqual(att_list[0][6], '1000000')
self.assertEqual(att_list[0][7], '10000000')
self.assertEqual(att.get_indexed_public()[0], '/node/attribute/cpuTotal/1/1000/100000/1000000/10000000')
def test_cpu_total_invalid_value(self):
"""
Tests invalid CPU power in the indexed_public field
"""
att = AttributeResolver({"indexed_public": {"cpuTotal": "2"}})
att_list = att.get_indexed_public(as_list=True)
self.assertEqual(att_list[0][2], 'cpuTotal')
self.assertEqual(att.get_indexed_public()[0], '/node/attribute/cpuTotal')
def test_mem_avail(self):
"""
Tests valid RAM resources in the indexed_public field
"""
att = AttributeResolver({"indexed_public": {"memAvail": "100"}})
att_list = att.get_indexed_public(as_list=True)
self.assertEqual(att_list[0][2], 'memAvail')
self.assertEqual(att_list[0][3], '0')
self.assertEqual(att_list[0][4], '25')
self.assertEqual(att_list[0][5], '50')
self.assertEqual(att_list[0][6], '75')
self.assertEqual(att_list[0][7], '100')
self.assertEqual(att.get_indexed_public()[0], '/node/resource/memAvail/0/25/50/75/100')
def test_cpu_affinity(self):
"""
Tests cpu affinity parameter in indexed_public field
"""
att = AttributeResolver({"indexed_public": {"cpuAffinity": "dedicated"}})
att_list = att.get_indexed_public(as_list=True)
self.assertEqual(att_list[0][2], 'cpuAffinity')
self.assertEqual(att_list[0][3], 'dedicated')
self.assertEqual(att.get_indexed_public()[0], '/node/attribute/cpuAffinity/dedicated')
def test_mem_avail_invalid_value(self):
"""
Tests invalid RAM resources in the indexed_public field
"""
att = AttributeResolver({"indexed_public": {"memAvail": "1"}})
att_list = att.get_indexed_public(as_list=True)
self.assertEqual(att_list[0][2], 'memAvail')
self.assertEqual(att.get_indexed_public()[0], '/node/resource/memAvail')
def test_mem_total(self):
"""
Tests valid RAM resources in the indexed_public field
"""
att = AttributeResolver({"indexed_public": {"memTotal": "10G"}})
att_list = att.get_indexed_public(as_list=True)
self.assertEqual(att_list[0][2], 'memTotal')
self.assertEqual(att_list[0][3], '1K')
self.assertEqual(att_list[0][4], '100K')
self.assertEqual(att_list[0][5], '1M')
self.assertEqual(att_list[0][6], '100M')
self.assertEqual(att_list[0][7], '1G')
self.assertEqual(att_list[0][8], '10G')
self.assertEqual(att.get_indexed_public()[0], '/node/attribute/memTotal/1K/100K/1M/100M/1G/10G')
def test_mem_total_invalid_value(self):
"""
Tests invalid RAM resources in the indexed_public field
"""
att = AttributeResolver({"indexed_public": {"memTotal": "10K"}})
att_list = att.get_indexed_public(as_list=True)
self.assertEqual(att_list[0][2], 'memTotal')
self.assertEqual(att.get_indexed_public()[0], '/node/attribute/memTotal')
|
StarcoderdataPython
|
3393643
|
import datetime
from math import ceil
from typing import List
from idact.core.config import ClusterConfig
from idact.core.retry import Retry
from idact.detail.allocation.allocation_parameters import AllocationParameters
from idact.detail.entry_point.fetch_port_info import fetch_port_info
from idact.detail.entry_point.remove_port_info import remove_port_info
from idact.detail.entry_point.sshd_port_info import SshdPortInfo
from idact.detail.helper.retry import retry_with_config
from idact.detail.helper.stage_info import stage_debug
from idact.detail.log.get_logger import get_logger
from idact.detail.nodes.node_impl import NodeImpl
def determine_ports_for_nodes(allocation_id: int,
hostnames: List[str],
config: ClusterConfig,
raise_on_missing: bool) -> List[int]:
"""Tries to determine sshd ports for each node.
Removes the file if no exception was raised.
:param allocation_id: Job id.
:param hostnames: List of hostnames.
:param config: Cluster config.
:param raise_on_missing: Raise an exception if port could not
be determined.
"""
log = get_logger(__name__)
with stage_debug(log, "Fetching port info for sshd."):
port_info_contents = fetch_port_info(allocation_id=allocation_id,
config=config)
port_info = SshdPortInfo(contents=port_info_contents)
with stage_debug(log, "Determining ports for each host."):
ports = [port_info.get_port(host=host,
raise_on_missing=raise_on_missing)
for host in hostnames]
with stage_debug(log, "Removing the file containing sshd port info."):
remove_port_info(allocation_id, config=config)
return ports
def finalize_allocation(allocation_id: int,
hostnames: List[str],
nodes: List[NodeImpl],
parameters: AllocationParameters,
allocated_until: datetime.datetime,
config: ClusterConfig):
"""Fetches node ports and makes them allocated.
:param allocation_id: Allocation id, e.g. Slurm job id.
:param hostnames: List of hostnames.
:param nodes: Nodes to update with information.
:param parameters: Allocation parameters.
:param allocated_until: Timestamp for job termination.
:param config: Cluster config.
"""
def try_to_determine_ports():
return determine_ports_for_nodes(allocation_id=allocation_id,
hostnames=hostnames,
config=config,
raise_on_missing=True)
try:
node_count = len(hostnames)
multiplier = int(ceil(node_count / 10))
ports = retry_with_config(try_to_determine_ports,
name=Retry.PORT_INFO,
config=config,
multiplier=multiplier)
except RuntimeError:
ports = determine_ports_for_nodes(allocation_id=allocation_id,
hostnames=hostnames,
config=config,
raise_on_missing=False)
for host, port, node in zip(hostnames, ports, nodes):
node.make_allocated(
host=host,
port=port,
cores=parameters.cores,
memory=parameters.memory_per_node,
allocated_until=allocated_until)
|
StarcoderdataPython
|
4837551
|
<reponame>chrisbubernak/recipe-scrapers<filename>recipe_scrapers/simplyrecipes.py
from ._abstract import AbstractScraper
from ._utils import get_minutes, normalize_string, get_yields
class SimplyRecipes(AbstractScraper):
@classmethod
def host(cls):
return "simplyrecipes.com"
def title(self):
return self.soup.find("h1").get_text()
def total_time(self):
return get_minutes(
self.soup.find("div", {"class": "total-time"})
.find("span", {"class": "meta-text__data"})
.text
)
def yields(self):
return get_yields(
normalize_string(
self.soup.find("div", {"class": "recipe-serving"})
.find("span", {"class": "meta-text__data"})
.text
)
)
def ingredients(self):
ingredients = self.soup.find("ul", {"class": "ingredient-list"}).findAll("li")
return [normalize_string(ingredient.get_text()) for ingredient in ingredients]
def instructions(self):
steps = self.soup.find(
"div", {"class": "structured-project__steps"}
).ol.findAll("li")
return "\n".join(
[
normalize_string(
step.div.text + ": " + "".join([p.text for p in step.findAll("p")])
)
for step in steps
]
)
|
StarcoderdataPython
|
3251297
|
#!/usr/bin/env python3
import unittest
from dataclasses import dataclass
from typing import List, Optional, Sequence
from common import open_fixture
def abs(n: int) -> int:
if n < 0:
return -n
return n
def in_plane(a: int, b: int, n: int) -> bool:
if n < a and n < b:
return False
if n > a and n > b:
return False
return True
@dataclass
class Point:
x: int
y: int
score: int = 0
def __str__(self) -> str:
return f"({self.x}, {self.y})"
def manhattan_distance(self) -> int:
return abs(self.x) + abs(self.y)
@dataclass
class Line:
a: Point
b: Point
score: int = 0
def __str__(self) -> str:
return f"[{self.a}, {self.b}]"
@property
def is_horizontal(self) -> bool:
return self.a.y == self.b.y
@property
def is_vertical(self) -> bool:
return self.a.x == self.b.x
@property
def length(self) -> int:
return abs((self.a.x - self.b.x) + (self.a.y - self.b.y))
def intersection(self, other: "Line") -> Optional[Point]:
if self.is_horizontal:
if (
other.is_vertical
and in_plane(self.a.x, self.b.x, other.a.x)
and in_plane(other.a.y, other.b.y, self.a.y)
):
return Point(other.a.x, self.a.y)
else:
if (
other.is_horizontal
and in_plane(self.a.y, self.b.y, other.a.y)
and in_plane(other.a.x, other.b.x, self.a.x)
):
return Point(self.a.x, other.a.y)
return None
Path = List[Line]
def decode_path(s: str) -> Path:
tokens = s.split(",")
x, y, d = 0, 0, 0
path: Path = []
for token in tokens:
(o, n) = token[0], int(token[1:])
start = Point(x, y)
if o == "R":
x += n
elif o == "D":
y -= n
elif o == "L":
x -= n
elif o == "U":
y += n
else:
raise RuntimeError(f"Unknown token: {token}")
line = Line(start, Point(x, y), d)
path.append(line)
d += line.length
return path
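# Quick illustration (values are just an example): decode_path("R8,U5") yields
# two segments with the cumulative wire distance stored in Line.score:
#   Line(Point(0, 0), Point(8, 0), score=0)   # length 8
#   Line(Point(8, 0), Point(8, 5), score=8)   # length 5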
def get_intersections(A: Path, B: Path) -> Sequence[Point]:
intersections: List[Point] = []
for line_a in A:
for line_b in B:
intersection = line_a.intersection(line_b)
if intersection:
# compute score
intersection.score = (
line_a.score
+ Line(line_a.a, intersection).length
+ line_b.score
+ Line(line_b.a, intersection).length
)
intersections.append(intersection)
return intersections
def get_closest_by_manhattan(A: Path, B: Path) -> int:
results = sorted(point.manhattan_distance() for point in get_intersections(A, B))
for result in results:
if result:
return result
return results[0]
def get_closest_by_distance(A: Path, B: Path) -> int:
results = sorted(point.score for point in get_intersections(A, B))
for result in results:
if result:
return result
return results[0]
class TestDay3(unittest.TestCase):
def test_part1_fixture1(self):
A = decode_path("R75,D30,R83,U83,L12,D49,R71,U7,L72")
B = decode_path("U62,R66,U55,R34,D71,R55,D58,R83")
self.assertEqual(get_closest_by_manhattan(A, B), 159)
def test_part1_fixture2(self):
A = decode_path("R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51")
B = decode_path("U98,R91,D20,R16,D67,R40,U7,R15,U6,R7")
self.assertEqual(get_closest_by_manhattan(A, B), 135)
def test_part1(self):
with open_fixture("day03") as fp:
lines = fp.readlines()
A = decode_path(lines[0])
B = decode_path(lines[1])
self.assertEqual(get_closest_by_manhattan(A, B), 2129)
def test_part2_fixture1(self):
A = decode_path("R75,D30,R83,U83,L12,D49,R71,U7,L72")
B = decode_path("U62,R66,U55,R34,D71,R55,D58,R83")
self.assertEqual(get_closest_by_distance(A, B), 610)
def test_part2_fixture2(self):
A = decode_path("R98,U47,R26,D63,R33,U87,L62,D20,R33,U53,R51")
B = decode_path("U98,R91,D20,R16,D67,R40,U7,R15,U6,R7")
self.assertEqual(get_closest_by_distance(A, B), 410)
def test_part2(self):
with open_fixture("day03") as fp:
lines = fp.readlines()
A = decode_path(lines[0])
B = decode_path(lines[1])
self.assertEqual(get_closest_by_distance(A, B), 134662)
|
StarcoderdataPython
|
92968
|
<filename>test_list.py
import time
# xrange = range
def test1():
c = 1000000
a = []
tm = time.time()
for i in xrange(c):
a.append(i)
print "python-append(1000000)", time.time() - tm
def test2():
c = 100000
a = []
tm = time.time()
for i in xrange(c):
a.insert(0, i)
print "python-insert(100000)", time.time() - tm
def test3():
c = 1000000
a = []
for i in xrange(c):
a.append(i)
tm = time.time()
for i in xrange(c):
a.pop()
print "python-pop-after(1000000)", time.time() - tm
def test4():
c = 100000
a = []
for i in xrange(c):
a.append(i)
tm = time.time()
for i in xrange(c):
a.pop(0)
print "python-pop-before(100000)", time.time() - tm
def test5():
c = 1000000
a = []
for i in xrange(c):
a.append(i)
tm = time.time()
for i in xrange(c):
a[i] = a[i] - a[i]
print "python-get/set(1000000)", time.time() - tm
seed = 123456789
def srandom():
global seed
seed = (1103515245 * seed + 12345) % 65536
return seed
def test6():
global seed
seed = 123456789
c = 1000000
a = []
for i in xrange(c):
a.append(srandom())
def comp(a, b):
return a - b
tm = time.time()
# a.sort(cmp=comp)
a.sort(key=lambda x: x)
print "python-sort(1000000)", time.time() - tm
test1()
test2()
test3()
test4()
test5()
test6()
|
StarcoderdataPython
|
3380661
|
import tensorflow as tf
import numpy as np
from model import recon2recon
sess = tf.Session()
model = recon2recon(sess,'arch1_10','../../../data/processed/train_10_500/',
'../../../data/processed/test_10_500/',
'../../../data/processed/val_10_500/')
model.train(10)
|
StarcoderdataPython
|
121014
|
DASHBOARD = 'mydashboard'
DISABLED = False
ADD_INSTALLED_APPS = [
'openstack_dashboard.dashboards.mydashboard',
]
|
StarcoderdataPython
|
3285366
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Unlimited developers
"""
Tests the electrum call 'blockchain.transaction.get'
"""
import asyncio
from test_framework.util import assert_equal, p2p_port
from test_framework.test_framework import BitcoinTestFramework
from test_framework.loginit import logging
from test_framework.electrumutil import *
from test_framework.nodemessages import COIN, ToHex
from test_framework.blocktools import create_coinbase, create_block, \
create_transaction
from test_framework.mininode import (
P2PDataStore,
NodeConn,
NetworkThread,
)
from test_framework.script import CScript, OP_TRUE, OP_DROP, OP_NOP
import time
TX_GET = "blockchain.transaction.get"
class ElectrumTransactionGet(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [bitcoind_electrum_args()]
def bootstrap_p2p(self):
"""Add a P2P connection to the node.
Helper to connect and wait for version handshake."""
self.p2p = P2PDataStore()
self.connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.p2p)
self.p2p.add_connection(self.connection)
NetworkThread().start()
self.p2p.wait_for_verack()
assert(self.p2p.connection.state == "connected")
def mine_blocks(self, n, num_blocks, txns = None):
prev = n.getblockheader(n.getbestblockhash())
print(prev)
prev_height = prev['height']
prev_hash = prev['hash']
prev_time = max(prev['time'] + 1, int(time.time()))
blocks = [ ]
for i in range(num_blocks):
coinbase = create_coinbase(prev_height + 1)
b = create_block(
hashprev = prev_hash,
coinbase = coinbase,
txns = txns,
nTime = prev_time + 1)
txns = None
b.solve()
blocks.append(b)
prev_time = b.nTime
prev_height += 1
prev_hash = b.hash
self.p2p.send_blocks_and_test(blocks, n)
assert_equal(blocks[-1].hash, n.getbestblockhash())
# Return coinbases for spending later
return [b.vtx[0] for b in blocks]
def run_test(self):
n = self.nodes[0]
self.bootstrap_p2p()
coinbases = self.mine_blocks(n, 5)
async def async_tests():
cli = ElectrumConnection()
await cli.connect()
# Test raw
for tx in coinbases:
assert_equal(ToHex(tx), await cli.call(TX_GET, tx.hash))
# Test verbose.
# The spec is unclear. It states:
#
# "whatever the coin daemon returns when asked for a
# verbose form of the raw transaction"
#
# Just check the basics.
for tx in coinbases:
electrum = await cli.call(TX_GET, tx.hash, True)
bitcoind = n.getrawtransaction(tx.hash, True)
assert_equal(bitcoind['txid'], electrum['txid'])
assert_equal(bitcoind['locktime'], electrum['locktime'])
assert_equal(bitcoind['size'], electrum['size'])
assert_equal(bitcoind['hex'], electrum['hex'])
assert_equal(len(bitcoind['vin']), len(electrum['vin']))
assert_equal(len(bitcoind['vout']), len(electrum['vout']))
loop = asyncio.get_event_loop()
loop.run_until_complete(async_tests())
if __name__ == '__main__':
ElectrumTransactionGet().main()
|
StarcoderdataPython
|
3269690
|
from pylgbst.hub import TrainHub
from pylgbst import get_connection_gattool
from pylgbst.peripherals import Motor
import time
def callback(value):
print("Voltage: %s" % value)
conn = get_connection_gattool(hub_mac='90:84:2B:0F:D1:F8') #auto connect does not work
hub = TrainHub(conn)
for device in hub.peripherals:
print(device)
motor = Motor(hub, hub.PORT_A)
#hub.connection.notification_delayed('050082030a', 0.1)
motor.start_power(1.0) #here motor really moves
time.sleep(0.5)
#hub.connection.notification_delayed('050082030a', 0.1)
motor.stop() #here motor really stops
print("Goodbye")
"""
Output
0
50 => 0x32
59 => 0x3B
60 => 0x3C
Goodbye
"""
|
StarcoderdataPython
|
117598
|
# -*- coding: utf-8 -*-
from typing import Optional
import uvicorn
from fastapi import FastAPI, Query, Path, Response
from rarbg import *
tags_metadata = [
{
"name": "Search",
"externalDocs": {
"description": "Available Categories",
"url": "https://github.com/Apocalypsor/Rarbg/tree/master/rarbg/Categories.md",
},
},
{"name": "Latest"},
]
app = FastAPI(title="Customed RARBG feed", openapi_tags=tags_metadata)
@app.get("/search/{title}", summary="Search by title.", tags=["Search"])
def generateSearch(
title: str = Path(..., description="Title"),
category: Optional[set] = Query(None, description="Category"),
):
status, output = searchRarbg.getResult("search_string", title, category)
if status:
return Response(content=output, media_type="application/rss+xml")
return Response(content=f"Error: {output}")
@app.get("/imdb/{imdb}", summary="Search by IMDb index.", tags=["Search"])
def generateIMDb(
imdb: str = Path(..., description="IMDb ID"),
category: Optional[set] = Query(None, description="Category"),
):
status, output = searchRarbg.getResult("search_imdb", imdb, category)
if status:
return Response(content=output, media_type="application/rss+xml")
return Response(content=f"Error: {output}")
@app.get("/tvdb/{tvdb}", summary="Search by TheTVDB index.", tags=["Search"])
def generateTVDb(
tvdb: str = Path(..., description="TheTVDB ID"),
category: Optional[set] = Query(None, description="Category"),
):
status, output = searchRarbg.getResult("search_tvdb", tvdb, category)
if status:
return Response(content=output, media_type="application/rss+xml")
return Response(content=f"Error: {output}")
@app.get("/tmdb/{tmdb}", summary="Search by TMDb index.", tags=["Search"])
def generateTMDb(
tmdb: str = Path(..., description="TMDb ID"),
category: Optional[set] = Query(None, description="Category"),
):
status, output = searchRarbg.getResult("search_themoviedb", tmdb, category)
if status:
return Response(content=output, media_type="application/rss+xml")
return Response(content=f"Error: {output}")
@app.get("/latest", summary="List latest torrents.", tags=["Latest"])
def generateLatest():
status, output = searchRarbg.getResult("latest")
if status:
return Response(content=output, media_type="application/rss+xml")
return Response(content=f"Error: {output}")
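# Illustrative requests against this app (IDs and category numbers are made up
# for the example; see the Categories.md link in the OpenAPI tags for real values):
#   GET /latest
#   GET /search/ubuntu?category=4
#   GET /imdb/tt0903747?category=18&category=41
# Each endpoint proxies searchRarbg.getResult() and returns an RSS feed on success.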
if __name__ == "__main__":
uvicorn.run(app, host="127.0.0.1", port=8000, debug=True)
|
StarcoderdataPython
|
1704236
|
#!/usr/bin/env python
import npyscreen
import sys
import os
import mipexpect
import getopt
import mipass
import platform
import time
default_opt = \
"""% set dynamic socks proxy
% -D 1080
% forward a local port to a service at a remote port, e.g. vnc @ host:1
% -L 5901
% -L 5901:1.2.3.4:5901
% forward a remote port to a service at a local port
% -R 8080
% sftp options
%# -r
"""
c = None
class MisshApp(npyscreen.NPSApp):
def __init__(self, fn, host, password, opt):
self.fn = fn
self.hostn = host
self.passwordn = password
self.optn = opt
# self.fwdn = fwd
def main(self):
npyscreen.setTheme(npyscreen.Themes.TransparentThemeDarkText)
F = npyscreen.ActionForm(name="MiSSH - " + self.fn)
F.on_ok = self.on_ok
F.while_editing = self.on_switch
self.host = F.add(npyscreen.TitleText, name="Host:", value=self.hostn)
# self.port = F.add(npyscreen.TitleText, name = "Port:", value=self.portn )
self.password = F.add(npyscreen.TitlePassword, name="Password:", value=self.passwordn)
F.add(npyscreen.TitleFixedText, name="Other options:", height=1)
self.options = F.add(npyscreen.MultiLineEdit, value=self.optn, max_height=12)
# self.forward_only = F.add(npyscreen.Checkbox, name="Forward only?", value=self.fwdn)
# self.forward_only= F.add(npyscreen.TitleMultiSelect, value=self.fwdn,
# name="Mode",values=["Forward only?"],scroll_exit=True)
self.connect = False
F.edit()
def __str__(self):
return "%s # %s\n%s" % (self.host.value, self.password.value, self.options.value)
def on_switch(self):
if self.host.value != self.hostn:
ok, pwd = c.get_pass(self.host.value)
if ok:
self.password.value = pwd
self.passwordn = pwd
self.hostn = self.host.value
def on_ok(self):
self.connect = True
def remove_remark(line):
pos = line.find("#")
if(pos >= 0):
line = line[:pos]
line = line.strip()
return line
def get_key_val(line):
pos = line.find("=")
if(pos < 0):
return "", ""
return line[:pos].strip().lower(), line[pos + 1:].strip()
class missh_cfg:
fn = ""
def __init__(self, fn, new=False, ftp=False):
self.fn = fn
self.host = None
self.opt = []
self.ftp = ftp
# self.fwd = 0
if not new:
self.read_cfg()
def cmdline(self):
p = self.host.find(':')
if p > 0:
host = self.host[:p]
o = [(self.ftp and '-P ' or '-p ') + self.host[p + 1:]]
else:
host = self.host
o = []
for i in self.opt:
i = i.strip()
if not i.startswith("%"):
if self.ftp:
if i.startswith("#"):
o.append(i[1:].strip())
else:
if not i.startswith("#"):
o.append(i)
return (self.ftp and 'sftp ' or "ssh ") + " ".join(o) + " " + host
def update(self, host, opt):
need_write = False
if self.host != host:
self.host = host
need_write = True
if self.opt != opt:
self.opt = opt
need_write = True
if need_write:
self.write_cfg()
def write_cfg(self):
f = open(self.fn, "wb")
f.write("#!/usr/bin/env missh\n")
f.write("# don't edit this file manually. please use 'missh -o'.\n\n")
f.write("host = %s\n" % self.host)
for i in self.opt:
f.write("opt = %s\n" % i)
f.close()
def read_cfg(self):
line_cnt = 0
try:
f = open(self.fn, "rb")
for line in f:
line_cnt = line_cnt + 1
# strip and remove remarks
line = remove_remark(line)
if line == '':
continue
# fetch the key and value
try:
key, val = get_key_val(line)
if(key == "host"):
self.host = val
elif(key == "opt"):
self.opt.append(val)
elif(key == "forward"):
pass # obsolete
else:
raise ValueError("bad key")
except:
print "error config line #%d : %s" % (line_cnt, line)
continue
f.close()
except:
print "bad configuration file:", self.fn
return
def set_master(c, create=True):
import getpass
if create:
note="Create the master password:"
else:
note="Input the new master password:"
while 1:
master_pwd = getpass.getpass(note)
master_pwd2 = getpass.getpass("Please repeat it:")
if master_pwd == master_pwd2:
break
print "They are not matched!"
ok, resp = c.set_master(master_pwd)
if not ok:
print "Can't set the master key. Error:", resp
sys.exit(1)
return master_pwd
def get_master(c, force=False):
if c.need_master() == -2:
return set_master(c)
elif force or c.need_master() == -1:
import getpass
while 1:
master_pwd = getpass.getpass("Input the master password:")
ok, resp = c.check_master(master_pwd)
if not ok:
print "Please try again. Error:", resp
else:
return master_pwd
return None
def usage(to=None):
print "missh 0.3.0 by LenX (<EMAIL>)"
print
if to!=None:
print "The timeout of caching the master password is",to,"minutes."
print """Usage:
missh [opt] [file_path]
-o open a session file
-n create a new session file
-m change the master password
-t timeout change the timeout of caching the master password, in minutes
-k kill all background missh processes
-r reconnect automatically after disconnection
-f use sftp to connect the host
-h show help information
-v verbose mode
"""
sys.exit(2)
def main():
global c
# parse arguments
fn = ""
conf = ""
kill = False
try:
opts, args = getopt.getopt(sys.argv[1:], "hvnomt:krsf")
except getopt.GetoptError as err:
print str(err) # will print something like "option -a not recognized"
usage()
edit = False
kill = False
create = False
change_master = False
timeout= None
recon = False
ftp = False
for o, a in opts:
if o == "-v":
mipass.verbose = True
elif o == "-h":
usage()
sys.exit()
elif o == "-o":
edit = True
elif o == '-n':
edit = True
create = True
elif o == '-k':
kill = True
elif o == '-m':
change_master = True
elif o == '-t':
timeout=a
elif o == '-r':
recon = True
elif o == '-f':
ftp = True
else:
print "Error: bad options - ( %s : %s )" % (o, a)
usage()
c = mipass.client(mipass.unixsock)
if change_master:
c.connect()
if c.need_master()==-2:
master_pwd = get_master(c, True)
else:
master_pwd = get_master(c, True)
new_pwd = set_master(c, False)
if new_pwd!=master_pwd:
c.set_master(new_pwd)
return
if kill:
ok, resp = c.kill()
if ok:
print "The service is stopped."
else:
print resp
sys.exit(0)
ok, to = c.get_timeout()
if timeout!=None:
try:
t1=int(timeout)
except:
t1=-1
if t1<=0:
print "Bad timeout:",timeout
return
if not ok or to!=timeout:
c.set_timeout(timeout)
print "The timeout is set to %s." % timeout
else:
print "The timeout is still %s." % timeout
return
if(len(args) == 1):
fn = args[0]
else:
if not ok:
to=None
usage(to)
sys.exit(2)
if not os.path.exists(fn) and not create:
print "Session file is not found:", fn
sys.exit(1)
if create and os.path.exists(fn):
s = raw_input("Session file exists. Are you going to rewrite it? [y/n]")
if s.lower() == 'y' or s.lower() == 'yes':
pass
else:
sys.exit(1)
c.connect()
get_master(c)
# parse msh file
connect = True
cfg = missh_cfg(fn, create, ftp=ftp)
# [a:login:main:get_password]
if create:
pwd = ""
cfg.opt = default_opt.split('\n')
else:
ok, pwd = c.get_pass(cfg.host)
if not ok:
pwd = ""
if mipass.verbose:
print "Password:", pwd
# show dialog if needed
# todo: verbose mode
if edit:
App = MisshApp(fn, cfg.host, pwd, "\n".join(cfg.opt))
App.run()
connect = App.connect
# update config
if connect:
cfg.update(App.host.value, App.options.value.split('\n'))
# update pwd
if pwd != App.password.value:
pwd = App.password.value
if App.passwordn != pwd:
c.set_pass(cfg.host, pwd)
# connect to ssh
if connect:
while 1:
fail = False
#workaround a bug of term of mac osx
#if platform.system().lower()=="darwin":
# os.system("clear")
print cfg.cmdline()
c = mipexpect.spawn(cfg.cmdline())
try:
i = c.expect(["yes/no", "assword:"], timeout=30)
if i == 0:
y = raw_input("%syes/no)?" % c.before)
if y.strip().lower() == "yes":
c.sendline("yes\n")
i = c.expect("assword:", timeout=30)
else:
c.sendline("no\n")
print "Host key verification failed."
fail = True
pass
if not fail:
c.sendline(pwd)
except:
fail=True
print c.before
if not fail:
c.interact()
if recon:
print "[%s] Disconnected. Try to reconnect after 3 sec. You can break it using CTRL-C." %\
time.ctime()
time.sleep(3)
continue
break
#workaround a bug of term of mac osx
#if platform.system().lower()=="darwin":
# os.system("tput reset")
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1635162
|
from panda3d.core import *
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.interval.IntervalGlobal import *
class DistributedPresent(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPresent')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.x = 0
self.y = 0
self.z = 0
self.h = 0
self.p = 0
self.r = 0
self.modelId = 1
self.model = None
self.collNode = None
self.fadeInSeq = None
self.idleSeq = None
self.collectSeq = None
self.vanishSeq = None
self.collNodeName = 'present_collision-%d' % id(self)
self.unavailable = False
self.presentSfx = loader.loadSfx('phase_4/audio/sfx/MG_pairing_match_bonus_both.ogg')
return
def announceGenerate(self):
self.collNodeName = self.uniqueName('present_collision')
if not self.unavailable:
self.respawn()
def disable(self):
self.ignoreAll()
if self.fadeInSeq:
self.fadeInSeq.finish()
self.fadeInSeq = None
if self.collectSeq:
self.collectSeq.finish()
self.collectSeq = None
if self.idleSeq:
self.idleSeq.finish()
self.idleSeq = None
if self.vanishSeq:
self.vanishSeq.finish()
self.vanishSeq = None
if self.model:
self.model.removeNode()
self.model = None
if self.collNode:
self.collNode.clearSolids()
self.collNode = None
self.presentSfx = None
DistributedObject.DistributedObject.disable(self)
return
def setPosHpr(self, x, y, z, h, p, r):
self.x = x
self.y = y
self.z = z
self.h = h
self.p = p
self.r = r
def setModelId(self, id):
self.modelId = id
def setUnavailable(self, avail):
if avail:
if self.model:
self.disablePresent()
else:
self.respawn()
self.unavailable = avail
def respawn(self):
if not self.model:
self.model = loader.loadModel(('phase_14/models/props/christmasBox{0}').format(self.modelId))
self.model.reparentTo(render)
self.model.setPosHpr(self.x, self.y, self.z, self.h, self.p, self.r)
if self.fadeInSeq:
self.fadeInSeq = None
if self.idleSeq:
self.idleSeq = None
self.fadeInSeq = LerpFunctionInterval(self.model.setAlphaScale, toData=1.0, fromData=0.0, duration=1)
self.fadeInSeq.start()
self.idleSeq = Sequence(self.model.posHprInterval(6.9, (self.x, self.y, self.z + 1), (self.h, 0, 0), blendType='easeInOut'), self.model.posHprInterval(16.9, (self.x, self.y, self.z), (self.h + 360, 0, 0), blendType='easeInOut'))
self.idleSeq.loop()
cs = CollisionSphere(0, 0, 0, 3)
self.collNode = self.model.attachNewNode(CollisionNode('cnode1')).node()
self.collNode.addSolid(cs)
self.collNode.setName(self.collNodeName)
self.model.unstash()
self.acceptOnce(('enter{0}').format(self.collNodeName), self.reward)
return
def disablePresent(self):
self.vanishSeq = LerpFunctionInterval(self.model.setAlphaScale, toData=0.0, fromData=1.0, duration=1)
self.vanishSeq.start()
self.collectSeq = Sequence(Func(base.playSfx, self.presentSfx, volume=0.9), Wait(1), Func(self.model.stash), Func(self.idleSeq.finish))
self.collectSeq.start()
def reward(self, collEntry):
self.sendUpdate('giveReward', [])
|
StarcoderdataPython
|
1769851
|
from mythic_payloadtype_container.MythicCommandBase import *
import json
class PwdArguments(TaskArguments):
def __init__(self, command_line):
super().__init__(command_line)
self.args = {}
async def parse_arguments(self):
pass
class PwdCommand(CommandBase):
cmd = "pwd"
needs_admin = False
help_cmd = "pwd"
description = "Prints the current working directory for the agent"
version = 1
author = "@mattreduce"
attackmapping = ["T1083"]
argument_class = PwdArguments
async def create_tasking(self, task: MythicTask) -> MythicTask:
return task
async def process_response(self, response: AgentResponse):
pass
|
StarcoderdataPython
|
3268300
|
from . import db
from utils import s_to_hms, hms_to_s
class OrderableMixin:
# TODO: implement testing
order = db.Column(db.Integer, index=True)
def _get_model_class(self):
for c in db.Model._decl_class_registry.values():
if (hasattr(c, '__tablename__') and
c.__tablename__ == self.__tablename__):
return c
def __init__(self):
self._model = self._get_model_class()
if self._model.query.count() == 0:
self.order = 0
else:
self.order = max((item.order for item in self._model.query)) + 1
def move_up(self):
self._model = self._get_model_class()
items = self._model.query.order_by(self._model.order).all()
id_ = items.index(self)
# if first item then do nothing
if id_ == 0:
return
# get the item before which we swap position with
item_before = items[id_ - 1]
# swap order numbers with the item before
x = self.order
self.order = item_before.order
item_before.order = x
db.session.add(self)
db.session.add(item_before)
db.session.commit()
# normalize order numbers for all items
for i, item in enumerate(self._model.query.order_by(self._model.order)):
item.order = i
db.session.commit()
def move_down(self):
self._model = self._get_model_class()
items = self._model.query.order_by(self._model.order).all()
id_ = items.index(self)
# if last item then do nothing
if id_ == len(items) - 1:
return
# get the item after, which we swap position with
item_after = items[id_ + 1]
# swap order numbers with the item after
x = self.order
self.order = item_after.order
item_after.order = x
db.session.add(self)
db.session.add(item_after)
db.session.commit()
# normalize order numbers for all items
for i, item in enumerate(self._model.query.order_by(self._model.order)):
item.order = i
db.session.commit()
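# Hedged usage sketch (assumes a model that mixes in OrderableMixin, such as the
# SequenceStep below): with three rows whose order values are 0, 1, 2, calling
# move_up() on the last row swaps its order with the middle row, and the final
# loop renumbers every row back to a dense 0..n-1 sequence, so repeated moves
# cannot leave gaps or duplicates.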
class Setting(db.Model):
__tablename__ = 'settings'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
value = db.Column(db.Float)
class SequenceStep(db.Model, OrderableMixin):
__tablename = 'sequence_steps'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
duration = db.Column(db.Integer)
temperature = db.Column(db.Float(precision=1))
tolerance = db.Column(db.Float(precision=1))
heater = db.Column(db.Boolean)
mixer = db.Column(db.Boolean)
def __init__(self, *args, **kwargs):
db.Model.__init__(self, *args, **kwargs)
OrderableMixin.__init__(self)
def duration_formatted(self):
return '{:02}:{:02}:{:02}'.format(*s_to_hms(self.duration)) or ''
|
StarcoderdataPython
|
47006
|
from typing import Callable
from putput.presets import displaCy
from putput.presets import iob2
from putput.presets import luis
from putput.presets import stochastic
def get_preset(preset: str) -> Callable:
"""A factory that gets a 'preset' Callable.
Args:
preset: the preset's name.
Returns:
The return value of calling a preset's 'preset'
function without arguments.
Examples:
>>> from pathlib import Path
>>> from putput.pipeline import Pipeline
>>> pattern_def_path = Path(__file__).parent.parent.parent / 'tests' / 'doc' / 'example_pattern_definition.yml'
>>> dynamic_token_patterns_map = {'ITEM': ('fries',)}
>>> p = Pipeline.from_preset('IOB2',
... pattern_def_path,
... dynamic_token_patterns_map=dynamic_token_patterns_map)
>>> generator = p.flow(disable_progress_bar=True)
>>> for utterance, tokens, groups in generator:
... print(utterance)
... print(tokens)
... print(groups)
... break
can she get fries can she get fries and fries
('B-ADD I-ADD I-ADD', 'B-ITEM', 'B-ADD I-ADD I-ADD', 'B-ITEM', 'B-CONJUNCTION', 'B-ITEM')
('B-ADD_ITEM I-ADD_ITEM I-ADD_ITEM I-ADD_ITEM', 'B-ADD_ITEM I-ADD_ITEM I-ADD_ITEM I-ADD_ITEM',
'B-None', 'B-None')
"""
supported_presets = ('IOB2', 'DISPLACY', 'LUIS', 'STOCHASTIC')
if preset == 'IOB2':
return iob2.preset()
if preset == 'DISPLACY':
return displaCy.preset()
if preset == 'LUIS':
return luis.preset()
if preset == 'STOCHASTIC': # pragma: no cover
return stochastic.preset()
raise ValueError('Unrecoginzed preset. Please choose from the supported presets: {}'.format(supported_presets))
|
StarcoderdataPython
|
1691566
|
<gh_stars>1-10
import discord
import re
from discord.ext.commands import command, Cog
import asyncio
from botutils.searchforlinks import get_ffn_url_from_query, get_ao3_url_from_query
from brain.ffn_brain import ffn_searcher
from brain.ao3_brain import ao3_searcher
class GSearchCog(Cog):
def __init__(self, bot):
self.bot = bot
@command('ff')
async def ffsearch(self, ctx):
async with ctx.typing():
await asyncio.sleep(1)
query = ctx.message.content
query = query[query.index('.ff')+3: ]
if query:
link = get_ffn_url_from_query(query)
if link:
ffn_obj = ffn_searcher.FFnSearcher(' ')
ffn_obj.all_links_to_get_stories_for.append(link)
ffn_obj.get_metadata()
ffn_obj.fetch_ffn_embeds()
embeds_to_send = ffn_obj.res_embeds
if embeds_to_send:
message = await ctx.send(embed=embeds_to_send[0])
@command('ao3')
async def ao3search(self, ctx):
async with ctx.typing():
await asyncio.sleep(1)
query = ctx.message.content
query = query[query.index('.ao3')+4: ]
if query:
link = get_ao3_url_from_query(query)
if link:
ao3_obj = ao3_searcher.Ao3Searcher(' ')
ao3_obj.all_links_to_get_stories_for.append(link)
ao3_obj.get_metadata()
ao3_obj.fetch_ao3_embeds()
embeds_to_send = ao3_obj.res_embeds
if embeds_to_send:
message = await ctx.send(embed=embeds_to_send[0])
def setup(bot):
bot.add_cog(GSearchCog(bot))
|
StarcoderdataPython
|
3251352
|
<filename>ecomm_app/job_worker.py
#!/usr/bin/env python
import os
import logging
# import the app's tasks
import ecomm_app.ecommerce.tasks
name = "ecommerce-worker"
log = logging.getLogger(name)
log.info("Start - {}".format(name))
default_broker_url = "pyamqp://rabbitmq:rabbitmq@localhost:5672//"
default_backend_url = "redis://localhost:6379/10"
default_config_module = "ecomm_app.ecommerce.celeryconfig_pub_sub"
worker_broker_url = os.getenv("WORKER_BROKER_URL",
default_broker_url).strip().lstrip()
ssl_options = {}
transport_options = {}
# Get the Celery app from the ecommerce project's get_celery_app
app = ecomm_app.ecommerce.tasks.get_celery_app(
name=name,
auth_url=worker_broker_url,
backend_url=default_backend_url)
# if you want to discover tasks in other directories:
# app.autodiscover_tasks(["some_dir_name_with_tasks"])
log.info("End - {}".format(name))
|
StarcoderdataPython
|
1713983
|
<reponame>malvidin/assemblyline-service-yara
import json
import logging
import os
import re
import subprocess
import tempfile
from assemblyline.common.str_utils import safe_str
class YaraValidator(object):
def __init__(self, externals=None, logger=None):
if not logger:
from assemblyline.common import log as al_log
al_log.init_logging('YaraValidator')
logger = logging.getLogger('assemblyline.yara_validator')
logger.setLevel(logging.WARNING)
if not externals:
externals = {'dummy': ''}
self.log = logger
self.externals = externals
self.rulestart = re.compile(r'^(?:global )?(?:private )?(?:private )?rule ', re.MULTILINE)
self.rulename = re.compile('rule ([^{^:]+)')
def clean(self, rulefile, eline, message, invalid_rule_name):
with open(rulefile, 'r') as f:
f_lines = f.readlines()
# List will start at 0 not 1
error_line = eline - 1
if invalid_rule_name:
f_lines[error_line] = f_lines[error_line].replace(invalid_rule_name, f"{invalid_rule_name}_1")
self.log.warning(f"Yara rule '{invalid_rule_name}' was renamed '{invalid_rule_name}_1' because its "
f"rule name was used more than once.")
else:
# First loop to find start of rule
start_idx = 0
while True:
find_start = error_line - start_idx
if find_start == -1:
raise Exception("Yara Validator failed to find invalid rule start. "
f"Yara Error: {message} Line: {eline}")
line = f_lines[find_start]
if re.match(self.rulestart, line):
invalid_rule_name = re.search(self.rulename, line).group(1).strip()
# Second loop to find end of rule
end_idx = 0
while True:
find_end = error_line + end_idx
if find_end >= len(f_lines):
raise Exception("Yara Validator failed to find invalid rule end. "
f"Yara Error: {message} Line: {eline}")
line = f_lines[find_end]
if re.match(self.rulestart, line) or find_end == len(f_lines) - 1:
# Now we have the start and end, strip from file
if find_end == len(f_lines) - 1:
f_lines = f_lines[:find_start]
else:
f_lines = f_lines[:find_start] + f_lines[find_end:]
break
end_idx += 1
# Send the error output to AL logs
error_message = f"Yara rule '{invalid_rule_name}' removed from rules file because of an error " \
f"at line {eline} [{message}]."
self.log.warning(error_message)
break
start_idx += 1
with open(rulefile, 'w') as f:
f.writelines(f_lines)
return invalid_rule_name
def paranoid_rule_check(self, rulefile):
# Run rules separately on command line to ensure there are no errors
print_val = "--==Rules_validated++__"
external_file = os.path.join(tempfile.gettempdir(), "externals.json")
try:
with open(external_file, "wb") as out_json:
out_json.write(json.dumps(self.externals).encode("utf-8"))
p = subprocess.Popen(f"python3 paranoid_check.py {rulefile} {external_file}",
cwd=os.path.dirname(os.path.realpath(__file__)),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = p.communicate()
finally:
os.unlink(external_file)
stdout = safe_str(stdout)
stderr = safe_str(stderr)
if print_val not in stdout:
if stdout.strip().startswith('yara.SyntaxError'):
raise Exception(stdout.strip())
else:
raise Exception("YaraValidator has failed!--+--" + str(stderr) + "--:--" + str(stdout))
def validate_rules(self, rulefile):
change = False
while True:
try:
self.paranoid_rule_check(rulefile)
return change
# If something goes wrong, clean rules until valid file given
except Exception as e:
error = str(e)
change = True
if error.startswith('yara.SyntaxError'):
e_line = int(error.split('):', 1)[0].split("(", -1)[1])
e_message = error.split("): ", 1)[1]
if "duplicated identifier" in error:
invalid_rule_name = e_message.split('"')[1]
else:
invalid_rule_name = ""
try:
self.clean(rulefile, e_line, e_message, invalid_rule_name)
except Exception as ve:
raise ve
else:
raise e
continue
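# Rough usage sketch (the rules path is illustrative):
#   validator = YaraValidator(externals={"dummy": ""})
#   changed = validator.validate_rules("/tmp/rules.yar")
# validate_rules() keeps stripping or renaming offending rules until the
# paranoid subprocess check passes, and returns True if the file was modified.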
|
StarcoderdataPython
|
1780456
|
import json
import time
from threading import Thread
import requests
class HSocket:
"""
Client HSocket that communicates with the remote server HSocket
"""
def __init__(self, host, auto_connect=True): # type: (str, bool) -> None
"""
Initializes the HSocket
:param host: the host server URL (eg. if HSocket server is running on https://example.com/hsocket/
then pass as host just "https://example.com")
:param auto_connect: if set to True, immediately performs a connection to the host
"""
self.host = host
self._listeners = {} # event name: function to call upon firing of the event
self.__thread = None # background thread for communicating with server
self.connected = False # indicated if the connection with the server is stable
self.__connectedFired = False # when listener for connected is not defined yet, it will be fired right after
# definition if this is still False
self._connecting = False # indicates if we are at least connecting to the server
self._reconnecting = False # the meantime between disconnection and automatic reconnection
self.sid = None # local socket's id from server
self._last_message_time = time.time() # time of last message we got from the server
self._fetch_msg_max_time = 10.0 # maximum time between fetching new messages from the server
if auto_connect:
self.connect()
def connect(self): # type: () -> None
"""
Performs a connection to the server
Fires 'connect' event upon successful connection
:return: None
"""
if self._connecting:
return
self._connecting = True
self._reconnecting = False
        class HSocketReceiverThread(Thread):
"""
Thread that handles messages from the server
"""
# noinspection PyMethodParameters
def run(__):
while self._connecting: # as long as we are at least connecting to the server, fetch new messages
msg = self._get_message()
if (msg is None or msg.get('action', '') == 'disconnect') and self.connected:
# there was an error in communication or we are ordered to disconnect for now
self.disconnect(reconnect=True) # disconnect for now, but retry later
elif msg is None: # invalid message. Skip.
continue
elif msg.get('action', '') == 'connect': # server processed our request and had decided to connect
# us. Accept a new socket ID from the server and run "connect" event
self.sid = msg['sid']
self.connected = True
self._run_listener('connect')
elif msg.get('action', '') == 'event': # server is firing an event on us
# run the appropriate listener
self._run_listener(msg['name'], msg['data'])
elif msg.get('action', '') == 'set_max_msg_interval': # server orders us to set a new maximum time
# between asking for new messages
self.set_retry_interval(float(msg['data']))
# start the background communication thread
        self.__thread = HSocketReceiverThread()
self.__thread.start()
def disconnect(self, reconnect=False): # type: (bool) -> None
"""
Disconnect from the server
:param reconnect: if set to True then after 30 seconds we will try to reconnect to the server
:return: None
"""
if not self._connecting:
return
# reset everything
self.__thread = None
self._connecting = False
self.__connectedFired = False
self.sid = None
if self.connected:
# if we are connected, inform the server about our disconnection
try:
requests.post(self.host + '/hsocket/', params={'sid': self.sid}, data={'action': 'disconnect'},
timeout=5)
            except (requests.exceptions.ConnectionError, requests.exceptions.ConnectTimeout):
pass
except requests.exceptions.ReadTimeout:
pass
self.connected = False
self._run_listener('disconnect')
if reconnect and not self._reconnecting:
# if enabled, run the reconnection countdown in background
self._reconnecting = True
def f_reconnect():
for _ in range(30):
if not self._reconnecting:
break
time.sleep(1)
self.connect()
AsyncExecuter(f_reconnect).start()
if not reconnect and self._reconnecting:
# do not reconnect, disconnect us for good
self._reconnecting = False
def on(self, event_name, func): # type: (str, "function") -> None
"""
Sets a new listener for an event
:param event_name: name of the event that the listener shall listen for
:param func: function fired upon calling of this event. Calls are performed like func(event_data)
:return: None
"""
item = self._listeners.get(event_name, [])
item.append(func)
self._listeners[event_name] = item
if event_name == 'connect' and self.connected and not self.__connectedFired:
self._run_listener(event_name)
def emit(self, event_name, data=None): # type: (str, any) -> None
"""
Fire an event with specified data
:param event_name: Name of the event to fire on the server
:param data: data passed to the fired function
:return: None
"""
if not self.connected:
return
try:
requests.post(self.host + '/hsocket/', params={'sid': self.sid}, data={'action': 'event',
'name': event_name,
'data': data})
except requests.exceptions.ConnectionError:
self.disconnect(reconnect=True)
def set_retry_interval(self, interval): # type: (float) -> None
"""
Sets the maximum time in seconds before asking the server for new messages
:param interval: maximum time in seconds before asking the server for new messages
:return: None
"""
self._fetch_msg_max_time = interval
def _get_message(self): # type: () -> dict or None
"""
Waits until the message from server for this client is available or some error occurs and then returns
the fetched message or None on fail
:return: fetched message from the server or None on connection fail
"""
try:
while True:
request = requests.get(self.host + '/hsocket/', params=None if self.sid is None else {'sid': self.sid},
timeout=10)
if request.status_code not in [200, 404]:
self.disconnect(reconnect=True)
return
data = request.json()
if data.get('action', '') != 'retry': # if the message was a real message, save the time
# we have gathered it
if data.get('action', '') != 'set_max_msg_interval':
self._last_message_time = time.time()
break
time.sleep(min(self._fetch_msg_max_time, max(1.0, time.time() - self._last_message_time)))
return data
except requests.exceptions.ConnectionError:
self.disconnect(reconnect=True)
except json.decoder.JSONDecodeError:
raise HSocketException("This is not a http-socket server")
except requests.exceptions.Timeout:
pass
def _run_listener(self, event_name, data=None): # type: (str, any) -> None
"""
Runs asynchronously all listeners for specified event
:param event_name: name of the event listeners to run
:param data: data to pass to the listening functions
:return: None
"""
if event_name == 'connect':
self.__connectedFired = True
for listener in self._listeners.get(event_name, []):
AsyncExecuter(listener, data).start()
class AsyncExecuter(Thread):
"""
Executes a function asynchronously
"""
def __init__(self, func, data=None): # type: ("function", any) -> None
"""
Initializes the data for asynchronous execution.
The execution itself must be then started by using .start()
:param func: function to execute
:param data: data passed to the executed function
"""
Thread.__init__(self)
self.func = func
self.data = data
def run(self):
self.func() if self.data is None else self.func(self.data)
class HSocketException(Exception):
pass
# If run directly, perform a quick test
if __name__ == '__main__':
sock = HSocket('http://127.0.0.1:5000')
def connect():
print('Connected')
def disconnect():
print('Disconnected')
def hello(msg):
print('Got:', msg)
sock.emit('helloBack', 'You too, sir')
sock.on('hello', hello)
sock.on('connect', connect)
sock.on('disconnect', disconnect)
|
StarcoderdataPython
|
1657096
|
<gh_stars>0
from unittest import TestCase
from artificial_idiot.game.game import Game
from artificial_idiot.game.state import State
from artificial_idiot.evaluation.evaluator_generator import (
NaiveEvaluatorGenerator, AdvanceEG
)
from artificial_idiot.util.json_parser import JsonParser
from artificial_idiot.search.max_n import MaxN
from artificial_idiot.search.search_cutoff.cutoff import DepthLimitCutoff
import json
def parse_state(file_name):
f = open(file_name)
pos_dict, colour, completed = JsonParser(json.load(f)).parse()
return State(pos_dict, colour, completed)
def change_state_color(state, color):
return State(state.pos_to_piece, color, state.completed)
# class TestMaxN(TestCase):
# weights = [5, 2, 0.7]
# evaluator_generator = NaiveEvaluatorGenerator(weights)
# cutoff = DepthLimitCutoff(3)
# search = MaxN(evaluator_generator, cutoff, 3)
#
# def test_initial_move(self):
# search = self.search
# state = parse_state("../../tests/red_initial_state.json")
# print(state)
#
# game = Game('red', state)
# best_action = search.search(game, state)
# print(best_action)
#
# def test_avoid_eaten(self):
# search = self.search
# state = parse_state("../../tests/avoid_eaten.json")
# print(state)
#
# game = Game('red', state)
# best_action = search.search(game, state)
# print(best_action)
class TestMyMaxN(TestCase):
# self.utility_pieces, num_exited_piece, self.utility_distance
weights = [100, 101, 1]
evaluator_generator = NaiveEvaluatorGenerator(weights)
cutoff = DepthLimitCutoff(3)
search = MaxN(evaluator_generator, cutoff, 3)
def test_initial(self):
search = self.search
state = parse_state("../../tests/red_initial_state.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
print(best_action)
def test_must_exit(self):
search = self.search
state = parse_state("../../tests/must_exit_0.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
self.assertEqual(best_action[-1], 'EXIT')
def test_eat_green(self):
search = self.search
state = parse_state("../../tests/eat_green.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
self.assertTupleEqual(((0, 0), (2, -2), 'JUMP'), best_action)
def test_move(self):
search = self.search
state = parse_state("../../tests/move.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
print(best_action)
self.assertEqual(((0, 0), (1, -1), 'MOVE'), best_action)
def test_jump(self):
search = self.search
state = parse_state("../../tests/jump.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
self.assertEqual(((0, 0), (2, -2), 'JUMP'), best_action)
def test_inch_forward(self):
search = self.search
state = parse_state("../../tests/inch_forward.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
print(best_action)
self.assertEqual(((-3, 2), (-2, 1), 'MOVE'), best_action)
def test_avoid_eaten(self):
search = self.search
state = parse_state("../../tests/avoid_eaten.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
self.assertTupleEqual(best_action, ((-2, -1), (-3, 0), 'MOVE'))
def test_eat_blue(self):
search = self.search
state = parse_state("../../tests/eat_blue.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
self.assertTupleEqual(best_action, ((0, 0), (2, -2), 'JUMP'))
def test_should_not_exit(self):
search = self.search
state = parse_state("../../tests/should_not_exit.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
self.assertNotEqual(best_action[-1], 'EXIT')
def test_move_not_jump(self):
search = self.search
state = parse_state("../../tests/move_not_jump.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
print(best_action)
def test_weird(self):
search = self.search
state = parse_state("../../tests/weird.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
print(best_action)
def test_busy(self):
search = self.search
state = parse_state("../../tests/busy.json")
print(state)
game = Game('red', state)
best_action = search.search(game, state)
print(best_action)
def test_pass(self):
search = self.search
state = parse_state("../../tests/pass.json")
print(state)
game = Game('blue', state)
best_action = search.search(game, state)
self.assertTupleEqual((None, None, 'PASS'), best_action)
|
StarcoderdataPython
|
33397
|
import collections
import contextlib
import os.path
import typing
from contextlib import ExitStack
from pathlib import Path
from typing import BinaryIO, Dict, Optional, Generator, Iterator, Set
from mercury_engine_data_structures import formats, dread_data
from mercury_engine_data_structures.formats.base_resource import AssetId, BaseResource, NameOrAssetId, resolve_asset_id
from mercury_engine_data_structures.formats.pkg import PKGHeader, Pkg
from mercury_engine_data_structures.game_check import Game
class PkgEditor:
"""
    Manages efficiently reading all PKGs in the game and writing modifications back to them.
_files_for_asset_id: mapping of asset id to all pkgs it can be found at
_ensured_asset_ids: mapping of pkg name to assets we'll copy into it when saving
_modified_resources: mapping of asset id to bytes. When saving, these asset ids are replaced
"""
_files_for_asset_id: Dict[AssetId, Set[str]]
_ensured_asset_ids: Dict[str, Set[AssetId]]
_modified_resources: Dict[AssetId, bytes]
def __init__(self, root: Path, target_game: Game = Game.DREAD):
all_pkgs = root.rglob("*.pkg")
self.files = {}
self.root = root
self.target_game = target_game
self.headers = {}
self._files_for_asset_id = collections.defaultdict(set)
self._ensured_asset_ids = {}
self._modified_resources = {}
for pkg_path in all_pkgs:
name = pkg_path.relative_to(root).as_posix()
self.files[name] = pkg_path
with pkg_path.open("rb") as f:
self.headers[name] = PKGHeader.parse_stream(f, target_game=target_game)
self._ensured_asset_ids[name] = set()
for entry in self.headers[name].file_entries:
self._files_for_asset_id[entry.asset_id].add(name)
def all_asset_ids(self) -> Iterator[AssetId]:
"""
Returns an iterator of all asset ids in the available pkgs.
"""
yield from self._files_for_asset_id.keys()
def all_asset_names(self) -> Iterator[str]:
"""
Returns an iterator of all known names of the present asset ids.
"""
for asset_id in self.all_asset_ids():
name = dread_data.name_for_asset_id(asset_id)
if name is not None:
yield name
def find_pkgs(self, asset_id: NameOrAssetId) -> Iterator[str]:
yield from self._files_for_asset_id[resolve_asset_id(asset_id)]
def get_raw_asset(self, asset_id: NameOrAssetId, in_pkg: Optional[str] = None) -> bytes:
asset_id = resolve_asset_id(asset_id)
if asset_id in self._modified_resources:
return self._modified_resources[asset_id]
for name, header in self.headers.items():
if in_pkg is not None and name != in_pkg:
continue
for entry in header.file_entries:
if entry.asset_id == asset_id:
with self.files[name].open("rb") as f:
f.seek(entry.start_offset)
return f.read(entry.end_offset - entry.start_offset)
raise ValueError(f"Unknown asset_id: {asset_id:0x}")
def get_parsed_asset(self, name: str, in_pkg: Optional[str] = None) -> BaseResource:
data = self.get_raw_asset(name, in_pkg)
file_format = os.path.splitext(name)[1][1:]
return formats.format_for(file_format).parse(data, target_game=self.target_game)
def replace_asset(self, asset_id: NameOrAssetId, new_data: typing.Union[bytes, BaseResource]):
if not isinstance(new_data, bytes):
new_data = new_data.build()
self._modified_resources[resolve_asset_id(asset_id)] = new_data
def ensure_present(self, pkg_name: str, asset_id: NameOrAssetId):
"""
        Ensures the given pkg has the given asset, collecting it from other pkgs if needed.
"""
if pkg_name not in self._ensured_asset_ids:
raise ValueError(f"Unknown pkg_name: {pkg_name}")
asset_id = resolve_asset_id(asset_id)
# If the pkg already has the given asset, do nothing
if pkg_name not in self._files_for_asset_id[asset_id]:
self._ensured_asset_ids[pkg_name].add(asset_id)
def save_modified_pkgs(self):
modified_pkgs = set()
for asset_id in self._modified_resources.keys():
modified_pkgs.update(self._files_for_asset_id[asset_id])
# Read all asset ids we need to copy somewhere else
asset_ids_to_copy = {}
for asset_ids in self._ensured_asset_ids.values():
for asset_id in asset_ids:
if asset_id not in asset_ids_to_copy:
asset_ids_to_copy[asset_id] = self.get_raw_asset(asset_id)
for pkg_name in modified_pkgs:
with self.files[pkg_name].open("rb") as f:
pkg = Pkg.parse_stream(f, target_game=self.target_game)
for asset_id, data in self._modified_resources.items():
if pkg_name in self._files_for_asset_id[asset_id]:
pkg.replace_asset(asset_id, data)
for asset_id in self._ensured_asset_ids[pkg_name]:
pkg.add_asset(asset_id, asset_ids_to_copy[asset_id])
self._files_for_asset_id[asset_id].add(pkg_name)
with self.files[pkg_name].open("wb") as f:
pkg.build_stream(f)
# Clear the ensured asset ids, since we've written these
self._ensured_asset_ids[pkg_name] = set()
self._modified_resources = {}
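# A minimal usage sketch appended for illustration (not part of the original module).
# The romfs path, asset name and pkg name below are placeholders/assumptions, not values
# guaranteed to exist in any particular game dump.
if __name__ == "__main__":
    editor = PkgEditor(Path("./romfs"))                       # index every *.pkg under the root
    print(sum(1 for _ in editor.all_asset_ids()), "asset ids indexed")
    asset_name = "some/asset/name.bmsad"                      # placeholder asset name
    print("found in pkgs:", list(editor.find_pkgs(asset_name)))
    raw = editor.get_raw_asset(asset_name)                    # original bytes for the asset
    editor.replace_asset(asset_name, raw)                     # register replacement bytes (unchanged here)
    editor.ensure_present("packs/system/system.pkg", asset_name)  # placeholder pkg name
    editor.save_modified_pkgs()                               # rewrites only the affected pkg files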
|
StarcoderdataPython
|
115849
|
<gh_stars>0
"""
Unit test cases for items.py
"""
from .. import items
class TestItems:
def test_file_item(self):
file_item = items.FileItem()
file_item['name'] = 'foo'
assert file_item['name'] == 'foo', \
'Attribute "name" from FileItem does not retain the value.'
file_item['amount_lines'] = 10
assert file_item['amount_lines'] == 10, \
'Attribute "amount_lines" from FileItem does not retain the value.'
file_item['amount_bytes'] = 500
assert file_item['amount_bytes'] == 500, \
'Attribute "amount_bytes" from FileItem does not retain the value.'
    def test_directory_item(self):
        dir_item = items.DirectoryItem()
        dir_item['name'] = 'foo'
        assert dir_item['name'] == 'foo', \
            'Attribute "name" from DirectoryItem does not retain the value.'
        dir_item['amount_lines'] = 10
        assert dir_item['amount_lines'] == 10, \
            'Attribute "amount_lines" from DirectoryItem does not retain the value.'
        dir_item['amount_bytes'] = 500
        assert dir_item['amount_bytes'] == 500, \
            'Attribute "amount_bytes" from DirectoryItem does not retain the value.'
        dir_item['is_root'] = True
        assert dir_item['is_root'], \
            'Attribute "is_root" from DirectoryItem does not retain the value.'
        dir_item['repository_name'] = 'repository001'
        assert dir_item['repository_name'] == 'repository001', \
            'Attribute "repository_name" from DirectoryItem does not retain \
            the value.'
        dir_item['children'] = [1, 2, 3]
        assert len(dir_item['children']) == 3 \
            and all([xi == yi for xi, yi in
                     zip(dir_item['children'], [1, 2, 3])]), \
            'Attribute "children" from DirectoryItem does not retain \
            the value.'
def test_text_file_item(self):
text_file_item = items.TextFileItem()
text_file_item['name'] = 'foo'
assert text_file_item['name'] == 'foo', \
'Attribute "name" from TextFileItem does not retain the value.'
text_file_item['amount_lines'] = 10
assert text_file_item['amount_lines'] == 10, \
'Attribute "amount_lines" from TextFileItem does not retain \
the value.'
text_file_item['amount_bytes'] = 500
assert text_file_item['amount_bytes'] == 500, \
'Attribute "amount_bytes" from TextFileItem does not retain \
the value.'
text_file_item['extension'] = 'txt'
assert text_file_item['extension'] == 'txt', \
'Attribute "extension" from TextFileItem does not retain \
the value.'
|
StarcoderdataPython
|
88778
|
<gh_stars>100-1000
import sys
import l_bp
from exceptions import MissingProbabilitiesException
class BreakpointInterval(object):
'''
Class for storing the range and probability distribution
of a breakpoint
'''
# Constant value for slop padding
SLOP_PROB = 1e-100
def __init__(self, chrom, start, end, p):
self.chrom = chrom
self.start = start
self.end = end
self.p = p
def pad_slop(self, percent_slop, fixed_slop):
'''
Add slop to the interval
'''
slop = int(max(percent_slop * (self.end - self.start + 1), fixed_slop))
self.start -= slop
self.end += slop
self.p = [BreakpointInterval.SLOP_PROB] * slop + self.p + [BreakpointInterval.SLOP_PROB] * slop
self._trim()
self._normalize()
def _trim(self):
'''
Trim any part of range past the beginning of the chromosome
'''
if self.start < 0:
self.p = self.p[-self.start:]
self.start = 0
def _normalize(self):
'''
Normalize interval's probability to sum to 1
'''
sum_p = sum(self.p)
self.p = [float(x)/sum_p for x in self.p]
def common_range(self, other):
return max(self.start, other.start), min(self.end, other.end)
def overlap_prob(self, other, c_start, c_len):
start_off = c_start - self.start
other_start_off = c_start - other.start
ovl = 0
for i in range(c_len):
ovl += min(self.p[i + start_off], other.p[i + other_start_off])
return ovl
class Breakpoint(object):
'''
Class for storing information about Breakpoints for merging
'''
def __init__(self, line, percent_slop=0, fixed_slop=0):
'''
Initialize with slop for probabilities
'''
self.l = line
(self.sv_type,
chr_l,
chr_r,
self.strands,
start_l,
end_l,
start_r,
end_r,
m) = l_bp.split_v(line)
try:
self.left = BreakpointInterval(chr_l, start_l, end_l, self.floats_from_tag(m, 'PRPOS'))
self.right = BreakpointInterval(chr_r, start_r, end_r, self.floats_from_tag(m, 'PREND'))
except RuntimeError as e:
raise MissingProbabilitiesException(str(e))
if ((percent_slop > 0) or (fixed_slop > 0)):
self.left.pad_slop(percent_slop, fixed_slop)
self.right.pad_slop(percent_slop, fixed_slop)
def __str__(self):
'''
Convert back to a string
'''
return '\t'.join([str(x) for x in [self.left.chrom,
self.left.start,
self.left.end,
self.right.chrom,
self.right.start,
self.right.end,
self.sv_type,
self.strands,
self.left.p,
self.right.p]])
def ovl(self, b):
'''
Calculate overlapping cumulative probability value as weight?
0 if not overlapping.
'''
if ((self.left.chrom != b.left.chrom) or
(self.right.chrom != b.right.chrom) or
(self.sv_type != b.sv_type)):
return 0
#get common intervals
c_start_l, c_end_l = self.left.common_range(b.left)
c_start_r, c_end_r = self.right.common_range(b.right)
c_l_len = c_end_l - c_start_l + 1
c_r_len = c_end_r - c_start_r + 1
if (c_l_len < 1) or (c_r_len < 1):
return 0
ovl_l = self.left.overlap_prob(b.left, c_start_l, c_l_len)
ovl_r = self.right.overlap_prob(b.right, c_start_r, c_r_len)
return ovl_l * ovl_r
@staticmethod
def floats_from_tag(info_dict, tag):
if tag in info_dict:
return [float(x) for x in info_dict[tag].split(',')]
else:
raise RuntimeError('Required tag {0} not found.'.format(tag))
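# A toy, self-contained sketch added for illustration (not part of the original module): it shows
# how two BreakpointInterval objects are compared. The coordinates and probability vectors are
# made-up values chosen only to demonstrate common_range() and overlap_prob().
if __name__ == '__main__':
    a = BreakpointInterval('1', 100, 104, [0.1, 0.2, 0.4, 0.2, 0.1])
    b = BreakpointInterval('1', 102, 106, [0.2, 0.3, 0.3, 0.1, 0.1])
    c_start, c_end = a.common_range(b)        # -> (102, 104)
    c_len = c_end - c_start + 1
    # sums min(P_a, P_b) over the shared positions: 0.2 + 0.2 + 0.1 = 0.5
    print(a.overlap_prob(b, c_start, c_len))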
|
StarcoderdataPython
|
1739784
|
####################################################### README #########################################################
# This file provides functions that convolve an image with a receptive field so that the input to the network is
# closer to the form perceived by our eyes.
########################################################################################################################
import tensorflow as tf
import numpy as np
def rf_tf(inp):
sca1 = 0.625
sca2 = 0.125
sca3 = -0.125
sca4 = -0.5
# Receptive field kernel
    w = [[sca4, sca3, sca2, sca3, sca4],
         [sca3, sca2, sca1, sca2, sca3],
         [sca2, sca1,    1, sca1, sca2],
         [sca3, sca2, sca1, sca2, sca3],
         [sca4, sca3, sca2, sca3, sca4]]
filter = tf.convert_to_tensor(w, dtype=tf.float32)
filter = tf.expand_dims(filter, -1)
filter = tf.expand_dims(filter, -1)
pot = tf.nn.conv2d(inp, filter, strides=[1, 1, 1, 1], padding='SAME')
return pot
def rf_np(inp):
sca1 = 0.625
sca2 = 0.125
sca3 = -0.125
    sca4 = -0.5
# Receptive field kernel
w = [[ sca4, sca3, sca2, sca3, sca4],
[ sca3, sca2, sca1, sca2, sca3],
[ sca2, sca1, 1, sca1, sca2],
[ sca3, sca2, sca1, sca2, sca3],
[ sca4, sca3, sca2, sca3, sca4]]
pot = np.zeros([inp.shape[0], inp.shape[1]])
ran = [-2, -1, 0, 1, 2]
ox = 2
oy = 2
# Convolution
for i in range(inp.shape[0]):
for j in range(inp.shape[1]):
summ = 0
for m in ran:
for n in ran:
                    if (i + m) >= 0 and (i + m) <= inp.shape[0] - 1 and (j + n) >= 0 and (j + n) <= inp.shape[1] - 1:
summ = summ + w[ox + m][oy + n] * inp[i + m][j + n] / 255
pot[i][j] = summ
return pot
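# A small self-check sketch added for illustration (not in the original file). It assumes
# TensorFlow 2.x eager execution and an arbitrary 28x28 random image; since rf_np() divides
# by 255 internally, the TensorFlow input is pre-scaled so the two results are comparable.
if __name__ == "__main__":
    img = np.random.randint(0, 256, size=(28, 28)).astype(np.float32)
    pot_np = rf_np(img)                                        # pure-NumPy convolution
    inp = tf.reshape(tf.convert_to_tensor(img / 255.0), [1, 28, 28, 1])
    pot_tf = tf.squeeze(rf_tf(inp))                            # tf.nn.conv2d version
    print("max abs difference:", np.max(np.abs(pot_np - pot_tf.numpy())))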
|
StarcoderdataPython
|
6483
|
<gh_stars>0
#!/usr/bin/python
# mp4museum.org by <NAME> 2019
import os
import sys
import glob
from subprocess import Popen, PIPE
import RPi.GPIO as GPIO
FNULL = open(os.devnull, "w")
# setup GPIO pin
GPIO.setmode(GPIO.BOARD)
GPIO.setup(11, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
GPIO.setup(13, GPIO.IN, pull_up_down = GPIO.PUD_DOWN)
# functions to be called by event listener
def buttonPause(channel):
player.stdin.write("p")
def buttonNext(channel):
player.stdin.write("q")
# add event listener
GPIO.add_event_detect(11, GPIO.FALLING, callback = buttonPause, bouncetime = 234)
GPIO.add_event_detect(13, GPIO.FALLING, callback = buttonNext, bouncetime = 1234)
# please do not remove my logo screen
player = Popen(['omxplayer', '--adev', 'both', '/home/pi/mp4museum.mp4'],stdin=PIPE,stdout=FNULL)
player.wait()
# the loop
while(1):
for files in sorted(glob.glob(r'/media/*/*.mp4')):
player = Popen(['omxplayer','--adev', 'both',files],stdin=PIPE,stdout=FNULL)
player.wait()
|
StarcoderdataPython
|
1611553
|
<filename>tests/v2/test_rspv.py
import json
from tests.v2.basecases import TestBaseCase
class RSpvsTestCase(TestBaseCase):
"""
    Test class for the rspv endpoint
"""
def test_rspv_created_with_str(self):
"""Test if a comment is posted"""
auth_token = self.user_login()
response = self.client.post('/api/v2/meetups/<meetup_id>/rspv',
data=json.dumps(self.rspv_one),
headers=dict(
Authorization="Bearer " + auth_token),
content_type='application/json')
res = json.loads(response.data.decode())
self.assertEqual(response.status_code, 404)
self.assertEqual(
res["error"],
"Resource Identifier need an integer")
def test_rspv_created_with_none_meetup(self):
"""Test if a rspv is posted"""
auth_token = self.user_login()
response = self.client.post('/api/v2/meetups/1/rspv',
data=json.dumps(self.rspv_one),
headers=dict(
Authorization="Bearer " + auth_token),
content_type='application/json')
res = json.loads(response.data.decode())
self.assertEqual(response.status_code, 404)
self.assertEqual(
res["error"],
"The meetup you entered is not found. Please pass same meetup on your URL")
def test_get_rspv_with_str(self):
"""Test if you get an rspv is posted"""
auth_token = self.user_login()
response = self.client.get('/api/v2/rspv/<rspv_id>',
headers=dict(
Authorization="Bearer " + auth_token),
content_type='application/json')
res = json.loads(response.data.decode())
self.assertEqual(response.status_code, 404)
self.assertEqual(
res["error"],
"Url need an integer")
def test_get_rspv(self):
"""Test if you get an rspv is posted"""
auth_token = self.user_login()
response = self.client.get('/api/v2/rspv/1',
headers=dict(
Authorization="Bearer " + auth_token),
content_type='application/json')
res = json.loads(response.data.decode())
self.assertEqual(response.status_code, 404)
self.assertEqual(
res["error"],
"RSPV of id 1 not found")
|
StarcoderdataPython
|
4812470
|
import pytest
import json
from fastapi.security import OAuth2PasswordRequestForm
from app.api.v1.endpoints.authorization import login_for_access_token
from app.core import crud
from app.core.authorization import get_password_hash
from app.core.schemas.users import UserInDB
# @pytest.mark.parametrize(
# "email, password, status_code",
# [
# ["<EMAIL>", "<PASSWORD>", 200],
# ["<EMAIL>", "password", 401],
# ],
# )
# def test_login_for_access_token(test_app, monkeypatch, email, password, status_code):
# async def get_by_email(email):
# if email == '<EMAIL>':
# return UserInDB(email=email, hashed_password=get_password_hash("<PASSWORD>"))
# return None
# monkeypatch.setattr(crud.users, "get_by_email", get_by_email)
#
# response = test_app.post(f"/token", data=json.dumps(dict(username=email, password=password)),)
# assert response.status_code == status_code
|
StarcoderdataPython
|
29630
|
import unittest
from unittest.mock import patch, Mock
from werkzeug.datastructures import FileStorage
import io
import json
from app import app
from app.models.base import db
from app.models.user import User
from app.auth.views import UserPassportphotoView
from app.auth import views
class AuthUploadPassportPhotoTestCase(unittest.TestCase):
def setUp(self):
self.app = app.test_client()
app.testing = True
self.user_data = {
"username": "john123",
"email": "<EMAIL>",
"password": "<PASSWORD>",
}
with app.app_context():
db.drop_all()
db.create_all()
# create admin user
user = User(
username="john123",
email="<EMAIL>",
password="<PASSWORD>",
role=True,
)
user.save()
@patch.object(views.UserPassportphotoView, "post")
def test_upload_passport_photo(self, mock_post):
upload = UserPassportphotoView()
mock_post.return_value.status_code = 200
res = upload.post(
"/api/v1/auth/upload",
data=dict(file=(io.BytesIO(b"abcdef"), "test.jpg")),
headers={"Content-Type": "multipart/form-data"},
)
self.assertEqual(res.status_code, 200)
def test_upload_photo_with_non_allowed_ext(self):
res = self.app.post(
"/api/v1/auth/login",
data=json.dumps(self.user_data),
headers={"Content-Type": "application/json"},
)
token = json.loads(res.data.decode())["access_token"]
data = {"file": (io.BytesIO(b'my file contents'), 'hello.txt')}
result = self.app.post(
"/api/v1/auth/upload", buffered=True,
headers={
"Authorization": token,
"Content-Type" : 'multipart/form-data',
},
data=data,
)
self.assertEqual(result.status_code, 400)
def test_no_photo_upload(self):
res = self.app.post(
"/api/v1/auth/login",
data=json.dumps(self.user_data),
headers={"Content-Type": "application/json"},
)
token = json.loads(res.data.decode())["access_token"]
result = self.app.post(
"/api/v1/auth/upload", buffered=True,
headers={
"Authorization": token,
"Content-Type" : 'multipart/form-data',
},
data={},
)
self.assertEqual(result.status_code, 400)
|
StarcoderdataPython
|