id | text | dataset_id
---|---|---
3223122
|
<reponame>Yosoyfr/tytus
import libs.ply.yacc as yacc
from Optimizador.lex2 import *
from controllers.error_controller import ErrorController
from Optimizador.clases3d import *
precedence = (
('nonassoc', 'LESS_THAN', 'LESS_EQUAL', 'GREATE_THAN',
'GREATE_EQUAL', 'EQUALS', 'EQUALS_EQUALS','NOT_EQUAL_LR', 'LEFT_CORCH', 'RIGHT_CORCH'), # Level 4
('left', 'LEFT_PARENTHESIS',
'RIGHT_PARENTHESIS', 'COLON', 'NOT_EQUAL'), # Level 6
('left', 'PLUS', 'REST'), # Level 7
('left', 'ASTERISK', 'DIVISION', 'MODULAR', 'BITWISE_SHIFT_RIGHT',
'BITWISE_SHIFT_LEFT', 'BITWISE_AND', 'BITWISE_OR'), # Level 8
('left', 'EXPONENT', 'BITWISE_XOR'), # Level 9
('right', 'UPLUS', 'UREST'), # Level 10
('left', 'DOT') # Level 13
)
# stay sharp xd
def p_instruction_list(p):
'''instructionlist : instructionlist instruction
| instruction
'''
if len(p) == 3:
p[1].append(p[2])
p[0] = p[1]
else:
p[0] = [p[1]]
def p_instruction(p):
'''instruction : import_instr
| alias_instr
| ifInstr
| SINGLE_LINE_COMMENT
| definition_instr
| labels_instr
| goto_instr
| error EQUALS
'''
if p.slice[1].type == 'definition_instr':
p[0] = p[1]
elif p.slice[1].type == 'goto_instr':
p[0] = p[1]
elif p.slice[1].type == 'labels_instr':
p[0] = p[1]
elif p.slice[1].type == 'ifInstr':
p[0] = p[1]
else:
pass
def p_import_instr(p):
'''import_instr : FROM GOTO IMPORT WITH_GOTO
| FROM LIST_DOT IMPORT LIST_ID
| FROM LIST_DOT IMPORT ASTERISK'''
def p_definition_instr(p):
'''definition_instr : DEF ID LEFT_PARENTHESIS RIGHT_PARENTHESIS COLON
| GLOBAL LIST_ID
| PRINT LEFT_PARENTHESIS EXPRESSION RIGHT_PARENTHESIS
| ID EQUALS comparasion
| ID LEFT_CORCH EXPRESSION RIGHT_CORCH EQUALS comparasion
| ID LEFT_PARENTHESIS RIGHT_PARENTHESIS'''
if len(p) == 4:
if p.slice[2].type == "EQUALS":
p[0] = AsignacionID(p[1], p[3])
def p_alias_instr(p):
'''alias_instr : ARROBA WITH_GOTO'''
def p_list_id(p):
'''LIST_ID : LIST_ID COMMA ID
| ID'''
def p_list_dot(p):
'''LIST_DOT : LIST_DOT DOT ID
| ID'''
# def p_list_dot_asignacion(p):
# '''LIST_DOT_ASIGNACION : LIST_DOT_ASIGNACION DOT ID LEFT_PARENTHESIS ID EXPRESSION RIGHT_PARENTHESIS
# | LIST_DOT_ASIGNACION DOT ID LEFT_PARENTHESIS RIGHT_PARENTHESIS
# | LIST_DOT_ASIGNACION DOT ID LEFT_PARENTHESIS LIST_DOT_ASIGNACION RIGHT_PARENTHESIS
# | ID LEFT_PARENTHESIS RIGHT_PARENTHESIS
# | ID LEFT_PARENTHESIS ID EXPRESSION RIGHT_PARENTHESIS'''
def p_goto_instr(p):
''' goto_instr : GOTO DOT ID'''
p[0] = Goto(p[3])
def p_labels_instr(p):
'''labels_instr : LABEL DOT ID'''
p[0] = LabelIF(p[3])
def p_ifInstr(p):
'''ifInstr : IF comparasion COLON GOTO DOT ID
| IF LEFT_PARENTHESIS comparasion RIGHT_PARENTHESIS COLON GOTO DOT ID'''
if len(p) == 7:
p[0] = ifStatement(p[2], Goto(p[6]))
else:
p[0] = ifStatement(p[3], Goto(p[8]))
def p_comparasion(p):
''' comparasion : EXPRESSION RELOP EXPRESSION
| EXPRESSION'''
if len(p) == 4:
p[0] = Relop(p[1], p[2], p[3])
else:
p[0] = p[1]
def p_expression(p):
'''EXPRESSION : EXPRESSION PLUS EXPRESSION
| EXPRESSION REST EXPRESSION
| EXPRESSION ASTERISK EXPRESSION
| EXPRESSION DIVISION EXPRESSION
| EXPRESSION EXPONENT EXPRESSION
| EXPRESSION MODULAR EXPRESSION
| EXPRESSION DOT EXPRESSION
| REST EXPRESSION %prec UREST
| PLUS EXPRESSION %prec UPLUS
| EXPRESSION BITWISE_SHIFT_RIGHT EXPRESSION
| EXPRESSION BITWISE_SHIFT_LEFT EXPRESSION
| EXPRESSION BITWISE_AND EXPRESSION
| EXPRESSION BITWISE_OR EXPRESSION
| EXPRESSION BITWISE_XOR EXPRESSION
| BITWISE_NOT EXPRESSION %prec UREST
| LEFT_CORCH comparasion RIGHT_CORCH
| ID LEFT_PARENTHESIS comparasion RIGHT_PARENTHESIS
| ID LEFT_PARENTHESIS RIGHT_PARENTHESIS
| ID LEFT_CORCH comparasion RIGHT_CORCH
| ID LEFT_CORCH ID COLON ID RIGHT_CORCH
| STRING_CADENAS
| INTEGER_NUMBERS'''
    # If you're already here xd, stay sharp x2
if len(p) == 4:
if p.slice[1].type == "LEFT_CORCH":
p[0] = p[2]
else:
p[0] = ArithmeticBinaryOperation(p[1], p[3], p[2])
elif len(p) == 2:
p[0] = p[1]
def p_relop(p):
'''RELOP : EQUALS_EQUALS
| NOT_EQUAL
| GREATE_EQUAL
| GREATE_THAN
| LESS_THAN
| LESS_EQUAL
| NOT_EQUAL_LR'''
p[0] = p[1]
def p_string_cadenas(p):
'''STRING_CADENAS : STRINGCONT
| CHARCONT
| ID'''
p[0] = p[1]
def p_integer_numbers(p):
'''INTEGER_NUMBERS : INT_NUMBER
| FLOAT_NUMBER '''
p[0] = p[1]
def p_error(p):
try:
# print(str(p.value))
description = ' or near ' + str(p.value)
column = find_column(p)
ErrorController().add(33, 'Syntactic', description, p.lineno, column)
except AttributeError:
# print(number_error, description)
ErrorController().add(1, 'Syntactic', '', 'EOF', 'EOF')
parser = yacc.yacc()
def parse_optimizacion(inpu):
global input, contador_instr
contador_instr = 0
ErrorController().destroy()
lexer = lex.lex()
lexer.lineno = 1
input = inpu
get_text(input)
return parser.parse(inpu, lexer=lexer)
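# A hypothetical snippet of the three-address code this grammar appears to accept
# (token spellings ultimately depend on Optimizador.lex2, so this is only an
# illustration, not the project's documented input format):
#
#     t1 = a + b
#     if t1 < c : goto .l1
#     goto .l2
#     label .l1
#     t1 = t1 * 2
#     label .l2
#
# parse_optimizacion(source) returns the list of instruction objects built by the
# p_* rules above (AsignacionID, ifStatement, Goto, LabelIF, ...).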
|
StarcoderdataPython
|
145392
|
from typing import List
from collections import defaultdict
class Solution:
def findingUsersActiveMinutes(self, logs: List[List[int]], k: int) -> List[int]:
data = defaultdict(set)
uam = dict()
        inverted_uam = {minute: 0 for minute in range(1, k + 1)}
solution = list()
for item in logs:
data[item[0]].add(item[1])
for item in data:
uam[item] = len(data[item])
for key, values in uam.items():
inverted_uam[values] +=1
for values in inverted_uam.values():
solution.append(values)
return solution
solution = Solution()
print(solution.findingUsersActiveMinutes(logs = [[0,5],[1,2],[0,2],[0,5],[1,3]], k = 5))
print(solution.findingUsersActiveMinutes(logs = [[1,1],[2,2],[2,3]], k = 4))
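# Expected output of the two example calls above: [0, 2, 0, 0, 0] and [1, 1, 0, 0]
# (first call: both users have 2 unique active minutes; second call: 1 and 2 respectively).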
|
StarcoderdataPython
|
109737
|
# coding=utf8
import numpy as np
class LabelSpreading:
def __init__(self, alpha=0.2, max_iter=30, tol=1e-3):
"""
:param alpha: clamping factor between (0,1)
:param max_iter: maximum number of iterations
:param tol: convergence tolerance
"""
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.dist = None
def fit(self, w, y):
"""
fit label spreading algorithm
:param w: similarity matrix of n x n shape with n samples
:param y: labels of n x c shape with c labels, where 1
denotes label of x_i or 0 otherwise. Unlabeled samples
have labels set to 0.
"""
if type(w) != np.ndarray or type(y) != np.ndarray or len(w) != len(y):
raise Exception("w and y should be numpy array with equal length")
        if not 0 < self.alpha < 1 or self.max_iter < 0 or self.tol < 0:
raise Exception("Parameters are set incorrectly")
# construct the matrix S
d = np.sum(w, axis=1)
d[d == 0] = 1
np.power(d, -1 / 2., d)
d = np.diag(d)
s = np.dot(np.dot(d, w), d)
# Iterate F(t+1) until convergence
cur_iter = 0
err = self.tol
f0 = y
f1 = None
while cur_iter < self.max_iter and err >= self.tol:
f1 = self.alpha * np.dot(s, f0) + (1 - self.alpha) * y
err = np.max(np.abs(f1 - f0))
f0 = f1
cur_iter += 1
self.dist = f1 # set distributions
return self
def predict(self, y):
"""
use model to create predictions
:param y: labels of n x c shape with c labels, where 1
denotes label of x_i or 0 otherwise. Unlabeled samples
have labels set to 0.
:return: list with predictions
"""
if not np.any(y):
raise Exception("Please fit model first")
if type(y) != np.ndarray:
raise Exception("y should be numpy array")
predictions = []
for i, labels in enumerate(y):
index = np.where(labels == 1)[0]
if len(index) == 1:
# was labeled before
predictions.append(index[0])
else:
# use label with highest score
predictions.append(np.argmax(self.dist[i]))
return predictions
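# A minimal usage sketch (illustrative only: the toy similarity matrix and labels
# below are assumptions added for demonstration, not part of the original module):
if __name__ == "__main__":
    w_demo = np.array([[0.0, 0.9, 0.8],
                       [0.9, 0.0, 0.1],
                       [0.8, 0.1, 0.0]])   # symmetric similarities for 3 samples
    y_demo = np.array([[1.0, 0.0],         # sample 0 labeled as class 0
                       [0.0, 1.0],         # sample 1 labeled as class 1
                       [0.0, 0.0]])        # sample 2 unlabeled
    model = LabelSpreading(alpha=0.2, max_iter=30, tol=1e-3)
    preds = model.fit(w_demo, y_demo).predict(y_demo)
    print([int(c) for c in preds])  # expected: [0, 1, 0]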
|
StarcoderdataPython
|
3249709
|
<reponame>SteveMaverick/Python<filename>divide_and_conquer/quicksort.py
import sys
from typing import List
sys.setrecursionlimit(10 ** 5)
def partition(array: List, start: int, end: int) -> int:
"""
Helper function for quick_sort
Partitions array around a pivot
such that elements to the right of pivot are > pivot
elements to the left of pivot < pivot
and pivot is in the correct position
and returns index of pivot in sorted array
>>> array = [4,1,5,6,3,5,2]
>>> p = partition(array,0,6)
>>> p
3
"""
pivot = array[start] # pivot element to partition the array around
i = start + 1 # pointer to keep track of partition elements
for j in range(i, end + 1):
"""
loop that runs through all elements in the sub array
and partitions around the pivot
"""
if array[j] < pivot:
array[j], array[i] = array[i], array[j]
i += 1
"""
    Swapping pivot so that it ends up in its right place
"""
array[start], array[i - 1] = array[i - 1], array[start]
return i - 1
def quick_sort(array: List, start: int = 0, end: int = None) -> List:
"""
function that takes in a list as input
and return sorted list
>>> array = [4 , 1, 6, 5, 3, 2, 5]
>>> sorted_array = quick_sort(array)
>>> sorted_array
[1, 2, 3, 4, 5, 5, 6]
"""
if end is None:
"""
Overriding default pointer to end of original array
"""
end = len(array) - 1
if len(array) <= 1:
return array
elif start >= end:
return array
else:
pivot_index = partition(array, start, end) # partition array around a pivot
array = quick_sort(
array, start, pivot_index - 1
) # run quicksort on left subarray on elements < pivot
array = quick_sort(
array, pivot_index + 1, end
) # run quicksort on right subarray on elements >= pivot
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
|
StarcoderdataPython
|
1647116
|
import os
import numpy as np
import pytest
from capreolus.benchmark.robust04 import Robust04Benchmark
from capreolus.collection import Collection
from capreolus.extractor.berttext import BertText
from capreolus.searcher.bm25 import BM25Grid
from capreolus.tests.common_fixtures import trec_index, dummy_collection_config
def test_transform_qid_posdocid_negdocid(monkeypatch, tmpdir, trec_index, dummy_collection_config):
collection = Collection(dummy_collection_config)
pipeline_config = {
"indexstops": True,
"maxthreads": 1,
"stemmer": "anserini",
"bmax": 0.2,
"k1max": 0.2,
"maxqlen": 5,
"maxdoclen": 10,
"keepstops": True,
"rundocsonly": False,
}
bm25_run = BM25Grid(trec_index, collection, os.path.join(tmpdir, "searcher"), pipeline_config)
bm25_run.create()
folds = {"s1": {"train_qids": ["301"], "predict": {"dev": ["301"], "test": ["301"]}}}
benchmark = Robust04Benchmark(bm25_run, collection, pipeline_config)
benchmark.create_and_store_train_and_pred_pairs(folds)
feature = BertText(tmpdir, tmpdir, pipeline_config, index=trec_index, collection=collection, benchmark=benchmark)
feature.build_from_benchmark()
transformed = feature.transform_qid_posdocid_negdocid("301", "LA010189-0001", "LA010189-0001")
assert np.array_equal(
transformed["postoks"],
[101, 24369, 9986, 0, 0, 0, 102, 24369, 24369, 24369, 7592, 2088, 1010, 14806, 2015, 2013, 6058, 102],
)
assert np.array_equal(transformed["posmask"], [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
assert np.array_equal(transformed["possegs"], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
assert np.array_equal(transformed["posqmask"], [1, 1, 0, 0, 0])
assert np.array_equal(transformed["posdmask"], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
assert np.array_equal(
transformed["negtoks"],
[101, 24369, 9986, 0, 0, 0, 102, 24369, 24369, 24369, 7592, 2088, 1010, 14806, 2015, 2013, 6058, 102],
)
assert np.array_equal(transformed["negmask"], [1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
assert np.array_equal(transformed["negsegs"], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
assert np.array_equal(transformed["negqmask"], [1, 1, 0, 0, 0])
assert np.array_equal(transformed["negdmask"], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
assert transformed["posdocid"] == "LA010189-0001"
assert transformed["negdocid"] == "LA010189-0001"
assert transformed["qid"] == "301"
|
StarcoderdataPython
|
4820611
|
<filename>backend/initiatives/views/__init__.py
from .admin_views import *
from .views import *
|
StarcoderdataPython
|
1794795
|
<reponame>acharal/tensorflow<gh_stars>0
import tensorflow as tf
from tensorflow.python.framework import function
ack = function.Declare("tak", [("x", tf.int32), ("y", tf.int32), ("z", tf.int32)], [("ret", tf.int32)])
@function.Defun(tf.int32, tf.int32, tf.int32, func_name="Tak", out_names=["ret"])
def TakImpl(x,y,z):
    return tf.cond(tf.less(y, x),
                   lambda: tak(tak(x-1,y,z), tak(y-1,z,x), tak(z-1,x,y)),
                   lambda: z)
TakImpl.add_to_graph(tf.get_default_graph())
x = tf.placeholder(tf.int32, shape=[])
y = tf.placeholder(tf.int32, shape=[])
z = tf.placeholder(tf.int32, shape=[])
res = tak(x,y,z)
writer = tf.summary.FileWriter('./graphs', tf.get_default_graph())
sess = tf.Session()
#print(tf.get_default_graph().as_graph_def())
writer.close()
print(sess.run(res, feed_dict={x:24, y:16, z:8}))
sess.close()
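# For reference, a plain-Python sketch of the Takeuchi recursion that the graph
# above encodes (added here as an illustration, not part of the original script):
def tak_py(x, y, z):
    # Return z once y >= x; otherwise recurse on the three rotated argument triples.
    if y < x:
        return tak_py(tak_py(x - 1, y, z), tak_py(y - 1, z, x), tak_py(z - 1, x, y))
    return z
# tak_py(24, 16, 8) should match the value printed by sess.run above, although the
# naive recursion makes a very large number of calls for inputs of that size.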
|
StarcoderdataPython
|
129957
|
import string
size = 10
mid_line = '-'.join([string.ascii_letters[size - x] for x in range(1, size)] + [string.ascii_letters[x] for x in range(size)])
lines = []
for x in range(2,size+1):
main = ''.join(string.ascii_letters[size - x] for x in range(1, x))
*main_list,_ = list(main)
reverse = ''.join(x for x in reversed(main_list))
line = '-'.join(main+reverse)
num = (len(mid_line)-len(line)) // 2
output_line = '-' * num + line + '-' * num
lines.append(output_line)
[print(x) for x in lines]
print(mid_line)
[print(x) for x in reversed(lines)]
|
StarcoderdataPython
|
3284618
|
<reponame>munniomer/Send-IT-Api-v1
"""User views contains Signup and login Resources"""
from app.api.v1.models.user_model import UserModel
from flask import Flask, request, make_response, json, jsonify
from flask_restful import Resource
from validators.validators import Validators
db = UserModel()
validate = Validators()
class SignupResource(Resource):
"""Resource for user registration."""
def post(self):
"""Method for posting user data"""
request_data = request.get_json()
print(request_data)
fname = request_data["fname"]
lname = request_data["lname"]
email = request_data["email"]
phone = request_data["phone"]
password = request_data["password"]
confirm_password = request_data["confirm_password"]
city = request_data["city"]
# Checks if names and city are valid
if not validate.valid_name(fname) or not validate.valid_name(lname) or not validate.valid_name(city):
return {'message': "PLease check if your fname, lname or city is empty or contains numbers"}, 400
# Checks if email is valid
if not validate.valid_email(email):
return {'message': "Please enter a valid email "}, 400
# checks if email exists
check_email = db.check_email(email)
if check_email:
return {'message': 'That email exists. use a unique email'}, 400
# Checks if phone is valid
if not isinstance(phone, int):
return {'message': "Please enter a valid phone number "}, 400
# Checks if passwords are empty or less than 3
if not validate.valid_password(password) or not validate.valid_password(confirm_password):
return {'message': "Please check if your password or confirm password are empty or less than 3"}, 400
# checks if confirm password is equal to password
if confirm_password != password:
return {"message": "confirm password does not match password"},400
data=db.add_user(fname, lname, email, phone,
password, confirm_password, city)
return {"All users": data,
"message": "User successfully created", }, 201
|
StarcoderdataPython
|
1722891
|
import argparse
import logging
from enum import Enum
from codigofacilito import unreleased, released, articles
from .config import DEBUG
if DEBUG:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
class Items(str, Enum):
WORSHOPS = "workshops"
ARTICLES = "articles"
item_choices = [tag.value for tag in Items]
def main(*args, **kwargs):
if kwargs.get("items", None) == Items.WORSHOPS:
if kwargs.get("unreleased", False):
logging.info(unreleased())
else:
logging.info(released())
elif kwargs.get("items", None) == Items.ARTICLES:
logging.info(articles())
else:
logging.error("No valid item selected.")
if __name__ == '__main__':
    logging.debug('>>> Starting package execution.')
    logging.debug('>>> Processing arguments...')
parser = argparse.ArgumentParser()
parser.add_argument(
'--items',
help='flag to choose between "workshops" or "articles"',
type=str,
required=True,
choices=item_choices
)
parser.add_argument(
'--unreleased',
help='flag to return unreleased workshops',
dest='unreleased',
action='store_true'
)
parser.add_argument(
'--no-unreleased',
        help='flag to exclude unreleased workshops',
dest='unreleased',
action='store_false'
)
parser.set_defaults(unreleased=False)
args = parser.parse_args()
logging.debug(f'>>> {args}')
main(items=args.items, unreleased=args.unreleased)
    logging.debug('>>> Finishing package execution.')
|
StarcoderdataPython
|
21067
|
<reponame>zhihou7/VCL<gh_stars>10-100
# --------------------------------------------------------
# Tensorflow VCL
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>, based on code from Transferable-Interactiveness-Network, <NAME>, <NAME> and <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.python.framework import ops
from ult.tools import get_convert_matrix
from ult.config import cfg
from ult.visualization import draw_bounding_boxes_HOI
import numpy as np
def resnet_arg_scope(is_training=True,
weight_decay=cfg.TRAIN.WEIGHT_DECAY,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'is_training': False,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'trainable': False,
'updates_collections': ops.GraphKeys.UPDATE_OPS
}
with arg_scope(
[slim.conv2d, slim.fully_connected],
weights_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
weights_initializer = slim.variance_scaling_initializer(),
biases_regularizer = tf.contrib.layers.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
biases_initializer = tf.constant_initializer(0.0),
trainable = is_training,
activation_fn = tf.nn.relu,
normalizer_fn = slim.batch_norm,
normalizer_params = batch_norm_params):
with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
class ResNet101():
def __init__(self, model_name):
self.model_name = model_name
self.visualize = {}
self.test_visualize = {}
self.intermediate = {}
self.predictions = {}
self.score_summaries = {}
self.event_summaries = {}
self.train_summaries = []
self.losses = {}
self.image = tf.placeholder(tf.float32, shape=[1, None, None, 3], name = 'image')
self.spatial = tf.placeholder(tf.float32, shape=[None, 64, 64, 3], name = 'sp')
self.H_boxes = tf.placeholder(tf.float32, shape=[None, 5], name = 'H_boxes')
self.O_boxes = tf.placeholder(tf.float32, shape=[None, 5], name = 'O_boxes')
self.gt_class_HO = tf.placeholder(tf.float32, shape=[None, 600], name = 'gt_class_HO')
self.H_num = tf.placeholder(tf.int32) # positive nums
self.image_id = tf.placeholder(tf.int32)
self.num_classes = 600
self.compose_num_classes = 600
self.num_fc = 1024
self.verb_num_classes = 117
self.obj_num_classes = 80
self.scope = 'resnet_v1_101'
self.stride = [16, ]
self.lr = tf.placeholder(tf.float32)
if tf.__version__ == '1.1.0':
raise Exception('wrong tensorflow version 1.1.0')
else:
from tensorflow.contrib.slim.python.slim.nets.resnet_v1 import resnet_v1_block
self.blocks = [resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
resnet_v1_block('block3', base_depth=256, num_units=23, stride=1),
resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
resnet_v1_block('block5', base_depth=512, num_units=3, stride=1)]
if self.model_name.__contains__('unique_weights') or self.model_name.__contains__('_pa3')\
or self.model_name.__contains__('_pa4'):
print("add block6 unique_weights2")
self.blocks.append(resnet_v1_block('block6', base_depth=512, num_units=3, stride=1))
"""We copy from TIN. calculated by log(1/(n_c/sum(n_c)) c is the category and n_c is
the number of positive samples"""
self.HO_weight = np.array([
9.192927, 9.778443, 10.338059, 9.164914, 9.075144, 10.045923, 8.714437, 8.59822, 12.977117, 6.2745423,
11.227917, 6.765012, 9.436157, 9.56762, 11.0675745, 11.530198, 9.609821, 9.897503, 6.664475, 6.811699,
6.644726, 9.170454, 13.670264, 3.903943, 10.556748, 8.814335, 9.519224, 12.753973, 11.590822, 8.278912,
5.5245695, 9.7286825, 8.997436, 10.699849, 9.601237, 11.965516, 9.192927, 10.220277, 6.056692, 7.734048,
8.42324, 6.586457, 6.969533, 10.579222, 13.670264, 4.4531965, 9.326459, 9.288238, 8.071842, 10.431585,
12.417501, 11.530198, 11.227917, 4.0678477, 8.854023, 12.571651, 8.225684, 10.996116, 11.0675745,
10.100731,
7.0376034, 7.463688, 12.571651, 14.363411, 5.4902234, 11.0675745, 14.363411, 8.45805, 10.269067,
9.820116,
14.363411, 11.272368, 11.105314, 7.981595, 9.198626, 3.3284247, 14.363411, 12.977117, 9.300817,
10.032678,
12.571651, 10.114916, 10.471591, 13.264799, 14.363411, 8.01953, 10.412168, 9.644913, 9.981384,
7.2197933,
14.363411, 3.1178555, 11.031207, 8.934066, 7.546675, 6.386472, 12.060826, 8.862153, 9.799063, 12.753973,
12.753973, 10.412168, 10.8976755, 10.471591, 12.571651, 9.519224, 6.207762, 12.753973, 6.60636,
6.2896967,
4.5198326, 9.7887, 13.670264, 11.878505, 11.965516, 8.576513, 11.105314, 9.192927, 11.47304, 11.367679,
9.275815, 11.367679, 9.944571, 11.590822, 10.451388, 9.511381, 11.144535, 13.264799, 5.888291,
11.227917,
10.779892, 7.643191, 11.105314, 9.414651, 11.965516, 14.363411, 12.28397, 9.909063, 8.94731, 7.0330057,
8.129001, 7.2817025, 9.874775, 9.758241, 11.105314, 5.0690055, 7.4768796, 10.129305, 9.54313, 13.264799,
9.699972, 11.878505, 8.260853, 7.1437693, 6.9321113, 6.990665, 8.8104515, 11.655361, 13.264799,
4.515912,
9.897503, 11.418972, 8.113436, 8.795067, 10.236277, 12.753973, 14.363411, 9.352776, 12.417501,
0.6271591,
12.060826, 12.060826, 12.166186, 5.2946343, 11.318889, 9.8308115, 8.016022, 9.198626, 10.8976755,
13.670264,
11.105314, 14.363411, 9.653881, 9.503599, 12.753973, 5.80546, 9.653881, 9.592727, 12.977117, 13.670264,
7.995224, 8.639826, 12.28397, 6.586876, 10.929424, 13.264799, 8.94731, 6.1026597, 12.417501, 11.47304,
10.451388, 8.95624, 10.996116, 11.144535, 11.031207, 13.670264, 13.670264, 6.397866, 7.513285, 9.981384,
11.367679, 11.590822, 7.4348736, 4.415428, 12.166186, 8.573451, 12.977117, 9.609821, 8.601359, 9.055143,
11.965516, 11.105314, 13.264799, 5.8201604, 10.451388, 9.944571, 7.7855496, 14.363411, 8.5463,
13.670264,
7.9288645, 5.7561946, 9.075144, 9.0701065, 5.6871653, 11.318889, 10.252538, 9.758241, 9.407584,
13.670264,
8.570397, 9.326459, 7.488179, 11.798462, 9.897503, 6.7530537, 4.7828183, 9.519224, 7.6492405, 8.031909,
7.8180614, 4.451856, 10.045923, 10.83705, 13.264799, 13.670264, 4.5245686, 14.363411, 10.556748,
10.556748,
14.363411, 13.670264, 14.363411, 8.037262, 8.59197, 9.738439, 8.652985, 10.045923, 9.400566, 10.9622135,
11.965516, 10.032678, 5.9017305, 9.738439, 12.977117, 11.105314, 10.725825, 9.080208, 11.272368,
14.363411,
14.363411, 13.264799, 6.9279733, 9.153925, 8.075553, 9.126969, 14.363411, 8.903826, 9.488214, 5.4571533,
10.129305, 10.579222, 12.571651, 11.965516, 6.237189, 9.428937, 9.618479, 8.620408, 11.590822,
11.655361,
9.968962, 10.8080635, 10.431585, 14.363411, 3.796231, 12.060826, 10.302968, 9.551227, 8.75394,
10.579222,
9.944571, 14.363411, 6.272396, 10.625742, 9.690582, 13.670264, 11.798462, 13.670264, 11.724354,
9.993963,
8.230013, 9.100721, 10.374427, 7.865129, 6.514087, 14.363411, 11.031207, 11.655361, 12.166186, 7.419324,
9.421769, 9.653881, 10.996116, 12.571651, 13.670264, 5.912144, 9.7887, 8.585759, 8.272101, 11.530198,
8.886948,
5.9870906, 9.269661, 11.878505, 11.227917, 13.670264, 8.339964, 7.6763024, 10.471591, 10.451388,
13.670264,
11.185357, 10.032678, 9.313555, 12.571651, 3.993144, 9.379805, 9.609821, 14.363411, 9.709451, 8.965248,
10.451388, 7.0609145, 10.579222, 13.264799, 10.49221, 8.978916, 7.124196, 10.602211, 8.9743395, 7.77862,
8.073695, 9.644913, 9.339531, 8.272101, 4.794418, 9.016304, 8.012526, 10.674532, 14.363411, 7.995224,
12.753973, 5.5157638, 8.934066, 10.779892, 7.930471, 11.724354, 8.85808, 5.9025764, 14.363411,
12.753973,
12.417501, 8.59197, 10.513264, 10.338059, 14.363411, 7.7079706, 14.363411, 13.264799, 13.264799,
10.752493,
14.363411, 14.363411, 13.264799, 12.417501, 13.670264, 6.5661197, 12.977117, 11.798462, 9.968962,
12.753973,
11.47304, 11.227917, 7.6763024, 10.779892, 11.185357, 14.363411, 7.369478, 14.363411, 9.944571,
10.779892,
10.471591, 9.54313, 9.148476, 10.285873, 10.412168, 12.753973, 14.363411, 6.0308623, 13.670264,
10.725825,
12.977117, 11.272368, 7.663911, 9.137665, 10.236277, 13.264799, 6.715625, 10.9622135, 14.363411,
13.264799,
9.575919, 9.080208, 11.878505, 7.1863923, 9.366199, 8.854023, 9.874775, 8.2857685, 13.670264, 11.878505,
12.166186, 7.616999, 9.44343, 8.288065, 8.8104515, 8.347254, 7.4738197, 10.302968, 6.936267, 11.272368,
7.058223, 5.0138307, 12.753973, 10.173757, 9.863602, 11.318889, 9.54313, 10.996116, 12.753973,
7.8339925,
7.569945, 7.4427395, 5.560738, 12.753973, 10.725825, 10.252538, 9.307165, 8.491293, 7.9161053,
7.8849015,
7.782772, 6.3088884, 8.866243, 9.8308115, 14.363411, 10.8976755, 5.908519, 10.269067, 9.176025,
9.852551,
9.488214, 8.90809, 8.537411, 9.653881, 8.662968, 11.965516, 10.143904, 14.363411, 14.363411, 9.407584,
5.281472, 11.272368, 12.060826, 14.363411, 7.4135547, 8.920994, 9.618479, 8.891141, 14.363411,
12.060826,
11.965516, 10.9622135, 10.9622135, 14.363411, 5.658909, 8.934066, 12.571651, 8.614018, 11.655361,
13.264799,
10.996116, 13.670264, 8.965248, 9.326459, 11.144535, 14.363411, 6.0517673, 10.513264, 8.7430105,
10.338059,
13.264799, 6.878481, 9.065094, 8.87035, 14.363411, 9.92076, 6.5872955, 10.32036, 14.363411, 9.944571,
11.798462, 10.9622135, 11.031207, 7.652888, 4.334878, 13.670264, 13.670264, 14.363411, 10.725825,
12.417501,
14.363411, 13.264799, 11.655361, 10.338059, 13.264799, 12.753973, 8.206432, 8.916674, 8.59509,
14.363411,
7.376845, 11.798462, 11.530198, 11.318889, 11.185357, 5.0664344, 11.185357, 9.372978, 10.471591,
9.6629305,
11.367679, 8.73579, 9.080208, 11.724354, 5.04781, 7.3777695, 7.065643, 12.571651, 11.724354, 12.166186,
12.166186, 7.215852, 4.374113, 11.655361, 11.530198, 14.363411, 6.4993753, 11.031207, 8.344818,
10.513264,
10.032678, 14.363411, 14.363411, 4.5873594, 12.28397, 13.670264, 12.977117, 10.032678, 9.609821
], dtype='float32').reshape(1, 600)
num_inst_path = cfg.ROOT_DIR + '/Data/num_inst.npy'
num_inst = np.load(num_inst_path)
self.num_inst = num_inst
verb_to_HO_matrix, obj_to_HO_matrix = get_convert_matrix(self.verb_num_classes, self.obj_num_classes)
self.obj_to_HO_matrix = tf.constant(obj_to_HO_matrix, tf.float32)
self.verb_to_HO_matrix = tf.constant(verb_to_HO_matrix, tf.float32)
self.gt_obj_class = tf.cast(tf.matmul(self.gt_class_HO, self.obj_to_HO_matrix, transpose_b=True) > 0,
tf.float32)
self.gt_verb_class = tf.cast(tf.matmul(self.gt_class_HO, self.verb_to_HO_matrix, transpose_b=True) > 0,
tf.float32)
def init_table(self):
pass
def set_ph(self, image, image_id, num_pos, Human_augmented, Object_augmented, action_HO, sp):
if image is not None: self.image = image
if image_id is not None: self.image_id = image_id
if sp is not None: self.spatial = sp
if Human_augmented is not None: self.H_boxes = Human_augmented
if Object_augmented is not None: self.O_boxes = Object_augmented
if action_HO is not None: self.gt_class_HO = action_HO
self.H_num = num_pos
self.reset_classes()
def reset_classes(self):
from ult.tools import get_convert_matrix
verb_to_HO_matrix, obj_to_HO_matrix = get_convert_matrix(self.verb_num_classes, self.obj_num_classes)
self.obj_to_HO_matrix = tf.constant(obj_to_HO_matrix, tf.float32)
self.verb_to_HO_matrix = tf.constant(verb_to_HO_matrix, tf.float32)
self.gt_obj_class = tf.cast(tf.matmul(self.gt_class_HO, self.obj_to_HO_matrix, transpose_b=True) > 0,
tf.float32)
self.gt_verb_class = tf.cast(tf.matmul(self.gt_class_HO, self.verb_to_HO_matrix, transpose_b=True) > 0,
tf.float32)
def build_base(self):
with tf.variable_scope(self.scope, self.scope, reuse=tf.AUTO_REUSE,):
net = resnet_utils.conv2d_same(self.image, 64, 7, stride=2, scope='conv1')
net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')
return net
def image_to_head(self, is_training):
with slim.arg_scope(resnet_arg_scope(is_training=False)):
net = self.build_base()
net, _ = resnet_v1.resnet_v1(net,
self.blocks[0:cfg.RESNET.FIXED_BLOCKS],
global_pool=False,
include_root_block=False,
reuse=tf.AUTO_REUSE,
scope=self.scope)
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
if self.model_name.__contains__('unique_weights'):
print("unique_weights3")
stop = -3
else:
stop = -2
head, _ = resnet_v1.resnet_v1(net,
self.blocks[cfg.RESNET.FIXED_BLOCKS:stop],
global_pool=False,
include_root_block=False,
reuse=tf.AUTO_REUSE,
scope=self.scope)
return head
def sp_to_head(self):
with tf.variable_scope(self.scope, self.scope, reuse=tf.AUTO_REUSE,):
ends = 2
if self.model_name.__contains__('_spose'):
ends = 3
conv1_sp = slim.conv2d(self.spatial[:,:,:,0:ends], 64, [5, 5], padding='VALID', scope='conv1_sp')
pool1_sp = slim.max_pool2d(conv1_sp, [2, 2], scope='pool1_sp')
conv2_sp = slim.conv2d(pool1_sp, 32, [5, 5], padding='VALID', scope='conv2_sp')
pool2_sp = slim.max_pool2d(conv2_sp, [2, 2], scope='pool2_sp')
pool2_flat_sp = slim.flatten(pool2_sp)
return pool2_flat_sp
def res5(self, pool5_H, pool5_O, sp, is_training, name):
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
if pool5_H is None:
fc7_H = None
else:
fc7_H, _ = resnet_v1.resnet_v1(pool5_H,
self.blocks[-2:-1],
global_pool=False,
include_root_block=False,
reuse=tf.AUTO_REUSE,
scope=self.scope)
# fc7_H = tf.reduce_mean(fc7_H, axis=[1, 2])
if pool5_O is None:
fc7_O = None
else:
fc7_O, _ = resnet_v1.resnet_v1(pool5_O,
self.blocks[-1:],
global_pool=False,
include_root_block=False,
reuse=tf.AUTO_REUSE,
scope=self.scope)
# fc7_O = tf.reduce_mean(fc7_O, axis=[1, 2])
return fc7_H, fc7_O
def head_to_tail(self, fc7_H, fc7_O, pool5_SH, pool5_SO, sp, is_training, name):
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
fc7_SH = tf.reduce_mean(pool5_SH, axis=[1, 2])
fc7_SO = tf.reduce_mean(pool5_SO, axis=[1, 2])
Concat_SH = tf.concat([fc7_H, fc7_SH], 1)
fc8_SH = slim.fully_connected(Concat_SH, self.num_fc, scope='fc8_SH', reuse=tf.AUTO_REUSE)
fc8_SH = slim.dropout(fc8_SH, keep_prob=0.5, is_training=is_training, scope='dropout8_SH')
fc9_SH = slim.fully_connected(fc8_SH, self.num_fc, scope='fc9_SH', reuse=tf.AUTO_REUSE)
fc9_SH = slim.dropout(fc9_SH, keep_prob=0.5, is_training=is_training, scope='dropout9_SH')
Concat_SO = tf.concat([fc7_O, fc7_SO], 1)
fc8_SO = slim.fully_connected(Concat_SO, self.num_fc, scope='fc8_SO', reuse=tf.AUTO_REUSE)
fc8_SO = slim.dropout(fc8_SO, keep_prob=0.5, is_training=is_training, scope='dropout8_SO')
fc9_SO = slim.fully_connected(fc8_SO, self.num_fc, scope='fc9_SO', reuse=tf.AUTO_REUSE)
fc9_SO = slim.dropout(fc9_SO, keep_prob=0.5, is_training=is_training, scope='dropout9_SO')
Concat_SHsp = tf.concat([fc7_H, sp], 1)
Concat_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc, scope='Concat_SHsp', reuse=tf.AUTO_REUSE)
Concat_SHsp = slim.dropout(Concat_SHsp, keep_prob=0.5, is_training=is_training, scope='dropout6_SHsp')
fc7_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc, scope='fc7_SHsp', reuse=tf.AUTO_REUSE)
fc7_SHsp = slim.dropout(fc7_SHsp, keep_prob=0.5, is_training=is_training, scope='dropout7_SHsp')
return fc9_SH, fc9_SO, fc7_SHsp
def crop_pool_layer(self, bottom, rois, name):
with tf.variable_scope(name) as scope:
batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name="batch_id"), [1])
bboxes = self.trans_boxes_by_feats(bottom, rois)
if cfg.RESNET.MAX_POOL:
pre_pool_size = cfg.POOLING_SIZE * 2
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [pre_pool_size, pre_pool_size], name="crops")
crops = slim.max_pool2d(crops, [2, 2], padding='SAME')
else:
crops = tf.image.crop_and_resize(bottom, bboxes, tf.to_int32(batch_ids), [cfg.POOLING_SIZE, cfg.POOLING_SIZE], name="crops")
return crops
def trans_boxes_by_feats(self, bottom, rois):
bottom_shape = tf.shape(bottom)
height = (tf.to_float(bottom_shape[1]) - 1.) * np.float32(self.stride[0])
width = (tf.to_float(bottom_shape[2]) - 1.) * np.float32(self.stride[0])
x1 = tf.slice(rois, [0, 1], [-1, 1], name="x1") / width
y1 = tf.slice(rois, [0, 2], [-1, 1], name="y1") / height
x2 = tf.slice(rois, [0, 3], [-1, 1], name="x2") / width
y2 = tf.slice(rois, [0, 4], [-1, 1], name="y2") / height
bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))
return bboxes
def attention_pool_layer_H(self, bottom, fc7_H, is_training, name):
with tf.variable_scope(name) as scope:
fc1 = slim.fully_connected(fc7_H, 512, scope='fc1_b')
fc1 = slim.dropout(fc1, keep_prob=0.8, is_training=is_training, scope='dropout1_b')
fc1 = tf.reshape(fc1, [tf.shape(fc1)[0], 1, 1, tf.shape(fc1)[1]])
att = tf.reduce_mean(tf.multiply(bottom, fc1), 3, keep_dims=True)
return att
def attention_norm_H(self, att, name):
with tf.variable_scope(name) as scope:
att = tf.transpose(att, [0, 3, 1, 2])
att_shape = tf.shape(att)
att = tf.reshape(att, [att_shape[0], att_shape[1], -1])
att = tf.nn.softmax(att)
att = tf.reshape(att, att_shape)
att = tf.transpose(att, [0, 2, 3, 1])
return att
def attention_pool_layer_O(self, bottom, fc7_O, is_training, name):
with tf.variable_scope(name) as scope:
fc1 = slim.fully_connected(fc7_O, 512, scope='fc1_b')
fc1 = slim.dropout(fc1, keep_prob=0.8, is_training=is_training, scope='dropout1_b')
fc1 = tf.reshape(fc1, [tf.shape(fc1)[0], 1, 1, tf.shape(fc1)[1]])
att = tf.reduce_mean(tf.multiply(bottom, fc1), 3, keep_dims=True)
return att
def attention_norm_O(self, att, name):
with tf.variable_scope(name) as scope:
att = tf.transpose(att, [0, 3, 1, 2])
att_shape = tf.shape(att)
att = tf.reshape(att, [att_shape[0], att_shape[1], -1])
att = tf.nn.softmax(att)
att = tf.reshape(att, att_shape)
att = tf.transpose(att, [0, 2, 3, 1])
return att
def region_classification(self, fc7_H, fc7_O, fc7_SHsp, is_training, initializer, name):
with tf.variable_scope(name) as scope:
cls_score_H = slim.fully_connected(fc7_H, self.num_classes,
weights_initializer=initializer,
trainable=is_training,
activation_fn=None, scope='cls_score_H')
cls_prob_H = tf.nn.sigmoid(cls_score_H, name='cls_prob_H')
tf.reshape(cls_prob_H, [-1, self.num_classes])
cls_score_O = slim.fully_connected(fc7_O, self.num_classes,
weights_initializer=initializer,
trainable=is_training,
activation_fn=None, scope='cls_score_O')
cls_prob_O = tf.nn.sigmoid(cls_score_O, name='cls_prob_O')
tf.reshape(cls_prob_O, [-1, self.num_classes])
cls_score_sp = slim.fully_connected(fc7_SHsp, self.num_classes,
weights_initializer=initializer,
trainable=is_training,
activation_fn=None, scope='cls_score_sp')
cls_prob_sp = tf.nn.sigmoid(cls_score_sp, name='cls_prob_sp')
tf.reshape(cls_prob_sp, [-1, self.num_classes])
self.predictions["cls_score_H"] = cls_score_H
self.predictions["cls_prob_H"] = cls_prob_H
self.predictions["cls_score_O"] = cls_score_O
self.predictions["cls_prob_O"] = cls_prob_O
self.predictions["cls_score_sp"] = cls_score_sp
self.predictions["cls_prob_sp"] = cls_prob_sp
self.predictions["cls_prob_HO"] = cls_prob_sp * (cls_prob_O + cls_prob_H)
return cls_prob_H, cls_prob_O, cls_prob_sp
def bottleneck(self, bottom, is_training, name, reuse=False):
with tf.variable_scope(name) as scope:
if reuse:
scope.reuse_variables()
head_bottleneck = slim.conv2d(bottom, 1024, [1, 1], scope=name)
return head_bottleneck
def build_network(self, is_training):
initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
# ResNet Backbone
head = self.image_to_head(is_training)
sp = self.sp_to_head()
pool5_H = self.crop_pool_layer(head, self.H_boxes, 'Crop_H')
pool5_O = self.crop_pool_layer(head, self.O_boxes[:self.H_num,:], 'Crop_O')
fc7_H, fc7_O = self.res5(pool5_H, pool5_O, sp, is_training, 'res5')
fc7_H = tf.reduce_mean(fc7_H, axis=[1, 2])
fc7_O = tf.reduce_mean(fc7_O, axis=[1, 2])
# Phi
head_phi = slim.conv2d(head, 512, [1, 1], scope='head_phi')
# g
head_g = slim.conv2d(head, 512, [1, 1], scope='head_g')
Att_H = self.attention_pool_layer_H(head_phi, fc7_H, is_training, 'Att_H')
Att_H = self.attention_norm_H(Att_H, 'Norm_Att_H')
att_head_H = tf.multiply(head_g, Att_H)
Att_O = self.attention_pool_layer_O(head_phi, fc7_O, is_training, 'Att_O')
Att_O = self.attention_norm_O(Att_O, 'Norm_Att_O')
att_head_O = tf.multiply(head_g, Att_O)
pool5_SH = self.bottleneck(att_head_H, is_training, 'bottleneck', False)
pool5_SO = self.bottleneck(att_head_O, is_training, 'bottleneck', True)
# fc7_O = tf.Print(fc7_O, [tf.shape(fc7_O), tf.shape(fc7_H)], message='check fc7_O:')
fc7_SH, fc7_SO, fc7_SHsp = self.head_to_tail(fc7_H, fc7_O, pool5_SH, pool5_SO, sp, is_training, 'fc_HO')
# fc7_SO = tf.Print(fc7_SO, [tf.shape(fc7_SO), tf.shape(fc7_SH), tf.shape(fc7_SHsp)], message='check fc7_SHsp:')
cls_prob_H, cls_prob_O, cls_prob_sp = self.region_classification(fc7_SH, fc7_SO, fc7_SHsp, is_training, initializer, 'classification')
self.score_summaries.update(self.predictions)
self.visualize["attention_map_H"] = (Att_H - tf.reduce_min(Att_H[0,:,:,:])) / tf.reduce_max((Att_H[0,:,:,:] - tf.reduce_min(Att_H[0,:,:,:])))
self.visualize["attention_map_O"] = (Att_O - tf.reduce_min(Att_O[0,:,:,:])) / tf.reduce_max((Att_O[0,:,:,:] - tf.reduce_min(Att_O[0,:,:,:])))
return cls_prob_H, cls_prob_O, cls_prob_sp
def create_architecture(self, is_training):
self.build_network(is_training)
# for var in tf.trainable_variables():
# self.train_summaries.append(var)
if is_training: self.add_loss()
layers_to_output = {}
layers_to_output.update(self.losses)
val_summaries = []
if is_training:
with tf.device("/cpu:0"):
# val_summaries.append(self.add_gt_image_summary_H())
# val_summaries.append(self.add_gt_image_summary_HO())
# tf.summary.image('ATTENTION_MAP_H', self.visualize["attention_map_H"], max_outputs=1)
# tf.summary.image('ATTENTION_MAP_O', self.visualize["attention_map_O"], max_outputs=1)
for key, var in self.visualize.items():
tf.summary.image(key, var, max_outputs=1)
for key, var in self.event_summaries.items():
val_summaries.append(tf.summary.scalar(key, var))
# val_summaries.append(tf.summary.scalar('lr', self.lr))
self.summary_op = tf.summary.merge_all()
self.summary_op_val = tf.summary.merge(val_summaries)
return layers_to_output
def add_loss(self):
with tf.variable_scope('LOSS') as scope:
cls_score_H = self.predictions["cls_score_H"]
cls_score_O = self.predictions["cls_score_O"]
cls_score_sp = self.predictions["cls_score_sp"]
label_HO = self.gt_class_HO
H_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = label_HO[:self.H_num,:], logits = cls_score_H[:self.H_num,:]))
O_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = label_HO[:self.H_num,:], logits = cls_score_O[:self.H_num,:]))
sp_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels = label_HO, logits = cls_score_sp))
self.losses['H_cross_entropy'] = H_cross_entropy
self.losses['O_cross_entropy'] = O_cross_entropy
self.losses['sp_cross_entropy'] = sp_cross_entropy
loss = H_cross_entropy + O_cross_entropy + sp_cross_entropy
self.losses['total_loss'] = loss
self.event_summaries.update(self.losses)
return loss
def add_gt_image_summary_H(self):
image = tf.py_func(draw_bounding_boxes_HOI,
[tf.reverse(self.image+cfg.PIXEL_MEANS, axis=[-1]), self.H_boxes, self.gt_class_HO],
tf.float32, name="gt_boxes_H")
return tf.summary.image('GROUND_TRUTH_H', image)
def add_gt_image_summary_HO(self):
image = tf.py_func(draw_bounding_boxes_HOI,
[tf.reverse(self.image+cfg.PIXEL_MEANS, axis=[-1]), self.O_boxes, self.gt_class_HO],
tf.float32, name="gt_boxes_HO")
        return tf.summary.image('GROUND_TRUTH_HO', image)
def add_score_summary(self, key, tensor):
if tensor is not None and tensor.op is not None:
tf.summary.histogram('SCORE/' + tensor.op.name + '/' + key + '/scores', tensor)
def add_train_summary(self, var):
tf.summary.histogram('TRAIN/' + var.op.name, var)
def get_feed_dict(self, blobs):
feed_dict = {self.image: blobs['image'], self.H_boxes: blobs['H_boxes'],
self.O_boxes: blobs['O_boxes'], self.gt_class_HO: blobs['gt_class_HO'],
self.spatial: blobs['sp'],
# self.lr: lr,
self.H_num: blobs['H_num']}
return feed_dict
def train_step(self, sess, blobs, lr, train_op):
feed_dict = self.get_feed_dict(blobs)
loss, _ = sess.run([self.losses['total_loss'],
train_op],
feed_dict=feed_dict)
return loss
def train_step_with_summary(self, sess, blobs, lr, train_op):
feed_dict = self.get_feed_dict(blobs)
loss, summary, _ = sess.run([self.losses['total_loss'],
self.summary_op,
train_op],
feed_dict=feed_dict)
return loss, summary
def train_step_tfr(self, sess, blobs, lr, train_op):
loss, image_id, _ = sess.run([self.losses['total_loss'], self.image_id,
train_op])
return loss, image_id
def train_step_tfr_with_summary(self, sess, blobs, lr, train_op):
loss, summary, image_id, _ = sess.run([self.losses['total_loss'],
self.summary_op, self.image_id,
train_op])
return loss, image_id, summary
def test_image_HO(self, sess, image, blobs):
feed_dict = {self.image: image, self.H_boxes: blobs['H_boxes'], self.O_boxes: blobs['O_boxes'], self.spatial: blobs['sp'], self.H_num: blobs['H_num']}
cls_prob_HO = sess.run([self.predictions["cls_prob_HO"]], feed_dict=feed_dict)
return cls_prob_HO
def obtain_all_preds(self, sess, image, blobs):
feed_dict = {self.image: image, self.H_boxes: blobs['H_boxes'], self.O_boxes: blobs['O_boxes'],
self.spatial: blobs['sp'], self.H_num: blobs['H_num']}
cls_prob_HO, pH, pO, pSp = sess.run([self.predictions["cls_prob_HO"], self.predictions["cls_prob_H"],
self.predictions["cls_prob_O"], self.predictions["cls_prob_sp"]], feed_dict=feed_dict)
return cls_prob_HO, pH, pO, pSp, pSp
def obtain_all_preds_tfr(self, sess):
cls_prob_HO, pH, pO, pSp = sess.run([self.predictions["cls_prob_HO"], self.predictions["cls_prob_H"],
self.predictions["cls_prob_O"], self.predictions["cls_prob_sp"]])
return cls_prob_HO, pH, pO, pSp, pSp
|
StarcoderdataPython
|
30498
|
class O(object): pass
class A(O): pass
class B(O): pass
class C(O): pass
class D(O): pass
class E(O): pass
class K1(A,B,C): pass
class K2(D,B,E): pass
class K3(D,A): pass
class Z(K1,K2,K3): pass
print(K1.__mro__)
print(K2.__mro__)
print(K3.__mro__)
print(Z.__mro__)
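# Expected C3 linearizations (this is the classic MRO example; listed here for
# reference, assuming new-style classes as defined above):
#   K1: K1, A, B, C, O, object
#   K2: K2, D, B, E, O, object
#   K3: K3, D, A, O, object
#   Z : Z, K1, K2, K3, D, A, B, C, E, O, object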
|
StarcoderdataPython
|
1625765
|
"""
balances simple
"""
import archon.broker.broker as broker
import archon.exchange.exchanges as exc
a = broker.Broker(setAuto=False)
a.set_keys_exchange_file(path_file_apikeys="./apikeys.toml")
client = a.afacade.get_client(exc.BINANCE)
bal = client.get_account()["balances"]
for x in bal:
f,l = float(x["free"]),float(x["locked"])
a = x["asset"]
total = f+l
if total > 0:
        print(asset, total)
|
StarcoderdataPython
|
1718248
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
assert int('89') == 89
assert int('101', 2) == 5
assert int('0B101', 2) == 5
assert int('27', 8) == 23
assert int('027', 8) == 23
assert int('22', 16) == 34
assert int('0x22', 16) == 34
assert int('0X22', 16) == 34
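# Supplementary note: base 0 lets int() infer the base from the literal's prefix.
assert int('0x22', 0) == 34
assert int('0o27', 0) == 23
assert int('0b101', 0) == 5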
|
StarcoderdataPython
|
114297
|
<filename>piconumpy/test_cpython_capi.py
import numpy as np
from . import array
class Tests:
_array = array
def test_init_array(self):
a = self._array([1.0, 2.0])
assert a.size == 2
def test_init_array_numpy(self):
np_a = np.array([1.0, 2.0, 0.0, 0.0])
a = self._array(np_a.tolist())
assert a.size == np_a.size
assert a.tolist() == np_a.tolist()
def test_multiply(self):
a = self._array([1.0, 2.0])
assert (2 * a).tolist() == [2.0, 4.0]
assert (a * 3).tolist() == [3.0, 6.0]
def test_add(self):
a = self._array([1.0, 2.0])
assert (a + 2 * a).tolist() == [3.0, 6.0]
def test_divide(self):
a = self._array([1.0, 2.0])
assert (a / 2).tolist() == [0.5, 1.0]
def test_sequence(self):
a = self._array([1.0, 2.0])
assert len(a) == 2
assert a[1] == 2.0
|
StarcoderdataPython
|
3360250
|
def move(from_position, target_position):
print(f'Move disk from {from_position} to {target_position}')
def hanoi(disk_count, from_position, helper_position, target_position):
if not disk_count:
return
    hanoi(disk_count - 1, from_position, target_position, helper_position)
move(from_position, target_position)
hanoi(disk_count - 1, helper_position, from_position, target_position)
if __name__ == '__main__':
hanoi(4, "A", "B", "C")
|
StarcoderdataPython
|
64409
|
<gh_stars>1-10
"""
Author: Benny
Date: Nov 2019
"""
from data_utils.ModelNetDataLoader import ModelNetDataLoader
import argparse
import numpy as np
import os
import torch
import datetime
import logging
from pathlib import Path
from tqdm import tqdm
import sys
import provider
import importlib
import shutil
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))
def parse_args():
'''PARAMETERS'''
parser = argparse.ArgumentParser('PointNet')
parser.add_argument('--batch_size', type=int, default=24, help='batch size in training [default: 24]')
parser.add_argument('--model', default='pointnet_cls', help='model name [default: pointnet_cls]')
parser.add_argument('--epoch', default=200, type=int, help='number of epoch in training [default: 200]')
parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate in training [default: 0.001]')
parser.add_argument('--gpu', type=str, default='0', help='specify gpu device [default: 0]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
parser.add_argument('--optimizer', type=str, default='Adam', help='optimizer for training [default: Adam]')
parser.add_argument('--log_dir', type=str, default=None, help='experiment root')
parser.add_argument('--decay_rate', type=float, default=1e-4, help='decay rate [default: 1e-4]')
parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]')
return parser.parse_args()
def test(model, loader, num_class=40):
mean_correct = []
class_acc = np.zeros((num_class,3))
for j, data in tqdm(enumerate(loader), total=len(loader)):
points, target = data
target = target[:, 0]
points = points.transpose(2, 1)
points, target = points.cuda(), target.cuda()
classifier = model.eval()
pred, _ = classifier(points)
pred_choice = pred.data.max(1)[1]
for cat in np.unique(target.cpu()):
classacc = pred_choice[target==cat].eq(target[target==cat].long().data).cpu().sum()
class_acc[cat,0]+= classacc.item()/float(points[target==cat].size()[0])
class_acc[cat,1]+=1
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item()/float(points.size()[0]))
class_acc[:,2] = class_acc[:,0]/ class_acc[:,1]
class_acc = np.mean(class_acc[:,2])
instance_acc = np.mean(mean_correct)
return instance_acc, class_acc
def main(args):
def log_string(str):
logger.info(str)
print(str)
'''HYPER PARAMETER'''
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
'''CREATE DIR'''
timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
experiment_dir = Path('./log/')
experiment_dir.mkdir(exist_ok=True)
experiment_dir = experiment_dir.joinpath('classification')
experiment_dir.mkdir(exist_ok=True)
if args.log_dir is None:
experiment_dir = experiment_dir.joinpath(timestr)
else:
experiment_dir = experiment_dir.joinpath(args.log_dir)
experiment_dir.mkdir(exist_ok=True)
checkpoints_dir = experiment_dir.joinpath('checkpoints/')
checkpoints_dir.mkdir(exist_ok=True)
log_dir = experiment_dir.joinpath('logs/')
log_dir.mkdir(exist_ok=True)
'''LOG'''
args = parse_args()
logger = logging.getLogger("Model")
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
file_handler.setLevel(logging.INFO)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
log_string('PARAMETER ...')
log_string(args)
'''DATA LOADING'''
log_string('Load dataset ...')
DATA_PATH = 'data/modelnet40_normal_resampled/'
TRAIN_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='train',
normal_channel=args.normal)
TEST_DATASET = ModelNetDataLoader(root=DATA_PATH, npoint=args.num_point, split='test',
normal_channel=args.normal)
trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4)
testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)
'''MODEL LOADING'''
num_class = 40
MODEL = importlib.import_module(args.model)
shutil.copy('./models/%s.py' % args.model, str(experiment_dir))
shutil.copy('./models/pointnet_util.py', str(experiment_dir))
classifier = MODEL.get_model(num_class,normal_channel=args.normal).cuda()
criterion = MODEL.get_loss().cuda()
try:
checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
start_epoch = checkpoint['epoch']
classifier.load_state_dict(checkpoint['model_state_dict'])
log_string('Use pretrain model')
except:
log_string('No existing model, starting training from scratch...')
start_epoch = 0
if args.optimizer == 'Adam':
optimizer = torch.optim.Adam(
classifier.parameters(),
lr=args.learning_rate,
betas=(0.9, 0.999),
eps=1e-08,
weight_decay=args.decay_rate
)
else:
optimizer = torch.optim.SGD(classifier.parameters(), lr=0.01, momentum=0.9)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.7)
global_epoch = 0
global_step = 0
best_instance_acc = 0.0
best_class_acc = 0.0
mean_correct = []
    '''TRAINING'''
logger.info('Start training...')
for epoch in range(start_epoch,args.epoch):
log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
scheduler.step()
for batch_id, data in tqdm(enumerate(trainDataLoader, 0), total=len(trainDataLoader), smoothing=0.9):
points, target = data
points = points.data.numpy()
points = provider.random_point_dropout(points)
points[:,:, 0:3] = provider.random_scale_point_cloud(points[:,:, 0:3])
points[:,:, 0:3] = provider.shift_point_cloud(points[:,:, 0:3])
points = torch.Tensor(points)
target = target[:, 0]
points = points.transpose(2, 1)
points, target = points.cuda(), target.cuda()
optimizer.zero_grad()
classifier = classifier.train()
pred, trans_feat = classifier(points)
loss = criterion(pred, target.long(), trans_feat)
pred_choice = pred.data.max(1)[1]
correct = pred_choice.eq(target.long().data).cpu().sum()
mean_correct.append(correct.item() / float(points.size()[0]))
loss.backward()
optimizer.step()
global_step += 1
train_instance_acc = np.mean(mean_correct)
log_string('Train Instance Accuracy: %f' % train_instance_acc)
with torch.no_grad():
instance_acc, class_acc = test(classifier.eval(), testDataLoader)
if (instance_acc >= best_instance_acc):
best_instance_acc = instance_acc
best_epoch = epoch + 1
if (class_acc >= best_class_acc):
best_class_acc = class_acc
log_string('Test Instance Accuracy: %f, Class Accuracy: %f'% (instance_acc, class_acc))
log_string('Best Instance Accuracy: %f, Class Accuracy: %f'% (best_instance_acc, best_class_acc))
if (instance_acc >= best_instance_acc):
logger.info('Save model...')
savepath = str(checkpoints_dir) + '/best_model.pth'
log_string('Saving at %s'% savepath)
state = {
'epoch': best_epoch,
'instance_acc': instance_acc,
'class_acc': class_acc,
'model_state_dict': classifier.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, savepath)
global_epoch += 1
logger.info('End of training...')
if __name__ == '__main__':
args = parse_args()
main(args)
|
StarcoderdataPython
|
3215666
|
# -*- coding: utf-8 -*-
r"""
The set `\mathbb{P}^1(\QQ)` of cusps
EXAMPLES::
sage: Cusps
Set P^1(QQ) of all cusps
::
sage: Cusp(oo)
Infinity
"""
# ****************************************************************************
# Copyright (C) 2005 <NAME> <<EMAIL>>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.rings.all import Rational, Integer, ZZ, QQ
from sage.rings.infinity import Infinity, InfinityRing
from sage.structure.parent import Parent
from sage.misc.fast_methods import Singleton
from sage.structure.element import Element, is_InfinityElement
from sage.structure.richcmp import richcmp
from sage.libs.pari.all import pari, pari_gen
from sage.modular.modsym.p1list import lift_to_sl2z_llong
from sage.structure.element import is_Matrix
class Cusp(Element):
"""
A cusp.
A cusp is either a rational number or infinity, i.e., an element of
the projective line over Q. A Cusp is stored as a pair (a,b), where
gcd(a,b)=1 and a,b are of type Integer.
EXAMPLES::
sage: a = Cusp(2/3); b = Cusp(oo)
sage: a.parent()
Set P^1(QQ) of all cusps
sage: a.parent() is b.parent()
True
"""
def __init__(self, a, b=None, parent=None, check=True):
r"""
Create the cusp a/b in `\mathbb{P}^1(\QQ)`, where if b=0
this is the cusp at infinity.
When present, b must either be Infinity or coercible to an
Integer.
EXAMPLES::
sage: Cusp(2,3)
2/3
sage: Cusp(3,6)
1/2
sage: Cusp(1,0)
Infinity
sage: Cusp(infinity)
Infinity
sage: Cusp(5)
5
sage: Cusp(1/2)
1/2
sage: Cusp(1.5)
3/2
sage: Cusp(int(7))
7
sage: Cusp(1, 2, check=False)
1/2
sage: Cusp('sage', 2.5, check=False) # don't do this!
sage/2.50000000000000
::
sage: I**2
-1
sage: Cusp(I)
Traceback (most recent call last):
...
TypeError: unable to convert I to a cusp
::
sage: a = Cusp(2,3)
sage: loads(a.dumps()) == a
True
::
sage: Cusp(1/3,0)
Infinity
sage: Cusp((1,0))
Infinity
TESTS::
sage: Cusp("1/3", 5)
1/15
sage: Cusp(Cusp(3/5), 7)
3/35
sage: Cusp(5/3, 0)
Infinity
sage: Cusp(3,oo)
0
sage: Cusp((7,3), 5)
7/15
sage: Cusp(int(5), 7)
5/7
::
sage: Cusp(0,0)
Traceback (most recent call last):
...
TypeError: unable to convert (0, 0) to a cusp
::
sage: Cusp(oo,oo)
Traceback (most recent call last):
...
TypeError: unable to convert (+Infinity, +Infinity) to a cusp
::
sage: Cusp(Cusp(oo),oo)
Traceback (most recent call last):
...
TypeError: unable to convert (Infinity, +Infinity) to a cusp
Conversion from PARI is supported (see :trac:`32091`)::
sage: Cusp(pari.oo())
Infinity
sage: Cusp(pari(2/3))
2/3
"""
if parent is None:
parent = Cusps
Element.__init__(self, parent)
if not check:
self.__a = a
self.__b = b
return
if b is None:
if isinstance(a, Integer):
self.__a = a
self.__b = ZZ.one()
elif isinstance(a, Rational):
self.__a = a.numer()
self.__b = a.denom()
elif (is_InfinityElement(a) or
(isinstance(a, pari_gen) and a.type() == 't_INFINITY')):
self.__a = ZZ.one()
self.__b = ZZ.zero()
elif isinstance(a, Cusp):
self.__a = a.__a
self.__b = a.__b
elif isinstance(a, int):
self.__a = ZZ(a)
self.__b = ZZ.one()
elif isinstance(a, (tuple, list)):
if len(a) != 2:
raise TypeError("unable to convert %r to a cusp" % a)
if ZZ(a[1]) == 0:
self.__a = ZZ.one()
self.__b = ZZ.zero()
return
try:
r = QQ((a[0], a[1]))
self.__a = r.numer()
self.__b = r.denom()
except (ValueError, TypeError):
raise TypeError("unable to convert %r to a cusp" % a)
else:
try:
r = QQ(a)
self.__a = r.numer()
self.__b = r.denom()
except (ValueError, TypeError):
raise TypeError("unable to convert %r to a cusp" % a)
return
if is_InfinityElement(b):
if is_InfinityElement(a) or (isinstance(a, Cusp) and a.is_infinity()):
raise TypeError("unable to convert (%r, %r) to a cusp" % (a, b))
self.__a = ZZ.zero()
self.__b = ZZ.one()
return
elif not b:
if not a:
raise TypeError("unable to convert (%r, %r) to a cusp" % (a, b))
self.__a = ZZ.one()
self.__b = ZZ.zero()
return
if isinstance(a, (Integer, Rational)):
r = a / ZZ(b)
elif is_InfinityElement(a):
self.__a = ZZ.one()
self.__b = ZZ.zero()
return
elif isinstance(a, Cusp):
if a.__b:
r = a.__a / (a.__b * b)
else:
self.__a = ZZ.one()
self.__b = ZZ.zero()
return
elif isinstance(a, int):
r = ZZ(a) / b
elif isinstance(a, (tuple, list)):
if len(a) != 2:
raise TypeError("unable to convert (%r, %r) to a cusp" % (a, b))
r = ZZ(a[0]) / (ZZ(a[1]) * b)
else:
try:
r = QQ(a) / b
except (ValueError, TypeError):
raise TypeError("unable to convert (%r, %r) to a cusp" % (a, b))
self.__a = r.numer()
self.__b = r.denom()
def __hash__(self):
"""
EXAMPLES::
sage: hash(Cusp(1/3)) == hash((1,3))
True
sage: hash(Cusp(oo)) == hash((1,0))
True
"""
return hash((self.__a, self.__b))
def _richcmp_(self, right, op):
"""
Compare the cusps ``self`` and ``right``.
Comparison is as for rational numbers, except with the cusp oo
greater than everything but itself.
The ordering in comparison is only really meaningful for infinity
or elements that coerce to the rationals.
EXAMPLES::
sage: Cusp(2/3) == Cusp(oo)
False
sage: Cusp(2/3) < Cusp(oo)
True
sage: Cusp(2/3)> Cusp(oo)
False
sage: Cusp(2/3) > Cusp(5/2)
False
sage: Cusp(2/3) < Cusp(5/2)
True
sage: Cusp(2/3) == Cusp(5/2)
False
sage: Cusp(oo) == Cusp(oo)
True
sage: 19/3 < Cusp(oo)
True
sage: Cusp(oo) < 19/3
False
sage: Cusp(2/3) < Cusp(11/7)
True
sage: Cusp(11/7) < Cusp(2/3)
False
sage: 2 < Cusp(3)
True
"""
if not self.__b:
s = Infinity
else:
s = self._rational_()
if not right.__b:
o = Infinity
else:
o = right._rational_()
return richcmp(s, o, op)
def is_infinity(self):
"""
Returns True if this is the cusp infinity.
EXAMPLES::
sage: Cusp(3/5).is_infinity()
False
sage: Cusp(1,0).is_infinity()
True
sage: Cusp(0,1).is_infinity()
False
"""
return not self.__b
def numerator(self):
"""
Return the numerator of the cusp a/b.
EXAMPLES::
sage: x=Cusp(6,9); x
2/3
sage: x.numerator()
2
sage: Cusp(oo).numerator()
1
sage: Cusp(-5/10).numerator()
-1
"""
return self.__a
def denominator(self):
"""
Return the denominator of the cusp a/b.
EXAMPLES::
sage: x=Cusp(6,9); x
2/3
sage: x.denominator()
3
sage: Cusp(oo).denominator()
0
sage: Cusp(-5/10).denominator()
2
"""
return self.__b
def _rational_(self):
"""
Coerce to a rational number.
EXAMPLES::
sage: QQ(Cusp(oo))
Traceback (most recent call last):
...
TypeError: cusp Infinity is not a rational number
sage: QQ(Cusp(-3,7))
-3/7
sage: Cusp(11,2)._rational_()
11/2
"""
try:
return self.__rational
except AttributeError:
pass
if not self.__b:
raise TypeError("cusp %s is not a rational number" % self)
self.__rational = self.__a / self.__b
return self.__rational
def _integer_(self, ZZ=None):
"""
Coerce to an integer.
EXAMPLES::
sage: ZZ(Cusp(-19))
-19
sage: Cusp(4,2)._integer_()
2
::
sage: ZZ(Cusp(oo))
Traceback (most recent call last):
...
TypeError: cusp Infinity is not an integer
sage: ZZ(Cusp(-3,7))
Traceback (most recent call last):
...
TypeError: cusp -3/7 is not an integer
"""
if self.__b != 1:
raise TypeError("cusp %s is not an integer" % self)
return self.__a
def _repr_(self):
"""
String representation of this cusp.
EXAMPLES::
sage: a = Cusp(2/3); a
2/3
sage: a._repr_()
'2/3'
sage: a.rename('2/3(cusp)'); a
2/3(cusp)
"""
if self.__b.is_zero():
return "Infinity"
if self.__b != 1:
return "%s/%s" % (self.__a, self.__b)
else:
return str(self.__a)
def _latex_(self):
r"""
Latex representation of this cusp.
EXAMPLES::
sage: latex(Cusp(-2/7))
\frac{-2}{7}
sage: latex(Cusp(oo))
\infty
sage: latex(Cusp(oo)) == Cusp(oo)._latex_()
True
"""
if self.__b.is_zero():
return "\\infty"
if self.__b != 1:
return "\\frac{%s}{%s}" % (self.__a, self.__b)
else:
return str(self.__a)
def __neg__(self):
"""
The negative of this cusp.
EXAMPLES::
sage: -Cusp(2/7)
-2/7
sage: -Cusp(oo)
Infinity
"""
return Cusp(-self.__a, self.__b)
def is_gamma0_equiv(self, other, N, transformation=None):
r"""
Return whether self and other are equivalent modulo the action of
`\Gamma_0(N)` via linear fractional transformations.
INPUT:
- ``other`` - Cusp
- ``N`` - an integer (specifies the group
Gamma_0(N))
        - ``transformation`` - None (default) or either the string 'matrix' or 'corner'.
          If 'matrix', it also returns a matrix in Gamma_0(N) that sends self to other.
          The matrix is chosen such that the lower left entry is as small as possible
          in absolute value. If 'corner' (or True for backwards compatibility), it
          returns only the upper left entry of such a matrix.
OUTPUT:
- a boolean - True if self and other are equivalent
        - a matrix or an integer - returned only if transformation is 'matrix' or 'corner', respectively.
EXAMPLES::
sage: x = Cusp(2,3)
sage: y = Cusp(4,5)
sage: x.is_gamma0_equiv(y, 2)
True
sage: _, ga = x.is_gamma0_equiv(y, 2, 'matrix'); ga
[-1 2]
[-2 3]
sage: x.is_gamma0_equiv(y, 3)
False
sage: x.is_gamma0_equiv(y, 3, 'matrix')
(False, None)
sage: Cusp(1/2).is_gamma0_equiv(1/3,11,'corner')
(True, 19)
sage: Cusp(1,0)
Infinity
sage: z = Cusp(1,0)
sage: x.is_gamma0_equiv(z, 3, 'matrix')
(
[-1 1]
True, [-3 2]
)
ALGORITHM: See Proposition 2.2.3 of Cremona's book 'Algorithms for
Modular Elliptic Curves', or Prop 2.27 of Stein's Ph.D. thesis.
"""
if transformation not in [False, True, "matrix", None, "corner"]:
            raise ValueError("Value %s of the optional argument transformation is not valid." % transformation)
if not isinstance(other, Cusp):
other = Cusp(other)
N = ZZ(N)
u1 = self.__a
v1 = self.__b
u2 = other.__a
v2 = other.__b
zero = ZZ.zero()
one = ZZ.one()
if transformation == "matrix":
from sage.matrix.constructor import matrix
if v1 == v2 and u1 == u2:
if not transformation:
return True
elif transformation == "matrix":
return True, matrix(ZZ, [[1, 0], [0, 1]])
else:
return True, one
# a necessary, but not sufficient condition unless N is square-free
if v1.gcd(N) != v2.gcd(N):
if not transformation:
return False
else:
return False, None
if (u1, v1) != (zero, one):
if v1 in [zero, one]:
s1 = one
else:
s1 = u1.inverse_mod(v1)
else:
s1 = 0
if (u2, v2) != (zero, one):
if v2 in [zero, one]:
s2 = one
else:
s2 = u2.inverse_mod(v2)
else:
s2 = zero
g = (v1 * v2).gcd(N)
a = s1 * v2 - s2 * v1
if a % g != 0:
if not transformation:
return False
else:
return False, None
if not transformation:
return True
# Now we know the cusps are equivalent. Use the proof of Prop 2.2.3
# of Cremona to find a matrix in Gamma_0(N) relating them.
if v1 == 0: # the first is oo
if v2 == 0: # both are oo
if transformation == "matrix":
return (True, matrix(ZZ, [[1, 0], [0, 1]]))
else:
return (True, one)
else:
dum, s2, r2 = u2.xgcd(-v2)
assert dum.is_one()
if transformation == "matrix":
return (True, matrix(ZZ, [[u2, r2], [v2, s2]]))
else:
return (True, u2)
elif v2 == 0: # the second is oo
dum, s1, r1 = u1.xgcd(-v1)
assert dum.is_one()
if transformation == "matrix":
return (True, matrix(ZZ, [[s1, -r1], [-v1, u1]]))
else:
return (True, s1)
dum, s2, r2 = u2.xgcd(-v2)
assert dum.is_one()
dum, s1, r1 = u1.xgcd(-v1)
assert dum.is_one()
a = s1 * v2 - s2 * v1
assert (a % g).is_zero()
# solve x*v1*v2 + a = 0 (mod N).
d, x0, y0 = (v1 * v2).xgcd(N) # x0*v1*v2 + y0*N = d = g.
# so x0*v1*v2 - g = 0 (mod N)
x = -x0 * ZZ(a / g)
# now x*v1*v2 + a = 0 (mod N)
# the rest is all added in trac #10926
s1p = s1 + x * v1
M = N // g
if transformation == "matrix":
C = s1p * v2 - s2 * v1
if C % (M * v1 * v2) == 0:
k = - C // (M * v1 * v2)
else:
k = - (C / (M * v1 * v2)).round()
s1pp = s1p + k * M * v1
# C += k*M*v1*v2 # is now the smallest in absolute value
C = s1pp * v2 - s2 * v1
A = u2 * s1pp - r2 * v1
r1pp = r1 + (x + k * M) * u1
B = r2 * u1 - r1pp * u2
D = s2 * u1 - r1pp * v2
ga = matrix(ZZ, [[A, B], [C, D]])
assert ga.det() == 1
assert C % N == 0
assert (A * u1 + B * v1) / (C * u1 + D * v1) == u2 / v2
return (True, ga)
else:
# mainly for backwards compatibility and
# for how it is used in modular symbols
A = (u2 * s1p - r2 * v1)
if u2 != 0 and v1 != 0:
A = A % (u2 * v1 * M)
return (True, A)
def is_gamma1_equiv(self, other, N):
"""
Return whether self and other are equivalent modulo the action of
Gamma_1(N) via linear fractional transformations.
INPUT:
- ``other`` - Cusp
- ``N`` - an integer (specifies the group
Gamma_1(N))
OUTPUT:
- ``bool`` - True if self and other are equivalent
- ``int`` - 0, 1 or -1, gives further information
about the equivalence: If the two cusps are u1/v1 and u2/v2, then
they are equivalent if and only if v1 = v2 (mod N) and u1 = u2 (mod
gcd(v1,N)) or v1 = -v2 (mod N) and u1 = -u2 (mod gcd(v1,N)) The
sign is +1 for the first and -1 for the second. If the two cusps
are not equivalent then 0 is returned.
EXAMPLES::
sage: x = Cusp(2,3)
sage: y = Cusp(4,5)
sage: x.is_gamma1_equiv(y,2)
(True, 1)
sage: x.is_gamma1_equiv(y,3)
(False, 0)
sage: z = Cusp(QQ(x) + 10)
sage: x.is_gamma1_equiv(z,10)
(True, 1)
sage: z = Cusp(1,0)
sage: x.is_gamma1_equiv(z, 3)
(True, -1)
sage: Cusp(0).is_gamma1_equiv(oo, 1)
(True, 1)
sage: Cusp(0).is_gamma1_equiv(oo, 3)
(False, 0)
"""
if not isinstance(other, Cusp):
other = Cusp(other)
N = ZZ(N)
u1 = self.__a
v1 = self.__b
u2 = other.__a
v2 = other.__b
g = v1.gcd(N)
if ((v2 - v1) % N == 0 and (u2 - u1) % g == 0):
return True, 1
elif ((v2 + v1) % N == 0 and (u2 + u1) % g == 0):
return True, -1
return False, 0
def is_gamma_h_equiv(self, other, G):
r"""
Return a pair (b, t), where b is True or False as self and other
are equivalent under the action of G, and t is 1 or -1, as
described below.
Two cusps `u1/v1` and `u2/v2` are equivalent modulo
Gamma_H(N) if and only if `v1 = h*v2 (\mathrm{mod} N)` and
`u1 = h^{(-1)}*u2 (\mathrm{mod} gcd(v1,N))` or
`v1 = -h*v2 (mod N)` and
`u1 = -h^{(-1)}*u2 (\mathrm{mod} gcd(v1,N))` for some
`h \in H`. Then t is 1 or -1 as c and c' fall into the
first or second case, respectively.
INPUT:
- ``other`` - Cusp
- ``G`` - a congruence subgroup Gamma_H(N)
OUTPUT:
- ``bool`` - True if self and other are equivalent
- ``int`` - -1, 0, 1; extra info
EXAMPLES::
sage: x = Cusp(2,3)
sage: y = Cusp(4,5)
sage: x.is_gamma_h_equiv(y,GammaH(13,[2]))
(True, 1)
sage: x.is_gamma_h_equiv(y,GammaH(13,[5]))
(False, 0)
sage: x.is_gamma_h_equiv(y,GammaH(5,[]))
(False, 0)
sage: x.is_gamma_h_equiv(y,GammaH(23,[4]))
(True, -1)
Enumerating the cusps for a space of modular symbols uses this
function.
::
sage: G = GammaH(25,[6]) ; M = G.modular_symbols() ; M
Modular Symbols space of dimension 11 for Congruence Subgroup Gamma_H(25) with H generated by [6] of weight 2 with sign 0 over Rational Field
sage: M.cusps()
[8/25, 1/3, 6/25, 1/4, 1/15, -7/15, 7/15, 4/15, 1/20, 3/20, 7/20, 9/20]
sage: len(M.cusps())
12
This is always one more than the associated space of weight 2 Eisenstein
series.
::
sage: G.dimension_eis(2)
11
sage: M.cuspidal_subspace()
Modular Symbols subspace of dimension 0 of Modular Symbols space of dimension 11 for Congruence Subgroup Gamma_H(25) with H generated by [6] of weight 2 with sign 0 over Rational Field
sage: G.dimension_cusp_forms(2)
0
"""
from sage.modular.arithgroup.all import is_GammaH
if not isinstance(other, Cusp):
other = Cusp(other)
if not is_GammaH(G):
raise TypeError("G must be a group GammaH(N).")
H = G._list_of_elements_in_H()
N = ZZ(G.level())
u1 = self.__a
v1 = self.__b
u2 = other.__a
v2 = other.__b
g = v1.gcd(N)
for h in H:
v_tmp = (h * v1) % N
u_tmp = (h * u2) % N
if (v_tmp - v2) % N == 0 and (u_tmp - u1) % g == 0:
return True, 1
if (v_tmp + v2) % N == 0 and (u_tmp + u1) % g == 0:
return True, -1
return False, 0
def _acted_upon_(self, g, self_on_left):
r"""
Implement the left action of `SL_2(\ZZ)` on self.
EXAMPLES::
sage: g = matrix(ZZ, 2, [1,1,0,1]); g
[1 1]
[0 1]
sage: g * Cusp(2,5)
7/5
sage: Cusp(2,5) * g
Traceback (most recent call last):
...
TypeError: unsupported operand parent(s) for *: 'Set P^1(QQ) of all cusps' and 'Full MatrixSpace of 2 by 2 dense matrices over Integer Ring'
sage: h = matrix(ZZ, 2, [12,3,-100,7])
sage: h * Cusp(2,5)
-13/55
sage: Cusp(2,5)._acted_upon_(h, False)
-13/55
sage: (h*g) * Cusp(3,7) == h * (g * Cusp(3,7))
True
sage: cm = sage.structure.element.get_coercion_model()
sage: cm.explain(MatrixSpace(ZZ, 2), Cusps)
Action discovered.
Left action by Full MatrixSpace of 2 by 2 dense matrices over Integer Ring on Set P^1(QQ) of all cusps
Result lives in Set P^1(QQ) of all cusps
Set P^1(QQ) of all cusps
"""
if not self_on_left:
if (is_Matrix(g) and g.base_ring() is ZZ
and g.ncols() == 2 == g.nrows()):
a, b, c, d = g.list()
return Cusp(a * self.__a + b * self.__b,
c * self.__a + d * self.__b)
def apply(self, g):
"""
Return g(self), where g=[a,b,c,d] is a list of length 4, which we
view as a linear fractional transformation.
EXAMPLES: Apply the identity matrix::
sage: Cusp(0).apply([1,0,0,1])
0
sage: Cusp(0).apply([0,-1,1,0])
Infinity
sage: Cusp(0).apply([1,-3,0,1])
-3
"""
return Cusp(g[0] * self.__a + g[1] * self.__b,
g[2] * self.__a + g[3] * self.__b)
def galois_action(self, t, N):
r"""
Suppose this cusp is `\alpha`, `G` a congruence subgroup of level `N`
and `\sigma` is the automorphism in the Galois group of
`\QQ(\zeta_N)/\QQ` that sends `\zeta_N` to `\zeta_N^t`. Then this
function computes a cusp `\beta` such that `\sigma([\alpha]) = [\beta]`,
where `[\alpha]` is the equivalence class of `\alpha` modulo `G`.
This code only needs as input the level and not the group since the
action of Galois for a congruence group `G` of level `N` is compatible
with the action of the full congruence group `\Gamma(N)`.
INPUT:
- `t` -- integer that is coprime to N
- `N` -- positive integer (level)
OUTPUT:
- a cusp
.. WARNING::
In some cases `N` must fit in a long long, i.e., there
are cases where this algorithm isn't fully implemented.
.. NOTE::
Modular curves can have multiple non-isomorphic models over `\QQ`.
The action of Galois depends on such a model. The model over `\QQ`
of `X(G)` used here is the model where the function field
`\QQ(X(G))` is given by the functions whose Fourier expansion at
`\infty` have their coefficients in `\QQ`. For `X(N):=X(\Gamma(N))`
the corresponding moduli interpretation over `\ZZ[1/N]` is that
`X(N)` parametrizes pairs `(E,a)` where `E` is a (generalized)
elliptic curve and `a: \ZZ / N\ZZ \times \mu_N \to E` is a closed
immersion such that the Weil pairing of `a(1,1)` and `a(0,\zeta_N)`
is `\zeta_N`. In this parameterisation the point `z \in H`
corresponds to the pair `(E_z,a_z)` with `E_z=\CC/(z \ZZ+\ZZ)` and
`a_z: \ZZ / N\ZZ \times \mu_N \to E` given by `a_z(1,1) = z/N` and
`a_z(0,\zeta_N) = 1/N`.
Similarly `X_1(N):=X(\Gamma_1(N))` parametrizes pairs `(E,a)` where
`a: \mu_N \to E` is a closed immersion.
EXAMPLES::
sage: Cusp(1/10).galois_action(3, 50)
1/170
sage: Cusp(oo).galois_action(3, 50)
Infinity
sage: c=Cusp(0).galois_action(3, 50); c
50/17
sage: Gamma0(50).reduce_cusp(c)
0
Here we compute the permutations of the action for t=3 on cusps for
Gamma0(50). ::
sage: N = 50; t=3; G = Gamma0(N); C = G.cusps()
sage: cl = lambda z: exists(C, lambda y:y.is_gamma0_equiv(z, N))[1]
sage: for i in range(5):
....: print((i, t^i))
....: print([cl(alpha.galois_action(t^i,N)) for alpha in C])
(0, 1)
[0, 1/25, 1/10, 1/5, 3/10, 2/5, 1/2, 3/5, 7/10, 4/5, 9/10, Infinity]
(1, 3)
[0, 1/25, 7/10, 2/5, 1/10, 4/5, 1/2, 1/5, 9/10, 3/5, 3/10, Infinity]
(2, 9)
[0, 1/25, 9/10, 4/5, 7/10, 3/5, 1/2, 2/5, 3/10, 1/5, 1/10, Infinity]
(3, 27)
[0, 1/25, 3/10, 3/5, 9/10, 1/5, 1/2, 4/5, 1/10, 2/5, 7/10, Infinity]
(4, 81)
[0, 1/25, 1/10, 1/5, 3/10, 2/5, 1/2, 3/5, 7/10, 4/5, 9/10, Infinity]
TESTS:
Here we check that the Galois action is indeed a permutation on the
cusps of Gamma1(48) and check that :trac:`13253` is fixed. ::
sage: G = Gamma1(48)
sage: C = G.cusps()
sage: for i in Integers(48).unit_gens():
....: C_permuted = [G.reduce_cusp(c.galois_action(i,48)) for c in C]
....: assert len(set(C_permuted))==len(C)
We test that Gamma1(19) has 9 rational cusps and check that :trac:`8998`
is fixed. ::
sage: G = Gamma1(19)
sage: [c for c in G.cusps() if c.galois_action(2,19).is_gamma1_equiv(c,19)[0]]
[2/19, 3/19, 4/19, 5/19, 6/19, 7/19, 8/19, 9/19, Infinity]
REFERENCES:
- Section 1.3 of Glenn Stevens, "Arithmetic on Modular Curves"
- There is a long comment about our algorithm in the source code for this function.
AUTHORS:
- <NAME>, 2009-04-18
"""
if self.is_infinity():
return self
if not isinstance(t, Integer):
t = Integer(t)
# Our algorithm for computing the Galois action works as
# follows (see Section 1.3 of Glenn Stevens "Arithmetic on
# Modular Curves" for a proof that the action given below is
# correct). We alternatively view the set of cusps as the
# Gamma-equivalence classes of column vectors [a;b] with
# gcd(a,b,N)=1, and the left action of Gamma by matrix
# multiplication. The action of t is induced by [a;b] |-->
# [a;t'*b], where t' is an inverse mod N of t. For [a;t'*b]
# with gcd(a,t'*b)==1, the cusp corresponding to [a;t'*b] is
# just the rational number a/(t'*b). Thus in this case, to
# compute the action of t we just do a/b <--> [a;b] |--->
# [a;t'*b] <--> a/(t'*b). IN the other case when we get
# [a;t'*b] with gcd(a,t'*b) != 1, which can and does happen,
# we have to work a bit harder. We need to find [c;d] such
# that [c;d] is congruent to [a;t'*b] modulo N, and
# gcd(c,d)=1. There is a standard lifting algorithm that is
# implemented for working with P^1(Z/NZ) [it is needed for
# modular symbols algorithms], so we just apply it to lift
# [a,t'*b] to a matrix [A,B;c,d] in SL_2(Z) with lower two
# entries congruent to [a,t'*b] modulo N. This exactly solves
# our problem, since gcd(c,d)=1.
a = self.__a
b = self.__b * t.inverse_mod(N)
if b.gcd(a) != 1:
_, _, a, b = lift_to_sl2z_llong(a, b, N)
a = Integer(a)
b = Integer(b)
# Now that we've computed the Galois action, we efficiently
# construct the corresponding cusp as a Cusp object.
return Cusp(a, b, check=False)
def __pari__(self):
"""
Return a PARI representation of ``self``.
EXAMPLES::
sage: Cusp(1, 0).__pari__()
+oo
sage: pari(Cusp(3, 2))
3/2
"""
b = self.__b
return pari(self.__a / b) if b else pari.oo()
class Cusps_class(Singleton, Parent):
"""
The set of cusps.
EXAMPLES::
sage: C = Cusps; C
Set P^1(QQ) of all cusps
sage: loads(C.dumps()) == C
True
"""
def __init__(self):
r"""
The set of cusps, i.e. `\mathbb{P}^1(\QQ)`.
EXAMPLES::
sage: C = sage.modular.cusps.Cusps_class() ; C
Set P^1(QQ) of all cusps
sage: Cusps == C
True
"""
Parent.__init__(self, self)
Element = Cusp
def _repr_(self):
"""
String representation of the set of cusps.
EXAMPLES::
sage: Cusps
Set P^1(QQ) of all cusps
sage: Cusps._repr_()
'Set P^1(QQ) of all cusps'
sage: Cusps.rename('CUSPS'); Cusps
CUSPS
sage: Cusps.rename(); Cusps
Set P^1(QQ) of all cusps
sage: Cusps
Set P^1(QQ) of all cusps
"""
return "Set P^1(QQ) of all cusps"
def _latex_(self):
r"""
Return latex representation of self.
EXAMPLES::
sage: latex(Cusps)
\mathbf{P}^1(\QQ)
sage: latex(Cusps) == Cusps._latex_()
True
"""
return r"\mathbf{P}^1(\QQ)"
def __call__(self, x):
"""
Coerce x into the set of cusps.
EXAMPLES::
sage: a = Cusps(-4/5); a
-4/5
sage: Cusps(a) is a
False
sage: Cusps(1.5)
3/2
sage: Cusps(oo)
Infinity
sage: Cusps(I)
Traceback (most recent call last):
...
TypeError: unable to convert I to a cusp
TESTS::
sage: Cusps.has_coerce_map_from(ZZ)
True
sage: Cusps.has_coerce_map_from(QQ)
True
sage: Cusps.has_coerce_map_from(GF(7))
False
"""
return Cusp(x)
def _coerce_map_from_(self, R):
if QQ.has_coerce_map_from(R):
return True
if R is InfinityRing:
return True
return False
def _element_constructor_(self, x):
return Cusp(x)
Cusps = Cusps_class()
|
StarcoderdataPython
|
36201
|
# -*- coding: utf-8 -*-
##############################################################################
# Author:QQ173782910
##############################################################################
import logging
from apscheduler.schedulers.background import BlockingScheduler
from RunUse import TradeRun
format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=format, filename='log_print.txt')
logger = logging.getLogger('print')
logging.getLogger("apscheduler").setLevel(logging.WARNING)  # configure the apscheduler logger level.
if __name__ == '__main__':
RunTrade = TradeRun()
    scheduler = BlockingScheduler()  # scheduled jobs.
    scheduler.add_job(RunTrade.get_kline_data, trigger='cron', second='*/2')  # main k-line (candlestick) computation
    scheduler.add_job(RunTrade.get_open_orders, trigger='cron', second='*/2')  # open (unfilled) orders
    scheduler.add_job(RunTrade.get_position, trigger='cron', second='*/1')  # position
scheduler.start()
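# Hedged note (added comment, not in the original file): BlockingScheduler.start() blocks
# the main thread, so it must remain the last call; with the cron triggers above, the
# k-line and open-order jobs fire every 2 seconds and the position job every second.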
|
StarcoderdataPython
|
3345664
|
<reponame>cuappdev/archives<filename>tempo-api/src/app/base.py
from marshmallow_sqlalchemy import ModelSchema
from . import db
class Base(db.Model):
__abstract__ = True
created_at = db.Column(db.DateTime, default = db.func.current_timestamp())
updated_at = db.Column(db.DateTime, default = db.func.current_timestamp())
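# Hedged sketch (not part of the original file): a concrete model would subclass the
# abstract Base above, and the imported ModelSchema is typically paired with it for
# (de)serialization. The Song model and its fields are illustrative assumptions only.
class Song(Base):
    __tablename__ = 'songs'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255))

class SongSchema(ModelSchema):
    class Meta:
        model = Song
        sqla_session = db.session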
|
StarcoderdataPython
|
1782292
|
import numpy as np
import matplotlib.pyplot as plt
from math import pi, cos
from scipy import loadtxt, optimize
import os
M = 1.41
plt.figure(figsize=(10,7), dpi=80)
ax = plt.axes()
dat = loadtxt("./particles/particles.tsv", skiprows=0, delimiter="\t")
t = dat.transpose()[0]
tracers = dat.transpose()[1:]
r_max = tracers.max()
ax.axis([0, r_max * 1.1 / M, 0, t.max() * 1.0])
for i in range(0, len(tracers)):
ax.plot(tracers[i] / M, t, label = r"m/M = {}".format((i+1)*0.2))
plot_name = r"Particle world lines"
ax.set_title(plot_name)
ax.set_xlabel("r/M")
ax.set_ylabel("t")
ax.legend()
plt.show()
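# Hedged note on the assumed input format (added comment): particles.tsv is read as a
# tab-separated table with time in column 0 and one tracer radius per remaining column;
# dividing by M = 1.41 rescales the radii, so the x-axis is in units of r/M.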
|
StarcoderdataPython
|
3309142
|
<reponame>Corleo/st_settings
import re
import sublime
import sublime_plugin
# for debugging
# sublime.log_commands(True)
# pattern = re.compile(r".*test.*") # match
# pattern = re.compile('(?!.*(?:test)).*') # don't match
class CustomBuildSystemCommand(sublime_plugin.WindowCommand):
def run(self, *args, **kwargs):
params = self.window.extract_variables()
# print(params)
if params['file_extension'] == "py":
if re.match(r".*test.*", params['file_name']):
variant = "pytest"
else:
variant = "run"
self.window.run_command("build", {
"build_system": "Packages/User/py3k.sublime-build",
"choice_build_system": "true",
"choice_variant": "true",
"variant": "{}".format(variant)
}
)
else:
# run regular build command
self.window.run_command("build")
# params = {
# "packages": "~/.config/sublime-text-3/Packages",
# "file_name": "custom_buildSystem.py",
# "file": "~/.config/sublime-text-3/Packages/User/custom_buildSystem.py",
# "file_extension": "py",
# "platform": "Linux",
# "file_base_name": "custom_buildSystem",
# "file_path": "~/.config/sublime-text-3/Packages/User"
# }
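# Hedged usage note (not in the original file): Sublime Text derives the command name
# from the class name, so CustomBuildSystemCommand can be bound in the user keymap as
#   { "keys": ["ctrl+b"], "command": "custom_build_system" }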
|
StarcoderdataPython
|
4831196
|
money = float(input('How much do you want to convert? '))
dolar = money / 3.91
print('You have R$ {:.2f} (BRL), which converts to US$ {:.2f} (USD)'.format(money, dolar))
|
StarcoderdataPython
|
178793
|
from scrapy_scylla_proxies.random_proxy import RandomProxyMiddleware
|
StarcoderdataPython
|
155616
|
## @package AssociateJoint Association joint that used by gait recorder
## The class that has all the information about associations
class AssociateJoint:
## Constructor
# @param self Object pointer
# @param module Module name string
# @param node Node index
    # @param corr Bool, correlation: True for positive; False for negative
# @param ratio Correlation ratio
def __init__(self, module, node, corr, ratio):
## Module name string
self.ModuleName = module # name string
## Node index
self.Node = node
## Correlation boolean value
self.Correlation = corr # bool value
## Correlation ratio
self.Ratio = ratio
## Current object to string
# @param self Object pointer
def ToString(self):
return self.ModuleName+"::"+self.NodeToString(self.Node)+"::"+ \
self.CorrelationToStr(self.Correlation)+"::"+str(self.Ratio)
## Find node string name given node index
# @param self Object pointer
    # @param node Integer, node index
def NodeToString(self, node):
if node == 0:
return "Front Wheel"
if node == 1:
return "Lft Wheel"
if node == 2:
return "Rgt Wheel"
if node == 3:
return "Central Bending"
## Correlation boolean value to string
# @param self Object pointer
# @param corr Correlation boolean
def CorrelationToStr(self,corr):
if corr:
return "+"
else:
return "-"
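# Hedged usage sketch (not part of the original file): associate node 0 ("Front Wheel")
# of a module with a positive correlation at ratio 0.5 and render it as a string.
if __name__ == "__main__":
    aj = AssociateJoint("Module1", 0, True, 0.5)
    print(aj.ToString())   # -> Module1::Front Wheel::+::0.5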
|
StarcoderdataPython
|
1758335
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Convert the *DECOW14X* corpus into a plain text file. Is used as pre-processing step for the
`word2vec <https://code.google.com/archive/p/word2vec/>`_ training.
To make this this more feasible (decow is a **huge** corpus), python's :mod:`multiprocessing` is used, s.t. every
part of the corpus in simultaneously processed. Afterwards, a bash command like ``cat`` can be used to merge into one
single file.
"""
# STANDARD
import codecs
import gzip as gz
import multiprocessing
import optparse
import os
import re
# PROJECT
from src.misc.decorators import log_time
from src.misc.helpers import alt, contains_tag, extract_sentence_id
def main():
"""
Main function. Uses command lines to start corpus processing.
"""
optparser = optparse.OptionParser()
optparser.add_option('--in', dest='in_dir', help='Path to input directory')
optparser.add_option('--out', dest='out', help='Path to output directory')
optparser.add_option('--merge', dest='merge', action="store_true", help='Merge multi-word named entities?')
optparser.add_option('--log', dest='log', help='Path to logfile')
optparser.add_option('--log_interval', dest='inter', type='int', help='Logging interval')
(options, args) = optparser.parse_args()
convert_decow_to_plain(options.in_dir, options.out, options.log, options.merge, options.inter)
def convert_decow_to_plain(decow_dir, out_dir, log_path, merge_nes, log_interval):
"""
Convert the whole corpus into plain text.
Args:
decow_dir (str): Path to directory with decow corpus paths.
out_dir (str): Path where plain text parts should be written to.
log_path (str): Path where the log files should be written to.
merge_nes (bool): Flag to indicate whether multi-word expression should be merged with underscores.
log_interval (int): Interval to log current process state in seconds.
"""
# Split logging interval into hourse - minutes - seconds
m_proc, s_proc = divmod(log_interval, 60)
h_proc, m_proc = divmod(m_proc, 60)
# Init logfile
with codecs.open(log_path, "a", "utf-8") as log_file:
log_file.write(alt("Starting logging...\n"))
log_file.write(alt("Corpus (parts) directory:\t%s\n" % decow_dir))
log_file.write(alt("Output directory:\t\t%s\n" % out_dir))
log_file.write(alt("Logging path:\t\t%s\n" % log_path))
log_file.write(alt("Logging intervals:\n\t Every %2dh %2dm %2ds for metalog\n" % (h_proc, m_proc, s_proc)))
# Start processes
@log_time(log_path, log_interval)
def _convert_decow_to_plain(decow_dir, out_dir, log_path, merge_nes, log_interval):
with codecs.open(log_path, "a", "utf-8") as log_file:
            inpaths = [path for path in os.listdir(decow_dir) if ".DS_Store" not in path and "decow" in path]
            log_file.write(alt("Preparing %i process(es)...\n" % (len(inpaths))))
pool = multiprocessing.Pool(processes=len(inpaths))
log_file.write(alt("Starting process(es)!\n"))
if merge_nes:
pool.map(convert_part_merging, [(decow_dir + inpath, out_dir, log_path, log_interval) for inpath in inpaths])
else:
pool.map(convert_part, [(decow_dir + inpath, out_dir, log_path, log_interval) for inpath in inpaths])
_convert_decow_to_plain(decow_dir, out_dir, log_path, merge_nes, log_interval)
def convert_part(argstuple):
"""
Convert a corpus part into plain text without merging multiple word entries.
Args:
argstuple: Tuple of methods arguments (``inpath`` (*str*): Path to this processes' corpus part / ``dir_outpath``
(*str*): Path to this processes' output / ``log_path`` (*str*): Path to this processes' log / ``interval``
(*int*): Logging interval in seconds)
"""
inpath, dir_outpath, log_path, interval = argstuple
@log_time(log_path, interval)
def _convert_part(inpath, dir_outpath):
file_n = get_file_number(inpath)
outpath = dir_outpath + 'decow%s_out.txt' %(str(file_n))
with gz.open(inpath, 'rb') as infile, codecs.open(outpath, 'wb', 'utf-8') as outfile:
sentence = []
for line in infile:
line = line.strip().decode("utf-8")
if line.startswith(u'<s'):
outfile.write('%s\n' %(' '.join(sentence)))
sentence = []
if not line.startswith(u'<'):
sentence.append(line.split('\t')[0])
_convert_part(inpath, dir_outpath)
def convert_part_merging(argstuple):
"""
Convert a corpus part into plain text and merging multiple word entries.
Args:
argstuple: Tuple of methods arguments (``inpath`` (*str*): Path to this processes' corpus part / ``dir_outpath``
(*str*): Path to this processes' output / ``log_path`` (*str*): Path to this processes' log / ``interval``
(*int*): Logging interval in seconds)
"""
inpath, dir_outpath, log_path, interval = argstuple
@log_time(log_path, interval)
def _convert_part_merging(inpath, dir_outpath, log_path):
with codecs.open(log_path, "a", "utf-8") as log_file:
process_name = multiprocessing.current_process().name
log_file.write(alt("%s: Start logging processing of\n\t%s to \n\t%s...\n" % (process_name, inpath, dir_outpath)))
file_n = get_file_number(inpath)
outpath = dir_outpath + 'decow%s_out.txt' %(str(file_n))
with gz.open(inpath, 'rb') as infile, codecs.open(outpath, 'wb', 'utf-8') as outfile:
sentence = []
line, lcount = infile.readline().strip().decode("utf-8"), 1
while line != "":
if lcount % 100000 == 0:
log_file.write(alt("%s: Processing line nr. %i...\n" % (process_name, lcount)))
ne = extract_named_entity(line) # Extract possible named entity
if line.startswith(u'<s'):
outfile.write('%s\n' %(' '.join(sentence)))
sentence = []
# If there was a named entity found, try to complete it if it's a multi-word expression
elif ne is not None:
while True:
next_line = infile.readline().strip().decode("utf-8")
lcount += 1
if not contains_tag(next_line):
next_ne = extract_named_entity(next_line)
if next_ne is not None and next_ne[1] == ne[1]:
ne = ("%s_%s" %(ne[0], next_ne[0]), ne[1])
else:
break
else:
break
sentence.append(ne[0])
line = next_line
continue
elif not line.startswith(u'<'):
sentence.append(line.split('\t')[0])
line, lcount = infile.readline().strip().decode("utf-8"), lcount + 1
_convert_part_merging(inpath, dir_outpath, log_path)
def get_file_number(filename):
"""
Get the number of the current decow corpus part.
Args:
filename (str): Decow corpus part file name
Returns:
str: File number
"""
    file_n = re.findall(re.compile(r"\d{2}(?=[^a])"), filename)  # Retrieve file number
    if len(file_n) == 0:
        file_n = re.findall(re.compile(r"\d+"), filename)[len(file_n) - 1]
else:
file_n = file_n[len(file_n) - 1]
return file_n if int(file_n) > 9 else "0" + file_n
def extract_named_entity(line):
"""
Extract named entity from current line.
Args:
line (str): Current line
Returns:
str or None: Extracted named entity or None if no named entity is present.
"""
try:
line_parts = line.split("\t")
feature = line_parts[3]
if feature != "O":
return line_parts[2], line_parts[3]
except IndexError:
return None
if __name__ == '__main__':
main()
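# Hedged invocation sketch (paths and interval are placeholders, matching the optparse
# flags defined above; note the trailing slash on --in, since paths are concatenated):
#   python convert_decow_to_plain.py --in /data/decow/ --out /data/plain/ \
#       --log decow.log --log_interval 600 --merge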
|
StarcoderdataPython
|
120178
|
<reponame>chulth/CRide
'''users app.'''
# Django
#from django.app import AppConfig
from django.apps import AppConfig
class UsersAppConfig(AppConfig):
'''users app config.'''
name = 'cride.users'
verbose_name = 'Users'
|
StarcoderdataPython
|
141882
|
<gh_stars>0
# Generated by Django 2.2.1 on 2019-05-26 03:44
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_auto_20190525_2338'),
]
operations = [
migrations.RenameField(
model_name='customuser',
old_name='reputation',
new_name='reputation_count',
),
migrations.AddField(
model_name='customuser',
name='total_answers',
field=models.IntegerField(default=0),
),
migrations.AddField(
model_name='customuser',
name='total_questions',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='customuser',
name='user',
            field=models.OneToOneField(on_delete=models.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
StarcoderdataPython
|
3249885
|
#!/usr/bin/env python
# Run the tests as below from the root folder of this python project:
# cd [THE_ROOT_FOLDER]
# python -m unittest discover -s tests
"""Tests for `epsg_constants` package."""
import unittest
from epsg_constants.epsg_number import EpsgNumber
class TestEpsg_constants(unittest.TestCase):
"""Tests for `epsg_constants` package."""
def test_someConstants(self):
self.assertEqual(
EpsgNumber.WORLD__WGS_84__4326,
4326
)
self.assertEqual(
EpsgNumber.SWEDEN__SWEREF99_TM__3006,
3006
)
self.assertEqual(
EpsgNumber.SWEDEN__12_00__SWEREF99_12_00__3007,
3007
)
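# Hedged addition (not in the original file): also allow running this test module
# directly, in addition to the `python -m unittest discover` command documented above.
if __name__ == "__main__":
    unittest.main()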
|
StarcoderdataPython
|
1766044
|
# Initial imports
import pandas as pd
import numpy as np
import datetime as dt
from pathlib import Path
# %matplotlib inline  (IPython magic; only valid inside a Jupyter notebook, not in a plain .py script)
# Reading whale returns
# Reading the whale returns dataset using the pandas built-in function read_csv and converting the Date column into datetime format.
df = pd.read_csv('./Resources/whale_returns.csv',index_col="Date", parse_dates=True, infer_datetime_format=True)
df.sort_index(axis=0)
# Count nulls
# Checking for null values and counting them using the pandas built-in function sum.
df.isnull().sum().sum()
# Drop nulls
# Dropping the rows which contain null values using the pandas built-in function dropna.
df.dropna(inplace=True)
# Reading algorithmic returns
# Reading the algorithmic returns dataset using the pandas built-in function read_csv and converting the Date column into datetime format.
df_algo = pd.read_csv('./Resources/algo_returns.csv',index_col="Date", parse_dates=True, infer_datetime_format=True)
df_algo.sort_index(axis=0)
# Count nulls
# Checking for null values and counting them using the pandas built-in function sum.
df_algo.isnull().sum().sum()
# Drop nulls
# Dropping the rows which contain null values using the pandas built-in function dropna.
df_algo.dropna(inplace=True)
# Reading S&P 500 Closing Prices
# Reading the S&P 500 closing prices dataset using the pandas built-in function read_csv and converting the Date column into datetime format.
df_SP = pd.read_csv('./Resources/sp500_history.csv',index_col="Date", parse_dates=True, infer_datetime_format=True)
df_SP['Close'] = df_SP['Close']
df_SP.sort_index(axis=0)
# Check Data Types
# Checking the data type of the Close column using the dtype attribute.
data_type=df_SP['Close'].dtype
# Fix Data Types
# Removing the special character from the Close column and converting it into float.
df_SP['Close'] = df_SP['Close'].str.replace(r'\D','').astype(float)
# Calculate Daily Returns
# Calculating the daily returns using the pct_change function.
df_SP['Close'] = df_SP['Close'].pct_change()
# Drop nulls
# Dropping the rows which contain null values using the pandas built-in function dropna.
df_SP.dropna(inplace=True)
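# Hedged sketch (assumption, not in the original script): the three daily-return frames
# share a DatetimeIndex, so they can be joined column-wise for a combined analysis/plot.
combined_df = pd.concat([df, df_algo, df_SP], axis=1, join="inner")
combined_df.plot(figsize=(10, 7), title="Daily returns: whales, algos, S&P 500")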
|
StarcoderdataPython
|
1769215
|
<reponame>fish159753/python_projects<filename>coin_flip_runs.py
"""
File: coin_flip_runs.py
Name: <NAME>
-----------------------
This program should simulate coin flip(s)
with the number of runs input by users.
A 'run' is defined as consecutive results
on either 'H' or 'T'. For example, 'HHHHHTHTT'
is regarded as a 2-run result.
Your program should stop immediately after your
coin flip results reach the runs!
"""
import random as r
def main():
"""
    A run is counted whenever two consecutive flips show the same face.
    The program stops once the number of runs requested by the user is reached.
"""
print("Let's flip a coin!")
num_run = int(input('Number of runs: '))
run = 0
is_in_a_row = False
    # Tracks whether the current streak has already been counted as a run.
roll1 = r.randint(1, 2)
if roll1 == 1:
print('H', end='')
else:
print('T', end='')
while True:
if run != num_run:
roll2 = r.randint(1, 2)
if roll2 == 1:
print('H', end='')
else:
print('T', end='')
if roll1 == roll2:
if not is_in_a_row:
                    # The current flip matches the previous one, so count a new run.
run += 1
is_in_a_row = True
                    # Avoid counting the same streak more than once toward the run total.
else:
is_in_a_row = False
roll1 = roll2
else:
break
###### DO NOT EDIT CODE BELOW THIS LINE ######
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
1665837
|
"""
Project.x
Author: <NAME>
"""
from __future__ import print_function, absolute_import
from six import iteritems
from six.moves import range
import ast
from types import ModuleType, FunctionType
# noinspection PyUnresolvedReferences
from six.moves import builtins
import math
import keyword
_builtins = dir(builtins)
_builtins.extend(dir(math))
_builtins.extend(keyword.kwlist)
_builtins = set(_builtins)
def parse_equation_01(eqn_str, func_name='function_name'):
eqn_str = eqn_str.replace("'", "").replace('{', '').replace('}', '').replace('=', '').replace('^', '**')
vars = [
node.id for node in ast.walk(ast.parse(eqn_str))
if isinstance(node, ast.Name)
]
vars_ = list(vars)
for var in vars_:
if var in _builtins:
vars.remove(var)
eqn_str = """from math import *\ndef %s%s:\n return %s""" % (func_name, str(tuple(vars)).replace("'", ""), eqn_str)
# print(eqn_str)
try:
compiled = compile(eqn_str, '', 'exec')
except Exception:
print(1)
return None, None
module = ModuleType(func_name)
try:
exec(compiled, module.__dict__)
except Exception:
print(2)
return None, None
_function = getattr(module, func_name)
if not isinstance(_function, FunctionType):
print(3)
return None, None
return _function, vars
if __name__ == '__main__':
eqn = "max(0, 1)"
print(parse_equation_01(eqn)[0]())
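    # Hedged extra sketch: an expression with free variables. The generated function's
    # parameter order follows the order in which ast.walk discovers the names, so the
    # returned name list should be inspected before calling positionally; passing the
    # same value for every parameter sidesteps the ordering here.
    func, names = parse_equation_01("x^2 + y")
    if func is not None:
        print(names, func(*[2] * len(names)))   # 2**2 + 2 == 6 whatever the order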
|
StarcoderdataPython
|
3200521
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import random
import tempfile
import unittest
from . import train
class TrainTest(unittest.TestCase):
def setUp(self):
self.job_dir = tempfile.mkdtemp()
self.num_checkpoints = 10
self.checkpoint_files = []
self.checkpoint_steps = 100
self.test_job_dir = tempfile.mkdtemp()
self.test_job_file_glob = os.path.join(self.test_job_dir, "*")
# Note that hyperparameters are intended to be constant across checkpoints
self.hyperparameter_1 = 17
self.hyperparameter_2 = 3.14159
for i in range(self.num_checkpoints):
path = os.path.join(
self.job_dir,
"dummy-checkpoint-{}.json".format(i)
)
checkpoint_data = {
"steps": i*self.checkpoint_steps,
"hyperparameters": {
"hyperparameter_1": self.hyperparameter_1,
"hyperparameter_2": self.hyperparameter_2
},
"model": random.random()
}
with open(path, "w") as fp:
json.dump(checkpoint_data, fp)
self.checkpoint_files.append(path)
self.garbage_file = os.path.join(self.job_dir, "garbage")
with open(self.garbage_file, "w") as gf:
gf.write("garbage")
def tearDown(self):
os.remove(self.garbage_file)
for path in self.checkpoint_files:
os.remove(path)
os.rmdir(self.job_dir)
test_job_files = glob.glob(self.test_job_file_glob)
for path in test_job_files:
os.remove(path)
os.rmdir(self.test_job_dir)
def test_get_checkpoints(self):
checkpoints = train.get_checkpoints(self.job_dir)
self.assertSetEqual(set(checkpoints), set(self.checkpoint_files))
def test_checkpoint_index(self):
        indices = list(map(train.checkpoint_index, self.checkpoint_files))
        self.assertListEqual(indices, list(range(self.num_checkpoints)))
def test_latest_checkpoint_1(self):
latest_checkpoint = train.latest_checkpoint(
random.sample(self.checkpoint_files, self.num_checkpoints)
)
self.assertEqual(
latest_checkpoint,
(self.checkpoint_files[-1], self.num_checkpoints-1)
)
def test_latest_checkpoint_2(self):
latest_checkpoint = train.latest_checkpoint([])
self.assertEqual(latest_checkpoint, (None, None))
def test_save_checkpoint(self):
self.assertEqual(len(glob.glob(self.test_job_file_glob)), 0)
checkpoint_data = {
"test_key": "test_value"
}
checkpoint_file = train.save_checkpoint(
self.test_job_dir,
1,
checkpoint_data
)
self.assertEqual(len(glob.glob(self.test_job_file_glob)), 1)
with open(checkpoint_file) as fp:
saved_object = json.load(fp)
self.assertDictEqual(saved_object, checkpoint_data)
def test_runner(self):
self.assertEqual(len(glob.glob(self.test_job_file_glob)), 0)
hyperparameters = {
"hyperparameter_1": self.hyperparameter_1,
"hyperparameter_2": self.hyperparameter_2
}
train_steps = 100
checkpoint_steps = 10
train.runner(
train.generate_trainer,
self.test_job_dir,
train_steps,
checkpoint_steps,
hyperparameters
)
self.assertEqual(
len(glob.glob(self.test_job_file_glob)),
int(train_steps/checkpoint_steps) + 1
)
if __name__ == "__main__":
unittest.main()
|
StarcoderdataPython
|
82687
|
#!/usr/bin/env python3
"""Tools to generate a Snakemake-based BIDS app."""
import os
import pathlib
import subprocess
import argparse
import logging
import sys
import yaml
import bids
import snakemake
from snakemake.io import load_configfile
# We define Path here in addition to pathlib to put both variables in globals()
# This way, users specifying a path type in their config.yaml can indicate
# either Path or pathlib.Path
Path = pathlib.Path
bids.config.set_option("extension_initial_dot", True)
logger = logging.getLogger(__name__)
class ConfigError(Exception):
"""Exception raised for errors with the Snakebids config."""
def __init__(self, msg):
self.msg = msg
        super().__init__(msg)
class KeyValue(argparse.Action):
"""Class for accepting key=value pairs in argparse"""
# Constructor calling
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, dict())
for value in values:
# split it into key and value
key, value = value.split("=")
# assign into dictionary
getattr(namespace, self.dest)[key] = value
class SnakemakeHelpAction(argparse.Action):
"""Class for printing snakemake usage in argparse"""
def __call__(self, parser, namespace, values, option_string=None):
run("snakemake -h")
sys.exit(0)
def run(command, env=None):
"""Helper function for running a system command while merging
stderr/stdout to stdout.
Parameters
----------
command : list of str
command to run
env : dict, optional
environment variable to set before running the command
"""
if env is None:
env = {}
merged_env = os.environ
merged_env.update(env)
process = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
env=merged_env,
)
while True:
line = process.stdout.readline()
line = str(line, "utf-8")[:-1]
print(line)
if line == "" and process.poll() is not None:
break
if process.returncode != 0:
raise Exception("Non zero return code: %d" % process.returncode)
def get_time_hash():
""" currently unused """
import hashlib
import time
hash = hashlib.sha1()
hash.update(str(time.time()).encode('utf-8'))
return hash.hexdigest()[:8]
def resolve_path(path_candidate):
"""Helper function to resolve any paths or list
of paths it's passed. Otherwise, returns the argument
unchanged.
Parameters
----------
    path_candidate : list, os.PathLike, object
        path or list of paths to resolve
Returns
-------
list, os.Pathlike, object
If os.Pathlike or list of os.Pathlike, the same paths resolved.
Otherwise, the argument unchanged.
"""
if isinstance(path_candidate, list):
return [resolve_path(p) for p in path_candidate]
if isinstance(path_candidate, os.PathLike):
return path_candidate.resolve()
return path_candidate
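# Hedged illustration of the contract above (added comment): a PathLike such as
# Path("data/../data") comes back resolved to an absolute path, a list is resolved
# element-wise, and anything else (a plain str, an int, ...) is returned unchanged.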
SNAKEFILE_CHOICES = [
"Snakefile",
"snakefile",
"workflow/Snakefile",
"workflow/snakefile",
]
CONFIGFILE_CHOICES = [
"config/snakebids.yml",
"config/snakebids.json",
"snakebids.yml",
"snakebids.json",
"config.yml",
"config.json",
"config/config.json",
"config/config.yml"]
class SnakeBidsApp:
"""Snakebids app with config and arguments.
Parameters
----------
snakemake_dir : str
Root directory of the snakebids app, containing the config file and
workflow files.
skip_parse_args : bool, optional
If true, the Snakebids app will not attempt to parse input arguments,
and will only handle the config file.
out_configfile : str
Path to the updated configfile (YAML or JSON), relative to the output
working directory. This should be the same as the `configfile: ` used
in your workflow. (default: 'config/snakebids.yml')
Attributes
----------
config : dict
Contains all the configuration variables parsed from the config file
and generated during the initialization of the SnakeBidsApp.
parser_include_snakemake : ArgumentParser
Parser including the generic Snakemake parser as a parent. This will
contain all arguments a Snakemake app can receive.
parser : ArgumentParser
Parser including only the arguments specific to this Snakebids app, as
specified in the config file.
snakefile : str
Absolute path to the input Snakefile
join(snakemake_dir, snakefile_path)
configfile_path : str
Relative path to config file (relative to snakemake_dir)
updated_config : str
Absolute path to the updated config file to write
"""
def __init__(self, snakemake_dir, skip_parse_args=False):
# input argument is the dir where snakemake would be run
# we use this to locate the config file, and snakefile adding them to
# generated_config and also add the snakemake_dir to the
# generated_config, so the workflow can use it to source files from
# it (e.g. atlases etc..)
# look for snakebids.yml in the snakemake_dir, quit if not found
self.configfile_path = None
for path in CONFIGFILE_CHOICES:
if Path(snakemake_dir, path).exists():
self.configfile_path = path
break
if self.configfile_path is None:
raise ConfigError(
f"Error: no config file found, tried {', '.join(CONFIGFILE_CHOICES)}."
)
# look for snakefile in the snakemake_dir, quit if not found
self.snakefile = None
for snakefile_path in SNAKEFILE_CHOICES:
if Path(snakemake_dir, snakefile_path).exists():
self.snakefile = Path(snakemake_dir, snakefile_path)
break
if self.snakefile is None:
raise ConfigError(
f"Error: no Snakefile found, tried {', '.join(SNAKEFILE_CHOICES)}."
)
self.config = load_configfile(Path(snakemake_dir,
self.configfile_path))
if self.config.get("debug", False):
logging.basicConfig(level=logging.DEBUG)
# add path to snakefile to the config -- so workflows can grab files
# relative to the snakefile folder
self.config["snakemake_dir"] = snakemake_dir
self.config["snakefile"] = self.snakefile
self.parser_include_snakemake = self.__create_parser(
include_snakemake=True
)
self.parser = self.__create_parser()
if not skip_parse_args:
self.__parse_args()
def __create_parser(self, include_snakemake=False):
"""Create a parser with snakemake parser as parent solely for
displaying help and checking conflicts, but then for actual parsing
use snakebids parser to parse known args, then pass remaining to
snakemake.
"""
if include_snakemake:
# get snakemake parser
smk_parser = snakemake.get_argument_parser()
# create parser
parser = argparse.ArgumentParser(
description="Snakebids helps build BIDS Apps with Snakemake",
add_help=False,
parents=[smk_parser],
)
else:
parser = argparse.ArgumentParser(
description="Snakebids helps build BIDS Apps with Snakemake"
)
# add option for printing out snakemake usage
parser.add_argument(
"--help_snakemake",
nargs=0,
action=SnakemakeHelpAction,
help=(
"Options to Snakemake can also be passed directly at the "
"command-line, use this to print Snakemake usage"
),
)
# create parser group for app options
app_group = parser.add_argument_group(
"SNAKEBIDS", "Options for snakebids app"
)
# update the parser with config options
for name, parse_args in self.config["parse_args"].items():
# Convert type annotations from strings to class types
# We first check that the type annotation is, in fact,
# a str to allow the edge case where it's already
# been converted
if "type" in parse_args and isinstance(parse_args["type"], str):
try:
parse_args["type"] = globals()[parse_args["type"]]
except KeyError as err:
raise TypeError(
f"{parse_args['type']} is not available "
+ f"as a type for {name}"
) from err
app_group.add_argument(name, **parse_args)
# general parser for
# --filter_{input_type} {key1}={value1} {key2}={value2}...
# create filter parsers, one for each input_type
filter_opts = parser.add_argument_group(
"BIDS FILTERS",
"Filters to customize PyBIDS get() as key=value pairs",
)
for input_type in self.config["pybids_inputs"].keys():
argname = f"--filter_{input_type}"
arglist_default = [
f"{key}={value}"
for (key, value) in self.config["pybids_inputs"][input_type][
"filters"
].items()
]
arglist_default_string = " ".join(arglist_default)
filter_opts.add_argument(
argname,
nargs="+",
action=KeyValue,
help=f"(default: {arglist_default_string})",
)
# general parser for
# --wildcards_{input_type} {wildcard1} {wildcard2} ...
# create wildcards parsers, one for each input_type
wildcards_opts = parser.add_argument_group(
"INPUT WILDCARDS",
"File path entities to use as wildcards in snakemake",
)
for input_type in self.config["pybids_inputs"].keys():
argname = f"--wildcards_{input_type}"
arglist_default = [
f"{wc}"
for wc in self.config["pybids_inputs"][input_type]["wildcards"]
]
arglist_default_string = " ".join(arglist_default)
wildcards_opts.add_argument(
argname,
nargs="+",
help=f"(default: {arglist_default_string})",
)
override_opts = parser.add_argument_group(
"PATH OVERRIDE",
(
"Options for overriding BIDS by specifying absolute paths "
"that include wildcards, e.g.: "
"/path/to/my_data/{subject}/t1.nii.gz"
),
)
# create path override parser
for input_type in self.config["pybids_inputs"].keys():
argname = f"--path_{input_type}"
override_opts.add_argument(argname, default=None)
return parser
def __parse_args(self):
# use snakebids parser to parse the known arguments
# will pass the rest of args when running snakemake
all_args = self.parser.parse_known_args()
args = all_args[0]
snakemake_args = all_args[1]
# resolve all path items to get absolute paths
args.__dict__ = {
k: resolve_path(v) for k, v in args.__dict__.items()
}
# add snakebids arguments to config
self.config.update(args.__dict__)
# add snakemake arguments to config
self.config.update({"snakemake_args": snakemake_args})
# argparse adds filter_{input_type} to the config
# we want to update the pybids_inputs dict with this, then remove the
# filter_{input_type} dict
for input_type in self.config["pybids_inputs"].keys():
arg_filter_dict = self.config[f"filter_{input_type}"]
if arg_filter_dict is not None:
self.config["pybids_inputs"][input_type]["filters"].update(
arg_filter_dict
)
del self.config[f"filter_{input_type}"]
# add cmdline defined wildcards from the list:
# wildcards_{input_type}
for input_type in self.config["pybids_inputs"].keys():
wildcards_list = self.config[f"wildcards_{input_type}"]
if wildcards_list is not None:
self.config["pybids_inputs"][input_type][
"wildcards"
] += wildcards_list
del self.config[f"wildcards_{input_type}"]
# add custom input paths to
# config['pybids_inputs'][input_type]['custom_path']
for input_type in self.config["pybids_inputs"].keys():
custom_path = self.config[f"path_{input_type}"]
if custom_path is not None:
self.config["pybids_inputs"][input_type][
"custom_path"
] = Path(custom_path).resolve()
del self.config[f"path_{input_type}"]
# replace paths with realpaths
self.config["bids_dir"] = Path(self.config["bids_dir"]).resolve()
self.config["output_dir"] = Path(self.config["output_dir"]).resolve()
def write_updated_config(self):
"""Create an updated snakebids config file in the output dir."""
self.updated_config = Path(self.config["output_dir"],
self.configfile_path)
# create the output folder if needed
self.updated_config.parent.mkdir(parents = True, exist_ok=True)
time_hash = get_time_hash() # TODO: copy to a time-hashed file too
# for provenance?
# unused as of now..
with open(self.updated_config, "w") as f:
# write either as JSON or YAML
if self.updated_config.suffix == '.json':
import json
json.dump(self.config, f, indent=4)
else: #if not json, then should be yaml or yml
from collections import OrderedDict
#this is needed to make the output yaml clean
yaml.add_representer(OrderedDict,
lambda dumper,data:
dumper.represent_mapping(
'tag:yaml.org,2002:map',
data.items()))
# Represent any PathLikes as str.
path2str = lambda dumper, data: dumper.represent_scalar('tag:yaml.org,2002:str',str(data))
yaml.add_representer(pathlib.PosixPath, path2str)
yaml.add_representer(pathlib.WindowsPath, path2str)
yaml.dump(dict(self.config),
f,
default_flow_style=False,
sort_keys=False)
def run_snakemake(self):
"""Run snake make with that config.
Workflow snakefile will read snakebids config, create inputs_config,
and read that in.
"""
# write updated config
self.write_updated_config()
# running the chosen participant level
analysis_level = self.config["analysis_level"]
# runs snakemake, using the workflow config and inputs config to
# override
# run snakemake command-line (passing any leftover args from argparse)
snakemake_cmd_list = [
"snakemake",
f"--snakefile {self.snakefile}",
f"--directory {self.config['output_dir']}",
*self.config["snakemake_args"],
*self.config["targets_by_analysis_level"][analysis_level],
]
snakemake_cmd = " ".join(snakemake_cmd_list)
run(snakemake_cmd)
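# Hedged usage sketch (assumption, mirroring the constructor and run_snakemake above):
#   app = SnakeBidsApp("/path/to/snakebids/app")   # parses CLI args against the config
#   app.run_snakemake()                            # writes the updated config, then runs snakemake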
|
StarcoderdataPython
|
169644
|
# -*- coding: utf8 -*-
from __future__ import absolute_import
import os
from celery import Celery
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'docato_proj.settings')
#app = Celery('docato_proj')
app = Celery('docato_proj',backend='rpc://') #,include=['test_celery.tasks'] # broker='amqp://admin:mypass@rabbit:5672'
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)
# fallback import, just in case the package layout differs
try:
import docato.tasks
except ImportError:
import docato.docato.tasks
print('app.tasks=', app.tasks)
app.conf.update(
#
BROKER_URL='amqp://admin:mypass@rabbit:5672',
#BROKER_URL='amqp://admin:[email protected]:5673',
)
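# Hedged usage note (not in the original file): a worker for this app is typically
# started with
#   celery -A docato_proj worker -l info
# where the broker reachable from the worker must match the BROKER_URL set above.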
|
StarcoderdataPython
|
168253
|
import pdb
from collections import namedtuple
from pathlib import Path
import z3
import sage.all
import helpers.vcommon as CM
from helpers.miscs import Miscs
import data.prog
import settings
DBG = pdb.set_trace
mlog = CM.getLogger(__name__, settings.logger_level)
class SymbsVals(namedtuple("SymbsVals", ("ss", "vs"))):
    """
((x, y), (3, 4))
"""
def __new__(cls, ss, vs):
assert isinstance(ss, tuple), ss
assert isinstance(vs, tuple), vs
return super().__new__(cls, ss, vs)
def __str__(self):
return ",".join(f"{s}={v}" for s, v in zip(self.ss, self.vs))
def mkExpr(self, ss):
# create z3 expression
assert len(ss) == len(self.vs), (ss, self.vs)
try:
exprs = [s == v for s, v in zip(ss, self.vs)]
except Exception:
exprs = [s == int(v) for s, v in zip(ss, self.vs)]
return z3.And(exprs)
class SymbsValsSet(set):
def __init__(self, myset=set()):
assert all(isinstance(t, SymbsVals) for t in myset), myset
super().__init__(myset)
def __contains__(self, t):
assert isinstance(t, SymbsVals), t
return super().__contains__(t)
def add(self, t):
assert isinstance(t, SymbsVals), t
return super().add(t)
class Trace(SymbsVals):
@property
def mydict(self):
# use for expression substitution
try:
return self._mydict
except AttributeError:
self._mydict = {
sage.all.var(s): v for s, v in zip(self.ss, self.vs) if "!" not in s
}
return self._mydict
@property
def mydict_str(self):
# use for maxplus eval
try:
return self._mydict_str
except AttributeError:
self._mydict_str = {s: v for s, v in zip(self.ss, self.vs) if "!" not in s}
return self._mydict_str
@classmethod
def parse(cls, ss, vs):
assert isinstance(ss, (tuple, list)), ss
assert isinstance(vs, (tuple, list)), vs
vs = tuple(Miscs.rat2str(t) for t in vs)
return Trace(ss, vs)
@classmethod
def fromDict(cls, d):
# {'y': 1, 'x': 2, 'r': 2, 'b': 2}
ss = tuple(sorted(d))
vs = tuple(d[s] for s in ss)
return cls(ss, vs)
def myeval(self, expr):
assert Miscs.is_expr(expr), expr
rs = expr.subs(self.mydict)
return rs
class Traces(SymbsValsSet):
def __str__(self, printDetails=False):
if printDetails:
return ", ".join(map(str, sorted(self)))
else:
return str(len(self))
# @property
# def maxdeg(self):
# return Miscs.guess_maxdeg(self.mydicts2)
def myeval(self, expr, pred=None):
assert Miscs.is_expr(expr), expr
if pred is None:
return [trace.myeval(expr) for trace in self]
else:
return any(pred(trace.myeval(expr)) for trace in self)
@classmethod
def extract(cls, cexs, useOne=True):
"""
cexs is a dict{inv: [dict]}
for each disproved inv, use just 1 cex
"""
if useOne:
cexs = [cexs[inv][0] for inv in cexs]
else:
cexs = [cex for inv in cexs for cex in cexs[inv]]
cexs = [Trace.fromDict(cex) for cex in cexs]
cexs = Traces(cexs)
return cexs
@property
def mydicts(self):
return (trace.mydict for trace in self)
@property
def mydicts2(self):
myd = {}
for trace in sorted(self):
d = trace.mydict
for k in d:
if k not in myd:
myd[k] = []
myd[k].append(d[k])
return myd
def instantiate(self, term, ntraces):
assert Miscs.is_expr(term), term
assert ntraces is None or ntraces >= 1, ntraces
exprs = set()
if ntraces is None:
            exprs = set(term.subs(t) for t in self.mydicts)
else:
ntracesExtra = ntraces * settings.TRACE_MULTIPLIER
for t in self.mydicts:
expr = term.subs(t)
if expr not in exprs:
exprs.add(expr)
if len(exprs) >= ntracesExtra:
break
# instead of doing this, can find out the # 0's in traces
# the more 0's , the better
exprs = sorted(exprs, key=lambda expr: len(Miscs.get_vars(expr)))
exprs = set(exprs[:ntraces])
return exprs
def padzeros(self, ss):
new_traces = Traces()
for t in self:
tss = set(t.ss)
if len(tss) < len(ss):
ss_ = ss - tss
newss = t.ss + tuple(ss_)
newvs = t.vs + (0,) * len(ss_)
t = Trace(newss, newvs)
new_traces.add(t)
return new_traces
class DTraces(dict):
"""
{loc: Traces}
"""
@property
def siz(self):
return sum(map(len, self.values()))
def __str__(self, printDetails=False):
return "\n".join(
f"{loc}: {traces.__str__(printDetails)}" for loc, traces in self.items()
)
def add(self, loc, trace):
assert isinstance(loc, str) and loc, loc
assert isinstance(trace, Trace), trace
if loc not in self:
self[loc] = Traces()
not_in = trace not in self[loc]
if not_in:
self[loc].add(trace)
return not_in
def merge(self, new_traces):
"""
add new traces and return those that are really new
"""
new_traces_ = DTraces()
for loc in new_traces:
for trace in new_traces[loc]:
not_in = self.add(loc, trace)
if not_in:
new_traces_.add(loc, trace)
else:
mlog.warning(f"trace {trace} exist")
return new_traces_
@classmethod
def mk(cls, locs):
assert locs
return cls({loc: Traces() for loc in locs})
@staticmethod
def parse(trace_str, inv_decls):
"""
parse trace for new traces
trace_str = ['vtrace1: 0 285 1 9 285 9 ',
'vtrace1: 0 285 2 18 285 9 ',
'vtrace1: 0 285 4 36 285 9 ']
"""
assert isinstance(inv_decls, data.prog.DSymbs) and inv_decls, inv_decls
lines = [l.strip() for l in trace_str]
lines = [l for l in lines if l]
dtraces = DTraces()
for l in lines:
# 22: 8460 16 0 1 16 8460
parts = l.split(":")
assert len(parts) == 2, parts
loc, tracevals = parts[0], parts[1]
loc = loc.strip() # 22
if loc not in inv_decls:
"""
No symbolic states for this loc, so will not
collect concrete states here
"""
continue
ss = inv_decls[loc].names
vs = tracevals.strip().split()
mytrace = Trace.parse(ss, vs)
dtraces.add(loc, mytrace)
return dtraces
def vwrite(self, inv_decls, tracefile):
"""
write traces to file
each loc will have its own file
file 'traces_loc.csv'
var1, var2, var3
v1, v2, v2
...
"""
assert inv_decls and isinstance(inv_decls, data.prog.DSymbs), inv_decls
assert isinstance(tracefile, Path), tracefile
ss = []
for loc in self:
traces = [inv_decls[loc]]
traces.extend([", ".join(map(str, t.vs)) for t in self[loc]])
traces = [f"{loc}: {trace}" for trace in traces]
ss.extend(traces)
tracefile.write_text("\n".join(ss))
@classmethod
def vread(cls, tracefile):
assert tracefile.is_file(), tracefile
trace_str = []
# determine variable declarations for different locations
inv_decls = data.prog.DSymbs()
for line in tracefile.read_text().splitlines():
line = line.strip()
if not line or line.startswith("#"):
continue
loc, contents = line.split(":")
if loc not in inv_decls:
inv_decls[loc] = data.prog.Symbs.mk(contents) # I x, I y
else:
trace_str.append(line.replace(",", " "))
dtraces = DTraces.parse(trace_str, inv_decls)
return inv_decls, dtraces
class Inp(SymbsVals):
pass
class Inps(SymbsValsSet):
def merge(self, ds, ss):
"""
ds can be
1. cexs = {loc:{inv: {'x': val, 'y': val}}}
2. [cexs]
3. [inp]
"""
if not ds:
return Inps()
def f(d):
inps = []
for loc in d:
for inv in d[loc]:
for d_ in d[loc][inv]:
try:
inp = tuple(d_[s] for s in ss)
inps.append(inp)
except KeyError:
# happens when the cex does not contain inp var
# e.g., when we only have symstates over
# non input vars
# see Hola 01.div.c
pass
return inps
if isinstance(ds, list) and all(isinstance(d, dict) for d in ds):
new_inps = [inp for d in ds for inp in f(d)]
elif isinstance(ds, dict):
new_inps = f(ds)
else:
assert isinstance(ds, set) and all(isinstance(d, tuple) for d in ds), ds
new_inps = [inp for inp in ds]
new_inps = [Inp(ss, inp) for inp in new_inps]
new_inps = set(inp for inp in new_inps if inp not in self)
for inp in new_inps:
self.add(inp)
return Inps(new_inps)
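# Illustrative sketch of the kind of data `Inps.merge` is fed (names and values
# here are hypothetical; `ss` is the tuple of input-variable names supplied by
# the caller, and `some_inv` stands for a disproved invariant object):
#
#   inps = Inps()
#   cexs = {"vtrace1": {some_inv: [{"x": 3, "y": 7}]}}   # form 1: {loc: {inv: [dict]}}
#   new = inps.merge(cexs, ("x", "y"))
#   # `new` holds only the Inp objects that were not already in `inps`,
#   # and `inps` itself has been extended in place.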
|
StarcoderdataPython
|
12510
|
"""
DB operations for Targets
"""
from api.models.base import DBModel
class TargetDB(DBModel):
'''DBModel for the targets table'''
tablename = 'targets'
|
StarcoderdataPython
|
1640472
|
from django.db import models
from datetime import datetime as dt
# from polymorphic.manager import PolymorphicManager
from polymorphic.managers import PolymorphicManager
class ActividadQuerySet(models.QuerySet):
def en_espera(self):
return self.filter(estado='espera')
def rechazado(self):
return self.filter(estado='rechazado')
def aprobado(self):
return self.filter(estado='aprobado')
def puede_aprobar(self, usuario):
return self.filter(estado='espera', departamento=usuario.perfil.departamento)
def propias(self, usuario):
return self.filter(usuario=usuario)
def actuales(self):
fecha = dt.now()
return self.filter(fecha__year=fecha.year)
class ActividadManager(PolymorphicManager):
def get_queryset(self):
return ActividadQuerySet(self.model, using=self._db)
def en_espera(self):
return self.get_queryset().en_espera()
def rechazado(self):
return self.get_queryset().rechazado()
def aprobado(self):
return self.get_queryset().aprobado()
def puede_aprobar(self, usuario):
return self.get_queryset().puede_aprobar(usuario)
def propias(self, usuario):
return self.get_queryset().propias(usuario)
def actuales(self):
return self.get_queryset().actuales()
|
StarcoderdataPython
|
4823402
|
<filename>src2/reader.py
# Reads cleans and parses data from wordle dictionaries.
class Reader:
    @staticmethod
    def load_lists(solution_corpus_path, guess_corpus_path):
solution_corpus = Reader.get_word_list(solution_corpus_path)
guess_corpus = Reader.get_word_list(guess_corpus_path)
full_corpus = solution_corpus + guess_corpus
full_corpus = Reader.unique(full_corpus)
return (solution_corpus, full_corpus)
    @staticmethod
    def get_word_list(input_path):
translation_table = {
ord('"'): None,
ord(','): ' ',
ord('['): None,
ord(']'): None
}
with open(input_path) as file:
words = file.read()
# Replace data structure characters and separate words by spaces.
words = words.translate(translation_table)
# Remove any excess new lines or spaces.
words = words.strip()
# Convert into array of words.
words = words.split()
unique_word_list = Reader.unique(words)
return unique_word_list
    @staticmethod
    def unique(word_list):
return list(set(word_list))
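
# A minimal usage sketch, assuming the two corpus files exist at the paths below
# (the paths are placeholders, not part of the original project):
if __name__ == "__main__":
    solutions, full = Reader.load_lists("data/solutions.json", "data/guesses.json")
    print(f"{len(solutions)} solution words, {len(full)} guessable words")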
|
StarcoderdataPython
|
38313
|
# graph
from datetime import date
import numpy as np
from bokeh.client import push_session
from bokeh.io import output_server, show, vform
from bokeh.palettes import RdYlBu3
from bokeh.plotting import figure, curdoc, vplot, output_server
from bokeh.models import ColumnDataSource
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn
from random import randint
# create a plot and style its properties
p = figure(x_range=(0, 100), y_range=(0, 100))
p.border_fill_color = 'black'
p.background_fill_color = 'black'
p.outline_line_color = None
p.grid.grid_line_color = None
# add a text renderer to out plot (no data yet)
r = p.text(x=[], y=[], text=[], text_color=[], text_font_size="20pt",
text_baseline="middle", text_align="center")
session = push_session(curdoc())
data = dict(
dates=[date(2014, 3, i+1) for i in range(10)],
downloads=[randint(0, 100) for i in range(10)],
)
source = ColumnDataSource(data)
columns = [
TableColumn(field="dates", title="Date", formatter=DateFormatter()),
TableColumn(field="downloads", title="Downloads"),
]
data_table = DataTable(source=source, columns=columns, width=400, height=280)
curdoc().add_root(vform(data_table))
session.show()
|
StarcoderdataPython
|
4809778
|
<gh_stars>100-1000
import os
import numpy as np
import zarr
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from torch.utils.data import random_split
from tqdm import tqdm
def as_array(image):
return np.asarray(image).swapaxes(2, 0)
def convert_data_set(path, data_set, batch_size=1000):
loader = DataLoader(
data_set, batch_size=batch_size, shuffle=False, num_workers=4)
num_examples = len(data_set)
os.makedirs(path, exist_ok=True)
with zarr.LMDBStore(path) as store:
root = zarr.group(store=store, overwrite=True)
images_set = root.zeros(
'images',
shape=(num_examples, 3, 96, 96),
chunks=(1, None, None, None),
dtype='u1')
labels_set = root.zeros(
'labels', shape=(num_examples, ), chunks=(1, ), dtype='u1')
current_iter = 0
for images, labels in tqdm(loader):
size = images.shape[0]
images_set[current_iter:current_iter + size] = images
labels_set[current_iter:current_iter + size] = labels
current_iter += size
def main():
data_set = ImageFolder(root='anime-faces', transform=as_array)
val_ratio = 0.1
val_size = int(len(data_set) * val_ratio)
train_size = len(data_set) - val_size
train_set, val_set = random_split(data_set, [train_size, val_size])
confs = [
('data/anime_faces/train.lmdb', train_set),
('data/anime_faces/val.lmdb', val_set),
]
for path, data_set in confs:
convert_data_set(path, data_set)
if __name__ == '__main__':
main()
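# Reading the converted store back (illustrative sketch; uses the same paths as
# `confs` above and assumes the conversion has already been run):
#
#   with zarr.LMDBStore('data/anime_faces/train.lmdb') as store:
#       root = zarr.open_group(store=store, mode='r')
#       image = root['images'][0]    # uint8 array of shape (3, 96, 96)
#       label = root['labels'][0]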
|
StarcoderdataPython
|
1612332
|
## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import os
import json
from Qt import QtCore, QtGui
from PyFlow.Core.Common import *
from PyFlow.Input import InputAction, InputManager, InputActionType
@SingletonDecorator
class ConfigManager(object):
"""Responsible for registering configuration files, reading/writing values to registered config files by aliases, providing QSettings from registered aliases."""
CONFIGS_STORAGE = {}
CONFIGS_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "Configs")
INPUT_CONFIG_PATH = os.path.join(CONFIGS_DIR, "input.json")
def __init__(self, *args, **kwargs):
self.registerConfigFile("PREFS", os.path.join(self.CONFIGS_DIR, "prefs.ini"))
self.registerConfigFile("APP_STATE", os.path.join(self.CONFIGS_DIR, "config.ini"))
if not os.path.exists(self.INPUT_CONFIG_PATH):
self.createDefaultInput()
data = InputManager().serialize()
if not os.path.exists(os.path.dirname(self.INPUT_CONFIG_PATH)):
os.makedirs(os.path.dirname(self.INPUT_CONFIG_PATH))
with open(self.INPUT_CONFIG_PATH, "w") as f:
json.dump(data, f)
else:
with open(self.INPUT_CONFIG_PATH, "r") as f:
data = json.load(f)
InputManager().loadFromData(data)
@staticmethod
def shouldRedirectOutput():
return ConfigManager().getPrefsValue("PREFS", "General/RedirectOutput") == "true"
def registerConfigFile(self, alias, absPath):
if alias not in self.CONFIGS_STORAGE:
self.CONFIGS_STORAGE[alias] = absPath
return True
return False
def getSettings(self, alias):
if alias in self.CONFIGS_STORAGE:
settings = QtCore.QSettings(self.CONFIGS_STORAGE[alias], QtCore.QSettings.IniFormat)
return settings
def getPrefsValue(self, configAlias, valueKey):
settings = self.getSettings(configAlias)
if settings:
if settings.contains(valueKey):
return settings.value(valueKey)
def createDefaultInput(self):
InputManager().registerAction(InputAction(name="Canvas.Pan", actionType=InputActionType.Mouse, group="Navigation", mouse=QtCore.Qt.MouseButton.MiddleButton))
InputManager().registerAction(InputAction(name="Canvas.Pan", actionType=InputActionType.Mouse, group="Navigation", mouse=QtCore.Qt.MouseButton.LeftButton, modifiers=QtCore.Qt.AltModifier))
InputManager().registerAction(InputAction(name="Canvas.Zoom", actionType=InputActionType.Mouse, group="Navigation", mouse=QtCore.Qt.MouseButton.RightButton))
InputManager().registerAction(InputAction(name="Canvas.FrameSelected", actionType=InputActionType.Keyboard, group="Navigation", key=QtCore.Qt.Key_F))
InputManager().registerAction(InputAction(name="Canvas.FrameAll", actionType=InputActionType.Keyboard, group="Navigation", key=QtCore.Qt.Key_H))
InputManager().registerAction(InputAction(name="Canvas.ZoomIn", actionType=InputActionType.Keyboard, group="Navigation", key=QtCore.Qt.Key_Equal, modifiers=QtCore.Qt.ControlModifier))
InputManager().registerAction(InputAction(name="Canvas.ZoomOut", actionType=InputActionType.Keyboard, group="Navigation", key=QtCore.Qt.Key_Minus, modifiers=QtCore.Qt.ControlModifier))
InputManager().registerAction(InputAction(name="Canvas.ResetScale", actionType=InputActionType.Keyboard, group="Navigation", key=QtCore.Qt.Key_R, modifiers=QtCore.Qt.ControlModifier))
InputManager().registerAction(InputAction(name="Canvas.AlignLeft", actionType=InputActionType.Keyboard, group="Refactoring", modifiers=QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier, key=QtCore.Qt.Key_Left))
InputManager().registerAction(InputAction(name="Canvas.AlignTop", actionType=InputActionType.Keyboard, group="Refactoring", modifiers=QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier, key=QtCore.Qt.Key_Up))
InputManager().registerAction(InputAction(name="Canvas.AlignRight", actionType=InputActionType.Keyboard, group="Refactoring", modifiers=QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier, key=QtCore.Qt.Key_Right))
InputManager().registerAction(InputAction(name="Canvas.AlignBottom", actionType=InputActionType.Keyboard, group="Refactoring", modifiers=QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier, key=QtCore.Qt.Key_Down))
InputManager().registerAction(InputAction(name="Canvas.Undo", actionType=InputActionType.Keyboard, group="Editing", modifiers=QtCore.Qt.ControlModifier, key=QtCore.Qt.Key_Z))
InputManager().registerAction(InputAction(name="Canvas.Redo", actionType=InputActionType.Keyboard, group="Editing", modifiers=QtCore.Qt.ControlModifier, key=QtCore.Qt.Key_Y))
InputManager().registerAction(InputAction(name="Canvas.KillSelected", actionType=InputActionType.Keyboard, group="Editing", key=QtCore.Qt.Key_Delete))
InputManager().registerAction(InputAction(name="Canvas.CopyNodes", actionType=InputActionType.Keyboard, group="Editing", key=QtCore.Qt.Key_C, modifiers=QtCore.Qt.ControlModifier))
InputManager().registerAction(InputAction(name="Canvas.CutNodes", actionType=InputActionType.Keyboard, group="Editing", key=QtCore.Qt.Key_X, modifiers=QtCore.Qt.ControlModifier))
InputManager().registerAction(InputAction(name="Canvas.DragCopyNodes", actionType=InputActionType.Mouse, group="Editing", mouse=QtCore.Qt.MouseButton.LeftButton, modifiers=QtCore.Qt.AltModifier))
InputManager().registerAction(InputAction(name="Canvas.DragCopyNodes", actionType=InputActionType.Mouse, group="Editing", mouse=QtCore.Qt.MouseButton.MiddleButton, modifiers=QtCore.Qt.AltModifier))
InputManager().registerAction(InputAction(name="Canvas.DragNodes", actionType=InputActionType.Mouse, group="Editing", mouse=QtCore.Qt.MouseButton.MiddleButton))
InputManager().registerAction(InputAction(name="Canvas.DragNodes", actionType=InputActionType.Mouse, group="Editing", mouse=QtCore.Qt.MouseButton.LeftButton))
InputManager().registerAction(InputAction(name="Canvas.DragChainedNodes", actionType=InputActionType.Mouse, group="Editing", mouse=QtCore.Qt.MouseButton.MiddleButton))
InputManager().registerAction(InputAction(name="Canvas.PasteNodes", actionType=InputActionType.Keyboard, group="Editing", key=QtCore.Qt.Key_V, modifiers=QtCore.Qt.ControlModifier))
InputManager().registerAction(InputAction(name="Canvas.DuplicateNodes", actionType=InputActionType.Keyboard, group="Editing", key=QtCore.Qt.Key_D, modifiers=QtCore.Qt.ControlModifier))
InputManager().registerAction(InputAction(name="Canvas.DisconnectPin", actionType=InputActionType.Mouse, group="Editing", mouse=QtCore.Qt.MouseButton.LeftButton, modifiers=QtCore.Qt.AltModifier))
InputManager().registerAction(InputAction(name="App.NewFile", actionType=InputActionType.Keyboard, group="IO", key=QtCore.Qt.Key_N, modifiers=QtCore.Qt.ControlModifier))
InputManager().registerAction(InputAction(name="App.Save", actionType=InputActionType.Keyboard, group="IO", key=QtCore.Qt.Key_S, modifiers=QtCore.Qt.ControlModifier))
InputManager().registerAction(InputAction(name="App.SaveAs", actionType=InputActionType.Keyboard, group="IO", key=QtCore.Qt.Key_S, modifiers=QtCore.Qt.ControlModifier | QtCore.Qt.ShiftModifier))
InputManager().registerAction(InputAction(name="App.Load", actionType=InputActionType.Keyboard, group="IO", key=QtCore.Qt.Key_O, modifiers=QtCore.Qt.ControlModifier))
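# Illustrative usage sketch (the "MYTOOL" alias and the settings key below are
# hypothetical, not part of PyFlow itself):
#
#   cfg = ConfigManager()
#   cfg.registerConfigFile("MYTOOL", os.path.join(ConfigManager.CONFIGS_DIR, "mytool.ini"))
#   settings = cfg.getSettings("MYTOOL")          # QSettings, INI format
#   settings.setValue("General/LastGraph", "untitled.pygraph")
#   last = cfg.getPrefsValue("MYTOOL", "General/LastGraph")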
|
StarcoderdataPython
|
3206009
|
import requests
from faker import Faker
import random
fake = Faker()
Genres = [
"Action",
"Comedy",
"Drama",
"Fantasy",
"Horror",
"Mystery",
"Romance",
"Thriller",
"Western",
]
Book_Numbers = 51
for i in range(1, Book_Numbers):
# Book
requests.post(
"http://localhost:8000/api/books/",
json={
"BookID": i,
"Date_Published": random.randint(1985, 2022),
"Genre": random.choice(Genres),
},
)
# Author
for j in range(1, random.randint(1, 4)):
requests.post(
"http://localhost:8000/api/authors/",
json={
"name": fake.name(),
"age": random.randint(15, 85),
"email": fake.email(),
"country": fake.country(),
"BookID": i,
},
)
# Editor
for k in range(1, random.randint(1, 4)):
requests.post(
"http://localhost:8000/api/editors/",
json={
"name": fake.name(),
"age": random.randint(15, 85),
"email": fake.email(),
"country": fake.country(),
"BookID": i,
},
)
# Publisher
requests.post(
"http://localhost:8000/api/publishers/",
json={"name": fake.name(), "country": fake.country(), "BookID": i},
)
|
StarcoderdataPython
|
1739438
|
<reponame>benjyz/ape
from copy import deepcopy
from typing import Dict, List, Optional
from .abstract import (
FileMixin,
SerializableType,
update_dict_params,
update_list_params,
update_params,
)
from .contract import Compiler, ContractInstance, ContractType, Source
class PackageMeta(SerializableType):
authors: Optional[List[str]] = None
license: Optional[str] = None
description: Optional[str] = None
keywords: Optional[List[str]] = None
links: Optional[Dict[str, str]] = None
class PackageManifest(FileMixin, SerializableType):
# NOTE: Must not override this key
manifest: str = "ethpm/3"
# NOTE: `name` and `version` should appear together
# NOTE: `name` must begin lowercase, and be comprised of only `[a-z0-9-]` chars
# NOTE: `name` should not exceed 255 chars in length
name: Optional[str] = None
# NOTE: `version` should be valid SemVer
version: Optional[str] = None
# NOTE: `meta` should be in all published packages
meta: Optional[PackageMeta] = None
# NOTE: `sources` source tree should be necessary and sufficient to compile
# all `ContractType`s in manifest
sources: Optional[Dict[str, Source]] = None
# NOTE: `contractTypes` should only include types directly computed from manifest
# NOTE: `contractTypes` should not include abstracts
contractTypes: Optional[Dict[str, ContractType]] = None
compilers: Optional[List[Compiler]] = None
# NOTE: Keys must be a valid BIP122 URI chain definition
# NOTE: Values must be a dict of `ContractType.contractName` => `ContractInstance` objects
deployments: Optional[Dict[str, Dict[str, ContractInstance]]] = None
# NOTE: keys must begin lowercase, and be comprised of only `[a-z0-9-]` chars
# (like `PackageManifest.name`)
# NOTE: keys should not exceed 255 chars in length (like `PackageManifest.name`)
# NOTE: values must be a Content Addressible URI that conforms to the same manifest
# version as `manifest`
buildDependencies: Optional[Dict[str, str]] = None
def __getattr__(self, attr_name: str):
if self.contractTypes and attr_name in self.contractTypes:
return self.contractTypes[attr_name]
else:
raise AttributeError(f"{self.__class__.__name__} has no attribute '{attr_name}'")
@classmethod
def from_dict(cls, params: Dict):
params = deepcopy(params)
update_params(params, "meta", PackageMeta)
update_dict_params(params, "sources", Source)
# NOTE: Special 1-level dict with key in type as arg
if "contractTypes" in params and params["contractTypes"]:
for name in params["contractTypes"]:
params["contractTypes"][name] = ContractType.from_dict( # type: ignore
{
# NOTE: We inject this parameter ourselves, remove it when serializing
"contractName": name,
**params["contractTypes"][name],
}
)
update_list_params(params, "compilers", Compiler)
# NOTE: Special 2-level dict
if "deployments" in params and params["deployments"]:
for name in params["deployments"]:
update_dict_params(params["deployments"], name, ContractInstance)
return cls(**params) # type: ignore
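# Minimal construction sketch (only metadata fields shown; a real manifest would
# normally also carry `sources` and `contractTypes`):
#
#   manifest = PackageManifest.from_dict(
#       {"manifest": "ethpm/3", "name": "my-package", "version": "0.1.0"}
#   )
#   assert manifest.name == "my-package"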
|
StarcoderdataPython
|
4803067
|
<reponame>dewrin/img_scanner_en_django
from django.shortcuts import render, redirect
from django.views.generic import View
from django.core.files.storage import FileSystemStorage
from PIL import Image
from pytesseract import image_to_string
from django.shortcuts import render
from django.http import HttpResponse
import json, os
import time, urllib.request
class OCR(View):
def index(request):
response_data = {}
response_data["success"] = True
response_data["url"] = None
response_data["code"] = None
if request.method == "GET":
try:
url = request.GET["url"]
response_data["url"] = url
file = "%s"%time.time()
urllib.request.urlretrieve(url, file)
im = Image.open(file)
text = image_to_string(im)
response_data["code"] = text
response_data["message"] = file
os.remove(file)
except Exception as e:
response_data["message"]="%s"%e
return HttpResponse(json.dumps(response_data),content_type="application/json")
class IMGView(View):
template_name = "index.html"
def get(self, request, *args, **kwargs):
return render(request, self.template_name, {})
def post(self, request, *args, **kwargs):
if request.FILES:
myimg = request.FILES['img']
context = {}
try:
im = Image.open(myimg)
text = image_to_string(im)
if text == '':
context['message'] = 'Sorry, This is an empty image :('
else:
fs = FileSystemStorage()
filename = fs.save(myimg.name, myimg)
uploaded_file_url = fs.url(filename)
context['imgurl'] = uploaded_file_url
context['alt'] = myimg.name
context['content'] = text
except Exception as ex:
context['message'] = ex
return render(request, self.template_name, context)
else:
return redirect('img')
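# Hypothetical URL wiring for these views (the project's actual urls.py is not
# shown here; the 'img' name matches the redirect above):
#
#   from django.urls import path
#   urlpatterns = [
#       path('', IMGView.as_view(), name='img'),
#       path('api/ocr/', OCR.index, name='ocr'),
#   ]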
|
StarcoderdataPython
|
1784542
|
#!/usr/bin/env python
"""Amalgamates all specified file references from a main source file into one large source file.
Searches through the main source file for a 'hotword:source_file' phrase.
Replaces this line with the full contents of the specified 'source_file'.
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2017"
__license__ = "MIT"
import argparse
import shutil
def amalgamacate():
# Set up Command line argument detection
parser = argparse.ArgumentParser(description='Amalgamate source files into one large file')
parser.add_argument('main', help='the file that contains the main process')
parser.add_argument('output', help='the destination file')
parser.add_argument('-hw', '--hotword', help='the hot word to look for, default="AMALGAMACATE:"')
args = parser.parse_args()
if args.hotword is None:
args.hotword = 'AMALGAMACATE'
# if no file type is given, copy the 'main's file type
if '.' not in args.output:
file_suffix = args.main.split('.')
        if len(file_suffix) > 1:
            args.output += "." + file_suffix[-1].rstrip()
        else:
            # the main file has no extension either, so there is nothing to infer
            print('Could not determine an output file type, aborting.')
            exit()
with open(args.main, 'r') as infile, open(args.output, 'w+') as outfile:
for line in infile:
if line.__contains__(args.hotword):
hotline = line.split(':')
with open(hotline[1].rstrip()) as external_file:
shutil.copyfileobj(external_file, outfile)
else:
outfile.write(line)
print('Files successfully amalgamacated into one!')
if __name__ == "__main__":
amalgamacate()
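# Example invocation (file names are hypothetical):
#
#   python amalgamate.py main.c combined.c --hotword AMALGAMACATE
#
# Every line of main.c containing "AMALGAMACATE" is treated as
# "<prefix>AMALGAMACATE:<source_file>" and replaced by the full contents of
# <source_file>; all other lines are copied verbatim into combined.c.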
|
StarcoderdataPython
|
130130
|
<gh_stars>0
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from ..analysis import CoherenceAnalyzer
def test_CoherenceAnalyzer_inputs():
input_map = dict(
NFFT=dict(usedefault=True, ),
TR=dict(),
figure_type=dict(usedefault=True, ),
frequency_range=dict(usedefault=True, ),
in_TS=dict(),
in_file=dict(
extensions=None,
requires=('TR', ),
),
n_overlap=dict(usedefault=True, ),
output_csv_file=dict(extensions=None, ),
output_figure_file=dict(extensions=None, ),
)
inputs = CoherenceAnalyzer.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_CoherenceAnalyzer_outputs():
output_map = dict(
coherence_array=dict(),
coherence_csv=dict(extensions=None, ),
coherence_fig=dict(extensions=None, ),
timedelay_array=dict(),
timedelay_csv=dict(extensions=None, ),
timedelay_fig=dict(extensions=None, ),
)
outputs = CoherenceAnalyzer.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
StarcoderdataPython
|
3311160
|
<filename>tensorflow_federated/python/core/impl/test.py
# Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""General purpose test utilities for TFF."""
import functools
from absl import logging
from tensorflow_federated.python.core.api import computations
def tf1_and_tf2(fn):
"""A decorator for creating test parameterized by TF computation decorators.
Args:
fn: A test function to be decorated. It must accept two arguments: self (a
`TestCase`), and tf_computation (either a `tff.tf_computation` or
`tff.tf2_computation`).
Returns:
A decorated function, which executes `fn` using both decorators.
"""
@functools.wraps(fn)
def wrapped_fn(self):
logging.info('Testing under tff.tf2_computation')
fn(self, computations.tf2_computation)
logging.info('Testing under tff.tf_computation')
fn(self, computations.tf_computation)
return wrapped_fn
def tf1(fn):
"""A decorator for testing the `tff.tf_computation` decorator."""
@functools.wraps(fn)
def wrapped_fn(self):
fn(self, computations.tf_computation)
return wrapped_fn
def tf2(fn):
"""A decorator for testing the `tff.tf2_computation` decorator."""
@functools.wraps(fn)
def wrapped_fn(self):
fn(self, computations.tf2_computation)
return wrapped_fn
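# Illustrative use in a test case (the test class, TensorFlow import, and
# assertion below are hypothetical, not part of this module):
#
#   class MyComputationsTest(absltest.TestCase):
#
#     @tf1_and_tf2
#     def test_add_one(self, tf_computation):
#
#       @tf_computation(tf.int32)
#       def add_one(x):
#         return x + 1
#
#       self.assertEqual(add_one(2), 3)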
|
StarcoderdataPython
|
4802464
|
<reponame>adamgreig/momobot
import feedparser
class Woot:
"""
Returns the current item on sale at Woot.com, according to the woot rss.
"""
def __init__(self, bot):
self.bot = bot
bot.register_command('woot', self.woot)
print "hello woot world"
self.bot.say('I exist')
def woot(self, data):
print "Wooting! {}".format(self.get_woot())
self.bot.say('The current sale at www.woot.com is ' + self.get_woot(), data['channel'])
def get_woot(self):
woot = feedparser.parse("http://www.woot.com/blog/rss.aspx")
sale = "No woots? How is this possible!?"
for item in woot['entries']:
if item.category == 'Woot':
sale = item.title
break
return sale
|
StarcoderdataPython
|
10825
|
<gh_stars>0
from django.db import models
from django.db.models.deletion import CASCADE
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# Create your models here.
class Profile(models.Model):
"""Model for handling User Profile"""
user = models.OneToOneField(User, on_delete= models.CASCADE)
username = models.CharField(max_length = 25)
signup_date = models.DateTimeField(auto_now_add= True)
profile_photo = CloudinaryField('images')
followers = models.ManyToManyField(User, related_name='followers', blank= True)
bio = models.CharField(max_length= 70)
def __str__(self):
        return self.username
def total_followers(self):
"""Method to return total numberof followers"""
return self.followers.count()
def save_profile(self):
"""Method to save profile to the database"""
self.save()
def delete_profile(self):
"""Method to delete profile from the database"""
self.delete()
    def update_profile(self, new):
        """Method to update user profile
        Args:
            new (Profile): profile instance carrying the updated values
        """
        self.username = new.username
        self.bio = new.bio
        self.profile_photo = new.profile_photo
        self.save()
@classmethod
def get_following(cls,user):
"""Method to return all users a specific user is following """
following = user.followers.all()
users = []
for profile in following:
user = User.objects.get(profile = profile)
users.append(user)
return users
@classmethod
def search_profile(cls,search_term):
"""Method to return profiles with a provided search term"""
        profiles = cls.objects.filter(username__icontains=search_term)
return profiles
class Likes(models.Model):
"""Model for handling Image likes"""
likes = models.IntegerField(default=0)
class Image(models.Model):
"""Model for handling Image posts by users"""
user = models.ForeignKey(User,on_delete= models.CASCADE)
image = CloudinaryField('images')
image_name = models.CharField(max_length= 25)
caption = models.CharField(max_length= 100)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE, default= None)
likes = models.ForeignKey(Likes, on_delete=CASCADE, default=None)
comment = models.CharField(max_length= 120)
time_posted = models.DateTimeField(auto_now_add= True)
def __str__(self):
        return self.image_name
def save_image(self):
"""Method to save Image to Database"""
self.save()
def delete_image(self):
"""Method to delete Image """
self.delete()
def like_image(self,user):
"""Method to add user as an image liker"""
self.likes.add(user)
def get_total_likes(self):
"""Method to get the total number of likess on an Image"""
return self.likes.count()
def update_caption(self,caption):
"""Method to updat eimage captions in database"""
self.caption = caption
self.save()
@classmethod
def get_images(cls,users):
"""Method to get a specific image"""
posts = []
for user in users:
images = Image.objects.filter(user = user)
for image in images:
posts.append(image)
return posts
def get_comments(self):
"""Method to get all comments related to a post"""
comments = Comments.objects.filter(image = self)
return comments
class Comments(models.Model):
"""Method to define attributes of a comment"""
user = models.ForeignKey(User, on_delete=models.CASCADE)
image = models.ForeignKey(Image,on_delete=models.CASCADE)
comment = models.TextField()
def __str__(self):
return self.comment
|
StarcoderdataPython
|
1793274
|
<gh_stars>0
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'Grpc.Tls' : {
'meta_info' : _MetaInfoClass('Grpc.Tls',
False,
[
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable TLS
''',
'enable',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('trustpoint', ATTRIBUTE, 'str' , None, None,
[], [],
''' Trustpoint Name
''',
'trustpoint',
'Cisco-IOS-XR-man-ems-cfg', False),
],
'Cisco-IOS-XR-man-ems-cfg',
'tls',
_yang_ns._namespaces['Cisco-IOS-XR-man-ems-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_ems_cfg'
),
},
'Grpc' : {
'meta_info' : _MetaInfoClass('Grpc',
False,
[
_MetaInfoClassMember('address-family', ATTRIBUTE, 'str' , None, None,
[], [],
''' Address family identifier type
''',
'address_family',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('enable', ATTRIBUTE, 'Empty' , None, None,
[], [],
''' Enable GRPC
''',
'enable',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('max-request-per-user', ATTRIBUTE, 'int' , None, None,
[('1', '32')], [],
''' Maximum concurrent requests per user
''',
'max_request_per_user',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('max-request-total', ATTRIBUTE, 'int' , None, None,
[('1', '256')], [],
''' Maximum concurrent requests in total
''',
'max_request_total',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('port', ATTRIBUTE, 'int' , None, None,
[('10000', '57999')], [],
''' Server listening port
''',
'port',
'Cisco-IOS-XR-man-ems-cfg', False),
_MetaInfoClassMember('tls', REFERENCE_CLASS, 'Tls' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_ems_cfg', 'Grpc.Tls',
[], [],
''' Transport Layer Security (TLS)
''',
'tls',
'Cisco-IOS-XR-man-ems-cfg', False),
],
'Cisco-IOS-XR-man-ems-cfg',
'grpc',
_yang_ns._namespaces['Cisco-IOS-XR-man-ems-cfg'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_man_ems_cfg'
),
},
}
_meta_table['Grpc.Tls']['meta_info'].parent =_meta_table['Grpc']['meta_info']
|
StarcoderdataPython
|
3343991
|
"""Library to access del.icio.us data via Python.
An introduction to the project is given in the README.
pydelicious is released under the FreeBSD License.
See license.txt for details and the copyright holders.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys
import os
import time
import datetime
import locale
import httplib
import urllib2
from urllib import urlencode, quote_plus
from StringIO import StringIO
from pprint import pformat
try:
# Python >= 2.5
from hashlib import md5
except ImportError:
from md5 import md5
try:
from elementtree.ElementTree import parse as parse_xml
except ImportError:
# Python 2.5 and higher
from xml.etree.ElementTree import parse as parse_xml
try:
import feedparser
except ImportError:
print >>sys.stderr, \
"Feedparser not available, no RSS parsing."
feedparser = None
### Static config
# pydoc and distutils supported exports
__version__ = '0.6'
__author__ = "Berend (.mpe)"
__author_email__ = "dev,<EMAIL>"
#__date__ = "$Date$"[]
__credits__ = """<NAME> (original author), and in no
particular order: <NAME>, me.gooz, mohangk, stumble.then.rise, clupprich"""
__license__ = 'FreeBSD'
__rcs_id__ = "$Id: __init__.py 68 2010-11-21 13:58:04Z berend.van.berkum $"[3:-1]
__url__ = 'http://code.google.com/p/pydelicious/'
__docformat__ = "restructuredtext en"
__description__ = "Access delicious.com API's with Python"
__long_description__ = "A complete Python interface to del.icio.us Bookmarks' HTTP API's."
DLCS_OK_MESSAGES = ('done', 'ok')
"Known text values of positive del.icio.us <result/> answers"
DLCS_WAIT_TIME = 4
"Time to wait between API requests"
DLCS_REQUEST_TIMEOUT = 444
"Seconds before socket triggers timeout"
DLCS_API_REALM = 'del.icio.us API'
DLCS_API_HOST = 'api.del.icio.us'
DLCS_API_PATH = 'v1'
DLCS_API = "https://%s/%s" % (DLCS_API_HOST, DLCS_API_PATH)
DLCS_RSS = 'http://previous.delicious.com/v2/rss/'
"Old RSS feeds, formerly <http://del.icio.us/rss/>"
DLCS_FEEDS = 'http://feeds.delicious.com/v2/'
PREFERRED_ENCODING = locale.getpreferredencoding()
# XXX: might need to check sys.platform/encoding combinations here, ie
#if sys.platform == 'darwin' || PREFERRED_ENCODING == 'macroman:
# PREFERRED_ENCODING = 'utf-8'
if not PREFERRED_ENCODING:
PREFERRED_ENCODING = 'iso-8859-1'
ISO_8601_DATETIME = '%Y-%m-%dT%H:%M:%SZ'
USER_AGENT = 'pydelicious/%s %s' % (__version__, __url__)
DEBUG = 0
if 'DLCS_DEBUG' in os.environ:
DEBUG = int(os.environ['DLCS_DEBUG'])
if DEBUG:
print >>sys.stderr, \
"Set DEBUG to %i from DLCS_DEBUG env." % DEBUG
HTTP_PROXY = os.environ.get('HTTP_PROXY', None)
HTTPS_PROXY = os.environ.get('HTTPS_PROXY', HTTP_PROXY)
if DEBUG and (HTTP_PROXY or HTTPS_PROXY):
print >>sys.stderr, \
"Set proxies to %s, %s from env." % (HTTP_PROXY, HTTPS_PROXY, )
### Timeoutsocket hack taken from FeedParser.py
# timeoutsocket allows feedparser to time out rather than hang forever on ultra-
# slow servers. Python 2.3 now has this functionality available in the standard
# socket library, so under 2.3 you don't need to install anything. But you
# probably should anyway, because the socket module is buggy and timeoutsocket
# is better.
try:
import timeoutsocket # http://www.timo-tasi.org/python/timeoutsocket.py
timeoutsocket.setDefaultSocketTimeout(DLCS_REQUEST_TIMEOUT)
except ImportError:
import socket
if hasattr(socket, 'setdefaulttimeout'):
socket.setdefaulttimeout(DLCS_REQUEST_TIMEOUT)
if DEBUG: print >>sys.stderr, \
"Set socket timeout to %s seconds" % DLCS_REQUEST_TIMEOUT
### Utility classes
class _Waiter:
"""Waiter makes sure a certain amount of time passes between
successive calls of `Waiter()`.
Some attributes:
:last: time of last call
:wait: the minimum time needed between calls
:waited: the number of calls throttled
pydelicious.Waiter is an instance created when the module is loaded.
"""
def __init__(self, wait):
self.wait = wait
self.waited = 0
self.lastcall = 0;
def __call__(self):
tt = time.time()
wait = self.wait
timeago = tt - self.lastcall
if timeago < wait:
wait = wait - timeago
if DEBUG>0: print >>sys.stderr, "Waiting %s seconds." % wait
time.sleep(wait)
self.waited += 1
self.lastcall = tt + wait
else:
self.lastcall = tt
Waiter = _Waiter(DLCS_WAIT_TIME)
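# Illustrative throttling behaviour (the module-level `Waiter` instance above is
# what dlcs_api_request() calls between requests):
#
#   Waiter()   # first call: returns immediately
#   Waiter()   # second call: sleeps until DLCS_WAIT_TIME seconds have elapsed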
class PyDeliciousException(Exception):
"""Standard pydelicious error"""
class PyDeliciousThrottled(Exception): pass
class PyDeliciousUnauthorized(Exception): pass
class DeliciousError(Exception):
"""Raised when the server responds with a negative answer"""
@staticmethod
def raiseFor(error_string, path, **params):
if error_string == 'item already exists':
raise DeliciousItemExistsError, params['url']
else:
raise DeliciousError, "%s, while calling <%s?%s>" % (error_string,
path, urlencode(params))
class DeliciousItemExistsError(DeliciousError):
"""Raised then adding an already existing post."""
class DeliciousHTTPErrorHandler(urllib2.HTTPDefaultErrorHandler):
def http_error_401(self, req, fp, code, msg, headers):
raise PyDeliciousUnauthorized, "Check credentials."
def http_error_503(self, req, fp, code, msg, headers):
# Retry-After?
errmsg = "Try again later."
if 'Retry-After' in headers:
errmsg = "You may try again after %s" % headers['Retry-After']
raise PyDeliciousThrottled, errmsg
### Utility functions
def dict0(d):
"Removes empty string values from dictionary"
    return dict([(k, v) for k, v in d.items()
                 if not (isinstance(v, basestring) and v == '')])
def delicious_datetime(str):
"""Parse a ISO 8601 formatted string to a Python datetime ...
"""
return datetime.datetime(*time.strptime(str, ISO_8601_DATETIME)[0:6])
def http_request(url, user_agent=USER_AGENT, retry=4, opener=None):
"""Retrieve the contents referenced by the URL using urllib2.
Retries up to four times (default) on exceptions.
"""
request = urllib2.Request(url, headers={'User-Agent':user_agent})
if not opener:
opener = urllib2.build_opener()
# Remember last error
e = None
# Repeat request on time-out errors
tries = retry;
while tries:
try:
return opener.open(request)
except urllib2.HTTPError, e:
# reraise unexpected protocol errors as PyDeliciousException
raise PyDeliciousException, "%s" % e
except urllib2.URLError, e:
# xxx: Ugly check for time-out errors
#if len(e)>0 and 'timed out' in arg[0]:
print >> sys.stderr, "%s, %s tries left." % (e, tries)
Waiter()
tries = tries - 1
#else:
# tries = None
# Give up
raise PyDeliciousException, \
"Unable to retrieve data at '%s', %s" % (url, e)
def build_api_opener(host, user, passwd, extra_handlers=() ):
"""
Build a urllib2 style opener with HTTP Basic authorization for one host
and additional error handling. If HTTP_PROXY is set a proxyhandler is also
added.
"""
global DEBUG, HTTP_PROXY, HTTPS_PROXY, DLCS_API_REALM
password_manager = urllib2.HTTPPasswordMgr()
password_manager.add_password(DLCS_API_REALM, host, user, passwd)
auth_handler = urllib2.HTTPBasicAuthHandler(password_manager)
handlers = ( auth_handler, DeliciousHTTPErrorHandler(), ) + extra_handlers
if DEBUG:
httpdebug = urllib2.HTTPHandler(debuglevel=DEBUG)
handlers += ( httpdebug, )
if HTTP_PROXY or HTTPS_PROXY:
proto = {}
if HTTPS_PROXY:
proto['https'] = HTTPS_PROXY
if HTTP_PROXY:
proto['http'] = HTTP_PROXY
handlers += ( urllib2.ProxyHandler( proto ), )
o = urllib2.build_opener(*handlers)
return o
def dlcs_api_opener(user, passwd):
"Build an opener for DLCS_API_HOST, see build_api_opener()"
return build_api_opener(DLCS_API_HOST, user, passwd)
def dlcs_api_request(path, params=None, user='', passwd='', throttle=True,
opener=None):
"""Retrieve/query a path within the del.icio.us API.
This implements a minimum interval between calls to avoid
throttling. [#]_ Use param 'throttle' to turn this behaviour off.
.. [#] http://del.icio.us/help/api/
"""
if throttle:
Waiter()
if params:
url = "%s/%s?%s" % (DLCS_API, path, urlencode(params))
else:
url = "%s/%s" % (DLCS_API, path)
if DEBUG: print >>sys.stderr, \
"dlcs_api_request: %s" % url
if not opener:
opener = dlcs_api_opener(user, passwd)
fl = http_request(url, opener=opener)
if DEBUG>2: print >>sys.stderr, \
pformat(fl.info().headers)
return fl
def dlcs_encode_params(params, usercodec=PREFERRED_ENCODING, encoded=False):
"""Turn all param values (int, list, bool) into utf8 encoded strings.
"""
if params:
for key in params.keys():
if isinstance(params[key], bool):
if params[key]:
params[key] = 'yes'
else:
params[key] = 'no'
elif isinstance(params[key], int):
params[key] = str(params[key])
elif not params[key]:
# strip/ignore empties other than False or 0
del params[key]
continue
elif isinstance(params[key], list):
params[key] = " ".join(params[key])
if encoded:
assert isinstance(params[key], str)
else:
params[key] = params[key].decode(usercodec)
assert isinstance(params[key], basestring)
if not encoded:
params = dict([ (k, v.encode('utf8'))
for k, v in params.items() if v])
return params
def dlcs_parse_xml(data, split_tags=False):
"""Parse any del.icio.us XML document and return Python data structure.
Recognizes all XML document formats as returned by the version 1 API and
translates to a JSON-like data structure (dicts 'n lists).
Returned instance is always a dictionary. Examples::
{'posts': [{'url':'...','hash':'...',},],}
{'tags':['tag1', 'tag2',]}
{'dates': [{'count':'...','date':'...'},], 'tag':'', 'user':'...'}
{'result':(True, "done")}
# etcetera.
"""
# TODO: split_tags is not implemented
if DEBUG>3: print >>sys.stderr, "dlcs_parse_xml: parsing from ", data
if not hasattr(data, 'read'):
data = StringIO(data)
doc = parse_xml(data)
root = doc.getroot()
fmt = root.tag
# Split up into three cases: Data, Result or Update
if fmt in ('tags', 'posts', 'dates', 'bundles'):
# Data: expect a list of data elements, 'resources'.
# Use `fmt` (without last 's') to find data elements, elements
# don't have contents, attributes contain all the data we need:
# append to list
elist = [el.attrib for el in doc.findall(fmt[:-1])]
# Return list in dict, use tagname of rootnode as keyname.
data = {fmt: elist}
# Root element might have attributes too, append dict.
data.update(root.attrib)
return data
elif fmt == 'result':
# Result: answer to operations
if root.attrib.has_key('code'):
msg = root.attrib['code']
else:
msg = root.text
# XXX: Return {'result':(True, msg)} for /known/ O.K. messages,
# use (False, msg) otherwise. Move this to DeliciousAPI?
v = msg in DLCS_OK_MESSAGES
return {fmt: (v, msg)}
elif fmt == 'update':
# Update: "time"
return {fmt: {
'time':time.strptime(root.attrib['time'], ISO_8601_DATETIME) }}
else:
raise PyDeliciousException, "Unknown XML document format '%s'" % fmt
## Feed util
def dlcs_rss_request(tag="", popular=0, user="", url=''):
"""Parse a RSS request, old style.
This requests old (now undocumented?) URL paths that still seem to work.
- http://del.icio.us/rss/url/{urimd5}
- http://del.icio.us/rss/{user}/{tag}
- http://del.icio.us/rss/{user}
- http://del.icio.us/rss
- http://del.icio.us/rss/tag/{tag}
- http://del.icio.us/rss/popular
- http://del.icio.us/rss/popular/{tag}
"""
tag = quote_plus(tag)
user = quote_plus(user)
if url != '':
url = DLCS_RSS + 'url/%s' % md5(url).hexdigest()
elif user != '' and tag != '':
url = DLCS_RSS + '%(user)s/%(tag)s' % {'user':user, 'tag':tag}
elif user != '' and tag == '':
url = DLCS_RSS + '%s' % user
elif popular == 0 and tag == '':
url = DLCS_RSS
elif popular == 0 and tag != '':
url = DLCS_RSS + "tag/%s" % tag
elif popular == 1 and tag == '':
url = DLCS_RSS + 'popular'
elif popular == 1 and tag != '':
url = DLCS_RSS + 'popular/%s' % tag
if DEBUG:
print 'dlcs_rss_request', url
rss = http_request(url).read()
# assert feedparser, "requires feedparser to be installed."
if not feedparser:
return rss
rss = feedparser.parse(rss)
posts = []
for e in rss.entries:
if e.has_key("links") and e["links"]!=[] and e["links"][0].has_key("href"):
url = e["links"][0]["href"]
elif e.has_key("link"):
url = e["link"]
elif e.has_key("id"):
url = e["id"]
else:
url = ""
if e.has_key("title"):
description = e['title']
elif e.has_key("title_detail") and e["title_detail"].has_key("title"):
description = e["title_detail"]['value']
else:
description = ''
try:
tags = [tag['term'] for tag in e['tags']]
except:
try:
tags = [e["category"]]
except:
tags = []
if e.has_key("modified"):
dt = e['modified']
else:
dt = ""
if e.has_key("summary"):
extended = e['summary']
elif e.has_key("summary_detail"):
            extended = e['summary_detail']["value"]
else:
extended = ""
if e.has_key("author"):
user = e['author']
else:
user = ""
        # time = dt hints at a problem here:
        # the variable naming is not consistent;
        # sending to the API and getting XML back are two different things :(
posts.append({'url':url, 'description':description, 'tags':tags,
'dt':dt, 'extended':extended, 'user':user})
return posts
"""
Bookmarks from the hotlist:
{format}
Recent bookmarks:
{format}/recent
Recent bookmarks by tag:
{format}/tag/{tag[+tag+...+tag]}
Popular bookmarks:
{format}/popular
Popular bookmarks by tag:
{format}/popular/{tag}
Recent site alerts (as seen in the top-of-page alert bar on the site):
{format}/alerts
Public summary information about a user (as seen in the network badge):
{format}/userinfo/{username}
A list of all public tags for a user:
{format}/tags/{username}
A list of related public tags for a user tag comination:
{format}/tags/{username}/{tag[+tag+...+tag]}
Bookmarks from a user's subscriptions:
{format}/subscriptions/{username}
Private feed for a user's inbox bookmarks from others:
{format}/inbox/{username}?private={key}
Bookmarks from members of a user's network:
{format}/network/{username}
Bookmarks from members of a user's private network:
{format}/network/{username}?private={key}
Bookmarks from members of a user's network by tag:
{format}/network/{username}/{tag[+tag+...+tag]}
Bookmarks from members of a user's private network by tag:
{format}/network/{username}/{tag[+tag+...+tag]}?private={key}
A list of a user's network members:
{format}/networkmembers/{username}
A list of a user's network fans:
{format}/networkfans/{username}
Recent bookmarks for a URL:
{format}/url/{url md5}
Summary information about a URL (as seen in the tagometer):
json/urlinfo/{url md5}
"""
delicious_v2_feeds = {
# Bookmarks from the hotlist
'hotlist': "%(format)s",
#"Recent bookmarks"
'recent': "%(format)s/recent",
#"Recent bookmarks by tag"
'tagged': "%(format)s/tag/%(tag)s",
#"Popular bookmarks"
'popular': "%(format)s/popular",
#"Popular bookmarks by tag"
'popular_tagged': "%(format)s/popular/%(tag)s",
#"Recent site alerts (as seen in the top-of-page alert bar on the site)"
'alerts': "%(format)s/alerts",
# Bookmarks for a specific user:
'user': "%(format)s/%(username)s",
# Private bookmarks for a specific user:
'user_private': "%(format)s/%(username)s?private=%(key)s",
# Bookmarks for a specific user by tag(s)
'user_tagged': "%(format)s/%(username)s/%(tag)s",
# Private bookmarks for a specific user by tag(s):
'user_tagged_private': "%(format)s/%(username)s/%(tag)s?private=%(key)s",
#"Public summary information about a user (as seen in the network badge)"
'user_info': "%(format)s/userinfo/%(username)s",
#"A list of all public tags for a user"
'user_tags': "%(format)s/tags/%(username)s",
#"Bookmarks from a user's subscriptions"
'user_subscription': "%(format)s/subscriptions/%(username)s",
#"Private feed for a user's inbox bookmarks from others"
'user_inbox': "%(format)s/inbox/%(username)s?private=%(key)s",
#"Bookmarks from members of a user's network"
'user_network': "%(format)s/network/%(username)s",
#"Bookmarks from members of a user's network by tag"
'user_network_tagged': "%(format)s/network/%(username)s/%(tag)s",
#"A list of a user's network members"
'user_network_member': "%(format)s/networkmembers/%(username)s",
#"A list of a user's network fans"
'user_network_fan': "%(format)s/networkfans/%(username)s",
#"Recent bookmarks for a URL"
'url': "%(format)s/url/%(urlmd5)s",
#"Summary information about a URL (as seen in the tagometer)"
'urlinfo': "json/urlinfo/%(urlmd5)s",
}
def dlcs_feed(name_or_url, url_map=delicious_v2_feeds, count=15, **kwds):
"""
Request and parse a feed.
Count should be between 1 and 100, default 15.
Format values include 'rss' and 'json', defaults to json.
- http://www.delicious.com/help/feeds
"""
#if fancy == True:
# '?fancy'
#elif fancy != None:
# '?plain'
format = kwds.setdefault('format', 'json')
kwds.setdefault('count', count)
if not name_or_url:
name_or_url = 'hotlist'
if name_or_url in url_map:
params = dict([(k, quote_plus(str(v))) for k,v in kwds.items()])
url = DLCS_FEEDS + url_map[name_or_url] % params
else:
url = name_or_url
if DEBUG:
print 'dlcs_feed', url
feed = http_request(url).read()
if format == 'rss':
if feedparser:
rss = feedparser.parse(feed)
return rss
else:
return feed
elif format == 'json':
return feed
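# Illustrative calls (network access required; the delicious.com feeds may no
# longer resolve, so treat these as sketches):
#
#   dlcs_feed('recent', count=5)                                  # JSON string
#   dlcs_feed('user_tagged', format='rss', username='joe', tag='python')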
### Main module class
class DeliciousAPI:
"""A single-user Python facade to the del.icio.us HTTP API.
See http://delicious.com/help/api.
Methods ``request`` and ``request_raw`` represent the core. For all API
paths there are furthermore methods (e.g. posts_add for 'posts/all') with
an explicit declaration of parameters and documentation.
"""
def __init__(self, user, passwd, codec=PREFERRED_ENCODING,
api_request=dlcs_api_request, xml_parser=dlcs_parse_xml,
build_opener=dlcs_api_opener, encode_params=dlcs_encode_params,
encoded=False):
"""Initialize access to the API for ``user`` with ``passwd``.
``codec`` sets the encoding of the arguments, which defaults to the
users preferred locale.
The ``api_request`` and ``xml_parser`` parameters by default point to
functions within this package with standard implementations which
request and parse a resource. See ``dlcs_api_request()`` and
``dlcs_parse_xml()``.
Parameter ``build_opener`` is a callable that, provided with the
credentials, should build a urllib2 opener for the delicious API server
with HTTP authentication. See ``dlcs_api_opener()`` for the default
implementation.
``encode_params`` finally preprocesses API parameters before
they are passed to ``api_request``.
"""
assert user != ""
self.user = user
        self.passwd = passwd
self.codec = codec
# Implement communication to server and parsing of respons messages:
assert callable(encode_params)
self._encode_params = encode_params
self._encoded = encoded
assert callable(build_opener)
self._opener = build_opener(user, passwd)
assert callable(api_request)
self._api_request = api_request
assert callable(xml_parser)
self._parse_response = xml_parser
### Core functionality
def request(self, path, _raw=False, **params):
"""Sends a request message to `path` in the API, and parses the results
from XML. Use with ``_raw=True`` or ``call request_raw()`` directly
to get the filehandler and process the response message manually.
Calls to some paths will return a `result` message, i.e.::
<result code="..." />
or::
<result>...</result>
These should all be parsed to ``{'result':(Boolean, MessageString)}``,
this method raises a ``DeliciousError`` on negative `result` answers.
Positive answers are silently accepted and nothing is returned.
Using ``_raw=True`` bypasses all parsing and never raises
``DeliciousError``.
See ``dlcs_parse_xml()`` and ``self.request_raw()``."""
if _raw:
# return answer
return self.request_raw(path, **params)
else:
params = self._encode_params(params, self.codec,
encoded=self._encoded)
# get answer and parse
fl = self._api_request(path, params=params, opener=self._opener)
rs = self._parse_response(fl)
if type(rs) == dict and 'result' in rs:
if not rs['result'][0]:
# Raise an error for negative 'result' answers
errmsg = ""
if len(rs['result'])>0:
errmsg = rs['result'][1]
DeliciousError.raiseFor(errmsg, path, **params)
else:
# not out-of-the-oridinary result, OK
return
return rs
def request_raw(self, path, **params):
"""Calls the path in the API, returns the filehandle. Returned file-
like instances have an ``HTTPMessage`` instance with HTTP header
information available. Use ``filehandle.info()`` or refer to the
``urllib2.openurl`` documentation.
"""
# see `request()` on how the response can be handled
params = self._encode_params(params, self.codec, encoded=self._encoded)
return self._api_request(path, params=params, opener=self._opener)
### Explicit declarations of API paths, their parameters and docs
# Tags
def tags_get(self, **kwds):
"""Returns a list of tags and the number of times it is used by the
user.
::
<tags>
<tag tag="TagName" count="888">
"""
return self.request("tags/get", **kwds)
def tags_delete(self, tag, **kwds):
"""Delete an existing tag.
&tag={TAG}
(required) Tag to delete
"""
return self.request('tags/delete', tag=tag, **kwds)
def tags_rename(self, old, new, **kwds):
"""Rename an existing tag with a new tag name. Returns a `result`
message or raises an ``DeliciousError``. See ``self.request()``.
&old={TAG}
(required) Tag to rename.
&new={TAG}
(required) New tag name.
"""
return self.request("tags/rename", old=old, new=new, **kwds)
# Posts
def posts_update(self, **kwds):
"""Returns the last update time for the user. Use this before calling
`posts_all` to see if the data has changed since the last fetch.
::
<update time="CCYY-MM-DDThh:mm:ssZ">
"""
return self.request("posts/update", **kwds)
def posts_dates(self, tag="", **kwds):
"""Returns a list of dates with the number of posts at each date.
::
<dates>
<date date="CCYY-MM-DD" count="888">
&tag={TAG}
(optional) Filter by this tag
"""
return self.request("posts/dates", tag=tag, **kwds)
def posts_get(self, tag="", dt="", url="", hashes=[], meta=True, **kwds):
"""Returns posts matching the arguments. If no date or url is given,
most recent date will be used.
::
<posts dt="CCYY-MM-DD" tag="..." user="...">
<post ...>
&tag={TAG} {TAG} ... {TAG}
(optional) Filter by this/these tag(s).
&dt={CCYY-MM-DDThh:mm:ssZ}
(optional) Filter by this date, defaults to the most recent date on
which bookmarks were saved.
&url={URL}
(optional) Fetch a bookmark for this URL, regardless of date.
&hashes={MD5} {MD5} ... {MD5}
(optional) Fetch multiple bookmarks by one or more URL MD5s
regardless of date.
&meta=yes
(optional) Include change detection signatures on each item in a
'meta' attribute. Clients wishing to maintain a synchronized local
store of bookmarks should retain the value of this attribute - its
value will change when any significant field of the bookmark
changes.
"""
return self.request("posts/get", tag=tag, dt=dt, url=url,
hashes=hashes, meta=meta, **kwds)
def posts_recent(self, tag="", count="", **kwds):
"""Returns a list of the most recent posts, filtered by argument.
::
<posts tag="..." user="...">
<post ...>
&tag={TAG}
(optional) Filter by this tag.
&count={1..100}
(optional) Number of items to retrieve (Default:15, Maximum:100).
"""
return self.request("posts/recent", tag=tag, count=count, **kwds)
def posts_all(self, tag="", start=None, results=None, fromdt=None,
todt=None, meta=True, hashes=False, **kwds):
"""Returns all posts. Please use sparingly. Call the `posts_update`
method to see if you need to fetch this at all.
::
<posts tag="..." user="..." update="CCYY-MM-DDThh:mm:ssZ">
<post ...>
&tag
(optional) Filter by this tag.
&start={#}
(optional) Start returning posts this many results into the set.
&results={#}
(optional) Return this many results.
&fromdt={CCYY-MM-DDThh:mm:ssZ}
(optional) Filter for posts on this date or later
&todt={CCYY-MM-DDThh:mm:ssZ}
(optional) Filter for posts on this date or earlier
&meta=yes
(optional) Include change detection signatures on each item in a
'meta' attribute. Clients wishing to maintain a synchronized local
store of bookmarks should retain the value of this attribute - its
value will change when any significant field of the bookmark
changes.
&hashes
(optional, exclusive) Do not fetch post details but a posts
manifest with url- and meta-hashes. Other options do not apply.
"""
if hashes:
return self.request("posts/all", hashes=hashes, **kwds)
else:
return self.request("posts/all", tag=tag, fromdt=fromdt, todt=todt,
start=start, results=results, meta=meta, **kwds)
def posts_add(self, url, description, extended="", tags="", dt="",
replace=False, shared=True, **kwds):
"""Add a post to del.icio.us. Returns a `result` message or raises an
``DeliciousError``. See ``self.request()``.
&url (required)
the url of the item.
&description (required)
the description of the item.
&extended (optional)
notes for the item.
&tags (optional)
tags for the item (space delimited).
&dt (optional)
datestamp of the item (format "CCYY-MM-DDThh:mm:ssZ").
Requires a LITERAL "T" and "Z" like in ISO8601 at
http://www.cl.cam.ac.uk/~mgk25/iso-time.html for example:
"1984-09-01T14:21:31Z"
&replace=no (optional) - don't replace post if given url has already
been posted.
        &shared=yes (optional) - whether the item is public.
"""
return self.request("posts/add", url=url, description=description,
extended=extended, tags=tags, dt=dt,
replace=replace, shared=shared, **kwds)
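    # A minimal, hedged usage sketch for `posts_add` (not from the original
    # module): `api` is an assumed DeliciousAPI instance and the URL, tags and
    # credentials below are placeholders only.
    #
    #   api = DeliciousAPI(user='someuser', passwd='secret')
    #   api.posts_add(url='http://example.org/', description='Example page',
    #                 tags='example test', replace=True)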
def posts_delete(self, url, **kwds):
"""Delete a post from del.icio.us. Returns a `result` message or
raises an ``DeliciousError``. See ``self.request()``.
&url (required)
the url of the item.
"""
return self.request("posts/delete", url=url, **kwds)
# Bundles
def bundles_all(self, **kwds):
"""Retrieve user bundles from del.icio.us.
::
<bundles>
            <bundle name="..." tags="...">
"""
return self.request("tags/bundles/all", **kwds)
def bundles_set(self, bundle, tags, **kwds):
"""Assign a set of tags to a single bundle, wipes away previous
settings for bundle. Returns a `result` messages or raises an
``DeliciousError``. See ``self.request()``.
&bundle (required)
the bundle name.
&tags (required)
list of tags.
"""
        if isinstance(tags, list):
tags = " ".join(tags)
return self.request("tags/bundles/set", bundle=bundle, tags=tags,
**kwds)
def bundles_delete(self, bundle, **kwds):
"""Delete a bundle from del.icio.us. Returns a `result` message or
raises an ``DeliciousError``. See ``self.request()``.
&bundle (required)
the bundle name.
"""
return self.request("tags/bundles/delete", bundle=bundle, **kwds)
### Utils
# Lookup table for del.icio.us url-path to DeliciousAPI method.
paths = {
'tags/get': 'tags_get',
'tags/delete': 'tags_delete',
'tags/rename': 'tags_rename',
'posts/update': 'posts_update',
'posts/dates': 'posts_dates',
'posts/get': 'posts_get',
'posts/recent': 'posts_recent',
'posts/all': 'posts_all',
'posts/add': 'posts_add',
'posts/delete': 'posts_delete',
'tags/bundles/all': 'bundles_all',
'tags/bundles/set': 'bundles_set',
'tags/bundles/delete': 'bundles_delete',
}
def get_method(self, path):
return getattr(self, self.paths[path])
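    # Hedged example of the `paths` lookup above (illustrative only; `api` is
    # an assumed DeliciousAPI instance): `get_method` maps a del.icio.us URL
    # path onto the bound method that serves it.
    #
    #   method = api.get_method('posts/recent')
    #   recent = method(count=10)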
def get_url(self, url):
"""Return the del.icio.us url at which the HTML page with posts for
``url`` can be found.
"""
return "http://del.icio.us/url/?url=%s" % (url,)
def __repr__(self):
return "DeliciousAPI(%s)" % self.user
### Quick API access
def apiNew(user, passwd):
"Creates a new DeliciousAPI object, requires user(name) and passwd."
return DeliciousAPI(user=user, passwd=passwd)
def add(user, passwd, url, description, tags="", extended="", dt=None,
replace=False):
"Add a post for user. "
apiNew(user, passwd).posts_add(url=url, description=description,
extended=extended, tags=tags, dt=dt, replace=replace)
def get(user, passwd, tag="", dt=None, count=0, hashes=[]):
"Returns a list of posts for the user using the API. "
posts = apiNew(user, passwd).posts_get(
tag=tag, dt=dt, hashes=hashes)['posts']
if count: posts = posts[:count]
return posts
def get_update(user, passwd):
"Returns the last update time for the user. "
return apiNew(user, passwd).posts_update()['update']['time']
def get_all(user, passwd, tag="", start=0, results=100, fromdt=None,
todt=None):
"Returns a list with all posts. Please use sparingly. See `get_updated`"
return apiNew(user, passwd).posts_all(tag=tag, start=start,
results=results, fromdt=fromdt, todt=todt, meta=True)['posts']
def get_tags(user, passwd):
"Returns a list with all tags for user."
return apiNew(user=user, passwd=passwd).tags_get()['tags']
def delete(user, passwd, url):
"Delete the URL from the del.icio.us account."
apiNew(user, passwd).posts_delete(url=url)
def rename_tag(user, passwd, oldtag, newtag):
"Rename the tag for the del.icio.us account."
apiNew(user=user, passwd=passwd).tags_rename(old=oldtag, new=newtag)
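# A short, hedged sketch of the quick-access helpers above; the credentials
# and URL are placeholders, not working values.
#
#   add('someuser', 'secret', 'http://example.org/', 'Example page',
#       tags='example')
#   print(get_update('someuser', 'secret'))
#   print(get_tags('someuser', 'secret'))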
### Old RSS
def getrss(tag="", popular=0, url='', user=""):
"""Get posts from del.icio.us via parsing RSS.
tag (opt) sort by tag
popular (opt) look for the popular stuff
    user (opt) get the posts by a user; this overrides popular
url (opt) get the posts by url
"""
return dlcs_rss_request(tag=tag, popular=popular, user=user, url=url)
def get_userposts(user):
"parse RSS for user"
return getrss(user=user)
def get_tagposts(tag):
"parse RSS for tag"
return getrss(tag=tag)
def get_urlposts(url):
"parse RSS for URL"
return getrss(url=url)
def get_popular(tag=""):
"parse RSS for popular URLS for tag"
return getrss(tag=tag, popular=1)
### Feeds (RSS/JSON/?)
def user_posts(user=None, tag=None, key=None, **params):
"""
Bookmarks for a specific user:
{format}/{username}
Private bookmarks for a specific user:
{format}/{username}?private={key}
Bookmarks for a specific user by tag(s):
{format}/{username}/{tag[+tag+...+tag]}
Private bookmarks for a specific user by tag(s):
{format}/{username}/{tag[+tag+...+tag]}?private={key}
"""
    assert user
if tag and key:
path = 'user_tagged_private'
elif tag:
path = 'user_tagged'
elif key:
path = 'user_private'
else:
path = 'user'
return dlcs_feed(path, user=user, tag=tag, key=key, **params)
def json_tags(user, atleast, count, sort='alpha', raw=True, callback=None):
"""
user
atleast=### include only tags for which there are at least ###
number of posts.
count=### include ### tags, counting down from the top.
sort={alpha|count} construct the object with tags in alphabetic order
(alpha), or by count of posts (count).
callback=NAME wrap the object definition in a function call NAME(...),
thus invoking that function when the feed is executed.
raw a pure JSON object is returned, instead of code that
will construct an object named Delicious.tags.
"""
url = 'http://del.icio.us/feeds/json/tags/' + \
dlcs_encode_params({0:user})[0]
return dlcs_feed(url, atleast=atleast, count=count, sort=sort, raw=raw,
callback=callback)
def json_network(user, raw=True, callback=None):
"""
callback=NAME wrap the object definition in a function call NAME(...)
?raw a raw JSON object is returned, instead of an object named
Delicious.posts
"""
url = 'http://del.icio.us/feeds/json/network/' + \
dlcs_encode_params({0:user})[0]
return dlcs_feed(url, raw=raw, callback=callback)
def json_fans(user, raw=True, callback=None):
"""
callback=NAME wrap the object definition in a function call NAME(...)
?raw a pure JSON object is returned, instead of an object named
Delicious.
"""
url = 'http://del.icio.us/feeds/json/fans/' + \
dlcs_encode_params({0:user})[0]
return dlcs_feed(url, raw=raw, callback=callback)
### delicious V2 feeds
def getfeed(name, **params):
return dlcs_feed(name, **params)
import traceback
from spikeforest2_utils import AutoRecordingExtractor
class Recording:
def __init__(self):
super().__init__()
self._recording = None
def javascript_state_changed(self, prev_state, state):
self._set_status('running', 'Running Recording')
if not self._recording:
self._set_status('running', 'Loading recording')
recording0 = state.get('recording', None)
if not recording0:
self._set_error('Missing: recording')
return
try:
self._recording = AutoRecordingExtractor(recording0)
except Exception as err:
traceback.print_exc()
self._set_error('Problem initiating recording: {}'.format(err))
return
self._set_status('running', 'Loading recording data')
try:
channel_locations = self._recording.get_channel_locations()
        except Exception:
channel_locations = None
self.set_state(dict(
num_channels=self._recording.get_num_channels(),
channel_ids=self._recording.get_channel_ids(),
channel_locations=channel_locations,
num_timepoints=self._recording.get_num_frames(),
samplerate=self._recording.get_sampling_frequency(),
status_message='Loaded recording.'
))
self._set_status('finished', '')
def _set_state(self, **kwargs):
self.set_state(kwargs)
def _set_error(self, error_message):
self._set_status('error', error_message)
def _set_status(self, status, status_message=''):
self._set_state(status=status, status_message=status_message)
<reponame>fabric-testbed/ActorBase
#!/usr/bin/env python3
# MIT License
#
# Copyright (c) 2020 FABRIC Testbed
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#
# Author: <NAME> (<EMAIL>)
from __future__ import annotations
import pickle
from typing import TYPE_CHECKING, List
from fabric_cf.actor.core.common.constants import Constants
from fabric_cf.actor.core.common.exceptions import DatabaseException
from fabric_cf.actor.core.apis.abc_actor_mixin import ABCActorMixin, ActorType
from fabric_cf.actor.core.apis.abc_container_database import ABCContainerDatabase
from fabric_cf.actor.db.psql_database import PsqlDatabase
if TYPE_CHECKING:
from fabric_cf.actor.core.apis.abc_management_object import ABCManagementObject
from fabric_cf.actor.core.util.id import ID
class ContainerDatabase(ABCContainerDatabase):
"""
Implements Container Interface to various Database operations
"""
PropertyTime = "time"
PropertyContainer = "container"
def __init__(self, *, user: str, password: str, database: str, db_host: str, logger):
self.user = user
self.password = password
self.database = database
self.db_host = db_host
self.db = PsqlDatabase(user=user, password=password, database=database, db_host=db_host, logger=logger)
self.initialized = False
self.reset_state = False
self.logger = logger
    def __getstate__(self):
        # The live database handle and logger are not picklable; drop them
        # (and the transient flags) and rebuild the handle in __setstate__.
        state = self.__dict__.copy()
        del state['db']
        del state['initialized']
        del state['reset_state']
        del state['logger']
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
        # Recreate the database handle; the logger is not pickled, so attach
        # one again after unpickling if logging is needed.
        self.db = PsqlDatabase(user=self.user, password=self.password, database=self.database,
                               db_host=self.db_host, logger=None)
        self.initialized = False
        self.reset_state = False
def initialize(self):
"""
Initialize
"""
if not self.initialized:
self.db.create_db()
if self.reset_state:
self.db.reset_db()
self.initialized = True
def set_reset_state(self, *, value: bool):
"""
Set Reset State
"""
self.reset_state = value
def reset_db(self):
"""
Reset the database
"""
self.db.reset_db()
def add_actor(self, *, actor: ABCActorMixin):
"""
Add an actor
@param actor actor
"""
properties = pickle.dumps(actor)
self.db.add_actor(name=actor.get_name(), guid=str(actor.get_guid()), act_type=actor.get_type().value,
properties=properties)
def remove_actor(self, *, actor_name: str):
"""
Remove an actor
@param actor_name actor name
"""
self.db.remove_actor(name=actor_name)
def remove_actor_database(self, *, actor_name: str):
"""
Remove an actor
@param actor_name actor name
"""
self.db.remove_actor(name=actor_name)
def get_actors(self, *, name: str = None, actor_type: int = None) -> List[ABCActorMixin]:
"""
Get Actors
@param name actor name
@param actor_type actor type
@return list of actors
"""
result = None
try:
act_dict_list = None
if name is None and actor_type is None:
act_dict_list = self.db.get_actors()
elif name is not None and actor_type is not None:
name = "%{}%".format(name)
if actor_type != ActorType.All.value:
act_dict_list = self.db.get_actors_by_name_and_type(actor_name=name, act_type=actor_type)
else:
act_dict_list = self.db.get_actors_by_name(act_name=name)
if act_dict_list is not None:
result = []
for a in act_dict_list:
pickled_actor = a.get(Constants.PROPERTY_PICKLE_PROPERTIES)
act_obj = pickle.loads(pickled_actor)
result.append(act_obj)
return result
except Exception as e:
self.logger.error(e)
return result
def get_actor(self, *, actor_name: str) -> dict:
"""
Get Actor
        @param actor_name actor name
        @return actor
"""
result = None
try:
act_dict = self.db.get_actor(name=actor_name)
if act_dict is not None:
pickled_actor = act_dict.get(Constants.PROPERTY_PICKLE_PROPERTIES)
return pickle.loads(pickled_actor)
except Exception as e:
self.logger.error(e)
return result
def get_actor_id(self, *, actor_name: str) -> dict:
"""
        Get Actor Id
        @param actor_name actor name
        @return actor id
"""
result = None
try:
act_dict = self.db.get_actor(name=actor_name)
if act_dict is not None:
return act_dict['act_id']
except Exception as e:
self.logger.error(e)
return result
def add_time(self, *, properties: dict):
"""
Add time
@param properties properties
"""
self.db.add_miscellaneous(name=self.PropertyTime, properties=properties)
def get_time(self) -> dict:
"""
Get Time
        @return time properties
"""
result = None
try:
result = self.db.get_miscellaneous(name=self.PropertyTime)
except Exception as e:
self.logger.error(e)
return result
def add_container_properties(self, *, properties: dict):
"""
Add container properties
@param properties properties
"""
self.db.add_miscellaneous(name=self.PropertyContainer, properties=properties)
def get_container_properties(self) -> dict:
"""
Get Container Properties
@return properties
"""
result = None
try:
result = self.db.get_miscellaneous(name=self.PropertyContainer)
except Exception as e:
self.logger.error(e)
return result
def get_manager_objects_by_actor_name(self, *, actor_name: str) -> list:
"""
Get Management Object by actor name
@param actor_name actor name
@return list of management objects
"""
result = None
try:
result = self.db.get_manager_objects_by_actor_name(act_name=actor_name)
except Exception as e:
self.logger.error(e)
return result
def get_manager_container(self) -> List[dict]:
"""
Get Management Container
@return list of management objects for containers
"""
result = None
try:
result = self.db.get_manager_containers()
except Exception as e:
self.logger.error(e)
return result
def add_manager_object(self, *, manager: ABCManagementObject):
"""
Add Management object
@param manager management object
"""
properties = manager.save()
act_id = None
actor_name = manager.get_actor_name()
if actor_name is not None:
act_id = self.get_actor_id(actor_name=actor_name)
self.db.add_manager_object(manager_key=str(manager.get_id()), properties=properties, act_id=act_id)
def remove_manager_object(self, *, mid: ID):
"""
Remove management object
@param mid management object id
"""
self.db.remove_manager_object(manager_key=str(mid))
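# A minimal, hedged usage sketch for ContainerDatabase (not part of the
# original module): the connection values are placeholders and a reachable
# PostgreSQL backend is assumed.
#
#   import logging
#   db = ContainerDatabase(user='fabric', password='fabric', database='am',
#                          db_host='localhost:5432', logger=logging.getLogger(__name__))
#   db.set_reset_state(value=True)
#   db.initialize()
#   print(db.get_actors())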
<reponame>pennucci/enterprise<filename>tests/test_gp_priors.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_gp_priors
----------------------------------
Tests for GP priors and bases.
"""
import unittest
import numpy as np
from tests.enterprise_test_data import datadir
from enterprise.pulsar import Pulsar
from enterprise.signals import parameter
from enterprise.signals import gp_signals
from enterprise.signals import gp_priors
from enterprise.signals import gp_bases
import scipy.stats
class TestGPSignals(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup the Pulsar object."""
# initialize Pulsar class
cls.psr = Pulsar(datadir + "/B1855+09_NANOGrav_9yv1.gls.par", datadir + "/B1855+09_NANOGrav_9yv1.tim")
def test_turnover_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.turnover(
log10_A=parameter.Uniform(-18, -12),
gamma=parameter.Uniform(1, 7),
lf0=parameter.Uniform(-9, -7.5),
kappa=parameter.Uniform(2.5, 5),
beta=parameter.Uniform(0.01, 1),
)
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
log10_A, gamma, lf0, kappa, beta = -14.5, 4.33, -8.5, 3, 0.5
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_red_noise_lf0": lf0,
"B1855+09_red_noise_kappa": kappa,
"B1855+09_red_noise_beta": beta,
}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for turnover."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.turnover(f2, log10_A=log10_A, gamma=gamma, lf0=lf0, kappa=kappa, beta=beta)
msg = "Spectrum incorrect for turnover."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for turnover."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_free_spec_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.free_spectrum(log10_rho=parameter.Uniform(-10, -4, size=30))
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
rhos = np.random.uniform(-10, -4, size=30)
params = {"B1855+09_red_noise_log10_rho": rhos}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for free spectrum."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.free_spectrum(f2, log10_rho=rhos)
msg = "Spectrum incorrect for free spectrum."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for free spectrum."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_t_process_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.t_process(
log10_A=parameter.Uniform(-18, -12),
gamma=parameter.Uniform(1, 7),
alphas=gp_priors.InvGamma(alpha=1, gamma=1, size=30),
)
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
alphas = scipy.stats.invgamma.rvs(1, scale=1, size=30)
log10_A, gamma = -15, 4.33
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_red_noise_alphas": alphas,
}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for free spectrum."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.t_process(f2, log10_A=log10_A, gamma=gamma, alphas=alphas)
msg = "Spectrum incorrect for free spectrum."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for free spectrum."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_adapt_t_process_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.t_process_adapt(
log10_A=parameter.Uniform(-18, -12),
gamma=parameter.Uniform(1, 7),
alphas_adapt=gp_priors.InvGamma(),
nfreq=parameter.Uniform(5, 25),
)
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
alphas = scipy.stats.invgamma.rvs(1, scale=1, size=1)
log10_A, gamma, nfreq = -15, 4.33, 12
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_red_noise_alphas_adapt": alphas,
"B1855+09_red_noise_nfreq": nfreq,
}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for free spectrum."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.t_process_adapt(f2, log10_A=log10_A, gamma=gamma, alphas_adapt=alphas, nfreq=nfreq)
msg = "Spectrum incorrect for free spectrum."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for free spectrum."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_turnover_knee_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.turnover_knee(
log10_A=parameter.Uniform(-18, -12),
gamma=parameter.Uniform(1, 7),
lfb=parameter.Uniform(-9, -7.5),
lfk=parameter.Uniform(-9, -7.5),
kappa=parameter.Uniform(2.5, 5),
delta=parameter.Uniform(0.01, 1),
)
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
log10_A, gamma, lfb = -14.5, 4.33, -8.5
lfk, kappa, delta = -8.5, 3, 0.5
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_red_noise_lfb": lfb,
"B1855+09_red_noise_lfk": lfk,
"B1855+09_red_noise_kappa": kappa,
"B1855+09_red_noise_delta": delta,
}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for turnover."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.turnover_knee(f2, log10_A=log10_A, gamma=gamma, lfb=lfb, lfk=lfk, kappa=kappa, delta=delta)
msg = "Spectrum incorrect for turnover."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for turnover."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_broken_powerlaw_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.broken_powerlaw(
log10_A=parameter.Uniform(-18, -12),
gamma=parameter.Uniform(1, 7),
log10_fb=parameter.Uniform(-9, -7.5),
kappa=parameter.Uniform(0.1, 1.0),
delta=parameter.Uniform(0.01, 1),
)
basis = gp_bases.createfourierdesignmatrix_red(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
log10_A, gamma, log10_fb, kappa, delta = -14.5, 4.33, -8.5, 1, 0.5
params = {
"B1855+09_red_noise_log10_A": log10_A,
"B1855+09_red_noise_gamma": gamma,
"B1855+09_red_noise_log10_fb": log10_fb,
"B1855+09_red_noise_kappa": kappa,
"B1855+09_red_noise_delta": delta,
}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_red(self.psr.toas, nmodes=30)
msg = "F matrix incorrect for turnover."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.broken_powerlaw(f2, log10_A=log10_A, gamma=gamma, log10_fb=log10_fb, kappa=kappa, delta=delta)
msg = "Spectrum incorrect for turnover."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for turnover."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
def test_powerlaw_genmodes_prior(self):
"""Test that red noise signal returns correct values."""
# set up signal parameter
pr = gp_priors.powerlaw_genmodes(log10_A=parameter.Uniform(-18, -12), gamma=parameter.Uniform(1, 7))
basis = gp_bases.createfourierdesignmatrix_chromatic(nmodes=30)
rn = gp_signals.BasisGP(priorFunction=pr, basisFunction=basis, name="red_noise")
rnm = rn(self.psr)
# parameters
log10_A, gamma = -14.5, 4.33
params = {"B1855+09_red_noise_log10_A": log10_A, "B1855+09_red_noise_gamma": gamma}
# basis matrix test
F, f2 = gp_bases.createfourierdesignmatrix_chromatic(self.psr.toas, self.psr.freqs, nmodes=30)
msg = "F matrix incorrect for turnover."
assert np.allclose(F, rnm.get_basis(params)), msg
# spectrum test
phi = gp_priors.powerlaw_genmodes(f2, log10_A=log10_A, gamma=gamma)
msg = "Spectrum incorrect for turnover."
assert np.all(rnm.get_phi(params) == phi), msg
# inverse spectrum test
msg = "Spectrum inverse incorrect for turnover."
assert np.all(rnm.get_phiinv(params) == 1 / phi), msg
# test shape
msg = "F matrix shape incorrect"
assert rnm.get_basis(params).shape == F.shape, msg
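# Added entry point (a small, assumed convenience, not from the original
# file) so the suite can also be run directly with `python test_gp_priors.py`
# in addition to a test runner such as pytest.
if __name__ == "__main__":
    unittest.main()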
<filename>data_wrangler.py
import csv
import json
import os
# Manages the retrieval and storage of CSV data
class DataManager:
    # Stores all of the csv file names; kept per instance so separate
    # DataManager objects do not share and re-append to one class-level list
    def __init__(self):
        self.CSVFiles = []
        for filename in os.listdir("CSV"):
            self.CSVFiles.append("CSV/" + filename)
# returns the data of a chosen csv (by index) as json
def csv_json(self, index: int = 0):
file = self.CSVFiles[index]
data = {}
# Reads in data from csv file, stores it in data dictionary
# JSON and Python dictionaries are functionally the same
with open(file) as csvFile:
csvReader = csv.DictReader(csvFile)
for row in csvReader:
id = row['sequence']
data[id] = row
return data
# Returns the data of all the local csv's as json
def csv_json_all(self):
data = {}
for file in self.CSVFiles:
currData = {}
with open(file) as csvFile:
csvReader = csv.DictReader(csvFile)
for row in csvReader:
id = row['sequence']
currData[id] = row
data[file] = currData
return data
# Writes the given json data to a csv file
def write_json_csv(self, json_data, data_columns):
with open('new_example.csv', 'w') as file_output:
file_output.write("{0},{1},{2}\n".format(data_columns[0], data_columns[1], data_columns[2]))
for row in json_data:
file_output.write("{0},{1},{2}\n".format(row.name, row.description, row.index))
<reponame>ubikpt/PyXtal
from structure import *
allpassed = True
for sg in range(1, 231):
print("Calculating spacegroup " + str(sg))
wyckoffs = get_wyckoffs(sg)
for index, wp in enumerate(wyckoffs):
v = np.random.random(3)
for i in range(3):
if np.random.random() < 0.5:
v[i] *= -1
# v = SymmOp.from_rotation_and_translation(np.zeros([3,3]), v)
points = []
for p in wp:
points.append(p.operate(v))
for i, p in enumerate(points):
for j in range(3):
a = np.random.random()
if a < 1 / 3:
points[i][j] += 1
elif a < 2 / 3:
points[i][j] -= 1
if check_wyckoff_position(points, sg) is not False:
pass
else:
allpassed = False
print("sg: " + str(sg) + ", index: " + str(index))
print("points:")
for p in points:
print(p)
if allpassed is True:
print("All spacegroups passed.")
<reponame>rbirger/OxfordHCVNonSpatial
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <markdowncell>
# ###Description and preliminary code for Continuous-Time Markov Chain Model
#
# This model will test the importance of including a spatial component in the system. We will use ODEs to describe the dynamics of each lineage and competition between lineages. The model includes a second latent class that keeps cells latently infected for longer before becoming infectious, and also allows for proliferation of infected cells by allowing cells to be reborn into the latent class
#
# * Healthy Hepatocytes
#
# * Latently Infected Hepatocytes
#
# * Long-lived Latently Infected Hepatocytes
#
# * Infected Hepatocytes
#
# * Dead Infected Hepatocytes
#
# * Dead Healthy Hepatocytes
#
# Healthy cells are regenerated from Dead cells. Interacting with Infected cells, they become Latently Infected, and after the eclipse phase, Latent Infections become Infectious. Both Healthy and Infected Hepatocytes die, with Infected being eliminated by the immune response faster than natural death rates. Dead cells regenerate, but those dead after being infected with HCV have a lower probability of regenerating. Some cells regenerate into infectious cells.
#
# Adapting the Perelson/Neumann model, we have
#
# $\begin{eqnarray*}
# \frac{dT}{dt}& =& \phi_{DT} D_T + (1-\kappa)\phi_{DI} D_I - (\lambda_{virions} + \lambda_{local} +\nu_T) T\\
# \frac{dE}{dt}& =& (1-\eta)(\lambda_{virions} + \lambda_{local} )T - (\alpha +\nu_T)E\\
# \frac{dEX}{dt}& =& \eta(\lambda_{virions} + \lambda_{local} )T - (\alpha_X +\nu_T)EX\\
# \frac{dI}{dt}& =& \kappa\phi_{DI} D_I + \alpha E + \alpha_X EX - \nu_I I\\
# \frac{dD_T}{dt}& =& \nu_T(T+E+EX) - \phi_{DT} D_T\\
# \frac{dD_I}{dt}& =& \nu_I I - \phi_{DI} D_I\\
# \end{eqnarray*}$
#
# To translate these equations into a continuous-time Markov Chain model, we can calculate the transition probabilities from the parameters above. Let $\vec{X(t)} = [T(t), E(t), EX(t), I(t), D_T(t), D_I(t)]$, so the probability of state change is defined as Prob$\{\Delta \vec{X(t)} = (a, b, c, d, e, f)|\vec{X(t)}\}$, where $a$ represents the change in state $T$, $b$ in state $E$, etc. We assume that the time step is small enough that each change is only in one cell, so $a$ through $f$ can only take the values 0 or $\pm 1$. The transition probabilities are as follows
#
#
# $$\begin{cases}
# (1-\eta)(\lambda_{virions} + \lambda_{local}) T\ \Delta t + o(\Delta t), & a = -1, b = 1\\
# \eta(\lambda_{virions} + \lambda_{local}) T\ \Delta t + o(\Delta t), & a = -1, c = 1\\
# \nu_T T \Delta t + o(\Delta t), & a = -1, e = 1\\
# \alpha E \Delta t + o(\Delta t), & b = -1, d = 1\\
# \nu_T E \Delta t + o(\Delta t), & b = -1, e = 1\\
# \alpha_X EX \Delta t + o(\Delta t), & c = -1, d = 1\\
# \nu_T EX \Delta t + o(\Delta t), & c = -1, e = 1\\
# \nu_I I \Delta t + o(\Delta t), & d = -1, f = 1 \\
# \phi_{DT} D_T \Delta t + o(\Delta t), & d = -1, a = 1\\
# \kappa\phi_{DI} D_I \Delta t + o(\Delta t), & f = -1, d = 1\\
# (1-\kappa)\phi_{DI} D_I \Delta t + o(\Delta t), & f = -1, a = 1\\
# \end{cases}$$
#
# The generator matrix $\mathbf{Q}$ derived from these transition probabilities is thus as follows
#
#
# $$ \mathbf{Q} =
# \left[ \begin{array}{cccccc}
# 0& (1-\eta)(\lambda_{virions} + \lambda_{local}) T& \eta(\lambda_{virions} + \lambda_{local}) T& 0 & \nu_T T &0\\
# 0 & 0 & 0 & \alpha E &\nu_T E & 0\\
# 0 & 0 & 0 & \alpha_X EX &\nu_T EX & 0\\
# 0 & 0 & 0 & 0 & 0&\nu_I I \\
# \phi_{DT} D_T &0 &0 & 0&0&0\\
# (1-\kappa)\phi_{DI} D_I & 0 & 0& \kappa \phi_{DI} D_I& 0&0\\
# \end{array} \right] $$
# <codecell>
%matplotlib inline
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import random
# <codecell>
class HCVHepatocyte:
def __init__(self, cellID, parentID, infType, tLat, cellType, tInf = None, tDead = None):
self.cellID = cellID #ID of cell
self.parentID = parentID #ID of infector, whether it is virus or infected cell
self.infType = infType #type of infection (from virus or from infected cell)
self.tLat = tLat #time of infection of cell (time cell became latently infected)
self.cellType = cellType #type of cell latent, longterm, infectious, infectious from longterm,
#dead, dead from long term
self.tInf = tInf #time to become infectious
self.tDead = tDead #time of death
        if cellType in ('Infected', 'InfectedL'):
            if tInf is None:
                print("Error: Infectious cells must have time Infectious")
        elif cellType in ('Dead', 'DeadL'):
            if tDead is None:
                print("Error: Dead cells must have time of death")
#define method for infecting a susceptible cell
def InfectCell(self, newID, simTime, newInfType):
''' Method for infecting new cell'''
if self.cellType not in ['Infected', 'InfectedL']:
print("Error: Latent Cell cannot infect")
else:
return HCVHepatocyte(newID, self.cellID, 'Cell', simTime, newInfType)
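# A hedged, illustrative sketch of the class above (not from the original
# notebook): a founder infected cell infecting a healthy neighbour at
# simulation time 2.5; the IDs are arbitrary.
#
#   founder = HCVHepatocyte(0, None, 'Initial', -1, 'Infected', tInf=0)
#   newly_latent = founder.InfectCell(1, 2.5, 'Latent')
#   print(newly_latent.cellType)  # -> 'Latent'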
# <codecell>
#Create function to randomly select one cell to infect
def CreateLatent(cellHandle, newID, state_idx, simTime):
if state_idx in [0,1]:
newLatent = cellHandle.InfectCell(newID, simTime, 'Latent')
return newLatent
elif state_idx in [2,3]:
newLatent = cellHandle.InfectCell(newID, simTime, 'LatentL')
return newLatent
else:
print("Error: State is not an infecting transition")
# <codecell>
#Create function to Kill Infected cell
def KillInfected(cellHandle, time):
cellHandle.tDead = time
if cellHandle.cellType == 'Infected':
cellHandle.cellType = 'Dead'
elif cellHandle.cellType == 'InfectedL':
cellHandle.cellType = 'DeadL'
else:
print("Error: Cannot kill uninfected cell")
return cellHandle
# <codecell>
#Create function to move latent to infectious
def LatentInfectious(cellHandle, time):
cellHandle.tInf = time
if cellHandle.cellType == 'Latent':
cellHandle.cellType = 'Infected'
elif cellHandle.cellType == 'LatentL':
cellHandle.cellType = 'InfectedL'
else:
print("Error: Cell not Latent")
return cellHandle
# <codecell>
#Number of state transitions to observe
M = int(1e8)
# time vector
time = np.zeros(M)
#Define parameters
init=10 #10 #initial number of infected hepatocytes
v_init = 0#initial viral load
ALT_init = 100 #initial ALT level
rho = 8.18 #viral export rate
c = 22.3 #viral clearance rate
gamma = 1500 #scaling factor -
R = 4.1825 #average HCV RNA in infected hepatocyte
N_liver = int(1e11) #Number of cells in liver
alpha = 1 #1/latent period (days)
alpha_x = 1.3e-2 #1/long-term latent period
nu_T = 1.4e-2 #death rate of healthy cells
nu_I = 1/7 #death rate of infected cells
phi_T = 10*nu_T #regeneration rate of dead healthy cells
phi_I = .8*phi_T #regeneration rate of dead infected cells
beta_V = .5e-8 #viral transmision rate
beta_L = R*1e-5/(60*24) #cell-cell transmission rate
eta = .01 #proportion of infected cells that go long-term latent
kappa = 0 #.1 #proportion of dead infected cells regenerated as infected cells
changes = 13;
delta = .33 #ALT degradation rate
N=N_liver/1e7 #initial number of hepatocytes
eps = (delta*ALT_init)/(nu_T*N) #rate of ALT production
Q = np.zeros(changes)
Q[0] = (1-eta)*(beta_L*init) #Infection of Target cell by cell-> latent
Q[1] = (1-eta)*beta_V*v_init #Infection of Target cell by virus -> latent
Q[2] = eta*beta_L*init #Infection of Target cell by cell -> long-term latent
Q[3] = eta*beta_V*v_init #Infection of Target cell by virus -> long-term latent
Q[4] = nu_T; #Death of target cell
Q[5] = alpha; #latent cell becomes infected
Q[6] = nu_T; #latent cell dies
Q[7] = alpha_x #long-term latent cell becomes infected
Q[8] = nu_T #long-term latent cell dies
Q[9] = nu_I; #Infected cell dies
Q[10] = phi_T; #Healthy cell regenerates
Q[11] = (1-kappa)*phi_I; #Infected cell regenerates into healthy cell
Q[12] = kappa*phi_I
#Construct matrix of state transition vectors
trans_vecs = np.zeros([6, changes])
#state 1: infection of healthy cell by cell-> latent
trans_vecs[0,0] = -1;
trans_vecs[1,0] = 1;
#state 2: infection of healthy cell by virus -> latent
trans_vecs[0,1] = -1;
trans_vecs[1,1] = 1;
#state 3: infection of healthy cell by cell -> long-term latent
trans_vecs[0,2] = -1;
trans_vecs[2,2] = 1;
#state 4: infection of healthy cell by virus -> long-term latent
trans_vecs[0,3] = -1;
trans_vecs[2,3] = 1;
#state 5: death of healthy cell
trans_vecs[0,4] = -1;
trans_vecs[4,4] = 1;
#state 6: movement of latent cell into infected
trans_vecs[1,5] = -1;
trans_vecs[3,5] = 1;
#state 7: death of latent cell
trans_vecs[1,6] = -1;
trans_vecs[4,6] = 1;
#state 8: movement of long-term latent cell into infected
trans_vecs[2,7] = -1;
trans_vecs[3,7] = 1;
#state 9: death of long-term latent cell
trans_vecs[2,8] = -1;
trans_vecs[4,8] = 1;
#state 10: death of infected cell
trans_vecs[3,9] = -1;
trans_vecs[5,9] = 1;
#state 11: regeneration of dead healthy cell
trans_vecs[4,10] = -1;
trans_vecs[0,10] = 1;
#state 12: regeneration of dead infected cell into healthy cell
trans_vecs[5,11] = -1;
trans_vecs[0,11] = 1;
#state 13: regeneration of dead infected cell into infected cell
trans_vecs[5,12] = -1;
trans_vecs[3,12] = 1;
#Initialize state variable vectors
T = np.zeros(M)
E = np.zeros(M)
Ex = np.zeros(M)
I = np.zeros(M)
Dt = np.zeros(M)
Di = np.zeros(M)
VL = np.zeros(M)
ALT = np.zeros(M)
state_vec = np.zeros(M)
InfectionChain = [] # dict()
Infecteds = [] # dict()
#Initialize Infected Hepatocyte objects
InfectedDict = {}
for i in range(0,int(init/2)):
x = HCVHepatocyte(i, None, 'Initial', -1, 'Infected', 0)
InfectedDict[i] = x
for i in range(int(init/2),init):
x = HCVHepatocyte(i, None, 'Initial', -83, 'InfectedL', 0)
InfectedDict[i] = x
LatentDict = {}
LatentLDict = {}
DeadDict = {}
lastCellID = init-1 #get last cellID
#Input initial conditions
I[0] = init;
T[0] = N-init;
VL[0] = v_init
j =0
InfectionArray = []
while I[j] >= 0 and j<M-1:
#print [T[j],E[j],I[j],Dt[j],Di[j]]
#Update Q to reflect new number of infected cells and viruses
Q[0] = (1-eta)*beta_L*I[j]
Q[1] = (1-eta)*beta_V*VL[j]
Q[2] = eta*beta_L*I[j]
Q[3] = eta*beta_V*VL[j]
#Calculate transition matrix
Qij = Q*[T[j],T[j],T[j], T[j],T[j], E[j],E[j], Ex[j], Ex[j], I[j], Dt[j], Di[j], Di[j]]
#Draw from exponential distributions of waiting times
time_vec = -np.log(np.random.random(changes))/Qij
#np.random.exponential([1/Qij])[0]
#
#find minimum waiting time and obtain index to ascertain next state jump
newTime = min(time_vec)
time_vecL = time_vec.tolist()
state_idx = time_vecL.index(min(time_vecL))
state_vec[j] = state_idx
[T[j+1],E[j+1],Ex[j+1],I[j+1],Dt[j+1],Di[j+1]]=[T[j],E[j],Ex[j],I[j],Dt[j],Di[j]]+ trans_vecs[:,state_idx]
#make adjustments to hepatocyte dictionaries according to state transition
#Infection of healthy cell by cell or virus -> latent or longterm latent
if state_idx in [0,1,2,3]:
Infector = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell
newCellID = lastCellID + 1
lastCellID = newCellID
newLatent = CreateLatent(Infector, newCellID, state_idx, time[j])
#newLatent = CreateLatentNumba(Infector, newCellID, state_idx, time[j])
if state_idx in [0,1]:
LatentDict[newCellID] = newLatent
elif state_idx in [2,3]:
LatentLDict[newCellID] = newLatent
else:
print('Incorrect State')
#Latent cell becomes infectious
elif state_idx in [5,7]:
if state_idx == 5:
LatCell = LatentDict[random.choice(list(LatentDict.keys()))]
del LatentDict[LatCell.cellID] #remove cell from Latent Dict
elif state_idx == 7:
LatCell = LatentLDict[random.choice(list(LatentLDict.keys()))]
del LatentLDict[LatCell.cellID]
else:
print('Incorrect State')
InfectedDict[LatCell.cellID] = LatentInfectious(LatCell, time[j]) #add cell to Infected Dict
#Latent cell dies
elif state_idx == 6:
del LatentDict[random.choice(list(LatentDict.keys()))]
#LatentL cell dies
elif state_idx == 8:
del LatentLDict[random.choice(list(LatentLDict.keys()))]
#Infected cell dies
elif state_idx == 9:
KilledCell = InfectedDict[random.choice(list(InfectedDict.keys()))] #choose random infector cell
del InfectedDict[KilledCell.cellID]
KilledCell.cellType = 'Dead'
KilledCell.tDead = time[j]
#newDead = KillInfected(KilledCell,time[j])
#DeadDict[newDead.cellID] = newDead
DeadDict[KilledCell.cellID] = KilledCell
#Dead infected cell regenerates into health cell -- just delete from dead dict
elif state_idx == 11:
del DeadDict[random.choice(list(DeadDict.keys()))]
#Infected cell regenerated from Dead cell
elif state_idx == 12:
newCellID = lastCellID + 1
lastCellID = newCellID
DeadGen = DeadDict[random.choice(list(DeadDict.keys()))]
del DeadDict[DeadGen.cellID]
newInfected = HCVHepatocyte(newCellID,DeadGen.cellID,'DeadGen', DeadGen.tDead, 'Infected', time[j])
InfectedDict[newInfected.cellID] = newInfected
#Output Infection chain and infecteds at each time step
#check lengths of InfectionChain and Infecteds
if len(InfectionChain)< int(time[j])+1:
InfectionChain.append([])
if len(Infecteds) < int(time[j])+1:
Infecteds.append([])
#add to array of infections with timestep
if state_idx in [0,1,2,3]:
#if int(time[j]) in InfectionChain:
# InfectionChain[int(time[j])].append([Infector.cellID, newCellID])
#else:
# InfectionChain[int(time[j])] = [[Infector.cellID, newCellID]]
InfectionChain[int(time[j])].append([Infector.cellID, newCellID])
elif state_idx == 12:
#if int(time[j]) in InfectionChain:
# InfectionChain[int(time[j])].append([DeadGen.cellID, newInfected.cellID])
#else:
# InfectionChain[int(time[j])] = [DeadGen.cellID, newInfected.cellID]
InfectionChain[int(time[j])].append([DeadGen.cellID, newInfected.cellID])
#else:
# InfectionChain.append([])
#Infecteds.append(int([time[j]),list(InfectedDict.keys())])
#if int(time[j]) in Infecteds:
Infecteds[int(time[j])] = list(set(Infecteds[int(time[j])] + InfectedDict.keys() +LatentDict.keys() +LatentLDict.keys()))
#else:
# Infecteds[int(time[j])] = InfectedDict.keys() +LatentDict.keys() +LatentLDict.keys()
#update viral load and ALT
VL[j+1] = np.floor(rho*N_liver*(I[j+1]/N)*R/(gamma*c)) #VL[j] + (I[j]/N)*rho*N_liver*newTime - c*gamma*VL[j]*newTime #
ALT[j+1] = ALT[j] + (eps*(nu_T*(T[j] + E[j] + Ex[j]) + nu_I*I[j])-delta*ALT[j])*newTime
time[j+1] = time[j] + newTime
j+=1
# <codecell>
# <codecell>
#Sort Infecteds and Infection chain, and break up infection chain
InfectedsSort = dict()
for key, item in enumerate(Infecteds):
    InfectedsSort[key] = sorted(item)
InfectionChainSort = dict()
for key, item in enumerate(InfectionChain):
    a = sorted(list(item), key=lambda x: x[0])
    InfectionChainSort[key] = [b for c in a for b in c]
import csv
f = open('Infecteds.txt', 'w')
writer = csv.writer(f, delimiter = ' ')
for key, value in InfectedsSort.iteritems():
writer.writerow([key] + value)
f = open('InfectionChain.txt', 'w')
writer = csv.writer(f, delimiter = ' ')
for key, value in InfectionChainSort.iteritems():
writer.writerow([key] + value)
# <codecell>
plt.plot(time,T, label = 'Susc')
plt.plot(time,I, label = 'Infected')
plt.plot(time,Dt, label = 'Dead (healthy)')
plt.plot(time,Di, label = 'Dead (infected)')
plt.legend(loc = 'upper right')
# <codecell>
plt.plot(time,VL)
# <codecell>
from NonSpatialFns import *
# <codecell>
kwargs = {'T' : T, 'E' : E, 'Ex': Ex, 'I': I, 'Dt':Dt, 'Di' : Di, 'time' :time, 'VL':VL, 'ALT' : ALT, 'Infecteds' :Infecteds, 'InfectionChain' : InfectionChain}
saveWorkspace(kwargs)
# <codecell>
# Importing necessary packages for this project
import cv2
import numpy as np
import matplotlib.pyplot as plt
# Setting seed for reproducibility
UBIT = 'damirtha'
np.random.seed(sum([ord(c) for c in UBIT]))
# Function to apply a mask on an image
def pointMask(image, mask):
img_list = []
for img_row in range(int(len(mask)/2), len(image)-int(len(mask)/2)):
for img_col in range(int(len(mask[0])/2), len(image[0])-int(len(mask[0])/2)):
img_list.append(np.mean(np.multiply(image[img_row-int(len(mask)/2):img_row+int(len(mask)/2)+1,
img_col-int(len(mask[0])/2):img_col+int(len(mask[0])/2)+1]
, mask)))
return np.pad(np.array(img_list).reshape(-1,len(image[0])-len(mask[0])+1), int(len(mask)/2),'edge')
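# Hedged note (an addition, not from the original script): pointMask is
# essentially a correlation of `image` with `mask`; np.mean only rescales the
# response by the kernel size. A roughly equivalent OpenCV call, up to border
# handling, would be something like:
#
#   response = cv2.filter2D(image.astype(np.float64), -1, mask / mask.size)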
image=cv2.imread('Images/point.jpg', cv2.IMREAD_GRAYSCALE)
# Laplacian mask for point detection
mask = -np.array([[0,0,-1,0,0],[0,-1,-2,-1,0],[-1,-2,16,-2,-1],[0,-1,-2,-1,0],[0,0,-1,0,0]])
masked_image = pointMask(image, mask)
# Thresholding the masked image to get good points
x, y = np.where(masked_image>np.max(masked_image)*0.9)
image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
print("Points detected: ")
for i, j in zip(x, y):
cv2.circle(image, (j,i), 5, [0,0,255], thickness=2, lineType=8, shift=0)
print((i,j))
cv2.imwrite('Results/res_point.jpg',image)
<gh_stars>0
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Feature analysis functionality.
"""
import logging
from math import log
import numbers
import random
import _registries
import _tokenizer
import _transforms
import apache_beam as beam
from apache_beam.typehints import Dict
from apache_beam.typehints import Tuple
from apache_beam.typehints import Union
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
import numpy as np
from google.cloud.ml.util import _dataflow as dfutil
class _ExtractValues(beam.DoFn):
"""Extract values from all feature columns."""
def __init__(self, sorted_feature_columns):
self._sorted_feature_columns = sorted_feature_columns
# TODO(user): Remove the context param and try catch after sdk update
def start_bundle(self, context=None):
self._extractors = [
_registries.analyzer_registry.get_analyzer(column).extract_value
for column in self._sorted_feature_columns
]
def process(self, element):
try:
element = element.element
except AttributeError:
pass
try:
instance = element
yield [
extract_value(instance, column_index)
for column_index, extract_value in enumerate(self._extractors)
]
except Exception as ex: # pylint: disable=broad-except
try:
yield beam.pvalue.TaggedOutput('errors', (ex, element))
except AttributeError:
yield beam.pvalue.SideOutputValue('errors', (ex, element))
class AnalyzeData(beam.PTransform):
"""A PTransform to analyze feature data to create metadata for preprocessing.
The input to this PTransform is a PCollection representing the source dataset,
  with each element of the collection being a dictionary whose keys
  correspond to the columns referenced in the feature spec provided when
  constructing this transform.
"""
def __init__(self,
features,
input_format=None,
format_metadata=None,
error_threshold=0,
return_bad_elements=False):
"""Construct an AnalyzeData PTransform.
Args:
features: A list of Features for the data.
input_format: Optional, whether the input was csv or json.
format_metadata: Optional, arguments to store in the metadata for the
input_format.
error_threshold: How many errors are allowed before the job fails.
return_bad_elements: Should elements with errors be returned as a side
output. Defaults to False.
"""
super(AnalyzeData, self).__init__('Analyze Data')
self._features = features
self._format = input_format
self._format_metadata = format_metadata
self._error_threshold = error_threshold
self._return_bad_elements = return_bad_elements
self._sorted_feature_columns = _transforms.sorted_columns_from_features(
self._features)
# TODO(b/33677990): Remove apply method.
def apply(self, data):
return self.expand(data)
def expand(self, data):
"""Analyzes each of the columns in the feature spec to generate metadata.
Args:
data: The input PCollection.
Returns:
Just the metadata if return_bad_elements is False, otherwise a tuple of
the metadata and the bad elements side output.
"""
rows, errors = data | 'Extract Columns' >> beam.ParDo(
_ExtractValues(self._sorted_feature_columns)).with_outputs(
'errors', main='rows')
_ = data | dfutil.CountPCollection('ml-analyze-input')
_ = errors | dfutil.CountPCollection('ml-analyze-errors')
_ = (errors, data) | dfutil.CheckErrorThreshold(self._error_threshold)
analysis_list = []
combine_fn_analyzers = {}
for ix, column in enumerate(self._sorted_feature_columns):
analyzer = _registries.analyzer_registry.get_analyzer(column)
if isinstance(analyzer, CombineFnColumnAnalyzer):
combine_fn_analyzers[ix] = analyzer
else:
values = rows | 'extract_%s' % column.name >> beam.Map(
lambda row, ix=ix: row[ix])
analysis_list.append(values | analyzer)
if combine_fn_analyzers:
analysis_list.append(rows | 'Analyze CombineFn Features' >>
_MultiColumnAnalyzer(combine_fn_analyzers))
columns = analysis_list | beam.Flatten() | beam.combiners.ToDict()
metadata = columns | 'Generate Metadata' >> beam.Map(self._create_metadata)
if self._return_bad_elements:
return metadata, errors
else:
return metadata
def _get_version(self):
# Version numbers are stored in the top level package.
# Which we can't import at the top as it would be a circular reference.
import google.cloud.ml as ml # pylint: disable=g-import-not-at-top
return ml.__version__
def _create_metadata(self, columns):
features = {}
stats = {}
metadata = {
'sdk_version': self._get_version(),
'columns': columns,
'features': features,
'stats': stats,
}
if self._format:
metadata['format'] = self._format
if self._format_metadata:
metadata[self._format] = self._format_metadata
for feature in self._features:
feature_size = 0
feature_type = 'dense'
feature_dtype = 'int64'
feature_cols = []
for feature_column in feature.columns:
column_name = feature_column.name
column = columns.get(column_name, None)
if not column:
logging.warning('%s not processed because it has no metadata',
column_name)
continue
value_type = column['type']
if value_type == 'target' and hasattr(feature_column, 'scenario'):
column['scenario'] = feature_column.scenario
transformer = _registries.transformation_registry.get_transformer(
column)
if transformer.dtype != 'int64':
# If we're combining an int with anything else, the "other" dtype
# takes precedence. For numeric columns, this will be 'float' and for
# anything else, this will likely be 'bytes'
# TODO(user). Some unexpected behaviour could result from the
# assignment of dtypes here (i.e. in the loop) with respect to
          # incompatible types getting combined mistakenly. At the time of
          # writing, b/32318252 has been opened to track refactoring this
          # logic so that it is clearer to the reader.
feature_dtype = transformer.dtype
if transformer.feature_type == 'sparse':
# If we're combining dense transforms with sparse transforms, the
# resulting feature will be sparse.
# TODO(user): Consider having an enum for 'sparse' and 'dense'
feature_type = 'sparse'
feature_size += transformer.feature_size
if value_type == 'key':
stats['instances'] = column['count']
elif value_type == 'target':
if 'vocab' in column:
stats['labels'] = len(column['vocab'])
if 'mean' in column:
stats['mean'] = column['mean']
feature_cols.append(column_name)
features[feature.name] = {
'name': feature.name,
'size': feature_size,
'type': feature_type,
'dtype': feature_dtype,
'columns': feature_cols
}
return metadata
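# A hedged sketch of how AnalyzeData is typically wired into a Beam pipeline
# (the feature spec, paths and the json import are placeholders/assumptions,
# not part of this module):
#
#   with beam.Pipeline() as p:
#     metadata = (p
#                 | 'Read' >> beam.io.ReadFromText('gs://bucket/train.json')
#                 | 'Parse' >> beam.Map(json.loads)
#                 | 'Analyze' >> AnalyzeData(features))
#     _ = metadata | 'Save' >> beam.Map(json.dumps) | beam.io.WriteToText('gs://bucket/metadata')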
class _MultiColumnAnalyzer(beam.PTransform):
def __init__(self, analyzers):
self._analyzers = analyzers
# TODO(b/33677990): Remove apply method.
def apply(self, rows):
return self.expand(rows)
def expand(self, rows):
value_indices, value_analyzers = zip(*self._analyzers.items())
assert all(
isinstance(analyzer, CombineFnColumnAnalyzer)
for analyzer in value_analyzers)
return (
rows
| 'Extract' >> beam.Map(lambda row: [row[ix] for ix in value_indices])
| 'Combine' >> beam.CombineGlobally(beam.combiners.TupleCombineFn(
*[a.combine_fn for a in value_analyzers])).without_defaults()
| 'PairWithName' >> beam.FlatMap(lambda combined_values: [ # pylint: disable=g-long-lambda
(a.column_name, a.combined_value_to_dict(c))
for a, c in zip(value_analyzers, combined_values)]))
class ColumnAnalyzer(beam.PTransform):
"""Base class for column analyzers.
"""
def __init__(self, column):
super(ColumnAnalyzer, self).__init__('Analyze ' + column.name)
self._column = column
def extract_value(self, instance, index):
"""Extracts the column value from an element (represented as a dict).
By default, extracts the value by column name, returning None if it does
not exist.
May be overridden to compute this value and/or throw an error if the
column value is not valid.
Args:
instance: The input instance to extract from.
index: The index for the feature column being analyzed.
Returns:
The column from this instance.
"""
return instance[index]
def _get_column_metadata(self):
"""Returns a dictionary of the needed metadata.
Sets name, type and transforms args if there are any.
Returns:
A dictionary of the needed metadata.
"""
column_metadata = {'name': self._column.name}
if self._column.default is not None:
column_metadata['default'] = self._column.default
if self._column.value_type:
column_metadata['type'] = self._column.value_type
transform_name = self._column._transform # pylint: disable=protected-access
if transform_name:
column_metadata['transform'] = transform_name
if transform_name and self._column.transform_args:
column_metadata[transform_name] = self._column.transform_args
return column_metadata
class IdentityColumnAnalyzer(ColumnAnalyzer):
"""This is the default analyzer, and only generates simple metatada.
Disregards the values and returns a PCollection with a single entry. A tuple
in the same format as all the other metadata.
"""
# TODO(b/33677990): Remove apply method.
def apply(self, values):
return self.expand(values)
def expand(self, values):
return ['empty'] | 'Identity Metadata' >> beam.Map(
self._ret_val) # run once
def _ret_val(self, _):
return (self._column.name, self._get_column_metadata())
class CombineFnColumnAnalyzer(ColumnAnalyzer):
"""Analyzes columns using a CombineFn.
Subclasses MUST NOT override the apply method, as an alternative
(cross-feature) PTransform may be used instead.
"""
def __init__(self, column, combine_fn, output_name='combined_value'):
assert self.apply.im_func is CombineFnColumnAnalyzer.apply.im_func, (
'Subclass %s of CombineFnColumnAnalyzer must not overload apply.' %
type(self))
super(CombineFnColumnAnalyzer, self).__init__(column)
self._combine_fn = combine_fn
self._output_name = output_name
@property
def combine_fn(self):
return self._combine_fn
@property
def column_name(self):
return self._column.name
# TODO(b/33677990): Remove apply method.
def apply(self, values):
return self.expand(values)
def expand(self, values):
return (
values
| beam.CombineGlobally(self._combine_fn).without_defaults()
|
beam.Map(lambda c: (self.column_name, self.combined_value_to_dict(c))))
def combined_value_to_dict(self, aggregate):
return dict(self._get_column_metadata(), **{self._output_name: aggregate})
@_registries.register_analyzer('key')
class IdColumnAnalyzer(CombineFnColumnAnalyzer):
"""Analyzes id columns to produce a count of instances.
"""
def __init__(self, column):
super(IdColumnAnalyzer, self).__init__(column,
beam.combiners.CountCombineFn(),
'count')
def combined_value_to_dict(self, count):
return {'name': self._column.name, 'type': 'key', 'count': count}
@_registries.register_analyzer('numeric')
@with_input_types(Union[int, long, float])
@with_output_types(Tuple[str, Dict[Union[str, unicode], float]])
class NumericColumnAnalyzer(CombineFnColumnAnalyzer):
"""Analyzes numeric columns to produce a min/max/mean statistics.
"""
def __init__(self, column):
super(NumericColumnAnalyzer, self).__init__(
column, self.MinMeanMax(getattr(column, 'log_base', None)))
def extract_value(self, instance, index):
value = instance[index]
if value is not None and not isinstance(value, numbers.Number):
return float(value)
else:
return value
def combined_value_to_dict(self, combined_value):
return dict(self._get_column_metadata(), **combined_value)
class MinMeanMax(beam.core.CombineFn):
"""Aggregator to combine values within a numeric column.
"""
def __init__(self, log_base=None):
self._log_base = log_base
def create_accumulator(self):
return (float('+inf'), float('-inf'), 0, 0)
def add_input(self, stats, element):
if element is None:
return stats
(min_value, max_value, total, count) = stats
if self._log_base:
element = log(element, self._log_base)
return (min(min_value, element), max(max_value, element), total + element,
count + 1)
def merge_accumulators(self, accumulators):
min_values, max_values, totals, counts = zip(*accumulators)
return (min(min_values), max(max_values), sum(totals), sum(counts))
def extract_output(self, stats):
(min_value, max_value, total, count) = stats
return {
'min': min_value,
'max': max_value,
'mean': 0 if count == 0 else total / float(count),
}
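  # Illustrative (hedged) behaviour of MinMeanMax used as a plain CombineFn
  # outside a pipeline; the values are made up:
  #
  #   fn = NumericColumnAnalyzer.MinMeanMax()
  #   acc = fn.create_accumulator()
  #   for v in (3.0, None, 7.0, 2.0):
  #     acc = fn.add_input(acc, v)
  #   fn.extract_output(acc)  # -> {'min': 2.0, 'max': 7.0, 'mean': 4.0}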
@_registries.register_analyzer('categorical')
@with_input_types(Union[str, unicode])
@with_output_types(Tuple[str, Dict[Union[str, unicode], float]])
class CategoricalColumnAnalyzer(ColumnAnalyzer):
"""Analyzes categorical columns to produce a dictionary of discrete values.
Returns a tuple (column_name, metadata_dictionary).
  (This will return an empty list if no values appear more than
  frequency_threshold times; see b/30843722.)
"""
def __init__(self, column):
super(CategoricalColumnAnalyzer, self).__init__(column)
    # Need to make these checks because not all columns have these
    # attributes. This is true for TargetFeatureColumns which get this analyzer
# by default if we're in a classification problem.
if hasattr(column, 'frequency_threshold'):
self._frequency_threshold = column.frequency_threshold
else:
self._frequency_threshold = 0
if hasattr(column, 'tokenizer_args'):
tokenizer_args = column.tokenizer_args
# Although create_flat_tokenizer also deals with empty split_regex, it is
      # useful to skip the tokenization step since it amounts to a noop.
if tokenizer_args and tokenizer_args['split_regex']:
# Create a tokenizer that matches the one used by the categorical column
# transform.
self._tokenizer_fn = _tokenizer.create_flat_tokenizer(
split_regex=tokenizer_args['split_regex'],
stop_words=tokenizer_args['stop_words'],
use_stemmer=tokenizer_args['use_stemmer'],
ngrams=tokenizer_args['ngrams'],
strip_html=tokenizer_args['strip_html'],
removable_tags=tokenizer_args['removable_tags'])
else:
self._tokenizer_fn = None
else:
self._tokenizer_fn = None
self._aggregator = CategoricalColumnAnalyzer.Aggregator(
self._get_column_metadata())
# TODO(b/33677990): Remove apply method.
def apply(self, values):
return self.expand(values)
def expand(self, values):
if self._tokenizer_fn:
values |= 'Tokenize Categorical' >> beam.FlatMap(self._tokenizer_fn)
values |= 'count' >> beam.combiners.Count.PerElement()
if self._frequency_threshold > 1:
values |= 'filter by threshold' >> beam.Filter(
lambda x: x[1] >= self._frequency_threshold)
return (values
| 'Analysis'
>> beam.core.CombineGlobally(self._aggregator).without_defaults())
class Aggregator(beam.core.CombineFn):
"""Aggregator to combine values within a categorical column.
"""
def __init__(self, column):
self._column = column
def create_accumulator(self):
return set()
def add_input(self, accumulator, element):
if element[0] is not None:
accumulator.add(element[0])
return accumulator
def merge_accumulators(self, accumulators):
return set.union(*accumulators)
def extract_output(self, accumulator):
items = dict(zip(sorted(accumulator), xrange(len(accumulator))))
column = self._column
column['vocab'] = items
column['idf'] = {}
return (self._column['name'], column)
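    # For example, if the counted elements were ('dog', 3), ('cat', 5) and ('bird', 2),
    # the accumulator ends up as {'dog', 'cat', 'bird'} and extract_output maps the
    # sorted values to indices, i.e. vocab == {'bird': 0, 'cat': 1, 'dog': 2}, with an
    # empty 'idf' dict, keyed under the column name.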
@_registries.register_analyzer('text')
@with_input_types(Union[str, unicode])
@with_output_types(Tuple[str, Dict[Union[str, unicode], float]])
class TextColumnAnalyzer(ColumnAnalyzer):
"""Analyzes text columns to produce a dict and mapping of words to indices.
"""
def __init__(self, column):
super(TextColumnAnalyzer, self).__init__(column)
self._tokenizer_fn = _tokenizer.create_flat_tokenizer(
split_regex=column.split_regex,
stop_words=column.stop_words,
use_stemmer=column.use_stemmer,
ngrams=column.ngrams,
strip_html=column.strip_html,
removable_tags=column.removable_tags)
self._aggregator = TextColumnAnalyzer.Aggregator(self._get_column_metadata(
))
self._word2vec_dict = column.word2vec_dict
if not self._word2vec_dict:
self._n = column.transform_args['vocab_size']
self._sampling_percentage = column.sampling_percentage
self._ngrams = column.ngrams
self._use_tf_idf = column.use_tf_idf
self._frequency_threshold = column.frequency_threshold
# TODO(b/33677990): Remove apply method.
def apply(self, values):
return self.expand(values)
def expand(self, values):
if self._sampling_percentage < 100.0:
values |= 'Sampling %s/100' % self._sampling_percentage >> beam.ParDo(
SamplingFn(self._sampling_percentage))
ngram_list_list = values | 'Tokenize index' >> beam.Map(self._tokenizer_fn)
if self._word2vec_dict:
max_doc_size = beam.pvalue.AsSingleton(
self._get_max_tokens_in_doc(ngram_list_list))
metadata_column = (self._column.name, self._get_column_metadata())
return [metadata_column] | 'create metadata' >> beam.Map(
self._add_doc_size, max_doc_size)
ngram_counts = (ngram_list_list
| 'FlatMap' >> beam.FlatMap(lambda x: x)
| 'Count' >> beam.combiners.Count.PerElement())
if self._frequency_threshold > 1:
ngram_counts |= ('Filter categories' >>
beam.Filter(lambda a: a[1] >= self._frequency_threshold))
top_n_grams = (ngram_counts | 'TopNCount' >> beam.combiners.Top.Of(
self._n, compare=lambda a, b: (a[1], a[0]) < (b[1], b[0])))
vocab_grams = top_n_grams
vocab_column = vocab_grams | 'Analysis' >> beam.core.CombineGlobally(
self._aggregator).without_defaults()
if self._use_tf_idf:
docs_count = beam.pvalue.AsSingleton(values | 'Count Documents' >>
beam.combiners.Count.Globally())
vocab_set = vocab_column | 'Get Vocab Set' >> beam.Map(
lambda x: set(x[1]['vocab'].keys()))
idf_dict = self._get_idf_dict(ngram_list_list, vocab_set, docs_count)
return (idf_dict | beam.Map(self.convert_idf_dict,
beam.pvalue.AsSingleton(vocab_column)))
else:
return vocab_column
def _add_doc_size(self, column, max_doc_size):
(name, column_dict) = column
column_dict['word2vec']['max_doc_size'] = max_doc_size
return (name, column_dict)
def _get_idf_dict(self, ngram_list_list, vocab_set, docs_count):
return (ngram_list_list
            # per document, keep only the unique ngrams and flatten them into single elements
| 'Unique Ngrams per doc' >> beam.FlatMap(set)
| beam.combiners.Count.PerElement()
| 'Vocab Filter' >> beam.FlatMap(self.vocab_filter,
beam.pvalue.AsSingleton(vocab_set))
| 'compute idf' >> beam.ParDo(self.idf, docs_count)
| beam.combiners.ToDict())
def _get_max_tokens_in_doc(self, ngram_list_list):
return (ngram_list_list
| 'Count of words doc' >> beam.FlatMap(lambda x: [len(x)])
| beam.CombineGlobally(self.MaxFn()))
# TODO(user): Investigate doing this Max with native dataflow transforms.
def vocab_filter(self, kv, vocab):
(k, v) = kv
if k in vocab:
yield (k, v)
def convert_idf_dict(self, word_to_idf, column):
"""Convert idf dict from word-> idf score, to word_vocab_index-> idf score.
Args:
word_to_idf: Dictionary with word to idf mapping.
column: The metadata column.
Returns:
The column name and column dictionary.
"""
(column_name, column_dict) = column
id_to_idf = np.zeros(len(word_to_idf))
for word in word_to_idf.keys():
word_idx = column_dict['vocab'].get(word, -1)
if word_idx >= 0: # if word in final vocab
id_to_idf[word_idx] = word_to_idf[word]
column_dict['idf'] = id_to_idf.tolist()
return (column_name, column_dict)
def idf(self, kv, docs_count):
"""Calculate inverse document frequency for a word.
Args:
kv: key-value of (word, number of documents it appears in).
docs_count: number of total documents
Raises:
ValueError: If the number of documents is negative.
Yields:
A tuple of key and idf.
"""
(key, v) = kv
if v <= 0:
raise ValueError('Number of documents word %s appeared is %d' % (key, v))
idf = log(docs_count / float(v)) + 1 # +1 for smoothing - to avoid 0's
yield (key, idf)
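  # Worked example: with docs_count=100 and a word appearing in v=10 documents,
  # idf = log(100 / 10.0) + 1 = ln(10) + 1, roughly 3.30 (natural log, +1 smoothing).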
class MaxFn(beam.CombineFn):
"""A CombineFn to find the max of the input PCollection.
"""
def create_accumulator(self):
return -1
    def add_input(self, current_max, x):
      return max(current_max, x)
def merge_accumulators(self, accumulators):
return max(accumulators)
def extract_output(self, x):
return x
class Aggregator(beam.core.CombineFn):
"""Aggregator to combine values within a text column.
"""
def __init__(self, column):
self._column = column
def create_accumulator(self):
return set()
def add_input(self, accumulator, element):
for (word, _) in element:
if word is not None:
accumulator.add(word)
return accumulator
def merge_accumulators(self, accumulators):
return set.union(*accumulators)
def extract_output(self, accumulator):
vocab = dict(zip(sorted(accumulator), xrange(len(accumulator))))
column = self._column
column['vocab'] = vocab
column['idf'] = None
return (self._column['name'], column)
class SamplingFn(beam.DoFn):
def __init__(self, sampling_percentage):
super(SamplingFn, self).__init__('Sampling')
self._sampling_percentage = sampling_percentage
# TODO(user): Remove the try catch after sdk update
def process(self, element):
try:
element = element.element
except AttributeError:
pass
random_sample = random.uniform(0.0, 100.0)
if random_sample <= self._sampling_percentage:
yield element
|
StarcoderdataPython
|
3267403
|
import random
def Partition(A):
if (len(A)==1):
return 0
v = len(A)-1
i = 0
j = len(A)-2
while (i <= j):
if ( (A[i] < A[v]) and (A[j] >= A[v]) ):
i += 1
j -= 1
if ( (A[i] >= A[v]) and (A[j] < A[v]) ):
A[i], A[j] = A[j], A[i]
i += 1
j -= 1
if ( (A[i] < A[v]) and (A[j] < A[v]) ):
i += 1
if ( (A[i] >= A[v]) and (A[j] >= A[v]) ):
j -= 1
A[i], A[v] = A[v], A[i]
return i
def QuickSort(A):
p = Partition(A)
    # NOT: A[:p-1]
left = A[:p]
    # A[p] stays unchanged
right = A[p+1:]
if len(left) > 0:
QuickSort(left)
if len(right) > 0:
QuickSort(right)
    # NOT: A = left + mid + right
A[:p] = left
    # A[p] stays unchanged
A[p+1:] = right
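# Note on the in-place write-back above: `A[:p] = left` and `A[p+1:] = right`
# mutate the caller's list object, so the sorted halves are visible outside the
# recursive call. Rebinding the name instead (`A = left + [A[p]] + right`) would
# only change the local variable and leave the caller's list untouched.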
# list = [10, 9, 6, 3, 2]
list = random.choices(range(1,100), k=10)
print("Input:")
print(list)
print(sum(list))
print("Quick sort:")
QuickSort(list)
print(list)
print(sum(list))
|
StarcoderdataPython
|
178527
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from common.mymako import render_mako_context
import datetime
import json
from django.contrib.auth.models import Group
from django.core import serializers
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.db import connection, transaction
from django.db.models import Min, Avg, Max, Sum,Count
from django.http import HttpResponse
from common.mymako import render_mako_context,render_json
from home_application.models import TInspectPlan, AccountBkuser, PlatUser  # PlatUser is assumed to live in the same models module (it is referenced below)
def home(request):
"""
    Home page
"""
return render_mako_context(request, '/home_application/home.html')
def dev_guide(request):
"""
    Development guide
"""
return render_mako_context(request, '/home_application/dev_guide.html')
def contactus(request):
"""
    Contact us
"""
return render_mako_context(request, '/home_application/contact.html')
class ComplexEncoder2(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, datetime.date):
return obj.strftime('%Y-%m-%d')
else:
return json.JSONEncoder.default(self, obj)
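# Usage sketch: the encoder lets json.dumps serialize datetime values directly, e.g.
#   json.dumps({'ts': datetime.datetime(2017, 1, 2, 3, 4, 5)}, cls=ComplexEncoder2)
# produces '{"ts": "2017-01-02 03:04:05"}'.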
def getUserInfo(request):
userInfo={}
menuInfo = {}
loginInfo = {}
if request.user.is_authenticated():
user = request.user
# print current_user_set
# current_group_set = Group.objects.get(user=current_user_set)
# print current_group_set
# print current_user_set.get_group_permissions()
userInfo['username'] = user.username
userInfo['qq'] = user.qq
userInfo['id'] = user.id
userInfo['email'] = user.email
userInfo['fullname'] = 'administrator'
user = AccountBkuser.objects.filter(username='admin')
user = user[0]
user.__dict__.pop("_state")
ujs = json.dumps(user.__dict__, cls=ComplexEncoder2, ensure_ascii=False)
menu =[] #TPlanMenu.objects.all()
L = []
for p in menu:
p.__dict__.pop("_state")
L.append(p.__dict__)
dict =[] # TPlanDict.objects.all()
list = []
for p in dict:
p.__dict__.pop("_state")
list.append(p.__dict__)
loginInfo['menus'] = L
loginInfo['dict'] = list
loginInfo['userInfo'] = user.__dict__
json_data = json.dumps(loginInfo, cls=ComplexEncoder2, ensure_ascii=False)
return HttpResponse(json_data, content_type='application/json')
def userList(request):
index = request.GET.get('index')
size = request.GET.get('size')
userid = request.GET.get('userid')
fromdate = request.GET.get('fromdate')
todate = request.GET.get('todate')
type = request.GET.get('type')
menu =AccountBkuser.objects.all().order_by('-id')
maxid = AccountBkuser.objects.aggregate(Count('id'))
maxid = maxid['id__count']
paginator = Paginator(menu, size) # Show 25 contacts per page
menu = paginator.page(index)
L = []
for p in menu:
p.__dict__.pop("_state")
L.append(p.__dict__)
json_data = json.dumps(L, cls=ComplexEncoder2,
                           ensure_ascii=False)  # serializers.serialize("json", menu, ensure_ascii=False,encoding='UTF-8') # data only, no paging info
json_data = json_data.replace("null", '""');
result = {'success': True, 'rows': json.loads(json_data), 'pageIndex': index, 'pageSize': size, 'pageCount': 5,
'total': maxid}
return render_json(result)
def inpectList(request):
index = request.GET.get('index')
size = request.GET.get('size')
userid = request.GET.get('userid')
fromdate = request.GET.get('fromdate')
todate = request.GET.get('todate')
type = request.GET.get('type')
menu = TInspectPlan.objects.all().order_by('-id')
maxid = TInspectPlan.objects.aggregate(Count('id'))
maxid = maxid['id__count']
paginator = Paginator(menu, size) # Show 25 contacts per page
menu = paginator.page(index)
L = []
for p in menu:
p.__dict__.pop("_state")
L.append(p.__dict__)
json_data = json.dumps(L, cls=ComplexEncoder2,
                           ensure_ascii=False)  # serializers.serialize("json", menu, ensure_ascii=False,encoding='UTF-8') # data only, no paging info
json_data = json_data.replace("null", '""');
result = {'success': True, 'rows': json.loads(json_data), 'pageIndex': index, 'pageSize': size, 'pageCount': 5,
'total': maxid}
return render_json(result)
def userDetail(request):
id = request.GET.get('id')
user = PlatUser.objects.filter(id=id)
user = user[0]
user.__dict__.pop("_state")
userinfo = {}
userinfo['obj'] = user.__dict__
json_data = json.dumps(userinfo, cls=ComplexEncoder2, ensure_ascii=False)
json_data = json_data.replace("null", '""');
return HttpResponse(json_data, content_type='application/json')
def taskDetail(request):
id = request.GET.get('id')
user = PlatUser.objects.filter(id=id)
user = user[0]
user.__dict__.pop("_state")
userinfo = {}
userinfo['obj'] = user.__dict__
json_data = json.dumps(userinfo, cls=ComplexEncoder2, ensure_ascii=False)
json_data = json_data.replace("null", '""');
return HttpResponse(json_data, content_type='application/json')
def inpectSave(request):
dictStr = request.GET.get('json')
dict = json.loads(dictStr)
if hasattr(dict, 'pid'):
dict.pid = -1
else:
dict['pid'] = -1
maxid = TInspectPlan.objects.aggregate(Max('id'))
maxid = maxid['id__max']
autoid = 'D{0:0>6}'.format(7)
if dict['id'] > 0:
res = TInspectPlan(id=dict['id'],cron_expression=dict['cron_expression'],ip=dict['ip'], plan_name=dict['plan_name'], begin_time=dict['begin_time'],
end_time=dict['end_time'], sys_type=dict['sys_type'], plan_status=0, plan_desc=dict['plan_desc'],create_time=datetime.datetime.now()).save()
else:
res = TInspectPlan(cron_expression=dict['cron_expression'], ip=dict['ip'], plan_name=dict['plan_name'],
begin_time=dict['begin_time'],
end_time=dict['end_time'], sys_type=dict['sys_type'], plan_status=0, plan_desc=dict['plan_desc'],
create_time=datetime.datetime.now()).save()
if res:
result = {'success': True, 'msg': 'ok'}
else:
result = {'success': True, 'msg': 'ok'}
return render_json(result)
def userSave(request):
dictStr = request.GET.get('json')
dict = json.loads(dictStr)
maxid = AccountBkuser.objects.aggregate(Max('id'))
maxid = maxid['id__max']
autoid = 'D{0:0>6}'.format(7)
if dict['id']>0:
res = AccountBkuser(id=dict['id'],chname=dict['chname'],qq=dict['qq'], phone=dict['phone'], username=dict['username'],
is_superuser=dict['is_superuser'], email=dict['email'], is_staff=0, company=dict['company'],
date_joined=datetime.datetime.now(), last_login=datetime.datetime.now()).save()
else:
res = AccountBkuser(chname=dict['chname'], qq=dict['qq'], phone=dict['phone'], username=dict['username'],
is_superuser=dict['is_superuser'], email=dict['email'], is_staff=0, company=dict['company'],date_joined=datetime.datetime.now(),last_login=datetime.datetime.now()).save()
if res:
result = {'success': True, 'msg': 'ok'}
else:
result = {'success': True, 'msg': 'ok'}
return render_json(result)
def inpectDelIds(request):
ids = request.GET.get('ids')
ids = ids.split(',')
for id in ids:
TInspectPlan.objects.get(id=id).delete()
result = {"success": True, "message": "删除成功"}
return render_json(result)
def inpectDel(request):
id = request.GET.get('id')
TInspectPlan.objects.get(id=id).delete()
result = {"success": True, "message": "删除成功"}
return render_json(result)
def userDel(request):
id = request.GET.get('id')
AccountBkuser.objects.get(id=id).delete()
result = {"success": True, "message": "删除成功"}
return render_json(result)
|
StarcoderdataPython
|
4839199
|
<gh_stars>100-1000
# -*- test-case-name: txdav.common.datastore.upgrade.sql.test -*-
##
# Copyright (c) 2011-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from txweb2.dav.resource import TwistedQuotaUsedProperty, TwistedGETContentMD5
from twisted.internet.defer import inlineCallbacks
from twistedcaldav import caldavxml, customxml
from twistedcaldav.config import config
from txdav.base.propertystore.base import PropertyName
from txdav.common.datastore.sql_tables import schema
from txdav.common.datastore.upgrade.sql.upgrades.util import updateCalendarDataVersion, \
removeProperty, cleanPropertyStore, logUpgradeStatus, doToEachHomeNotAtVersion
from txdav.xml import element
"""
Data upgrade from database version 4 to 5
"""
UPGRADE_TO_VERSION = 5
BATCH_SIZE = 100
@inlineCallbacks
def doUpgrade(sqlStore):
"""
Do the required upgrade steps.
"""
yield updateCalendarHomes(sqlStore, config.UpgradeHomePrefix)
# Don't do remaining upgrade if we are only process a subset of the homes
if not config.UpgradeHomePrefix:
yield removeOtherProperties(sqlStore)
# Always bump the DB value
yield updateCalendarDataVersion(sqlStore, UPGRADE_TO_VERSION)
@inlineCallbacks
def updateCalendarHomes(sqlStore, prefix=None):
"""
For each calendar home, update the associated properties on the home or its owned calendars.
"""
yield doToEachHomeNotAtVersion(sqlStore, schema.CALENDAR_HOME, UPGRADE_TO_VERSION, updateCalendarHome, "Update Calendar Home", filterOwnerUID=prefix)
@inlineCallbacks
def updateCalendarHome(txn, homeResourceID):
"""
For this calendar home, update the associated properties on the home or its owned calendars.
"""
home = yield txn.calendarHomeWithResourceID(homeResourceID)
yield moveCalendarTimezoneProperties(home)
yield moveCalendarAvailabilityProperties(home)
yield cleanPropertyStore()
@inlineCallbacks
def moveCalendarTimezoneProperties(home):
"""
Need to move all the CalDAV:calendar-timezone properties in the
RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
the new value from the XML property.
"""
# Iterate over each calendar (both owned and shared)
calendars = (yield home.loadChildren())
for calendar in calendars:
if calendar.isInbox():
continue
prop = calendar.properties().get(PropertyName.fromElement(caldavxml.CalendarTimeZone))
if prop is not None:
yield calendar.setTimezone(prop.calendar())
del calendar.properties()[PropertyName.fromElement(caldavxml.CalendarTimeZone)]
@inlineCallbacks
def moveCalendarAvailabilityProperties(home):
"""
Need to move all the CS:calendar-availability properties in the
RESOURCE_PROPERTY table to the new CALENDAR_BIND table columns, extracting
the new value from the XML property.
"""
inbox = (yield home.calendarWithName("inbox"))
if inbox is not None:
prop = inbox.properties().get(PropertyName.fromElement(customxml.CalendarAvailability))
if prop is not None:
yield home.setAvailability(prop.calendar())
del inbox.properties()[PropertyName.fromElement(customxml.CalendarAvailability)]
@inlineCallbacks
def removeOtherProperties(sqlStore):
"""
Remove the following properties:
DAV:acl
DAV:getcontenttype
DAV:resource-id
{urn:ietf:params:xml:ns:caldav}originator
{urn:ietf:params:xml:ns:caldav}recipient
{urn:ietf:params:xml:ns:caldav}supported-calendar-component-set
{http://calendarserver.org/ns/}getctag
{http://twistedmatrix.com/xml_namespace/dav/private/}quota-used
{http://twistedmatrix.com/xml_namespace/dav/}getcontentmd5
{http://twistedmatrix.com/xml_namespace/dav/}schedule-auto-respond
"""
logUpgradeStatus("Starting Calendar Remove Other Properties")
sqlTxn = sqlStore.newTransaction(label="calendar_upgrade_from_4_to_5.removeOtherProperties")
yield removeProperty(sqlTxn, PropertyName.fromElement(element.ACL))
yield removeProperty(sqlTxn, PropertyName.fromElement(element.GETContentType))
yield removeProperty(sqlTxn, PropertyName.fromElement(element.ResourceID))
yield removeProperty(sqlTxn, PropertyName(caldavxml.caldav_namespace, "originator"))
yield removeProperty(sqlTxn, PropertyName(caldavxml.caldav_namespace, "recipient"))
yield removeProperty(sqlTxn, PropertyName.fromElement(caldavxml.SupportedCalendarComponentSet))
yield removeProperty(sqlTxn, PropertyName.fromElement(customxml.GETCTag))
yield removeProperty(sqlTxn, PropertyName.fromElement(TwistedQuotaUsedProperty))
yield removeProperty(sqlTxn, PropertyName.fromElement(TwistedGETContentMD5))
yield removeProperty(sqlTxn, PropertyName(element.twisted_dav_namespace, "schedule-auto-respond"))
yield sqlTxn.commit()
yield cleanPropertyStore()
logUpgradeStatus("End Calendar Remove Other Properties")
|
StarcoderdataPython
|
178071
|
<filename>magenta/models/nsynth/wavenet/eval.py
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""The evaluation script.
This script requires tensorflow 1.1.0-rc1 or beyond.
As of 04/05/17 this requires installing tensorflow from source,
(https://github.com/tensorflow/tensorflow/releases)
So that it works locally, the default worker_replicas and total_batch_size are
set to 1. For training in 200k iterations, they both should be 32.
"""
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import os
import pickle
from magenta.models.nsynth import utils
slim = tf.contrib.slim
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("master", "",
"BNS name of the TensorFlow master to use.")
tf.app.flags.DEFINE_string("config", "h512_bo16", "Model configuration name")
tf.app.flags.DEFINE_integer("task", 0,
"Task id of the replica running the training.")
tf.app.flags.DEFINE_integer("worker_replicas", 1,
"Number of replicas. We train with 32.")
tf.app.flags.DEFINE_integer("ps_tasks", 0,
"Number of tasks in the ps job. If 0 no ps job is "
"used. We typically use 11.")
tf.app.flags.DEFINE_integer("total_batch_size", 1,
"Batch size spread across all sync replicas."
"We use a size of 32.")
tf.app.flags.DEFINE_integer("sample_length", 64000,
"Raw sample length of input.")
tf.app.flags.DEFINE_integer("num_evals", None,
"number of evauaitons -- None does entire dataset")
tf.app.flags.DEFINE_string("logdir", "/tmp/nsynth",
"The log directory for this experiment.")
tf.app.flags.DEFINE_string("checkpoint_dir", "/tmp/nsynth",
"Where the checkpoints are stored")
tf.app.flags.DEFINE_string("checkpoint_path", None,
"path of checkpoint -- if none use checkpoint_dir")
tf.app.flags.DEFINE_string("problem", "nsynth",
"Which problem setup (i.e. dataset) to use")
tf.app.flags.DEFINE_string("eval_path", "", "The path to the train tfrecord.")
tf.app.flags.DEFINE_string("log", "INFO",
"The threshold for what messages will be logged."
"DEBUG, INFO, WARN, ERROR, or FATAL.")
tf.app.flags.DEFINE_bool("vae", False,
"Whether or not to train variationally")
tf.app.flags.DEFINE_bool("small", False,
"Whether to use full model i.e. 30 layers in decoder/encoder or reduced model")
tf.app.flags.DEFINE_integer("asymmetric", 0,
"Whether to have equal number of layers in decoder/encoder or a weaker decoder")
tf.app.flags.DEFINE_bool("kl_annealing", False,
"Whether to use kl_annealing")
tf.app.flags.DEFINE_float("aux_coefficient", 0,
"coefficient for auxilliary loss")
tf.app.flags.DEFINE_float("annealing_loc", 1750.,
"params of normal cdf for annealing")
tf.app.flags.DEFINE_float("annealing_scale", 150.,
"params of normal cdf for annealing")
tf.app.flags.DEFINE_float("kl_threshold", None,
"Threshold with which to bound KL-Loss")
tf.app.flags.DEFINE_float("input_dropout", 1,
"How much dropout at input to add")
def main(unused_argv=None):
tf.logging.set_verbosity(FLAGS.log)
if FLAGS.config is None:
raise RuntimeError("No config name specified.")
if FLAGS.vae:
config = utils.get_module("wavenet." + FLAGS.config).VAEConfig(
FLAGS.eval_path, sample_length=FLAGS.sample_length, problem=FLAGS.problem, small=FLAGS.small, asymmetric=FLAGS.asymmetric, aux=FLAGS.aux_coefficient, dropout=FLAGS.input_dropout)
else:
config = utils.get_module("wavenet." + FLAGS.config).Config(
FLAGS.eval_path, sample_length=FLAGS.sample_length, problem=FLAGS.problem, small=FLAGS.small, asymmetric=FLAGS.asymmetric)
logdir = FLAGS.logdir
tf.logging.info("Saving to %s" % logdir)
with tf.Graph().as_default():
total_batch_size = FLAGS.total_batch_size
assert total_batch_size % FLAGS.worker_replicas == 0
worker_batch_size = total_batch_size / FLAGS.worker_replicas
# Run the Reader on the CPU
cpu_device = "/job:localhost/replica:0/task:0/cpu:0"
if FLAGS.ps_tasks:
cpu_device = "/job:worker/cpu:0"
with tf.device(cpu_device):
inputs_dict = config.get_batch(worker_batch_size, is_training=False)
with tf.device(
tf.train.replica_device_setter(ps_tasks=FLAGS.ps_tasks,
merge_devices=True)):
global_step = tf.get_variable(
"global_step", [],
tf.int32,
initializer=tf.constant_initializer(0),
trainable=False)
# build the model graph
outputs_dict = config.build(inputs_dict, is_training=False)
if FLAGS.vae:
if FLAGS.kl_annealing:
dist = tfp.distributions.Normal(loc=FLAGS.annealing_loc, scale=FLAGS.annealing_scale)
annealing_rate = dist.cdf(tf.to_float(global_step)) # how to adjust the annealing
else:
annealing_rate = 0.
kl = outputs_dict["loss"]["kl"]
rec = outputs_dict["loss"]["rec"]
aux = outputs_dict["loss"]["aux"]
tf.summary.scalar("kl", kl)
tf.summary.scalar("rec", rec)
tf.summary.scalar("annealing_rate", annealing_rate)
if FLAGS.kl_threshold is not None:
kl = tf.maximum(tf.cast(FLAGS.kl_threshold, dtype=kl.dtype), kl)
if FLAGS.aux_coefficient > 0:
tf.summary.scalar("aux", aux)
loss = rec + annealing_rate*kl + tf.cast(FLAGS.aux_coefficient, dtype=tf.float32)*aux
else:
loss = outputs_dict["loss"]
tf.summary.scalar("train_loss", loss)
labels = inputs_dict["parameters"]
x_in = inputs_dict["wav"]
batch_size, _ = x_in.get_shape().as_list()
predictions = outputs_dict["predictions"]
_, pred_dim = predictions.get_shape().as_list()
predictions = tf.reshape(predictions, [batch_size, -1, pred_dim])
encodings = outputs_dict["encoding"]
session_config = tf.ConfigProto(allow_soft_placement=True)
# Define the metrics:
if FLAGS.vae:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'eval/kl': slim.metrics.streaming_mean(kl),
'eval/rec': slim.metrics.streaming_mean(rec),
'eval/loss': slim.metrics.streaming_mean(loss),
'eval/predictions': slim.metrics.streaming_concat(predictions),
'eval/labels': slim.metrics.streaming_concat(labels),
'eval/encodings': slim.metrics.streaming_concat(encodings),
'eval/audio': slim.metrics.streaming_concat(x_in)
})
else:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
'eval/loss': slim.metrics.streaming_mean(loss),
'eval/predictions': slim.metrics.streaming_concat(predictions),
'eval/labels': slim.metrics.streaming_concat(labels),
'eval/encodings': slim.metrics.streaming_concat(encodings),
'eval/audio': slim.metrics.streaming_concat(x_in)
})
print('Running evaluation Loop...')
if FLAGS.checkpoint_path is not None:
checkpoint_path = FLAGS.checkpoint_path
else:
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
metric_values = slim.evaluation.evaluate_once(
num_evals=FLAGS.num_evals,
master=FLAGS.master,
checkpoint_path=checkpoint_path,
logdir=FLAGS.logdir,
eval_op=names_to_updates.values(),
final_op=names_to_values.values(),
session_config=session_config)
names_to_values = dict(zip(names_to_values.keys(), metric_values))
losses = {}
data_name = FLAGS.eval_path.split('/')[-1].split('.')[0]
outpath = os.path.join(FLAGS.logdir, data_name)
for k, v in names_to_values.items():
name = k.split('/')[-1]
if name in ['predictions', 'encodings', 'labels', 'audio']:
out = outpath+'-{}'.format(name)
if name == 'predictions':
v = np.argmax(v, axis = -1)
v = utils.inv_mu_law_numpy(v - 128)
np.save(out, v)
else:
losses[name] = v
out_loss = outpath+'-losses.pickle'
  with open(out_loss, 'wb') as w:
pickle.dump(losses, w)
def console_entry_point():
tf.app.run(main)
if __name__ == "__main__":
console_entry_point()
|
StarcoderdataPython
|
3307301
|
import numpy as np
import qpimage
from drymass import search
def test_basic():
size = 200
image = np.zeros((size, size), dtype=float)
x = np.arange(size).reshape(-1, 1)
y = np.arange(size).reshape(1, -1)
cx = 80
cy = 120
radius = 30
r = np.sqrt((x - cx)**2 + (y - cy)**2)
image[r < radius] = 1.3
rois = search.search_objects_base(image=image, size=2 * radius)
roi = rois[0]
assert np.allclose(roi.equivalent_diameter, 2 * radius, atol=.2, rtol=0)
assert np.allclose(roi.centroid, (cx, cy))
def test_bg_overlap():
size = 200
x = np.arange(size).reshape(-1, 1)
y = np.arange(size).reshape(1, -1)
# 5 px between regions
cx1 = 100
cy1 = 100
cx2 = 100
cy2 = 145
radius = 20
r1 = np.sqrt((x - cx1)**2 + (y - cy1)**2)
r2 = np.sqrt((x - cx2)**2 + (y - cy2)**2)
raw_pha = (r1 < radius) * 1.3
bg_pha = (r2 < radius) * 1.2
# create data set
qpi = qpimage.QPImage(data=raw_pha, bg_data=bg_pha,
which_data="phase",
meta_data={"pixel size": 1e-6})
slices1 = search.search_phase_objects(qpi=qpi,
size_m=2 * radius *
qpi["pixel size"],
exclude_overlap=0)
slices2 = search.search_phase_objects(qpi=qpi,
size_m=2 * radius * 1e-6,
exclude_overlap=10)
assert len(slices1) == 1
assert len(slices2) == 0
def test_padding():
size = 200
x = np.arange(size).reshape(-1, 1)
y = np.arange(size).reshape(1, -1)
cx = 80
cy = 120
radius = 30
r = np.sqrt((x - cx)**2 + (y - cy)**2)
image = (r < radius) * 1.3
pxsize = 1e-6
qpi = qpimage.QPImage(data=image,
which_data="phase",
meta_data={"pixel size": pxsize})
paddiff = 7
[slice1] = search.search_phase_objects(qpi=qpi,
size_m=2 * radius * pxsize,
pad_border=0,
)
[slice2] = search.search_phase_objects(qpi=qpi,
size_m=2 * radius * pxsize,
pad_border=paddiff,
)
[slice3] = search.search_phase_objects(qpi=qpi,
size_m=2 * radius * pxsize,
pad_border=100,
)
dx1 = slice1[0].stop - slice1[0].start
dy1 = slice1[1].stop - slice1[1].start
dx2 = slice2[0].stop - slice2[0].start
dy2 = slice2[1].stop - slice2[1].start
dx3 = slice3[0].stop - slice3[0].start
dy3 = slice3[1].stop - slice3[1].start
assert dx2 - dx1 == 2 * paddiff
assert dy2 - dy1 == 2 * paddiff
assert dx3 == size
assert dy3 == size
assert slice3[0].start == 0
assert slice3[1].start == 0
def test_threshold_float():
size = 200
image = np.zeros((size, size), dtype=float)
x = np.arange(size).reshape(-1, 1)
y = np.arange(size).reshape(1, -1)
cx = 80
cy = 120
radius = 30
r = np.sqrt((x - cx)**2 + (y - cy)**2)
image[r < radius] = 1.3
# test with correct threshold
rois = search.search_objects_base(image=image,
size=2*radius,
threshold=1)
roi = rois[0]
assert np.allclose(roi.equivalent_diameter, 2 * radius, atol=.2, rtol=0)
assert np.allclose(roi.centroid, (cx, cy))
# test with bad threshold
rois2 = search.search_objects_base(image=image,
size=2*radius,
threshold=2)
assert len(rois2) == 0
if __name__ == "__main__":
# Run all tests
loc = locals()
for key in list(loc.keys()):
if key.startswith("test_") and hasattr(loc[key], "__call__"):
loc[key]()
|
StarcoderdataPython
|
4808122
|
###############################################################################
# TransientLogSpiralPotential: a transient spiral potential
###############################################################################
import numpy
from ..util import conversion
from .planarPotential import planarPotential
_degtorad= numpy.pi/180.
class TransientLogSpiralPotential(planarPotential):
"""Class that implements a steady-state spiral potential
.. math::
        \\Phi(R,\\phi) = \\frac{\\mathrm{amp}(t)}{\\alpha}\\,\\cos\\left(\\alpha\\,\\ln R - m\\,(\\phi-\\Omega_s\\,t-\\gamma)\\right)
where
.. math::
\\mathrm{amp}(t) = \\mathrm{amp}\\,\\times A\\,\\exp\\left(-\\frac{[t-t_0]^2}{2\\,\\sigma^2}\\right)
"""
def __init__(self,amp=1.,omegas=0.65,A=-0.035,
alpha=-7.,m=2,gamma=numpy.pi/4.,p=None,
sigma=1.,to=0.,ro=None,vo=None):
"""
NAME:
__init__
PURPOSE:
initialize a transient logarithmic spiral potential localized
around to
INPUT:
amp - amplitude to be applied to the potential (default:
1., A below)
gamma - angle between sun-GC line and the line connecting the peak of the spiral pattern at the Solar radius (in rad; default=45 degree; can be Quantity)
           A - amplitude (alpha*potential-amplitude; default=-0.035; can be Quantity)
omegas= - pattern speed (default=0.65; can be Quantity)
m= number of arms
to= time at which the spiral peaks (can be Quantity)
sigma= "spiral duration" (sigma in Gaussian amplitude; can be Quantity)
Either provide:
a) alpha=
b) p= pitch angle (rad; can be Quantity)
OUTPUT:
(none)
HISTORY:
2011-03-27 - Started - Bovy (NYU)
"""
planarPotential.__init__(self,amp=amp,ro=ro,vo=vo)
gamma= conversion.parse_angle(gamma)
p= conversion.parse_angle(p)
A= conversion.parse_energy(A,vo=self._vo)
omegas= conversion.parse_frequency(omegas,ro=self._ro,vo=self._vo)
to= conversion.parse_time(to,ro=self._ro,vo=self._vo)
sigma= conversion.parse_time(sigma,ro=self._ro,vo=self._vo)
self._omegas= omegas
self._A= A
self._m= m
self._gamma= gamma
self._to= to
self._sigma2= sigma**2.
if not p is None:
self._alpha= self._m/numpy.tan(p)
else:
self._alpha= alpha
self.hasC= True
def _evaluate(self,R,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
2011-03-27 - Started - Bovy (NYU)
"""
return self._A*numpy.exp(-(t-self._to)**2./2./self._sigma2)\
/self._alpha*numpy.cos(self._alpha*numpy.log(R)
-self._m*(phi-self._omegas*t-self._gamma))
def _Rforce(self,R,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2010-11-24 - Written - Bovy (NYU)
"""
return self._A*numpy.exp(-(t-self._to)**2./2./self._sigma2)\
/R*numpy.sin(self._alpha*numpy.log(R)
-self._m*(phi-self._omegas*t-self._gamma))
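    # Consistency check of the expression above: with
    # Phi = amp(t)/alpha * cos(alpha*ln(R) - m*(phi - omegas*t - gamma)),
    # dPhi/dR = -amp(t)/R * sin(alpha*ln(R) - m*(...)), so the radial force
    # F_R = -dPhi/dR = amp(t)/R * sin(...), which is what is returned here
    # (the exponential factor is the Gaussian time envelope of amp(t)).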
def _phiforce(self,R,phi=0.,t=0.):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2010-11-24 - Written - Bovy (NYU)
"""
return -self._A*numpy.exp(-(t-self._to)**2./2./self._sigma2)\
/self._alpha*self._m*numpy.sin(self._alpha*numpy.log(R)
-self._m*(phi-self._omegas*t
-self._gamma))
def OmegaP(self):
"""
NAME:
OmegaP
PURPOSE:
return the pattern speed
INPUT:
(none)
OUTPUT:
pattern speed
HISTORY:
2011-10-10 - Written - Bovy (IAS)
"""
return self._omegas
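# Rough usage sketch (parameter values are illustrative, in galpy's internal units):
#   sp = TransientLogSpiralPotential(amp=1., omegas=0.65, A=-0.035, m=2, sigma=1., to=0.)
#   sp(1.0, phi=0.5, t=0.)   # potential at R=1, phi=0.5, evaluated at the peak time t=to
# The spiral amplitude dies off as a Gaussian in (t - to) with width sigma.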
|
StarcoderdataPython
|
3388374
|
from pypower import idx_bus, idx_gen, idx_brch
import numpy as np
from cim2busbranch import ext_pypower
pytest_plugins = 'cim2busbranch.test.support'
def test_create(case, ppc):
res = ext_pypower.create(case)
assert res['version'] == ppc['version']
assert res['baseMVA'] == ppc['baseMVA']
assert (res['bus'] == ppc['bus']).all()
assert (res['gen'] == ppc['gen']).all()
assert (res['branch'] == ppc['branch']).all()
def test_write_results_to_case(case, ppc):
ppc['bus'][0][idx_bus.VM] = 0.5
ppc['bus'][0][idx_bus.VA] = 0.3
ppc['bus'][1][idx_bus.PD] = 3.4
ppc['bus'][1][idx_bus.QD] = 4.2
ppc['gen'][0][idx_gen.PG] = 2.3
ppc['gen'][0][idx_gen.QG] = 1.7
ppc['branch'][0][idx_brch.PF] = 1.1
ppc['branch'][0][idx_brch.QF] = 1.2
ppc['branch'][0][idx_brch.PT] = 1.3
ppc['branch'][0][idx_brch.QT] = 1.4
ext_pypower.write_results_to_case(ppc, case)
assert case.buses[0].vm == 0.5
assert case.buses[0].va == 0.3
assert case.buses[0].pd == 1
assert case.buses[0].qd == 2
assert case.buses[1].vm == 15
assert case.buses[1].va == 16
assert case.buses[1].pd == 3.4
assert case.buses[1].qd == 4.2
assert case.generators[0].pg == 2.3
assert case.generators[0].qg == 1.7
assert case.branches[0].p_from == 1.1
assert case.branches[0].q_from == 1.2
assert case.branches[0].p_to == 1.3
assert case.branches[0].q_to == 1.4
def test__make_bus_list(case, ppc):
ret = ext_pypower._make_bus_list(case)
assert (ret == ppc['bus']).all()
def test__fill_bus_array(case, ppc):
for bc, bp in zip(case.buses, ppc['bus']):
ret = np.zeros(13, dtype=np.float64)
ext_pypower._fill_bus_array(ret, bc, case.bus_ids[bc])
assert (ret == bp).all()
def test__make_gen_list(case, ppc):
ret = ext_pypower._make_gen_list(case.generators, case.bus_ids)
assert (ret == ppc['gen']).all()
def test__fill_gen_array(case, ppc):
for gc, gp in zip(case.generators, ppc['gen']):
ret = np.zeros(21, dtype=np.float64)
ext_pypower._fill_gen_array(ret, gc, case.bus_ids)
assert (ret == gp).all()
def test__make_branch_list(case, ppc):
ret = ext_pypower._make_branch_list(case.branches, case.bus_ids)
assert (ret == ppc['branch']).all()
def test__fil_branch_array(case, ppc):
for bc, bp in zip(case.branches, ppc['branch']):
ret = np.zeros(17, dtype=np.float64)
ext_pypower._fill_branch_array(ret, bc, case.bus_ids)
assert (ret == bp).all()
|
StarcoderdataPython
|
3231714
|
import enum
import math
import numpy as np
from pylot.control.utils import get_angle
class BehaviorPlannerState(enum.Enum):
""" States in which the FSM behavior planner can be in."""
READY = 1
KEEP_LANE = 2
PREPARE_LANE_CHANGE_LEFT = 3
LANGE_CHANGE_LEFT = 4
PREPARE_LANE_CHANGE_RIGHT = 5
LANE_CHANGE_RIGHT = 6
def get_xy_vector_dist(loc1, loc2):
    vec = np.array([loc1.x, loc1.y]) - np.array([loc2.x, loc2.y])
dist = math.sqrt(vec[0]**2 + vec[1]**2)
if abs(dist) < 0.00001:
return vec, dist
else:
return vec / dist, dist
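# Example: for loc1=(3, 4) and loc2=(0, 0) this returns the unit direction vector
# [0.6, 0.8] together with the distance 5.0; for (near-)coincident points the raw,
# unnormalized vector is returned to avoid dividing by a value close to zero.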
def get_waypoint_vector_and_angle(wp_transform, ego_transform):
wp_vector, wp_mag = get_xy_vector_dist(
wp_transform.location,
ego_transform.location)
if wp_mag > 0:
wp_angle = get_angle(
wp_vector,
[ego_transform.orientation.x, ego_transform.orientation.y])
else:
wp_angle = 0
return wp_vector, wp_angle
def get_distance(loc1, loc2):
""" Computes the Euclidian distance between two 2D points."""
x_diff = loc1.x - loc2.x
y_diff = loc1.y - loc2.y
return math.sqrt(x_diff**2 + y_diff**2)
|
StarcoderdataPython
|
130042
|
<filename>monitoring/monitorlib/locality.py
from enum import Enum
class Locality(str, Enum):
"""Operating locations and their respective regulation and technical variations."""
CHE = 'CHE'
"""Switzerland"""
@property
def is_uspace_applicable(self) -> bool:
return self in {Locality.CHE}
@property
def allow_same_priority_intersections(self) -> bool:
return self in set()
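    # e.g. Locality.CHE.is_uspace_applicable is True, while
    # Locality.CHE.allow_same_priority_intersections is False (the set of localities
    # allowing same-priority intersections is currently empty).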
|
StarcoderdataPython
|
1761864
|
<reponame>athaun/Python-ai-assistant
import re
import time
import requests
import json
from jarvis.skills.skill import AssistantSkill
class LightSkills (AssistantSkill):
@classmethod
def toggle_light(cls, voice_transcript, skill, **kwargs):
"""
Toggles ceiling light on or off.
"""
try:
r = requests.post("http://192.168.1.20:5000/",
data=b"lightSwitch",
)
cls.response("TOGGLED LIGHT")
except Exception as e:
cls.console(error_log=e)
cls.response("Unable to toggle light.")
|
StarcoderdataPython
|
130188
|
<gh_stars>1-10
__author__ = 'Ivan'
import objectness_python
import tracker_python
from Dataset import VOT2015Dataset
import numpy as np
import matplotlib.pyplot as plt
import cv2
from matplotlib import gridspec
import re
import os
import time
import math
import copy
class ObjectnessVizualizer(object):
"""Class to perform objectness visualization"""
def __init__(self, dataset, superpixels = 200, inner=0.9):
"""Constructor for ObjectnessVizualizer"""
self.dataset = dataset
self.superpixels = superpixels
self.inner = inner
@staticmethod
def combinePlotsWithMean(full_image, H, img, mean, filename = None, axis_str = None):
gs = gridspec.GridSpec(1, 3, width_ratios=[4, 2, 2])
ax0 = plt.subplot(gs[0])
ax0.imshow(full_image)
ax0.axis('off')
zvals = np.array(H)
zvals2 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
#zvals = np.transpose(zvals)
zvals = np.flipud(zvals)
zvals2 = np.flipud(zvals2)
cmap1 = plt.cm.jet
cmap2 = plt.cm.gray
        cmap2._init() # create the _lut array, with rgba values
alphas = np.linspace(0, 0.6, cmap2.N+3)
cmap2._lut[:,-1] = alphas
ax1 = plt.subplot(gs[1])
ax1.imshow(zvals, interpolation='nearest', cmap=cmap1, origin='lower')
ax1.imshow(zvals2, interpolation='nearest', cmap=cmap2, origin='lower')
ax1.axis('off')
if axis_str is not None:
ax0.set_title(axis_str)
ax1.set_title("Straddling")
ax2=plt.subplot(gs[2])
ax2.matshow(mean)
ax2.axis('off')
ax2.set_title("Mean")
if filename is None:
#plt.show()
plt.draw()
time.sleep(1)
else:
plt.savefig(filename,bbox_inches='tight', dpi = 100)
plt.close()
@staticmethod
def combinePlots(full_image, H, img,filename = None, axis_str = None):
gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1])
ax0 = plt.subplot(gs[0])
ax0.imshow(full_image)
ax0.axis('off')
zvals = np.array(H)
#min_z = np.min(zvals.flatten(1))
#max_z = np.max(zvals.flatten(1))
#zvals = (zvals - min_z)/(max_z - min_z)
zvals2 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
#zvals = np.transpose(zvals)
zvals = np.flipud(zvals)
zvals2 = np.flipud(zvals2)
cmap1 = plt.cm.jet
cmap2 = plt.cm.gray
        cmap2._init() # create the _lut array, with rgba values
alphas = np.linspace(0, 0.6, cmap2.N+3)
cmap2._lut[:,-1] = alphas
ax1 = plt.subplot(gs[1])
ax1.imshow(zvals, interpolation='nearest', cmap=cmap1, origin='lower')
ax1.imshow(zvals2, interpolation='nearest', cmap=cmap2, origin='lower')
ax1.axis('off')
if axis_str is not None:
ax0.set_title(axis_str)
if filename is None:
#plt.show()
plt.draw()
time.sleep(1)
else:
plt.savefig(filename,bbox_inches='tight', dpi = 100)
plt.close()
@staticmethod
def correctDims(box, width, height, R):
min_x = max(box[0]-R, 0)
min_y = max(box[1]-R, 0)
max_x = min(box[0]+R +box[2], width -1)
max_y = min(box[1]+R+box[3], height -1)
return (min_x, min_y, max_x, max_y)
@staticmethod
def drawRectangle(image, box, R):
n = image.shape[0]
m = image.shape[1]
c_x = n/2
c_y = m/2
pt1 = (max(c_y - R, box[2]/2), max(c_x - R, box[3]/2))
pt2 = (min(c_y + R, m - box[2]/2), min(c_x + R, n - box[3]/2))
cv2.rectangle(image, pt1, pt2, (0,255,100), 2)
return image
def evaluateImageAverageStraddling(self, video_number, frame_number = 0, saveFolder = None):
video = self.dataset.video_folders[video_number]
boxes = self.dataset.readGroundTruthAll(video)
print video
print len(boxes)
images = self.dataset.getListOfImages(video)
R = 60
scale_R = 60
min_size_half = 10
min_scales=-15
max_scales =8
downsample=1.03
shrink_one_size = 0
s=re.split('/',video)
video_name = s[len(s)-1]
fig = plt.figure(figsize=(8, 6))
plt.ion()
plt.show()
i = frame_number
obj = objectness_python.Objectness()
box=boxes[i]
im_name = images[i]
img = cv2.imread(im_name,1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height = img.shape[0]
width = img.shape[1]
(min_x, min_y, max_x, max_y) = self.correctDims(box, width, height, R)
small_image = img[min_y:max_y, min_x :max_x]
obj.readImage(im_name)
obj.smallImage(R, box[0], box[1], box[2], box[3])
a = obj.process(self.superpixels, self.inner, 0, R, scale_R, min_size_half, min_scales, max_scales,
downsample, shrink_one_size,
box[0], box[1], box[2], box[3])
#obj.plot()
c_x = box[0] - min_x + int(box[2]/2.0)
c_y = box[1] - min_y + int(box[3]/2.0)
counter = 1
# reshuffle the list a bit
a = a[1:min_scales] + [a[0]] + a[min_scales:len(a)]
sums =np.zeros((len(a[0]),len(a[0][0])))
counts = np.zeros((len(a[0]),len(a[0][0])))
normalized=list()
delay = 5
for H,i in zip(a, range(0,len(a))):
prevExists = (i-delay>=0)
if (prevExists):
objs_delay = np.array(a[i - delay])
objs = np.array(H)
mat = np.zeros((len(a[0]),len(a[0][0])))
print np.max(H)
for x in range(0,objs.shape[0]):
for y in range(0, objs.shape[1]):
# get the new data
if objs[x,y]!=0:
counts[x,y]= counts[x,y]+1
sums[x,y] = sums[x,y] +objs[x,y]
# keep the moving average moving
if prevExists:
sums[x,y] = sums[x,y] - objs_delay[x,y]
if (objs_delay[x,y]!=0):
counts[x,y] = counts[x,y] -1
if counts[x,y]!= 0:
mat[x,y] = sums[x,y] / float(counts[x,y])
normalized.append(mat)
for H,h in zip(normalized,a):
h=np.array(h)
image_full = copy.deepcopy(img)
small_image_copy = image_full[min_y:max_y, min_x :max_x]
if ( counter == 1):
half_width = box[2]/2.0
half_height = box[3]/2.0
width = box[2]
height = box[3]
else:
half_width = ((box[2]/2)*math.pow(downsample, min_scales + counter - 1))
half_height = ((box[3]/2)*math.pow(downsample, min_scales + counter - 1))
width = int(half_width*2)
height = int(half_height*2)
pt1=(int(c_x - half_width), int(c_y - half_height))
pt2=(int(c_x + half_width), int(c_y + half_height))
cv2.rectangle(image_full, (pt1[0]+min_x, pt1[1]+min_y),(pt2[0]+min_x, pt2[1]+min_y), (100,0,150), 2)
cv2.rectangle(image_full, (min_x, min_y), (max_x, max_y), (0,255,200),2)
small_image_copy = self.drawRectangle(small_image_copy, (0,0,width, height) , R)
print "processing image: ", " " , counter ,"/", len(a)
if saveFolder is not None:
directory = saveFolder + "/" + video_name+"/"
if not os.path.exists(directory):
os.makedirs(directory)
saveImage = directory+ str(1000 + counter) + ".png"
if(os.path.isfile(saveImage)):
counter = counter + 1
continue
else:
saveImage = None
axis_str = str(round(width/float(box[2])*100,2)) +"%"
self.combinePlotsWithMean(image_full, H, small_image_copy,h,filename = saveImage, axis_str=axis_str)
counter = counter + 1
plt.close()
def evaluateImage(self, video_number, frame_number = 0, saveFolder = None):
video = self.dataset.video_folders[video_number]
boxes = self.dataset.readGroundTruthAll(video)
print video
print len(boxes)
images = self.dataset.getListOfImages(video)
R = 60
scale_R = 60
min_size_half = 10
min_scales=-15
max_scales =8
downsample=1.03
shrink_one_size = 0
s=re.split('/',video)
video_name = s[len(s)-1]
fig = plt.figure(figsize=(8, 6))
plt.ion()
plt.show()
i = frame_number
obj = objectness_python.Objectness()
box=boxes[i]
im_name = images[i]
img = cv2.imread(im_name,1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height = img.shape[0]
width = img.shape[1]
(min_x, min_y, max_x, max_y) = self.correctDims(box, width, height, R)
small_image = img[min_y:max_y, min_x :max_x]
obj.readImage(im_name)
obj.smallImage(R, box[0], box[1], box[2], box[3])
a = obj.process(self.superpixels, self.inner, R, 0, scale_R, min_size_half, min_scales, max_scales,
downsample, shrink_one_size,
box[0], box[1], box[2], box[3])
#obj.plot()
c_x = box[0] - min_x + int(box[2]/2.0)
c_y = box[1] - min_y + int(box[3]/2.0)
counter = 1
mean =np.zeros((len(a[0]),len(a[0][0])))
counts = np.zeros((len(a[0]),len(a[0][0])))
for H in a:
objs = np.array(H)
for x in range(0,objs.shape[0]):
for y in range(0, objs.shape[1]):
if objs[x,y]!=0:
counts[x,y]= counts[x,y]+1
mean[x,y] = mean[x,y] +objs[x,y]
for x in range(0,objs.shape[0]):
for y in range(0, objs.shape[1]):
if counts[x,y]!=0:
mean[x,y] = mean[x,y]/float(counts[x,y])
for H in a:
image_full = copy.deepcopy(img)
small_image_copy = image_full[min_y:max_y, min_x :max_x]
if ( counter == 1):
half_width = box[2]/2.0
half_height = box[3]/2.0
width = box[2]
height = box[3]
else:
half_width = ((box[2]/2)*math.pow(downsample, min_scales + counter - 1))
half_height = ((box[3]/2)*math.pow(downsample, min_scales + counter - 1))
width = int(half_width*2)
height = int(half_height*2)
pt1=(int(c_x - half_width), int(c_y - half_height))
pt2=(int(c_x + half_width), int(c_y + half_height))
cv2.rectangle(image_full, (pt1[0]+min_x, pt1[1]+min_y),(pt2[0]+min_x, pt2[1]+min_y), (100,0,150), 2)
cv2.rectangle(image_full, (min_x, min_y), (max_x, max_y), (0,255,200),2)
small_image_copy = self.drawRectangle(small_image_copy, (0,0,width, height) , R)
print "processing image: ", " " , counter ,"/", len(a)
if saveFolder is not None:
directory = saveFolder + "/" + video_name+"/"
if not os.path.exists(directory):
os.makedirs(directory)
saveImage = directory+ str(1000 + counter) + ".png"
if(os.path.isfile(saveImage)):
counter = counter + 1
continue
else:
saveImage = None
axis_str = str(round(width/float(box[2])*100,2)) +"%"
self.combinePlotsWithMean(image_full, H, small_image_copy, mean,filename = saveImage, axis_str=axis_str)
counter = counter + 1
plt.close()
def evaluateDiscriminativeFunction(self, video_number, together=False, saveFolder=None):
video = self.dataset.video_folders[video_number]
boxes = self.dataset.readGroundTruthAll(video)
print video
print len(boxes)
images = self.dataset.getListOfImages(video)
bbox = boxes[0]
R = 60
scale_R = 60
min_size_half = 10
min_scales=0
max_scales =0
downsample=1.05
shrink_one_size = 0
s=re.split('/',video)
video_name = s[len(s)-1]
tracker = tracker_python.Antrack()
tracker.initializeTracker()
print images[0], bbox
tracker.initialize(images[0], bbox[0], bbox[1], bbox[2], bbox[3])
fig = plt.figure(figsize=(8, 6))
plt.ion()
plt.show()
for i in range(1, len(images)):
print "processing image: ", " " , i ,"/", len(images)
if saveFolder is not None:
directory = saveFolder + "/" + video_name+"/"
if not os.path.exists(directory):
os.makedirs(directory)
saveImage = directory+ str(1000 + i) + ".png"
if(os.path.isfile(saveImage)):
continue
else:
saveImage = None
box=boxes[i]
im_name = images[i]
img = cv2.imread(im_name,1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height = img.shape[0]
width = img.shape[1]
(min_x, min_y, max_x, max_y) = self.correctDims(box, width, height, R)
small_image = img[min_y:max_y, min_x :max_x]
im_name = images[i]
out = tracker.track(im_name)
if i == 100:
if together:
obj = objectness_python.Objectness()
obj.readImage(im_name)
obj.smallImage(R, box[0], box[1], box[2], box[3])
a_s = obj.processEdge(self.superpixels,self.inner, 0,
R, scale_R, min_size_half, min_scales, max_scales,
downsample, shrink_one_size,
box[0], box[1], box[2], box[3])
a_e = obj.processEdge(self.superpixels,self.inner, 0,
R, scale_R, min_size_half, min_scales, max_scales,
downsample, shrink_one_size,
box[0], box[1], box[2], box[3])
H_s=np.array(a_s[0])
H_e=np.array(a_e[0])
a = tracker.calculateDiscriminativeFunction(im_name)
H=np.array(a)
H=H[min_x:max_x, min_y :max_y]
H = np.transpose(H)
if together:
min_z = np.min(H.flatten(1))
max_z = np.max(H.flatten(1))
H = (H - min_z)/(max_z - min_z)
H = H + 0.3* H_s + 0.3 * H_e
print H.shape
self.combinePlots(img, H, small_image, saveImage)
def evaluateVideoEdge(self, video_number, saveFolder=None):
video = self.dataset.video_folders[video_number]
boxes = self.dataset.readGroundTruthAll(video)
print video
print len(boxes)
images = self.dataset.getListOfImages(video)
R = 60
scale_R = 60
min_size_half = 10
min_scales=0
max_scales =0
downsample=1.05
shrink_one_size = 0
s=re.split('/',video)
video_name = s[len(s)-1]
fig = plt.figure(figsize=(8, 6))
plt.ion()
plt.show()
for i in range(0, len(images)):
print "processing image: ", " " , i ,"/", len(images)
if saveFolder is not None:
directory = saveFolder + "/" + video_name+"/"
if not os.path.exists(directory):
os.makedirs(directory)
saveImage = directory+ str(1000 + i) + ".png"
if(os.path.isfile(saveImage)):
continue
else:
saveImage = None
obj = objectness_python.Objectness()
box=boxes[i]
im_name = images[i]
img = cv2.imread(im_name,1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height = img.shape[0]
width = img.shape[1]
(min_x, min_y, max_x, max_y) = self.correctDims(box, width, height, R)
small_image = img[min_y:max_y, min_x :max_x]
obj.readImage(im_name)
pt1=(box[0] - min_x, box[1] - min_y)
pt2=(box[0] - min_x + box[2], box[1] -min_y + box[3])
cv2.rectangle(small_image, pt1,pt2, (100,0,150), 2)
cv2.rectangle(img, (min_x, min_y), (max_x, max_y), (0,255,200),2)
small_image = self.drawRectangle(small_image, box , R)
obj.smallImage(R, box[0], box[1], box[2], box[3])
a = obj.processEdge(self.superpixels,self.inner, 0,
R, scale_R, min_size_half, min_scales, max_scales,
downsample, shrink_one_size,
box[0], box[1], box[2], box[3])
#obj.plot()
H = a[0]
print len(H), len(H[0])
self.combinePlots(img, H, small_image, saveImage)
def evaluateVideo(self, video_number, saveFolder=None):
video = self.dataset.video_folders[video_number]
boxes = self.dataset.readGroundTruthAll(video)
print video
print len(boxes)
images = self.dataset.getListOfImages(video)
R = 60
scale_R = 60
min_size_half = 10
min_scales=0
max_scales =0
downsample=1.05
shrink_one_size = 0
s=re.split('/',video)
video_name = s[len(s)-1]
fig = plt.figure(figsize=(8, 6))
plt.ion()
plt.show()
for i in range(0, len(images)):
print "processing image: ", " " , i ,"/", len(images)
if saveFolder is not None:
directory = saveFolder + "/" + video_name+"/"
if not os.path.exists(directory):
os.makedirs(directory)
saveImage = directory+ str(1000 + i) + ".png"
if(os.path.isfile(saveImage)):
continue
else:
saveImage = None
obj = objectness_python.Objectness()
box=boxes[i]
im_name = images[i]
img = cv2.imread(im_name,1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
height = img.shape[0]
width = img.shape[1]
(min_x, min_y, max_x, max_y) = self.correctDims(box, width, height, R)
small_image = img[min_y:max_y, min_x :max_x]
obj.readImage(im_name)
pt1=(box[0] - min_x, box[1] - min_y)
pt2=(box[0] - min_x + box[2], box[1] -min_y + box[3])
cv2.rectangle(small_image, pt1,pt2, (100,0,150), 2)
cv2.rectangle(img, (min_x, min_y), (max_x, max_y), (0,255,200),2)
#small_image = self.drawRectangle(small_image, box , R)
obj.smallImage(R, box[0], box[1], box[2], box[3])
a = obj.process(self.superpixels,self.inner, 0,
R, scale_R, min_size_half, min_scales, max_scales,
downsample, shrink_one_size,
box[0], box[1], box[2], box[3])
#obj.plot()
H = a[0]
self.combinePlots(img, H, small_image, saveImage)
def straddlingInTime(save = False):
root_folder = '/Users/Ivan/Code/Tracking/Antrack/matlab/vot-toolkit/vot2015/sequences'
vot = VOT2015Dataset(root_folder)
superpixels = 200
obj = ObjectnessVizualizer(vot)
#videos = [3, 30, 25]
videos = [3]
if save:
saveOutputFolder = '/Users/Ivan/Files/Results/Tracking/VOT2015_straddling_in_time'
else:
saveOutputFolder = None
for v in videos:
obj.evaluateVideo(v, saveOutputFolder)
def edgeDensityInTime(save = False):
root_folder = '/Users/Ivan/Code/Tracking/Antrack/matlab/vot-toolkit/vot2015/sequences'
vot = VOT2015Dataset(root_folder)
obj = ObjectnessVizualizer(vot)
#videos = [3, 30, 25]
videos = [3]
if save:
saveOutputFolder = '/Users/Ivan/Files/Results/Tracking/VOT2015_edgeDensity_in_time'
else:
saveOutputFolder = None
for v in videos:
obj.evaluateVideoEdge(v, saveOutputFolder)
def discriminativeFunctionInTime(together = True, save = False):
root_folder = '/Users/Ivan/Code/Tracking/Antrack/matlab/vot-toolkit/vot2015/sequences'
vot = VOT2015Dataset(root_folder)
obj = ObjectnessVizualizer(vot)
#videos = [3, 30, 25]
videos = [3]
if save:
saveOutputFolder = '/Users/Ivan/Files/Results/Tracking/VOT2015_discriminative_in_time'
else:
saveOutputFolder = None
for v in videos:
obj.evaluateDiscriminativeFunction(v,together=together, saveFolder=saveOutputFolder)
def straddlingInSpace( save = False):
root_folder = '/Users/Ivan/Code/Tracking/Antrack/matlab/vot-toolkit/vot2015/sequences'
vot = VOT2015Dataset(root_folder)
superpixels = 200
obj = ObjectnessVizualizer(vot, superpixels)
videos = [3, 30, 25]
#videos = [30]
if save:
saveOutputFolder = '/Users/Ivan/Files/Results/Tracking/VOT2015_straddling_in_space'
else:
saveOutputFolder = None
for v in videos:
obj.evaluateImage(v, saveFolder=saveOutputFolder)
def straddelingAverageInSpace(save = False):
root_folder = '/Users/Ivan/Code/Tracking/Antrack/matlab/vot-toolkit/vot2015/sequences'
vot = VOT2015Dataset(root_folder)
superpixels = 200
obj = ObjectnessVizualizer(vot, superpixels)
videos = [3, 30, 25]
videos = [30]
if save:
saveOutputFolder = '/Users/Ivan/Files/Results/Tracking/VOT2015_straddling_in_space_average'
else:
saveOutputFolder = None
for v in videos:
obj.evaluateImageAverageStraddling(v, saveFolder=saveOutputFolder)
if __name__ == "__main__":
discriminativeFunctionInTime(together=True, save=True)
#straddlingInTime(True)
#edgeDensityInTime(False)
|
StarcoderdataPython
|
121544
|
<gh_stars>0
total = caros = cont = 0
barato = ''
print('==' * 20)
print(' <NAME> ')
print('==' * 20)
while True:
    nome = str(input('Product name: ')).strip().title()
    preco = float(input('Price: R$ '))
    op = ' '
    while op not in 'YN':
        op = str(input('Do you want to continue? [Y/N] ')).strip().upper()[0]
total += preco
if preco > 1000:
caros += 1
if cont == 0:
barato = nome
p_barato = preco
cont += 1
elif preco < p_barato:
barato = nome
p_barato = preco
if op == 'N':
break
print('--' * 20)
print(f'Total spent on purchases was R${total:.2f}')
print(f'In total, {caros} products cost more than R$ 1000')
print(f'The cheapest product was {barato}, priced at R${p_barato:.2f}.')
|
StarcoderdataPython
|
3220643
|
import boto3
def get_s3_object_last_modified(bucket_name, prefix):
"""
Get last modified S3 object in specified bucket_name with prefix
:param str bucket_name: Name of bucket to chewck for last modified object
:param str prefix: Prefix of object key
:return Object: AWS S3 Object
"""
# Based on https://stackoverflow.com/a/62864288
s3 = boto3.client("s3")
paginator = s3.get_paginator("list_objects_v2")
page_iterator = paginator.paginate(Bucket=bucket_name, Prefix=prefix)
last_modified = None
for page in page_iterator:
if "Contents" in page:
last_modified2 = max(page["Contents"], key=lambda x: x["LastModified"])
if last_modified is None or last_modified2["LastModified"] > last_modified["LastModified"]:
last_modified = last_modified2
return last_modified
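# Example usage (illustrative sketch only): the bucket name and prefix below
# are hypothetical placeholders, and running this requires valid AWS credentials.
if __name__ == "__main__":
    newest = get_s3_object_last_modified("example-bucket", "logs/2021/")
    if newest is not None:
        print(newest["Key"], newest["LastModified"])
    else:
        print("No objects found under the given prefix")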
|
StarcoderdataPython
|
82994
|
import json
import os
import threading
import time
import socket
import getpass
from datetime import datetime
from wandb import util
import wandb
METADATA_FNAME = 'wandb-metadata.json'
class Meta(object):
"""Used to store metadata during and after a run."""
HEARTBEAT_INTERVAL_SECONDS = 15
def __init__(self, api, out_dir='.'):
self.fname = os.path.join(out_dir, METADATA_FNAME)
self._api = api
self._shutdown = False
try:
self.data = json.load(open(self.fname))
except (IOError, ValueError):
self.data = {}
self.lock = threading.Lock()
self.setup()
self._thread = threading.Thread(target=self._thread_body)
self._thread.daemon = True
def start(self):
self._thread.start()
def setup(self):
self.data["root"] = os.getcwd()
if self._api.git.enabled:
self.data["git"] = {
"remote": self._api.git.remote_url,
"commit": self._api.git.last_commit
}
self.data["email"] = self._api.git.email
self.data["root"] = self._api.git.root or self.data["root"]
self.data["startedAt"] = datetime.utcfromtimestamp(
wandb.START_TIME).isoformat()
self.data["host"] = socket.gethostname()
self.data["username"] = os.getenv("WANDB_USERNAME", getpass.getuser())
try:
import __main__
self.data["program"] = __main__.__file__
except (ImportError, AttributeError):
self.data["program"] = '<python with no main file>'
self.data["state"] = "running"
self.write()
def write(self):
self.lock.acquire()
try:
self.data["heartbeatAt"] = datetime.utcnow().isoformat()
with open(self.fname, 'w') as f:
s = util.json_dumps_safer(self.data, indent=4)
f.write(s)
f.write('\n')
finally:
self.lock.release()
def shutdown(self):
self._shutdown = True
try:
self._thread.join()
            # In case we never started it
except RuntimeError:
pass
def _thread_body(self):
seconds = 0
while True:
if seconds > self.HEARTBEAT_INTERVAL_SECONDS or self._shutdown:
self.write()
seconds = 0
if self._shutdown:
break
else:
time.sleep(2)
seconds += 2
|
StarcoderdataPython
|
3276205
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
from .. import crm
AVAILABLE_STATES = [
('draft','Draft'),
('open','Open'),
('cancel', 'Cancelled'),
('done', 'Closed'),
('pending','Pending')
]
MONTHS = [
('01', 'January'),
('02', 'February'),
('03', 'March'),
('04', 'April'),
('05', 'May'),
('06', 'June'),
('07', 'July'),
('08', 'August'),
('09', 'September'),
('10', 'October'),
('11', 'November'),
('12', 'December')
]
class crm_lead_report(osv.osv):
""" CRM Lead Analysis """
_name = "crm.lead.report"
_auto = False
_description = "CRM Lead Analysis"
_rec_name = 'deadline_day'
_columns = {
# grouping fields based on Deadline Date
'deadline_year': fields.char('Ex. Closing Year', size=10, readonly=True, help="Expected closing year"),
'deadline_month':fields.selection(MONTHS, 'Exp. Closing Month', readonly=True, help="Expected closing month"),
'deadline_day': fields.char('Exp. Closing Day', size=10, readonly=True, help="Expected closing day"),
# grouping fields based on Create Date
'creation_year': fields.char('Creation Year', size=10, readonly=True, help="Creation year"),
'creation_month': fields.selection(MONTHS, 'Creation Month', readonly=True, help="Creation month"),
'creation_day': fields.char('Creation Day', size=10, readonly=True, help="Creation day"),
# other date fields
'create_date': fields.datetime('Create Date', readonly=True),
'opening_date': fields.date('Opening Date', readonly=True),
'date_closed': fields.date('Close Date', readonly=True),
# durations
'delay_open': fields.float('Delay to Open',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to open the case"),
'delay_close': fields.float('Delay to Close',digits=(16,2),readonly=True, group_operator="avg",help="Number of Days to close the case"),
'delay_expected': fields.float('Overpassed Deadline',digits=(16,2),readonly=True, group_operator="avg"),
'user_id':fields.many2one('res.users', 'User', readonly=True),
'country_id':fields.many2one('res.country', 'Country', readonly=True),
'section_id':fields.many2one('crm.case.section', 'Sales Team', readonly=True),
'channel_id':fields.many2one('crm.case.channel', 'Channel', readonly=True),
'type_id':fields.many2one('crm.case.resource.type', 'Campaign', readonly=True),
'state': fields.selection(AVAILABLE_STATES, 'Status', size=16, readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'probability': fields.float('Probability',digits=(16,2),readonly=True, group_operator="avg"),
'planned_revenue': fields.float('Planned Revenue',digits=(16,2),readonly=True),
'probable_revenue': fields.float('Probable Revenue', digits=(16,2),readonly=True),
'stage_id': fields.many2one ('crm.case.stage', 'Stage', readonly=True, domain="[('section_ids', '=', section_id)]"),
'partner_id': fields.many2one('res.partner', 'Partner' , readonly=True),
'nbr': fields.integer('# of Cases', readonly=True),
'priority': fields.selection(crm.AVAILABLE_PRIORITIES, 'Priority'),
'type':fields.selection([
('lead','Lead'),
('opportunity','Opportunity'),
],'Type', help="Type is used to separate Leads and Opportunities"),
}
def init(self, cr):
"""
CRM Lead Report
@param cr: the current row, from the database cursor
"""
tools.drop_view_if_exists(cr, 'crm_lead_report')
cr.execute("""
CREATE OR REPLACE VIEW crm_lead_report AS (
SELECT
id,
to_char(c.date_deadline, 'YYYY') as deadline_year,
to_char(c.date_deadline, 'MM') as deadline_month,
to_char(c.date_deadline, 'YYYY-MM-DD') as deadline_day,
to_char(c.create_date, 'YYYY') as creation_year,
to_char(c.create_date, 'MM') as creation_month,
to_char(c.create_date, 'YYYY-MM-DD') as creation_day,
to_char(c.date_open, 'YYYY-MM-DD') as opening_date,
to_char(c.date_closed, 'YYYY-mm-dd') as date_closed,
c.state,
c.user_id,
c.probability,
c.stage_id,
c.type,
c.company_id,
c.priority,
c.section_id,
c.channel_id,
c.type_id,
c.partner_id,
c.country_id,
c.planned_revenue,
c.planned_revenue*(c.probability/100) as probable_revenue,
1 as nbr,
date_trunc('day',c.create_date) as create_date,
extract('epoch' from (c.date_closed-c.create_date))/(3600*24) as delay_close,
abs(extract('epoch' from (c.date_deadline - c.date_closed))/(3600*24)) as delay_expected,
extract('epoch' from (c.date_open-c.create_date))/(3600*24) as delay_open
FROM
crm_lead c
WHERE c.active = 'true'
)""")
crm_lead_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
StarcoderdataPython
|
132028
|
import pathlib
from setuptools import find_packages, setup
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / "README.md").read_text(encoding="utf-8")
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
name="chainlibpy",
version="1.0.0",
description="Tools for Crypto.com wallet management and offline transaction signing",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/crypto-com/chainlibpy",
author="Linfeng.Yuan",
author_email="<EMAIL>",
classifiers=[
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
],
keywords="CRO, blockchain, signature, crypto.com",
packages=find_packages(),
python_requires=">=3.6, <4",
install_requires=[
"ecdsa>=0.14.0, <0.17.0",
"bech32~=1.1.0",
"mnemonic>=0.19, <0.20",
"hdwallets~=0.1.0",
],
extras_require={
"test": ["pytest", "pytest-cov", "pytest-randomly"],
},
project_urls={
"Bug Reports": "https://github.com/crypto-com/chainlibpy/issues",
"Funding": "https://donate.pypi.org",
"Say Thanks!": "https://github.com/hukkinj1/cosmospy",
"Source": "https://github.com/crypto-com/chainlibpy",
},
)
|
StarcoderdataPython
|
3246886
|
"""
Copyright (C) Cortic Technology Corp. - All Rights Reserved
Written by <NAME> <<EMAIL>>, 2021
"""
from abc import abstractmethod
class BaseVisionProcessing:
def __init__(self, processor_type):
self.processor_type = processor_type
@abstractmethod
def config_worker(self, params):
pass
@abstractmethod
def preprocess_input(self, input_data):
pass
@abstractmethod
def run_inference(self, params):
pass
@abstractmethod
def postprocess_result(self, inference_outputs):
pass
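# Minimal subclass sketch (illustrative only): shows how the abstract interface
# above is meant to be filled in. The class name and pass-through logic are
# hypothetical; a real worker would wrap an actual inference engine.
class DummyVisionProcessing(BaseVisionProcessing):
    def config_worker(self, params):
        self.params = params
    def preprocess_input(self, input_data):
        return input_data
    def run_inference(self, params):
        return params
    def postprocess_result(self, inference_outputs):
        return inference_outputs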
|
StarcoderdataPython
|
21466
|
<filename>src/service/uri_generator.py
"""Generates pre-signed uri's for blob handling."""
from boto3 import client
import os
s3_client = client('s3')
def create_uri(repo_name, resource_oid, upload=False, expires_in=300):
"""Create a download uri for the given oid and repo."""
action = 'get_object'
if upload:
action = 'put_object'
params = {'Bucket': os.environ['LFS_S3_BUCKET_NAME'],
'Key': repo_name + '/' + resource_oid}
return s3_client.generate_presigned_url(action, Params=params,
ExpiresIn=expires_in)
def file_exists(repo_name, resource_oid):
"""Check if the file exists within the bucket."""
key = repo_name + '/' + resource_oid
response = s3_client.list_objects_v2(
Bucket=os.environ['LFS_S3_BUCKET_NAME'], Prefix=key)
for obj in response.get('Contents', []):
if obj['Key'] == key:
return True
return False
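# Example usage (sketch): the repo name and oid are hypothetical, and the calls
# assume LFS_S3_BUCKET_NAME is set and AWS credentials are available.
if __name__ == "__main__":
    oid = "0123456789abcdef"
    if file_exists("example-repo", oid):
        print(create_uri("example-repo", oid))
    else:
        print(create_uri("example-repo", oid, upload=True, expires_in=600))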
|
StarcoderdataPython
|
3376526
|
<reponame>uint0/pylicy
from typing import Any, Dict, List
import pytest
from hypothesis import given
from hypothesis import strategies as st
from pylicy import models, rules
def test_load_rules_bad_version() -> None:
with pytest.raises(AttributeError):
rules.load({}, [])
with pytest.raises(NotImplementedError):
rules.load({"version": 9999}, [])
@given(
st.lists(
st.fixed_dictionaries(
{
"name": st.text(),
"weight": st.integers(),
"resources": st.one_of(st.text(), st.lists(st.text())),
"policies": st.one_of(st.text(), st.lists(st.text())),
"description": st.one_of(st.none(), st.text()),
"context": st.one_of(st.none(), st.dictionaries(st.text(), st.text())),
}
)
),
st.lists(st.text()),
)
def test_load_v1_rules_correct_hypo(rule_set: List[Dict[str, Any]], policies: List[str]) -> None:
loaded = rules.load({"version": 1, "rules": rule_set}, policies)
assert all(isinstance(rule, models.Rule) for rule in loaded)
def test_load_v1_rules_correct() -> None:
assert rules.load({"version": 1, "rules": []}, []) == []
assert rules.load(
{
"version": 1,
"rules": [
{
"name": "enforce_all",
"weight": 1,
"resources": "*",
"policies": "*",
},
{
"name": "allow_admin_wildcards",
"description": "Allow admins to have wildcards",
"resources": "admin_*",
"policies": ["!token_no_wildcard"],
},
{
"name": "frank_extend_time",
"description": "Frank can have longer times betwen token rotation",
"resources": "frank_*",
"policies": ["token_age"],
"context": {"max_rotation_time": 365},
},
],
},
["token_age", "token_no_wildcard"],
) == [
models.Rule(
name="enforce_all",
description="enforce_all",
weight=1,
resource_patterns=["*"],
policy_patterns=["*"],
context=None,
),
models.Rule(
name="allow_admin_wildcards",
description="Allow admins to have wildcards",
weight=100,
resource_patterns=["admin_*"],
policy_patterns=["!token_no_wildcard"],
context=None,
),
models.Rule(
name="frank_extend_time",
description="Frank can have longer times betwen token rotation",
weight=100,
resource_patterns=["frank_*"],
policy_patterns=["token_age"],
context={"max_rotation_time": 365},
),
]
def test_resolve_user_rules() -> None:
assert rules.resolve_user_rule(
models.UserRule(
name="enforce_all",
weight=1,
resources="*",
policies="*",
),
["token_age", "token_no_wildcard"],
) == models.Rule(
name="enforce_all",
description="enforce_all",
weight=1,
resource_patterns=["*"],
policy_patterns=["*"],
context=None,
)
assert rules.resolve_user_rule(
models.UserRule(
name="allow_admin_wildcards",
description="Allow admins to have wildcards",
resources="admin_*",
policies=["!token_no_wildcard"],
),
["token_age", "token_no_wildcard"],
) == models.Rule(
name="allow_admin_wildcards",
description="Allow admins to have wildcards",
weight=100,
resource_patterns=["admin_*"],
policy_patterns=["!token_no_wildcard"],
context=None,
)
assert rules.resolve_user_rule(
models.UserRule(
name="frank_extend_time",
description="Frank can have longer times betwen token rotation",
resources="frank_*",
policies=["token_age"],
context={"max_rotation_time": 365},
),
["token_age", "token_no_wildcard"],
) == models.Rule(
name="frank_extend_time",
description="Frank can have longer times betwen token rotation",
weight=100,
resource_patterns=["frank_*"],
policy_patterns=["token_age"],
context={"max_rotation_time": 365},
)
def test_load_v1_rules_no_rules() -> None:
with pytest.raises(AttributeError):
rules.load({"version": 1}, [])
with pytest.raises(TypeError):
rules.load({"version": 1, "rules": {}}, [])
def test_load_v1_rules_invalid_rule() -> None:
with pytest.raises(TypeError):
rules.load({"version": 1, "rules": ["hello"]}, [])
with pytest.raises(ValueError):
rules.load({"version": 1, "rules": [{}]}, [])
with pytest.raises(ValueError):
rules.load(
{
"version": 1,
"rules": [
{
"name": "test",
"resources": [],
"policies": [],
"weight": "not a int",
}
],
},
[],
)
|
StarcoderdataPython
|
1753475
|
<filename>app.py
# This file is derived from this source: https://github.com/bhavaniravi/rasa-site-bot
# The original file is licensed under "the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or any later version."
# Hence this file is also licensed under GNU GPL version 3
# All other files within this repo (https://github.com/Glane13/OpenDataFinder) except app.py and bind.js are governed by an MIT licence
from flask import Flask
from flask import render_template,jsonify,request
import requests
import random
import sys
app = Flask(__name__)
app.secret_key = '12345'
@app.route('/')
def hello_world():
return render_template('home.html')
@app.route('/chat',methods=["POST"])
def chat():
user_message = request.form["text"]
response = requests.post('http://localhost:5005/webhooks/rest/webhook', json={"sender": "Graham","message":user_message})
    response = response.json()
    if response and response[0].get('text'):
        response = response[0]['text']
    else:
        response = "error: no response"
return jsonify({"status":"success","response": response})
app.config["DEBUG"] = True
if __name__ == "__main__":
app.run(port=8080)
|
StarcoderdataPython
|
1768740
|
# Used to create html file from function open_movies_page
import fresh_tomatoes
# Used to get access to class Movies
import media
# This section is accessing the module media.py
toy_story = media.Movie(
"Toy Story",
"A story of a boy and his toys that come to life",
"https://upload.wikimedia.org/wikipedia/en/1/13/Toy_Story.jpg",
"https://www.youtube.com/watch?v=KYz2wyBy3kc") # NOQA
avatar = media.Movie(
"Avatar",
"A Marine on an Alien Planet",
"https://upload.wikimedia.org/wikipedia/en/b/b0/Avatar-Teaser-Poster.jpg",
"https://www.youtube.com/watch?v=5PSNL1qE6VY") # NOQA
lion_king = media.Movie(
"The Lion King",
"About a lion who lost everything and thought he should give up..."
"But when he was needed the most he remebered who he was",
"https://upload.wikimedia.org/wikipedia/en/3/3d/The_Lion_King_poster.jpg",
"https://www.youtube.com/watch?v=4sj1MT05lAA") # NOQA
pulp_fiction = media.Movie(
"Pulp Fiction",
"Don't mess with Marcell",
"https://upload.wikimedia.org/wikipedia/en/3/3b/"
"Pulp_Fiction_%281994%29_poster.jpg",
"https://www.youtube.com/watch?v=s7EdQ4FqbhY") # NOQA
forgetting_sarah_marshall = media.Movie(
"Forgetting <NAME>",
"A man on vacation after a terrible breakup"
"runs into his old lover while finding new love",
"https://upload.wikimedia.org/wikipedia/en/7/7c/"
"Forgetting_sarah_marshall_ver2.jpg",
"https://www.youtube.com/watch?v=PyVEHIO6jZ0") # NOQA
three_hundred = media.Movie(
"300",
"This is Sparta!",
"https://vignette4.wikia.nocookie.net/theflophouse/images/7/"
"7e/300-movie-poster.jpg/revision/latest?cb=20111111170631",
"https://www.youtube.com/watch?v=UrIbxk7idYA") # NOQA
# This creates an array for all the movies and their instances
movies = [toy_story, avatar, lion_king,
pulp_fiction, forgetting_sarah_marshall, three_hundred]
# This will run the application
fresh_tomatoes.open_movies_page(movies)
|
StarcoderdataPython
|
163677
|
import os
from newsapi import NewsApiClient
import datetime
# Init
api_key = os.environ.get('api_key')
newsapi = NewsApiClient(api_key=api_key)
#sources
sources = 'abc-news, al-jazeera-english,ars-technica,bbc-news,bbc-sport,bleacher-report,bloomberg,business-insider,buzzfeed,cnn,crypto-coins-news, entertainment-weekly,espn,football-italia,fox-news,fox-sports,hacker-news,medical-news-today,msnbc,mtv-news,mtv-news-uk,national-geographic,national-review,news24,new-scientist,new-york-magazine,nfl-news,techcrunch,techradar,the-verge,the-wall-street-journal,the-washington-post,wired'
# Today's date
date_today = datetime.date.today()
# Date five days ago
timedelta = datetime.timedelta(days=5)
start_date = (date_today - timedelta)
# get content
def get_all_content(content_about):
content = newsapi.get_everything(q=content_about,
language="en" ,
from_param=start_date,
to=date_today,
sources=str(sources),
sort_by="relevancy",
page_size=50
)
content = content.get('articles')
return content
# General headlines
def get_headlines():
content = newsapi.get_top_headlines(
sources=str(sources),
language="en" ,
page_size=50,
)
content = content.get('articles')
return content
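# Example usage (sketch): "technology" is an arbitrary query, and running this
# requires the api_key environment variable to hold a valid NewsAPI key.
if __name__ == "__main__":
    for article in get_all_content("technology") or []:
        print(article.get("title"))
    for headline in get_headlines() or []:
        print(headline.get("title"))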
|
StarcoderdataPython
|
1694099
|
from .constants import BASE_URL
from .api.stores import BestBuyStoresAPI
from .api.bulk import BestBuyBulkAPI
from .api.products import BestBuyProductsAPI
from .api.categories import BestBuyCategoryAPI
__version__ = "2.0.0"
class BestBuyAPI:
def __init__(self, api_key):
"""API's base class
:params:
:api_key (str): best buy developer API key.
"""
self.api_key = api_key.strip()
self.bulk = BestBuyBulkAPI(self.api_key)
self.products = BestBuyProductsAPI(self.api_key)
self.category = BestBuyCategoryAPI(self.api_key)
self.stores = BestBuyStoresAPI(self.api_key)
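# Example usage (sketch): the key below is a placeholder; the class simply acts
# as a facade that groups the per-resource clients behind one object.
if __name__ == "__main__":
    api = BestBuyAPI("YOUR_BESTBUY_API_KEY")
    print(api.products, api.category, api.stores, api.bulk)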
|
StarcoderdataPython
|
71694
|
<gh_stars>1-10
#
"""
Unit tests for conv encoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import texar.tf as tx
from texar.tf.modules.encoders.conv_encoders import Conv1DEncoder
class Conv1DEncoderTest(tf.test.TestCase):
"""Tests :class:`~texar.tf.modules.Conv1DEncoder` class.
"""
def test_encode(self):
"""Tests encode.
"""
encoder_1 = Conv1DEncoder()
self.assertEqual(len(encoder_1.layers), 4)
self.assertTrue(isinstance(encoder_1.layer_by_name("conv_pool_1"),
tx.core.MergeLayer))
for layer in encoder_1.layers[0].layers:
self.assertTrue(isinstance(layer, tx.core.SequentialLayer))
inputs_1 = tf.ones([64, 16, 300], tf.float32)
outputs_1 = encoder_1(inputs_1)
self.assertEqual(outputs_1.shape, [64, 128])
hparams = {
# Conv layers
"num_conv_layers": 2,
"filters": 128,
"kernel_size": [[3, 4, 5], 4],
"other_conv_kwargs": {"padding": "same"},
# Pooling layers
"pooling": "AveragePooling",
"pool_size": 2,
"pool_strides": 1,
# Dense layers
"num_dense_layers": 3,
"dense_size": [128, 128, 10],
"dense_activation": "relu",
"other_dense_kwargs": {"use_bias": False},
# Dropout
"dropout_conv": [0, 1, 2],
"dropout_dense": 2
}
encoder_2 = Conv1DEncoder(hparams)
# nlayers = nconv-pool + nconv + npool + ndense + ndropout + flatten
self.assertEqual(len(encoder_2.layers), 1 + 1 + 1 + 3 + 4 + 1)
self.assertTrue(isinstance(encoder_2.layer_by_name("conv_pool_1"),
tx.core.MergeLayer))
for layer in encoder_2.layers[1].layers:
self.assertTrue(isinstance(layer, tx.core.SequentialLayer))
inputs_2 = tf.ones([64, 16, 300], tf.float32)
outputs_2 = encoder_2(inputs_2)
self.assertEqual(outputs_2.shape, [64, 10])
def test_unknown_seq_length(self):
"""Tests use of pooling layer when the seq_length dimension of inputs
is `None`.
"""
encoder_1 = Conv1DEncoder()
inputs_1 = tf.placeholder(tf.float32, [64, None, 300])
outputs_1 = encoder_1(inputs_1)
self.assertEqual(outputs_1.shape, [64, 128])
hparams = {
# Conv layers
"num_conv_layers": 2,
"filters": 128,
"kernel_size": [[3, 4, 5], 4],
# Pooling layers
"pooling": "AveragePooling",
"pool_size": [2, None],
# Dense layers
"num_dense_layers": 1,
"dense_size": 10,
}
encoder = Conv1DEncoder(hparams)
# nlayers = nconv-pool + nconv + npool + ndense + ndropout + flatten
self.assertEqual(len(encoder.layers), 1 + 1 + 1 + 1 + 1 + 1)
self.assertTrue(isinstance(encoder.layer_by_name('pool_2'),
tx.core.AverageReducePooling1D))
inputs = tf.placeholder(tf.float32, [64, None, 300])
outputs = encoder(inputs)
self.assertEqual(outputs.shape, [64, 10])
hparams_2 = {
# Conv layers
"num_conv_layers": 1,
"filters": 128,
"kernel_size": 4,
"other_conv_kwargs": {'data_format': 'channels_first'},
# Pooling layers
"pooling": "MaxPooling",
"other_pool_kwargs": {'data_format': 'channels_first'},
# Dense layers
"num_dense_layers": 1,
"dense_size": 10,
}
encoder_2 = Conv1DEncoder(hparams_2)
inputs_2 = tf.placeholder(tf.float32, [64, 300, None])
outputs_2 = encoder_2(inputs_2)
self.assertEqual(outputs_2.shape, [64, 10])
if __name__ == "__main__":
tf.test.main()
|
StarcoderdataPython
|
3360742
|
<reponame>mail2nsrajesh/neutron-vpnaas
# Copyright (c) 2015 Canonical, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils
from neutron_lib import constants
from neutron_vpnaas._i18n import _
from neutron_vpnaas.services.vpn.device_drivers import ipsec
LOG = logging.getLogger(__name__)
TEMPLATE_PATH = os.path.dirname(os.path.abspath(__file__))
strongswan_opts = [
cfg.StrOpt(
'ipsec_config_template',
default=os.path.join(
TEMPLATE_PATH,
'template/strongswan/ipsec.conf.template'),
help=_('Template file for ipsec configuration.')),
cfg.StrOpt(
'strongswan_config_template',
default=os.path.join(
TEMPLATE_PATH,
'template/strongswan/strongswan.conf.template'),
help=_('Template file for strongswan configuration.')),
cfg.StrOpt(
'ipsec_secret_template',
default=os.path.join(
TEMPLATE_PATH,
'template/strongswan/ipsec.secret.template'),
help=_('Template file for ipsec secret configuration.')),
cfg.StrOpt(
'default_config_area',
default=os.path.join(
TEMPLATE_PATH,
'/etc/strongswan.d'),
help=_('The area where default StrongSwan configuration '
'files are located.'))
]
cfg.CONF.register_opts(strongswan_opts, 'strongswan')
NS_WRAPPER = 'neutron-vpn-netns-wrapper'
class StrongSwanProcess(ipsec.BaseSwanProcess):
# ROUTED means route created. (only for auto=route mode)
# CONNECTING means route created, connection tunnel is negotiating.
# INSTALLED means route created,
# also connection tunnel installed. (traffic can pass)
DIALECT_MAP = dict(ipsec.BaseSwanProcess.DIALECT_MAP)
STATUS_DICT = {
'ROUTED': constants.DOWN,
'CONNECTING': constants.DOWN,
'INSTALLED': constants.ACTIVE
}
    STATUS_RE = r'([a-f0-9\-]+).* (ROUTED|CONNECTING|INSTALLED)'
STATUS_NOT_RUNNING_RE = 'Command:.*ipsec.*status.*Exit code: [1|3] '
def __init__(self, conf, process_id, vpnservice, namespace):
self.DIALECT_MAP['v1'] = 'ikev1'
self.DIALECT_MAP['v2'] = 'ikev2'
self.DIALECT_MAP['sha256'] = 'sha256'
self._strongswan_piddir = self._get_strongswan_piddir()
LOG.debug("strongswan piddir is '%s'" % (self._strongswan_piddir))
super(StrongSwanProcess, self).__init__(conf, process_id,
vpnservice, namespace)
def _get_strongswan_piddir(self):
return utils.execute(
cmd=[self.binary, "--piddir"], run_as_root=True).strip()
def _check_status_line(self, line):
"""Parse a line and search for status information.
If a given line contains status information for a connection,
extract the status and mark the connection as ACTIVE or DOWN
according to the STATUS_MAP.
"""
m = self.STATUS_PATTERN.search(line)
if m:
connection_id = m.group(1)
status = self.STATUS_MAP[m.group(2)]
return connection_id, status
return None, None
def _execute(self, cmd, check_exit_code=True, extra_ok_codes=None):
"""Execute command on namespace.
This execute is wrapped by namespace wrapper.
The namespace wrapper will bind /etc/ and /var/run
"""
ip_wrapper = ip_lib.IPWrapper(namespace=self.namespace)
return ip_wrapper.netns.execute(
[NS_WRAPPER,
'--mount_paths=/etc:%s/etc,%s:%s/var/run' % (
self.config_dir, self._strongswan_piddir, self.config_dir),
'--cmd=%s' % ','.join(cmd)],
check_exit_code=check_exit_code,
extra_ok_codes=extra_ok_codes)
def copy_and_overwrite(self, from_path, to_path):
# NOTE(toabctl): the agent may run as non-root user, so rm/copy as root
if os.path.exists(to_path):
utils.execute(
cmd=["rm", "-rf", to_path], run_as_root=True)
utils.execute(
cmd=["cp", "-a", from_path, to_path], run_as_root=True)
def ensure_configs(self):
"""Generate config files which are needed for StrongSwan.
If there is no directory, this function will create
dirs.
"""
self.ensure_config_dir(self.vpnservice)
self.ensure_config_file(
'ipsec.conf',
cfg.CONF.strongswan.ipsec_config_template,
self.vpnservice)
self.ensure_config_file(
'strongswan.conf',
cfg.CONF.strongswan.strongswan_config_template,
self.vpnservice)
self.ensure_config_file(
'ipsec.secrets',
cfg.CONF.strongswan.ipsec_secret_template,
self.vpnservice,
0o600)
self.copy_and_overwrite(cfg.CONF.strongswan.default_config_area,
self._get_config_filename('strongswan.d'))
def get_status(self):
return self._execute([self.binary, 'status'],
extra_ok_codes=[1, 3])
def restart(self):
"""Restart the process."""
self.reload()
def reload(self):
"""Reload the process.
Sends a USR1 signal to ipsec starter which in turn reloads the whole
configuration on the running IKE daemon charon based on the actual
ipsec.conf. Currently established connections are not affected by
configuration changes.
"""
self._execute([self.binary, 'reload'])
def start(self):
"""Start the process for only auto=route mode now.
Note: if there is no namespace yet,
just do nothing, and wait next event.
"""
if not self.namespace:
return
self._execute([self.binary, 'start'])
# initiate ipsec connection
for ipsec_site_conn in self.vpnservice['ipsec_site_connections']:
self._execute([self.binary, 'stroke', 'up-nb',
ipsec_site_conn['id']])
def stop(self):
self._execute([self.binary, 'stop'])
self.connection_status = {}
class StrongSwanDriver(ipsec.IPsecDriver):
def create_process(self, process_id, vpnservice, namespace):
return StrongSwanProcess(
self.conf,
process_id,
vpnservice,
namespace)
|
StarcoderdataPython
|
190293
|
<reponame>vishalbelsare/pysonDB
import json
import os
from typing import Any
from typing import Dict
from uuid import uuid4
from .errors import DataError
def verify_data(data: Dict[str, Any], db: Dict[str, Dict[str, Any]]) -> bool:
if db:
if sorted(list(db.values())[0]) == sorted(list(data)):
return True
else:
raise DataError(
"The data provided does not comply with the schema of the intially provided data"
)
return True
def get_id(db: Dict[str, Dict[str, Any]]) -> str:
"""Generates a new uuid and then checks whether it exists in the DB"""
def get_id() -> str:
_id = str(uuid4().int)[:18]
if _id in db:
return get_id()
else:
return _id
return get_id()
def create_db(filename: str, create_file: bool = True) -> None:
def create(filename: str, data: str) -> None:
with open(filename, "w") as db_file:
db_file.write(data)
if filename.endswith(".json"):
if create_file and not os.path.exists(filename):
create(filename, json.dumps({})) # just simply write empty data
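# Example usage (sketch): field names and values below are arbitrary; this just
# exercises the helpers above with an in-memory record dictionary.
if __name__ == "__main__":
    create_db("example.json")
    db = {get_id({}): {"name": "Ada", "age": 36}}
    new_data = {"name": "Alan", "age": 41}
    if verify_data(new_data, db):
        db[get_id(db)] = new_data
    print(db)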
|
StarcoderdataPython
|
160962
|
<reponame>kipsang01/art-gallery
from django.shortcuts import render
from django.http import HttpResponse, Http404
from django.core.exceptions import ObjectDoesNotExist
from .models import Category, Image,Location
# Create your views here.
def home(request):
images = Image.objects.all()
categories = Category.objects.all()
locations = Location.objects.all()
context={
'categories':categories,
'images' : images,
'locations':locations,
}
return render(request, 'home.html', context)
def image_view(request,image_id):
categories = Category.objects.all()
try:
image = Image.get_image_by_id(image_id)
except Image.DoesNotExist:
raise Http404()
context={
'categories':categories,
'image' : image,
}
return render(request, 'image.html', context)
def search_results(request):
categories = Category.objects.all()
if 'search' in request.GET and request.GET["search"]:
search_term = request.GET.get("search")
searched_images = Image.search_image(search_term)
message = f"{search_term}"
context={
'categories':categories,
'images': searched_images,
'message':message
}
return render(request, 'search.html',context)
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":message})
def get_category(request,category):
categories = Category.objects.all()
images = Image.image_cat(category)
context={
'categories':categories,
'images' : images,
}
return render(request, 'category.html', context)
def get_by_locations(request,location):
categories = Category.objects.all()
images = Image.images_by_location(location)
context={
'categories':categories,
'images' : images,
}
return render(request, 'category.html', context)
|
StarcoderdataPython
|
1796009
|
<reponame>WalkingMachine/sara_behaviors
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###########################################################
# WARNING: Generated code! #
# ************************** #
# Manual changes may get lost if file is generated again. #
# Only code inside the [MANUAL] tags will be kept. #
###########################################################
from flexbe_core import Behavior, Autonomy, OperatableStateMachine, ConcurrencyContainer, PriorityContainer, Logger
from sara_flexbe_states.SetKey import SetKey
from flexbe_states.log_key_state import LogKeyState
from sara_flexbe_states.sara_set_head_angle import SaraSetHeadAngle
from sara_flexbe_states.list_entities_by_name import list_entities_by_name
from flexbe_states.flexible_calculation_state import FlexibleCalculationState
from flexbe_states.wait_state import WaitState
from sara_flexbe_states.sara_say import SaraSay
from sara_flexbe_states.for_loop import ForLoop
from sara_flexbe_behaviors.action_turn_sm import action_turnSM
from sara_flexbe_states.SetRosParam import SetRosParam
# Additional imports can be added inside the following tags
# [MANUAL_IMPORT]
# [/MANUAL_IMPORT]
'''
Created on Sat Jun 1 2018
@author: <NAME>
'''
class Action_countSM(Behavior):
'''
Count instances of entity class around sara (will only rotate, won't move).
'''
def __init__(self):
super(Action_countSM, self).__init__()
self.name = 'Action_count'
# parameters of this behavior
# references to used behaviors
self.add_behavior(action_turnSM, 'action_turn')
# Additional initialization code can be added inside the following tags
# [MANUAL_INIT]
# [/MANUAL_INIT]
# Behavior comments:
def create(self):
# x:475 y:412, x:73 y:374
_state_machine = OperatableStateMachine(outcomes=['done', 'failed'], input_keys=['className'], output_keys=['Count'])
_state_machine.userdata.className = "bottle"
_state_machine.userdata.Count = 0
# Additional creation code can be added inside the following tags
# [MANUAL_CREATE]
# [/MANUAL_CREATE]
# x:756 y:397
_sm_move_head_0 = OperatableStateMachine(outcomes=['finished'], input_keys=['className', 'Count'], output_keys=['Count'])
with _sm_move_head_0:
# x:19 y:95
OperatableStateMachine.add('set left',
SaraSetHeadAngle(pitch=-0.6, yaw=1.2),
transitions={'done': 'wait1'},
autonomy={'done': Autonomy.Off})
# x:5 y:229
OperatableStateMachine.add('count',
list_entities_by_name(frontality_level=0, distance_max=2),
transitions={'found': 'add', 'none_found': 'add'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'className', 'entity_list': 'entity_list', 'number': 'number'})
# x:10 y:326
OperatableStateMachine.add('add',
FlexibleCalculationState(calculation=lambda x: x[0]+x[1], input_keys=["Count", "number"]),
transitions={'done': 'gen text'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'number': 'number', 'output_value': 'Count'})
# x:241 y:88
OperatableStateMachine.add('set center',
SaraSetHeadAngle(pitch=-0.6, yaw=0),
transitions={'done': 'wait 2'},
autonomy={'done': Autonomy.Off})
# x:266 y:154
OperatableStateMachine.add('wait 2',
WaitState(wait_time=10),
transitions={'done': 'count2'},
autonomy={'done': Autonomy.Off})
# x:245 y:224
OperatableStateMachine.add('count2',
list_entities_by_name(frontality_level=0, distance_max=2),
transitions={'found': 'add2', 'none_found': 'add2'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'className', 'entity_list': 'entity_list', 'number': 'number'})
# x:252 y:321
OperatableStateMachine.add('add2',
FlexibleCalculationState(calculation=lambda x: x[0]+x[1], input_keys=["Count", "number"]),
transitions={'done': 'geb text 2'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'number': 'number', 'output_value': 'Count'})
# x:24 y:162
OperatableStateMachine.add('wait1',
WaitState(wait_time=12),
transitions={'done': 'count'},
autonomy={'done': Autonomy.Off})
# x:445 y:90
OperatableStateMachine.add('set right',
SaraSetHeadAngle(pitch=-0.6, yaw=-1.2),
transitions={'done': 'wait 3'},
autonomy={'done': Autonomy.Off})
# x:464 y:164
OperatableStateMachine.add('wait 3',
WaitState(wait_time=10),
transitions={'done': 'count3'},
autonomy={'done': Autonomy.Off})
# x:443 y:237
OperatableStateMachine.add('count3',
list_entities_by_name(frontality_level=0, distance_max=2),
transitions={'found': 'add3', 'none_found': 'add3'},
autonomy={'found': Autonomy.Off, 'none_found': Autonomy.Off},
remapping={'name': 'className', 'entity_list': 'entity_list', 'number': 'number'})
# x:457 y:334
OperatableStateMachine.add('add3',
FlexibleCalculationState(calculation=lambda x: x[0]+x[1], input_keys=["Count", "number"]),
transitions={'done': 'gen text3'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'number': 'number', 'output_value': 'Count'})
# x:30 y:412
OperatableStateMachine.add('gen text',
FlexibleCalculationState(calculation=lambda x: "I see "+ str(x[0])+ " "+ str(x[1]), input_keys=["number", "classname"]),
transitions={'done': 'say_1'},
autonomy={'done': Autonomy.Off},
remapping={'number': 'number', 'classname': 'className', 'output_value': 'text'})
# x:253 y:392
OperatableStateMachine.add('geb text 2',
FlexibleCalculationState(calculation=lambda x: "I see "+ str(x[0])+ " "+ str(x[1]), input_keys=["number", "classname"]),
transitions={'done': 'sara_2'},
autonomy={'done': Autonomy.Off},
remapping={'number': 'number', 'classname': 'className', 'output_value': 'text'})
# x:461 y:405
OperatableStateMachine.add('gen text3',
FlexibleCalculationState(calculation=lambda x: "I see "+ str(x[0])+ " "+ str(x[1]), input_keys=["number", "classname"]),
transitions={'done': 'Say_3'},
autonomy={'done': Autonomy.Off},
remapping={'number': 'number', 'classname': 'className', 'output_value': 'text'})
# x:53 y:492
OperatableStateMachine.add('say_1',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'set center'},
autonomy={'done': Autonomy.Off})
# x:264 y:471
OperatableStateMachine.add('sara_2',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'set right'},
autonomy={'done': Autonomy.Off})
# x:486 y:485
OperatableStateMachine.add('Say_3',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=0, block=True),
transitions={'done': 'finished'},
autonomy={'done': Autonomy.Off})
with _state_machine:
# x:55 y:34
OperatableStateMachine.add('init count',
SetKey(Value=0),
transitions={'done': 'set angle'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'Count'})
# x:444 y:326
OperatableStateMachine.add('Log Count',
LogKeyState(text="Found: {} objects", severity=Logger.REPORT_HINT),
transitions={'done': 'done'},
autonomy={'done': Autonomy.Off},
remapping={'data': 'Count'})
# x:40 y:183
OperatableStateMachine.add('Move head',
_sm_move_head_0,
transitions={'finished': 'for 1'},
autonomy={'finished': Autonomy.Inherit},
remapping={'className': 'className', 'Count': 'Count'})
# x:419 y:254
OperatableStateMachine.add('Look Center Found',
SaraSetHeadAngle(pitch=-0.4, yaw=0),
transitions={'done': 'Log Count'},
autonomy={'done': Autonomy.Off})
# x:234 y:227
OperatableStateMachine.add('for 1',
ForLoop(repeat=0),
transitions={'do': 'action_turn', 'end': 'Log Count'},
autonomy={'do': Autonomy.Off, 'end': Autonomy.Off},
remapping={'index': 'index'})
# x:38 y:275
OperatableStateMachine.add('action_turn',
self.use_behavior(action_turnSM, 'action_turn'),
transitions={'finished': 'Move head', 'failed': 'failed'},
autonomy={'finished': Autonomy.Inherit, 'failed': Autonomy.Inherit},
remapping={'rotation': 'rotation'})
# x:56 y:102
OperatableStateMachine.add('set angle',
SetKey(Value=3.14159),
transitions={'done': 'Move head'},
autonomy={'done': Autonomy.Off},
remapping={'Key': 'rotation'})
# x:417 y:37
OperatableStateMachine.add('store count',
SetRosParam(ParamName="behavior/Count/CountedObjets"),
transitions={'done': 'concat'},
autonomy={'done': Autonomy.Off},
remapping={'Value': 'Count'})
# x:400 y:114
OperatableStateMachine.add('concat',
FlexibleCalculationState(calculation=lambda x: "I counted "+str(x[0])+" "+str(x[1])+".", input_keys=["Count", "className"]),
transitions={'done': 'say_count'},
autonomy={'done': Autonomy.Off},
remapping={'Count': 'Count', 'className': 'className', 'output_value': 'Text'})
# x:419 y:186
OperatableStateMachine.add('say_count',
SaraSay(sentence=lambda x: x, input_keys=[], emotion=1, block=True),
transitions={'done': 'Look Center Found'},
autonomy={'done': Autonomy.Off})
return _state_machine
# Private functions can be added inside the following tags
# [MANUAL_FUNC]
# [/MANUAL_FUNC]
|
StarcoderdataPython
|
1647675
|
import pytest
from ocdeployer.images import ImageImporter, import_images
@pytest.fixture
def mock_oc(mocker):
_mock_oc = mocker.patch("ocdeployer.images.oc")
mocker.patch("ocdeployer.images.get_json", return_value={})
yield _mock_oc
def _check_oc_calls(mocker, mock_oc):
assert mock_oc.call_count == 2
calls = [
mocker.call(
"import-image",
"image1:tag",
"--from=docker.url/image1:sometag",
"--confirm",
"--scheduled=True",
_reraise=True,
),
mocker.call(
"import-image",
"image2:tag",
"--from=docker.url/image2:sometag",
"--confirm",
"--scheduled=True",
_reraise=True,
),
]
mock_oc.assert_has_calls(calls)
def test_images_short_style_syntax(mocker, mock_oc):
config_content = {
"images": [
{"image1:tag": "docker.url/image1:sometag"},
{"image2:tag": "docker.url/image2:sometag"},
]
}
ImageImporter.imported_istags = []
import_images(config_content, [])
_check_oc_calls(mocker, mock_oc)
def test_images_long_style_syntax(mocker, mock_oc):
config_content = {
"images": [
{"istag": "image1:tag", "from": "docker.url/image1:sometag"},
{"istag": "image2:tag", "from": "docker.url/image2:sometag"},
]
}
ImageImporter.imported_istags = []
import_images(config_content, [])
_check_oc_calls(mocker, mock_oc)
def test_images_old_style_syntax(mocker, mock_oc):
config_content = {
"images": {
"image1:tag": "docker.url/image1:sometag",
"image2:tag": "docker.url/image2:sometag",
}
}
ImageImporter.imported_istags = []
import_images(config_content, [])
_check_oc_calls(mocker, mock_oc)
def test_images_mixed_style_syntax(mocker, mock_oc):
config_content = {
"images": [
{"image1:tag": "docker.url/image1:sometag"},
{"istag": "image2:tag", "from": "docker.url/image2:sometag"},
]
}
ImageImporter.imported_istags = []
import_images(config_content, [])
_check_oc_calls(mocker, mock_oc)
def test_images_conditional_images(mocker, mock_oc):
config_content = {
"images": [
{"istag": "image1:tag", "from": "docker.url/image1:sometag", "envs": ["qa", "prod"]},
{"istag": "image2:tag", "from": "docker.url/image2:sometag"},
]
}
ImageImporter.imported_istags = []
import_images(config_content, ["prod"])
_check_oc_calls(mocker, mock_oc)
def test_images_conditional_ignore_image(mocker, mock_oc):
config_content = {
"images": [
{"istag": "image1:tag", "from": "docker.url/image1:sometag", "envs": ["qa", "prod"]},
{"istag": "image2:tag", "from": "docker.url/image2:sometag"},
]
}
ImageImporter.imported_istags = []
import_images(config_content, ["foo"])
assert mock_oc.call_count == 1
calls = [
mocker.call(
"import-image",
"image2:tag",
"--from=docker.url/image2:sometag",
"--confirm",
"--scheduled=True",
_reraise=True,
)
]
mock_oc.assert_has_calls(calls)
|
StarcoderdataPython
|
3338868
|
<filename>hatespeech_core/modules/pattern_classifier/PatternVectorizer.py
import regex
import pandas as pd
import numpy as np
class PatternVectorizer:
def __init__(self, patterns, binary=False):
self.binary = binary
vocabulary = pd.DataFrame()
vocabulary['patterns'] = patterns
vocabulary['regex'] = vocabulary.patterns.apply(
lambda p: regex.compile(PatternVectorizer.pattern_to_regexp(p))
)
self.vocabulary = vocabulary
def transform(self, documents):
X = np.array([*map(lambda doc: self.count_vocab(doc), documents)], dtype=np.int32)
if self.binary:
X[X>0] = 1
return X
def count_vocab(self, text):
return self.vocabulary.regex.apply(lambda voc: len(voc.findall(text)))
@classmethod
def token_to_regexp(cls, token):
tok_to_reg = {
'.+': "((?![@,#])[\\p{L}\\p{M}*\\p{N}_]+|(?![@,#])\\p{Punct}+)",
'<hashtag>': "#([\\p{L}\\p{M}*\\p{N}_]+|(?![@,#])\\p{Punct}+)",
'<usermention>': "@([\\p{L}\\p{M}*\\p{N}_]+|(?![@,#])\\p{Punct}+)",
'<url>': "http://([\\p{L}\\p{M}*\\p{N}_\\.\\/]+|(?![@,#])\\p{Punct}+)"
}
return tok_to_reg.get(token) or token
@classmethod
def pattern_to_regexp(cls, pattern_str):
delimRegex = "((?![@,#])\\b|\\p{Z}+|$|^|(?![@,#])\\p{Punct})"
patt = pattern_str.strip()
tokens = patt.split(" ")
tokens_reg = map(lambda t: cls.token_to_regexp(t),tokens)
pattern = delimRegex + delimRegex.join(tokens_reg) + delimRegex
return pattern
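# Example usage (sketch): the patterns and documents below are made up; each row
# of the returned matrix counts how often a pattern occurs in a document.
if __name__ == "__main__":
    pv = PatternVectorizer(["<hashtag>", "<usermention>", "so .+"])
    docs = ["so tired of this #monday", "@friend see you at #lunch"]
    print(pv.transform(docs))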
|
StarcoderdataPython
|
3347241
|
from KLS_EDA import new_kls_df
from sklearn.model_selection import train_test_split
# Splitting the data into training data and test data
X = new_kls_df[1:4].to_numpy().reshape(new_kls_df[1:4].size//3, 3)
y = new_kls_df.loc['Karachi Electric'].to_numpy().reshape(-1)
others_blamed_train, others_blamed_test, ke_train, ke_test = train_test_split(X, y, test_size = 0.3, random_state = 1, stratify = y)
|
StarcoderdataPython
|
83310
|
<gh_stars>1-10
import secrets
import string
def main():
''' Generates a password of the length specified by the user. '''
password_length = input("How many characters long should the password be?: ")
if password_length.isdecimal():
password_length = int(password_length)
# Generates password.
if password_length > 0:
character_set = string.ascii_letters + string.digits
password = ""
for i in range(password_length):
password = password + secrets.choice(character_set)
print(password)
else:
print("Invalid length.")
else:
print("Invalid length.")
input()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1756247
|
from django.shortcuts import render,redirect
from .models import Profile,Project
from django.contrib.auth.decorators import login_required
from .forms import ProjectForm,VoteForm,EditProfile
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializer import ProjectSerializer,ProfileSerializer
# Create your views here.
def welcome(request):
projects = Project.objects.all()
# prof = Profile.objects.filter(user=request.user)
return render(request,'welcome.html',{"projects":projects})
@login_required(login_url='/accounts/login/')
def ProjectsUpload(request):
logged_user = request.user
if request.method == 'POST':
form = ProjectForm(request.POST,request.FILES)
if form.is_valid():
ProjectsUpload = form.save(commit=False)
ProjectsUpload.Project = logged_user
ProjectsUpload.save()
return redirect('welcome')
else:
form = ProjectForm()
return render(request,'project.html',{'form':form})
@login_required(login_url='/accounts/login/')
def review(request):
logged_user = request.user
if request.method == 'POST':
form = VoteForm(request.POST,request.FILES)
if form.is_valid():
review = form.save(commit=False)
review.Project = logged_user
review.save()
return redirect('welcome')
else:
form = VoteForm()
return render(request,'review.html',{'form':form})
@login_required(login_url='/accounts/login/')
def edit_profile(request):
logged_user = request.user
if request.method == 'POST':
form = EditProfile(request.POST,request.FILES)
if form.is_valid():
edit = form.save(commit=False)
edit.user = logged_user
edit.save()
return redirect('welcome')
else:
form = EditProfile()
return render(request,'profile.html',{'form':form})
@login_required(login_url='/accounts/login/')
def view_profile(request):
current_user = request.user
projects = Project.objects.filter(project_user = current_user)
try:
prof = Profile.objects.get(user=current_user)
except Exception as e:
return redirect('EditProfile')
return render(request,'view_profile.html',{'profile':prof,'projects':projects})
def search(request):
if 'title' in request.GET and request.GET["title"]:
search_term = request.GET.get("title")
searched_title = Project.search_by_title(search_term)
message = f"{search_term}"
return render(request, 'search.html',{"message":message,"project":searched_title})
else:
message = "You haven't searched for any term"
return render(request, 'search.html',{"message":searched_title })
class project_list(APIView):
def get(self, request, format=None):
project = Project.objects.all()
serializer = ProjectSerializer(project, many=True)
return Response(serializer.data)
class profile_list(APIView):
def get(self, request, format=None):
profile = Profile.objects.all()
serializer = ProfileSerializer(profile, many=True)
return Response(serializer.data)
|
StarcoderdataPython
|
1653425
|
from datetime import datetime
from app.core.config import STRFTIME
from app.db.db import database
from app.db.schemas import weights
from app.models.models import WeightDB
from app.models.models import WeightSchema
from loguru import logger
from sqlalchemy import desc
def _log_query(query: str, query_params: dict = None) -> None:
logger.debug(f"query: {str(query)}, values: {query_params}")
async def post(payload: WeightSchema) -> int:
query = weights.insert().values(
weight=payload.weight,
created_at=datetime.now().strftime(STRFTIME)
if not payload.created_at
else payload.created_at,
)
_log_query(query=str(query), query_params=query.parameters)
return await database.execute(query)
async def get(id: int) -> WeightDB:
query = weights.select().where(id == weights.c.id)
_log_query(query=str(query).replace("\n", ""), query_params=id)
return await database.fetch_one(query=query)
async def get_all(fromdate: datetime = None, todate: datetime = None):
    # Apply each bound only when it is provided so a single-sided range works.
    query = weights.select()
    if fromdate:
        query = query.where(weights.c.created_at >= fromdate)
    if todate:
        query = query.where(weights.c.created_at <= todate)
    _log_query(query=str(query).replace("\n", ""), query_params="")
    return await database.fetch_all(query=query)
async def get_latest():
query = weights.select().order_by(desc(weights.c.created_at)).limit(1)
return await database.fetch_one(query=query)
async def put(id: int, payload: WeightSchema) -> WeightDB:
query = (
weights.update()
.where(id == weights.c.id)
.values(payload)
.returning(
weights.c.id, weights.c.updated_at, weights.c.created_at, weights.c.weight
)
)
_log_query(query=str(query).replace("\n", ""), query_params=payload)
return await database.fetch_all(query=query)
async def delete(id: int) -> WeightDB:
query = weights.delete().where(id == weights.c.id)
_log_query(query=str(query).replace("\n", ""), query_params=id)
return await database.execute(query=query)
|
StarcoderdataPython
|
3395778
|
# Generated by Django 2.1.2 on 2018-10-10 10:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("pyazo_core", "0007_upload_mime_type"),
]
operations = [
migrations.AddField(
model_name="upload",
name="thumbnail",
field=models.FileField(blank=True, upload_to="thumbnail/"),
),
]
|
StarcoderdataPython
|
198087
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
def move_dossier(apps, schema_editor):
Company = apps.get_model("core", "Company")
for c in Company.objects.all():
c.city_uk = c.city
c.street_uk = c.street
c.appt_uk = c.appt
c.wiki_uk = c.wiki
c.other_founders_uk = c.other_founders
c.other_recipient_uk = c.other_recipient
c.other_owners_uk = c.other_owners
c.other_managers_uk = c.other_managers
c.bank_name_uk = c.bank_name
c.sanctions_uk = c.sanctions
c.save()
class Migration(migrations.Migration):
dependencies = [
('core', '0086_auto_20160419_0250'),
]
operations = [
migrations.RunPython(
move_dossier, reverse_code=migrations.RunPython.noop),
]
|
StarcoderdataPython
|
3310093
|
<gh_stars>0
from db import db
from flask_restful_swagger import swagger
@swagger.model
class CreatorModel(db.Model):
__tablename__ = 'creators'
id = db.Column(db.Integer, primary_key=True)
firstname = db.Column(db.String(80))
lastname = db.Column(db.String(80))
def __init__(self, lastname, firstname=None):
if firstname:
self.firstname = firstname
if lastname:
self.lastname = lastname
def delete_from_db(self):
db.session.delete(self)
db.session.commit()
def json(self):
return { 'id':self.id, 'firstname' : self.firstname, 'lastname':self.lastname}
def save_to_db(self):
db.session.add(self)
db.session.commit()
@classmethod
def find_by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
@classmethod
def find_by_lastname(cls, lastname):
#users = User.query.filter(func.soundex(User.name) == func.soundex('Tina')).all()
return cls.query.filter(db.func.soundex(CreatorModel.lastname) == db.func.soundex(lastname)).first()
@classmethod
def find(cls, lastname=None, firstname=None):
filters = []
if lastname:
filters.append(db.func.soundex(CreatorModel.lastname) == db.func.soundex(lastname))
if firstname:
filters.append(db.func.soundex(CreatorModel.firstname) == db.func.soundex(firstname))
if len(filters) > 0:
return cls.query.filter(*filters).all()
else:
return cls.query.all()
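# Illustrative helper (sketch): demonstrates the intended soundex-style lookup.
# The default name is hypothetical and calling this requires an initialised
# Flask app and database session.
def example_find_similar(lastname="Smyth"):
    return [creator.json() for creator in CreatorModel.find(lastname=lastname)]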
|
StarcoderdataPython
|
3204651
|
<reponame>daniel-keogh/graph-theory
#!/usr/bin/env python3
import unittest
# Enables executing this module directly.
# Ref: Remi - https://stackoverflow.com/a/9806045
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from match.regex import (
match,
InvalidRegexError
)
class MatchTest(unittest.TestCase):
def test_match(self):
self.assertTrue(match("a.b|b*", "bbbbbbb"))
self.assertFalse(match("a.b|b*", "bbbbbx"))
def test_concat(self):
self.assertTrue(match("h.e.l.l.o", "hello"))
def test_optional(self):
self.assertTrue(match("a?.b", "b"))
self.assertTrue(match("a?.b", "ab"))
def test_alternation(self):
self.assertTrue(match("a|b", "b"))
self.assertFalse(match("a|b", "x"))
def test_group(self):
self.assertTrue(match("(a.b)*", "ababab"))
self.assertTrue(match("(a.b)+", "ababab"))
def test_invalid_group(self):
self.assertRaises(InvalidRegexError, match, "a|b)", "a")
def test_invalid_regex(self):
self.assertRaises(InvalidRegexError, match, ".a|b", "a")
def test_empty_kleene(self):
self.assertTrue(match("b*", ""))
def test_empty_plus(self):
self.assertFalse(match("b+", ""))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
167376
|
<reponame>jamesreinhold/vigolend
from datetime import datetime
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from vigolend.users.models import User
from helpers.common.basemodel import BaseModel
from helpers.common.choices import ModelChoices
from locations.models import Country
class KycApplication(BaseModel):
"""
A KYC or Know Your Customer is used to gather information on user in a regular interval.
The KYCs collect information such as where they live, collecting their updated or different ID
information, and their risk level at that point in time.
Know Your Client (KYC) is a requirement that protects both financial institutions and their users. Financial institutions are required to formally verify the identity of all users and understand the purpose of trading, expected volumes and jurisdictions their users will use.
Identity verification is a requirement for companies across a range of industries. Veriff offers compliance, fraud prevention and global scalability.
"""
# region Fields
legal_first_names = models.CharField(
max_length=255,
verbose_name=_('Legal First names'),
blank=True, null=True,
help_text=_("First name of the user submitting KYC application - As shown in documents."))
legal_last_names = models.CharField(
max_length=255,
verbose_name=_('Legal Last names'),
blank=True, null=True,
help_text=_("First name of the user submitting KYC application - As shown in documents."))
birth_date = models.DateField(
verbose_name=_('Date of Birth'),
blank=True, null=True,
help_text=_("""The user's date of birth as per the identification document. The date of birth must match The user's ID"""))
email = models.EmailField(
verbose_name=_('Email Address'),
max_length=150,
blank=True,
help_text=_("The primary e-mail address of the user submitting KYC application"))
address_line_1 = models.CharField(
max_length=255,
verbose_name=_('Address Line 1'),
help_text=_("""The Address Line 1 of the user submitting KYC application. Must be from The user's country of Residence indicated at the time of registration."""))
address_line_2 = models.CharField(
max_length=255,
blank=True, null=True,
verbose_name=_('Address Line 2'),
help_text=_("""The Address Line 2 of the user submitting KYC application. Must be from The user's country of Residence indicated at the time of registration."""))
state = models.CharField(
max_length=255,
verbose_name=_('State/Region'),
help_text=_("""The State/Region/Province of the user submitting KYC application. Must be from The user's country of Residence indicated at the time of registration."""))
zip_code = models.CharField(
max_length=10,
verbose_name=_('Zip Code'),
help_text=_("""The zip code or postal code of the user submitting KYC application. Must be from The user's country of Residence indicated at the time of registration."""))
city = models.CharField(
max_length=255,
verbose_name=_('City'),
help_text=_("""The city of the user submitting KYC application. Must be from the users country of Residence indicated at the time of registration."""))
identification_type = models.CharField(
max_length=21,
choices=ModelChoices.PHOTO_IDENTIFICATION_TYPE,
default='national_id',
verbose_name=_('Photo ID Type'),
help_text=_("""The type of identification document that the user has provided to the bank such as passport or national ID card. Chosen credential must not be expired. Document should be good condition and clearly visible. File is at least 1 MB in size and has at least 300 dpi resolution."""))
address_proof_type = models.CharField(
max_length=21,
choices=ModelChoices.PROOF_OF_ADDRESS_TYPE,
default=ModelChoices.BANK_STATEMENT,
verbose_name=_('Proof of Address type'),
help_text=_("""Document that serves as a Proof of address. Chosen credential must not be expired. Document should be good condition and clearly visible. File is at least 1 MB in size and has at least 300 dpi resolution."""))
proof_of_address_document = models.FileField(
storage="uploads/kyc/",
verbose_name=_('Proof of Address'),
help_text=_("""The document must contain your name, the address and should not be older than 90 days. Chosen credential must not be expired. Document should be good condition and clearly visible. File is at least 1 MB in size and has at least 300 dpi resolution."""))
photo_id = models.FileField(
storage="uploads/kyc/",
verbose_name=_('Photo ID(front)'),
help_text=_("""The front side of The user's Photo Identitification. Chosen credential must not be expired. Document should be good condition and clearly visible. File is at least 1 MB in size and has at least 300 dpi resolution."""))
photo_id_back = models.FileField(
storage="uploads/kyc/",
verbose_name=_('Photo ID(back)'),
blank=True, null=True,
help_text=_("""The back side of The user's Photo Identitification. Chosen credential must not be expired. Document should be good condition and clearly visible. File is at least 1 MB in size and has at least 300 dpi resolution."""))
selfie_with_id = models.FileField(
storage="uploads/kyc/",
verbose_name=_('Selfie with ID'),
help_text=_(
"""Upload a photo with yourself and your Passport or both sides of the ID Card. The face and the document must be clearly visible."""),
blank=True, null=True)
kyc_status = models.CharField(
max_length=28,
choices=ModelChoices.KYC_STATUS,
default='Pending',
verbose_name=_('KYC Status'),
help_text=_("The KYC status of the user. The default is `Unverified`."))
kyc_status_note = models.TextField(
max_length=255,
blank=True, null=True,
editable=False,
verbose_name=_('KYC Status Note'),
help_text=_("State the reason for issuing this status."))
status_update_date = models.DateTimeField(
default=timezone.now,
editable=False,
verbose_name=_('Status Update Time'),
help_text=_('Timestamp at which the resource status was updated.'))
politically_exposed_person = models.CharField(
verbose_name=_('Politically Exposed Person(PEP)'),
choices=ModelChoices.PEP_CHOICES,
max_length=16,
default='not_pep',
help_text=_("""A politically exposed person is one who has been entrusted with a prominent public function. A PEP generally presents a higher risk for potential involvement in bribery and corruption by virtue of their position and the influence that they may hold. `not_pep` implies user is not Politically Exposed Person and `pep` implies user is a Politically Exposed Person. Default is `not_pep` etc."""))
place_of_birth = models.CharField(
verbose_name=_("Place of birth"),
blank=True, null=True, max_length=255,
help_text=_("The place of birth of the user."))
identification_number = models.CharField(
max_length=50,
help_text=_(
"The number of the identification document provided by the person such as the passport number or the national ID card number."),
blank=True, null=True,
verbose_name=_('Photo Identification number'))
identification_issue_date = models.DateField(
blank=True, null=True,
help_text=_("""The date of issue of the identification document provided by the user"""),
verbose_name=_('ID Issue date'))
identification_expiry = models.DateField(
blank=True, null=True,
help_text=_("""The date of expiry of the identification document provided by the user"""),
verbose_name=_('ID Expiry date'))
kyc_submitted_ip_address = models.GenericIPAddressField(
blank=True, null=True,
verbose_name=_('KYC Submitted IP'),
editable=False,
help_text=_("""The IP address of the user recorded at the time of registration."""))
registered_ip_address = models.GenericIPAddressField(
blank=True, null=True,
verbose_name=_('Registered IP'),
editable=False,
help_text=_("""The IP address of the user recorded at the time of registration. Registered IP address is compared with the Submitted IP address to make sure client is within the same region."""))
# reference = models.CharField(
# default=Generators.generate_reference,
# max_length=8,
# verbose_name=_('Reference'),
# help_text=_("""Auto-generated reference for KYC application. This is for internal purposes ONLY. A transaction Reference number helps an identify transactions in records and used to monitor transactions associated with a card payment."""))
us_citizen_tax_resident = models.BooleanField(
default=False,
verbose_name=_("US Citizen or Tax Resident"),
help_text=_("""Indication of whether user is a citizen of the United States or a tax resident. Defaults to `False`."""))
accept_terms = models.BooleanField(
default=False,
verbose_name=_('Accepted Terms'),
help_text=_("""Agreements collected from the user, such as acceptance of terms and conditions, or opt in for marketing. This defaults to False."""))
agreed_to_data_usage = models.BooleanField(
default=False,
verbose_name=_('Agreed to Data Usage'),
help_text=_("""Consent to us using the provided data, including consent for us to verify the identity of relevant individuals with our service providers and database owners in accordance with the Identity Verification Terms. This defaults to False."""))
# endregion
# region Navigation Fields
citizenship = models.ForeignKey(
Country,
verbose_name=_('Citizenship'),
on_delete=models.CASCADE,
related_name='+',
help_text=_("""The citizenship of the user submitting KYC application. A proof of such citizenship is required by form of National ID or Passport."""))
second_citizenship = models.ForeignKey(
Country,
verbose_name=_('Second Citizenship'),
on_delete=models.CASCADE,
help_text=_("The user's second Nationality (if he/she has dual Nationality)."),
blank=True, null=True,
related_name='+')
country_residence = models.ForeignKey(
Country,
blank=True,
verbose_name=_('Country of Residence'),
on_delete=models.CASCADE,
help_text=_("""The country in which the person primarily resides. A proof of residence is required and requested upon change of residence."""))
kyc_country = models.ForeignKey(
Country,
on_delete=models.PROTECT,
blank=True, null=True,
verbose_name=_('KYC Country'),
help_text=_("""Country for which KYC has been performed against user. Each country may have different set of fields for KYC. This flag drives the system to show or hide the necessary fields."""),
related_name='kyc_country')
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
verbose_name=_('KYC User'),
        help_text=_('The user that owns this KYC application.'))
reviewer = models.ForeignKey(
User,
blank=True, null=True,
verbose_name=_('Reviewer'),
on_delete=models.SET_NULL,
related_name='kyc_reviewer',
help_text=_("The KYC staff or representative who checked and reviewed KYC application."))
kyc_review_date = models.DateTimeField(
blank=True, null=True,
editable=False,
verbose_name=_('KYC Checked Date'),
help_text=_("""Date on which KYC check was performed."""))
reviewer_ip_address = models.GenericIPAddressField(
blank=True, null=True,
verbose_name=_('Staff Submitted IP'),
editable=False,
help_text=_("Recorded IP address of the staff reviewing KYC application."))
kyc_refused_code = models.CharField(
verbose_name=_("KYC Refused Code"),
max_length=34,
choices=ModelChoices.KYC_REFUSE_REASON_CODE,
blank=True, null=True,
help_text=_("The type of reason for refusal")
)
# endregion
# region Metadata
class Meta:
verbose_name = _('KYC Application')
verbose_name_plural = _('KYC Applications')
db_table = 'kyc_applications'
permissions = [
("verify_kyc", _("Verify KYC Application")),
("reject_kyc", _("Reject KYC Application")),
("merge_kyc", _("Merge KYC data with user Information")),
]
# endregion
# region Methods
    def __str__(self):
        # The auto-generated `reference` field is currently commented out above,
        # so fall back to the primary key for a stable human-readable label.
        return _("KYC #: ") + str(self.pk)
@property
def age(self):
return int((datetime.now().date() - self.birth_date).days / 365.25)
def get_user(self):
return str(self.user.pk)
get_object_user = property(get_user)
# def clean_fields(self, exclude=None):
# super().clean_fields(exclude=exclude)
# if self.identification_issue_date == self.identification_expiry:
# raise ValidationError(
# {
# 'identification_issue_date': _(
# "ID issue date and Expiry date cannot be the same."
# ),
# }
# )
# if self.identification_issue_date > date.today():
# raise ValidationError({
# 'identification_issue_date': _(
# "We cannot time-travel into the future at the moment."
# ),
# }
# )
# if self.identification_expiry == date.today() or self.identification_expiry < date.today():
# raise ValidationError({
# 'identification_expiry': _(
# "We cannot travel back in time. ID has expired."
# ),
# }
# )
# endregion
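# --- Illustrative usage (not part of the original model) ----------------------
# A minimal, hedged sketch of how a view or management command might create a
# KYC application for an existing user. `applicant` and `residence` are
# hypothetical `User` and `Country` instances supplied by the caller; the
# document uploads are omitted here and excluded from validation.
def _example_submit_kyc(applicant, residence):
    from datetime import date
    application = KycApplication(
        user=applicant,
        citizenship=residence,
        country_residence=residence,
        legal_first_names="Jane",
        legal_last_names="Doe",
        birth_date=date(1990, 5, 17),
        address_line_1="1 Example Street",
        state="Example Region",
        zip_code="00000",
        city="Example City",
    )
    # Validate everything except the file uploads, then persist.
    application.full_clean(exclude=["proof_of_address_document", "photo_id"])
    application.save()
    return application.age  # computed from `birth_date` by the `age` property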
import json
from typing import Any, ClassVar, Dict, Iterable, List, Tuple
import attr
from ...parameters import Parameter
from .converter import to_json_schema_recursive
@attr.s(slots=True, eq=False)
class OpenAPIParameter(Parameter):
"""A single Open API operation parameter."""
example_field: ClassVar[str]
examples_field: ClassVar[str]
nullable_field: ClassVar[str]
supported_jsonschema_keywords: ClassVar[Tuple[str, ...]]
@property
def example(self) -> Any:
"""The primary example defined for this parameter."""
if self._example:
return self._example
if self._schema_example:
# It is processed only if there are no `example` / `examples` in the root, overridden otherwise
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#fixed-fields-10
# We mimic this behavior for Open API 2.0
return self._schema_example
@property
def location(self) -> str:
"""Where this parameter is located.
E.g. "query".
"""
return {"formData": "body"}.get(self.raw_location, self.raw_location)
@property
def raw_location(self) -> str:
"""Open API specific location name."""
return self.definition["in"]
@property
def name(self) -> str:
"""Parameter name."""
return self.definition["name"]
@property
def is_required(self) -> bool:
return self.definition.get("required", False)
@property
def is_header(self) -> bool:
raise NotImplementedError
@property
def _example(self) -> Any:
"""A not-named example, defined in the parameter root.
{
"in": "query",
"name": "key",
"type": "string"
"example": "foo", # This one
}
"""
return self.definition.get(self.example_field)
@property
def _schema_example(self) -> Any:
"""Example defined on the schema-level.
{
"in": "query", (only "body" is possible for Open API 2.0)
"name": "key",
"schema": {
"type": "string",
"example": "foo", # This one
}
}
"""
return self.definition.get("schema", {}).get("example")
def as_json_schema(self) -> Dict[str, Any]:
"""Convert parameter's definition to JSON Schema."""
schema = self.from_open_api_to_json_schema(self.definition)
return self.transform_keywords(schema)
def transform_keywords(self, schema: Dict[str, Any]) -> Dict[str, Any]:
"""Transform Open API specific keywords into JSON Schema compatible form."""
definition = to_json_schema_recursive(schema, self.nullable_field)
# Headers are strings, but it is not always explicitly defined in the schema. By preparing them properly, we
# can achieve significant performance improvements for such cases.
# For reference (my machine) - running a single test with 100 examples with the resulting strategy:
# - without: 4.37 s
# - with: 294 ms
#
# It also reduces the number of cases when the "filter_too_much" health check fails during testing.
if self.is_header:
definition.setdefault("type", "string")
return definition
def from_open_api_to_json_schema(self, open_api_schema: Dict[str, Any]) -> Dict[str, Any]:
"""Convert Open API's `Schema` to JSON Schema."""
return {
key: value
for key, value in open_api_schema.items()
# Allow only supported keywords or vendor extensions
if key in self.supported_jsonschema_keywords or key.startswith("x-") or key == self.nullable_field
}
def serialize(self) -> str:
        # For simplicity, JSON Schema semantics are not taken into account (e.g. 1 == 1.0),
        # i.e. two semantically equal schemas may have different representations.
return json.dumps(self.as_json_schema(), sort_keys=True)
@attr.s(slots=True, eq=False)
class OpenAPI20Parameter(OpenAPIParameter):
"""Open API 2.0 parameter.
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#parameterObject
"""
example_field = "x-example"
examples_field = "x-examples"
nullable_field = "x-nullable"
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#parameterObject
# Excluding informative keywords - `title`, `description`, `default`.
# `required` is not included because it has a different meaning here. It determines whether or not this parameter
# is required, which is not relevant because these parameters are later constructed
# into an "object" schema, and the value of this keyword is used there.
# The following keywords are relevant only for non-body parameters.
supported_jsonschema_keywords: ClassVar[Tuple[str, ...]] = (
"$ref",
"type", # only as a string
"format",
"items",
"maximum",
"exclusiveMaximum",
"minimum",
"exclusiveMinimum",
"maxLength",
"minLength",
"pattern",
"maxItems",
"minItems",
"uniqueItems",
"enum",
"multipleOf",
)
@property
def is_header(self) -> bool:
return self.location == "header"
@property
def _schema_example(self) -> Any:
# There is no "schema" in non-body parameters
return None
@attr.s(slots=True, eq=False)
class OpenAPI30Parameter(OpenAPIParameter):
"""Open API 3.0 parameter.
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#parameter-object
"""
example_field = "example"
examples_field = "examples"
nullable_field = "nullable"
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#schema-object
# Excluding informative keywords - `title`, `description`, `default`.
# In contrast with Open API 2.0 non-body parameters, in Open API 3.0, all parameters have the `schema` keyword.
supported_jsonschema_keywords = (
"$ref",
"multipleOf",
"maximum",
"exclusiveMaximum",
"minimum",
"exclusiveMinimum",
"maxLength",
"minLength",
"pattern",
"maxItems",
"minItems",
"uniqueItems",
"maxProperties",
"minProperties",
"required",
"enum",
"type",
"allOf",
"oneOf",
"anyOf",
"not",
"items",
"properties",
"additionalProperties",
"format",
)
@property
def is_header(self) -> bool:
return self.location in ("header", "cookie")
def from_open_api_to_json_schema(self, open_api_schema: Dict[str, Any]) -> Dict[str, Any]:
open_api_schema = get_parameter_schema(open_api_schema)
return super().from_open_api_to_json_schema(open_api_schema)
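# --- Illustrative usage (not part of the original module) ----------------------
# A hedged sketch of converting a single, hypothetical Open API 3.0 query
# parameter into JSON Schema. The exact output also depends on
# `to_json_schema_recursive` and the base `Parameter` constructor, which are
# defined outside this module.
def _example_parameter_schema() -> Dict[str, Any]:
    parameter = OpenAPI30Parameter({
        "in": "query",
        "name": "limit",
        "required": True,
        "schema": {"type": "integer", "minimum": 1, "nullable": True},
    })
    # Unsupported keywords are filtered out and `nullable` is translated into a
    # JSON Schema compatible form before the result is returned.
    return parameter.as_json_schema()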
@attr.s(slots=True, eq=False)
class OpenAPIBody(OpenAPIParameter):
media_type: str = attr.ib()
@property
def location(self) -> str:
return "body"
@property
def name(self) -> str:
# The name doesn't matter but is here for the interface completeness.
return "body"
@attr.s(slots=True, eq=False)
class OpenAPI20Body(OpenAPIBody, OpenAPI20Parameter):
"""Open API 2.0 body variant."""
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#schemaObject
# The `body` parameter contains the `schema` keyword that represents the `Schema Object`.
# It has slightly different keywords than other parameters. Informational keywords are excluded as well.
supported_jsonschema_keywords = (
"$ref",
"format",
"multipleOf",
"multipleOf",
"maximum",
"exclusiveMaximum",
"minimum",
"exclusiveMinimum",
"maxLength",
"minLength",
"pattern",
"maxItems",
"minItems",
"uniqueItems",
"maxProperties",
"minProperties",
"enum",
"type",
"items",
"allOf",
"properties",
"additionalProperties",
)
# NOTE. For Open API 2.0 bodies, we still give `x-example` precedence over the schema-level `example` field to keep
# the precedence rules consistent.
def as_json_schema(self) -> Dict[str, Any]:
"""Convert body definition to JSON Schema."""
# `schema` is required in Open API 2.0 when the `in` keyword is `body`
schema = self.definition["schema"]
return self.transform_keywords(schema)
@property
def _schema_example(self) -> Any:
# In Open API 2.0, there is the `example` keyword,
# so we use the default behavior of the `OpenAPIParameter` class
return super(OpenAPI20Parameter, self)._schema_example
FORM_MEDIA_TYPES = ("multipart/form-data", "application/x-www-form-urlencoded")
@attr.s(slots=True, eq=False)
class OpenAPI30Body(OpenAPIBody, OpenAPI30Parameter):
"""Open API 3.0 body variant.
We consider each media type defined in the schema as a separate variant that can be chosen for data generation.
The value of the `definition` field is essentially the Open API 3.0 `MediaType`.
"""
# The `required` keyword is located above the schema for concrete media-type;
# Therefore, it is passed here explicitly
required: bool = attr.ib(default=False)
def as_json_schema(self) -> Dict[str, Any]:
"""Convert body definition to JSON Schema."""
schema = get_media_type_schema(self.definition)
return self.transform_keywords(schema)
def transform_keywords(self, schema: Dict[str, Any]) -> Dict[str, Any]:
definition = super().transform_keywords(schema)
if self.is_form:
# It significantly reduces the "filtering" part of data generation.
definition.setdefault("type", "object")
return definition
@property
def is_form(self) -> bool:
"""Whether this payload represent a form."""
return self.media_type in FORM_MEDIA_TYPES
@property
def is_required(self) -> bool:
return self.required
@attr.s(slots=True, eq=False)
class OpenAPI20CompositeBody(OpenAPIBody, OpenAPI20Parameter):
"""A special container to abstract over multiple `formData` parameters."""
definition: List[OpenAPIParameter] = attr.ib()
@classmethod
def from_parameters(cls, *parameters: Dict[str, Any], media_type: str) -> "OpenAPI20CompositeBody":
return cls(
definition=[OpenAPI20Parameter(parameter) for parameter in parameters],
media_type=media_type,
)
@property
def is_required(self) -> bool:
# We generate an object for formData - it is always required.
return bool(self.definition)
@property
def _example(self) -> Any:
return {parameter.name: parameter._example for parameter in self.definition if parameter._example}
@property
def _schema_example(self) -> Any:
return {parameter.name: parameter._schema_example for parameter in self.definition if parameter._schema_example}
def as_json_schema(self) -> Dict[str, Any]:
"""The composite body is transformed into an "object" JSON Schema."""
return parameters_to_json_schema(self.definition)
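# --- Illustrative usage (not part of the original module) ----------------------
# A hedged sketch of wrapping two hypothetical Open API 2.0 `formData`
# parameters into a single composite body via the classmethod above.
def _example_composite_form_body() -> "OpenAPI20CompositeBody":
    return OpenAPI20CompositeBody.from_parameters(
        {"in": "formData", "name": "username", "type": "string", "required": True},
        {"in": "formData", "name": "avatar", "type": "file"},
        media_type="multipart/form-data",
    )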
def parameters_to_json_schema(parameters: Iterable[OpenAPIParameter]) -> Dict[str, Any]:
"""Create an "object" JSON schema from a list of Open API parameters.
:param List[OpenAPIParameter] parameters: A list of Open API parameters related to the same location. All of
them are expected to have the same "in" value.
For each input parameter, there will be a property in the output schema.
This:
[
{
"in": "query",
"name": "id",
"type": "string",
"required": True
}
]
Will become:
{
"properties": {
"id": {"type": "string"}
},
"additionalProperties": False,
"type": "object",
"required": ["id"]
}
We need this transformation for locations that imply multiple components with a unique name within
the same location.
For example, "query" - first, we generate an object that contains all defined parameters and then serialize it
to the proper format.
"""
properties = {}
required = []
for parameter in parameters:
name = parameter.name
properties[name] = parameter.as_json_schema()
if parameter.is_required:
required.append(name)
return {"properties": properties, "additionalProperties": False, "type": "object", "required": required}
def get_parameter_schema(data: Dict[str, Any]) -> Dict[str, Any]:
"""Extract `schema` from Open API 3.0 `Parameter`."""
# In Open API 3.0, there could be "schema" or "content" field. They are mutually exclusive.
if "schema" in data:
return data["schema"]
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.3.md#fixed-fields-10
# > The map MUST only contain one entry.
options = iter(data["content"].values())
media_type_object = next(options)
return get_media_type_schema(media_type_object)
def get_media_type_schema(definition: Dict[str, Any]) -> Dict[str, Any]:
"""Extract `schema` from Open API 3.0 `MediaType`."""
    # The `schema` keyword is optional; when it is absent, the payload may be any value of the specified media type.
    # Note, the main reason to have this function is to have an explicit name for the action we're doing.
return definition.get("schema", {})