Dataset schema, one record per source file: hexsha (40-char string); size (int64); ext; lang; max_stars_repo_path / max_stars_repo_name / max_stars_repo_head_hexsha / max_stars_repo_licenses / max_stars_count with stars-event min/max datetimes; the corresponding max_issues_* and max_forks_* fields; then content, avg_line_length, max_line_length and alphanum_fraction. Count and datetime fields may be null.
# File: django_jwt/middleware.py | Repo: Casassarnau/django-jwt-oidc | License: MIT
from django.contrib.auth.models import AnonymousUser
from django.utils.deprecation import MiddlewareMixin
from django_jwt.auth import JWTAuthentication
from django_jwt.settings_utils import get_setting
class JWTAuthenticationMiddleware(MiddlewareMixin):
def process_request(self, request):
key = request.get_signed_cookie(get_setting('JWT_CLIENT.COOKIE_NAME'), None, salt=get_setting('SECRET_KEY'))
user = None
if key is not None:
try:
user = JWTAuthentication.authenticate_credentials(key)
except JWTAuthentication.JWTException:
pass
request.user = user or AnonymousUser()
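    # Note (sketch of intended use, not shown in this file): the middleware only
    # authenticates a request when the client presents a cookie named by the
    # JWT_CLIENT.COOKIE_NAME setting that was signed with salt=SECRET_KEY, e.g. one
    # set elsewhere via response.set_signed_cookie(cookie_name, token, salt=secret);
    # anything else falls back to AnonymousUser.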
# File: backend/views.py | Repo: jbwashington/F1n35-Server | License: Apache-2.0
from flask import request, jsonify, Blueprint
from flask_login import login_required, login_user, current_user, logout_user
from models import User
bp = Blueprint('blueprint', __name__, template_folder='templates')
@bp.route("/", methods=["GET"])
def index():
return jsonify(message="Hello World!"), 200
@bp.route("/login", methods=["POST"])
def login():
json_payload = request.get_json()
user_entry = User.get(json_payload['username'])
if (user_entry):
user = User(*user_entry)
if (user.password == json_payload['password']): # not for prod
login_user(user)
return jsonify(isLoggedIn=current_user.is_authenticated), 200
return jsonify(authorization=False), 403
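# Example call against this login endpoint (illustrative only; the URL prefix depends
# on how `bp` is registered and the credentials are placeholders):
#   curl -X POST http://localhost:5000/login \
#        -H "Content-Type: application/json" \
#        -d '{"username": "alice", "password": "wonderland"}'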
@bp.route("/protected", methods=["GET"])
@login_required
def protected():
return jsonify(message="WHATS BRACKIN SLIME"), 200
@bp.route("/me", methods=["GET"])
def me():
return jsonify(isLoggedIn=current_user.is_authenticated)
@bp.route("/logout", methods=["GET"])
def logout():
logout_user()
return jsonify(isLoggedIn=current_user.is_authenticated)
# File: setup.py | Repo: PatrickAlphaC/cryptocompare | License: MIT
from setuptools import setup
with open('README.md', encoding="utf-8") as f:
readme = f.read()
setup(
name='cryptocompare',
version='0.7.3',
description='Wrapper for CryptoCompare.com',
long_description=readme,
long_description_content_type='text/markdown',
url='https://github.com/lagerfeuer/cryptocompare',
author='lagerfeuer',
author_email='[email protected]',
keywords='crypto cryptocurrency wrapper cryptocompare',
license='MIT',
python_requires='>=3',
packages=['cryptocompare'],
classifiers=['Programming Language :: Python :: 3'],
install_requires=['requests']
)
# File: ProjectApplication/evaluation/migrations/0011_adds_decision_letter.py | Repo: code-review-doctor/project-application | License: MIT
# Generated by Django 3.0.3 on 2020-03-05 09:05
from django.db import migrations, models
import evaluation.models
import storages.backends.s3boto3
class Migration(migrations.Migration):
dependencies = [
('evaluation', '0010_file_name_processed_on_save'),
]
operations = [
migrations.AddField(
model_name='historicalproposalevaluation',
name='decision_letter',
field=models.TextField(blank=True, max_length=100, null=True),
),
migrations.AddField(
model_name='proposalevaluation',
name='decision_letter',
field=models.FileField(blank=True, null=True, storage=storages.backends.s3boto3.S3Boto3Storage(), upload_to=evaluation.models.proposal_evaluation_eligibility_letter_rename),
),
]
# File: labs/lab06/data.py | Repo: abalone88/cs61a_2018sp | License: MIT
# CS 61A World Game Data:
from classes import *
# Characters:
james = Character('James',
'I saw Gibbes near Soda with a smoothie. You can probably find him there.')
gibbes = Character('Gibbes',
"This smoothie is so disappointing! "
"I wish someone would bring me a non-disappointing smoothie.")
jen = Character('Jen',
'No one brought food to the potluck! '
'Maybe the Golden Bear Cafe (GBC) is open; we can get food there.')
jerry_113 = Character('Jerry',
"You just saw me in Wheeler? But I've been here all along!")
tiffany = Character('Tiffany',
"My marker ran out of ink, so I can't vandalize this tower!")
jerry = Character('Jerry',
'I heard you like games, so I put some games in this game. '
'Have you gone to Games of Berkeley on Shattuck?')
allen = Character('Allen',
'Hey! Want to play ultimate frisbee?')
student = Character('Student',
'I once went into Dwinelle and got lost for 3 days! '
'That place is a maze!')
scared_student = Character('Terrified Student',
"I've been lost in Dwinelle for weeks")
spooked_student = Character('Spooked Student',
'Help')
# Things:
smoothie = Thing('Smoothie',
"Looks pretty non-disappointing. Gibbes might want this.")
lemon = Thing('Lemon',
'Hmmm... try bringing it to a TA')
coffee = Thing('Coffee',
'The sweet, caffeinated nectar of the gods')
monopoly = Thing('Monopoly',
'Just right for 61A study breaks!')
strange_skull = Thing('Strange Skull',
'A strange skull. Dinosaur? Giraffe? Who knows.')
# Keys:
try:
skeleton_key = Key('Skeleton Key', 'A key that unlocks many doors')
except NameError as e:
skeleton_key = Thing('Not a Skeleton Key', 'You must first implement the Key class')
# Places:
sather_gate = Place('Sather Gate', 'Sather Gate - A fairly ineffective gate',
[], [])
fsm = Place('FSM', 'Free Speech Cafe - Home of Coffee',
[], [smoothie, coffee])
vlsb = Place('VLSB', 'VLSB - Have you visited the dinosaur?',
[james], [skeleton_key])
soda = Place('Soda', 'Soda Hall - A building where Soda is not allowed',
[gibbes, jen, jerry_113], [])
gbc = Place('GBC', 'Golden Bear Cafe - Now with (healthy?) food',
[], [lemon])
campanile = Place('Campanile', 'The Campanile - A great tower!',
[tiffany], [])
game_store = Place('Games of Berkeley', 'Games of Berkeley',
[], [monopoly])
hp = Place('HP', 'HP Auditorium',
[], [])
shattuck = Place('Shattuck', 'Shattuck Avenue',
[], [])
wheeler = Place('Wheeler', 'Wheeler - CS61A lectures are held here.',
[jerry], [])
dwinelle = Place('Dwinelle Hall', 'Dwinelle Hall - A Maze',
[student], [])
deep_dwinelle = Place('Deep in Dwinelle Hall', 'You are lost in Dwinelle Hall',
[scared_student, spooked_student], [strange_skull])
memorial_glade = Place('Memorial Glade', 'Memorial Glade on a beautiful day',
[allen], [])
# Exits:
sather_gate.add_exits([gbc, wheeler, dwinelle, memorial_glade])
gbc.add_exits([sather_gate])
wheeler.add_exits([sather_gate, campanile])
deep_dwinelle.add_exits([deep_dwinelle, dwinelle])
dwinelle.add_exits([sather_gate, vlsb, wheeler, deep_dwinelle])
memorial_glade.add_exits([sather_gate, fsm, campanile, soda])
campanile.add_exits([memorial_glade, wheeler])
vlsb.add_exits([fsm, soda, shattuck, dwinelle])
shattuck.add_exits([vlsb, game_store])
fsm.add_exits([vlsb, memorial_glade])
soda.add_exits([hp, vlsb, memorial_glade])
hp.add_exits([soda])
game_store.add_exits([shattuck])
# Locked Buildings
fsm.locked = True
# Player:
# The Player should start at sather_gate.
me = Player('Water', sather_gate)
# File: manim/utils/bezier.py | Repo: elletech/practice_manim | License: MIT
"""Utility functions related to Bézier curves."""
__all__ = [
"bezier",
"partial_bezier_points",
"interpolate",
"integer_interpolate",
"mid",
"inverse_interpolate",
"match_interpolate",
"get_smooth_handle_points",
"diag_to_matrix",
"is_closed",
]
import typing
import numpy as np
from scipy import linalg
from ..utils.simple_functions import choose
CLOSED_THRESHOLD: float = 0.001
def bezier(
points: np.ndarray,
) -> typing.Callable[[float], typing.Union[int, typing.Iterable]]:
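    # Evaluate the curve in Bernstein form: with n = len(points) - 1, the returned
    # callable maps t to sum_k choose(n, k) * (1 - t)**(n - k) * t**k * points[k].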
n = len(points) - 1
return lambda t: sum(
[
((1 - t) ** (n - k)) * (t ** k) * choose(n, k) * point
for k, point in enumerate(points)
]
)
def partial_bezier_points(points: np.ndarray, a: float, b: float) -> np.ndarray:
"""
Given an array of points which define
a bezier curve, and two numbers 0<=a<b<=1,
return an array of the same size, which
describes the portion of the original bezier
curve on the interval [a, b].
This algorithm is pretty nifty, and pretty dense.
"""
if a == 1:
return [points[-1]] * len(points)
a_to_1 = np.array([bezier(points[i:])(a) for i in range(len(points))])
end_prop = (b - a) / (1.0 - a)
return np.array([bezier(a_to_1[: i + 1])(end_prop) for i in range(len(points))])
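# Minimal usage sketch for partial_bezier_points (control points chosen arbitrarily):
# the returned points define the sub-curve, so its endpoints coincide with the
# original curve evaluated at a and b.
#
#   points = np.array([[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [2.0, 0.0, 0.0]])
#   sub_points = partial_bezier_points(points, 0.25, 0.75)
#   assert np.allclose(bezier(sub_points)(0.0), bezier(points)(0.25))
#   assert np.allclose(bezier(sub_points)(1.0), bezier(points)(0.75))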
# Linear interpolation variants
def interpolate(start: int, end: int, alpha: float) -> float:
return (1 - alpha) * start + alpha * end
def integer_interpolate(start: float, end: float, alpha: float) -> int:
"""
alpha is a float between 0 and 1. This returns
an integer between start and end (inclusive) representing
appropriate interpolation between them, along with a
"residue" representing a new proportion between the
returned integer and the next one of the
list.
For example, if start=0, end=10, alpha=0.46, This
would return (4, 0.6).
"""
if alpha >= 1:
return (end - 1, 1.0)
if alpha <= 0:
return (start, 0)
value = int(interpolate(start, end, alpha))
residue = ((end - start) * alpha) % 1
return (value, residue)
def mid(start: float, end: float) -> float:
return (start + end) / 2.0
def inverse_interpolate(start: float, end: float, value: float) -> np.ndarray:
return np.true_divide(value - start, end - start)
def match_interpolate(
new_start: float, new_end: float, old_start: float, old_end: float, old_value: float
) -> np.ndarray:
return interpolate(
new_start, new_end, inverse_interpolate(old_start, old_end, old_value)
)
# Figuring out which bezier curves most smoothly connect a sequence of points
def get_smooth_handle_points(
points: np.ndarray,
) -> typing.Tuple[np.ndarray, np.ndarray]:
points = np.array(points)
num_handles = len(points) - 1
dim = points.shape[1]
if num_handles < 1:
return np.zeros((0, dim)), np.zeros((0, dim))
# Must solve 2*num_handles equations to get the handles.
    # l and u are the number of lower and upper diagonal rows
# in the matrix to solve.
l, u = 2, 1
# diag is a representation of the matrix in diagonal form
# See https://www.particleincell.com/2012/bezier-splines/
    # for how to arrive at these equations
diag = np.zeros((l + u + 1, 2 * num_handles))
diag[0, 1::2] = -1
diag[0, 2::2] = 1
diag[1, 0::2] = 2
diag[1, 1::2] = 1
diag[2, 1:-2:2] = -2
diag[3, 0:-3:2] = 1
# last
diag[2, -2] = -1
diag[1, -1] = 2
# This is the b as in Ax = b, where we are solving for x,
# and A is represented using diag. However, think of entries
# to x and b as being points in space, not numbers
b = np.zeros((2 * num_handles, dim))
b[1::2] = 2 * points[1:]
b[0] = points[0]
b[-1] = points[-1]
def solve_func(b: np.ndarray) -> np.ndarray:
return linalg.solve_banded((l, u), diag, b)
use_closed_solve_function = is_closed(points)
if use_closed_solve_function:
# Get equations to relate first and last points
matrix = diag_to_matrix((l, u), diag)
# last row handles second derivative
matrix[-1, [0, 1, -2, -1]] = [2, -1, 1, -2]
# first row handles first derivative
matrix[0, :] = np.zeros(matrix.shape[1])
matrix[0, [0, -1]] = [1, 1]
b[0] = 2 * points[0]
b[-1] = np.zeros(dim)
def closed_curve_solve_func(b: np.ndarray) -> np.ndarray:
return linalg.solve(matrix, b)
handle_pairs = np.zeros((2 * num_handles, dim))
for i in range(dim):
if use_closed_solve_function:
handle_pairs[:, i] = closed_curve_solve_func(b[:, i])
else:
handle_pairs[:, i] = solve_func(b[:, i])
return handle_pairs[0::2], handle_pairs[1::2]
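# Minimal usage sketch for get_smooth_handle_points (anchor points chosen arbitrarily):
# for N anchors it returns the N - 1 first handles and N - 1 second handles of the
# smoothly connecting cubic bezier segments.
#
#   anchors = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 0.0, 0.0]])
#   first_handles, second_handles = get_smooth_handle_points(anchors)
#   assert first_handles.shape == (len(anchors) - 1, 3)
#   assert second_handles.shape == (len(anchors) - 1, 3)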
def diag_to_matrix(l_and_u: typing.Tuple[int, int], diag: np.ndarray) -> np.ndarray:
"""
Converts array whose rows represent diagonal
entries of a matrix into the matrix itself.
See scipy.linalg.solve_banded
"""
l, u = l_and_u
dim = diag.shape[1]
matrix = np.zeros((dim, dim))
for i in range(l + u + 1):
np.fill_diagonal(
matrix[max(0, i - u) :, max(0, u - i) :], diag[i, max(0, u - i) :]
)
return matrix
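# Example of the banded layout diag_to_matrix consumes, for (l, u) = (1, 1):
#     diag = [[0,   a01, a12],
#             [a00, a11, a22],
#             [a10, a21, 0  ]]
# yields the full matrix
#     [[a00, a01, 0  ],
#      [a10, a11, a12],
#      [0,   a21, a22]]
# i.e. the same convention that scipy.linalg.solve_banded expects for `ab`.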
def is_closed(points: typing.Tuple[np.ndarray, np.ndarray]) -> bool:
return np.allclose(points[0], points[-1])
# File: examples/mpc_cifar/mpc_cifar.py | Repo: knarflin/CrypTen | License: MIT
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
import shutil
import tempfile
import time
import crypten
import crypten.communicator as comm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from examples.meters import AverageMeter
from examples.util import NoopContextManager
from torchvision import datasets, transforms
def run_mpc_cifar(
epochs=25,
start_epoch=0,
batch_size=1,
lr=0.001,
momentum=0.9,
weight_decay=1e-6,
print_freq=10,
model_location="",
resume=False,
evaluate=True,
seed=None,
skip_plaintext=False,
context_manager=None,
):
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
crypten.init()
# create model
model = LeNet()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(
model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay
)
# optionally resume from a checkpoint
best_prec1 = 0
if resume:
if os.path.isfile(model_location):
logging.info("=> loading checkpoint '{}'".format(model_location))
checkpoint = torch.load(model_location)
start_epoch = checkpoint["epoch"]
best_prec1 = checkpoint["best_prec1"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
logging.info(
"=> loaded checkpoint '{}' (epoch {})".format(
model_location, checkpoint["epoch"]
)
)
else:
raise IOError("=> no checkpoint found at '{}'".format(model_location))
# Data loading code
def preprocess_data(context_manager, data_dirname):
transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
with context_manager:
trainset = datasets.CIFAR10(
data_dirname, train=True, download=True, transform=transform
)
testset = datasets.CIFAR10(
data_dirname, train=False, download=True, transform=transform
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=4, shuffle=True, num_workers=2
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=2
)
return trainloader, testloader
if context_manager is None:
context_manager = NoopContextManager()
data_dir = tempfile.TemporaryDirectory()
train_loader, val_loader = preprocess_data(context_manager, data_dir.name)
if evaluate:
if not skip_plaintext:
logging.info("===== Evaluating plaintext LeNet network =====")
validate(val_loader, model, criterion, print_freq)
logging.info("===== Evaluating Private LeNet network =====")
input_size = get_input_size(val_loader, batch_size)
private_model = construct_private_model(input_size, model)
validate(val_loader, private_model, criterion, print_freq)
# logging.info("===== Validating side-by-side ======")
# validate_side_by_side(val_loader, model, private_model)
return
# define loss function (criterion) and optimizer
for epoch in range(start_epoch, epochs):
adjust_learning_rate(optimizer, epoch, lr)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, print_freq)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, print_freq)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint(
{
"epoch": epoch + 1,
"arch": "LeNet",
"state_dict": model.state_dict(),
"best_prec1": best_prec1,
"optimizer": optimizer.state_dict(),
},
is_best,
)
data_dir.cleanup()
def train(train_loader, model, criterion, optimizer, epoch, print_freq=10):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
current_batch_time = time.time() - end
batch_time.add(current_batch_time)
end = time.time()
if i % print_freq == 0:
logging.info(
"Epoch: [{}][{}/{}]\t"
"Time {:.3f} ({:.3f})\t"
"Loss {:.4f} ({:.4f})\t"
"Prec@1 {:.3f} ({:.3f})\t"
"Prec@5 {:.3f} ({:.3f})".format(
epoch,
i,
len(train_loader),
current_batch_time,
batch_time.value(),
loss.item(),
losses.value(),
prec1[0],
top1.value(),
prec5[0],
top5.value(),
)
)
def validate_side_by_side(val_loader, plaintext_model, private_model):
"""Validate the plaintext and private models side-by-side on each example"""
# switch to evaluate mode
plaintext_model.eval()
private_model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
# compute output for plaintext
output_plaintext = plaintext_model(input)
# encrypt input and compute output for private
# assumes that private model is encrypted with src=0
input_encr = encrypt_data_tensor_with_src(input)
output_encr = private_model(input_encr)
# log all info
logging.info("==============================")
logging.info("Example %d\t target = %d" % (i, target))
logging.info("Plaintext:\n%s" % output_plaintext)
logging.info("Encrypted:\n%s\n" % output_encr.get_plain_text())
# only use the first 1000 examples
if i > 1000:
break
def get_input_size(val_loader, batch_size):
input, target = next(iter(val_loader))
return input.size()
def construct_private_model(input_size, model):
"""Encrypt and validate trained model for multi-party setting."""
# get rank of current process
rank = comm.get().get_rank()
dummy_input = torch.empty(input_size)
# party 0 always gets the actual model; remaining parties get dummy model
if rank == 0:
model_upd = model
else:
model_upd = LeNet()
private_model = crypten.nn.from_pytorch(model_upd, dummy_input).encrypt(src=0)
return private_model
def encrypt_data_tensor_with_src(input):
"""Encrypt data tensor for multi-party setting"""
# get rank of current process
rank = comm.get().get_rank()
# get world size
world_size = comm.get().get_world_size()
if world_size > 1:
# party 1 gets the actual tensor; remaining parties get dummy tensor
src_id = 1
else:
# party 0 gets the actual tensor since world size is 1
src_id = 0
if rank == src_id:
input_upd = input
else:
input_upd = torch.empty(input.size())
private_input = crypten.cryptensor(input_upd, src=src_id)
return private_input
def validate(val_loader, model, criterion, print_freq=10):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if isinstance(model, crypten.nn.Module) and not crypten.is_encrypted_tensor(
input
):
input = encrypt_data_tensor_with_src(input)
# compute output
output = model(input)
if crypten.is_encrypted_tensor(output):
output = output.get_plain_text()
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# measure elapsed time
current_batch_time = time.time() - end
batch_time.add(current_batch_time)
end = time.time()
if (i + 1) % print_freq == 0:
logging.info(
"\nTest: [{}/{}]\t"
"Time {:.3f} ({:.3f})\t"
"Loss {:.4f} ({:.4f})\t"
"Prec@1 {:.3f} ({:.3f}) \t"
"Prec@5 {:.3f} ({:.3f})".format(
i + 1,
len(val_loader),
current_batch_time,
batch_time.value(),
loss.item(),
losses.value(),
prec1[0],
top1.value(),
prec5[0],
top5.value(),
)
)
logging.info(
" * Prec@1 {:.3f} Prec@5 {:.3f}".format(top1.value(), top5.value())
)
return top1.value()
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
"""Saves checkpoint of plaintext model"""
# only save from rank 0 process to avoid race condition
rank = comm.get().get_rank()
if rank == 0:
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
def adjust_learning_rate(optimizer, epoch, lr=0.01):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
new_lr = lr * (0.1 ** (epoch // 5))
for param_group in optimizer.param_groups:
param_group["lr"] = new_lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class LeNet(nn.Sequential):
"""
Adaptation of LeNet that uses ReLU activations
"""
# network architecture:
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
# File: youtuatools/extractor/lemonde.py | Repo: Pagasis/YouTua | License: MIT
from __future__ import unicode_literals
from .common import InfoExtractor
class LemondeIE(InfoExtractor):
_VALID_URL = r"https?://(?:.+?\.)?lemonde\.fr/(?:[^/]+/)*(?P<id>[^/]+)\.html"
_TESTS = [
{
"url": "http://www.lemonde.fr/police-justice/video/2016/01/19/comprendre-l-affaire-bygmalion-en-cinq-minutes_4849702_1653578.html",
"md5": "da120c8722d8632eec6ced937536cc98",
"info_dict": {
"id": "lqm3kl",
"ext": "mp4",
"title": "Comprendre l'affaire Bygmalion en 5 minutes",
"thumbnail": r"re:^https?://.*\.jpg",
"duration": 309,
"upload_date": "20160119",
"timestamp": 1453194778,
"uploader_id": "3pmkp",
},
},
{
# standard iframe embed
"url": "http://www.lemonde.fr/les-decodeurs/article/2016/10/18/tout-comprendre-du-ceta-le-petit-cousin-du-traite-transatlantique_5015920_4355770.html",
"info_dict": {
"id": "uzsxms",
"ext": "mp4",
"title": "CETA : quelles suites pour l'accord commercial entre l'Europe et le Canada ?",
"thumbnail": r"re:^https?://.*\.jpg",
"duration": 325,
"upload_date": "20161021",
"timestamp": 1477044540,
"uploader_id": "3pmkp",
},
"params": {
"skip_download": True,
},
},
{
"url": "http://redaction.actu.lemonde.fr/societe/video/2016/01/18/calais-debut-des-travaux-de-defrichement-dans-la-jungle_4849233_3224.html",
"only_matching": True,
},
{
# YouTube embeds
"url": "http://www.lemonde.fr/pixels/article/2016/12/09/pourquoi-pewdiepie-superstar-de-youtube-a-menace-de-fermer-sa-chaine_5046649_4408996.html",
"only_matching": True,
},
]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
digiteka_url = self._proto_relative_url(
self._search_regex(
r'url\s*:\s*(["\'])(?P<url>(?:https?://)?//(?:www\.)?(?:digiteka\.net|ultimedia\.com)/deliver/.+?)\1',
webpage,
"digiteka url",
group="url",
default=None,
)
)
if digiteka_url:
return self.url_result(digiteka_url, "Digiteka")
return self.url_result(url, "Generic")
# File: 02_challenge/02_challenge.py | Repo: anithagd30/wtfiswronghere | License: MIT
"""
We will use this script to teach Python to absolute beginners
The script is an example of Fizz-Buzz implemented in Python
The FizzBuzz problem:
For all integers between 1 and 99 (include both):
# print fizz for multiples of 3
# print buzz for multiples of 5
# print fizzbuzz for multiples of 3 and 5"
"""
def fizzbuzz(max_num):
"This method implements FizzBuzz"
# Google for 'range in python' to see what it does
for i in range(1,max_num):
# % or modulo division gives you the remainder
if i%3==0 and i%5==0:
print(i,"fizzbuzz")
elif i%3==0:
print(i,"fizz")
elif i%5==0:
print(i,"Buzz")
#----START OF SCRIPT
if __name__=='__main__':
fizzbuzz(9)
# File: python/example_code/s3/s3_versioning/remove_delete_marker.py | Repo: Pahtoe/aws-doc-sdk-examples | License: Apache-2.0
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
An AWS Lambda handler that receives an Amazon S3 batch event. The handler unpacks the
event and removes the specified delete marker from the bucket.
"""
# snippet-start:[s3.python.lambda.remove_delete_marker]
import logging
from urllib import parse
import boto3
from botocore.exceptions import ClientError
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
s3 = boto3.client('s3')
def lambda_handler(event, context):
"""
Removes a delete marker from the specified versioned object.
:param event: The S3 batch event that contains the ID of the delete marker
to remove.
:param context: Context about the event.
:return: A result structure that Amazon S3 uses to interpret the result of the
operation. When the result code is TemporaryFailure, S3 retries the
operation.
"""
# Parse job parameters from Amazon S3 batch operations
invocation_id = event['invocationId']
invocation_schema_version = event['invocationSchemaVersion']
results = []
result_code = None
result_string = None
task = event['tasks'][0]
task_id = task['taskId']
try:
obj_key = parse.unquote(task['s3Key'], encoding='utf-8')
obj_version_id = task['s3VersionId']
bucket_name = task['s3BucketArn'].split(':')[-1]
logger.info("Got task: remove delete marker %s from object %s.",
obj_version_id, obj_key)
try:
# If this call does not raise an error, the object version is not a delete
# marker and should not be deleted.
response = s3.head_object(
Bucket=bucket_name, Key=obj_key, VersionId=obj_version_id)
result_code = 'PermanentFailure'
result_string = f"Object {obj_key}, ID {obj_version_id} is not " \
f"a delete marker."
logger.debug(response)
logger.warning(result_string)
except ClientError as error:
delete_marker = error.response['ResponseMetadata']['HTTPHeaders'] \
.get('x-amz-delete-marker', 'false')
if delete_marker == 'true':
logger.info("Object %s, version %s is a delete marker.",
obj_key, obj_version_id)
try:
s3.delete_object(
Bucket=bucket_name, Key=obj_key, VersionId=obj_version_id)
result_code = 'Succeeded'
result_string = f"Successfully removed delete marker " \
f"{obj_version_id} from object {obj_key}."
logger.info(result_string)
except ClientError as error:
# Mark request timeout as a temporary failure so it will be retried.
if error.response['Error']['Code'] == 'RequestTimeout':
result_code = 'TemporaryFailure'
result_string = f"Attempt to remove delete marker from " \
f"object {obj_key} timed out."
logger.info(result_string)
else:
raise
else:
raise ValueError(f"The x-amz-delete-marker header is either not "
f"present or is not 'true'.")
except Exception as error:
# Mark all other exceptions as permanent failures.
result_code = 'PermanentFailure'
result_string = error
logger.exception(error)
finally:
results.append({
'taskId': task_id,
'resultCode': result_code,
'resultString': result_string
})
return {
'invocationSchemaVersion': invocation_schema_version,
'treatMissingKeysAs': 'PermanentFailure',
'invocationId': invocation_id,
'results': results
}
# snippet-end:[s3.python.lambda.remove_delete_marker]
# File: cli/commands/cmd_stopall.py | Repo: Pythonian/catwatch | License: MIT
import subprocess
import click
@click.command()
def cli():
"""
Stop all services.
:return: Subprocess call result
"""
cmd = 'pkill honcho && docker-compose stop'
return subprocess.call(cmd, shell=True)
# File: Ser_1/Code/seir1ask8d.py | Repo: AlexandrosKyriakakis/ElectromagneticFieldsB | License: MIT
import scipy as sp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from scipy import integrate
from mpl_toolkits import mplot3d
def f(x, y):
e0 = 8.854 * (10**(-12))
def Sx(TH): return (2*e0) / \
(((x + 1 - 0.1*np.cos(TH))**2 + 1 + 0.01*(np.sin(TH))**2)**1.5)
sx, error1 = integrate.quad(Sx, 0, np.pi*2)
def Sy(TH): return (-1)/(((x - 1 - 0.1*np.cos(TH))
** 2 + 1 + 0.01 * (np.sin(TH))**2)**1.5)
sy, error1 = integrate.quad(Sy, 0, np.pi*2)
return sx+sy
f1 = np.vectorize(f)
x = np.linspace(0, 2, 100)
y = np.linspace(-2, 2, 100)
X, Y = np.meshgrid(x, y)
Z = f1(X, Y)
fig, ax = plt.subplots()
CS = ax.contour(X, Y, Z, zdir='xy', offset=11,
cmap=matplotlib.cm.plasma, color='black')
ax.clabel(CS, CS.levels, inline=True, fontsize=5)
ax.set_title('Επιφανειακή Πυκνότητα Φορτίου')  # i.e. 'Surface charge density'
ax.set_xlabel('X(m)')
ax.set_ylabel('Z(m)')
plt.show()
# File: homzhub_customization/homzhub_customization/doctype/customer_dashboard.py | Repo: HPL-ERP/homzhub_customization | License: MIT
from __future__ import unicode_literals
from frappe import _
def get_data(data):
return {
'heatmap': True,
'heatmap_message': _('This is based on transactions against this Customer. See timeline below for details'),
'fieldname': 'customer',
'non_standard_fieldnames': {
'Payment Entry': 'party',
'Quotation': 'party_name',
'Opportunity': 'party_name',
'Project':'tenant'
},
'dynamic_links': {
'party_name': ['Customer', 'quotation_to']
},
'transactions': [
{
'label': _('Pre Sales'),
'items': ['Opportunity', 'Quotation']
},
{
'label': _('Orders'),
'items': ['Sales Order', 'Delivery Note', 'Sales Invoice']
},
{
'label': _('Payments'),
'items': ['Payment Entry']
},
{
'label': _('Support'),
'items': ['Issue']
},
{
'label': _('Projects'),
'items': ['Project']
},
{
'label': _('Pricing'),
'items': ['Pricing Rule']
},
{
'label': _('Subscriptions'),
'items': ['Subscription']
}
]
}
# File: object_detector_app/object_detection/core/matcher_test.py | Repo: robotanica/moveo_ros | License: MIT
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.core.matcher."""
import numpy as np
import tensorflow as tf
from object_detection.core import matcher
class AnchorMatcherTest(tf.test.TestCase):
def test_get_correct_matched_columnIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [0, 1, 3, 5]
matched_column_indices = match.matched_column_indices()
self.assertEquals(matched_column_indices.dtype, tf.int32)
with self.test_session() as sess:
matched_column_indices = sess.run(matched_column_indices)
self.assertAllEqual(matched_column_indices, expected_column_indices)
def test_get_correct_counts(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
exp_num_matched_columns = 4
exp_num_unmatched_columns = 2
exp_num_ignored_columns = 1
num_matched_columns = match.num_matched_columns()
num_unmatched_columns = match.num_unmatched_columns()
num_ignored_columns = match.num_ignored_columns()
self.assertEquals(num_matched_columns.dtype, tf.int32)
self.assertEquals(num_unmatched_columns.dtype, tf.int32)
self.assertEquals(num_ignored_columns.dtype, tf.int32)
with self.test_session() as sess:
(num_matched_columns_out, num_unmatched_columns_out,
num_ignored_columns_out) = sess.run(
[num_matched_columns, num_unmatched_columns, num_ignored_columns])
self.assertAllEqual(num_matched_columns_out, exp_num_matched_columns)
self.assertAllEqual(num_unmatched_columns_out, exp_num_unmatched_columns)
self.assertAllEqual(num_ignored_columns_out, exp_num_ignored_columns)
def testGetCorrectUnmatchedColumnIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [2, 4]
unmatched_column_indices = match.unmatched_column_indices()
self.assertEquals(unmatched_column_indices.dtype, tf.int32)
with self.test_session() as sess:
unmatched_column_indices = sess.run(unmatched_column_indices)
self.assertAllEqual(unmatched_column_indices, expected_column_indices)
def testGetCorrectMatchedRowIndices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_row_indices = [3, 1, 0, 5]
matched_row_indices = match.matched_row_indices()
self.assertEquals(matched_row_indices.dtype, tf.int32)
with self.test_session() as sess:
matched_row_inds = sess.run(matched_row_indices)
self.assertAllEqual(matched_row_inds, expected_row_indices)
def test_get_correct_ignored_column_indices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [6]
ignored_column_indices = match.ignored_column_indices()
self.assertEquals(ignored_column_indices.dtype, tf.int32)
with self.test_session() as sess:
ignored_column_indices = sess.run(ignored_column_indices)
self.assertAllEqual(ignored_column_indices, expected_column_indices)
def test_get_correct_matched_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [True, True, False, True, False, True, False]
matched_column_indicator = match.matched_column_indicator()
self.assertEquals(matched_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
matched_column_indicator = sess.run(matched_column_indicator)
self.assertAllEqual(matched_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [False, False, True, False, True, False, False]
unmatched_column_indicator = match.unmatched_column_indicator()
self.assertEquals(unmatched_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
unmatched_column_indicator = sess.run(unmatched_column_indicator)
self.assertAllEqual(unmatched_column_indicator, expected_column_indicator)
def test_get_correct_ignored_column_indicator(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indicator = [False, False, False, False, False, False, True]
ignored_column_indicator = match.ignored_column_indicator()
self.assertEquals(ignored_column_indicator.dtype, tf.bool)
with self.test_session() as sess:
ignored_column_indicator = sess.run(ignored_column_indicator)
self.assertAllEqual(ignored_column_indicator, expected_column_indicator)
def test_get_correct_unmatched_ignored_column_indices(self):
match_results = tf.constant([3, 1, -1, 0, -1, 5, -2])
match = matcher.Match(match_results)
expected_column_indices = [2, 4, 6]
unmatched_ignored_column_indices = (match.
unmatched_or_ignored_column_indices())
self.assertEquals(unmatched_ignored_column_indices.dtype, tf.int32)
with self.test_session() as sess:
unmatched_ignored_column_indices = sess.run(
unmatched_ignored_column_indices)
self.assertAllEqual(unmatched_ignored_column_indices,
expected_column_indices)
def test_all_columns_accounted_for(self):
# Note: deliberately setting to small number so not always
# all possibilities appear (matched, unmatched, ignored)
num_matches = 10
match_results = tf.random.uniform(
[num_matches], minval=-2, maxval=5, dtype=tf.int32)
match = matcher.Match(match_results)
matched_column_indices = match.matched_column_indices()
unmatched_column_indices = match.unmatched_column_indices()
ignored_column_indices = match.ignored_column_indices()
with self.test_session() as sess:
matched, unmatched, ignored = sess.run([
matched_column_indices, unmatched_column_indices,
ignored_column_indices
])
all_indices = np.hstack((matched, unmatched, ignored))
all_indices_sorted = np.sort(all_indices)
self.assertAllEqual(all_indices_sorted,
np.arange(num_matches, dtype=np.int32))
if __name__ == '__main__':
tf.test.main()
# File: kubernetes_asyncio/client/models/v1_node_selector.py | Repo: weltonrodrigo/kubernetes_asyncio | License: Apache-2.0
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.16.14
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1NodeSelector(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'node_selector_terms': 'list[V1NodeSelectorTerm]'
}
attribute_map = {
'node_selector_terms': 'nodeSelectorTerms'
}
def __init__(self, node_selector_terms=None, local_vars_configuration=None): # noqa: E501
"""V1NodeSelector - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._node_selector_terms = None
self.discriminator = None
self.node_selector_terms = node_selector_terms
@property
def node_selector_terms(self):
"""Gets the node_selector_terms of this V1NodeSelector. # noqa: E501
Required. A list of node selector terms. The terms are ORed. # noqa: E501
:return: The node_selector_terms of this V1NodeSelector. # noqa: E501
:rtype: list[V1NodeSelectorTerm]
"""
return self._node_selector_terms
@node_selector_terms.setter
def node_selector_terms(self, node_selector_terms):
"""Sets the node_selector_terms of this V1NodeSelector.
Required. A list of node selector terms. The terms are ORed. # noqa: E501
:param node_selector_terms: The node_selector_terms of this V1NodeSelector. # noqa: E501
:type: list[V1NodeSelectorTerm]
"""
if self.local_vars_configuration.client_side_validation and node_selector_terms is None: # noqa: E501
raise ValueError("Invalid value for `node_selector_terms`, must not be `None`") # noqa: E501
self._node_selector_terms = node_selector_terms
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1NodeSelector):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1NodeSelector):
return True
return self.to_dict() != other.to_dict()
# File: python/GafferAppleseedUI/AppleseedAttributesUI.py | Repo: danieldresser-ie/gaffer | License: BSD-3-Clause
##########################################################################
#
# Copyright (c) 2014, Esteban Tovagliari. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import string
import Gaffer
import GafferUI
import GafferAppleseed
def __visibilitySummary( plug ) :
info = []
for childName, label in (
( "camera", "Camera" ),
( "light", "Light" ),
( "shadow", "Shadow" ),
( "diffuse", "Diffuse" ),
( "specular", "Specular" ),
( "glossy", "Glossy" ),
) :
values = []
if plug[childName+"Visibility"]["enabled"].getValue() :
values.append( "On" if plug[childName+"Visibility"]["value"].getValue() else "Off" )
if values :
info.append( label + " : " + "/".join( values ) )
return ", ".join( info )
def __shadingSummary( plug ) :
info = []
if plug["shadingSamples"]["enabled"].getValue() :
info.append( "Shading Samples %d" % plug["shadingSamples"]["value"].getValue() )
info.append( "Medium Priority %d" % plug["mediumPriority"]["value"].getValue() )
return ", ".join( info )
def __alphaMapSummary( plug ) :
info = []
if plug["alphaMap"]["enabled"].getValue() :
info.append( "Alpha Map %s" % plug["alphaMap"]["value"].getValue() )
return ", ".join( info )
def __meshSummary( plug ) :
info = []
if plug["smoothNormals"]["enabled"].getValue() :
info.append( "Smooth Normals %s" % plug["smoothNormals"]["value"].getValue() )
if plug["smoothTangents"]["enabled"].getValue() :
info.append( "Smooth Tangents %s" % plug["smoothTangents"]["value"].getValue() )
return ", ".join( info )
Gaffer.Metadata.registerNode(
GafferAppleseed.AppleseedAttributes,
"description",
"""
Applies appleseed attributes to objects
in the scene.
""",
plugs = {
# Sections
"attributes" : [
"layout:section:Visibility:summary", __visibilitySummary,
"layout:section:Shading:summary", __shadingSummary,
"layout:section:Alpha Map:summary", __alphaMapSummary,
"layout:section:Mesh :summary", __meshSummary,
],
# Visibility
"attributes.cameraVisibility" : [
"description",
"""
Whether or not the object is visible to camera
rays. To hide an object completely, use the
visibility settings on the StandardAttributes
node instead.
""",
"layout:section", "Visibility",
"label", "Camera",
],
"attributes.lightVisibility" : [
"description",
"""
Whether or not the object is visible to light
rays (whether or not it is visible to photons).
""",
"layout:section", "Visibility",
"label", "Light",
],
"attributes.shadowVisibility" : [
"description",
"""
Whether or not the object is visible to shadow
rays (whether or not it casts shadows).
""",
"layout:section", "Visibility",
"label", "Shadow",
],
"attributes.diffuseVisibility" : [
"description",
"""
Whether or not the object is visible to diffuse
rays - whether it casts bounce light or not.
""",
"layout:section", "Visibility",
"label", "Diffuse",
],
"attributes.specularVisibility" : [
"description",
"""
Whether or not the object is visible in
tight mirror reflections and refractions.
""",
"layout:section", "Visibility",
"label", "Specular",
],
"attributes.glossyVisibility" : [
"description",
"""
Whether or not the object is visible in
soft specular reflections and refractions.
""",
"layout:section", "Visibility",
"label", "Glossy",
],
# Shading
"attributes.shadingSamples" : [
"description",
"""
Number of samples to use when computing shading for the object.
""",
"layout:section", "Shading",
],
"attributes.mediumPriority" : [
"description",
"""
Specify the object medium priority.
When multiple objects share the same volume, appleseed will consider
only the highest priority one for intersections and shading.
Sometimes called nested dielectrics in other renderers.
""",
"layout:section", "Shading",
],
# Alpha Map
"attributes.alphaMap" : [
"description",
"""
Specifies a grayscale texture than can be used to efficiently discard
unwanted parts of the surface of the object while computing ray intersections.
""",
"layout:section", "Alpha Map",
],
"attributes.alphaMap.value" : [
"plugValueWidget:type", "GafferUI.FileSystemPathPlugValueWidget",
"path:leaf", True,
"path:bookmarks", "texture",
],
"attributes.smoothNormals" : [
"description",
"""
Compute smooth normals.
""",
"layout:section", "Mesh",
"label", "Smooth Normals",
],
"attributes.smoothTangents" : [
"description",
"""
Compute smooth tangents.
""",
"layout:section", "Mesh",
"label", "Smooth Tangents",
],
}
)
# File: FigureTable/RadioROC/roc_pr.py | Repo: vkola-lab/multi-task | License: MIT
import os, sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
parentdir = os.path.dirname(parentdir)
print(parentdir)
sys.path.append(parentdir)
from performance_eval import *
import csv
from collections import defaultdict
# get diagnoses from radiologists' rating form
team = defaultdict(dict) # key is initials of radiologist, content is rating
with open('ratings.csv', 'r') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
if row['rev_initials'] in ['SQ', 'PJ test', 'PJ-trial chart', 'test', 'test (JL)']: continue
id = row['id'].split()[0]
team[row['rev_initials'].lower()][id] = row['dem_dx'] if row['dem_dx'] == '1' else '0'
# for key in team:
# print(key, len(team[key].keys()), team[key])
# name_map = {'jac': 'n1', 'phl': 'n2', 'am': 'n3', 'abp': 'n4', 'jes': 'n5', 'mjs': 'n6', 'hy': 'n7'}
# with open('kappa/' + name_map[key] + '.csv', 'w') as csv_file:
# writer = csv.DictWriter(csv_file, fieldnames=['Diagnosis Label'])
# writer.writeheader()
# for id in range(1, 51):
# writer.writerow({'Diagnosis Label': team[key][str(id)]})
# get ground truth labels from 50cases_dummy.csv
label = {} # key is dummy id, content is ADD label (0 is non-ADD, 1 is ADD)
with open('../../lookupcsv/derived_tables/NACC_ALL/50cases_dummy.csv', 'r') as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
label[row['dummy']] = row['ADD']
# print(label)
def get_sensitivity_specificity(diag, label):
tp, fp, tn, fn = 0, 0, 0, 0
for key in diag:
assert key in label, 'id not found'
if diag[key] == '1' and label[key] == '1':
tp += 1
elif diag[key] == '1' and label[key] == '0':
fp += 1
elif diag[key] == '0' and label[key] == '0':
tn += 1
elif diag[key] == '0' and label[key] == '1':
fn += 1
sensi = tp / (tp + fn + 0.00000001)
specf = tn / (tn + fp + 0.00000001)
preci = tp / (tp + fp + 0.00000001)
recal = sensi
return sensi, specf, preci, recal
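# Worked example for get_sensitivity_specificity (hypothetical ratings, not from
# ratings.csv): diag = {'1': '1', '2': '0', '3': '0'}, label = {'1': '1', '2': '1', '3': '0'}
# gives tp=1, fn=1, tn=1, fp=0, so sensitivity ~= 0.5, specificity ~= 1.0, precision ~= 1.0.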
def generate_roc(csv_files, positive_label, color, out_file, team):
"""
:param csv_files: a list of csv files as above format
:param positive_label: if positive_label == 'NC', the curve is about NC vs not NC
if positive_label == 'DE', the curve is about DE vs not DE
if positive_label =='ADD', the curve is about ADD vs nADD
:param color: color of the roc curve
:param out_file: image filename you want to save as
:return:
"""
lw = 2
text_size = 12
fig, ax = plt.subplots(dpi=200)
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
for csvfile in csv_files:
scores, labels = get_score_label(csvfile, positive_label)
fpr, tpr, thres = roc_curve(labels, scores, pos_label=1)
AUC = auc(fpr, tpr)
ax.plot(fpr, tpr, lw=lw/2, alpha=0.15)
interp_tpr = np.interp(mean_fpr, fpr, tpr)
interp_tpr[0] = 0.0
tprs.append(interp_tpr)
aucs.append(AUC)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
ax.plot(mean_fpr, mean_tpr, color=color,
label=r'AUC=%0.3f$\pm$%0.3f' % (mean_auc, std_auc),
lw=2, alpha=.8)
ax.plot([0, 1], [0, 1], 'k--', lw=lw)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color=color, alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
legend_properties = {'weight': 'bold', 'size': text_size}
ax.set_xlabel('False Positive Rate', fontsize=text_size, fontweight='bold')
ax.set_ylabel('True Positive Rate', fontsize=text_size, fontweight='bold')
ax.tick_params(axis='both', which='major', labelsize=14)
add_dots(ax, team, 'roc')
ax.legend(loc="lower right", prop=legend_properties)
fig.savefig(out_file, bbox_inches='tight')
fig.clf()
plt.close()
return mean_auc
def generate_pr(csv_files, positive_label, color, out_file, team):
lw = 2
text_size = 12
fig, ax = plt.subplots(dpi=200)
prs = []
aucs = []
mean_rc = np.linspace(0, 1, 100)
for csvfile in csv_files:
scores, labels = get_score_label(csvfile, positive_label)
pr, rc, thres = precision_recall_curve(labels, scores, pos_label=1)
pr, rc = pr[::-1], rc[::-1]
AUC = average_precision_score(labels, scores, pos_label=1)
ax.plot(rc, pr, lw=lw/2, alpha=0.15)
interp_pr = np.interp(mean_rc, rc, pr)
prs.append(interp_pr)
aucs.append(AUC)
mean_pr = np.mean(prs, axis=0)
mean_auc = np.mean(aucs) # is this right?
std_auc = np.std(aucs)
ax.plot(mean_rc, mean_pr, color=color,
label=r'AP=%0.3f$\pm$%0.3f' % (mean_auc, std_auc),
lw=2, alpha=.8)
count = collections.Counter(labels)
ratio = count[1] / (count[1] + count[0])
ax.plot([0, 1], [ratio, ratio], 'k--', lw=lw)
std_pr = np.std(prs, axis=0)
prs_upper = np.minimum(mean_pr + std_pr, 1)
prs_lower = np.maximum(mean_pr - std_pr, 0)
ax.fill_between(mean_rc, prs_lower, prs_upper, color=color, alpha=.2,
label=r'$\pm$ 1 std. dev.')
ax.set(xlim=[-0.05, 1.05], ylim=[-0.05, 1.05])
legend_properties = {'weight': 'bold', 'size': text_size}
ax.set_xlabel('Recall', fontsize=text_size, fontweight='bold')
ax.set_ylabel('Precision', fontsize=text_size, fontweight='bold')
ax.tick_params(axis='both', which='major', labelsize=14)
add_dots(ax, team, 'pr')
ax.legend(loc="lower left", prop=legend_properties)
fig.savefig(out_file, bbox_inches='tight')
fig.clf()
plt.close()
return mean_auc
def add_dots(ax, team, mode):
y_list, x_list = [], []
for ini in team:
sen, spe, pre, rec = get_sensitivity_specificity(team[ini], label)
print(ini + ' has sensitivity ', sen, ' has specificity ', spe)
if mode == 'roc':
y_list.append(sen)
x_list.append(1-spe)
if mode == 'pr':
y_list.append(pre)
x_list.append(rec)
print(x_list, y_list)
x_mean, x_std = np.array(x_list).mean(), np.array(x_list).std()
y_mean, y_std = np.array(y_list).mean(), np.array(y_list).std()
ax.scatter(x_list, y_list, color='b', marker='P', label='Radiologist',
linewidths=1, edgecolors='k', s=7 ** 2, zorder=10)
ax.errorbar(x_mean, y_mean, label='avg.Radiologist',
xerr=x_std, yerr=y_std, fmt='o',
markeredgewidth=1, markeredgecolor='k',
markerfacecolor='green',
markersize=7, marker='P',
elinewidth=1.5, ecolor='green',
capsize=3, zorder=11)
# draw model roc curve
csv_files = ['../../tb_log/CNN_3ways_special1_cross{}/neuro_test_eval_after.csv'.format(i) for i in range(5)]
generate_roc(csv_files, 'ADD', 'r', 'radioroc.png', team)
generate_pr(csv_files, 'ADD', 'r', 'radiopr.png', team)
| 40.494444 | 109 | 0.601317 |
4a268dde4d774af7768c59b276cd88e211f8cdec | 453 | py | Python | data/scripts/templates/object/static/structure/naboo/shared_gungan_shield_s01.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/static/structure/naboo/shared_gungan_shield_s01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/static/structure/naboo/shared_gungan_shield_s01.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Static()
result.template = "object/static/structure/naboo/shared_gungan_shield_s01.iff"
result.attribute_template_id = -1
result.stfName("obj_n","unknown_object")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 26.647059 | 79 | 0.728477 |
4a268e230d27c3ddf3c6f06ef2d0c066763505d9 | 257 | py | Python | python/tests/config/sparse_nogold.py | CitizenScienceInAstronomyWorkshop/pyIBCC | 35215648f3361689e374780182f39182eddda64f | [
"MIT"
] | 11 | 2015-01-05T14:03:00.000Z | 2020-05-31T07:22:20.000Z | python/tests/config/sparse_nogold.py | edwinrobots/pyIBCC | 9a51a21638b1e22260a3386862725926addb46c3 | [
"MIT"
] | 5 | 2015-03-10T18:24:24.000Z | 2018-04-03T12:52:40.000Z | python/tests/config/sparse_nogold.py | CitizenScienceInAstronomyWorkshop/pyIBCC | 35215648f3361689e374780182f39182eddda64f | [
"MIT"
] | 7 | 2015-03-16T11:35:08.000Z | 2018-04-24T05:45:51.000Z | import numpy as np
scores = np.array([0,1])
nScores = len(scores)
nClasses = 2
inputFile = "./data/crowdlabels_sparse_mixed.csv"
outputFile = "./data/test.out"
confMatFile = "./data/test_ibcc.mat"
nu0 = np.array([50,50])
alpha0 = np.array([[2, 1], [1, 2]])
| 25.7 | 49 | 0.677043 |
4a268e4d8f5ff36d46432fb4209de18e0dc881cd | 993 | py | Python | grid_search.py | FDKevin0/Micro-Expression-with-Deep-Learning | 617a359f264a4ccc4b6c5b1eb68c56b08d9cc397 | [
"BSD-3-Clause-Attribution"
] | null | null | null | grid_search.py | FDKevin0/Micro-Expression-with-Deep-Learning | 617a359f264a4ccc4b6c5b1eb68c56b08d9cc397 | [
"BSD-3-Clause-Attribution"
] | null | null | null | grid_search.py | FDKevin0/Micro-Expression-with-Deep-Learning | 617a359f264a4ccc4b6c5b1eb68c56b08d9cc397 | [
"BSD-3-Clause-Attribution"
] | null | null | null | import itertools
import matplotlib.pyplot as plt
import numpy as np
plt.figure()
title = "test"
classes = ['2nd Last FC', 'Last FC', '', '', ''] # VGG
classes_2 = ['1-layer LSTM(512)', '1-layer LSTM(3000)', '2-layer LSTM(1024-1024)', '2-layer LSTM(3000-1024)',
'2-layer LSTM(5000-1024)'] # LSTM
cmap = plt.cm.Blues
sample_list = np.array([[0, 0.35], [0, 0.35], [0, 0.31], [0.27, 0.31], [0, 0.27]])
plt.imshow(sample_list, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes_2)
thresh = sample_list.max() / 2.
for i, j in itertools.product(range(sample_list.shape[0]), range(sample_list.shape[1])):
plt.text(j, i, format(sample_list[i, j]),
horizontalalignment="center",
color="white" if sample_list[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
| 31.03125 | 109 | 0.660624 |
4a268e96b365ba018064f0627c0d8e79d1f7ddf5 | 9,507 | py | Python | cosmosis/runtime/parameter.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | 1 | 2021-09-15T10:10:26.000Z | 2021-09-15T10:10:26.000Z | cosmosis/runtime/parameter.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | null | null | null | cosmosis/runtime/parameter.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 07e5d308c6a8641a369a3e0b8d13c4104988cd2b | [
"BSD-2-Clause"
] | 1 | 2021-06-11T15:29:43.000Z | 2021-06-11T15:29:43.000Z | #coding: utf-8
u"""Definition of the :class:`Parameter` class."""
from __future__ import absolute_import
from builtins import object
import random
from . import config
from . import prior as priors # to avoid breaking other stuff below
import numpy as np
class Parameter(object):
u"""Distribution meta-data for a :class:`Pipeline` parameter.
While pipeline modules deal with a dictionary of scalar parameters,
the pipeline infrastructure (i.e., the :class:`Pipeline` class) must
track meta-data for each parameter: the allowable range of values the
parameter is allowed to take, and the (prior) distribution from which
the values should be sampled. These data, *but not the parameter
values themselves*, are stored in this class and kept by the
:class:`Pipeline` class in an array which parallels the
:class:`DataBlock` or :class:`Config` which holds the values.
"""
def __init__(self, section, name, start, limits=None, prior=None):
u"""Store meta-data for parameter at `(section, name)`.
If `prior` is not supplied, a uniform distribution is assumed.
If `limits` are not supplied, the parameter will be considered
fixed at the `start` value.
For the available priors, see the :module:Priors module.
"""
if not limits:
self.limits = (start, start)
else:
self.limits = limits
if (limits[1]<limits[0]):
raise ValueError("Your ini file specified that "
"parameter %s in section %s had upper limit "
"< lower limit"% (name, section))
self.section = section
self.name = name
self.start = start
if prior is None:
if limits is None:
# Parameter has no setting in the priors file and is fixed
prior = priors.DeltaFunctionPrior(start)
else:
# Parameter has no setting in the priors file and is
# variable
prior = priors.UniformPrior(limits[0], limits[1])
else:
if limits is None:
# Parameter does have setting in the priors file but is
# fixed - just fix value
prior = priors.DeltaFunctionPrior(start)
else:
# Parameter does have setting in the priors file and is
# variable - truncate prior to limits
prior = prior.truncate(limits[0], limits[1])
self.prior = prior
# TODO: check consistency of prior with limits
def __eq__(self, other):
u"""Return `True` if `other` stands for the same data block entry as us.
The same entry means that the `(section, name)` pairs are the
same. Note that this is NOT a test of equality of the
`Parameter`sʼ values!
"""
if isinstance(other, (list, tuple)):
try:
section, name = other
except ValueError:
return False
return (self.section == section and
self.name == name)
elif isinstance(other, Parameter):
return (self.section == other.section and
self.name == other.name)
elif isinstance(other, str):
return other==self.__str__()
else:
raise NotImplementedError("Tried to do parameter==something where something was not a thing I understand.")
def __str__(self):
u"""Return our ID "section--name" as stringified version."""
return self.section + "--" + self.name
def __repr__(self):
u"""Return __str__."""
return self.__str__()
def is_fixed(self):
u"""Test whether this parameter is fixed or varied.
Returns `True` if parameter is fixed to a single value by
degenerate definition of the limits, and false if there is room
for it to vary.
"""
return self.limits[0] == self.limits[1]
def is_varied(self):
u"""The opposite of :func:`is_fixed`."""
return not self.is_fixed()
def in_range(self, p):
u"""Check that the value `p` is in this parameterʼs allowed range."""
return self.limits[0] <= p <= self.limits[1]
def width(self):
u"""Return the difference between the upper and lower limits."""
return self.limits[1] - self.limits[0]
def random_point(self):
u"""Return a random number taken from the 'prior' distribution."""
return self.prior.sample()
def normalize(self, p):
u"""Return the relative position of `p` between the allowable limits: a value fron 0.0 to 1.0.
But the return value will go beyond the unit interval if `p` is
actually outside the limits.
"""
if self.is_fixed():
return 0.0
else:
return (p - self.limits[0]) / (self.limits[1] - self.limits[0])
def denormalize(self, p, raise_exception=True):
u"""Return the value at the relative position `p` between the lower and upper limits.
If `p` is outside the range [0.0, 1.0], then if `raise_exception`
is `True` a ValueError will be raised, otherwise a value extrapolated
outside the range of the limits will be returned.
"""
if 0.0 <= p <= 1.0:
return p*(self.limits[1]-self.limits[0]) + self.limits[0]
elif not raise_exception:
return p*(self.limits[1]-self.limits[0]) + self.limits[0]
else:
raise ValueError("parameter value {} for {} not normalized".format(p,self))
def denormalize_from_prior(self, p):
u"""Take `p` as a probability, and find the value which has that (cumulated) probability in the prior distribution."""
if 0.0 <= p <= 1.0:
return self.prior.denormalize_from_prior(p)
else:
raise ValueError("parameter value {} for {} not normalized".format(p,self))
def evaluate_prior(self, p):
u"""Get the probability of `p` coming from the prior distribution."""
if p < self.limits[0] or p > self.limits[1]:
return -np.inf
elif self.prior:
return self.prior(p)
else:
return 0.0
@staticmethod
def load_parameters(value_file, priors_files=None, override=None):
u"""Return array of :class:`Parameters` as directed by the input files.
Every key in the `value_file` will produce an entry in the
returned array, with the inferred starting value, and lower and
upper limits (space-separated list).
Where a key is also found in a given file in `priors_files` then a
:class:`Prior` object will be constructed and attached to the
parameter.
If `override` contains a corresponding `(section, name)` key, then
it will provide the start and limit values for the parameter,
regardless of the file contents.
"""
if isinstance(value_file, config.Inifile):
values_ini = value_file
else:
values_ini = config.Inifile(value_file)
if priors_files:
priors_data = priors.Prior.load_priors(priors_files)
else:
priors_data = {}
parameters = []
for (section, name), value in values_ini:
#override if available
if (override is not None) and (section, name) in override:
value = override[(section,name)]
# parse value line
start, limits = Parameter.parse_parameter(value)
# check for prior
pri = priors_data.get((section, name), None)
parameters.append(Parameter(section, name,
start, limits, pri))
return parameters
@staticmethod
def parse_parameter(line):
u"""Interpret a `line` of one to three numbers as the start and range of a parameter.
In all cases, the return will be a scalar start value and 2-tuple
of limits.
With one number, the start value will be taken and `None` returned
as the limits tuple, indicating that the parameter is to be kept
constant, equivalently a delta-function distribution.
With two numbers, they will be taken as the limits and the
starting value will be half way between the two.
With three numbers, they will be taken directly as the start
value, lower limit, and upper limit.
"""
try:
values = [float(p) for p in line.split()]
if len(values) == 1:
if values[0]==int(values[0]):
try:
v = int(line)
return v, None
except ValueError:
return values[0], None
return values[0], None
elif len(values) == 2:
return 0.5*(values[0]+values[1]), tuple(values)
elif len(values) == 3:
return values[1], (values[0], values[2])
else:
raise ValueError("Was expecting 1-3 values for "
"parameter value %s" % (line,))
except ValueError as error:
raise ValueError("Unable to parse numeric value for "
"parameter value %s, error %s" %
(line, error))
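# Illustrative sketch (not part of the original module) of how parse_parameter maps a
# values-file line onto (start, limits):
# >>> Parameter.parse_parameter("3.0")
# (3.0, None)
# >>> Parameter.parse_parameter("0.0 1.0")
# (0.5, (0.0, 1.0))
# >>> Parameter.parse_parameter("0.0 0.5 1.0")
# (0.5, (0.0, 1.0))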
| 33.125436 | 126 | 0.585358 |
4a268ee6de45c8bb94a82cd154a030be9032abbe | 2,221 | py | Python | benchmark/startPyquil1163.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startPyquil1163.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | benchmark/startPyquil1163.py | UCLA-SEAL/QDiff | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | [
"BSD-3-Clause"
] | null | null | null | # qubit number=5
# total number=52
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += H(1) # number=4
prog += H(2) # number=5
prog += H(3) # number=6
prog += H(0) # number=38
prog += CZ(1,0) # number=39
prog += H(0) # number=40
prog += CNOT(1,0) # number=45
prog += Z(1) # number=46
prog += H(0) # number=49
prog += CZ(1,0) # number=50
prog += H(0) # number=51
prog += H(0) # number=32
prog += CZ(1,0) # number=33
prog += H(0) # number=34
prog += X(4) # number=48
prog += H(4) # number=21
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += CNOT(3,0) # number=41
prog += Z(3) # number=42
prog += CNOT(3,0) # number=43
prog += CNOT(1,3) # number=44
prog += X(0) # number=9
prog += X(1) # number=10
prog += X(2) # number=11
prog += CNOT(0,3) # number=35
prog += X(3) # number=36
prog += CNOT(0,3) # number=37
prog += CNOT(1,0) # number=24
prog += X(0) # number=25
prog += CNOT(1,0) # number=26
prog += X(1) # number=14
prog += X(2) # number=15
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
prog += X(1) # number=22
prog += X(1) # number=23
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil1163.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| 24.955056 | 64 | 0.541648 |
4a268f383c62cc15e8aa82f261f3ee6a50c8e949 | 1,424 | py | Python | datasets/fairface/download_fairface.py | oxai/debias-vision-lang | 15ff8a00c964fbd03543b77d98ea8d61438e2840 | [
"MIT"
] | 5 | 2022-03-23T02:34:28.000Z | 2022-03-29T06:06:19.000Z | datasets/fairface/download_fairface.py | oxai/debias-vision-lang | 15ff8a00c964fbd03543b77d98ea8d61438e2840 | [
"MIT"
] | null | null | null | datasets/fairface/download_fairface.py | oxai/debias-vision-lang | 15ff8a00c964fbd03543b77d98ea8d61438e2840 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
import os
from gdown import download
import subprocess
BASE_PATH = os.path.abspath("")
OUTDATA_PATH = os.path.join(BASE_PATH, "data")
fairface_parts = {
"imgs": {
"train_val": (
"https://drive.google.com/uc?id=1g7qNOZz9wC7OfOhcPqH1EZ5bk1UFGmlL",
"train_val_imgs.zip",
),
},
"labels": {
"train": (
"https://drive.google.com/uc?id=1i1L3Yqwaio7YSOCj7ftgk8ZZchPG7dmH",
"train_labels.csv",
),
"val": (
"https://drive.google.com/uc?id=1wOdja-ezstMEp81tX1a-EYkFebev4h7D",
"val_labels.csv",
),
},
}
for part_name, part in fairface_parts.items():
for subpart_name, (subpart_url, subpart_fname) in part.items():
subpart_dir = os.path.join(OUTDATA_PATH, part_name, subpart_name)
os.makedirs(subpart_dir, exist_ok=True)
print(f"Downloading fairface {subpart_name} {part_name}...")
output_path = os.path.join(subpart_dir, subpart_fname)
download(subpart_url, output=output_path)
if subpart_fname.endswith(".zip"):
print(f"Unzipping {subpart_name} {part_name}...")
subprocess.check_output(["unzip", "-d", subpart_dir, output_path])
os.remove(output_path)
print(f"Done unzipping {subpart_name} {part_name}.")
print(f"Done with fairface {subpart_name} {part_name}.")
| 33.116279 | 79 | 0.622893 |
4a268f4f2876440b5d01522bd80e44f01536ffad | 64,529 | py | Python | ape/intcoords/nifty.py | shihchengli/APE | c2f529b9e20959824317dbc3c018ce41702d67f6 | [
"MIT"
] | 3 | 2020-04-13T02:26:34.000Z | 2022-01-04T12:02:08.000Z | ape/intcoords/nifty.py | shihchengli/APE | c2f529b9e20959824317dbc3c018ce41702d67f6 | [
"MIT"
] | 1 | 2021-08-06T11:05:51.000Z | 2021-08-06T11:05:51.000Z | ape/intcoords/nifty.py | shihchengli/APE | c2f529b9e20959824317dbc3c018ce41702d67f6 | [
"MIT"
] | 1 | 2021-08-03T08:28:25.000Z | 2021-08-03T08:28:25.000Z |
"""@package geometric.nifty Nifty functions, originally intended to be imported by any module within ForceBalance.
This file was copied over from ForceBalance to geomeTRIC in order to lighten the dependencies of the latter.
Table of Contents:
- I/O formatting
- Math: Variable manipulation, linear algebra, least squares polynomial fitting
- Pickle: Expand Python's own pickle to accommodate writing XML etree objects
- Commands for submitting things to the Work Queue
- Various file and process management functions
- Development stuff (not commonly used)
Named after the mighty Sniffy Handy Nifty (King Sniffy)
@author Lee-Ping Wang
@date 2018-03-10
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import filecmp
import itertools
import distutils.dir_util
import os
import re
import shutil
import sys
from select import select
import numpy as np
from numpy.linalg import multi_dot
# For Python 3 compatibility
try:
from itertools import zip_longest as zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import threading
from pickle import Pickler, Unpickler
import tarfile
import time
import subprocess
import math
import six # For six.string_types
from subprocess import PIPE
from collections import OrderedDict, defaultdict
#================================#
# Set up the logger #
#================================#
if "forcebalance" in __name__:
# If this module is part of ForceBalance, use the package level logger
from .output import *
package="ForceBalance"
else:
from logging import *
# Define two handlers that don't print newline characters at the end of each line
class RawStreamHandler(StreamHandler):
"""
Exactly like StreamHandler, except no newline character is printed at the end of each message.
This is done in order to ensure functions in molecule.py and nifty.py work consistently
across multiple packages.
"""
def __init__(self, stream = sys.stdout):
super(RawStreamHandler, self).__init__(stream)
def emit(self, record):
message = record.getMessage()
self.stream.write(message)
self.flush()
class RawFileHandler(FileHandler):
"""
Exactly like FileHandler, except no newline character is printed at the end of each message.
This is done in order to ensure functions in molecule.py and nifty.py work consistently
across multiple packages.
"""
def __init__(self, *args, **kwargs):
super(RawFileHandler, self).__init__(*args, **kwargs)
def emit(self, record):
if self.stream is None:
self.stream = self._open()
message = record.getMessage()
self.stream.write(message)
self.flush()
if "geometric" in __name__:
# This ensures logging behavior is consistent with the rest of geomeTRIC
logger = getLogger(__name__)
logger.setLevel(INFO)
package="geomeTRIC"
else:
logger = getLogger("NiftyLogger")
logger.setLevel(INFO)
handler = RawStreamHandler()
logger.addHandler(handler)
if __name__ == "__main__":
package = "LPW-nifty.py"
else:
package = __name__.split('.')[0]
try:
import bz2
HaveBZ2 = True
except ImportError:
logger.warning("bz2 module import failed (used in compressing or decompressing pickle files)\n")
HaveBZ2 = False
try:
import gzip
HaveGZ = True
except ImportError:
logger.warning("gzip module import failed (used in compressing or decompressing pickle files)\n")
HaveGZ = False
# The directory that this file lives in
rootdir = os.path.dirname(os.path.abspath(__file__))
# On 2020-05-07, these values were revised to CODATA 2018 values
# hartree-joule relationship 4.359 744 722 2071(85) e-18
# Hartree energy in eV 27.211 386 245 988(53)
# Avogadro constant 6.022 140 76 e23 (exact)
# molar gas constant 8.314 462 618 (exact)
# Boltzmann constant 1.380649e-23 (exact)
# Bohr radius 5.291 772 109 03(80) e-11
# speed of light in vacuum 299 792 458 (exact)
# reduced Planck's constant 1.054571817e-34 (exact)
# calorie-joule relationship 4.184 J (exact; from NIST)
## Boltzmann constant in kJ mol^-1 k^-1
kb = 0.008314462618 # Previous value: 0.0083144100163
kb_si = 1.380649e-23
# Conversion factors
bohr2ang = 0.529177210903 # Previous value: 0.529177210
ang2bohr = 1.0 / bohr2ang
au2kcal = 627.5094740630558 # Previous value: 627.5096080306
kcal2au = 1.0 / au2kcal
au2kj = 2625.4996394798254 # Previous value: 2625.5002
kj2au = 1.0 / au2kj
grad_au2gmx = 49614.75258920567 # Previous value: 49614.75960959161
grad_gmx2au = 1.0 / grad_au2gmx
au2evang = 51.422067476325886 # Previous value: 51.42209166566339
evang2au = 1.0 / au2evang
c_lightspeed = 299792458.
hbar = 1.054571817e-34
avogadro = 6.02214076e23
au_mass = 9.1093837015e-31 # Atomic unit of mass in kg
amu_mass = 1.66053906660e-27 # Atomic mass unit in kg
amu2au = amu_mass / au_mass
cm2au = 100 * c_lightspeed * (2*np.pi*hbar) * avogadro / 1000 / au2kj # Multiply to convert cm^-1 to Hartree
ambervel2au = 9.349961132249932e-04 # Multiply to go from AMBER velocity unit Ang/(1/20.455 ps) to bohr/atu.
## Q-Chem to GMX unit conversion for energy
eqcgmx = au2kj # Previous value: 2625.5002
## Q-Chem to GMX unit conversion for force
fqcgmx = -grad_au2gmx # Previous value: -49621.9
#=========================#
# I/O formatting #
#=========================#
# These functions may be useful someday but I have not tested them
# def bzip2(src):
# dest = src+'.bz2'
# if not os.path.exists(src):
# logger.error('File to be compressed does not exist')
# raise RuntimeError
# if os.path.exists(dest):
# logger.error('Archive to be created already exists')
# raise RuntimeError
# with open(src, 'rb') as input:
# with bz2.BZ2File(dest, 'wb', compresslevel=9) as output:
# copyfileobj(input, output)
# os.remove(input)
# def bunzip2(src):
# dest = re.sub('\.bz2$', '', src)
# if not os.path.exists(src):
# logger.error('File to be decompressed does not exist')
# raise RuntimeError
# if os.path.exists(dest):
# logger.error('Target path for decompression already exists')
# raise RuntimeError
# with bz2.BZ2File(src, 'rb', compresslevel=9) as input:
# with open(dest, 'wb') as output:
# copyfileobj(input, output)
# os.remove(input)
def pvec1d(vec1d, precision=1, format="e", loglevel=INFO):
"""Printout of a 1-D vector.
@param[in] vec1d a 1-D vector
"""
v2a = np.array(vec1d)
for i in range(v2a.shape[0]):
logger.log(loglevel, "%% .%i%s " % (precision, format) % v2a[i])
logger.log(loglevel, '\n')
def astr(vec1d, precision=4):
""" Write an array to a string so we can use it to key a dictionary. """
return ' '.join([("%% .%ie " % precision % i) for i in vec1d])
def pmat2d(mat2d, precision=1, format="e", loglevel=INFO):
"""Printout of a 2-D array.
@param[in] mat2d a 2-D array
"""
m2a = np.array(mat2d)
for i in range(m2a.shape[0]):
for j in range(m2a.shape[1]):
logger.log(loglevel, "%% .%i%s " % (precision, format) % m2a[i][j])
logger.log(loglevel, '\n')
def grouper(iterable, n):
"""Collect data into fixed-length chunks or blocks"""
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
lzip = [[j for j in i if j is not None] for i in list(zip_longest(*args))]
return lzip
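# Example (sketch, not part of the original module):
# >>> grouper('ABCDEFG', 3)
# [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']]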
def encode(l):
return [[len(list(group)),name] for name, group in itertools.groupby(l)]
def segments(e):
# Takes encoded input.
begins = np.array([sum([k[0] for k in e][:j]) for j,i in enumerate(e) if i[1] == 1])
lens = np.array([i[0] for i in e if i[1] == 1])
return [(i, i+j) for i, j in zip(begins, lens)]
def commadash(l):
# Formats a list like [27, 28, 29, 30, 31, 88, 89, 90, 91, 100, 136, 137, 138, 139]
# into '27-31,88-91,100,136-139
L = sorted(l)
if len(L) == 0:
return "(empty)"
L.append(L[-1]+1)
LL = [i in L for i in range(L[-1])]
return ','.join('%i-%i' % (i[0]+1,i[1]) if (i[1]-1 > i[0]) else '%i' % (i[0]+1) for i in segments(encode(LL)))
def uncommadash(s):
# Takes a string like '27-31,88-91,100,136-139'
# and turns it into a list like [26, 27, 28, 29, 30, 87, 88, 89, 90, 99, 135, 136, 137, 138]
L = []
try:
for w in s.split(','):
ws = w.split('-')
a = int(ws[0])-1
if len(ws) == 1:
b = int(ws[0])
elif len(ws) == 2:
b = int(ws[1])
else:
logger.warning("Dash-separated list cannot exceed length 2\n")
raise
if a < 0 or b <= 0 or b <= a:
if a < 0 or b <= 0:
logger.warning("Items in list cannot be zero or negative: %d %d\n" % (a, b))
else:
logger.warning("Second number cannot be smaller than first: %d %d\n" % (a, b))
raise
newL = range(a,b)
if any([i in L for i in newL]):
logger.warning("Duplicate entries found in list\n")
raise
L += newL
if sorted(L) != L:
logger.warning("List is out of order\n")
raise
except:
logger.error('Invalid string for converting to list of numbers: %s\n' % s)
raise RuntimeError
return L
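# Example (sketch, not part of the original module): commadash and uncommadash are inverses,
# with uncommadash returning zero-based indices for the one-based ranges in the string.
# >>> commadash([0, 1, 2, 5, 6, 9])
# '1-3,6-7,10'
# >>> uncommadash('1-3,6-7,10')
# [0, 1, 2, 5, 6, 9]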
def natural_sort(l):
""" Return a natural sorted list. """
# Convert a character to a digit or a lowercase character
convert = lambda text: int(text) if text.isdigit() else text.lower()
# Split string into "integer" and "noninteger" fields and convert each one
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
# Sort strings using these keys in descending order of importance, I guess.
return sorted(l, key = alphanum_key)
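# Example (sketch, not part of the original module):
# >>> natural_sort(['frame10.xyz', 'frame2.xyz', 'frame1.xyz'])
# ['frame1.xyz', 'frame2.xyz', 'frame10.xyz']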
def printcool(text,sym="#",bold=False,color=2,ansi=None,bottom='-',minwidth=50,center=True,sym2="="):
"""Cool-looking printout for slick formatting of output.
@param[in] text The string that the printout is based upon. This function
will print out the string, ANSI-colored and enclosed in the symbol
for example:\n
<tt> ################# </tt>\n
<tt> ### I am cool ### </tt>\n
<tt> ################# </tt>
@param[in] sym The surrounding symbol\n
@param[in] bold Whether to use bold print
@param[in] color The ANSI color:\n
1 red\n
2 green\n
3 yellow\n
4 blue\n
5 magenta\n
6 cyan\n
7 white
@param[in] bottom The symbol for the bottom bar
@param[in] minwidth The minimum width for the box, if the text is very short
then we insert the appropriate number of padding spaces
@return bar The bottom bar is returned for the user to print later, e.g. to mark off a 'section'
"""
def newlen(l):
return len(re.sub(r"\x1b\[[0-9;]*m","",l))
text = text.split('\n')
width = max(minwidth,max([newlen(line) for line in text]))
bar = ''.join([sym2 for i in range(width + 6)])
bar = sym + bar + sym
#bar = ''.join([sym for i in range(width + 8)])
logger.info('\r'+bar + '\n')
for ln, line in enumerate(text):
if type(center) is list: c1 = center[ln]
else: c1 = center
if c1:
padleft = ' ' * (int((width - newlen(line))/2))
else:
padleft = ''
padright = ' '* (width - newlen(line) - len(padleft))
if ansi is not None:
ansi = str(ansi)
logger.info("%s| \x1b[%sm%s " % (sym, ansi, padleft)+line+" %s\x1b[0m |%s\n" % (padright, sym))
elif color is not None:
if color == 0 and bold:
logger.info("%s| \x1b[1m%s " % (sym, padleft) + line + " %s\x1b[0m |%s\n" % (padright, sym))
elif color == 0:
logger.info("%s| %s " % (sym, padleft)+line+" %s |%s\n" % (padright, sym))
else:
logger.info("%s| \x1b[%s9%im%s " % (sym, bold and "1;" or "", color, padleft)+line+" %s\x1b[0m |%s\n" % (padright, sym))
# if color == 3 or color == 7:
# print "%s\x1b[40m\x1b[%s9%im%s" % (''.join([sym for i in range(3)]), bold and "1;" or "", color, padleft),line,"%s\x1b[0m%s" % (padright, ''.join([sym for i in range(3)]))
# else:
# print "%s\x1b[%s9%im%s" % (''.join([sym for i in range(3)]), bold and "1;" or "", color, padleft),line,"%s\x1b[0m%s" % (padright, ''.join([sym for i in range(3)]))
else:
warn_press_key("Inappropriate use of printcool")
logger.info(bar + '\n')
botbar = ''.join([bottom for i in range(width + 8)])
return botbar + '\n'
def printcool_dictionary(Dict,title="Dictionary Keys : Values",bold=False,color=2,keywidth=25,topwidth=50,center=True,leftpad=0):
"""See documentation for printcool; this is a nice way to print out keys/values in a dictionary.
The keys in the dictionary are sorted before printing out.
@param[in] dict The dictionary to be printed
@param[in] title The title of the printout
"""
if Dict is None: return
bar = printcool(title,bold=bold,color=color,minwidth=topwidth,center=center)
def magic_string(str):
# This cryptic command returns a string with the number of characters specified as a variable. :P
# Useful for printing nice-looking dictionaries, i guess.
# print "\'%%-%is\' %% '%s'" % (keywidth,str.replace("'","\\'").replace('"','\\"'))
return eval("\'%%-%is\' %% '%s'" % (keywidth,str.replace("'","\\'").replace('"','\\"')))
if isinstance(Dict, OrderedDict):
logger.info('\n'.join([' '*leftpad + "%s %s " % (magic_string(str(key)),str(Dict[key])) for key in Dict if Dict[key] is not None]))
else:
logger.info('\n'.join([' '*leftpad + "%s %s " % (magic_string(str(key)),str(Dict[key])) for key in sorted([i for i in Dict]) if Dict[key] is not None]))
logger.info("\n%s" % bar)
#===============================#
#| Math: Variable manipulation |#
#===============================#
def isint(word):
"""ONLY matches integers! If you have a decimal point? None shall pass!
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is an integer (only +/- sign followed by digits)
"""
try:
word = str(word)
except:
return False
return re.match('^[-+]?[0-9]+$', word)
def isfloat(word):
"""Matches ANY number; it can be a decimal, scientific notation, what have you
CAUTION - this will also match an integer.
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is any number
"""
try: word = str(word)
except: return False
if len(word) == 0: return False
return re.match(r'^[-+]?[0-9]*\.?[0-9]*([eEdD][-+]?[0-9]+)?$',word)
def isdecimal(word):
"""Matches things with a decimal only; see isint and isfloat.
@param[in] word String (for instance, '123', '153.0', '2.', '-354')
@return answer Boolean which specifies whether the string is a number with a decimal point
"""
try: word = str(word)
except: return False
return isfloat(word) and not isint(word)
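# Examples (sketch, not part of the original module); these helpers return regex match
# objects or booleans, so wrap them in bool() when a strict True/False is needed:
# >>> bool(isint('-354')), bool(isint('2.'))
# (True, False)
# >>> bool(isfloat('1.5e-3')), bool(isfloat('abc'))
# (True, False)
# >>> bool(isdecimal('2.')), bool(isdecimal('2'))
# (True, False)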
def floatornan(word):
"""Returns a big number if we encounter NaN.
@param[in] word The string to be converted
@return answer The string converted to a float; if not a float, return 1e10
@todo I could use suggestions for making this better.
"""
big = 1e10
if isfloat(word):
return float(word)
else:
logger.info("Setting %s to % .1e\n" % big)
return big
def col(vec):
"""
Given any list, array, or matrix, return a 1-column 2D array.
Input:
vec = The input vector that is to be made into a column
Output:
A 1-column 2D array
"""
return np.array(vec).reshape(-1, 1)
def row(vec):
"""Given any list, array, or matrix, return a 1-row 2D array.
@param[in] vec The input vector that is to be made into a row
@return answer A 1-row 2D array
"""
return np.array(vec).reshape(1, -1)
def flat(vec):
"""Given any list, array, or matrix, return a single-index array.
@param[in] vec The data to be flattened
@return answer The flattened data
"""
return np.array(vec).reshape(-1)
def est124(val):
"""Given any positive floating point value, return a value [124]e+xx
that is closest to it in the log space.
"""
log = np.log10(val)
logint = math.floor(log)
logfrac = log - logint
log1 = 0.0
log2 = 0.3010299956639812
log4 = 0.6020599913279624
log10 = 1.0
if logfrac < 0.5*(log1+log2):
fac = 1.0
elif logfrac < 0.5*(log2+log4):
fac = 2.0
elif logfrac < 0.5*(log4+log10):
fac = 4.0
else:
fac = 10.0
return fac*10**logint
def est1234568(val):
"""Given any positive floating point value, return a value [1234568]e+xx
that is closest to it in the log space. Just because I don't like seven
and nine. Call me a numberist?
"""
log = np.log10(val)
logint = math.floor(log)
logfrac = log - logint
log1 = 0.0
log2 = 0.3010299956639812
log3 = np.log10(3)
log4 = 0.6020599913279624
log5 = np.log10(5)
log6 = np.log10(6)
log8 = np.log10(8)
log10 = 1.0
if logfrac < 0.5*(log1+log2):
fac = 1.0
elif logfrac < 0.5*(log2+log3):
fac = 2.0
elif logfrac < 0.5*(log3+log4):
fac = 3.0
elif logfrac < 0.5*(log4+log5):
fac = 4.0
elif logfrac < 0.5*(log5+log6):
fac = 5.0
elif logfrac < 0.5*(log6+log8):
fac = 6.0
elif logfrac < 0.5*(log8+log10):
fac = 8.0
else:
fac = 10.0
return fac*10**logint
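# Examples (sketch, not part of the original module): values are rounded in log space to the
# nearest allowed mantissa.
# >>> est124(30.0), est124(7.0)
# (40.0, 10.0)
# >>> est1234568(7.0)
# 8.0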
def monotonic(arr, start, end):
# Make sure an array is monotonically decreasing from the start to the end.
a0 = arr[start]
i0 = start
if end > start:
i = start+1
while i < end:
if arr[i] < a0:
arr[i0:i+1] = np.linspace(a0, arr[i], i-i0+1)
a0 = arr[i]
i0 = i
i += 1
if end < start:
i = start-1
while i >= end:
if arr[i] < a0:
arr[i:i0+1] = np.linspace(arr[i], a0, i0-i+1)
a0 = arr[i]
i0 = i
i -= 1
def monotonic_decreasing(arr, start=None, end=None, verbose=False):
"""
Return the indices of an array corresponding to strictly monotonic
decreasing behavior.
Parameters
----------
arr : numpy.ndarray
Input array
start : int
Starting index (first element if None)
end : int
Ending index (last element if None)
Returns
-------
indices : numpy.ndarray
Selected indices
"""
if start is None:
start = 0
if end is None:
end = len(arr) - 1
a0 = arr[start]
idx = [start]
if verbose: logger.info("Starting @ %i : %.6f\n" % (start, arr[start]))
if end > start:
i = start+1
while i < end:
if arr[i] < a0:
a0 = arr[i]
idx.append(i)
if verbose: logger.info("Including %i : %.6f\n" % (i, arr[i]))
else:
if verbose: logger.info("Excluding %i : %.6f\n" % (i, arr[i]))
i += 1
if end < start:
i = start-1
while i >= end:
if arr[i] < a0:
a0 = arr[i]
idx.append(i)
if verbose: logger.info("Including %i : %.6f\n" % (i, arr[i]))
else:
if verbose: logger.info("Excluding %i : %.6f\n" % (i, arr[i]))
i -= 1
return np.array(idx)
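# Example (sketch, not part of the original module); note that the element at index `end`
# itself is only used as a boundary and is not examined:
# >>> monotonic_decreasing(np.array([5.0, 4.0, 4.5, 3.0, 2.0]))
# array([0, 1, 3])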
#====================================#
#| Math: Vectors and linear algebra |#
#====================================#
def orthogonalize(vec1, vec2):
"""Given two vectors vec1 and vec2, project out the component of vec1
that is along the vec2-direction.
@param[in] vec1 The projectee (i.e. output is some modified version of vec1)
@param[in] vec2 The projector (component subtracted out from vec1 is parallel to this)
@return answer A copy of vec1 but with the vec2-component projected out.
"""
v2u = vec2/np.linalg.norm(vec2)
return vec1 - v2u*np.dot(vec1, v2u)
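# Example (sketch, not part of the original module): removing the x-component of a vector.
# >>> orthogonalize(np.array([1.0, 1.0, 0.0]), np.array([2.0, 0.0, 0.0]))
# array([0., 1., 0.])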
def invert_svd(X,thresh=1e-12):
"""
Invert a matrix using singular value decomposition.
@param[in] X The 2-D NumPy array containing the matrix to be inverted
@param[in] thresh The SVD threshold; eigenvalues below this are not inverted but set to zero
@return Xt The 2-D NumPy array containing the inverted matrix
"""
u,s,vh = np.linalg.svd(X, full_matrices=0)
uh = np.transpose(u)
v = np.transpose(vh)
si = s.copy()
for i in range(s.shape[0]):
if abs(s[i]) > thresh:
si[i] = 1./s[i]
else:
si[i] = 0.0
si = np.diag(si)
Xt = multi_dot([v, si, uh])
return Xt
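# Example (sketch, not part of the original module): singular values below `thresh` are zeroed
# rather than inverted, so invert_svd(np.diag([2.0, 1e-16])) returns (approximately) the
# pseudoinverse np.diag([0.5, 0.0]) instead of blowing up along the near-singular direction.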
#==============================#
#| Linear least squares |#
#==============================#
def get_least_squares(x, y, w = None, thresh=1e-12):
"""
@code
__ __
| |
| 1 (x0) (x0)^2 (x0)^3 |
| 1 (x1) (x1)^2 (x1)^3 |
| 1 (x2) (x2)^2 (x2)^3 |
| 1 (x3) (x3)^2 (x3)^3 |
| 1 (x4) (x4)^2 (x4)^3 |
|__ __|
@endcode
@param[in] X (2-D array) An array of X-values (see above)
@param[in] Y (array) An array of Y-values (only used in getting the least squares coefficients)
@param[in] w (array) An array of weights, hopefully normalized to one.
@param[out] Beta The least-squares coefficients
@param[out] Hat The hat matrix that takes linear combinations of data y-values to give fitted y-values (weights)
@param[out] yfit The fitted y-values
@param[out] MPPI The Moore-Penrose pseudoinverse (multiply by Y to get least-squares coefficients, multiply by dY/dk to get derivatives of least-squares coefficients)
"""
# X is a 'tall' matrix.
X = np.array(x)
if len(X.shape) == 1:
X = X[:,np.newaxis]
Y = col(y)
n_x = X.shape[0]
n_fit = X.shape[1]
if n_fit > n_x:
logger.warning("Argh? It seems like this problem is underdetermined!\n")
# Build the weight matrix.
if w is not None:
if len(w) != n_x:
warn_press_key("The weight array length (%i) must be the same as the number of 'X' data points (%i)!" % len(w), n_x)
w /= np.mean(w)
WH = np.diag(w**0.5)
else:
WH = np.eye(n_x)
# Make the Moore-Penrose Pseudoinverse.
# if n_fit == n_x:
# MPPI = np.linalg.inv(WH*X)
# else:
# This resembles the formula (X'WX)^-1 X' W^1/2
MPPI = np.linalg.pinv(np.dot(WH, X))
Beta = multi_dot([MPPI, WH, Y])
Hat = multi_dot([WH, X, MPPI])
yfit = flat(np.dot(Hat, Y))
# Return three things: the least-squares coefficients, the hat matrix (turns y into yfit), and yfit
# We could get these all from MPPI, but I might get confused later on, so might as well do it here :P
return np.array(Beta).flatten(), np.array(Hat), np.array(yfit).flatten(), np.array(MPPI)
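# Example (sketch, not part of the original module): fitting y = 2 + 3x from three points.
# X = np.array([[1.0, 0.0], [1.0, 1.0], [1.0, 2.0]])   # columns are 1 and x
# Y = np.array([2.0, 5.0, 8.0])
# Beta, Hat, yfit, MPPI = get_least_squares(X, Y)
# Beta is approximately [2.0, 3.0] and yfit reproduces Y exactly for this noiseless data.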
#===========================================#
#| John's statisticalInefficiency function |#
#===========================================#
def statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, warn=True):
"""
Compute the (cross) statistical inefficiency of (two) timeseries.
Notes
The same timeseries can be used for both A_n and B_n to get the autocorrelation statistical inefficiency.
The fast method described in Ref [1] is used to compute g.
References
[1] J. D. Chodera, W. C. Swope, J. W. Pitera, C. Seok, and K. A. Dill. Use of the weighted
histogram analysis method for the analysis of simulated and parallel tempering simulations.
JCTC 3(1):26-41, 2007.
Examples
Compute statistical inefficiency of timeseries data with known correlation time.
>>> import timeseries
>>> A_n = timeseries.generateCorrelatedTimeseries(N=100000, tau=5.0)
>>> g = statisticalInefficiency(A_n, fast=True)
@param[in] A_n (required, numpy array) - A_n[n] is nth value of
timeseries A. Length is deduced from vector.
@param[in] B_n (optional, numpy array) - B_n[n] is nth value of
timeseries B. Length is deduced from vector. If supplied, the
cross-correlation of timeseries A and B will be estimated instead of
the autocorrelation of timeseries A.
@param[in] fast (optional, boolean) - if True, will use faster (but
less accurate) method to estimate correlation time, described in
Ref. [1] (default: False)
@param[in] mintime (optional, int) - minimum amount of correlation
function to compute (default: 3) The algorithm terminates after
computing the correlation time out to mintime when the correlation
    function first goes negative. Note that this time may need to be
increased if there is a strong initial negative peak in the
correlation function.
@return g The estimated statistical inefficiency (equal to 1 + 2
tau, where tau is the correlation time). We enforce g >= 1.0.
"""
# Create numpy copies of input arguments.
A_n = np.array(A_n)
if B_n is not None:
B_n = np.array(B_n)
else:
B_n = np.array(A_n)
# Get the length of the timeseries.
N = A_n.shape[0]
# Be sure A_n and B_n have the same dimensions.
if A_n.shape != B_n.shape:
logger.error('A_n and B_n must have same dimensions.\n')
raise ParameterError
# Initialize statistical inefficiency estimate with uncorrelated value.
g = 1.0
# Compute mean of each timeseries.
mu_A = A_n.mean()
mu_B = B_n.mean()
# Make temporary copies of fluctuation from mean.
dA_n = A_n.astype(np.float64) - mu_A
dB_n = B_n.astype(np.float64) - mu_B
# Compute estimator of covariance of (A,B) using estimator that will ensure C(0) = 1.
sigma2_AB = (dA_n * dB_n).mean() # standard estimator to ensure C(0) = 1
# Trap the case where this covariance is zero, and we cannot proceed.
if sigma2_AB == 0:
if warn:
logger.warning('Sample covariance sigma_AB^2 = 0 -- cannot compute statistical inefficiency\n')
return 1.0
# Accumulate the integrated correlation time by computing the normalized correlation time at
# increasing values of t. Stop accumulating if the correlation function goes negative, since
# this is unlikely to occur unless the correlation function has decayed to the point where it
# is dominated by noise and indistinguishable from zero.
t = 1
increment = 1
while t < N-1:
# compute normalized fluctuation correlation function at time t
C = sum( dA_n[0:(N-t)]*dB_n[t:N] + dB_n[0:(N-t)]*dA_n[t:N] ) / (2.0 * float(N-t) * sigma2_AB)
# Terminate if the correlation function has crossed zero and we've computed the correlation
# function at least out to 'mintime'.
if (C <= 0.0) and (t > mintime):
break
# Accumulate contribution to the statistical inefficiency.
g += 2.0 * C * (1.0 - float(t)/float(N)) * float(increment)
# Increment t and the amount by which we increment t.
t += increment
# Increase the interval if "fast mode" is on.
if fast: increment += 1
# g must be at least unity
if g < 1.0: g = 1.0
# Return the computed statistical inefficiency.
return g
def mean_stderr(ts):
"""Return mean and standard deviation of a time series ts."""
return np.mean(ts), \
np.std(ts)*np.sqrt(statisticalInefficiency(ts, warn=False)/len(ts))
# Slices a 2D array of data by column. The new array is fed into the statisticalInefficiency function.
def multiD_statisticalInefficiency(A_n, B_n=None, fast=False, mintime=3, warn=True):
n_row = A_n.shape[0]
n_col = A_n.shape[-1]
multiD_sI = np.zeros((n_row, n_col))
for col in range(n_col):
if B_n is None:
multiD_sI[:,col] = statisticalInefficiency(A_n[:,col], B_n, fast, mintime, warn)
else:
multiD_sI[:,col] = statisticalInefficiency(A_n[:,col], B_n[:,col], fast, mintime, warn)
return multiD_sI
#========================================#
#| Loading compressed pickles |#
#========================================#
def lp_dump(obj, fnm, protocol=0):
""" Write an object to a zipped pickle file specified by the path. """
# Safeguard against overwriting files? Nah.
# if os.path.exists(fnm):
# logger.error("lp_dump cannot write to an existing path")
# raise IOError
if os.path.islink(fnm):
logger.warning("Trying to write to a symbolic link %s, removing it first\n" % fnm)
os.unlink(fnm)
if HaveGZ:
f = gzip.GzipFile(fnm, 'wb')
elif HaveBZ2:
f = bz2.BZ2File(fnm, 'wb')
else:
f = open(fnm, 'wb')
Pickler(f, protocol).dump(obj)
f.close()
def lp_load(fnm):
""" Read an object from a bzipped file specified by the path. """
if not os.path.exists(fnm):
logger.error("lp_load cannot read from a path that doesn't exist (%s)" % fnm)
raise IOError
def load_uncompress():
logger.warning("Compressed file loader failed, attempting to read as uncompressed file\n")
f = open(fnm, 'rb')
try:
answer = Unpickler(f).load()
except UnicodeDecodeError:
answer = Unpickler(f, encoding='latin1').load()
f.close()
return answer
def load_bz2():
f = bz2.BZ2File(fnm, 'rb')
try:
answer = Unpickler(f).load()
except UnicodeDecodeError:
answer = Unpickler(f, encoding='latin1').load()
f.close()
return answer
def load_gz():
f = gzip.GzipFile(fnm, 'rb')
try:
answer = Unpickler(f).load()
except UnicodeDecodeError:
answer = Unpickler(f, encoding='latin1').load()
f.close()
return answer
if HaveGZ:
try:
answer = load_gz()
except:
if HaveBZ2:
try:
answer = load_bz2()
except:
answer = load_uncompress()
else:
answer = load_uncompress()
elif HaveBZ2:
try:
answer = load_bz2()
except:
answer = load_uncompress()
else:
answer = load_uncompress()
return answer
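# Usage sketch (not part of the original module): round-tripping an object through the
# compressed pickle helpers.
# lp_dump({'key': [1, 2, 3]}, 'example.p')   # gzip-compressed if gzip is available
# lp_load('example.p')                       # -> {'key': [1, 2, 3]}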
#==============================#
#| Work Queue stuff |#
#==============================#
try:
import work_queue
except:
pass
#logger.warning("Work Queue library import fail (You can't queue up jobs using Work Queue)\n")
# Global variable corresponding to the Work Queue object
WORK_QUEUE = None
# Global variable containing a mapping from target names to Work Queue task IDs
WQIDS = defaultdict(list)
def getWorkQueue():
global WORK_QUEUE
return WORK_QUEUE
def getWQIds():
global WQIDS
return WQIDS
def createWorkQueue(wq_port, debug=True, name=package):
global WORK_QUEUE
if debug:
work_queue.set_debug_flag('all')
WORK_QUEUE = work_queue.WorkQueue(port=wq_port)
WORK_QUEUE.specify_name(name)
# QYD: prefer the worker that is fastest in previous tasks
# another choice is first-come-first serve: WORK_QUEUE_SCHEDULE_FCFS
WORK_QUEUE.specify_algorithm(work_queue.WORK_QUEUE_SCHEDULE_TIME)
# QYD: We don't want to specify the following extremely long keepalive times
# because they will prevent checking "dead" workers, causing the program to wait forever
#WORK_QUEUE.specify_keepalive_timeout(8640000)
#WORK_QUEUE.specify_keepalive_interval(8640000)
def destroyWorkQueue():
# Convenience function to destroy the Work Queue objects.
global WORK_QUEUE, WQIDS
WORK_QUEUE = None
WQIDS = defaultdict(list)
def queue_up(wq, command, input_files, output_files, tag=None, tgt=None, verbose=True, print_time=60):
"""
Submit a job to the Work Queue.
@param[in] wq (Work Queue Object)
@param[in] command (string) The command to run on the remote worker.
@param[in] input_files (list of files) A list of locations of the input files.
@param[in] output_files (list of files) A list of locations of the output files.
"""
global WQIDS
task = work_queue.Task(command)
cwd = os.getcwd()
for f in input_files:
lf = os.path.join(cwd,f)
task.specify_input_file(lf,f,cache=False)
for f in output_files:
lf = os.path.join(cwd,f)
task.specify_output_file(lf,f,cache=False)
if tag is None: tag = command
task.specify_tag(tag)
task.print_time = print_time
taskid = wq.submit(task)
if verbose:
logger.info("Submitting command '%s' to the Work Queue, %staskid %i\n" % (command, "tag %s, " % tag if tag != command else "", taskid))
if tgt is not None:
WQIDS[tgt.name].append(taskid)
else:
WQIDS["None"].append(taskid)
def queue_up_src_dest(wq, command, input_files, output_files, tag=None, tgt=None, verbose=True, print_time=60):
"""
Submit a job to the Work Queue. This function is a bit fancier in that we can explicitly
specify where the input files come from, and where the output files go to.
@param[in] wq (Work Queue Object)
@param[in] command (string) The command to run on the remote worker.
@param[in] input_files (list of 2-tuples) A list of local and
remote locations of the input files.
@param[in] output_files (list of 2-tuples) A list of local and
remote locations of the output files.
"""
global WQIDS
task = work_queue.Task(command)
for f in input_files:
# print f[0], f[1]
task.specify_input_file(f[0],f[1],cache=False)
for f in output_files:
# print f[0], f[1]
task.specify_output_file(f[0],f[1],cache=False)
if tag is None: tag = command
task.specify_tag(tag)
task.print_time = print_time
taskid = wq.submit(task)
if verbose:
logger.info("Submitting command '%s' to the Work Queue, taskid %i\n" % (command, taskid))
if tgt is not None:
WQIDS[tgt.name].append(taskid)
else:
WQIDS["None"].append(taskid)
def wq_wait1(wq, wait_time=10, wait_intvl=1, print_time=60, verbose=False):
""" This function waits ten seconds to see if a task in the Work Queue has finished. """
global WQIDS
if verbose: logger.info('---\n')
if wait_intvl >= wait_time:
wait_time = wait_intvl
numwaits = 1
else:
numwaits = int(wait_time/wait_intvl)
for sec in range(numwaits):
task = wq.wait(wait_intvl)
if task:
exectime = task.cmd_execution_time/1000000
if verbose:
logger.info('A job has finished!\n')
                logger.info('Job name = ' + task.tag + ', command = ' + task.command + '\n')
                logger.info("status = %s\n" % task.status)
                logger.info("return_status = %s\n" % task.return_status)
                logger.info("result = %s\n" % task.result)
                logger.info("host = " + task.hostname + '\n')
                logger.info("execution time = %i seconds\n" % exectime)
                logger.info("total_bytes_transferred = %i\n" % task.total_bytes_transferred)
if task.result != 0:
oldid = task.id
oldhost = task.hostname
tgtname = "None"
for tnm in WQIDS:
if task.id in WQIDS[tnm]:
tgtname = tnm
WQIDS[tnm].remove(task.id)
taskid = wq.submit(task)
logger.warning("Task '%s' (task %i) failed on host %s (%i seconds), resubmitted: taskid %i\n" % (task.tag, oldid, oldhost, exectime, taskid))
WQIDS[tgtname].append(taskid)
else:
if hasattr(task, 'print_time'):
print_time = task.print_time
if exectime > print_time: # Assume that we're only interested in printing jobs that last longer than a minute.
logger.info("Task '%s' (task %i) finished successfully on host %s (%i seconds)\n" % (task.tag, task.id, task.hostname, exectime))
for tnm in WQIDS:
if task.id in WQIDS[tnm]:
WQIDS[tnm].remove(task.id)
del task
# LPW 2018-09-10 Updated to use stats fields from CCTools 6.2.10
# Please upgrade CCTools version if errors are encountered during runtime.
if verbose:
logger.info("Workers: %i init, %i idle, %i busy, %i total joined, %i total removed\n" \
% (wq.stats.workers_init, wq.stats.workers_idle, wq.stats.workers_busy, wq.stats.workers_joined, wq.stats.workers_removed))
logger.info("Tasks: %i running, %i waiting, %i dispatched, %i submitted, %i total complete\n" \
% (wq.stats.tasks_running, wq.stats.tasks_waiting, wq.stats.tasks_dispatched, wq.stats.tasks_submitted, wq.stats.tasks_done))
logger.info("Data: %i / %i kb sent/received\n" % (int(wq.stats.bytes_sent/1024), int(wq.stats.bytes_received/1024)))
else:
logger.info("\r%s : %i/%i workers busy; %i/%i jobs complete \r" %\
(time.ctime(), wq.stats.workers_busy, wq.stats.workers_connected, wq.stats.tasks_done, wq.stats.tasks_submitted))
if time.time() - wq_wait1.t0 > 900:
wq_wait1.t0 = time.time()
logger.info('\n')
wq_wait1.t0 = time.time()
def wq_wait(wq, wait_time=10, wait_intvl=10, print_time=60, verbose=False):
""" This function waits until the work queue is completely empty. """
while not wq.empty():
wq_wait1(wq, wait_time=wait_time, wait_intvl=wait_intvl, print_time=print_time, verbose=verbose)
#=====================================#
#| File and process management stuff |#
#=====================================#
def click():
""" Stopwatch function for timing. """
ans = time.time() - click.t0
click.t0 = time.time()
return ans
click.t0 = time.time()
def splitall(path):
allparts = []
while 1:
parts = os.path.split(path)
if parts[0] == path: # sentinel for absolute paths
allparts.insert(0, parts[0])
break
elif parts[1] == path: # sentinel for relative paths
allparts.insert(0, parts[1])
break
else:
path = parts[0]
allparts.insert(0, parts[1])
return allparts
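# Examples (sketch, not part of the original module):
# >>> splitall('/home/user/data/traj.xyz')
# ['/', 'home', 'user', 'data', 'traj.xyz']
# >>> splitall('data/traj.xyz')
# ['data', 'traj.xyz']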
# Back up a file.
def bak(path, dest=None, cwd=None, start=1):
oldf = path
newf = None
if cwd != None:
if not os.path.exists(cwd):
raise RuntimeError("%s is not an existing folder" % cwd)
old_d = os.getcwd()
os.chdir(cwd)
if os.path.exists(path):
dnm, fnm = os.path.split(path)
if dnm == '' : dnm = '.'
base, ext = os.path.splitext(fnm)
if dest is None:
dest = dnm
if not os.path.isdir(dest): os.makedirs(dest)
i = start
while True:
fnm = "%s_%i%s" % (base,i,ext)
newf = os.path.join(dest, fnm)
if not os.path.exists(newf): break
i += 1
logger.info("Backing up %s -> %s\n" % (oldf, newf))
shutil.move(oldf,newf)
if cwd != None:
os.chdir(old_d)
return newf
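# Usage sketch (not part of the original module): bak('input.log') renames an existing
# input.log to input_1.log (or input_2.log if that name is taken, and so on) and returns
# the new path.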
# Purpose: Given a file name and/or an extension, do one of the following:
# 1) If provided a file name, check the file, crash if not exist and err==True. Return the file name.
# 2) If list is empty but extension is provided, check if one file exists that matches
# the extension. If so, return the file name.
# 3) If list is still empty and err==True, then crash with an error.
def onefile(fnm=None, ext=None, err=False):
if fnm is None and ext is None:
if err:
logger.error("Must provide either filename or extension to onefile()")
raise RuntimeError
else:
return None
if fnm is not None:
if os.path.exists(fnm):
if os.path.dirname(os.path.abspath(fnm)) != os.getcwd():
fsrc = os.path.abspath(fnm)
fdest = os.path.join(os.getcwd(), os.path.basename(fnm))
#-----
# If the file path doesn't correspond to the current directory, copy the file over
# If the file exists in the current directory already and it's different, then crash.
#-----
if os.path.exists(fdest):
if not filecmp.cmp(fsrc, fdest):
logger.error("onefile() will not overwrite %s with %s\n" % (os.path.join(os.getcwd(), os.path.basename(fnm)),os.path.abspath(fnm)))
raise RuntimeError
else:
logger.info("\x1b[93monefile() says the files %s and %s are identical\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
else:
logger.info("\x1b[93monefile() will copy %s to %s\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
shutil.copy2(fsrc, fdest)
return os.path.basename(fnm)
elif err==True or ext is None:
logger.error("File specified by %s does not exist!" % fnm)
raise RuntimeError
elif ext is not None:
warn_once("File specified by %s does not exist - will try to autodetect .%s extension" % (fnm, ext))
answer = None
cwd = os.getcwd()
ls = [i for i in os.listdir(cwd) if i.endswith('.%s' % ext)]
if len(ls) != 1:
if err:
logger.error("Cannot find a unique file with extension .%s in %s (%i found; %s)" % (ext, cwd, len(ls), ' '.join(ls)))
raise RuntimeError
else:
warn_once("Cannot find a unique file with extension .%s in %s (%i found; %s)" %
(ext, cwd, len(ls), ' '.join(ls)), warnhash = "Found %i .%s files" % (len(ls), ext))
else:
answer = os.path.basename(ls[0])
warn_once("Autodetected %s in %s" % (answer, cwd), warnhash = "Autodetected %s" % answer)
return answer
# Purpose: Given a file name / file list and/or an extension, do one of the following:
# 1) If provided a file list, check each file in the list
# and crash if any file does not exist. Return the list.
# 2) If provided a file name, check the file and crash if the file
# does not exist. Return a length-one list with the file name.
# 3) If list is empty but extension is provided, check for files that
# match the extension. If so, append them to the list.
# 4) If list is still empty and err==True, then crash with an error.
def listfiles(fnms=None, ext=None, err=False, dnm=None):
answer = []
cwd = os.path.abspath(os.getcwd())
if dnm is not None:
os.chdir(dnm)
if isinstance(fnms, list):
for i in fnms:
if not os.path.exists(i):
logger.error('Specified %s but it does not exist' % i)
raise RuntimeError
answer.append(i)
elif isinstance(fnms, six.string_types):
if not os.path.exists(fnms):
logger.error('Specified %s but it does not exist' % fnms)
raise RuntimeError
answer = [fnms]
elif fnms is not None:
logger.info(str(fnms))
logger.error('First argument to listfiles must be a list, a string, or None')
raise RuntimeError
if answer == [] and ext is not None:
answer = [os.path.basename(i) for i in os.listdir(os.getcwd()) if i.endswith('.%s' % ext)]
if answer == [] and err:
logger.error('listfiles function failed to come up with a file! (fnms = %s ext = %s)' % (str(fnms), str(ext)))
raise RuntimeError
for ifnm, fnm in enumerate(answer):
if os.path.dirname(os.path.abspath(fnm)) != os.getcwd():
fsrc = os.path.abspath(fnm)
fdest = os.path.join(os.getcwd(), os.path.basename(fnm))
#-----
# If the file path doesn't correspond to the current directory, copy the file over
# If the file exists in the current directory already and it's different, then crash.
#-----
if os.path.exists(fdest):
if not filecmp.cmp(fsrc, fdest):
logger.error("onefile() will not overwrite %s with %s\n" % (os.path.join(os.getcwd(), os.path.basename(fnm)),os.path.abspath(fnm)))
raise RuntimeError
else:
logger.info("\x1b[93monefile() says the files %s and %s are identical\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
answer[ifnm] = os.path.basename(fnm)
else:
logger.info("\x1b[93monefile() will copy %s to %s\x1b[0m\n" % (os.path.abspath(fnm), os.getcwd()))
shutil.copy2(fsrc, fdest)
answer[ifnm] = os.path.basename(fnm)
os.chdir(cwd)
return answer
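
# Example (illustrative sketch; the 'targets' folder and '.gro' extension are
# hypothetical): gather all matching files from a subdirectory.
def _example_listfiles():
    # Returns a list of base names; raises if nothing matches, because err=True.
    return listfiles(ext='gro', err=True, dnm='targets')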
def extract_tar(tarfnm, fnms, force=False):
"""
Extract a list of files from .tar archive with any compression.
The file is extracted to the base folder of the archive.
Parameters
----------
tarfnm :
Name of the archive file.
fnms : str or list
File names to be extracted.
force : bool, optional
If true, then force extraction of file even if they already exist on disk.
"""
# Get path of tar file.
fdir = os.path.abspath(os.path.dirname(tarfnm))
# If all files exist, then return - no need to extract.
if (not force) and all([os.path.exists(os.path.join(fdir, f)) for f in fnms]): return
# If the tar file doesn't exist or isn't valid, do nothing.
if not os.path.exists(tarfnm): return
if not tarfile.is_tarfile(tarfnm): return
# Check type of fnms argument.
if isinstance(fnms, six.string_types): fnms = [fnms]
# Load the tar file.
arch = tarfile.open(tarfnm, 'r')
# Extract only the files we have (to avoid an exception).
all_members = arch.getmembers()
all_names = [f.name for f in all_members]
members = [f for f in all_members if f.name in fnms]
# Extract files to the destination.
arch.extractall(fdir, members=members)
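
# Example (illustrative sketch; archive and member names are hypothetical):
# pull two result files out of an archive, next to where the archive lives.
def _example_extract_tar():
    # Does nothing if both files already exist, unless force=True is passed.
    extract_tar('results.tar.bz2', ['energies.txt', 'forces.txt'])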
def GoInto(Dir):
if os.path.exists(Dir):
if os.path.isdir(Dir): pass
else:
logger.error("Tried to create directory %s, it exists but isn't a directory\n" % newdir)
raise RuntimeError
else:
os.makedirs(Dir)
os.chdir(Dir)
def allsplit(Dir):
# Split a directory into all directories involved.
s = os.path.split(os.path.normpath(Dir))
if s[1] == '' or s[1] == '.' : return []
return allsplit(s[0]) + [s[1]]
def Leave(Dir):
if os.path.split(os.getcwd())[1] != Dir:
logger.error("Trying to leave directory %s, but we're actually in directory %s (check your code)\n" % (Dir,os.path.split(os.getcwd())[1]))
raise RuntimeError
for i in range(len(allsplit(Dir))):
os.chdir('..')
# Dictionary containing specific error messages for specific missing files or file patterns
specific_lst = [(['mdrun','grompp','trjconv','g_energy','g_traj'], "Make sure to install GROMACS and add it to your path (or set the gmxpath option)"),
(['force.mdin', 'stage.leap'], "This file is needed for setting up AMBER force matching targets"),
(['conf.pdb', 'mono.pdb'], "This file is needed for setting up OpenMM condensed phase property targets"),
(['liquid.xyz', 'liquid.key', 'mono.xyz', 'mono.key'], "This file is needed for setting up OpenMM condensed phase property targets"),
(['dynamic', 'analyze', 'minimize', 'testgrad', 'vibrate', 'optimize', 'polarize', 'superpose'], "Make sure to install TINKER and add it to your path (or set the tinkerpath option)"),
(['runcuda.sh', 'npt.py', 'npt_tinker.py'], "This file belongs in the ForceBalance source directory, not sure why it is missing"),
(['input.xyz'], "This file is needed for TINKER molecular property targets"),
(['.*key$', '.*xyz$'], "I am guessing this file is probably needed by TINKER"),
(['.*gro$', '.*top$', '.*itp$', '.*mdp$', '.*ndx$'], "I am guessing this file is probably needed by GROMACS")
]
# Build a dictionary mapping all of the keys in the above lists to their error messages
specific_dct = dict(list(itertools.chain(*[[(j,i[1]) for j in i[0]] for i in specific_lst])))
def MissingFileInspection(fnm):
fnm = os.path.split(fnm)[1]
answer = ""
for key in specific_dct:
if answer == "":
answer += "\n"
if re.match(key, fnm):
answer += "%s\n" % specific_dct[key]
return answer
def wopen(dest, binary=False):
""" If trying to write to a symbolic link, remove it first. """
if os.path.islink(dest):
logger.warning("Trying to write to a symbolic link %s, removing it first\n" % dest)
os.unlink(dest)
if binary:
return open(dest,'wb')
else:
return open(dest,'w')
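
# Example (illustrative sketch; 'output.log' is a hypothetical name): write a
# file safely even if the destination is currently a symbolic link.
def _example_wopen():
    with wopen('output.log') as handle:
        handle.write('calculation finished\n')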
def LinkFile(src, dest, nosrcok = False):
if os.path.abspath(src) == os.path.abspath(dest): return
if os.path.exists(src):
# Remove broken link
if os.path.islink(dest) and not os.path.exists(dest):
os.remove(dest)
os.symlink(src, dest)
elif os.path.exists(dest):
if os.path.islink(dest): pass
else:
logger.error("Tried to create symbolic link %s to %s, destination exists but isn't a symbolic link\n" % (src, dest))
raise RuntimeError
else:
os.symlink(src, dest)
else:
if not nosrcok:
logger.error("Tried to create symbolic link %s to %s, but source file doesn't exist%s\n" % (src,dest,MissingFileInspection(src)))
raise RuntimeError
def CopyFile(src, dest):
if os.path.exists(src):
if os.path.exists(dest):
if os.path.islink(dest):
logger.error("Tried to copy %s to %s, destination exists but it's a symbolic link\n" % (src, dest))
raise RuntimeError
else:
shutil.copy2(src, dest)
else:
logger.error("Tried to copy %s to %s, but source file doesn't exist%s\n" % (src,dest,MissingFileInspection(src)))
raise RuntimeError
def link_dir_contents(abssrcdir, absdestdir):
for fnm in os.listdir(abssrcdir):
srcfnm = os.path.join(abssrcdir, fnm)
destfnm = os.path.join(absdestdir, fnm)
if os.path.islink(destfnm) and not os.path.exists(destfnm):
os.remove(destfnm)
if os.path.isfile(srcfnm) or (os.path.isdir(srcfnm) and fnm == 'IC'):
if not os.path.exists(destfnm):
#print "Linking %s to %s" % (srcfnm, destfnm)
os.symlink(srcfnm, destfnm)
def remove_if_exists(fnm):
""" Remove the file if it exists (doesn't return an error). """
if os.path.exists(fnm):
os.remove(fnm)
def which(fnm):
# Get the location of a file. Works only on UNIX-like file systems.
try:
return os.path.split(os.popen('which %s 2> /dev/null' % fnm).readlines()[0].strip())[0]
except:
return ''
def copy_tree_over(src, dest):
"""
Copy a source directory tree to a destination directory tree,
overwriting files as necessary. This does not require removing
the destination folder, which can reduce the number of times
shutil.rmtree needs to be called.
"""
# From https://stackoverflow.com/questions/9160227/dir-util-copy-tree-fails-after-shutil-rmtree/28055993 :
# If you copy folder, then remove it, then copy again it will fail, because it caches all the created dirs.
# To workaround you can clear _path_created before copy:
distutils.dir_util._path_created = {}
distutils.dir_util.copy_tree(src, dest)
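
# Example (illustrative sketch; directory names are hypothetical): mirror a
# scratch folder into a results folder, overwriting files in place rather
# than deleting the destination tree first.
def _example_copy_tree_over():
    copy_tree_over('scratch/iter_0000', 'results/iter_0000')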
# Thanks to cesarkawakami on #python (IRC freenode) for this code.
class LineChunker(object):
def __init__(self, callback):
self.callback = callback
self.buf = ""
def push(self, data):
# Added by LPW during Py3 compatibility; ran into some trouble decoding strings such as
# "a" with umlaut on top. I guess we can ignore these for now. For some reason,
        # Py2 never required decoding of data, I can simply add it to the string.
# self.buf += data # Old Py2 code...
self.buf += data.decode('utf-8')#errors='ignore')
self.nomnom()
def close(self):
if self.buf:
self.callback(self.buf + "\n")
def nomnom(self):
# Splits buffer by new line or carriage return, and passes
        # the split results on for processing.
while "\n" in self.buf or "\r" in self.buf:
chunk, sep, self.buf = re.split(r"(\r|\n)", self.buf, maxsplit=1)
self.callback(chunk + sep)
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
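
# Example (illustrative sketch): collect complete lines from byte chunks; the
# callback fires once per newline- or carriage-return-terminated piece.
def _example_line_chunker():
    lines = []
    with LineChunker(lines.append) as chunker:
        chunker.push(b"first line\nsecond ")
        chunker.push(b"line\n")
    return lines  # ['first line\n', 'second line\n']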
def _exec(command, print_to_screen = False, outfnm = None, logfnm = None, stdin = "", print_command = True, copy_stdout = True, copy_stderr = False, persist = False, expand_cr=False, print_error=True, rbytes=1, cwd=None, **kwargs):
"""Runs command line using subprocess, optionally returning stdout.
Options:
command (required) = Name of the command you want to execute
outfnm (optional) = Name of the output file name (overwritten if exists)
logfnm (optional) = Name of the log file name (appended if exists)
stdin (optional) = A string to be passed to stdin, as if it were typed (use newline character to mimic Enter key)
print_command = Whether to print the command.
copy_stdout = Copy the stdout stream; can set to False in strange situations
copy_stderr = Copy the stderr stream to the stdout stream; useful for GROMACS which prints out everything to stderr (argh.)
expand_cr = Whether to expand carriage returns into newlines (useful for GROMACS mdrun).
print_error = Whether to print error messages on a crash. Should be true most of the time.
persist = Continue execution even if the command gives a nonzero return code.
rbytes = Number of bytes to read from stdout and stderr streams at a time. GMX requires rbytes = 1 otherwise streams are interleaved. Higher values for speed.
"""
# Dictionary of options to be passed to the Popen object.
cmd_options={'shell':isinstance(command, six.string_types), 'stdin':PIPE, 'stdout':PIPE, 'stderr':PIPE, 'universal_newlines':expand_cr, 'cwd':cwd}
# If the current working directory is provided, the outputs will be written to there as well.
if cwd is not None:
if outfnm is not None:
outfnm = os.path.abspath(os.path.join(cwd, outfnm))
if logfnm is not None:
logfnm = os.path.abspath(os.path.join(cwd, logfnm))
# "write to file" : Function for writing some characters to the log and/or output files.
def wtf(out):
if logfnm is not None:
with open(logfnm,'ab+') as f:
f.write(out.encode('utf-8'))
f.flush()
if outfnm is not None:
with open(outfnm,'wb+' if wtf.first else 'ab+') as f:
f.write(out.encode('utf-8'))
f.flush()
wtf.first = False
wtf.first = True
# Preserve backwards compatibility; sometimes None gets passed to stdin.
if stdin is None: stdin = ""
if print_command:
logger.info("Executing process: \x1b[92m%-50s\x1b[0m%s%s%s%s\n" % (' '.join(command) if type(command) is list else command,
" In: %s" % cwd if cwd is not None else "",
" Output: %s" % outfnm if outfnm is not None else "",
" Append: %s" % logfnm if logfnm is not None else "",
(" Stdin: %s" % stdin.replace('\n','\\n')) if stdin else ""))
wtf("Executing process: %s%s\n" % (command, (" Stdin: %s" % stdin.replace('\n','\\n')) if stdin else ""))
cmd_options.update(kwargs)
p = subprocess.Popen(command, **cmd_options)
# Write the stdin stream to the process.
p.stdin.write(stdin.encode('ascii'))
p.stdin.close()
#===============================================================#
#| Read the output streams from the process. This is a bit |#
#| complicated because programs like GROMACS tend to print out |#
#| stdout as well as stderr streams, and also carriage returns |#
#| along with newline characters. |#
#===============================================================#
# stdout and stderr streams of the process.
streams = [p.stdout, p.stderr]
# Are we using Python 2?
p2 = sys.version_info.major == 2
# These are functions that take chunks of lines (read) as inputs.
def process_out(read):
if print_to_screen:
# LPW 2019-11-25: We should be writing a string, not a representation of bytes
if p2:
sys.stdout.write(str(read.encode('utf-8')))
else:
sys.stdout.write(read)
if copy_stdout:
process_out.stdout.append(read)
wtf(read)
process_out.stdout = []
def process_err(read):
if print_to_screen:
if p2:
sys.stderr.write(str(read.encode('utf-8')))
else:
sys.stderr.write(read)
process_err.stderr.append(read)
if copy_stderr:
process_out.stdout.append(read)
wtf(read)
process_err.stderr = []
# This reads the streams one byte at a time, and passes it to the LineChunker
# which splits it by either newline or carriage return.
# If the stream has ended, then it is removed from the list.
with LineChunker(process_out) as out_chunker, LineChunker(process_err) as err_chunker:
while True:
to_read, _, _ = select(streams, [], [])
for fh in to_read:
# We want to call fh.read below, but this can lead to a system hang when executing Tinker on mac.
# This hang can be avoided by running fh.read1 (with a "1" at the end), however python2.7
# doesn't implement ByteStream.read1. So, to enable python3 builds on mac to work, we pick the "best"
# fh.read function we can get
if hasattr(fh, 'read1'):
fhread = fh.read1
else:
fhread = fh.read
if fh is p.stdout:
read_nbytes = 0
read = ''.encode('utf-8')
while True:
if read_nbytes == 0:
read += fhread(rbytes)
read_nbytes += rbytes
else:
read += fhread(1)
read_nbytes += 1
if read_nbytes > 10+rbytes:
raise RuntimeError("Failed to decode stdout from external process.")
if not read:
streams.remove(p.stdout)
p.stdout.close()
break
else:
try:
out_chunker.push(read)
break
except UnicodeDecodeError:
pass
elif fh is p.stderr:
read_nbytes = 0
read = ''.encode('utf-8')
while True:
if read_nbytes == 0:
read += fhread(rbytes)
read_nbytes += rbytes
else:
read += fhread(1)
read_nbytes += 1
if read_nbytes > 10+rbytes:
raise RuntimeError("Failed to decode stderr from external process.")
if not read:
streams.remove(p.stderr)
p.stderr.close()
break
else:
try:
err_chunker.push(read)
break
except UnicodeDecodeError:
pass
else:
raise RuntimeError
if len(streams) == 0: break
p.wait()
process_out.stdout = ''.join(process_out.stdout)
process_err.stderr = ''.join(process_err.stderr)
_exec.returncode = p.returncode
if p.returncode != 0:
if process_err.stderr and print_error:
logger.warning("Received an error message:\n")
logger.warning("\n[====] \x1b[91mError Message\x1b[0m [====]\n")
logger.warning(process_err.stderr)
logger.warning("[====] \x1b[91mEnd o'Message\x1b[0m [====]\n")
if persist:
if print_error:
logger.info("%s gave a return code of %i (it may have crashed) -- carrying on\n" % (command, p.returncode))
else:
# This code (commented out) would not throw an exception, but instead exit with the returncode of the crashed program.
# sys.stderr.write("\x1b[1;94m%s\x1b[0m gave a return code of %i (\x1b[91mit may have crashed\x1b[0m)\n" % (command, p.returncode))
# sys.exit(p.returncode)
logger.error("\x1b[1;94m%s\x1b[0m gave a return code of %i (\x1b[91mit may have crashed\x1b[0m)\n\n" % (command, p.returncode))
raise RuntimeError
# Return the output in the form of a list of lines, so we can loop over it using "for line in output".
Out = process_out.stdout.split('\n')
if Out[-1] == '':
Out = Out[:-1]
return Out
_exec.returncode = None
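
# Example (illustrative sketch; the command and log file name are hypothetical):
# run a shell command quietly, appending its output to a log file and returning
# stdout as a list of lines.
def _example_exec():
    return _exec("echo hello", print_to_screen=False, logfnm="example.log")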
def warn_press_key(warning, timeout=10):
logger.warning(warning + '\n')
if sys.stdin.isatty():
logger.warning("\x1b[1;91mPress Enter or wait %i seconds (I assume no responsibility for what happens after this!)\x1b[0m\n" % timeout)
try:
rlist, wlist, xlist = select([sys.stdin], [], [], timeout)
if rlist:
sys.stdin.readline()
except: pass
def warn_once(warning, warnhash = None):
""" Prints a warning but will only do so once in a given run. """
if warnhash is None:
warnhash = warning
if warnhash in warn_once.already:
return
warn_once.already.add(warnhash)
if type(warning) is str:
logger.info(warning + '\n')
elif type(warning) is list:
for line in warning:
logger.info(line + '\n')
warn_once.already = set()
#=========================================#
#| Development stuff (not commonly used) |#
#=========================================#
def concurrent_map(func, data):
"""
    Similar to the builtin function map(), but spawn a thread for each argument
and apply `func` concurrently.
Note: unlike map(), we cannot take an iterable argument. `data` should be an
indexable sequence.
"""
N = len(data)
result = [None] * N
# wrapper to dispose the result in the right slot
def task_wrapper(i):
result[i] = func(data[i])
threads = [threading.Thread(target=task_wrapper, args=(i,)) for i in range(N)]
for t in threads:
t.start()
for t in threads:
t.join()
    return result

# =============================================================================
# File: hikari/impl/rest_bot.py  (repo: Lunarmagpie/hikari, license: MIT)
# =============================================================================

# -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021-present davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Standard implementations of a Interaction based REST-only bot."""
from __future__ import annotations
__all__: typing.List[str] = ["RESTBot"]
import asyncio
import logging
import sys
import typing
from hikari import config
from hikari import errors
from hikari import traits
from hikari.api import interaction_server as interaction_server_
from hikari.impl import entity_factory as entity_factory_impl
from hikari.impl import interaction_server as interaction_server_impl
from hikari.impl import rest as rest_impl
from hikari.internal import aio
from hikari.internal import ux
if typing.TYPE_CHECKING:
import concurrent.futures
import socket as socket_
import ssl
from hikari import applications
from hikari.api import entity_factory as entity_factory_api
from hikari.api import rest as rest_api
from hikari.api import special_endpoints
from hikari.interactions import base_interactions
from hikari.interactions import command_interactions
from hikari.interactions import component_interactions
_InteractionT_co = typing.TypeVar("_InteractionT_co", bound=base_interactions.PartialInteraction, covariant=True)
_MessageResponseBuilderT = typing.Union[
special_endpoints.InteractionDeferredBuilder, special_endpoints.InteractionMessageBuilder
]
_LOGGER: typing.Final[logging.Logger] = logging.getLogger("hikari.rest_bot")
class RESTBot(traits.RESTBotAware, interaction_server_.InteractionServer):
"""Basic implementation of an interaction based REST-only bot.
Parameters
----------
token : typing.Union[builtins.str, builtins.None, hikari.api.rest.TokenStrategy]
The bot or bearer token. If no token is to be used,
this can be undefined.
token_type : typing.Union[builtins.str, hikari.applications.TokenType, builtins.None]
The type of token in use. This should only be passed when `builtins.str`
is passed for `token`, can be `"Bot"` or `"Bearer"` and will be
defaulted to `"Bearer"` in this situation.
This should be left as `builtins.None` when either
`hikari.api.rest.TokenStrategy` or `builtins.None` is passed for
`token`.
Other Parameters
----------------
allow_color : builtins.bool
Defaulting to `builtins.True`, this will enable coloured console logs
on any platform that is a TTY.
Setting a `"CLICOLOR"` environment variable to any **non `0`** value
will override this setting.
Users should consider this an advice to the application on whether it is
safe to show colours if possible or not. Since some terminals can be
awkward or not support features in a standard way, the option to
explicitly disable this is provided. See `force_color` for an
alternative.
banner : typing.Optional[builtins.str]
The package to search for a `banner.txt` in. Defaults to `"hikari"` for
the `"hikari/banner.txt"` banner.
Setting this to `builtins.None` will disable the banner being shown.
executor : typing.Optional[concurrent.futures.Executor]
Defaults to `builtins.None`. If non-`builtins.None`, then this executor
is used instead of the `concurrent.futures.ThreadPoolExecutor` attached
to the `asyncio.AbstractEventLoop` that the bot will run on. This
executor is used primarily for file-IO.
While mainly supporting the `concurrent.futures.ThreadPoolExecutor`
implementation in the standard lib, Hikari's file handling systems
should also work with `concurrent.futures.ProcessPoolExecutor`, which
relies on all objects used in IPC to be `pickle`able. Many third-party
libraries will not support this fully though, so your mileage may vary
on using ProcessPoolExecutor implementations with this parameter.
force_color : builtins.bool
Defaults to `builtins.False`. If `builtins.True`, then this application
will __force__ colour to be used in console-based output. Specifying a
`"CLICOLOR_FORCE"` environment variable with a non-`"0"` value will
override this setting.
http_settings : typing.Optional[hikari.config.HTTPSettings]
Optional custom HTTP configuration settings to use. Allows you to
customise functionality such as whether SSL-verification is enabled,
what timeouts `aiohttp` should expect to use for requests, and behavior
regarding HTTP-redirects.
logs : typing.Union[builtins.None, LoggerLevel, typing.Dict[str, typing.Any]]
Defaults to `"INFO"`.
If `builtins.None`, then the Python logging system is left uninitialized
on startup, and you will need to configure it manually to view most
logs that are output by components of this library.
If one of the valid values in a `LoggerLevel`, then this will match a
call to `colorlog.basicConfig` (a facade for `logging.basicConfig` with
additional conduit for enabling coloured logging levels) with the
`level` kwarg matching this value.
If a `typing.Dict[str, typing.Any]` equivalent, then this value is
passed to `logging.config.dictConfig` to allow the user to provide a
specialized logging configuration of their choice. If any handlers are
defined in the dict, default handlers will not be setup.
As a side note, you can always opt to leave this on the default value
and then use an incremental `logging.config.dictConfig` that applies
any additional changes on top of the base configuration, if you prefer.
        An example of this can be found in the `Example` section.
Note that `"TRACE_HIKARI"` is a library-specific logging level
which is expected to be more verbose than `"DEBUG"`.
max_rate_limit : builtins.float
The max number of seconds to backoff for when rate limited. Anything
greater than this will instead raise an error.
This defaults to five minutes if left to the default value. This is to
stop potentially indefinitely waiting on an endpoint, which is almost
never what you want to do if giving a response to a user.
You can set this to `float("inf")` to disable this check entirely.
Note that this only applies to the REST API component that communicates
with Discord, and will not affect sharding or third party HTTP endpoints
that may be in use.
max_retries : typing.Optional[builtins.int]
Maximum number of times a request will be retried if
it fails with a `5xx` status. Defaults to 3 if set to `builtins.None`.
proxy_settings : typing.Optional[config.ProxySettings]
Custom proxy settings to use with network-layer logic
in your application to get through an HTTP-proxy.
public_key : typing.Union[builtins.str, builtins.bytes, builtins.None]
The public key to use to verify received interaction requests.
This may be a hex encoded `builtins.str` or the raw `builtins.bytes`.
If left as `builtins.None` then the client will try to work this value
out based on `token`.
rest_url : typing.Optional[builtins.str]
Defaults to the Discord REST API URL if `builtins.None`. Can be
overridden if you are attempting to point to an unofficial endpoint, or
if you are attempting to mock/stub the Discord API for any reason.
Generally you do not want to change this.
!!! note
`force_color` will always take precedence over `allow_color`.
Raises
------
builtins.ValueError
* If `token_type` is provided when a token strategy is passed for `token`.
* if `token_type` is left as `builtins.None` when a string is passed for `token`.
"""
__slots__: typing.Sequence[str] = (
"_close_event",
"_executor",
"_http_settings",
"_is_closing",
"_proxy_settings",
"_entity_factory",
"_rest",
"_server",
)
@typing.overload
def __init__(
self,
token: rest_api.TokenStrategy,
*,
public_key: typing.Union[bytes, str, None] = None,
allow_color: bool = True,
banner: typing.Optional[str] = "hikari",
executor: typing.Optional[concurrent.futures.Executor] = None,
force_color: bool = False,
http_settings: typing.Optional[config.HTTPSettings] = None,
logs: typing.Union[None, int, str, typing.Dict[str, typing.Any]] = "INFO",
max_rate_limit: float = 300.0,
max_retries: int = 3,
proxy_settings: typing.Optional[config.ProxySettings] = None,
rest_url: typing.Optional[str] = None,
) -> None:
...
@typing.overload
def __init__(
self,
token: str,
token_type: typing.Union[str, applications.TokenType],
public_key: typing.Union[bytes, str, None] = None,
*,
allow_color: bool = True,
banner: typing.Optional[str] = "hikari",
executor: typing.Optional[concurrent.futures.Executor] = None,
force_color: bool = False,
http_settings: typing.Optional[config.HTTPSettings] = None,
logs: typing.Union[None, int, str, typing.Dict[str, typing.Any]] = "INFO",
max_rate_limit: float = 300.0,
max_retries: int = 3,
proxy_settings: typing.Optional[config.ProxySettings] = None,
rest_url: typing.Optional[str] = None,
) -> None:
...
def __init__(
self,
token: typing.Union[str, rest_api.TokenStrategy],
token_type: typing.Union[applications.TokenType, str, None] = None,
public_key: typing.Union[bytes, str, None] = None,
*,
allow_color: bool = True,
banner: typing.Optional[str] = "hikari",
executor: typing.Optional[concurrent.futures.Executor] = None,
force_color: bool = False,
http_settings: typing.Optional[config.HTTPSettings] = None,
logs: typing.Union[None, int, str, typing.Dict[str, typing.Any]] = "INFO",
max_rate_limit: float = 300.0,
max_retries: int = 3,
proxy_settings: typing.Optional[config.ProxySettings] = None,
rest_url: typing.Optional[str] = None,
) -> None:
if isinstance(public_key, str):
public_key = bytes.fromhex(public_key)
# Beautification and logging
ux.init_logging(logs, allow_color, force_color)
self.print_banner(banner, allow_color, force_color)
# Settings and state
self._close_event: typing.Optional[asyncio.Event] = None
self._executor = executor
self._http_settings = http_settings if http_settings is not None else config.HTTPSettings()
self._is_closing = False
self._proxy_settings = proxy_settings if proxy_settings is not None else config.ProxySettings()
# Entity creation
self._entity_factory = entity_factory_impl.EntityFactoryImpl(self)
# RESTful API.
self._rest = rest_impl.RESTClientImpl(
cache=None,
entity_factory=self._entity_factory,
executor=self._executor,
http_settings=self._http_settings,
max_rate_limit=max_rate_limit,
max_retries=max_retries,
proxy_settings=self._proxy_settings,
rest_url=rest_url,
token=token,
token_type=token_type,
)
# IntegrationServer
self._server = interaction_server_impl.InteractionServer(
entity_factory=self._entity_factory,
public_key=public_key,
rest_client=self._rest,
)
@property
def is_alive(self) -> bool:
return self._close_event is not None
@property
def interaction_server(self) -> interaction_server_.InteractionServer:
return self._server
@property
def rest(self) -> rest_api.RESTClient:
return self._rest
@property
def entity_factory(self) -> entity_factory_api.EntityFactory:
return self._entity_factory
@property
def http_settings(self) -> config.HTTPSettings:
return self._http_settings
@property
def proxy_settings(self) -> config.ProxySettings:
return self._proxy_settings
@property
def executor(self) -> typing.Optional[concurrent.futures.Executor]:
return self._executor
@staticmethod
def print_banner(banner: typing.Optional[str], allow_color: bool, force_color: bool) -> None:
"""Print the banner.
This allows library vendors to override this behaviour, or choose to
inject their own "branding" on top of what hikari provides by default.
Normal users should not need to invoke this function, and can simply
change the `banner` argument passed to the constructor to manipulate
what is displayed.
Parameters
----------
banner : typing.Optional[builtins.str]
The package to find a `banner.txt` in.
allow_color : builtins.bool
A flag that allows advising whether to allow color if supported or
not. Can be overridden by setting a `"CLICOLOR"` environment
variable to a non-`"0"` string.
force_color : builtins.bool
A flag that allows forcing color to always be output, even if the
terminal device may not support it. Setting the `"CLICOLOR_FORCE"`
environment variable to a non-`"0"` string will override this.
!!! note
`force_color` will always take precedence over `allow_color`.
"""
ux.print_banner(banner, allow_color, force_color)
async def close(self) -> None:
if not self._close_event:
raise errors.ComponentStateConflictError("Cannot close an inactive interaction server")
if self._is_closing:
await self.join()
return
self._is_closing = True
close_event = self._close_event
await self._server.close()
await self._rest.close()
close_event.set()
self._close_event = None
async def join(self) -> None:
if not self._close_event:
raise errors.ComponentStateConflictError("Cannot wait for an inactive bot to join")
await self._close_event.wait()
async def on_interaction(self, body: bytes, signature: bytes, timestamp: bytes) -> interaction_server_.Response:
return await self._server.on_interaction(body, signature, timestamp)
def run(
self,
asyncio_debug: bool = False,
backlog: int = 128,
check_for_updates: bool = True,
close_loop: bool = True,
close_passed_executor: bool = False,
coroutine_tracking_depth: typing.Optional[int] = None,
enable_signal_handlers: bool = True,
host: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
path: typing.Optional[str] = None,
port: typing.Optional[int] = None,
reuse_address: typing.Optional[bool] = None,
reuse_port: typing.Optional[bool] = None,
shutdown_timeout: float = 60.0,
socket: typing.Optional[socket_.socket] = None,
ssl_context: typing.Optional[ssl.SSLContext] = None,
) -> None:
"""Open this REST server and block until it closes.
Other Parameters
----------------
asyncio_debug : builtins.bool
Defaults to `builtins.False`. If `builtins.True`, then debugging is
enabled for the asyncio event loop in use.
backlog : builtins.int
The number of unaccepted connections that the system will allow before
refusing new connections.
check_for_updates : builtins.bool
Defaults to `builtins.True`. If `builtins.True`, will check for
newer versions of `hikari` on PyPI and notify if available.
close_loop : builtins.bool
Defaults to `builtins.True`. If `builtins.True`, then once the bot
enters a state where all components have shut down permanently
            during application shutdown, then all asyncgens and background tasks
will be destroyed, and the event loop will be shut down.
This will wait until all `hikari`-owned `aiohttp` connectors have
had time to attempt to shut down correctly (around 250ms), and on
Python 3.9 and newer, will also shut down the default event loop
executor too.
close_passed_executor : builtins.bool
Defaults to `builtins.False`. If `builtins.True`, any custom
`concurrent.futures.Executor` passed to the constructor will be
shut down when the application terminates. This does not affect the
default executor associated with the event loop, and will not
do anything if you do not provide a custom executor to the
constructor.
coroutine_tracking_depth : typing.Optional[builtins.int]
Defaults to `builtins.None`. If an integer value and supported by
the interpreter, then this many nested coroutine calls will be
tracked with their call origin state. This allows you to determine
where non-awaited coroutines may originate from, but generally you
do not want to leave this enabled for performance reasons.
enable_signal_handlers : builtins.bool
Defaults to `builtins.True`. If on a __non-Windows__ OS with builtin
support for kernel-level POSIX signals, then setting this to
`builtins.True` will allow treating keyboard interrupts and other
OS signals to safely shut down the application as calls to
shut down the application properly rather than just killing the
process in a dirty state immediately. You should leave this disabled
unless you plan to implement your own signal handling yourself.
host : typing.Optional[typing.Union[builtins.str, aiohttp.web.HostSequence]]
TCP/IP host or a sequence of hosts for the HTTP server.
port : typing.Optional[builtins.int]
TCP/IP port for the HTTP server.
path : typing.Optional[builtins.str]
File system path for HTTP server unix domain socket.
reuse_address : typing.Optional[builtins.bool]
Tells the kernel to reuse a local socket in TIME_WAIT state, without
waiting for its natural timeout to expire.
reuse_port : typing.Optional[builtins.bool]
Tells the kernel to allow this endpoint to be bound to the same port
as other existing endpoints are also bound to.
socket : typing.Optional[socket.socket]
A pre-existing socket object to accept connections on.
shutdown_timeout : builtins.float
A delay to wait for graceful server shutdown before forcefully
disconnecting all open client sockets. This defaults to 60 seconds.
ssl_context : typing.Optional[ssl.SSLContext]
SSL context for HTTPS servers.
"""
if self.is_alive:
raise errors.ComponentStateConflictError("Cannot start a bot that's already active")
loop = aio.get_or_make_loop()
if asyncio_debug:
loop.set_debug(True)
if coroutine_tracking_depth is not None:
try:
# Provisionally defined in CPython, may be removed without notice.
sys.set_coroutine_origin_tracking_depth(coroutine_tracking_depth)
except AttributeError:
_LOGGER.log(ux.TRACE, "cannot set coroutine tracking depth for sys, no functionality exists for this")
try:
loop.run_until_complete(
self.start(
backlog=backlog,
check_for_updates=check_for_updates,
enable_signal_handlers=enable_signal_handlers,
host=host,
port=port,
path=path,
reuse_address=reuse_address,
reuse_port=reuse_port,
socket=socket,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
)
)
loop.run_until_complete(self.join())
finally:
if close_passed_executor and self._executor:
self._executor.shutdown(wait=True)
self._executor = None
if close_loop:
loop.close()
async def start(
self,
backlog: int = 128,
check_for_updates: bool = True,
enable_signal_handlers: bool = True,
host: typing.Optional[typing.Union[str, typing.Sequence[str]]] = None,
port: typing.Optional[int] = None,
path: typing.Optional[str] = None,
reuse_address: typing.Optional[bool] = None,
reuse_port: typing.Optional[bool] = None,
socket: typing.Optional[socket_.socket] = None,
shutdown_timeout: float = 60.0,
ssl_context: typing.Optional[ssl.SSLContext] = None,
) -> None:
"""Start the bot and wait for the internal server to startup then return.
Other Parameters
----------------
backlog : builtins.int
The number of unaccepted connections that the system will allow before
refusing new connections.
check_for_updates : builtins.bool
Defaults to `builtins.True`. If `builtins.True`, will check for
newer versions of `hikari` on PyPI and notify if available.
enable_signal_handlers : builtins.bool
Defaults to `builtins.True`. If on a __non-Windows__ OS with builtin
support for kernel-level POSIX signals, then setting this to
`builtins.True` will allow treating keyboard interrupts and other
OS signals to safely shut down the application as calls to
shut down the application properly rather than just killing the
process in a dirty state immediately. You should leave this disabled
unless you plan to implement your own signal handling yourself.
host : typing.Optional[typing.Union[builtins.str, aiohttp.web.HostSequence]]
TCP/IP host or a sequence of hosts for the HTTP server.
port : typing.Optional[builtins.int]
TCP/IP port for the HTTP server.
path : typing.Optional[builtins.str]
File system path for HTTP server unix domain socket.
reuse_address : typing.Optional[builtins.bool]
Tells the kernel to reuse a local socket in TIME_WAIT state, without
waiting for its natural timeout to expire.
reuse_port : typing.Optional[builtins.bool]
Tells the kernel to allow this endpoint to be bound to the same port
as other existing endpoints are also bound to.
socket : typing.Optional[socket.socket]
A pre-existing socket object to accept connections on.
shutdown_timeout : builtins.float
A delay to wait for graceful server shutdown before forcefully
disconnecting all open client sockets. This defaults to 60 seconds.
ssl_context : typing.Optional[ssl.SSLContext]
SSL context for HTTPS servers.
!!! note
For more information on the other parameters such as defaults see
AIOHTTP's documentation.
"""
if self.is_alive:
raise errors.ComponentStateConflictError("Cannot start an already active interaction server")
self._is_closing = False
self._close_event = asyncio.Event()
if check_for_updates:
asyncio.create_task(
ux.check_for_updates(self._http_settings, self._proxy_settings),
name="check for package updates",
)
self._rest.start()
await self._server.start(
backlog=backlog,
enable_signal_handlers=enable_signal_handlers,
host=host,
port=port,
path=path,
reuse_address=reuse_address,
reuse_port=reuse_port,
socket=socket,
shutdown_timeout=shutdown_timeout,
ssl_context=ssl_context,
)
@typing.overload
def get_listener(
self, interaction_type: typing.Type[command_interactions.CommandInteraction], /
) -> typing.Optional[
interaction_server_.ListenerT[command_interactions.CommandInteraction, _MessageResponseBuilderT]
]:
...
@typing.overload
def get_listener(
self, interaction_type: typing.Type[component_interactions.ComponentInteraction], /
) -> typing.Optional[
interaction_server_.ListenerT[component_interactions.ComponentInteraction, _MessageResponseBuilderT]
]:
...
@typing.overload
def get_listener(
self, interaction_type: typing.Type[_InteractionT_co], /
) -> typing.Optional[interaction_server_.ListenerT[_InteractionT_co, special_endpoints.InteractionResponseBuilder]]:
...
def get_listener(
self, interaction_type: typing.Type[_InteractionT_co], /
) -> typing.Optional[interaction_server_.ListenerT[_InteractionT_co, special_endpoints.InteractionResponseBuilder]]:
return self._server.get_listener(interaction_type)
@typing.overload
def set_listener(
self,
interaction_type: typing.Type[command_interactions.CommandInteraction],
listener: typing.Optional[
interaction_server_.ListenerT[command_interactions.CommandInteraction, _MessageResponseBuilderT]
],
/,
*,
replace: bool = False,
) -> None:
...
@typing.overload
def set_listener(
self,
interaction_type: typing.Type[component_interactions.ComponentInteraction],
listener: typing.Optional[
interaction_server_.ListenerT[component_interactions.ComponentInteraction, _MessageResponseBuilderT]
],
/,
*,
replace: bool = False,
) -> None:
...
def set_listener(
self,
interaction_type: typing.Type[_InteractionT_co],
listener: typing.Optional[
interaction_server_.ListenerT[_InteractionT_co, special_endpoints.InteractionResponseBuilder]
],
/,
*,
replace: bool = False,
) -> None:
self._server.set_listener(interaction_type, listener, replace=replace) # type: ignore
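
# ---------------------------------------------------------------------------
# Illustrative usage sketch: the environment variable names, public key and
# port below are hypothetical placeholders, not part of the hikari API.
# ---------------------------------------------------------------------------
def _example_rest_bot() -> None:
    import os

    bot = RESTBot(
        os.environ["BOT_TOKEN"],
        "Bot",
        public_key=os.environ.get("PUBLIC_KEY"),
    )
    # Serves interactions over HTTP and blocks until shutdown is requested.
    bot.run(port=8080)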

# =============================================================================
# File: standard_wallet/wallet_runnable.py  (repo: freddiecoleman/wallets,
#        license: Apache-2.0)
# =============================================================================

import asyncio
from utilities.decorations import print_leaf, divider, prompt, start_list, close_list, selectable, informative
from utilities.puzzle_utilities import puzzlehash_from_string
from chiasim.hashable import Coin, Header, HeaderHash
from chiasim.clients.ledger_sim import connect_to_ledger_sim
from chiasim.wallet.deltas import additions_for_body, removals_for_body
from chiasim.hashable.Body import Body
from binascii import hexlify
from authorised_payees import ap_wallet_a_functions
from standard_wallet.wallet import Wallet
try:
import qrcode
from PIL import Image
from pyzbar.pyzbar import decode
except ImportError:
qrcode = None
def view_funds(wallet):
print(f"Current balance: {str(wallet.temp_balance)}")
print(f"UTXOs: {[x.amount for x in wallet.temp_utxos]}")
def print_my_details(wallet):
print(f"{informative} Name: {wallet.name}")
print(f"{informative} Pubkey: {hexlify(wallet.get_next_public_key().serialize()).decode('ascii')}")
print(f"{informative} Puzzlehash: {wallet.get_new_puzzlehash()}")
def make_QR(wallet):
print(divider)
qr = qrcode.QRCode(
version=1,
error_correction=qrcode.constants.ERROR_CORRECT_H,
box_size=10,
border=4,
)
qr.add_data(f"{wallet.get_new_puzzlehash()}")
qr.make(fit=True)
img = qr.make_image()
fn = input("Input file name: ")
img.save(f"{fn}.jpg")
print(f"QR code created in '{fn}.jpg'")
def read_qr(wallet):
amount = -1
if wallet.current_balance <= 0:
print("You need some money first")
return None
print("Input filename of QR code: ")
fn = input(prompt)
decoded = decode(Image.open(fn))
puzzlehash = puzzlehash_from_string(decoded[0].data)
while amount > wallet.temp_balance or amount <= 0:
amount = input("Amount: ")
if amount == "q":
return
if not amount.isdigit():
amount = -1
amount = int(amount)
return wallet.generate_signed_transaction(amount, puzzlehash)
def set_name(wallet):
selection = input("Enter a new name: ")
wallet.set_name(selection)
async def make_payment(wallet, ledger_api):
amount = -1
if wallet.current_balance <= 0:
print("You need some money first")
return None
while amount > wallet.temp_balance or amount < 0:
amount = input(f"{prompt} Enter amount to give recipient: ")
if amount == "q":
return
if not amount.isdigit():
amount = -1
amount = int(amount)
puzhashstring = input(f"{prompt} Enter puzzlehash: ")
puzzlehash = puzzlehash_from_string(puzhashstring)
tx = wallet.generate_signed_transaction(amount, puzzlehash)
if tx is not None:
await ledger_api.push_tx(tx=tx)
async def initiate_ap(wallet, ledger_api):
if wallet.temp_balance <= 0:
print("You need some money first")
return None
# TODO: add a strict format checker to input here (and everywhere tbh)
# Actual puzzle lockup/spend
a_pubkey = wallet.get_next_public_key().serialize()
b_pubkey = input("Enter recipient's pubkey: 0x")
amount = -1
while amount > wallet.temp_balance or amount < 0:
amount = input("Enter amount to give recipient: ")
if amount == "q":
return
if not amount.isdigit():
amount = -1
amount = int(amount)
APpuzzlehash = ap_wallet_a_functions.ap_get_new_puzzlehash(
a_pubkey, b_pubkey)
spend_bundle = wallet.generate_signed_transaction(amount, APpuzzlehash)
await ledger_api.push_tx(tx=spend_bundle)
print()
print(f"{informative} AP Puzzlehash is: {str(APpuzzlehash)}")
print(f"{informative} Pubkey used is: {hexlify(a_pubkey).decode('ascii')}")
sig = str(ap_wallet_a_functions.ap_sign_output_newpuzzlehash(
APpuzzlehash, wallet, a_pubkey).sig)
print(f"{informative} Approved change signature is: {sig}")
print()
print("Give the AP wallet the following initialisation string -")
print(f"{informative} Initialisation string: {str(APpuzzlehash)}:{hexlify(a_pubkey).decode('ascii')}:{sig}")
print()
print("The next step is to approve some contacts for the AP wallet to send to.")
print("From another standard wallet press '4' to print out their puzzlehash for receiving money.")
choice = ""
while choice != "q":
singlestr = input("Enter approved puzzlehash: ")
if singlestr == "q":
return
puzzlehash = puzzlehash_from_string(singlestr)
print()
#print("Puzzle: " + str(puzzlehash))
sig = wallet.sign(puzzlehash, a_pubkey)
#print("Signature: " + str(sig.sig))
name = input("Add a name for this puzzlehash: ")
print("Give the following contact string to the AP wallet.")
print(f"{informative} Contact string for AP Wallet: {name}:{str(puzzlehash)}:{str(sig.sig)}")
choice = input("Press 'c' to continue, or 'q' to quit to menu: ")
async def process_blocks(wallet, ledger_api, last_known_header, current_header_hash):
r = await ledger_api.hash_preimage(hash=current_header_hash)
header = Header.from_bytes(r)
body = Body.from_bytes(await ledger_api.hash_preimage(hash=header.body_hash))
if header.previous_hash != last_known_header:
await process_blocks(wallet, ledger_api, last_known_header, header.previous_hash)
print(f'processing block {HeaderHash(header)}')
additions = list(additions_for_body(body))
removals = removals_for_body(body)
removals = [Coin.from_bytes(await ledger_api.hash_preimage(hash=x)) for x in removals]
wallet.notify(additions, removals)
async def farm_block(wallet, ledger_api, last_known_header):
coinbase_puzzle_hash = wallet.get_new_puzzlehash()
fees_puzzle_hash = wallet.get_new_puzzlehash()
r = await ledger_api.next_block(coinbase_puzzle_hash=coinbase_puzzle_hash, fees_puzzle_hash=fees_puzzle_hash)
header = r['header']
header_hash = HeaderHash(header)
tip = await ledger_api.get_tip()
await process_blocks(wallet,
ledger_api,
tip['genesis_hash'] if last_known_header is None else last_known_header,
header_hash)
return header_hash
async def update_ledger(wallet, ledger_api, most_recent_header):
r = await ledger_api.get_tip()
if r['tip_hash'] != most_recent_header:
await process_blocks(wallet,
ledger_api,
r['genesis_hash'] if most_recent_header is None else most_recent_header,
r['tip_hash'])
return r['tip_hash']
async def main_loop():
ledger_api = await connect_to_ledger_sim("localhost", 9868)
selection = ""
wallet = Wallet()
print(divider)
print_leaf()
r = await ledger_api.get_tip()
most_recent_header = r['genesis_hash']
while selection != "q":
print(divider)
view_funds(wallet)
print(divider)
print(start_list)
print("Select a function:")
print(f"{selectable} 1: Make Payment")
print(f"{selectable} 2: Get Update")
print(f"{selectable} 3: Farm Block")
print(f"{selectable} 4: Print my details for somebody else")
print(f"{selectable} 5: Set my wallet name")
print(f"{selectable} 6: Initiate Authorised Payee")
if qrcode:
print(f"{selectable} 7: Make QR code")
print(f"{selectable} 8: Payment to QR code")
print(f"{selectable} q: Quit")
print(close_list)
selection = input(prompt)
if selection == "1":
r = await make_payment(wallet, ledger_api)
elif selection == "2":
most_recent_header = await update_ledger(wallet, ledger_api, most_recent_header)
elif selection == "3":
most_recent_header = await farm_block(wallet, ledger_api, most_recent_header)
elif selection == "4":
print_my_details(wallet)
elif selection == "5":
set_name(wallet)
elif selection == "6":
await initiate_ap(wallet, ledger_api)
if qrcode:
if selection == "7":
make_QR(wallet)
elif selection == "8":
r = read_qr(wallet)
if r is not None:
await ledger_api.push_tx(tx=r)
def main():
run = asyncio.get_event_loop().run_until_complete
run(main_loop())
if __name__ == "__main__":
main()
"""
Copyright 2018 Chia Network Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

# =============================================================================
# File: conftest.py  (repo: s4-2/scancode-toolkit,
#        licenses: Apache-2.0, CC-BY-4.0)
# =============================================================================

#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright (c) nexB Inc. and others http://www.nexb.com/. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from os import path
from subprocess import check_output
from subprocess import STDOUT
import sys
import pytest
TRACE = True
"""
A pytest conftest.py for scancode-toolkit to control which tests to run and when.
We use custom markers and command line options to determine which test suite to run.
To retest only code impacted by a change, it tries to find which code modules
have changed using git, and selectively runs only the tests for the impacted
modules when running in a feature branch. When running on the develop or master
branches all the tests run: none are skipped.
"""
################################################################################
# pytest custom markers and CLI options
################################################################################
SLOW_TEST = 'scanslow'
VALIDATION_TEST = 'scanvalidate'
def pytest_configure(config):
config.addinivalue_line('markers', SLOW_TEST + ': Mark a ScanCode test as a slow, long running test.')
config.addinivalue_line('markers', VALIDATION_TEST + ': Mark a ScanCode test as a validation test, super slow, long running test.')
TEST_SUITES = 'standard', 'all', 'validate'
def pytest_addoption(parser):
"""
Add options used for ScanCode tests customization.
"""
group = parser.getgroup('scancode', 'Test suite options for ScanCode')
group.addoption(
'--force-py3',
dest='force_py3',
action='store_true',
default=False,
help='[DEPRECATED and ignored] Python 3 port is completed.',
)
group.addoption(
'--test-suite',
action='store',
choices=TEST_SUITES,
dest='test_suite',
default='standard',
help='Select which test suite to run: '
'"standard" runs the standard test suite designed to run reasonably fast. '
'"all" runs "standard" and "slow" (long running) tests. '
'"validate" runs all the tests. '
'Use the @pytest.mark.scanslow marker to mark a test as "slow" test. '
'Use the @pytest.mark.scanvalidate marker to mark a test as a "validate" test.'
)
group.addoption(
'--changed-only',
dest='changed_only',
action='store_true',
default=False,
help='Run only the subset of tests impacted by your changes.'
'If selected, you can provide an optional --base-branch and the '
'changes are checked against that branch. '
'Otherwise, a git diff is made against the current branch.',
)
group.addoption(
'--base-branch',
dest='base_branch',
action='store',
default=None,
help='Optional name branch of the branch diff against to find what has '
'changed if --changed-only is selected.',
)
group.addoption(
'--dry-run',
dest='dry_run',
action='store_true',
default=False,
help='Only print selected and deselected tests. Do not run anything.',
)
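
# Example invocations (illustrative only):
#
#   pytest                                   # standard, reasonably fast suite
#   pytest --test-suite=all                  # also run @pytest.mark.scanslow tests
#   pytest --test-suite=validate             # run everything, including @scanvalidate
#   pytest --changed-only --base-branch=develop --dry-run -v
#
# The last form only prints which tests would be selected against the
# "develop" branch, without running anything.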
################################################################################
# Filter which tests to collect based on our CLI options and our custom markers
################################################################################
@pytest.mark.trylast
def pytest_collection_modifyitems(config, items):
test_suite = config.getvalue('test_suite')
changed_only = config.getoption('changed_only')
base_branch = config.getoption('base_branch')
dry_run = config.getoption('dry_run')
run_everything = test_suite == 'validate'
run_slow_test = test_suite in ('all', 'validate')
tests_to_run = []
tests_to_skip = []
if changed_only:
base_branch = base_branch or get_git_branch()
impacted_modules = get_impacted_modules(base_branch) or set()
all_is_changed = not(impacted_modules)
impacted_modules_paths = ['/{}/'.format(m) for m in impacted_modules]
print()
if not impacted_modules:
print('All modules impacted')
else:
print('Run tests only in these changed modules:', ', '.join(sorted(impacted_modules)))
for item in items:
is_validate = bool(item.get_closest_marker(VALIDATION_TEST))
is_slow = bool(item.get_closest_marker(SLOW_TEST))
if is_validate and not run_everything:
tests_to_skip.append(item)
continue
if is_slow and not run_slow_test:
tests_to_skip.append(item)
continue
if changed_only and not all_is_changed:
if not is_changed(item.fspath, impacted_modules_paths):
tests_to_skip.append(item)
continue
tests_to_run.append(item)
print()
print('{} tests selected, {} tests skipped.'.format(len(tests_to_run), len(tests_to_skip)))
if dry_run:
if config.getvalue('verbose'):
print()
print('The following tests will run: ')
for test in tests_to_run:
print(test.nodeid)
print('The following tests are skipped: ')
for test in tests_to_skip:
print(test.nodeid)
tests = items[:]
items[:] = []
config.hook.pytest_deselected(items=tests)
return
items[:] = tests_to_run
config.hook.pytest_deselected(items=tests_to_skip)
################################################################################
# Retest only tests for changed modules
################################################################################
def is_changed(path_string, impacted_module_paths, _cache={}):
"""
Return True if a `path_string` is for a path that has changed.
"""
path_string = str(path_string).replace('\\', '/')
cached = _cache.get(path_string)
if cached is not None:
return cached
if path_string.endswith(('setup.py', 'conftest.py')):
return False
changed = any(p in path_string for p in impacted_module_paths)
if TRACE and changed:
print('is_changed:', path_string, changed)
_cache[path_string] = changed
return changed
def get_all_modules():
"""
Return a set of top level modules.
"""
all_modules = set([p for p in os.listdir('src') if p.endswith('code')])
if TRACE:
print()
print('get_all_modules:', all_modules)
return all_modules
def get_impacted_modules(base_branch=None):
"""
Return a set of impacted top level modules under tests or src.
Return None if all modules are impacted and should be re-tested.
"""
try:
base_branch = base_branch or get_git_branch()
changed_files = get_changed_files(base_branch)
locally_changed_files = get_changed_files(None)
changed_files.extend(locally_changed_files)
except Exception as e:
# we test it all if we cannot get proper git information
print(e)
return
changed_modules = set()
for changed_file in changed_files:
segments = [s for s in changed_file.split('/') if s]
if segments[0] == 'thirdparty':
# test it all when changing thirdparty deps
return
if segments[0] not in ('src', 'tests'):
# test none on changes to other files
continue
module = segments[1]
changed_modules.add(module)
force_full_test = [
'scancode',
'commoncode',
'typecode',
'textcode',
'plugincode',
]
if any(m in changed_modules for m in force_full_test):
        # test it all when certain widely depended-upon modules change
return
# add dependencies
if 'licensedcode' in changed_modules:
changed_modules.add('packagedcode')
changed_modules.add('summarycode')
changed_modules.add('formattedcode')
changed_modules.add('scancode')
if 'cluecode' in changed_modules:
changed_modules.add('summarycode')
changed_modules.add('formattedcode')
changed_modules.add('scancode')
if TRACE:
print()
print('get_impacted_modules:', changed_modules)
return changed_modules
def get_changed_files(base_branch='develop'):
"""
Return a list of changed file paths against the `base_branch`.
Or locally only if `base_branch` is None.
Raise an exception on errors.
"""
    # check_output/STDOUT are imported locally in get_git_branch(); do the same here for consistency
    from subprocess import check_output, STDOUT
    # this may fail with exceptions
    cmd = 'git', 'diff', '--name-only',
if base_branch:
cmd += base_branch + '...',
changed_files = check_output(cmd, stderr=STDOUT)
changed_files = changed_files.replace('\\', '/')
changed_files = changed_files.splitlines(False)
changed_files = [cf for cf in changed_files if cf.strip()]
if TRACE:
print()
print('get_changed_files:', changed_files)
return changed_files
def get_git_branch():
"""
Return the current branch or raise an exception.
"""
from subprocess import check_output, STDOUT
# this may fail with exceptions
cmd = 'git', 'status',
branch = check_output(cmd, stderr=STDOUT).splitlines(False)[0]
_, _, branch = branch.partition('On branch')
branch = branch.strip()
if TRACE:
print()
print('get_git_branch:', branch)
return branch
| 32.2589 | 135 | 0.616974 |
4a269183a0000e0ada1b599f177518c4fa2f578d | 190 | py | Python | src/stk/__init__.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | null | null | null | src/stk/__init__.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | null | null | null | src/stk/__init__.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | null | null | null | from .databases import * # noqa
from .ea import * # noqa
from .molecular import * # noqa
from .serialization import * # noqa
from .utilities import * # noqa
__version__ = '2022.5.2.1'
| 23.75 | 36 | 0.684211 |
4a26926b03c2e05fa1daf6399b54e504d8427c64 | 7,372 | py | Python | experiments/ashvin/corl2019/offpolicy/dcvae3.py | Asap7772/rail-rl-franka-eval | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | [
"MIT"
] | null | null | null | experiments/ashvin/corl2019/offpolicy/dcvae3.py | Asap7772/rail-rl-franka-eval | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | [
"MIT"
] | null | null | null | experiments/ashvin/corl2019/offpolicy/dcvae3.py | Asap7772/rail-rl-franka-eval | 4bf99072376828193d05b53cf83c7e8f4efbd3ba | [
"MIT"
] | null | null | null | import railrl.misc.hyperparameter as hyp
from experiments.murtaza.multiworld.skew_fit.reacher.generate_uniform_dataset import generate_uniform_dataset_reacher
from multiworld.envs.mujoco.cameras import sawyer_init_camera_zoomed_in, sawyer_pusher_camera_upright_v2
from railrl.launchers.launcher_util import run_experiment
from railrl.torch.grill.launcher import *
import railrl.torch.vae.vae_schedules as vae_schedules
from railrl.torch.vae.conv_vae import imsize48_default_architecture, imsize48_default_architecture_with_more_hidden_layers, imsize84_default_architecture
from railrl.launchers.arglauncher import run_variants
from railrl.torch.grill.launcher import grill_her_twin_sac_online_vae_full_experiment, grill_her_twin_sac_full_experiment
from multiworld.envs.pygame.multiobject_pygame_env import Multiobj2DEnv
from multiworld.envs.mujoco.sawyer_xyz.sawyer_push_multiobj_subset import SawyerMultiobjectEnv
from railrl.torch.vae.conditional_conv_vae import DeltaCVAE
from railrl.torch.vae.vae_trainer import DeltaCVAETrainer
def experiment(variant):
full_experiment_variant_preprocess(variant)
train_vae_and_update_variant(variant)
if __name__ == "__main__":
variant = dict(
double_algo=False,
online_vae_exploration=False,
imsize=48,
init_camera=sawyer_pusher_camera_upright_v2,
env_class=Multiobj2DEnv,
env_kwargs=dict(
render_onscreen=False,
ball_radius=1.5,
images_are_rgb=True,
show_goal=False,
fixed_colors=False,
change_background=False,
),
# env_class=Multiobj2DEnv,
# env_kwargs=dict(
# render_onscreen=False,
# ball_radius=1.5,
# images_are_rgb=True,
# show_goal=False,
# change_background=False,
# fixed_colors=False,
# ),
grill_variant=dict(
save_video=True,
custom_goal_sampler='replay_buffer',
online_vae_trainer_kwargs=dict(
beta=20,
lr=0,
),
save_video_period=100,
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
vf_kwargs=dict(
hidden_sizes=[400, 300],
),
max_path_length=50,
algo_kwargs=dict(
num_epochs=2000,
batch_size=128,
num_eval_steps_per_epoch=1000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=500,
min_num_steps_before_training=1000,
vae_training_schedule=vae_schedules.never_train,
oracle_data=False,
vae_save_period=25,
parallel_vae_train=False,
),
twin_sac_trainer_kwargs=dict(
discount=0.99,
reward_scale=1,
soft_target_tau=1e-3,
target_update_period=1, # 1
use_automatic_entropy_tuning=True,
),
replay_buffer_kwargs=dict(
start_skew_epoch=10,
max_size=int(100000),
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
exploration_rewards_type='None',
vae_priority_type='vae_prob',
priority_function_kwargs=dict(
sampling_method='importance_sampling',
decoder_distribution='gaussian_identity_variance',
# decoder_distribution='bernoulli',
num_latents_to_sample=10,
),
power=-1,
relabeling_goal_sampling_mode='vae_prior',
),
exploration_goal_sampling_mode='vae_prior',
evaluation_goal_sampling_mode='reset_of_env',
normalize=False,
render=False,
exploration_noise=0.2,
exploration_type='ou',
training_mode='train',
testing_mode='test',
reward_params=dict(
type='latent_distance',
),
observation_key='latent_observation',
desired_goal_key='latent_desired_goal',
vae_wrapped_env_kwargs=dict(
sample_from_true_prior=True,
),
algorithm='ONLINE-VAE-SAC-BERNOULLI',
),
train_vae_variant=dict(
representation_size=4,
beta=10,
beta_schedule_kwargs=dict(
x_values=(0, 1500),
y_values=(1, 50),
),
num_epochs=1500,
dump_skew_debug_plots=False,
# decoder_activation='gaussian',
decoder_activation='sigmoid',
use_linear_dynamics=False,
generate_vae_dataset_kwargs=dict(
#dataset_path="/tmp/SawyerMultiobjectEnv_N100000_sawyer_init_camera_zoomed_in_imsize48_random_oracle_split_0.npy",#"/home/khazatsky/rail/data/multiobj_two_puck.npy",
N=102000,
n_random_steps=5000,
test_p=.9,
use_cached=True,
show=False,
oracle_dataset=False,
oracle_dataset_using_set_to_goal=False,
non_presampled_goal_img_is_garbage=False,
random_rollout_data=True,
conditional_vae_dataset=True,
save_trajectories=False,
enviorment_dataset=False,
tag="offpolicy3",
),
vae_trainer_class=DeltaCVAETrainer,
vae_class=DeltaCVAE,
vae_kwargs=dict(
# dynamics_type='local',
input_channels=3,
architecture=imsize48_default_architecture_with_more_hidden_layers,
decoder_distribution='gaussian_identity_variance',
),
# TODO: why the redundancy?
algo_kwargs=dict(
start_skew_epoch=5000,
is_auto_encoder=False,
batch_size=32,
lr=1e-3,
skew_config=dict(
method='vae_prob',
power=0,
),
linearity_weight=0,
distance_weight=0,
skew_dataset=False,
priority_function_kwargs=dict(
decoder_distribution='gaussian_identity_variance',
sampling_method='importance_sampling',
# sampling_method='true_prior_sampling',
num_latents_to_sample=10,
),
use_parallel_dataloading=False,
),
save_period=25,
),
)
search_space = {
'seedid': range(1),
'train_vae_variant.representation_size': [(2, 4),], #(3 * objects, 3 * colors)
'train_vae_variant.generate_vae_dataset_kwargs.n_random_steps': [51],#, 50, 100],
}
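    # the sweeper expands this dotted-key search space over the default variant: one run config per combination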
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, run_id=0)
| 38.196891 | 181 | 0.591291 |
4a269394698f07ce658ede47e579502e273e263a | 608 | py | Python | src/python/assign_unmapped_pos.py | mozack/test | 39e6297e578d0407fd687958c32cadc2dfc5845d | [
"MIT"
] | 77 | 2016-10-24T15:46:24.000Z | 2022-03-22T11:37:56.000Z | src/python/assign_unmapped_pos.py | mozack/test | 39e6297e578d0407fd687958c32cadc2dfc5845d | [
"MIT"
] | 47 | 2017-01-31T19:18:51.000Z | 2022-03-29T09:26:25.000Z | src/python/assign_unmapped_pos.py | mozack/test | 39e6297e578d0407fd687958c32cadc2dfc5845d | [
"MIT"
] | 9 | 2017-10-03T23:31:39.000Z | 2021-04-09T09:40:07.000Z | import sys
for line in sys.stdin:
line = line.rstrip()
if line.startswith('@'):
print line
else:
fields = line.split()
flag = int(fields[1])
# 4 = read unmapped. 8 = mate unmapped
if flag & 0xC == 4 and fields[2] == '*':
# This read is unmapped and mate is mapped
chrom = fields[6]
pos = fields[7]
fields[2] = chrom
fields[3] = pos
print '\t'.join(fields)
elif flag & 0xC == 8 and fields[6] == '*':
# This read is mapped and mate is unmapped
chrom = fields[2]
pos = fields[3]
fields[6] = chrom
fields[7] = pos
print '\t'.join(fields)
else:
print line | 23.384615 | 45 | 0.600329 |
4a269504071bd64042c9dab2eb226277a23dd837 | 197 | py | Python | Numpy/Sum_and_Prod.py | idarlenearaujo/HackerRank_Python | 3fc4d0baf37fe321737f1e1ae9320d67e5ee984a | [
"MIT"
] | null | null | null | Numpy/Sum_and_Prod.py | idarlenearaujo/HackerRank_Python | 3fc4d0baf37fe321737f1e1ae9320d67e5ee984a | [
"MIT"
] | null | null | null | Numpy/Sum_and_Prod.py | idarlenearaujo/HackerRank_Python | 3fc4d0baf37fe321737f1e1ae9320d67e5ee984a | [
"MIT"
] | null | null | null | import numpy as np
N, M = map(int, input().split())
# array A
arrayA = np.array([list(map(int, input().split())) for i in range(N)])
# sum sob eixo 0
# prod
print(np.prod(np.sum(arrayA, axis=0)))
| 21.888889 | 70 | 0.639594 |
4a2695b00752b2555132503b4d536ab399172763 | 19,044 | py | Python | helm/dagster/schema/schema_tests/test_instance.py | camvogel/dagster | b4df94bf34906e7f81c973a7fdad5429ae3697ba | [
"Apache-2.0"
] | null | null | null | helm/dagster/schema/schema_tests/test_instance.py | camvogel/dagster | b4df94bf34906e7f81c973a7fdad5429ae3697ba | [
"Apache-2.0"
] | null | null | null | helm/dagster/schema/schema_tests/test_instance.py | camvogel/dagster | b4df94bf34906e7f81c973a7fdad5429ae3697ba | [
"Apache-2.0"
] | 1 | 2021-12-08T18:13:19.000Z | 2021-12-08T18:13:19.000Z | import pytest
import yaml
from dagster.core.run_coordinator import QueuedRunCoordinator
from dagster_aws.s3.compute_log_manager import S3ComputeLogManager
from dagster_azure.blob.compute_log_manager import AzureBlobComputeLogManager
from dagster_gcp.gcs.compute_log_manager import GCSComputeLogManager
from kubernetes.client import models
from schema.charts.dagster.subschema.compute_log_manager import (
AzureBlobComputeLogManager as AzureBlobComputeLogManagerModel,
)
from schema.charts.dagster.subschema.compute_log_manager import (
ComputeLogManager,
ComputeLogManagerConfig,
ComputeLogManagerType,
)
from schema.charts.dagster.subschema.compute_log_manager import (
GCSComputeLogManager as GCSComputeLogManagerModel,
)
from schema.charts.dagster.subschema.compute_log_manager import (
S3ComputeLogManager as S3ComputeLogManagerModel,
)
from schema.charts.dagster.subschema.daemon import (
ConfigurableClass,
Daemon,
QueuedRunCoordinatorConfig,
RunCoordinator,
RunCoordinatorConfig,
RunCoordinatorType,
TagConcurrencyLimit,
)
from schema.charts.dagster.subschema.postgresql import PostgreSQL, Service
from schema.charts.dagster.subschema.python_logs import PythonLogs
from schema.charts.dagster.subschema.run_launcher import (
CeleryK8sRunLauncherConfig,
K8sRunLauncherConfig,
RunLauncher,
RunLauncherConfig,
RunLauncherType,
)
from schema.charts.dagster.values import DagsterHelmValues
from schema.utils.helm_template import HelmTemplate
def to_camel_case(s: str) -> str:
components = s.split("_")
return components[0] + "".join(x.title() for x in components[1:])
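# e.g. to_camel_case("max_concurrent_runs") -> "maxConcurrentRuns"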
@pytest.fixture(name="template")
def helm_template() -> HelmTemplate:
return HelmTemplate(
helm_dir_path="helm/dagster",
subchart_paths=["charts/dagster-user-deployments"],
output="templates/configmap-instance.yaml",
model=models.V1ConfigMap,
)
@pytest.mark.parametrize("storage", ["schedule_storage", "run_storage", "event_log_storage"])
def test_storage_postgres_db_config(template: HelmTemplate, storage: str):
postgresql_username = "username"
postgresql_host = "1.1.1.1"
postgresql_database = "database"
postgresql_params = {
"connect_timeout": 10,
"application_name": "myapp",
"options": "-c synchronous_commit=off",
}
postgresql_port = 8080
helm_values = DagsterHelmValues.construct(
postgresql=PostgreSQL.construct(
postgresqlUsername=postgresql_username,
postgresqlHost=postgresql_host,
postgresqlDatabase=postgresql_database,
postgresqlParams=postgresql_params,
service=Service(port=postgresql_port),
)
)
configmaps = template.render(helm_values)
assert len(configmaps) == 1
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
assert instance[storage]
postgres_db = instance[storage]["config"]["postgres_db"]
assert postgres_db["username"] == postgresql_username
assert postgres_db["password"] == {"env": "DAGSTER_PG_PASSWORD"}
assert postgres_db["hostname"] == postgresql_host
assert postgres_db["db_name"] == postgresql_database
assert postgres_db["port"] == postgresql_port
assert postgres_db["params"] == postgresql_params
def test_k8s_run_launcher_config(template: HelmTemplate):
job_namespace = "namespace"
image_pull_policy = "Always"
load_incluster_config = True
env_config_maps = [{"name": "env_config_map"}]
env_secrets = [{"name": "secret"}]
env_vars = ["ENV_VAR"]
volume_mounts = [
{
"mountPath": "/opt/dagster/dagster_home/dagster.yaml",
"name": "dagster-instance",
"subPath": "dagster.yaml",
},
{
"name": "test-volume",
"mountPath": "/opt/dagster/test_mount_path/volume_mounted_file.yaml",
"subPath": "volume_mounted_file.yaml",
},
]
volumes = [
{"name": "test-volume", "configMap": {"name": "test-volume-configmap"}},
{"name": "test-pvc", "persistentVolumeClaim": {"claimName": "my_claim", "readOnly": False}},
]
helm_values = DagsterHelmValues.construct(
runLauncher=RunLauncher.construct(
type=RunLauncherType.K8S,
config=RunLauncherConfig.construct(
k8sRunLauncher=K8sRunLauncherConfig.construct(
jobNamespace=job_namespace,
imagePullPolicy=image_pull_policy,
loadInclusterConfig=load_incluster_config,
envConfigMaps=env_config_maps,
envSecrets=env_secrets,
envVars=env_vars,
volumeMounts=volume_mounts,
volumes=volumes,
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
run_launcher_config = instance["run_launcher"]
assert run_launcher_config["module"] == "dagster_k8s"
assert run_launcher_config["class"] == "K8sRunLauncher"
assert run_launcher_config["config"]["job_namespace"] == job_namespace
assert run_launcher_config["config"]["load_incluster_config"] == load_incluster_config
assert run_launcher_config["config"]["image_pull_policy"] == image_pull_policy
assert run_launcher_config["config"]["env_config_maps"][1:] == [
configmap["name"] for configmap in env_config_maps
]
assert run_launcher_config["config"]["env_secrets"] == [
secret["name"] for secret in env_secrets
]
assert run_launcher_config["config"]["env_vars"] == env_vars
assert run_launcher_config["config"]["volume_mounts"] == volume_mounts
assert run_launcher_config["config"]["volumes"] == volumes
def test_celery_k8s_run_launcher_config(template: HelmTemplate):
image = {"repository": "test_repo", "tag": "test_tag", "pullPolicy": "Always"}
configSource = {
"broker_transport_options": {"priority_steps": [9]},
"worker_concurrency": 1,
}
workerQueues = [
{"name": "dagster", "replicaCount": 2},
{"name": "extra-queue-1", "replicaCount": 1},
]
helm_values = DagsterHelmValues.construct(
runLauncher=RunLauncher.construct(
type=RunLauncherType.CELERY,
config=RunLauncherConfig.construct(
celeryK8sRunLauncher=CeleryK8sRunLauncherConfig.construct(
image=image,
configSource=configSource,
workerQueues=workerQueues,
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
run_launcher_config = instance["run_launcher"]
assert run_launcher_config["module"] == "dagster_celery_k8s"
assert run_launcher_config["class"] == "CeleryK8sRunLauncher"
assert run_launcher_config["config"]["config_source"] == configSource
assert run_launcher_config["config"]["broker"] == {"env": "DAGSTER_CELERY_BROKER_URL"}
assert run_launcher_config["config"]["backend"] == {"env": "DAGSTER_CELERY_BACKEND_URL"}
@pytest.mark.parametrize("enabled", [True, False])
def test_queued_run_coordinator_config(template: HelmTemplate, enabled: bool):
max_concurrent_runs = 50
tag_concurrency_limits = [TagConcurrencyLimit(key="key", value="value", limit=10)]
dequeue_interval_seconds = 50
helm_values = DagsterHelmValues.construct(
dagsterDaemon=Daemon.construct(
runCoordinator=RunCoordinator.construct(
enabled=enabled,
type=RunCoordinatorType.QUEUED,
config=RunCoordinatorConfig.construct(
queuedRunCoordinator=QueuedRunCoordinatorConfig.construct(
maxConcurrentRuns=max_concurrent_runs,
tagConcurrencyLimits=tag_concurrency_limits,
dequeueIntervalSeconds=dequeue_interval_seconds,
)
),
)
)
)
configmaps = template.render(helm_values)
assert len(configmaps) == 1
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
assert ("run_coordinator" in instance) == enabled
if enabled:
assert instance["run_coordinator"]["module"] == "dagster.core.run_coordinator"
assert instance["run_coordinator"]["class"] == "QueuedRunCoordinator"
assert instance["run_coordinator"]["config"]
run_coordinator_config = instance["run_coordinator"]["config"]
assert run_coordinator_config["max_concurrent_runs"] == max_concurrent_runs
assert run_coordinator_config["dequeue_interval_seconds"] == dequeue_interval_seconds
assert len(run_coordinator_config["tag_concurrency_limits"]) == len(tag_concurrency_limits)
assert run_coordinator_config["tag_concurrency_limits"] == [
tag_concurrency_limit.dict() for tag_concurrency_limit in tag_concurrency_limits
]
def test_custom_run_coordinator_config(template: HelmTemplate):
module = "a_module"
class_ = "Class"
config_field_one = "1"
config_field_two = "two"
config = {"config_field_one": config_field_one, "config_field_two": config_field_two}
helm_values = DagsterHelmValues.construct(
dagsterDaemon=Daemon.construct(
runCoordinator=RunCoordinator.construct(
enabled=True,
type=RunCoordinatorType.CUSTOM,
config=RunCoordinatorConfig.construct(
customRunCoordinator=ConfigurableClass.construct(
module=module,
class_=class_,
config=config,
)
),
)
)
)
configmaps = template.render(helm_values)
assert len(configmaps) == 1
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
assert instance["run_coordinator"]["module"] == module
assert instance["run_coordinator"]["class"] == class_
assert instance["run_coordinator"]["config"] == config
@pytest.mark.parametrize(
"compute_log_manager_type",
[ComputeLogManagerType.NOOP, ComputeLogManagerType.LOCAL],
ids=["noop", "local compute log manager becomes noop"],
)
def test_noop_compute_log_manager(
template: HelmTemplate, compute_log_manager_type: ComputeLogManagerType
):
helm_values = DagsterHelmValues.construct(
computeLogManager=ComputeLogManager.construct(type=compute_log_manager_type)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
compute_logs_config = instance["compute_logs"]
assert compute_logs_config["module"] == "dagster.core.storage.noop_compute_log_manager"
assert compute_logs_config["class"] == "NoOpComputeLogManager"
def test_azure_blob_compute_log_manager(template: HelmTemplate):
storage_account = "account"
container = "container"
secret_key = "secret_key"
local_dir = "/dir"
prefix = "prefix"
helm_values = DagsterHelmValues.construct(
computeLogManager=ComputeLogManager.construct(
type=ComputeLogManagerType.AZURE,
config=ComputeLogManagerConfig.construct(
azureBlobComputeLogManager=AzureBlobComputeLogManagerModel(
storageAccount=storage_account,
container=container,
secretKey=secret_key,
localDir=local_dir,
prefix=prefix,
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
compute_logs_config = instance["compute_logs"]
assert compute_logs_config["module"] == "dagster_azure.blob.compute_log_manager"
assert compute_logs_config["class"] == "AzureBlobComputeLogManager"
assert compute_logs_config["config"] == {
"storage_account": storage_account,
"container": container,
"secret_key": secret_key,
"local_dir": local_dir,
"prefix": prefix,
}
# Test all config fields in configurable class
assert compute_logs_config["config"].keys() == AzureBlobComputeLogManager.config_type().keys()
def test_gcs_compute_log_manager(template: HelmTemplate):
bucket = "bucket"
local_dir = "/dir"
prefix = "prefix"
json_credentials_envvar = "ENV_VAR"
helm_values = DagsterHelmValues.construct(
computeLogManager=ComputeLogManager.construct(
type=ComputeLogManagerType.GCS,
config=ComputeLogManagerConfig.construct(
gcsComputeLogManager=GCSComputeLogManagerModel(
bucket=bucket,
localDir=local_dir,
prefix=prefix,
jsonCredentialsEnvvar=json_credentials_envvar,
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
compute_logs_config = instance["compute_logs"]
assert compute_logs_config["module"] == "dagster_gcp.gcs.compute_log_manager"
assert compute_logs_config["class"] == "GCSComputeLogManager"
assert compute_logs_config["config"] == {
"bucket": bucket,
"local_dir": local_dir,
"prefix": prefix,
"json_credentials_envvar": json_credentials_envvar,
}
# Test all config fields in configurable class
assert compute_logs_config["config"].keys() == GCSComputeLogManager.config_type().keys()
def test_s3_compute_log_manager(template: HelmTemplate):
bucket = "bucket"
local_dir = "/dir"
prefix = "prefix"
use_ssl = True
verify = True
verify_cert_path = "/path"
endpoint_url = "endpoint.com"
skip_empty_files = True
helm_values = DagsterHelmValues.construct(
computeLogManager=ComputeLogManager.construct(
type=ComputeLogManagerType.S3,
config=ComputeLogManagerConfig.construct(
s3ComputeLogManager=S3ComputeLogManagerModel(
bucket=bucket,
localDir=local_dir,
prefix=prefix,
useSsl=use_ssl,
verify=verify,
verifyCertPath=verify_cert_path,
endpointUrl=endpoint_url,
skipEmptyFiles=skip_empty_files,
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
compute_logs_config = instance["compute_logs"]
assert compute_logs_config["module"] == "dagster_aws.s3.compute_log_manager"
assert compute_logs_config["class"] == "S3ComputeLogManager"
assert compute_logs_config["config"] == {
"bucket": bucket,
"local_dir": local_dir,
"prefix": prefix,
"use_ssl": use_ssl,
"verify": verify,
"verify_cert_path": verify_cert_path,
"endpoint_url": endpoint_url,
"skip_empty_files": skip_empty_files,
}
# Test all config fields in configurable class
assert compute_logs_config["config"].keys() == S3ComputeLogManager.config_type().keys()
def test_custom_compute_log_manager_config(template: HelmTemplate):
module = "a_module"
class_ = "Class"
config_field_one = "1"
config_field_two = "two"
config = {"config_field_one": config_field_one, "config_field_two": config_field_two}
helm_values = DagsterHelmValues.construct(
computeLogManager=ComputeLogManager.construct(
type=ComputeLogManagerType.CUSTOM,
config=ComputeLogManagerConfig.construct(
customComputeLogManager=ConfigurableClass.construct(
module=module,
class_=class_,
config=config,
)
),
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
compute_logs_config = instance["compute_logs"]
assert compute_logs_config["module"] == module
assert compute_logs_config["class"] == class_
assert compute_logs_config["config"] == config
def test_custom_python_logs_config(template: HelmTemplate):
log_level = "INFO"
managed_python_loggers = ["foo", "bar", "baz"]
handler_config = {
"handlers": {
"myHandler": {"class": "logging.StreamHandler", "level": "INFO", "stream": "foo"}
},
"formatters": {"myFormatter": {"format": "%(message)s"}},
}
helm_values = DagsterHelmValues.construct(
pythonLogs=PythonLogs.construct(
pythonLogLevel=log_level,
managedPythonLoggers=managed_python_loggers,
dagsterHandlerConfig=handler_config,
)
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
python_logs_config = instance["python_logs"]
assert python_logs_config["python_log_level"] == log_level
assert python_logs_config["managed_python_loggers"] == managed_python_loggers
assert python_logs_config["dagster_handler_config"] == handler_config
def test_custom_python_logs_missing_config(template: HelmTemplate):
helm_values = DagsterHelmValues.construct(
pythonLogs=PythonLogs.construct(pythonLogLevel="INFO")
)
configmaps = template.render(helm_values)
instance = yaml.full_load(configmaps[0].data["dagster.yaml"])
python_logs_config = instance["python_logs"]
assert python_logs_config["python_log_level"] == "INFO"
assert "managed_python_loggers" not in python_logs_config
assert "dagster_handler_config" not in python_logs_config
@pytest.mark.parametrize(
argnames=["json_schema_model", "compute_log_manager_class"],
argvalues=[
(AzureBlobComputeLogManagerModel, AzureBlobComputeLogManager),
(GCSComputeLogManagerModel, GCSComputeLogManager),
(S3ComputeLogManagerModel, S3ComputeLogManager),
],
)
def test_compute_log_manager_has_schema(json_schema_model, compute_log_manager_class):
json_schema_fields = json_schema_model.schema()["properties"].keys()
compute_log_manager_fields = set(
map(to_camel_case, compute_log_manager_class.config_type().keys())
)
assert json_schema_fields == compute_log_manager_fields
@pytest.mark.parametrize(
argnames=["json_schema_model", "run_coordinator_class"],
argvalues=[
(QueuedRunCoordinatorConfig, QueuedRunCoordinator),
],
)
def test_run_coordinator_has_schema(json_schema_model, run_coordinator_class):
json_schema_fields = json_schema_model.schema()["properties"].keys()
run_coordinator_fields = set(map(to_camel_case, run_coordinator_class.config_type().keys()))
assert json_schema_fields == run_coordinator_fields
| 37.122807 | 100 | 0.678481 |
4a26961989d72c1b67a1f0991bbb8f4a7e7d0a14 | 125 | py | Python | tools/pdffer.py | BjornFJohansson/TravisSlideProcessor | 8af3531cf5b798000974d65c1d7fa4b5e8ec6a6d | [
"BSD-3-Clause"
] | 1 | 2018-05-09T15:11:46.000Z | 2018-05-09T15:11:46.000Z | tools/pdffer.py | BjornFJohansson/TravisSlideProcessor | 8af3531cf5b798000974d65c1d7fa4b5e8ec6a6d | [
"BSD-3-Clause"
] | 2 | 2017-09-25T06:36:48.000Z | 2017-09-25T09:54:17.000Z | tools/pdffer.py | BjornFJohansson/AutoSlideProcessor | 8af3531cf5b798000974d65c1d7fa4b5e8ec6a6d | [
"BSD-3-Clause"
] | null | null | null | from nbconvert import PDFExporter
exporter = PDFExporter()
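# from_filename() returns a (body, resources) pair: the rendered PDF bytes plus a dict of conversion
# metadata; nothing is written to disk here, so save `output` yourself if you need the file.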
output, resources = exporter.from_filename('betacarotene.ipynb') | 25 | 64 | 0.816 |
4a26970a24c5a1771426ef27aedb47fee5bf3380 | 3,064 | py | Python | lib/tool_shed/tool_shed_registry.py | julozi/galaxy | 90d9da03975f254ac128747cd04532c3595d6155 | [
"CC-BY-3.0"
] | 1 | 2020-09-02T23:26:12.000Z | 2020-09-02T23:26:12.000Z | lib/tool_shed/tool_shed_registry.py | julozi/galaxy | 90d9da03975f254ac128747cd04532c3595d6155 | [
"CC-BY-3.0"
] | null | null | null | lib/tool_shed/tool_shed_registry.py | julozi/galaxy | 90d9da03975f254ac128747cd04532c3595d6155 | [
"CC-BY-3.0"
] | 1 | 2016-06-14T13:21:13.000Z | 2016-06-14T13:21:13.000Z | import logging
import xml.etree.ElementTree
from collections import OrderedDict
from six.moves.urllib import request as urlrequest
from tool_shed.util import common_util, xml_util
log = logging.getLogger(__name__)
DEFAULT_TOOL_SHEDS_CONF_XML = """<?xml version="1.0"?>
<tool_sheds>
<tool_shed name="Galaxy Main Tool Shed" url="https://toolshed.g2.bx.psu.edu/"/>
</tool_sheds>
"""
class Registry(object):
def __init__(self, config=None):
self.tool_sheds = OrderedDict()
self.tool_sheds_auth = OrderedDict()
if config:
# Parse tool_sheds_conf.xml
tree, error_message = xml_util.parse_xml(config)
if tree is None:
log.warning("Unable to load references to tool sheds defined in file %s" % str(config))
return
root = tree.getroot()
else:
root = xml.etree.ElementTree.fromstring(DEFAULT_TOOL_SHEDS_CONF_XML)
config = "internal default config"
log.debug('Loading references to tool sheds from %s' % config)
for elem in root.findall('tool_shed'):
try:
name = elem.get('name', None)
url = elem.get('url', None)
username = elem.get('user', None)
password = elem.get('pass', None)
if name and url:
self.tool_sheds[name] = url
self.tool_sheds_auth[name] = None
log.debug('Loaded reference to tool shed: %s' % name)
if name and url and username and password:
pass_mgr = urlrequest.HTTPPasswordMgrWithDefaultRealm()
pass_mgr.add_password(None, url, username, password)
self.tool_sheds_auth[name] = pass_mgr
except Exception as e:
log.warning('Error loading reference to tool shed "%s", problem: %s' % (name, str(e)))
def password_manager_for_url(self, url):
"""
        If the tool shed is using external auth, the client to the tool shed must authenticate to that
        as well. This provides access to the six.moves.urllib.request.HTTPPasswordMgrWithDefaultRealm()
        object for the url passed in.
        Following what galaxy.demo_sequencer.controllers.common does more closely might be more
        appropriate at some stage...
"""
url_sans_protocol = common_util.remove_protocol_from_tool_shed_url(url)
for shed_name, shed_url in self.tool_sheds.items():
shed_url_sans_protocol = common_util.remove_protocol_from_tool_shed_url(shed_url)
if url_sans_protocol.startswith(shed_url_sans_protocol):
return self.tool_sheds_auth[shed_name]
log.debug("Invalid url '%s' received by tool shed registry's password_manager_for_url method." % str(url))
return None
def url_auth(self, url):
password_manager = self.password_manager_for_url(url)
if password_manager is not None:
return urlrequest.HTTPBasicAuthHandler(password_manager)
| 42.555556 | 119 | 0.638708 |
4a2697ec70cc717240744fd758faeea80a81639f | 49 | py | Python | tools/deepke/name_entity_re/standard/tools/__init__.py | dfface/DoctorKG | 6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8 | [
"MIT"
] | 1 | 2022-03-26T16:08:08.000Z | 2022-03-26T16:08:08.000Z | tools/deepke/name_entity_re/standard/tools/__init__.py | dfface/DoctorKG | 6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8 | [
"MIT"
] | null | null | null | tools/deepke/name_entity_re/standard/tools/__init__.py | dfface/DoctorKG | 6bd6ebec8244a9ce0a2c8c278a704f02b9afaaf8 | [
"MIT"
] | null | null | null | from .dataset import *
from .preprocess import * | 24.5 | 25 | 0.755102 |
4a269829b489f4701fa8fe0c58344ec701026f58 | 8,816 | py | Python | Scripts/ORF_CNN_141.py | ShepherdCode/Soars2021 | ab4f304eaa09e52d260152397a6c53d7a05457da | [
"MIT"
] | 1 | 2021-08-16T14:49:04.000Z | 2021-08-16T14:49:04.000Z | Scripts/ORF_CNN_141.py | ShepherdCode/Soars2021 | ab4f304eaa09e52d260152397a6c53d7a05457da | [
"MIT"
] | null | null | null | Scripts/ORF_CNN_141.py | ShepherdCode/Soars2021 | ab4f304eaa09e52d260152397a6c53d7a05457da | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # ORF recognition by CNN
#
# Use variable number of bases between START and STOP. Thus, ncRNA will have its STOP out-of-frame or too close to the START, and pcRNA will have its STOP in-frame and far from the START.
# In[109]:
import time
t = time.time()
time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
# In[110]:
PC_SEQUENCES=10000 # how many protein-coding sequences
NC_SEQUENCES=10000 # how many non-coding sequences
PC_TESTS=1000
NC_TESTS=1000
RNA_LEN=32 # how long is each sequence
CDS_LEN=16 # min CDS len to be coding
ALPHABET=4 # how many different letters are possible
INPUT_SHAPE_2D = (RNA_LEN,ALPHABET,1) # Conv2D needs 3D inputs
INPUT_SHAPE = (RNA_LEN,ALPHABET) # Conv1D needs 2D inputs
FILTERS = 16 # how many different patterns the model looks for
NEURONS = 16
DROP_RATE = 0.8
WIDTH = 3 # how wide each pattern is, in bases
STRIDE_2D = (1,1) # For Conv2D how far in each direction
STRIDE = 1 # For Conv1D, how far between pattern matches, in bases
EPOCHS=100 # how many times to train on all the data
SPLITS=3 # SPLITS=3 means train on 2/3 and validate on 1/3
FOLDS=3 # train the model this many times (range 1 to SPLITS)
# In[111]:
import sys
IN_COLAB = False
try:
from google.colab import drive
IN_COLAB = True
except:
pass
if IN_COLAB:
print("On Google CoLab, mount cloud-local file, get our code from GitHub.")
PATH='/content/drive/'
#drive.mount(PATH,force_remount=True) # hardly ever need this
#drive.mount(PATH) # Google will require login credentials
DATAPATH=PATH+'My Drive/data/' # must end in "/"
import requests
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_describe.py')
with open('RNA_describe.py', 'w') as f:
f.write(r.text)
from RNA_describe import ORF_counter
from RNA_describe import Random_Base_Oracle
r = requests.get('https://raw.githubusercontent.com/ShepherdCode/Soars2021/master/SimTools/RNA_prep.py')
with open('RNA_prep.py', 'w') as f:
f.write(r.text)
from RNA_prep import prepare_inputs_len_x_alphabet
else:
print("CoLab not working. On my PC, use relative paths.")
DATAPATH='data/' # must end in "/"
sys.path.append("..") # append parent dir in order to use sibling dirs
from SimTools.RNA_describe import ORF_counter,Random_Base_Oracle
from SimTools.RNA_prep import prepare_inputs_len_x_alphabet
MODELPATH="BestModel" # saved on cloud instance and lost after logout
#MODELPATH=DATAPATH+MODELPATH # saved on Google Drive but requires login
# In[112]:
from os import listdir
import csv
from zipfile import ZipFile
import numpy as np
import pandas as pd
from scipy import stats # mode
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from keras.models import Sequential
from keras.layers import Dense,Embedding,Dropout
from keras.layers import Conv1D,Conv2D
from keras.layers import Flatten,MaxPooling1D,MaxPooling2D
from keras.losses import BinaryCrossentropy
# tf.keras.losses.BinaryCrossentropy
import matplotlib.pyplot as plt
from matplotlib import colors
mycmap = colors.ListedColormap(['red','blue']) # list color for label 0 then 1
np.set_printoptions(precision=2)
# In[113]:
rbo=Random_Base_Oracle(RNA_LEN,True)
pc_all,nc_all = rbo.get_partitioned_sequences(CDS_LEN,10) # just testing
pc_all,nc_all = rbo.get_partitioned_sequences(CDS_LEN,PC_SEQUENCES+PC_TESTS)
print("Use",len(pc_all),"PC seqs")
print("Use",len(nc_all),"NC seqs")
# In[114]:
# Describe the sequences
def describe_sequences(list_of_seq):
oc = ORF_counter()
num_seq = len(list_of_seq)
rna_lens = np.zeros(num_seq)
orf_lens = np.zeros(num_seq)
for i in range(0,num_seq):
rna_len = len(list_of_seq[i])
rna_lens[i] = rna_len
oc.set_sequence(list_of_seq[i])
orf_len = oc.get_max_orf_len()
orf_lens[i] = orf_len
print ("Average RNA length:",rna_lens.mean())
print ("Average ORF length:",orf_lens.mean())
print("Simulated sequences prior to adjustment:")
print("PC seqs")
describe_sequences(pc_all)
print("NC seqs")
describe_sequences(nc_all)
# In[115]:
pc_train=pc_all[:PC_SEQUENCES]
nc_train=nc_all[:NC_SEQUENCES]
pc_test=pc_all[PC_SEQUENCES:]
nc_test=nc_all[NC_SEQUENCES:]
# In[116]:
# Use code from our SimTools library.
X,y = prepare_inputs_len_x_alphabet(pc_train,nc_train,ALPHABET) # shuffles
print("Data ready.")
# In[117]:
def make_DNN():
print("make_DNN")
print("input shape:",INPUT_SHAPE)
dnn = Sequential()
#dnn.add(Embedding(input_dim=INPUT_SHAPE,output_dim=INPUT_SHAPE))
dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same",
input_shape=INPUT_SHAPE))
dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
dnn.add(MaxPooling1D())
dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
dnn.add(Conv1D(filters=FILTERS,kernel_size=WIDTH,strides=STRIDE,padding="same"))
dnn.add(MaxPooling1D())
dnn.add(Flatten())
dnn.add(Dense(NEURONS,activation="sigmoid",dtype=np.float32))
dnn.add(Dropout(DROP_RATE))
dnn.add(Dense(1,activation="sigmoid",dtype=np.float32))
dnn.compile(optimizer='adam',
loss=BinaryCrossentropy(from_logits=False),
metrics=['accuracy']) # add to default metrics=loss
dnn.build(input_shape=INPUT_SHAPE)
#ln_rate = tf.keras.optimizers.Adam(learning_rate = LN_RATE)
#bc=tf.keras.losses.BinaryCrossentropy(from_logits=False)
#model.compile(loss=bc, optimizer=ln_rate, metrics=["accuracy"])
return dnn
model = make_DNN()
print(model.summary())
# In[118]:
from keras.callbacks import ModelCheckpoint
def do_cross_validation(X,y):
cv_scores = []
fold=0
mycallbacks = [ModelCheckpoint(
filepath=MODELPATH, save_best_only=True,
monitor='val_accuracy', mode='max')]
splitter = KFold(n_splits=SPLITS) # this does not shuffle
for train_index,valid_index in splitter.split(X):
if fold < FOLDS:
fold += 1
X_train=X[train_index] # inputs for training
y_train=y[train_index] # labels for training
X_valid=X[valid_index] # inputs for validation
y_valid=y[valid_index] # labels for validation
print("MODEL")
# Call constructor on each CV. Else, continually improves the same model.
            model = make_DNN()
print("FIT") # model.fit() implements learning
start_time=time.time()
history=model.fit(X_train, y_train,
epochs=EPOCHS,
verbose=1, # ascii art while learning
callbacks=mycallbacks, # called at end of each epoch
validation_data=(X_valid,y_valid))
end_time=time.time()
elapsed_time=(end_time-start_time)
print("Fold %d, %d epochs, %d sec"%(fold,EPOCHS,elapsed_time))
# print(history.history.keys()) # all these keys will be shown in figure
pd.DataFrame(history.history).plot(figsize=(8,5))
plt.grid(True)
plt.gca().set_ylim(0,1) # any losses > 1 will be off the scale
plt.show()
# In[119]:
do_cross_validation(X,y)
# In[120]:
from keras.models import load_model
X,y = prepare_inputs_len_x_alphabet(pc_test,nc_test,ALPHABET)
best_model=load_model(MODELPATH)
scores = best_model.evaluate(X, y, verbose=0)
print("The best model parameters were saved during cross-validation.")
print("Best was defined as maximum validation accuracy at end of any epoch.")
print("Now re-load the best model and test it on previously unseen data.")
print("Test on",len(pc_test),"PC seqs")
print("Test on",len(nc_test),"NC seqs")
print("%s: %.2f%%" % (best_model.metrics_names[1], scores[1]*100))
# In[121]:
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
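# ns_* = no-skill baseline (predict 0 for every sequence); bm_* = predictions from the reloaded best model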
ns_probs = [0 for _ in range(len(y))]
bm_probs = best_model.predict(X)
ns_auc = roc_auc_score(y, ns_probs)
bm_auc = roc_auc_score(y, bm_probs)
ns_fpr, ns_tpr, _ = roc_curve(y, ns_probs)
bm_fpr, bm_tpr, _ = roc_curve(y, bm_probs)
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='Guess, auc=%.4f'%ns_auc)
plt.plot(bm_fpr, bm_tpr, marker='.', label='Model, auc=%.4f'%bm_auc)
plt.title('ROC')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend()
plt.show()
print("%s: %.2f%%" %('AUC',bm_auc*100.0))
# In[122]:
t = time.time()
time.strftime('%Y-%m-%d %H:%M:%S %Z', time.localtime(t))
# In[122]:
| 31.598566 | 187 | 0.694986 |
4a2698723e98668fdf4fcbb359f73176359503fe | 242 | py | Python | Tratamento de erros/adicao.py | g7jpedro/Python_exer | 9efbe179599eba4cdb39c9525fd850565d16f5fd | [
"MIT"
] | null | null | null | Tratamento de erros/adicao.py | g7jpedro/Python_exer | 9efbe179599eba4cdb39c9525fd850565d16f5fd | [
"MIT"
] | null | null | null | Tratamento de erros/adicao.py | g7jpedro/Python_exer | 9efbe179599eba4cdb39c9525fd850565d16f5fd | [
"MIT"
] | null | null | null |
n1 = input('Digite o primeiro valor: ')
n2 = input('Digite o segundo valor: ')
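# (Portuguese prompts: "Digite o primeiro/segundo valor" = "Enter the first/second value";
#  the error message below means "ERROR! Enter the correct kind of data.")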
try:
somar = float(n1) + float(n2)
except ValueError:
print('ERRO ! Informe um tipo certo de dado.')
else:
print(f'A soma de {n1} + {n2} = {somar}') | 22 | 50 | 0.619835 |
4a26987c3f6748024d30258df2d6ac83da0d2b5b | 1,635 | py | Python | tests/integration_test.py | alexismhill3/pinetree | 45a1c2eae4cb3677b26794b9f1dc8a304e237550 | [
"MIT"
] | 6 | 2020-07-20T21:35:07.000Z | 2021-06-22T06:51:03.000Z | tests/integration_test.py | alexismhill3/pinetree | 45a1c2eae4cb3677b26794b9f1dc8a304e237550 | [
"MIT"
] | 12 | 2019-09-09T16:31:29.000Z | 2021-09-15T18:10:01.000Z | tests/integration_test.py | alexismhill3/pinetree | 45a1c2eae4cb3677b26794b9f1dc8a304e237550 | [
"MIT"
] | 4 | 2017-09-08T03:21:49.000Z | 2019-08-27T21:12:04.000Z | # Test simulation
import unittest
import subprocess
import tempfile
import importlib
class MainTest(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tempdir.cleanup()
def run_test(self, prefix):
test_mod = importlib.import_module('.models.' + prefix, 'tests')
out_prefix = self.tempdir.name + "/" + prefix
test_mod.execute(out_prefix)
with open('tests/output/' + prefix + '_counts.tsv') as f:
text = f.read()
with open(out_prefix + '_counts.tsv') as results_file:
results = results_file.read()
self.assertEqual(results, text)
def test_single_gene(self):
self.run_test('single_gene')
# def test_three_genes(self):
# self.run_test('three_genes')
# def test_dual_polymerases(self):
# self.run_test('dual_polymerases')
# def test_dual_promoters(self):
# self.run_test('dual_promoter')
# def test_readthrough(self):
# self.run_test('readthrough')
# def test_genome_entry(self):
# self.run_test('genome_entry')
# def test_consecutive_promoters(self):
# self.run_test('consecutive_promoters')
# def test_lotka_voltera(self):
# self.run_test('lotka_voltera')
# def test_promoter_gene_overlap(self):
# self.run_test('promoter_gene_overlap')
# def test_three_genes_runoff(self):
# self.run_test('three_genes_runoff')
# def test_overlapping_genes(self):
# self.run_test('overlapping_genes')
if __name__ == '__main__':
unittest.main()
| 26.370968 | 72 | 0.653823 |
4a2698c0fbf1e8a4bc710064957ebae6fcc13e11 | 31 | py | Python | 3-Python-Advanced (May 2021)/modules/lab/triangle/__init__.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 3-Python-Advanced (May 2021)/modules/lab/triangle/__init__.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | 3-Python-Advanced (May 2021)/modules/lab/triangle/__init__.py | karolinanikolova/SoftUni-Software-Engineering | 7891924956598b11a1e30e2c220457c85c40f064 | [
"MIT"
] | null | null | null | from .draw import draw_triangle | 31 | 31 | 0.870968 |
4a2699061be5cdcf43d6bb3a9e5bc9aa6ca59122 | 6,026 | py | Python | DankMemes/commandHandler.py | Kritner/DankMemeBot | 32120951baf3a63c2e1d6793b180d4a49228e6be | [
"MIT"
] | null | null | null | DankMemes/commandHandler.py | Kritner/DankMemeBot | 32120951baf3a63c2e1d6793b180d4a49228e6be | [
"MIT"
] | 9 | 2018-04-04T17:37:32.000Z | 2018-04-05T19:40:15.000Z | DankMemes/commandHandler.py | Kritner/DankMemeBot | 32120951baf3a63c2e1d6793b180d4a49228e6be | [
"MIT"
] | null | null | null | import random
from slackclient import SlackClient
class CommandHandler():
_memeMode = "sequential"
_memeModeToggleString = "memeMode"
_currentTargetForDankness = "michael"
_helpCommandString = "wat do"
_memes = [
{"trigger" : "rules",
"channel" : None,
"index" : 0,
"responses":
["This place isn't quite dank enough to get into the rules..."]},
{"trigger" : "rules",
"channel" : "dank_memers",
"index" : 0,
"responses":
[("Here are the rules..." +
"\nRule #1: Don’t tell {0}" +
"\nRule #2: If {0} still finds out, invite him." +
"\nRule #3: If {0} is in this channel, he can select the next potential candidate.").format(_currentTargetForDankness)]},
{"trigger" : "flavortown",
"channel" : None,
"index" : 0,
"responses": [
"http://i0.kym-cdn.com/photos/images/newsfeed/001/355/960/bf2.jpg",
"http://i0.kym-cdn.com/photos/images/newsfeed/001/053/453/f5f.jpg",
"http://i0.kym-cdn.com/photos/images/newsfeed/000/972/194/cd9.jpg",
"https://i.chzbgr.com/full/9101308416/h26CA5A46/"
]},
{"trigger" : "it is wednesday my dudes",
"channel" : None,
"index" : 0,
"responses": [
"https://www.youtube.com/watch?v=du-TY1GUFGk",
"https://www.youtube.com/watch?v=YSDAAh6Lps4",
"https://www.youtube.com/watch?v=m2Z0CyuyfMI",
"https://youtu.be/Oct2xKMGOno?list=RDRT0soCWpH3Q",
"https://www.youtube.com/watch?v=OzQ-KvxLVT0",
"https://youtu.be/VaPMUACYWww",
"https://www.youtube.com/watch?v=csqJK8wwaHw",
"https://www.youtube.com/watch?v=JHO61_wDC30",
"https://youtu.be/RT0soCWpH3Q",
"https://youtu.be/0W51GIxnwKc",
"https://youtu.be/VfaNCw2bF48",
"https://youtu.be/RT0soCWpH3Q"
]},
{"trigger" : "doge",
"channel" : None,
"index" : 0,
"responses": [
"https://mtgcardsmith.com/view/complete/full/2017/8/21/1503338850980696.png",
"http://i0.kym-cdn.com/photos/images/newsfeed/000/581/723/a8b.jpg",
"http://i0.kym-cdn.com/photos/images/newsfeed/000/581/168/9f1.jpg",
"http://i0.kym-cdn.com/photos/images/newsfeed/000/581/567/bab.jpg",
"http://i0.kym-cdn.com/photos/images/newsfeed/000/661/142/c03.jpg"
]}]
def handle_command(self, slackClient, command, channel):
"""
Executes bot command if the command is known
"""
# Finds and executes the given command, filling in response
response = None
if command.startswith(self._helpCommandString):
response = self._get_help()
elif command.startswith(self._memeModeToggleString):
response = "changing current 'memeMode' of {0} ".format(self._memeMode)
if self._memeMode == "sequential":
self._memeMode = "random"
else:
self._memeMode = "sequential"
response = response + "to {0}.".format(self._memeMode)
else:
response = self._get_memes(slackClient, command, channel)
if response is None:
response = "Hmm... I'm not sure what you mean. Try '{0}' for commands".format(self._helpCommandString)
# Sends the response back to the channel
slackClient.api_call(
"chat.postMessage",
channel=channel,
text=response
)
def _get_current_channel_name(self, slackClient, channel):
channel_name = None
for chan in self._get_channel_list(slackClient, True):
if channel == chan["id"]:
channel_name = chan["name"]
return channel_name
def _get_channel_list(self, slackClient, includePrivate = True):
channelList = []
apiChannels = slackClient.api_call("channels.list")
channels = apiChannels["channels"]
for chan in channels:
channelList.append({"id": chan["id"], "name": chan["name"]})
if includePrivate:
apiChannels = slackClient.api_call("groups.list")
channels = apiChannels["groups"]
for chan in channels:
channelList.append({"id": chan["id"], "name": chan["name"]})
return channelList
def _get_help(self):
response = ("DankMemeBot is currently operating in {0} meme mode, " +
"and capable of acting on the following trigger words:").format(self._memeMode)
response = response + "\n----------\n"
for item in self._memes:
if item["trigger"] not in response:
response = response + "\n" + item["trigger"]
response = response + "\n----------\n"
response = response + ("\nUse command '{0}' " +
"to toggle the current meme mode (random or sequential)").format(self._memeModeToggleString)
return response
def _get_memes(self, slackClient, command, channel):
response = None
channel_name = self._get_current_channel_name(slackClient, channel)
for meme in self._memes:
if command.lower().startswith(meme["trigger"].lower()):
if meme["channel"] == channel_name:
response = self._get_meme(meme)
elif meme["channel"] is None:
response = self._get_meme(meme)
return response
def _get_meme(self, meme):
if len(meme["responses"]) == 1:
return meme["responses"][0]
if self._memeMode == "random":
            # randrange() excludes its stop value, so use the full length or the last response could never be picked
            return meme["responses"][random.randrange(0, len(meme["responses"]))]
if meme["index"] == len(meme["responses"]):
meme["index"] = 0
response = meme["responses"][meme["index"]]
meme["index"] += 1
return response
| 37.899371 | 133 | 0.565383 |
4a2699d343e6ce9904a1e79607d21af896c4c757 | 42,119 | py | Python | mmd_scripting/core/nuthouse01_core.py | Nuthouse01/VMD-to-text-Conversion-Tool | 0d9334bd5735accdd8bb6e1b69889fbe054a7481 | [
"MIT"
] | 4 | 2020-03-02T00:27:24.000Z | 2020-05-29T15:23:50.000Z | mmd_scripting/core/nuthouse01_core.py | Nuthouse01/VMD-to-text-Conversion-Tool | 0d9334bd5735accdd8bb6e1b69889fbe054a7481 | [
"MIT"
] | null | null | null | mmd_scripting/core/nuthouse01_core.py | Nuthouse01/VMD-to-text-Conversion-Tool | 0d9334bd5735accdd8bb6e1b69889fbe054a7481 | [
"MIT"
] | null | null | null | import math
import sys
import traceback
from os import path, listdir
from typing import Any, Tuple, List, Sequence, Callable, Iterable, TypeVar, Union
_SCRIPT_VERSION = "Script version: Nuthouse01 - v1.07.03 - 8/9/2021"
# This code is free to use and re-distribute, but I cannot be held responsible for damages that it may or may not cause.
#####################
# this contains a bunch of functions that are used throughout multiple different scripts
# it's better to keep them all in one place than copy them for each file
########################################################################################################################
# constants used in many files that I don't wanna keep copying over and over
########################################################################################################################
pmxe_material_csv_header = [";Material", "材質名", "材質名(英)", "拡散色_R", "拡散色_G", "拡散色_B", "拡散色_A(非透過度)",
"反射色_R", "反射色_G", "反射色_B", "反射強度", "環境色_R", "環境色_G", "環境色_B", "両面描画(0/1)",
"地面影(0/1)", "セルフ影マップ(0/1)", "セルフ影(0/1)", "頂点色(0/1)", "描画(0:Tri/1:Point/2:Line)",
"エッジ(0/1)", "エッジサイズ", "エッジ色_R", "エッジ色_G", "エッジ色_B", "エッジ色_A",
"テクスチャパス", "スフィアテクスチャパス", "スフィアモード(0:無効/1:乗算/2:加算/3:サブテクスチャ)",
"Toonテクスチャパス", "メモ"]
pmxe_material_csv_tag = "Material"
pmxe_vertex_csv_header = [";Vertex", "頂点Index", "位置_x", "位置_y", "位置_z", "法線_x", "法線_y", "法線_z", "エッジ倍率",
"UV_u", "UV_v", "追加UV1_x", "追加UV1_y", "追加UV1_z", "追加UV1_w", "追加UV2_x", "追加UV2_y",
"追加UV2_z", "追加UV2_w", "追加UV3_x", "追加UV3_y", "追加UV3_z", "追加UV3_w", "追加UV4_x",
"追加UV4_y", "追加UV4_z", "追加UV4_w", "ウェイト変形タイプ(0:BDEF1/1:BDEF2/2:BDEF4/3:SDEF/4:QDEF)",
"ウェイト1_ボーン名", "ウェイト1_ウェイト値", "ウェイト2_ボーン名", "ウェイト2_ウェイト値",
"ウェイト3_ボーン名", "ウェイト3_ウェイト値", "ウェイト4_ボーン名", "ウェイト4_ウェイト値", "C_x",
"C_y", "C_z", "R0_x", "R0_y", "R0_z", "R1_x", "R1_y", "R1_z"]
pmxe_vertex_csv_tag = "Vertex"
pmxe_bone_csv_header = [";Bone", "ボーン名", "ボーン名(英)", "変形階層", "物理後(0/1)", "位置_x", "位置_y", "位置_z",
"回転(0/1)", "移動(0/1)", "IK(0/1)", "表示(0/1)", "操作(0/1)", "親ボーン名", "表示先(0:オフセット/1:ボーン)",
"表示先ボーン名", "オフセット_x", "オフセット_y", "オフセット_z", "ローカル付与(0/1)", "回転付与(0/1)",
"移動付与(0/1)", "付与率", "付与親名", "軸制限(0/1)", "制限軸_x", "制限軸_y", "制限軸_z", "ローカル軸(0/1)",
"ローカルX軸_x", "ローカルX軸_y", "ローカルX軸_z", "ローカルZ軸_x", "ローカルZ軸_y", "ローカルZ軸_z",
"外部親(0/1)", "外部親Key", "IKTarget名", "IKLoop", "IK単位角[deg]"]
pmxe_bone_csv_tag = "Bone"
pmxe_morph_csv_header = [";Morph", "モーフ名", "モーフ名(英)", "パネル(0:無効/1:眉(左下)/2:目(左上)/3:口(右上)/4:その他(右下))",
"モーフ種類(0:グループモーフ/1:頂点モーフ/2:ボーンモーフ/3:UV(Tex)モーフ/4:追加UV1モーフ/5:追加UV2モーフ/6:追加UV3モーフ/7:追加UV4モーフ/8:材質モーフ/9:フリップモーフ/10:インパルスモーフ)"]
pmxe_morph_csv_tag = "Morph"
pmxe_morphvertex_csv_tag = "VertexMorph"
pmxe_morphmaterial_csv_tag = "MaterialMorph"
pmxe_morphuv_csv_tag = "UVMorph"
pmxe_rigidbody_csv_header = [";Body", "剛体名", "剛体名(英)", "関連ボーン名", "剛体タイプ(0:Bone/1:物理演算/2:物理演算+ボーン追従)",
"グループ(0~15)", "非衝突グループ文字列(ex:1 2 3 4)", "形状(0:球/1:箱/2:カプセル)", "サイズ_x",
"サイズ_y", "サイズ_z", "位置_x", "位置_y", "位置_z", "回転_x[deg]", "回転_y[deg]", "回転_z[deg]",
"質量", "移動減衰", "回転減衰", "反発力", "摩擦力"]
pmxe_rigidbody_csv_tag = "Body"
pmxe_face_csv_header = [";Face", "親材質名", "面Index", "頂点Index1", "頂点Index2", "頂点Index3"]
pmxe_face_csv_tag = "Face"
interpolation_default_linear = [20, 20, 107, 107]
########################################################################################################################
# misc functions and user-input functions
########################################################################################################################
def basic_print(*args, is_progress=False) -> None:
"""
CONSOLE FUNCTION: emulate builtin print() function and display text in console.
:param args: any number of string-able objects, will be joined with spaces.
:param is_progress: default false. if true, move the cursor to the beginning of THIS line after printing, so NEXT
print contents will overwrite this one.
"""
the_string = ' '.join([str(x) for x in args])
# replace the print() function with this so i can replace this with the text redirector
if is_progress:
# leave the cursor at the beginning of the line so the next print statement overwrites this
print(the_string, end='\r', flush=True)
# print('\r' + p, end='', flush=True) # leave cursor at the end of the line
# print('\r', end='', flush=False) # force NEXT print statement to begin by resetting to the start of the line
else:
# otherwise use the normal print
print(the_string)
# global variable holding a function pointer that i can overwrite with a different function pointer when in GUI mode
MY_PRINT_FUNC = basic_print
def pause_and_quit(message=None) -> None:
"""
CONSOLE FUNCTION: use input() to suspend until user presses ENTER, then die.
DO NOT USE THIS FUNCTION IN ANY SCRIPTS THAT WILL BE EXECUTED BY THE GUI.
:param message: optional string to print before dying
"""
# wait for user input before exiting because i want the window to stay open long enough for them to read output
MY_PRINT_FUNC(message)
MY_PRINT_FUNC("...press ENTER to exit...")
input()
exit()
PROGRESS_REFRESH_RATE = 0.03 # threshold for actually printing
PROGRESS_LAST_VALUE = 0.0 # last%
def print_progress_oneline(newpercent:float) -> None:
"""
Prints progress percentage on one continually-overwriting line. To minimize actual print-to-screen events, only
print in increments of PROGRESS_REFRESH_RATE (currently 3%) regardless of how often this function is called.
This uses the MY_PRINT_FUNC approach so this function works in both GUI and CONSOLE modes.
:param newpercent: float [0-1], current progress %
"""
global PROGRESS_LAST_VALUE
# if 'curr' is lower than it was last printed (meaning reset), or it's been a while since i last printed a %, then print
if (newpercent < PROGRESS_LAST_VALUE) or (newpercent >= PROGRESS_LAST_VALUE + PROGRESS_REFRESH_RATE):
# cursor gets left at the beginning of line, so the next print will overwrite this one
p = "...working: {:05.1%}".format(newpercent)
MY_PRINT_FUNC(p, is_progress=True)
PROGRESS_LAST_VALUE = newpercent
# useful as keys for sorting
def get1st(x):
return x[0]
def get2nd(x):
return x[1]
THING = TypeVar('THING') # Declare type variable so I can say "whatever input type is, it matches the output type"
def my_list_search(searchme: Iterable[THING], condition: Callable[[THING], bool], getitem=False):
# in a list of things, find the first thing where the condition is true
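	# return the index of the first match by default, or the matching item itself if getitem=True
	# return None if nothing satisfies the condition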
for d,row in enumerate(searchme):
if condition(row):
return row if getitem else d
return None
def my_list_partition(l: Iterable[THING], condition: Callable[[THING], bool]) -> Tuple[List[THING], List[THING]]:
"""
Split one list into two NEW lists based on a condition. Kinda like a list comprehension but it produces 2 results.
:param l: the list to be split in two
:param condition: lambda function that returns true or false
:return: tuple of lists, (list_lambda_true, list_lambda_false)
"""
list_where_true = []
list_where_false = []
for iiiii in l:
if condition(iiiii):
list_where_true.append(iiiii)
else:
list_where_false.append(iiiii)
return list_where_true, list_where_false
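# e.g. evens, odds = my_list_partition(range(10), lambda x: x % 2 == 0)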
def prettyprint_file_size(size_b: int) -> str:
"""
Format a filesize in terms of bytes, KB, MB, GB, whatever is most appropriate.
:param size_b: int size in bytes
:return: string
"""
if abs(size_b) < 1024:
# bytes
ret = "%d B" % size_b
elif abs(size_b) < 1024*1024:
# kilobytes
s = size_b / 1024
ret = "{:.2f} KB".format(s)
elif abs(size_b) < 1024*1024*1024:
# megabytes
s = size_b / (1024*1024)
ret = "{:.2f} MB".format(s)
else:
# gigabytes
s = size_b / (1024*1024*1024)
ret = "{:.2f} GB".format(s)
return ret
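# e.g. prettyprint_file_size(2048) -> "2.00 KB", prettyprint_file_size(500) -> "500 B"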
MAXDIFFERENCE = 0
# recursively check for equality, using a loose comparison for floating-point values
# operating on the test file, the greatest difference introduced by quaternion transform is 0.000257
# let's set the sanity-check threshold at double that, 0.0005
# return the number of times a float difference exceeded the threshold
# if there is a non-float difference, return infinity
def recursively_compare(A,B):
global MAXDIFFERENCE
# return 1/true if it FAILS, return 0/false if it MATCHES
if hasattr(A, "list"): A = A.list()
if hasattr(B, "list"): B = B.list()
if isinstance(A, float) and isinstance(B, float):
# for floats specifically, replace exact compare with approximate compare
diff = abs(A-B)
MAXDIFFERENCE = max(diff, MAXDIFFERENCE)
return diff >= 0.0005
if isinstance(A, list) and isinstance(B, list):
if len(A) != len(B):
return float("inf")
collect = 0
for A_, B_ in zip(A, B):
collect += recursively_compare(A_, B_)
return collect
# if not float and not list, then use standard compare
if A != B:
return float("inf")
return 0
def new_recursive_compare(L, R):
diffcount = 0
maxdiff = 0
if isinstance(L, (list,tuple)) and isinstance(R, (list,tuple)):
# if both are listlike, recurse on each element of 'em
if len(L) != len(R):
diffcount += 1
# walk down both for as long as it will go, i guess?
for d,(LL, RR) in enumerate(zip(L, R)):
thisdiff, thismax = new_recursive_compare(LL, RR)
diffcount += thisdiff
maxdiff = max(maxdiff, thismax)
elif hasattr(L,"validate") and hasattr(R,"validate"):
# for my custom classes, look over the members with "vars" because its fancy
Lvars = sorted(list(vars(L).items()))
Rvars = sorted(list(vars(R).items()))
for (nameL, LL), (nameR, RR) in zip(Lvars, Rvars):
thisdiff, thismax = new_recursive_compare(LL, RR)
diffcount += thisdiff
maxdiff = max(maxdiff, thismax)
elif isinstance(L, float) and isinstance(R, float):
# for floats specifically, replace exact compare with approximate compare
diff = abs(L - R)
maxdiff = diff
if L != R:
diffcount += 1
else:
# if not float and not list, then use standard compare
if L != R:
diffcount += 1
return diffcount, maxdiff
def flatten(x: Sequence) -> list:
"""
Recursively flatten a list of lists (or tuples). Empty lists get replaced with "None" instead of completely vanishing.
"""
retme = []
for thing in x:
if isinstance(thing, (list, tuple)):
if len(thing) == 0:
retme.append(None)
else:
retme += flatten(thing)
else:
retme.append(thing)
return retme
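'''
# quick example (illustrative only; note how the empty list becomes None instead of vanishing):
flatten([1, [2, [3, 4]], []])   # -> [1, 2, 3, 4, None]
'''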
def justify_stringlist(j: List[str], right=False) -> List[str]:
"""
CONSOLE FUNCTION: justify all str in a list to match the length of the longest str in that list. Determined by
len() function, i.e. number of chars, not by true width when printed, so it doesn't work well with JP/CN chars.
:param j: list[str] to be justified
:param right: by default, left-justify (right-pad). if this is true, right-justify (left-pad) instead.
:return: list[str] after padding/justifying
"""
# first, look for an excuse to give up early
if len(j) == 0 or len(j) == 1: return j
# second, find the length of the longest string in the list
longest_name_len = max([len(p) for p in j])
# third, make a new list of strings that have been padded to be that length
if right:
# right-justify, force strings to right by padding on left
retlist = [(" " * (longest_name_len - len(p))) + p for p in j]
else:
# left-justify, force strings to left by padding on right
retlist = [p + (" " * (longest_name_len - len(p))) for p in j]
return retlist
# global variable holding a function pointer that i can overwrite with a different function pointer when in GUI mode
MY_JUSTIFY_STRINGLIST = justify_stringlist
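'''
# usage sketch for justify_stringlist (illustrative only; quotes shown to make the padding visible):
justify_stringlist(["a", "bbb", "cc"])              # -> ["a  ", "bbb", "cc "]
justify_stringlist(["a", "bbb", "cc"], right=True)  # -> ["  a", "bbb", " cc"]
'''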
def prompt_user_choice(options: Sequence[int], explain_info=None) -> int:
"""
CONSOLE FUNCTION: prompt for multiple-choice question & continue prompting until one of those options is chosen.
:param options: list/tuple of ints
:param explain_info: None or str or list[str], help text that will be printed when func is called
:return: int that the user chose
"""
if isinstance(explain_info, (list, tuple)):
for p in explain_info:
MY_PRINT_FUNC(p)
elif isinstance(explain_info, str):
MY_PRINT_FUNC(explain_info)
# create set for matching against
choicelist = [str(i) for i in options]
# create printable string which is all options separated by slashes
promptstr = "/".join(choicelist)
while True:
# continue prompting until the user gives valid input
choice = input(" Choose [" + promptstr + "]: ")
if choice in choicelist:
# if given valid input, break
break
# if given invalid input, prompt and loop again
MY_PRINT_FUNC("invalid choice")
return int(choice)
# global variable holding a function pointer that i can overwrite with a different function pointer when in GUI mode
MY_SIMPLECHOICE_FUNC = prompt_user_choice
def general_input(valid_check: Callable[[str], bool], explain_info=None) -> str:
"""
CONSOLE FUNCTION: Prompt for string input & continue prompting until given function 'valid_check' returns True.
'valid_check' should probably print some kind of error whenever it returns False, explaining why input isn't valid.
Trailing whitespace is removed before calling 'valid_check' and before returning result.
:param valid_check: function or lambda that takes str as input and returns bool
:param explain_info: None or str or list[str], help text that will be printed when func is called
:return: input string (trailing whitespace removed)
"""
if explain_info is None:
pass
elif isinstance(explain_info, str):
MY_PRINT_FUNC(explain_info)
elif isinstance(explain_info, (list, tuple)):
for p in explain_info:
MY_PRINT_FUNC(p)
while True:
s = input("> ")
s = s.rstrip() # no use for trailing whitespace, sometimes have use for leading whitespace
# perform valid-check
if valid_check(s):
break
else:
# if given invalid input, prompt and loop again
MY_PRINT_FUNC("invalid input")
return s
# global variable holding a function pointer that i can overwrite with a different function pointer when in GUI mode
MY_GENERAL_INPUT_FUNC = general_input
def prompt_user_filename(label: str, ext_list: Union[str,Sequence[str]]) -> str:
"""
CONSOLE FUNCTION: prompt for file & continue prompting until user enters the name of an existing file with the
specified file extension. Returns case-correct absolute file path to the specified file.
:param label: short string label that identifies this kind of input, like "Text file" or "VMD file"
:param ext_list: list of acceptable extensions, or just one string
:return: case-correct absolute file path
"""
if isinstance(ext_list, str):
# if it comes in as a string, wrap it in a list
ext_list = [ext_list]
MY_PRINT_FUNC('(type/paste the path to the file, ".." means "go up a folder")')
MY_PRINT_FUNC('(path can be absolute, like C:/username/Documents/miku.pmx)')
MY_PRINT_FUNC('(or path can be relative to here, example: ../../mmd/models/miku.pmx)')
while True:
# continue prompting until the user gives valid input
if ext_list:
name = input(" {:s} path ending with [{:s}] = ".format(label, ", ".join(ext_list)))
valid_ext = any(name.lower().endswith(a.lower()) for a in ext_list)
if not valid_ext:
MY_PRINT_FUNC("Err: given file does not have acceptable extension")
continue
else:
# if given an empty sequence, then do not check for valid extension. accept anything.
name = input(" {:s} path = ".format(label))
if not path.isfile(name):
MY_PRINT_FUNC("Err: given name is not a file, did you type it wrong?")
abspath = path.abspath(name)
# find the point where the filepath breaks! walk up folders 1 by 1 until i find the last place where the path was valid
c = abspath
while c and not path.exists(c):
c = path.dirname(c)
whereitbreaks = (" " * len(c)) + " ^^^^"
MY_PRINT_FUNC(abspath)
MY_PRINT_FUNC(whereitbreaks)
continue
break
# it exists, so make it absolute
name = path.abspath(path.normpath(name))
# windows is case insensitive, so this doesn't matter, but to make it match the same case as the existing file:
return filepath_make_casecorrect(name)
# global variable holding a function pointer that i can overwrite with a different function pointer when in GUI mode
MY_FILEPROMPT_FUNC = prompt_user_filename
def filepath_splitdir(initial_name: str) -> Tuple[str,str]:
"""
Alias for path.split()
:param initial_name: string filepath
:return: (directories, filename)
"""
return path.split(initial_name)
def filepath_splitext(initial_name: str) -> Tuple[str,str]:
"""
Alias for path.splitext()
:param initial_name: string filepath
:return: (directories+filename, extension)
"""
return path.splitext(initial_name)
def filepath_insert_suffix(initial_name: str, suffix:str) -> str:
"""
Simple function, insert the suffix between the Basename and Extension.
:param initial_name: string filepath
:param suffix: string to append to filepath
:return: string filepath
"""
N,E = filepath_splitext(initial_name)
ret = N + suffix + E
return ret
def filepath_make_casecorrect(initial_name: str) -> str:
"""
Make the given path match the case of the file/folders on the disk.
If the path does not exist, then make it casecorrect up to the point where it no longer exists.
:param initial_name: string filepath
:return: string filepath, exactly the same as input except for letter case
"""
initial_name = path.normpath(initial_name)
# all "." are removed, all ".." are removed except for leading...
# first, break the given path into all of its segments
seglist = initial_name.split(path.sep)
if len(seglist) == 0:
raise ValueError("ERROR: input path '%s' is too short" % initial_name)
if path.isabs(initial_name):
first = seglist.pop(0) + path.sep
if path.ismount(first):
# windows absolute path! begins with a drive letter
reassemble_name = first.upper()
elif first == "":
# ???? linux ????
reassemble_name = path.sep
else:
MY_PRINT_FUNC("path is abs, but doesn't start with drive or filesep? what? '%s'" % initial_name)
reassemble_name = first
else:
# if not an absolute path, it needs to start as "." so that listdir works right (need to remove this when done tho)
reassemble_name = "."
while seglist:
nextseg = seglist.pop(0)
if nextseg == "..":
reassemble_name = path.join(reassemble_name, nextseg)
else:
try:
whats_here = listdir(reassemble_name)
except FileNotFoundError:
# fallback just in case I forgot about something
return initial_name
whats_here = [str(w) for w in whats_here]
whats_here_lower = [w.lower() for w in whats_here]
try:
# find which entry in listdir corresponds to nextseg, when both sides are lowered
idx = whats_here_lower.index(nextseg.lower())
except ValueError:
# the next segment isn't available in the listdir! the path is invalid from here on out!
# so, just join everything remaining & break out of the loop.
reassemble_name = path.join(reassemble_name, nextseg, *seglist)
break
# the next segment IS available in the listdir, so use the case-correct version of it
reassemble_name = path.join(reassemble_name, whats_here[idx])
# then, loop!
# call normpath one more time to get rid of leading ".\\" when path is relative!
reassemble_name = path.normpath(reassemble_name)
return reassemble_name
def filepath_get_unused_name(initial_name: str, checkdisk=True, namelist=None) -> str:
"""
Given a desired filepath, generate a path that is guaranteed to be unused & safe to write to.
Append integers to the end of the basename until it passes.
Often it doesn't need to append anything and returns initial_name unmodified.
:param initial_name: desired file path, absolute or relative.
:param checkdisk: default True. if true, then check uniqueness against names on disk.
:param namelist: default empty. if given, then check uniqueness against these names. list or set.
:return: same file path as initial_name, but with integers inserted until it becomes unique (if needed)
"""
# if namelist is given, check against namelist as well as what's on the disk...
# make an all-lower version of namelist
if namelist is None: namelist_lower = []
else: namelist_lower = [n.lower() for n in namelist]
basename, extension = path.splitext(initial_name)
test_name = basename + extension # first, try it without adding any numbers
for append_num in range(1, 1000):
diskpass = not (checkdisk and path.exists(test_name))
listpass = (test_name.lower() not in namelist_lower)
if diskpass and listpass:
# if this name passes the disk check (or disk check is skipped), AND it passes the list check (or list is empty),
# then this name will be kept.
return test_name
else:
# if test_name is already used, then assemble a new name that includes a number
test_name = "%s (%d)%s" % (basename, append_num, extension)
# if it hits here, it tried 999 file names and none of them worked
raise RuntimeError("ERROR: unable to find unused variation of '%s' for file-write" % initial_name)
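'''
# usage sketch (illustrative only; checkdisk=False so nothing on disk is consulted, and
# "render.png" / the namelist are made-up example values):
filepath_get_unused_name("render.png", checkdisk=False, namelist=["render.png"])
# -> "render (1).png"
'''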
def RUN_WITH_TRACEBACK(func: Callable, *args) -> None:
"""
Used to execute the "main" function of a script in direct-run mode.
If it runs successfully, do a pause-and-quit afterward.
If an exception occurs, print the traceback info and do a pause-and-quit.
If it was CTRL+C aborted, do not pause-and-quit.
:param func: main-function
:param args: optional args to pass to main-function
"""
try:
MY_PRINT_FUNC("")
func(*args)
pause_and_quit("Done with everything! Goodbye!")
except (KeyboardInterrupt, SystemExit):
# this is normal and expected, do nothing and die
pass
except Exception as e:
# print an error and full traceback if an exception was received!
exc_type, exc_value, exc_traceback = sys.exc_info()
printme_list = traceback.format_exception(e.__class__, e, exc_traceback)
# now i have the complete traceback info as a list of strings, each ending with newline
MY_PRINT_FUNC("")
MY_PRINT_FUNC("".join(printme_list))
pause_and_quit("ERROR: the script did not complete succesfully.")
########################################################################################################################
# searching thru sorted lists for MASSIVE speedup
########################################################################################################################
# bisect_left and bisect_right literally just copied from the "bisect" library so I don't need to import that file
def bisect_left(a: Sequence[Any], x: Any) -> int:
"""
Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e < x, and all e in
a[i:] have e >= x. So if x already appears in the list, then i = the index
where the leftmost x can be found.
"""
lo = 0
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if a[mid] < x: lo = mid+1
else: hi = mid
return lo
def bisect_right(a: Sequence[Any], x: Any) -> int:
"""
Return the index where to insert item x in list a, assuming a is sorted.
The return value i is such that all e in a[:i] have e <= x, and all e in
a[i:] have e > x. So if x already appears in the list, then i = index + 1
of the rightmost x already there.
"""
lo = 0
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < a[mid]: hi = mid
else: lo = mid+1
return lo
def binary_search_isin(x: Any, a: Sequence[Any]) -> bool:
"""
If x is in a, return True. Otherwise return False. a must be in ascending sorted order.
"""
pos = bisect_left(a, x) # find insertion position
return True if pos != len(a) and a[pos] == x else False # don't walk off the end
def binary_search_wherein(x: Any, a: Sequence[Any]) -> int:
"""
If x is in a, return its index. Otherwise return -1. a must be in ascending sorted order.
"""
pos = bisect_left(a, x) # find insertion position
return pos if pos != len(a) and a[pos] == x else -1 # don't walk off the end
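'''
# worked example (illustrative only; the list must already be sorted ascending):
a = [1, 3, 5, 7, 9]
binary_search_isin(5, a)       # -> True
binary_search_isin(6, a)       # -> False
binary_search_wherein(7, a)    # -> 3
binary_search_wherein(6, a)    # -> -1
'''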
########################################################################################################################
# simple, fundamental math operations
########################################################################################################################
def linear_map(x1: float, y1: float, x2: float, y2: float, x_in_val: float) -> float:
"""
Define a Y=MX+B slope via coords x1,y1 and x2,y2. Then given an X value, calculate the resulting Y.
:param x1: x1
:param y1: y1
:param x2: x2
:param y2: y2
:param x_in_val: any float, does not need to be constrained by x1/x2
:return: resulting Y
"""
m = (y2 - y1) / (x2 - x1)
b = y2 - (m * x2)
return x_in_val * m + b
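'''
# worked example (illustrative only): define a line through (0,0) and (10,100), then evaluate it.
linear_map(0.0, 0.0, 10.0, 100.0, 2.5)      # -> 25.0
# the same idea handles unit conversions, e.g. Celsius to Fahrenheit via (0,32) and (100,212):
linear_map(0.0, 32.0, 100.0, 212.0, 37.0)   # -> approximately 98.6
'''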
def clamp(value: float, lower: float, upper: float) -> float:
"""
Basic clamp function: if below the floor, return floor; if above the ceiling, return ceiling; else return unchanged.
:param value: float input
:param lower: float floor
:param upper: float ceiling
:return: float within range [lower-upper]
"""
return lower if value < lower else upper if value > upper else value
def bidirectional_clamp(val: float, a: float, b: float) -> float:
"""
Clamp when you don't know the relative order of a and b.
:param val: float input
:param a: ceiling or floor
:param b: ceiling or floor
:return: float within range [lower-upper]
"""
return clamp(val, a, b) if a < b else clamp(val, b, a)
def my_dot(v0: Sequence[float], v1: Sequence[float]) -> float:
"""
Perform mathematical dot product between two same-length vectors. IE component-wise multiply, then sum.
:param v0: any number of floats
:param v1: same number of floats
:return: single float
"""
dot = 0.0
for (a, b) in zip(v0, v1):
dot += a * b
return dot
def my_euclidian_distance(x: Sequence[float]) -> float:
"""
Calculate Euclidean distance (square each component, sum, and square root).
:param x: list/tuple, any number of floats
:return: single float
"""
return math.sqrt(my_dot(x, x))
def normalize_distance(foo: Sequence[float]) -> List[float]:
"""
Normalize by Euclidean distance. Supports any number of dimensions.
:param foo: list/tuple, any number of floats
:return: list of floats
"""
LLL = my_euclidian_distance(foo)
return [t / LLL for t in foo]
def normalize_sum(foo: Sequence[float]) -> List[float]:
"""
Normalize by sum. Supports any number of dimensions.
:param foo: list/tuple, any number of floats
:return: list of floats
"""
LLL = sum(foo)
return [t / LLL for t in foo]
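'''
# quick example (illustrative only; the classic 3-4-5 triangle makes the result easy to check):
normalize_distance([3.0, 4.0])   # -> [0.6, 0.8]
normalize_sum([1.0, 1.0, 2.0])   # -> [0.25, 0.25, 0.5]
'''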
########################################################################################################################
# MyBezier object for bezier curve interpolation
########################################################################################################################
def _bezier_math(t: float, p1: Tuple[float, float], p2: Tuple[float, float]) -> Tuple[float, float]:
"""
Internal use only.
Use standard bezier equations, assuming p0=(0,0) and p3=(1,1) and p1/p2 are args, with a time value t, to calculate
the resulting X and Y. If X/Y of p1/p2 are within range [0-1] then output X/Y are guaranteed to also be within
[0-1].
:param t: float time value
:param p1: 2x float, coord of p1
:param p2: 2x float, coord of p2
:return: 2x float, resulting X Y coords
"""
x0, y0 = 0, 0
x1, y1 = p1
x2, y2 = p2
x3, y3 = 1, 1
x = (1 - t) ** 3 * x0 + 3 * (1 - t) ** 2 * t * x1 + 3 * (1 - t) * t ** 2 * x2 + t ** 3 * x3
y = (1 - t) ** 3 * y0 + 3 * (1 - t) ** 2 * t * y1 + 3 * (1 - t) * t ** 2 * y2 + t ** 3 * y3
return x, y
class MyBezier(object):
def __init__(self, p1: Tuple[int,int], p2: Tuple[int,int], resolution=50) -> None:
"""
This implements a linear approximation of a constrained Bezier curve for motion interpolation. After defining
the control points, Y values can be easily generated from X values using self.approximate(x).
:param p1: 2x int range [0-128], XY coordinates of control point
:param p2: 2x int range [0-128], XY coordinates of control point
:param resolution: int, number of points in the linear approximation of the bezier curve
"""
# first convert tuple(int [0-128]) to tuple(float [0.0-1.0])
point1 = (clamp(p1[0] / 128, 0.0, 1.0), clamp(p1[1] / 128, 0.0, 1.0))
point2 = (clamp(p2[0] / 128, 0.0, 1.0), clamp(p2[1] / 128, 0.0, 1.0))
retlist = [(0.0, 0.0)] # curve always starts at 0,0
# use bezier math to create a list of XY points along the actual bezier curve, evenly spaced in t=time
# both x-coords and y-coords are strictly increasing, but not evenly spaced
for i in range(1, resolution):
retlist.append(_bezier_math(i / resolution, point1, point2))
retlist.append((1.0, 1.0)) # curve always ends at 1,1
self.resolution = resolution # store resolution param
xx, yy = zip(*retlist) # unzip
self.xx = list(xx)
self.yy = list(yy)
def approximate(self, x: float) -> float:
"""
In a constrained bezier curve, X and Y have a perfect one-to-one correspondence, but the math makes it
incredibly difficult to exactly calculate a Y given an X. So, approximate it via a series of precalculated line
segments.
:param x: float input x [0.0-1.0]
:return: float output y [0.0-1.0]
"""
x = clamp(x, 0.0, 1.0)
# first take care of the corner cases, i.e. the cases I already know the answers to:
if x == 1.0: return 1.0
elif x == 0.0: return 0.0
else:
# use binary search to find pos, the idx of the entry in self.xx which is <= x
# if xx[3] < x < xx[4], then pos=4. so the segment starts at pos-1 and ends at pos.
pos = bisect_left(self.xx, x)
# use pos-1 and pos to get two xy points, to build a line segment, to perform linear approximation
return linear_map(self.xx[pos-1], self.yy[pos-1],
self.xx[pos], self.yy[pos],
x)
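'''
# usage sketch (illustrative only; (20, 107) and (107, 20) are arbitrary control points in [0-128]):
bez = MyBezier((20, 107), (107, 20))
bez.approximate(0.0)      # -> 0.0 (curve always starts at 0,0)
bez.approximate(1.0)      # -> 1.0 (curve always ends at 1,1)
y = bez.approximate(0.37) # some float in [0.0-1.0], linearly approximated from the precalculated points
'''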
########################################################################################################################
# advanced geometric math functions
########################################################################################################################
def my_projection(x: Sequence[float], y: Sequence[float]) -> Tuple[float,float,float]:
"""
Project 3D vector X onto vector Y, i.e. the component of X that is parallel with Y.
:param x: 3x float X Y Z
:param y: 3x float X Y Z
:return: 3x float X Y Z
"""
# project x onto y: y * (my_dot(x, y) / my_dot(y, y))
scal = my_dot(x, y) / my_dot(y, y)
# out = tuple(y_ * scal for y_ in y)
return y[0]*scal, y[1]*scal, y[2]*scal
def my_cross_product(a: Sequence[float], b: Sequence[float]) -> Tuple[float,float,float]:
"""
Perform mathematical cross product between two 3D vectors.
:param a: 3x float
:param b: 3x float
:return: 3x float
"""
return a[1]*b[2] - a[2]*b[1],\
a[2]*b[0] - a[0]*b[2],\
a[0]*b[1] - a[1]*b[0]
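'''
# quick example (illustrative only; unit axes keep the expected results obvious):
my_projection((1.0, 2.0, 0.0), (1.0, 0.0, 0.0))      # -> (1.0, 0.0, 0.0)
my_cross_product((1.0, 0.0, 0.0), (0.0, 1.0, 0.0))   # -> (0.0, 0.0, 1.0)
'''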
def my_quat_conjugate(q: Sequence[float]) -> Tuple[float,float,float,float]:
"""
"invert" or "reverse" or "conjugate" a quaternion by negating the x/y/z components.
:param q: 4x float, W X Y Z quaternion
:return: 4x float, W X Y Z quaternion
"""
return q[0], -q[1], -q[2], -q[3]
def my_slerp(v0: Sequence[float], v1: Sequence[float], t: float) -> Tuple[float,float,float,float]:
"""
Spherically Linear intERPolates between quat1 and quat2 by t.
The param t will normally be clamped to the range [0, 1]. However, negative values or values greater than 1 will still
work.
If t==0, return v0. If t==1, return v1.
:param v0: 4x float, W X Y Z quaternion
:param v1: 4x float, W X Y Z quaternion
:param t: float [0,1] how far to interpolate
:return: 4x float, W X Y Z quaternion
"""
# https://stackoverflow.com/questions/44706591/how-to-test-quaternion-slerp
# the code in that answer is mostly wrong, except for the quaternion-flipping trick, which is clever
# https://www.mathworks.com/help/fusion/ref/quaternion.slerp.html#mw_0419144b-0e16-4d56-b5d7-19783b790e4b
# this algorithm works tho
if math.isclose(t, 0.0, abs_tol=1e-6):
return v0
if math.isclose(t, 1.0, abs_tol=1e-6):
return v1
# If the dot product is negative, the quaternions
# have opposite handed-ness and slerp won't take
# the shorter path. Fix by reversing one quaternion.
dot = my_dot(v0, v1)
if dot < 0.0:
v1 = [-v for v in v1]
dot = -dot
# q0not = my_quat_conjugate(q0)
# a = hamilton_product(q1, q0not)
# a = normalize_distance(a)
# b = quat_pow(a, t)
# c = hamilton_product(b, q0)
# return c
# clamp just to be safe
dot = clamp(dot, -1.0, 1.0)
theta = math.acos(dot)
if theta == 0:
# if there is no angle between the two quaternions, then interpolation is pointless
return v0[0], v0[1], v0[2], v0[3]
# q1 * sin((1-t) * theta) / sin(theta) + q2 * sin(t * theta) / sin(theta)
factor0 = math.sin((1 - t) * theta) / math.sin(theta)
factor1 = math.sin(t * theta) / math.sin(theta)
res = tuple((v0[i] * factor0) + (v1[i] * factor1) for i in range(4))
return res[0], res[1], res[2], res[3]
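'''
# usage sketch (illustrative only): slerp halfway from "no rotation" to a 90-degree rotation about Z
# should give a 45-degree rotation about Z. These quaternions are built directly from the standard
# half-angle formula, not from euler_to_quaternion (which uses MMD sign conventions).
q_identity = (1.0, 0.0, 0.0, 0.0)
q_z90 = (math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4))
my_slerp(q_identity, q_z90, 0.5)
# -> approximately (cos(pi/8), 0.0, 0.0, sin(pi/8)) == (0.9239..., 0.0, 0.0, 0.3827...)
'''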
# https://en.wikipedia.org/wiki/Quaternion#Exponential,_logarithm,_and_power_functions
# https://math.stackexchange.com/questions/939229/unit-quaternion-to-a-scalar-power
# wikipedia is always good, this stackexchange thing is a bit hard to parse
def quat_ln(_q: Tuple[float, float, float, float]) -> Tuple[float, float, float, float]:
vm = my_euclidian_distance(_q[1:4])
qm = my_euclidian_distance(_q)
tt = (math.acos(_q[0] / qm) / vm) if (vm > 1e-9) else 0.0
w = math.log(qm)
return w, _q[1] * tt, _q[2] * tt, _q[3] * tt
def quat_exp(_q: Tuple[float, float, float, float]) -> Tuple[float, float, float, float]:
r = my_euclidian_distance(_q[1:4])
et = math.exp(_q[0])
s = (et * math.sin(r) / r) if (r > 1e-9) else 0.0
w = et * math.cos(r)
return w, _q[1] * s, _q[2] * s, _q[3] * s
def quat_pow(_q: Tuple[float, float, float, float], _n: float) -> Tuple[float, float, float, float]:
aa = quat_ln(_q) # pycharm type checker can go to hell
bb = tuple(_n * i for i in aa)
cc = quat_exp(bb) # pycharm type checker can go to hell
return cc
'''
# code block to validate the SLERP code via 3d plotting
original_point = [1, 0, 0]
t_list = [i/20 for i in range(20)]
while True:
R = [random.randint(-170, 170) for _ in range(6)]
euler1 = R[0:3]
euler2 = R[3:6]
print(euler1, euler2)
quat1 = core.euler_to_quaternion(euler1)
quat2 = core.euler_to_quaternion(euler2)
point_list = []
point_list_new = []
for t in t_list:
rot = core.my_slerp(quat1, quat2, t) # old slerp
newpoint = core.rotate3d((0,0,0), rot, original_point)
point_list.append(newpoint)
rot = core.new_slerp(quat1, quat2, t) # new slerp
newpoint = core.rotate3d((0, 0, 0), rot, original_point)
point_list_new.append(newpoint)
# now graph them
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x,y,z = zip(*point_list)
ax.scatter(x,y,z, label="old")
x,y,z = zip(*point_list_new)
ax.scatter(x,y,z, label="new")
ax.scatter(0,0,0, label="origin") # plot the origin too
ax.set_xlim(-1, 1)
ax.set_ylim(-1, 1)
ax.set_zlim(-1, 1)
STARTPOINT = core.rotate3d((0,0,0), quat1, original_point)
ENDPOINT = core.rotate3d((0,0,0), quat2, original_point)
ax.scatter(*STARTPOINT, marker='x', label='START')
ax.scatter(*ENDPOINT, marker='x', label='END')
ax.legend()
plt.show(block=True)
'''
def hamilton_product(quat1: Sequence[float], quat2: Sequence[float]) -> Tuple[float,float,float,float]:
"""
Perform the mathematical "Hamilton product", which effectively composes two quaternion rotations. However, the order of the inputs does matter.
Returns the equivalent of rotation quat2 followed by rotation quat1.
Result is another quaternion.
:param quat1: 4x float, W X Y Z quaternion
:param quat2: 4x float, W X Y Z quaternion
:return: 4x float, W X Y Z quaternion
"""
# thank you stackexchange and thank you wikipedia
(a1, b1, c1, d1) = quat1
(a2, b2, c2, d2) = quat2
a3 = (a1 * a2) - (b1 * b2) - (c1 * c2) - (d1 * d2)
b3 = (a1 * b2) + (b1 * a2) + (c1 * d2) - (d1 * c2)
c3 = (a1 * c2) - (b1 * d2) + (c1 * a2) + (d1 * b2)
d3 = (a1 * d2) + (b1 * c2) - (c1 * b2) + (d1 * a2)
return a3, b3, c3, d3
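'''
# quick example (illustrative only; [0, 45, 0] is an arbitrary euler rotation in degrees): composing a
# unit quaternion with its conjugate cancels the rotation, giving the identity quaternion.
q = euler_to_quaternion([0.0, 45.0, 0.0])
hamilton_product(q, my_quat_conjugate(q))   # -> approximately (1.0, 0.0, 0.0, 0.0)
'''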
# def pure_euler_to_quaternion(euler):
# # THIS IS THE PURE MATH-ONLY TRANSFORM WITHOUT ANY OF THE MMD SPECIAL CASE COMPENSATION
# # angles are in radians
# # this logic copied from wikipedia: https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
# (roll, pitch, yaw) = euler
#
# # r=x, p=y, y=z
#
# # roll (X), pitch (Y), yaw (Z)
# sr = math.sin(roll * 0.5)
# sp = math.sin(pitch * 0.5)
# sy = math.sin(yaw * 0.5)
# cr = math.cos(roll * 0.5)
# cp = math.cos(pitch * 0.5)
# cy = math.cos(yaw * 0.5)
#
# w = (cy * cp * cr) + (sy * sp * sr)
# x = (cy * cp * sr) - (sy * sp * cr)
# y = (sy * cp * sr) + (cy * sp * cr)
# z = (sy * cp * cr) - (cy * sp * sr)
#
# return [w, x, y, z]
#
# def pure_quaternion_to_euler(quaternion):
# # THIS IS THE PURE MATH-ONLY TRANSFORM WITHOUT ANY OF THE MMD SPECIAL CASE COMPENSATION
# # angles are in radians
# # this logic copied from wikipedia: https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles
# (w, x, y, z) = quaternion
#
# # roll (x-axis2 rotation)
# sinr_cosp = 2 * ((w * x) + (y * z))
# cosr_cosp = 1 - (2 * ((x ** 2) + (y ** 2)))
# roll = math.atan2(sinr_cosp, cosr_cosp)
#
# # pitch (y-axis2 rotation)
# sinp = 2 * ((w * y) - (z * x))
# if sinp >= 1.0:
# pitch = math.pi / 2 # use 90 degrees if out of range
# elif sinp <= -1.0:
# pitch = -math.pi / 2
# else:
# pitch = math.asin(sinp)
#
# # yaw (z-axis2 rotation)
# siny_cosp = 2 * ((w * z) + (x * y))
# cosy_cosp = 1 - (2 * ((y ** 2) + (z ** 2)))
# yaw = math.atan2(siny_cosp, cosy_cosp)
#
# return [roll, pitch, yaw]
def euler_to_quaternion(euler: Sequence[float]) -> Tuple[float,float,float,float]:
"""
Convert XYZ euler angles to WXYZ quaternion, using the same method as MikuMikuDance.
Massive thanks and credit to "Isometric" for helping me discover the transformation method used in mmd!!!!
:param euler: 3x float, X Y Z angle in degrees
:return: 4x float, W X Y Z quaternion
"""
# massive thanks and credit to "Isometric" for helping me discover the transformation method used in mmd!!!!
# angles are in degrees, must convert to radians
roll, pitch, yaw = euler
roll = math.radians(roll)
pitch = math.radians(pitch)
yaw = math.radians(yaw)
# roll (X), pitch (Y), yaw (Z)
sx = math.sin(roll * 0.5)
sy = math.sin(pitch * 0.5)
sz = math.sin(yaw * 0.5)
cx = math.cos(roll * 0.5)
cy = math.cos(pitch * 0.5)
cz = math.cos(yaw * 0.5)
w = (cz * cy * cx) + (sz * sy * sx)
x = (cz * cy * sx) + (sz * sy * cx)
y = (sz * cy * sx) - (cz * sy * cx)
z = (cz * sy * sx) - (sz * cy * cx)
return w, x, y, z
def quaternion_to_euler(quat: Sequence[float]) -> Tuple[float,float,float]:
"""
Convert WXYZ quaternion to XYZ euler angles, using the same method as MikuMikuDance.
Massive thanks and credit to "Isometric" for helping me discover the transformation method used in mmd!!!!
:param quat: 4x float, W X Y Z quaternion
:return: 3x float, X Y Z angle in degrees
"""
w, x, y, z = quat
# pitch (y-axis rotation)
sinr_cosp = 2 * ((w * y) + (x * z))
cosr_cosp = 1 - (2 * ((x ** 2) + (y ** 2)))
pitch = -math.atan2(sinr_cosp, cosr_cosp)
# yaw (z-axis rotation)
siny_cosp = 2 * ((-w * z) - (x * y))
cosy_cosp = 1 - (2 * ((x ** 2) + (z ** 2)))
yaw = math.atan2(siny_cosp, cosy_cosp)
# roll (x-axis rotation)
sinp = 2 * ((z * y) - (w * x))
if sinp >= 1.0:
roll = -math.pi / 2 # use 90 degrees if out of range
elif sinp <= -1.0:
roll = math.pi / 2
else:
roll = -math.asin(sinp)
# fixing the x rotation, part 1
if x ** 2 > 0.5 or w < 0:
if x < 0:
roll = -math.pi - roll
else:
roll = math.pi * math.copysign(1, w) - roll
# fixing the x rotation, part 2
if roll > (math.pi / 2):
roll = math.pi - roll
elif roll < -(math.pi / 2):
roll = -math.pi - roll
roll = math.degrees(roll)
pitch = math.degrees(pitch)
yaw = math.degrees(yaw)
return roll, pitch, yaw
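'''
# round-trip sketch (illustrative only; (10, 20, 30) are arbitrary angles in degrees): converting to a
# quaternion and back should recover approximately the same euler angles.
quat = euler_to_quaternion([10.0, 20.0, 30.0])
quaternion_to_euler(quat)   # -> approximately (10.0, 20.0, 30.0)
'''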
def rotate3d(rotate_around: Sequence[float],
angle_quat: Sequence[float],
initial_position: Sequence[float]) -> List[float]:
"""
Rotate a point within 3d space around another specified point by a specific quaternion angle.
:param rotate_around: X Y Z usually a bone location
:param angle_quat: W X Y Z quaternion rotation to apply
:param initial_position: X Y Z starting location of the point to be rotated
:return: X Y Z position after rotating
"""
# "rotate around a point in 3d space"
# subtract "origin" to move the whole system to rotating around 0,0,0
point = [p - o for p, o in zip(initial_position, rotate_around)]
# might need to scale the point down to unit-length???
# i'll do it just to be safe, it couldn't hurt
length = my_euclidian_distance(point)
if length != 0:
point = [p / length for p in point]
# set up the math as instructed by math.stackexchange
p_vect = [0.0] + point
r_prime_vect = my_quat_conjugate(angle_quat)
# r_prime_vect = [angle_quat[0], -angle_quat[1], -angle_quat[2], -angle_quat[3]]
# P' = R * P * R'
# P' = H( H(R,P), R')
temp = hamilton_product(angle_quat, p_vect)
p_prime_vect = hamilton_product(temp, r_prime_vect)
# note that the first element of P' will always be 0
point = p_prime_vect[1:4]
# might need to undo scaling the point down to unit-length???
point = [p * length for p in point]
# re-add "origin" to move the system to where it should have been
point = [p + o for p, o in zip(point, rotate_around)]
return point
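'''
# usage sketch (illustrative only): rotate the point (1,0,0) by 90 degrees about the Z axis, around the
# origin. The quaternion is built from the standard half-angle formula rather than euler_to_quaternion.
quat_z90 = (math.cos(math.pi / 4), 0.0, 0.0, math.sin(math.pi / 4))
rotate3d((0.0, 0.0, 0.0), quat_z90, (1.0, 0.0, 0.0))   # -> approximately [0.0, 1.0, 0.0]
'''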
def rotate2d(origin: Sequence[float], angle: float, point: Sequence[float]) -> Tuple[float,float]:
"""
Rotate a 2d point counterclockwise by a given angle around a given 2d origin.
The angle should be given in radians.
:param origin: 2x float X Y, rotate-around point
:param angle: float, radians to rotate
:param point: 2x float X Y, point-that-will-be-rotated
:return: 2x float X Y, point after rotation
"""
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy
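'''
# quick example (illustrative only): rotate the 2d point (1,0) counterclockwise by 90 degrees
# (pi/2 radians) around the origin.
rotate2d((0.0, 0.0), math.pi / 2, (1.0, 0.0))   # -> approximately (0.0, 1.0)
'''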
if __name__ == '__main__':
print(_SCRIPT_VERSION)
pause_and_quit("you are not supposed to directly run this file haha")
| 38.605866 | 130 | 0.661459 |
4a269a1abab64bcb3bf3a4375a0f5d06bd659b21 | 12,943 | py | Python | dipy/sims/tests/test_voxel.py | omarocegueda/dipy | 520b724041116a958892bee0068b057314a21cb2 | [
"MIT"
] | 2 | 2018-07-25T14:04:20.000Z | 2021-02-10T07:10:10.000Z | dipy/sims/tests/test_voxel.py | aarya22/dipy-reco1 | 9d20c911b4afe83e52ded698eff9ba0f0fafeca8 | [
"MIT"
] | null | null | null | dipy/sims/tests/test_voxel.py | aarya22/dipy-reco1 | 9d20c911b4afe83e52ded698eff9ba0f0fafeca8 | [
"MIT"
] | 2 | 2018-07-24T21:20:54.000Z | 2018-08-27T04:08:24.000Z | import numpy as np
from nose.tools import (assert_true, assert_false, assert_equal,
assert_almost_equal)
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_)
from dipy.sims.voxel import (_check_directions, SingleTensor, MultiTensor,
multi_tensor_odf, all_tensor_evecs, add_noise,
single_tensor, sticks_and_ball, multi_tensor_dki,
kurtosis_element, dki_signal)
from dipy.core.geometry import (vec2vec_rotmat, sphere2cart)
from dipy.data import get_data, get_sphere
from dipy.core.gradients import gradient_table
from dipy.io.gradients import read_bvals_bvecs
fimg, fbvals, fbvecs = get_data('small_64D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gtab = gradient_table(bvals, bvecs)
# 2 shells for techniques that requires multishell data
bvals_2s = np.concatenate((bvals, bvals * 2), axis=0)
bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
gtab_2s = gradient_table(bvals_2s, bvecs_2s)
def diff2eigenvectors(dx, dy, dz):
""" numerical derivatives 2 eigenvectors
"""
u = np.array([dx, dy, dz])
u = u / np.linalg.norm(u)
R = vec2vec_rotmat(basis[:, 0], u)
eig0 = u
eig1 = np.dot(R, basis[:, 1])
eig2 = np.dot(R, basis[:, 2])
eigs = np.zeros((3, 3))
eigs[:, 0] = eig0
eigs[:, 1] = eig1
eigs[:, 2] = eig2
return eigs, R
def test_check_directions():
# Testing spherical angles for two principal coordinate axis
angles = [(0, 0)] # axis z
sticks = _check_directions(angles)
assert_array_almost_equal(sticks, [[0, 0, 1]])
angles = [(0, 90)] # axis z again (phi can be anything if theta is zero)
sticks = _check_directions(angles)
assert_array_almost_equal(sticks, [[0, 0, 1]])
angles = [(90, 0)] # axis x
sticks = _check_directions(angles)
assert_array_almost_equal(sticks, [[1, 0, 0]])
# Testing if directions are already given in cartesian coordinates
angles = [(0, 0, 1)]
sticks = _check_directions(angles)
assert_array_almost_equal(sticks, [[0, 0, 1]])
# Testing more than one direction simultaneously
angles = np.array([[90, 0], [30, 0]])
sticks = _check_directions(angles)
ref_vec = [np.sin(np.pi*30/180), 0, np.cos(np.pi*30/180)]
assert_array_almost_equal(sticks, [[1, 0, 0], ref_vec])
# Testing directions not aligned to planes x = 0, y = 0, or z = 0
the1 = 0
phi1 = 90
the2 = 30
phi2 = 45
angles = np.array([(the1, phi1), (the2, phi2)])
sticks = _check_directions(angles)
ref_vec1 = (np.sin(np.pi*the1/180) * np.cos(np.pi*phi1/180),
np.sin(np.pi*the1/180) * np.sin(np.pi*phi1/180),
np.cos(np.pi*the1/180))
ref_vec2 = (np.sin(np.pi*the2/180) * np.cos(np.pi*phi2/180),
np.sin(np.pi*the2/180) * np.sin(np.pi*phi2/180),
np.cos(np.pi*the2/180))
assert_array_almost_equal(sticks, [ref_vec1, ref_vec2])
def test_sticks_and_ball():
d = 0.0015
S, sticks = sticks_and_ball(gtab, d=d, S0=1, angles=[(0, 0), ],
fractions=[100], snr=None)
assert_array_equal(sticks, [[0, 0, 1]])
S_st = SingleTensor(gtab, 1, evals=[d, 0, 0], evecs=[[0, 0, 0],
[0, 0, 0],
[1, 0, 0]])
assert_array_almost_equal(S, S_st)
def test_single_tensor():
evals = np.array([1.4, .35, .35]) * 10 ** (-3)
evecs = np.eye(3)
S = SingleTensor(gtab, 100, evals, evecs, snr=None)
assert_array_almost_equal(S[gtab.b0s_mask], 100)
assert_(np.mean(S[~gtab.b0s_mask]) < 100)
from dipy.reconst.dti import TensorModel
m = TensorModel(gtab)
t = m.fit(S)
assert_array_almost_equal(t.fa, 0.707, decimal=3)
def test_multi_tensor():
sphere = get_sphere('symmetric724')
vertices = sphere.vertices
mevals = np.array(([0.0015, 0.0003, 0.0003],
[0.0015, 0.0003, 0.0003]))
e0 = np.array([np.sqrt(2) / 2., np.sqrt(2) / 2., 0])
e1 = np.array([0, np.sqrt(2) / 2., np.sqrt(2) / 2.])
mevecs = [all_tensor_evecs(e0), all_tensor_evecs(e1)]
# odf = multi_tensor_odf(vertices, [0.5, 0.5], mevals, mevecs)
# assert_(odf.shape == (len(vertices),))
# assert_(np.all(odf <= 1) & np.all(odf >= 0))
fimg, fbvals, fbvecs = get_data('small_101D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gtab = gradient_table(bvals, bvecs)
s1 = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
s2 = single_tensor(gtab, 100, mevals[1], mevecs[1], snr=None)
Ssingle = 0.5*s1 + 0.5*s2
S, sticks = MultiTensor(gtab, mevals, S0=100, angles=[(90, 45), (45, 90)],
fractions=[50, 50], snr=None)
assert_array_almost_equal(S, Ssingle)
def test_snr():
np.random.seed(1978)
s = single_tensor(gtab)
# For reasonably large SNR, var(signal) ~= sigma**2, where sigma = 1/SNR
for snr in [5, 10, 20]:
sigma = 1.0 / snr
for j in range(1000):
s_noise = add_noise(s, snr, 1, noise_type='rician')
assert_array_almost_equal(np.var(s_noise - s), sigma ** 2, decimal=2)
def test_all_tensor_evecs():
e0 = np.array([1/np.sqrt(2), 1/np.sqrt(2), 0])
# Vectors are returned column-wise!
desired = np.array([[1/np.sqrt(2), 1/np.sqrt(2), 0],
[-1/np.sqrt(2), 1/np.sqrt(2), 0],
[0, 0, 1]]).T
assert_array_almost_equal(all_tensor_evecs(e0), desired)
def test_kurtosis_elements():
""" Testing symmetry of the elements of the KT
As a 4th order tensor, KT has 81 elements. However, due to diffusion
symmetry the KT is fully characterized by 15 independent elements. This
test checks for this property.
"""
# two fiber not aligned to planes x = 0, y = 0, or z = 0
mevals = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087],
[0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
angles = [(80, 10), (80, 10), (20, 30), (20, 30)]
fie = 0.49 # intra axonal water fraction
frac = [fie * 50, (1-fie) * 50, fie * 50, (1-fie) * 50]
sticks = _check_directions(angles)
mD = np.zeros((len(frac), 3, 3))
for i in range(len(frac)):
R = all_tensor_evecs(sticks[i])
mD[i] = np.dot(np.dot(R, np.diag(mevals[i])), R.T)
# compute global DT
D = np.zeros((3, 3))
for i in range(len(frac)):
D = D + frac[i]*mD[i]
# compute voxel's MD
MD = (D[0][0] + D[1][1] + D[2][2]) / 3
# Reference dictionary with the 15 independent elements.
# Note: The product of the indexes (i+1) * (j+1) * (k+1) * (l+1) for one
# element equals the product for another element if and only if the two
# elements are related by symmetry.
# Thus the index product is used as the key of the reference dictionary
kt_ref = {1: kurtosis_element(mD, frac, 0, 0, 0, 0),
16: kurtosis_element(mD, frac, 1, 1, 1, 1),
81: kurtosis_element(mD, frac, 2, 2, 2, 2),
2: kurtosis_element(mD, frac, 0, 0, 0, 1),
3: kurtosis_element(mD, frac, 0, 0, 0, 2),
8: kurtosis_element(mD, frac, 0, 1, 1, 1),
24: kurtosis_element(mD, frac, 1, 1, 1, 2),
27: kurtosis_element(mD, frac, 0, 2, 2, 2),
54: kurtosis_element(mD, frac, 1, 2, 2, 2),
4: kurtosis_element(mD, frac, 0, 0, 1, 1),
9: kurtosis_element(mD, frac, 0, 0, 2, 2),
36: kurtosis_element(mD, frac, 1, 1, 2, 2),
6: kurtosis_element(mD, frac, 0, 0, 1, 2),
12: kurtosis_element(mD, frac, 0, 1, 1, 2),
18: kurtosis_element(mD, frac, 0, 1, 2, 2)}
# Testing all 81 possible elements
xyz = [0, 1, 2]
for i in xyz:
for j in xyz:
for k in xyz:
for l in xyz:
key = (i+1) * (j+1) * (k+1) * (l+1)
assert_almost_equal(kurtosis_element(mD, frac, i, k, j, l),
kt_ref[key])
# Testing optional function inputs
assert_almost_equal(kurtosis_element(mD, frac, i, k, j, l),
kurtosis_element(mD, frac, i, k, j, l,
D, MD))
def test_DKI_simulations_aligned_fibers():
"""
Testing DKI simulations when aligning the same fiber to different axes.
If biological parameters don't change, kt[0] of a fiber aligned to axis x
has to be equal to kt[1] of a fiber aligned to the axis y and equal to
kt[2] of a fiber aligned to axis z. The same is applicable for dt
"""
# Defining parameters based on Neto Henriques et al., 2015. NeuroImage 111
mevals = np.array([[0.00099, 0, 0], # Intra-cellular
[0.00226, 0.00087, 0.00087]]) # Extra-cellular
frac = [49, 51] # Compartment volume fraction
# axis x
angles = [(90, 0), (90, 0)]
signal_fx, dt_fx, kt_fx = multi_tensor_dki(gtab_2s, mevals, angles=angles,
fractions=frac)
# axis y
angles = [(90, 90), (90, 90)]
signal_fy, dt_fy, kt_fy = multi_tensor_dki(gtab_2s, mevals, angles=angles,
fractions=frac)
# axis z
angles = [(0, 0), (0, 0)]
signal_fz, dt_fz, kt_fz = multi_tensor_dki(gtab_2s, mevals, angles=angles,
fractions=frac)
assert_array_equal([kt_fx[0], kt_fx[1], kt_fx[2]],
[kt_fy[1], kt_fy[0], kt_fy[2]])
assert_array_equal([kt_fx[0], kt_fx[1], kt_fx[2]],
[kt_fz[2], kt_fz[0], kt_fz[1]])
assert_array_equal([dt_fx[0], dt_fx[2], dt_fx[5]],
[dt_fy[2], dt_fy[0], dt_fy[5]])
assert_array_equal([dt_fx[0], dt_fx[2], dt_fx[5]],
[dt_fz[5], dt_fz[0], dt_fz[2]])
# testing S signal along axis x, y and z
bvals = np.array([0, 0, 0, 1000, 1000, 1000, 2000, 2000, 2000])
bvecs = np.asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1],
[1, 0, 0], [0, 1, 0], [0, 0, 1],
[1, 0, 0], [0, 1, 0], [0, 0, 1]])
gtab_axis = gradient_table(bvals, bvecs)
# axis x
S_fx = dki_signal(gtab_axis, dt_fx, kt_fx, S0=100)
assert_array_almost_equal(S_fx[0:3], [100, 100, 100]) # test S for b=0
# axis y
S_fy = dki_signal(gtab_axis, dt_fy, kt_fy, S0=100)
assert_array_almost_equal(S_fy[0:3], [100, 100, 100]) # test S for b=0
# axis z
S_fz = dki_signal(gtab_axis, dt_fz, kt_fz, S0=100)
assert_array_almost_equal(S_fz[0:3], [100, 100, 100]) # test S for b=0
# test S for b = 1000
assert_array_almost_equal([S_fx[3], S_fx[4], S_fx[5]],
[S_fy[4], S_fy[3], S_fy[5]])
assert_array_almost_equal([S_fx[3], S_fx[4], S_fx[5]],
[S_fz[5], S_fz[3], S_fz[4]])
# test S for b = 2000
assert_array_almost_equal([S_fx[6], S_fx[7], S_fx[8]],
[S_fy[7], S_fy[6], S_fy[8]])
assert_array_almost_equal([S_fx[6], S_fx[7], S_fx[8]],
[S_fz[8], S_fz[6], S_fz[7]])
def test_DKI_crossing_fibers_simulations():
""" Testing DKI simulations of a crossing fiber
"""
# two fiber not aligned to planes x = 0, y = 0, or z = 0
mevals = np.array([[0.00099, 0, 0], [0.00226, 0.00087, 0.00087],
[0.00099, 0, 0], [0.00226, 0.00087, 0.00087]])
angles = [(80, 10), (80, 10), (20, 30), (20, 30)]
fie = 0.49
frac = [fie*50, (1 - fie)*50, fie*50, (1 - fie)*50]
signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, angles=angles,
fractions=frac, snr=None)
# in these simulations dt and kt cannot have zero elements
for i in range(len(dt)):
assert dt[i] != 0
for i in range(len(kt)):
assert kt[i] != 0
# test S, dt and kt relative to the expected values computed from another
# DKI package - UDKI (Neto Henriques et al., 2015)
dt_ref = [1.0576161e-3, 0.1292542e-3, 0.4786179e-3,
0.2667081e-3, 0.1136643e-3, 0.9888660e-3]
kt_ref = [2.3529944, 0.8226448, 2.3011221, 0.2017312, -0.0437535,
0.0404011, 0.0355281, 0.2449859, 0.2157668, 0.3495910,
0.0413366, 0.3461519, -0.0537046, 0.0133414, -0.017441]
assert_array_almost_equal(dt, dt_ref)
assert_array_almost_equal(kt, kt_ref)
assert_array_almost_equal(signal,
dki_signal(gtab_2s, dt_ref, kt_ref, S0=1.,
snr=None),
decimal=5)
if __name__ == "__main__":
test_multi_tensor()
| 40.701258 | 79 | 0.564166 |
4a269a5496e8a0689b293858ea9c4958c00d883a | 4,788 | py | Python | src/bintray_client.py | hmrc/bintray-backup-restore | 26b4744fcbd736fea8c318fe36ce2c11e80bce04 | [
"Apache-2.0"
] | 1 | 2021-01-28T11:10:53.000Z | 2021-01-28T11:10:53.000Z | src/bintray_client.py | hmrc/bintray-backup-restore | 26b4744fcbd736fea8c318fe36ce2c11e80bce04 | [
"Apache-2.0"
] | null | null | null | src/bintray_client.py | hmrc/bintray-backup-restore | 26b4744fcbd736fea8c318fe36ce2c11e80bce04 | [
"Apache-2.0"
] | 1 | 2021-04-10T23:38:50.000Z | 2021-04-10T23:38:50.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import os
import requests
from progress.bar import IncrementalBar
from tenacity import retry
PROGRESS_BAR_FORMAT = "%(percent).1f%% - remaining %(remaining)d - eta %(eta)ds"
def get_sha1_hash(path):
pathstr = str(path)
stream = os.popen(f"sha1sum {pathstr}")
return stream.read().split(" ")[0]
class BintrayClient:
def __init__(self, organisation, api_creds):
self.api_creds = api_creds
self.organisation = organisation
def get_repository_names(self):
response = requests.get(
f"https://bintray.com/api/v1/repos/{self.organisation}/",
auth=self.api_creds,
)
response.raise_for_status()
repository_names = map(lambda repository: repository["name"], response.json())
return list(repository_names)
def get_package_names(self, repository):
discovered_packages = []
packages_api = f"https://bintray.com/api/v1/repos/{self.organisation}/{repository}/packages?start_pos="
start_pos = 0
while True:
print(f"getting package names: {start_pos}")
response = requests.get(f"{packages_api}{start_pos}", auth=self.api_creds)
response.raise_for_status()
discovered_packages.extend([package["name"] for package in response.json()])
# print(response.headers)
if "X-RangeLimit-EndPos" not in response.headers:
break
if int(response.headers["X-RangeLimit-EndPos"]) + 1 == int(
response.headers["X-RangeLimit-Total"]
):
break
start_pos = int(response.headers["X-RangeLimit-EndPos"]) + 1
return discovered_packages
def get_package_information(self, package_name, repository):
response = requests.get(
f"https://bintray.com/api/v1/packages/{self.organisation}/{repository}/{package_name}",
auth=self.api_creds,
)
response.raise_for_status()
return response.json()
def write_package_metadata(self, package_information, file_path):
with open(f"{file_path}/package_metadata.json", "w") as pm:
json.dump(package_information, pm)
def get_package_files(self, repository, package_name):
files_response = requests.get(
f"https://bintray.com/api/v1/packages/{self.organisation}/{repository}/{package_name}/files",
auth=self.api_creds,
)
files_response.raise_for_status()
return files_response.json()
@retry
def download_file(self, path, url):
with requests.get(url, auth=self.api_creds) as r:
r.raise_for_status()
with path.open(mode="wb") as f:
f.write(r.content)
@retry
def upload_file(self, path):
response = requests.put(
f"https://bintray.com/api/v1/content/{self.organisation}/{path}?publish=1&override=1",
auth=self.api_creds,
data=path.read_bytes(),
)
response.raise_for_status()
def get_metadata(self, repositories):
all_files = []
package_metadata = []
for repository in repositories:
package_names = self.get_package_names(repository)
for package in IncrementalBar(
f"Downloading '{repository}' package information",
suffix=PROGRESS_BAR_FORMAT,
).iter(package_names):
package_metadata.append(
self.get_package_information(package, repository)
)
all_files.extend(self.get_package_files(repository, package))
return all_files, package_metadata
def create_package(self, repository, local_metadata):
metadata = {
"name": local_metadata["name"],
"licenses": ["Apache-2.0"],
"vcs_url": local_metadata.get("vcs_url", "https://github.com/hmrc"),
"desc": local_metadata.get("desc"),
"labels": local_metadata.get("labels"),
"website_url": local_metadata.get("website_url"),
"issue_tracker_url": local_metadata.get("issue_tracker_url"),
"github_repo": local_metadata.get("github_repo"),
"github_release_notes_file": local_metadata.get(
"github_release_notes_file"
),
}
if metadata["vcs_url"] is None:
metadata["vcs_url"] = "https://github.com/hmrc"
package_response = requests.post(
f"https://bintray.com/api/v1/packages/{self.organisation}/{repository}",
auth=self.api_creds,
json=metadata,
)
package_response.raise_for_status()
return package_response.json()
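# A minimal usage sketch (illustrative only; "my-org", "username" and "api_key" are placeholder
# values, not anything defined in this module):
#
#   client = BintrayClient("my-org", ("username", "api_key"))
#   repositories = client.get_repository_names()
#   all_files, package_metadata = client.get_metadata(repositories)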
| 38 | 111 | 0.615706 |
4a269a563cb050c243c7e242ae673c279282d512 | 1,027 | py | Python | test/001/test.py | NSSAC/nssacPreCommitHook | ff27364687c8d89d2d1d8fbe982bf67d851e02fc | [
"Apache-2.0"
] | null | null | null | test/001/test.py | NSSAC/nssacPreCommitHook | ff27364687c8d89d2d1d8fbe982bf67d851e02fc | [
"Apache-2.0"
] | null | null | null | test/001/test.py | NSSAC/nssacPreCommitHook | ff27364687c8d89d2d1d8fbe982bf67d851e02fc | [
"Apache-2.0"
] | null | null | null | # BEGIN: Copyright
# Copyright (C) 2019 Rector and Visitors of the University of Virginia
# All rights reserved
# END: Copyright
# BEGIN: License
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# END: License
from nssacPreCommitHook.header import Header
from nssacPreCommitHook.configuration import Configuration
from nssacPreCommitHook.git import Git
configuration = Configuration().loadJsonFile("test/example.json")
git = Git(repo_path="/home/shoops/git/COPASI")
header = Header(git, configuration["copyright"], configuration["license"] if "license" in configuration else None)
# header.updateHeader("/home/shoops/git/COPASI/copasi/model/CModel.cpp", commentStart="//", mode="actual")
header.updateHeader("/home/shoops/git/COPASI/copasi/xml/CopasiML.rng", commentStart="<!--", commentEnd="-->", prolog=[{"end": ">"}], mode="actual")
| 38.037037 | 147 | 0.745862 |
4a269ba6571baf473a81fefc9a02ab122543edbd | 552 | py | Python | cli/src/commands/run_config.py | mikiec84/docsearch-scraper | 08aa90ee9bf91b3e5e3e0d383e4d1b4d284c56b8 | [
"MIT"
] | 1 | 2020-11-09T21:06:27.000Z | 2020-11-09T21:06:27.000Z | cli/src/commands/run_config.py | mikiec84/docsearch-scraper | 08aa90ee9bf91b3e5e3e0d383e4d1b4d284c56b8 | [
"MIT"
] | 4 | 2021-03-31T19:59:26.000Z | 2022-03-02T15:02:55.000Z | cli/src/commands/run_config.py | gaybro8777/docsearch-scraper | 08aa90ee9bf91b3e5e3e0d383e4d1b4d284c56b8 | [
"MIT"
] | null | null | null | from scraper.src.index import run_config
from .abstract_command import AbstractCommand
class RunConfig(AbstractCommand):
def get_name(self):
return 'run'
def get_description(self):
return 'Run a config'
def get_usage(self):
return super(RunConfig, self).get_usage() + " config"
def get_options(self):
return [{"name": "config", "description": "path to the config to run"}]
def run(self, args):
self.check_not_docsearch_app_id('run a config manually')
return run_config(args[0])
| 26.285714 | 79 | 0.67029 |
4a269bd22e26504b747eda8f1c55cc828051a84a | 1,421 | py | Python | bin/app_setup.py | openstacker/splunk_app_catalyst_cloud | 6d0302aa7bc6be2192a757abcbc350b25a506848 | [
"Apache-1.1"
] | null | null | null | bin/app_setup.py | openstacker/splunk_app_catalyst_cloud | 6d0302aa7bc6be2192a757abcbc350b25a506848 | [
"Apache-1.1"
] | null | null | null | bin/app_setup.py | openstacker/splunk_app_catalyst_cloud | 6d0302aa7bc6be2192a757abcbc350b25a506848 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2017 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import splunk.admin as admin
class ConfigApp(admin.MConfigHandler):
def setup(self):
if self.requestedAction == admin.ACTION_EDIT:
for arg in ['baseurl', 'tenant']:
self.supportedArgs.addOptArg(arg)
def handleList(self, confInfo):
confDict = self.readConf("myconf")
if confDict:
for stanza, settings in confDict.items():
for key, val in settings.items():
if key in ['baseurl', 'tenant'] and not val:
val = ''
confInfo[stanza].append(key, val)
def handleEdit(self, confInfo):
name = self.callerArgs.id
args = self.callerArgs
self.writeConf('myconf', 'userinfo', self.callerArgs.data)
admin.init(ConfigApp, admin.CONTEXT_NONE)
| 33.046512 | 69 | 0.655876 |
4a269c5218af88fb30496502f1c1ecbd1872b258 | 783 | py | Python | src/terra/constants.py | eco-stake/staketaxcsv | 4d619b0304f8e58ad7940be50edeee4ccc05cd8a | [
"MIT"
] | 1 | 2022-02-05T05:37:08.000Z | 2022-02-05T05:37:08.000Z | src/terra/constants.py | eco-stake/staketaxcsv | 4d619b0304f8e58ad7940be50edeee4ccc05cd8a | [
"MIT"
] | null | null | null | src/terra/constants.py | eco-stake/staketaxcsv | 4d619b0304f8e58ad7940be50edeee4ccc05cd8a | [
"MIT"
] | 1 | 2022-02-05T05:37:11.000Z | 2022-02-05T05:37:11.000Z | MILLION = 1000000.0
CONTRACTS_LOTA = [
"terra1e7hzp3tnsswpfcu6gt4wlgfm20lcsqqywhaagu",
"terra14mevcmeqt0n4myggt7c56l5fl0xw2hwa2mhlg0", # register contract
"terra1342fp86c3z3q0lksq92lncjxpkfl9hujwh6xfn", # withdraw stake contract
"terra1ez46kxtulsdv07538fh5ra5xj8l68mu8eg24vr" # bond stake contract
]
CONTRACT_RANDOMEARTH = "terra1eek0ymmhyzja60830xhzm7k7jkrk99a60q2z2t"
CHAIN_ID = "columbus-5"
EXCHANGE_TERRA_BLOCKCHAIN = "terra_blockchain"
EXCHANGE_ANCHOR_EARN = "anchor_earn"
CUR_ANC = "ANC"
CUR_KRT = "KRT"
CUR_LUNA = "LUNA"
CUR_MIR = "MIR"
CUR_MNT = "MNT"
CUR_UST = "UST"
CUR_AUST = "aUST"
CUR_BLUNA = "bLUNA"
CUR_MINE = "MINE"
CUR_ORION = "ORION"
IBC_TOKEN_NAMES = {
"ibc/0471F1C4E7AFD3F07702BEF6DC365268D64570F7C1FDC98EA6098DD6DE59817B": "OSMO"
}
| 25.258065 | 82 | 0.781609 |
4a269d60dd5c1f4e80fa8dc77b4dcfa9dcab3bdc | 2,794 | py | Python | P1/test.py | Rogerwlk/Information-Retrieval-Search-Engine | 7f7f0237106344cdaadbf87d769a281218fe711f | [
"MIT"
] | null | null | null | P1/test.py | Rogerwlk/Information-Retrieval-Search-Engine | 7f7f0237106344cdaadbf87d769a281218fe711f | [
"MIT"
] | null | null | null | P1/test.py | Rogerwlk/Information-Retrieval-Search-Engine | 7f7f0237106344cdaadbf87d769a281218fe711f | [
"MIT"
] | null | null | null | import re
p_date = re.compile(r"""\b
([0-9]{1,2}-[0-9]{1,2}-[0-9]{2,4})
|(((Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[.]?
|January|February|March|April|May|June|July|August
|September|October|November|December)
\ [0-9]{1,2}(st|nd|rd|th)?,\ [0-9]{2,4})
\b""", re.VERBOSE)
p_num1 = re.compile(r',([0-9]{3})')
p_num2 = re.compile(r'\d+[.]\d+')
def validDate(month, day, year):
if month <= 0 or month > 12 or year > 2018 or year < 0 or day <= 0:
return False
if (month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or month == 10 or month == 12) and day > 31:
return False
if month == 2 and day > 29:
return False
if (month == 4 or month == 6 or month == 9 or month == 11) and day > 30:
return False
return True
def dateReplace(match):
match = match.group()
month = day = year = 0
if match[0] >= '0' and match[0] <= '9':
date = match.split('-')
month, day, year = int(date[0]), int(date[1]), int(date[2])
else:
date = match.split()
t_m, t_d, year = date[0], date[1], int(date[2])
if t_m[0] == 'J':
if t_m[1] == 'a':
month = 1
elif t_m[2] == 'n':
month = 6
else:
month = 7
elif t_m[0] == 'F':
month = 2
elif t_m[0] == 'M':
if t_m[2] == 'r':
month = 3
else:
month = 5
elif t_m[0] == 'A':
if t_m[1] == 'p':
month = 4
else:
month = 8
elif t_m[0] == 'S':
month = 9
elif t_m[0] == 'O':
month = 10
elif t_m[0] == 'N':
month = 11
else:
month = 12
if t_d[1] >= '0' and t_d[1] <= '9':
day = 10 * int(t_d[0]) + int(t_d[1])
else:
day = int(t_d[0])
if not validDate(month, day, year):
return ''
if year < 100:
if year <= 18:
year += 2000
else:
year += 1900
s_month = str(month)
if month < 10:
s_month = '0' + s_month
s_day = str(day)
if day < 10:
s_day = '0' + s_day
return s_month+'/'+s_day+'/'+str(year)
# p_date = re.compile(r"""\b
# ([0-9]{1,2}-[0-9]{1,2}-[0-9]{2,4})
# |(((Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)[.]?
# |January|February|March|April|May|June|July|August
# |September|October|November|December)
# \ [0-9]{1,2}(st|nd|rd|th)?,\ [0-9]{2,4})
# \b""", re.X)
# p_value = re.compile(r',([0-9]{3})')
def round_number(match):
print(docno)
return str(round(float(match.group())))
p_num1 = re.compile(r',([0-9]{3})')
p_num2 = re.compile(r'\b(\d+)[.]0+\b')
temp = '1,000,000.001'
temp = p_num1.sub(r'\g<1>', temp) # remove ',' in 1,000
temp = p_num2.sub(r'\g<1>', temp) # remove '.00' in 1.00
print(temp)
# print(p_date.sub(dateReplace, '05-07-18 Jan 31, 2012 March 3rd, '))
# temp = p_num1.sub(r'\g<1>', 'abc 1,000,000')
# docno = 1
# print(p_num2.sub(round_number, temp))
# p_docno = re.compile(r'(?:<DOCNO>\s*)(.+)(?:\s*</DOCNO>)')
# print(p_docno.search('<DOCNO> FR940104-0-00021 </DOCNO>').group(1)) | 26.11215 | 116 | 0.557266 |
4a269dafdb0b91e2ee418b2070fcab4c3d821cf2 | 2,298 | py | Python | DefaultPythonSource/TA/TAPython/Python/ChameleonSketch/ChameleonSketch.py | cgerchenhp/TAPython_DefaultResources | 785175cbd93c4dd2185a64b7942d66fa8c2d8bea | [
"MIT"
] | null | null | null | DefaultPythonSource/TA/TAPython/Python/ChameleonSketch/ChameleonSketch.py | cgerchenhp/TAPython_DefaultResources | 785175cbd93c4dd2185a64b7942d66fa8c2d8bea | [
"MIT"
] | null | null | null | DefaultPythonSource/TA/TAPython/Python/ChameleonSketch/ChameleonSketch.py | cgerchenhp/TAPython_DefaultResources | 785175cbd93c4dd2185a64b7942d66fa8c2d8bea | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import time
import unreal
from Utilities.Utils import Singleton
import random
import os
class ChameleonSketch(metaclass=Singleton):
def __init__(self, jsonPath):
self.jsonPath = jsonPath
self.data = unreal.PythonBPLib.get_chameleon_data(self.jsonPath)
self.ui_names = ["SMultiLineEditableTextBox", "SMultiLineEditableTextBox_2"]
self.debug_index = 1
self.ui_python_not_ready = "IsPythonReadyImg"
self.ui_python_is_ready = "IsPythonReadyImgB"
self.ui_is_python_ready_text = "IsPythonReadyText"
print ("ChameleonSketch.Init")
def mark_python_ready(self):
print("set_python_ready call")
self.data.set_visibility(self.ui_python_not_ready, "Collapsed")
self.data.set_visibility(self.ui_python_is_ready, "Visible")
self.data.set_text(self.ui_is_python_ready_text, "Python Path Ready.")
def get_texts(self):
for name in self.ui_names:
n = self.data.get_text(name)
print(f"name: {n}")
def set_texts(self):
for name in self.ui_names:
self.data.set_text(name, ["AAA", "BBB", "CCC", "DDD", "EEE", "FFF"][random.randint(0, 5)])
def set_text_one(self):
self.data.set_text(self.ui_names[self.debug_index], ["AAA", "BBB", "CCC", "DDD", "EEE", "FFF"][random.randint(0, 5)] )
def get_text_one(self):
print(f"name: {self.data.get_text(self.ui_names[self.debug_index])}")
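    # Illustration (added; not part of the original class): tree() below flattens a
    # directory tree into two parallel lists for set_tree_view_items -- names[i] is a
    # node label and parent_indices[i] is the index of that node's parent (-1 for a
    # root). For a hypothetical layout Content/Maps/Start.umap the encoding would be
    #   names          = ["Content", "Maps", "Start.umap"]
    #   parent_indices = [-1,        0,      1]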
def tree(self):
print(time.time())
names = []
parent_indices = []
name_to_index = dict()
for root, folders, files in os.walk(r"D:\UnrealProjects\5_0\RDZ\Content"):
root_name = os.path.basename(root)
if root not in name_to_index:
name_to_index[root] = len(names)
parent_indices.append(-1 if not names else name_to_index[os.path.dirname(root)])
names.append(root_name)
parent_id = name_to_index[root]
for items in [folders, files]:
for item in items:
names.append(item)
parent_indices.append(parent_id)
print(len(names))
self.data.set_tree_view_items("TreeViewA", names, parent_indices)
print(time.time()) | 38.3 | 126 | 0.630548 |
4a269de20030a9d51d68aabd92e11d40be2ef843 | 787 | py | Python | thyme/wallet/puzzles/p2_puzzle_hash.py | yuanliuus/thyme-blockchain | 9ea5cddc78f601fcbe77101d74147cf8190e423d | [
"Apache-2.0"
] | 6 | 2021-06-30T13:03:47.000Z | 2021-07-10T12:46:37.000Z | thyme/wallet/puzzles/p2_puzzle_hash.py | yuanliuus/thyme-blockchain | 9ea5cddc78f601fcbe77101d74147cf8190e423d | [
"Apache-2.0"
] | 8 | 2021-07-01T15:45:09.000Z | 2021-09-08T04:30:46.000Z | thyme/wallet/puzzles/p2_puzzle_hash.py | yuanliuus/thyme-blockchain | 9ea5cddc78f601fcbe77101d74147cf8190e423d | [
"Apache-2.0"
] | 11 | 2021-07-03T17:30:57.000Z | 2022-03-15T08:47:03.000Z | """
Pay to puzzle hash
In this puzzle program, the solution must be a reveal of the puzzle with the given
hash along with its solution.
"""
from thyme.types.blockchain_format.program import Program
from thyme.types.blockchain_format.sized_bytes import bytes32
from .load_clvm import load_clvm
MOD = load_clvm("p2_puzzle_hash.clvm")
def puzzle_for_inner_puzzle_hash(inner_puzzle_hash: bytes32) -> Program:
program = MOD.curry(inner_puzzle_hash)
return program
def puzzle_for_inner_puzzle(inner_puzzle: Program) -> Program:
return puzzle_for_inner_puzzle_hash(inner_puzzle.get_tree_hash())
def solution_for_inner_puzzle_and_inner_solution(inner_puzzle: Program, inner_puzzle_solution: Program) -> Program:
return Program.to([inner_puzzle, inner_puzzle_solution])
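# Usage sketch (added for illustration; not part of the original module). Assuming an
# `inner_puzzle: Program` and a matching `inner_solution: Program` built elsewhere:
#
#   outer_puzzle = puzzle_for_inner_puzzle(inner_puzzle)
#   outer_solution = solution_for_inner_puzzle_and_inner_solution(inner_puzzle, inner_solution)
#
# The outer puzzle commits only to inner_puzzle.get_tree_hash(); spending it requires
# revealing the matching inner puzzle together with its own solution, as the module
# docstring describes.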
| 29.148148 | 115 | 0.808132 |
4a269fec691f38d01e4f9a6a8a981fac050b06f4 | 11,358 | py | Python | test/objects/linode_test.py | rylabs-billy/linode_api4-python | 55a804d7ae9c208965b6fb759d772cdcebade896 | [
"BSD-3-Clause"
] | 67 | 2018-08-26T20:36:34.000Z | 2022-03-11T06:15:04.000Z | test/objects/linode_test.py | rylabs-billy/linode_api4-python | 55a804d7ae9c208965b6fb759d772cdcebade896 | [
"BSD-3-Clause"
] | 89 | 2016-04-19T00:37:00.000Z | 2018-04-16T14:46:39.000Z | test/objects/linode_test.py | rylabs-billy/linode_api4-python | 55a804d7ae9c208965b6fb759d772cdcebade896 | [
"BSD-3-Clause"
] | 43 | 2018-06-28T18:21:14.000Z | 2022-02-09T20:53:42.000Z | from datetime import datetime
from test.base import ClientBaseCase
from linode_api4.objects import Config, Disk, Image, Instance, Type
from linode_api4.objects.base import MappedObject
class LinodeTest(ClientBaseCase):
"""
Tests methods of the Linode class
"""
def test_get_linode(self):
"""
        Tests that a Linode instance is loaded correctly by ID
"""
linode = Instance(self.client, 123)
self.assertEqual(linode._populated, False)
self.assertEqual(linode.label, "linode123")
self.assertEqual(linode.group, "test")
self.assertTrue(isinstance(linode.image, Image))
self.assertEqual(linode.image.label, "Ubuntu 17.04")
json = linode._raw_json
self.assertIsNotNone(json)
self.assertEqual(json['id'], 123)
self.assertEqual(json['label'], 'linode123')
self.assertEqual(json['group'], 'test')
# test that the _raw_json stored on the object is sufficient to populate
# a new object
linode2 = Instance(self.client, json['id'], json=json)
self.assertTrue(linode2._populated)
self.assertEqual(linode2.id, linode.id)
self.assertEqual(linode2.label, linode.label)
self.assertEqual(linode2.group, linode.group)
self.assertEqual(linode2._raw_json, linode._raw_json)
def test_transfer(self):
"""
Tests that you can get transfer
"""
linode = Instance(self.client, 123)
transfer = linode.transfer
self.assertEqual(transfer.quota, 471)
self.assertEqual(transfer.billable, 0)
self.assertEqual(transfer.used, 10369075)
def test_rebuild(self):
"""
Tests that you can rebuild with an image
"""
linode = Instance(self.client, 123)
with self.mock_post('/linode/instances/123') as m:
pw = linode.rebuild('linode/debian9')
self.assertIsNotNone(pw)
self.assertTrue(isinstance(pw, str))
self.assertEqual(m.call_url, '/linode/instances/123/rebuild')
self.assertEqual(m.call_data, {
"image": "linode/debian9",
"root_pass": pw,
})
def test_available_backups(self):
"""
Tests that a Linode can retrieve its own backups
"""
linode = Instance(self.client, 123)
backups = linode.available_backups
# assert we got the correct number of automatic backups
self.assertEqual(len(backups.automatic), 3)
# examine one automatic backup
b = backups.automatic[0]
self.assertEqual(b.id, 12345)
self.assertEqual(b._populated, True)
self.assertEqual(b.status, 'successful')
self.assertEqual(b.type, 'auto')
self.assertEqual(b.created, datetime(year=2018, month=1, day=9, hour=0,
minute=1, second=1))
self.assertEqual(b.updated, datetime(year=2018, month=1, day=9, hour=0,
minute=1, second=1))
self.assertEqual(b.finished, datetime(year=2018, month=1, day=9, hour=0,
minute=1, second=1))
self.assertEqual(b.region.id, 'us-east-1a')
self.assertEqual(b.label, None)
self.assertEqual(b.message, None)
self.assertEqual(len(b.disks), 2)
self.assertEqual(b.disks[0].size, 1024)
self.assertEqual(b.disks[0].label, 'Debian 8.1 Disk')
self.assertEqual(b.disks[0].filesystem, 'ext4')
self.assertEqual(b.disks[1].size, 0)
self.assertEqual(b.disks[1].label, '256MB Swap Image')
self.assertEqual(b.disks[1].filesystem, 'swap')
self.assertEqual(len(b.configs), 1)
self.assertEqual(b.configs[0], 'My Debian 8.1 Profile')
# assert that snapshots came back as expected
self.assertEqual(backups.snapshot.current, None)
self.assertEqual(backups.snapshot.in_progress, None)
def test_update_linode(self):
"""
Tests that a Linode can be updated
"""
with self.mock_put('linode/instances/123') as m:
linode = self.client.load(Instance, 123)
linode.label = "NewLinodeLabel"
linode.group = "new_group"
linode.save()
self.assertEqual(m.call_url, '/linode/instances/123')
self.assertEqual(m.call_data, {
"alerts": {
"cpu": 90,
"io": 5000,
"network_in": 5,
"network_out": 5,
"transfer_quota": 80
},
"label": "NewLinodeLabel",
"group": "new_group",
"tags": ["something"],
})
def test_delete_linode(self):
"""
Tests that deleting a Linode creates the correct api request
"""
with self.mock_delete() as m:
linode = Instance(self.client, 123)
linode.delete()
self.assertEqual(m.call_url, '/linode/instances/123')
def test_reboot(self):
"""
Tests that you can submit a correct reboot api request
"""
linode = Instance(self.client, 123)
result = {}
with self.mock_post(result) as m:
linode.reboot()
self.assertEqual(m.call_url, '/linode/instances/123/reboot')
def test_shutdown(self):
"""
Tests that you can submit a correct shutdown api request
"""
linode = Instance(self.client, 123)
result = {}
with self.mock_post(result) as m:
linode.shutdown()
self.assertEqual(m.call_url, '/linode/instances/123/shutdown')
def test_boot(self):
"""
Tests that you can submit a correct boot api request
"""
linode = Instance(self.client, 123)
result = {}
with self.mock_post(result) as m:
linode.boot()
self.assertEqual(m.call_url, '/linode/instances/123/boot')
def test_resize(self):
"""
Tests that you can submit a correct resize api request
"""
linode = Instance(self.client, 123)
result = {}
with self.mock_post(result) as m:
linode.resize(new_type='g6-standard-1')
self.assertEqual(m.call_url, '/linode/instances/123/resize')
self.assertEqual(m.call_data, {'type': 'g6-standard-1'})
def test_resize_with_class(self):
"""
Tests that you can submit a correct resize api request with a Base class type
"""
linode = Instance(self.client, 123)
ltype = Type(self.client, 'g6-standard-2')
result = {}
with self.mock_post(result) as m:
linode.resize(new_type=ltype)
self.assertEqual(m.call_url, '/linode/instances/123/resize')
self.assertEqual(m.call_data, {'type': 'g6-standard-2'})
def test_boot_with_config(self):
"""
Tests that you can submit a correct boot with a config api request
"""
linode = Instance(self.client, 123)
config = linode.configs[0]
result = {}
with self.mock_post(result) as m:
linode.boot(config=config)
self.assertEqual(m.call_url, '/linode/instances/123/boot')
def test_mutate(self):
"""
Tests that you can submit a correct mutate api request
"""
linode = Instance(self.client, 123)
result = {}
with self.mock_post(result) as m:
linode.mutate()
self.assertEqual(m.call_url, '/linode/instances/123/mutate')
def test_initiate_migration(self):
"""
Tests that you can initiate a pending migration
"""
linode = Instance(self.client, 123)
result = {}
with self.mock_post(result) as m:
linode.initiate_migration()
self.assertEqual(m.call_url, '/linode/instances/123/migrate')
def test_create_disk(self):
"""
Tests that disk_create behaves as expected
"""
linode = Instance(self.client, 123)
with self.mock_post("/linode/instances/123/disks/12345") as m:
disk, gen_pass = linode.disk_create(1234, label="test", authorized_users=["test"], image="linode/debian10")
self.assertEqual(m.call_url, "/linode/instances/123/disks")
print(m.call_data)
self.assertEqual(m.call_data, {
"size": 1234,
"label": "test",
"root_pass": gen_pass,
"image": "linode/debian10",
"authorized_users": ["test"],
"read_only": False,
})
assert disk.id == 12345
class DiskTest(ClientBaseCase):
"""
Tests for the Disk object
"""
def test_resize(self):
"""
Tests that a resize is submitted correctly
"""
disk = Disk(self.client, 12345, 123)
with self.mock_post({}) as m:
r = disk.resize(1000)
self.assertTrue(r)
self.assertEqual(m.call_url, '/linode/instances/123/disks/12345/resize')
self.assertEqual(m.call_data, {"size": 1000})
class ConfigTest(ClientBaseCase):
"""
Tests for the Config object
"""
def test_update_interfaces(self):
"""
Tests that a configs interfaces update correctly
"""
json = self.client.get('/linode/instances/123/configs/456789')
config = Config(self.client, 456789, 123, json=json)
with self.mock_put('/linode/instances/123/configs/456789') as m:
new_interfaces = [
{
'purpose': 'public'
},
{
'purpose': 'vlan',
'label': 'cool-vlan'
}
]
config.interfaces = new_interfaces
config.save()
self.assertEqual(m.call_url, '/linode/instances/123/configs/456789')
self.assertEqual(m.call_data.get('interfaces'), new_interfaces)
class TypeTest(ClientBaseCase):
def test_get_types(self):
"""
Tests that Linode types can be returned
"""
types = self.client.linode.types()
self.assertEqual(len(types), 4)
for t in types:
self.assertTrue(t._populated)
self.assertIsNotNone(t.id)
self.assertIsNotNone(t.label)
self.assertIsNotNone(t.disk)
self.assertIsNotNone(t.type_class)
self.assertIsNotNone(t.gpus)
def test_get_type_by_id(self):
"""
Tests that a Linode type is loaded correctly by ID
"""
t = Type(self.client, 'g5-nanode-1')
self.assertEqual(t._populated, False)
self.assertEqual(t.vcpus, 1)
self.assertEqual(t.gpus, 0)
self.assertEqual(t.label, "Linode 1024")
self.assertEqual(t.disk, 20480)
self.assertEqual(t.type_class, 'nanode')
def test_get_type_gpu(self):
"""
Tests that gpu types load up right
"""
t = Type(self.client, "g5-gpu-2")
self.assertEqual(t._populated, False)
self.assertEqual(t.gpus, 1)
self.assertEqual(t._populated, True)
| 32.267045 | 119 | 0.573781 |
4a269ff3651e182be35f62e2f75bed0bec9c8dc1 | 48,610 | py | Python | python/ccxt/bitmex.py | QuoineFinancial/ccxt | c715e44a64250c20bdefd5f5b80ac60f4ae80913 | [
"MIT"
] | null | null | null | python/ccxt/bitmex.py | QuoineFinancial/ccxt | c715e44a64250c20bdefd5f5b80ac60f4ae80913 | [
"MIT"
] | null | null | null | python/ccxt/bitmex.py | QuoineFinancial/ccxt | c715e44a64250c20bdefd5f5b80ac60f4ae80913 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.decimal_to_precision import TICK_SIZE
class bitmex (Exchange):
def describe(self):
return self.deep_extend(super(bitmex, self).describe(), {
'id': 'bitmex',
'name': 'BitMEX',
'countries': ['SC'], # Seychelles
'version': 'v1',
'userAgent': None,
'rateLimit': 2000,
'has': {
'CORS': False,
'fetchOHLCV': True,
'withdraw': True,
'editOrder': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
'fetchMyTrades': True,
'fetchLedger': True,
'fetchTransactions': 'emulated',
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'1h': '1h',
'1d': '1d',
},
'urls': {
'test': 'https://testnet.bitmex.com',
'logo': 'https://user-images.githubusercontent.com/1294454/27766319-f653c6e6-5ed4-11e7-933d-f0bc3699ae8f.jpg',
'api': 'https://www.bitmex.com',
'www': 'https://www.bitmex.com',
'doc': [
'https://www.bitmex.com/app/apiOverview',
'https://github.com/BitMEX/api-connectors/tree/master/official-http',
],
'fees': 'https://www.bitmex.com/app/fees',
'referral': 'https://www.bitmex.com/register/rm3C16',
},
'api': {
'public': {
'get': [
'announcement',
'announcement/urgent',
'funding',
'instrument',
'instrument/active',
'instrument/activeAndIndices',
'instrument/activeIntervals',
'instrument/compositeIndex',
'instrument/indices',
'insurance',
'leaderboard',
'liquidation',
'orderBook',
'orderBook/L2',
'quote',
'quote/bucketed',
'schema',
'schema/websocketHelp',
'settlement',
'stats',
'stats/history',
'trade',
'trade/bucketed',
],
},
'private': {
'get': [
'apiKey',
'chat',
'chat/channels',
'chat/connected',
'execution',
'execution/tradeHistory',
'notification',
'order',
'position',
'user',
'user/affiliateStatus',
'user/checkReferralCode',
'user/commission',
'user/depositAddress',
'user/margin',
'user/minWithdrawalFee',
'user/wallet',
'user/walletHistory',
'user/walletSummary',
],
'post': [
'apiKey',
'apiKey/disable',
'apiKey/enable',
'chat',
'order',
'order/bulk',
'order/cancelAllAfter',
'order/closePosition',
'position/isolate',
'position/leverage',
'position/riskLimit',
'position/transferMargin',
'user/cancelWithdrawal',
'user/confirmEmail',
'user/confirmEnableTFA',
'user/confirmWithdrawal',
'user/disableTFA',
'user/logout',
'user/logoutAll',
'user/preferences',
'user/requestEnableTFA',
'user/requestWithdrawal',
],
'put': [
'order',
'order/bulk',
'user',
],
'delete': [
'apiKey',
'order',
'order/all',
],
},
},
'exceptions': {
'exact': {
'Invalid API Key.': AuthenticationError,
'Access Denied': PermissionDenied,
'Duplicate clOrdID': InvalidOrder,
'orderQty is invalid': InvalidOrder,
'Invalid price': InvalidOrder,
'Invalid stopPx for ordType': InvalidOrder,
},
'broad': {
'Signature not valid': AuthenticationError,
'overloaded': ExchangeNotAvailable,
'Account has insufficient Available Balance': InsufficientFunds,
},
},
'precisionMode': TICK_SIZE,
'options': {
# https://blog.bitmex.com/api_announcement/deprecation-of-api-nonce-header/
# https://github.com/ccxt/ccxt/issues/4789
'api-expires': 5, # in seconds
},
})
def fetch_markets(self, params={}):
response = self.publicGetInstrumentActiveAndIndices(params)
result = []
for i in range(0, len(response)):
market = response[i]
active = (market['state'] != 'Unlisted')
id = market['symbol']
baseId = market['underlying']
quoteId = market['quoteCurrency']
basequote = baseId + quoteId
base = self.common_currency_code(baseId)
quote = self.common_currency_code(quoteId)
swap = (id == basequote)
# 'positionCurrency' may be empty("", as Bitmex currently returns for ETHUSD)
# so let's take the quote currency first and then adjust if needed
positionId = self.safe_string_2(market, 'positionCurrency', 'quoteCurrency')
type = None
future = False
prediction = False
position = self.common_currency_code(positionId)
symbol = id
if swap:
type = 'swap'
symbol = base + '/' + quote
elif id.find('B_') >= 0:
prediction = True
type = 'prediction'
else:
future = True
type = 'future'
precision = {
'amount': None,
'price': None,
}
lotSize = self.safe_float(market, 'lotSize')
tickSize = self.safe_float(market, 'tickSize')
if lotSize is not None:
precision['amount'] = lotSize
if tickSize is not None:
precision['price'] = tickSize
limits = {
'amount': {
'min': None,
'max': None,
},
'price': {
'min': tickSize,
'max': self.safe_float(market, 'maxPrice'),
},
'cost': {
'min': None,
'max': None,
},
}
limitField = 'cost' if (position == quote) else 'amount'
limits[limitField] = {
'min': lotSize,
'max': self.safe_float(market, 'maxOrderQty'),
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'limits': limits,
'taker': self.safe_float(market, 'takerFee'),
'maker': self.safe_float(market, 'makerFee'),
'type': type,
'spot': False,
'swap': swap,
'future': future,
'prediction': prediction,
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
request = {
'currency': 'all',
}
response = self.privateGetUserMargin(self.extend(request, params))
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = currencyId
if currencyId in self.currencies_by_id:
code = self.currencies_by_id[currencyId]['code']
else:
code = self.common_currency_code(currencyId.upper())
account = self.account()
account['free'] = self.safe_float(balance, 'availableMargin')
account['total'] = self.safe_float(balance, 'marginBalance')
if code == 'BTC':
account['free'] = account['free'] * 0.00000001
account['total'] = account['total'] * 0.00000001
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['depth'] = limit
response = self.publicGetOrderBookL2(self.extend(request, params))
result = {
'bids': [],
'asks': [],
'timestamp': None,
'datetime': None,
'nonce': None,
}
for i in range(0, len(response)):
order = response[i]
side = 'asks' if (order['side'] == 'Sell') else 'bids'
amount = self.safe_float(order, 'size')
price = self.safe_float(order, 'price')
# https://github.com/ccxt/ccxt/issues/4926
# https://github.com/ccxt/ccxt/issues/4927
# the exchange sometimes returns null price in the orderbook
if price is not None:
result[side].append([price, amount])
result['bids'] = self.sort_by(result['bids'], 0, True)
result['asks'] = self.sort_by(result['asks'], 0)
return result
def fetch_order(self, id, symbol=None, params={}):
filter = {
'filter': {
'orderID': id,
},
}
response = self.fetch_orders(symbol, None, None, self.deep_extend(filter, params))
numResults = len(response)
if numResults == 1:
return response[0]
raise OrderNotFound(self.id + ': The order ' + id + ' not found.')
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
# Bitmex doesn't like that. Hence resorting to self hack.
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetOrder(request)
return self.parse_orders(response, market, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
request = {
'filter': {
'open': True,
},
}
return self.fetch_orders(symbol, since, limit, self.deep_extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# Bitmex barfs if you set 'open': False in the filter...
orders = self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
request = self.deep_extend(request, params)
# why the hassle? urlencode in python is kinda broken for nested dicts.
# E.g. self.urlencode({"filter": {"open": True}}) will return "filter={'open':+True}"
# Bitmex doesn't like that. Hence resorting to self hack.
if 'filter' in request:
request['filter'] = self.json(request['filter'])
response = self.privateGetExecutionTradeHistory(request)
#
# [
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'Withdrawal': 'transaction',
'RealisedPNL': 'margin',
'Deposit': 'transaction',
'Transfer': 'transfer',
'AffiliatePayout': 'referral',
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# {
# transactID: "69573da3-7744-5467-3207-89fd6efe7a47",
# account: 24321,
# currency: "XBt",
# transactType: "Withdrawal", # "AffiliatePayout", "Transfer", "Deposit", "RealisedPNL", ...
# amount: -1000000,
# fee: 300000,
# transactStatus: "Completed", # "Canceled", ...
# address: "1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9",
# tx: "3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA",
# text: "",
# transactTime: "2017-03-21T20:05:14.388Z",
# walletBalance: 0, # balance after
# marginBalance: null,
# timestamp: "2017-03-22T13:09:23.514Z"
# }
#
id = self.safe_string(item, 'transactID')
account = self.safe_string(item, 'account')
referenceId = self.safe_string(item, 'tx')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'transactType'))
currencyId = self.safe_string(item, 'currency')
code = None
if currencyId is not None:
currencyId = currencyId.upper()
code = self.common_currency_code(currencyId)
amount = self.safe_float(item, 'amount')
if amount is not None:
amount = amount * 1e-8
timestamp = self.parse8601(self.safe_string(item, 'transactTime'))
feeCost = self.safe_float(item, 'fee', 0)
if feeCost is not None:
feeCost = feeCost * 1e-8
fee = {
'cost': feeCost,
'currency': code,
}
after = self.safe_float(item, 'walletBalance')
if after is not None:
after = after * 1e-8
before = self.sum(after, -amount)
direction = None
if amount < 0:
direction = 'out'
amount = abs(amount)
else:
direction = 'in'
status = self.parse_transaction_status(self.safe_string(item, 'transactStatus'))
return {
'info': item,
'id': id,
'direction': direction,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'amount': amount,
'before': before,
'after': after,
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
# 'start': 123,
}
#
# if since is not None:
# # date-based pagination not supported
# }
#
if limit is not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
#
# [
# {
# transactID: "69573da3-7744-5467-3207-89fd6efe7a47",
# account: 24321,
# currency: "XBt",
# transactType: "Withdrawal", # "AffiliatePayout", "Transfer", "Deposit", "RealisedPNL", ...
# amount: -1000000,
# fee: 300000,
# transactStatus: "Completed", # "Canceled", ...
# address: "1Ex4fkF4NhQaQdRWNoYpqiPbDBbq18Kdd9",
# tx: "3BMEX91ZhhKoWtsH9QRb5dNXnmnGpiEetA",
# text: "",
# transactTime: "2017-03-21T20:05:14.388Z",
# walletBalance: 0, # balance after
# marginBalance: null,
# timestamp: "2017-03-22T13:09:23.514Z"
# }
# ]
#
return self.parse_ledger(response, currency, since, limit)
def fetch_transactions(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = {
# 'start': 123,
}
#
# if since is not None:
# # date-based pagination not supported
# }
#
if limit is not None:
request['count'] = limit
response = self.privateGetUserWalletHistory(self.extend(request, params))
transactions = self.filter_by_array(response, 'transactType', ['Withdrawal', 'Deposit'], False)
currency = None
if code is not None:
currency = self.currency(code)
return self.parseTransactions(transactions, currency, since, limit)
def parse_transaction_status(self, status):
statuses = {
'Canceled': 'canceled',
'Completed': 'ok',
'Pending': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# {
# 'transactID': 'ffe699c2-95ee-4c13-91f9-0faf41daec25',
# 'account': 123456,
# 'currency': 'XBt',
# 'transactType': 'Withdrawal',
# 'amount': -100100000,
# 'fee': 100000,
# 'transactStatus': 'Completed',
# 'address': '385cR5DM96n1HvBDMzLHPYcw89fZAXULJP',
# 'tx': '3BMEXabcdefghijklmnopqrstuvwxyz123',
# 'text': '',
# 'transactTime': '2019-01-02T01:00:00.000Z',
# 'walletBalance': 99900000,
# 'marginBalance': None,
# 'timestamp': '2019-01-02T13:00:00.000Z'
# }
#
id = self.safe_string(transaction, 'transactID')
# For deposits, transactTime == timestamp
# For withdrawals, transactTime is submission, timestamp is processed
transactTime = self.parse8601(self.safe_string(transaction, 'transactTime'))
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
type = self.safe_string(transaction, 'transactType')
if type is not None:
type = type.lower()
# Deposits have no from address or to address, withdrawals have both
address = None
addressFrom = None
addressTo = None
if type == 'withdrawal':
address = self.safe_string(transaction, 'address')
addressFrom = self.safe_string(transaction, 'tx')
addressTo = address
amount = self.safe_integer(transaction, 'amount')
if amount is not None:
amount = abs(amount) * 1e-8
feeCost = self.safe_integer(transaction, 'fee')
if feeCost is not None:
feeCost = feeCost * 1e-8
fee = {
'cost': feeCost,
'currency': 'BTC',
}
status = self.safe_string(transaction, 'transactStatus')
if status is not None:
status = self.parse_transaction_status(status)
return {
'info': transaction,
'id': id,
'txid': None,
'timestamp': transactTime,
'datetime': self.iso8601(transactTime),
'addressFrom': addressFrom,
'address': address,
'addressTo': addressTo,
'tagFrom': None,
'tag': None,
'tagTo': None,
'type': type,
'amount': amount,
# BTC is the only currency on Bitmex
'currency': 'BTC',
'status': status,
'updated': timestamp,
'comment': None,
'fee': fee,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
if not market['active']:
raise ExchangeError(self.id + ': symbol ' + symbol + ' is delisted')
tickers = self.fetch_tickers([symbol], params)
ticker = self.safe_value(tickers, symbol)
if ticker is None:
raise ExchangeError(self.id + ' ticker symbol ' + symbol + ' not found')
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.publicGetInstrumentActiveAndIndices(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = self.safe_string(ticker, 'symbol')
if symbol is not None:
result[symbol] = ticker
return result
def parse_ticker(self, ticker, market=None):
#
# { symbol: "ETHH19",
# rootSymbol: "ETH",
# state: "Open",
# typ: "FFCCSX",
# listing: "2018-12-17T04:00:00.000Z",
# front: "2019-02-22T12:00:00.000Z",
# expiry: "2019-03-29T12:00:00.000Z",
# settle: "2019-03-29T12:00:00.000Z",
# relistInterval: null,
# inverseLeg: "",
# sellLeg: "",
# buyLeg: "",
# optionStrikePcnt: null,
# optionStrikeRound: null,
# optionStrikePrice: null,
# optionMultiplier: null,
# positionCurrency: "ETH",
# underlying: "ETH",
# quoteCurrency: "XBT",
# underlyingSymbol: "ETHXBT=",
# reference: "BMEX",
# referenceSymbol: ".BETHXBT30M",
# calcInterval: null,
# publishInterval: null,
# publishTime: null,
# maxOrderQty: 100000000,
# maxPrice: 10,
# lotSize: 1,
# tickSize: 0.00001,
# multiplier: 100000000,
# settlCurrency: "XBt",
# underlyingToPositionMultiplier: 1,
# underlyingToSettleMultiplier: null,
# quoteToSettleMultiplier: 100000000,
# isQuanto: False,
# isInverse: False,
# initMargin: 0.02,
# maintMargin: 0.01,
# riskLimit: 5000000000,
# riskStep: 5000000000,
# limit: null,
# capped: False,
# taxed: True,
# deleverage: True,
# makerFee: -0.0005,
# takerFee: 0.0025,
# settlementFee: 0,
# insuranceFee: 0,
# fundingBaseSymbol: "",
# fundingQuoteSymbol: "",
# fundingPremiumSymbol: "",
# fundingTimestamp: null,
# fundingInterval: null,
# fundingRate: null,
# indicativeFundingRate: null,
# rebalanceTimestamp: null,
# rebalanceInterval: null,
# openingTimestamp: "2019-02-13T08:00:00.000Z",
# closingTimestamp: "2019-02-13T09:00:00.000Z",
# sessionInterval: "2000-01-01T01:00:00.000Z",
# prevClosePrice: 0.03347,
# limitDownPrice: null,
# limitUpPrice: null,
# bankruptLimitDownPrice: null,
# bankruptLimitUpPrice: null,
# prevTotalVolume: 1386531,
# totalVolume: 1387062,
# volume: 531,
# volume24h: 17118,
# prevTotalTurnover: 4741294246000,
# totalTurnover: 4743103466000,
# turnover: 1809220000,
# turnover24h: 57919845000,
# homeNotional24h: 17118,
# foreignNotional24h: 579.19845,
# prevPrice24h: 0.03349,
# vwap: 0.03383564,
# highPrice: 0.03458,
# lowPrice: 0.03329,
# lastPrice: 0.03406,
# lastPriceProtected: 0.03406,
# lastTickDirection: "ZeroMinusTick",
# lastChangePcnt: 0.017,
# bidPrice: 0.03406,
# midPrice: 0.034065,
# askPrice: 0.03407,
# impactBidPrice: 0.03406,
# impactMidPrice: 0.034065,
# impactAskPrice: 0.03407,
# hasLiquidity: True,
# openInterest: 83679,
# openValue: 285010674000,
# fairMethod: "ImpactMidPrice",
# fairBasisRate: 0,
# fairBasis: 0,
# fairPrice: 0.03406,
# markMethod: "FairPrice",
# markPrice: 0.03406,
# indicativeTaxRate: 0,
# indicativeSettlePrice: 0.03406,
# optionUnderlyingPrice: null,
# settledPrice: null,
# timestamp: "2019-02-13T08:40:30.000Z",
# }
#
symbol = None
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_value(self.markets_by_id, marketId, market)
if market is not None:
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
open = self.safe_float(ticker, 'prevPrice24h')
last = self.safe_float(ticker, 'lastPrice')
change = None
percentage = None
if last is not None and open is not None:
change = last - open
if open > 0:
percentage = change / open * 100
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'highPrice'),
'low': self.safe_float(ticker, 'lowPrice'),
'bid': self.safe_float(ticker, 'bidPrice'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'askPrice'),
'askVolume': None,
'vwap': self.safe_float(ticker, 'vwap'),
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': self.sum(open, last) / 2,
'baseVolume': self.safe_float(ticker, 'homeNotional24h'),
'quoteVolume': self.safe_float(ticker, 'foreignNotional24h'),
'info': ticker,
}
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
timestamp = self.parse8601(self.safe_string(ohlcv, 'timestamp'))
return [
timestamp,
self.safe_float(ohlcv, 'open'),
self.safe_float(ohlcv, 'high'),
self.safe_float(ohlcv, 'low'),
self.safe_float(ohlcv, 'close'),
self.safe_float(ohlcv, 'volume'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
self.load_markets()
# send JSON key/value pairs, such as {"key": "value"}
# filter by individual fields and do advanced queries on timestamps
# filter = {'key': 'value'}
# send a bare series(e.g. XBU) to nearest expiring contract in that series
# you can also send a timeframe, e.g. XBU:monthly
# timeframes: daily, weekly, monthly, quarterly, and biquarterly
market = self.market(symbol)
request = {
'symbol': market['id'],
'binSize': self.timeframes[timeframe],
'partial': True, # True == include yet-incomplete current bins
# 'filter': filter, # filter by individual fields and do advanced queries
# 'columns': [], # will return all columns if omitted
# 'start': 0, # starting point for results(wtf?)
# 'reverse': False, # True == newest first
# 'endTime': '', # ending date filter for results
}
if limit is not None:
request['count'] = limit # default 100, max 500
# if since is not set, they will return candles starting from 2017-01-01
if since is not None:
ymdhms = self.ymdhms(since)
request['startTime'] = ymdhms # starting date filter for results
response = self.publicGetTradeBucketed(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# }
#
# fetchMyTrades(private)
#
# {
# "execID": "string",
# "orderID": "string",
# "clOrdID": "string",
# "clOrdLinkID": "string",
# "account": 0,
# "symbol": "string",
# "side": "string",
# "lastQty": 0,
# "lastPx": 0,
# "underlyingLastPx": 0,
# "lastMkt": "string",
# "lastLiquidityInd": "string",
# "simpleOrderQty": 0,
# "orderQty": 0,
# "price": 0,
# "displayQty": 0,
# "stopPx": 0,
# "pegOffsetValue": 0,
# "pegPriceType": "string",
# "currency": "string",
# "settlCurrency": "string",
# "execType": "string",
# "ordType": "string",
# "timeInForce": "string",
# "execInst": "string",
# "contingencyType": "string",
# "exDestination": "string",
# "ordStatus": "string",
# "triggered": "string",
# "workingIndicator": True,
# "ordRejReason": "string",
# "simpleLeavesQty": 0,
# "leavesQty": 0,
# "simpleCumQty": 0,
# "cumQty": 0,
# "avgPx": 0,
# "commission": 0,
# "tradePublishIndicator": "string",
# "multiLegReportingType": "string",
# "text": "string",
# "trdMatchID": "string",
# "execCost": 0,
# "execComm": 0,
# "homeNotional": 0,
# "foreignNotional": 0,
# "transactTime": "2019-03-05T12:47:02.762Z",
# "timestamp": "2019-03-05T12:47:02.762Z"
# }
#
timestamp = self.parse8601(self.safe_string(trade, 'timestamp'))
price = self.safe_float(trade, 'price')
amount = self.safe_float_2(trade, 'size', 'lastQty')
id = self.safe_string(trade, 'trdMatchID')
order = self.safe_string(trade, 'orderID')
side = self.safe_string(trade, 'side').lower()
# price * amount doesn't work for all symbols(e.g. XBT, ETH)
cost = self.safe_float(trade, 'execCost')
if cost is not None:
cost = abs(cost) / 100000000
fee = None
if 'execComm' in trade:
feeCost = self.safe_float(trade, 'execComm')
feeCost = feeCost / 100000000
currencyId = self.safe_string(trade, 'settlCurrency')
currencyId = currencyId.upper()
feeCurrency = self.common_currency_code(currencyId)
feeRate = self.safe_float(trade, 'commission')
fee = {
'cost': feeCost,
'currency': feeCurrency,
'rate': feeRate,
}
takerOrMaker = None
if fee is not None:
            takerOrMaker = 'maker' if fee['cost'] < 0 else 'taker'  # negative commission = maker rebate
symbol = None
marketId = self.safe_string(trade, 'symbol')
if marketId is not None:
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
else:
symbol = marketId
type = self.safe_string(trade, 'ordType')
if type is not None:
type = type.lower()
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': order,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'cost': cost,
'amount': amount,
'fee': fee,
}
def parse_order_status(self, status):
statuses = {
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'DoneForDay': 'open',
'Canceled': 'canceled',
'PendingCancel': 'open',
'PendingNew': 'open',
'Rejected': 'rejected',
'Expired': 'expired',
'Stopped': 'open',
'Untriggered': 'open',
'Triggered': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
status = self.parse_order_status(self.safe_string(order, 'ordStatus'))
symbol = None
if market is not None:
symbol = market['symbol']
else:
marketId = self.safe_string(order, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
lastTradeTimestamp = self.parse8601(self.safe_string(order, 'transactTime'))
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'orderQty')
filled = self.safe_float(order, 'cumQty', 0.0)
remaining = None
if amount is not None:
if filled is not None:
remaining = max(amount - filled, 0.0)
average = self.safe_float(order, 'avgPx')
cost = None
if filled is not None:
if average is not None:
cost = average * filled
elif price is not None:
cost = price * filled
id = self.safe_string(order, 'orderID')
type = self.safe_string(order, 'ordType')
if type is not None:
type = type.lower()
side = self.safe_string(order, 'side')
if side is not None:
side = side.lower()
return {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = self.iso8601(since)
if limit is not None:
request['count'] = limit
response = self.publicGetTrade(self.extend(request, params))
#
# [
# {
# timestamp: '2018-08-28T00:00:02.735Z',
# symbol: 'XBTUSD',
# side: 'Buy',
# size: 2000,
# price: 6906.5,
# tickDirection: 'PlusTick',
# trdMatchID: 'b9a42432-0a46-6a2f-5ecc-c32e9ca4baf8',
# grossValue: 28958000,
# homeNotional: 0.28958,
# foreignNotional: 2000
# },
# {
# timestamp: '2018-08-28T00:00:03.778Z',
# symbol: 'XBTUSD',
# side: 'Sell',
# size: 1000,
# price: 6906,
# tickDirection: 'MinusTick',
# trdMatchID: '0d4f1682-5270-a800-569b-4a0eb92db97c',
# grossValue: 14480000,
# homeNotional: 0.1448,
# foreignNotional: 1000
# },
# ]
#
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
request = {
'symbol': self.market_id(symbol),
'side': self.capitalize(side),
'orderQty': amount,
'ordType': self.capitalize(type),
}
if price is not None:
request['price'] = price
response = self.privatePostOrder(self.extend(request, params))
order = self.parse_order(response)
id = self.safe_string(order, 'id')
self.orders[id] = order
return self.extend({'info': response}, order)
def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
self.load_markets()
request = {
'orderID': id,
}
if amount is not None:
request['orderQty'] = amount
if price is not None:
request['price'] = price
response = self.privatePutOrder(self.extend(request, params))
order = self.parse_order(response)
self.orders[order['id']] = order
return self.extend({'info': response}, order)
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
response = self.privateDeleteOrder(self.extend({'orderID': id}, params))
order = response[0]
error = self.safe_string(order, 'error')
if error is not None:
if error.find('Unable to cancel order due to existing state') >= 0:
raise OrderNotFound(self.id + ' cancelOrder() failed: ' + error)
order = self.parse_order(order)
self.orders[order['id']] = order
return self.extend({'info': response}, order)
def is_fiat(self, currency):
if currency == 'EUR':
return True
if currency == 'PLN':
return True
return False
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
# currency = self.currency(code)
if code != 'BTC':
            raise ExchangeError(self.id + ' supports BTC withdrawals only, other currencies coming soon...')
request = {
'currency': 'XBt', # temporarily
'amount': amount,
'address': address,
# 'otpToken': '123456', # requires if two-factor auth(OTP) is enabled
# 'fee': 0.001, # bitcoin network fee
}
response = self.privatePostUserRequestWithdrawal(self.extend(request, params))
return {
'info': response,
'id': response['transactID'],
}
def handle_errors(self, code, reason, url, method, headers, body, response):
if response is None:
return
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
if code >= 400:
error = self.safe_value(response, 'error', {})
message = self.safe_string(error, 'message')
feedback = self.id + ' ' + body
exact = self.exceptions['exact']
if message in exact:
raise exact[message](feedback)
broad = self.exceptions['broad']
broadKey = self.findBroadlyMatchedKey(broad, message)
if broadKey is not None:
raise broad[broadKey](feedback)
if code == 400:
raise BadRequest(feedback)
raise ExchangeError(feedback) # unknown message
def nonce(self):
return self.milliseconds()
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
query = '/api/' + self.version + '/' + path
if method == 'GET':
if params:
query += '?' + self.urlencode(params)
else:
format = self.safe_string(params, '_format')
if format is not None:
query += '?' + self.urlencode({'_format': format})
params = self.omit(params, '_format')
url = self.urls['api'] + query
if api == 'private':
self.check_required_credentials()
auth = method + query
expires = self.safe_integer(self.options, 'api-expires')
headers = {
'Content-Type': 'application/json',
'api-key': self.apiKey,
}
expires = self.sum(self.seconds(), expires)
expires = str(expires)
auth += expires
headers['api-expires'] = expires
if method == 'POST' or method == 'PUT' or method == 'DELETE':
if params:
body = self.json(params)
auth += body
headers['api-signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
return {'url': url, 'method': method, 'body': body, 'headers': headers}
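    # Illustration (added; not part of the upstream class): for a private request the
    # string signed above is method + path(+query) + expires, plus the JSON body for
    # POST/PUT/DELETE requests, e.g. (endpoint, body and timestamp values hypothetical):
    #   'GET/api/v1/user/margin?currency=all1574013600'
    #   'POST/api/v1/order1574013600{"symbol": "XBTUSD", "orderQty": 1}'
    # The hex HMAC-SHA256 of that string, keyed with the API secret, is sent in the
    # 'api-signature' header alongside 'api-key' and 'api-expires'.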
| 40.780201 | 126 | 0.464657 |
4a26a0d66200c005abbaa3686785bf20e1864acc | 851 | py | Python | main.py | GuiSAlmeida/OS-Scheduler | 1b7c3fb2dff5a7a92366519e72cbc86fa2fa337b | [
"MIT"
] | null | null | null | main.py | GuiSAlmeida/OS-Scheduler | 1b7c3fb2dff5a7a92366519e72cbc86fa2fa337b | [
"MIT"
] | null | null | null | main.py | GuiSAlmeida/OS-Scheduler | 1b7c3fb2dff5a7a92366519e72cbc86fa2fa337b | [
"MIT"
] | null | null | null | #################################################################
# Algorithms simulating OS scheduler process #
# Author: Guilherme Almeida #
# Contact: https://www.guisalmeida.com #
#################################################################
import pandas as pd
from diagram import create_gantt
from algorithms import fifo, sjf, priority
import csv
filename = 'files/processos.csv'
procs = []
with open(filename, 'r') as csv_data:
for line in csv.DictReader(csv_data):
procs.append(dict(line))
def main(alg, procs):
sorted_procs = alg(procs)
df = pd.DataFrame(sorted_procs)
df.to_csv(f'files/processos_{alg.__name__}.csv')
create_gantt(sorted_procs, f'{alg.__name__}')
main(fifo, procs)
main(sjf, procs)
main(priority, procs)
| 26.59375 | 65 | 0.540541 |
4a26a1065e394fa14a61c4eb605ce92ff8e71965 | 4,586 | py | Python | fiction_scraper/filters.py | de3sw2aq1/fiction-scrapers | bcda0c708c679273b8e2e962faa0d4e8f7988fea | [
"MIT"
] | null | null | null | fiction_scraper/filters.py | de3sw2aq1/fiction-scrapers | bcda0c708c679273b8e2e962faa0d4e8f7988fea | [
"MIT"
] | 2 | 2021-03-22T16:57:08.000Z | 2021-12-13T19:36:23.000Z | fiction_scraper/filters.py | de3sw2aq1/fiction-scrapers | bcda0c708c679273b8e2e962faa0d4e8f7988fea | [
"MIT"
] | 1 | 2020-07-22T14:16:40.000Z | 2020-07-22T14:16:40.000Z | """Filter functions for spiders.
This module contains functions that can be applied to the <body> of a story to
modify it. The body element will be passed to the filter function as the first
argument. Some filters have optional keyword arguments that may be configured
by setting the arguments with `functools.partial()`. Custom filters may also
be written and used in a spider.
Example usage:
from . import Spider, filters
def my_filter(root):
pass
class MySpider(Spider):
filters = (my_filter, filters.kill_classes, *filters.DEFAULT_FILTERS)
Always ensure that `DEFAULT_FILTERS` are included in the filter list, unless
you are very sure you want to override them.
"""
import re
from lxml.html import clean, builder as E
# TODO: add filter to move attributes from <p> tags onto a <div> for Pandoc
# TODO: add configurable filter to remove tags by class name
# TODO: add filter to ensure ids are unique throughout the document
# TODO: add lower_heading_levels filter. Will be manually run.
# Set a max output heading level.
# Attempt to autodetect the document's heading levels.
class Cleaner(clean.Cleaner):
"""Cleaner to remove unnecessary tags and attributes.
Currently removes <div> tags preserving their children.
All attributes except what lxml considers to be "safe" are removed.
Additionally the attributes `width`, `height`, `dir` and `align` are
removed.
Instances of this class are usable as a filter.
"""
remove_tags = ('div')
safe_attrs = clean.Cleaner.safe_attrs - {'width', 'height', 'align', 'dir'}
def kill_classes(root, classes=('reaction_buttons', 'sharedaddy')):
"""Filter to remove tags with unwanted classes.
    If any tag has one of the specified classes, it and its children will be
removed. By default this filter removes classes that match divs at the
end of blog posts with social media sharing buttons.
"""
for c in classes:
for e in root.find_class(c):
e.drop_tree()
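# Example (added for illustration): the optional ``classes`` argument can be bound with
# functools.partial, as the module docstring suggests. The class names here are
# hypothetical, not real spider defaults:
#
#   from functools import partial
#   strip_share_widgets = partial(kill_classes, classes=('share-bar', 'post-footer'))
#
#   class MySpider(Spider):
#       filters = (strip_share_widgets, *DEFAULT_FILTERS)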
# TODO: add stylesheet that includes alignment classes
def text_alignment(root, directions=None):
"""Convert inline styles and align attributes for text alignment to
a class.
This is needed because inline styles are stripped by default. Currently
`text-align: justified` isn't handled, only left, center and right are.
It may be a good idea to remove left from the set of handled alignments.
Then the document's default alignment, which may be justified, can be
applied.
"""
if not directions:
directions = {
'left': 'align-left',
'right': 'align-right',
'center': 'align-center'}
for e in root.xpath('.//*[@style]'):
style = e.get('style')
for direction, css_class in directions.items():
if re.search(r'text-align:\s*'+direction, style, re.I):
e.classes.add(css_class)
for e in root.xpath('.//*[@align]'):
align = e.get('align')
for direction, css_class in directions.items():
if direction == align.lower():
e.classes.add(css_class)
del e.attrib['align']
# TODO: add stylesheet that includes decoration classes
def text_decoration(root, decorations=None):
"""Convert `text-decoration: underline` in style attributes to
a class."""
if not decorations:
decorations = {'underline': 'underlined'}
for e in root.xpath(r'.//*[@style]'):
style = e.get('style')
for decoration, css_class in decorations.items():
if re.search(r'text-decoration:\s*'+decoration, style, re.I):
e.classes.add(css_class)
def move_attrs_to_div(root, elements=('p', 'hr')):
"""If an element has attributes move them to a <div> wrapping it.
Pandoc doesn't support attributes on all types of elements. Notably it
does not support attributes on <p> tags. This filter moves all the
attributes off of the specified element types onto a <div> wrapping the
element.
"""
# FIXME: the behavior of iter() is undefined if the tree is modified
# during iteration. But this seems to work for now.
for e in root.iter(*elements):
if e.attrib:
wrapper = E.DIV(dict(e.attrib))
e.attrib.clear()
e.addprevious(wrapper)
wrapper.insert(0, e)
# The styles, etc shouldn't affect the tail text of the element
wrapper.tail = e.tail
e.tail = None
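# Illustration (added; hypothetical input): given
#   <p class="align-center" id="intro">Hello</p>
# move_attrs_to_div produces
#   <div class="align-center" id="intro"><p>Hello</p></div>
# so Pandoc, which drops attributes on <p>, still sees them on the wrapping <div>.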
DEFAULT_FILTERS = (text_decoration, Cleaner(), move_attrs_to_div)
| 32.992806 | 79 | 0.668993 |
4a26a18ed24dab94094c049b9fdd1b7477650e46 | 1,529 | py | Python | DonnaAlert.py | TobiasMR/donna-alert | 5db41b3dbbbbfb7656376e421587d9b87508bc92 | [
"MIT"
] | null | null | null | DonnaAlert.py | TobiasMR/donna-alert | 5db41b3dbbbbfb7656376e421587d9b87508bc92 | [
"MIT"
] | null | null | null | DonnaAlert.py | TobiasMR/donna-alert | 5db41b3dbbbbfb7656376e421587d9b87508bc92 | [
"MIT"
] | null | null | null | import sys
import json
import pprint
import argparse
from flask import Flask, make_response, render_template, jsonify, send_from_directory
from flask_sqlalchemy import SQLAlchemy  # 'flask.ext.*' imports were removed in Flask 1.0
from birdy.twitter import AppClient
from models import Source, Mention, Base
from twitter import get_twitter_mentions
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite+pysqlite:///sqlite.db'
db = SQLAlchemy(app)
@app.route('/')
def index():
"""Return the main view for mentions."""
return render_template('index.html')
@app.route('/update/<source>', methods=['POST'])
def get_updates_for_source(source):
"""Return the number of updates found after getting new data from
*source*."""
    if source == 'twitter':
        updates = get_twitter_mentions()
    else:
        # Unknown sources return zero updates instead of raising a NameError.
        updates = 0
    return jsonify({'updates': updates})
@app.route('/read/<id>', methods=['POST'])
def read(id):
"""Mark a particular mention as having been read."""
session = db.session()
mention = session.query(Mention).get(id)
mention.seen = True
session.add(mention)
session.commit()
return jsonify({})
@app.route('/mentions')
def show_mentions():
"""Return a list of all mentions in JSON."""
session = db.session()
mentions = session.query(Mention).all()
values = [mention.to_json() for mention in mentions]
response = make_response()
response.data = json.dumps(values)
return response
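# Illustration (added; not part of the original app): typical requests against these
# routes, assuming the default Flask development server on localhost:5000 and a
# mention with id 42 (both hypothetical):
#
#   curl http://localhost:5000/mentions                  # list all mentions as JSON
#   curl -X POST http://localhost:5000/update/twitter    # pull new Twitter mentions
#   curl -X POST http://localhost:5000/read/42           # mark mention 42 as read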
def main():
"""Main entry point for script."""
app.run()
if __name__ == '__main__':
sys.exit(main()) | 27.303571 | 85 | 0.691956 |
4a26a1dc9fc77166691f212b697855db485ce345 | 4,647 | py | Python | homeassistant/components/sensor/command_line.py | don66/home-assistant | a277470363c0758bb305410aad49c257ff8bac40 | [
"Apache-2.0"
] | 7 | 2018-06-29T01:13:54.000Z | 2020-04-01T16:08:27.000Z | homeassistant/components/sensor/command_line.py | don66/home-assistant | a277470363c0758bb305410aad49c257ff8bac40 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/sensor/command_line.py | don66/home-assistant | a277470363c0758bb305410aad49c257ff8bac40 | [
"Apache-2.0"
] | 3 | 2018-10-09T08:37:48.000Z | 2019-11-16T08:32:27.000Z | """
Allows configuring custom shell commands to retrieve a value for a sensor.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.command_line/
"""
import logging
import subprocess
import shlex
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.helpers import template
from homeassistant.exceptions import TemplateError
from homeassistant.const import (
CONF_NAME, CONF_VALUE_TEMPLATE, CONF_UNIT_OF_MEASUREMENT, CONF_COMMAND,
STATE_UNKNOWN)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Command Sensor'
SCAN_INTERVAL = timedelta(seconds=60)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COMMAND): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
})
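# Illustration (added; not part of the upstream platform): a YAML configuration that
# matches this schema could look like the following (command and template values are
# hypothetical):
#
#   sensor:
#     - platform: command_line
#       name: CPU Temperature
#       command: "cat /sys/class/thermal/thermal_zone0/temp"
#       unit_of_measurement: "°C"
#       value_template: "{{ value | multiply(0.001) | round(1) }}"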
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Command Sensor."""
name = config.get(CONF_NAME)
command = config.get(CONF_COMMAND)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
data = CommandSensorData(hass, command)
add_devices([CommandSensor(hass, data, name, unit, value_template)], True)
class CommandSensor(Entity):
"""Representation of a sensor that is using shell commands."""
def __init__(self, hass, data, name, unit_of_measurement, value_template):
"""Initialize the sensor."""
self._hass = hass
self.data = data
self._name = name
self._state = None
self._unit_of_measurement = unit_of_measurement
self._value_template = value_template
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the device."""
return self._state
def update(self):
"""Get the latest data and updates the state."""
self.data.update()
value = self.data.value
        if value is None:
            self._state = STATE_UNKNOWN
elif self._value_template is not None:
self._state = self._value_template.render_with_possible_json_value(
value, STATE_UNKNOWN)
else:
self._state = value
class CommandSensorData(object):
"""The class for handling the data retrieval."""
def __init__(self, hass, command):
"""Initialize the data object."""
self.value = None
self.hass = hass
self.command = command
def update(self):
"""Get the latest data with a shell command."""
command = self.command
        cache = {}  # NOTE: rebuilt on every update() call, so the lookup below never finds a cached entry
if command in cache:
prog, args, args_compiled = cache[command]
elif ' ' not in command:
prog = command
args = None
args_compiled = None
cache[command] = (prog, args, args_compiled)
else:
prog, args = command.split(' ', 1)
args_compiled = template.Template(args, self.hass)
cache[command] = (prog, args, args_compiled)
if args_compiled:
try:
args_to_render = {"arguments": args}
rendered_args = args_compiled.render(args_to_render)
except TemplateError as ex:
_LOGGER.exception("Error rendering command template: %s", ex)
return
else:
rendered_args = None
if rendered_args == args:
# No template used. default behavior
shell = True
else:
# Template used. Construct the string used in the shell
command = str(' '.join([prog] + shlex.split(rendered_args)))
shell = True
try:
_LOGGER.info("Running command: %s", command)
return_value = subprocess.check_output(
command, shell=shell, timeout=15)
self.value = return_value.strip().decode('utf-8')
except subprocess.CalledProcessError:
_LOGGER.error("Command failed: %s", command)
except subprocess.TimeoutExpired:
_LOGGER.error("Timeout for command: %s", command)
| 32.270833 | 79 | 0.649021 |
4a26a261c392d3cfb759ed5ba5cd2e0593c8deed | 13,264 | py | Python | spinup/algos/sapm/sapm.py | kyungjaelee/soft_action_particle_method | 8a37dabcb079907bcaebffc45935df8372f78469 | ["MIT"] | 32 | 2019-08-28T16:48:12.000Z | 2021-10-15T04:33:14.000Z | spinup/algos/sapm/sapm.py | kyungjaelee/soft_action_particle_method | 8a37dabcb079907bcaebffc45935df8372f78469 | ["MIT"] | null | null | null | spinup/algos/sapm/sapm.py | kyungjaelee/soft_action_particle_method | 8a37dabcb079907bcaebffc45935df8372f78469 | ["MIT"] | 1 | 2019-08-28T16:46:48.000Z | 2019-08-28T16:46:48.000Z |
import numpy as np
import gym
import random
import collections
import matplotlib.pyplot as plt
import time
from spinup.algos.sapm import Defs
from spinup.algos.sapm.Q_2layers import Q2_Network
from spinup.algos.sapm.Q_3layers import Q3_Network
from spinup.utils.logx import EpochLogger
"""
SAPM (Soft Action Particle Method)
"""
def sapm(env_fn, ac_kwargs=dict(), seed=0,
steps_per_epoch=5000, epochs=200, replay_size=int(1e6),
#######################################options########################################
# N_layers : 2(2layers Q-Network), 3(3layers Q-Network)
# DPP : 0(Random Sampling), 1(DPP Sampling)
# Ablation : 0(Fixed Particles), 1(Grad + No Resampling), 2(Grad + Resampling), 3(SAPM)
N_layers = 2, DPP = 1, Ablation = 3,
######################################################################################
layer_size1=300, layer_size2=400, lr_Q=0.0001, lr_M=0.01, lr_S=0.01,
batch_size=100, start_steps=10000, act_train_step=100, policy_delay=1,
act_size=32, min_d=0.5, scale=0.2, std=10,
max_ep_len=1000, logger_kwargs=dict(), save_freq=1):
"""
Args:
env_fn : A function which creates a copy of the environment.
The environment must satisfy the OpenAI Gym API.
        N_layers (int): Depth of the Q-network: 2 selects ``Q2_Network`` and
            3 selects ``Q3_Network``.
        DPP (int): Particle resampling scheme: 0 for random resampling and
            1 for DPP-based resampling.
        Ablation (int): Ablation setting: 0 keeps the action particles fixed,
            1 applies gradient updates without resampling, 2 adds resampling,
            and 3 runs the full SAPM method.
        ac_kwargs (dict): Extra keyword arguments describing the agent; only
            used here to record the environment's action space.
seed (int): Seed for random number generators.
steps_per_epoch (int): Number of steps of interaction (state-action pairs)
for the agent and the environment in each epoch.
epochs (int): Number of epochs to run and train agent.
replay_size (int): Maximum length of replay buffer.
        layer_size1 (int): Width of the first hidden layer of the Q-network.
        layer_size2 (int): Width of the second hidden layer of the Q-network.
        lr_Q (float): Learning rate for the Q-network.
        lr_M (float): Learning rate for the action-particle means.
        lr_S (float): Learning rate for the action-particle standard
            deviations.
batch_size (int): Minibatch size for SGD.
start_steps (int): Number of steps for uniform-random action selection,
before running real policy. Helps exploration.
        act_train_step (int): Number of training iterations between updates
            of the action particles.
        act_size (int): Number of action particles maintained by the critic.
        min_d (float): Minimum-distance parameter used when resampling
            particles.
        scale (float): Scale (temperature) of the softmax used for action
            selection.
        std (float): Standard-deviation parameter used when clipping and
            resampling particles.
        policy_delay (int): The target critic is only updated once every
            policy_delay training iterations.
max_ep_len (int): Maximum length of trajectory / episode / rollout.
logger_kwargs (dict): Keyword args for EpochLogger.
save_freq (int): How often (in terms of gap between epochs) to save
the current policy and value function.
"""
logger = EpochLogger(**logger_kwargs)
logger.save_config(locals())
np.random.seed(seed)
rng = np.random.RandomState(seed)
random.seed(seed)
env, test_env = env_fn(), env_fn()
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
env.seed(seed)
test_env.seed(seed)
# Action limit for clamping: critically, assumes all dimensions share the same bound!
act_limit = env.action_space.high[0]
# Share information about action space with policy architecture
ac_kwargs['action_space'] = env.action_space
replay_buffer = collections.deque()
if N_layers == 2:
Critic_Network = Q2_Network(seed, obs_dim, act_dim, act_size, act_limit, batch_size, scale, layer_size1, layer_size2, lr_Q, lr_M, lr_S, std, Ablation)
elif N_layers == 3:
Critic_Network = Q3_Network(seed, obs_dim, act_dim, act_size, act_limit, batch_size, scale, layer_size1, layer_size2, lr_Q, lr_M, lr_S, std, Ablation)
M_batch, S_batch = Critic_Network.get_action_batch()
t_M_batch, t_S_batch = Critic_Network.get_target_action_batch()
"""
# Count variables
scopes = ['actions/action_batch', 'q_critic', 'H1']
var_counts = tuple(Critic_Network.count_vars(scope) for scope in scopes)
print(var_counts)
print('\nNumber of parameters: \t pi: %d, \t q: %d, \t total: %d\n'%var_counts)
# Setup model saving
logger.setup_tf_saver(sess, inputs={'x': x_ph, 'a': a_ph}, outputs={'pi': pi, 'q': q})
"""
def get_action(o):
actions, action_soft = Critic_Network.get_softmax(o, scale)
action0 = np.random.choice(len(action_soft), size=1, p=action_soft)[0]
return actions[action0]
def test_get_action(o, actions):
action_test = Critic_Network.get_test_q_batch(np.reshape(o,[1,-1]), actions)
action_test = np.reshape(action_test, [1,-1])[0]
return actions[np.argmax(action_test)]
def test_agent(actions, flag=True, n=10):
result = 0
for j in range(n):
o_t, r_t, d_t, ep_ret_t, ep_len_t = test_env.reset(), 0, False, 0, 0
while not(d_t or (ep_len_t == max_ep_len)):
o_t, r_t, d_t, _ = test_env.step(test_get_action(o_t, actions))
ep_ret_t += r_t
ep_len_t += 1
result += ep_ret_t
if flag:
logger.store(TestEpRet2=ep_ret_t, TestEpLen=ep_len_t)
else:
logger.store(TestEpRet=ep_ret_t, TestEpLen=ep_len_t)
return result/n
start_time = time.time()
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
total_steps = steps_per_epoch * epochs
global_steps, max_ret = 0, -10000
best_actions = M_batch
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
if t > start_steps:
a = get_action(o)
else:
a0 = np.random.randint(act_size)
a = np.random.normal(M_batch[a0], S_batch[a0])
# Step the env
o2, r, d, _ = env.step(a)
ep_ret += r
ep_len += 1
global_steps += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len==max_ep_len else d
# Store experience to replay buffer
replay_buffer.append((o, o2, a, r, d))
if len(replay_buffer)>replay_size:
replay_buffer.popleft()
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
if (d or (ep_len == max_ep_len)) and global_steps > batch_size:
for tr in range(global_steps-ep_len+1, global_steps+1):
minibatch = random.sample(replay_buffer, batch_size)
cost, q_t_1, state_t_batch = Defs.train(Critic_Network, minibatch, scale, obs_dim, act_dim, act_size, act_limit)
logger.store(LossQ=cost, QVals=q_t_1)
if tr % policy_delay == 0:
Critic_Network.update_target_critic()
if (Ablation!=0) and (tr % act_train_step == 0):
for k in range(1):
actor_loss, _, _ = Critic_Network.train_weighted_actor(state_t_batch)
logger.store(LossPi=np.sum(actor_loss))
M_batch, S_batch = Critic_Network.get_action_batch()
t_M_batch, t_S_batch = Critic_Network.get_target_action_batch()
M_batch, S_batch, t_M_batch, t_S_batch = Defs.clipping_action(M_batch, S_batch, t_M_batch, t_S_batch, act_limit, std)
if Ablation!=1:
if DPP == 0:
M_batch, S_batch, t_M_batch, t_S_batch = Defs.resampling_random(M_batch, S_batch, t_M_batch, t_S_batch, act_limit, std, min_d, np)
elif DPP == 1:
M_batch, S_batch, t_M_batch, t_S_batch = Defs.resampling_dpp(M_batch, S_batch, t_M_batch, t_S_batch, act_limit, std, min_d, np)
else:
print(" It is invalid RESAMPLING option. ")
Critic_Network.realign_action_batch(M_batch, S_batch, t_M_batch, t_S_batch)
Critic_Network.update_action_target_critic()
M_batch, S_batch = Critic_Network.get_action_batch()
t_M_batch, t_S_batch = Critic_Network.get_target_action_batch()
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
# End of epoch wrap-up
if t > 0 and t % steps_per_epoch == 0:
epoch = t // steps_per_epoch
Defs.get_action_variance(M_batch, min_d, act_limit)
# Save model
if (epoch % save_freq == 0) or (epoch == epochs-1):
logger.save_state({'env': env}, None)
# Test the performance of the deterministic version of the agent.
test_result1 = test_agent(M_batch, flag=False)
test_result2 = test_agent(best_actions, flag=True)
if max_ret < test_result1 and test_result2 < test_result1:
best_actions = M_batch
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet2', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('QVals', with_min_and_max=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
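# Minimal programmatic usage sketch (illustrative only; assumes a Gym
# environment such as 'Pendulum-v0' is available):
#
#   from spinup.utils.run_utils import setup_logger_kwargs
#   sapm(lambda: gym.make('Pendulum-v0'), epochs=10,
#        N_layers=2, DPP=1, Ablation=3,
#        logger_kwargs=setup_logger_kwargs('sapm_demo', 0))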
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default='HalfCheetah-v2')
parser.add_argument('--hid', type=int, default=300)
parser.add_argument('--l', type=int, default=1)
parser.add_argument('--gamma', type=float, default=0.99)
parser.add_argument('--seed', '-s', type=int, default=0)
parser.add_argument('--DPP', type=int, default=0)
parser.add_argument('--N_layers', type=int, default=2)
parser.add_argument('--Ablation', type=int, default=3)
parser.add_argument('--act_size', type=int, default=32)
parser.add_argument('--min_d', type=float, default=0.1)
parser.add_argument('--scale', type=float, default=0.1)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--exp_name', type=str, default='sapm')
args = parser.parse_args()
from spinup.utils.run_utils import setup_logger_kwargs
logger_kwargs = setup_logger_kwargs(args.exp_name, args.seed)
    # Note: sapm() does not take a discount-factor argument, so --gamma is
    # parsed but not forwarded; the SAPM-specific options are passed through.
    sapm(lambda : gym.make(args.env), #actor_critic=core.mlp_actor_critic,
         ac_kwargs=dict(hidden_sizes=[args.hid]*args.l),
         seed=args.seed, epochs=args.epochs,
         N_layers=args.N_layers, DPP=args.DPP, Ablation=args.Ablation,
         act_size=args.act_size, min_d=args.min_d, scale=args.scale,
         logger_kwargs=logger_kwargs)
| 43.205212 | 162 | 0.573809 |
4a26a376bbf103b9f4d149ee37cd9e8bce49523d | 2,541 | py | Python | ckanext/example_idatasetform/tests/test_controllers.py | GlobalMaksimum/ckan | bdba078d26d485e75554ba9570e292ec480eb9e4 | ["Apache-2.0"] | 1 | 2018-11-29T22:13:01.000Z | 2018-11-29T22:13:01.000Z | ckanext/example_idatasetform/tests/test_controllers.py | GlobalMaksimum/ckan | bdba078d26d485e75554ba9570e292ec480eb9e4 | ["Apache-2.0"] | 135 | 2019-07-03T19:58:12.000Z | 2020-02-14T19:57:33.000Z | ckanext/example_idatasetform/tests/test_controllers.py | GlobalMaksimum/ckan | bdba078d26d485e75554ba9570e292ec480eb9e4 | ["Apache-2.0"] | 3 | 2019-06-21T11:57:57.000Z | 2020-01-20T12:36:38.000Z |
# encoding: utf-8
from nose.tools import assert_equal
from ckan.lib.helpers import url_for
import ckan.plugins as plugins
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
import ckan.model as model
submit_and_follow = helpers.submit_and_follow
def _get_package_edit_page(app, package_name):
user = factories.User()
env = {'REMOTE_USER': user['name'].encode('ascii')}
response = app.get(
url=url_for('dataset.edit', id=package_name),
extra_environ=env,
)
return env, response
class TestPackageController(helpers.FunctionalTestBase):
@classmethod
def _apply_config_changes(cls, cfg):
cfg['ckan.plugins'] = 'example_idatasetform'
@classmethod
def setup_class(cls):
super(TestPackageController, cls).setup_class()
@classmethod
def teardown_class(cls):
if plugins.plugin_loaded('example_idatasetform'):
plugins.unload('example_idatasetform')
super(TestPackageController, cls).teardown_class()
def test_edit_converted_extra_field(self):
dataset = factories.Dataset(custom_text='foo')
app = self._get_test_app()
env, response = _get_package_edit_page(app, dataset['name'])
form = response.forms['dataset-edit']
form['custom_text'] = u'bar'
response = submit_and_follow(app, form, env, 'save')
# just check it has finished the edit, rather than being sent on to the
# resource create/edit form.
assert_equal(response.request.path, '/dataset/%s' % dataset['name'])
pkg = model.Package.by_name(dataset['name'])
assert_equal(pkg.extras['custom_text'], u'bar')
def test_edit_custom_extra_field(self):
# i.e. an extra field that is not mentioned in the schema, filled in on
# the form in the 'custom-fields' section
dataset = factories.Dataset(extras=[{'key': 'testkey',
'value': 'foo'}])
app = self._get_test_app()
env, response = _get_package_edit_page(app, dataset['name'])
form = response.forms['dataset-edit']
form['extras__0__value'] = u'bar'
response = submit_and_follow(app, form, env, 'save')
# just check it has finished the edit, rather than being sent on to the
# resource create/edit form.
assert_equal(response.request.path, '/dataset/%s' % dataset['name'])
pkg = model.Package.by_name(dataset['name'])
assert_equal(pkg.extras['testkey'], u'bar')
| 35.788732 | 79 | 0.662731 |
4a26a37b58964ba8c7ee9e15acc83687adc1b2b5 | 243 | py | Python | python/8kyu/i_love_you_a_little_a_lot_passionately_not_at_all.py | Sigmanificient/codewars | b34df4bf55460d312b7ddf121b46a707b549387a | ["MIT"] | 3 | 2021-06-08T01:57:13.000Z | 2021-06-26T10:52:47.000Z | python/8kyu/i_love_you_a_little_a_lot_passionately_not_at_all.py | Sigmanificient/codewars | b34df4bf55460d312b7ddf121b46a707b549387a | ["MIT"] | null | null | null | python/8kyu/i_love_you_a_little_a_lot_passionately_not_at_all.py | Sigmanificient/codewars | b34df4bf55460d312b7ddf121b46a707b549387a | ["MIT"] | 2 | 2021-06-10T21:20:13.000Z | 2021-06-30T10:13:26.000Z |
"""Kata url: https://www.codewars.com/kata/57f24e6a18e9fad8eb000296."""
def how_much_i_love_you(nb_petals: int) -> str:
return (
"I love you", "a little", "a lot", "passionately", "madly", "not at all"
)[(nb_petals - 1) % 6]
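# Quick illustration of the 1-based modulo indexing (values follow directly
# from the tuple above):
#   how_much_i_love_you(1) -> "I love you"
#   how_much_i_love_you(6) -> "not at all"
#   how_much_i_love_you(7) -> "I love you"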
| 30.375 | 80 | 0.625514 |
4a26a48749d5927956d07f7c4b2d65a1f627d2dc | 1,514 | py | Python | tests/test_utils.py | atrodahl/slack-cli | ee72fa39a5ae516132a6021aa32095a2e5533624 | ["MIT"] | null | null | null | tests/test_utils.py | atrodahl/slack-cli | ee72fa39a5ae516132a6021aa32095a2e5533624 | ["MIT"] | null | null | null | tests/test_utils.py | atrodahl/slack-cli | ee72fa39a5ae516132a6021aa32095a2e5533624 | ["MIT"] | null | null | null |
import unittest
from unittest.mock import patch
from slackcli import messaging
class ParseStatusTests(unittest.TestCase):
def test_parse_full_status(self):
self.assertEqual(
{"status_emoji": ":office:", "status_text": "In office"},
messaging.parse_status_update("/status :office: In office"),
)
def test_parse_no_emoji_status(self):
self.assertEqual(
{"status_emoji": ":speech_balloon:", "status_text": "At home"},
messaging.parse_status_update("/status At home"),
)
def test_parse_no_text_status(self):
self.assertEqual(
{"status_emoji": ":office:", "status_text": None},
messaging.parse_status_update("/status :office:"),
)
def test_parse_empty_status(self):
self.assertEqual(None, messaging.parse_status_update("/status"))
def test_parse_clear_status(self):
self.assertEqual(
{"status_emoji": None, "status_text": ""},
messaging.parse_status_update("/status clear"),
)
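# The cases above document the expected "/status" grammar: an optional leading
# ":emoji:" (defaulting to ":speech_balloon:"), optional text, bare "/status"
# yielding None, and "/status clear" resetting the status.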
class FormatMessageTests(unittest.TestCase):
@patch.object(messaging.names, "get_user_id", return_value="U024BE7LH")
def test_format_outgoing_message(self, mock_user_id):
self.assertEqual(
"Hello <@U024BE7LH>, did you see my file?",
messaging.format_outgoing_message(
"Hello @loremipsum, did you see my file?"
),
)
mock_user_id.assert_called_with("loremipsum")
| 32.913043 | 75 | 0.639366 |
4a26a5168c5687fc58032a675a610528831c3c4c | 47,555 | py | Python | chives/wallet/wallet_node.py | zcomputerwiz/chives-blockchain | 73d268bf76f50ff6133c868b58891e75739a2708 | ["Apache-2.0"] | 75 | 2021-06-27T03:30:59.000Z | 2022-03-20T12:32:55.000Z | chives/wallet/wallet_node.py | zcomputerwiz/chives-blockchain | 73d268bf76f50ff6133c868b58891e75739a2708 | ["Apache-2.0"] | 39 | 2021-07-02T07:11:24.000Z | 2022-03-20T15:00:07.000Z | chives/wallet/wallet_node.py | zcomputerwiz/chives-blockchain | 73d268bf76f50ff6133c868b58891e75739a2708 | ["Apache-2.0"] | 41 | 2021-06-24T11:24:43.000Z | 2022-03-14T16:11:38.000Z |
import asyncio
import json
import logging
import time
import traceback
from pathlib import Path
from typing import Callable, Dict, List, Optional, Set, Tuple, Union
from blspy import PrivateKey
from chives.consensus.block_record import BlockRecord
from chives.consensus.blockchain_interface import BlockchainInterface
from chives.consensus.constants import ConsensusConstants
from chives.consensus.multiprocess_validation import PreValidationResult
from chives.daemon.keychain_proxy import (
KeychainProxy,
KeychainProxyConnectionFailure,
KeyringIsEmpty,
KeyringIsLocked,
connect_to_keychain_and_validate,
wrap_local_keychain,
)
from chives.pools.pool_puzzles import SINGLETON_LAUNCHER_HASH
from chives.protocols import wallet_protocol
from chives.protocols.full_node_protocol import RequestProofOfWeight, RespondProofOfWeight
from chives.protocols.protocol_message_types import ProtocolMessageTypes
from chives.protocols.wallet_protocol import (
RejectAdditionsRequest,
RejectRemovalsRequest,
RequestAdditions,
RequestHeaderBlocks,
RespondAdditions,
RespondBlockHeader,
RespondHeaderBlocks,
RespondRemovals,
)
from chives.server.node_discovery import WalletPeers
from chives.server.outbound_message import Message, NodeType, make_msg
from chives.server.peer_store_resolver import PeerStoreResolver
from chives.server.server import ChivesServer
from chives.server.ws_connection import WSChivesConnection
from chives.types.blockchain_format.coin import Coin, hash_coin_list
from chives.types.blockchain_format.sized_bytes import bytes32
from chives.types.coin_spend import CoinSpend
from chives.types.header_block import HeaderBlock
from chives.types.mempool_inclusion_status import MempoolInclusionStatus
from chives.types.peer_info import PeerInfo
from chives.util.byte_types import hexstr_to_bytes
from chives.util.check_fork_next_block import check_fork_next_block
from chives.util.config import WALLET_PEERS_PATH_KEY_DEPRECATED, load_config
from chives.util.errors import Err, ValidationError
from chives.util.ints import uint32, uint128
from chives.util.keychain import Keychain
from chives.util.lru_cache import LRUCache
from chives.util.merkle_set import MerkleSet, confirm_included_already_hashed, confirm_not_included_already_hashed
from chives.util.network import get_host_addr
from chives.util.path import mkdir, path_from_root
from chives.wallet.block_record import HeaderBlockRecord
from chives.wallet.derivation_record import DerivationRecord
from chives.wallet.settings.settings_objects import BackupInitialized
from chives.wallet.transaction_record import TransactionRecord
from chives.wallet.util.backup_utils import open_backup_file
from chives.wallet.util.wallet_types import WalletType
from chives.wallet.wallet_action import WalletAction
from chives.wallet.wallet_blockchain import ReceiveBlockResult
from chives.wallet.wallet_state_manager import WalletStateManager
from chives.util.profiler import profile_task
class WalletNode:
key_config: Dict
config: Dict
constants: ConsensusConstants
keychain_proxy: Optional[KeychainProxy]
local_keychain: Optional[Keychain] # For testing only. KeychainProxy is used in normal cases
server: Optional[ChivesServer]
log: logging.Logger
wallet_peers: WalletPeers
# Maintains the state of the wallet (blockchain and transactions), handles DB connections
wallet_state_manager: Optional[WalletStateManager]
# How far away from LCA we must be to perform a full sync. Before then, do a short sync,
# which is consecutive requests for the previous block
short_sync_threshold: int
_shut_down: bool
root_path: Path
state_changed_callback: Optional[Callable]
syncing: bool
full_node_peer: Optional[PeerInfo]
peer_task: Optional[asyncio.Task]
logged_in: bool
wallet_peers_initialized: bool
def __init__(
self,
config: Dict,
root_path: Path,
consensus_constants: ConsensusConstants,
name: str = None,
local_keychain: Optional[Keychain] = None,
):
self.config = config
self.constants = consensus_constants
self.keychain_proxy = None
self.local_keychain = local_keychain
self.root_path = root_path
self.base_config = load_config(root_path, "config.yaml")
self.log = logging.getLogger(name if name else __name__)
# Normal operation data
self.cached_blocks: Dict = {}
self.future_block_hashes: Dict = {}
# Sync data
self._shut_down = False
self.proof_hashes: List = []
self.header_hashes: List = []
self.header_hashes_error = False
self.short_sync_threshold = 15 # Change the test when changing this
self.potential_blocks_received: Dict = {}
self.potential_header_hashes: Dict = {}
self.state_changed_callback = None
self.wallet_state_manager = None
self.backup_initialized = False # Delay first launch sync after user imports backup info or decides to skip
self.server = None
self.wsm_close_task = None
self.sync_task: Optional[asyncio.Task] = None
self.logged_in_fingerprint: Optional[int] = None
self.peer_task = None
self.logged_in = False
self.wallet_peers_initialized = False
self.last_new_peak_messages = LRUCache(5)
async def ensure_keychain_proxy(self) -> KeychainProxy:
if not self.keychain_proxy:
if self.local_keychain:
self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log)
else:
self.keychain_proxy = await connect_to_keychain_and_validate(self.root_path, self.log)
if not self.keychain_proxy:
raise KeychainProxyConnectionFailure("Failed to connect to keychain service")
return self.keychain_proxy
async def get_key_for_fingerprint(self, fingerprint: Optional[int]) -> Optional[PrivateKey]:
        key: Optional[PrivateKey] = None
try:
keychain_proxy = await self.ensure_keychain_proxy()
key = await keychain_proxy.get_key_for_fingerprint(fingerprint)
except KeyringIsEmpty:
self.log.warning("No keys present. Create keys with the UI, or with the 'chives keys' program.")
return None
except KeyringIsLocked:
self.log.warning("Keyring is locked")
return None
except KeychainProxyConnectionFailure as e:
tb = traceback.format_exc()
self.log.error(f"Missing keychain_proxy: {e} {tb}")
raise e # Re-raise so that the caller can decide whether to continue or abort
return key
async def _start(
self,
fingerprint: Optional[int] = None,
new_wallet: bool = False,
backup_file: Optional[Path] = None,
skip_backup_import: bool = False,
) -> bool:
try:
private_key = await self.get_key_for_fingerprint(fingerprint)
except KeychainProxyConnectionFailure:
self.log.error("Failed to connect to keychain service")
return False
if private_key is None:
self.logged_in = False
return False
if self.config.get("enable_profiler", False):
asyncio.create_task(profile_task(self.root_path, "wallet", self.log))
db_path_key_suffix = str(private_key.get_g1().get_fingerprint())
db_path_replaced: str = (
self.config["database_path"]
.replace("CHALLENGE", self.config["selected_network"])
.replace("KEY", db_path_key_suffix)
)
path = path_from_root(self.root_path, db_path_replaced)
mkdir(path.parent)
self.new_peak_lock = asyncio.Lock()
assert self.server is not None
self.wallet_state_manager = await WalletStateManager.create(
private_key, self.config, path, self.constants, self.server, self.root_path
)
self.wsm_close_task = None
assert self.wallet_state_manager is not None
backup_settings: BackupInitialized = self.wallet_state_manager.user_settings.get_backup_settings()
if backup_settings.user_initialized is False:
if new_wallet is True:
await self.wallet_state_manager.user_settings.user_created_new_wallet()
self.wallet_state_manager.new_wallet = True
elif skip_backup_import is True:
await self.wallet_state_manager.user_settings.user_skipped_backup_import()
elif backup_file is not None:
await self.wallet_state_manager.import_backup_info(backup_file)
else:
self.backup_initialized = False
await self.wallet_state_manager.close_all_stores()
self.wallet_state_manager = None
self.logged_in = False
return False
self.backup_initialized = True
# Start peers here after the backup initialization has finished
# We only want to do this once per instantiation
# However, doing it earlier before backup initialization causes
# the wallet to spam the introducer
if self.wallet_peers_initialized is False:
asyncio.create_task(self.wallet_peers.start())
self.wallet_peers_initialized = True
if backup_file is not None:
json_dict = open_backup_file(backup_file, self.wallet_state_manager.private_key)
if "start_height" in json_dict["data"]:
start_height = json_dict["data"]["start_height"]
self.config["starting_height"] = max(0, start_height - self.config["start_height_buffer"])
else:
self.config["starting_height"] = 0
else:
self.config["starting_height"] = 0
if self.state_changed_callback is not None:
self.wallet_state_manager.set_callback(self.state_changed_callback)
self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
self._shut_down = False
self.peer_task = asyncio.create_task(self._periodically_check_full_node())
self.sync_event = asyncio.Event()
self.sync_task = asyncio.create_task(self.sync_job())
if fingerprint is None:
self.logged_in_fingerprint = private_key.get_g1().get_fingerprint()
else:
self.logged_in_fingerprint = fingerprint
self.logged_in = True
return True
def _close(self):
self.log.info("self._close")
self.logged_in_fingerprint = None
self._shut_down = True
async def _await_closed(self):
self.log.info("self._await_closed")
await self.server.close_all_connections()
asyncio.create_task(self.wallet_peers.ensure_is_closed())
if self.wallet_state_manager is not None:
await self.wallet_state_manager.close_all_stores()
self.wallet_state_manager = None
if self.sync_task is not None:
self.sync_task.cancel()
self.sync_task = None
if self.peer_task is not None:
self.peer_task.cancel()
self.peer_task = None
self.logged_in = False
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
if self.wallet_state_manager is not None:
self.wallet_state_manager.set_callback(self.state_changed_callback)
self.wallet_state_manager.set_pending_callback(self._pending_tx_handler)
def _pending_tx_handler(self):
if self.wallet_state_manager is None or self.backup_initialized is False:
return None
asyncio.create_task(self._resend_queue())
async def _action_messages(self) -> List[Message]:
if self.wallet_state_manager is None or self.backup_initialized is False:
return []
actions: List[WalletAction] = await self.wallet_state_manager.action_store.get_all_pending_actions()
result: List[Message] = []
for action in actions:
data = json.loads(action.data)
action_data = data["data"]["action_data"]
if action.name == "request_puzzle_solution":
coin_name = bytes32(hexstr_to_bytes(action_data["coin_name"]))
height = uint32(action_data["height"])
msg = make_msg(
ProtocolMessageTypes.request_puzzle_solution,
wallet_protocol.RequestPuzzleSolution(coin_name, height),
)
result.append(msg)
return result
async def _resend_queue(self):
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
            or self.backup_initialized is False
):
return None
for msg, sent_peers in await self._messages_to_resend():
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
                or self.backup_initialized is False
):
return None
full_nodes = self.server.get_full_node_connections()
for peer in full_nodes:
if peer.peer_node_id in sent_peers:
continue
await peer.send_message(msg)
for msg in await self._action_messages():
if (
self._shut_down
or self.server is None
or self.wallet_state_manager is None
                or self.backup_initialized is False
):
return None
await self.server.send_to_all([msg], NodeType.FULL_NODE)
async def _messages_to_resend(self) -> List[Tuple[Message, Set[bytes32]]]:
if self.wallet_state_manager is None or self.backup_initialized is False or self._shut_down:
return []
messages: List[Tuple[Message, Set[bytes32]]] = []
records: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_not_sent()
for record in records:
if record.spend_bundle is None:
continue
msg = make_msg(
ProtocolMessageTypes.send_transaction,
wallet_protocol.SendTransaction(record.spend_bundle),
)
already_sent = set()
for peer, status, _ in record.sent_to:
if status == MempoolInclusionStatus.SUCCESS.value:
already_sent.add(hexstr_to_bytes(peer))
messages.append((msg, already_sent))
return messages
def set_server(self, server: ChivesServer):
self.server = server
DNS_SERVERS_EMPTY: list = []
network_name: str = self.config["selected_network"]
# TODO: Perhaps use a different set of DNS seeders for wallets, to split the traffic.
self.wallet_peers = WalletPeers(
self.server,
self.config["target_peer_count"],
PeerStoreResolver(
self.root_path,
self.config,
selected_network=network_name,
peers_file_path_key="wallet_peers_file_path",
legacy_peer_db_path_key=WALLET_PEERS_PATH_KEY_DEPRECATED,
default_peers_file_path="wallet/db/wallet_peers.dat",
),
self.config["introducer_peer"],
DNS_SERVERS_EMPTY,
self.config["peer_connect_interval"],
network_name,
None,
self.log,
)
async def on_connect(self, peer: WSChivesConnection):
if self.wallet_state_manager is None or self.backup_initialized is False:
return None
messages_peer_ids = await self._messages_to_resend()
self.wallet_state_manager.state_changed("add_connection")
for msg, peer_ids in messages_peer_ids:
if peer.peer_node_id in peer_ids:
continue
await peer.send_message(msg)
if not self.has_full_node() and self.wallet_peers is not None:
asyncio.create_task(self.wallet_peers.on_connect(peer))
async def _periodically_check_full_node(self) -> None:
tries = 0
while not self._shut_down and tries < 5:
if self.has_full_node():
await self.wallet_peers.ensure_is_closed()
if self.wallet_state_manager is not None:
self.wallet_state_manager.state_changed("add_connection")
break
tries += 1
await asyncio.sleep(self.config["peer_connect_interval"])
def has_full_node(self) -> bool:
if self.server is None:
return False
if "full_node_peer" in self.config:
full_node_peer = PeerInfo(
self.config["full_node_peer"]["host"],
self.config["full_node_peer"]["port"],
)
peers = [c.get_peer_info() for c in self.server.get_full_node_connections()]
# If full_node_peer is already an address, use it, otherwise
# resolve it here.
if full_node_peer.is_valid():
full_node_resolved = full_node_peer
else:
full_node_resolved = PeerInfo(
get_host_addr(full_node_peer.host, self.base_config.get("prefer_ipv6")), full_node_peer.port
)
if full_node_peer in peers or full_node_resolved in peers:
self.log.info(f"Will not attempt to connect to other nodes, already connected to {full_node_peer}")
for connection in self.server.get_full_node_connections():
if (
connection.get_peer_info() != full_node_peer
and connection.get_peer_info() != full_node_resolved
):
self.log.info(f"Closing unnecessary connection to {connection.get_peer_logging()}.")
asyncio.create_task(connection.close())
return True
return False
async def complete_blocks(self, header_blocks: List[HeaderBlock], peer: WSChivesConnection):
if self.wallet_state_manager is None:
return None
header_block_records: List[HeaderBlockRecord] = []
assert self.server
trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"])
async with self.wallet_state_manager.blockchain.lock:
for block in header_blocks:
if block.is_transaction_block:
# Find additions and removals
(additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals(
block, block.transactions_filter, None
)
# Get Additions
added_coins = await self.get_additions(peer, block, additions)
if added_coins is None:
raise ValueError("Failed to fetch additions")
# Get removals
removed_coins = await self.get_removals(peer, block, added_coins, removals)
if removed_coins is None:
raise ValueError("Failed to fetch removals")
# If there is a launcher created, or we have a singleton spent, fetches the required solutions
additional_coin_spends: List[CoinSpend] = await self.get_additional_coin_spends(
peer, block, added_coins, removed_coins
)
hbr = HeaderBlockRecord(block, added_coins, removed_coins)
else:
hbr = HeaderBlockRecord(block, [], [])
header_block_records.append(hbr)
additional_coin_spends = []
(result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
hbr, trusted=trusted, additional_coin_spends=additional_coin_spends
)
if result == ReceiveBlockResult.NEW_PEAK:
if not self.wallet_state_manager.sync_mode:
self.wallet_state_manager.blockchain.clean_block_records()
self.wallet_state_manager.state_changed("new_block")
self.wallet_state_manager.state_changed("sync_changed")
await self.wallet_state_manager.new_peak()
elif result == ReceiveBlockResult.INVALID_BLOCK:
self.log.info(f"Invalid block from peer: {peer.get_peer_logging()} {error}")
await peer.close()
return
else:
self.log.debug(f"Result: {result}")
async def new_peak_wallet(self, peak: wallet_protocol.NewPeakWallet, peer: WSChivesConnection):
if self.wallet_state_manager is None:
return
if self.wallet_state_manager.blockchain.contains_block(peak.header_hash):
self.log.debug(f"known peak {peak.header_hash}")
return
if self.wallet_state_manager.sync_mode:
self.last_new_peak_messages.put(peer, peak)
return
async with self.new_peak_lock:
curr_peak = self.wallet_state_manager.blockchain.get_peak()
if curr_peak is not None and curr_peak.weight >= peak.weight:
return
request = wallet_protocol.RequestBlockHeader(peak.height)
response: Optional[RespondBlockHeader] = await peer.request_block_header(request)
if response is None or not isinstance(response, RespondBlockHeader) or response.header_block is None:
self.log.warning(f"bad peak response from peer {response}")
return
header_block = response.header_block
curr_peak_height = 0 if curr_peak is None else curr_peak.height
if (curr_peak_height == 0 and peak.height < self.constants.WEIGHT_PROOF_RECENT_BLOCKS) or (
curr_peak_height > peak.height - 200
):
if peak.height <= curr_peak_height + self.config["short_sync_blocks_behind_threshold"]:
await self.wallet_short_sync_backtrack(header_block, peer)
else:
await self.batch_sync_to_peak(curr_peak_height, peak)
elif peak.height >= self.constants.WEIGHT_PROOF_RECENT_BLOCKS:
# Request weight proof
# Sync if PoW validates
weight_request = RequestProofOfWeight(peak.height, peak.header_hash)
weight_proof_response: RespondProofOfWeight = await peer.request_proof_of_weight(
weight_request, timeout=360
)
if weight_proof_response is None:
return
weight_proof = weight_proof_response.wp
if self.wallet_state_manager is None:
return
if self.server is not None and self.server.is_trusted_peer(peer, self.config["trusted_peers"]):
valid, fork_point = self.wallet_state_manager.weight_proof_handler.get_fork_point_no_validations(
weight_proof
)
else:
valid, fork_point, _ = await self.wallet_state_manager.weight_proof_handler.validate_weight_proof(
weight_proof
)
if not valid:
self.log.error(
f"invalid weight proof, num of epochs {len(weight_proof.sub_epochs)}"
f" recent blocks num ,{len(weight_proof.recent_chain_data)}"
)
self.log.debug(f"{weight_proof}")
return
self.log.info(f"Validated, fork point is {fork_point}")
self.wallet_state_manager.sync_store.add_potential_fork_point(
header_block.header_hash, uint32(fork_point)
)
self.wallet_state_manager.sync_store.add_potential_peak(header_block)
self.start_sync()
async def wallet_short_sync_backtrack(self, header_block, peer):
top = header_block
blocks = [top]
# Fetch blocks backwards until we hit the one that we have,
# then complete them with additions / removals going forward
while not self.wallet_state_manager.blockchain.contains_block(top.prev_header_hash) and top.height > 0:
request_prev = wallet_protocol.RequestBlockHeader(top.height - 1)
response_prev: Optional[RespondBlockHeader] = await peer.request_block_header(request_prev)
if response_prev is None or not isinstance(response_prev, RespondBlockHeader):
raise RuntimeError("bad block header response from peer while syncing")
prev_head = response_prev.header_block
blocks.append(prev_head)
top = prev_head
blocks.reverse()
await self.complete_blocks(blocks, peer)
await self.wallet_state_manager.create_more_puzzle_hashes()
async def batch_sync_to_peak(self, fork_height, peak):
advanced_peak = False
batch_size = self.constants.MAX_BLOCK_COUNT_PER_REQUESTS
for i in range(max(0, fork_height - 1), peak.height, batch_size):
start_height = i
end_height = min(peak.height, start_height + batch_size)
peers = self.server.get_full_node_connections()
added = False
for peer in peers:
try:
added, advanced_peak = await self.fetch_blocks_and_validate(
peer, uint32(start_height), uint32(end_height), None if advanced_peak else fork_height
)
if added:
break
except Exception as e:
await peer.close()
exc = traceback.format_exc()
self.log.error(f"Error while trying to fetch from peer:{e} {exc}")
if not added:
raise RuntimeError(f"Was not able to add blocks {start_height}-{end_height}")
curr_peak = self.wallet_state_manager.blockchain.get_peak()
assert peak is not None
self.wallet_state_manager.blockchain.clean_block_record(
min(end_height, curr_peak.height) - self.constants.BLOCKS_CACHE_SIZE
)
def start_sync(self) -> None:
self.log.info("self.sync_event.set()")
self.sync_event.set()
async def check_new_peak(self) -> None:
if self.wallet_state_manager is None:
return None
current_peak: Optional[BlockRecord] = self.wallet_state_manager.blockchain.get_peak()
if current_peak is None:
return None
potential_peaks: List[
Tuple[bytes32, HeaderBlock]
] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()
for _, block in potential_peaks:
if current_peak.weight < block.weight:
await asyncio.sleep(5)
self.start_sync()
return None
async def sync_job(self) -> None:
while True:
self.log.info("Loop start in sync job")
if self._shut_down is True:
break
asyncio.create_task(self.check_new_peak())
await self.sync_event.wait()
self.last_new_peak_messages = LRUCache(5)
self.sync_event.clear()
if self._shut_down is True:
break
try:
assert self.wallet_state_manager is not None
self.wallet_state_manager.set_sync_mode(True)
await self._sync()
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Loop exception in sync {e}. {tb}")
finally:
if self.wallet_state_manager is not None:
self.wallet_state_manager.set_sync_mode(False)
for peer, peak in self.last_new_peak_messages.cache.items():
asyncio.create_task(self.new_peak_wallet(peak, peer))
self.log.info("Loop end in sync job")
async def _sync(self) -> None:
"""
Wallet has fallen far behind (or is starting up for the first time), and must be synced
up to the LCA of the blockchain.
"""
if self.wallet_state_manager is None or self.backup_initialized is False or self.server is None:
return None
highest_weight: uint128 = uint128(0)
peak_height: uint32 = uint32(0)
peak: Optional[HeaderBlock] = None
potential_peaks: List[
Tuple[bytes32, HeaderBlock]
] = self.wallet_state_manager.sync_store.get_potential_peaks_tuples()
self.log.info(f"Have collected {len(potential_peaks)} potential peaks")
for header_hash, potential_peak_block in potential_peaks:
if potential_peak_block.weight > highest_weight:
highest_weight = potential_peak_block.weight
peak_height = potential_peak_block.height
peak = potential_peak_block
if peak_height is None or peak_height == 0:
return None
if self.wallet_state_manager.peak is not None and highest_weight <= self.wallet_state_manager.peak.weight:
self.log.info("Not performing sync, already caught up.")
return None
peers: List[WSChivesConnection] = self.server.get_full_node_connections()
if len(peers) == 0:
self.log.info("No peers to sync to")
return None
async with self.wallet_state_manager.blockchain.lock:
fork_height = None
if peak is not None:
fork_height = self.wallet_state_manager.sync_store.get_potential_fork_point(peak.header_hash)
assert fork_height is not None
# This is the fork point in SES in the case where no fork was detected
peers = self.server.get_full_node_connections()
fork_height = await check_fork_next_block(
self.wallet_state_manager.blockchain, fork_height, peers, wallet_next_block_check
)
if fork_height is None:
fork_height = uint32(0)
await self.wallet_state_manager.blockchain.warmup(fork_height)
await self.batch_sync_to_peak(fork_height, peak)
async def fetch_blocks_and_validate(
self,
peer: WSChivesConnection,
height_start: uint32,
height_end: uint32,
fork_point_with_peak: Optional[uint32],
) -> Tuple[bool, bool]:
"""
Returns whether the blocks validated, and whether the peak was advanced
"""
if self.wallet_state_manager is None:
return False, False
self.log.info(f"Requesting blocks {height_start}-{height_end}")
request = RequestHeaderBlocks(uint32(height_start), uint32(height_end))
res: Optional[RespondHeaderBlocks] = await peer.request_header_blocks(request)
if res is None or not isinstance(res, RespondHeaderBlocks):
raise ValueError("Peer returned no response")
header_blocks: List[HeaderBlock] = res.header_blocks
advanced_peak = False
if header_blocks is None:
raise ValueError(f"No response from peer {peer}")
assert self.server
trusted = self.server.is_trusted_peer(peer, self.config["trusted_peers"])
pre_validation_results: Optional[List[PreValidationResult]] = None
if not trusted:
pre_validation_results = await self.wallet_state_manager.blockchain.pre_validate_blocks_multiprocessing(
header_blocks
)
if pre_validation_results is None:
return False, advanced_peak
assert len(header_blocks) == len(pre_validation_results)
for i in range(len(header_blocks)):
header_block = header_blocks[i]
if not trusted and pre_validation_results is not None and pre_validation_results[i].error is not None:
raise ValidationError(Err(pre_validation_results[i].error))
fork_point_with_old_peak = None if advanced_peak else fork_point_with_peak
if header_block.is_transaction_block:
# Find additions and removals
(additions, removals,) = await self.wallet_state_manager.get_filter_additions_removals(
header_block, header_block.transactions_filter, fork_point_with_old_peak
)
# Get Additions
added_coins = await self.get_additions(peer, header_block, additions)
if added_coins is None:
raise ValueError("Failed to fetch additions")
# Get removals
removed_coins = await self.get_removals(peer, header_block, added_coins, removals)
if removed_coins is None:
raise ValueError("Failed to fetch removals")
# If there is a launcher created, or we have a singleton spent, fetches the required solutions
additional_coin_spends: List[CoinSpend] = await self.get_additional_coin_spends(
peer, header_block, added_coins, removed_coins
)
header_block_record = HeaderBlockRecord(header_block, added_coins, removed_coins)
else:
header_block_record = HeaderBlockRecord(header_block, [], [])
additional_coin_spends = []
start_t = time.time()
if trusted:
(result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
header_block_record,
None,
trusted,
fork_point_with_old_peak,
additional_coin_spends=additional_coin_spends,
)
else:
assert pre_validation_results is not None
(result, error, fork_h,) = await self.wallet_state_manager.blockchain.receive_block(
header_block_record,
pre_validation_results[i],
trusted,
fork_point_with_old_peak,
additional_coin_spends=additional_coin_spends,
)
self.log.debug(
f"Time taken to validate {header_block.height} with fork "
f"{fork_point_with_old_peak}: {time.time() - start_t}"
)
if result == ReceiveBlockResult.NEW_PEAK:
advanced_peak = True
self.wallet_state_manager.state_changed("new_block")
elif result == ReceiveBlockResult.INVALID_BLOCK:
raise ValueError("Value error peer sent us invalid block")
if advanced_peak:
await self.wallet_state_manager.create_more_puzzle_hashes()
return True, advanced_peak
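    # The two validators below check full-node responses against the merkle
    # roots committed in the block: when no proofs are supplied, the returned
    # coin set itself must hash to the root; otherwise each per-puzzle-hash
    # inclusion/exclusion proof is verified individually.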
def validate_additions(
self,
coins: List[Tuple[bytes32, List[Coin]]],
proofs: Optional[List[Tuple[bytes32, bytes, Optional[bytes]]]],
root,
):
if proofs is None:
# Verify root
additions_merkle_set = MerkleSet()
# Addition Merkle set contains puzzlehash and hash of all coins with that puzzlehash
for puzzle_hash, coins_l in coins:
additions_merkle_set.add_already_hashed(puzzle_hash)
additions_merkle_set.add_already_hashed(hash_coin_list(coins_l))
additions_root = additions_merkle_set.get_root()
if root != additions_root:
return False
else:
for i in range(len(coins)):
assert coins[i][0] == proofs[i][0]
coin_list_1: List[Coin] = coins[i][1]
puzzle_hash_proof: bytes32 = proofs[i][1]
coin_list_proof: Optional[bytes32] = proofs[i][2]
if len(coin_list_1) == 0:
# Verify exclusion proof for puzzle hash
not_included = confirm_not_included_already_hashed(
root,
coins[i][0],
puzzle_hash_proof,
)
if not_included is False:
return False
else:
try:
# Verify inclusion proof for coin list
included = confirm_included_already_hashed(
root,
hash_coin_list(coin_list_1),
coin_list_proof,
)
if included is False:
return False
except AssertionError:
return False
try:
# Verify inclusion proof for puzzle hash
included = confirm_included_already_hashed(
root,
coins[i][0],
puzzle_hash_proof,
)
if included is False:
return False
except AssertionError:
return False
return True
def validate_removals(self, coins, proofs, root):
if proofs is None:
# If there are no proofs, it means all removals were returned in the response.
# we must find the ones relevant to our wallets.
# Verify removals root
removals_merkle_set = MerkleSet()
for name_coin in coins:
# TODO review all verification
name, coin = name_coin
if coin is not None:
removals_merkle_set.add_already_hashed(coin.name())
removals_root = removals_merkle_set.get_root()
if root != removals_root:
return False
else:
# This means the full node has responded only with the relevant removals
# for our wallet. Each merkle proof must be verified.
if len(coins) != len(proofs):
return False
for i in range(len(coins)):
# Coins are in the same order as proofs
if coins[i][0] != proofs[i][0]:
return False
coin = coins[i][1]
if coin is None:
# Verifies merkle proof of exclusion
not_included = confirm_not_included_already_hashed(
root,
coins[i][0],
proofs[i][1],
)
if not_included is False:
return False
else:
# Verifies merkle proof of inclusion of coin name
if coins[i][0] != coin.name():
return False
included = confirm_included_already_hashed(
root,
coin.name(),
proofs[i][1],
)
if included is False:
return False
return True
async def fetch_puzzle_solution(self, peer, height: uint32, coin: Coin) -> CoinSpend:
solution_response = await peer.request_puzzle_solution(
wallet_protocol.RequestPuzzleSolution(coin.name(), height)
)
if solution_response is None or not isinstance(solution_response, wallet_protocol.RespondPuzzleSolution):
raise ValueError(f"Was not able to obtain solution {solution_response}")
return CoinSpend(coin, solution_response.response.puzzle, solution_response.response.solution)
async def get_additional_coin_spends(
self, peer, block, added_coins: List[Coin], removed_coins: List[Coin]
) -> List[CoinSpend]:
assert self.wallet_state_manager is not None
additional_coin_spends: List[CoinSpend] = []
if len(removed_coins) > 0:
removed_coin_ids = set([coin.name() for coin in removed_coins])
all_added_coins = await self.get_additions(peer, block, [], get_all_additions=True)
assert all_added_coins is not None
if all_added_coins is not None:
all_added_coin_parents = [c.parent_coin_info for c in all_added_coins]
for coin in all_added_coins:
# This searches specifically for a launcher being created, and adds the solution of the launcher
if (
coin.puzzle_hash == SINGLETON_LAUNCHER_HASH # Check that it's a launcher
                        and coin.name() in all_added_coin_parents # Check that it's ephemeral
and coin.parent_coin_info in removed_coin_ids # Check that an interesting coin created it
):
cs: CoinSpend = await self.fetch_puzzle_solution(peer, block.height, coin)
additional_coin_spends.append(cs)
# Apply this coin solution, which might add things to interested list
await self.wallet_state_manager.get_next_interesting_coin_ids(cs, False)
all_removed_coins: Optional[List[Coin]] = await self.get_removals(
peer, block, added_coins, removed_coins, request_all_removals=True
)
assert all_removed_coins is not None
all_removed_coins_dict: Dict[bytes32, Coin] = {coin.name(): coin for coin in all_removed_coins}
keep_searching = True
while keep_searching:
# This keeps fetching solutions for coins we are interested list, in this block, until
# there are no more interested things to fetch
keep_searching = False
interested_ids: List[
bytes32
] = await self.wallet_state_manager.interested_store.get_interested_coin_ids()
for coin_id in interested_ids:
if coin_id in all_removed_coins_dict:
coin = all_removed_coins_dict[coin_id]
cs = await self.fetch_puzzle_solution(peer, block.height, coin)
# Apply this coin solution, which might add things to interested list
await self.wallet_state_manager.get_next_interesting_coin_ids(cs, False)
additional_coin_spends.append(cs)
keep_searching = True
all_removed_coins_dict.pop(coin_id)
break
return additional_coin_spends
async def get_additions(
self, peer: WSChivesConnection, block_i, additions: Optional[List[bytes32]], get_all_additions: bool = False
) -> Optional[List[Coin]]:
if (additions is not None and len(additions) > 0) or get_all_additions:
if get_all_additions:
additions = None
additions_request = RequestAdditions(block_i.height, block_i.header_hash, additions)
additions_res: Optional[Union[RespondAdditions, RejectAdditionsRequest]] = await peer.request_additions(
additions_request
)
if additions_res is None:
await peer.close()
return None
elif isinstance(additions_res, RespondAdditions):
validated = self.validate_additions(
additions_res.coins,
additions_res.proofs,
block_i.foliage_transaction_block.additions_root,
)
if not validated:
await peer.close()
return None
added_coins = []
for ph_coins in additions_res.coins:
ph, coins = ph_coins
added_coins.extend(coins)
return added_coins
            elif isinstance(additions_res, RejectAdditionsRequest):
await peer.close()
return None
return None
else:
return [] # No added coins
async def get_removals(
self, peer: WSChivesConnection, block_i, additions, removals, request_all_removals=False
) -> Optional[List[Coin]]:
assert self.wallet_state_manager is not None
# Check if we need all removals
for coin in additions:
puzzle_store = self.wallet_state_manager.puzzle_store
record_info: Optional[DerivationRecord] = await puzzle_store.get_derivation_record_for_puzzle_hash(
coin.puzzle_hash
)
if record_info is not None and record_info.wallet_type == WalletType.COLOURED_COIN:
# TODO why ?
request_all_removals = True
break
if record_info is not None and record_info.wallet_type == WalletType.DISTRIBUTED_ID:
request_all_removals = True
break
if len(removals) > 0 or request_all_removals:
if request_all_removals:
removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, None)
else:
removals_request = wallet_protocol.RequestRemovals(block_i.height, block_i.header_hash, removals)
removals_res: Optional[Union[RespondRemovals, RejectRemovalsRequest]] = await peer.request_removals(
removals_request
)
if removals_res is None:
return None
elif isinstance(removals_res, RespondRemovals):
validated = self.validate_removals(
removals_res.coins,
removals_res.proofs,
block_i.foliage_transaction_block.removals_root,
)
if validated is False:
await peer.close()
return None
removed_coins = []
for _, coins_l in removals_res.coins:
if coins_l is not None:
removed_coins.append(coins_l)
return removed_coins
elif isinstance(removals_res, RejectRemovalsRequest):
return None
else:
return None
else:
return []
async def wallet_next_block_check(
peer: WSChivesConnection, potential_peek: uint32, blockchain: BlockchainInterface
) -> bool:
block_response = await peer.request_header_blocks(
wallet_protocol.RequestHeaderBlocks(potential_peek, potential_peek)
)
if block_response is not None and isinstance(block_response, wallet_protocol.RespondHeaderBlocks):
our_peak = blockchain.get_peak()
if our_peak is not None and block_response.header_blocks[0].prev_header_hash == our_peak.header_hash:
return True
return False
| 45.550766 | 118 | 0.617075 |
4a26a56be93f97b4a95b6eaa6dcde724f588621e | 2,710 | py | Python | setup.py | loot-king/cmake-python-distributions | edc3205a6aa354db1bde47dfe20478353d11c045 | ["Apache-2.0"] | null | null | null | setup.py | loot-king/cmake-python-distributions | edc3205a6aa354db1bde47dfe20478353d11c045 | ["Apache-2.0"] | null | null | null | setup.py | loot-king/cmake-python-distributions | edc3205a6aa354db1bde47dfe20478353d11c045 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/env python
import os
import sys
from distutils.text_file import TextFile
from skbuild import setup
# Add current folder to path
# This is required to import versioneer in an isolated pip build
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import versioneer # noqa: E402
with open('README.rst', 'r') as fp:
readme = fp.read()
with open('HISTORY.rst', 'r') as fp:
history = fp.read().replace('.. :changelog:', '')
def parse_requirements(filename):
with open(filename, 'r') as file:
return TextFile(filename, file).readlines()
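# TextFile strips comments and blank lines by default, so a line such as
# "pytest-cov>=2.0  # coverage plugin" (example only) would come back as
# "pytest-cov>=2.0" here.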
requirements = []
test_requirements = parse_requirements('requirements-test.txt')
# Require pytest-runner only when running tests
pytest_runner = (['pytest-runner>=2.0,<3dev']
if any(arg in sys.argv for arg in ('pytest', 'test'))
else [])
setup_requires = pytest_runner
setup(
name='cmake',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
author='Jean-Christophe Fillion-Robin',
author_email='[email protected]',
package_dir={'': 'src'},
packages=['cmake'],
cmake_install_dir='src/cmake/data',
entry_points={
'console_scripts': [
'cmake=cmake:cmake', 'cpack=cmake:cpack', 'ctest=cmake:ctest'
]
},
url='https://cmake.org/',
download_url='https://cmake.org/download',
project_urls={
"Documentation": "https://cmake-python-distributions.readthedocs.io/",
"Source Code": "https://github.com/scikit-build/cmake-python-distributions",
"Mailing list": "https://groups.google.com/forum/#!forum/scikit-build",
"Bug Tracker": "https://github.com/scikit-build/cmake-python-distributions/issues",
},
description='CMake is an open-source, cross-platform family of '
'tools designed to build, test and package software',
long_description=readme + '\n\n' + history,
long_description_content_type='text/x-rst',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'License :: OSI Approved :: BSD License',
'Programming Language :: C',
'Programming Language :: C++',
'Programming Language :: Fortran',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools'
],
license='Apache 2.0',
keywords='CMake build c++ fortran cross-platform cross-compilation',
install_requires=requirements,
tests_require=test_requirements,
setup_requires=setup_requires
)
| 28.526316 | 91 | 0.656089 |
4a26a5c38ceb94810fd34500c5c6cc236cae7976 | 8,327 | py | Python | meercat/utils.py | rloganiv/meercat-aux | 4d9006095e9fb91034f8dae0baaa81a1567f6606 | ["Apache-2.0"] | 1 | 2021-11-24T03:32:00.000Z | 2021-11-24T03:32:00.000Z | meercat/utils.py | rloganiv/meercat-aux | 4d9006095e9fb91034f8dae0baaa81a1567f6606 | ["Apache-2.0"] | 1 | 2020-12-09T00:15:33.000Z | 2021-05-27T00:52:03.000Z | meercat/utils.py | rloganiv/streaming-cdc | 4d9006095e9fb91034f8dae0baaa81a1567f6606 | ["Apache-2.0"] | null | null | null |
"""Utilities."""
import csv
import collections
import itertools
import json
import logging
import os
import pickle
import random
import torch
from tqdm import tqdm
logger = logging.getLogger(__name__)
ENTITY_VOCAB_FILENAME = 'entities.txt'
ENTITY_MENTION_START = '[E_START]'
ENTITY_MENTION_END = '[E_END]'
def add_mention_seps(
tokenizer,
):
"""Adds entity mention seperator tokens if needed."""
# start_id = tokenizer.convert_tokens_to_ids(ENTITY_MENTION_START)
# if start_id == tokenizer.unk_token_id:
# tokenizer.add_tokens(ENTITY_MENTION_START)
# end_id = tokenizer.convert_tokens_to_ids(ENTITY_MENTION_END)
# if end_id == tokenizer.unk_token_id:
# tokenizer.add_tokens(ENTITY_MENTION_END)
tokenizer.add_special_tokens({
'additional_special_tokens': [ENTITY_MENTION_START, ENTITY_MENTION_END],
})
class EntityTokenizer:
def __init__(self, entities, counts):
self.idx_to_entity = entities
self.entity_to_idx = {x: i for i, x in enumerate(entities)}
self.counts = counts
def __len__(self):
return len(self.idx_to_entity)
def __call__(self, entity_id):
if entity_id not in self.entity_to_idx:
logger.warning('Adding entity to vocabulary: %s.', entity_id)
self.entity_to_idx[entity_id] = len(self.entity_to_idx)
self.idx_to_entity.append(entity_id)
return self.entity_to_idx[entity_id]
def save_pretrained(
self,
save_directory,
):
assert os.path.isdir(save_directory), 'save_directory is not a directory.'
output_path = os.path.join(save_directory, ENTITY_VOCAB_FILENAME)
with open(output_path, 'w') as g:
writer = csv.writer(g)
for entity, count in zip(self.idx_to_entity, self.counts):
writer.writerow((entity, count))
@classmethod
def from_pretrained(cls, path):
if os.path.isdir(path):
path = os.path.join(path, ENTITY_VOCAB_FILENAME)
entities = []
counts = []
with open(path, 'r') as f:
reader = csv.reader(f)
for entity, count in reader:
entities.append(entity)
counts.append(int(count))
return cls(entities, counts)
def _encode_mention(data, tokenizer):
mention_tokens = tokenizer.tokenize(data['mention'])
mention_tokens = [ENTITY_MENTION_START, *mention_tokens, ENTITY_MENTION_END]
left_tokens = tokenizer.tokenize(data['left_context'])
right_tokens = tokenizer.tokenize(data['right_context'])
# Get a roughly centered window around the mention.
context_size = tokenizer.model_max_length - len(mention_tokens) - 2
left_size = right_size = context_size // 2
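    # If one side has fewer tokens than its half of the budget, the unused
    # slots are handed to the other side so the full context window is used.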
if len(left_tokens) < left_size:
right_size += left_size - len(left_tokens)
left_size = len(left_tokens)
if len(right_tokens) < right_size:
left_size += right_size - len(right_tokens)
right_size = len(right_tokens)
left_tokens = left_tokens[-left_size:]
right_tokens = right_tokens[:right_size]
tokens = left_tokens + mention_tokens + right_tokens
mention_encoding = tokenizer.encode_plus(
text=tokens,
add_special_tokens=True,
padding='max_length',
truncation=True,
return_tensors='pt',
)
mention_encoding = {k: v.squeeze(0) for k, v in mention_encoding.items()}
return mention_encoding
class ELDataset(torch.utils.data.Dataset):
def __init__(self, mention_encodings, labels=None):
if labels is not None:
assert len(mention_encodings) == len(labels)
self._mention_encodings = mention_encodings
self._labels = labels
def __len__(self):
return len(self._mention_encodings)
def __getitem__(self, idx):
out = self._mention_encodings[idx]
if self._labels is not None:
out['labels'] = self._labels[idx]
return out
@classmethod
def from_jsonl(
cls,
fname,
tokenizer,
entity_tokenizer,
):
mention_encodings = []
labels = []
with open(fname, 'r') as f:
for line in tqdm(f):
data = json.loads(line)
mention_encodings.append(_encode_mention(data, tokenizer))
labels.append(entity_tokenizer(data['entity_id']))
return cls(mention_encodings, labels)
@classmethod
def load(cls, fname):
with open(fname, 'rb') as f:
state_dict = pickle.load(f)
return cls(**state_dict)
def save(self, fname):
state_dict = {
'mention_encodings': self._mention_encodings,
'labels': self._labels,
}
with open(fname, 'wb') as f:
pickle.dump(state_dict, f)
def streaming_shuffle(iterable, chunk_size=32768):
# TODO: Test that this works with multiprocessing
chunks = [iter(iterable)] * chunk_size
for chunk in itertools.zip_longest(*chunks, fillvalue=None):
chunk = [x for x in chunk if x is not None]
random.shuffle(chunk)
for element in chunk:
yield element
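# Usage sketch (hypothetical file and handler names): approximately shuffle a
# large JSONL corpus chunk-by-chunk without loading it all into memory.
#   with open('corpus.jsonl') as f:
#       for line in streaming_shuffle(f, chunk_size=1024):
#           handle(line)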
class ELIterableDataset(torch.utils.data.IterableDataset):
def __init__(
self,
fname,
tokenizer,
entity_tokenizer,
rank,
world_size,
shuffle=False,
):
super().__init__()
self._fname = fname
self._tokenizer = tokenizer
self._entity_tokenizer = entity_tokenizer
self._rank = rank
self._world_size = world_size
self._shuffle = shuffle
def __iter__(self):
def generator():
worker_info = torch.utils.data.get_worker_info()
with open(self._fname, 'r') as f:
iter_ = streaming_shuffle(f) if self._shuffle else f
for i, line in enumerate(iter_):
# Ensures data isn't repeated across processes
if self._world_size is not None:
if (i % self._world_size) != self._rank:
continue
data = json.loads(line)
model_inputs = _encode_mention(data, self._tokenizer)
model_inputs['labels'] = self._entity_tokenizer(data['entity_id'])
yield model_inputs
return generator()
class APIterableDataset(torch.utils.data.IterableDataset):
def __init__(
self,
entity_ids,
buckets,
tokenizer,
rank,
world_size,
):
super().__init__()
self._entity_ids = entity_ids
self._buckets = buckets
self._tokenizer = tokenizer
self._rank = rank
self._world_size = world_size
@classmethod
def load(cls, fname, tokenizer, rank, world_size):
entity_ids = collections.Counter()
buckets = collections.defaultdict(list)
with open(fname, 'r') as f:
for line in f:
data = json.loads(line)
entity_ids[data['entity_id']] += 1
buckets[data['entity_id']].append(data)
return cls(entity_ids, buckets, tokenizer, rank, world_size)
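    # Each iteration yields a (mention_a, mention_b, negative) triplet: two
    # encoded mentions of the same entity plus one mention of a different,
    # randomly drawn entity.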
def __iter__(self):
# Entities w/ more than 1 mention
multi_mentions = [x for x, y in self._entity_ids.items() if y > 1]
choices = list(self._entity_ids.keys())
for i, pos_entity_id in enumerate(multi_mentions):
# Ensures data isn't repeated across processes
if self._world_size is not None:
if (i % self._world_size) != self._rank:
continue
# Select two mentions from positive entity bucket
pos_1, pos_2 = random.sample(self._buckets[pos_entity_id], 2)
# Select a negative entity id at random
neg_entity_id = random.choice(choices)
while neg_entity_id == pos_entity_id:
neg_entity_id = random.choice(choices)
neg = random.choice(self._buckets[neg_entity_id])
pos_1 = _encode_mention(pos_1, self._tokenizer)
pos_2 = _encode_mention(pos_2, self._tokenizer)
neg = _encode_mention(neg, self._tokenizer)
yield pos_1, pos_2, neg
| 33.175299 | 86 | 0.620151 |
4a26a62af11cef4606bd08f0aa0c00ea83d8c152 | 3,173 | py | Python | ucsmsdk/mometa/firmware/FirmwareSystemFsmTask.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 78 | 2015-11-30T14:10:05.000Z | 2022-02-13T00:29:08.000Z | ucsmsdk/mometa/firmware/FirmwareSystemFsmTask.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 113 | 2015-11-20T09:42:46.000Z | 2022-03-16T16:53:29.000Z | ucsmsdk/mometa/firmware/FirmwareSystemFsmTask.py | Kego/ucsmsdk | 244f283a5c295cf746110bb96686d079b19927ce | [
"Apache-2.0"
] | 86 | 2015-12-12T08:22:18.000Z | 2022-01-23T03:56:34.000Z | """This module contains the general information for FirmwareSystemFsmTask ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class FirmwareSystemFsmTaskConsts:
COMPLETION_CANCELLED = "cancelled"
COMPLETION_COMPLETED = "completed"
COMPLETION_PROCESSING = "processing"
COMPLETION_SCHEDULED = "scheduled"
ITEM_APPLY_CATALOG_PACK = "ApplyCatalogPack"
ITEM_DEPLOY = "Deploy"
ITEM_NOP = "nop"
class FirmwareSystemFsmTask(ManagedObject):
"""This is FirmwareSystemFsmTask class."""
consts = FirmwareSystemFsmTaskConsts()
naming_props = set(['item'])
mo_meta = MoMeta("FirmwareSystemFsmTask", "firmwareSystemFsmTask", "task-[item]", VersionMeta.Version211a, "OutputOnly", 0xf, [], [""], ['firmwareSystem'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"completion": MoPropertyMeta("completion", "completion", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["cancelled", "completed", "processing", "scheduled"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
"flags": MoPropertyMeta("flags", "flags", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, r"""(defaultValue){0,1}""", [], []),
"item": MoPropertyMeta("item", "item", "string", VersionMeta.Version211a, MoPropertyMeta.NAMING, None, None, None, None, ["ApplyCatalogPack", "Deploy", "nop"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302c, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"seq_id": MoPropertyMeta("seq_id", "seqId", "uint", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"completion": "completion",
"dn": "dn",
"flags": "flags",
"item": "item",
"rn": "rn",
"sacl": "sacl",
"seqId": "seq_id",
"status": "status",
}
def __init__(self, parent_mo_or_dn, item, **kwargs):
self._dirty_mask = 0
self.item = item
self.child_action = None
self.completion = None
self.flags = None
self.sacl = None
self.seq_id = None
self.status = None
ManagedObject.__init__(self, "FirmwareSystemFsmTask", parent_mo_or_dn, **kwargs)
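# Usage sketch (assumed live UCSM session; this MO is marked "OutputOnly" above,
# so instances are normally queried rather than created):
#   from ucsmsdk.ucshandle import UcsHandle
#   handle = UcsHandle('<ucsm-host>', '<username>', '<password>')
#   handle.login()
#   tasks = handle.query_classid('firmwareSystemFsmTask')
#   handle.logout()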
| 52.016393 | 248 | 0.656161 |
4a26a6b527c288d82161e6050f0933670ecb88f7 | 2,627 | py | Python | docs/conf.py | darrowco/readthedocs.org | fa7fc5a24306f1f6a27c7393f381c594ab29b357 | [
"MIT"
] | 1 | 2021-08-17T00:50:48.000Z | 2021-08-17T00:50:48.000Z | docs/conf.py | darrowco/readthedocs.org | fa7fc5a24306f1f6a27c7393f381c594ab29b357 | [
"MIT"
] | null | null | null | docs/conf.py | darrowco/readthedocs.org | fa7fc5a24306f1f6a27c7393f381c594ab29b357 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
import os
import sys
from configparser import RawConfigParser
import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('..'))
sys.path.append(os.path.dirname(__file__))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "readthedocs.settings.dev")
from django.conf import settings
from django.utils import timezone
import django
django.setup()
def get_version():
"""Return package version from setup.cfg."""
config = RawConfigParser()
config.read(os.path.join('..', 'setup.cfg'))
return config.get('metadata', 'version')
sys.path.append(os.path.abspath('_ext'))
extensions = [
'sphinx.ext.autosectionlabel',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinxcontrib.httpdomain',
'djangodocs',
'doc_extensions',
'sphinx_tabs.tabs',
'sphinx-prompt',
'recommonmark',
'notfound.extension',
'sphinx_search.extension',
]
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = u'Read the Docs'
copyright = '2010-{}, Read the Docs, Inc & contributors'.format(
timezone.now().year
)
version = get_version()
release = version
exclude_patterns = ['_build']
default_role = 'obj'
intersphinx_mapping = {
'python': ('https://python.readthedocs.io/en/latest/', None),
'django': ('https://django.readthedocs.io/en/1.11.x/', None),
'sphinx': ('https://sphinx.readthedocs.io/en/latest/', None),
}
htmlhelp_basename = 'ReadTheDocsdoc'
latex_documents = [
('index', 'ReadTheDocs.tex', u'Read the Docs Documentation',
u'Eric Holscher, Charlie Leifer, Bobby Grace', 'manual'),
]
man_pages = [
('index', 'read-the-docs', u'Read the Docs Documentation',
[u'Eric Holscher, Charlie Leifer, Bobby Grace'], 1)
]
exclude_patterns = [
# 'api' # needed for ``make gettext`` to not die.
]
language = 'en'
locale_dirs = [
'locale/',
]
gettext_compact = False
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_logo = 'img/logo.svg'
html_theme_options = {
'logo_only': True,
'display_version': False,
}
# Activate autosectionlabel plugin
autosectionlabel_prefix_document = True
# sphinx-notfound-page
# https://github.com/readthedocs/sphinx-notfound-page
notfound_context = {
'title': 'Page Not Found',
'body': '''
<h1>Page Not Found</h1>
<p>Sorry, we couldn't find that page.</p>
<p>Try using the search box or go to the homepage.</p>
''',
}
def setup(app):
app.add_stylesheet('css/sphinx_prompt_css.css')
| 23.881818 | 75 | 0.69547 |
4a26a6b58dca85b3ad6b9015ab0948c9f4ef318d | 1,804 | py | Python | aliyun-python-sdk-resourcemanager/aliyunsdkresourcemanager/request/v20200331/GetPolicyRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-resourcemanager/aliyunsdkresourcemanager/request/v20200331/GetPolicyRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-resourcemanager/aliyunsdkresourcemanager/request/v20200331/GetPolicyRequest.py | jia-jerry/aliyun-openapi-python-sdk | e90f3683a250cfec5b681b5f1d73a68f0dc9970d | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkresourcemanager.endpoint import endpoint_data
class GetPolicyRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'ResourceManager', '2020-03-31', 'GetPolicy','resourcemanager')
self.set_protocol_type('https')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_PolicyType(self):
return self.get_query_params().get('PolicyType')
def set_PolicyType(self,PolicyType):
self.add_query_param('PolicyType',PolicyType)
def get_Language(self):
return self.get_query_params().get('Language')
def set_Language(self,Language):
self.add_query_param('Language',Language)
def get_PolicyName(self):
return self.get_query_params().get('PolicyName')
def set_PolicyName(self,PolicyName):
self.add_query_param('PolicyName',PolicyName) | 36.08 | 92 | 0.768293 |
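# Usage sketch (assumed credentials, region and policy name): the request is
# sent through the core AcsClient.
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = GetPolicyRequest()
#   request.set_PolicyType('Custom')
#   request.set_PolicyName('<policy-name>')
#   response = client.do_action_with_exception(request)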
4a26a75e95f521afc2f7282a4d793c55ee618818 | 14,191 | py | Python | fairseq_master/fairseq/models/transformer_lm.py | Vicky-Wil/NLE_CLR_NMT | 48758b6ee7de3f8546f1e54c6650cd0082c7dee7 | [
"MIT"
] | 41 | 2020-07-06T03:26:59.000Z | 2022-01-12T07:34:23.000Z | fairseq_master/fairseq/models/transformer_lm.py | Vicky-Wil/NLE_CLR_NMT | 48758b6ee7de3f8546f1e54c6650cd0082c7dee7 | [
"MIT"
] | 13 | 2019-08-15T18:32:54.000Z | 2020-03-31T17:32:24.000Z | fairseq_master/fairseq/models/transformer_lm.py | Vicky-Wil/NLE_CLR_NMT | 48758b6ee7de3f8546f1e54c6650cd0082c7dee7 | [
"MIT"
] | 11 | 2019-10-01T04:54:17.000Z | 2021-04-07T14:27:48.000Z | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from fairseq import options, utils
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
Embedding,
TransformerDecoder,
)
from fairseq.modules import (
AdaptiveInput,
CharacterTokenEmbedder,
)
DEFAULT_MAX_TARGET_POSITIONS = 1024
@register_model('transformer_lm')
class TransformerLanguageModel(FairseqLanguageModel):
@classmethod
def hub_models(cls):
return {
'transformer_lm.gbw.adaptive_huge': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_gbw_huge.tar.bz2',
'transformer_lm.wiki103.adaptive': 'https://dl.fbaipublicfiles.com/fairseq/models/lm/adaptive_lm_wiki103.tar.bz2',
}
def __init__(self, decoder):
super().__init__(decoder)
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', default=0.1, type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', default=0., type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-output-dim', type=int, metavar='N',
help='decoder output dimension')
parser.add_argument('--decoder-input-dim', type=int, metavar='N',
help='decoder input dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-normalize-before', default=False, action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--no-decoder-final-norm', default=False, action='store_true',
help='don\'t add an extra layernorm after the last decoder block')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion')
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--adaptive-softmax-factor', type=float, metavar='N',
help='adaptive input factor')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--character-embeddings', default=False, action='store_true',
help='if set, uses character embedding convolutions to produce token embeddings')
parser.add_argument('--character-filters', type=str, metavar='LIST',
default='[(1, 64), (2, 128), (3, 192), (4, 256), (5, 256), (6, 256), (7, 256)]',
help='size of character embeddings')
parser.add_argument('--character-embedding-dim', default=4, type=int, metavar='N',
help='size of character embeddings')
parser.add_argument('--char-embedder-highway-layers', default=2, type=int, metavar='N',
help='number of highway layers for character token embeddder')
parser.add_argument('--adaptive-input', action='store_true',
help='if set, uses adaptive input')
parser.add_argument('--adaptive-input-factor', type=float, metavar='N',
help='adaptive input factor')
parser.add_argument('--adaptive-input-cutoff', metavar='EXPR',
help='comma separated list of adaptive input cutoff points.')
parser.add_argument('--tie-adaptive-weights', action='store_true',
help='if set, ties the weights of adaptive softmax and adaptive input')
parser.add_argument('--tie-adaptive-proj', action='store_true',
help='if set, ties the projection weights of adaptive softmax and adaptive input')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
# fmt: on
@classmethod
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_lm_architecture(args)
if getattr(args, 'max_target_positions', None) is None:
args.max_target_positions = getattr(args, 'tokens_per_sample', DEFAULT_MAX_TARGET_POSITIONS)
if args.character_embeddings:
embed_tokens = CharacterTokenEmbedder(
task.source_dictionary, eval(args.character_filters),
args.character_embedding_dim, args.decoder_embed_dim,
args.char_embedder_highway_layers,
)
elif args.adaptive_input:
embed_tokens = AdaptiveInput(
len(task.source_dictionary), task.source_dictionary.pad(), args.decoder_input_dim,
args.adaptive_input_factor, args.decoder_embed_dim,
options.eval_str_list(args.adaptive_input_cutoff, type=int),
)
else:
embed_tokens = Embedding(len(task.source_dictionary), args.decoder_input_dim, task.source_dictionary.pad())
if args.tie_adaptive_weights:
assert args.adaptive_input
assert args.adaptive_input_factor == args.adaptive_softmax_factor
assert args.adaptive_softmax_cutoff == args.adaptive_input_cutoff, '{} != {}'.format(
args.adaptive_softmax_cutoff, args.adaptive_input_cutoff)
assert args.decoder_input_dim == args.decoder_output_dim
decoder = TransformerDecoder(
args, task.target_dictionary, embed_tokens, no_encoder_attn=True,
)
return TransformerLanguageModel(decoder)
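# Loading sketch (assumed torch.hub entry point; the model names come from
# hub_models() above and the call downloads the archive listed there):
#   lm = torch.hub.load('pytorch/fairseq', 'transformer_lm.wiki103.adaptive')
#   lm.eval()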
@register_model_architecture('transformer_lm', 'transformer_lm')
def base_lm_architecture(args):
# backward compatibility for older model checkpoints
if hasattr(args, 'no_tie_adaptive_proj'):
# previous models defined --no-tie-adaptive-proj, so use the existence of
# that option to determine if this is an "old" model checkpoint
args.no_decoder_final_norm = True # old models always set this to True
if args.no_tie_adaptive_proj is False:
args.tie_adaptive_proj = True
if hasattr(args, 'decoder_final_norm'):
args.no_decoder_final_norm = not args.decoder_final_norm
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 2048)
args.decoder_layers = getattr(args, 'decoder_layers', 6)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
args.adaptive_softmax_factor = getattr(args, 'adaptive_softmax_factor', 4)
args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
args.activation_fn = getattr(args, 'activation_fn', 'relu')
args.add_bos_token = getattr(args, 'add_bos_token', False)
args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
args.character_embeddings = getattr(args, 'character_embeddings', False)
args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
# Model training is not stable without this
args.decoder_normalize_before = True
args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', False)
args.adaptive_input = getattr(args, 'adaptive_input', False)
args.adaptive_input_factor = getattr(args, 'adaptive_input_factor', 4)
args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', None)
args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', False)
args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', False)
@register_model_architecture('transformer_lm', 'transformer_lm_big')
def transformer_lm_big(args):
args.decoder_layers = getattr(args, 'decoder_layers', 12)
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
base_lm_architecture(args)
@register_model_architecture('transformer_lm', 'transformer_lm_wiki103')
@register_model_architecture('transformer_lm', 'transformer_lm_baevski_wiki103')
def transformer_lm_baevski_wiki103(args):
args.decoder_layers = getattr(args, 'decoder_layers', 16)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 8)
args.dropout = getattr(args, 'dropout', 0.3)
args.adaptive_input = getattr(args, 'adaptive_input', True)
args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', True)
args.adaptive_input_cutoff = getattr(args, 'adaptive_input_cutoff', '20000,60000')
args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '20000,60000')
args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0.2)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_dropout = getattr(args, 'activation_dropout', 0.1)
args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', True)
args.tie_adaptive_proj = getattr(args, 'tie_adaptive_proj', True)
transformer_lm_big(args)
@register_model_architecture('transformer_lm', 'transformer_lm_gbw')
@register_model_architecture('transformer_lm', 'transformer_lm_baevski_gbw')
def transformer_lm_baevski_gbw(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.no_decoder_final_norm = getattr(args, 'no_decoder_final_norm', True)
transformer_lm_big(args)
@register_model_architecture('transformer_lm', 'transformer_lm_gpt')
def transformer_lm_gpt(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072)
args.decoder_layers = getattr(args, 'decoder_layers', 12)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 12)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_small')
def transformer_lm_gpt2_small(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4096)
args.decoder_layers = getattr(args, 'decoder_layers', 24)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_medium')
def transformer_lm_gpt2_medium(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1280)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 5120)
args.decoder_layers = getattr(args, 'decoder_layers', 36)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 20)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
@register_model_architecture('transformer_lm', 'transformer_lm_gpt2_big')
def transformer_lm_gpt2_big(args):
args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1600)
args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 6400)
args.decoder_layers = getattr(args, 'decoder_layers', 48)
args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 25)
args.dropout = getattr(args, 'dropout', 0.1)
args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
args.activation_fn = getattr(args, 'activation_fn', 'gelu')
base_lm_architecture(args)
| 53.753788 | 128 | 0.691776 |
4a26a764f91c9328e58e1679814a45499aa929c6 | 1,216 | py | Python | venv/Lib/site-packages/gevent/resolver/blocking.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 13 | 2018-03-28T23:07:01.000Z | 2022-03-12T06:01:21.000Z | venv/Lib/site-packages/gevent/resolver/blocking.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 11 | 2018-06-18T15:49:07.000Z | 2021-11-25T01:45:33.000Z | venv/Lib/site-packages/gevent/resolver/blocking.py | asanka9/Quession-Discussion-App-Socket.Io-NLP | 95a49a8afa572dc3908a0bade45e424c3751f191 | [
"Apache-2.0"
] | 5 | 2018-03-28T23:07:05.000Z | 2021-12-09T19:02:00.000Z | # Copyright (c) 2018 gevent contributors. See LICENSE for details.
import _socket
__all__ = [
'Resolver',
]
class Resolver(object):
"""
A resolver that directly uses the system's resolver functions.
.. caution::
This resolver is *not* cooperative.
This resolver has the lowest overhead of any resolver and
typically approaches the speed of the unmodified :mod:`socket`
functions. However, it is not cooperative, so if name resolution
blocks, the entire thread and all its greenlets will be blocked.
This can be useful during debugging, or it may be a good choice if
your operating system provides a good caching resolver (such as
macOS's Directory Services) that is usually very fast and
functionally non-blocking.
.. versionchanged:: 1.3a2
This was previously undocumented and existed in :mod:`gevent.socket`.
"""
def __init__(self, hub=None):
pass
def close(self):
pass
    for method in (
            'gethostbyname',
            'gethostbyname_ex',
            'getaddrinfo',
            'gethostbyaddr',
            'getnameinfo'
    ):
        locals()[method] = staticmethod(getattr(_socket, method))
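# Selection sketch (configuration, not code in this module): gevent picks this
# class when the resolver is set to the blocking implementation before gevent
# is imported, e.g.  GEVENT_RESOLVER=block python app.py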
| 26.434783 | 76 | 0.657072 |
4a26a7d2ef7bcc39fd71182b0cd8e2cca7a504d4 | 6,006 | py | Python | find.py | matthewkos/DREAM-ALS-Stratification-Prize4Life-Challenge | 907676e0b7f3a52ab4a131089fc562375f09170b | [
"WTFPL"
] | null | null | null | find.py | matthewkos/DREAM-ALS-Stratification-Prize4Life-Challenge | 907676e0b7f3a52ab4a131089fc562375f09170b | [
"WTFPL"
] | null | null | null | find.py | matthewkos/DREAM-ALS-Stratification-Prize4Life-Challenge | 907676e0b7f3a52ab4a131089fc562375f09170b | [
"WTFPL"
] | null | null | null | import pandas
from TicToc import TicToc
import numpy as np
from process import print_pd
import sklearn.feature_selection
def read_data():
train_filename = 'Data/Train_data_processed.pkl'
val_filename = 'Data/Val_data_processed.pkl'
train_data = pandas.read_pickle(train_filename)
val_data = pandas.read_pickle(val_filename)
return train_data, val_data
def process_into_np_sclar(x, name):
m = dict()
array_name = []
for i in x.keys():
if i in ['SubjectID', 'Age', 'Gender', 'ALSFRS_slope', 'time_event', 'status']:
m[i] = x[i].values.reshape(-1, 1)
array_name.append(i)
continue
m[i] = np.zeros((x.shape[0], 1))
array_name.append(i)
for j in range(m[i].shape[0]):
m[i][j] = np.sum(x[i][j])
mx = np.concatenate(tuple((m[i] for i in x.keys())), axis=1)
mx = mx.astype(float)
fx = mx[:, :-3]
fy = mx[:, -3]
fz = mx[:, -3:]
return fx, fy, fz, np.array(array_name)
def process_into_np_slope(x, name):
m = dict()
array_name = []
for i in x.keys():
if i in ['SubjectID', 'Age', 'Gender', 'ALSFRS_slope', 'time_event', 'status']:
m[i] = x[i].values.reshape(-1, 1)
array_name.append(i)
continue
m[i] = np.zeros((x.shape[0], 1))
array_name.append(i)
for j in range(m[i].shape[0]):
m[i][j] = (x[i][j][-1] - x[i][j][-0]) / 90.0
mx = np.concatenate(tuple((m[i] for i in x.keys())), axis=1)
mx = mx.astype(float)
fx = mx[:, :-3]
fy = mx[:, -3]
fz = mx[:, -3:]
return fx, fy, fz, np.array(array_name)
def process_into_np_avg(x, name):
m = dict()
array_name = []
for i in x.keys():
if i in ['SubjectID', 'Age', 'Gender', 'ALSFRS_slope', 'time_event', 'status']:
m[i] = x[i].values.reshape(-1, 1)
array_name.append(i)
continue
m[i] = np.zeros((x.shape[0], 1))
array_name.append(i)
for j in range(m[i].shape[0]):
m[i][j] = np.average(x[i][j])
mx = np.concatenate(tuple((m[i] for i in x.keys())), axis=1)
mx = mx.astype(float)
fx = mx[:, :-3]
fy = mx[:, -3]
fz = mx[:, -3:]
return fx, fy, fz, np.array(array_name)
def process_into_np_idv(x, name):
m = dict()
array_name = []
for i in x.keys():
if i in ['SubjectID', 'Age', 'Gender', 'ALSFRS_slope', 'time_event', 'status']:
m[i] = x[i].values.reshape(-1, 1)
array_name.append(i)
continue
m[i] = np.zeros((x.shape[0], x[i][0].shape[0]))
for k in range(m[i].shape[1]):
array_name.append(i + '_' + str(k * 15))
for j in range(m[i].shape[0]):
m[i][j] = x[i][j]
mx = np.concatenate(tuple((m[i] for i in x.keys())), axis=1)
mx = mx.astype(float)
fx = mx[:, :-3]
fy = mx[:, -3]
fz = mx[:, -3:]
# np.savetxt('Data/{}_fx.txt'.format(name), fx, header=str(array_name[:-3]))
# np.savetxt('Data/{}_fy.txt'.format(name), fy, header=str(array_name[-3]))
# np.savetxt('Data/{}_fz.txt'.format(name), fz, header=str(array_name[-3:]))
return fx, fy, fz, np.array(array_name)
def analysis_mi_slope(x, y, array_name):
print('*' * 20, "Slope", '*' * 20)
mi = sklearn.feature_selection.mutual_info_regression(x, y, n_neighbors=3)
mi_sort_index = np.argsort(mi)[::-1]
for i in range(mi.shape[0]):
print('{:>20} : {:.4f}'.format(array_name[mi_sort_index[i]], mi[mi_sort_index[i]]))
print('*' * 50)
def analysis_mi_dead(x, y, array_name):
print('*' * 20, "Dead", '*'*20)
mi = sklearn.feature_selection.mutual_info_classif(x, y, n_neighbors=3)
mi_sort_index = np.argsort(mi)[::-1]
for i in range(mi.shape[0]):
print('{:>20} : {:.4f}'.format(array_name[mi_sort_index[i]], mi[mi_sort_index[i]]))
print('*' * 50)
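# MI_slope and MI_dead rank every feature by its mutual information with the
# ALSFRS slope (regression target) and the survival-status flag (classification
# target); only the scalar-sum featurisation is active below, the other
# featurisations are kept commented out.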
def MI_slope(x):
# fx, fy, fz, array_name = process_into_np_idv(x, 'train')
# analysis_mi_slope(fx, fy, array_name)
# fx, fy, fz, array_name = process_into_np_avg(x, 'train')
# analysis_mi_slope(fx, fy, array_name)
# fx, fy, fz, array_name = process_into_np_slope(x, 'train')
# analysis_mi_slope(fx, fy, array_name)
fx, fy, fz, array_name = process_into_np_sclar(x, 'train')
analysis_mi_slope(fx, fy, array_name)
def MI_dead(x):
# fx, fy, fz, array_name = process_into_np_idv(x, 'train')
# analysis_mi_dead(fx, fz[:,-1], array_name)
# fx, fy, fz, array_name = process_into_np_avg(x, 'train')
# analysis_mi_dead(fx, fz[:, -1], array_name)
# fx, fy, fz, array_name = process_into_np_slope(x, 'train')
# analysis_mi_dead(fx, fz[:, -1], array_name)
fx, fy, fz, array_name = process_into_np_sclar(x, 'train')
analysis_mi_dead(fx, fz[:, -1], array_name)
if __name__ == '__main__':
tic = TicToc()
train_data, val_data = read_data()
print("training")
MI_slope(train_data)
MI_dead(train_data)
print("validation")
MI_slope(val_data)
MI_dead(val_data)
tic.toc()
| 34.125 | 91 | 0.566101 |
4a26a84acfcc32a81b246d3dad04b84a7dcb53a5 | 6,388 | py | Python | pytext/utils/timing.py | victorling8/pytext | bc78c39edd06797b766ba7db4bef731e6e1ac533 | [
"BSD-3-Clause"
] | null | null | null | pytext/utils/timing.py | victorling8/pytext | bc78c39edd06797b766ba7db4bef731e6e1ac533 | [
"BSD-3-Clause"
] | null | null | null | pytext/utils/timing.py | victorling8/pytext | bc78c39edd06797b766ba7db4bef731e6e1ac533 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import collections
import functools
import timeit
import traceback
import weakref
from json import dumps as json_dumps
from .ascii_table import ascii_table
class SnapshotList(list):
"""lists are not weakref-able by default."""
class Timings:
sum: float
count: int
max: float
def __init__(self, sum: float = 0.0, count: int = 0, max: float = -float("inf")):
self.sum = sum
self.count = count
self.max = max
@property
def average(self):
return self.sum / (self.count or 1)
def add(self, time):
self.sum += time
self.count += 1
self.max = max(self.max, time)
SECONDS_IN_MINUTE = 60
SECONDS_IN_HOUR = 60 * SECONDS_IN_MINUTE
SECONDS_IN_DAY = 24 * SECONDS_IN_HOUR
def format_time(seconds):
if seconds > 60:
days, seconds = int(seconds // SECONDS_IN_DAY), seconds % SECONDS_IN_DAY
hours, seconds = int(seconds // SECONDS_IN_HOUR), seconds % SECONDS_IN_HOUR
minutes, seconds = (
int(seconds // SECONDS_IN_MINUTE),
seconds % SECONDS_IN_MINUTE,
)
if days:
if minutes >= 30:
hours += 1
return f"{days}d{hours}h"
elif hours:
if seconds >= 30:
minutes += 1
return f"{hours}h{minutes}m"
else:
seconds = int(round(seconds))
return f"{minutes}m{seconds}s"
elif seconds > 1:
return f"{seconds:.1f}s"
elif seconds > 0.001:
return f"{seconds * 1000:.1f}ms"
else:
return f"{seconds * 1000000:.1f}ns"
class Snapshot:
def __init__(self):
self.times = collections.defaultdict(Timings)
self.start = timeit.default_timer()
def report(self, report_pep=False):
snapshot_total = timeit.default_timer() - self.start
def path(key):
return " -> ".join(label for label, _ in key)
def print_pep(results, snapshot_total):
for key, times in sorted(self.times.items()):
info = {
"type": path(key),
"metric": "latency",
"unit": "ms",
"value": f"{times.sum * 1000:.1f}",
}
if times.sum < 0.001:
info["unit"] = "ns"
info["value"] = f"{times.sum * 1000000:.1f}"
if times.count > 1:
info["info_string"] = " ".join(
[
"Count",
str(times.count),
"Average",
f"{times.average * 1000:.1f}",
"Max",
f"{times.max * 1000:.1f}",
]
)
print("PyTorchObserver " + json_dumps(info))
info = {
"type": "NET",
"metric": "latency",
"unit": "ms",
"value": f"{snapshot_total * 1000:.1f}",
}
print("PyTorchObserver " + json_dumps(info))
results = [
{
"name": path(key),
"total": format_time(times.sum),
"avg": format_time(times.average),
"max": format_time(times.max),
"count": times.count,
}
for key, times in sorted(self.times.items())
]
print(
ascii_table(
results,
human_column_names={
"name": "Stage",
"total": "Total",
"avg": "Average",
"max": "Max",
"count": "Count",
},
footer={"name": "Total time", "total": format_time(snapshot_total)},
alignments={"name": "<"},
)
)
if report_pep:
print_pep(results, snapshot_total)
class HierarchicalTimer:
def __init__(self):
self.current_stack = []
self.all_snapshots = SnapshotList()
def snapshot(self):
snapshot = Snapshot()
self.all_snapshots.append(weakref.ref(snapshot))
return snapshot
def _clean_snapshots(self):
self.all_snapshots = [ref for ref in self.all_snapshots if ref() is not None]
def push(self, label, caller_id):
self.current_stack.append((label, caller_id, timeit.default_timer()))
def pop(self):
label, _, start_time = self.current_stack[-1]
key = tuple((label, caller) for label, caller, _ in self.current_stack)
delta = timeit.default_timer() - start_time
for ref in self.all_snapshots:
snapshot = ref()
if snapshot is not None:
snapshot.times[key].add(delta)
self.current_stack.pop()
# Need to put this somewhere
self._clean_snapshots()
def time(self, label):
return _TimerContextManager(label, self)
class _TimerContextManager:
def __init__(self, label, timer, caller_id=None):
self.label = label
self.timer = timer
self.caller_id = caller_id
def __enter__(self):
if self.caller_id:
caller_id = self.caller_id
else:
stack = traceback.extract_stack()
caller = stack[-2]
caller_id = (caller.filename, caller.line)
self.timer.push(self.label, caller_id)
def __exit__(self, *exception_info):
self.timer.pop()
def __call__(self, fn):
"""Decorator syntax"""
caller_id = (fn.__code__.co_filename, fn.__code__.co_firstlineno)
timer_context = _TimerContextManager(self.label, self.timer, caller_id)
@functools.wraps(fn)
def wrapper(*args, **kwargs):
with timer_context:
return fn(*args, **kwargs)
return wrapper
TIMER = HierarchicalTimer()
time = TIMER.time
snapshot = TIMER.snapshot
SNAPSHOT = TIMER.snapshot()
report = SNAPSHOT.report
def report_snapshot(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
snapshot = TIMER.snapshot()
result = fn(*args, **kwargs)
snapshot.report()
return result
return wrapper
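# Usage sketch (import path assumed from this module's location in pytext):
#   from pytext.utils.timing import time, report
#   with time("train_epoch"):
#       ...          # timed work
#   report()         # prints the accumulated per-stage ASCII table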
| 28.904977 | 85 | 0.526143 |
4a26a8763d267d6f5851971232107bf610c64c60 | 4,335 | py | Python | synthetic_simpleAE.py | wushanshan/L1AE | c15b2a3f74a48d6a51d8efcea09689f0d2ebf46a | [
"Apache-2.0"
] | 19 | 2018-06-28T15:47:22.000Z | 2021-11-13T09:01:32.000Z | synthetic_simpleAE.py | wushanshan/L1AE | c15b2a3f74a48d6a51d8efcea09689f0d2ebf46a | [
"Apache-2.0"
] | 1 | 2019-05-03T14:18:52.000Z | 2019-05-05T12:31:21.000Z | synthetic_simpleAE.py | wushanshan/L1AE | c15b2a3f74a48d6a51d8efcea09689f0d2ebf46a | [
"Apache-2.0"
] | 5 | 2019-08-11T22:19:26.000Z | 2020-09-10T11:48:12.000Z | """Train an autoencoder over synthetic datasets"""
from __future__ import division
from datasets import synthetic_block_sparse_data
from utils import l1_min_avg_err
from baselines import simple_AE_l1
import os
import numpy as np
import tensorflow as tf
SEED = 43
np.random.seed(SEED)
flags = tf.app.flags
flags.DEFINE_integer("input_dim", 1000, "Input dimension [1000]")
flags.DEFINE_integer("block_dim", 10, "Size of a block in block-sparsity [10]")
flags.DEFINE_integer("sparsity_level", 1, "Sparsity in block-sparsity [1]")
flags.DEFINE_integer("emb_dim", 10, "Number of measurements [10]")
flags.DEFINE_integer("num_samples", 10000, "Number of total samples [10000]")
flags.DEFINE_integer("batch_size", 128, "Batch size [128]")
flags.DEFINE_float("learning_rate", 0.1, "Learning rate for SGD [0.01]")
flags.DEFINE_integer("max_training_epochs", 2e4,
"Maximum number of training epochs [2e4]")
flags.DEFINE_integer("display_interval", 100,
"Print the training info every [100] epochs")
flags.DEFINE_integer("validation_interval", 1,
"Compute validation loss every [10] epochs")
flags.DEFINE_integer("max_steps_not_improve", 1,
"stop training when the validation loss \
                     does not improve for [1] validation_intervals")
flags.DEFINE_string("checkpoint_dir", "ckpts/synthetic/",
"Directory name to save the checkpoints \
[ckpts/synthetic/]")
flags.DEFINE_integer("num_random_dataset", 10,
"Number of random datasets [10]")
flags.DEFINE_integer("num_experiment", 1,
"Number of experiments for each dataset [1]")
FLAGS = flags.FLAGS
# model parameters
input_dim = FLAGS.input_dim
block_dim = FLAGS.block_dim
sparsity_level = FLAGS.sparsity_level
emb_dim = FLAGS.emb_dim
num_samples = FLAGS.num_samples
# training parameters
batch_size = FLAGS.batch_size
learning_rate = FLAGS.learning_rate
max_training_epochs = FLAGS.max_training_epochs
display_interval = FLAGS.display_interval
validation_interval = FLAGS.validation_interval
max_steps_not_improve = FLAGS.max_steps_not_improve
# checkpoint directory
checkpoint_dir = FLAGS.checkpoint_dir
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# number of experiments
num_random_dataset = FLAGS.num_random_dataset
num_experiment = FLAGS.num_experiment
results_dict = {}
def merge_dict(a, b):
"""Merge two dictionaries"""
for k in b.keys():
if k in a:
a[k].append(b[k])
else:
a[k] = [b[k]]
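# e.g. merging {'err': 1} and then {'err': 2} into an empty dict accumulates
# per-key lists: {'err': [1, 2]}.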
for dataset_i in xrange(num_random_dataset):
X_train, X_valid, X_test = synthetic_block_sparse_data(
input_dim, block_dim,
sparsity_level, num_samples)
for experiment_i in xrange(num_experiment):
print("---Dataset: %d, Experiment: %d---" % (dataset_i, experiment_i))
err, G = simple_AE_l1(input_dim, emb_dim, X_train, X_valid, X_test,
batch_size, learning_rate, max_training_epochs,
display_interval, validation_interval,
max_steps_not_improve)
Y = X_test.dot(G)
sim_l1_err, sim_l1_exact, _ = l1_min_avg_err(np.transpose(G), Y,
X_test, use_pos=False)
sim_l1_err_pos, sim_l1_exact_pos, _ = l1_min_avg_err(
np.transpose(G), Y,
X_test, use_pos=True)
res = {}
res['simple_ae_err'] = np.sqrt(err) # RMSE
res['simple_ae_l1_err'] = sim_l1_err
res['simple_ae_l1_exact'] = sim_l1_exact
res['simple_ae_l1_err_pos'] = sim_l1_err_pos
res['simple_ae_l1_exact_pos'] = sim_l1_exact_pos
merge_dict(results_dict, res)
# save results_dict
file_name = ('simple_ae_input_%d_'+'block_%d_'+'sparse_%d_') % (input_dim,
block_dim,
sparsity_level)
file_name = file_name + ('emb_%d.npy') % (emb_dim)
file_path = checkpoint_dir + file_name
np.save(file_path, results_dict)
| 39.054054 | 79 | 0.635755 |
4a26a89f1010253a44386473be1090c35d7c9137 | 4,038 | py | Python | sisjuridico/apps/reportes/views.py | AnthonyWainer/sistemaJuridico | 83acf42fd20793ea05d6bff57322e17355893fc9 | [
"MIT"
] | null | null | null | sisjuridico/apps/reportes/views.py | AnthonyWainer/sistemaJuridico | 83acf42fd20793ea05d6bff57322e17355893fc9 | [
"MIT"
] | null | null | null | sisjuridico/apps/reportes/views.py | AnthonyWainer/sistemaJuridico | 83acf42fd20793ea05d6bff57322e17355893fc9 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.db.models import Max, Q
from apps.seguridad.models import permisos, User
from apps.expediente.models import expedientes, categoria, resolucion
from apps.paginacion import paginacion
from .models import historial
import json as simplejson
import datetime
today = datetime.datetime.now()
fecha = today.strftime("%Y-%m-%d")
# Create your views here.
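# permi returns the permission flags (buscar/eliminar/editar/insertar/imprimir/ver)
# that the logged-in user's profile grants for the module with the given URL.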
def permi(request,url):
idp = request.user.idperfil_id
mod = permisos.objects.filter(idmodulo__url=url, idperfil_id=idp).values('idmodulo__url','buscar','eliminar','editar','insertar','imprimir','ver')
return mod
@login_required(login_url='/')
def reportes_consolidados(request):
if request.method == 'POST':
e = expedientes.objects.filter(fecha__range= [request.POST["fecha_ini"],request.POST["fecha_ter"]])
ep = 0
a = 0
na = 0
for i in e:
if i.estado == "en proceso":
ep += 1
elif i.estado == "aprobado":
a += 1
elif i.estado == "no aprobado":
na += 1
if request.POST["v"] == "v":
data = []
data.append({"label":"Aprobado","data":a,"color": "#d3d3d3",})
data.append({"label":"No Aprobado","data":na,"color": "#79d2c0",})
data.append({"label":"En Proceso","data":ep,"color": "#1ab394",})
return HttpResponse(simplejson.dumps(data), content_type="application/json" )
return render(request,"reportes/reportes_consolidados/ajax_reportes_consolidados.html",{'ep':ep,'ap':a,'na':na})
return render(request,"reportes/reportes_consolidados/reportes_consolidados.html",{'fecha':fecha})
@login_required(login_url='/')
def reportes_detallados(request):
estado = permi(request, "reportes_detallados")
listaCategoria = [{'id':con.id,'descripcion':con.descripcion} for con in categoria.objects.all()]
listaResolucion = [{'id':con.id,'numero':con.numero} for con in resolucion.objects.all()]
if request.method == 'POST':
idc = request.POST.get("idc","")
expediente = expedientes.objects.filter(idcategoria= idc).order_by('id')
return render(request,'reportes/reportes_detallados/ajax_reportes_detallados.html',{'lista':expediente,'estado':estado})
else:
idc= 1
expediente = expedientes.objects.filter(idcategoria= idc).order_by('id')
modulo = {'url':'reportes_detallados/','n':'reportesU','estado':estado,'idcategoria':listaCategoria, 'idresolucion':listaResolucion,'fecha' :fecha}
return paginacion(request,expediente, modulo, 'reportes/reportes_detallados/reportes_detallados.html' )
def busqueda(request):
e = expedientes.objects.filter( Q(nro__contains=request.POST["nro"]), Q(idcategoria__id__contains=request.POST["idcategoria"]), Q(fecha__contains=request.POST["fecha"]), Q(estado__contains=request.POST["estado"]) )[:10]
#print (e.query)
modulo = {'lista':e}
return render(request,'reportes/reportes_detallados/ajax_reportes_detallados.html', modulo)
@login_required(login_url='/')
def reportes_transacciones(request):
estado = permi(request, "reportes_transacciones")
listaUsuarios = [{'id':con.id,'usuario':con.usuario} for con in User.objects.all()]
if request.method == 'GET':
idc= 1
historias = historial.objects.all().order_by('id')
modulo = { 'url':'reportes_transacciones/','n':'reportesU','estado':estado,'idusuario':listaUsuarios,'fecha' :fecha}
return paginacion(request,historias, modulo, 'reportes/reportes_transacciones/reportes_transacciones.html' )
def transacciones(request):
e = historial.objects.filter( idusuario_id=request.POST["idusuario"]).filter(Q(fecha__contains=request.POST["fecha"]))[:10]
modulo = {'lista':e}
return render(request,'reportes/reportes_transacciones/ajax_reportes_transacciones.html', modulo) | 48.650602 | 223 | 0.684002 |
4a26a8cd452530b8d89bbe6d84f7ee0a5ec35580 | 3,241 | py | Python | koran.py | Reylyer/discord-koran | b2ca8fc728c2944a691ed5ce67007118a8017201 | [
"MIT"
] | null | null | null | koran.py | Reylyer/discord-koran | b2ca8fc728c2944a691ed5ce67007118a8017201 | [
"MIT"
] | null | null | null | koran.py | Reylyer/discord-koran | b2ca8fc728c2944a691ed5ce67007118a8017201 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import requests, json, asyncio
import argparse, inspect
from datetime import datetime
from datetime import timedelta
class Koran(commands.Cog):
tformat = '%Y-%m-%dT%H:%M:%S+00:00'
print_tformat = '%a, %d %b %Y %H:%M'
def __init__(self, client) -> None:
super().__init__()
self.client = client
self.__get_self_methods()
self.__create_parser()
# ENTRY
@commands.command()
async def koran(self, ctx, *, argument):
await ctx.send(f"args: {argument}")
try:
kwargs = self.parser.parse_args(argument.split())
await ctx.send(f"kwargs: {json.dumps(kwargs.__dict__, indent=4)}")
await self.ctftime(ctx, kwargs.__dict__)
except Exception as e:
await ctx.send(e)
# SUBKORAN
async def ctftime(self, ctx, kwargs):
await ctx.send("Berhasil subs ke ctftime")
asd_json = ''
while True:
print("requesting")
cjson = request_ctftime()
if cjson and asd_json != cjson:
asd_json = cjson
embed = discord.Embed(color = discord.Color.from_rgb(215, 0, 10))
print(type(cjson))
start = datetime.strptime(cjson['start'], self.tformat) + timedelta(hours=7)
fin = datetime.strptime(cjson['finish'], self.tformat) + timedelta(hours=7)
embed.set_author(name="CTFtime", url=cjson['ctftime_url'], icon_url="https://pbs.twimg.com/profile_images/2189766987/ctftime-logo-avatar_400x400.png")
desc = f"[{cjson['title']}]({cjson['url']})\n\n"
desc += f"Format: {cjson['format']}\n"
desc += f"Time: {start.strftime(self.print_tformat)} - {fin.strftime(self.print_tformat)}\n"
desc += f"Duration: {cjson['duration']['days']} days {cjson['duration']['hours']} hours\n"
desc += f"Weight: {cjson['weight']}\n"
desc += f"Restrictions: {cjson['restrictions']}\n"
embed.description = desc
embed.set_image(url=cjson['logo'])
await ctx.send("**New CTF!!!**", embed=embed)
# except Exception as e:
# await ctx.send(e)
await asyncio.sleep(kwargs['delay'])
def whatsapp(self, ctx, **kwargs):
pass
def nhentai(self, ctx, **kwargs):
pass
# INTERNAL USE
def __create_parser(self):
self.parser = parser = argparse.ArgumentParser()
parser.add_argument("subkoran", type=str, help="pilihan langganan", choices=self.methods)
parser.add_argument("-d", "--delay", type=int, default=3600, help="delay between request(for request based)")
def __get_self_methods(self):
self.methods = mtds = [t[0] for t in inspect.getmembers(self, predicate=inspect.ismethod) if not t[0].startswith('_Koran_')]
print(mtds)
# mtds.remove("koran")
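# Fetches the single newest event (limit=1) from the CTFtime events API and
# returns it as a dict, or False when the request does not return HTTP 200.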
def request_ctftime():
r = requests.get("https://ctftime.org/api/v1/events/?limit=1", headers = {"User-Agent": 'browser palsu'})
if r.status_code == 200:
return json.loads(r.text)[0]
else:
return False
| 36.41573 | 166 | 0.586239 |
4a26a9198529978fd1fc3a8ba63016402c9a0475 | 753 | py | Python | livemark/plugins/comments/plugin.py | AyrtonB/livemark | f8c49d449ea6242c674cf345823468aaabea6e6b | [
"MIT"
] | 1 | 2021-10-05T15:52:17.000Z | 2021-10-05T15:52:17.000Z | livemark/plugins/comments/plugin.py | AyrtonB/livemark | f8c49d449ea6242c674cf345823468aaabea6e6b | [
"MIT"
] | null | null | null | livemark/plugins/comments/plugin.py | AyrtonB/livemark | f8c49d449ea6242c674cf345823468aaabea6e6b | [
"MIT"
] | null | null | null | from ...plugin import Plugin
# TODO: use base url from the SitePlugin
class CommentsPlugin(Plugin):
identity = "comments"
priority = 45
validity = {
"type": "object",
"properties": {
"enable": {"type": "boolean"},
},
}
# Context
@property
def code(self):
return self.config.get("code")
@property
def link(self):
return self.config.get("link")
# Process
def process_markup(self, markup):
if self.code and self.link:
markup.add_style("style.css")
markup.add_script("https://livemark.disqus.com/count.js")
markup.add_script("script.js")
markup.add_markup("markup.html", target="#livemark-main")
| 21.514286 | 69 | 0.572377 |
4a26a9d7c775441f0c0a7e05fcdf2b01a2d59f1b | 11,353 | py | Python | config.py | KatanoShingo/Keyhac | 4b68bc866f6bfe9cbf7f43dcc8f60d2022fd88e1 | [
"MIT"
] | 3 | 2020-05-23T13:39:52.000Z | 2021-02-22T07:48:43.000Z | config.py | KatanoShingo/Keyhac | 4b68bc866f6bfe9cbf7f43dcc8f60d2022fd88e1 | [
"MIT"
] | 2 | 2020-04-12T11:10:50.000Z | 2020-04-12T15:27:47.000Z | config.py | KatanoShingo/Keyhac | 4b68bc866f6bfe9cbf7f43dcc8f60d2022fd88e1 | [
"MIT"
] | 1 | 2021-03-06T11:01:54.000Z | 2021-03-06T11:01:54.000Z | import sys
import os
import datetime
import pyauto
from keyhac import *
class MAP:
def __init__(self, keymap):
self.ime = False
        # User settings
MultiDisplay = True
        # Keymap that applies no matter which window has focus
keymap_global = keymap.defineWindowKeymap()
        # Application switching
keymap_global[ "RS-Z" ] = "Win-1"
keymap_global[ "RS-X" ] = "Win-2"
keymap_global[ "RS-C" ] = "Win-3"
keymap_global[ "RS-V" ] = "Win-4"
keymap_global[ "RS-B" ] = "Win-5"
if MultiDisplay:
            # Remap the window-move shortcuts
keymap_global[ "LC-LW-Left" ] = "LW-LS-Left"
keymap_global[ "LC-LW-Right" ] = "LW-LS-Right"
            # Add a virtual desktop
keymap_global[ "LC-LW-LA-N" ] = "LC-LW-D"
            # Close the virtual desktop
keymap_global[ "LC-LW-LA-Delete" ] = "LC-LW-F4"
            # Switch between virtual desktops
keymap_global[ "LC-LW-LA-Left" ] = "LC-LW-Right"
keymap_global[ "LC-LW-LA-Right" ] = "LC-LW-Left"
        # Swap underscore and Shift-underscore
keymap_global[ "Underscore" ] = "S-Underscore"
keymap_global[ "S-Underscore" ] = "Underscore"
        # Left-hand arrow keys (WASD)
keymap_global[ "RA-W" ] = "Up"
keymap_global[ "RA-A" ] = "Left"
keymap_global[ "RA-D" ] = "Right"
keymap_global[ "RA-S" ] = "Down"
def key_A():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "A" )()
keymap_global[ "A" ] = key_A
def key_B():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "B" )()
keymap_global[ "B" ] = key_B
def key_C():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "C" )()
keymap_global[ "C" ] = key_C
def key_D():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "D" )()
keymap_global[ "D" ] = key_D
def key_E():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "E" )()
keymap_global[ "E" ] = key_E
def key_F():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "F" )()
keymap_global[ "F" ] = key_F
def key_G():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "G" )()
keymap_global[ "G" ] = key_G
def key_H():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "H" )()
keymap_global[ "H" ] = key_H
def key_I():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "I" )()
keymap_global[ "I" ] = key_I
def key_J():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "J" )()
keymap_global[ "J" ] = key_J
def key_L():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "L" )()
keymap_global[ "L" ] = key_L
def key_N():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "N" )()
keymap_global[ "N" ] = key_N
def key_M():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "M" )()
keymap_global[ "M" ] = key_M
def key_O():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "O" )()
keymap_global[ "O" ] = key_O
def key_P():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "P" )()
keymap_global[ "P" ] = key_P
def key_Q():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "Q" )()
keymap_global[ "Q" ] = key_Q
def key_R():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "R" )()
keymap_global[ "R" ] = key_R
def key_S():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "S" )()
keymap_global[ "S" ] = key_S
def key_T():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "T" )()
keymap_global[ "T" ] = key_T
def key_U():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "U" )()
keymap_global[ "U" ] = key_U
def key_V():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "V" )()
keymap_global[ "V" ] = key_V
def key_W():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "W" )()
keymap_global[ "W" ] = key_W
def key_X():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "X" )()
keymap_global[ "X" ] = key_X
def key_Y():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "Y" )()
keymap_global[ "Y" ] = key_Y
def key_Z():
if ( self.ime ):
keymap.getWindow().setImeStatus(1)
else:
keymap.getWindow().setImeStatus(0)
return keymap.InputKeyCommand( "Z" )()
keymap_global[ "Z" ] = key_Z
#CapsLock
def key_240():
self.ime = not self.ime
keymap.InputKeyCommand( "(240)" )()
keymap_global[ "(240)" ] = key_240
#Hankaku/Zenkaku (IME toggle) key
def key_243():
self.ime = not self.ime
keymap.InputKeyCommand( "(243)" )()
keymap_global[ "(243)" ] = key_243
def key_244():
self.ime = not self.ime
keymap.InputKeyCommand( "(244)" )()
keymap_global[ "(244)" ] = key_244
#Number-key handling (force IME off while typing digits)
def num_1():
if ( keymap.wnd.getImeStatus() ):
keymap.getWindow().setImeStatus(0)
keymap.InputKeyCommand( "1" )()
keymap_global[ "1" ] = num_1
def num_2():
if ( keymap.wnd.getImeStatus() ):
keymap.getWindow().setImeStatus(0)
keymap.InputKeyCommand( "2" )()
keymap_global[ "2" ] = num_2
def num_3():
if ( keymap.wnd.getImeStatus() ):
keymap.getWindow().setImeStatus(0)
keymap.InputKeyCommand( "3" )()
keymap_global[ "3" ] = num_3
def num_4():
if ( keymap.wnd.getImeStatus() ):
keymap.getWindow().setImeStatus(0)
keymap.InputKeyCommand( "4" )()
keymap_global[ "4" ] = num_4
def num_5():
if ( keymap.wnd.getImeStatus() ):
keymap.getWindow().setImeStatus(0)
keymap.InputKeyCommand( "5" )()
keymap_global[ "5" ] = num_5
def num_6():
if ( keymap.wnd.getImeStatus() ):
keymap.getWindow().setImeStatus(0)
keymap.InputKeyCommand( "6" )()
keymap_global[ "6" ] = num_6
def num_7():
if ( keymap.wnd.getImeStatus() ):
keymap.getWindow().setImeStatus(0)
keymap.InputKeyCommand( "7" )()
keymap_global[ "7" ] = num_7
def num_8():
if ( keymap.wnd.getImeStatus() ):
keymap.getWindow().setImeStatus(0)
keymap.InputKeyCommand( "8" )()
keymap_global[ "8" ] = num_8
def num_9():
if ( keymap.wnd.getImeStatus() ):
keymap.getWindow().setImeStatus(0)
keymap.InputKeyCommand( "9" )()
keymap_global[ "9" ] = num_9
def num_0():
if ( keymap.wnd.getImeStatus() ):
keymap.getWindow().setImeStatus(0)
keymap.InputKeyCommand( "0" )()
keymap_global[ "0" ] = num_0
#Replace Insert with Backspace
keymap.replaceKey( 45, 8 )
#Remap Muhenkan and Henkan to otherwise-unused virtual keys
keymap.replaceKey( 28, 58 )
keymap.replaceKey( 29, 59 )
#IME on
def ime_on():
keymap.getWindow().setImeStatus(1)
self.ime = True
keymap_global[ "(58)" ] = ime_on
#IME off
def ime_off():
keymap.getWindow().setImeStatus(0)
self.ime = False
keymap_global[ "(59)" ] = ime_off
keymap_chrome = keymap.defineWindowKeymap( exe_name="chrome.exe" )
#Chrome: tab switching
keymap_chrome[ "RS-LS-Z" ] = "C-1"
keymap_chrome[ "RS-LS-X" ] = "C-2"
keymap_chrome[ "RS-LS-C" ] = "C-3"
keymap_chrome[ "RS-LS-V" ] = "C-4"
keymap_chrome[ "RS-LS-B" ] = "C-5"
#Chrome: close tab
keymap_chrome[ "LC-Delete" ] = "LC-W"
def configure(keymap):
if 1:
MAP(keymap) | 32.344729 | 74 | 0.485334 |
4a26ad07d036befce0b4dffb7f8c99e28cbb90ce | 931 | py | Python | python/examples/guess_number_1.py | RodolfoDiaz/CodeLibrary | 603ac9eee5b014bef8f04545bc4e73b0ec376131 | [
"MIT"
] | 1 | 2018-10-11T14:29:40.000Z | 2018-10-11T14:29:40.000Z | python/examples/guess_number_1.py | RodolfoDiaz/CodeLibrary | 603ac9eee5b014bef8f04545bc4e73b0ec376131 | [
"MIT"
] | 2 | 2020-04-26T21:12:00.000Z | 2020-10-28T21:10:03.000Z | python/examples/guess_number_1.py | RodolfoDiaz/CodeLibrary | 603ac9eee5b014bef8f04545bc4e73b0ec376131 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Code sample: using random numbers and loops."""
import random
minNumber = 1 # input('Enter the minimum number: ')
maxNumber = 5 # input('Enter the maximum number: ')
attempts = 3
if (minNumber < maxNumber):
random_number = random.randint(minNumber, maxNumber)
count = 0
while count < attempts:
count = count + 1
guess = int(input("Guess the 'Magic Number'."
" Enter a value between {0} and {1}: "
.format(minNumber, maxNumber)))
if random_number == guess:
print('You got it, the magic number was {0}'.format(guess))
break
elif random_number != guess and count >= attempts:
print('You lost!, the magic number was {0}'.format(random_number))
else:
print('Try again...')
else:
print('The maximum number must be greater than the minimum number')
| 33.25 | 78 | 0.586466 |
4a26ad433e9264c22f384439d8ce50576eb12806 | 13,422 | py | Python | venv/lib/python3.9/site-packages/pyarrow/tests/test_gandiva.py | qarik-hanrattyjen/apache-airflow-backport-providers-google-2021.3.3 | 630dcef73e6a258b6e9a52f934e2dd912ce741f8 | [
"Apache-2.0"
] | 3 | 2021-03-29T19:21:08.000Z | 2021-12-31T09:30:11.000Z | python/pyarrow/tests/test_gandiva.py | royalstream/arrow | eb20a3dbc7732f612e5ce54be5f4291440829350 | [
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 95 | 2020-12-08T14:14:27.000Z | 2021-07-17T00:43:39.000Z | python/pyarrow/tests/test_gandiva.py | royalstream/arrow | eb20a3dbc7732f612e5ce54be5f4291440829350 | [
"CC-BY-3.0",
"Apache-2.0",
"CC0-1.0",
"MIT"
] | 9 | 2021-02-24T10:27:55.000Z | 2022-03-28T08:27:12.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import datetime
import pytest
import pyarrow as pa
@pytest.mark.gandiva
def test_tree_exp_builder():
import pyarrow.gandiva as gandiva
builder = gandiva.TreeExprBuilder()
field_a = pa.field('a', pa.int32())
field_b = pa.field('b', pa.int32())
schema = pa.schema([field_a, field_b])
field_result = pa.field('res', pa.int32())
node_a = builder.make_field(field_a)
node_b = builder.make_field(field_b)
condition = builder.make_function("greater_than", [node_a, node_b],
pa.bool_())
if_node = builder.make_if(condition, node_a, node_b, pa.int32())
expr = builder.make_expression(if_node, field_result)
projector = gandiva.make_projector(
schema, [expr], pa.default_memory_pool())
# Gandiva generates compute kernel function named `@expr_X`
assert projector.llvm_ir.find("@expr_") != -1
a = pa.array([10, 12, -20, 5], type=pa.int32())
b = pa.array([5, 15, 15, 17], type=pa.int32())
e = pa.array([10, 15, 15, 17], type=pa.int32())
input_batch = pa.RecordBatch.from_arrays([a, b], names=['a', 'b'])
r, = projector.evaluate(input_batch)
assert r.equals(e)
@pytest.mark.gandiva
def test_table():
import pyarrow.gandiva as gandiva
table = pa.Table.from_arrays([pa.array([1.0, 2.0]), pa.array([3.0, 4.0])],
['a', 'b'])
builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
node_b = builder.make_field(table.schema.field("b"))
sum = builder.make_function("add", [node_a, node_b], pa.float64())
field_result = pa.field("c", pa.float64())
expr = builder.make_expression(sum, field_result)
projector = gandiva.make_projector(
table.schema, [expr], pa.default_memory_pool())
# TODO: Add .evaluate function which can take Tables instead of
# RecordBatches
r, = projector.evaluate(table.to_batches()[0])
e = pa.array([4.0, 6.0])
assert r.equals(e)
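# Editor's sketch (not part of the original test suite): until Gandiva
# projectors accept Tables directly (see the TODO above), a table can be
# projected batch by batch and the per-batch outputs stitched back together.
# Only pyarrow APIs already imported in this module are used here.
def _project_table(projector, table):
    # Evaluate the projector on every record batch and collect the first
    # (and only) output column of each evaluation into one chunked array.
    chunks = [projector.evaluate(batch)[0] for batch in table.to_batches()]
    return pa.chunked_array(chunks)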
@pytest.mark.gandiva
def test_filter():
import pyarrow.gandiva as gandiva
table = pa.Table.from_arrays([pa.array([1.0 * i for i in range(10000)])],
['a'])
builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
thousand = builder.make_literal(1000.0, pa.float64())
cond = builder.make_function("less_than", [node_a, thousand], pa.bool_())
condition = builder.make_condition(cond)
filter = gandiva.make_filter(table.schema, condition)
# Gandiva generates compute kernel function named `@expr_X`
assert filter.llvm_ir.find("@expr_") != -1
result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool())
assert result.to_array().equals(pa.array(range(1000), type=pa.uint32()))
@pytest.mark.gandiva
def test_in_expr():
import pyarrow.gandiva as gandiva
arr = pa.array(["ga", "an", "nd", "di", "iv", "va"])
table = pa.Table.from_arrays([arr], ["a"])
# string
builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
cond = builder.make_in_expression(node_a, ["an", "nd"], pa.string())
condition = builder.make_condition(cond)
filter = gandiva.make_filter(table.schema, condition)
result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool())
assert result.to_array().equals(pa.array([1, 2], type=pa.uint32()))
# int32
arr = pa.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 4])
table = pa.Table.from_arrays([arr.cast(pa.int32())], ["a"])
node_a = builder.make_field(table.schema.field("a"))
cond = builder.make_in_expression(node_a, [1, 5], pa.int32())
condition = builder.make_condition(cond)
filter = gandiva.make_filter(table.schema, condition)
result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool())
assert result.to_array().equals(pa.array([1, 3, 4, 8], type=pa.uint32()))
# int64
arr = pa.array([3, 1, 4, 1, 5, 9, 2, 6, 5, 4])
table = pa.Table.from_arrays([arr], ["a"])
node_a = builder.make_field(table.schema.field("a"))
cond = builder.make_in_expression(node_a, [1, 5], pa.int64())
condition = builder.make_condition(cond)
filter = gandiva.make_filter(table.schema, condition)
result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool())
assert result.to_array().equals(pa.array([1, 3, 4, 8], type=pa.uint32()))
@pytest.mark.skip(reason="Gandiva C++ did not have *real* binary, "
"time and date support.")
def test_in_expr_todo():
import pyarrow.gandiva as gandiva
# TODO: Implement reasonable support for timestamp, time & date.
# Current exceptions:
# pyarrow.lib.ArrowException: ExpressionValidationError:
# Evaluation expression for IN clause returns XXXX values are of typeXXXX
# binary
arr = pa.array([b"ga", b"an", b"nd", b"di", b"iv", b"va"])
table = pa.Table.from_arrays([arr], ["a"])
builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
cond = builder.make_in_expression(node_a, [b'an', b'nd'], pa.binary())
condition = builder.make_condition(cond)
filter = gandiva.make_filter(table.schema, condition)
result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool())
assert result.to_array().equals(pa.array([1, 2], type=pa.uint32()))
# timestamp
datetime_1 = datetime.datetime.utcfromtimestamp(1542238951.621877)
datetime_2 = datetime.datetime.utcfromtimestamp(1542238911.621877)
datetime_3 = datetime.datetime.utcfromtimestamp(1542238051.621877)
arr = pa.array([datetime_1, datetime_2, datetime_3])
table = pa.Table.from_arrays([arr], ["a"])
builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
cond = builder.make_in_expression(node_a, [datetime_2], pa.timestamp('ms'))
condition = builder.make_condition(cond)
filter = gandiva.make_filter(table.schema, condition)
result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool())
assert list(result.to_array()) == [1]
# time
time_1 = datetime_1.time()
time_2 = datetime_2.time()
time_3 = datetime_3.time()
arr = pa.array([time_1, time_2, time_3])
table = pa.Table.from_arrays([arr], ["a"])
builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
cond = builder.make_in_expression(node_a, [time_2], pa.time64('ms'))
condition = builder.make_condition(cond)
filter = gandiva.make_filter(table.schema, condition)
result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool())
assert list(result.to_array()) == [1]
# date
date_1 = datetime_1.date()
date_2 = datetime_2.date()
date_3 = datetime_3.date()
arr = pa.array([date_1, date_2, date_3])
table = pa.Table.from_arrays([arr], ["a"])
builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
cond = builder.make_in_expression(node_a, [date_2], pa.date32())
condition = builder.make_condition(cond)
filter = gandiva.make_filter(table.schema, condition)
result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool())
assert list(result.to_array()) == [1]
@pytest.mark.gandiva
def test_boolean():
import pyarrow.gandiva as gandiva
table = pa.Table.from_arrays([
pa.array([1., 31., 46., 3., 57., 44., 22.]),
pa.array([5., 45., 36., 73., 83., 23., 76.])],
['a', 'b'])
builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
node_b = builder.make_field(table.schema.field("b"))
fifty = builder.make_literal(50.0, pa.float64())
eleven = builder.make_literal(11.0, pa.float64())
cond_1 = builder.make_function("less_than", [node_a, fifty], pa.bool_())
cond_2 = builder.make_function("greater_than", [node_a, node_b],
pa.bool_())
cond_3 = builder.make_function("less_than", [node_b, eleven], pa.bool_())
cond = builder.make_or([builder.make_and([cond_1, cond_2]), cond_3])
condition = builder.make_condition(cond)
filter = gandiva.make_filter(table.schema, condition)
result = filter.evaluate(table.to_batches()[0], pa.default_memory_pool())
assert result.to_array().equals(pa.array([0, 2, 5], type=pa.uint32()))
@pytest.mark.gandiva
def test_literals():
import pyarrow.gandiva as gandiva
builder = gandiva.TreeExprBuilder()
builder.make_literal(True, pa.bool_())
builder.make_literal(0, pa.uint8())
builder.make_literal(1, pa.uint16())
builder.make_literal(2, pa.uint32())
builder.make_literal(3, pa.uint64())
builder.make_literal(4, pa.int8())
builder.make_literal(5, pa.int16())
builder.make_literal(6, pa.int32())
builder.make_literal(7, pa.int64())
builder.make_literal(8.0, pa.float32())
builder.make_literal(9.0, pa.float64())
builder.make_literal("hello", pa.string())
builder.make_literal(b"world", pa.binary())
builder.make_literal(True, "bool")
builder.make_literal(0, "uint8")
builder.make_literal(1, "uint16")
builder.make_literal(2, "uint32")
builder.make_literal(3, "uint64")
builder.make_literal(4, "int8")
builder.make_literal(5, "int16")
builder.make_literal(6, "int32")
builder.make_literal(7, "int64")
builder.make_literal(8.0, "float32")
builder.make_literal(9.0, "float64")
builder.make_literal("hello", "string")
builder.make_literal(b"world", "binary")
with pytest.raises(TypeError):
builder.make_literal("hello", pa.int64())
with pytest.raises(TypeError):
builder.make_literal(True, None)
@pytest.mark.gandiva
def test_regex():
import pyarrow.gandiva as gandiva
elements = ["park", "sparkle", "bright spark and fire", "spark"]
data = pa.array(elements, type=pa.string())
table = pa.Table.from_arrays([data], names=['a'])
builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
regex = builder.make_literal("%spark%", pa.string())
like = builder.make_function("like", [node_a, regex], pa.bool_())
field_result = pa.field("b", pa.bool_())
expr = builder.make_expression(like, field_result)
projector = gandiva.make_projector(
table.schema, [expr], pa.default_memory_pool())
r, = projector.evaluate(table.to_batches()[0])
b = pa.array([False, True, True, True], type=pa.bool_())
assert r.equals(b)
@pytest.mark.gandiva
def test_get_registered_function_signatures():
import pyarrow.gandiva as gandiva
signatures = gandiva.get_registered_function_signatures()
assert type(signatures[0].return_type()) is pa.DataType
assert type(signatures[0].param_types()) is list
assert hasattr(signatures[0], "name")
@pytest.mark.gandiva
def test_filter_project():
import pyarrow.gandiva as gandiva
mpool = pa.default_memory_pool()
# Create a table with some sample data
array0 = pa.array([10, 12, -20, 5, 21, 29], pa.int32())
array1 = pa.array([5, 15, 15, 17, 12, 3], pa.int32())
array2 = pa.array([1, 25, 11, 30, -21, None], pa.int32())
table = pa.Table.from_arrays([array0, array1, array2], ['a', 'b', 'c'])
field_result = pa.field("res", pa.int32())
builder = gandiva.TreeExprBuilder()
node_a = builder.make_field(table.schema.field("a"))
node_b = builder.make_field(table.schema.field("b"))
node_c = builder.make_field(table.schema.field("c"))
greater_than_function = builder.make_function("greater_than",
[node_a, node_b], pa.bool_())
filter_condition = builder.make_condition(
greater_than_function)
project_condition = builder.make_function("less_than",
[node_b, node_c], pa.bool_())
if_node = builder.make_if(project_condition,
node_b, node_c, pa.int32())
expr = builder.make_expression(if_node, field_result)
# Build a filter for the expressions.
filter = gandiva.make_filter(table.schema, filter_condition)
# Build a projector for the expressions.
projector = gandiva.make_projector(
table.schema, [expr], mpool, "UINT32")
# Evaluate filter
selection_vector = filter.evaluate(table.to_batches()[0], mpool)
# Evaluate project
r, = projector.evaluate(
table.to_batches()[0], selection_vector)
exp = pa.array([1, -21, None], pa.int32())
assert r.equals(exp)
| 36.672131 | 79 | 0.668008 |
4a26af76283ec0a8023f05f5cae6c1e188af7216 | 2,815 | py | Python | solmate/programs/system_program/instructions/create_account.py | nimily/solmate | acd12d6e79598ca53fcb94b0fc3c524cb6c50b7c | [
"MIT"
] | 3 | 2022-03-11T19:06:15.000Z | 2022-03-22T02:40:53.000Z | solmate/programs/system_program/instructions/create_account.py | nimily/solmate | acd12d6e79598ca53fcb94b0fc3c524cb6c50b7c | [
"MIT"
] | null | null | null | solmate/programs/system_program/instructions/create_account.py | nimily/solmate | acd12d6e79598ca53fcb94b0fc3c524cb6c50b7c | [
"MIT"
] | null | null | null | # LOCK-BEGIN[imports]: DON'T MODIFY
from .instruction_tag import InstructionTag
from dataclasses import dataclass
from io import BytesIO
from podite import (
BYTES_CATALOG,
U64,
)
from solana.publickey import PublicKey
from solana.transaction import (
AccountMeta,
TransactionInstruction,
)
from solmate.programs.system_program.addrs import PROGRAM_ID
from solmate.utils import to_account_meta
from typing import (
List,
Optional,
Union,
)
# LOCK-END
# LOCK-BEGIN[ix_cls(create_account)]: DON'T MODIFY
@dataclass
class CreateAccountIx:
program_id: PublicKey
# account metas
from_pubkey: AccountMeta
to_pubkey: AccountMeta
remaining_accounts: Optional[List[AccountMeta]]
# data fields
lamports: U64
space: U64
owner: PublicKey
def to_instruction(self):
keys = []
keys.append(self.from_pubkey)
keys.append(self.to_pubkey)
if self.remaining_accounts is not None:
keys.extend(self.remaining_accounts)
buffer = BytesIO()
buffer.write(InstructionTag.to_bytes(InstructionTag.CREATE_ACCOUNT))
buffer.write(BYTES_CATALOG.pack(U64, self.lamports))
buffer.write(BYTES_CATALOG.pack(U64, self.space))
buffer.write(BYTES_CATALOG.pack(PublicKey, self.owner))
return TransactionInstruction(
keys=keys,
program_id=self.program_id,
data=buffer.getvalue(),
)
# LOCK-END
# LOCK-BEGIN[ix_fn(create_account)]: DON'T MODIFY
def create_account(
from_pubkey: Union[str, PublicKey, AccountMeta],
to_pubkey: Union[str, PublicKey, AccountMeta],
lamports: U64,
space: U64,
owner: PublicKey,
remaining_accounts: Optional[List[Union[str, PublicKey, AccountMeta]]] = None,
program_id: PublicKey = PROGRAM_ID,
):
if isinstance(from_pubkey, (str, PublicKey)):
from_pubkey = to_account_meta(
from_pubkey,
is_signer=True,
is_writable=True,
)
if isinstance(to_pubkey, (str, PublicKey)):
to_pubkey = to_account_meta(
to_pubkey,
is_signer=True,
is_writable=True,
)
if isinstance(remaining_accounts, list):
for i in range(len(remaining_accounts)):
if isinstance(remaining_accounts[i], (str, PublicKey)):
remaining_accounts[i] = to_account_meta(
remaining_accounts[i],
is_signer=False,
is_writable=False,
)
return CreateAccountIx(
program_id=program_id,
from_pubkey=from_pubkey,
to_pubkey=to_pubkey,
remaining_accounts=remaining_accounts,
lamports=lamports,
space=space,
owner=owner,
).to_instruction()
# LOCK-END
| 25.825688 | 82 | 0.652575 |
4a26afa35d4665b71ed5ce927337e57e32bc9eec | 691 | py | Python | pitty/bin/django-admin.py | othienoJoe/Moringa-Tribune | 7bb86e1b45f81dba3079abf6ab1f302d22234cd1 | [
"Unlicense"
] | null | null | null | pitty/bin/django-admin.py | othienoJoe/Moringa-Tribune | 7bb86e1b45f81dba3079abf6ab1f302d22234cd1 | [
"Unlicense"
] | null | null | null | pitty/bin/django-admin.py | othienoJoe/Moringa-Tribune | 7bb86e1b45f81dba3079abf6ab1f302d22234cd1 | [
"Unlicense"
] | null | null | null | #!/home/moringa/Desktop/Moringa-Tribune/pitty/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| 31.409091 | 80 | 0.727931 |
4a26afd7fa3c76dfe085506dd6ddb52c150528e5 | 181 | py | Python | settings/channel_archiver/NIHSAMPLE_FROZEN_OPTICAL_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | null | null | null | settings/channel_archiver/NIHSAMPLE_FROZEN_OPTICAL_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 1 | 2019-10-22T21:28:31.000Z | 2019-10-22T21:39:12.000Z | settings/channel_archiver/NIHSAMPLE_FROZEN_OPTICAL_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 2 | 2019-06-06T15:06:46.000Z | 2020-07-20T02:03:22.000Z | MEAN.filename = '//mx340hs/data/anfinrud_1903/Archive/NIH.SAMPLE_FROZEN_OPTICAL.MEAN.txt'
MEAN2.filename = '//mx340hs/data/anfinrud_1903/Archive/NIH.SAMPLE_FROZEN_OPTICAL.MEAN2.txt' | 90.5 | 91 | 0.828729 |
4a26b0a39ecf4bca8d56c7539b428348d71afc73 | 5,940 | py | Python | migrations/versions/9aa069b2417d_.py | jiejiang/inventory | 9ce34a54812a23785504bbe62d914afde797895f | [
"Apache-2.0"
] | 1 | 2021-10-06T04:28:09.000Z | 2021-10-06T04:28:09.000Z | migrations/versions/9aa069b2417d_.py | jiejiang/inventory | 9ce34a54812a23785504bbe62d914afde797895f | [
"Apache-2.0"
] | 6 | 2021-03-19T01:45:44.000Z | 2022-03-11T23:52:09.000Z | migrations/versions/9aa069b2417d_.py | jiejiang/inventory | 9ce34a54812a23785504bbe62d914afde797895f | [
"Apache-2.0"
] | null | null | null | """empty message
Revision ID: 9aa069b2417d
Revises: None
Create Date: 2016-08-22 15:22:08.772102
"""
# revision identifiers, used by Alembic.
revision = '9aa069b2417d'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('city',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('name', sa.String(length=32), nullable=False),
sa.Column('type', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['parent_id'], ['city.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_city_name'), 'city', ['name'], unique=False)
op.create_index(op.f('ix_city_type'), 'city', ['type'], unique=False)
op.create_table('job',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uuid', sa.String(length=64), nullable=False),
sa.Column('percentage', sa.Integer(), nullable=False),
sa.Column('status', sa.Integer(), nullable=False),
sa.Column('creation_time', sa.DateTime(), nullable=True),
sa.Column('completion_time', sa.DateTime(), nullable=True),
sa.Column('message', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_job_uuid'), 'job', ['uuid'], unique=True)
op.create_table('product_info',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('net_weight', sa.Float(), nullable=False),
sa.Column('price_per_kg', sa.Float(), nullable=False),
sa.Column('full_name', sa.String(length=128), nullable=True),
sa.Column('deprecated', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_product_info_name'), 'product_info', ['name'], unique=True)
op.create_table('retraction',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('uuid', sa.String(length=64), nullable=False),
sa.Column('success', sa.Boolean(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=True),
sa.Column('message', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_retraction_uuid'), 'retraction', ['uuid'], unique=True)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=50), nullable=False),
sa.Column('password', sa.String(length=255), nullable=False),
sa.Column('reset_password_token', sa.String(length=100), nullable=False),
sa.Column('email', sa.String(length=255), nullable=False),
sa.Column('confirmed_at', sa.DateTime(), nullable=True),
sa.Column('is_enabled', sa.Boolean(), nullable=False),
sa.Column('first_name', sa.String(length=50), nullable=False),
sa.Column('last_name', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email'),
sa.UniqueConstraint('username')
)
op.create_table('order',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('order_number', sa.String(length=128), nullable=False),
sa.Column('type', sa.Integer(), nullable=True),
sa.Column('upload_time', sa.DateTime(), nullable=True),
sa.Column('used', sa.Boolean(), nullable=False),
sa.Column('used_time', sa.DateTime(), nullable=True),
sa.Column('sender_address', sa.String(length=256), nullable=True),
sa.Column('receiver_address', sa.String(length=256), nullable=True),
sa.Column('receiver_name', sa.String(length=32), nullable=True),
sa.Column('receiver_id_number', sa.String(length=64), nullable=True),
sa.Column('job_id', sa.Integer(), nullable=True),
sa.Column('retraction_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['job_id'], ['job.id'], ),
sa.ForeignKeyConstraint(['retraction_id'], ['retraction.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_order_order_number'), 'order', ['order_number'], unique=True)
op.create_index(op.f('ix_order_type'), 'order', ['type'], unique=False)
op.create_index(op.f('ix_order_used'), 'order', ['used'], unique=False)
op.create_table('product_count_info',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('product_info_id', sa.Integer(), nullable=True),
sa.Column('count', sa.Integer(), nullable=False),
sa.Column('gross_weight_per_box', sa.Float(), nullable=False),
sa.ForeignKeyConstraint(['product_info_id'], ['product_info.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('product_info_id', 'count')
)
op.create_index(op.f('ix_product_count_info_count'), 'product_count_info', ['count'], unique=False)
op.create_index(op.f('ix_product_count_info_product_info_id'), 'product_count_info', ['product_info_id'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_product_count_info_product_info_id'), table_name='product_count_info')
op.drop_index(op.f('ix_product_count_info_count'), table_name='product_count_info')
op.drop_table('product_count_info')
op.drop_index(op.f('ix_order_used'), table_name='order')
op.drop_index(op.f('ix_order_type'), table_name='order')
op.drop_index(op.f('ix_order_order_number'), table_name='order')
op.drop_table('order')
op.drop_table('user')
op.drop_index(op.f('ix_retraction_uuid'), table_name='retraction')
op.drop_table('retraction')
op.drop_index(op.f('ix_product_info_name'), table_name='product_info')
op.drop_table('product_info')
op.drop_index(op.f('ix_job_uuid'), table_name='job')
op.drop_table('job')
op.drop_index(op.f('ix_city_type'), table_name='city')
op.drop_index(op.f('ix_city_name'), table_name='city')
op.drop_table('city')
### end Alembic commands ###
| 46.771654 | 123 | 0.685017 |
4a26b0c4195824609cb3fc706b2b3bdbca4fa1f9 | 4,859 | py | Python | api.py | locus-ioe/SF21-Project-1-Template | ad813ddd5619d66e9375d17a0e58b3d3f3d99610 | [
"MIT"
] | null | null | null | api.py | locus-ioe/SF21-Project-1-Template | ad813ddd5619d66e9375d17a0e58b3d3f3d99610 | [
"MIT"
] | null | null | null | api.py | locus-ioe/SF21-Project-1-Template | ad813ddd5619d66e9375d17a0e58b3d3f3d99610 | [
"MIT"
] | null | null | null | """
This file contains the code to set up your own REST API server for use in the
project. The custom REST API server fetches data from the original One API
and only provides the first 20 characters for use in the actual app. The
comments throughout the file will guide you through fully setting up your own API server.
The required utility functions are also provided in the file itself.
"""
import requests # noqa
from flask import Flask, jsonify, request # noqa
app = Flask(__name__)
# The following variable is a global variable that stores only the required
# data from The One API that is to be sent to your web app.
characters = []
# Import the config file which contains your API key. Head over to config.py
# and set up your API key there. If you don't have it yet, follow the URL and
# create one.
app.config.from_pyfile("config.py")
API_KEY = app.config["API_KEY"]
URL = "https://the-one-api.dev/v2/"
# These entities are the only ones that you will be needing in your app.
# You will be extracting them from the fetched data before making it
# available to your app.
required_keys = {"_id", "name", "race", "gender", "wikiUrl"}
# This is the utility function that will extract the required entities
# mentioned above. This function accepts the list of characters obtained
# from The One API and returns the list of characters with only the required
# five fields
def filter_character_fields(raw_characters):
filtered_characters = []
for character in raw_characters:
filtered_characters.append(
{key: value for key, value in character.items() if key in required_keys}
)
return filtered_characters
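# Editor's note (illustrative example): given a raw record from The One API
# such as {"_id": "1", "name": "Frodo Baggins", "race": "Hobbit",
# "gender": "Male", "wikiUrl": "https://...", "height": "1.06m"}, the helper
# above drops the extra "height" key and keeps only the five required fields.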
# This is your own character retrieving API endpoint. You will be hitting this
# endpoint to get the characters with only the required fields in your app.
@app.route("/character", methods=["GET"])
def get_characters():
headers = {"Authorization": f"Bearer {API_KEY}"} # noqa
# Send out a request to The One API including the headers above to retrieve
# the characters. You have to fetch only 20 characters, so you need to set
# a limit while fetching. The limit to retrieve characters can be set as
# "URL_TO_RETRIEVE_CHARACTER?limit=20". For further information, you can
# consult the documentation of The One API by following this URL,
# https://the-one-api.dev/documentation
# The following two variables are placeholders to store the received
# response from the API and for the list of characters after converting
# that into JSON format. Edit these lines to obtain the required behaviour.
response = None # noqa
raw_characters = [] # noqa
# This variable stores the filtered character list that only contains the
    # fields required in your web app. You can call the utility function that
    # we have provided above to extract the required fields. Edit the following
    # line to include the function call for extraction.
characters = []
return jsonify(characters)
# This is your own character adding API endpoint. You will be hitting this
# endpoint with the POST method to add new characters to your API data.
@app.route("/character", methods=["POST"])
def create_character():
# Get a JSON of the POST request to this endpoint and store it in the
# following variable.
character = None # noqa
# Edit the following lines to store the values obtained from the POST
# request. Store the values of field names in the corresponding field name
# variables provided below. Extract the values from the character variable.
_id = None
name = None
race = None
gender = None
wikiUrl = None
    # This line inserts the character details obtained from the POST request
    # into the global character list. You need not change anything in the
    # following lines, but you are highly encouraged to understand the code
    # and look up related list manipulation functions.
characters.insert(
0,
{"_id": _id, "name": name, "race": race, "gender": gender, "wikiUrl": wikiUrl},
)
return jsonify(message="Character Created!", status="200")
# This is your own character deleting API endpoint. You will be hitting this
# endpoint with the DELETE method to delete characters from your API data.
@app.route("/character/<id>", methods=["DELETE"])
def delete_character(id):
# Using a for loop to go through all the characters to find the character
# with the matching ID for deletion
for character in characters:
if character["_id"] == id:
# Write down the character removal logic here. Removing the
# characters means deleting its entry from the global character
# list in this file.
pass
return jsonify()
# API Runs on port 5001
if __name__ == "__main__":
app.run(port=5001, debug=True)
| 40.831933 | 87 | 0.722165 |
4a26b11db112ba49b60015a7ec1f730fa53507af | 991 | py | Python | kommersant_parser.py | Zed-chi/news-parser | 20c5592baaa8d80182b45b0a2aafde4baaf3357a | [
"MIT"
] | 1 | 2019-01-14T17:59:34.000Z | 2019-01-14T17:59:34.000Z | kommersant_parser.py | Zed-chi/news-parser | 20c5592baaa8d80182b45b0a2aafde4baaf3357a | [
"MIT"
] | null | null | null | kommersant_parser.py | Zed-chi/news-parser | 20c5592baaa8d80182b45b0a2aafde4baaf3357a | [
"MIT"
] | 1 | 2019-01-14T17:59:37.000Z | 2019-01-14T17:59:37.000Z | from bs4 import BeautifulSoup as web
from basic_parser import Parser
class Kommersant_parser(Parser):
def __init__(self):
super().__init__("http://www.kommersant.ru/RSS/news.xml")
self.encoding = "windows-1251"
self.site_encoding = "windows-1251"
def grab(self, url):
content = web(self.get_content(url, self.site_encoding), "html.parser")
obj = {}
if content.select(".article_name"):
obj["title"] = content.select(".article_name")[0].text
if content.select(".article_text_wrapper > p"):
obj["content"] = list(
map(
lambda x: x.text,
content.select(".article_text_wrapper > p")
)
)
return obj
if __name__ == "__main__":
kom = Kommersant_parser()
news = kom.news()
print(news)
url = news[0]["link"]
title = news[0]["title"]
print(title, url)
data = kom.grab(url)
print(data)
| 28.314286 | 79 | 0.566095 |
4a26b1643b6fb3d83c9b6501d4279a47860b8b65 | 8,372 | py | Python | tritam_crm_sms/models/stock_picking.py | kenysmile/test_facebook | 844a3ddd53abd319c0115de86909118a37106c67 | [
"Apache-2.0"
] | null | null | null | tritam_crm_sms/models/stock_picking.py | kenysmile/test_facebook | 844a3ddd53abd319c0115de86909118a37106c67 | [
"Apache-2.0"
] | null | null | null | tritam_crm_sms/models/stock_picking.py | kenysmile/test_facebook | 844a3ddd53abd319c0115de86909118a37106c67 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from odoo import models, fields, api,tools,_
import calendar
from datetime import timedelta, date
import datetime
import re
from threading import Timer, Thread
#import imp
import sys
#reload(sys)
#sys.setdefaultencoding("utf8")
from odoo.exceptions import UserError
import logging
_logger = logging.getLogger(__name__)
from pytz import timezone
from dateutil.relativedelta import relativedelta
class Duoc_Delivery_Carrier(models.Model):
_inherit = "delivery.carrier"
type_method = fields.Selection([
(1, 'Ship ngoài'),
(2, 'Hãng vận chuyển'),
], string='Loại vận chuyển', default= 1)
mail_template = fields.Many2one('mail.template', "SMS Template", domain=[('model_id.model', '=', 'stock.picking')])
mail_template_do_out = fields.Many2one('mail.template', "SMS Template DO OUT", domain=[('model_id.model', '=', 'stock.picking')])
link_shipping = fields.Char(string="Link tra cứu MVĐ")
class Duoc_Stock_Picking(models.Model):
_inherit = "stock.picking"
@api.multi
def do_new_transfer(self):
# so = self.env['sale.order'].browse(self.group_id.procurement_ids[0].sale_line_id.order_id.id) \
# or self.env['sale.order'].search([('name','=',self.group_id.name)])
if self.group_id.procurement_ids:
so = self.env['sale.order'].browse(self.group_id.procurement_ids[0].sale_line_id.order_id.id)
else:
so = self.env['sale.order'].search([('name', '=', self.group_id.name)])
user_tz = self.env.user.tz or u'UTC'
local_time = datetime.datetime.now(timezone(user_tz))
utc_time = datetime.datetime.now(timezone('UTC'))
if local_time.day > utc_time.day:
delay_hours_local = local_time.hour + 24 - utc_time.hour
elif local_time.day < utc_time.day:
delay_hours_local = local_time.hour - (utc_time.hour + 24)
else:
delay_hours_local = local_time.hour - utc_time.hour
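        # Editor's note (worked example of the branches above): with a user
        # timezone of UTC+7, a local time of 01:00 on the 2nd corresponds to a
        # UTC time of 18:00 on the 1st, so the first branch applies and
        # delay_hours_local = 1 + 24 - 18 = 7, i.e. the local offset from UTC
        # in whole hours.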
if self.name.find('PICK') != -1:
if so.carrier_id:
if not so.carrier_id.mail_template:
raise UserError(_("Phương án giao hàng của %s chưa có SMS Template " % (so.name)))
lines = ""
for line in so.order_line:
qty = line.product_uom_qty or ""
price_unit = line.price_unit or ""
product = line.product_id.name or ""
lines += "Số lượng: " + str(qty) + " Đơn Giá: " + str(price_unit) + " Sản Phẩm: " + str(
product) + ", "
content = re.sub(r'<.*?>', '', so.carrier_id.mail_template.body_html).replace('\n', '')
content_cv = content.format(lines=lines, amount_total=str(so.amount_total),
contact=so.partner_id.name,
address=so.partner_id.contact_address or "",
sale_order=so.name,link_mvd=so.carrier_id.link_shipping or "",company_phone=self.env.user.company_id.phone or "",
product=so.partner_id.x_product_id.name or "",
category=so.partner_id.x_product_id.categ_id.name or "")
if so.carrier_id.type_method == 1:
if so.partner_id.phone and so.partner_id.sms:
self.env['tritam.sms'].send_sms_api(so.partner_id.phone, content_cv)
body_html = "<div><ul>" \
"<li>Gửi Sms</li>" \
"<li>Người gửi: {sale_man}</li>" \
"<li>Ngày gửi: {time}</li>" \
"<li>Hành động: gửi SMS</li>" \
"<li>Nội dung SMS: {note} </li>" \
"</ul></div>".format(sale_man=self.env.user.name,
time=fields.datetime.now()-relativedelta(hours=delay_hours_local), note=content_cv)
self.message_post(body_html)
self.env['tritam.history.sms'].sudo().create({
'mail_template': so.carrier_id.mail_template.id,
'user_id': self.env.user.id,
'partner_id': so.partner_id.id,
'date': fields.datetime.now(),
})
elif so.carrier_id.type_method == 2:
now = datetime.datetime.now()-relativedelta(hours=delay_hours_local)
run_at = (now + timedelta(days=1))
giventime = run_at.replace(hour=8, minute=00, second=0, microsecond=0)
_logger.info(
"--------------before-------------" + str(now)+"------------------------------------------")
_logger.info(
"--------------before-------------" + str(giventime)+"------------------------------------------")
if so.partner_id.phone and so.partner_id.sms:
self.env['tritam.sms'].send_sms_api_delay(so.partner_id.phone, content_cv,str(giventime))
body_html = "<div><ul>" \
"<li>Gửi Sms</li>" \
"<li>Người gửi: {sale_man}</li>" \
"<li>Ngày gửi: {time}</li>" \
"<li>Hành động: gửi SMS</li>" \
"<li>Nội dung SMS: {note} </li>" \
"</ul></div>".format(sale_man=self.env.user.name,
time=giventime, note=content_cv)
self.message_post(body_html)
self.env['tritam.history.sms'].sudo().create({
'mail_template': so.carrier_id.mail_template.id,
'user_id': self.env.user.id,
'partner_id': so.partner_id.id,
'date': str(giventime+relativedelta(hours=delay_hours_local)),
})
else :
raise UserError(_("Phương án giao hàng của %s chưa có loại vận chuyển" % (so.name)))
else:
raise UserError(_("%s chưa có phương án giao hàng"%(so.name)))
if self.name.find('OUT') != -1:
if so.carrier_id:
if not so.carrier_id.mail_template_do_out:
raise UserError(_("Phương án giao hàng của %s chưa có SMS Template " % (so.name)))
content = re.sub(r'<.*?>', '', so.carrier_id.mail_template_do_out.body_html).replace('\n', '')
content_cv = content.format(contact=so.partner_id.name,product = so.partner_id.x_product_id.name or "",
category=so.partner_id.x_product_id.categ_id.name or "")
if so.partner_id.phone and so.partner_id.sms:
self.env['tritam.sms'].send_sms_api(so.partner_id.phone, content_cv)
body_html = "<div><ul>" \
"<li>Gửi Sms</li>" \
"<li>Người gửi: {sale_man}</li>" \
"<li>Ngày gửi: {time}</li>" \
"<li>Hành động: gửi SMS</li>" \
"<li>Nội dung SMS: {note} </li>" \
"</ul></div>".format(sale_man=self.env.user.name,
time=fields.datetime.now()-relativedelta(hours=delay_hours_local), note=content_cv)
self.message_post(body_html)
self.env['tritam.history.sms'].sudo().create({
'mail_template': so.carrier_id.mail_template_do_out.id,
'user_id': self.env.user.id,
'partner_id': so.partner_id.id,
'date': fields.datetime.now(),
})
else:
raise UserError(_("%s chưa có phương án giao hàng"%(so.name)))
res = super(Duoc_Stock_Picking, self).do_new_transfer()
return res
| 56.952381 | 157 | 0.491758 |
4a26b1665885f6ff757ab0504ffe9658b1722fb5 | 11,180 | py | Python | src/robot/running/handlers.py | d-biehl/robotframework | ac4803649702ada9d18c5357ad7fcbbebcd0fb89 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-02-14T12:19:18.000Z | 2020-02-14T12:19:18.000Z | src/robot/running/handlers.py | imust6226/robotframework | 08c56fef2ebc64d682c7f99acd77c480d8d0e028 | [
"ECL-2.0",
"Apache-2.0"
] | 26 | 2020-04-07T04:25:35.000Z | 2022-03-01T08:08:23.000Z | src/robot/running/handlers.py | imust6226/robotframework | 08c56fef2ebc64d682c7f99acd77c480d8d0e028 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import copy
import inspect
from robot.utils import (getdoc, getshortdoc, is_list_like, normpath, printable_name,
split_tags_from_doc, type_name)
from robot.errors import DataError
from robot.model import Tags
from .arguments import ArgumentSpec, DynamicArgumentParser, PythonArgumentParser
from .dynamicmethods import GetKeywordSource, GetKeywordTypes
from .librarykeywordrunner import (EmbeddedArgumentsRunner,
LibraryKeywordRunner, RunKeywordRunner)
from .runkwregister import RUN_KW_REGISTER
def Handler(library, name, method):
if RUN_KW_REGISTER.is_run_keyword(library.orig_name, name):
return _RunKeywordHandler(library, name, method)
return _PythonHandler(library, name, method)
def DynamicHandler(library, name, method, doc, argspec, tags=None):
if RUN_KW_REGISTER.is_run_keyword(library.orig_name, name):
return _DynamicRunKeywordHandler(library, name, method, doc, argspec, tags)
return _DynamicHandler(library, name, method, doc, argspec, tags)
def InitHandler(library, method=None, docgetter=None):
return _PythonInitHandler(library, '__init__', method, docgetter)
class _RunnableHandler:
def __init__(self, library, handler_name, handler_method, doc='', tags=None):
self.library = library
self._handler_name = handler_name
self.name = self._get_name(handler_name, handler_method)
self.arguments = self._parse_arguments(handler_method)
self._method = self._get_initial_handler(library, handler_name,
handler_method)
doc, tags_from_doc = split_tags_from_doc(doc or '')
tags_from_attr = self._get_tags_from_attribute(handler_method)
self._doc = doc
self.tags = Tags(tuple(tags_from_doc) +
tuple(tags_from_attr) +
tuple(tags or ()))
def _get_name(self, handler_name, handler_method):
robot_name = getattr(handler_method, 'robot_name', None)
name = robot_name or printable_name(handler_name, code_style=True)
if not name:
raise DataError('Keyword name cannot be empty.')
return name
def _parse_arguments(self, handler_method):
raise NotImplementedError
def _get_tags_from_attribute(self, handler_method):
tags = getattr(handler_method, 'robot_tags', ())
if not is_list_like(tags):
raise DataError("Expected tags to be list-like, got %s."
% type_name(tags))
return tags
def _get_initial_handler(self, library, name, method):
if library.scope.is_global:
return self._get_global_handler(method, name)
return None
def resolve_arguments(self, args, variables=None):
return self.arguments.resolve(args, variables, self.library.converters)
@property
def doc(self):
return self._doc
@property
def longname(self):
return '%s.%s' % (self.library.name, self.name)
@property
def shortdoc(self):
return getshortdoc(self.doc)
@property
def libname(self):
return self.library.name
@property
def source(self):
return self.library.source
@property
def lineno(self):
return -1
def create_runner(self, name):
return LibraryKeywordRunner(self)
def current_handler(self):
if self._method:
return self._method
return self._get_handler(self.library.get_instance(), self._handler_name)
def _get_global_handler(self, method, name):
return method
def _get_handler(self, lib_instance, handler_name):
try:
return getattr(lib_instance, handler_name)
except AttributeError:
# Occurs with old-style classes.
if handler_name == '__init__':
return None
raise
class _PythonHandler(_RunnableHandler):
def __init__(self, library, handler_name, handler_method):
_RunnableHandler.__init__(self, library, handler_name, handler_method,
getdoc(handler_method))
def _parse_arguments(self, handler_method):
return PythonArgumentParser().parse(handler_method, self.longname)
@property
def source(self):
handler = self.current_handler()
# `getsourcefile` can return None and raise TypeError.
try:
source = inspect.getsourcefile(inspect.unwrap(handler))
except TypeError:
source = None
return normpath(source) if source else self.library.source
@property
def lineno(self):
handler = self.current_handler()
try:
lines, start_lineno = inspect.getsourcelines(inspect.unwrap(handler))
except (TypeError, OSError, IOError):
return -1
for increment, line in enumerate(lines):
if line.strip().startswith('def '):
return start_lineno + increment
return start_lineno
class _DynamicHandler(_RunnableHandler):
def __init__(self, library, handler_name, dynamic_method, doc='',
argspec=None, tags=None):
self._argspec = argspec
self._run_keyword_method_name = dynamic_method.name
self._supports_kwargs = dynamic_method.supports_kwargs
_RunnableHandler.__init__(self, library, handler_name,
dynamic_method.method, doc, tags)
self._source_info = None
def _parse_arguments(self, handler_method):
spec = DynamicArgumentParser().parse(self._argspec, self.longname)
if not self._supports_kwargs:
if spec.var_named:
raise DataError("Too few '%s' method parameters for **kwargs "
"support." % self._run_keyword_method_name)
if spec.named_only:
raise DataError("Too few '%s' method parameters for "
"keyword-only arguments support."
% self._run_keyword_method_name)
get_keyword_types = GetKeywordTypes(self.library.get_instance())
spec.types = get_keyword_types(self._handler_name)
return spec
@property
def source(self):
if self._source_info is None:
self._source_info = self._get_source_info()
return self._source_info[0]
def _get_source_info(self):
get_keyword_source = GetKeywordSource(self.library.get_instance())
try:
source = get_keyword_source(self._handler_name)
except DataError as err:
self.library.report_error(
"Getting source information for keyword '%s' failed: %s"
% (self.name, err.message), err.details
)
return None, -1
if not source:
return self.library.source, -1
if ':' not in source:
return source, -1
path, lineno = source.rsplit(':', 1)
try:
return path or self.library.source, int(lineno)
except ValueError:
return source, -1
@property
def lineno(self):
if self._source_info is None:
self._source_info = self._get_source_info()
return self._source_info[1]
def resolve_arguments(self, arguments, variables=None):
positional, named = super().resolve_arguments(arguments, variables)
if not self._supports_kwargs:
positional, named = self.arguments.map(positional, named)
return positional, named
def _get_handler(self, lib_instance, handler_name):
runner = getattr(lib_instance, self._run_keyword_method_name)
return self._get_dynamic_handler(runner, handler_name)
def _get_global_handler(self, method, name):
return self._get_dynamic_handler(method, name)
def _get_dynamic_handler(self, runner, name):
def handler(*positional, **kwargs):
if self._supports_kwargs:
return runner(name, positional, kwargs)
else:
return runner(name, positional)
return handler
class _RunKeywordHandler(_PythonHandler):
def create_runner(self, name):
default_dry_run_keywords = ('name' in self.arguments.positional and
self._args_to_process)
return RunKeywordRunner(self, default_dry_run_keywords)
@property
def _args_to_process(self):
return RUN_KW_REGISTER.get_args_to_process(self.library.orig_name,
self.name)
def resolve_arguments(self, args, variables=None):
return self.arguments.resolve(args, variables, self.library.converters,
resolve_named=False,
resolve_variables_until=self._args_to_process)
class _DynamicRunKeywordHandler(_DynamicHandler, _RunKeywordHandler):
_parse_arguments = _RunKeywordHandler._parse_arguments
resolve_arguments = _RunKeywordHandler.resolve_arguments
class _PythonInitHandler(_PythonHandler):
def __init__(self, library, handler_name, handler_method, docgetter):
_PythonHandler.__init__(self, library, handler_name, handler_method)
self._docgetter = docgetter
@property
def doc(self):
if self._docgetter:
self._doc = self._docgetter() or self._doc
self._docgetter = None
return self._doc
def _parse_arguments(self, init_method):
parser = PythonArgumentParser(type='Library')
return parser.parse(init_method or (lambda: None), self.library.name)
class EmbeddedArgumentsHandler:
def __init__(self, name_regexp, orig_handler):
self.arguments = ArgumentSpec() # Show empty argument spec for Libdoc
self.name_regexp = name_regexp
self._orig_handler = orig_handler
def __getattr__(self, item):
return getattr(self._orig_handler, item)
@property
def library(self):
return self._orig_handler.library
@library.setter
def library(self, library):
self._orig_handler.library = library
def matches(self, name):
return self.name_regexp.match(name) is not None
def create_runner(self, name):
return EmbeddedArgumentsRunner(self, name)
def __copy__(self):
orig_handler = copy(self._orig_handler)
return EmbeddedArgumentsHandler(self.name_regexp, orig_handler)
| 35.833333 | 85 | 0.657513 |
4a26b29fe5e4b988588579acfcf2c792c3ff4f6d | 3,554 | py | Python | sdk/python/pulumi_azure/compute/get_dedicated_host.py | AdminTurnedDevOps/pulumi-azure | affd9eaaee3016f350f0d0469694dbd52850300b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/compute/get_dedicated_host.py | AdminTurnedDevOps/pulumi-azure | affd9eaaee3016f350f0d0469694dbd52850300b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/compute/get_dedicated_host.py | AdminTurnedDevOps/pulumi-azure | affd9eaaee3016f350f0d0469694dbd52850300b | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class GetDedicatedHostResult:
"""
A collection of values returned by getDedicatedHost.
"""
def __init__(__self__, dedicated_host_group_name=None, id=None, location=None, name=None, resource_group_name=None, tags=None):
if dedicated_host_group_name and not isinstance(dedicated_host_group_name, str):
raise TypeError("Expected argument 'dedicated_host_group_name' to be a str")
__self__.dedicated_host_group_name = dedicated_host_group_name
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
__self__.id = id
"""
id is the provider-assigned unique ID for this managed resource.
"""
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
__self__.location = location
"""
The location where the Dedicated Host exists.
"""
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
__self__.name = name
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
__self__.resource_group_name = resource_group_name
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
__self__.tags = tags
"""
A mapping of tags assigned to the Dedicated Host.
"""
class AwaitableGetDedicatedHostResult(GetDedicatedHostResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDedicatedHostResult(
dedicated_host_group_name=self.dedicated_host_group_name,
id=self.id,
location=self.location,
name=self.name,
resource_group_name=self.resource_group_name,
tags=self.tags)
def get_dedicated_host(dedicated_host_group_name=None,name=None,resource_group_name=None,opts=None):
"""
Use this data source to access information about an existing Dedicated Host.
:param str dedicated_host_group_name: Specifies the name of the Dedicated Host Group the Dedicated Host is located in.
:param str name: Specifies the name of the Dedicated Host.
:param str resource_group_name: Specifies the name of the resource group the Dedicated Host is located in.
"""
__args__ = dict()
__args__['dedicatedHostGroupName'] = dedicated_host_group_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure:compute/getDedicatedHost:getDedicatedHost', __args__, opts=opts).value
return AwaitableGetDedicatedHostResult(
dedicated_host_group_name=__ret__.get('dedicatedHostGroupName'),
id=__ret__.get('id'),
location=__ret__.get('location'),
name=__ret__.get('name'),
resource_group_name=__ret__.get('resourceGroupName'),
tags=__ret__.get('tags'))
| 40.850575 | 131 | 0.689645 |
4a26b3511eabb848557335172dccd4c669daef06 | 1,969 | py | Python | xrpl/ledger/main.py | florent-uzio/xrpl-py | ce396a15a6fc39ca20e035636463b58b231ffeac | [
"0BSD"
] | 1 | 2021-04-07T16:59:01.000Z | 2021-04-07T16:59:01.000Z | xrpl/ledger/main.py | florent-uzio/xrpl-py | ce396a15a6fc39ca20e035636463b58b231ffeac | [
"0BSD"
] | null | null | null | xrpl/ledger/main.py | florent-uzio/xrpl-py | ce396a15a6fc39ca20e035636463b58b231ffeac | [
"0BSD"
] | null | null | null | """High-level ledger methods with the XRPL ledger."""
from typing import Any, Dict, cast
from xrpl.clients import Client, XRPLRequestFailureException
from xrpl.models.requests import Fee, Ledger
def get_latest_validated_ledger_sequence(client: Client) -> int:
"""
Returns the sequence number of the latest validated ledger.
Args:
client: The network client to use to send the request.
Returns:
The sequence number of the latest validated ledger.
Raises:
XRPLRequestFailureException: if the rippled API call fails.
"""
response = client.request(Ledger(ledger_index="validated"))
result = cast(Dict[str, Any], response.result)
if response.is_successful():
return cast(int, result["ledger_index"])
raise XRPLRequestFailureException(result)
def get_latest_open_ledger_sequence(client: Client) -> int:
"""
Returns the sequence number of the latest open ledger.
Args:
client: The network client to use to send the request.
Returns:
The sequence number of the latest open ledger.
Raises:
XRPLRequestFailureException: if the rippled API call fails.
"""
response = client.request(Ledger(ledger_index="open"))
result = cast(Dict[str, Any], response.result)
if response.is_successful():
return cast(int, result["ledger_index"])
raise XRPLRequestFailureException(result)
def get_fee(client: Client) -> str:
"""
Query the ledger for the current minimum transaction fee.
Args:
client: the network client used to make network calls.
Returns:
The minimum fee for transactions.
Raises:
XRPLRequestFailureException: if the rippled API call fails.
"""
response = client.request(Fee())
result = cast(Dict[str, Any], response.result)
if response.is_successful():
return cast(str, result["drops"]["minimum_fee"])
raise XRPLRequestFailureException(result)
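# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The JSON-RPC endpoint below is a placeholder for any reachable rippled server.
def _example_ledger_queries():
    from xrpl.clients import JsonRpcClient
    client = JsonRpcClient("https://s.altnet.rippletest.net:51234")
    return (
        get_latest_validated_ledger_sequence(client),
        get_latest_open_ledger_sequence(client),
        get_fee(client),
    )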
| 28.128571 | 67 | 0.694769 |
4a26b364a3062eebf66a8b8c3cb3402284c322eb | 1,473 | py | Python | MechFinder/urls.py | srinidh-007/MechFinder | 6c217ee28963ea11b0960c982aa07cff35e31709 | [
"Apache-2.0"
] | null | null | null | MechFinder/urls.py | srinidh-007/MechFinder | 6c217ee28963ea11b0960c982aa07cff35e31709 | [
"Apache-2.0"
] | null | null | null | MechFinder/urls.py | srinidh-007/MechFinder | 6c217ee28963ea11b0960c982aa07cff35e31709 | [
"Apache-2.0"
] | null | null | null | """mechcheck URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from home import views
urlpatterns = [
path('admin/', admin.site.urls),
path('mechanic/', include('mechanic.urls')),
path('customer/', include('customer.urls'), name="customer"),
path('', views.home, name="home"),
path('signup', views.signupuser, name="signupuser"),
# path('customer', views.customer, name="customer"),
path('login', views.loginuser, name="loginuser"),
path('logout', views.logoutuser, name="logoutuser"),
path('edit', views.editProfile, name="editProfile"),
path('services', views.services, name="services"),
path('about', views.about, name="about"),
path('faqs', views.faqs, name="faqs"),
path('contact/', views.contact, name="contact"),
path('feedback', views.feedback, name="feedback"),
]
| 38.763158 | 77 | 0.682281 |
4a26b41f252e099f6bb08f49519ef23506e6c1db | 148 | py | Python | library_app/library_app/doctype/trancation/test_trancation.py | priyatoshy/FrappeLibraryApplication | 73237b49f9647024ef7262b756fa799fe201b6d9 | [
"MIT"
] | null | null | null | library_app/library_app/doctype/trancation/test_trancation.py | priyatoshy/FrappeLibraryApplication | 73237b49f9647024ef7262b756fa799fe201b6d9 | [
"MIT"
] | null | null | null | library_app/library_app/doctype/trancation/test_trancation.py | priyatoshy/FrappeLibraryApplication | 73237b49f9647024ef7262b756fa799fe201b6d9 | [
"MIT"
] | null | null | null | # Copyright (c) 2022, Priyatosh and Contributors
# See license.txt
# import frappe
import unittest
class TestTrancation(unittest.TestCase):
pass
| 16.444444 | 48 | 0.783784 |
4a26b4fa42ce8085af307df776c8bc8a236820ad | 3,542 | py | Python | neurokit2/rsp/rsp_process.py | danibene/NeuroKit | df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a | [
"MIT"
] | null | null | null | neurokit2/rsp/rsp_process.py | danibene/NeuroKit | df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a | [
"MIT"
] | null | null | null | neurokit2/rsp/rsp_process.py | danibene/NeuroKit | df0ab6696e7418cf8b8dcd3ed82dbf879fa61b3a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pandas as pd
from ..signal import signal_rate, signal_sanitize
from .rsp_amplitude import rsp_amplitude
from .rsp_clean import rsp_clean
from .rsp_peaks import rsp_peaks
from .rsp_phase import rsp_phase
def rsp_process(rsp_signal, sampling_rate=1000, method="khodadad2018"):
"""**Process a respiration (RSP) signal**
Convenience function that automatically processes a respiration signal with one of the
following methods:
* `Khodadad et al. (2018) <https://iopscience.iop.org/article/10.1088/1361-6579/aad7e6/meta>`_
* `BioSPPy <https://github.com/PIA-Group/BioSPPy/blob/master/biosppy/signals/resp.py>`_
Parameters
----------
rsp_signal : Union[list, np.array, pd.Series]
The raw respiration channel (as measured, for instance, by a respiration belt).
sampling_rate : int
The sampling frequency of :func:`.rsp_signal` (in Hz, i.e., samples/second).
method : str
The processing pipeline to apply. Can be one of ``"khodadad2018"`` (default)
or ``"biosppy"``.
Returns
-------
signals : DataFrame
A DataFrame of same length as :func:`.rsp_signal` containing the following columns:
* ``"RSP_Raw"``: the raw signal.
* ``"RSP_Clean"``: the cleaned signal.
* ``"RSP_Peaks"``: the respiratory peaks (exhalation onsets) marked as "1" in a list of
zeros.
* ``"RSP_Troughs"``: the respiratory troughs (inhalation onsets) marked as "1" in a list of
zeros.
* ``"RSP_Rate"``: breathing rate interpolated between inhalation peaks.
* ``"RSP_Amplitude"``: breathing amplitude interpolated between inhalation peaks.
* ``"RSP_Phase"``: breathing phase, marked by "1" for inspiration and "0" for expiration.
* ``"RSP_PhaseCompletion"``: breathing phase completion, expressed in percentage (from 0 to
1), representing the stage of the current respiratory phase.
info : dict
A dictionary containing the samples at which inhalation peaks and exhalation troughs occur,
accessible with the keys ``"RSP_Peaks"``, and ``"RSP_Troughs"`` respectively, as well as the
signals' sampling rate.
See Also
--------
rsp_clean, rsp_findpeaks, signal_rate, rsp_amplitude, rsp_plot
Examples
--------
.. ipython:: python
import neurokit2 as nk
rsp = nk.rsp_simulate(duration=90, respiratory_rate=15)
signals, info = nk.rsp_process(rsp, sampling_rate=1000)
@savefig p_rsp_process_1.png scale=100%
fig = nk.rsp_plot(signals)
@suppress
plt.close()
"""
# Sanitize input
rsp_signal = signal_sanitize(rsp_signal)
# Clean signal
rsp_cleaned = rsp_clean(rsp_signal, sampling_rate=sampling_rate, method=method)
# Extract, fix and format peaks
peak_signal, info = rsp_peaks(rsp_cleaned, sampling_rate=sampling_rate, method=method, amplitude_min=0.3)
    info['sampling_rate'] = sampling_rate  # Add the sampling rate to the info dict
# Get additional parameters
phase = rsp_phase(peak_signal, desired_length=len(rsp_signal))
amplitude = rsp_amplitude(rsp_cleaned, peak_signal)
rate = signal_rate(info["RSP_Troughs"], sampling_rate=sampling_rate, desired_length=len(rsp_signal))
# Prepare output
signals = pd.DataFrame(
{"RSP_Raw": rsp_signal, "RSP_Clean": rsp_cleaned, "RSP_Amplitude": amplitude, "RSP_Rate": rate}
)
signals = pd.concat([signals, phase, peak_signal], axis=1)
return signals, info
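# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# `raw_respiration` stands for any array-like respiration recording.
def _example_rsp_pipeline(raw_respiration, sampling_rate=1000):
    signals, info = rsp_process(raw_respiration, sampling_rate=sampling_rate)
    peak_samples = info["RSP_Peaks"]        # sample indices of inhalation peaks
    mean_rate = signals["RSP_Rate"].mean()  # average breathing rate
    return peak_samples, mean_rate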
| 38.5 | 109 | 0.682947 |
4a26b56d05aa2314899dcaa6729330c086fb5d37 | 6,463 | py | Python | filer/admin/clipboardadmin.py | pabloriveracelerity/django-filer | db9d7c02c51e85af46fa8a47be522e6f391c1264 | [
"BSD-3-Clause"
] | null | null | null | filer/admin/clipboardadmin.py | pabloriveracelerity/django-filer | db9d7c02c51e85af46fa8a47be522e6f391c1264 | [
"BSD-3-Clause"
] | null | null | null | filer/admin/clipboardadmin.py | pabloriveracelerity/django-filer | db9d7c02c51e85af46fa8a47be522e6f391c1264 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from django.conf.urls import url
from django.contrib import admin
from django.forms.models import modelform_factory
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from . import views
from .. import settings as filer_settings
from ..models import Clipboard, ClipboardItem, Folder
from ..utils.files import (
UploadException,
handle_request_files_upload,
handle_upload,
)
from ..utils.loader import load_model
NO_FOLDER_ERROR = "Can't find folder to upload. Please refresh and try again"
NO_PERMISSIONS_FOR_FOLDER = (
"Can't use this folder, Permission Denied. Please select another folder."
)
# Resolve the configured image model class (the dotted-path string itself would
# never match the type() check in ajax_upload below).
Image = load_model(filer_settings.FILER_IMAGE_MODEL)
# ModelAdmins
class ClipboardItemInline(admin.TabularInline):
model = ClipboardItem
class ClipboardAdmin(admin.ModelAdmin):
model = Clipboard
inlines = [ClipboardItemInline]
filter_horizontal = ('files',)
raw_id_fields = ('user',)
verbose_name = "DEBUG Clipboard"
verbose_name_plural = "DEBUG Clipboards"
def get_urls(self):
return [
url(r'^operations/paste_clipboard_to_folder/$',
self.admin_site.admin_view(views.paste_clipboard_to_folder),
name='filer-paste_clipboard_to_folder'),
url(r'^operations/discard_clipboard/$',
self.admin_site.admin_view(views.discard_clipboard),
name='filer-discard_clipboard'),
url(r'^operations/delete_clipboard/$',
self.admin_site.admin_view(views.delete_clipboard),
name='filer-delete_clipboard'),
url(r'^operations/upload/(?P<folder_id>[0-9]+)/$',
ajax_upload,
name='filer-ajax_upload'),
url(r'^operations/upload/no_folder/$',
ajax_upload,
name='filer-ajax_upload'),
] + super(ClipboardAdmin, self).get_urls()
def get_model_perms(self, *args, **kwargs):
"""
It seems this is only used for the list view. NICE :-)
"""
return {
'add': False,
'change': False,
'delete': False,
}
@csrf_exempt
def ajax_upload(request, folder_id=None):
"""
Receives an upload from the uploader. Receives only one file at a time.
"""
folder = None
if folder_id:
try:
# Get folder
folder = Folder.objects.get(pk=folder_id)
except Folder.DoesNotExist:
return JsonResponse({'error': NO_FOLDER_ERROR})
# check permissions
if folder and not folder.has_add_children_permission(request):
return JsonResponse({'error': NO_PERMISSIONS_FOR_FOLDER})
try:
if len(request.FILES) == 1:
            # don't check whether the request is AJAX or not, just grab the file
upload, filename, is_raw = handle_request_files_upload(request)
else:
# else process the request as usual
upload, filename, is_raw = handle_upload(request)
# TODO: Deprecated/refactor
        # Get clipboard
# clipboard = Clipboard.objects.get_or_create(user=request.user)[0]
# find the file type
for filer_class in filer_settings.FILER_FILE_MODELS:
FileSubClass = load_model(filer_class)
# TODO: What if there are more than one that qualify?
if FileSubClass.matches_file_type(filename, upload, request):
FileForm = modelform_factory(
model=FileSubClass,
fields=('original_filename', 'owner', 'file')
)
break
uploadform = FileForm({'original_filename': filename,
'owner': request.user.pk},
{'file': upload})
if uploadform.is_valid():
file_obj = uploadform.save(commit=False)
# Enforce the FILER_IS_PUBLIC_DEFAULT
file_obj.is_public = filer_settings.FILER_IS_PUBLIC_DEFAULT
file_obj.folder = folder
file_obj.save()
# TODO: Deprecated/refactor
# clipboard_item = ClipboardItem(
# clipboard=clipboard, file=file_obj)
# clipboard_item.save()
# Try to generate thumbnails.
if not file_obj.icons:
# There is no point to continue, as we can't generate
# thumbnails for this file. Usual reasons: bad format or
# filename.
file_obj.delete()
# This would be logged in BaseImage._generate_thumbnails()
# if FILER_ENABLE_LOGGING is on.
return JsonResponse(
{'error': 'failed to generate icons for file'},
status=500,
)
thumbnail = None
# Backwards compatibility: try to get specific icon size (32px)
# first. Then try medium icon size (they are already sorted),
# fallback to the first (smallest) configured icon.
for size in (['32'] +
filer_settings.FILER_ADMIN_ICON_SIZES[1::-1]):
try:
thumbnail = file_obj.icons[size]
break
except KeyError:
continue
data = {
'thumbnail': thumbnail,
'alt_text': '',
'label': str(file_obj),
'file_id': file_obj.pk,
}
if type(file_obj) == Image:
thumbnail_180_options = {
'size': (180, 180),
'crop': True,
'upscale': True,
}
thumbnail_180 = file_obj.file.get_thumbnail(
thumbnail_180_options)
data['thumbnail_180'] = thumbnail_180.url
data['original_image'] = file_obj.url
return JsonResponse(data)
else:
form_errors = '; '.join(['%s: %s' % (
field,
', '.join(errors)) for field, errors in list(
uploadform.errors.items())
])
raise UploadException(
"AJAX request not valid: form invalid '%s'" % (
form_errors,))
except UploadException as e:
return JsonResponse({'error': str(e)}, status=500)
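# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Exercising the upload endpoint with Django's test client; the logged-in user,
# folder pk and file name are placeholders, and the URL name is namespaced under
# the admin site because it is registered via ClipboardAdmin.get_urls() above.
def _example_ajax_upload(staff_user):
    from django.test import Client
    from django.urls import reverse
    client = Client()
    client.force_login(staff_user)
    upload_url = reverse('admin:filer-ajax_upload', kwargs={'folder_id': 1})
    with open('picture.jpg', 'rb') as fh:
        response = client.post(upload_url, {'file': fh})
    return response.json()  # e.g. {'thumbnail': ..., 'label': ..., 'file_id': ...}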
| 36.931429 | 77 | 0.57218 |
4a26b6568ed7e9fc334ef470f659697381265ba1 | 2,300 | py | Python | tests/chainer_tests/functions_tests/array_tests/test_spatial_transformer_grid.py | disktnk/chainer | 133798db470f6fd95973b882b9ccbd0c9726ac13 | [
"MIT"
] | 90 | 2017-02-23T04:04:47.000Z | 2020-04-09T12:06:50.000Z | tests/chainer_tests/functions_tests/array_tests/test_spatial_transformer_grid.py | disktnk/chainer | 133798db470f6fd95973b882b9ccbd0c9726ac13 | [
"MIT"
] | 7 | 2017-07-23T13:38:06.000Z | 2018-07-10T07:09:03.000Z | tests/chainer_tests/functions_tests/array_tests/test_spatial_transformer_grid.py | disktnk/chainer | 133798db470f6fd95973b882b9ccbd0c9726ac13 | [
"MIT"
] | 32 | 2017-02-28T07:40:38.000Z | 2021-02-17T11:33:09.000Z | import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'never'],
}))
class TestSpatialTransformerGrid(unittest.TestCase):
def setUp(self):
B = 3
self.theta = numpy.random.uniform(size=(B, 2, 3)).astype(numpy.float32)
self.output_shape = (5, 6)
self.grads = numpy.random.uniform(
size=(B, 2) + self.output_shape).astype(self.theta.dtype)
self.check_backward_options = {
'atol': 1e-4, 'rtol': 1e-3}
def check_forward(self, theta, output_shape):
grid = functions.spatial_transformer_grid(theta, output_shape).data
theta = cuda.to_cpu(theta)
B = theta.shape[0]
H, W = output_shape
expected = []
for b in range(B):
for i in numpy.linspace(-1., 1., H):
for j in numpy.linspace(-1., 1., W):
coord = numpy.array([j, i, 1])
expected.append(self.theta[b].dot(coord))
expected = numpy.array(
expected).reshape(B, H, W, 2).transpose(0, 3, 1, 2)
testing.assert_allclose(grid, expected)
self.assertEqual(grid.dtype, theta.dtype)
def test_forward_cpu(self):
self.check_forward(self.theta, self.output_shape)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.theta), self.output_shape)
def check_backward(self, theta, output_shape, grads):
with chainer.using_config('use_cudnn', self.use_cudnn):
gradient_check.check_backward(
functions.SpatialTransformerGrid(output_shape),
(theta,), (grads,), dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.theta, self.output_shape, self.grads)
@attr.gpu
def test_backward_gpu(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
self.check_backward(cuda.to_gpu(self.theta), self.output_shape,
cuda.to_gpu(self.grads))
testing.run_module(__name__, __file__)
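# --- Hedged illustration (added; not part of the original tests) ---
# Calling the function under test directly: `theta` holds one 2x3 affine matrix
# per batch element and the result is a (B, 2, H, W) sampling grid in [-1, 1].
def _example_identity_grid():
    theta = numpy.array([[[1., 0., 0.], [0., 1., 0.]]], dtype=numpy.float32)
    grid = functions.spatial_transformer_grid(theta, (5, 6))
    return grid.shape  # (1, 2, 5, 6)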
| 32.394366 | 79 | 0.633913 |
4a26b6f0eeb024c64cd89fdd6ae3293bffcc0087 | 1,337 | py | Python | source/evaluation/classification/evaluation.py | vered1986/NC_embeddings | 8dec4e2f7918ab7606abf61b9d90e4f2786a9652 | [
"Apache-2.0"
] | 9 | 2019-06-11T02:55:07.000Z | 2019-09-04T23:51:36.000Z | source/evaluation/classification/evaluation.py | vered1986/NC_embeddings | 8dec4e2f7918ab7606abf61b9d90e4f2786a9652 | [
"Apache-2.0"
] | null | null | null | source/evaluation/classification/evaluation.py | vered1986/NC_embeddings | 8dec4e2f7918ab7606abf61b9d90e4f2786a9652 | [
"Apache-2.0"
] | 2 | 2020-08-26T10:20:07.000Z | 2021-02-24T07:00:33.000Z | import codecs
from sklearn import metrics
def output_predictions(predictions_file, relations, predictions, test_set_keys, test_labels):
"""
Output the model predictions for the test set
:param predictions_file: the output file path
:param relations: the ordered list of relations
:param predictions: the predicted labels for the test set
    :param test_set_keys: the test set instances - a list of (w1, w2) pairs
    :param test_labels: the gold label indices for the test set
:return:
"""
with codecs.open(predictions_file, 'w', 'utf-8') as f_out:
for i, (w1, w2) in enumerate(test_set_keys):
f_out.write('\t'.join([w1, w2, relations[test_labels[i]], relations[predictions[i]]]) + '\n')
def evaluate(test_set, predictions):
"""
Evaluate performance of the model on the test set
:param test_set: the test set object
:param predictions: the predicted values
    :return: weighted precision, recall, F1 and support, plus the full classification report
"""
full_report = metrics.classification_report(test_set.labels, predictions,
labels=range(len(test_set.index2label)),
target_names=test_set.index2label, digits=3)
pre, rec, f1, support = metrics.precision_recall_fscore_support(test_set.labels, predictions, average='weighted')
return pre, rec, f1, support, full_report
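# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# `evaluate` only needs an object exposing `labels` and `index2label`; a tiny
# stand-in is enough to demonstrate the call.
def _example_evaluate():
    class _ToyTestSet(object):
        index2label = ['ATTRIBUTE', 'PURPOSE']
        labels = [0, 1, 1, 0]
    return evaluate(_ToyTestSet(), [0, 1, 0, 0])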
| 40.515152 | 117 | 0.667165 |
4a26b7224ae81404fd3db93f5c4e82c36aa013a0 | 39,971 | py | Python | tensorflow/python/framework/tensor_shape.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 848 | 2019-12-03T00:16:17.000Z | 2022-03-31T22:53:17.000Z | tensorflow/python/framework/tensor_shape.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 656 | 2019-12-03T00:48:46.000Z | 2022-03-31T18:41:54.000Z | tensorflow/python/framework/tensor_shape.py | MathMachado/tensorflow | 56afda20b15f234c23e8393f7e337e7dd2659c2d | [
"Apache-2.0"
] | 506 | 2019-12-03T00:46:26.000Z | 2022-03-30T10:34:56.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper classes for tensor shape inference."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python import tf2
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import dtypes
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
_TENSORSHAPE_V2_OVERRIDE = None
_api_usage_gauge = monitoring.BoolGauge(
"/tensorflow/api/v2_tensorshape",
"Whether tensor_shape.enable_v2_tensorshape() is called.")
@tf_export(v1=["enable_v2_tensorshape"])
def enable_v2_tensorshape():
"""In TensorFlow 2.0, iterating over a TensorShape instance returns values.
This enables the new behavior.
Concretely, `tensor_shape[i]` returned a Dimension instance in V1, but
  in V2 it returns either an integer, or None.
Examples:
```
#######################
# If you had this in V1:
value = tensor_shape[i].value
# Do this in V2 instead:
value = tensor_shape[i]
#######################
# If you had this in V1:
for dim in tensor_shape:
value = dim.value
print(value)
# Do this in V2 instead:
for value in tensor_shape:
print(value)
#######################
# If you had this in V1:
dim = tensor_shape[i]
dim.assert_is_compatible_with(other_shape) # or using any other shape method
# Do this in V2 instead:
if tensor_shape.rank is None:
dim = Dimension(None)
else:
dim = tensor_shape.dims[i]
dim.assert_is_compatible_with(other_shape) # or using any other shape method
# The V2 suggestion above is more explicit, which will save you from
# the following trap (present in V1):
# you might do in-place modifications to `dim` and expect them to be reflected
# in `tensor_shape[i]`, but they would not be.
```
"""
global _TENSORSHAPE_V2_OVERRIDE # pylint: disable=invalid-name
_TENSORSHAPE_V2_OVERRIDE = True
_api_usage_gauge.get_cell().set(True)
@tf_export(v1=["disable_v2_tensorshape"])
def disable_v2_tensorshape():
"""Disables the V2 TensorShape behavior and reverts to V1 behavior.
See docstring for `enable_v2_tensorshape` for details about the new behavior.
"""
global _TENSORSHAPE_V2_OVERRIDE # pylint: disable=invalid-name
_TENSORSHAPE_V2_OVERRIDE = False
_api_usage_gauge.get_cell().set(False)
@tf_export(
"compat.dimension_value", v1=["dimension_value", "compat.dimension_value"])
def dimension_value(dimension):
"""Compatibility utility required to allow for both V1 and V2 behavior in TF.
Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to
coexist with the new behavior. This utility is a bridge between the two.
When accessing the value of a TensorShape dimension,
use this utility, like this:
```
# If you had this in your V1 code:
value = tensor_shape[i].value
# Use `dimension_value` as direct replacement compatible with both V1 & V2:
value = dimension_value(tensor_shape[i])
# This would be the V2 equivalent:
value = tensor_shape[i] # Warning: this will return the dim value in V2!
```
Arguments:
dimension: Either a `Dimension` instance, an integer, or None.
Returns:
A plain value, i.e. an integer or None.
"""
if isinstance(dimension, Dimension):
return dimension.value
return dimension
@tf_export(
"compat.dimension_at_index",
v1=["dimension_at_index", "compat.dimension_at_index"])
def dimension_at_index(shape, index):
"""Compatibility utility required to allow for both V1 and V2 behavior in TF.
Until the release of TF 2.0, we need the legacy behavior of `TensorShape` to
coexist with the new behavior. This utility is a bridge between the two.
If you want to retrieve the Dimension instance corresponding to a certain
index in a TensorShape instance, use this utility, like this:
```
# If you had this in your V1 code:
dim = tensor_shape[i]
# Use `dimension_at_index` as direct replacement compatible with both V1 & V2:
dim = dimension_at_index(tensor_shape, i)
# Another possibility would be this, but WARNING: it only works if the
# tensor_shape instance has a defined rank.
dim = tensor_shape.dims[i] # `dims` may be None if the rank is undefined!
# In native V2 code, we recommend instead being more explicit:
if tensor_shape.rank is None:
dim = Dimension(None)
else:
dim = tensor_shape.dims[i]
# Being more explicit will save you from the following trap (present in V1):
# you might do in-place modifications to `dim` and expect them to be reflected
# in `tensor_shape[i]`, but they would not be (as the Dimension object was
  # instantiated on the fly).
```
Arguments:
shape: A TensorShape instance.
index: An integer index.
Returns:
A dimension object.
"""
assert isinstance(shape, TensorShape)
if shape.rank is None:
return Dimension(None)
else:
return shape.dims[index]
@tf_export(v1=["Dimension"])
class Dimension(object):
"""Represents the value of one dimension in a TensorShape."""
def __init__(self, value):
"""Creates a new Dimension with the given value."""
if value is None:
self._value = None
elif isinstance(value, Dimension):
self._value = value.value
elif isinstance(value, dtypes.DType):
raise TypeError("Cannot convert %s to Dimension" % value)
else:
self._value = int(value)
if (not isinstance(value, compat.bytes_or_text_types) and
self._value != value):
raise ValueError("Ambiguous dimension: %s" % value)
if self._value < 0:
raise ValueError("Dimension %d must be >= 0" % self._value)
def __repr__(self):
return "Dimension(%s)" % repr(self._value)
def __str__(self):
value = self._value
return "?" if value is None else str(value)
def __eq__(self, other):
"""Returns true if `other` has the same known value as this Dimension."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value == other.value
def __ne__(self, other):
"""Returns true if `other` has a different known value from `self`."""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return None
return self._value != other.value
def __int__(self):
return self._value
# This is needed for Windows.
# See https://github.com/tensorflow/tensorflow/pull/9780
def __long__(self):
return self._value
def __index__(self):
# Allow use in Python 3 range
return self._value
@property
def value(self):
"""The value of this dimension, or None if it is unknown."""
return self._value
def is_compatible_with(self, other):
"""Returns true if `other` is compatible with this Dimension.
Two known Dimensions are compatible if they have the same value.
An unknown Dimension is compatible with all other Dimensions.
Args:
other: Another Dimension.
Returns:
True if this Dimension and `other` are compatible.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
return (self._value is None or other.value is None or
self._value == other.value)
def assert_is_compatible_with(self, other):
"""Raises an exception if `other` is not compatible with this Dimension.
Args:
other: Another Dimension.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
if not self.is_compatible_with(other):
raise ValueError("Dimensions %s and %s are not compatible" %
(self, other))
def merge_with(self, other):
"""Returns a Dimension that combines the information in `self` and `other`.
Dimensions are combined as follows:
```python
tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(n)) ==
tf.compat.v1.Dimension(n)
tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(None)) ==
tf.compat.v1.Dimension(n)
tf.compat.v1.Dimension(None).merge_with(tf.compat.v1.Dimension(n)) ==
tf.compat.v1.Dimension(n)
# equivalent to tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None).merge_with(tf.compat.v1.Dimension(None))
# raises ValueError for n != m
tf.compat.v1.Dimension(n) .merge_with(tf.compat.v1.Dimension(m))
```
Args:
other: Another Dimension.
Returns:
A Dimension containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible (see
is_compatible_with).
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
self.assert_is_compatible_with(other)
if self._value is None:
return Dimension(other.value)
else:
return Dimension(self._value)
def __add__(self, other):
"""Returns the sum of `self` and `other`.
Dimensions are summed as follows:
```python
tf.compat.v1.Dimension(m) + tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m + n)
tf.compat.v1.Dimension(m) + tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) + tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value + other.value)
def __radd__(self, other):
"""Returns the sum of `other` and `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the sum of `self` and `other`.
"""
return self + other
def __sub__(self, other):
"""Returns the subtraction of `other` from `self`.
Dimensions are subtracted as follows:
```python
tf.compat.v1.Dimension(m) - tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m - n)
tf.compat.v1.Dimension(m) - tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) - tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `other` from `self`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value - other.value)
def __rsub__(self, other):
"""Returns the subtraction of `self` from `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the subtraction of `self` from `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value - self._value)
def __mul__(self, other):
"""Returns the product of `self` and `other`.
Dimensions are summed as follows:
```python
tf.compat.v1.Dimension(m) * tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m * n)
tf.compat.v1.Dimension(m) * tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) * tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value * other.value)
def __rmul__(self, other):
"""Returns the product of `self` and `other`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is the product of `self` and `other`.
"""
return self * other
def __floordiv__(self, other):
"""Returns the quotient of `self` and `other` rounded down.
Dimensions are divided as follows:
```python
tf.compat.v1.Dimension(m) // tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m // n)
tf.compat.v1.Dimension(m) // tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) // tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
try:
other = as_dimension(other)
except (TypeError, ValueError):
return NotImplemented
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value // other.value)
def __rfloordiv__(self, other):
"""Returns the quotient of `other` and `self` rounded down.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(other.value // self._value)
def __div__(self, other):
"""DEPRECATED: Use `__floordiv__` via `x // y` instead.
This function exists only for backwards compatibility purposes; new code
should use `__floordiv__` via the syntax `x // y`. Using `x // y`
communicates clearly that the result rounds down, and is forward compatible
to Python 3.
Args:
other: Another `Dimension`.
Returns:
A `Dimension` whose value is the integer quotient of `self` and `other`.
"""
return self // other
def __rdiv__(self, other):
"""Use `__floordiv__` via `x // y` instead.
This function exists only to have a better error message. Instead of:
`TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,
this function will explicitly call for usage of `//` instead.
Args:
other: Another `Dimension`.
Raises:
TypeError.
"""
raise TypeError("unsupported operand type(s) for /: '{}' and 'Dimension', "
"please use // instead".format(type(other).__name__))
def __truediv__(self, other):
"""Use `__floordiv__` via `x // y` instead.
This function exists only to have a better error message. Instead of:
`TypeError: unsupported operand type(s) for /: 'Dimension' and 'int'`,
this function will explicitly call for usage of `//` instead.
Args:
other: Another `Dimension`.
Raises:
TypeError.
"""
raise TypeError("unsupported operand type(s) for /: 'Dimension' and '{}', "
"please use // instead".format(type(other).__name__))
def __rtruediv__(self, other):
"""Use `__floordiv__` via `x // y` instead.
This function exists only to have a better error message. Instead of:
`TypeError: unsupported operand type(s) for /: 'int' and 'Dimension'`,
this function will explicitly call for usage of `//` instead.
Args:
other: Another `Dimension`.
Raises:
TypeError.
"""
raise TypeError("unsupported operand type(s) for /: '{}' and 'Dimension', "
"please use // instead".format(type(other).__name__))
def __mod__(self, other):
"""Returns `self` modulo `other`.
Dimension moduli are computed as follows:
```python
tf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(n) ==
tf.compat.v1.Dimension(m % n)
tf.compat.v1.Dimension(m) % tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(n) # equiv. to
tf.compat.v1.Dimension(None)
tf.compat.v1.Dimension(None) % tf.compat.v1.Dimension(None) # equiv. to
tf.compat.v1.Dimension(None)
```
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is `self` modulo `other`.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return Dimension(None)
else:
return Dimension(self._value % other.value)
def __rmod__(self, other):
"""Returns `other` modulo `self`.
Args:
other: Another Dimension, or a value accepted by `as_dimension`.
Returns:
A Dimension whose value is `other` modulo `self`.
"""
other = as_dimension(other)
return other % self
def __lt__(self, other):
"""Returns True if `self` is known to be less than `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) < tf.compat.v1.Dimension(n)) == (m < n)
(tf.compat.v1.Dimension(m) < tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) < tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value < other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value < other.value
def __le__(self, other):
"""Returns True if `self` is known to be less than or equal to `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) <= tf.compat.v1.Dimension(n)) == (m <= n)
(tf.compat.v1.Dimension(m) <= tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) <= tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value <= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value <= other.value
def __gt__(self, other):
"""Returns True if `self` is known to be greater than `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) > tf.compat.v1.Dimension(n)) == (m > n)
(tf.compat.v1.Dimension(m) > tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) > tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value > other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value > other.value
def __ge__(self, other):
"""Returns True if `self` is known to be greater than or equal to `other`.
Dimensions are compared as follows:
```python
(tf.compat.v1.Dimension(m) >= tf.compat.v1.Dimension(n)) == (m >= n)
(tf.compat.v1.Dimension(m) >= tf.compat.v1.Dimension(None)) == None
(tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(n)) == None
(tf.compat.v1.Dimension(None) >= tf.compat.v1.Dimension(None)) == None
```
Args:
other: Another Dimension.
Returns:
The value of `self.value >= other.value` if both are known, otherwise
None.
"""
other = as_dimension(other)
if self._value is None or other.value is None:
return None
else:
return self._value >= other.value
def __reduce__(self):
return Dimension, (self._value,)
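# --- Hedged illustration (added; not part of the original module) ---
# Dimension arithmetic propagates unknown (None) values, as documented above.
def _dimension_examples():
  assert (Dimension(8) + Dimension(4)).value == 12
  assert (Dimension(8) + Dimension(None)).value is None
  assert Dimension(8).is_compatible_with(Dimension(None))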
def as_dimension(value):
"""Converts the given value to a Dimension.
A Dimension input will be returned unmodified.
An input of `None` will be converted to an unknown Dimension.
An integer input will be converted to a Dimension with that value.
Args:
value: The value to be converted.
Returns:
A Dimension corresponding to the given value.
"""
if isinstance(value, Dimension):
return value
else:
return Dimension(value)
@tf_export("TensorShape")
class TensorShape(object):
"""Represents the shape of a `Tensor`.
A `TensorShape` represents a possibly-partial shape specification for a
`Tensor`. It may be one of the following:
* *Fully-known shape:* has a known number of dimensions and a known size
for each dimension. e.g. `TensorShape([16, 256])`
* *Partially-known shape:* has a known number of dimensions, and an unknown
size for one or more dimension. e.g. `TensorShape([None, 256])`
* *Unknown shape:* has an unknown number of dimensions, and an unknown
size in all dimensions. e.g. `TensorShape(None)`
If a tensor is produced by an operation of type `"Foo"`, its shape
may be inferred if there is a registered shape function for
`"Foo"`. See [Shape
functions](https://tensorflow.org/extend/adding_an_op#shape_functions_in_c)
for details of shape functions and how to register them. Alternatively,
the shape may be set explicitly using `tf.Tensor.set_shape`.
"""
def __init__(self, dims):
"""Creates a new TensorShape with the given dimensions.
Args:
dims: A list of Dimensions, or None if the shape is unspecified.
Raises:
TypeError: If dims cannot be converted to a list of dimensions.
"""
if dims is None:
self._dims = None
elif isinstance(dims, compat.bytes_or_text_types):
raise TypeError("A string has ambiguous TensorShape, please wrap in a "
"list or convert to an int: %s" % dims)
elif isinstance(dims, tensor_shape_pb2.TensorShapeProto):
if dims.unknown_rank:
self._dims = None
else:
self._dims = [
# Protos store variable-size dimensions as -1
as_dimension(dim.size if dim.size != -1 else None)
for dim in dims.dim
]
elif isinstance(dims, TensorShape):
self._dims = dims.dims
else:
try:
dims_iter = iter(dims)
except TypeError:
# Treat as a singleton dimension
self._dims = [as_dimension(dims)]
else:
# Got a list of dimensions
self._dims = [as_dimension(d) for d in dims_iter]
@property
def _v2_behavior(self):
if _TENSORSHAPE_V2_OVERRIDE is None:
return tf2.enabled()
return _TENSORSHAPE_V2_OVERRIDE
def __repr__(self):
if self._v2_behavior:
if self._dims is not None:
return "TensorShape(%r)" % [dim.value for dim in self._dims]
else:
return "TensorShape(None)"
else:
return "TensorShape(%r)" % self._dims
def __str__(self):
if self.rank is None:
return "<unknown>"
elif self.rank == 1:
if self._v2_behavior:
return "(%s,)" % self._dims[0].value
else:
return "(%s,)" % self._dims[0]
else:
if self._v2_behavior:
return "(%s)" % ", ".join(str(d.value) for d in self._dims)
else:
return "(%s)" % ", ".join(str(d) for d in self._dims)
@property
def rank(self):
"""Returns the rank of this shape, or None if it is unspecified."""
if self._dims is not None:
return len(self._dims)
return None
@property
def dims(self):
"""Returns a list of Dimensions, or None if the shape is unspecified."""
return self._dims
@property
def ndims(self):
"""Deprecated accessor for `rank`."""
return self.rank
def __len__(self):
"""Returns the rank of this shape, or raises ValueError if unspecified."""
if self._dims is None:
raise ValueError("Cannot take the length of shape with unknown rank.")
return len(self._dims)
def __bool__(self):
"""Returns True if this shape contains non-zero information."""
return self._dims is not None
# Python 3 wants __bool__, Python 2.7 wants __nonzero__
__nonzero__ = __bool__
def __iter__(self):
"""Returns `self.dims` if the rank is known, otherwise raises ValueError."""
if self._dims is None:
raise ValueError("Cannot iterate over a shape with unknown rank.")
else:
if self._v2_behavior:
return iter(d.value for d in self._dims)
else:
return iter(d for d in self._dims)
def __getitem__(self, key):
"""Returns the value of a dimension or a shape, depending on the key.
Args:
key: If `key` is an integer, returns the dimension at that index;
otherwise if `key` is a slice, returns a TensorShape whose dimensions
are those selected by the slice from `self`.
Returns:
An integer if `key` is an integer, or a `TensorShape` if `key` is a
slice.
Raises:
ValueError: If `key` is a slice and `self` is completely unknown and
the step is set.
"""
if self._dims is not None:
if isinstance(key, slice):
return TensorShape(self._dims[key])
else:
if self._v2_behavior:
return self._dims[key].value
else:
return self._dims[key]
else:
if isinstance(key, slice):
start = key.start if key.start is not None else 0
stop = key.stop
if key.step is not None:
# TODO(mrry): Handle these maybe.
raise ValueError("Steps are not yet handled")
if stop is None:
# NOTE(mrry): This implies that TensorShape(None) is compatible with
# TensorShape(None)[1:], which is obviously not true. It would be
# possible to track the number of dimensions symbolically,
# and perhaps we should do that.
return unknown_shape()
elif start < 0 or stop < 0:
# TODO(mrry): Handle this better, as it will be useful for handling
# suffixes of otherwise unknown shapes.
return unknown_shape()
else:
return unknown_shape(rank=stop - start)
else:
if self._v2_behavior:
return None
else:
return Dimension(None)
def num_elements(self):
"""Returns the total number of elements, or none for incomplete shapes."""
if self.is_fully_defined():
size = 1
for dim in self._dims:
size *= dim.value
return size
else:
return None
def merge_with(self, other):
"""Returns a `TensorShape` combining the information in `self` and `other`.
The dimensions in `self` and `other` are merged elementwise,
according to the rules defined for `Dimension.merge_with()`.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` containing the combined information of `self` and
`other`.
Raises:
ValueError: If `self` and `other` are not compatible.
"""
other = as_shape(other)
if self._dims is None:
return other
else:
try:
self.assert_same_rank(other)
new_dims = []
for i, dim in enumerate(self._dims):
new_dims.append(dim.merge_with(other[i]))
return TensorShape(new_dims)
except ValueError:
raise ValueError("Shapes %s and %s are not compatible" % (self, other))
def __add__(self, other):
if not isinstance(other, TensorShape):
other = TensorShape(other)
return self.concatenate(other)
def __radd__(self, other):
if not isinstance(other, TensorShape):
other = TensorShape(other)
return other.concatenate(self)
def concatenate(self, other):
"""Returns the concatenation of the dimension in `self` and `other`.
*N.B.* If either `self` or `other` is completely unknown,
concatenation will discard information about the other shape. In
future, we might support concatenation that preserves this
information for use with slicing.
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` whose dimensions are the concatenation of the
dimensions in `self` and `other`.
"""
# TODO(mrry): Handle the case where we concatenate a known shape with a
# completely unknown shape, so that we can use the partial information.
other = as_shape(other)
if self._dims is None or other.dims is None:
return unknown_shape()
else:
return TensorShape(self._dims + other.dims)
def assert_same_rank(self, other):
"""Raises an exception if `self` and `other` do not have compatible ranks.
Args:
other: Another `TensorShape`.
Raises:
ValueError: If `self` and `other` do not represent shapes with the
same rank.
"""
other = as_shape(other)
if self.rank is not None and other.rank is not None:
if self.rank != other.rank:
raise ValueError("Shapes %s and %s must have the same rank" %
(self, other))
def assert_has_rank(self, rank):
"""Raises an exception if `self` is not compatible with the given `rank`.
Args:
rank: An integer.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
if self.rank not in (None, rank):
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank(self, rank):
"""Returns a shape based on `self` with the given rank.
This method promotes a completely unknown shape to one with a
known rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with the given rank.
Raises:
ValueError: If `self` does not represent a shape with the given `rank`.
"""
try:
return self.merge_with(unknown_shape(rank=rank))
except ValueError:
raise ValueError("Shape %s must have rank %d" % (self, rank))
def with_rank_at_least(self, rank):
"""Returns a shape based on `self` with at least the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at least the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at least the given
`rank`.
"""
if self.rank is not None and self.rank < rank:
raise ValueError("Shape %s must have rank at least %d" % (self, rank))
else:
return self
def with_rank_at_most(self, rank):
"""Returns a shape based on `self` with at most the given rank.
Args:
rank: An integer.
Returns:
A shape that is at least as specific as `self` with at most the given
rank.
Raises:
ValueError: If `self` does not represent a shape with at most the given
`rank`.
"""
if self.rank is not None and self.rank > rank:
raise ValueError("Shape %s must have rank at most %d" % (self, rank))
else:
return self
def is_compatible_with(self, other):
"""Returns True iff `self` is compatible with `other`.
Two possibly-partially-defined shapes are compatible if there
exists a fully-defined shape that both shapes can represent. Thus,
compatibility allows the shape inference code to reason about
partially-defined shapes. For example:
* TensorShape(None) is compatible with all shapes.
* TensorShape([None, None]) is compatible with all two-dimensional
shapes, such as TensorShape([32, 784]), and also TensorShape(None). It is
not compatible with, for example, TensorShape([None]) or
TensorShape([None, None, None]).
* TensorShape([32, None]) is compatible with all two-dimensional shapes
with size 32 in the 0th dimension, and also TensorShape([None, None])
and TensorShape(None). It is not compatible with, for example,
TensorShape([32]), TensorShape([32, None, 1]) or TensorShape([64, None]).
* TensorShape([32, 784]) is compatible with itself, and also
TensorShape([32, None]), TensorShape([None, 784]), TensorShape([None,
None]) and TensorShape(None). It is not compatible with, for example,
TensorShape([32, 1, 784]) or TensorShape([None]).
The compatibility relation is reflexive and symmetric, but not
transitive. For example, TensorShape([32, 784]) is compatible with
TensorShape(None), and TensorShape(None) is compatible with
TensorShape([4, 4]), but TensorShape([32, 784]) is not compatible with
TensorShape([4, 4]).
Args:
other: Another TensorShape.
Returns:
True iff `self` is compatible with `other`.
"""
other = as_shape(other)
if self._dims is not None and other.dims is not None:
if self.rank != other.rank:
return False
for x_dim, y_dim in zip(self._dims, other.dims):
if not x_dim.is_compatible_with(y_dim):
return False
return True
def assert_is_compatible_with(self, other):
"""Raises exception if `self` and `other` do not represent the same shape.
This method can be used to assert that there exists a shape that both
`self` and `other` represent.
Args:
other: Another TensorShape.
Raises:
ValueError: If `self` and `other` do not represent the same shape.
"""
if not self.is_compatible_with(other):
raise ValueError("Shapes %s and %s are incompatible" % (self, other))
def most_specific_compatible_shape(self, other):
"""Returns the most specific TensorShape compatible with `self` and `other`.
* TensorShape([None, 1]) is the most specific TensorShape compatible with
both TensorShape([2, 1]) and TensorShape([5, 1]). Note that
TensorShape(None) is also compatible with above mentioned TensorShapes.
* TensorShape([1, 2, 3]) is the most specific TensorShape compatible with
both TensorShape([1, 2, 3]) and TensorShape([1, 2, 3]). There are more
less specific TensorShapes compatible with above mentioned TensorShapes,
e.g. TensorShape([1, 2, None]), TensorShape(None).
Args:
other: Another `TensorShape`.
Returns:
A `TensorShape` which is the most specific compatible shape of `self`
and `other`.
"""
other = as_shape(other)
if self._dims is None or other.dims is None or self.rank != other.rank:
return unknown_shape()
dims = [(Dimension(None))] * self.rank
for i, (d1, d2) in enumerate(zip(self._dims, other.dims)):
if d1 is not None and d2 is not None and d1 == d2:
dims[i] = d1
return TensorShape(dims)
def is_fully_defined(self):
"""Returns True iff `self` is fully defined in every dimension."""
return (self._dims is not None and
all(dim.value is not None for dim in self._dims))
def assert_is_fully_defined(self):
"""Raises an exception if `self` is not fully defined in every dimension.
Raises:
ValueError: If `self` does not have a known value for every dimension.
"""
if not self.is_fully_defined():
raise ValueError("Shape %s is not fully defined" % self)
def as_list(self):
"""Returns a list of integers or `None` for each dimension.
Returns:
A list of integers or `None` for each dimension.
Raises:
ValueError: If `self` is an unknown shape with an unknown rank.
"""
if self._dims is None:
raise ValueError("as_list() is not defined on an unknown TensorShape.")
return [dim.value for dim in self._dims]
def as_proto(self):
"""Returns this shape as a `TensorShapeProto`."""
if self._dims is None:
return tensor_shape_pb2.TensorShapeProto(unknown_rank=True)
else:
return tensor_shape_pb2.TensorShapeProto(dim=[
tensor_shape_pb2.TensorShapeProto.Dim(
size=-1 if d.value is None else d.value) for d in self._dims
])
def __eq__(self, other):
"""Returns True if `self` is equivalent to `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
return self._dims == other.dims
def __ne__(self, other):
"""Returns True if `self` is known to be different from `other`."""
try:
other = as_shape(other)
except TypeError:
return NotImplemented
if self.rank is None or other.rank is None:
raise ValueError("The inequality of unknown TensorShapes is undefined.")
if self.rank != other.rank:
return True
return self._dims != other.dims
def __reduce__(self):
return TensorShape, (self._dims,)
def __concat__(self, other):
return self.concatenate(other)
def as_shape(shape):
"""Converts the given object to a TensorShape."""
if isinstance(shape, TensorShape):
return shape
else:
return TensorShape(shape)
def unknown_shape(rank=None, **kwargs):
"""Returns an unknown TensorShape, optionally with a known rank.
Args:
rank: (Optional) If specified, the number of dimensions in the shape.
**kwargs: For backwards compatibility.
Returns:
An unknown TensorShape.
Raises:
TypeError: In case of invalid arguments.
"""
if rank is None and "ndims" in kwargs:
rank = kwargs.pop("ndims")
if kwargs:
raise TypeError("Unknown argument: %s" % kwargs)
if rank is None:
return TensorShape(None)
else:
return TensorShape([Dimension(None)] * rank)
@deprecation.deprecated(None, "Use tf.TensorShape([]).")
def scalar():
"""Returns a shape representing a scalar."""
return TensorShape([])
@deprecation.deprecated(None, "Use tf.TensorShape([length]).")
def vector(length):
"""Returns a shape representing a vector.
Args:
length: The length of the vector, which may be None if unknown.
Returns:
A TensorShape representing a vector of the given length.
"""
return TensorShape([length])
@deprecation.deprecated(None, "Use tf.TensorShape([rows, cols]).")
def matrix(rows, cols):
"""Returns a shape representing a matrix.
Args:
rows: The number of rows in the matrix, which may be None if unknown.
cols: The number of columns in the matrix, which may be None if unknown.
Returns:
A TensorShape representing a matrix of the given size.
"""
return TensorShape([rows, cols])
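# --- Hedged illustration (added; not part of the original module) ---
# Typical TensorShape interactions, following the docstrings above.
def _tensor_shape_examples():
  assert TensorShape([None, 256]).is_compatible_with(TensorShape([32, 256]))
  assert TensorShape([None, 256]).merge_with(TensorShape([32, None])).as_list() == [32, 256]
  assert TensorShape([32, 256]).concatenate(TensorShape([3])).as_list() == [32, 256, 3]
  assert unknown_shape(rank=2).rank == 2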
| 31.399057 | 80 | 0.660029 |
4a26b8cb6dad6d78f867d2551fc91352720b66e2 | 4,195 | py | Python | ops.py | postBG/fashionMNIST-DCGAN-tensorflow | cd13df6425d30132064ddaa1925280433e827d2c | [
"MIT"
] | null | null | null | ops.py | postBG/fashionMNIST-DCGAN-tensorflow | cd13df6425d30132064ddaa1925280433e827d2c | [
"MIT"
] | null | null | null | ops.py | postBG/fashionMNIST-DCGAN-tensorflow | cd13df6425d30132064ddaa1925280433e827d2c | [
"MIT"
] | null | null | null | import tensorflow as tf
from layers import leaky_relu, batch_norm
GENERATOR = 'generator'
DISCRIMINATOR = 'discriminator'
# TODO: Figure out how to collect all of these variables
# TODO: Extract hyperparameters
def generator(z, output_channel=1, reuse=False, training=True, kernel_size=4):
"""
    This function creates the generator.
    :param z: random noise, e.g. a tensor of shape [None, 100]
    :param output_channel: number of channels of the generated data, e.g. 3 for SVHN, 1 for MNIST
    :param reuse: whether to reuse the variables of the generator variable scope
    :param training: whether batch normalization should run in training mode
    :param kernel_size: kernel size of the transposed convolution layers
    :return: generated (fake) data tensor, e.g. of shape [None, 28, 28, 1] for MNIST
"""
with tf.variable_scope(GENERATOR, reuse=reuse):
with tf.name_scope('layer1'):
projected_z = tf.layers.dense(z, 7 * 7 * 256)
reshaped_z = tf.reshape(projected_z, [-1, 7, 7, 256])
layer1 = batch_norm(reshaped_z, training=training)
layer1 = tf.nn.relu(layer1)
with tf.name_scope('layer2'):
layer2 = tf.layers.conv2d_transpose(layer1, 128, kernel_size, strides=2, padding='same')
layer2 = batch_norm(layer2, training=training)
layer2 = tf.nn.relu(layer2)
with tf.name_scope('layer3'):
layer3 = tf.layers.conv2d_transpose(layer2, 64, kernel_size, strides=2, padding='same')
layer3 = batch_norm(layer3, training=training)
layer3 = tf.nn.relu(layer3)
with tf.name_scope('output'):
logits = tf.layers.conv2d_transpose(layer3, output_channel, kernel_size, strides=1, padding='same')
output = tf.nn.tanh(logits)
return output
# TODO: Extract hyperparameters and pay attention to the weight initializer
def discriminator(images, reuse=False, alpha=0.2):
"""
    Create the discriminator.
    :param images: input image tensor, e.g. of shape [None, 28, 28, 1]
    :param reuse: whether to reuse the variables of the discriminator variable scope
    :param alpha: negative slope of the leaky ReLU
    :return: discriminator output (sigmoid probability) and logits
"""
with tf.variable_scope(DISCRIMINATOR, reuse=reuse):
with tf.name_scope('layer1'):
layer1 = tf.layers.conv2d(images, 64, 4, strides=2, padding='same')
layer1 = leaky_relu(layer1, alpha)
# 14x14x64
with tf.name_scope('layer2'):
layer2 = tf.layers.conv2d(layer1, 128, 4, strides=2, padding='same')
layer2 = batch_norm(layer2, training=True)
layer2 = leaky_relu(layer2, alpha)
# 7x7x128
with tf.name_scope('layer3'):
layer3 = tf.layers.conv2d(layer2, 256, 4, strides=2, padding='same')
layer3 = batch_norm(layer3, training=True)
layer3 = leaky_relu(layer3, alpha)
# 4x4x256
# TODO: Make robust to tensor shapes using tensor's get_shape method
with tf.name_scope('output'):
flatten = tf.reshape(layer3, [-1, 4 * 4 * 256])
logits = tf.layers.dense(flatten, 1)
output = tf.nn.sigmoid(logits)
return output, logits
def model_loss(input_real, input_z, output_channel=1, kernel_size=4, alpha=0.2):
"""
Get the loss for the discriminator and generator
    :param input_real: real images, expected to be normalized to [-1, 1]
    :param input_z: random noise tensor fed to the generator
    :param output_channel: The number of channels in the output image
    :param kernel_size: kernel size of the generator's transposed convolution layers
    :param alpha: negative slope of the leaky ReLU in the discriminator
    :return: d_loss, g_loss
"""
g_model = generator(input_z, output_channel=output_channel, reuse=False, training=True, kernel_size=kernel_size)
d_model_real, d_logits_real = discriminator(input_real, reuse=False, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, alpha=alpha)
g_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.ones_like(d_logits_fake)))
d_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real, labels=tf.ones_like(d_logits_real)))
d_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake, labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake
return d_loss, g_loss
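# --- Hedged training sketch (added for illustration; not part of the original module) ---
# One way to wire the losses above into train ops (TF 1.x style, matching the
# tf.layers / tf.variable_scope API used in this file). The learning rate and
# beta1 values are placeholder hyperparameters, and the UPDATE_OPS dependency
# only matters if the imported batch_norm registers its moving-average updates there.
def _example_train_ops(d_loss, g_loss, learning_rate=2e-4, beta1=0.5):
    trainable = tf.trainable_variables()
    d_vars = [v for v in trainable if v.name.startswith(DISCRIMINATOR)]
    g_vars = [v for v in trainable if v.name.startswith(GENERATOR)]
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        d_train_op = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(d_loss, var_list=d_vars)
        g_train_op = tf.train.AdamOptimizer(learning_rate, beta1=beta1).minimize(g_loss, var_list=g_vars)
    return d_train_op, g_train_op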
| 38.842593 | 116 | 0.66031 |
4a26b915385f936315fa9bb75ce019517bfe4fb2 | 2,453 | py | Python | internal/notes/builtin-SAVE/packages/r-genomicalignments/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 1 | 2019-01-17T20:07:19.000Z | 2019-01-17T20:07:19.000Z | internal/notes/builtin-SAVE/packages/r-genomicalignments/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | null | null | null | internal/notes/builtin-SAVE/packages/r-genomicalignments/package.py | HPCToolkit/hpctest | 5ff4455582bf39e75530a31badcf6142081b386b | [
"BSD-3-Clause"
] | 2 | 2019-08-06T18:13:57.000Z | 2021-11-05T18:19:49.000Z | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RGenomicalignments(RPackage):
"""Provides efficient containers for storing and manipulating short genomic
alignments (typically obtained by aligning short reads to a reference
genome). This includes read counting, computing the coverage, junction
detection, and working with the nucleotide content of the alignments."""
homepage = "https://bioconductor.org/packages/GenomicAlignments/"
url = "https://git.bioconductor.org/packages/GenomicAlignments"
list_url = homepage
version('1.12.2', git='https://git.bioconductor.org/packages/GenomicAlignments', commit='b5d6f19e4a89b6c1c3e9e58e5ea4eb13870874ef')
depends_on('r-biocgenerics', type=('build', 'run'))
depends_on('r-s4vectors', type=('build', 'run'))
depends_on('r-iranges', type=('build', 'run'))
depends_on('r-genomeinfodb', type=('build', 'run'))
depends_on('r-genomicranges', type=('build', 'run'))
depends_on('r-summarizedexperiment', type=('build', 'run'))
depends_on('r-biostrings', type=('build', 'run'))
depends_on('r-rsamtools', type=('build', 'run'))
depends_on('r-biocparallel', type=('build', 'run'))
depends_on('[email protected]:3.4.9', when='@1.12.2')
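# Example usage (not part of the original recipe): with this file on a Spack repo path,
# the package would typically be installed and made available with something like
#   spack install [email protected]
#   spack load r-genomicalignments
# (exact spec/load syntax can vary across Spack versions).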
| 49.06 | 135 | 0.684468 |
4a26b9543333c1b3a047270ae2244ed31295e414 | 7,186 | py | Python | pkg/scheduler/patcher/main.py | yaroslaver/csi-baremetal | f0dc8d36de06bd19613ff5683069919534db54ce | [
"Apache-2.0"
] | null | null | null | pkg/scheduler/patcher/main.py | yaroslaver/csi-baremetal | f0dc8d36de06bd19613ff5683069919534db54ce | [
"Apache-2.0"
] | null | null | null | pkg/scheduler/patcher/main.py | yaroslaver/csi-baremetal | f0dc8d36de06bd19613ff5683069919534db54ce | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*
# Copyright © 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
import time
import argparse
import sys
from os.path import isfile, dirname, basename, join
from os import makedirs
from shutil import copy
from signal import signal, SIGINT, SIGTERM
from filecmp import cmp, clear_cache
import logging
log = logging.getLogger('patcher')
def run():
parser = argparse.ArgumentParser(
description='Patcher script for csi-baremetal kube-extender')
parser.add_argument(
'--manifest', help='path to the scheduler manifest file', required=True)
parser.add_argument(
'--restore', help='restore manifest when on shutdown', action='store_true')
parser.add_argument('--interval', type=int,
help='interval to check manifest config')
parser.add_argument('--target-config-path',
help='target path for scheduler config file', required=True)
parser.add_argument('--target-policy-path',
help='target path for scheduler policy file', required=True)
parser.add_argument('--source-config-path',
help='source path for scheduler config file', required=True)
parser.add_argument('--source-policy-path',
help='source path for scheduler policy file', required=True)
parser.add_argument(
'--loglevel', help="Set level for logging", dest="loglevel", default='info')
parser.add_argument(
'--backup-path', help="Set path for backup folder", default='/etc/kubernetes/scheduler')
args = parser.parse_args()
logging.basicConfig(level=logging.getLevelName(args.loglevel.upper()))
log.info('patcher started')
source_config = File(args.source_config_path)
source_policy = File(args.source_policy_path)
target_config = File(args.target_config_path)
target_policy = File(args.target_policy_path)
config_volume = Volume("scheduler-config", args.target_config_path)
config_volume.compile_config()
policy_volume = Volume("scheduler-policy", args.target_policy_path)
policy_volume.compile_config()
manifest = ManifestFile(
args.manifest, [config_volume, policy_volume], args.target_config_path, args.backup_path)
# add watcher on signals
killer = GracefulKiller(args.restore, manifest)
killer.watch(SIGINT)
killer.watch(SIGTERM)
while True:
# check everything is in a right place
_must_exist(manifest, source_config, source_policy)
# copy config and policy if they don't exist or they have different content
copy_not_equal(source_config, target_config)
copy_not_equal(source_policy, target_policy)
# work with content of manifest file
manifest.load()
manifest.patch()
if manifest.changed:
manifest.backup()
manifest.flush()
log.info('manifest file({}) was patched'.format(manifest.path))
log.debug('sleeping {} seconds'.format(args.interval))
time.sleep(args.interval)
class GracefulKiller:
def __init__(self, restore, file):
self.restore = restore
self.file = file
def exit_gracefully(self, signum, frame):
if self.restore:
self.file.restore()
sys.exit(0)
def watch(self, sig):
signal(sig, self.exit_gracefully)
class File:
def __init__(self, path):
self.path = path
def copy(self, target_file):
makedirs(dirname(target_file.path), exist_ok=True)
copy(self.path, target_file.path)
def exists(self):
return isfile(self.path)
def equal(self, target_file):
if target_file.exists():
clear_cache()
return cmp(self.path, target_file.path)
return False
class Volume:
def __init__(self, name, path):
self.name = name
self.path = path
def compile_config(self):
self.mount_path = {'name': self.name,
'mountPath': self.path, 'readOnly': True}
self.container_volume = {
'name': self.name,
'hostPath': {
'path': self.path,
'type': 'File'}
}
class ManifestFile(File):
def __init__(self, path, volumes, config_path, backup_folder):
self.path = path
self.backup_folder = backup_folder
self.volumes = volumes
self.config_path = config_path
def backup(self):
        # treat backup_folder as a directory: create it and place the manifest copy inside it
        makedirs(self.backup_folder, exist_ok=True)
        backup_path = join(self.backup_folder, basename(self.path))
        copy(self.path, backup_path)
def restore(self):
        backup_path = join(self.backup_folder, basename(self.path))
copy(backup_path, self.path)
def need_patching(self):
self.changed = True
def load(self):
with open(self.path, 'r') as f:
content = f.read()
self.content = yaml.load(content, Loader=yaml.FullLoader)
log.debug('manifest {} loaded'.format(self.path))
self.changed = False
def flush(self):
with open(self.path, 'w') as f:
yaml.dump(self.content, f)
log.debug('manifest {} dumped'.format(self.path))
def patch_volumes(self):
volumes = self.content['spec']['volumes']
volumeMounts = self.content['spec']['containers'][0]['volumeMounts']
for volume in self.volumes:
if not _name_exists(volumes, volume.name):
volumes.append(volume.container_volume)
self.need_patching()
if not _name_exists(volumeMounts, volume.name):
volumeMounts.append(volume.mount_path)
self.need_patching()
def patch_commands(self):
commands = self.content['spec']['containers'][0]['command']
config_command = '--config={}'.format(self.config_path)
if config_command not in commands:
commands.append(config_command)
self.need_patching()
def patch(self):
self.patch_commands()
self.patch_volumes()
def _name_exists(items, name):
for i in items:
if i['name'] == name:
return True
return False
def _must_exist(*files):
for f in files:
if not f.exists():
raise FileNotFoundError(
'One of the required files is not there - {}'.format(f.path))
def copy_not_equal(src, dst):
if not src.equal(dst):
src.copy(dst)
log.info('{} copied to {}'.format(src.path, dst.path))
if __name__ == "__main__":
run()
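# Example invocation (not part of the original script; the flag values below are purely
# illustrative -- the real csi-baremetal deployment may pass different paths):
#   python3 main.py \
#       --manifest /etc/kubernetes/manifests/kube-scheduler.yaml \
#       --source-config-path /config/config.yaml \
#       --source-policy-path /config/policy.yaml \
#       --target-config-path /etc/kubernetes/scheduler/config.yaml \
#       --target-policy-path /etc/kubernetes/scheduler/policy.yaml \
#       --interval 60 --restore --loglevel debug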
| 32.224215 | 97 | 0.641108 |
4a26b9ce13b5e35499044d559c681b52c96e991c | 1,101 | py | Python | tapis_cli/commands/taccapis/v2/systems/models/system_history.py | shwetagopaul92/tapis-cli-ng | 6f424b8352c0d034d4f5547fac21d5c8dd097a7f | [
"BSD-3-Clause"
] | null | null | null | tapis_cli/commands/taccapis/v2/systems/models/system_history.py | shwetagopaul92/tapis-cli-ng | 6f424b8352c0d034d4f5547fac21d5c8dd097a7f | [
"BSD-3-Clause"
] | null | null | null | tapis_cli/commands/taccapis/v2/systems/models/system_history.py | shwetagopaul92/tapis-cli-ng | 6f424b8352c0d034d4f5547fac21d5c8dd097a7f | [
"BSD-3-Clause"
] | null | null | null | from tapis_cli.display import Verbosity
from tapis_cli.search import argtype, argmod
from .system import System
__all__ = ['SystemHistory']
class SystemHistory(System):
"""Model of a Tapis system history record
"""
SEARCH_ARGS = [
# JSON_field, type, verbosity, mods_allowed, default_mod, choices, override_option, searchable
("_links", argtype.OBJECT, Verbosity.RECORD, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("id", argtype.STRING, Verbosity.RECORD, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("status", argtype.STRING, Verbosity.BRIEF, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("created", argtype.DATETIME, Verbosity.BRIEF, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("createdBy", argtype.STRING, Verbosity.LISTING_VERBOSE,
argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False),
("description", argtype.STRING, Verbosity.LISTING,
argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False)
]
| 42.346154 | 102 | 0.69119 |
4a26baba45298984e8192ce5c2686265f895daba | 6,003 | py | Python | wdae/wdae/datasets_api/views.py | iossifovlab/gpf | e556243d29666179dbcb72859845b4d6c011af2b | [
"MIT"
] | null | null | null | wdae/wdae/datasets_api/views.py | iossifovlab/gpf | e556243d29666179dbcb72859845b4d6c011af2b | [
"MIT"
] | 82 | 2019-07-22T11:44:23.000Z | 2022-01-13T15:27:33.000Z | wdae/wdae/datasets_api/views.py | iossifovlab/gpf | e556243d29666179dbcb72859845b4d6c011af2b | [
"MIT"
] | null | null | null | from rest_framework.response import Response
from rest_framework import status
from guardian.shortcuts import get_groups_with_perms
from query_base.query_base import QueryBaseView
from studies.study_wrapper import StudyWrapperBase
from .models import Dataset
from groups_api.serializers import GroupSerializer
from datasets_api.permissions import get_wdae_parents, \
user_has_permission
class DatasetView(QueryBaseView):
def augment_accessibility(self, dataset, user):
dataset_object = Dataset.objects.get(dataset_id=dataset["id"])
dataset["access_rights"] = user_has_permission(
user, dataset_object)
return dataset
def augment_with_groups(self, dataset):
dataset_object = Dataset.objects.get(dataset_id=dataset["id"])
groups = get_groups_with_perms(dataset_object)
serializer = GroupSerializer(groups, many=True)
dataset["groups"] = serializer.data
return dataset
def augment_with_parents(self, dataset):
parents = get_wdae_parents(dataset["id"])
parents = [ds.dataset_id for ds in parents]
dataset["parents"] = parents
return dataset
def get(self, request, dataset_id=None):
user = request.user
if dataset_id is None:
selected_genotype_data = \
self.gpf_instance.get_selected_genotype_data() \
or self.gpf_instance.get_genotype_data_ids()
datasets = filter(None, [
self.gpf_instance.get_wdae_wrapper(genotype_data_id)
for genotype_data_id in selected_genotype_data
])
# assert all([d is not None for d in datasets]), \
# selected_genotype_data
res = [
StudyWrapperBase.build_genotype_data_group_description(
self.gpf_instance,
dataset.config,
dataset.description,
dataset.person_set_collection_configs
)
for dataset in datasets
]
if not self.gpf_instance.get_selected_genotype_data():
res = sorted(
res,
key=lambda desc: desc["name"]
)
res = [self.augment_accessibility(ds, user) for ds in res]
res = [self.augment_with_groups(ds) for ds in res]
res = [self.augment_with_parents(ds) for ds in res]
return Response({"data": res})
else:
dataset = self.gpf_instance.get_wdae_wrapper(dataset_id)
if dataset:
res = StudyWrapperBase.build_genotype_data_group_description(
self.gpf_instance,
dataset.config,
dataset.description,
dataset.person_set_collection_configs
)
res = self.augment_accessibility(res, user)
res = self.augment_with_groups(res)
res = self.augment_with_parents(res)
return Response({"data": res})
return Response(
{"error": "Dataset {} not found".format(dataset_id)},
status=status.HTTP_404_NOT_FOUND,
)
class PermissionDeniedPromptView(QueryBaseView):
def __init__(self):
super(PermissionDeniedPromptView, self).__init__()
prompt_filepath = (
self.gpf_instance.dae_config.gpfjs.permission_denied_prompt_file
)
with open(prompt_filepath, "r") as infile:
self.permission_denied_prompt = infile.read()
def get(self, request):
return Response({"data": self.permission_denied_prompt})
class DatasetDetailsView(QueryBaseView):
def get(self, request, dataset_id):
if dataset_id is None:
return Response(
{"error": "No dataset ID given"},
status=status.HTTP_400_BAD_REQUEST,
)
# genotype_data = self.gpf_instance.get_wdae_wrapper(dataset_id)
genotype_data_config = \
self.gpf_instance.get_genotype_data_config(dataset_id)
if genotype_data_config is None:
return Response(
{"error": f"No such dataset {dataset_id}"},
status=status.HTTP_400_BAD_REQUEST,
)
has_denovo = genotype_data_config.get("has_denovo", False)
dataset_details = {
"hasDenovo": has_denovo,
"genome": genotype_data_config.genome,
"chrPrefix": genotype_data_config.chr_prefix,
}
return Response(dataset_details)
class DatasetPedigreeView(QueryBaseView):
def get(self, request, dataset_id, column):
if dataset_id is None:
return Response(
{"error": "No dataset ID given"},
status=status.HTTP_400_BAD_REQUEST,
)
genotype_data = self.gpf_instance.get_genotype_data(dataset_id)
if genotype_data is None:
return Response(
{"error": f"No such dataset {dataset_id}"},
status=status.HTTP_400_BAD_REQUEST,
)
values_domain = list(
map(str, genotype_data.families.ped_df[column].unique())
)
return Response(
{"column_name": column, "values_domain": values_domain}
)
class DatasetConfigView(DatasetView):
def get(self, request, dataset_id):
if dataset_id is None:
return Response(
{"error": "No dataset ID given"},
status=status.HTTP_400_BAD_REQUEST,
)
genotype_data = self.gpf_instance.get_genotype_data(dataset_id)
if genotype_data is None:
return Response(
{"error": f"No such dataset {dataset_id}"},
status=status.HTTP_400_BAD_REQUEST,
)
return Response(
self.augment_with_parents(genotype_data.config.to_dict())
)
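# The snippet below is NOT part of the original module. It is a hypothetical urls.py sketch
# showing how these views could be routed so that `dataset_id` and `column` arrive as keyword
# arguments; the actual wdae URL configuration may differ.
#
#   from django.urls import path
#   from datasets_api import views
#
#   urlpatterns = [
#       path("datasets/", views.DatasetView.as_view()),
#       path("datasets/<str:dataset_id>", views.DatasetView.as_view()),
#       path("datasets/details/<str:dataset_id>", views.DatasetDetailsView.as_view()),
#       path("datasets/pedigree/<str:dataset_id>/<str:column>", views.DatasetPedigreeView.as_view()),
#       path("datasets/config/<str:dataset_id>", views.DatasetConfigView.as_view()),
#       path("datasets/denied_prompt", views.PermissionDeniedPromptView.as_view()),
#   ]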
| 33.724719 | 77 | 0.604864 |
4a26bb509a7159b49b1d584c93c7330044ba392a | 209 | py | Python | Dataset/Leetcode/train/136/168.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/136/168.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/136/168.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def singleNumber(self, nums: List[int]) -> int:
nums.sort()
for i in range(len(nums)//2):
            if nums[2*i] != nums[2*i+1]:
                return nums[2*i]
return nums[-1]
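    # Alternative (not part of the original submission): LeetCode 136 is commonly solved
    # in O(n) time and O(1) extra space by XOR-ing all values, since paired numbers cancel:
    #
    #     def singleNumber(self, nums: List[int]) -> int:
    #         result = 0
    #         for num in nums:
    #             result ^= num
    #         return result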
| 26.125 | 56 | 0.5311 |