id | text | dataset_id
---|---|---
1712127
|
# Dates on or before 2019/04/30 (the last day of the Heisei era) print 'Heisei';
# later dates print 'TBD'.
S = input()
match = int(S.replace('/', ''))
if 20190430 >= match:
    print('Heisei')
else:
    print('TBD')
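# A minimal sketch of the same check as a reusable function (not part of the
# original submission); the only fact relied on is that 2019/04/30 was the last
# day of the Heisei era, which is exactly what the comparison above encodes.
def era_label(date_str):
    """Return 'Heisei' for dates up to 2019/04/30, otherwise 'TBD'."""
    return 'Heisei' if int(date_str.replace('/', '')) <= 20190430 else 'TBD'

assert era_label('2019/04/30') == 'Heisei'
assert era_label('2019/05/01') == 'TBD'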
|
StarcoderdataPython
|
29178
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import struct  # used below for struct.unpack when decoding register reads
import serial
import time
from math import sin, cos, pi
import argparse
import ast
from comms import *
from boards import *
from livegraph import livegraph
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Drive motor module(s) with a given control mode and plot current measurements.')
parser.add_argument('serial', type=str, help='Serial port')
parser.add_argument('--baud_rate', type=int, help='Serial baud rate')
parser.add_argument('board_ids', type=str, help='Board ID (separate with comma)')
parser.add_argument('mode', type=str, help='Control mode: \
current (Id[A], Iq[A]), \
phase (dc,dc,dc), \
torque (N*m), \
velocity (rad/s), \
position (rad), \
pos_vel (rad,rad/s), \
pos_ff (rad,ff[A]), \
pwm (dc)')
parser.add_argument('actuations', type=str, help='Actuation amount in the units of the selected mode (if the mode takes multiple values, separate them with commas)')
parser.set_defaults(baud_rate=COMM_DEFAULT_BAUD_RATE, offset=COMM_BOOTLOADER_OFFSET)
args = parser.parse_args()
make_list = lambda x: list(x) if isinstance(x, (list, tuple)) else [x]
make_int = lambda x: [int(y) for y in x]
board_ids = make_int(make_list(ast.literal_eval(args.board_ids)))
actuations = make_list(ast.literal_eval(args.actuations))
mode = args.mode
ser = serial.Serial(port=args.serial, baudrate=args.baud_rate, timeout=0.05)
client = BLDCControllerClient(ser)
initialized = initBoards(client, board_ids)
client.leaveBootloader(board_ids)
client.resetInputBuffer()
initMotor(client, board_ids)
def updateCurrent(i):
data = []
for board_id in board_ids:
try:
driveMotor(client, board_ids, actuations, mode)
# Read the calculated iq
read = struct.unpack('<f', client.readRegisters([board_id], [0x3003], [1])[0])
data.append(read)
# Read the iq command
read = struct.unpack('<f', client.readRegisters([board_id], [0x3020], [1])[0])
data.append(read)
except (ProtocolError, struct.error):
#print("Failed to communicate with board: ", board_id)
data.append([0.0])
data.append([0.0])
return time.time(), data
flatten = lambda l: [item for sublist in l for item in sublist]
labels = []
labels.extend([[str(bid) + '\'s iq Reading', str(bid) + '\'s iq PID output'] for bid in board_ids])
labels = flatten(labels)
graph = livegraph(updateCurrent, labels, sample_interval=1, window_size = 2000)
graph.start()
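# A standalone sketch (not part of the original script) of what the
# make_list/make_int helpers accomplish with ast.literal_eval: a single id such
# as "1" parses to an int while "1,2,3" parses to a tuple, so both forms end up
# normalized to a list of ints before being sent to the boards.
def _parse_ids_demo():
    import ast
    to_list = lambda v: list(v) if isinstance(v, (list, tuple)) else [v]
    to_ints = lambda v: [int(y) for y in v]
    assert to_ints(to_list(ast.literal_eval("1"))) == [1]
    assert to_ints(to_list(ast.literal_eval("1,2,3"))) == [1, 2, 3]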
|
StarcoderdataPython
|
3230907
|
<filename>tests/test_06_math/test_608_intersection_line_line_2d.py
# Copyright (c) 2020 <NAME>
# License: MIT License
import pytest
from ezdxf.math import intersection_line_line_2d, Vec2
def vec2(x, y):
return Vec2((x, y))
def test_intersect_virtual():
ray1 = (vec2(10, 1), vec2(20, 10))
ray2 = (vec2(17, -7), vec2(-10, 3))
point = intersection_line_line_2d(ray1, ray2)
assert point.isclose(vec2(5.7434, -2.8309), abs_tol=1e-4)
def test_intersect_with_vertical():
ray1 = (vec2(10, 1), vec2(10, -7))
ray2 = (vec2(-10, 3), vec2(17, -7))
point = intersection_line_line_2d(ray1, ray2)
assert point.x == 10
assert point.isclose(vec2(10.0, -4.4074), abs_tol=1e-4)
def test_intersect_with_horizontal():
ray1 = (vec2(-10, 10), vec2(10, 10))
ray2 = (vec2(-10, 20), vec2(10, 0))
point = intersection_line_line_2d(ray1, ray2)
assert point.y == 10
assert point.isclose(vec2(0.0, 10.0), abs_tol=1e-4)
def test_intersect_with_vertical_and_horizontal():
ray1 = (vec2(-10, 10), vec2(10, 10))
ray2 = (vec2(5, 0), vec2(5, 20))
point = intersection_line_line_2d(ray1, ray2)
assert point.y == 10
assert point.x == 5
assert point.isclose(vec2(5.0, 10.0), abs_tol=1e-4)
def test_intersect_parallel_vertical():
ray1 = (vec2(10, 1), vec2(10, -7))
ray2 = (vec2(12, -10), vec2(12, 7))
assert intersection_line_line_2d(ray1, ray2) is None
def test_intersect_parallel_horizontal():
ray3 = (vec2(11, 0), vec2(-11, 0))
ray4 = (vec2(0, 0), vec2(1, 0))
assert intersection_line_line_2d(ray3, ray4) is None
def test_intersect_normal_vertical():
ray = (vec2(10, 1), vec2(10, -7))
ortho = (vec2(0, 3), vec2(10, 3))
point = intersection_line_line_2d(ray, ortho)
assert point.isclose(vec2(10, 3))
def test_intersect_real():
line1 = (vec2(0, 0), vec2(4, 4))
line2 = (vec2(3, 2), vec2(5, 0))
point = intersection_line_line_2d(line1, line2, virtual=False)
assert point is None
def test_intersect_real_colinear():
line1 = (vec2(0, 0), vec2(4, 4))
line2 = (vec2(2, 2), vec2(4, 0))  # the intersection point is an endpoint of line2
point = intersection_line_line_2d(line1, line2, virtual=False)
assert point.isclose(vec2(2, 2))
@pytest.mark.parametrize(
"p2", [(4, 0), (0, 4), (4, 4)], ids=["horiz", "vert", "diag"]
)
def test_intersect_coincident_lines(p2):
line1 = (Vec2(0, 0), Vec2(p2))
point = intersection_line_line_2d(line1, line1, virtual=False)
assert point is None
def test_issue_128():
line1 = (vec2(175.0, 5.0), vec2(175.0, 50.0))
line2 = (vec2(-10.1231, 30.1235), vec2(300.2344, 30.1235))
point = intersection_line_line_2d(line1, line2, virtual=False)
assert point is not None
assert point.isclose(vec2(175.0, 30.1235))
if __name__ == "__main__":
pytest.main([__file__])
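# For reference, a short usage sketch of the function under test (not part of
# the original tests; assumes ezdxf is installed). As the cases above show, the
# default behaviour intersects the infinite lines ("virtual" intersections),
# while virtual=False only reports points that lie on both segments.
def _demo_usage():
    line1 = (Vec2(0, 0), Vec2(4, 4))
    line2 = (Vec2(3, 2), Vec2(5, 0))
    virtual = intersection_line_line_2d(line1, line2)               # Vec2(2.5, 2.5), off line2's segment
    real = intersection_line_line_2d(line1, line2, virtual=False)   # None
    return virtual, real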
|
StarcoderdataPython
|
3332833
|
<reponame>mmendez3800/web-crawler-scraper
from rtypes import pcc_set, dimension, primarykey
@pcc_set
class Register(object):
crawler_id = primarykey(str)
load_balancer = dimension(tuple)
fresh = dimension(bool)
invalid = dimension(bool)
def __init__(self, crawler_id, fresh):
self.crawler_id = crawler_id
self.load_balancer = tuple()
self.fresh = fresh
self.invalid = False
|
StarcoderdataPython
|
1606736
|
"""
Unit test for selection operators.
"""
import random
from math import nan
import numpy as np
import pytest
from leap_ec import Individual
from leap_ec import ops, statistical_helpers
from leap_ec.binary_rep.problems import MaxOnes
from leap_ec.data import test_population
from leap_ec.real_rep.problems import SpheroidProblem
##############################
# Tests for sus_selection()
##############################
def test_sus_selection1():
''' Test of a deterministic case of stochastic universal sampling '''
# Make a population where sus_selection has an obvious
# reproducible choice
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
pop = Individual.evaluate_population(pop)
# This selection operator will always choose the [1, 1, 1] individual
# since [0, 0, 0] has zero fitness
selector = ops.sus_selection(pop)
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
# run one more time to test shuffle
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
@pytest.mark.stochastic
def test_sus_selection_shuffle():
''' Test of a stochastic case of SUS selection '''
# Make a population where sus_selection has an obvious
# reproducible choice
# Proportions here should be 1/4 and 3/4, respectively
pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
# Assign a unique identifier to each individual
pop[0].id = 0
pop[1].id = 1
# We first need to evaluate all the individuals so that
# selection has fitnesses to compare
pop = Individual.evaluate_population(pop)
selected = ops.sus_selection(pop)
N = 1000
p_thresh = 0.1
observed_dist = statistical_helpers.collect_distribution(
lambda: next(selected).id, samples=N)
expected_dist = {pop[0].id: 0.25*N, pop[1].id: 0.75*N}
print(f"Observed: {observed_dist}")
print(f"Expected: {expected_dist}")
assert(statistical_helpers.stochastic_equals(expected_dist,
observed_dist, p=p_thresh))
def test_sus_selection_offset():
''' Test of SUS selection with a non-default offset '''
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
# evaluate population and negate fitness of second individual
pop = Individual.evaluate_population(pop)
pop[1].fitness = -pop[1].fitness
# now we try to evaluate normally (this should throw a ValueError)
# due to the negative fitness
with pytest.raises(ValueError):
selector = ops.sus_selection(pop)
selected = next(selector)
# it should work by setting the offset to +3
# this adds 3 to each fitness value, making the second
# individual's fitness 0.
selector = ops.sus_selection(pop, offset=3)
# we expect the first individual to always be selected
# since the new zero point is now -3.
selected = next(selector)
assert np.all(selected.genome == [0, 0, 0])
selected = next(selector)
assert np.all(selected.genome == [0, 0, 0])
def test_sus_selection_pop_min():
''' Test of SUS selection with pop-min offset '''
# Create a population of positive fitness individuals
# offsetting the fitness by the population minimum makes it so the
# least fit member never gets selected.
pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
pop = Individual.evaluate_population(pop)
selector = ops.sus_selection(pop, offset='pop-min')
# we expect that the second individual is always selected
# since the new zero point will be at the minimum fitness
# of the population
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
def test_sus_selection_custom_key():
''' Test of SUS selection with custom evaluation '''
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
def custom_key(individual):
''' Returns fitness based on MaxZeros '''
return np.count_nonzero(individual.genome == 0)
pop = Individual.evaluate_population(pop)
selector = ops.sus_selection(pop, key=custom_key)
# we expect the first individual to always be selected
# since its genome is all 0s
selected = next(selector)
assert np.all(selected.genome == [0, 0, 0])
selected = next(selector)
assert np.all(selected.genome == [0, 0, 0])
def test_sus_selection_num_points():
''' Test of SUS selection with varying `n` random points '''
# the second individual should always be selected
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
pop = Individual.evaluate_population(pop)
# with negative points
with pytest.raises(ValueError):
selector = ops.sus_selection(pop, n=-1)
selected = next(selector)
# with n = None (default)
selector = ops.sus_selection(pop, n=None)
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
# with n less than len(population)
selector = ops.sus_selection(pop, n=1)
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
# with n greater than len(population)
selector = ops.sus_selection(pop, n=3)
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
##############################
# Tests for proportional_selection()
##############################
def test_proportional_selection1():
''' Test of a deterministic case of proportional selection '''
# Make a population where proportional_selection has an obvious
# reproducible choice
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
parents = Individual.evaluate_population(pop)
# This selection operator will always select the [1, 1, 1] individual since
# [0, 0, 0] has zero fitness
selector = ops.proportional_selection(parents)
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
@pytest.mark.stochastic
def test_proportional_selection2():
''' Test of a stochastic proportional selection '''
# Make a population where fitness proportional selection has an obvious
# reproducible choice
# Proportions here should be 1/4 and 3/4, respectively
pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
# Assign a unique identifier to each individual
pop[0].id = 0
pop[1].id = 1
# We first need to evaluate all the individuals so that
# selection has fitnesses to compare
pop = Individual.evaluate_population(pop)
selected = ops.proportional_selection(pop)
N = 1000
p_thresh = 0.1
observed_dist = statistical_helpers.collect_distribution(
lambda: next(selected).id, samples=N)
expected_dist = {pop[0].id: 0.25*N, pop[1].id: 0.75*N}
print(f"Observed: {observed_dist}")
print(f"Expected: {expected_dist}")
assert(statistical_helpers.stochastic_equals(expected_dist,
observed_dist, p=p_thresh))
def test_proportional_selection_offset():
''' Test of proportional selection with a non-default offset '''
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
# evaluate population and negate fitness of second individual
pop = Individual.evaluate_population(pop)
pop[1].fitness = -pop[1].fitness
# now we try to evaluate normally (this should throw a ValueError)
# due to the negative fitness
with pytest.raises(ValueError):
selector = ops.proportional_selection(pop)
selected = next(selector)
# it should work by setting the offset to +3
# this adds 3 to each fitness value, making the second
# individual's fitness 0.
selector = ops.proportional_selection(pop, offset=3)
# we expect the first individual to always be selected
# since the new zero point is now -3.
selected = next(selector)
assert np.all(selected.genome == [0, 0, 0])
selected = next(selector)
assert np.all(selected.genome == [0, 0, 0])
def test_proportional_selection_pop_min():
''' Test of proportional selection with pop-min offset '''
# Create a population of positive fitness individuals
# offsetting the fitness by the population minimum makes it so the
# least fit member never gets selected.
pop = [Individual(np.array([0, 1, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
pop = Individual.evaluate_population(pop)
selector = ops.proportional_selection(pop, offset='pop-min')
# we expect that the second individual is always selected
# since the new zero point will be at the minimum fitness
# of the population
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
selected = next(selector)
assert np.all(selected.genome == [1, 1, 1])
def test_proportional_selection_custom_key():
''' Test of proportional selection with custom evaluation '''
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
def custom_key(individual):
''' Returns fitness based on MaxZeros '''
return np.count_nonzero(individual.genome == 0)
pop = Individual.evaluate_population(pop)
selector = ops.proportional_selection(pop, key=custom_key)
# we expect the first individual to always be selected
# since its genome is all 0s
selected = next(selector)
assert np.all(selected.genome == [0, 0, 0])
selected = next(selector)
assert np.all(selected.genome == [0, 0, 0])
##############################
# Tests for naive_cyclic_selection()
##############################
def test_naive_cyclic_selection():
""" Test of the naive deterministic cyclic selection """
pop = [Individual(np.array([0, 0]), problem=MaxOnes()),
Individual(np.array([0, 1]), problem=MaxOnes())]
# This selection operator will deterministically cycle through the
# given population
selector = ops.naive_cyclic_selection(pop)
selected = next(selector)
assert np.all(selected.genome == [0, 0])
selected = next(selector)
assert np.all(selected.genome == [0, 1])
# And now we cycle back to the first individual
selected = next(selector)
assert np.all(selected.genome == [0, 0])
##############################
# Tests for cyclic_selection()
##############################
def test_cyclic_selection():
""" Test of the deterministic cyclic selection """
# Set seed so that we get consistent test results. I.e., it is possible
# by happenstance for some tests to fail even though they're actually ok.
# E.g., the cyclic selection tests will test if the test_sequence
# shuffles between a complete cycle, but there's a chance that the same
# test_sequence may come up in the random shuffle, so the test will fail.
# However, if we set a random seed ahead of time, then we can control for
# those pathological scenarios.
random.seed(123)
# We're just going to use integers for the population as that's
# sufficient for testing this selection operator; we don't want to get in
# the weeds with comparing individuals for test_sequence equivalency
# testing.
pop = list(range(4))
# This selection operator will deterministically cycle through the
# given population
selector = ops.cyclic_selection(pop)
# first cycle should be the same order as we started
first_iteration = [next(selector) for _ in range(len(pop))]
assert pop == first_iteration
# the second iteration should be shuffled
second_iteration = [next(selector) for _ in range(len(pop))]
assert pop != second_iteration
##############################
# Tests for truncation_selection()
##############################
def test_truncation_selection():
""" Basic truncation selection test"""
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([0, 0, 1]), problem=MaxOnes()),
Individual(np.array([1, 1, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
# We first need to evaluate all the individuals so that truncation
# selection has fitnesses to compare
pop = Individual.evaluate_population(pop)
truncated = ops.truncation_selection(pop, 2)
assert len(truncated) == 2
# Just to make sure, check that the two best individuals from the
# original population are in the selected population
assert pop[2] in truncated
assert pop[3] in truncated
def test_truncation_parents_selection():
""" Test (mu + lambda), i.e., parents competing with offspring
Create parent and offspring populations such that each has an "best" individual that will be selected by
truncation selection.
"""
parents = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 0]), problem=MaxOnes())]
parents = Individual.evaluate_population(parents)
offspring = [Individual(np.array([0, 0, 1]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
offspring = Individual.evaluate_population(offspring)
truncated = ops.truncation_selection(offspring, 2, parents=parents)
assert len(truncated) == 2
assert parents[1] in truncated
assert offspring[1] in truncated
def test_truncation_selection_with_nan1():
"""If truncation selection encounters a NaN and non-NaN fitness
while maximizing, the non-NaN wins.
"""
# Make a population where binary tournament_selection has an obvious
# reproducible choice
problem = MaxOnes()
pop = [Individual(np.array([0, 0, 0]), problem=problem),
Individual(np.array([1, 1, 1]), problem=problem)]
# We first need to evaluate all the individuals so that truncation
# selection has fitnesses to compare
pop = Individual.evaluate_population(pop)
# Now set the "best" to NaN
pop[1].fitness = nan
best = ops.truncation_selection(pop, size=1)
assert pop[0] == best[0]
def test_truncation_selection_with_nan2():
"""If truncation selection encounters a NaN and non-NaN fitness
while minimizing, the non-NaN wins.
"""
problem = SpheroidProblem(maximize=False)
pop = []
pop.append(Individual(np.array([0]), problem=problem))
pop.append(Individual(np.array([1]), problem=problem))
pop = Individual.evaluate_population(pop)
# First *normal* selection should yield the 0 as the "best"
best = ops.truncation_selection(pop, size=1)
assert pop[0] == best[0]
# But now let's set that best to a NaN, which *should* force the other
# individual to be selected.
pop[0].fitness = nan
best = ops.truncation_selection(pop, size=1)
assert pop[1] == best[0]
##############################
# Tests for tournament_selection()
##############################
@pytest.mark.stochastic
def test_tournament_selection1():
"""If there are just two individuals in the population, then binary tournament
selection will select the better one with 75% probability."""
# Make a population where binary tournament_selection has an obvious
# reproducible choice
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
# Assign a unique identifier to each individual
pop[0].id = 0
pop[1].id = 1
# We first need to evaluate all the individuals so that
# selection has fitnesses to compare
pop = Individual.evaluate_population(pop)
selected = ops.tournament_selection(pop)
N = 1000
p_thresh = 0.1
observed_dist = statistical_helpers.collect_distribution(lambda: next(selected).id, samples=N)
expected_dist = { pop[0].id: 0.25*N, pop[1].id: 0.75*N }
print(f"Observed: {observed_dist}")
print(f"Expected: {expected_dist}")
assert(statistical_helpers.stochastic_equals(expected_dist, observed_dist, p=p_thresh))
@pytest.mark.stochastic
def test_tournament_selection2():
"""If there are just two individuals in the population, and we set select_worst=True,
then binary tournament selection will select the worse one with 75% probability."""
# Make a population where binary tournament_selection has an obvious
# reproducible choice
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
# Assign a unique identifier to each individual
pop[0].id = 0
pop[1].id = 1
# We first need to evaluate all the individuals so that
# selection has fitnesses to compare
pop = Individual.evaluate_population(pop)
selected = ops.tournament_selection(pop, select_worst=True)
N = 1000
p_thresh = 0.1
observed_dist = statistical_helpers.collect_distribution(lambda: next(selected).id, samples=N)
expected_dist = { pop[0].id: 0.75*N, pop[1].id: 0.25*N }
print(f"Observed: {observed_dist}")
print(f"Expected: {expected_dist}")
assert(statistical_helpers.stochastic_equals(expected_dist, observed_dist, p=p_thresh))
def test_tournament_selection_indices():
"""If an empty list is provided to tournament selection, it should be populated with
the index of the selected individual.
If we select a second individual, the list should be cleared and populated with the
index of the second individual."""
pop = test_population
indices = []
op = ops.tournament_selection(indices=indices)
# Select an individual
s = next(op(pop))
# Ensure the returned index is correct
assert(len(indices) == 1)
idx = indices[0]
assert(idx >= 0)
assert(idx < len(pop))
assert(pop[idx] is s)
# Select another individual
s = next(op(pop))
# Ensure the returned index is correct
assert(len(indices) == 1)
idx = indices[0]
assert(idx >= 0)
assert(idx < len(pop))
assert(pop[idx] is s)
##############################
# Tests for random_selection()
##############################
@pytest.mark.stochastic
def test_random_selection1():
"""If there are just two individuals in the population, then random
selection will select the better one with 50% probability."""
pop = [Individual(np.array([0, 0, 0]), problem=MaxOnes()),
Individual(np.array([1, 1, 1]), problem=MaxOnes())]
# Assign a unique identifier to each individual
pop[0].id = 0
pop[1].id = 1
# We first need to evaluate all the individuals so that
# selection has fitnesses to compare
pop = Individual.evaluate_population(pop)
selected = ops.random_selection(pop)
N = 1000
p_thresh = 0.1
observed_dist = statistical_helpers.collect_distribution(lambda: next(selected).id, samples=N)
expected_dist = { pop[0].id: 0.5*N, pop[1].id: 0.5*N }
print(f"Observed: {observed_dist}")
print(f"Expected: {expected_dist}")
assert(statistical_helpers.stochastic_equals(expected_dist, observed_dist, p=p_thresh))
def test_random_selection_indices():
"""If an empty list is provided to random selection, it should be populated with
the index of the selected individual.
If we select a second individual, the list should be cleared and populated with the
index of the second individual."""
pop = test_population
indices = []
op = ops.random_selection(indices=indices)
# Select an individual
s = next(op(pop))
# Ensure the returned index is correct
assert(len(indices) == 1)
idx = indices[0]
assert(idx >= 0)
assert(idx < len(pop))
assert(pop[idx] is s)
# Select another individual
s = next(op(pop))
# Ensure the returned index is correct
assert(len(indices) == 1)
idx = indices[0]
assert(idx >= 0)
assert(idx < len(pop))
assert(pop[idx] is s)
|
StarcoderdataPython
|
85079
|
from gusto import *
from firedrake import (IcosahedralSphereMesh, cos, sin,
SpatialCoordinate, FunctionSpace)
import sys
dt = 900.
day = 24.*60.*60.
if '--running-tests' in sys.argv:
tmax = dt
else:
tmax = 14*day
refinements = 4 # number of horizontal cells = 20*(4^refinements)
R = 6371220.
H = 8000.
mesh = IcosahedralSphereMesh(radius=R,
refinement_level=refinements)
x = SpatialCoordinate(mesh)
mesh.init_cell_orientations(x)
fieldlist = ['u', 'D']
timestepping = TimesteppingParameters(dt=dt)
output = OutputParameters(dirname='sw_rossby_wave_ll',
dumpfreq=24,
dumplist_latlon=['D'],
log_level='INFO')
parameters = ShallowWaterParameters(H=H)
diagnostics = Diagnostics(*fieldlist)
diagnostic_fields = [CourantNumber()]
state = State(mesh, horizontal_degree=1,
family="BDM",
timestepping=timestepping,
output=output,
parameters=parameters,
diagnostics=diagnostics,
fieldlist=fieldlist,
diagnostic_fields=diagnostic_fields)
# interpolate initial conditions
# Initial/current conditions
u0 = state.fields("u")
D0 = state.fields("D")
omega = 7.848e-6 # note lower-case, not the same as Omega
K = 7.848e-6
g = parameters.g
Omega = parameters.Omega
theta, lamda = latlon_coords(mesh)
u_zonal = R*omega*cos(theta) + R*K*(cos(theta)**3)*(4*sin(theta)**2 - cos(theta)**2)*cos(4*lamda)
u_merid = -R*K*4*(cos(theta)**3)*sin(theta)*sin(4*lamda)
uexpr = sphere_to_cartesian(mesh, u_zonal, u_merid)
def Atheta(theta):
return 0.5*omega*(2*Omega + omega)*cos(theta)**2 + 0.25*(K**2)*(cos(theta)**8)*(5*cos(theta)**2 + 26 - 32/(cos(theta)**2))
def Btheta(theta):
return (2*(Omega + omega)*K/30)*(cos(theta)**4)*(26 - 25*cos(theta)**2)
def Ctheta(theta):
return 0.25*(K**2)*(cos(theta)**8)*(5*cos(theta)**2 - 6)
Dexpr = H + (R**2)*(Atheta(theta) + Btheta(theta)*cos(4*lamda) + Ctheta(theta)*cos(8*lamda))/g
# Coriolis
fexpr = 2*Omega*x[2]/R
V = FunctionSpace(mesh, "CG", 1)
f = state.fields("coriolis", V)
f.interpolate(fexpr) # Coriolis frequency (1/s)
u0.project(uexpr, form_compiler_parameters={'quadrature_degree': 8})
D0.interpolate(Dexpr)
state.initialise([('u', u0),
('D', D0)])
ueqn = EulerPoincare(state, u0.function_space())
Deqn = AdvectionEquation(state, D0.function_space(), equation_form="continuity")
advected_fields = []
advected_fields.append(("u", ThetaMethod(state, u0, ueqn)))
advected_fields.append(("D", SSPRK3(state, D0, Deqn)))
linear_solver = ShallowWaterSolver(state)
# Set up forcing
sw_forcing = ShallowWaterForcing(state)
# build time stepper
stepper = CrankNicolson(state, advected_fields, linear_solver,
sw_forcing)
stepper.run(t=0, tmax=tmax)
|
StarcoderdataPython
|
3242033
|
"""preactresnet in pytorch
[1] <NAME>, <NAME>, <NAME>, <NAME>
Identity Mappings in Deep Residual Networks
https://arxiv.org/abs/1603.05027
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class SepConv(nn.Module):
def __init__(self, channel_in, channel_out, kernel_size=3, stride=2, padding=1, affine=True):
super(SepConv, self).__init__()
self.op = nn.Sequential(
nn.Conv2d(channel_in, channel_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=channel_in, bias=False),
nn.Conv2d(channel_in, channel_in, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(channel_in, affine=affine),
nn.ReLU(inplace=False),
nn.Conv2d(channel_in, channel_in, kernel_size=kernel_size, stride=1, padding=padding, groups=channel_in, bias=False),
nn.Conv2d(channel_in, channel_out, kernel_size=1, padding=0, bias=False),
nn.BatchNorm2d(channel_out, affine=affine),
nn.ReLU(inplace=False),
)
def forward(self, x):
return self.op(x)
class DeepwiseAuxiliaryClassifier(nn.Module):
def __init__(self, channel, num_classes=100, downsample=0):
super(DeepwiseAuxiliaryClassifier, self).__init__()
self.channel = channel
self.pool = nn.AdaptiveAvgPool2d(1)
self.downsample = downsample
self.layer = self._make_conv_layer()
self.fc = nn.Linear(self.channel, num_classes)
def _make_conv_layer(self):
layer_list = []
for i in range(self.downsample):
layer_list.append(SepConv(self.channel, self.channel*2))
self.channel *= 2
layer_list.append(nn.AdaptiveAvgPool2d(1))
return nn.Sequential(*layer_list)
def forward(self, x):
x = self.layer(x)
x = x.view(x.size(0), -1)
#x = self.fc(x)
return x
class PreActBasic(nn.Module):
expansion = 1
def __init__(self, in_channels, out_channels, stride):
super().__init__()
self.residual = nn.Sequential(
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels * PreActBasic.expansion, kernel_size=3, padding=1)
)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels * PreActBasic.expansion:
self.shortcut = nn.Conv2d(in_channels, out_channels * PreActBasic.expansion, 1, stride=stride)
def forward(self, x):
res = self.residual(x)
shortcut = self.shortcut(x)
return res + shortcut
class PreActBottleNeck(nn.Module):
expansion = 4
def __init__(self, in_channels, out_channels, stride):
super().__init__()
self.residual = nn.Sequential(
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, out_channels, 1, stride=stride),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, 3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels * PreActBottleNeck.expansion, 1)
)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels * PreActBottleNeck.expansion:
self.shortcut = nn.Conv2d(in_channels, out_channels * PreActBottleNeck.expansion, 1, stride=stride)
def forward(self, x):
res = self.residual(x)
shortcut = self.shortcut(x)
return res + shortcut
class PreActResNet(nn.Module):
def __init__(self, block, num_block, class_num=100):
super().__init__()
self.input_channels = 64
self.pre = nn.Sequential(
nn.Conv2d(3, 64, 3, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
self.deepwise1 = DeepwiseAuxiliaryClassifier(channel=64 * block.expansion, downsample=3)
self.deepwise2 = DeepwiseAuxiliaryClassifier(channel=128 * block.expansion, downsample=2)
self.deepwise3 = DeepwiseAuxiliaryClassifier(channel=256 * block.expansion, downsample=1)
self.deepwise4 = DeepwiseAuxiliaryClassifier(channel=512 * block.expansion, downsample=0)
self.stage1 = self._make_layers(block, num_block[0], 64, 1)
self.stage2 = self._make_layers(block, num_block[1], 128, 2)
self.stage3 = self._make_layers(block, num_block[2], 256, 2)
self.stage4 = self._make_layers(block, num_block[3], 512, 2)
def _make_layers(self, block, block_num, out_channels, stride):
layers = []
layers.append(block(self.input_channels, out_channels, stride))
self.input_channels = out_channels * block.expansion
while block_num - 1:
layers.append(block(self.input_channels, out_channels, 1))
self.input_channels = out_channels * block.expansion
block_num -= 1
return nn.Sequential(*layers)
def forward(self, x):
feature_list = []
x = self.pre(x)
x = self.stage1(x)
feature_list.append(x)
x = self.stage2(x)
feature_list.append(x)
x = self.stage3(x)
feature_list.append(x)
x = self.stage4(x)
feature_list.append(x)
x1 = self.deepwise1(feature_list[-4])
x2 = self.deepwise2(feature_list[-3])
x3 = self.deepwise3(feature_list[-2])
x4 = self.deepwise4(feature_list[-1])
feature = [x4, x3, x2, x1]
x1 = self.deepwise1.fc(x1)
x2 = self.deepwise2.fc(x2)
x3 = self.deepwise3.fc(x3)
x4 = self.deepwise4.fc(x4)
return [x4, x3, x2, x1], feature
def preactresnet18():
return PreActResNet(PreActBasic, [2, 2, 2, 2])
def preactresnet34():
return PreActResNet(PreActBasic, [3, 4, 6, 3])
def preactresnet50():
return PreActResNet(PreActBottleNeck, [3, 4, 6, 3])
def preactresnet101():
return PreActResNet(PreActBottleNeck, [3, 4, 23, 3])
def preactresnet152():
return PreActResNet(PreActBottleNeck, [3, 8, 36, 3])
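# A quick smoke-test sketch (not in the original file; relies on the torch
# import at the top of the module and CIFAR-sized 32x32 inputs): every auxiliary
# head returns (batch, 100) logits, and `feature` holds the corresponding pooled
# (batch, 512 * expansion) vectors in deep-to-shallow order.
if __name__ == '__main__':
    net = preactresnet18()
    logits, features = net(torch.randn(2, 3, 32, 32))
    for head, (l, f) in enumerate(zip(logits, features)):
        print(f'head {head}: logits {tuple(l.shape)}, feature {tuple(f.shape)}')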
|
StarcoderdataPython
|
3251443
|
<filename>08.Graph/TSP.py
#input
# 4 9
# 0 1 2
# 1 0 1
# 0 2 9
# 1 2 6
# 2 1 7
# 1 3 4
# 3 1 3
# 3 0 6
# 2 3 8
def pprint(arr):
for line in arr:
print(line)
N, M = map(int, input().split(" "))
W = [[0] * N for _ in range(N)]
D = []
for _ in range(M):
v1, v2, cost = map(int, input().split(" "))
W[v1][v2] = cost
for i in range(N):
for j in range(N):
if i == j:
W[i][j] = 0
elif W[i][j] == 0:
W[i][j] = float('inf')
def travel():
    # NOTE: left unfinished in the original -- D is never populated, so this
    # loop does nothing useful; see the Held-Karp sketch below for one way to
    # complete it.
    for i in range(1, N+1):
        D[i]
pprint(W)
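# The DP table D above was never filled in. As a hedged sketch only (the
# original author's intended approach is not shown), a standard Held-Karp
# dynamic program over the same cost matrix W looks like this; for the sample
# input in the header comment it returns 21 (tour 0 -> 2 -> 3 -> 1 -> 0).
def held_karp(W, N):
    INF = float('inf')
    # dp[mask][last] = cheapest cost to start at 0, visit exactly the vertices
    # in `mask`, and currently stand at `last` (vertex 0 is always in `mask`)
    dp = [[INF] * N for _ in range(1 << N)]
    dp[1][0] = 0
    for mask in range(1 << N):
        for last in range(N):
            if dp[mask][last] == INF or not (mask >> last) & 1:
                continue
            for nxt in range(N):
                if (mask >> nxt) & 1:
                    continue
                cand = dp[mask][last] + W[last][nxt]
                if cand < dp[mask | (1 << nxt)][nxt]:
                    dp[mask | (1 << nxt)][nxt] = cand
    full = (1 << N) - 1
    return min(dp[full][i] + W[i][0] for i in range(1, N))

print(held_karp(W, N))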
|
StarcoderdataPython
|
3344750
|
<gh_stars>10-100
class Solution(object):
def isPalindrome(self, x: int) -> bool:
if x < 0:
return False
b = int(str(x)[::-1])
if b == x:
return True
return False
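# An alternative sketch (not the submitted solution): the same check done purely
# arithmetically by reversing only half of the digits, avoiding the str() round-trip.
class SolutionNoString(object):
    def isPalindrome(self, x: int) -> bool:
        # negatives and nonzero multiples of 10 can never be palindromes
        if x < 0 or (x % 10 == 0 and x != 0):
            return False
        rev = 0
        while x > rev:
            rev = rev * 10 + x % 10
            x //= 10
        # even digit count: x == rev; odd digit count: drop the middle digit
        return x == rev or x == rev // 10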
|
StarcoderdataPython
|
1625449
|
def metodogauss(A, n, b):
for k in range(n - 1):
for i in range(k + 1, n):
m = ((-1) * A[i][k]) / A[k][k]
for j in range(k, n):
A[i][j] = A[i][j] + m * A[k][j]
b[i] = b[i] + m * b[k]
return A, b
def read_a(A, n):
for i in range(0, n):
for j in range(0, n):
A[i][j] = float(input("Digite o valor de A[{}][{}]:".format(i + 1, j + 1)))
return A
def read_b(b, n):
for i in range(0, n):
for j in range(0, 1):
b[i] = float(input("Digite o valor de b[{}]:".format(i + 1)))
return b
def cria_matriz_a(n):
l = c = n
matriz = [0] * l
for i in range(0, l):
matriz[i] = [0] * c
return matriz
def cria_matriz_b(n):
    # vetor de n zeros para o lado direito do sistema
    return [0] * n
def sub_ret(A, n, b):
x = [0 for f in range(0, n)]
n = n - 1
x[n] = b[n] / A[n][n]
for i in range(n - 1, -1, -1):
soma = 0
for j in range(i, n + 1):
soma = soma + A[i][j] * x[j]
x[i] = (b[i] - soma) / A[i][i]
return x
def gauss():
o = int(input("Digite a ordem da matriz: "))
print()
A = cria_matriz_a(o)
b = cria_matriz_b(o)
print("Criando matriz A: ")
print()
A = read_a(A, o)
print()
print("Criando matriz dos termos independentes (b): ")
print()
b = read_b(b, o)
print()
print("Matriz informada: ")
for l in range(0, o):
for c in range(0, o):
print(f'[ {A[l][c]} ]', end='')
print(f' = [ {b[l]} ]')
A, b = metodogauss(A, o, b)
print()
print('Matriz triangularizada: ')
for l in range(0, o):
for c in range(0, o):
print(f'[ {A[l][c]} ]', end='')
print(f' = [ {b[l]} ]')
x = sub_ret(A, o, b)
print()
print('Solução: ')
for l in range(0, len(x)):
print(f'[ {x[l]} ]', end='')
print()
if __name__ == '__main__':
gauss()
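# A quick self-contained check (not in the original script) that exercises
# metodogauss and sub_ret on a fixed 2x2 system instead of reading input():
# for A = [[2, 1], [1, 3]] and b = [5, 10] the solution is x = [1, 3].
def exemplo_rapido():
    A = [[2.0, 1.0],
         [1.0, 3.0]]
    b = [5.0, 10.0]
    A, b = metodogauss(A, 2, b)
    return sub_ret(A, 2, b)  # -> [1.0, 3.0]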
|
StarcoderdataPython
|
4806833
|
<gh_stars>1-10
import numpy as np
from matplotlib.colors import ListedColormap
def psf_lut():
"""
PSF LUT created by <NAME> in Fiji.
"""
colors = [
[255, 0, 0],
[249, 5, 2],
[243, 10, 4],
[237, 16, 6],
[231, 21, 8],
[226, 26, 10],
[220, 32, 13],
[214, 37, 15],
[208, 42, 17],
[203, 48, 19],
[197, 53, 21],
[191, 59, 24],
[185, 64, 26],
[180, 69, 28],
[174, 75, 30],
[168, 80, 32],
[162, 85, 35],
[157, 91, 37],
[151, 96, 39],
[145, 101, 41],
[139, 107, 43],
[134, 112, 46],
[128, 118, 48],
[122, 123, 50],
[116, 128, 52],
[111, 134, 54],
[105, 139, 57],
[99, 144, 59],
[93, 150, 61],
[88, 155, 63],
[82, 160, 65],
[76, 166, 68],
[70, 171, 70],
[65, 177, 72],
[59, 182, 74],
[53, 187, 76],
[47, 193, 79],
[42, 198, 81],
[36, 203, 83],
[30, 209, 85],
[24, 214, 87],
[19, 219, 91],
[19, 218, 91],
[19, 218, 92],
[20, 217, 93],
[20, 216, 93],
[20, 215, 94],
[20, 214, 95],
[20, 214, 95],
[20, 213, 96],
[21, 212, 97],
[21, 211, 97],
[21, 210, 98],
[21, 210, 98],
[21, 209, 99],
[22, 208, 100],
[22, 207, 100],
[22, 206, 101],
[22, 206, 102],
[22, 205, 102],
[22, 204, 103],
[23, 203, 104],
[23, 202, 104],
[23, 201, 105],
[23, 201, 106],
[23, 200, 106],
[24, 199, 107],
[24, 198, 108],
[24, 197, 108],
[24, 197, 109],
[24, 196, 109],
[24, 195, 110],
[25, 194, 111],
[25, 193, 111],
[25, 193, 112],
[25, 192, 113],
[25, 191, 113],
[26, 190, 114],
[26, 189, 115],
[26, 189, 115],
[26, 188, 116],
[26, 187, 117],
[26, 186, 117],
[27, 185, 118],
[27, 184, 119],
[27, 184, 119],
[27, 183, 120],
[27, 182, 120],
[28, 181, 121],
[28, 180, 122],
[28, 180, 122],
[28, 179, 123],
[28, 178, 124],
[28, 177, 124],
[29, 176, 125],
[29, 176, 126],
[29, 175, 126],
[29, 174, 127],
[29, 173, 128],
[30, 172, 128],
[30, 172, 129],
[30, 171, 130],
[30, 170, 130],
[30, 169, 131],
[30, 168, 131],
[31, 167, 132],
[31, 167, 133],
[31, 166, 133],
[31, 165, 134],
[31, 164, 135],
[32, 163, 135],
[32, 163, 136],
[32, 162, 137],
[32, 161, 137],
[32, 160, 138],
[32, 159, 139],
[33, 159, 139],
[33, 158, 140],
[33, 157, 141],
[33, 156, 141],
[33, 155, 142],
[34, 153, 143],
[35, 152, 143],
[35, 151, 143],
[36, 150, 143],
[37, 149, 143],
[37, 148, 143],
[38, 147, 143],
[39, 146, 143],
[39, 145, 143],
[40, 144, 143],
[41, 143, 144],
[41, 142, 144],
[42, 141, 144],
[42, 140, 144],
[43, 139, 144],
[44, 138, 144],
[44, 137, 144],
[45, 136, 144],
[46, 135, 144],
[46, 134, 144],
[47, 133, 145],
[48, 132, 145],
[48, 131, 145],
[49, 130, 145],
[49, 128, 145],
[50, 127, 145],
[51, 126, 145],
[51, 125, 145],
[52, 124, 145],
[53, 123, 145],
[53, 122, 146],
[54, 121, 146],
[55, 120, 146],
[55, 119, 146],
[56, 118, 146],
[57, 117, 146],
[57, 116, 146],
[58, 115, 146],
[58, 114, 146],
[59, 113, 146],
[60, 112, 146],
[60, 111, 147],
[61, 110, 147],
[62, 109, 147],
[62, 108, 147],
[63, 107, 147],
[64, 106, 147],
[64, 105, 147],
[65, 103, 147],
[65, 102, 147],
[66, 101, 147],
[67, 100, 148],
[67, 99, 148],
[68, 98, 148],
[69, 97, 148],
[69, 96, 148],
[70, 95, 148],
[71, 94, 148],
[71, 93, 148],
[72, 92, 148],
[72, 91, 148],
[73, 90, 149],
[74, 89, 149],
[74, 88, 149],
[75, 87, 149],
[76, 86, 149],
[76, 85, 149],
[77, 84, 149],
[78, 83, 149],
[78, 82, 149],
[79, 81, 149],
[85, 85, 85],
[87, 87, 87],
[90, 90, 90],
[93, 93, 93],
[96, 96, 96],
[99, 99, 99],
[102, 102, 102],
[105, 105, 105],
[108, 108, 108],
[111, 111, 111],
[114, 114, 114],
[117, 117, 117],
[120, 120, 120],
[123, 123, 123],
[126, 126, 126],
[129, 129, 129],
[132, 132, 132],
[135, 135, 135],
[138, 138, 138],
[141, 141, 141],
[144, 144, 144],
[147, 147, 147],
[150, 150, 150],
[153, 153, 153],
[156, 156, 156],
[158, 158, 158],
[161, 161, 161],
[164, 164, 164],
[167, 167, 167],
[169, 169, 169],
[172, 172, 172],
[175, 175, 175],
[178, 178, 178],
[180, 180, 180],
[183, 183, 183],
[186, 186, 186],
[189, 189, 189],
[191, 191, 191],
[194, 194, 194],
[197, 197, 197],
[200, 200, 200],
[202, 202, 202],
[205, 205, 205],
[208, 208, 208],
[211, 211, 211],
[213, 213, 213],
[216, 216, 216],
[219, 219, 219],
[222, 222, 222],
[224, 224, 224],
[227, 227, 227],
[230, 230, 230],
[233, 233, 233],
[235, 235, 235],
[238, 238, 238],
[241, 241, 241],
[244, 244, 244],
[246, 246, 246],
[249, 249, 249],
[252, 252, 252],
[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
[255, 255, 255],
]
colors = np.array(colors) / 255
# ListedColormap's second positional argument is the colormap name
return ListedColormap(colors[::-1], name="psf")
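# A minimal usage sketch (not part of the original module; assumes matplotlib's
# pyplot is importable): the LUT behaves like any other Matplotlib colormap.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    demo = np.random.rand(64, 64)
    plt.imshow(demo, cmap=psf_lut())
    plt.colorbar()
    plt.show()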
|
StarcoderdataPython
|
1738010
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=protected-access
import json
import os
import sys
import unittest
import mock
import re
from copy import deepcopy
from adal import AdalError
from azure.mgmt.resource.subscriptions.v2016_06_01.models import \
(SubscriptionState, Subscription, SubscriptionPolicies, SpendingLimit)
from azure.cli.core._profile import (Profile, CredsCache, SubscriptionFinder,
ServicePrincipalAuth, _AUTH_CTX_FACTORY)
from azure.cli.core.mock import DummyCli
from knack.util import CLIError
class TestProfile(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tenant_id = 'microsoft.com'
cls.user1 = '<EMAIL>'
cls.id1 = 'subscriptions/1'
cls.display_name1 = 'foo account'
cls.state1 = SubscriptionState.enabled
# Dummy Subscription from SDK azure.mgmt.resource.subscriptions.v2016_06_01.operations._subscriptions_operations.SubscriptionsOperations.list
# tenant_id shouldn't be set as tenantId isn't returned by REST API
# Must be deepcopied before used as mock_arm_client.subscriptions.list.return_value
cls.subscription1_raw = SubscriptionStub(cls.id1,
cls.display_name1,
cls.state1)
# Dummy result of azure.cli.core._profile.SubscriptionFinder._find_using_specific_tenant
# tenant_id denotes token tenant
cls.subscription1 = SubscriptionStub(cls.id1,
cls.display_name1,
cls.state1,
cls.tenant_id)
# Dummy result of azure.cli.core._profile.Profile._normalize_properties
cls.subscription1_normalized = {
'environmentName': 'AzureCloud',
'id': '1',
'name': cls.display_name1,
'state': cls.state1.value,
'user': {
'name': cls.user1,
'type': 'user'
},
'isDefault': False,
'tenantId': cls.tenant_id
}
cls.raw_token1 = '<PASSWORD>'
cls.token_entry1 = {
"_clientId": "04b07795-8ddb-461a-bbee-02f9e1bf7b46",
"resource": "https://management.core.windows.net/",
"tokenType": "Bearer",
"expiresOn": "2016-03-31T04:26:56.610Z",
"expiresIn": 3599,
"identityProvider": "live.com",
"_authority": "https://login.microsoftonline.com/common",
"isMRRT": True,
"refreshToken": "<PASSWORD>",
"accessToken": cls.raw_token1,
"userId": cls.user1
}
cls.user2 = '<EMAIL>'
cls.id2 = 'subscriptions/2'
cls.display_name2 = 'bar account'
cls.state2 = SubscriptionState.past_due
cls.subscription2_raw = SubscriptionStub(cls.id2,
cls.display_name2,
cls.state2)
cls.subscription2 = SubscriptionStub(cls.id2,
cls.display_name2,
cls.state2,
cls.tenant_id)
cls.subscription2_normalized = {
'environmentName': 'AzureCloud',
'id': '2',
'name': cls.display_name2,
'state': cls.state2.value,
'user': {
'name': cls.user2,
'type': 'user'
},
'isDefault': False,
'tenantId': cls.tenant_id
}
cls.test_msi_tenant = '54826b22-38d6-4fb2-bad9-b7b93a3e9c5a'
cls.test_msi_access_token = ('eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsIng1dCI6IlZXVkljMVdEMVRrc2JiMzAxc2FzTTVrT3E1'
'<KEY>'
'nZW1lbnQuY29yZS53aW5kb3dzLm5ldC<KEY>'
'yNmIyMi0zOGQ2LTRmYjItYmF<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'hYTktYTQyZi05OTg2OGQzMTQ2OTkiLCI1NDgwMzkxNy00YzcxLTRkNmMtOGJkZi1iYmQ5MzEwMTBmOGM'
'<KEY>'
'<KEY>'
'6InVzZXJfaW1wZXJzb25hdGlvbiIsInN1YiI6ImhRenl3b3FTLUEtRzAySTl6ZE5TRmtGd3R2MGVwZ2l'
'<KEY>ZHaFEiLCJ0aWQiOiI1NDgyNmIyMi0zOGQ2LTRmYjItYmFkOS1iN2I5M2EzZTljNWEiLCJ'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>pZrrmP-z'
'7DlN9-U0A0nEYDoXzXvo-ACTkm9_TakfADd36YlYB5aLna-yO0B7rk5W9ANelkzUQgRfidSHtCmV6i4V'
'e-lOym1sH5iOcxfIjXF0Tp2y0f3zM7qCq8Cp1ZxEwz6xYIgByoxjErNXrOME5Ld1WizcsaWxTXpwxJn_'
'Q8U2g9kXHrbYFeY2gJxF_hnfLvNKxUKUBnftmyYxZwKi0GDS0BvdJnJnsqSRSpxUx__Ra9QJkG1IaDzj'
'ZcSZPHK45T6ohK9Hk9ktZo0crVl7Tmw')
def test_normalize(self):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
expected = self.subscription1_normalized
self.assertEqual(expected, consolidated[0])
# verify serialization works
self.assertIsNotNone(json.dumps(consolidated[0]))
def test_normalize_with_unicode_in_subscription_name(self):
cli = DummyCli()
storage_mock = {'subscriptions': None}
test_display_name = 'sub' + chr(255)
polished_display_name = 'sub?'
test_subscription = SubscriptionStub('subscriptions/sub1',
test_display_name,
SubscriptionState.enabled,
'tenant1')
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[test_subscription],
False)
self.assertTrue(consolidated[0]['name'] in [polished_display_name, test_display_name])
def test_normalize_with_none_subscription_name(self):
cli = DummyCli()
storage_mock = {'subscriptions': None}
test_display_name = None
polished_display_name = ''
test_subscription = SubscriptionStub('subscriptions/sub1',
test_display_name,
SubscriptionState.enabled,
'tenant1')
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[test_subscription],
False)
self.assertTrue(consolidated[0]['name'] == polished_display_name)
def test_update_add_two_different_subscriptions(self):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
# add the first and verify
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
self.assertEqual(len(storage_mock['subscriptions']), 1)
subscription1 = storage_mock['subscriptions'][0]
subscription1_is_default = deepcopy(self.subscription1_normalized)
subscription1_is_default['isDefault'] = True
self.assertEqual(subscription1, subscription1_is_default)
# add the second and verify
consolidated = profile._normalize_properties(self.user2,
[self.subscription2],
False)
profile._set_subscriptions(consolidated)
self.assertEqual(len(storage_mock['subscriptions']), 2)
subscription2 = storage_mock['subscriptions'][1]
subscription2_is_default = deepcopy(self.subscription2_normalized)
subscription2_is_default['isDefault'] = True
self.assertEqual(subscription2, subscription2_is_default)
# verify the old one stays, but no longer active
self.assertEqual(storage_mock['subscriptions'][0]['name'],
subscription1['name'])
self.assertFalse(storage_mock['subscriptions'][0]['isDefault'])
def test_update_with_same_subscription_added_twice(self):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
# add one twice and verify we will have one but with new token
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
new_subscription1 = SubscriptionStub(self.id1,
self.display_name1,
self.state1,
self.tenant_id)
consolidated = profile._normalize_properties(self.user1,
[new_subscription1],
False)
profile._set_subscriptions(consolidated)
self.assertEqual(len(storage_mock['subscriptions']), 1)
self.assertTrue(storage_mock['subscriptions'][0]['isDefault'])
def test_set_active_subscription(self):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
consolidated = profile._normalize_properties(self.user2,
[self.subscription2],
False)
profile._set_subscriptions(consolidated)
self.assertTrue(storage_mock['subscriptions'][1]['isDefault'])
profile.set_active_subscription(storage_mock['subscriptions'][0]['id'])
self.assertFalse(storage_mock['subscriptions'][1]['isDefault'])
self.assertTrue(storage_mock['subscriptions'][0]['isDefault'])
def test_default_active_subscription_to_non_disabled_one(self):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
subscriptions = profile._normalize_properties(
self.user2, [self.subscription2, self.subscription1], False)
profile._set_subscriptions(subscriptions)
# verify we skip the overdued subscription and default to the 2nd one in the list
self.assertEqual(storage_mock['subscriptions'][1]['name'], self.subscription1.display_name)
self.assertTrue(storage_mock['subscriptions'][1]['isDefault'])
def test_get_subscription(self):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
self.assertEqual(self.display_name1, profile.get_subscription()['name'])
self.assertEqual(self.display_name1,
profile.get_subscription(subscription=self.display_name1)['name'])
sub_id = self.id1.split('/')[-1]
self.assertEqual(sub_id, profile.get_subscription()['id'])
self.assertEqual(sub_id, profile.get_subscription(subscription=sub_id)['id'])
self.assertRaises(CLIError, profile.get_subscription, "random_id")
def test_get_auth_info_fail_on_user_account(self):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
# testing dump of existing logged in account
self.assertRaises(CLIError, profile.get_sp_auth_info)
@mock.patch('azure.cli.core.profiles.get_api_version', autospec=True)
def test_subscription_finder_constructor(self, get_api_mock):
cli = DummyCli()
get_api_mock.return_value = '2016-06-01'
cli.cloud.endpoints.resource_manager = 'http://foo_arm'
finder = SubscriptionFinder(cli, None, None, arm_client_factory=None)
result = finder._arm_client_factory(mock.MagicMock())
self.assertEqual(result.config.base_url, 'http://foo_arm')
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_get_auth_info_for_logged_in_service_principal(self, mock_auth_context):
cli = DummyCli()
mock_auth_context.acquire_token_with_client_credentials.return_value = self.token_entry1
mock_arm_client = mock.MagicMock()
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
storage_mock = {'subscriptions': []}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
profile._management_resource_uri = 'https://management.core.windows.net/'
profile.find_subscriptions_on_login(False, '1234', 'my-secret', True, self.tenant_id, use_device_code=False,
allow_no_subscriptions=False, subscription_finder=finder)
# action
extended_info = profile.get_sp_auth_info()
# assert
self.assertEqual(self.id1.split('/')[-1], extended_info['subscriptionId'])
self.assertEqual('1234', extended_info['clientId'])
self.assertEqual('my-secret', extended_info['clientSecret'])
self.assertEqual('https://login.microsoftonline.com', extended_info['activeDirectoryEndpointUrl'])
self.assertEqual('https://management.azure.com/', extended_info['resourceManagerEndpointUrl'])
def test_get_auth_info_for_newly_created_service_principal(self):
cli = DummyCli()
storage_mock = {'subscriptions': []}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1, [self.subscription1], False)
profile._set_subscriptions(consolidated)
# action
extended_info = profile.get_sp_auth_info(name='1234', cert_file='/tmp/123.pem')
# assert
self.assertEqual(self.id1.split('/')[-1], extended_info['subscriptionId'])
self.assertEqual(self.tenant_id, extended_info['tenantId'])
self.assertEqual('1234', extended_info['clientId'])
self.assertEqual('/tmp/123.pem', extended_info['clientCertificate'])
self.assertIsNone(extended_info.get('clientSecret', None))
self.assertEqual('https://login.microsoftonline.com', extended_info['activeDirectoryEndpointUrl'])
self.assertEqual('https://management.azure.com/', extended_info['resourceManagerEndpointUrl'])
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_create_account_without_subscriptions_thru_service_principal(self, mock_auth_context):
mock_auth_context.acquire_token_with_client_credentials.return_value = self.token_entry1
cli = DummyCli()
mock_arm_client = mock.MagicMock()
mock_arm_client.subscriptions.list.return_value = []
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
storage_mock = {'subscriptions': []}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
profile._management_resource_uri = 'https://management.core.windows.net/'
# action
result = profile.find_subscriptions_on_login(False,
'1234',
'my-secret',
True,
self.tenant_id,
use_device_code=False,
allow_no_subscriptions=True,
subscription_finder=finder)
# assert
self.assertEqual(1, len(result))
self.assertEqual(result[0]['id'], self.tenant_id)
self.assertEqual(result[0]['state'], 'Enabled')
self.assertEqual(result[0]['tenantId'], self.tenant_id)
self.assertEqual(result[0]['name'], 'N/A(tenant level account)')
self.assertTrue(profile.is_tenant_level_account())
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_create_account_with_subscriptions_allow_no_subscriptions_thru_service_principal(self, mock_auth_context):
"""test subscription is returned even with --allow-no-subscriptions. """
mock_auth_context.acquire_token_with_client_credentials.return_value = self.token_entry1
cli = DummyCli()
mock_arm_client = mock.MagicMock()
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
storage_mock = {'subscriptions': []}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
profile._management_resource_uri = 'https://management.core.windows.net/'
# action
result = profile.find_subscriptions_on_login(False,
'1234',
'my-secret',
True,
self.tenant_id,
use_device_code=False,
allow_no_subscriptions=True,
subscription_finder=finder)
# assert
self.assertEqual(1, len(result))
self.assertEqual(result[0]['id'], self.id1.split('/')[-1])
self.assertEqual(result[0]['state'], 'Enabled')
self.assertEqual(result[0]['tenantId'], self.tenant_id)
self.assertEqual(result[0]['name'], self.display_name1)
self.assertFalse(profile.is_tenant_level_account())
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_create_account_without_subscriptions_thru_common_tenant(self, mock_auth_context):
mock_auth_context.acquire_token.return_value = self.token_entry1
mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
cli = DummyCli()
tenant_object = mock.MagicMock()
tenant_object.id = "foo-bar"
tenant_object.tenant_id = self.tenant_id
mock_arm_client = mock.MagicMock()
mock_arm_client.subscriptions.list.return_value = []
mock_arm_client.tenants.list.return_value = (x for x in [tenant_object])
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
storage_mock = {'subscriptions': []}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
profile._management_resource_uri = 'https://management.core.windows.net/'
# action
result = profile.find_subscriptions_on_login(False,
'1234',
'my-secret',
False,
None,
use_device_code=False,
allow_no_subscriptions=True,
subscription_finder=finder)
# assert
self.assertEqual(1, len(result))
self.assertEqual(result[0]['id'], self.tenant_id)
self.assertEqual(result[0]['state'], 'Enabled')
self.assertEqual(result[0]['tenantId'], self.tenant_id)
self.assertEqual(result[0]['name'], 'N/A(tenant level account)')
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_create_account_without_subscriptions_without_tenant(self, mock_auth_context):
cli = DummyCli()
finder = mock.MagicMock()
finder.find_through_interactive_flow.return_value = []
storage_mock = {'subscriptions': []}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
# action
result = profile.find_subscriptions_on_login(True,
'1234',
'my-secret',
False,
None,
use_device_code=False,
allow_no_subscriptions=True,
subscription_finder=finder)
# assert
self.assertTrue(0 == len(result))
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
def test_get_current_account_user(self, mock_read_cred_file):
cli = DummyCli()
# setup
mock_read_cred_file.return_value = [TestProfile.token_entry1]
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
# action
user = profile.get_current_account_user()
# verify
self.assertEqual(user, self.user1)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', return_value=None)
def test_create_token_cache(self, mock_read_file):
cli = DummyCli()
mock_read_file.return_value = []
profile = Profile(cli_ctx=cli, use_global_creds_cache=False, async_persist=False)
cache = profile._creds_cache.adal_token_cache
self.assertFalse(cache.read_items())
self.assertTrue(mock_read_file.called)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
def test_load_cached_tokens(self, mock_read_file):
cli = DummyCli()
mock_read_file.return_value = [TestProfile.token_entry1]
profile = Profile(cli_ctx=cli, use_global_creds_cache=False, async_persist=False)
cache = profile._creds_cache.adal_token_cache
matched = cache.find({
"_authority": "https://login.microsoftonline.com/common",
"_clientId": "04b07795-8ddb-461a-bbee-02f9e1bf7b46",
"userId": self.user1
})
self.assertEqual(len(matched), 1)
self.assertEqual(matched[0]['accessToken'], self.raw_token1)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_user', autospec=True)
def test_get_login_credentials(self, mock_get_token, mock_read_cred_file):
cli = DummyCli()
some_token_type = 'Bearer'
mock_read_cred_file.return_value = [TestProfile.token_entry1]
mock_get_token.return_value = (some_token_type, TestProfile.raw_token1)
# setup
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
test_subscription = SubscriptionStub('/subscriptions/{}'.format(test_subscription_id),
'MSI-DEV-INC', self.state1, '12345678-38d6-4fb2-bad9-b7b93a3e1234')
consolidated = profile._normalize_properties(self.user1,
[test_subscription],
False)
profile._set_subscriptions(consolidated)
# action
cred, subscription_id, _ = profile.get_login_credentials()
# verify
self.assertEqual(subscription_id, test_subscription_id)
# verify the cred._tokenRetriever is a working lambda
token_type, token = cred._token_retriever()
self.assertEqual(token, self.raw_token1)
self.assertEqual(some_token_type, token_type)
mock_get_token.assert_called_once_with(mock.ANY, self.user1, test_tenant_id,
'https://management.core.windows.net/')
self.assertEqual(mock_get_token.call_count, 1)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_user', autospec=True)
def test_get_login_credentials_aux_subscriptions(self, mock_get_token, mock_read_cred_file):
cli = DummyCli()
raw_token2 = '<PASSWORD>'
token_entry2 = {
"resource": "https://management.core.windows.net/",
"tokenType": "Bearer",
"_authority": "https://login.microsoftonline.com/common",
"accessToken": raw_token2,
}
some_token_type = 'Bearer'
mock_read_cred_file.return_value = [TestProfile.token_entry1, token_entry2]
mock_get_token.side_effect = [(some_token_type, TestProfile.raw_token1), (some_token_type, raw_token2)]
# setup
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_subscription_id2 = '12345678-1bf0-4dda-aec3-cb9272f09591'
test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
test_tenant_id2 = '12345678-38d6-4fb2-bad9-b7b93a3e4321'
test_subscription = SubscriptionStub('/subscriptions/{}'.format(test_subscription_id),
'MSI-DEV-INC', self.state1, test_tenant_id)
test_subscription2 = SubscriptionStub('/subscriptions/{}'.format(test_subscription_id2),
'MSI-DEV-INC2', self.state1, test_tenant_id2)
consolidated = profile._normalize_properties(self.user1,
[test_subscription, test_subscription2],
False)
profile._set_subscriptions(consolidated)
# action
cred, subscription_id, _ = profile.get_login_credentials(subscription_id=test_subscription_id,
aux_subscriptions=[test_subscription_id2])
# verify
self.assertEqual(subscription_id, test_subscription_id)
# verify the cred._tokenRetriever is a working lambda
token_type, token = cred._token_retriever()
self.assertEqual(token, self.raw_token1)
self.assertEqual(some_token_type, token_type)
token2 = cred._external_tenant_token_retriever()
self.assertEqual(len(token2), 1)
self.assertEqual(token2[0][1], raw_token2)
self.assertEqual(mock_get_token.call_count, 2)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core.adal_authentication.MSIAuthenticationWrapper', autospec=True)
def test_get_login_credentials_msi_system_assigned(self, mock_msi_auth, mock_read_cred_file):
mock_read_cred_file.return_value = []
# setup an existing msi subscription
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
test_user = 'systemAssignedIdentity'
msi_subscription = SubscriptionStub('/subscriptions/' + test_subscription_id, 'MSI', self.state1, test_tenant_id)
consolidated = profile._normalize_properties(test_user,
[msi_subscription],
True)
profile._set_subscriptions(consolidated)
mock_msi_auth.side_effect = MSRestAzureAuthStub
# action
cred, subscription_id, _ = profile.get_login_credentials()
# assert
self.assertEqual(subscription_id, test_subscription_id)
# sniff test the msi_auth object
cred.set_token()
cred.token
self.assertTrue(cred.set_token_invoked_count)
self.assertTrue(cred.token_read_count)
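    # Profile encodes the MSI identity in the account's user name: plain 'MSI' for a system-assigned
    # identity, and 'MSIClient-<client id>', 'MSIObject-<object id>' or 'MSIResource-<resource id>' for
    # user-assigned identities. The tests below cover each user-assigned variant.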
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core.adal_authentication.MSIAuthenticationWrapper', autospec=True)
def test_get_login_credentials_msi_user_assigned_with_client_id(self, mock_msi_auth, mock_read_cred_file):
mock_read_cred_file.return_value = []
# setup an existing msi subscription
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
test_user = 'userAssignedIdentity'
test_client_id = '12345678-38d6-4fb2-bad9-b7b93a3e8888'
msi_subscription = SubscriptionStub('/subscriptions/' + test_subscription_id, 'MSIClient-{}'.format(test_client_id), self.state1, test_tenant_id)
consolidated = profile._normalize_properties(test_user, [msi_subscription], True)
profile._set_subscriptions(consolidated, secondary_key_name='name')
mock_msi_auth.side_effect = MSRestAzureAuthStub
# action
cred, subscription_id, _ = profile.get_login_credentials()
# assert
self.assertEqual(subscription_id, test_subscription_id)
# sniff test the msi_auth object
cred.set_token()
cred.token
self.assertTrue(cred.set_token_invoked_count)
self.assertTrue(cred.token_read_count)
self.assertTrue(cred.client_id, test_client_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core.adal_authentication.MSIAuthenticationWrapper', autospec=True)
def test_get_login_credentials_msi_user_assigned_with_object_id(self, mock_msi_auth, mock_read_cred_file):
mock_read_cred_file.return_value = []
# setup an existing msi subscription
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_object_id = '12345678-38d6-4fb2-bad9-b7b93a3e9999'
msi_subscription = SubscriptionStub('/subscriptions/12345678-1bf0-4dda-aec3-cb9272f09590',
'MSIObject-{}'.format(test_object_id),
self.state1, '12345678-38d6-4fb2-bad9-b7b93a3e1234')
consolidated = profile._normalize_properties('userAssignedIdentity', [msi_subscription], True)
profile._set_subscriptions(consolidated, secondary_key_name='name')
mock_msi_auth.side_effect = MSRestAzureAuthStub
# action
cred, subscription_id, _ = profile.get_login_credentials()
# assert
self.assertEqual(subscription_id, test_subscription_id)
# sniff test the msi_auth object
cred.set_token()
cred.token
self.assertTrue(cred.set_token_invoked_count)
self.assertTrue(cred.token_read_count)
self.assertTrue(cred.object_id, test_object_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core.adal_authentication.MSIAuthenticationWrapper', autospec=True)
def test_get_login_credentials_msi_user_assigned_with_res_id(self, mock_msi_auth, mock_read_cred_file):
mock_read_cred_file.return_value = []
# setup an existing msi subscription
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_res_id = ('/subscriptions/{}/resourceGroups/r1/providers/Microsoft.ManagedIdentity/'
'userAssignedIdentities/id1').format(test_subscription_id)
msi_subscription = SubscriptionStub('/subscriptions/{}'.format(test_subscription_id),
'MSIResource-{}'.format(test_res_id),
self.state1, '12345678-38d6-4fb2-bad9-b7b93a3e1234')
consolidated = profile._normalize_properties('userAssignedIdentity', [msi_subscription], True)
profile._set_subscriptions(consolidated, secondary_key_name='name')
mock_msi_auth.side_effect = MSRestAzureAuthStub
# action
cred, subscription_id, _ = profile.get_login_credentials()
# assert
self.assertEqual(subscription_id, test_subscription_id)
# sniff test the msi_auth object
cred.set_token()
cred.token
self.assertTrue(cred.set_token_invoked_count)
self.assertTrue(cred.token_read_count)
self.assertTrue(cred.msi_res_id, test_res_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_user', autospec=True)
def test_get_raw_token(self, mock_get_token, mock_read_cred_file):
cli = DummyCli()
some_token_type = 'Bearer'
mock_read_cred_file.return_value = [TestProfile.token_entry1]
mock_get_token.return_value = (some_token_type, TestProfile.raw_token1,
TestProfile.token_entry1)
# setup
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
# action
creds, sub, tenant = profile.get_raw_token(resource='https://foo')
# verify
self.assertEqual(creds[0], self.token_entry1['tokenType'])
self.assertEqual(creds[1], self.raw_token1)
# the last in the tuple is the whole token entry which has several fields
self.assertEqual(creds[2]['expiresOn'], self.token_entry1['expiresOn'])
mock_get_token.assert_called_once_with(mock.ANY, self.user1, self.tenant_id,
'https://foo')
self.assertEqual(mock_get_token.call_count, 1)
self.assertEqual(sub, '1')
self.assertEqual(tenant, self.tenant_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_service_principal', autospec=True)
def test_get_raw_token_for_sp(self, mock_get_token, mock_read_cred_file):
cli = DummyCli()
some_token_type = 'Bearer'
mock_read_cred_file.return_value = [TestProfile.token_entry1]
mock_get_token.return_value = (some_token_type, TestProfile.raw_token1,
TestProfile.token_entry1)
# setup
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties('sp1',
[self.subscription1],
True)
profile._set_subscriptions(consolidated)
# action
creds, sub, tenant = profile.get_raw_token(resource='https://foo')
# verify
self.assertEqual(creds[0], self.token_entry1['tokenType'])
self.assertEqual(creds[1], self.raw_token1)
# the last in the tuple is the whole token entry which has several fields
self.assertEqual(creds[2]['expiresOn'], self.token_entry1['expiresOn'])
mock_get_token.assert_called_once_with(mock.ANY, 'sp1', 'https://foo', self.tenant_id, False)
self.assertEqual(mock_get_token.call_count, 1)
self.assertEqual(sub, '1')
self.assertEqual(tenant, self.tenant_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core.adal_authentication.MSIAuthenticationWrapper', autospec=True)
def test_get_raw_token_msi_system_assigned(self, mock_msi_auth, mock_read_cred_file):
mock_read_cred_file.return_value = []
# setup an existing msi subscription
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
test_subscription_id = '12345678-1bf0-4dda-aec3-cb9272f09590'
test_tenant_id = '12345678-38d6-4fb2-bad9-b7b93a3e1234'
test_user = 'systemAssignedIdentity'
msi_subscription = SubscriptionStub('/subscriptions/' + test_subscription_id,
'MSI', self.state1, test_tenant_id)
consolidated = profile._normalize_properties(test_user,
[msi_subscription],
True)
profile._set_subscriptions(consolidated)
mock_msi_auth.side_effect = MSRestAzureAuthStub
# action
cred, subscription_id, _ = profile.get_raw_token(resource='http://test_resource')
# assert
self.assertEqual(subscription_id, test_subscription_id)
self.assertEqual(cred[0], 'Bearer')
self.assertEqual(cred[1], TestProfile.test_msi_access_token)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_user', autospec=True)
def test_get_login_credentials_for_graph_client(self, mock_get_token, mock_read_cred_file):
cli = DummyCli()
some_token_type = 'Bearer'
mock_read_cred_file.return_value = [TestProfile.token_entry1]
mock_get_token.return_value = (some_token_type, TestProfile.raw_token1)
# setup
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1, [self.subscription1],
False)
profile._set_subscriptions(consolidated)
# action
cred, _, tenant_id = profile.get_login_credentials(
resource=cli.cloud.endpoints.active_directory_graph_resource_id)
_, _ = cred._token_retriever()
# verify
mock_get_token.assert_called_once_with(mock.ANY, self.user1, self.tenant_id,
'https://graph.windows.net/')
self.assertEqual(tenant_id, self.tenant_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core._profile.CredsCache.retrieve_token_for_user', autospec=True)
def test_get_login_credentials_for_data_lake_client(self, mock_get_token, mock_read_cred_file):
cli = DummyCli()
some_token_type = 'Bearer'
mock_read_cred_file.return_value = [TestProfile.token_entry1]
mock_get_token.return_value = (some_token_type, TestProfile.raw_token1)
# setup
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1, [self.subscription1],
False)
profile._set_subscriptions(consolidated)
# action
cred, _, tenant_id = profile.get_login_credentials(
resource=cli.cloud.endpoints.active_directory_data_lake_resource_id)
_, _ = cred._token_retriever()
# verify
mock_get_token.assert_called_once_with(mock.ANY, self.user1, self.tenant_id,
'https://datalake.azure.net/')
self.assertEqual(tenant_id, self.tenant_id)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('azure.cli.core._profile.CredsCache.persist_cached_creds', autospec=True)
def test_logout(self, mock_persist_creds, mock_read_cred_file):
cli = DummyCli()
# setup
mock_read_cred_file.return_value = [TestProfile.token_entry1]
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
profile._set_subscriptions(consolidated)
self.assertEqual(1, len(storage_mock['subscriptions']))
# action
profile.logout(self.user1)
# verify
self.assertEqual(0, len(storage_mock['subscriptions']))
self.assertEqual(mock_read_cred_file.call_count, 1)
self.assertEqual(mock_persist_creds.call_count, 1)
@mock.patch('azure.cli.core._profile._delete_file', autospec=True)
def test_logout_all(self, mock_delete_cred_file):
cli = DummyCli()
# setup
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1,
[self.subscription1],
False)
consolidated2 = profile._normalize_properties(self.user2,
[self.subscription2],
False)
profile._set_subscriptions(consolidated + consolidated2)
self.assertEqual(2, len(storage_mock['subscriptions']))
# action
profile.logout_all()
# verify
self.assertEqual([], storage_mock['subscriptions'])
self.assertEqual(mock_delete_cred_file.call_count, 1)
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_find_subscriptions_thru_username_password(self, mock_auth_context):
cli = DummyCli()
mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
mock_auth_context.acquire_token.return_value = self.token_entry1
mock_arm_client = mock.MagicMock()
mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
mgmt_resource = 'https://management.core.windows.net/'
# action
subs = finder.find_from_user_account(self.user1, 'bar', None, mgmt_resource)
# assert
self.assertEqual([self.subscription1], subs)
mock_auth_context.acquire_token_with_username_password.assert_called_once_with(
mgmt_resource, self.user1, 'bar', mock.ANY)
mock_auth_context.acquire_token.assert_called_once_with(
mgmt_resource, self.user1, mock.ANY)
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_find_subscriptions_thru_username_non_password(self, mock_auth_context):
cli = DummyCli()
mock_auth_context.acquire_token_with_username_password.return_value = None
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: None)
# action
subs = finder.find_from_user_account(self.user1, 'bar', None, 'http://goo-resource')
# assert
self.assertEqual([], subs)
@mock.patch('azure.cli.core.adal_authentication.MSIAuthenticationWrapper', autospec=True)
@mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
@mock.patch('azure.cli.core._profile._get_cloud_console_token_endpoint', autospec=True)
@mock.patch('azure.cli.core._profile.SubscriptionFinder', autospec=True)
def test_find_subscriptions_in_cloud_console(self, mock_subscription_finder, mock_get_token_endpoint,
mock_get_client_class, mock_msi_auth):
class SubscriptionFinderStub:
def find_from_raw_token(self, tenant, token):
# make sure the tenant and token args match 'TestProfile.test_msi_access_token'
if token != TestProfile.test_msi_access_token or tenant != '54826b22-38d6-4fb2-bad9-b7b93a3e9c5a':
raise AssertionError('find_from_raw_token was not invoked with expected tenant or token')
return [TestProfile.subscription1]
mock_subscription_finder.return_value = SubscriptionFinderStub()
mock_get_token_endpoint.return_value = "http://great_endpoint"
mock_msi_auth.return_value = MSRestAzureAuthStub()
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
# action
subscriptions = profile.find_subscriptions_in_cloud_console()
# assert
self.assertEqual(len(subscriptions), 1)
s = subscriptions[0]
self.assertEqual(s['user']['name'], '<EMAIL>')
self.assertEqual(s['user']['cloudShellID'], True)
self.assertEqual(s['user']['type'], 'user')
self.assertEqual(s['name'], self.display_name1)
self.assertEqual(s['id'], self.id1.split('/')[-1])
@mock.patch('requests.get', autospec=True)
@mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
def test_find_subscriptions_in_vm_with_msi_system_assigned(self, mock_get_client_class, mock_get):
class ClientStub:
def __init__(self, *args, **kwargs):
self.subscriptions = mock.MagicMock()
self.subscriptions.list.return_value = [deepcopy(TestProfile.subscription1_raw)]
self.config = mock.MagicMock()
self._client = mock.MagicMock()
mock_get_client_class.return_value = ClientStub
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
test_token_entry = {
'token_type': 'Bearer',
'access_token': TestProfile.test_msi_access_token
}
encoded_test_token = json.dumps(test_token_entry).encode()
good_response = mock.MagicMock()
good_response.status_code = 200
good_response.content = encoded_test_token
mock_get.return_value = good_response
subscriptions = profile.find_subscriptions_in_vm_with_msi()
# assert
self.assertEqual(len(subscriptions), 1)
s = subscriptions[0]
self.assertEqual(s['user']['name'], 'systemAssignedIdentity')
self.assertEqual(s['user']['type'], 'servicePrincipal')
self.assertEqual(s['user']['assignedIdentityInfo'], 'MSI')
self.assertEqual(s['name'], self.display_name1)
self.assertEqual(s['id'], self.id1.split('/')[-1])
self.assertEqual(s['tenantId'], '54826b22-38d6-4fb2-bad9-b7b93a3e9c5a')
@mock.patch('requests.get', autospec=True)
@mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
def test_find_subscriptions_in_vm_with_msi_no_subscriptions(self, mock_get_client_class, mock_get):
class ClientStub:
def __init__(self, *args, **kwargs):
self.subscriptions = mock.MagicMock()
self.subscriptions.list.return_value = []
self.config = mock.MagicMock()
self._client = mock.MagicMock()
mock_get_client_class.return_value = ClientStub
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
test_token_entry = {
'token_type': 'Bearer',
'access_token': TestProfile.test_msi_access_token
}
encoded_test_token = json.dumps(test_token_entry).encode()
good_response = mock.MagicMock()
good_response.status_code = 200
good_response.content = encoded_test_token
mock_get.return_value = good_response
subscriptions = profile.find_subscriptions_in_vm_with_msi(allow_no_subscriptions=True)
# assert
self.assertEqual(len(subscriptions), 1)
s = subscriptions[0]
self.assertEqual(s['user']['name'], 'systemAssignedIdentity')
self.assertEqual(s['user']['type'], 'servicePrincipal')
self.assertEqual(s['user']['assignedIdentityInfo'], 'MSI')
self.assertEqual(s['name'], 'N/A(tenant level account)')
self.assertEqual(s['id'], self.test_msi_tenant)
self.assertEqual(s['tenantId'], self.test_msi_tenant)
@mock.patch('requests.get', autospec=True)
@mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
def test_find_subscriptions_in_vm_with_msi_user_assigned_with_client_id(self, mock_get_client_class, mock_get):
class ClientStub:
def __init__(self, *args, **kwargs):
self.subscriptions = mock.MagicMock()
self.subscriptions.list.return_value = [deepcopy(TestProfile.subscription1_raw)]
self.config = mock.MagicMock()
self._client = mock.MagicMock()
mock_get_client_class.return_value = ClientStub
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
test_token_entry = {
'token_type': 'Bearer',
'access_token': TestProfile.test_msi_access_token
}
test_client_id = '54826b22-38d6-4fb2-bad9-b7b93a3e9999'
encoded_test_token = json.dumps(test_token_entry).encode()
good_response = mock.MagicMock()
good_response.status_code = 200
good_response.content = encoded_test_token
mock_get.return_value = good_response
subscriptions = profile.find_subscriptions_in_vm_with_msi(identity_id=test_client_id)
# assert
self.assertEqual(len(subscriptions), 1)
s = subscriptions[0]
self.assertEqual(s['user']['name'], 'userAssignedIdentity')
self.assertEqual(s['user']['type'], 'servicePrincipal')
self.assertEqual(s['name'], self.display_name1)
self.assertEqual(s['user']['assignedIdentityInfo'], 'MSIClient-{}'.format(test_client_id))
self.assertEqual(s['id'], self.id1.split('/')[-1])
self.assertEqual(s['tenantId'], '54826b22-38d6-4fb2-bad9-b7b93a3e9c5a')
@mock.patch('azure.cli.core.adal_authentication.MSIAuthenticationWrapper', autospec=True)
@mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
@mock.patch('azure.cli.core._profile.SubscriptionFinder', autospec=True)
def test_find_subscriptions_in_vm_with_msi_user_assigned_with_object_id(self, mock_subscription_finder, mock_get_client_class,
mock_msi_auth):
from requests import HTTPError
class SubscriptionFinderStub:
def find_from_raw_token(self, tenant, token):
# make sure the tenant and token args match 'TestProfile.test_msi_access_token'
if token != TestProfile.test_msi_access_token or tenant != '54826b22-38d6-4fb2-bad9-b7b93a3e9c5a':
raise AssertionError('find_from_raw_token was not invoked with expected tenant or token')
return [TestProfile.subscription1]
class AuthStub:
def __init__(self, **kwargs):
self.token = None
self.client_id = kwargs.get('client_id')
self.object_id = kwargs.get('object_id')
# since msrestazure 0.4.34, set_token in init
self.set_token()
def set_token(self):
                # here we reject the 1st sniffing attempt, which tries client_id, and then accept the 2nd, which uses object_id
if self.object_id:
self.token = {
'token_type': 'Bearer',
                        'access_token': TestProfile.test_msi_access_token
}
else:
mock_obj = mock.MagicMock()
mock_obj.status, mock_obj.reason = 400, 'Bad Request'
raise HTTPError(response=mock_obj)
profile = Profile(cli_ctx=DummyCli(), storage={'subscriptions': None}, use_global_creds_cache=False,
async_persist=False)
mock_subscription_finder.return_value = SubscriptionFinderStub()
mock_msi_auth.side_effect = AuthStub
test_object_id = '54826b22-38d6-4fb2-bad9-b7b93a3e9999'
# action
subscriptions = profile.find_subscriptions_in_vm_with_msi(identity_id=test_object_id)
# assert
self.assertEqual(subscriptions[0]['user']['assignedIdentityInfo'], 'MSIObject-{}'.format(test_object_id))
@mock.patch('requests.get', autospec=True)
@mock.patch('azure.cli.core.profiles._shared.get_client_class', autospec=True)
def test_find_subscriptions_in_vm_with_msi_user_assigned_with_res_id(self, mock_get_client_class, mock_get):
class ClientStub:
def __init__(self, *args, **kwargs):
self.subscriptions = mock.MagicMock()
self.subscriptions.list.return_value = [deepcopy(TestProfile.subscription1_raw)]
self.config = mock.MagicMock()
self._client = mock.MagicMock()
mock_get_client_class.return_value = ClientStub
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
test_token_entry = {
'token_type': 'Bearer',
'access_token': TestProfile.test_msi_access_token
}
test_res_id = ('/subscriptions/0b1f6471-1bf0-4dda-aec3-cb9272f09590/resourcegroups/g1/'
'providers/Microsoft.ManagedIdentity/userAssignedIdentities/id1')
encoded_test_token = json.dumps(test_token_entry).encode()
good_response = mock.MagicMock()
good_response.status_code = 200
good_response.content = encoded_test_token
mock_get.return_value = good_response
subscriptions = profile.find_subscriptions_in_vm_with_msi(identity_id=test_res_id)
# assert
self.assertEqual(subscriptions[0]['user']['assignedIdentityInfo'], 'MSIResource-{}'.format(test_res_id))
@mock.patch('adal.AuthenticationContext.acquire_token_with_username_password', autospec=True)
@mock.patch('adal.AuthenticationContext.acquire_token', autospec=True)
def test_find_subscriptions_thru_username_password_adfs(self, mock_acquire_token,
mock_acquire_token_username_password):
cli = DummyCli()
TEST_ADFS_AUTH_URL = 'https://adfs.local.azurestack.external/adfs'
def test_acquire_token(self, resource, username, password, client_id):
global acquire_token_invoked
acquire_token_invoked = True
if (self.authority.url == TEST_ADFS_AUTH_URL and self.authority.is_adfs_authority):
return TestProfile.token_entry1
else:
raise ValueError('AuthContext was not initialized correctly for ADFS')
mock_acquire_token_username_password.side_effect = test_acquire_token
mock_acquire_token.return_value = self.token_entry1
mock_arm_client = mock.MagicMock()
mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
cli.cloud.endpoints.active_directory = TEST_ADFS_AUTH_URL
finder = SubscriptionFinder(cli, _AUTH_CTX_FACTORY, None, lambda _: mock_arm_client)
mgmt_resource = 'https://management.core.windows.net/'
# action
subs = finder.find_from_user_account(self.user1, 'bar', None, mgmt_resource)
# assert
self.assertEqual([self.subscription1], subs)
self.assertTrue(acquire_token_invoked)
@mock.patch('adal.AuthenticationContext', autospec=True)
@mock.patch('azure.cli.core._profile.logger', autospec=True)
def test_find_subscriptions_thru_username_password_with_account_disabled(self, mock_logger, mock_auth_context):
cli = DummyCli()
mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
mock_auth_context.acquire_token.side_effect = AdalError('Account is disabled')
mock_arm_client = mock.MagicMock()
mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
mgmt_resource = 'https://management.core.windows.net/'
# action
subs = finder.find_from_user_account(self.user1, 'bar', None, mgmt_resource)
# assert
self.assertEqual([], subs)
mock_logger.warning.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY)
@mock.patch('adal.AuthenticationContext', autospec=True)
    def test_find_subscriptions_from_particular_tenant(self, mock_auth_context):
def just_raise(ex):
raise ex
cli = DummyCli()
mock_arm_client = mock.MagicMock()
mock_arm_client.tenants.list.side_effect = lambda: just_raise(
ValueError("'tenants.list' should not occur"))
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
# action
subs = finder.find_from_user_account(self.user1, 'bar', self.tenant_id, 'http://someresource')
# assert
self.assertEqual([self.subscription1], subs)
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_find_subscriptions_through_device_code_flow(self, mock_auth_context):
cli = DummyCli()
test_nonsense_code = {'message': 'magic code for you'}
mock_auth_context.acquire_user_code.return_value = test_nonsense_code
mock_auth_context.acquire_token_with_device_code.return_value = self.token_entry1
mock_arm_client = mock.MagicMock()
mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
mgmt_resource = 'https://management.core.windows.net/'
# action
subs = finder.find_through_interactive_flow(None, mgmt_resource)
# assert
self.assertEqual([self.subscription1], subs)
mock_auth_context.acquire_user_code.assert_called_once_with(
mgmt_resource, mock.ANY)
mock_auth_context.acquire_token_with_device_code.assert_called_once_with(
mgmt_resource, test_nonsense_code, mock.ANY)
mock_auth_context.acquire_token.assert_called_once_with(
mgmt_resource, self.user1, mock.ANY)
@mock.patch('adal.AuthenticationContext', autospec=True)
@mock.patch('azure.cli.core._profile._get_authorization_code', autospec=True)
def test_find_subscriptions_through_authorization_code_flow(self, _get_authorization_code_mock, mock_auth_context):
import adal
cli = DummyCli()
mock_arm_client = mock.MagicMock()
mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
token_cache = adal.TokenCache()
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, token_cache, lambda _: mock_arm_client)
_get_authorization_code_mock.return_value = {
'code': 'code1',
'reply_url': 'http://localhost:8888'
}
mgmt_resource = 'https://management.core.windows.net/'
temp_token_cache = mock.MagicMock()
type(mock_auth_context).cache = temp_token_cache
temp_token_cache.read_items.return_value = []
mock_auth_context.acquire_token_with_authorization_code.return_value = self.token_entry1
# action
        subs = finder.find_through_authorization_code_flow(None, mgmt_resource, 'https://some_aad_point/common')
# assert
self.assertEqual([self.subscription1], subs)
mock_auth_context.acquire_token.assert_called_once_with(mgmt_resource, self.user1, mock.ANY)
mock_auth_context.acquire_token_with_authorization_code.assert_called_once_with('code1',
'http://localhost:8888',
mgmt_resource, mock.ANY,
None)
        _get_authorization_code_mock.assert_called_once_with(mgmt_resource, 'https://some_aad_point/common')
@mock.patch('adal.AuthenticationContext', autospec=True)
    def test_find_subscriptions_interactive_from_particular_tenant(self, mock_auth_context):
def just_raise(ex):
raise ex
cli = DummyCli()
mock_arm_client = mock.MagicMock()
mock_arm_client.tenants.list.side_effect = lambda: just_raise(
ValueError("'tenants.list' should not occur"))
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
# action
subs = finder.find_through_interactive_flow(self.tenant_id, 'http://someresource')
# assert
self.assertEqual([self.subscription1], subs)
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_find_subscriptions_from_service_principal_id(self, mock_auth_context):
cli = DummyCli()
mock_auth_context.acquire_token_with_client_credentials.return_value = self.token_entry1
mock_arm_client = mock.MagicMock()
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
mgmt_resource = 'https://management.core.windows.net/'
# action
subs = finder.find_from_service_principal_id('my app', ServicePrincipalAuth('my secret'),
self.tenant_id, mgmt_resource)
# assert
self.assertEqual([self.subscription1], subs)
mock_arm_client.tenants.list.assert_not_called()
mock_auth_context.acquire_token.assert_not_called()
mock_auth_context.acquire_token_with_client_credentials.assert_called_once_with(
mgmt_resource, 'my app', 'my secret')
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_find_subscriptions_from_service_principal_using_cert(self, mock_auth_context):
cli = DummyCli()
mock_auth_context.acquire_token_with_client_certificate.return_value = self.token_entry1
mock_arm_client = mock.MagicMock()
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
mgmt_resource = 'https://management.core.windows.net/'
curr_dir = os.path.dirname(os.path.realpath(__file__))
test_cert_file = os.path.join(curr_dir, 'sp_cert.pem')
# action
subs = finder.find_from_service_principal_id('my app', ServicePrincipalAuth(test_cert_file),
self.tenant_id, mgmt_resource)
# assert
self.assertEqual([self.subscription1], subs)
mock_arm_client.tenants.list.assert_not_called()
mock_auth_context.acquire_token.assert_not_called()
mock_auth_context.acquire_token_with_client_certificate.assert_called_once_with(
mgmt_resource, 'my app', mock.ANY, mock.ANY, None)
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_find_subscriptions_from_service_principal_using_cert_sn_issuer(self, mock_auth_context):
cli = DummyCli()
mock_auth_context.acquire_token_with_client_certificate.return_value = self.token_entry1
mock_arm_client = mock.MagicMock()
mock_arm_client.subscriptions.list.return_value = [deepcopy(self.subscription1_raw)]
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
mgmt_resource = 'https://management.core.windows.net/'
curr_dir = os.path.dirname(os.path.realpath(__file__))
test_cert_file = os.path.join(curr_dir, 'sp_cert.pem')
with open(test_cert_file) as cert_file:
cert_file_string = cert_file.read()
match = re.search(r'\-+BEGIN CERTIFICATE.+\-+(?P<public>[^-]+)\-+END CERTIFICATE.+\-+',
cert_file_string, re.I)
public_certificate = match.group('public').strip()
# action
subs = finder.find_from_service_principal_id('my app', ServicePrincipalAuth(test_cert_file, use_cert_sn_issuer=True),
self.tenant_id, mgmt_resource)
# assert
self.assertEqual([self.subscription1], subs)
mock_arm_client.tenants.list.assert_not_called()
mock_auth_context.acquire_token.assert_not_called()
mock_auth_context.acquire_token_with_client_certificate.assert_called_once_with(
mgmt_resource, 'my app', mock.ANY, mock.ANY, public_certificate)
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_refresh_accounts_one_user_account(self, mock_auth_context):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1, deepcopy([self.subscription1]), False)
profile._set_subscriptions(consolidated)
mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
mock_auth_context.acquire_token.return_value = self.token_entry1
mock_arm_client = mock.MagicMock()
mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
mock_arm_client.subscriptions.list.return_value = deepcopy([self.subscription1_raw, self.subscription2_raw])
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
# action
profile.refresh_accounts(finder)
# assert
result = storage_mock['subscriptions']
self.assertEqual(2, len(result))
self.assertEqual(self.id1.split('/')[-1], result[0]['id'])
self.assertEqual(self.id2.split('/')[-1], result[1]['id'])
self.assertTrue(result[0]['isDefault'])
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_refresh_accounts_one_user_account_one_sp_account(self, mock_auth_context):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
sp_subscription1 = SubscriptionStub('sp-sub/3', 'foo-subname', self.state1, 'foo_tenant.onmicrosoft.com')
consolidated = profile._normalize_properties(self.user1, deepcopy([self.subscription1]), False)
consolidated += profile._normalize_properties('http://foo', [sp_subscription1], True)
profile._set_subscriptions(consolidated)
mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
mock_auth_context.acquire_token.return_value = self.token_entry1
mock_auth_context.acquire_token_with_client_credentials.return_value = self.token_entry1
mock_arm_client = mock.MagicMock()
mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
mock_arm_client.subscriptions.list.side_effect = deepcopy([[self.subscription1], [self.subscription2, sp_subscription1]])
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
profile._creds_cache.retrieve_secret_of_service_principal = lambda _: 'verySecret'
profile._creds_cache.flush_to_disk = lambda _: ''
# action
profile.refresh_accounts(finder)
# assert
result = storage_mock['subscriptions']
self.assertEqual(3, len(result))
self.assertEqual(self.id1.split('/')[-1], result[0]['id'])
self.assertEqual(self.id2.split('/')[-1], result[1]['id'])
self.assertEqual('3', result[2]['id'])
self.assertTrue(result[0]['isDefault'])
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_refresh_accounts_with_nothing(self, mock_auth_context):
cli = DummyCli()
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
consolidated = profile._normalize_properties(self.user1, deepcopy([self.subscription1]), False)
profile._set_subscriptions(consolidated)
mock_auth_context.acquire_token_with_username_password.return_value = self.token_entry1
mock_auth_context.acquire_token.return_value = self.token_entry1
mock_arm_client = mock.MagicMock()
mock_arm_client.tenants.list.return_value = [TenantStub(self.tenant_id)]
mock_arm_client.subscriptions.list.return_value = []
finder = SubscriptionFinder(cli, lambda _, _1, _2: mock_auth_context, None, lambda _: mock_arm_client)
# action
profile.refresh_accounts(finder)
# assert
result = storage_mock['subscriptions']
self.assertEqual(0, len(result))
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
def test_credscache_load_tokens_and_sp_creds_with_secret(self, mock_read_file):
cli = DummyCli()
test_sp = {
"servicePrincipalId": "myapp",
"servicePrincipalTenant": "mytenant",
"accessToken": "<PASSWORD>"
}
mock_read_file.return_value = [self.token_entry1, test_sp]
# action
creds_cache = CredsCache(cli, async_persist=False)
# assert
token_entries = [entry for _, entry in creds_cache.load_adal_token_cache().read_items()]
self.assertEqual(token_entries, [self.token_entry1])
self.assertEqual(creds_cache._service_principal_creds, [test_sp])
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
def test_credscache_load_tokens_and_sp_creds_with_cert(self, mock_read_file):
cli = DummyCli()
test_sp = {
"servicePrincipalId": "myapp",
"servicePrincipalTenant": "mytenant",
"certificateFile": 'junkcert.pem'
}
mock_read_file.return_value = [test_sp]
# action
creds_cache = CredsCache(cli, async_persist=False)
creds_cache.load_adal_token_cache()
# assert
self.assertEqual(creds_cache._service_principal_creds, [test_sp])
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
def test_credscache_retrieve_sp_secret_with_cert(self, mock_read_file):
cli = DummyCli()
test_sp = {
"servicePrincipalId": "myapp",
"servicePrincipalTenant": "mytenant",
"certificateFile": 'junkcert.pem'
}
mock_read_file.return_value = [test_sp]
# action
creds_cache = CredsCache(cli, async_persist=False)
creds_cache.load_adal_token_cache()
# assert
self.assertEqual(creds_cache.retrieve_secret_of_service_principal(test_sp['servicePrincipalId']), None)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('os.fdopen', autospec=True)
@mock.patch('os.open', autospec=True)
def test_credscache_add_new_sp_creds(self, _, mock_open_for_write, mock_read_file):
cli = DummyCli()
test_sp = {
"servicePrincipalId": "myapp",
"servicePrincipalTenant": "mytenant",
"accessToken": "Secret"
}
test_sp2 = {
"servicePrincipalId": "myapp2",
"servicePrincipalTenant": "mytenant2",
"accessToken": "Secret2"
}
mock_open_for_write.return_value = FileHandleStub()
mock_read_file.return_value = [self.token_entry1, test_sp]
creds_cache = CredsCache(cli, async_persist=False)
# action
creds_cache.save_service_principal_cred(test_sp2)
# assert
token_entries = [e for _, e in creds_cache.adal_token_cache.read_items()] # noqa: F812
self.assertEqual(token_entries, [self.token_entry1])
self.assertEqual(creds_cache._service_principal_creds, [test_sp, test_sp2])
mock_open_for_write.assert_called_with(mock.ANY, 'w+')
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('os.fdopen', autospec=True)
@mock.patch('os.open', autospec=True)
def test_credscache_add_preexisting_sp_creds(self, _, mock_open_for_write, mock_read_file):
cli = DummyCli()
test_sp = {
"servicePrincipalId": "myapp",
"servicePrincipalTenant": "mytenant",
"accessToken": "Secret"
}
mock_open_for_write.return_value = FileHandleStub()
mock_read_file.return_value = [test_sp]
creds_cache = CredsCache(cli, async_persist=False)
# action
creds_cache.save_service_principal_cred(test_sp)
# assert
self.assertEqual(creds_cache._service_principal_creds, [test_sp])
self.assertFalse(mock_open_for_write.called)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('os.fdopen', autospec=True)
@mock.patch('os.open', autospec=True)
def test_credscache_add_preexisting_sp_new_secret(self, _, mock_open_for_write, mock_read_file):
cli = DummyCli()
test_sp = {
"servicePrincipalId": "myapp",
"servicePrincipalTenant": "mytenant",
"accessToken": "Secret"
}
mock_open_for_write.return_value = FileHandleStub()
mock_read_file.return_value = [test_sp]
creds_cache = CredsCache(cli, async_persist=False)
new_creds = test_sp.copy()
new_creds['accessToken'] = 'Secret2'
# action
creds_cache.save_service_principal_cred(new_creds)
# assert
self.assertEqual(creds_cache._service_principal_creds, [new_creds])
self.assertTrue(mock_open_for_write.called)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('os.fdopen', autospec=True)
@mock.patch('os.open', autospec=True)
def test_credscache_match_service_principal_correctly(self, _, mock_open_for_write, mock_read_file):
cli = DummyCli()
test_sp = {
"servicePrincipalId": "myapp",
"servicePrincipalTenant": "mytenant",
"accessToken": "Secret"
}
mock_open_for_write.return_value = FileHandleStub()
mock_read_file.return_value = [test_sp]
factory = mock.MagicMock()
factory.side_effect = ValueError('SP was found')
creds_cache = CredsCache(cli, factory, async_persist=False)
        # action and verify (we plant an exception to be thrown once the SP is found; so if the exception is raised,
        # we know the matching did go through)
self.assertRaises(ValueError, creds_cache.retrieve_token_for_service_principal, 'myapp', 'resource1', 'mytenant', False)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('os.fdopen', autospec=True)
@mock.patch('os.open', autospec=True)
def test_credscache_remove_creds(self, _, mock_open_for_write, mock_read_file):
cli = DummyCli()
test_sp = {
"servicePrincipalId": "myapp",
"servicePrincipalTenant": "mytenant",
"accessToken": "Secret"
}
mock_open_for_write.return_value = FileHandleStub()
mock_read_file.return_value = [self.token_entry1, test_sp]
creds_cache = CredsCache(cli, async_persist=False)
# action #1, logout a user
creds_cache.remove_cached_creds(self.user1)
# assert #1
token_entries = [e for _, e in creds_cache.adal_token_cache.read_items()] # noqa: F812
self.assertEqual(token_entries, [])
# action #2 logout a service principal
creds_cache.remove_cached_creds('myapp')
# assert #2
self.assertEqual(creds_cache._service_principal_creds, [])
mock_open_for_write.assert_called_with(mock.ANY, 'w+')
self.assertEqual(mock_open_for_write.call_count, 2)
@mock.patch('azure.cli.core._profile._load_tokens_from_file', autospec=True)
@mock.patch('os.fdopen', autospec=True)
@mock.patch('os.open', autospec=True)
@mock.patch('adal.AuthenticationContext', autospec=True)
def test_credscache_new_token_added_by_adal(self, mock_adal_auth_context, _, mock_open_for_write, mock_read_file): # pylint: disable=line-too-long
cli = DummyCli()
token_entry2 = {
"accessToken": "new token",
"tokenType": "Bearer",
"userId": self.user1
}
def acquire_token_side_effect(*args): # pylint: disable=unused-argument
creds_cache.adal_token_cache.has_state_changed = True
return token_entry2
def get_auth_context(_, authority, **kwargs): # pylint: disable=unused-argument
mock_adal_auth_context.cache = kwargs['cache']
return mock_adal_auth_context
mock_adal_auth_context.acquire_token.side_effect = acquire_token_side_effect
mock_open_for_write.return_value = FileHandleStub()
mock_read_file.return_value = [self.token_entry1]
creds_cache = CredsCache(cli, auth_ctx_factory=get_auth_context, async_persist=False)
# action
mgmt_resource = 'https://management.core.windows.net/'
token_type, token, _ = creds_cache.retrieve_token_for_user(self.user1, self.tenant_id,
mgmt_resource)
mock_adal_auth_context.acquire_token.assert_called_once_with(
'https://management.core.windows.net/',
self.user1,
mock.ANY)
# assert
mock_open_for_write.assert_called_with(mock.ANY, 'w+')
self.assertEqual(token, 'new token')
self.assertEqual(token_type, token_entry2['tokenType'])
@mock.patch('azure.cli.core._profile.get_file_json', autospec=True)
def test_credscache_good_error_on_file_corruption(self, mock_read_file):
mock_read_file.side_effect = ValueError('a bad error for you')
cli = DummyCli()
# action
creds_cache = CredsCache(cli, async_persist=False)
# assert
with self.assertRaises(CLIError) as context:
creds_cache.load_adal_token_cache()
self.assertTrue(re.findall(r'bad error for you', str(context.exception)))
def test_service_principal_auth_client_secret(self):
sp_auth = ServicePrincipalAuth('<PASSWORD>!')
result = sp_auth.get_entry_to_persist('sp_id1', 'tenant1')
self.assertEqual(result, {
'servicePrincipalId': 'sp_id1',
'servicePrincipalTenant': 'tenant1',
'accessToken': '<PASSWORD>!'
})
def test_service_principal_auth_client_cert(self):
curr_dir = os.path.dirname(os.path.realpath(__file__))
test_cert_file = os.path.join(curr_dir, 'sp_cert.pem')
sp_auth = ServicePrincipalAuth(test_cert_file)
result = sp_auth.get_entry_to_persist('sp_id1', 'tenant1')
self.assertEqual(result, {
'servicePrincipalId': 'sp_id1',
'servicePrincipalTenant': 'tenant1',
'certificateFile': test_cert_file,
'thumbprint': 'F0:6A:53:84:8B:BE:71:4A:42:90:D6:9D:33:52:79:C1:D0:10:73:FD'
})
def test_detect_adfs_authority_url(self):
cli = DummyCli()
adfs_url_1 = 'https://adfs.redmond.ext-u15f2402.masd.stbtest.microsoft.com/adfs/'
cli.cloud.endpoints.active_directory = adfs_url_1
storage_mock = {'subscriptions': None}
profile = Profile(cli_ctx=cli, storage=storage_mock, use_global_creds_cache=False, async_persist=False)
# test w/ trailing slash
r = profile.auth_ctx_factory(cli, 'common', None)
self.assertEqual(r.authority.url, adfs_url_1.rstrip('/'))
# test w/o trailing slash
adfs_url_2 = 'https://adfs.redmond.ext-u15f2402.masd.stbtest.microsoft.com/adfs'
cli.cloud.endpoints.active_directory = adfs_url_2
r = profile.auth_ctx_factory(cli, 'common', None)
self.assertEqual(r.authority.url, adfs_url_2)
# test w/ regular aad
aad_url = 'https://login.microsoftonline.com'
cli.cloud.endpoints.active_directory = aad_url
r = profile.auth_ctx_factory(cli, 'common', None)
self.assertEqual(r.authority.url, aad_url + '/common')
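# No-op file handle: returned by the mocked os.fdopen so the CredsCache persistence tests can "write"
# credentials without touching the real file system.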
class FileHandleStub(object): # pylint: disable=too-few-public-methods
def write(self, content):
pass
def __enter__(self):
return self
def __exit__(self, _2, _3, _4):
pass
class SubscriptionStub(Subscription): # pylint: disable=too-few-public-methods
def __init__(self, id, display_name, state, tenant_id=None): # pylint: disable=redefined-builtin
policies = SubscriptionPolicies()
policies.spending_limit = SpendingLimit.current_period_off
policies.quota_id = 'some quota'
super(SubscriptionStub, self).__init__(subscription_policies=policies, authorization_source='some_authorization_source')
self.id = id
self.subscription_id = id.split('/')[1]
self.display_name = display_name
self.state = state
# for a SDK Subscription, tenant_id isn't present
# for a _find_using_specific_tenant Subscription, tenant_id means token tenant id
if tenant_id:
self.tenant_id = tenant_id
class TenantStub(object): # pylint: disable=too-few-public-methods
def __init__(self, tenant_id):
self.tenant_id = tenant_id
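# Stand-in for MSIAuthenticationWrapper: it records how often set_token() and the token property are
# used so the MSI tests can sniff the credential object without calling the real IMDS endpoint.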
class MSRestAzureAuthStub:
def __init__(self, *args, **kwargs):
self._token = {
'token_type': 'Bearer',
            'access_token': TestProfile.test_msi_access_token
}
self.set_token_invoked_count = 0
self.token_read_count = 0
self.client_id = kwargs.get('client_id')
self.object_id = kwargs.get('object_id')
self.msi_res_id = kwargs.get('msi_res_id')
def set_token(self):
self.set_token_invoked_count += 1
@property
def token(self):
self.token_read_count += 1
return self._token
@token.setter
def token(self, value):
self._token = value
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
1603403
|
<gh_stars>1-10
from elasticsearch_dsl import Text
from document.base import BaseDocument
class ConnectionDocument(BaseDocument):
"""Represents an connection between execution
environments and network links."""
# id already defined by Elasticsearch
exec_env_id = Text(required=True)
network_link_id = Text(required=True)
description = Text()
class Index:
"""Elasticsearch configuration."""
name = 'connection'
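# Usage sketch (assumes BaseDocument subclasses elasticsearch_dsl.Document and a default connection
# has been registered elsewhere):
#   ConnectionDocument(exec_env_id='env-1', network_link_id='net-1', description='example').save()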
|
StarcoderdataPython
|
4817727
|
import numpy as np
import pycollo
class BackendMock:
def __init__(self, ocp):
self.ocp = ocp
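# Minimal stand-in for Pycollo's internal backend: the Quadrature helper below only appears to need
# access to the owning OCP and its settings (e.g. settings.quadrature_method).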
ocp = pycollo.OptimalControlProblem("Dummy OCP")
ocp.settings.quadrature_method = "gauss"
# ocp.settings.quadrature_method = "lobatto"
backend = BackendMock(ocp)
quadrature = pycollo.quadrature.Quadrature(backend)
# print(quadrature.quadrature_point(5))
# print(quadrature.quadrature_weight(5))
# print(quadrature.butcher_array(5))
print(quadrature.D_matrix(5))
print(quadrature.A_matrix(5))
# print(quadrature.A_index_array(5))
# print(quadrature.D_index_array(5))
|
StarcoderdataPython
|
1708381
|
<filename>tests/test_zfs.py
import os
import unittest
from zfs_uploader.config import Config
from zfs_uploader.zfs import (create_filesystem, create_snapshot,
destroy_filesystem, destroy_snapshot,
open_snapshot_stream,
open_snapshot_stream_inc, list_snapshots)
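# Note: these tests run real ZFS commands. They expect a host with ZFS available and a config.cfg
# whose first job names a filesystem that is safe to create and destroy.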
class ZFSTests(unittest.TestCase):
def setUp(self):
# Given
config = Config('config.cfg')
job = next(iter(config.jobs.values()))
self.filesystem = job.filesystem
self.snapshot_name = 'snap_1'
self.test_file = f'/{self.filesystem}/test_file'
self.test_data = str(list(range(100_000)))
out = create_filesystem(self.filesystem)
self.assertEqual(0, out.returncode, msg=out.stderr)
with open(self.test_file, 'w') as f:
f.write(self.test_data)
def tearDown(self):
out = destroy_filesystem(self.filesystem)
if out.returncode:
self.assertIn('dataset does not exist', out.stderr)
def test_create_snapshot(self):
""" Create snapshot. """
# When
out = create_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
# Then
out = list_snapshots()
self.assertIn(f'{self.filesystem}@{self.snapshot_name}',
list(out.keys()))
def test_create_incremental_snapshot(self):
""" Create incremental snapshot. """
# When
out = create_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
with open(self.test_file, 'a') as f:
f.write('append')
out = create_snapshot(self.filesystem, 'snap_2')
self.assertEqual(0, out.returncode, msg=out.stderr)
# Then
with open_snapshot_stream(self.filesystem, self.snapshot_name,
'r') as f:
snapshot = f.stdout.read()
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
self.assertIn(b'1, 2', snapshot)
self.assertNotIn(b'append', snapshot)
with open_snapshot_stream_inc(self.filesystem, self.snapshot_name,
'snap_2') as f:
snapshot = f.stdout.read()
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
self.assertIn(b'append', snapshot)
self.assertNotIn(b'1, 2', snapshot)
def test_restore_filesystem(self):
""" Restore filesystem from snapshot stream. """
# Given
out = create_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
# When
with open_snapshot_stream(self.filesystem, self.snapshot_name,
'r') as f:
snapshot = f.stdout.read()
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
out = destroy_filesystem(self.filesystem)
self.assertEqual(0, out.returncode, msg=out.stderr)
# Then
with open_snapshot_stream(self.filesystem, self.snapshot_name,
'w') as f:
f.stdin.write(snapshot)
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
with open(self.test_file, 'r') as f:
out = f.read()
self.assertEqual(self.test_data, out)
def test_restore_filesystem_with_increment(self):
""" Restore filesystem from initial and increment snapshot stream. """
# Given
out = create_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
with open(self.test_file, 'a') as f:
f.write('append')
out = create_snapshot(self.filesystem, 'snap_2')
self.assertEqual(0, out.returncode, msg=out.stderr)
# When
with open_snapshot_stream(self.filesystem, self.snapshot_name,
'r') as f:
snapshot_initial = f.stdout.read()
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
with open_snapshot_stream_inc(self.filesystem, self.snapshot_name,
'snap_2') as f:
snapshot_increment = f.stdout.read()
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
out = destroy_filesystem(self.filesystem)
self.assertEqual(0, out.returncode, msg=out.stderr)
# Then
with open_snapshot_stream(self.filesystem, self.snapshot_name,
'w') as f:
f.stdin.write(snapshot_initial)
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
with open_snapshot_stream(self.filesystem, 'snap_2', 'w') as f:
f.stdin.write(snapshot_increment)
stderr = f.stderr.read().decode('utf-8')
self.assertEqual(0, f.returncode, msg=stderr)
with open(self.test_file, 'r') as f:
out = f.read()
self.assertEqual(self.test_data + 'append', out)
def test_destroy_filesystem(self):
""" Destroy filesystem. """
out = destroy_filesystem(self.filesystem)
self.assertEqual(0, out.returncode, msg=out.stderr)
self.assertFalse(os.path.isfile(self.test_file))
def test_destroy_snapshot(self):
""" Destroy snapshot. """
# Given
out = create_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
out = create_snapshot(self.filesystem, 'snap_2')
self.assertEqual(0, out.returncode, msg=out.stderr)
# When
out = destroy_snapshot(self.filesystem, self.snapshot_name)
self.assertEqual(0, out.returncode, msg=out.stderr)
# Then
out = list_snapshots()
self.assertNotIn(f'{self.filesystem}@{self.snapshot_name}',
list(out.keys()))
self.assertIn(f'{self.filesystem}@snap_2', list(out.keys()))
|
StarcoderdataPython
|
102949
|
s=input()
if s[-1] in '24579':
print('hon')
elif s[-1] in '0168':
print('pon')
else:
print('bon')
|
StarcoderdataPython
|
39110
|
from flask import Flask, jsonify, request, render_template
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config['CUSTOM_VAR'] = 5
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///web_app.db'
db = SQLAlchemy(app)
migrate = Migrate(app, db)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
class Tweet(db.Model):
id = db.Column(db.Integer, primary_key=True)
status = db.Column(db.String)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
#Routing
@app.route('/')
def index():
#return 'hello world'
return render_template('homepage.html')
@app.route('/about')
def about():
return 'about me'
@app.route('/users')
@app.route('/users.json')
def users():
users = User.query.all()
users_response = []
for u in users:
user_dict = u.__dict__
del user_dict['_sa_instance_state']
users_response.append(user_dict)
return jsonify(users_response)
@app.route('/users/create', methods=['POST'])
def create_user():
print('Creating a new user...')
print('Form data:', dict(request.form))
if 'name' in request.form:
name = request.form['name']
print(name)
db.session.add(User(name=name))
db.session.commit()
return jsonify({'message': 'created ok', 'name': name})
else:
return jsonify({'message': 'oops please specify a name'})
@app.route('/hello')
def hello(name=None):
print('Visiting the hello page')
print('Request params:', dict(request.args))
if 'name' in request.args:
name = request.args['name']
message = f'hello, {name}'
else:
message = 'hello world'
return render_template('hello.html', message=message)
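# Editor's sketch (added, not in the original file): the module defines no entry
# point; a common way to serve this app locally is the standard Flask dev server.
if __name__ == '__main__':
    app.run(debug=True)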
|
StarcoderdataPython
|
1757776
|
from itertools import product
# Input
H, W = map(int, input().split())
c = [list(map(int, input().split())) for _ in range(10)]
A = [list(map(int, input().split())) for _ in range(H)]
# Compute the minimum magic power needed to rewrite one digit into another
G = [{} for _ in range(10)]
for i, j in product(range(10), repeat=2):
G[i][j] = c[i][j]
INF = 10**10
def warshall_floyd(G):
dp = [
[
0 if i == j else
G[i][j] if j in G[i] else
INF
for j in range(len(G))
]
for i in range(len(G))
]
for k in range(len(G)):
for i in range(len(G)):
for j in range(len(G)):
dp[i][j] = (
dp[i][j] if dp[i][k] == INF or dp[k][j] == INF else
min(dp[i][j], dp[i][k] + dp[k][j])
)
return dp
dp = warshall_floyd(G)
# For every digit on the wall, add the minimum cost (via dp) of turning it into 1
ans = sum(
dp[x][1]
for a in A
for x in a
if x >= 0
)
# Output
print(ans)
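# Editor's sketch (added, illustrative only): a tiny self-check of warshall_floyd
# on a 3-node graph with edges 0 -> 1 (cost 5) and 1 -> 2 (cost 1).
_demo = warshall_floyd([{1: 5}, {2: 1}, {}])
assert _demo[0][2] == 6  # shortest 0 -> 2 path goes through node 1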
|
StarcoderdataPython
|
133074
|
<reponame>bozbil/Diffie-Hellman-and-RC4
#kahramankostas
#Lab_Assignment_2.py
################COMBINED LINEAR CONGRUENTIAL GENERATOR ###########
import random
m_1 = 2147483563
m_2 = 2147483399
a_1 = 40014
a_2 = 20692
y_1 = (random.randint(1, m_1 - 1))
y_2 = (random.randint(1, m_1 - 1))
one_time_private_keys=[]
for i in range (0,2):
y_1 = a_1 * y_1%m_1
y_2 = a_2 * y_2%m_2
decision = (y_1-y_2)%(m_1-1)
if decision > 0:
one_time_private_keys+=[int((decision / m_1) * 500)]
elif decision < 0:
one_time_private_keys+=[int((decision / m_1 + 1) * 500)]
else:
one_time_private_keys+=[int(((m_1 - 1)/m_1) * 500)]
print("COMBINED LINEAR CONGRUENTIAL GENERATOR")
for i in one_time_private_keys:
print("User %s Private Key= " %(chr(65+one_time_private_keys.index(i))) ,i)
print("\n")
########################### DIFFIE-HELLMAN ALGORITHM ##############
print("DIFFIE-HELLMAN ALGORITHM")
q,alpha=353,3
x,y=[],[]
for i in one_time_private_keys:
x+=[i % q]
y+=[(alpha ** i) % q]
secretkey=(y[0] ** x[1]) % q
print(" Xa= %s\n Xb= %s\n Ya= %s\n Yb= %s " %(x[0],x[1],y[0],y[1]))
print("Diffie-Hellman algorithm Secret Key = %s" %(secretkey))
print("\n")
########################### RC4 ALGORITHM ENCRYPTION #############
print("RC4 ALGORITHM ENCRYPTION")
try:
bookfile = open("book.txt")
book=bookfile.read()
bookfile.close()
except:
print ("This program needs a data file (book.txt)")
exit(1)
block_128_bits=[]
for i in range(0,len(book),16):
block_128_bits += [book[ i : i + 16 ]]
book=block_128_bits[len(block_128_bits)-1]
keystream, s, t = [],[],[]
key =str(secretkey)
# Initialization
for i in range(0, 256):
    s += [i]
    t += [key[i % len(key)]]
# Initial Permutation of S
j = 0
for i in range(0, 256):
    j = (j + s[i] + ord(t[i])) % 256
    s[i], s[j] = s[j], s[i]
# Stream Generation
j = 0
for i in range(1, len(book) + 1):
    j = (j + s[i]) % 256
    s[i], s[j] = s[j], s[i]
    counter = (s[i] + s[j]) % 256
    keystream += [s[counter]]
encrypted_msg = ""
counter = 0
for i in book:
temporary = ("%02X" % (ord( i ) ^ (keystream[counter] ) ) )
encrypted_msg = encrypted_msg + str(temporary)
counter += 1
print("Encrypted text with RC4 algorithm = %s" %(encrypted_msg))
################### RC4 ALGORITHM DECRYPTION ##########################
print("\nRC4 ALGORITHM DECRYPTION")
counter = 0
decrypted_msg=""
for i in range(0,len(encrypted_msg)-1,2):
temporary = (((int(encrypted_msg[i:i+2],16)) ^ (keystream[counter] ) ) )
decrypted_msg = decrypted_msg + chr(temporary)
counter += 1
print("Decrypted text with RC4 algorithm = %s" %(decrypted_msg) )
|
StarcoderdataPython
|
3390208
|
# Generated by Django 2.2.5 on 2019-11-03 23:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('libros', '0003_auto_20191026_1059'),
]
operations = [
migrations.AlterField(
model_name='pagina',
name='numero',
field=models.IntegerField(),
),
migrations.AlterField(
model_name='pagina',
name='texto',
field=models.CharField(blank=True, default='', max_length=5000),
),
]
|
StarcoderdataPython
|
106324
|
print("welcome to SBI bank ATM")
restart=('y')
chances = 3
balance = 1000
while chances>0:
restart=('y')
pin = int(input("please enter your secret number"))
if pin == 1234:
print('you entered your pin correctly\n')
while restart not in ('n','N','no','NO'):
print('press 1 for balance enquiry\n')
print('press 2 for withdrawl\n')
print('press 3 for credit money\n')
print('press 4 for cancel the transaction\n')
option = int(input("enter your choice\n"))
if option == 1:
print('your balance is = ',balance)
restart = input('would you like to go back?\n')
if restart in ('n','N','no','NO'):
print("thank you")
break
elif option == 2:
option2 = ('y')
withdrawl = float(input("enter the amount for withdrawl = \n"))
if withdrawl in [100,200,500,1000,]:
balance=balance-withdrawl
print("your balance is now Rs",balance)
restart=input('would you like to restart\n')
if restart in ['n','no','N','NO']:
print('thank you')
break
                elif withdrawl not in [100, 200, 500, 1000]:
print('invalid amount! please re-try\n')
restart = ('y')
elif withdrawl == 1:
withdrawl=float(input('please enter desired amount\n'))
elif option == 3:
credit = float(input('enter money which you want to add\n'))
balance = balance+credit
print('your balance is = ',balance)
restart = input('would you like to go back\n')
if restart in ['n','no','NO','N']:
print('thank you')
break
elif option == 4:
print("your transaction is cancelled")
print('thank you for your service')
break
else:
print("please enter a correct number\n")
restart = ('y')
    else:
print("incorrect password\n")
chances=chances-1
if chances == 0:
print('no more try\n')
break
|
StarcoderdataPython
|
4836572
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2014 <NAME>
#
# The following terms apply to all files associated
# with the software unless explicitly disclaimed in individual files.
#
# The authors hereby grant permission to use, copy, modify, distribute,
# and license this software and its documentation for any purpose, provided
# that existing copyright notices are retained in all copies and that this
# notice is included verbatim in any distributions. No written agreement,
# license, or royalty fee is required for any of the authorized uses.
# Modifications to this software may be copyrighted by their authors
# and need not follow the licensing terms described here, provided that
# the new terms are clearly indicated on the first page of each file where
# they apply.
#
# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY
# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE
# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE
# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
# MODIFICATIONS.
import ctypes
import ctypes.util
import logging
import sys
__all__ = [
    'LibraryException',
    'LibraryNotFoundException',
    'NoLibraryCandidatesException',
    'LibraryNotLoadedException',
    'LibraryMissingSymbolsException',
    'locate_library',
    'load_library',
    'load_locate_library'
]
_LOGGER = logging.getLogger('usb.libloader')
class LibraryException(OSError):
    pass
class LibraryNotFoundException(LibraryException):
    pass
class NoLibraryCandidatesException(LibraryNotFoundException):
    pass
class LibraryNotLoadedException(LibraryException):
    pass
class LibraryMissingSymbolsException(LibraryException):
    pass
def locate_library (candidates, find_library=ctypes.util.find_library):
"""Tries to locate a library listed in candidates using the given
find_library() function (or ctypes.util.find_library).
Returns the first library found, which can be the library's name
or the path to the library file, depending on find_library().
Returns None if no library is found.
arguments:
* candidates -- iterable with library names
* find_library -- function that takes one positional arg (candidate)
and returns a non-empty str if a library has been found.
Any "false" value (None,False,empty str) is interpreted
as "library not found".
Defaults to ctypes.util.find_library if not given or
None.
"""
if find_library is None:
find_library = ctypes.util.find_library
use_dll_workaround = (
sys.platform == 'win32' and find_library is ctypes.util.find_library
)
for candidate in candidates:
# Workaround for CPython 3.3 issue#16283 / pyusb #14
if use_dll_workaround:
candidate += '.dll'
libname = find_library(candidate)
if libname:
return libname
# -- end for
return None
def load_library(lib, name=None, lib_cls=None):
"""Loads a library. Catches and logs exceptions.
Returns: the loaded library or None
arguments:
* lib -- path to/name of the library to be loaded
* name -- the library's identifier (for logging)
Defaults to None.
* lib_cls -- library class. Defaults to None (-> ctypes.CDLL).
"""
try:
if lib_cls:
return lib_cls(lib)
else:
return ctypes.CDLL(lib)
except Exception:
if name:
lib_msg = '%s (%s)' % (name, lib)
else:
lib_msg = lib
lib_msg += ' could not be loaded'
if sys.platform == 'cygwin':
lib_msg += ' in cygwin'
_LOGGER.error(lib_msg, exc_info=True)
return None
def load_locate_library(candidates, cygwin_lib, name,
win_cls=None, cygwin_cls=None, others_cls=None,
find_library=None, check_symbols=None):
"""Locates and loads a library.
Returns: the loaded library
arguments:
* candidates -- candidates list for locate_library()
* cygwin_lib -- name of the cygwin library
* name -- lib identifier (for logging). Defaults to None.
* win_cls -- class that is used to instantiate the library on
win32 platforms. Defaults to None (-> ctypes.CDLL).
    * cygwin_cls -- library class for cygwin platforms.
                    Defaults to None (-> ctypes.CDLL).
    * others_cls -- library class for all other platforms.
                    Defaults to None (-> ctypes.CDLL).
* find_library -- see locate_library(). Defaults to None.
* check_symbols -- either None or a list of symbols that the loaded lib
must provide (hasattr(<>)) in order to be considered
valid. LibraryMissingSymbolsException is raised if
any symbol is missing.
raises:
* NoLibraryCandidatesException
* LibraryNotFoundException
* LibraryNotLoadedException
* LibraryMissingSymbolsException
"""
if sys.platform == 'cygwin':
if cygwin_lib:
loaded_lib = load_library(cygwin_lib, name, cygwin_cls)
else:
raise NoLibraryCandidatesException(name)
elif candidates:
lib = locate_library(candidates, find_library)
if lib:
if sys.platform == 'win32':
loaded_lib = load_library(lib, name, win_cls)
else:
loaded_lib = load_library(lib, name, others_cls)
else:
_LOGGER.error('%r could not be found', (name or candidates))
raise LibraryNotFoundException(name)
else:
raise NoLibraryCandidatesException(name)
if loaded_lib is None:
raise LibraryNotLoadedException(name)
elif check_symbols:
symbols_missing = [
s for s in check_symbols if not hasattr(loaded_lib, s)
]
if symbols_missing:
            _LOGGER.error('%r, missing symbols: %r', lib, symbols_missing)
raise LibraryMissingSymbolsException(lib)
else:
return loaded_lib
else:
return loaded_lib
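# Editor's sketch (added): a hedged example of how this loader might be driven.
# The candidate names, the cygwin DLL name and the checked symbol are assumptions
# modelled on a libusb-1.0 style backend, not values taken from this file.
if __name__ == '__main__':
    try:
        _lib = load_locate_library(
            ('usb-1.0', 'libusb-1.0', 'usb'),
            'cygusb-1.0.dll', 'Libusb 1',
            check_symbols=('libusb_init',))
        print('loaded backend: %r' % (_lib,))
    except OSError as exc:
        print('no usable backend: %s' % (exc,))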
|
StarcoderdataPython
|
189023
|
<filename>TrackingTools/GeomPropagators/python/AnyDirectionAnalyticalPropagator_cfi.py
import FWCore.ParameterSet.Config as cms
AnyDirectionAnalyticalPropagator = cms.ESProducer("AnalyticalPropagatorESProducer",
MaxDPhi = cms.double(1.6),
ComponentName = cms.string('AnyDirectionAnalyticalPropagator'),
PropagationDirection = cms.string('anyDirection')
)
|
StarcoderdataPython
|
3363995
|
from copy import copy
import pytest
from mock import mock, patch
from tweet.bills import Bills
from tweet.conftest import EXAMPLE_INTRODUCTIONS
from tweet.ocd_api import BillsRequestParams, BillsAPI
from tweet.query import get_new_introductions, filter_already_exists, filter_missing_date, save_introductions
@pytest.fixture
def bills_api():
bills_api = mock.create_autospec(BillsAPI)
bills_api.get_bills.return_value = EXAMPLE_INTRODUCTIONS
yield bills_api
@pytest.fixture
def bills_request_params():
bills_request_params = mock.create_autospec(BillsRequestParams)
yield bills_request_params
@pytest.fixture
def bills():
bills = mock.create_autospec(Bills)
bills.exists.return_value = False
yield bills
@patch('tweet.query.filter_already_exists')
@patch('tweet.query.filter_missing_date')
def test_get_new_introductions(mock_filter_missing_date, mock_filter_already_exists,
bills_api, bills_request_params, bills):
bills_api.get_bills.return_value = EXAMPLE_INTRODUCTIONS
mock_filter_already_exists.return_value = EXAMPLE_INTRODUCTIONS
bills_api.add_bills_dates.return_value = EXAMPLE_INTRODUCTIONS
mock_filter_missing_date.return_value = EXAMPLE_INTRODUCTIONS
new_introductions = get_new_introductions(bills_api, bills_request_params, bills)
bills_api.get_bills.assert_called_with(bills_request_params)
mock_filter_already_exists.assert_called_with(bills, EXAMPLE_INTRODUCTIONS)
bills_api.add_bills_dates.assert_called_with(EXAMPLE_INTRODUCTIONS)
mock_filter_missing_date.assert_called_with(EXAMPLE_INTRODUCTIONS)
assert new_introductions == EXAMPLE_INTRODUCTIONS
def test_filter_already_exists(bills):
bills.exists.side_effect = [True, False]
new_bills = filter_already_exists(bills, EXAMPLE_INTRODUCTIONS)
assert new_bills == [EXAMPLE_INTRODUCTIONS[1]]
def test_filter_missing_date():
introductions = [copy(introduction) for introduction in EXAMPLE_INTRODUCTIONS]
introductions[1].date = ''
introductions[0].date = '10/28/2018'
valid_bills = filter_missing_date(introductions)
assert valid_bills == [introductions[0]]
def test_save_introductions(bills):
save_introductions(bills, EXAMPLE_INTRODUCTIONS)
assert bills.insert.call_count == len(EXAMPLE_INTRODUCTIONS)
|
StarcoderdataPython
|
1799753
|
<reponame>Data-Science-in-Mechanical-Engineering/vision-based-furuta-pendulum
"""
Examples for calibrating the real Qube to a theta of 0.
@Author: <NAME>
"""
from gym_brt.control import calibrate, QubeFlipUpControl
from gym_brt.envs.reinforcementlearning_extensions.wrapper import CalibrationWrapper
from gym_brt.envs import QubeSwingupEnv
import math
def test_calibration():
frequency = 120
u_max = 1.0
desired_theta = 0.0
calibrate(desired_theta=desired_theta, frequency=frequency, u_max=u_max)
def test_calibration_wrapper():
n_trials = 3
frequency = 120
with CalibrationWrapper(QubeSwingupEnv(frequency=frequency), noise=True) as env:
controller = QubeFlipUpControl(sample_freq=frequency)
for episode in range(n_trials):
state = env.reset()
for step in range(30000):
action = controller.action(state)
state, reward, done, info = env.step(action)
if done:
break
if __name__ == '__main__':
test_calibration_wrapper()
|
StarcoderdataPython
|
3311236
|
from django.shortcuts import render
def index(req):
return render(
req,
"accounts/index2.html")
|
StarcoderdataPython
|
1602899
|
<gh_stars>1-10
import pytest
import sys
sys.path.append('../')
from codemaker import CodeMaker # NOQA
@pytest.fixture
def codemaker_player():
return CodeMaker()
def test_code_generation(codemaker_player):
"""
1. Test if the length of code is correct
2. Test if all the elements of the code belong to the given set
3. Test if all the elements appear only once within the code
"""
available_elements = [1, 2, 3, 4, 5, 6]
code = codemaker_player.code
assert len(code) == 4
for i in range(0, 4):
assert available_elements.count(code[i]) != 0
assert code.count(code[i]) == 1
def test_analyze_move(codemaker_player):
codemaker_player.code = [1, 2, 3, 4]
""" When the color and positions of all elements match """
all_matching_move = [1, 2, 3, 4]
expected_feedback = [1, 1, 1, 1]
assert codemaker_player.analyze_move(
all_matching_move) == expected_feedback
"""
When only color matches for two elements,
but their position do not match
"""
move_with_only_matching_color = [3, 2, 1, 4]
expected_feedback = [0, 1, 0, 1]
assert codemaker_player.analyze_move(
move_with_only_matching_color) == expected_feedback
"""
When there is an element whose color and
position, both do not match
"""
move_with_wrong_color = [3, 2, 1, 6]
expected_feedback = [0, 1, 0, -1]
assert codemaker_player.analyze_move(
move_with_wrong_color) == expected_feedback
|
StarcoderdataPython
|
161324
|
import torch
from tools.engine import Engine
from tools.config import use_cuda
class GMF(torch.nn.Module):
def __init__(self, config):
super(GMF, self).__init__()
self.num_users = config['num_users']
self.num_items = config['num_items']
self.latent_dim = config['latent_dim']
self.embedding_user = torch.nn.Embedding(num_embeddings=self.num_users, embedding_dim=self.latent_dim)
self.embedding_item = torch.nn.Embedding(num_embeddings=self.num_items, embedding_dim=self.latent_dim)
self.affine_output = torch.nn.Linear(in_features=self.latent_dim, out_features=1)
self.logistic = torch.nn.Sigmoid()
def forward(self, user_indices, item_indices):
user_embedding = self.embedding_user(user_indices)
item_embedding = self.embedding_item(item_indices)
element_product = torch.mul(user_embedding, item_embedding)
logits = self.affine_output(element_product)
rating = self.logistic(logits)
return rating
def init_weight(self):
pass
class GMFEngine(Engine):
'''Engine for training & evaluating GMF model'''
def __init__(self, config):
self.model = GMF(config)
if config['use_cuda'] is True:
use_cuda(True, config['device_id'])
self.model.cuda()
super(GMFEngine, self).__init__(config)
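# Editor's sketch (added): a minimal forward pass through GMF with a made-up
# config; the key names mirror those read in __init__ and the sizes are assumptions.
if __name__ == '__main__':
    _config = {'num_users': 10, 'num_items': 20, 'latent_dim': 8}
    _model = GMF(_config)
    _users = torch.tensor([0, 1, 2])
    _items = torch.tensor([3, 4, 5])
    print(_model(_users, _items).shape)  # torch.Size([3, 1])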
|
StarcoderdataPython
|
1722676
|
"""Main module."""
from collections import deque, defaultdict
import json
from typing import AnyStr, Dict
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Trie:
def __init__(self):
# For empty string `""`
self.trie = {'flag': True}
def insert(self, word: str) -> None:
"""
Inserts a word into the trie.
"""
node = self.trie
for char in word:
if char in node:
node = node[char]
else:
node[char] = {}
node = node[char]
        # identify whether this path forms a complete word
node['flag'] = True
def search(self, word: str) -> bool:
"""
Returns if the word is in the trie.
"""
node = self.trie
for char in word:
if char in node: node = node[char]
else: return False
return node.get('flag', False)
def startsWith(self, prefix: str) -> bool:
"""
Returns if there is any word in the trie that starts with the given prefix.
"""
node = self.trie
for char in prefix:
if char in node: node = node[char]
else: return False
return True
class Codec:
@classmethod
def serialize_listnode(cls, head: ListNode) -> AnyStr:
assert isinstance(head, ListNode)
res = []
while head:
res.append(head.val)
head = head.next
return json.dumps(res)
@classmethod
def deserialize_listnode(cls, data: AnyStr) -> ListNode:
vals = json.loads(data)
assert isinstance(vals, list)
if not vals: return
head = ListNode(vals[0])
cursor = head
for val in vals[1:]:
cursor.next = ListNode(val)
cursor = cursor.next
return head
@classmethod
def serialize_treenode(cls, root: TreeNode) -> AnyStr:
"""Encodes a tree to a single string.
:type root: TreeNode
:rtype: str
"""
assert isinstance(root, TreeNode)
if not root: return "[]"
        ans, queue = [], deque([root])  # deque must be seeded with an iterable
while queue:
node = queue.popleft()
if node:
ans.append(node.val)
queue.append(node.left)
queue.append(node.right)
else:
ans.append(None)
return json.dumps(ans)
@classmethod
def deserialize_treenode(cls, data: AnyStr) -> TreeNode:
"""Decodes your encoded data to tree.
:type data: str
:rtype: TreeNode
"""
assert isinstance(data, str)
vals = json.loads(data)
if not vals: return
cursor = 1
root = TreeNode(vals[0])
        queue = deque([root])
while queue or cursor < len(vals):
node = queue.popleft()
if vals[cursor] is not None:
node.left = TreeNode(vals[cursor])
queue.append(node.left)
cursor += 1
if vals[cursor] is not None:
node.right = TreeNode(vals[cursor])
queue.append(node.right)
cursor += 1
return root
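# Editor's sketch (added, illustrative only): exercising Trie and the list-node
# codec defined above; the sample words and node values are assumptions.
if __name__ == '__main__':
    trie = Trie()
    trie.insert('apple')
    assert trie.search('apple') and not trie.search('app')
    assert trie.startsWith('app')
    head = ListNode(1)
    head.next = ListNode(2)
    data = Codec.serialize_listnode(head)        # '[1, 2]'
    restored = Codec.deserialize_listnode(data)
    assert (restored.val, restored.next.val) == (1, 2)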
|
StarcoderdataPython
|
4836092
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.utils.translation import ugettext_lazy as _
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from aldryn_people import models, forms, DEFAULT_APP_NAMESPACE
from .utils import get_valid_languages
NAMESPACE_ERROR = _(
"Seems that there is no valid application hook for aldryn-people."
"Links can't be rendered without an app hook."
)
class PeoplePlugin(CMSPluginBase):
TEMPLATE_NAME = 'aldryn_people/plugins/%s/people_list.html'
module = 'People'
render_template = TEMPLATE_NAME % models.PeoplePlugin.STYLE_CHOICES[0][0]
name = _('People list')
model = models.PeoplePlugin
fieldsets = (
(None, {
'fields': (
'style',
),
}),
(_('People'), {
'description': _('Select and arrange specific people, or leave '
'blank to use all.'),
'fields': (
'people',
)
}),
(_('Options'), {
'fields': (
('group_by_group', 'show_ungrouped', ),
'show_links',
'show_vcard',
)
})
)
def group_people(self, people):
groups = defaultdict(list)
for person in people:
for group in person.groups.all():
groups[group].append(person)
# Fixes a template resolution-related issue. See:
# http://stackoverflow.com/questions/4764110/django-template-cant-loop-defaultdict # noqa
groups.default_factory = None
return groups
def render(self, context, instance, placeholder):
people = instance.get_selected_people()
if not people:
people = models.Person.objects.published()
valid_languages = get_valid_languages(
DEFAULT_APP_NAMESPACE, instance.language, context['request'])
people = people.translated(*valid_languages)
if not valid_languages:
context['namespace_error'] = NAMESPACE_ERROR
self.render_template = self.TEMPLATE_NAME % instance.style
context['instance'] = instance
context['people'] = people
if instance.group_by_group:
context['people_groups'] = self.group_people(people)
if instance.show_ungrouped:
groupless = people.filter(groups__isnull=True)
else:
groupless = people.none()
context['groupless_people'] = groupless
else:
context['people_groups'] = []
context['groupless_people'] = people.none()
return context
plugin_pool.register_plugin(PeoplePlugin)
@plugin_pool.register_plugin
class RelatedPeoplePlugin(CMSPluginBase):
TEMPLATE_NAME = 'aldryn_people/plugins/related_people__%s.html'
module = 'People'
render_template = TEMPLATE_NAME % forms.LAYOUT_CHOICES[0][0]
name = _('Related People')
model = models.RelatedPeoplePlugin
form = forms.RelatedPeoplePluginForm
def render(self, context, instance, placeholder):
request = context.get('request')
context['instance'] = instance
context['title'] = instance.title
context['icon'] = instance.icon
context['image'] = instance.image
qs = instance.related_people.published()
related_groups = instance.related_groups.all()
related_locations = instance.related_locations.all()
related_categories = instance.related_categories.all()
related_services = instance.related_services.all()
if not qs.exists():
qs = models.Person.objects.published().distinct()
if related_groups.exists():
qs = qs.filter(groups__in=related_groups)
if related_locations.exists():
qs = qs.filter(location__in=related_locations)
if related_categories.exists():
qs = qs.filter(categories__in=related_categories)
if related_services.exists():
qs = qs.filter(services__in=related_services)
context['related_people'] = qs[:int(instance.number_of_people)]
return context
def get_render_template(self, context, instance, placeholder):
return self.TEMPLATE_NAME % instance.layout
|
StarcoderdataPython
|
3222149
|
<gh_stars>1-10
def box_print(string_array):
width = max(*map(len, string_array))
print("*"*(width+4))
[print_fix_width(x, width) for x in string_array]
print("*"*(width+4))
def print_fix_width(string, size):
print(f'* {string}{" "*(size - len(string))} *')
if __name__ == '__main__':
strings_to_print = 'Hello World in a frame'.split(' ')
box_print(strings_to_print)
|
StarcoderdataPython
|
1694999
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
@Author: Adam
@Date: 2020-04-17 14:07:14
@LastEditTime: 2020-04-17 14:19:53
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /LearnPython/web/hello.py
'''
def application(environ, start_response):
start_response('200 OK',[('Content-Type', 'text/html')])
body = '<h1>Hello, %s!</h1>'%(environ['PATH_INFO'][1:] or 'web')
return [body.encode('utf-8')]
|
StarcoderdataPython
|
134943
|
<filename>rl/utils/return_utils.py
import torch
from torch import Tensor
__all__ = ['compute_return', 'compute_gae_return']
def compute_return(
rewards: Tensor,
terminals: Tensor,
bootstrap_value: Tensor,
discount_rate: float,
batch_first: bool = False) -> Tensor:
"""Computes the return based on sequences of rewards, terminals and the bootstrapped values of the next observation.
Arguments:
rewards (Tensor): Float tensor of shape `(time, batch)` that contains the reward sequence.
terminals (Tensor): Boolean tensor of shape `(time, batch)` that contains the terminal sequence.
bootstrap_value (Tensor): Float tensor of shape `(batch)` that contains the bootstrapped values of the next
            observation.
discount_rate (float): The discount rate that is used to compute the return.
batch_first (bool, optional): Whether the rewards and terminals are of shape `(batch, time)` instead. The
returns will also be of that shape. Defaults to ``False``.
Returns:
Tensor of shape `(time, batch)` containing the computed returns (or of shape `(batch, time)`, if `batch_first`
is ``True``).
"""
return_ = torch.zeros_like(rewards)
next_return = bootstrap_value
num_steps = rewards.shape[1 if batch_first else 0]
for t in reversed(range(num_steps)):
index_t = (slice(None), t) if batch_first else (t, slice(None))
non_terminal = ~terminals[index_t]
return_[index_t] = rewards[index_t]
return_[index_t][non_terminal] += discount_rate * next_return[non_terminal]
next_return = return_[index_t]
return return_
def compute_gae_return(
rewards: Tensor,
terminals: Tensor,
values: Tensor,
bootstrap_value: Tensor,
discount_rate: float,
gae_lambda: float,
batch_first: bool = False) -> Tensor:
"""TODO docstring
Implementation adopted from [1].
References:
[1] https://stable-baselines.readthedocs.io/en/master/_modules/stable_baselines/ppo2/ppo2.html#PPO2
"""
return_ = torch.zeros_like(rewards)
next_value = bootstrap_value
old_gae = torch.zeros_like(next_value)
num_steps = rewards.shape[1 if batch_first else 0]
for t in reversed(range(num_steps)):
index_t = (slice(None), t) if batch_first else (t, slice(None))
non_terminal = ~terminals[index_t]
value_t = values[index_t]
delta = rewards[index_t] - value_t
delta[non_terminal] += discount_rate * next_value[non_terminal]
gae = delta.clone()
gae[non_terminal] += discount_rate * gae_lambda * old_gae[non_terminal]
old_gae = gae
return_[index_t] = gae + value_t
next_value = value_t
return return_
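# Editor's sketch (added): a two-step, single-environment rollout for
# compute_return; the numbers are assumptions chosen so the discounted return is
# easy to verify by hand (each step: reward 1 plus 0.5 times the next value).
if __name__ == '__main__':
    _rewards = torch.tensor([[1.0], [1.0]])            # shape (time=2, batch=1)
    _terminals = torch.tensor([[False], [False]])
    _bootstrap = torch.tensor([2.0])
    _ret = compute_return(_rewards, _terminals, _bootstrap, discount_rate=0.5)
    print(_ret)  # tensor([[2.], [2.]])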
|
StarcoderdataPython
|
1608423
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
from rosinstall.multiproject_cmd import cmd_persist_config as multipersist
from rosinstall import setupfiles
from rosinstall.helpers import ROSINSTALL_FILENAME, is_path_ros
def cmd_persist_config(config, config_filename=ROSINSTALL_FILENAME, header=''):
## Save .rosinstall
header = (header or '') + """\
# IT IS UNLIKELY YOU WANT TO EDIT THIS FILE BY HAND,
# UNLESS FOR REMOVING ENTRIES.
# IF YOU WANT TO CHANGE THE ROS ENVIRONMENT VARIABLES
# USE THE rosinstall TOOL INSTEAD.
# IF YOU CHANGE IT, USE rosinstall FOR THE CHANGES TO TAKE EFFECT
"""
multipersist(config, config_filename, header)
def _ros_requires_boostrap(config):
"""
Tests whether workspace contains a core ros stack, to decide
whether to rosmake
:param config: workspace config object
"""
for entry in config.get_source():
if is_path_ros(os.path.join(config.get_base_path(), entry.get_local_name())):
# we assume that if any of the elements we installed came
            # from a VCS source, a bootstrap might be useful
if entry.get_scmtype() is not None:
return True
return False
def cmd_maybe_refresh_ros_files(config):
"""
Regenerates setup.* files if they exist already
:param config: workspace config object
"""
if (os.path.isfile(os.path.join(config.get_base_path(), 'setup.sh'))):
print("Overwriting setup.sh, setup.bash, and setup.zsh in %s" %
config.get_base_path())
setupfiles.generate_setup(config, no_ros_allowed=True)
def cmd_generate_ros_files(config, path, nobuild=False, rosdep_yes=False, catkin=False, catkinpp=None, no_ros_allowed=False):
"""
Generates ROS specific setup files
:param nobuild: Unless True, invokes rosmake to build all packages if core ROS stack is detected
:param rosdep_yes: If True, adds --rosdep-yes to rosmake command
:param catkin: if true, generates catkin(fuerte) CMakeLists.txt instead of invoking rosmake
:param catkinpp: Prefix path for catkin if generating for catkin
:param no_ros_allowed: if true, does not look for a core ros stack
"""
# Catkin must be enabled if catkinpp is set
if catkinpp is not None:
catkin = True
## bootstrap the build if installing ros
if catkin:
setupfiles.generate_catkin_cmake(path, catkinpp)
else: # DRY install case
## Generate setup.sh and save
print("(Over-)Writing setup.sh, setup.bash, and setup.zsh in %s" %
config.get_base_path())
setupfiles.generate_setup(config, no_ros_allowed)
if _ros_requires_boostrap(config) and not nobuild:
print("Bootstrapping ROS build")
rosdep_yes_insert = ""
if rosdep_yes:
rosdep_yes_insert = " --rosdep-yes"
ros_comm_insert = ""
if 'ros_comm' in [os.path.basename(tree.get_path()) for tree in config.get_config_elements()]:
print("Detected ros_comm bootstrapping it too.")
ros_comm_insert = " ros_comm"
cmd = ("source %s && rosmake ros%s --rosdep-install%s" %
(os.path.join(path, 'setup.sh'),
ros_comm_insert,
rosdep_yes_insert))
subprocess.check_call(cmd, shell=True, executable='/bin/bash')
|
StarcoderdataPython
|
3231643
|
<reponame>kwanhur/leetcode<gh_stars>0
#! /usr/bin/env python
# _*_ coding:utf-8 _*_
def revert(str):
if not str or len(str) == 1:
return str
i, base, s = -1, -len(str), ''
while True:
s += str[i]
i -= 1
if i < base:
break
return s
def revert_iterator(str):
s = ''
if not str or len(str) == 1:
return str or s
else:
s = str[-1] + revert_iterator(str[0:len(str) - 1])
return s
if __name__ == '__main__':
s = "string"
print s[::-1]
print revert(s)
print revert_iterator(s)
|
StarcoderdataPython
|
3207784
|
<reponame>B-ROY/TESTGIT<filename>app/customer/models/chat.py
# coding=utf-8
from django.db import models
import logging
import datetime
from wi_model_util.imodel import *
from mongoengine import *
from base.settings import CHATPAMONGO
from app.util.messageque.msgsender import MessageSender
from app.customer.models.user import User
from app.util.shumeitools.shumeitools import *
connect(CHATPAMONGO.db, host=CHATPAMONGO.host, port=CHATPAMONGO.port, username=CHATPAMONGO.username,
password=<PASSWORD>)
class ChatMessage(Document):
from_user_id = IntField(verbose_name=u"用户id")
to_user_id = IntField(verbose_name=u"接收用户id")
create_time = DateTimeField(verbose_name=u"创建时间", default=datetime.datetime.now())
type = IntField(verbose_name=u"消息类型") # 1:文本 2:图片 3: 音频
content = StringField(max_length=1024, verbose_name=u"消息内容")
conversation_id = StringField(verbose_name=u"会话id", max_length=64)
resource_url = StringField(verbose_name=u"图片,音频 资源地址", max_length=512)
show_status = IntField(verbose_name=u"图片,音频 鉴定状态") # 1:通过 2:屏蔽 3:鉴定中
@classmethod
def create_chat_message(cls, from_user_id, to_user_id, type, content, conversation_id, resource_url, user_ip):
obj_ = cls()
obj_.from_user_id = from_user_id
obj_.to_user_id = to_user_id
obj_.type = type
obj_.content = content
obj_.create_time = datetime.datetime.now()
obj_.conversation_id = conversation_id
obj_.resource_url = resource_url
if int(type) == 2:
obj_.show_status = 3
else:
obj_.show_status = 1
if int(type) == 1:
            # Run text content moderation on the message
user = User.objects.filter(id=from_user_id).first()
ret, duration = shumei_text_spam(text=content, timeout=1, user_id=from_user_id, channel="MESSAGE", nickname=user.nickname,
phone=user.phone, ip=user_ip)
is_pass = 0
if ret["code"] == 1100:
if ret["riskLevel"] == "PASS":
is_pass = 1
obj_.show_status = 1
if ret["riskLevel"] == "REJECT":
is_pass = 0
obj_.show_status = 2
if ret["riskLevel"] == "REVIEW":
                    # TODO: add a manual review step here
is_pass = 1
obj_.show_status = 1
obj_.save()
if not is_pass:
message = u"经系统检测,您的内容涉及违规因素,请重新编辑"
return 2, None, None, message
        # Update the conversation status:
status = 0
create_time = None
conversation = UserConversation.objects.filter(id=conversation_id).first()
con_type = conversation.type
now = datetime.datetime.now()
if con_type == 3:
            # Prop stage: move the conversation to the waiting state
conversation.update(set__type=2)
conversation.update(set__wait_time=now)
if con_type == 2:
            # Check whether the other user has replied in this conversation; if so, mark it as established
message = ChatMessage.objects.filter(conversation_id=conversation_id, from_user_id=to_user_id, to_user_id=from_user_id).first()
if message:
conversation.update(set__type=1)
conversation.update(set__start_time=now)
status = 1
create_time = now
if int(type) == 2:
            # Image moderation
MessageSender.send_picture_detect(pic_url=resource_url, user_id=0, pic_channel=0, source=4, obj_id=str(obj_.id))
return status, create_time, conversation_id, ""
class UserConversation(Document):
from_user_id = IntField(verbose_name=u"用户id")
to_user_id = IntField(verbose_name=u"接收用户id")
send_id = IntField(verbose_name=u"道具使用 用户id")
create_time = DateTimeField(verbose_name=u"创建时间", default=datetime.datetime.now())
type = IntField(verbose_name=u"会话状态") # 1:建立 2:未建立 3:道具阶段 4:关闭
start_time = DateTimeField(verbose_name=u"会话开始时间")
stop_time = DateTimeField(verbose_name=u"会话关闭时间")
wait_time = DateTimeField(verbose_name=u"等待开始时间")
    is_send_tool = IntField(verbose_name=u"是否使用道具")  # 1 = used, 2 = not used
    tool_time_type = IntField(verbose_name=u"道具消耗的类型")  # 0 = time-limited, 1 = permanent
    stop_type = IntField(verbose_name=u"是否使用道具")  # 1 = closed on timeout, 2 = cancelled
@classmethod
def create_conversation_message(cls, from_user_id, to_user_id, type, is_send_tool):
obj_ = cls()
obj_.from_user_id = from_user_id
obj_.to_user_id = to_user_id
obj_.type = type
obj_.is_send_tool = is_send_tool
obj_.create_time = datetime.datetime.now()
obj_.save()
return obj_
@classmethod
def cancel(cls, conversation_id, from_user_id, to_user_id):
conversation = cls.objects.filter(id=conversation_id, from_user_id=from_user_id, to_user_id=to_user_id).first()
rever_conversation = cls.objects.filter(id=conversation_id, from_user_id=to_user_id, to_user_id=from_user_id).first()
if conversation:
conversation.update(set__type=4)
conversation.update(set__stop_time=datetime.datetime.now())
conversation.update(set__stop_type=2)
if rever_conversation:
rever_conversation.update(set__type=4)
rever_conversation.update(set__stop_time=datetime.datetime.now())
rever_conversation.update(set__stop_type=2)
|
StarcoderdataPython
|
1630252
|
"""HandleImport transformation takes care of importing user-defined modules."""
from pythran.passmanager import Transformation
from pythran.tables import MODULES, pythran_ward
from pythran.syntax import PythranSyntaxError
import gast as ast
import logging
import os
logger = logging.getLogger('pythran')
def add_filename_field(node, filename):
for descendant in ast.walk(node):
descendant.filename = filename
def mangle_imported_module(module_name):
return pythran_ward + "imported__" + module_name.replace('.', '$') + '$'
def mangle_imported_function(module_name, func_name):
return mangle_imported_module(module_name) + func_name
def demangle(name):
return name[len(pythran_ward + "imported__"):-1].replace('$', '.')
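# Editor's note (added): the helpers above are pure string transforms, e.g.
#   demangle(mangle_imported_module('pkg.mod')) == 'pkg.mod'
# and mangle_imported_function('pkg.mod', 'f') appends 'f' to the mangled module
# prefix ('pkg.mod' is an illustrative name, not taken from this file).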
def is_builtin_function(func_name):
"""Test if a function is a builtin (like len(), map(), ...)."""
return func_name in MODULES["__builtin__"]
def is_builtin_module(module_name):
"""Test if a module is a builtin module (numpy, math, ...)."""
module_name = module_name.split(".")[0]
return module_name in MODULES
def is_mangled_module(name):
return name.endswith('$')
def getsource(name, module_dir, level):
# Try to load py file
module_base = name.replace('.', os.path.sep) + '.py'
if module_dir is None:
assert level <= 0, "Cannot use relative path without module_dir"
module_file = module_base
else:
module_file = os.path.sep.join(([module_dir] + ['..'] * (level - 1)
+ [module_base]))
try:
with open(module_file, 'r') as fp:
from pythran.frontend import raw_parse
node = raw_parse(fp.read())
add_filename_field(node, name + ".py")
return node
except IOError:
raise PythranSyntaxError("Module '{}' not found."
.format(name))
class HandleImport(Transformation):
"""This pass handle user-defined import, mangling name for function from
other modules and include them in the current module, patching all call
site accordingly.
"""
def __init__(self):
super(HandleImport, self).__init__()
self.identifiers = [{}]
self.imported = set()
self.prefixes = [""]
def lookup(self, name):
for renaming in reversed(self.identifiers):
if name in renaming:
return renaming[name]
return None
def is_imported(self, name):
return name in self.imported
def visit_Module(self, node):
self.imported_stmts = list()
self.generic_visit(node)
node.body = self.imported_stmts + node.body
return node
def rename(self, node, attr):
prev_name = getattr(node, attr)
new_name = self.prefixes[-1] + prev_name
setattr(node, attr, new_name)
self.identifiers[-1][prev_name] = new_name
def rename_top_level_functions(self, node):
for stmt in node.body:
if isinstance(stmt, ast.FunctionDef):
self.rename(stmt, 'name')
elif isinstance(stmt, ast.Assign):
for target in stmt.targets:
if isinstance(target, ast.Name):
self.rename(target, 'id')
def visit_FunctionDef(self, node):
self.identifiers.append({})
self.generic_visit(node)
self.identifiers.pop()
return node
def visit_ListComp(self, node):
# change transversal order so that store happens before load
for generator in node.generators:
self.visit(generator)
self.visit(node.elt)
return node
visit_SetComp = visit_ListComp
visit_GeneratorExp = visit_ListComp
def visit_DictComp(self, node):
for generator in node.generators:
self.visit(generator)
self.visit(node.key)
self.visit(node.value)
return node
def visit_comprehension(self, node):
self.visit(node.iter)
for if_ in node.ifs:
self.visit(if_)
self.visit(node.target)
return node
def visit_assign(self, node):
self.visit(node.value)
for target in node.targets:
self.visit(target)
return node
def visit_Assign(self, node):
if not isinstance(node.value, ast.Name):
return self.visit_assign(node)
renaming = self.lookup(node.value.id)
if not renaming:
return self.visit_assign(node)
if not is_mangled_module(renaming):
return self.visit_assign(node)
if any(not isinstance(target, ast.Name) for target in node.targets):
raise PythranSyntaxError("Invalid module assignment", node)
return node
def visit_Name(self, node):
if isinstance(node.ctx, ast.Load):
renaming = self.lookup(node.id)
if renaming:
node.id = renaming
elif isinstance(node.ctx, (ast.Store, ast.Param)):
self.identifiers[-1][node.id] = node.id
elif isinstance(node.ctx, ast.Del):
pass
else:
raise NotImplementedError(node)
return node
def visit_Attribute(self, node):
if not isinstance(node.ctx, ast.Load):
return node
# is that a module attribute load?
root = node.value
while isinstance(root, ast.Attribute):
root = root.value
if not isinstance(root, ast.Name):
return node
renaming = self.lookup(root.id)
if not renaming:
return node
if not is_mangled_module(renaming):
return node
base_module = demangle(renaming)
if is_builtin_module(base_module):
return node
renaming = self.lookup(root.id)
root = node
suffix = ""
while isinstance(root, ast.Attribute):
root = root.value
suffix = '$' + node.attr + suffix
return ast.Name(renaming + suffix[1:], node.ctx, None, None)
def import_module(self, module_name, module_level):
self.imported.add(module_name)
module_node = getsource(module_name,
self.passmanager.module_dir,
module_level)
self.prefixes.append(mangle_imported_module(module_name))
self.identifiers.append({})
self.rename_top_level_functions(module_node)
self.generic_visit(module_node)
self.prefixes.pop()
self.identifiers.pop()
return module_node.body
def visit_ImportFrom(self, node):
if node.module == '__future__':
return None
if is_builtin_module(node.module):
for alias in node.names:
name = alias.asname or alias.name
self.identifiers[-1][name] = name
return node
else:
for alias in node.names:
name = alias.asname or alias.name
self.identifiers[-1][name] = mangle_imported_function(
node.module, alias.name)
if self.is_imported(node.module):
return None
new_stmts = self.import_module(node.module, node.level)
self.imported_stmts.extend(new_stmts)
return None
def visit_Import(self, node):
new_aliases = []
for alias in node.names:
name = alias.asname or alias.name
self.identifiers[-1][name] = mangle_imported_module(alias.name)
if alias.name in self.imported:
continue
if is_builtin_module(alias.name):
new_aliases.append(alias)
continue
new_stmts = self.import_module(alias.name, 0)
self.imported_stmts.extend(new_stmts)
if new_aliases:
node.names = new_aliases
return node
else:
return None
|
StarcoderdataPython
|
3240889
|
from markdown.test_tools import TestCase
from cell_row_span import CellRowSpanExtension
class colspan(TestCase):
def runTest(self):
src = self.dedent("""
c11 | c12 | c13
----|-----|-----
c21 || c22
c31 | c32 | c33
""")
exp = self.dedent("""
<table>
<thead>
<tr>
<th>c11</th>
<th>c12</th>
<th>c13</th>
</tr>
</thead>
<tbody>
<tr>
<td colspan="2">c21</td>
<td>c22</td>
</tr>
<tr>
<td>c31</td>
<td>c32</td>
<td>c33</td>
</tr>
</tbody>
</table>
""")
self.assertMarkdownRenders(src, exp, output_format="html",
extensions=['tables',
CellRowSpanExtension()])
|
StarcoderdataPython
|
1732188
|
<filename>jplib/ocr.py
#!/usr/bin/env python3
"""
OCR with the Tesseract engine from Google
this is a wrapper around pytesser (http://code.google.com/p/pytesser/)
# from jabbapylib.ocr import ocr
"""
from jplib import config as cfg
from jplib.process import get_simple_cmd_output
TEST_DIR = cfg.TEST_ASSETS_DIR + '/ocr'
def image_file_to_string(fname):
"""Convert an image file to text using OCR."""
cmd = "{tesseract} {fname} stdout".format(
tesseract=cfg.TESSERACT,
fname=fname
)
return get_simple_cmd_output(cmd).rstrip('\n')
#############################################################################
if __name__ == "__main__":
print(image_file_to_string(TEST_DIR + '/fnord.png'))
print('=' * 20)
print(image_file_to_string(TEST_DIR + '/fonts_test.png'))
print('=' * 20)
print(image_file_to_string(TEST_DIR + '/phototest.png'))
|
StarcoderdataPython
|
953
|
<filename>store/adminshop/templatetags/admin_extras.py
# -*- coding: utf-8 -*-
# @Author: <NAME> <valle>
# @Date: 27-Aug-2017
# @Email: <EMAIL>
# @Filename: admin_extras.py
# @Last modified by: valle
# @Last modified time: 02-Feb-2018
# @License: Apache license version 2.0
from django import template
from django.db.models import Q
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from adminshop.models import Testeo, Compras, Presupuesto
import json
import sys
register = template.Library()
@register.filter(name='get_nombre_cliente')
def get_nombre_cliente(f):
return f.get_nombre_cliente()
@register.filter(name='enviado')
def enviado(f):
return "No" if not f.enviado else "Si"
@register.filter(name='get_user')
def get_user(f):
return f.get_user()
@register.filter(name='get_ns_imei')
def get_ns_imei(f):
return f.get_ns_imei()
@register.filter(name='get_producto_pk')
def get_producto_pk(f):
return f.get_producto_pk()
@register.filter(name='addcss')
def addcss(field, css):
return field.as_widget(attrs={"class":css})
@register.filter(name='reparacion')
def reparacion(p):
try:
pres = Presupuesto.objects.filter(producto__pk=p.id)[0]
return pres.notas_cliente
except:
return ""
@register.filter(name='num_pres')
def num_pres(p):
try:
pres = Presupuesto.objects.filter(producto__pk=p.id)[0]
return pres.pk
except:
return -1
@register.filter(name='precio_venta')
def precio_venta(p):
precio = 0 if p.precio_venta == None else p.precio_venta
return "{0:.2f} €".format(precio)
@register.filter(name='precio_usado')
def precio_usado(p):
return "{0:.2f} €".format(p.modelo.precio_usado * p.tipo.incremento)
@register.filter(name='document_show')
def document_show(p):
compras = Compras.objects.filter(producto__id=p.pk)
if len(compras) > 0:
compra = compras[0]
else:
compra = Compras()
return p.estado in ["ST", "VD", "OL", "VT"]
@register.filter(name='document_href')
def document_href(p):
if p.estado in ["ST", "VT", "OL"]:
return reverse("get_document_by_id", args=[p.pk])
elif p.estado in ["RP", "OK", "PD"]:
return reverse("get_presupuesto_pdf", args=[p.pk])
elif p.estado == "VD":
return reverse("get_all_document", args=[p.pk])
else:
return "#"
@register.filter(name='have_sign')
def have_sign(p):
compras = Compras.objects.filter(producto__id=p.pk)
compra = Compras()
if len(compras) > 0:
compra = compras[0]
return p.estado in ["ST", "VD", "OL", "VT"] and compra.firma == ""
@register.filter(name='editable')
def editable(p):
return p.estado in ["ST", "OL", "VT"]
@register.simple_tag(name='get_estado_value')
def get_estado_value(test_id, p_id, estado):
testeos = Testeo.objects.filter(Q(descripcion__pk=test_id) &
Q(producto__pk=p_id))
send = ""
if len(testeos) > 0 and testeos[0].estado == estado:
send = "selected"
return send
@register.filter(name='addattrs')
def addattrs(field, args):
attr = {}
try:
args_parse = args.replace("'", '"')
attr = json.loads(args_parse)
except Exception as error:
print(error)
return field.as_widget(attrs=attr)
@register.filter('klass')
def klass(ob):
return ob.field.widget.__class__.__name__
@register.filter('display')
def display(form, value):
return dict(form.field.choices)[value]
@register.filter('modelo')
def modelo(p):
if p.modelo != None:
return str(p.modelo)
else:
return p.detalle
@register.filter('marca')
def marca(p):
if p.modelo != None:
return str(p.modelo.marca)
else:
return ""
|
StarcoderdataPython
|
86590
|
#!/usr/bin/env python3
"""
This script is used for course notes.
Author: <NAME>
Date: 01/06/2020
"""
import psutil
def check_cpu_usage(percent):
usage = psutil.cpu_percent(1)
print("DEBUG: usage: {}".format(usage))
return usage < percent
if not check_cpu_usage(75):
print("ERROR! CPU is overloaded")
else:
print("Everything ok")
|
StarcoderdataPython
|
1631618
|
<filename>poky-dunfell/meta/lib/oeqa/runtime/cases/_qemutiny.py<gh_stars>10-100
#
# SPDX-License-Identifier: MIT
#
from oeqa.runtime.case import OERuntimeTestCase
class QemuTinyTest(OERuntimeTestCase):
def test_boot_tiny(self):
status, output = self.target.run_serial('uname -a')
msg = "Cannot detect poky tiny boot!"
self.assertTrue("yocto-tiny" in output, msg)
|
StarcoderdataPython
|
3208185
|
<filename>Downsize_Update/ledapp/ledapp.py
import serial, time, datetime
from datetime import timedelta
from flask import Flask, render_template, request, jsonify
app = Flask(__name__)
@app.route("/")
def hello():
global hardware
hardware=1
global on
on=False
if 'ser' not in locals():
global ser
ser = serial.Serial('/dev/ttyUSB0', 38400)
return render_template('ui.html')
@app.route("/apply")
def application():
hardware=0
red=int(request.args.get('r'))
green=int(request.args.get('g'))
blue=int(request.args.get('b'))
sendbit=int(request.args.get('s'))
ba=bytearray()
ba[0:3]=[red,green,blue,sendbit]
for index,value in enumerate(ba):
ba[index]=min(255,value+1)
ser.write(ba)
ser.write('\0')
return('potato')
@app.route("/supply")
def supplication():
new=0
r=0
g=0
b=0
if(ser.in_waiting >= 4):
ba=bytearray()
ba[0:4]=[90,90,90,90,90]
i=0
x='w'
while(x != '\0'):
x=ser.read()
ba[i]=x
i=i+1
r=ba[0]-1
g=ba[1]-1
b=ba[2]-1
new=1
return jsonify(red=r,green=g,blue=b,info=new)
@app.route("/nappy")
def nappytime():
on=True
#start=int(request.args.get('start'))
#end=int(request.args.get('end'))
alarm=datetime.datetime(2016,10,19,22,39)
print "initialize"
while(on):
now=datetime.datetime.now()
        delta = alarm - now
        if delta.total_seconds() <= 0:
print "caramel"
break
if __name__ == "__main__":
app.run(processes=2)
|
StarcoderdataPython
|
3326818
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_("Name of User"), blank=True, max_length=255)
profile_image = models.ImageField(null = True)
followers = models.ManyToManyField("self", blank = True)
following = models.ManyToManyField("self" , blank=True)
website = models.CharField(max_length = 100 , null = True)
intro = models.TextField(null = True)
phone = models.CharField(max_length = 100 , null = True)
@property
def images_count(self):
return self.images.all().count()
def get_absolute_url(self):
return reverse("users:detail", kwargs={"username": self.username})
@property
def followers_count(self):
return self.followers.all().count()
@property
def following_count(self):
return self.following.all().count()
|
StarcoderdataPython
|
1309
|
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
# local model
import sys
sys.path.append("../network")
import Coral
from lstm import LSTMHardSigmoid
from AdaBN import AdaBN
sys.path.append("../network/AutoEncoder")
import AutoEncoder
class cnnblstm_with_adabn(nn.Module):
PARAMS_FILE = "params.pkl"
PARAMS_AE = "params_ae.pkl"
NET1_ADABN = "net1_adabn"
NET2_ADABN = "net2_adabn"
NET3_ADABN = "net3_adabn"
def __init__(self, time_steps = 800, n_features = 3, n_outputs = 10, use_cuda = False, params_dir = "./params", enable_CORAL = False):
super(cnnblstm_with_adabn, self).__init__()
self.time_steps = time_steps
self.n_features = n_features
self.n_outputs = n_outputs
self.use_cuda = use_cuda
self.params_dir = params_dir
if not os.path.exists(self.params_dir):
os.mkdir(self.params_dir)
self.enable_CORAL = enable_CORAL
self.n_filters = 128
self.kernel_size = 15
self.n_hidden = 150 # 150
self.n_layers = 1
self.bidirectional = True
# self.ae = AutoEncoder.load_AE(type = "ConvAE", time_steps = self.time_steps, n_features = self.n_features, use_cuda = self.use_cuda, params_pkl = os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_AE))
# build net1 cnn
self.net1 = nn.Sequential(
nn.Conv1d(in_channels = self.n_features, out_channels = self.n_filters, kernel_size = self.kernel_size),
# nn.Conv1d(in_channels = self.ae.n_filters3, out_channels = self.n_filters, kernel_size = self.kernel_size),
nn.ReLU(),
# nn.Sigmoid(),
nn.Dropout(p = 0.5),
nn.MaxPool1d(kernel_size = 2)
)
# build net1_adabn
self.net1_adabn = AdaBN(self.n_filters, variables_dir = os.path.join(self.params_dir, cnnblstm_with_adabn.NET1_ADABN), use_cuda = self.use_cuda)
# build net2 blstm
# self.net2 = nn.LSTM(input_size = self.n_filters, hidden_size = self.n_hidden, num_layers = self.n_layers, dropout = 0.2, batch_first = True, bidirectional = self.bidirectional, bias = True)
self.net2 = LSTMHardSigmoid(input_size = self.n_filters, hidden_size = self.n_hidden, num_layers = self.n_layers, dropout = 0.2, batch_first = True, bidirectional = self.bidirectional, bias = True)
# build net2_adabn
if self.bidirectional:
n_blstm_output = self.n_hidden * 2
else:
n_blstm_output = self.n_hidden
self.net2_adabn = AdaBN(n_blstm_output, variables_dir = os.path.join(self.params_dir, cnnblstm_with_adabn.NET2_ADABN), use_cuda = self.use_cuda)
# build net3 fc
self.net3 = nn.Sequential(
nn.Linear(n_blstm_output, 50, bias = True),
nn.ReLU(),
# nn.Sigmoid(),
)
# build net3_adabn
self.net3_adabn = AdaBN(50, variables_dir = os.path.join(self.params_dir, cnnblstm_with_adabn.NET3_ADABN), use_cuda = self.use_cuda)
# build net4 fc
self.net4 = nn.Sequential(
nn.Dropout(p = 0.2),
nn.Linear(50, self.n_outputs, bias = True),
nn.Softmax(dim = 1)
)
def init_hidden(self, batch_size):
"""
init blstm's hidden states
"""
if self.bidirectional:
n_layers = self.n_layers * 2
else:
n_layers = self.n_layers
if self.use_cuda:
hidden_state = torch.zeros(n_layers, batch_size, self.n_hidden).cuda()
cell_state = torch.zeros(n_layers, batch_size, self.n_hidden).cuda()
else:
hidden_state = torch.zeros(n_layers, batch_size, self.n_hidden)
cell_state = torch.zeros(n_layers, batch_size, self.n_hidden)
self.hidden = (hidden_state, cell_state)
def reset_parameters(self):
"""
temp useless
Here we reproduce Keras default initialization weights for consistency with Keras version
"""
# get weights & bias set
net1_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net1" in name) and ("net1_adabn" not in name))))
net1_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net1" in name) and ("net1_adabn" not in name))))
# net2_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net2" in name) and ("net2_adabn" not in name))))
# net2_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net2" in name) and ("net2_adabn" not in name))))
net3_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net3" in name) and ("net3_adabn" not in name))))
net3_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net3" in name) and ("net3_adabn" not in name))))
net4_weights = ((name, param.data) for name, param in self.named_parameters() if (("weight" in name) and (("net4" in name) and ("net4_adabn" not in name))))
net4_biases = ((name, param.data) for name, param in self.named_parameters() if (("bias" in name) and (("net4" in name) and ("net4_adabn" not in name))))
# init weights & bias
# self.ae.reset_parameters()
for name, params_data in net1_weights:
# print(name)
nn.init.xavier_uniform_(params_data)
for name, params_data in net1_biases:
nn.init.constant_(params_data, 0)
self.net1_adabn.reset_parameters()
self.net2.reset_parameters() # lstm reset parameters
self.net2_adabn.reset_parameters()
for name, params_data in net3_weights:
nn.init.xavier_uniform_(params_data)
for name, params_data in net3_biases:
nn.init.constant_(params_data, 0)
self.net3_adabn.reset_parameters()
for name, params_data in net4_weights:
nn.init.xavier_uniform_(params_data)
for name, params_data in net4_biases:
nn.init.constant_(params_data, 0)
def forward(self, input):
"""
        Compute the network output for the given input.
"""
# print(input.shape)
# AutoEncoder
# input = self.ae.encoder(input)
# input = self.ae(input)
# MaxPool1d
maxPool1d_output = self.net1(input)
# maxPool1d_adabn_output = maxPool1d_output
maxPool1d_adabn_output, maxPool1d_output = self.net1_adabn(maxPool1d_output), None
maxPool1d_adabn_t_output = maxPool1d_adabn_output.permute(0, 2, 1).contiguous()
# BiLSTM
(bilstm_output, _), maxPool1d_adabn_t_output = self.net2(maxPool1d_adabn_t_output, None), None
# MaxPooling1D time_steps
bilstm_output = bilstm_output.permute(0, 2, 1)
maxPooling_output, bilstm_output = F.max_pool1d(bilstm_output, kernel_size = bilstm_output.size(2)).squeeze(2), None
# maxPooling_adabn_output = maxPooling_output
maxPooling_adabn_output, maxPooling_output = self.net2_adabn(maxPooling_output), None
# get classifier
net3_output, maxPooling_adabn_output = self.net3(maxPooling_adabn_output), None
net3_adabn_output, net3_output = self.net3_adabn(net3_output), None
linear2_softmax_output, net3_adabn_output = self.net4(net3_adabn_output), None
return linear2_softmax_output
def update_adabn_running_stats(self):
"""
        Update AdaBN running stats: replace mu_j with mu_j_next to start the next round.
"""
self.net1_adabn.update_running_stats()
self.net2_adabn.update_running_stats()
self.net3_adabn.update_running_stats()
def trainAllLayers(self, train_x, train_y, test_x = None, learning_rate = 0.001, n_epoches = 20, batch_size = 20, shuffle = True):
"""
train all layers of network model
"""
# print(os.environ["CUDA_VISIBLE_DEVICES"])
# CORAL
if self.enable_CORAL:
            if test_x is None:
print("ERROR: (in cnnblstm_with_adabn.trainAllLayers) test_x == None!")
return
# review train_x & test_x
train_x = train_x.view(-1, self.time_steps * self.n_features)
test_x = test_x.view(-1, self.time_steps * self.n_features)
# get CORAL(train_x, test_x)
train_x = Coral.CORAL_torch(train_x, test_x)
# review train_x
train_x = train_x.view(-1, self.n_features, self.time_steps)
# optimize all cnn parameters
params = [{"params": model.parameters()} for model in self.children() if model not in [self.ae]]
optimizer = torch.optim.Adam(params, lr = learning_rate)
# the target label is not one-hotted
loss_func = nn.CrossEntropyLoss()
# init params
self.reset_parameters()
# load params
self.load_params()
# set train mode True
self.train()
# get parallel model
parallel_cba = self
if self.use_cuda:
# print("we use cuda!")
parallel_cba = torch.nn.DataParallel(self, device_ids = range(torch.cuda.device_count()))
# parallel_cba = parallel_cba.cuda()
# if use_cuda
if self.use_cuda:
train_x = train_x.cuda()
train_y = train_y.cuda()
"""
# get autoencoder
self.ae = AutoEncoder.train_AE(self.ae, train_x, train_x, n_epoches = 20)
self.ae.save_params()
"""
# get train_data
train_data = torch.utils.data.TensorDataset(train_x, train_y)
# Data Loader for easy mini-batch return in training
train_loader = torch.utils.data.DataLoader(dataset = train_data, batch_size = batch_size, shuffle = shuffle)
# training and testing
for epoch in range(n_epoches):
# init loss & acc
train_loss = 0
train_acc = 0
for step, (b_x, b_y) in enumerate(train_loader): # gives batch data
b_x = b_x.view(-1, self.n_features, self.time_steps) # reshape x to (batch, n_features, time_step)
if self.use_cuda:
b_x, b_y = Variable(b_x).cuda(), Variable(b_y).cuda()
else:
b_x, b_y = Variable(b_x), Variable(b_y)
"""
# get hidden
if self.use_cuda:
self.init_hidden(b_x.size(0) // torch.cuda.device_count())
else:
self.init_hidden(b_x.size(0))
"""
# update adabn running stats
self.update_adabn_running_stats()
# get output
output = parallel_cba(b_x) # CNN_BLSTM output
# get loss
loss = loss_func(output, b_y) # cross entropy loss
train_loss += loss.item() * len(b_y)
_, pre = torch.max(output, 1)
num_acc = (pre == b_y).sum()
train_acc += num_acc.item()
# backward
optimizer.zero_grad() # clear gradients for this training step
loss.backward() # backpropagation, compute gradients
optimizer.step() # apply gradients
# print loss
# if (step + 1) % 5 == 0:
# print("[{}/{}], train loss is: {:.6f}, train acc is: {:.6f}".format(step, len(train_loader), train_loss / ((step + 1) * batch_size), train_acc / ((step + 1) * batch_size)))
print("[{}/{}], train loss is: {:.6f}, train acc is: {:.6f}".format(len(train_loader), len(train_loader), train_loss / (len(train_loader) * batch_size), train_acc / (len(train_loader) * batch_size)))
# save params
self.save_params()
# print("train finish!")
def getTestAccuracy(self, test_x, test_y):
"""
test network model with test set
"""
# init params
self.reset_parameters()
# load params
self.load_params()
# set eval
self.eval()
# get parallel model
parallel_cba = self
if self.use_cuda:
# print("we use cuda!")
parallel_cba = torch.nn.DataParallel(self, device_ids = range(torch.cuda.device_count()))
# parallel_cba = parallel_cba.cuda()
# cuda test_data
with torch.no_grad():
if self.use_cuda:
test_x, test_y = Variable(test_x).cuda(), Variable(test_y).cuda()
else:
test_x, test_y = Variable(test_x), Variable(test_y)
"""
# get hidden
if self.use_cuda:
self.init_hidden(test_x.size(0) // torch.cuda.device_count())
else:
self.init_hidden(test_x.size(0))
"""
# update adabn running stats
self.update_adabn_running_stats()
# get output
with torch.no_grad():
output = parallel_cba(test_x)
# print(output)
prediction = torch.max(output, 1)[1]
pred_y = prediction.cpu().data.numpy()
# print(pred_y)
target_y = test_y.cpu().data.numpy()
# print(test_y)
accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)
# print("Accuracy: ", str(accuracy))
return accuracy
def save_params(self):
"""
save params & adabn's inner stats
"""
self.save_adabn_variables()
torch.save(self.state_dict(), os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE))
# self.ae.save_params()
# print("save_params success!")
def save_adabn_variables(self):
"""
save adabn's inner stats
"""
self.net1_adabn.save_attrs()
self.net2_adabn.save_attrs()
self.net3_adabn.save_attrs()
def load_params(self):
"""
load params & adabn's inner stats
"""
self.load_adabn_variables()
if os.path.exists(os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE)):
if self.use_cuda:
self.load_state_dict(torch.load(os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE), map_location = torch.device('cuda')))
else:
self.load_state_dict(torch.load(os.path.join(self.params_dir, cnnblstm_with_adabn.PARAMS_FILE), map_location = torch.device('cpu')))
# print("load_params success!")
# self.ae.load_params()
def load_adabn_variables(self):
"""
load adabn's inner stats
"""
self.net1_adabn.load_attrs()
self.net2_adabn.load_attrs()
self.net3_adabn.load_attrs()
def get_model(self, pre_trained = False):
"""
get pretrained model
"""
if pre_trained:
self.load_params()
return self
if __name__ == '__main__':
use_cuda = torch.cuda.is_available()
if use_cuda:
cnnblstm = cnnblstm_with_adabn(use_cuda = use_cuda).cuda()
else:
cnnblstm = cnnblstm_with_adabn(use_cuda = use_cuda)
print(cnnblstm)
# get train_x, train_y
train_x = torch.rand(20, 3, 800, dtype = torch.float32)
train_y = torch.randint(10, (20, ), dtype = torch.int64)
# train_y = torch.LongTensor(20, 1).random_() % 10
print(train_x.type())
# train_y = torch.zeros(20, 10).scatter_(1, train_y, 1)
print(train_y)
train_data = torch.utils.data.TensorDataset(train_x, train_y)
    cnnblstm.trainAllLayers(train_x, train_y)
|
StarcoderdataPython
|
1694781
|
<filename>src/packageschema/schema.py<gh_stars>1-10
"""This contains all schema related bits of packageschema.
This serves a dual-purpose:
1. It has the versioned schema that validates the ``packageschema.yaml``
files.
2. It reads and parses the ``packageschema.yaml`` files.
As a result, there are a few classes hidden in here.
"""
import os.path
import attr
import jsonschema
import yaml
from packageschema import exceptions
from packageschema import package
SCHEMATA_VERSIONS = [
'2016.12',
]
Version201612 = {
'type': 'object',
'required': [
'schema',
'package',
],
'properties': {
'schema': {
'type': 'object',
'required': [
'version',
],
'properties': {
'version': {
'type': 'string',
'enum': SCHEMATA_VERSIONS,
},
},
},
'package': {
'type': 'object',
'required': [
'name',
'version',
'module_names',
'exports',
],
'properties': {
'name': {
'type': 'string',
},
'version': {
'type': 'string',
},
'module_names': {
'type': 'array',
'minItems': 1,
'items': {
'type': 'string',
},
},
'exports': {
'type': 'array',
'minItems': 1,
'items': {
'type': 'string',
},
},
},
},
},
}
Version201612Validator = jsonschema.validators.Draft4Validator(Version201612)
@attr.s
class Schema:
schema_data = attr.ib(
validator=attr.validators.instance_of(dict),
repr=False,
)
version = attr.ib(init=False, default=None)
validator = attr.ib(
init=False,
repr=False,
validator=attr.validators.instance_of(jsonschema.validators.Draft4Validator),
)
validators = {
'2016.12': Version201612Validator,
}
def __attrs_post_init__(self):
self.version = self.schema_data['version']
self.validator = self.validators.get(self.version)
if self.validator is None:
raise exceptions.InvalidSchemaVersion(self.version)
def validate(self, packageschema):
"""Use the loaded schema to apply JSON Schema validation."""
self.validator.validate(packageschema)
@attr.s
class PackageSchema:
"""This handles and represents the parsed ``packageschema.yaml`` file."""
schema = attr.ib(validator=attr.validators.instance_of(Schema))
package = attr.ib(validator=attr.validators.instance_of(package.Package))
@classmethod
def from_file(cls, path):
"""Read the file specified by ``path`` and parse it.
:param str path:
Absolute path to the file to read.
:returns:
Parsed package schema file.
:rtype:
PackageSchema
"""
        if not os.path.isabs(path):
raise ValueError(
"You are required to pass an absolute path to the file to "
"parse but you provided a relative path ({!r})".format(path)
)
if not os.path.isfile(path):
raise exceptions.FileDoesNotExist(path)
with open(path, 'r') as packageschema_file:
filedata = yaml.safe_load(packageschema_file)
schema = Schema(filedata['schema'])
schema.validate(filedata)
return cls(
schema=schema,
package=package.Package(**filedata['package']),
)
def validate(self):
"""Validate using the declared schema version."""
self.schema.validate(self.package)
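if __name__ == '__main__':
    # Usage sketch added for illustration; it is not part of the original
    # module. It builds a schema document in memory (the package metadata
    # below is made up) and runs it through the same validation path that
    # ``PackageSchema.from_file`` uses.
    _example = {
        'schema': {'version': '2016.12'},
        'package': {
            'name': 'example',
            'version': '1.0.0',
            'module_names': ['example'],
            'exports': ['example.main'],
        },
    }
    _schema = Schema(_example['schema'])
    _schema.validate(_example)
    print('example document is valid for schema version', _schema.version)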
|
StarcoderdataPython
|
16052
|
import time
from annotypes import Anno, add_call_types
from malcolm.core import PartRegistrar
from malcolm.modules import builtin
# Pull re-used annotypes into our namespace in case we are subclassed
APartName = builtin.parts.APartName
AMri = builtin.parts.AMri
with Anno("The demand value to move our counter motor to"):
ADemand = float
with Anno("The amount of time to get to the demand position"):
ADuration = float
# How long between ticks of the "motor" position while moving
UPDATE_TICK = 0.1
# We will set these attributes on the child block, so don't save them
@builtin.util.no_save("counter")
class CounterMovePart(builtin.parts.ChildPart):
"""Provides control of a `counter_block` within a `ManagerController`"""
def __init__(self, name, mri):
# type: (APartName, AMri) -> None
super(CounterMovePart, self).__init__(
name, mri, stateful=False, initial_visibility=True)
def setup(self, registrar):
# type: (PartRegistrar) -> None
super(CounterMovePart, self).setup(registrar)
# Method
registrar.add_method_model(
self.move, self.name + "Move", needs_context=True)
@add_call_types
def move(self, context, demand, duration=0):
# type: (builtin.hooks.AContext, ADemand, ADuration) -> None
"""Move the counter to the demand value, taking duration seconds like
a motor would do"""
start = time.time()
child = context.block_view(self.mri)
distance = demand - child.counter.value
remaining = duration
# "Move" the motor, ticking at UPDATE_TICK rate
while remaining > 0:
child.counter.put_value(demand - distance * remaining / duration)
context.sleep(min(remaining, UPDATE_TICK))
remaining = start + duration - time.time()
# Final move to make sure we end up at the right place
child.counter.put_value(demand)
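# Worked example of CounterMovePart.move above (illustrative numbers): with the
# counter at 0, demand=10 and duration=0.5, the "motor" ticks every UPDATE_TICK
# (0.1 s) and puts roughly 0, 2, 4, 6, 8 before the final put_value(10) lands
# exactly on the demand.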
|
StarcoderdataPython
|
1723435
|
from utils.bert_utils import get_bert_layer_representations
import time as tm
import numpy as np
import torch
import os
import argparse
def save_layer_representations(model_layer_dict, model_name, seq_len, save_dir):
for layer in model_layer_dict.keys():
np.save('{}/{}_length_{}_layer_{}.npy'.format(save_dir,model_name,seq_len,layer+1),np.vstack(model_layer_dict[layer]))
print('Saved extracted features to {}'.format(save_dir))
return 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--nlp_model", default='bert', choices=model_options)
parser.add_argument("--sequence_length", type=int, default=1, help='length of context to provide to NLP model (default: 1)')
parser.add_argument("--output_dir", required=True, help='directory to save extracted representations to')
args = parser.parse_args()
print(args)
text_array = np.load(os.getcwd() + '/data/stimuli_words.npy')
remove_chars = [",","\"","@"]
word_ind_to_extract = -2
nlp_features = get_bert_layer_representations(args.sequence_length, text_array, remove_chars, word_ind_to_extract)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
save_layer_representations(nlp_features, args.nlp_model, args.sequence_length, args.output_dir)
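# Illustrative note on the output naming (not in the original script): with
# --nlp_model bert, --sequence_length 4 and --output_dir out, each layer k in
# nlp_features is written by save_layer_representations as
# out/bert_length_4_layer_<k+1>.npy.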
|
StarcoderdataPython
|
1615352
|
import unittest
import os
import subprocess
import socket
import json
import sys
bin_path = os.path.join(os.getcwd(), "..", "..", "bin")
if os.name == "nt":
client = os.path.join(bin_path, "Debug", "client.exe")
else:
client = os.path.join(bin_path, "client")
address = "localhost"
port = 5750
def convert(val):
if sys.version < "3":
return str(val)
else:
return str(val).encode("utf-8")
class TrackerTest(unittest.TestCase):
@classmethod
def setUpClass(self):
self.process = subprocess.Popen([client], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@classmethod
def tearDownClass(self):
self.process.kill()
def test_connection(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(s.connect_ex((address, port)), 0)
s.close()
if __name__ == '__main__':
print(client)
unittest.main()
|
StarcoderdataPython
|
42284
|
#!/usr/bin/env python
import glob
import os
import shutil
import subprocess
import uuid
def renameFiles(tooldir, dadir, fromnames, toname):
"Recursively replace file names and contents."
tooldir = os.path.join('..', tooldir)
os.chdir(dadir)
for fromname in fromnames:
fromspaced = "".join([x if x.islower() else ' '+x for x in fromname]).strip()
tospaced = "".join([x if x.islower() else ' '+x for x in toname]).strip()
fromunder = fromspaced.lower().replace(' ', '_')
tounder = tospaced.lower().replace(' ', '_')
fromlo = fromname.lower()
tolo = toname.lower()
print("Renaming '%s' to '%s' in dir '%s'." % (fromname, toname, dadir))
subprocess.call(['python', tooldir+'/renamefiles.py', '*'+fromname, '*'+toname])
subprocess.call(['python', tooldir+'/renamefiles.py', fromname+'*', toname+'*'])
subprocess.call(['python', tooldir+'/regexpfiles.py', fromname, toname, '*'])
subprocess.call(['python', tooldir+'/regexpfiles.py', fromspaced, tospaced, '*'])
subprocess.call(['python', tooldir+'/regexpfiles.py', fromunder, tounder, '*'])
subprocess.call(['python', tooldir+'/regexpfiles.py', fromlo, tolo, '*'])
files = glob.glob('*')
for fn in files:
if os.path.isdir(fn):
renameFiles(tooldir, fn, fromnames, toname)
os.chdir('..')
def add_makefile_generator(filename, fromname, toname):
outfn = filename+".tmp"
with open(filename, "rt") as r:
with open(outfn, "wt") as w:
for line in r:
w.write(line)
if fromname in line:
toline = line.replace(fromname, toname)
w.write(toline)
os.remove(filename)
os.rename(outfn, filename)
def add_builders(filename, fromname, toname):
if len(fromname) <= 5:
fromsuffix = fromname.lower()
else:
fromsuffix = "".join(filter(str.isupper, fromname)).lower()
fromsuffix = '_'+fromsuffix+'():'
if len(toname) <= 5:
tosuffix = toname.lower()
else:
tosuffix = "".join(filter(str.isupper, toname)).lower()
tosuffix = '_'+tosuffix+'():'
fromspaced = "".join([x if x.islower() else ' '+x for x in fromname]).strip()
tospaced = "".join([x if x.islower() else ' '+x for x in toname]).strip()
outfn = filename+".tmp"
with open(filename, "rt") as r:
with open(outfn, "wt") as w:
inblock = False
block = ""
for line in r:
wasinblock = inblock
if line.startswith("def "):
inblock = line.strip().endswith(fromsuffix)
if inblock:
block += line
if wasinblock and not inblock:
block = block.replace(fromname, toname)
block = block.replace(fromspaced, tospaced)
block = block.replace(fromsuffix, tosuffix)
w.write(block)
block = ""
w.write(line)
os.remove(filename)
os.rename(outfn, filename)
def fix_guids(toname):
for filename in glob.glob(toname+'/'+toname+'*.vc*proj*'):
outfn = filename+".tmp"
with open(filename, "rt") as r:
with open(outfn, "wt") as w:
prefixes = ('ProjectGUID="{', '<ProjectGuid>{', '<UniqueIdentifier>{')
for line in r:
for prefix in prefixes:
if prefix in line:
new_guid = str(uuid.uuid1()).upper()
index1 = line.index(prefix) + len(prefix)
index2 = index1 + len(new_guid)
line = line[:index1] + new_guid + line[index2:]
break
w.write(line)
os.remove(filename)
os.rename(outfn, filename)
def clone_project(fromnames, toname):
print("Copying files...")
fromname = fromnames[0]
pat = ('*.user', 'makefile', '*.mesh', '*.class', '*.phys', 'Unicode Debug', 'Unicode Release Candiate', 'Unicode Final', 'Final', 'Debug', 'Release')
shutil.copytree(fromname, toname, ignore=shutil.ignore_patterns(*pat))
todir = toname
renameFiles('Tools/Util', todir, fromnames, toname)
print("Files and contents renamed successfully.")
add_makefile_generator('Tools/GCC/generate_makefile.py', fromname, toname)
print("Makefile generation added successfully.")
add_builders('Tools/Build/rgo.py', fromname, toname)
print("Builders added successfully.")
fix_guids(toname)
print("GUIDs changed.")
print(fromname, "->", toname, "done!")
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print("Usage: %s <fromname> [fromname...] <toname>" % sys.argv[0])
print("Example: %s KillCutie GrenadeRun TireFire" % sys.argv[0])
sys.exit(1)
clone_project(sys.argv[1:-1], sys.argv[-1])
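# Worked example of the name variants computed in renameFiles (illustrative):
# for fromname "KillCutie" the script derives fromspaced "Kill Cutie",
# fromunder "kill_cutie" and fromlo "killcutie", and rewrites each variant to
# the corresponding form of the new project name.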
|
StarcoderdataPython
|
3251670
|
<filename>UC. Curso em Aula II/A1D5.py
n1 = float(input('Enter your 1st grade: '))
n2 = float(input('Enter your 2nd grade: '))
media = (n1+n2)/2
if media < 5.1:
    print('Failed')
elif media > 6.9:
    print('Passed')
else:
    print('Retake exam')
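# Worked example (illustrative): grades 7.0 and 6.0 give media = (7.0+6.0)/2
# = 6.5, which is neither below 5.1 nor above 6.9, so the script prints
# 'Retake exam'.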
|
StarcoderdataPython
|
4834391
|
from __future__ import absolute_import
from __future__ import print_function
import math
import functools
from collections import OrderedDict
import veriloggen.core.vtypes as vtypes
import veriloggen.types.axi as axi
from veriloggen.fsm.fsm import FSM
from veriloggen.optimizer import try_optimize as optimize
from .ttypes import _MutexFunction
from .ram import RAM, MultibankRAM, to_multibank_ram
from .fifo import FIFO
class AXIStreamIn(axi.AxiStreamIn, _MutexFunction):
""" AXI Stream Interface for Input """
__intrinsics__ = ('read',
'write_ram', 'write_ram_async',
'wait_write_ram')
def __init__(self, m, name, clk, rst, datawidth=32, addrwidth=32,
with_last=True, with_strb=False,
id_width=0, user_width=0, dest_width=0,
noio=False,
enable_async=True,
num_cmd_delay=0, num_data_delay=0,
op_sel_width=8, fsm_as_module=False):
axi.AxiStreamIn.__init__(self, m, name, clk, rst, datawidth,
with_last, with_strb,
id_width, user_width, dest_width,
noio)
self.addrwidth = addrwidth
self.enable_async = enable_async
self.num_cmd_delay = num_cmd_delay
self.num_data_delay = num_data_delay
self.op_sel_width = op_sel_width
self.fsm_as_module = fsm_as_module
self.mutex = None
self.read_start = self.m.Reg('_'.join(['', self.name, 'read_start']),
initval=0)
self.read_op_sel = self.m.Reg('_'.join(['', self.name, 'read_op_sel']),
self.op_sel_width, initval=0)
self.read_local_addr = self.m.Reg('_'.join(['', self.name, 'read_local_addr']),
self.addrwidth, initval=0)
self.read_size = self.m.Reg('_'.join(['', self.name, 'read_size']),
self.addrwidth + 1, initval=0)
self.read_local_stride = self.m.Reg('_'.join(['', self.name, 'read_local_stride']),
self.addrwidth, initval=0)
self.read_idle = self.m.Reg(
'_'.join(['', self.name, 'read_idle']), initval=1)
self.seq(
self.read_start(0)
)
self.read_op_id_map = OrderedDict()
self.read_op_id_count = 1
self.read_reqs = OrderedDict()
self.read_ops = []
self.read_fsm = None
self.read_data_wire = None
self.read_valid_wire = None
self.read_rest_size = None
self.read_narrow_fsms = OrderedDict() # key: pack_size
self.read_narrow_pack_counts = OrderedDict() # key: pack_size
self.read_narrow_data_wires = OrderedDict() # key: pack_size
self.read_narrow_valid_wires = OrderedDict() # key: pack_size
self.read_narrow_rest_size_wires = OrderedDict() # key: pack_size
self.read_wide_fsms = OrderedDict() # key: pack_size
self.read_wide_pack_counts = OrderedDict() # key: pack_size
self.read_wide_data_wires = OrderedDict() # key: pack_size
self.read_wide_valid_wires = OrderedDict() # key: pack_size
self.read_wide_rest_size_wires = OrderedDict() # key: pack_size
def read(self, fsm):
data, last, _id, user, dest, valid = self.read_data(cond=fsm)
rdata = self.m.TmpReg(self.datawidth, initval=0,
signed=True, prefix='axistreamin_rdata')
if last is not None:
rlast = self.m.TmpReg(1, initval=0,
signed=False, prefix='axistreamin_rlast')
else:
rlast = True
fsm.If(valid)(
rdata(data),
rlast(last) if last is not None else ()
)
fsm.Then().goto_next()
return rdata, rlast
def write_ram(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
if self.enable_async:
self.wait_write_ram(fsm)
self._write_ram(fsm, ram, local_addr, size,
local_stride, port, ram_method)
self.wait_write_ram(fsm)
def write_ram_async(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
if not self.enable_async:
raise ValueError(
"Async mode is disabled. Set 'True' to AXIM.enable_async.")
self.wait_write_ram(fsm)
self._write_ram(fsm, ram, local_addr, size,
local_stride, port, ram_method)
def wait_write_ram(self, fsm):
fsm.If(self.read_idle).goto_next()
def _write_ram(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
if isinstance(ram, (tuple, list)):
ram = to_multibank_ram(ram)
if not isinstance(ram, (RAM, MultibankRAM)):
raise TypeError('RAM object is required.')
if ram_method is None:
ram_method = getattr(ram, 'write_dataflow')
start = self._set_flag(fsm)
for _ in range(self.num_cmd_delay + 1):
fsm.goto_next()
self._set_read_request(ram, port, ram_method, start,
local_addr, size, local_stride)
self._synthesize_read_fsm(ram, port, ram_method)
fsm.goto_next()
def _set_read_request(self, ram, port, ram_method, start,
local_addr, size, local_stride):
op_id = self._get_read_op_id(ram, port, ram_method)
if op_id in self.read_reqs:
(read_start, read_op_sel,
read_local_addr_in,
read_size_in, read_local_stride_in) = self.read_reqs[op_id]
self.seq.If(start)(
read_start(1),
read_op_sel(op_id),
read_local_addr_in(local_addr),
read_size_in(size),
read_local_stride_in(local_stride)
)
return
port = str(vtypes.to_int(port))
read_start = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_start']),
initval=0)
read_op_sel = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_op_sel']),
self.op_sel_width, initval=0)
read_local_addr = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_local_addr']),
self.addrwidth, initval=0)
read_size = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_size']),
self.addrwidth + 1, initval=0)
read_local_stride = self.m.Reg(
'_'.join(['', self.name, ram.name, port, 'read_local_stride']),
self.addrwidth, initval=0)
self.seq(
read_start(0)
)
self.seq.If(start)(
read_start(1),
read_op_sel(op_id),
read_local_addr(local_addr),
read_size(size),
read_local_stride(local_stride)
)
self.read_reqs[op_id] = (read_start, read_op_sel,
read_local_addr,
read_size, read_local_stride)
if self.num_cmd_delay > 0:
read_start = self.seq.Prev(read_start, self.num_cmd_delay)
read_op_sel = self.seq.Prev(read_op_sel, self.num_cmd_delay)
read_local_addr = self.seq.Prev(
read_local_addr, self.num_cmd_delay)
read_size = self.seq.Prev(read_size, self.num_cmd_delay)
read_local_stride = self.seq.Prev(
read_local_stride, self.num_cmd_delay)
self.seq.If(read_start)(
self.read_idle(0)
)
self.seq.If(read_start)(
self.read_start(1),
self.read_op_sel(read_op_sel),
self.read_local_addr(read_local_addr),
self.read_size(read_size),
self.read_local_stride(read_local_stride)
)
def _synthesize_read_fsm(self, ram, port, ram_method):
ram_method_name = (ram_method.func.__name__
if isinstance(ram_method, functools.partial) else
ram_method.__name__)
ram_datawidth = (ram.datawidth if ram_method is None else
ram.orig_datawidth if 'bcast' in ram_method_name else
ram.orig_datawidth if 'block' in ram_method_name else
ram.datawidth)
if not isinstance(self.datawidth, int):
raise TypeError("axi.datawidth must be int, not '%s'" %
str(type(self.datawidth)))
if not isinstance(ram_datawidth, int):
raise TypeError("ram_datawidth must be int, not '%s'" %
str(type(ram_datawidth)))
if self.datawidth == ram_datawidth:
return self._synthesize_read_fsm_same(ram, port, ram_method, ram_datawidth)
if self.datawidth < ram_datawidth:
return self._synthesize_read_fsm_narrow(ram, port, ram_method, ram_datawidth)
return self._synthesize_read_fsm_wide(ram, port, ram_method, ram_datawidth)
def _synthesize_read_fsm_same(self, ram, port, ram_method, ram_datawidth):
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.read_ops:
""" already synthesized op """
return
if self.read_fsm is not None:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_fsm
data = self.read_data_wire
valid = self.read_valid_wire
rest_size = self.read_rest_size
# state 0
fsm.set_index(0)
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(valid_cond)(
wdata(data),
wvalid(1)
)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name, 'read_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_fsm = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name, 'read_rest_size']),
self.addrwidth + 1, initval=0)
self.read_rest_size = rest_size
# state 0
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(self.read_start)(
rest_size(self.read_size)
)
fsm.If(cond).goto_next()
# state 1
data, last, _id, user, dest, valid = self.read_data(cond=fsm)
self.read_data_wire = data
self.read_valid_wire = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(valid_cond)(
wdata(data),
wvalid(1),
)
fsm.If(valid_cond)(
rest_size.dec()
)
fsm.If(valid, rest_size <= 1).goto_next()
for _ in range(self.num_data_delay):
fsm.goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _synthesize_read_fsm_narrow(self, ram, port, ram_method, ram_datawidth):
""" axi.datawidth < ram.datawidth """
if ram_datawidth % self.datawidth != 0:
raise ValueError(
'ram_datawidth must be multiple number of axi.datawidth')
pack_size = ram_datawidth // self.datawidth
dma_size = (self.read_size << int(math.log(pack_size, 2))
if math.log(pack_size, 2) % 1.0 == 0.0 else
self.read_size * pack_size)
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.read_ops:
""" already synthesized op """
return
if pack_size in self.read_narrow_fsms:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_narrow_fsms[pack_size]
pack_count = self.read_narrow_pack_counts[pack_size]
data = self.read_narrow_data_wires[pack_size]
valid = self.read_narrow_valid_wires[pack_size]
rest_size = self.read_narrow_rest_size_wires[pack_size]
# state 0
fsm.set_index(0)
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(rest_size == 0, pack_count > 0)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(valid_cond)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(rest_size == 0, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'read_narrow', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_narrow_fsms[pack_size] = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
self.read_narrow_rest_size_wires[pack_size] = rest_size
# state 0
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, self.read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(self.read_start)(
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
pack_count = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.read_narrow_pack_counts[pack_size] = pack_count
data, last, _id, user, dest, valid = self.read_data(cond=fsm)
self.read_narrow_data_wires[pack_size] = data
self.read_narrow_valid_wires[pack_size] = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(rest_size == 0, pack_count > 0)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(valid_cond)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(rest_size == 0, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:ram_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond)(
rest_size.dec()
)
fsm.If(wvalid, rest_size == 0).goto_next()
for _ in range(self.num_data_delay):
fsm.goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _synthesize_read_fsm_wide(self, ram, port, ram_method, ram_datawidth):
""" axi.datawidth > ram.datawidth """
if self.datawidth % ram_datawidth != 0:
raise ValueError(
'axi.datawidth must be multiple number of ram_datawidth')
pack_size = self.datawidth // ram_datawidth
shamt = int(math.log(pack_size, 2))
res = vtypes.Mux(
vtypes.And(self.read_size, 2 ** shamt - 1) > 0, 1, 0)
dma_size = (self.read_size >> shamt) + res
actual_read_size = dma_size << shamt
op_id = self._get_read_op_id(ram, port, ram_method)
port = vtypes.to_int(port)
if op_id in self.read_ops:
""" already synthesized op """
return
if pack_size in self.read_wide_fsms:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_wide_fsms[pack_size]
pack_count = self.read_wide_pack_counts[pack_size]
data = self.read_wide_data_wires[pack_size]
valid = self.read_wide_valid_wires[pack_size]
rest_size = self.read_wide_rest_size_wires[pack_size]
# state 0
fsm.set_index(0)
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, actual_read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
stay_cond = self.read_op_sel == op_id
fsm.Delay(1)(
wvalid(0)
)
fsm.If(pack_count == 0, valid_cond)(
wdata(data),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count > 0, stay_cond)(
wdata(wdata >> ram_datawidth),
wvalid(1),
pack_count.inc()
)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'read_wide', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_wide_fsms[pack_size] = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
self.read_wide_rest_size_wires[pack_size] = rest_size
# state 0
wdata, wvalid, w = self._get_op_write_dataflow(ram_datawidth)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
ram_method(port, self.read_local_addr, w, actual_read_size,
stride=self.read_local_stride, cond=cond)
fsm.If(self.read_start)(
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
pack_count = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.read_wide_pack_counts[pack_size] = pack_count
cond = vtypes.Ands(fsm.here, pack_count == 0)
data, last, _id, user, dest, valid = self.read_data(cond=cond)
self.read_wide_data_wires[pack_size] = data
self.read_wide_valid_wires[pack_size] = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
stay_cond = self.read_op_sel == op_id
wlast = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'wlast']),
initval=0)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(pack_count == 0, valid_cond)(
wdata(data),
wvalid(1),
wlast(last),
pack_count.inc()
)
fsm.If(pack_count > 0, stay_cond)(
wdata(wdata >> ram_datawidth),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count == pack_size - 1)(
pack_count(0)
)
fsm.If(pack_count == 0, valid_cond)(
rest_size.dec()
)
fsm.If(pack_count == pack_size - 1, rest_size == 0).goto_next()
for _ in range(self.num_data_delay):
fsm.goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _set_flag(self, fsm, prefix='axistreamin_flag'):
flag = self.m.TmpReg(initval=0, prefix=prefix)
fsm(
flag(1)
)
fsm.Delay(1)(
flag(0)
)
fsm.goto_next()
return flag
def _get_read_op_id(self, ram, port, ram_method):
ram_id = ram._id()
port = vtypes.to_int(port)
ram_method_name = (ram_method.func.__name__
if isinstance(ram_method, functools.partial) else
ram_method.__name__)
op = (ram_id, port, ram_method_name)
if op in self.read_op_id_map:
op_id = self.read_op_id_map[op]
else:
op_id = self.read_op_id_count
self.read_op_id_count += 1
self.read_op_id_map[op] = op_id
return op_id
def _get_op_write_dataflow(self, ram_datawidth):
if self.datawidth == ram_datawidth:
wdata = self.m.TmpReg(ram_datawidth, initval=0, prefix='_wdata')
wvalid = self.m.TmpReg(initval=0, prefix='_wvalid')
w = self.df.Variable(wdata, wvalid,
width=ram_datawidth, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
w = self.df._Delay(w)
return (wdata, wvalid, w)
if self.datawidth < ram_datawidth:
wdata = self.m.TmpReg(ram_datawidth, initval=0, prefix='_wdata')
wvalid = self.m.TmpReg(initval=0, prefix='_wvalid')
w = self.df.Variable(wdata, wvalid,
width=ram_datawidth, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
w = self.df._Delay(w)
return (wdata, wvalid, w)
wdata = self.m.TmpReg(self.datawidth, initval=0, prefix='_wdata')
wdata_ram = self.m.TmpWire(ram_datawidth, prefix='_wdata_ram')
wdata_ram.assign(wdata)
wvalid = self.m.TmpReg(initval=0, prefix='_wvalid')
w = self.df.Variable(wdata_ram, wvalid,
width=ram_datawidth, signed=False)
if self.num_data_delay > 0:
for _ in range(self.num_data_delay):
w = self.df._Delay(w)
return (wdata, wvalid, w)
class AXIStreamInFifo(AXIStreamIn):
""" AXI Stream Interface to FIFO for Input """
__intrinsics__ = ('read',
'write_fifo',
'wait_write_fifo')
def write_ram(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
raise NotImplementedError('Use AXIStreamIn.')
def write_ram_async(self, fsm, ram, local_addr, size,
local_stride=1, port=0, ram_method=None):
raise NotImplementedError('Use AXIStreamIn.')
def wait_write_ram(self, fsm):
raise NotImplementedError('Use AXIStreamIn.')
def write_fifo(self, fsm, fifo, size):
if not self.enable_async:
raise ValueError(
"Async mode is disabled. Set 'True' to AXIM.enable_async.")
self.wait_write_fifo(fsm)
self._write_fifo(fsm, fifo, size)
def wait_write_fifo(self, fsm):
fsm.If(self.read_idle).goto_next()
def _get_read_op_id_fifo(self, fifo):
fifo_id = fifo._id()
op = fifo_id
if op in self.read_op_id_map:
op_id = self.read_op_id_map[op]
else:
op_id = self.read_op_id_count
self.read_op_id_count += 1
self.read_op_id_map[op] = op_id
return op_id
def _write_fifo(self, fsm, fifo, size):
if self.num_data_delay != 0:
raise ValueError('num_data_delay must be 0.')
if not isinstance(fifo, FIFO):
raise TypeError('FIFO object is required.')
start = self._set_flag(fsm)
for _ in range(self.num_cmd_delay + 1):
fsm.goto_next()
self._set_read_request_fifo(fifo, start, size)
self._synthesize_read_fsm_fifo(fifo)
def _set_read_request_fifo(self, fifo, start, size):
op_id = self._get_read_op_id_fifo(fifo)
if op_id in self.read_ops:
(read_start, read_op_sel, read_size_in) = self.read_reqs[op_id]
self.seq.If(start)(
read_start(1),
read_op_sel(op_id)
)
return
read_start = self.m.Reg(
'_'.join(['', self.name, fifo.name, 'read_start']),
initval=0)
read_op_sel = self.m.Reg(
'_'.join(['', self.name, fifo.name, 'read_op_sel']),
self.op_sel_width, initval=0)
read_size = self.m.Reg(
'_'.join(['', self.name, fifo.name, 'read_size']),
self.addrwidth + 1, initval=0)
self.seq(
read_start(0)
)
self.seq.If(start)(
read_start(1),
read_op_sel(op_id),
read_size(size),
)
self.read_reqs[op_id] = (read_start, read_op_sel, read_size)
if self.num_cmd_delay > 0:
read_start = self.seq.Prev(read_start, self.num_cmd_delay)
read_op_sel = self.seq.Prev(read_op_sel, self.num_cmd_delay)
read_size = self.seq.Prev(read_size, self.num_cmd_delay)
self.seq.If(read_start)(
self.read_idle(0)
)
self.seq.If(read_start)(
self.read_start(1),
self.read_op_sel(read_op_sel),
self.read_size(read_size),
)
def _synthesize_read_fsm_fifo(self, fifo):
fifo_datawidth = fifo.datawidth
if not isinstance(fifo_datawidth, int):
raise TypeError("fifo_datawidth must be int, not '%s'" %
str(type(fifo_datawidth)))
if self.datawidth == fifo_datawidth:
return self._synthesize_read_fsm_fifo_same(fifo, fifo_datawidth)
if self.datawidth < fifo_datawidth:
return self._synthesize_read_fsm_fifo_narrow(fifo, fifo_datawidth)
return self._synthesize_read_fsm_fifo_wide(fifo, fifo_datawidth)
def _synthesize_read_fsm_fifo_same(self, fifo, fifo_datawidth):
op_id = self._get_read_op_id_fifo(fifo)
if op_id in self.read_ops:
""" already synthesized op """
return
if self.read_fsm is not None:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_fsm
data = self.read_data_wire
valid = self.read_valid_wire
rest_size = self.read_rest_size
# state 0
fsm.set_index(0)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
ack, _ = fifo.enq_rtl(data, cond=valid_cond)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name, 'read_fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_fsm = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name, 'read_rest_size']),
self.addrwidth + 1, initval=0)
self.read_rest_size = rest_size
# state 0
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(self.read_start)(
rest_size(self.read_size)
)
fsm.If(cond).goto_next()
# state 1
ready = vtypes.Not(fifo.almost_full)
read_cond = vtypes.Ands(fsm.here, ready)
data, last, _id, user, dest, valid = self.read_data(cond=read_cond)
self.read_data_wire = data
self.read_valid_wire = valid
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
ack, _ = fifo.enq_rtl(data, cond=valid_cond)
fsm.If(valid_cond)(
rest_size.dec()
)
fsm.If(valid, rest_size <= 1).goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _synthesize_read_fsm_fifo_narrow(self, fifo, fifo_datawidth):
""" axi.datawidth < fifo.datawidth """
if fifo_datawidth % self.datawidth != 0:
raise ValueError(
'fifo_datawidth must be multiple number of axi.datawidth')
pack_size = fifo_datawidth // self.datawidth
dma_size = (self.read_size << int(math.log(pack_size, 2))
if math.log(pack_size, 2) % 1.0 == 0.0 else
self.read_size * pack_size)
op_id = self._get_read_op_id_fifo(fifo)
if op_id in self.read_ops:
""" already synthesized op """
return
if pack_size in self.read_narrow_fsms:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_narrow_fsms[pack_size]
pack_count = self.read_narrow_pack_counts[pack_size]
data = self.read_narrow_data_wires[pack_size]
valid = self.read_narrow_valid_wires[pack_size]
rest_size = self.read_narrow_rest_size_wires[pack_size]
# state 0
fsm.set_index(0)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
wdata = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'wdata']),
fifo_datawidth, initval=0)
wvalid = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'wvalid']),
initval=0)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
ack, _ = fifo.enq_rtl(wdata, cond=wvalid)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(rest_size == 0, pack_count > 0)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(valid_cond)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(rest_size == 0, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'read_narrow', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_narrow_fsms[pack_size] = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
self.read_narrow_rest_size_wires[pack_size] = rest_size
# state 0
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(self.read_start)(
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
pack_count = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.read_narrow_pack_counts[pack_size] = pack_count
ready = vtypes.Not(fifo.almost_full)
read_cond = vtypes.Ands(fsm.here, ready)
data, last, _id, user, dest, valid = self.read_data(cond=read_cond)
self.read_narrow_data_wires[pack_size] = data
self.read_narrow_valid_wires[pack_size] = valid
wdata = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'wdata']),
fifo_datawidth, initval=0)
wvalid = self.m.Reg('_'.join(['', self.name,
'read_narrow', str(pack_size),
'wvalid']),
initval=0)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
ack, _ = fifo.enq_rtl(wdata, cond=wvalid)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(rest_size == 0, pack_count > 0)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(valid_cond)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(0),
pack_count.inc()
)
fsm.If(rest_size == 0, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond, pack_count == pack_size - 1)(
wdata(vtypes.Cat(data, wdata[self.datawidth:fifo_datawidth])),
wvalid(1),
pack_count(0)
)
fsm.If(valid_cond)(
rest_size.dec()
)
fsm.If(wvalid, rest_size == 0).goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
def _synthesize_read_fsm_fifo_wide(self, fifo, fifo_datawidth):
""" axi.datawidth > fifo.datawidth """
if self.datawidth % fifo_datawidth != 0:
raise ValueError(
'axi.datawidth must be multiple number of fifo_datawidth')
pack_size = self.datawidth // fifo_datawidth
shamt = int(math.log(pack_size, 2))
res = vtypes.Mux(
vtypes.And(self.read_size, 2 ** shamt - 1) > 0, 1, 0)
dma_size = (self.read_size >> shamt) + res
actual_read_size = dma_size << shamt
op_id = self._get_read_op_id_fifo(fifo)
if op_id in self.read_ops:
""" already synthesized op """
return
if pack_size in self.read_wide_fsms:
""" new op """
self.read_ops.append(op_id)
fsm = self.read_wide_fsms[pack_size]
pack_count = self.read_wide_pack_counts[pack_size]
data = self.read_wide_data_wires[pack_size]
valid = self.read_wide_valid_wires[pack_size]
rest_size = self.read_wide_rest_size_wires[pack_size]
# state 0
fsm.set_index(0)
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(cond).goto_next()
# state 1
fsm.set_index(1)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
stay_cond = self.read_op_sel == op_id
fsm.Delay(1)(
wvalid(0)
)
fsm.If(pack_count == 0, valid_cond)(
wdata(data),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count > 0, stay_cond)(
wdata(wdata >> fifo_datawidth),
wvalid(1),
pack_count.inc()
)
fsm.If(valid_cond)(
rest_size.dec()
)
return
""" new op and fsm """
fsm = FSM(self.m, '_'.join(['', self.name,
'read_wide', str(pack_size),
'fsm']),
self.clk, self.rst, as_module=self.fsm_as_module)
self.read_wide_fsms[pack_size] = fsm
self.read_ops.append(op_id)
rest_size = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'rest_size']),
self.addrwidth + 1, initval=0)
self.read_wide_rest_size_wires[pack_size] = rest_size
# state 0
cond = vtypes.Ands(self.read_start, self.read_op_sel == op_id)
fsm.If(self.read_start)(
rest_size(dma_size)
)
fsm.If(cond).goto_next()
# state 1
pack_count = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'pack_count']),
int(math.ceil(math.log(pack_size, 2))), initval=0)
self.read_wide_pack_counts[pack_size] = pack_count
ready = vtypes.Not(fifo.almost_full)
read_cond = vtypes.Ands(fsm.here, ready)
cond = vtypes.Ands(fsm.here, pack_count == 0, read_cond)
data, last, _id, user, dest, valid = self.read_data(cond=cond)
self.read_wide_data_wires[pack_size] = data
self.read_wide_valid_wires[pack_size] = valid
wdata = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'wdata']),
self.datawidth, initval=0)
wvalid = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'wvalid']),
initval=0)
valid_cond = vtypes.Ands(valid, self.read_op_sel == op_id)
stay_cond = self.read_op_sel == op_id
ack, _ = fifo.enq_rtl(wdata, cond=wvalid)
wlast = self.m.Reg('_'.join(['', self.name,
'read_wide', str(pack_size),
'wlast']),
initval=0)
fsm.Delay(1)(
wvalid(0)
)
fsm.If(pack_count == 0, valid_cond)(
wdata(data),
wvalid(1),
wlast(last),
pack_count.inc()
)
fsm.If(pack_count > 0, stay_cond)(
wdata(wdata >> fifo_datawidth),
wvalid(1),
pack_count.inc()
)
fsm.If(pack_count == pack_size - 1)(
pack_count(0)
)
fsm.If(pack_count == 0, valid_cond)(
rest_size.dec()
)
fsm.If(pack_count == pack_size - 1, rest_size == 0).goto_next()
# state 2
set_idle = self._set_flag(fsm)
self.seq.If(set_idle)(
self.read_idle(1)
)
fsm.goto_init()
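# Usage sketch (commented out; not part of the original module, and the host
# design details are assumptions): both classes are meant to be constructed
# inside a veriloggen design and driven from a thread/FSM through their
# __intrinsics__ methods, e.g.
#
#   m = Module('top')
#   clk = m.Input('CLK')
#   rst = m.Input('RST')
#   saxi = AXIStreamIn(m, 'saxi', clk, rst, datawidth=32)
#   ...
#   saxi.write_ram(fsm, ram, local_addr=0, size=64)   # RAM variant
#   # or, with AXIStreamInFifo:
#   #   saxi.write_fifo(fsm, fifo, size=64)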
|
StarcoderdataPython
|
1787079
|
<gh_stars>0
import os
from boogie.configurations.tools import module_exists
from .paths import PathsConf
class TemplatesConf(PathsConf):
"""
Configure templates.
"""
def get_templates(self):
templates = [self.DJANGO_TEMPLATES, self.JINJA_TEMPLATES]
return [x for x in templates if x]
#
# DJANGO TEMPLATES
#
def get_django_templates(self):
return {
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": self.DJANGO_TEMPLATES_DIRS,
"APP_DIRS": True,
"OPTIONS": {"context_processors": self.DJANGO_CONTEXT_PROCESSORS},
}
def get_django_context_processors(self):
return [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
#
# JINJA2 TEMPLATES
#
def get_jinja_templates(self):
options = {}
env = self.JINJA_ENVIRONMENT
if env is not None:
options["environment"] = env
return {
"BACKEND": "django.template.backends.jinja2.Jinja2",
"DIRS": self.JINJA_TEMPLATES_DIRS,
"APP_DIRS": True,
"OPTIONS": {"extensions": self.JINJA_EXTENSIONS, **options},
}
def get_django_templates_dirs(self):
return []
def get_jinja_templates_dirs(self):
return []
def get_jinja_extensions(self):
return ["jinja2.ext.i18n"]
def get_jinja_environment(self):
base, _, end = os.environ["DJANGO_SETTINGS_MODULE"].rpartition(".")
module = base + ".jinja2"
if module_exists(module):
return module + ".environment"
return "boogie.jinja2.boogie_environment"
|
StarcoderdataPython
|
1752735
|
import numpy as np
import torch
def generate_eom(q, qdot):
M = np.array([
[5/3 + np.cos(q[1]), 1/3 + 1/2*np.cos(q[1])],
[1/3 + 1/2*np.cos(q[1]), 1/3 ]
])
c = np.array([
[-1/2*(2*qdot[0]*qdot[1] + qdot[1]**2)*np.sin(q[1])],
[1/2*(qdot[0]**2)*np.sin(q[1])]
])
g = np.array([
[-3/2 * np.sin(q[0]) - 1/2*np.sin(q[0]+q[1])],
[-1/2 * np.sin(q[0] + q[1])]
]) * 9.8
return M, c, g
def split_states(states):
return states[...,0], states[...,1], states[...,2]
class Datasets(torch.utils.data.Dataset):
def __init__(self, states, targets):
assert len(states) == len(targets)
self.data_num = len(targets)
self.states = states
self.targets = targets
def __len__(self):
return self.data_num
def __getitem__(self, idx):
return self.states[idx], self.targets[idx]
def gen_loader(states, targets, batch_size=1, shuffle=False):
states, targets = torch.tensor(states), torch.tensor(targets)
datasets = Datasets(states, targets)
train_loader = torch.utils.data.DataLoader(datasets, batch_size=batch_size, shuffle=shuffle)
return train_loader
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
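if __name__ == '__main__':
    # Usage sketch added for illustration (synthetic data, not part of the
    # original module): batch random state/target pairs with gen_loader and
    # track a running statistic with AverageMeter.
    states = np.random.randn(8, 3).astype(np.float32)
    targets = np.random.randn(8, 1).astype(np.float32)
    loader = gen_loader(states, targets, batch_size=4, shuffle=True)
    meter = AverageMeter()
    for _states, batch_targets in loader:
        meter.update(batch_targets.abs().mean().item(), n=len(batch_targets))
    print('mean |target| over all batches:', meter.avg)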
|
StarcoderdataPython
|
1678527
|
from typing import Optional
from fastapi.encoders import jsonable_encoder
from .models import Conversation, ConversationCreate, ConversationUpdate
def get(*, db_session, conversation_id: int) -> Optional[Conversation]:
"""Returns a conversation based on the given conversation id."""
return db_session.query(Conversation).filter(Conversation.id == conversation_id).one_or_none()
def get_by_channel_id(db_session, channel_id: str) -> Optional[Conversation]:
"""Returns a conversation based on the given channel id."""
return (
db_session.query(Conversation).filter(Conversation.channel_id == channel_id).one_or_none()
)
def get_all(*, db_session):
"""Returns all conversations."""
return db_session.query(Conversation)
def create(*, db_session, conversation_in: ConversationCreate) -> Conversation:
"""Creates a new conversation."""
conversation = Conversation(**conversation_in.dict())
db_session.add(conversation)
db_session.commit()
return conversation
def update(
*, db_session, conversation: Conversation, conversation_in: ConversationUpdate
) -> Conversation:
"""Updates a conversation."""
conversation_data = jsonable_encoder(conversation)
update_data = conversation_in.dict(skip_defaults=True)
for field in conversation_data:
if field in update_data:
setattr(conversation, field, update_data[field])
db_session.add(conversation)
db_session.commit()
return conversation
def delete(*, db_session, conversation_id: int):
"""Deletes a conversation."""
db_session.query(Conversation).filter(Conversation.id == conversation_id).delete()
db_session.commit()
|
StarcoderdataPython
|
3218343
|
<filename>crypto_trading/algo/security.py
import json
import logging
from . import model
from . import utils
class Security(object):
"""Class to thread hold to sell when the lost in a transaction is
too high."""
def __init__(self, config_dict):
"""Class Initialisation."""
logging.debug('')
config = json.load(open(config_dict, mode='r'))
self.mean = None
self.maxLost = None
self.take_profit = None
if config.get('maxLost') is not None:
model.MaxLost.__PERCENTAGE__ = config.get('maxLost').get(
'percentage')
model.MaxLost.__UPDATE__ = config.get('maxLost').get(
'percentage_update', 1)
self.mean = config.get('maxLost').get('mean')
if config.get('takeProfit') is not None:
self.take_profit = config.get('takeProfit').get('percentage')
if self.mean:
model.create()
def process(self, current_value, currency):
logging.debug('')
if self.mean:
real_values = model.pricing.get_last_values(
count=self.mean,
currency=currency)
model.rolling_mean_pricing.insert_value(
currency=currency,
frequency=self.mean,
values=real_values)
def sell(self, current_value, transaction):
"""Process data, it returned 1 to buy and -1 to sell."""
logging.debug('')
# Get last security
if not self.maxLost or self.maxLost.transaction_id != transaction.id:
result = model.MaxLost.select(
model.MaxLost.q.transaction_id == transaction.id)
self.maxLost = result[0] if result.count() else None
if not self.maxLost:
self.maxLost = model.MaxLost(
buy_value=transaction.currency_buy_value,
min_gain=100 - model.MaxLost.__PERCENTAGE__,
min_value=(transaction.currency_buy_value
* (100 - model.MaxLost.__PERCENTAGE__)/100),
transaction_id=transaction.id)
if self.maxLost.process(current_value):
return True
percentage = current_value/transaction.currency_buy_value * 100
if self.take_profit and percentage >= 100 + self.take_profit:
logging.warning('Take Profit: value {}, gain:{}'.format(
current_value, percentage))
return True
# Sell if value goes below this mean.
if self.mean:
avg = model.rolling_mean_pricing.get_last_values(
transaction.currency,
frequency=self.mean,
count=1)[0]
if avg and current_value <= avg:
logging.error('SELL: Current Value: {} lower than mean {} '
'at frequency: {}'.format(
current_value, avg, self.mean))
return True
return False
def buy(self, current_value, currency):
# Buy if current_value is upper than mean and mean is increasing.
if self.mean:
values = model.rolling_mean_pricing.get_last_values(
currency,
frequency=self.mean,
count=10)
if all(x is not None for x in values):
return utils.is_increasing(values)
return False
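# Configuration sketch (inferred from Security.__init__ above; the values are
# illustrative). Example content of the JSON file passed as ``config_dict``:
#
#   {
#       "maxLost": {"percentage": 5, "percentage_update": 1, "mean": 20},
#       "takeProfit": {"percentage": 10}
#   }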
|
StarcoderdataPython
|
3384070
|
<filename>test_to_padding.py
import cv2
import os
import numpy as np
#coding=utf-8
def get_file_name(path):
file_list=[]
for root, dirs, files in os.walk(path, topdown=False):
for file in files:
#full_file=os.path.join(root,file)
file_list.append(file)
return file_list
if __name__=='__main__':
ori_path='./test_images'
pad_path='./intermediate_file/padding_images_to_detection'
if not os.path.isdir(pad_path):
os.mkdir(pad_path)
files=get_file_name(ori_path)
count=0
for file in files:
image=cv2.imdecode(np.fromfile(os.path.join(ori_path,file),dtype=np.uint8),-1)
#print(file,'shape is ',image.shape)
h,w,_=image.shape
new_fn=file
if h>w:
#higher
diff=(h-w)//2
biger_image=cv2.copyMakeBorder(image,0, 0, diff, diff, cv2.BORDER_REPLICATE)
new_fn='h_'+str(diff)+'_'+new_fn
else:
#wider
diff=(w-h)//2
biger_image=cv2.copyMakeBorder(image,diff, diff, 0, 0, cv2.BORDER_REPLICATE)
new_fn='w_'+str(diff)+'_'+file
new_fn=os.path.join(pad_path,new_fn)
#print(new_fn,'shape is',biger_image.shape)
cv2.imencode('.jpg', biger_image)[1].tofile(new_fn)
#count+=1
#if count>10:
# break
print('save')
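# Worked example (illustrative, not one of the real test images): an input of
# shape (300, 200, 3) is taller than wide, so diff = (300 - 200) // 2 = 50
# replicated columns are added on the left and right by cv2.copyMakeBorder,
# giving a (300, 300, 3) image saved as 'h_50_<original name>'. The 'h_'/'w_'
# prefix and the offset in the filename record how much padding was added, so
# a later stage can presumably map detections back to the original coordinates.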
|
StarcoderdataPython
|
1733790
|
<filename>cdk/infrastructure/stages/dev.py
import aws_cdk as cdk
from constructs import Construct
from infrastructure.constructs.existing import igvf_dev
from infrastructure.config import Config
from infrastructure.stacks.backend import BackendStack
from infrastructure.stacks.postgres import PostgresStack
from typing import Any
class DevelopmentDeployStage(cdk.Stage):
def __init__(
self,
scope: Construct,
construct_id: str,
*,
config: Config,
**kwargs: Any
) -> None:
super().__init__(scope, construct_id, **kwargs)
self.postgres_stack = PostgresStack(
self,
'PostgresStack',
config=config,
existing_resources_class=igvf_dev.Resources,
env=igvf_dev.US_WEST_2,
)
self.backend_stack = BackendStack(
self,
'BackendStack',
config=config,
postgres=self.postgres_stack.postgres,
existing_resources_class=igvf_dev.Resources,
env=igvf_dev.US_WEST_2,
)
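# Hedged usage sketch (illustrative; the actual wiring lives elsewhere in the
# cdk package): a stage like this would typically be added to a CDK app or
# pipeline, e.g.
#   app = cdk.App()
#   DevelopmentDeployStage(app, 'DevelopmentDeployStage', config=some_config)
#   app.synth()
# where some_config is an infrastructure.config.Config instance.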
|
StarcoderdataPython
|
63092
|
<filename>libft/regularizers/__init__.py
from libft.regularizers.l1 import L1
from libft.regularizers.l1l2 import L1L2
from libft.regularizers.l2 import L2
from libft.regularizers.regularizer import Regularizer
REGULARIZERS = {
'l1': L1,
'l2': L2,
'l1l2': L1L2,
}
def get(identifier, **kwargs):
"""Regularizer instance getter.
Arguments:
identifier: string or Regularizer
An Regularizer instance or it's name.
kwargs: dict
Keywords arguments for instance initialisation.
Raises:
ValueError:
If identifier does not match with an existing Regularizer instance.
Returns:
An Regularizer instance.
"""
if identifier is None:
return None
if isinstance(identifier, Regularizer):
return identifier
identifier = identifier.lower()
if identifier not in REGULARIZERS:
raise ValueError(f"Could not interpret Regularizer instance "
f"identifier: {identifier}")
regularizer = REGULARIZERS[identifier](**kwargs)
return regularizer
__all__ = [
'get',
'L1',
'L2',
'L1L2',
]
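# Hedged usage sketch: exercises only the lookup paths of get() that do not
# depend on per-class constructor signatures (those live in the individual
# regularizer modules and may require keyword arguments such as a penalty
# factor).
if __name__ == '__main__':
    print(get(None))             # None passes straight through
    try:
        get('dropout')           # unknown identifiers raise ValueError
    except ValueError as err:
        print(err)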
|
StarcoderdataPython
|
3263230
|
<reponame>AllenInstitute/OpenScope_CA_Analysis
"""
corr_analys.py
This script contains functions for USI correlation analysis.
Authors: <NAME>
Date: January, 2021
Note: this code uses python 3.7.
"""
import copy
import logging
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
import scipy.ndimage as scind
from util import logger_util, gen_util, math_util, rand_util
from sess_util import sess_ntuple_util
from analysis import misc_analys, usi_analys
logger = logging.getLogger(__name__)
TAB = " "
#############################################
def get_corr_pairs(sess_df, consec_only=True):
"""
get_corr_pairs(sess_df)
Returns correlation pairs.
Required args:
- sess_df (pd.DataFrame):
dataframe containing session information, including the following
keys: "sess_ns", "lines", "planes"
Optional args:
- consec_only (bool):
if True, only consecutive session numbers are correlated
default: True
Returns:
- corr_ns (list):
session number pairs, e.g. [[s1, s2], [s2, s3], ...]
"""
# identify correlation pairs
corr_ns = []
for _, lp_df in sess_df.groupby(["lines", "planes"]):
sess_ns = np.sort(lp_df["sess_ns"].unique())
if len(sess_ns) == 1:
continue
for i, sess1 in enumerate(sess_ns):
for sess2 in sess_ns[i + 1:]:
if consec_only and (sess2 - sess1 != 1):
continue
corr_pair = [sess1, sess2]
if corr_pair not in corr_ns:
corr_ns.append(corr_pair)
if len(corr_ns) == 0:
raise RuntimeError("No session pairs found.")
return corr_ns
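# Hedged, illustrative example (synthetic dataframe, not real session data):
# >>> _df = pd.DataFrame({"lines": ["L23"] * 3, "planes": ["dend"] * 3,
# ...                     "sess_ns": [1, 2, 3]})
# >>> get_corr_pairs(_df)
# [[1, 2], [2, 3]]
# With consec_only=False, the non-consecutive pair [1, 3] is also included.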
#############################################
def set_multcomp(permpar, sessions, analyspar, consec_only=True, factor=1):
"""
set_multcomp(permpar, sessions, analyspar)
Returns permpar updated with the number of comparisons computed from the
sessions.
Required args:
- permpar (PermPar or dict):
named tuple containing permutation parameters
- sessions (list):
Session objects
- analyspar (AnalysPar):
named tuple containing analysis parameters
Optional args:
- consec_only (bool):
if True, only consecutive session numbers are correlated
default: True
- factor (int):
multiplicative factor
default: 1
Returns:
- permpar (PermPar):
updated permutation parameter named tuple
"""
sess_df = misc_analys.get_check_sess_df(sessions, analyspar=analyspar)
n_comps = 0
for _, lp_df in sess_df.groupby(["lines", "planes"]):
corr_ns = get_corr_pairs(lp_df, consec_only=consec_only)
n_comps += len(corr_ns)
n_comps = n_comps * factor
permpar = sess_ntuple_util.get_modif_ntuple(permpar, "multcomp", n_comps)
return permpar
#############################################
def get_corr_info(permpar, corr_type="corr", permute="sess", norm=True):
"""
get_corr_info(permpar)
Returns updated correlation parameters.
Required args:
- permpar (PermPar):
named tuple containing permutation parameters.
Optional args:
- corr_type (str):
type of correlation to run, i.e. "corr" or "R_sqr"
default: "corr"
- permute (str):
type of permutation to do ("tracking", "sess" or "all")
default: "sess"
- norm (bool):
if True, normalized correlation data is returned, if corr_type is
"diff_corr"
default: True
Returns:
- corr_type (str):
type of correlation to run, i.e. "corr" or "R_sqr"
default: "corr"
- paired (bool):
type of permutation pairing
default: True
- norm (bool):
if True, normalized correlation data is returned, if corr_type is
"diff_corr"
default: True
"""
# determine type of randomization to use
if permute == "sess":
paired = True
elif permute == "tracking":
paired = "within"
elif permute == "all":
paired = False
else:
gen_util.accepted_values_error(
"permute", permute, ["sess", "tracking", "all"]
)
# get permutation information
if permute in ["sess", "all"] and "diff_" not in corr_type:
corr_type = f"diff_{corr_type}"
if corr_type != "diff_corr":
norm = False # does not apply
if "R_sqr" in corr_type and permpar.tails != "hi":
raise NotImplementedError(
"For R-squared analyses, permpar.tails should be set to 'hi'."
)
corr_types = ["corr", "diff_corr", "R_sqr", "diff_R_sqr"]
if corr_type not in corr_types:
gen_util.accepted_values_error("corr_type", corr_type, corr_types)
return corr_type, paired, norm
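# Hedged, illustrative example: with the defaults, permute="sess" gives paired
# permutations and prefixes the correlation type, so
# get_corr_info(permpar, corr_type="corr", permute="sess") returns
# ("diff_corr", True, True), while permute="tracking" leaves corr_type as
# "corr" and returns paired="within" with norm forced to False.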
#############################################
def get_norm_corrs(corr_data, med=0, corr_type="diff_corr"):
"""
get_norm_corrs(corr_data)
Returns normalized correlation values.
Required args:
- corr_data (1D array):
values to normalize
Optional args:
- med (float):
null distribution median for normalization
default: 0
- corr_type (str):
type of correlation run (for checking), i.e. "diff_corr"
default: "corr"
Returns:
- norm_corr_data (1D array): normalized correlations
"""
if corr_type != "diff_corr":
raise ValueError("Normalization should only be used with 'diff_corr'.")
corr_data = np.asarray(corr_data)
# normalize all data
if np.absolute(med) > 1:
raise RuntimeError(
"Absolute correlations should not be greater than 1."
)
lens_to_bound = np.asarray([np.absolute(med + 1), np.absolute(1 - med)])
corr_sides = (corr_data > med).astype(int)
norm_corr_data = (corr_data - med) / lens_to_bound[corr_sides]
return norm_corr_data
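# Hedged worked example: with a null-distribution median of 0.2, a correlation
# of 0.6 lies 0.4 above the median and the distance to the upper bound (1.0)
# is 0.8, so it normalizes to 0.4 / 0.8 = 0.5; a correlation of -0.2 lies 0.4
# below the median and the distance to the lower bound (-1.0) is 1.2, so it
# normalizes to -0.4 / 1.2 ≈ -0.333.
# >>> get_norm_corrs(np.array([0.6, -0.2]), med=0.2, corr_type="diff_corr")
# array([ 0.5       , -0.33333333])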
#############################################
def corr_bootstrapped_std(data, n_samples=1000, randst=None, corr_type="corr",
return_rand=False, nanpol=None, med=0, norm=True):
"""
corr_bootstrapped_std(data)
Returns bootstrapped standard deviation for Pearson correlations.
Required args:
- data (2D array):
values to correlate for each of 2 groups (2, n)
Optional args:
- corr_type (str):
type of correlation to compute, i.e. "corr" or "diff_corr"
default: "corr"
- n_samples (int):
number of samplings to take for bootstrapping
default: 1000
- randst (int or np.random.RandomState):
seed or random state to use when generating random values.
default: None
- return_rand (bool): if True, random correlations are returned
default: False
- nanpol (str):
policy for NaNs, "omit" or None
default: None
- med (float):
null distribution median for normalization, if norm is True
default: 0
- norm (bool):
if True, normalized correlation data is returned
default: True
Returns:
- bootstrapped_std (float):
bootstrapped standard deviation of correlations,
normalized if norm is True
if return_rand:
- rand_corrs (1D array):
randomly generated correlations, normalized if norm is True
"""
randst = rand_util.get_np_rand_state(randst, set_none=True)
n_samples = int(n_samples)
data = np.asarray(data)
if len(data.shape) != 2 or data.shape[0] != 2:
raise ValueError(
"data must have 2 dimensions, with the first having length 2."
)
n = data.shape[1]
# random choices
choices = np.arange(n)
# random corrs
rand_corrs = math_util.calc_op(
list(data[:, randst.choice(choices, (n, n_samples), replace=True)]),
op=corr_type, nanpol=nanpol, axis=0,
)
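# Note (added comment): the same random column indices are applied to both
# rows of `data`, so each bootstrap draw resamples datapoints with replacement
# while keeping the pairing between the two groups intact.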
if norm:
rand_corrs = get_norm_corrs(rand_corrs, med=med, corr_type=corr_type)
bootstrapped_std = math_util.error_stat(
rand_corrs, stats="mean", error="std", nanpol=nanpol
)
if return_rand:
return bootstrapped_std, rand_corrs
else:
return bootstrapped_std
#############################################
def get_corr_data(sess_pair, data_df, analyspar, permpar,
corr_type="corr", permute="sess", absolute=False, norm=True,
return_data=False, return_rand=False, n_rand_ex=1,
randst=None, raise_no_pair=True):
"""
get_corr_data(sess_pair, data_df, analyspar, permpar)
Returns correlation data for a session pair.
Required args:
- sess_pair (list):
sessions to correlate, e.g. [1, 2]
- data_df (pd.DataFrame):
dataframe with one row per line/plane/session, and the following
columns, in addition to the basic sess_df columns:
- roi_idxs (list): index for each ROI
- analyspar (AnalysPar):
named tuple containing analysis parameters
- permpar (PermPar):
named tuple containing permutation parameters.
Optional args:
- corr_type (str):
type of correlation to run, i.e. "corr" or "R_sqr"
default: "corr"
- permute (str):
type of permutation to do ("tracking", "sess" or "all")
default: "sess"
- absolute (bool):
if True, absolute USIs are used for correlation calculation instead
of signed USIs
default: False
- norm (bool):
if True, normalized correlation data is returned, if corr_type is
"diff_corr"
default: True
- return_data (bool):
if True, data to correlate is returned
default: False
- return_rand (bool):
if True, random normalized correlation values are returned, along
with random data to correlate for one example permutation
default: False
- n_rand_ex (int):
number of examples to return, if return_rand is True
default: 1
- randst (int or np.random.RandomState):
random state or seed value to use. (-1 treated as None)
default: None
- raise_no_pair (bool):
if True, if sess_pair session numbers are not found, an error is
raised. Otherwise, None is returned.
default: True
Returns:
- roi_corr (float):
(normalized) correlation between sessions
- roi_corr_std (float):
bootstrapped standard deviation for the (normalized) correlation
between sessions
- null_CI (1D array):
adjusted, null CI for the (normalized) correlation between sessions
- p_val (float):
uncorrected p-value for the correlation between sessions
if return_data:
- corr_data (2D array):
data to correlate (grps (2) x datapoints)
if return_rand:
- rand_corrs (1D array):
(normalized) random correlation between sessions
- rand_ex (3D array):
example randomized data pairs to correlate
(grps (2) x datapoints x n_rand_ex)
- rand_ex_corr (1D array):
correlation for example randomized data pairs
"""
nanpol = None if analyspar.rem_bad else "omit"
if analyspar.stats != "mean" or analyspar.error != "std":
raise NotImplementedError(
"analyspar.stats must be set to 'mean', and "
"analyspar.error must be set to 'std'."
)
roi_idxs = []
for sess_n in sess_pair:
row = data_df.loc[data_df["sess_ns"] == sess_n]
if len(row) < 1:
continue
elif len(row) > 1:
raise RuntimeError("Expected at most one row.")
data = np.asarray(row.loc[row.index[0], "roi_idxs"])
roi_idxs.append(data)
if len(roi_idxs) != 2:
if raise_no_pair:
raise RuntimeError("Session pairs not found.")
else:
return None
if roi_idxs[0].shape != roi_idxs[1].shape:
raise RuntimeError(
"Sessions should have the same number of ROI indices."
)
# get updated correlation parameters
corr_type, paired, norm = get_corr_info(
permpar, corr_type=corr_type, permute=permute, norm=norm
)
# check correlation type and related parameters
corr_data = np.vstack([roi_idxs[0], roi_idxs[1]]) # 2 x datapoints
if absolute:
corr_data = np.absolute(corr_data)
# get actual correlation
roi_corr = math_util.calc_op(corr_data, nanpol=nanpol, op=corr_type)
# get first set of random values
if return_rand:
use_randst = copy.deepcopy(randst)
if paired:
perm_data = corr_data.T # groups x datapoints (2)
else:
perm_data = corr_data.reshape(1, -1) # 2 groups concatenate
rand_exs = rand_util.run_permute(
perm_data, n_perms=n_rand_ex, paired=paired, randst=use_randst
)
rand_exs = np.transpose(rand_exs, [1, 0, 2])
if not paired:
rand_exs = rand_exs.reshape(2, -1, n_rand_ex)
rand_ex_corrs = math_util.calc_op(
rand_exs, nanpol=nanpol, op=corr_type, axis=1
)
# get random correlation info
returns = rand_util.get_op_p_val(
corr_data, n_perms=permpar.n_perms,
stats=analyspar.stats, op=corr_type, return_CIs=True,
p_thresh=permpar.p_val, tails=permpar.tails,
multcomp=permpar.multcomp, paired=paired, nanpol=nanpol,
return_rand=return_rand, randst=randst
)
if return_rand:
p_val, null_CI, rand_corrs = returns
else:
p_val, null_CI = returns
med = null_CI[1]
null_CI = np.asarray(null_CI)
if norm:
# normalize all data
roi_corr = float(get_norm_corrs(roi_corr, med=med, corr_type=corr_type))
null_CI = get_norm_corrs(null_CI, med=med, corr_type=corr_type)
# get bootstrapped std over corr
roi_corr_std = corr_bootstrapped_std(
corr_data, n_samples=misc_analys.N_BOOTSTRP, randst=randst,
return_rand=False, nanpol=nanpol, norm=norm, med=med,
corr_type=corr_type
)
returns = [roi_corr, roi_corr_std, null_CI, p_val]
if return_data:
corr_data = np.vstack(corr_data)
if "diff" in corr_type: # take diff
corr_data[1] = corr_data[1] - corr_data[0]
returns = returns + [corr_data]
if return_rand:
if norm:
rand_corrs = get_norm_corrs(
rand_corrs, med=med, corr_type=corr_type
)
if "diff" in corr_type: # take diff
rand_exs[1] = rand_exs[1] - rand_exs[0]
returns = returns + [rand_corrs, rand_exs, rand_ex_corrs]
return returns
#############################################
def get_lp_idx_df(sessions, analyspar, stimpar, basepar, idxpar, permpar=None,
sig_only=False, randst=None, parallel=False):
"""
get_lp_idx_df(sessions, analyspar, stimpar, basepar, idxpar)
Returns ROI index dataframe, grouped by line/plane/session.
Required args:
- sessions (list):
Session objects
- analyspar (AnalysPar):
named tuple containing analysis parameters
- stimpar (StimPar):
named tuple containing stimulus parameters
- basepar (BasePar):
named tuple containing baseline parameters
- idxpar (IdxPar):
named tuple containing index parameters
Optional args:
- permpar (PermPar):
named tuple containing permutation parameters, required if
sig_only is True
default: None
- sig_only (bool):
if True, ROIs with significant USIs are included
(only possible if analyspar.tracked is True)
default: False
- randst (int or np.random.RandomState):
random state or seed value to use. (-1 treated as None)
default: None
- parallel (bool):
if True, some of the analysis is run in parallel across CPU cores
default: False
Returns:
- lp_idx_df (pd.DataFrame):
dataframe with one row per line/plane/session, and the following
columns, in addition to the basic sess_df columns:
- roi_idxs (list): index for each ROI
(or each ROI that is significant in at least one session,
if sig_only)
"""
if analyspar.tracked:
misc_analys.check_sessions_complete(sessions, raise_err=True)
if sig_only and permpar is None:
raise ValueError("If sig_only is True, permpar cannot be None.")
initial_columns = misc_analys.get_sess_df_columns(sessions[0], analyspar)
args_dict = {
"analyspar": analyspar,
"stimpar" : stimpar,
"basepar" : basepar,
"idxpar" : idxpar,
"parallel" : parallel,
}
if sig_only:
idx_df = usi_analys.get_idx_sig_df(
sessions,
permpar=permpar,
randst=randst,
aggreg_sess=True,
**args_dict
)
else:
idx_df = usi_analys.get_idx_only_df(sessions, **args_dict)
# aggregate by line/plane/session
lp_idx_df = pd.DataFrame(columns=initial_columns + ["roi_idxs"])
# aggregate within line/plane/sessions
group_columns = ["lines", "planes", "sess_ns"]
aggreg_cols = [col for col in initial_columns if col not in group_columns]
for grp_vals, grp_df in idx_df.groupby(group_columns):
grp_df = grp_df.sort_values("mouse_ns")
row_idx = len(lp_idx_df)
for g, group_column in enumerate(group_columns):
lp_idx_df.loc[row_idx, group_column] = grp_vals[g]
# add aggregated values for initial columns
lp_idx_df = misc_analys.aggreg_columns(
grp_df, lp_idx_df, aggreg_cols, row_idx=row_idx, in_place=True
)
roi_idxs = grp_df["roi_idxs"].tolist()
if sig_only:
roi_idxs = [
np.asarray(idx_vals)[np.asarray(sig_ns).astype(int)]
for idx_vals, sig_ns in zip(roi_idxs, grp_df["sig_idxs"])
]
lp_idx_df.at[row_idx, "roi_idxs"] = np.concatenate(roi_idxs).tolist()
lp_idx_df["sess_ns"] = lp_idx_df["sess_ns"].astype(int)
return lp_idx_df
#############################################
def get_basic_idx_corr_df(lp_idx_df, consec_only=False, null_CI_cols=True):
"""
get_basic_idx_corr_df(lp_idx_df)
Returns index correlation dataframe for each line/plane, and optionally
columns added for null confidence intervals.
Required args:
- lp_idx_df (pd.DataFrame):
dataframe with one row per line/plane/session, and the following
columns, in addition to the basic sess_df columns:
- roi_idxs (list): index for each ROI
Optional args:
- consec_only (bool):
if True, only consecutive session numbers are correlated
default: True
- null_CI_cols (bool):
if True, null CI columns are included in the dataframe.
Returns:
- idx_corr_df (pd.DataFrame):
dataframe with one row per line/plane, and the following
columns, in addition to the basic sess_df columns:
- roi_idxs (list): index for each ROI
if null_CI_cols:
for session comparisons, e.g. 1v2
- {}v{}_null_CIs (object): empty
"""
initial_columns = [col for col in lp_idx_df.columns if col != "roi_idxs"]
# get correlation pairs
corr_ns = get_corr_pairs(lp_idx_df, consec_only=consec_only)
# aggregate by line/plane for correlation dataframe
group_columns = ["lines", "planes"]
all_columns = initial_columns
if null_CI_cols:
CI_columns = [
f"{corr_pair[0]}v{corr_pair[1]}_null_CIs" for corr_pair in corr_ns
]
all_columns = initial_columns + CI_columns
idx_corr_df = pd.DataFrame(columns=all_columns)
aggreg_cols = [
col for col in initial_columns if col not in group_columns
]
for grp_vals, grp_df in lp_idx_df.groupby(group_columns):
grp_df = grp_df.sort_values("sess_ns") # mice already aggregated
row_idx = len(idx_corr_df)
for g, group_column in enumerate(group_columns):
idx_corr_df.loc[row_idx, group_column] = grp_vals[g]
# add aggregated values for initial columns
idx_corr_df = misc_analys.aggreg_columns(
grp_df, idx_corr_df, aggreg_cols, row_idx=row_idx,
in_place=True, sort_by="sess_ns"
)
# amend mouse info
for col in ["mouse_ns", "mouseids"]:
vals = [tuple(ns) for ns in idx_corr_df.loc[row_idx, col]]
if len(list(set(vals))) != 1:
raise RuntimeError(
"Aggregated sessions should share same mouse "
"information."
)
idx_corr_df.at[row_idx, col] = list(vals[0])
return idx_corr_df
#############################################
def get_ex_idx_corr_norm_df(sessions, analyspar, stimpar, basepar, idxpar,
permpar, permute="sess", sig_only=False, n_bins=40,
randst=None, parallel=False):
"""
get_ex_idx_corr_norm_df(sessions, analyspar, stimpar, basepar, idxpar,
permpar)
Returns example correlation normalization data.
Required args:
- sessions (list):
Session objects
- analyspar (AnalysPar):
named tuple containing analysis parameters
- stimpar (StimPar):
named tuple containing stimulus parameters
- basepar (BasePar):
named tuple containing baseline parameters
- idxpar (IdxPar):
named tuple containing index parameters
- permpar (PermPar):
named tuple containing permutation parameters.
Optional args:
- permute (str):
type of permutation to do ("tracking", "sess" or "all")
default: "sess"
- sig_only (bool):
if True, ROIs with significant USIs are included
(only possible if analyspar.tracked is True)
default: False
- n_bins (int):
number of bins
default: 40
- randst (int or np.random.RandomState):
seed value to use. (-1 treated as None)
default: None
- parallel (bool):
if True, some of the analysis is run in parallel across CPU cores
default: False
Returns:
- idx_corr_norm_df (pd.DataFrame):
dataframe with one row for a line/plane, and the
following columns, in addition to the basic sess_df columns:
for a specific session comparison, e.g. 1v2
- {}v{}_corrs (float): unnormalized intersession ROI index
correlations
- {}v{}_norm_corrs (float): normalized intersession ROI index
correlations
- {}v{}_rand_ex_corrs (float): unnormalized intersession
ROI index correlations for an example of randomized data
- {}v{}_rand_corr_meds (float): median of randomized correlations
- {}v{}_corr_data (list): intersession values to correlate
- {}v{}_rand_ex (list): intersession values for an example of
randomized data
- {}v{}_rand_corrs_binned (list): binned random unnormalized
intersession ROI index correlations
- {}v{}_rand_corrs_bin_edges (list): bins edges
"""
nanpol = None if analyspar.rem_bad else "omit"
initial_columns = misc_analys.get_sess_df_columns(sessions[0], analyspar)
lp_idx_df = get_lp_idx_df(
sessions,
analyspar=analyspar,
stimpar=stimpar,
basepar=basepar,
idxpar=idxpar,
permpar=permpar,
sig_only=sig_only,
randst=randst,
parallel=parallel,
)
idx_corr_norm_df = get_basic_idx_corr_df(lp_idx_df, consec_only=False)
if len(idx_corr_norm_df) != 1:
raise ValueError("sessions should be from the same line/plane.")
# get correlation pairs
corr_ns = get_corr_pairs(lp_idx_df)
if len(corr_ns) != 1:
raise ValueError("Sessions should allow only one pair.")
sess_pair = corr_ns[0]
corr_name = f"{sess_pair[0]}v{sess_pair[1]}"
drop_columns = [
col for col in idx_corr_norm_df.columns if col not in initial_columns
]
idx_corr_norm_df = idx_corr_norm_df.drop(columns=drop_columns)
logger.info(
("Calculating ROI USI correlations for a single session pair..."),
extra={"spacing": TAB}
)
corr_type = "diff_corr"
returns = get_corr_data(
sess_pair,
data_df=lp_idx_df,
analyspar=analyspar,
permpar=permpar,
permute=permute,
corr_type=corr_type,
absolute=False,
norm=False,
return_data=True,
return_rand=True,
n_rand_ex=1,
randst=randst
)
roi_corr, _, _, _, corr_data, rand_corrs, rand_exs, rand_ex_corrs = returns
rand_ex = rand_exs[..., 0]
rand_ex_corr = rand_ex_corrs[0]
rand_corr_med = math_util.mean_med(
rand_corrs, stats="median", nanpol=nanpol
)
norm_roi_corr = float(
get_norm_corrs(roi_corr, med=rand_corr_med, corr_type=corr_type)
)
row_idx = idx_corr_norm_df.index[0]
idx_corr_norm_df.loc[row_idx, f"{corr_name}_corrs"] = roi_corr
idx_corr_norm_df.loc[row_idx, f"{corr_name}_rand_ex_corrs"] = rand_ex_corr
idx_corr_norm_df.loc[row_idx, f"{corr_name}_rand_corr_meds"] = rand_corr_med
idx_corr_norm_df.loc[row_idx, f"{corr_name}_norm_corrs"] = norm_roi_corr
cols = [
f"{corr_name}_{col_name}"
for col_name in
["corr_data", "rand_ex", "rand_corrs_binned", "rand_corrs_bin_edges"]
]
idx_corr_norm_df = gen_util.set_object_columns(
idx_corr_norm_df, cols, in_place=True
)
idx_corr_norm_df.at[row_idx, f"{corr_name}_corr_data"] = corr_data.tolist()
idx_corr_norm_df.at[row_idx, f"{corr_name}_rand_ex"] = rand_ex.tolist()
fcts = [np.min, np.max] if nanpol is None else [np.nanmin, np.nanmax]
bounds = [fct(rand_corrs) for fct in fcts]
bins = np.linspace(*bounds, n_bins + 1)
rand_corrs_binned = np.histogram(rand_corrs, bins=bins)[0]
idx_corr_norm_df.at[row_idx, f"{corr_name}_rand_corrs_bin_edges"] = \
[bounds[0], bounds[-1]]
idx_corr_norm_df.at[row_idx, f"{corr_name}_rand_corrs_binned"] = \
rand_corrs_binned.tolist()
return idx_corr_norm_df
#############################################
def get_idx_corrs_df(sessions, analyspar, stimpar, basepar, idxpar, permpar,
consec_only=True, permute="sess", corr_type="corr",
sig_only=False, randst=None, parallel=False):
"""
get_idx_corrs_df(sessions, analyspar, stimpar, basepar, idxpar, permpar)
Returns ROI index correlation data for each line/plane/session comparison.
Required args:
- sessions (list):
Session objects
- analyspar (AnalysPar):
named tuple containing analysis parameters
- stimpar (StimPar):
named tuple containing stimulus parameters
- basepar (BasePar):
named tuple containing baseline parameters
- idxpar (IdxPar):
named tuple containing index parameters
- permpar (PermPar):
named tuple containing permutation parameters.
Optional args:
- consec_only (bool):
if True, only consecutive session numbers are correlated
default: True
- corr_type (str):
type of correlation to run, i.e. "corr" or "R_sqr"
default: "corr"
- permute (str):
type of permutation to do ("tracking", "sess" or "all")
default: "sess"
- sig_only (bool):
if True, ROIs with significant USIs are included
(only possible if analyspar.tracked is True)
default: False
- randst (int or np.random.RandomState):
seed value to use. (-1 treated as None)
default: None
- parallel (bool):
if True, some of the analysis is run in parallel across CPU cores
default: False
Returns:
- idx_corr_df (pd.DataFrame):
dataframe with one row per line/plane, and the
following columns, in addition to the basic sess_df columns:
for correlation data (normalized if corr_type is "diff_corr") for
session comparisons, e.g. 1v2
- {}v{}{norm_str}_corrs (float): intersession ROI index correlations
- {}v{}{norm_str}_corr_stds (float): bootstrapped intersession ROI
index correlation standard deviation
- {}v{}_null_CIs (list): adjusted null CI for intersession ROI
index correlations
- {}v{}_raw_p_vals (float): p-value for intersession correlations
- {}v{}_p_vals (float): p-value for intersession correlations,
corrected for multiple comparisons and tails
"""
lp_idx_df = get_lp_idx_df(
sessions,
analyspar=analyspar,
stimpar=stimpar,
basepar=basepar,
idxpar=idxpar,
permpar=permpar,
sig_only=sig_only,
randst=randst,
parallel=parallel,
)
idx_corr_df = get_basic_idx_corr_df(lp_idx_df, consec_only=consec_only)
# get correlation pairs
corr_ns = get_corr_pairs(lp_idx_df, consec_only=consec_only)
# get norm information
norm = False
if permute in ["sess", "all"]:
corr_type = f"diff_{corr_type}"
if corr_type == "diff_corr":
norm = True
norm_str = "_norm" if norm else ""
logger.info(
("Calculating ROI USI correlations across sessions..."),
extra={"spacing": TAB}
)
group_columns = ["lines", "planes"]
for grp_vals, grp_df in lp_idx_df.groupby(group_columns):
grp_df = grp_df.sort_values("sess_ns") # mice already aggregated
line, plane = grp_vals
row_idx = idx_corr_df.loc[
(idx_corr_df["lines"] == line) &
(idx_corr_df["planes"] == plane)
].index
if len(row_idx) != 1:
raise RuntimeError("Expected exactly one row to match.")
row_idx = row_idx[0]
use_randst = copy.deepcopy(randst) # reset each time
# obtain correlation data
args_dict = {
"data_df" : grp_df,
"analyspar": analyspar,
"permpar" : permpar,
"permute" : permute,
"corr_type": corr_type,
"absolute" : False,
"norm" : norm,
"randst" : use_randst,
}
all_corr_data = gen_util.parallel_wrap(
get_corr_data,
corr_ns,
args_dict=args_dict,
parallel=parallel,
zip_output=False
)
# add to dataframe
for sess_pair, corr_data in zip(corr_ns, all_corr_data):
if corr_data is None:
continue
corr_name = f"{sess_pair[0]}v{sess_pair[1]}"
roi_corr, roi_corr_std, null_CI, p_val = corr_data
idx_corr_df.loc[row_idx, f"{corr_name}{norm_str}_corrs"] = roi_corr
idx_corr_df.loc[row_idx, f"{corr_name}{norm_str}_corr_stds"] = \
roi_corr_std
idx_corr_df.at[row_idx, f"{corr_name}_null_CIs"] = null_CI.tolist()
idx_corr_df.loc[row_idx, f"{corr_name}_p_vals"] = p_val
# corrected p-values
idx_corr_df = misc_analys.add_corr_p_vals(idx_corr_df, permpar)
return idx_corr_df
#############################################
def corr_scatterplots(sessions, analyspar, stimpar, basepar, idxpar, permpar,
permute="sess", sig_only=False, randst=None, n_bins=200,
parallel=False):
"""
corr_scatterplots(sessions, analyspar, stimpar, basepar, idxpar, permpar)
Returns ROI index correlation scatterplot data for each line/plane/session
comparison.
Required args:
- sessions (list):
Session objects
- analyspar (AnalysPar):
named tuple containing analysis parameters
- stimpar (StimPar):
named tuple containing stimulus parameters
- basepar (BasePar):
named tuple containing baseline parameters
- idxpar (IdxPar):
named tuple containing index parameters
- permpar (PermPar):
named tuple containing permutation parameters.
Optional args:
- permute (str):
type of permutation to do ("tracking", "sess" or "all")
default: "sess"
- sig_only (bool):
if True, ROIs with significant USIs are included
(only possible if analyspar.tracked is True)
default: False
- randst (int or np.random.RandomState):
seed value to use. (-1 treated as None)
default: None
- n_bins (int):
number of bins for random data
default: 200
- parallel (bool):
if True, some of the analysis is run in parallel across CPU cores
default: False
Returns:
- idx_corr_df (pd.DataFrame):
dataframe with one row per line/plane, and the
following columns, in addition to the basic sess_df columns:
for correlation data (normalized if corr_type is "diff_corr") for
session comparisons (x, y), e.g. 1v2
- binned_rand_stats (list): number of random correlation values per
bin (xs x ys)
- corr_data_xs (list): USI values for x
- corr_data_ys (list): USI values for y
- corrs (float): correlation between session data (x and y)
- p_vals (float): p-value for correlation, corrected for
multiple comparisons and tails
- rand_corr_meds (float): median of the random correlations
- raw_p_vals (float): p-value for intersession correlations
- regr_coefs (float): regression correlation coefficient (slope)
- regr_intercepts (float): regression correlation intercept
- x_bin_mids (list): x mid point for each random correlation bin
- y_bin_mids (list): y mid point for each random correlation bin
"""
lp_idx_df = get_lp_idx_df(
sessions,
analyspar=analyspar,
stimpar=stimpar,
basepar=basepar,
idxpar=idxpar,
permpar=permpar,
sig_only=sig_only,
randst=randst,
parallel=parallel,
)
idx_corr_df = get_basic_idx_corr_df(
lp_idx_df, consec_only=False, null_CI_cols=False
)
# get correlation pairs
corr_ns = get_corr_pairs(lp_idx_df)
if len(corr_ns) != 1:
raise ValueError("Expected only 1 session correlation pair.")
sess_pair = corr_ns[0]
# get norm information
norm = False
corr_type = "corr"
if permute in ["sess", "all"]:
corr_type = "diff_corr"
norm = True
# add array columns
columns = ["corr_data_xs", "corr_data_ys", "binned_rand_stats",
"x_bin_mids", "y_bin_mids"]
idx_corr_df = gen_util.set_object_columns(idx_corr_df, columns)
logger.info(
("Calculating ROI USI correlations across sessions..."),
extra={"spacing": TAB}
)
group_columns = ["lines", "planes"]
for grp_vals, grp_df in lp_idx_df.groupby(group_columns):
grp_df = grp_df.sort_values("sess_ns") # mice already aggregated
line, plane = grp_vals
row_idx = idx_corr_df.loc[
(idx_corr_df["lines"] == line) &
(idx_corr_df["planes"] == plane)
].index
if len(row_idx) != 1:
raise RuntimeError("Expected exactly one row to match.")
row_idx = row_idx[0]
if len(grp_df) > 2:
raise RuntimeError("Expected no more than 2 rows to correlate.")
if len(grp_df) < 2:
continue # no pair
use_randst = copy.deepcopy(randst) # reset each time
# obtain correlation data
args_dict = {
"data_df" : grp_df,
"analyspar" : analyspar,
"permpar" : permpar,
"permute" : permute,
"corr_type" : corr_type,
"absolute" : False,
"norm" : norm,
"randst" : use_randst,
"return_data": True,
"return_rand": True,
"n_rand_ex" : 1000,
}
all_corr_data = get_corr_data(sess_pair, **args_dict)
[roi_corr, _, null_CI, p_val, corr_data, _, rand_exs, _] = all_corr_data
regr = LinearRegression().fit(corr_data[0].reshape(-1, 1), corr_data[1])
# bin data
rand_stats, x_edge, y_edge = np.histogram2d(
rand_exs[0].reshape(-1), rand_exs[1].reshape(-1), bins=n_bins,
density=False
)
x_mids = np.diff(x_edge) / 2 + x_edge[:-1]
y_mids = np.diff(y_edge) / 2 + y_edge[:-1]
rand_binned = scind.gaussian_filter(
rand_stats, n_bins / 20, mode="constant"
)
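# Note (added comment): the random USI pairs are binned into an
# n_bins x n_bins 2D histogram and smoothed with a Gaussian filter
# (sigma = n_bins / 20); this smoothed density is what is stored in
# 'binned_rand_stats' as the scatterplot background.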
idx_corr_df.loc[row_idx, "corrs"] = roi_corr
idx_corr_df.loc[row_idx, "rand_corr_meds"] = null_CI[1]
idx_corr_df.loc[row_idx, "regr_coefs"] = regr.coef_
idx_corr_df.loc[row_idx, "regr_intercepts"] = regr.intercept_
idx_corr_df.at[row_idx, "corr_data_xs"] = corr_data[0].tolist()
idx_corr_df.at[row_idx, "corr_data_ys"] = corr_data[1].tolist()
idx_corr_df.at[row_idx, "binned_rand_stats"] = rand_binned.tolist()
idx_corr_df.at[row_idx, "x_bin_mids"] = x_mids.tolist()
idx_corr_df.at[row_idx, "y_bin_mids"] = y_mids.tolist()
idx_corr_df.loc[row_idx, "p_vals"] = p_val
# corrected p-values
idx_corr_df = misc_analys.add_corr_p_vals(idx_corr_df, permpar)
return idx_corr_df
|
StarcoderdataPython
|
3227230
|
<reponame>gowtham3105/guardAIns-environment
import random
import time
from Action import Action
from Cells.Beast import Beast
from Cells.Cell import Cell
from Cells.Clue import Clue
from Cells.HealPoint import HealPoint
from Cells.Teleporter import Teleporter
# from Cells.Cell import Cell
from Feedback import Feedback
from InfinityStone import InfinityStone
from Player import Player
from State import State
class Environment:
def __init__(self, room_id, start_time, height, width, max_penalty_score, player_timeout, max_rounds) -> None:
self.__env = {'__name__': 'GuardAIns', '__version__': '0.1'}
self.__room_id = room_id
self.__start_time = start_time
self.__graph = None
self.__rounds = 0
self.__player1_feedback = []
self.__player2_feedback = []
self.__player1 = None
self.__player2 = None
self.__width = width
self.__height = height
self.__printable_matrix = None
self.__player1_actions = []
self.__player2_actions = []
self.__player1_penalty_score = max_penalty_score
self.__player2_penalty_score = max_penalty_score
self.__max_penalty_score = max_penalty_score
self.__player_timeout = player_timeout
self.__winner = None
self.__game_over = False
self.__max_rounds = max_rounds
self.__infinity_stone = None
def get_env(self):
return self.__env
def get_start_time(self):
return self.__start_time
def get_room_id(self):
return self.__room_id
def get_graph(self):
return self.__graph
def get_rounds(self):
return self.__rounds
def get_player1(self) -> Player:
return self.__player1
def get_player2(self) -> Player:
return self.__player2
def set_player1(self, player1: Player) -> None:
self.__player1 = player1
def set_player2(self, player2: Player) -> None:
self.__player2 = player2
def get_width(self):
return self.__width
def get_height(self):
return self.__height
def get_player1_actions(self):
return self.__player1_actions
def get_player2_actions(self):
return self.__player2_actions
def get_player1_penality_score(self):
return self.__player1_penalty_score
def get_player2_penality_score(self):
return self.__player2_penalty_score
def get_winner(self):
return self.__winner
def add_action_to_player1(self, action: Action) -> None:
self.__player1_actions.append(action)
def add_action_to_player2(self, action: Action) -> None:
self.__player2_actions.append(action)
def get_player1_feedbacks(self):
return self.__player1_feedback
def get_player2_feedbacks(self):
return self.__player2_feedback
def add_player1_feedback(self, feedback: Feedback) -> None:
self.__player1_feedback.append(feedback)
def add_player2_feedback(self, feedback: Feedback) -> None:
self.__player2_feedback.append(feedback)
def create_graph(self) -> None:
matrix = []
wall = ['|'] + ['-', '|'] * self.__width
printable_matrix = [wall, ]
for i in range(self.__height):
temp_matrix = []
for j in range(self.__width):
temp_matrix.append(Cell((j, i)))
matrix.append(temp_matrix)
cell_spaces = ['|'] + [' ', '|'] * self.__width
printable_matrix.append(cell_spaces.copy())
printable_matrix.append(wall.copy())
stack = [matrix[0][0]]
visited = [[0 for _ in range(self.__width)] for _ in range(self.__height)]
while len(stack):
current_cell = stack.pop()
if visited[current_cell.get_coordinates()[1]][current_cell.get_coordinates()[0]] > 1:
continue
visited[current_cell.get_coordinates()[1]][current_cell.get_coordinates()[0]] += 1
possible_neighbours = [[1, 0], [-1, 0], [0, 1], [0, -1]]
random.shuffle(possible_neighbours)
# possible_neighbours = random.choices(possible_neighbours, k=3)
for neighbour in possible_neighbours:
if neighbour[1] + current_cell.get_coordinates()[1] < 0 \
or neighbour[1] + current_cell.get_coordinates()[1] >= self.__height:
continue
if neighbour[0] + current_cell.get_coordinates()[0] < 0 \
or neighbour[0] + current_cell.get_coordinates()[0] >= self.__width:
continue
if visited[neighbour[1] + current_cell.get_coordinates()[1]][
neighbour[0] + current_cell.get_coordinates()[0]] > 1:
continue
if matrix[neighbour[1] + current_cell.get_coordinates()[1]][
neighbour[0] + current_cell.get_coordinates()[0]] in stack:
continue
current_cell.add_neighbour_cell(matrix[neighbour[1] + current_cell.get_coordinates()[1]][
neighbour[0] + current_cell.get_coordinates()[0]])
matrix[neighbour[1] + current_cell.get_coordinates()[1]][
neighbour[0] + current_cell.get_coordinates()[0]].add_neighbour_cell(current_cell)
stack.append(matrix[neighbour[1] + current_cell.get_coordinates()[1]][
neighbour[0] + current_cell.get_coordinates()[0]])
printable_matrix[
2 * (current_cell.get_coordinates()[1]) + 1 + neighbour[1]][
2 * (current_cell.get_coordinates()[0]) + 1 + neighbour[0]] = ' '
self.__graph = matrix
self.__printable_matrix = printable_matrix
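# Note (added comment): the stack-based loop above is a randomized, stack-driven
# maze carver (roughly a depth-first backtracker); each time an unvisited
# neighbour is linked, the wall character between the two cells is blanked out
# in __printable_matrix, which print_graph() later joins row by row.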
def print_graph(self):
if not self.__printable_matrix:
return False
for i in range(len(self.__printable_matrix)):
print("".join(self.__printable_matrix[i]))
return True
def is_graph_connected(self):
all_cells = []
for i in range(self.__height):
for j in range(self.__width):
all_cells.append(self.__graph[i][j])
queue = [self.__graph[0][0]]
visited = []
while len(queue):
node = queue.pop(0)
for cell in node.get_neighbour_cells():
if cell in visited:
continue
if cell in all_cells:
all_cells.remove(cell)
visited.append(cell)
queue.append(cell)
if len(all_cells) == 0:
return True
else:
return False
def place_special_cells(self, no_of_teleporters, no_of_healpoints, no_of_clues, no_of_beasts):
if no_of_teleporters + no_of_healpoints + no_of_clues + no_of_beasts > self.__width * self.__height:
print("Not enough cells to place all the special cells")
raise RuntimeError("Not enough cells to place all the special cells")
for i in range(no_of_teleporters):
x = random.randint(0, self.__width - 1)
y = random.randint(0, self.__height - 1)
while self.__graph[y][x].get_cell_type() != 'Normal':
x = random.randint(0, self.__width - 1)
y = random.randint(0, self.__height - 1)
if self.__graph[y][x].get_cell_type() == 'Normal':
prev_cell = self.__graph[y][x]
self.__graph[y][x] = Teleporter(self.__graph[y][x])
# replace the cell with teleporter in the neighbour cells
for cell in self.__graph[y][x].get_neighbour_cells():
cell.remove_neighbour_cell(prev_cell)
cell.add_neighbour_cell(self.__graph[y][x])
for i in range(no_of_healpoints):
x = random.randint(0, self.__width - 1)
y = random.randint(0, self.__height - 1)
while self.__graph[y][x].get_cell_type() != 'Normal':
x = random.randint(0, self.__width - 1)
y = random.randint(0, self.__height - 1)
if self.__graph[y][x].get_cell_type() == 'Normal':
prev_cell = self.__graph[y][x]
self.__graph[y][x] = HealPoint(self.__graph[y][x])
# replace the cell with healpoint in the neighbour cells
for cell in self.__graph[y][x].get_neighbour_cells():
cell.remove_neighbour_cell(prev_cell)
cell.add_neighbour_cell(self.__graph[y][x])
for i in range(no_of_clues):
x = random.randint(0, self.__width - 1)
y = random.randint(0, self.__height - 1)
while self.__graph[y][x].get_cell_type() != 'Normal':
x = random.randint(0, self.__width - 1)
y = random.randint(0, self.__height - 1)
if self.__graph[y][x].get_cell_type() == 'Normal':
prev_cell = self.__graph[y][x]
self.__graph[y][x] = Clue(self.__graph[y][x])
# replace the cell with clue in the neighbour cells
for cell in self.__graph[y][x].get_neighbour_cells():
cell.remove_neighbour_cell(prev_cell)
cell.add_neighbour_cell(self.__graph[y][x])
for i in range(no_of_beasts):
x = random.randint(0, self.__width - 1)
y = random.randint(0, self.__height - 1)
while self.__graph[y][x].get_cell_type() != 'Normal':
x = random.randint(0, self.__width - 1)
y = random.randint(0, self.__height - 1)
if self.__graph[y][x].get_cell_type() == 'Normal':
prev_cell = self.__graph[y][x]
self.__graph[y][x] = Beast(self.__graph[y][x])
# replace the cell with beast in the neighbour cells
for cell in self.__graph[y][x].get_neighbour_cells():
cell.remove_neighbour_cell(prev_cell)
cell.add_neighbour_cell(self.__graph[y][x])
x = random.randint(0, self.__width - 1)
y = random.randint(0, self.__height - 1)
while self.__graph[y][x].get_cell_type() != 'Normal':
x = random.randint(1, self.__width - 2)
y = random.randint(0, self.__height - 1)
if self.__graph[y][x].get_cell_type() == 'Normal':
power_stone = InfinityStone(self.get_graph()[y][x])
self.__infinity_stone = power_stone
return True
def movegen(self, player: Player) -> dict:
# sent as input for the player object, it contains the neighboring cells,
# current locations of troops, health of troops and feedback.
return_dict = {
# 0 - up, 1 - left, 2 - down, 3 - right
'Gamora': [[], [], [], []],
'Drax': [[], [], [], []],
'Rocket': [[], [], [], []],
'Groot': [[], [], [], []],
'StarLord': [[], [], [], []]
}
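# Note (added comment): each guardian's entry is filled below with the Cell
# objects visible in the four directions, stopping at walls; for StarLord the
# entry is replaced by a pair [wall-limited visibility, wall-ignoring
# visibility] to reflect his special vision.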
for key in player.get_guardians().keys():
# for all directions
current_guardian_obj = player.get_guardians()[key]
for i in range(4): # 0 - up, 1 - left, 2 - down, 3 - right
dir_ver = 0
dir_hor = 0
current_coordinates = current_guardian_obj.coordinates.get_coordinates()
current_cell = current_guardian_obj.coordinates
if i == 0 or i == 2:
dir_ver = i - 1 # dir_ver = -1 or 1
else:
dir_hor = i - 2 # dir_hor = -1 or 1
for x in range(1, current_guardian_obj.get_vision() + 1):
possible_neighbour = (
current_coordinates[1] + dir_ver * x, current_coordinates[0] + dir_hor * x)
if possible_neighbour[0] < 0 or possible_neighbour[0] >= self.__height or possible_neighbour[
1] < 0 or possible_neighbour[1] >= self.__width:
break
neighbour_cells_tuples = [x.get_coordinates() for x in current_cell.get_neighbour_cells()]
if self.__graph[possible_neighbour[0]][
possible_neighbour[1]].get_coordinates() in neighbour_cells_tuples:
return_dict[key][i].append(
self.__graph[possible_neighbour[0]][possible_neighbour[1]])
current_cell = self.__graph[possible_neighbour[0]
][possible_neighbour[1]]
else:
break
if key == "StarLord":
return_dict[key] = [return_dict[key], [[], [], [], []]]
# return all the cells that are in the vision of StarLord even if wall is in between
for i in range(4): # 0 - up, 1 - left, 2 - down, 3 - right
dir_ver = 0
dir_hor = 0
current_coordinates = current_guardian_obj.coordinates.get_coordinates()
current_cell = current_guardian_obj.coordinates
if i == 0 or i == 2:
dir_ver = i - 1
else:
dir_hor = i - 2
for x in range(1, current_guardian_obj.get_vision() + 1):
possible_neighbour = (
current_coordinates[1] + dir_ver * x, current_coordinates[0] + dir_hor * x)  # (row, col), matching the wall-limited loop above
if possible_neighbour[0] < 0 or possible_neighbour[0] >= self.__height or possible_neighbour[
1] < 0 or possible_neighbour[1] >= self.__width:
break
return_dict[key][1][i].append(
self.__graph[possible_neighbour[0]][possible_neighbour[1]])
return return_dict
def update_rounds(self, sio):
while self.get_start_time() - time.time() > 0:
print('Game Starts in ' + str(self.get_start_time() - time.time()) + " seconds")
sio.emit('game_status', 'Game Starts in ' + str(self.get_start_time() - time.time()) + " seconds")
time.sleep(1)
print('Starting Update Rounds')
print(self.get_player1(), self.get_player2())
if self.get_player1() is None:
if self.get_player2() is None:
# both players are dead
print("Both players are Not Connected")
self.__winner = None
self.__game_over = True
return True
else:
# player 2 is alive
self.__winner = self.get_player2()
self.__game_over = True
print("Player 2 is the Winner")
sio.emit('game_status', 'Player 2 is the Winner')
return True
else:
if self.get_player2() is None:
# player 1 is alive
self.__winner = self.get_player1()
self.__game_over = True
print("Player 1 is the Winner")
sio.emit('game_status', 'Player 1 is the Winner')
return True
while True:
print("Round: ", self.__rounds)
if self.__infinity_stone:
if self.__infinity_stone.get_coordinates() == self.get_player1().get_base_coordinates():
self.__winner = self.get_player1()
self.__game_over = True
self.__infinity_stone.set_returned_to_base(True)
print("Player 1 is the Winner, because of Infinity Stone reached to base",
self.__infinity_stone.get_coordinates())
sio.emit('game_status', 'Player 1 is the Winner')
return True
if self.__infinity_stone.get_coordinates() == self.get_player2().get_base_coordinates():
self.__winner = self.get_player2()
self.__game_over = True
self.__infinity_stone.set_returned_to_base(True)
print("Player 2 is the Winner because of Infinity Stone reached to base",
self.__infinity_stone.get_coordinates())
sio.emit('game_status', 'Player 2 is the Winner')
return True
if self.__player1_penalty_score < 0 <= self.__player2_penalty_score: # player 2 wins
self.__winner = self.__player2
self.__game_over = True
print("Player 2 is the Winner because of Penalty Score")
sio.emit('game_status', 'Player 2 is the Winner')
return True
if self.__player2_penalty_score < 0 <= self.__player1_penalty_score: # player 1 wins
self.__winner = self.__player1
self.__game_over = True
print("Player 1 is the Winner because of Penalty Score")
sio.emit('game_status', 'Player 1 is the Winner')
return True
if self.__player1_penalty_score < 0 and self.__player2_penalty_score < 0: # draw
winner = self.evaluate_draw()
if winner is self.__player1:
self.__winner = self.__player1
self.__game_over = True
print("Player 1 is the Winner because of Draw and evaluate draw said ")
sio.emit('game_status', 'Player 1 is the Winner')
return True
elif winner is self.__player2:
self.__winner = self.__player2
self.__game_over = True
print("Player 2 is the Winner")
sio.emit('game_status', 'Player 2 is the Winner')
return True
else:
self.__winner = None
self.__game_over = True
print("Draw")
sio.emit('game_status', 'Draw')
return True
if self.__max_rounds < self.__rounds: # If Max Rounds is reached
winner = self.evaluate_draw()
if winner == self.get_player1():
self.__winner = self.get_player1()
print("Player 1 is the Winner")
sio.emit('game_status', 'Player 1 is the Winner')
elif winner == self.get_player2():
self.__winner = self.get_player2()
print("Player 2 is the Winner")
sio.emit('game_status', 'Player 2 is the Winner')
else:
self.__winner = None
print("Draw")
sio.emit('game_status', 'Game is a Draw')
print("Game Over")
sio.emit('game_status', 'Game Over')
return True
player1_state = State(self.movegen(self.get_player1()), self.__player1_feedback,
self.__player1_penalty_score, self.get_rounds(), self.get_player1(),
self.__infinity_stone)
player2_state = State(self.movegen(self.get_player2()), self.__player2_feedback,
self.__player2_penalty_score, self.get_rounds(), self.get_player2(),
self.__infinity_stone)
player1_error = False
player2_error = False
player1_action = None
player2_action = None
self.__player1_feedback = []
if self.get_player1().is_connected():
try:
sio.call("action", to=self.get_player1().get_socket_id(), data=player1_state.json(),
timeout=self.__player_timeout)
if len(self.__player1_actions):
player1_action = self.__player1_actions[-1]
if player1_action.get_round_no() != self.get_rounds():
raise RuntimeError("Player 1 Action Data Inconsistent")
else:
raise RuntimeError('Player 1 Action Not found')
except TimeoutError:
self.add_player1_feedback(Feedback("timeout"))
player1_error = True
self.reduce_score(self.get_player1().get_player_id(), "timeout")
except Exception as e:
print(e)
self.add_player1_feedback(Feedback("error", {"error": str(e)}))
player1_error = True
self.reduce_score(self.get_player1().get_player_id(), "error")
else:
player1_error = True
self.__player2_feedback = []
if self.get_player2().is_connected():
try:
sio.call("action", to=self.get_player2().get_socket_id(), data=player2_state.json(),
timeout=self.__player_timeout)
if len(self.__player2_actions):
player2_action = self.__player2_actions[-1]
if player2_action.get_round_no() != self.get_rounds():
raise RuntimeError("Player 2 Action Data Inconsistent")
else:
raise RuntimeError('Player 2 Action Not found')
except TimeoutError:
self.add_player2_feedback(Feedback("timeout"))
player2_error = True
self.reduce_score(self.get_player2().get_player_id(), "timeout")
except Exception as e:
print(e)
self.add_player2_feedback(Feedback("error", {'error': str(e)}))
player2_error = True
self.reduce_score(self.get_player2().get_player_id(), "error")
else:
player2_error = True
self.execute_action(player1_action, player2_action, player1_error, player2_error)
if self.__infinity_stone:
feedback, feedback_to_player, player_id = self.__infinity_stone.update_coordinates()
if feedback:
self.add_player1_feedback(feedback)
self.add_player2_feedback(feedback)
if player_id == self.get_player1().get_player_id():
self.add_player1_feedback(feedback_to_player)
elif player_id == self.get_player2().get_player_id():
self.add_player2_feedback(feedback_to_player)
else:
raise RuntimeError("Invalid Player ID")
for troop in self.get_player1().get_guardians().values():
troop.add_score(troop.get_health() / troop.MAX_HEALTH)
if troop.get_type() == "Groot":
troop.special_ability(self.get_rounds())
if troop.get_coordinates().get_cell_type() == Cell.Teleporter:
dest = troop.get_coordinates().generate_destination(self.__infinity_stone, self.get_graph())
if dest.get_cell_type() == Cell.Normal:
troop.get_coordinates().remove_guardian_from_cell(troop)
troop.set_coordinates(dest)
troop.get_coordinates().add_guardian_to_cell(troop)
self.add_player1_feedback(Feedback("teleport_success", {"coordinates": dest.get_coordinates(),
"guardian": troop.get_type()}))
elif troop.get_coordinates().get_cell_type() == Cell.Clue:
clue, two_players_present = troop.get_coordinates().get_clue(self.get_player2(),
self.__infinity_stone,
troop)
# two_players_present = False
if clue:
self.add_player1_feedback(clue)
if not two_players_present:
new_cell = Cell(troop.get_coordinates().get_coordinates(),
troop.get_coordinates().get_guardians_present(),
troop.get_coordinates().get_neighbour_cells(), Cell.Normal)
self.__graph[troop.get_coordinates().get_coordinates()[1]][
troop.get_coordinates().get_coordinates()[0]] = new_cell
troop.set_coordinates(new_cell)
for guardian_in_cell in troop.get_coordinates().get_guardians_present():
guardian_in_cell.set_coordinates(troop.get_coordinates())
elif troop.get_coordinates().get_cell_type() == Cell.Beast:
feedback, two_players_present = troop.get_coordinates().update_health(self.get_player1(),
self.get_player2(),
self.__infinity_stone,
self.get_rounds())
if feedback:
if feedback.get_feedback_code() == "GUARDIAN_DEAD":
feedback.set_data({"attacker_type": "Beast",
'victim_type': troop.get_type()})
self.add_player1_feedback(feedback)
if not two_players_present:
new_cell = Cell(troop.get_coordinates().get_coordinates(),
troop.get_coordinates().get_guardians_present(),
troop.get_coordinates().get_neighbour_cells(), Cell.Normal)
self.__graph[troop.get_coordinates().get_coordinates()[1]][
troop.get_coordinates().get_coordinates()[0]] = new_cell
troop.set_coordinates(new_cell)
for guardian_in_cell in troop.get_coordinates().get_guardians_present():
guardian_in_cell.set_coordinates(troop.get_coordinates())
elif troop.get_coordinates().get_cell_type() == Cell.HealPoint:
feedback = troop.get_coordinates().update_rounds_present(self.get_rounds())
if feedback:
self.add_player1_feedback(feedback)
for troop in self.get_player2().get_guardians().values():
troop.add_score(troop.get_health() / troop.MAX_HEALTH)
if troop.get_type() == "Groot":
troop.special_ability(self.get_rounds())
if troop.get_coordinates().get_cell_type() == Cell.Teleporter:
dest = troop.get_coordinates().generate_destination(self.__infinity_stone, self.get_graph())
if dest.get_cell_type() == Cell.Normal:
troop.get_coordinates().remove_guardian_from_cell(troop)
troop.set_coordinates(dest)
troop.get_coordinates().add_guardian_to_cell(troop)
self.add_player2_feedback(Feedback("teleport_success", {"coordinates": dest.get_coordinates(),
"guardian": troop.get_type()}))
elif troop.get_coordinates().get_cell_type() == Cell.Clue:
clue, _ = troop.get_coordinates().get_clue(self.get_player1(), self.__infinity_stone, troop)
if clue:
self.add_player2_feedback(clue)
new_cell = Cell(troop.get_coordinates().get_coordinates(),
troop.get_coordinates().get_guardians_present(),
troop.get_coordinates().get_neighbour_cells(), Cell.Normal)
self.__graph[troop.get_coordinates().get_coordinates()[1]][
troop.get_coordinates().get_coordinates()[0]] = new_cell
troop.set_coordinates(new_cell)
for guardian_in_cell in troop.get_coordinates().get_guardians_present():
guardian_in_cell.set_coordinates(troop.get_coordinates())
else:
raise Exception("Clue not found but cell type is clue")
elif troop.get_coordinates().get_cell_type() == Cell.Beast:
feedback, two_players_present = troop.get_coordinates().update_health(self.get_player2(),
self.get_player1(),
self.__infinity_stone,
self.get_rounds())
if feedback:
if feedback.get_feedback_code() == "GUARDIAN_DEAD":
feedback.set_data({"attacker_type": "Beast",
'victim_type': troop.get_type()})
self.add_player2_feedback(feedback)
new_cell = Cell(troop.get_coordinates().get_coordinates(),
troop.get_coordinates().get_guardians_present(),
troop.get_coordinates().get_neighbour_cells(), Cell.Normal)
self.__graph[troop.get_coordinates().get_coordinates()[1]][
troop.get_coordinates().get_coordinates()[0]] = new_cell
troop.set_coordinates(new_cell)
for guardian_in_cell in troop.get_coordinates().get_guardians_present():
guardian_in_cell.set_coordinates(troop.get_coordinates())
elif troop.get_coordinates().get_cell_type() == Cell.HealPoint:
feedback = troop.get_coordinates().update_rounds_present(self.get_rounds())
if feedback:
self.add_player2_feedback(feedback)
self.__rounds += 1
sio.emit('game_status', "Game Running - Round " + str(self.__rounds))
self.get_reduced_score()
print("Player 1 Penality Score: ", self.get_player1_penality_score())
print("Player 2 Penality Score: ", self.get_player2_penality_score())
print("Player 1 Feedback: ", self.__player1_feedback)
print("Player 2 Feedback: ", self.__player2_feedback)
print("player 1 Guardians: ", self.get_player1().get_guardians())
print("player 2 Guardians: ", self.get_player2().get_guardians())
return True
def validate_action(self, action: Action) -> bool:
# Always check if the acting guardian is alive or not
if action is None:
print("Action is None")
return False
player = action.get_player_id()
if player == self.get_player1().get_player_id():
guardian = self.__player1.get_guardian_by_type(action.get_guardian_type())
elif player == self.get_player2().get_player_id():
guardian = self.__player2.get_guardian_by_type(action.get_guardian_type())
else:
guardian = None
if guardian is not None:
if guardian.is_alive():
if action.get_action_type() == "SPECIAL":
if guardian.get_cooldown() > self.get_rounds():
return False
# check for cool down first then this
if action.get_guardian_type() == "Gamora":
tg = action.get_target(self.get_graph()).get_coordinates()
cg = guardian.get_coordinates().get_coordinates()
if ((tg[0] - cg[0]) ** 2 + (tg[1] - cg[1]) ** 2) <= 25:
return True
else:
print("target out of range SPECIAL jump")
return False
elif action.get_guardian_type() == "Drax":
tg = action.get_target(self.get_graph()).get_coordinates()
cg = guardian.get_coordinates().get_coordinates()
if ((tg[0] == 1 + cg[0] or tg[0] == cg[0] - 1) and tg[1] == cg[1]) or (
(tg[1] == 1 + cg[1] or tg[1] == cg[1] - 1) and tg[0] == cg[0]):
return True
else:
print("target range out of range SPECIAL break walls")
return False
else:
print("Invalid guardian type SPECIAL")
return False
elif action.get_action_type() == "ATTACK":
if ((guardian.coordinates.get_coordinates()[0] - guardian.get_vision()) <=
action.get_target_coordinates()[0] <=
(guardian.coordinates.get_coordinates()[0] + guardian.get_vision())) and (
guardian.coordinates.get_coordinates()[1] - guardian.get_vision() <=
action.get_target_coordinates()[1] <=
guardian.coordinates.get_coordinates()[1] + guardian.get_vision()):
return True
print("Target out of range")
return False
elif action.get_action_type() == "MOVE":
if ((guardian.coordinates.get_coordinates()[0] - guardian.get_speed()) <=
action.get_target_coordinates()[
0] <=
(guardian.coordinates.get_coordinates()[0] + guardian.get_speed())) and (
guardian.coordinates.get_coordinates()[1] - guardian.get_speed() <=
action.get_target_coordinates()[1] <=
guardian.coordinates.get_coordinates()[1] + guardian.get_speed()):
return True
print("Target out of range MOVE")
return False
else:
print("Guardian is dead")
return False
else:
print("Guardian not found")
return False
def execute_action(self, player1_action: Action, player2_action: Action, player1_error: bool, player2_error: bool):
if not player1_error:
if not self.validate_action(player1_action):
player1_error = True
self.add_player1_feedback(Feedback("invalid_action"))
self.reduce_score(self.get_player1().get_player_id(), 'invalid_action')
if not player2_error:
if not self.validate_action(player2_action):
player2_error = True
self.add_player2_feedback(Feedback("invalid_action"))
self.reduce_score(self.get_player2().get_player_id(), 'invalid_action')
if not player1_error:
if player1_action.get_action_type() == Action.ATTACK and player1_action.get_guardian_type() == "Rocket":
# PLAYER 1 ROCKET
guardians_present = player1_action.get_target(self.get_graph()).get_guardians_present()
our_guardian = self.__player1.get_guardian_by_type(
player1_action.get_guardian_type())
if guardians_present and our_guardian:
for guardian in guardians_present:
                        # if multiple enemy guardians are present, attack them all;
                        # if none are present, guardians_present is empty
if guardian.get_belongs_to_player() == self.__player2 and guardian.is_alive():
# update get_troop to return guardian object after checking if the guardian is not dead
feedback = guardian.set_health(guardian.get_health() - our_guardian.get_attack_damage(),
self.get_rounds())
if feedback:
feedback.set_data({"attacker_type": our_guardian.get_type(),
'victim_type': guardian.get_type()})
self.add_player2_feedback(feedback)
self.add_player1_feedback(Feedback("attack_success",
{"victim_type": guardian.get_type(),
"attacker": our_guardian.get_type()}))
self.add_player2_feedback(Feedback("you_have_been_attacked",
{"attacker": our_guardian.get_type(),
"victim_type": guardian.get_type()}))
if not player2_error:
if player2_action.get_action_type() == Action.ATTACK and player2_action.get_guardian_type() == "Rocket":
# PLAYER 2 ROCKET
guardians_present = player2_action.get_target(self.get_graph()).get_guardians_present()
our_guardian = self.__player2.get_guardian_by_type(
player2_action.get_guardian_type())
if guardians_present and our_guardian:
for guardian in guardians_present:
                        # if multiple enemy guardians are present, attack them all;
                        # if none are present, guardians_present is empty
if guardian.get_belongs_to_player() == self.__player1 and guardian.is_alive():
# update get_troop to return guardian object after checking if the guardian is not dead
feedback = guardian.set_health(guardian.get_health() - our_guardian.get_attack_damage(),
self.get_rounds())
if feedback:
feedback.set_data({"attacker_type": our_guardian.get_type(),
'victim_type': guardian.get_type()})
self.add_player1_feedback(feedback)
self.add_player2_feedback(Feedback("attack_success",
{"victim_type": guardian.get_type(),
"attacker": our_guardian.get_type()}))
self.add_player1_feedback(Feedback("you_have_been_attacked",
{"attacker": our_guardian.get_type(),
"victim_type": guardian.get_type()}))
if not player1_error:
if player1_action.get_action_type() == Action.SPECIAL:
# PLAYER 1 SPECIAL Gamora or Drax
if (player1_action.get_guardian_type() == "Gamora"):
tg = player1_action.get_target(self.get_graph())
cg = self.__player1.get_guardian_by_type(player1_action.get_guardian_type()).get_coordinates()
guardian = self.__player1.get_guardian_by_type(
player1_action.get_guardian_type()) # update it to return
# guardian object directly
cg.remove_guardian_from_cell(guardian)
guardian.set_coordinates(tg)
tg.add_guardian_to_cell(guardian)
guardian.set_cooldown(self.get_rounds())
# self.add_player1_feedback(Feedback("move_success",
# {"guardian_type": player1_action.get_guardian_type(),
# "target_type": tg.get_type()}))
elif (player1_action.get_guardian_type() == "Drax"):
tg = player1_action.get_target(self.get_graph())
cg = self.__player1.get_guardian_by_type(player1_action.get_guardian_type()).get_coordinates()
guardian = self.__player1.get_guardian_by_type(
player1_action.get_guardian_type())
tg.add_neighbour_cell(cg)
cg.add_neighbour_cell(tg)
guardian.set_cooldown(self.get_rounds())
if not player2_error:
if player2_action.get_action_type() == Action.SPECIAL:
# PLAYER 2 SPECIAL Gamora or Drax
if (player2_action.get_guardian_type() == "Gamora"):
tg = player2_action.get_target(self.get_graph())
cg = self.__player2.get_guardian_by_type(player2_action.get_guardian_type()).get_coordinates()
guardian = self.__player2.get_guardian_by_type(
player2_action.get_guardian_type()) # update it to return
# guardian object directly
cg.remove_guardian_from_cell(guardian)
guardian.set_coordinates(tg)
tg.add_guardian_to_cell(guardian)
guardian.set_cooldown(self.get_rounds())
# self.add_player1_feedback(Feedback("move_success",
# {"guardian_type": player1_action.get_guardian_type(),
# "target_type": tg.get_type()}))
elif (player2_action.get_guardian_type() == "Drax"):
tg = player2_action.get_target(self.get_graph())
cg = self.__player2.get_guardian_by_type(player2_action.get_guardian_type()).get_coordinates()
guardian = self.__player2.get_guardian_by_type(
                        player2_action.get_guardian_type())
tg.add_neighbour_cell(cg)
cg.add_neighbour_cell(tg)
guardian.set_cooldown(self.get_rounds())
if not player1_error:
if player1_action.get_action_type() == Action.MOVE:
guardian = self.__player1.get_guardian_by_type(
player1_action.get_guardian_type()) # update it to return
# guardian object directly
if player1_action.get_target(self.__graph) != guardian.get_coordinates():
guardian.get_coordinates().remove_guardian_from_cell(guardian)
guardian.set_coordinates(player1_action.get_target(self.__graph))
guardian.get_coordinates().add_guardian_to_cell(guardian)
if not player2_error:
if player2_action.get_action_type() == Action.MOVE:
guardian = self.__player2.get_guardian_by_type(
player2_action.get_guardian_type()) # update it to return
# guardian object directly
if player2_action.get_target(self.__graph) != guardian.get_coordinates():
guardian.get_coordinates().remove_guardian_from_cell(guardian)
guardian.set_coordinates(player2_action.get_target(self.__graph))
guardian.get_coordinates().add_guardian_to_cell(guardian)
if not player1_error:
if player1_action.get_action_type() == Action.ATTACK and not player1_action.get_guardian_type() == "Rocket":
guardians_present = player1_action.get_target(self.get_graph()).get_guardians_present()
our_guardian = self.__player1.get_guardian_by_type(
player1_action.get_guardian_type())
if guardians_present and our_guardian:
for guardian in guardians_present:
                        # if multiple enemy guardians are present, attack them all;
                        # if none are present, guardians_present is empty
if guardian.get_belongs_to_player() == self.__player2 and guardian.is_alive():
# update get_troop to return guardian object after checking if the guardian is not dead
feedback = guardian.set_health(guardian.get_health() - our_guardian.get_attack_damage(),
self.get_rounds())
if feedback:
feedback.set_data({"attacker_type": our_guardian.get_type(),
'victim_type': guardian.get_type()})
guardian.get_coordinates().remove_guardian_from_cell(guardian)
self.add_player2_feedback(feedback)
self.add_player1_feedback(Feedback("attack_success",
{"victim_type": guardian.get_type(),
"attacker": our_guardian.get_type()}))
self.add_player2_feedback(Feedback("you_have_been_attacked",
{"attacker": our_guardian.get_type(),
"victim_type": guardian.get_type()}))
if not player2_error:
if player2_action.get_action_type() == Action.ATTACK and not player2_action.get_guardian_type() == "Rocket":
guardians_present = player2_action.get_target(self.get_graph()).get_guardians_present()
our_guardian = self.__player2.get_guardian_by_type(
player2_action.get_guardian_type())
if guardians_present and our_guardian:
for guardian in guardians_present:
if guardian.get_belongs_to_player() == self.__player1 and guardian.is_alive():
# update get_troop to return guardian object after checking if the guardian is not dead
feedback = guardian.set_health(guardian.get_health() - our_guardian.get_attack_damage(),
self.get_rounds())
if feedback:
feedback.set_data({"attacker_type": our_guardian.get_type(),
'victim_type': guardian.get_type()})
guardian.get_coordinates().remove_guardian_from_cell(guardian)
self.add_player1_feedback(feedback)
self.add_player2_feedback(Feedback("attack_success",
{"victim_type": guardian.get_type(),
"attacker": our_guardian.get_type()}))
self.add_player1_feedback(Feedback("you_have_been_attacked",
{"attacker": our_guardian.get_type(),
"victim_type": guardian.get_type()}))
return True
def reduce_score(self, player: str, feedback_code: str):
FEEDBACKS_CODES = {
"timeout": -1,
"error": -2,
"invalid_action": -2,
}
if feedback_code in FEEDBACKS_CODES.keys():
if player == self.get_player1().get_player_id():
self.__player1_penalty_score += FEEDBACKS_CODES[feedback_code]
elif player == self.get_player2().get_player_id():
self.__player2_penalty_score += FEEDBACKS_CODES[feedback_code]
else:
raise Exception("Invalid player id")
else:
raise ValueError("Invalid feedback code")
def evaluate_draw(self):
reduced_score_player1, reduced_score_player2 = self.get_reduced_score()
if reduced_score_player1 > reduced_score_player2:
return self.__player1
elif reduced_score_player1 < reduced_score_player2:
return self.__player2
else:
return None
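    # get_reduced_score combines 0.5 * (penalty_score / max_penalty_score) with
    # 0.5 * (summed guardian scores / (rounds * 5)) for each player; evaluate_draw
    # above compares these two totals to break a draw.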
def get_reduced_score(self):
reduced_score_player1 = (self.__player1_penalty_score / self.__max_penalty_score) * 0.5
reduced_score_player2 = (self.__player2_penalty_score / self.__max_penalty_score) * 0.5
player1_alive_score = 0
for guardian in self.__player1.get_guardians().values():
player1_alive_score += guardian.get_score()
reduced_score_player1 += (player1_alive_score / (self.__rounds * 5)) * 0.5
player2_alive_score = 0
for guardian in self.__player2.get_guardians().values():
player2_alive_score += guardian.get_score()
reduced_score_player2 += (player2_alive_score / (self.__rounds * 5)) * 0.5
print("Reduced score player 1: ", reduced_score_player1)
print("Reduced score player 2: ", reduced_score_player2)
return reduced_score_player1, reduced_score_player2
|
StarcoderdataPython
|
72639
|
<reponame>PaulRaid/H-index-prediction<gh_stars>0
import networkx as nx
import pandas as pd
import argparse
import progress_bar as pb
pb.init(1, _prefix="Refactoring metrics \t \t")
parser = argparse.ArgumentParser(
description="GCN")
parser.add_argument("edge_index")
parser.add_argument("author_abstract_count")
parser.add_argument("output")
args = parser.parse_args()
edge_index_file_name = args.edge_index
author_abstract_count_name = args.author_abstract_count
output_file_name = args.output
df_edge_index = pd.read_csv(edge_index_file_name, sep=";")
df_author_abstract_count = pd.read_csv(author_abstract_count_name, sep=";", index_col=0)
df_graph_metrics = pd.DataFrame()
### Compute the graph features
G = nx.from_pandas_edgelist(df_edge_index, source= 'author_2', target= 'author_1')
degree = nx.degree(G)
core_number = nx.core_number(G)
degree_centr = nx.degree_centrality(G)
nbr_avg_deg = nx.average_neighbor_degree(G)
pagerank = nx.pagerank(G, alpha=0.9)
olayer = nx.onion_layers(G)
df_graph_metrics.insert(0, "degree", degree)
df_graph_metrics.insert(1, "core_number", core_number)
df_graph_metrics.insert(2, "degree_centr", degree_centr)
df_graph_metrics.insert(3, "nbr_avg_deg", nbr_avg_deg)
df_graph_metrics.insert(4, "pagerank", pagerank)
df_graph_metrics.insert(5, "olayer", olayer)
pb.set_length(df_graph_metrics.shape[0])
def compute_row_feature(row):
pb.progress(row.name)
row.degree = G.degree[row.name]
row.core_number = core_number[row.name]
row.degree_centr = degree_centr[row.name]
row.nbr_avg_deg = nbr_avg_deg[row.name]
row.pagerank = pagerank[row.name]
row.olayer = olayer[row.name]
return row
df_graph_metrics = df_graph_metrics.apply(lambda x: compute_row_feature(x), axis=1)
df_graph_metrics.index.name="author"
df_graph_metrics.insert(0, "author_abstract_count", df_author_abstract_count["count"])
pb.progress(df_graph_metrics.shape[0])
print(df_graph_metrics)
df_graph_metrics.to_csv(output_file_name, sep=";",index=True)
|
StarcoderdataPython
|
147485
|
<filename>fluentcheck/tests/tests_is/test_collections_is.py
import unittest
from fluentcheck import Is
from fluentcheck.exceptions import CheckError
# noinspection PyStatementEffect
class TestIsCollectionsAssertions(unittest.TestCase):
def test_is_set_pass(self):
obj = set()
self.assertIsInstance(Is(obj).set, Is)
def test_is_set_fail(self):
with self.assertRaises(CheckError):
Is(42).set
def test_is_not_set_pass(self):
self.assertIsInstance(Is(42).not_set, Is)
def test_is_not_set_fail(self):
obj = set()
with self.assertRaises(CheckError):
Is(obj).not_set
def test_is_subset_of_pass(self):
obj = {1, 2, 3}
full_set = {1, 2, 3, 4, 5}
self.assertIsInstance(Is(obj).subset_of(full_set), Is)
def test_is_subset_of_fail(self):
obj = {1, 2, 100}
full_set = {1, 2, 3, 4, 5}
with self.assertRaises(CheckError):
Is(obj).subset_of(full_set)
def test_is_not_subset_of_pass(self):
obj = {1, 2, 3}
full_set = {1, 7}
self.assertIsInstance(Is(obj).not_subset_of(full_set), Is)
def test_is_not_subset_of_fail(self):
obj = {1, 2, 5}
full_set = {1, 2, 3, 4, 5}
with self.assertRaises(CheckError):
Is(obj).not_subset_of(full_set)
def test_is_superset_of_pass(self):
obj = {1, 2, 3, 4, 5}
subset = {1, 2, 3}
self.assertIsInstance(Is(obj).superset_of(subset), Is)
def test_is_superset_of_fail(self):
obj = {1, 2, 3, 4, 5}
subset = {1, 2, 100}
with self.assertRaises(CheckError):
Is(obj).superset_of(subset)
def test_is_not_superset_of_pass(self):
obj = {1, 2, 3, 4, 5}
subset = {1, 100, 3}
self.assertIsInstance(Is(obj).not_superset_of(subset), Is)
def test_is_not_superset_of_fail(self):
obj = {1, 2, 3, 4, 5}
subset = {1, 2, 5}
with self.assertRaises(CheckError):
Is(obj).not_superset_of(subset)
def test_is_intersects_pass(self):
obj = {1, 2, 3, 4, 5}
other_set = {1, 2, 3, 9, 100}
self.assertIsInstance(Is(obj).intersects(other_set), Is)
def test_is_intersects_fail(self):
obj = {1, 2, 3, 4, 5}
other_set = {-1, 100}
with self.assertRaises(CheckError):
Is(obj).intersects(other_set)
def test_is_not_intersects_pass(self):
obj = {1, 2, 3, 4, 5}
other_set = {-1, 100}
self.assertIsInstance(Is(obj).not_intersects(other_set), Is)
def test_is_not_intersects_fail(self):
obj = {1, 2, 3, 4, 5}
other_set = {1, 2, 3, 9, 100}
with self.assertRaises(CheckError):
Is(obj).not_intersects(other_set)
|
StarcoderdataPython
|
3296611
|
<filename>FunUQ/qoi.py
# FunUQ v0.1, 2018; <NAME>; Strachan Research Group
# https://github.rcac.purdue.edu/StrachanGroup
# import general
import sys, os, subprocess, shutil, numpy as np
from random import random, sample
from glob import glob
from matplotlib import pyplot as plt
from copy import deepcopy
# import local functions
from .utils import is_thermo, is_fluct, copy_files, read_file, replace_template, submit_lammps, FUQerror
from .parsetools import read_thermo, find_columns
class QuantitiesOfInterest(object):
'''
Class for running LAMMPS and extracting thermodynamic results
'''
def __init__(self, Qlist, Potential, maindir, init, run,
**kwargs):
'''
Defaults for base class
INPUTS:
Required:
Qlist - List of quantities of interest
Potential - FunUQ potential object
maindir - Full path to calculations
init - Directory name for LAMMPS and potential files
run - Directory name for LAMMPS simulations and FunUQ results
Optional:
description - String describing calculations
'''
self.overwrite = False
# Quantity of interest
self.Q_names = list(Qlist)
self.ensemble = kwargs.get('ensemble', 'nvt')
if self.ensemble == 'nvt':
requiredQ = ['PotEng']
elif self.ensemble == 'npt':
requiredQ = ['PotEng', 'Volume']
for rQ in requiredQ:
if rQ not in self.Q_names:
self.Q_names += [rQ]
# Distinguish direct thermodynamic properties,
# fluctuation properties, and unrecognized properties
self.Q_thermo = ['']*len(self.Q_names)
for qc, q in enumerate(self.Q_names):
thermo = is_thermo(q)
self.Q_thermo[qc] = thermo
if not thermo:
fluct = is_fluct(q)
if fluct:
print("WARNING: fluctionation properties still in development")
if not fluct:
raise FUQerror("{} is not a supported quantity of interest."
.format(q))
self.pot = Potential
self.units = kwargs.get('units', ['']*len(self.Q_names))
self.unittype = kwargs.get('unittype', 'metal')
self.get_units() # This is only for energy (from unittype)
# Files and directories
self.Ntimes = kwargs.get('Ntimes')
self.Ncopies = kwargs.get('Ncopies', 1)
self.sample = kwargs.get('sample', True)
self.Nmax = None
self.init = init
self.run = run
self.maindir = maindir
self.intemplate = kwargs.get('intemplate', 'in.template')
self.subtemplate = kwargs.get('subtemplate', 'submit.template')
self.infile = kwargs.get('infile', 'in.lammps')
self.subfile = kwargs.get('subfile', 'run.pbs')
self.logfile = kwargs.get('logfile', 'log.lammps')
self.FDname = kwargs.get('FDname', 'out.funcder')
#self.parampath = os.path.join(initdir, self.paramfile)
self.initdir = os.path.join(self.maindir, self.init)
self.inpath = os.path.join(self.initdir, self.intemplate)
self.subpath = os.path.join(self.initdir, self.subtemplate)
# Other
self.description = kwargs.get('description', '')
self.name = kwargs.get('name', self.pot.potname)
self.rerun_folder = kwargs.get('rerun_folder', 'rerun_')
self.copy_folder = kwargs.get('copy_folder', 'copy_')
self.copy_start = kwargs.get('copy_start', 0)
self.create_pot = kwargs.get('create_pot', True)
# User overrides defaults
'''
if input_dict != None:
for key, val in input_dict.items():
try:
setattr(self, key, val)
except:
raise KeyError("{} is not a valid input parameter.".format(key))
'''
# Get user templates
self.intxt = read_file(self.inpath)
self.mode = kwargs.get('mode')
if self.mode == 'PBS':
self.subtxt = read_file(self.subpath)
# Create/find simulation directories
self.resultsdir = os.path.join(self.maindir, 'results/')
self.rundir = os.path.join(self.maindir, '{}_runs/'.format(self.run))
for d in [self.rundir, self.resultsdir]:
try:
os.mkdir(d)
except: pass
# COPY potential table
#if self.create_pot:
# self.pot.create(self.rundir)
#else:
# self.pot.copy(self.initdir, self.rundir)
#self.pot.copy(self.initdir, self.rundir)
self.Qavg = [0]*len(self.Q_names)
def __str__(self):
out = '\t'.join(self.Q_names) + '\n'
for avg in self.Qavg:
out += '{:.3f}\t'.format(avg)
out += '\n'
return out
# This is mostly for plotting
def get_units(self):
if self.unittype == 'metal':
self.PE_units = ' (eV)'
elif self.unittype == 'real':
self.PE_units = ' (kcal/mol)'
else:
            self.PE_units = ''
def run_lammps(self, mode='PBS'):
'''
Run unmodified potential (multiple copies)
Main perturbative or verification second potential
'''
replace_in = {'SEED':'0', 'TABLECOEFF':'', 'TABLESTYLE':'',
'RUNDIR':'', 'TEMP':'0', 'POTFILE':''}
# TODO: this is confusing; it's either local or it's in one dir
if mode == 'nanoHUB_submit':
self.pot.paircoeff = self.pot.paircoeff.replace(self.pot.paramdir, '.')
replace_in['RUNDIR'] = '.'
            potpath = self.pot.potpath
            initdir = self.pot.paramdir
else:
replace_in['RUNDIR'] = self.pot.paramdir
potpath = None
initdir = None
replace_in['TABLECOEFF'] = self.pot.paircoeff
replace_in['TABLESTYLE'] = self.pot.pairstyle
replace_in['POTFILE'] = self.pot.potfile
replace_sub = {'NAME': self.name, 'INFILE': self.infile}
copy_list = np.arange(self.copy_start, self.Ncopies, dtype='int')
copy_rundir = os.path.join(self.rundir, self.copy_folder+'{}')
submit_lammps(replace_in, self.infile, self.intxt,
copy_rundir, copy_list, self.overwrite,
potpath=potpath, initdir=initdir,
replace_sub=replace_sub, mode=mode)
def extract_lammps(self, log='log.lammps'):
'''
Read lammps log thermo data; location from potential class input
Used for base properties and perturbative calculations
'''
for copy0, copy in enumerate(range(self.copy_start, self.copy_start + self.Ncopies)):
logfile = os.path.join(self.rundir, self.copy_folder+str(copy), log)
cols, thermo, Natoms = read_thermo(logfile)
# Only extract these values once
if not copy0:
self.Natoms = Natoms
self.Nmax = np.shape(thermo)[0]
if self.Ntimes is None:
self.Ntimes = self.Nmax
self.times = np.zeros([self.Ntimes, self.Ncopies], dtype='int')
#self.Q = np.zeros([self.Ntimes, self.Ncopies, len(self.Q_names)])
self.Q = np.zeros([self.Ntimes, self.Ncopies, 1,1,1, len(self.Q_names)])
self.Qavg = np.zeros([len(self.Q_names)])
self.Qstd = np.zeros([len(self.Q_names)])
# Find names of thermo/fluctuation properties
Q_thermonames = []
for q,qthermo in zip(self.Q_names, self.Q_thermo):
if qthermo:
Q_thermonames += [q]
# Get columns for thermo properties
Q_cols = find_columns(cols, Q_thermonames) #self.Q_names)
self.Q_cols = ['X']*len(self.Q_names)
for qc, q in enumerate(self.Q_names):
if q in Q_thermonames:
self.Q_cols[qc] = Q_cols[Q_thermonames.index(q)]
# Get other necessary properties
self.V = thermo[0, find_columns(cols, ['Volume'])[0]]
self.P = (thermo[0, find_columns(cols, ['Press'])[0]]
*0.0001/160.21766208) # bar -> GPa -> eV/A**3
self.T = thermo[0, find_columns(cols, ['Temp'])[0]]
self.beta = 1./8.617e-5/self.T
# Next copy may have more or fewer steps finished
# Return reduced/padded array
thermo = fix_arr(self.Ntimes, thermo)
# Randomly sample for convergence plots
if self.sample:
self.times[:,copy0] = np.array(sample(range(self.Nmax), self.Ntimes))
else:
self.times[:,copy0] = np.arange(self.Ntimes)
for qc, (q,qn) in enumerate(zip(self.Q_cols, self.Q_names)):
if qn == 'PotEng':
self.PEcol = qc
if qn == 'Press':
self.Pcol = qc
if qn == 'Volume':
self.Vcol = qc
if self.Q_thermo[qc]:
self.Q[:, copy0, 0,0,0, qc] = thermo[self.times[:,copy0],q]
elif qn == 'HeatCapacityVol':
self.Q[:, copy0, 0,0,0, qc] = thermo[self.times,self.Q_cols[self.PEcol]]**2
elif qn == 'HeatCapacityPress':
self.Q[:, copy0, 0,0,0, qc] = (thermo[self.times,self.Q_cols[self.PEcol]]
+ (thermo[self.times,self.Q_cols[self.Pcol]]*
thermo[self.times,self.Q_cols[self.Vcol]]))**2
elif qn == 'Compressibility':
self.Q[:, copy0, 0,0,0, qc] = thermo[self.times,self.Q_cols[self.Vcol]]**2
elif qn == 'ThermalExpansion':
self.Q[:, copy0, 0,0,0, qc] = (thermo[self.times,self.Q_cols[self.Vcol]]**2)
#(thermo[:,self.Q_cols[self.PEcol]]
# + (thermo[:,self.Q_cols[self.Pcol]]*
# thermo[:,self.Q_cols[self.Vcol]])))
# TODO: if there are issues, fill with NaN to ignore
# Works okay while jobs running IF the first copy stays ahead
self.Qavg = np.nanmean(self.Q, axis=(0,1,2,3,4))
self.Qstd = np.nanstd(self.Q, axis=(0,1,2,3,4))
for qc, q in enumerate(self.Q_names):
if q == 'HeatCapacityVol':
self.Qavg[qc] = fluctuation(self.beta/self.T, self.Qavg[qc], self.Qavg[self.PEcol])
elif q == 'HeatCapacityPress':
self.Qavg[qc] = fluctuation(self.beta/self.T, self.Qavg[qc], self.Qavg[self.PEcol] + self.Qavg[self.Pcol]*self.Qavg[self.Vcol])
elif q == 'Compressibility':
self.Qavg[qc] = fluctuation(self.beta/self.V, self.Qavg[qc], self.Qavg[self.Vcol])
elif q == 'ThermalExpansion':
self.Qavg[qc] = fluctuation(self.beta/self.T/self.V, self.Qavg[qc], self.Qavg[self.PEcol] + self.Qavg[self.Pcol]*self.Qavg[self.Vcol])
# ONLY CONVERT after all fluctuations calculated
self.get_conversions()
for qc, q in enumerate(self.Q_names):
self.Qavg[qc] = self.Qavg[qc]*self.conversions[qc]
def get_conversions(self):
self.conversions = [1.]*len(self.Q_names)
for qc, q in enumerate(self.Q_names):
if q == 'PotEng' or q == 'E_vdwl':
if self.unittype == 'metal':
self.conversions[qc] = 1./self.Natoms
elif self.unittype == 'real':
self.conversions[qc] = 1. #0.0433644/self.Natoms
elif q == 'Press':
if self.unittype == 'metal':
self.conversions[qc] = 0.0001
elif self.unittype == 'real':
self.conversions[qc] = 0.000101325
elif q == 'Volume':
self.conversions[qc] = 0.001
elif 'HeatCapacity' in q:
self.conversions[qc] = 1./self.Natoms/8.617e-5
elif q == 'Compressibility':
self.conversions[qc] = 1e4
elif q == 'ThermalExpansion':
self.conversions[qc] = 1.
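# Helper used by extract_lammps for the derived properties above: returns
# pre * (<A**2> - <A>**2), e.g. constant-volume heat capacity uses pre = beta/T
# with A = PotEng, and compressibility uses pre = beta/V with A = Volume.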
def fluctuation(pre, avg_ofthe_square, avg):
return pre*(avg_ofthe_square - avg**2)
def fix_arr(Nmax, arr):
(Ncurr, col) = np.shape(arr)
### Need all the thermo data for sampling
#if Ncurr > Nmax:
# arr = arr[:Nmax,:]
if Ncurr < Nmax:
Nblank = Nmax - Ncurr
arr = np.pad(arr, [(0, Nblank), (0, 0)], 'constant', constant_values=np.nan)
# else return without modifying
return arr
|
StarcoderdataPython
|
3343439
|
<filename>apispec_oneofschema/plugin.py
# apispec-oneofschema - Plugin for apispec providing support for
# Marshmallow-OneOfSchema schemas
# Copyright (C) 2019 <NAME>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from apispec.ext import marshmallow
from apispec.ext.marshmallow import common
from marshmallow_oneofschema import OneOfSchema
class OneofOpenAPIConverter(marshmallow.OpenAPIConverter):
def schema2jsonschema(self, schema):
if self.openapi_version.major < 3 or not is_oneof(schema):
return super(OneofOpenAPIConverter, self).schema2jsonschema(schema)
mapping = {}
oneof = []
for name, type_schema in schema.type_schemas.items():
schema_instance = common.resolve_schema_instance(type_schema)
schema_key = common.make_schema_key(schema_instance)
if schema_key not in self.refs:
component_name = self.schema_name_resolver(type_schema) or name
self.spec.components.schema(component_name, schema=type_schema)
ref_dict = self.get_ref_dict(schema_instance)
mapping.update({name: ref_dict['$ref']})
oneof.append(ref_dict)
return {
'oneOf': oneof,
'discriminator': {
'propertyName': schema.type_field,
'mapping': mapping
}
}
def is_oneof(schema):
return (
(isinstance(schema, type) and issubclass(schema, OneOfSchema))
or
isinstance(schema, OneOfSchema)
)
class MarshmallowPlugin(marshmallow.MarshmallowPlugin):
Converter = OneofOpenAPIConverter
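# A minimal usage sketch (names like PetSchema/CatSchema are illustrative and
# not part of this module; assumes marshmallow-oneofschema's type_schemas API
# and a recent apispec):
#
#   from apispec import APISpec
#   spec = APISpec(title="Pets", version="1.0.0", openapi_version="3.0.2",
#                  plugins=[MarshmallowPlugin()])
#   spec.components.schema("Pet", schema=PetSchema)
#   # The "Pet" component then holds a oneOf list of the mapped schemas plus a
#   # discriminator keyed on PetSchema.type_field.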
|
StarcoderdataPython
|
109139
|
<filename>docs/structs/small.py
f = open('graph.dot')
def find_between( s, first, last ):
try:
start = s.index( first ) + len( first )
end = s.index( last, start )
return s[start:end]
except ValueError:
return ""
s1 = "digraph "
s2 = """
{
graph [ rankdir="RL" ]
"""
s3 = "}"
for line in f:
name = find_between(line, 'label="', '\\n')
g = open(name+'.dot', 'w')
res = s1 + name + s2 + line + s3
g.write(res)
g.close()
f.close()
|
StarcoderdataPython
|
3382227
|
<reponame>umairwaheed/scrapy-do
#-------------------------------------------------------------------------------
# Author: <NAME> <<EMAIL>>
# Date: 26.11.2017
#
# Licensed under the 3-Clause BSD License, see the LICENSE file for details.
#-------------------------------------------------------------------------------
"""
A collection of utility classes and functions used throughout the project.
"""
import importlib
import OpenSSL
import time
import pem
import os
import re
from twisted.internet.protocol import ProcessProtocol
from twisted.internet.defer import Deferred
from twisted.internet.ssl import CertificateOptions
from twisted.internet import reactor, task
from distutils.spawn import find_executable
from datetime import datetime
from schedule import Job as SchJob
from schedule import IntervalError
#-------------------------------------------------------------------------------
def exc_repr(e):
"""
    Return a string representation of an exception together with the exception
name.
"""
return "{}: {}".format(type(e).__name__, str(e))
#-------------------------------------------------------------------------------
def get_object(name):
"""
Retrieve an object from a module given its fully qualified name. For
example: `get_object('scrapy_do.webservice.Status')`.
"""
name = name.split('.')
object_name = name[-1]
module = importlib.import_module('.'.join(name[:-1]))
return getattr(module, object_name)
#-------------------------------------------------------------------------------
class TimeStamper:
"""
Set the timestamp attribute of the object whenever the associated attribute
is set. For example:
:Example:
>>> class Test:
>>> attr = TimeStamper('_attr')
>>>
>>> def __init__(self, attr):
>>> self._attr = attr
>>> self.timestamp = datetime.now()
>>> test = Test('foo')
>>> test.attr
'foo'
>>> test.timestamp
datetime.datetime(2017, 12, 2, 23, 0, 56, 671634)
>>> test.attr = 'bar'
>>> test.timestamp
datetime.datetime(2017, 12, 2, 23, 1, 9, 688899)
"""
#---------------------------------------------------------------------------
def __init__(self, attr_name):
self.attr_name = attr_name
#---------------------------------------------------------------------------
def __get__(self, obj, obj_type):
return getattr(obj, self.attr_name)
#---------------------------------------------------------------------------
def __set__(self, obj, value):
obj.timestamp = datetime.now()
return setattr(obj, self.attr_name, value)
#-------------------------------------------------------------------------------
def _build_directive_map(job):
#---------------------------------------------------------------------------
# A list of valid directives
#---------------------------------------------------------------------------
directive_names = ['second', 'seconds', 'minute', 'minutes', 'hour',
'hours', 'day', 'days', 'week', 'weeks', 'monday',
'tuesday', 'wednesday', 'thursday', 'friday',
'saturday', 'sunday', 'at', 'to']
#---------------------------------------------------------------------------
# Get an appropriate setter reference
#---------------------------------------------------------------------------
def get_attr(obj, attr):
for obj in [obj] + obj.__class__.mro():
if attr in obj.__dict__:
ret = obj.__dict__[attr]
if isinstance(ret, property):
return lambda x: ret.__get__(x, type(x))
return ret
#---------------------------------------------------------------------------
# Build the dictionary of setters
#---------------------------------------------------------------------------
directive_map = {}
for d in directive_names:
directive_map[d] = get_attr(job, d)
return directive_map
#-------------------------------------------------------------------------------
def _parse_args(directive, directives):
#---------------------------------------------------------------------------
# Check the argument to "to"
#---------------------------------------------------------------------------
if directive == 'to':
arg = directives.pop()
try:
arg = int(arg)
except ValueError:
raise ValueError('The "to" directive expects an integer')
return [arg]
#---------------------------------------------------------------------------
# Check the argument to "at"
#---------------------------------------------------------------------------
if directive == 'at':
arg = directives.pop()
arg_split = arg.split(':')
if len(arg_split) != 2:
raise ValueError('The "at" directive expects a string like "12:34"')
try:
int(arg_split[0])
int(arg_split[1])
except ValueError:
raise ValueError('The "at" directive expects a string like "12:34"')
return [arg]
#---------------------------------------------------------------------------
# Nothing else accepts arguments
#---------------------------------------------------------------------------
return []
#-------------------------------------------------------------------------------
def _parse_spec(job, spec):
#---------------------------------------------------------------------------
# Check the directive
#---------------------------------------------------------------------------
directives = spec.lower().split()
if len(directives) < 2:
raise ValueError('Spec too short')
if directives[0] != 'every':
raise ValueError('Spec must start with "every"')
#---------------------------------------------------------------------------
# Set up the interval if necessary
#---------------------------------------------------------------------------
try:
interval = int(directives[1])
job.interval = interval
if len(directives) < 3:
raise ValueError("Spec to short")
directives = directives[2:]
except ValueError:
directives = directives[1:]
#---------------------------------------------------------------------------
# Parse the spec
#---------------------------------------------------------------------------
directive_map = _build_directive_map(job)
directives.reverse()
while directives:
directive = directives.pop()
if directive not in directive_map:
raise ValueError('Unknown directive: ' + directive)
args = _parse_args(directive, directives)
try:
directive_map[directive](job, *args)
except IntervalError as e:
raise ValueError(str(e))
return job
#-------------------------------------------------------------------------------
def schedule_job(scheduler, spec):
"""
Take a `schedule.Scheduler` object and an interval spec and convert it
to a `schedule.Job` registered with the scheduler. The spec can be
any string that can be translated to `schedule calls
<https://schedule.readthedocs.io/en/stable/>`_. For example: string
'every 2 to 3 minutes' corresponds to `schedule.every(2).to(3).minutes`.
:param scheduler: A `schedule.Scheduler`
:param spec: String containing the interval spec
:return: A `schedule.Job` registered with the scheduler
:raises ValueError: If the spec is not a valid sequence of `schedule`
method calls
"""
job = SchJob(1, scheduler)
try:
_parse_spec(job, spec)
except Exception:
scheduler.cancel_job(job)
raise
return job
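# Example (assumes the `schedule` package's Scheduler; the callable is illustrative):
#   sched = schedule.Scheduler()
#   job = schedule_job(sched, 'every 2 to 3 minutes')
#   job.do(run_spider)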
#-------------------------------------------------------------------------------
def arg_require_all(dict, args):
"""
Check if all of the args are present in the dict.
:raises KeyError: If any argument is missing from the dict.
"""
for arg in args:
if arg not in dict:
raise KeyError('Missing argument "{}"'.format(arg))
#-------------------------------------------------------------------------------
def arg_require_any(dict, args):
"""
Check if any of the args is in the dict.
:raises KeyError: If none of the args is present in the dict.
"""
for arg in args:
if arg in dict:
return
raise KeyError('Neither argument present: "{}"'.format(str(args)))
#-------------------------------------------------------------------------------
def twisted_sleep(time):
"""
Return a deferred that will be triggered after the specified amount of
time passes
"""
return task.deferLater(reactor, time, lambda: None)
#-------------------------------------------------------------------------------
class LoggedProcessProtocol(ProcessProtocol):
"""
An implementation of ProcessProtocol that forwards the program output
to logfiles. It creates files `job_name.out` and `job_name.err` and
redirects the standard output and standard error output of the
process to the respective file. If a log file is empty upon program
exit it is deleted. The :data:`finished <LoggedProcessProtocol.finished>`
deferred is triggered upon process exit and called with it's exit code.
:param job_name: Name of the job
:param log_dir: A directory to put the log files in
"""
#---------------------------------------------------------------------------
def __init__(self, job_name, log_dir):
self.finished = Deferred()
self.out_path = os.path.join(log_dir, job_name + '.out')
self.err_path = os.path.join(log_dir, job_name + '.err')
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
self.out_fd = os.open(self.out_path, flags, 0o644)
self.err_fd = os.open(self.err_path, flags, 0o644)
os.set_inheritable(self.out_fd, True)
os.set_inheritable(self.err_fd, True)
#---------------------------------------------------------------------------
def processExited(self, status):
"""
Callback called by `twisted` upon process exit.
"""
out_size = os.fstat(self.out_fd).st_size
err_size = os.fstat(self.err_fd).st_size
os.close(self.out_fd)
os.close(self.err_fd)
if out_size == 0:
os.remove(self.out_path)
if err_size == 0:
os.remove(self.err_path)
self.finished.callback(status.value.exitCode)
#-------------------------------------------------------------------------------
def run_process(cmd, args, job_name, log_dir, env=None, path=None):
"""
Run a process using :class:`LoggedProcessProtocol <LoggedProcessProtocol>`
:param cmd: Command to run
:param args: Argument passed to the command
:param job_name: Name of the job that will be used for the name of the
log files
:param log_dir: Directory where the log files will be stored
:param env: A dictionary with environment variables and their values
:param path: Program's working directory
:return: A tuple of an `IProcessTransport` object as returned
by twisted's `reactor.spawnProcess` and a deferred
called on program exit with the return code of the
process.
"""
cmd = find_executable(cmd)
args = [cmd] + args
pp = LoggedProcessProtocol(job_name, log_dir)
p = reactor.spawnProcess(pp, cmd, args, env=env, path=path,
childFDs={1: pp.out_fd, 2: pp.err_fd})
return p, pp.finished
#-------------------------------------------------------------------------------
def pprint_relativedelta(delta):
"""
Return a string representation of a relativedelta object in the form
similar to: "1y 2m 3d 5h 6m". If any of the components is equal to zero,
it's omitted.
"""
ret = ''
if delta.years:
ret += '{}y '.format(delta.years)
if delta.months:
ret += '{}m '.format(delta.months)
if delta.days:
ret += '{}d '.format(delta.days)
if delta.hours:
ret += '{}h '.format(delta.hours)
if delta.minutes:
ret += '{}m '.format(delta.minutes)
ret += '{}s'.format(delta.seconds)
return ret
#-------------------------------------------------------------------------------
def load_cert_chain(t, data):
"""
Load X509 objects from all the certificates in the given PEM data.
:param t: format type; only :data:`OpenSSL.crypto.FILETYPE_PEM` is
supported; the parameter is here only to keep the same
function signature as the other similar functions in
pyOpenSSL
:param data: string containing certificate chain data in the PEM
format
:return: a list of X509 objects representing the certificates
"""
if t != OpenSSL.crypto.FILETYPE_PEM:
raise OpenSSL.crypto.Error('Only the PEM format is supported')
certs_pem = pem.parse(data.encode('utf-8'))
certs = []
for cert_pem in certs_pem:
cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
str(cert_pem))
certs.append(cert)
return certs
#-------------------------------------------------------------------------------
class SSLCertOptions(CertificateOptions):
"""
This class implements an SSL context factory that remediates the problem
with the default factory not being able to handle arbitrary certificate
chains. It allows the user to pass file names instead of pyOpenSSL objects
which hides quite a lot of complexity. Furthermore, any time an SSL
context object is requested it check the mtime of the files to see if
they have been changed. If they were changed, they are reloaded. Doing
things this way allows you to renew your certificates without having
to restart the server. It's quite convenient if you use Let's Encrypt
as a CA which offers certificates with 90 days lifespan. The class
extends the functionality of the recommended `CertificateOptions` factory
and constructs it with the defaults, except for the parameters described
below.
:param key_file: A file containing the private key in either ASN.1 or PEM
format
:param cert_file: A file containing the certificate in either ASN.1 or PEM
format.
:param chain_file: A file containing any additional certificates in the
chain of trust in the PEM format
"""
#---------------------------------------------------------------------------
def __init__(self, key_file, cert_file, chain_file=None):
self.key_file = key_file
self.cert_file = cert_file
self.chain_file = chain_file if chain_file != '' else None
self.load_time = None
key, cert, chain = self.load_data()
super(SSLCertOptions, self).__init__(key, cert, extraCertChain=chain)
self.getContext()
#---------------------------------------------------------------------------
def load_data(self):
"""
        Load the pyOpenSSL objects from the user-supplied files if the files were
        modified since the last time we loaded them.
:return: a list containing the private key as a PKey object,
the certificate as a X509 object, a possibly empty list
of additional certificates in the chain of trust, all of them
as X509 objects
"""
#-----------------------------------------------------------------------
# Check if the data needs to be reloaded
#-----------------------------------------------------------------------
files = [self.key_file, self.cert_file, self.chain_file]
if self.load_time is not None:
reload_data = False
for fn in files:
if fn is None:
continue
mtime = os.path.getmtime(fn)
if mtime > self.load_time:
reload_data = True
break
if not reload_data:
return (None, None, [])
#-----------------------------------------------------------------------
# Load the data
#-----------------------------------------------------------------------
types = [OpenSSL.crypto.FILETYPE_ASN1, OpenSSL.crypto.FILETYPE_PEM]
funcs = [
OpenSSL.crypto.load_privatekey,
OpenSSL.crypto.load_certificate,
load_cert_chain
]
objs = []
for i in range(len(files)):
#-------------------------------------------------------------------
# Read the file if the name was specified
#-------------------------------------------------------------------
fn = files[i]
if fn is None:
objs.append(None)
continue
with open(fn) as f:
data = f.read()
#-------------------------------------------------------------------
# Try to load the object
#-------------------------------------------------------------------
obj = None
for t in types:
try:
obj = funcs[i](t, data)
except OpenSSL.crypto.Error:
pass
if obj is not None:
break
objs.append(obj)
self.load_time = time.time()
#-----------------------------------------------------------------------
# Fix the chain value and return
#-----------------------------------------------------------------------
key, cert, chain = objs
if chain is None:
chain = []
return key, cert, chain
#---------------------------------------------------------------------------
def getContext(self):
"""
Get the SSL context recreating it using new certificate data if
necessary.
"""
key, cert, chain = self.load_data()
if key is not None:
self.privateKey = key
self.certificate = cert
self.extraCertChain = chain
self._context = None
return super(SSLCertOptions, self).getContext()
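# Usage sketch (file names and the `site` factory are illustrative; relies on
# twisted's reactor.listenSSL accepting a context factory):
#   options = SSLCertOptions('key.pem', 'cert.pem', 'chain.pem')
#   reactor.listenSSL(443, site, options)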
#-------------------------------------------------------------------------------
def decode_addresses(addrs):
"""
Find all IP address-port pairs in the given string. The convention follows
the definitions in RFC3986. For IPv4 it's: `xxx.xxx.xxx.xxx:xxxx`, and for
IPv6: `[xxxx::xxxx]:xxxx`.
"""
exp = re.compile(r"""
[\s]* # whitespaces
(
((?P<IPv4>[\d.]+):(?P<portv4>\d+))| # IPv4
(\[(?P<IPv6>[A-Fa-f0-9:\.]+)\]:(?P<portv6>\d+)) # IPv6
)
""", re.VERBOSE)
start = 0
addresses = []
while True:
match = exp.match(addrs, start)
if not match:
break
start = match.end()
if match.group('IPv4'):
addresses.append((match.group('IPv4'), int(match.group('portv4'))))
else:
addresses.append((match.group('IPv6'), int(match.group('portv6'))))
return addresses
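# Example: decode_addresses('127.0.0.1:8080 [::1]:443') returns
# [('127.0.0.1', 8080), ('::1', 443)].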
|
StarcoderdataPython
|
3393076
|
<filename>models/database.py
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
# Database Connection model
import MySQLdb as mdb
class MySQL_DB():
def __init__(self):
self.connection = mdb.connect(host='localhost', user='root', passwd='', db='con_science_bot')
self.cur = self.connection.cursor(mdb.cursors.DictCursor)
self.connection.set_character_set('utf8')
self.cur.execute('SET NAMES utf8;')
self.cur.execute('SET CHARACTER SET utf8;')
self.cur.execute('SET character_set_connection=utf8;')
@classmethod
def get_size(cls):
db = cls()
with db.connection:
db.cur.execute("""
SELECT table_name AS "Table",
ROUND(((data_length + index_length) / 1024 / 1024), 2) AS "Size (MB)"
FROM information_schema.TABLES
WHERE table_schema = "con_science_bot"
ORDER BY (data_length + index_length) DESC;
""")
return db.cur.fetchall()
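# Usage sketch (assumes a reachable local MySQL server with the con_science_bot schema):
#   for row in MySQL_DB.get_size():
#       print(row['Table'], row['Size (MB)'])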
|
StarcoderdataPython
|
111326
|
# Assignment 1
# CSC 486 - Spring 2022
# Author: Dr. <NAME>
# Purpose: to test your installation of PyCharm and make sure some of our common
# libraries are installed and working correctly.
import networkx
import matplotlib.pyplot as plt
def main():
# Draws a complete graph of 10 nodes, then presents it on the screen.
# Visit https://networkx.org/documentation/stable/reference/generators.html for
# a lot of alternatives to complete_graph() that you can play around with!
G = networkx.complete_graph(10)
networkx.draw(G)
plt.show()
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1781793
|
from django.contrib.auth.models import Group as AbstractGroup
from django.core.validators import RegexValidator
from django.db import models
from organizations.abstract import (
AbstractOrganization,
AbstractOrganizationInvitation,
AbstractOrganizationOwner,
AbstractOrganizationUser,
)
from openwisp_users.base.models import (
AbstractUser,
BaseGroup,
BaseOrganization,
BaseOrganizationOwner,
BaseOrganizationUser,
)
class DetailsModel(models.Model):
"""
    You do not need to copy this model in your
    application; it is only for testing purposes.
    The `details` field serves no real purpose; it only serves as an example
    of extending models and is used for testing purposes.
It will be inherited by all the models.
"""
details = models.CharField(max_length=64, blank=True, null=True)
class Meta:
abstract = True
class User(DetailsModel, AbstractUser):
# Remember to set `blank=False` if you don't want your users to
# skip filling this information.
social_security_number = models.CharField(
max_length=11,
null=True,
blank=True,
validators=[RegexValidator(r'^\d\d\d-\d\d-\d\d\d\d$')],
)
class Meta(AbstractUser.Meta):
abstract = False
class Organization(DetailsModel, BaseOrganization, AbstractOrganization):
pass
class OrganizationUser(DetailsModel, BaseOrganizationUser, AbstractOrganizationUser):
pass
class OrganizationOwner(DetailsModel, BaseOrganizationOwner, AbstractOrganizationOwner):
pass
# only needed for django-organizations~=2.x
class OrganizationInvitation(AbstractOrganizationInvitation):
pass
class Group(DetailsModel, BaseGroup, AbstractGroup):
pass
#########################################
# You do not need to copy the following in
# your application it is only for module
# testing purposes.
#########################################
class UserInlineModel(DetailsModel, models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
class OrganizationInlineModel(DetailsModel, models.Model):
organization = models.OneToOneField(Organization, on_delete=models.CASCADE)
|
StarcoderdataPython
|
3254815
|
<gh_stars>0
# Mth To Last elements
#
# https://www.codeeval.com/open_challenges/10/
#
# Challenge Description: Write a program which determines the Mth to the last
# element in a list.
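# Example: for the input line "a b c d 4" the list has n = 5 tokens; popping
# M = 4 gives index 5 - 4 - 1 = 0, so the program prints "a".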
import sys
input_file = sys.argv[1]
try:
test_cases = open(input_file, 'r')
except IOError:
print('No such file ' + input_file)
sys.exit(-1)
for line in test_cases:
line_content = line.split(" ")
n = len(line_content)
element_to_print = n - int(line_content.pop()) - 1
if element_to_print >= 0 and element_to_print < len(line_content):
print(line_content[element_to_print])
test_cases.close()
sys.exit(0)
|
StarcoderdataPython
|
1686138
|
<filename>py/kubeflow/kfctl/testing/pytests/jupyter_test.py<gh_stars>10-100
"""Test jupyter custom resource.
This file tests that we can create notebooks using the Jupyter custom resource.
It is an integration test as it depends on having access to
a Kubeflow cluster with the custom resource test installed.
We use the pytest framework because
1. It can output results in junit format for prow/gubernator
2. It has good support for configuring tests using command line arguments
(https://docs.pytest.org/en/latest/example/simple.html)
Python Path Requirements:
kubeflow/testing/py - https://github.com/kubeflow/testing/tree/master/py
* Provides utilities for testing
Manually running the test
1. Configure your KUBECONFIG file to point to the desired cluster
"""
import logging
import os
import pytest
from kubernetes import client as k8s_client
from kubeflow.testing import util
def test_jupyter(record_xml_attribute, kfctl_repo_path, namespace):
"""Test the jupyter notebook.
Args:
record_xml_attribute: Test fixture provided by pytest.
kfctl_repo_path: path to local kfctl repository.
namespace: namespace to run in.
"""
util.load_kube_config()
util.load_kube_credentials()
logging.info("using kfctl repo: %s" % kfctl_repo_path)
util.run(["kubectl", "apply", "-f",
os.path.join(kfctl_repo_path,
"py/kubeflow/kfctl/testing/pytests/testdata/jupyter_test.yaml")])
api_client = k8s_client.ApiClient()
api = k8s_client.CoreV1Api(api_client)
resp = api.list_namespaced_service(namespace)
names = [service.metadata.name for service in resp.items]
if not "jupyter-test" in names:
raise ValueError("not able to find jupyter-test service.")
if __name__ == "__main__":
logging.basicConfig(
level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
pytest.main()
|
StarcoderdataPython
|
1690719
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# iterative bfs
import collections
class Solution:
def maxDepth(self, root: TreeNode) -> int:
level = 0
q = collections.deque([root]) if root else None
while q:
level += 1
for _ in range(len(q)):
node = q.popleft()
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
return level
|
StarcoderdataPython
|
1673745
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
__author__ = '<NAME><<EMAIL>>'
import uuid
import json
def pack_outgoing_message_to_nest(pattern, data):
    '''Pack pattern and data into the wire format expected by a Nest microservice.
    For the message format, refer to
    https://stackoverflow.com/questions/55628093/use-socket-client-with-nestjs-microservice
    Used to send a request to another Nest microservice.
    '''
_id = uuid.uuid4()
dict_merged = {'pattern': pattern, 'data': data, 'id': str(_id)}
s_json = json.dumps(obj=dict_merged)
return f'{len(s_json)}#{s_json}'.encode()
def unpack_incoming_response_from_nest(message):
''' to unpack message from other nest microservice
'''
_s = message.split(b'#')
final_length = int(_s[0])
message = json.loads(message[len(str(final_length))+1:].decode())
return message.get('err'), message.get('response')
def pack_outgoing_message_to_client(response, message_id, err=None):
'''send the message to the client as microservice server
'''
dict_merged = {
'err': err,
'response': response,
'isDisposed': True,
'id': message_id}
s_json = json.dumps(obj=dict_merged)
return f'{len(s_json)}#{s_json}'.encode()
def unpack_incoming_message_from_client(message):
'''unpack the incoming message from client as microservice server
'''
_s = message.split(b'#')
final_length = int(_s[0])
message = json.loads(message[len(str(final_length))+1:].decode())
return message['pattern'], message['data'], message['id']
def get_response_message(message, data, final_length):
'''get the response message
message -- previous received message
data - new received messages
'''
if message == b'':
_s = data.split(b'#')
final_length = int(_s[0].decode())
message += data
try:
if len(message.decode()) == final_length + len(str(final_length)) + 1:
return message, final_length, True
except UnicodeDecodeError:
pass
return message, final_length, False
def receive_all_messages(sock):
'''get all messages from the sock'''
message = b''
final_length = 0
while True:
_d = sock.recv(1024)
if _d:
message, final_length, done = get_response_message(
message, _d, final_length)
if done:
break
else:
break
return message
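# Minimal round-trip sketch; the pattern and data values are illustrative only.
if __name__ == '__main__':
    # Client side: wrap a request in the length-prefixed JSON format.
    raw = pack_outgoing_message_to_nest({'cmd': 'sum'}, [1, 2, 3])
    # Server side: unpack the request, handle it, and pack a response.
    pattern, data, message_id = unpack_incoming_message_from_client(raw)
    reply = pack_outgoing_message_to_client(sum(data), message_id)
    # Client side: read the response back.
    err, response = unpack_incoming_response_from_nest(reply)
    print(pattern, response)  # -> {'cmd': 'sum'} 6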
|
StarcoderdataPython
|
128456
|
__author__ = 'jhlee'
import cPickle
import numpy as np
import csv
import sys
import time
import os.path
EVENT = {'Ev101': 1, 'Ev102': 2, 'Ev103': 3, 'Ev104': 4, 'Ev105': 5, 'Ev106': 6, 'Ev107': 7, 'Ev108': 8, 'Ev109': 9, 'Ev110': 0}
RATIO = {'Training': 0, 'Test': 1}
class YLIMED():
def __init__(self, pathInfo, pathAud, pathImg):
self.pathInfo = pathInfo
self.pathAud = pathAud
self.pathImg = pathImg
self.VID = []
self.LABEL = []
self.SET = []
self.__summary_data()
if not (os.path.isfile('YLIMED_info.tmp')):
self.__initial_data_info()
def __summary_data(self):
f = open(self.pathInfo, 'rb')
inforeader = csv.reader(f)
next(inforeader)
trteratio = np.zeros(2)
labelset = np.zeros(10)
trainingset = np.zeros(10)
testset = np.zeros(10)
for info in inforeader:
self.VID.append(info[0])
self.LABEL.append(info[7])
self.SET.append(info[13])
labelset[EVENT[info[7]]] += 1
trteratio[RATIO[info[13]]] += 1
if info[13] == 'Training':
trainingset[EVENT[info[7]]] += 1
else:
testset[EVENT[info[7]]] += 1
print '-------------------------------------SUMMARY-------------------------------------'
print 'Total =', int(sum(labelset))
print 'Tr / Te ratio =', trteratio
print 'Label set =', labelset
print 'Training set =', trainingset
print 'Test set =', testset
print '---------------------------------------------------------------------------------'
f.close()
def __initial_data_info(self):
print 'Initial data info...'
f = open('YLIMED_info.tmp', 'w')
starttime = time.time()
total = len(self.VID)
count = 0
for tVID in self.VID:
            # check both files exist
temp_aud_file = self.pathAud + '/' + tVID + '.mfcc20.ascii'
temp_img_file = self.pathImg + '/' + tVID + '.fc7.txt'
if not (os.path.isfile(temp_aud_file) and os.path.isfile(temp_img_file)):
total -= 1
continue
#set progress bar
count += 1
progress = int(float(count) / float(total) * float(100))
sys.stdout.write('\r')
sys.stdout.write("[%-20s] %d%% %d sec" % ('='*(progress/5), progress, time.time() - starttime))
sys.stdout.flush()
#open file
audFile = open(temp_aud_file, 'r')
audData = audFile.readlines()
imgFile = open(temp_img_file, 'r')
imgData = imgFile.readlines()
#split sequence by shorter data
if(cmp(len(imgData), len(audData)/100)>0):
range_len=len(audData)/100
else:
range_len=len(imgData)
f.write(tVID + ' ' + str(range_len) + '\n')
imgFile.close()
f.close()
def __get_part_data(self, Aud_Img_Lab, tr_or_te):
tmpinfo = open('YLIMED_info.tmp', 'r')
tmpdata = tmpinfo.readlines()
starttime = time.time()
total = len(tmpdata)
count = 0
output = []
for line in tmpdata:
count += 1
progress = int(float(count) / float(total) * float(100))
sys.stdout.write('\r')
sys.stdout.write("[%-20s] %d%% %d sec" % ('='*(progress/5), progress, time.time() - starttime))
sys.stdout.flush()
line = line.split()
tVID = line[0]
range_len = line[1]
set = self.SET[self.VID.index(tVID)]
if set != tr_or_te:
continue
if tr_or_te == 'Test' and Aud_Img_Lab == 'VID':
for i in range(int(range_len)):
output.append(tVID)
continue
if Aud_Img_Lab == 'Lab':
for i in range(int(range_len)):
label = EVENT[self.LABEL[self.VID.index(tVID)]]
output.append(label)
else:
if Aud_Img_Lab == 'Aud':
temp_file = self.pathAud + '/' + tVID + '.mfcc20.ascii'
f = open(temp_file, 'r')
data = f.readlines()
f.close()
elif Aud_Img_Lab == 'Img':
temp_file = self.pathImg + '/' + tVID + '.fc7.txt'
f = open(temp_file, 'r')
data = f.readlines()
f.close()
for i in range(int(range_len)):
add = []
if Aud_Img_Lab == 'Aud':
for j in range(100):
add += [float(x) for x in data[i*100+j].split()]
elif Aud_Img_Lab == 'Img':
add = [float(x) for x in data[i].split()]
output.append(add)
tmpinfo.close()
output = np.asarray(output)
print ', finish'
return output
def get_aud_X_train(self):
print 'Load Training Audio Data'
return self.__get_part_data('Aud', 'Training')
def get_aud_X_test(self):
print 'Load Test Audio Data'
return self.__get_part_data('Aud', 'Test')
def get_img_X_train(self):
print 'Load Training Image Data'
return self.__get_part_data('Img', 'Training')
def get_img_X_test(self):
print 'Load Test Image Data'
return self.__get_part_data('Img', 'Test')
def get_y_train(self):
print 'Load Training Label Data'
return self.__get_part_data('Lab', 'Training')
def get_y_test(self):
print 'Load Test Label Data'
return self.__get_part_data('Lab', 'Test')
def get_testVID(self):
return self.__get_part_data('VID', 'Test')
#TODO
# print 'Pickling...'
#
# y_train = np.asarray(y_train)
# y_test = np.asarray(y_test)
# print 'y_train num: %d' % len(y_train), y_train.shape
# print 'y_test num: %d' % len(y_test), y_test.shape
# f = open('YLDMED_y.pkl', 'wb')
# temp = y_train, y_test
# cPickle.dump(temp, f)
# f.close()
#
# aud_X_train = np.asarray(aud_X_train)
# aud_X_test = np.asarray(aud_X_test)
# print 'aud_X_train num: %d' % len(aud_X_train), aud_X_train.shape
# print 'aud_X_test num: %d' % len(aud_X_test), aud_X_test.shape
# f = open('YLDMED_aud_X.pkl', 'wb')
# temp = aud_X_train, aud_X_test
# cPickle.dump(temp, f)
# f.close()
#
# img_X_train = np.asarray(img_X_train)
# img_X_test = np.asarray(img_X_test)
# print 'img_X_train num: %d' % len(img_X_train), img_X_train.shape
# print 'img_X_test num: %d' % len(img_X_test), img_X_test.shape
# f = open('YLDMED_img_X.pkl', 'wb')
# temp = img_X_train, img_X_test
# cPickle.dump(temp, f)
# f.close()
if __name__ == '__main__':
data = YLIMED('YLIMED_info.csv', '/DATA/YLIMED150924/audio/mfcc20', '/DATA/YLIMED150924/keyframe/fc7')
print data.get_testVID().shape
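# Hypothetical usage beyond the smoke test above (paths are placeholders, not from this repo):
#   data = YLIMED('YLIMED_info.csv', '<audio mfcc20 dir>', '<keyframe fc7 dir>')
#   X_aud = data.get_aud_X_train()
#   y = data.get_y_train()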
|
StarcoderdataPython
|
44462
|
<gh_stars>1-10
#!/usr/bin/env python
import os
import sys
from box import Box
import numpy as np
import torch
import gym
from model import Model
from trainer import Trainer
def print_config(config, d=0):
tabs = ' ' * d * 4
for k in config.keys():
if isinstance(config[k], Box):
print('{}{}:'.format(tabs, k))
print_config(config[k], d + 1)
else:
print('{}{}: {}'.format(tabs, k, config[k]))
if __name__ == '__main__':
print('Loading config...')
config_path = sys.argv[1]
with open(config_path, 'r') as config_f:
config = Box.from_yaml(config_f)
print_config(config)
print('Setting random seed...')
np.random.seed(config.random_seed)
torch.cuda.manual_seed(config.random_seed)
print('Creating environment...')
env = gym.make(config.environment)
print('Creating model...')
config.model.policy.input_size = env.observation_space.shape[0]
# config.model.policy.output_size = env.action_space.shape[0]
config.model.policy.output_size = env.action_space.n
config.model.value.input_size = env.observation_space.shape[0]
model = Model(config.model)
print('Start training...')
trainer = Trainer(config.train, env, model, config.model)
trainer.start()
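# A minimal sketch of the config this script expects (hypothetical; the key names are
# inferred from how `config` is read above, the concrete values are assumptions):
#
# example_config = Box({
#     "random_seed": 42,
#     "environment": "CartPole-v1",          # any discrete-action Gym env id
#     "model": {"policy": {}, "value": {}},  # input/output sizes are filled in from the env above
#     "train": {},                           # consumed by Trainer
# })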
|
StarcoderdataPython
|
1668172
|
<filename>eCommerce/store/views.py
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.contrib.auth.models import User
from django.views.decorators.http import require_POST
from django.http import HttpResponse
from .forms import ProfileForm, ProductForm, BuyForm, BuyCartForm
from .models import Product, Order
import json
# Create your views here.
@login_required
def index(request):
last_products = Product.objects.exclude(seller=User.objects.get(id=request.user.id)).order_by('-id')[:8]
return render(request, 'store/index.html', {'products':last_products})
@login_required
def profile(request):
if request.method == 'POST':
form = ProfileForm(request.POST)
if form.is_valid():
password = form.cleaned_data['password']
email = form.cleaned_data['email']
first_name = form.cleaned_data['first_name']
last_name = form.cleaned_data['last_name']
user = User.objects.get(id=request.user.id)
if password:
user.set_password(password)
if email:
user.email = email
if first_name:
user.first_name = first_name
if last_name:
user.last_name = last_name
user.save()
messages.success(request, 'Profile edited successfully')
return redirect('profile')
else:
form = ProfileForm(user_id=request.user.id)
return render(request, 'store/profile.html', {'form':form})
@login_required
def view_profile(request, profile):
if profile == request.user.username:
return redirect('profile')
info = get_object_or_404(User, username=profile)
username = info.username
products = Product.objects.filter(seller__exact=info.id)
return render(request, 'store/view-profile.html', {'username':username, 'products':products})
@login_required
def product(request, product_name):
product = get_object_or_404(Product, name=product_name)
if str(product.seller) == request.user.username:
return redirect('edit_product', product_name=product_name)
return render(request, 'store/product.html', {'product':product})
@login_required
def create_product(request):
if request.method == 'POST':
form = ProductForm(request.POST, request.FILES)
if form.is_valid():
prod = form.save(commit=False)
prod.seller = User.objects.get(id=request.user.id)
prod.save()
messages.success(request, 'Product created successfully')
return redirect('my_products')
else:
form = ProductForm()
return render(request, 'store/create-product.html', {'form':form})
@login_required
def edit_product(request, product_name):
prod = get_object_or_404(Product, name=product_name)
if str(prod.seller) != request.user.username:
messages.error(request, "Here are the products you can edit")
return redirect('my_products')
if request.method == 'POST':
form = ProductForm(request.POST, request.FILES, instance=prod)
if form.is_valid():
prod = form.save(commit=False)
prod.seller = User.objects.get(id=request.user.id)
prod.save()
messages.success(request, 'Product edited successfully')
return redirect('my_products')
else:
form = ProductForm(instance=prod)
return render(request, 'store/edit-product.html', {'form':form})
@login_required
def delete_product(request, product_name):
prod = get_object_or_404(Product, name=product_name)
if str(prod.seller) != request.user.username:
messages.error(request, "Here are the products you can delete")
return redirect('my_products')
if request.method == 'POST':
if request.POST.get('delete'):
prod.delete()
messages.success(request, 'Product deleted successfully')
return redirect('my_products')
return render(request, 'store/delete-product.html', {'product':prod.name})
@login_required
def my_products(request):
products = Product.objects.filter(seller__exact=request.user.id)
return render(request, 'store/my-products.html', {'products':products})
@login_required
def search(request):
if request.method == 'GET':
query = request.GET.get('q', '')
if not query:
messages.error(request, 'You must specify a query in the search URL')
return redirect('index')
else:
query = request.POST.get('query')
results = Product.objects.filter(name__icontains=query).exclude(seller=User.objects.get(id=request.user.id))
results_ascendant = results.order_by('price')
results_descendant = results.order_by('-price')
return render(request, 'store/search.html', {'query':query, 'results_ascendant':results_ascendant, 'results_descendant':results_descendant})
@login_required
def buy(request, product_name):
prod = get_object_or_404(Product, name=product_name)
price = prod.price
if str(prod.seller) == request.user.username:
messages.error(request, "You can't buy your own product")
return redirect('index')
if request.method == 'POST':
form = BuyForm(request.POST)
if form.is_valid():
address = form.cleaned_data['address']
buyer = User.objects.get(id=request.user.id)
quantity = form.cleaned_data['quantity']
try:
actual_order = Order.objects.get(product=prod, buyer=buyer, state='Opened')
actual_order.quantity += quantity
actual_order.save()
except Order.DoesNotExist:
Order(product=prod, buyer=buyer, address=address, state='Opened', quantity=quantity).save()
messages.success(request, 'Product bought successfully')
return redirect('index')
else:
form = BuyForm()
return render(request, 'store/buy.html', {'product':prod.name, 'form':form, 'price':price})
@login_required
def orders(request):
orders = Order.objects.filter(product__seller=User.objects.get(id=request.user.id)).order_by('-date')
products_state = None
if orders:
products = [order.product for order in orders]
states = [order.state for order in orders]
quantities = [order.quantity for order in orders]
dates = [order.date for order in orders]
ids = [order.id for order in orders]
products_state = tuple(zip(products, states, quantities, dates, ids))
return render(request, 'store/orders.html', {'products_state':products_state})
@login_required
def add_to_cart(request, product_name):
prod = get_object_or_404(Product, name=product_name)
if str(prod.seller) == request.user.username:
messages.error(request, "You can't add to cart your own product")
return redirect('index')
if request.session.get('cart', None):
cart = request.session['cart']
if product_name in [prod['product'] for prod in request.session['cart']]:
for i in range(len(cart)):
if cart[i]['product'] == product_name:
cart[i]['quantity'] += 1
break
else:
cart.append({'product':product_name, 'quantity':1})
request.session['cart'] = cart
else:
request.session['cart'] = [
{'product':product_name, 'quantity':1}
]
messages.success(request, 'Product added to the <a href="/cart" class="text-success">cart</a>')
return redirect('product', product_name=product_name)
@login_required
def cart(request):
cart = request.session.get('cart', None)
cart_products = None
if cart:
products = [Product.objects.get(name=prod['product']) for prod in cart]
quantity = [prod['quantity'] for prod in cart]
cart_products = set(zip(products, quantity))
return render(request, 'store/cart.html', {'cart_products':cart_products})
@login_required
@require_POST
def change_quantity(request):
cart = request.session['cart']
post_data = json.loads(request.body.decode("utf-8"))
if post_data['change'] == 'remove':
for i in range(len(cart)):
if cart[i]['product'] == post_data['product']:
cart[i]['quantity'] -= 1
new_quantity = cart[i]['quantity']
break
elif post_data['change'] == 'add':
for i in range(len(cart)):
if cart[i]['product'] == post_data['product']:
cart[i]['quantity'] += 1
new_quantity = cart[i]['quantity']
break
request.session['cart'] = cart
return HttpResponse(content=json.dumps({'new_quantity':new_quantity}), content_type = "application/json")
@login_required
@require_POST
def delete_from_cart(request):
cart = request.session['cart']
product_to_delete = json.loads(request.body.decode("utf-8"))['product_to_delete']
for i in range(len(cart)):
if cart[i]['product'] == product_to_delete:
cart.pop(i)
break
request.session['cart'] = cart
return HttpResponse(content=f'{product_to_delete} deleted from the cart', content_type = "application/json")
@login_required
def view_order(request, id):
order = get_object_or_404(Order, id=id)
if str(order.product.seller) != request.user.username:
messages.error(request, "Here are your orders")
return redirect('orders')
return render(request, 'store/view-order.html', {'order':order})
@login_required
@require_POST
def change_delivery_state(request):
post_data = json.loads(request.body.decode("utf-8"))
order_id = post_data['order_id']
state = post_data['state']
order = Order.objects.get(id=order_id)
order.state = state
order.save()
return HttpResponse(content=f'Order {order_id} state changed')
@login_required
def buy_from_cart(request):
cart = request.session.get('cart', None)
price = 0
for cart_product in cart:
prod_price = Product.objects.get(name=cart_product['product']).price
quantity = cart_product['quantity']
price += prod_price * quantity
if request.method == 'POST':
form = BuyCartForm(request.POST)
if form.is_valid():
address = form.cleaned_data['address']
buyer = User.objects.get(id=request.user.id)
for cart_product in cart:
product = Product.objects.get(name=cart_product['product'])
quantity = cart_product['quantity']
try:
actual_order = Order.objects.get(product=product, buyer=buyer, state='Opened')
actual_order.quantity += quantity
actual_order.save()
except Order.DoesNotExist:
Order(product=product, buyer=buyer, address=address, state='Opened', quantity=quantity).save()
messages.success(request, f'Product{["s" if len(cart)>1 else ""][0]} bought successfully')
return redirect('index')
else:
form = BuyCartForm()
return render(request, 'store/buy-from-cart.html', {'form':form, 'num':len(cart), 'price':price})
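# Hypothetical urls.py sketch (not part of this file): the named routes referenced by
# redirect() above would be wired up roughly like this; the URL paths are assumptions,
# only the view names come from this module.
#
# from django.urls import path
# from . import views
#
# urlpatterns = [
#     path('', views.index, name='index'),
#     path('profile/', views.profile, name='profile'),
#     path('product/<str:product_name>/', views.product, name='product'),
#     path('my-products/', views.my_products, name='my_products'),
#     path('orders/', views.orders, name='orders'),
#     path('cart/', views.cart, name='cart'),
# ]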
|
StarcoderdataPython
|
186983
|
# SPDX-FileCopyrightText: 2021 Division of Intelligent Medical Systems, DKFZ
# SPDX-FileCopyrightText: 2021 <NAME>
# SPDX-License-Identifier: MIT
import simpa as sp
import numpy as np
def create_custom_absorber():
wavelengths = np.linspace(200, 1500, 100)
absorber = sp.Spectrum(spectrum_name="random absorber",
wavelengths=wavelengths,
values=np.random.random(
np.shape(wavelengths)))
return absorber
def create_custom_chromophore(volume_fraction: float = 1.0):
chromophore = sp.Molecule(
absorption_spectrum=create_custom_absorber(),
volume_fraction=volume_fraction,
scattering_spectrum=sp.ScatteringSpectrumLibrary.CONSTANT_SCATTERING_ARBITRARY(40.0),
anisotropy_spectrum=sp.AnisotropySpectrumLibrary.CONSTANT_ANISOTROPY_ARBITRARY(0.9)
)
return chromophore
def create_custom_tissue_type():
# First create an instance of a TissueSettingsGenerator
tissue_settings_generator = sp.MolecularCompositionGenerator()
water_volume_fraction = 0.4
blood_volume_fraction = 0.5
custom_chromophore_volume_fraction = 0.1
# The volume fraction within every tissue type should sum up to 1.
oxygenation = 0.4
# Then append chromophores that you want
tissue_settings_generator.append(key="oxyhemoglobin",
value=sp.MOLECULE_LIBRARY.oxyhemoglobin(oxygenation * blood_volume_fraction))
tissue_settings_generator.append(key="deoxyhemoglobin",
value=sp.MOLECULE_LIBRARY.deoxyhemoglobin((1 - oxygenation) * blood_volume_fraction))
tissue_settings_generator.append(key="water",
value=sp.MOLECULE_LIBRARY.water(water_volume_fraction))
tissue_settings_generator.append(key="custom",
value=create_custom_chromophore(custom_chromophore_volume_fraction))
return tissue_settings_generator
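if __name__ == "__main__":
    # Minimal usage sketch (not part of the original example): build the generator defined
    # above and a standalone custom chromophore. How the generator is then handed to a
    # simpa volume creator is intentionally left out here.
    tissue_generator = create_custom_tissue_type()
    custom_chromophore = create_custom_chromophore(volume_fraction=0.1)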
|
StarcoderdataPython
|
32181
|
<reponame>maojanlin/gAIRRsuite<gh_stars>1-10
import argparse
import pickle
import os
import numpy as np
#from parse_contig_realign import mark_edit_region, variant_link_graph, haplotyping_link_graph, output_contig_correction
from parse_contig_realign import variant_link_graph, output_contig_correction, parse_CIGAR, parse_MD, trim_dict, find_double_pos, get_farthest_ext
from utils import get_reverse_complement
import sys
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-fs', '--fn_sam',
help = 'sam file of reads realign to contig'
)
parser.add_argument(
'-fc', '--fn_cluster_contig',
help = 'cropped contig file, corrected or not'
)
parser.add_argument(
'-for', '--fo_report',
help = 'output report file'
)
parser.add_argument(
'-foc', '--fo_corrected_alleles',
help = 'output corrected alleles fasta file'
)
args = parser.parse_args()
return args
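# Hypothetical invocation of this script (all file names are placeholders):
#   python this_script.py -fs realigned_reads.sam -fc cluster_contigs.fasta \
#       -for correction_report.txt -foc corrected_alleles.fasta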
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
def cluster_separate(fn_cluster_contig, fn_sam):
# dict_contig {}
# - keys: contig_name
# - values: [edit_histogram, cover_histogram, contig_SEQ, list_read_field[]]
dict_contig = {}
# dict_contig's initialization
with open(fn_cluster_contig, 'r') as f_c:
contig_name = ""
contig_SEQ = ""
for line in f_c:
if line[0] == '>':
if contig_name != "":
dict_contig[contig_name] = [np.zeros(len(contig_SEQ) + 1), np.zeros(len(contig_SEQ) + 1), contig_SEQ, []]
contig_name = line.strip()[1:].split()[0]
contig_SEQ = ""
else:
contig_SEQ += line.strip()
dict_contig[contig_name] = [np.zeros(len(contig_SEQ) + 1), np.zeros(len(contig_SEQ) + 1), contig_SEQ, []]
with open(fn_sam, 'r') as f_r:
read_name = ""
read_SEQ = ""
for line in f_r:
if line[0] != '@':
fields = line.split()
if fields[2] == '*':
continue
else:
contig_name = fields[2]
dict_contig[contig_name][3].append(fields)
return dict_contig
def mark_edit_region(contig_name, contig_info, ignore_S=False):
# contig_info = [edit_histogram, cov_histogram, contig_SEQ, list_read]
edit_histogram = contig_info[0]
cov_histogram = contig_info[1]
# list_read_info: [ (start_pos, end_pos, read_name, even_odd_flag, mis_region) ]
list_read_info = []
even_odd_flag = 1
list_read_field = contig_info[3]
for fields in list_read_field:
read_name = fields[0]
read_SEQ = fields[9]
cigar = fields[5]
sam_flag = int(fields[1])
# if the alignment is a supplementary alignment, skip it; it does not affect the even/odd flag
# see the BWA manual section "Supplementary Alignment" for more information
if sam_flag > 1024:
continue
S_flag = False
number, operate = parse_CIGAR(cigar)
if ignore_S and 'S' in cigar:
if operate[0] == 'S':
if number[0] >= len(read_SEQ)/15:
S_flag = True
if operate[-1] == 'S':
if number[-1] >= len(read_SEQ)/15:
S_flag = True
# if cigar == '*', means alignment is bad, pass
# if the read align to incorrect contigs, pass
if cigar == '*' or contig_name != fields[2] or S_flag:
# list_read_info.append((start_pos, end_pos, read_name, even_odd_flag, mis_region))
list_read_info.append((0, 0, read_name, even_odd_flag, [], "", read_SEQ))
if even_odd_flag == 1:
even_odd_flag = 2
else:
even_odd_flag = 1
continue
edit_dist = int(fields[11].split(':')[2]) # NM:i:2 tag
MD_tag = fields[12].split(':')[2] # MD:Z:38G2A20
start_pos = int(fields[3])
mis_region_MD = parse_MD(MD_tag)
mis_region_MD = [ele + start_pos - 1 for ele in mis_region_MD] # change to ref coordinate
mis_region_I = [] # insertion boundary region
diff_len = 0 # len contribution of D, I, and S
if 'I' in operate or 'D' in operate or 'S' in operate:
idx_I = start_pos - 1 # index in reference
for idx, op in enumerate(operate):
if op == 'I':
diff_len -= number[idx]
mis_region_I.append(idx_I)
mis_region_I.append(idx_I+1)
else:
if op == 'S':
diff_len -= number[idx]
else:
idx_I += number[idx]
if op == 'D':
diff_len += number[idx]
end_pos = start_pos + len(fields[9]) + diff_len
match_len = end_pos - start_pos
mis_region_S = []
recover_S_flag = False
if operate[0] == 'S':
left_S_len = min(number[0], start_pos-1)
if left_S_len < match_len/10: # if S len is not too long, we accept it as mismatch
mis_region_S = [pos for pos in range(start_pos-left_S_len,start_pos)]
start_pos -= left_S_len
operate[0] = 'M'
if left_S_len != number[0]:
operate = ['S'] + operate
number = [number[0]-left_S_len] + number
number[1] = left_S_len
recover_S_flag = True
if operate[-1] == 'S':
right_S_len = min(number[-1], len(cov_histogram)-end_pos)
if right_S_len < match_len/10: # if S len is not too long, we accept it as mismatch
mis_region_S += [pos for pos in range(end_pos,end_pos+right_S_len)]
end_pos += right_S_len
operate[-1] = 'M'
if right_S_len != number[-1]:
operate = operate + ['S']
number = number + [number[-1]-right_S_len]
number[-2] = right_S_len
recover_S_flag = True
if recover_S_flag:
cigar = ""
for cigar_id, element in enumerate(number):
cigar += str(element)
cigar += operate[cigar_id]
#print(read_name + '\t', start_pos, end_pos)
cov_histogram[start_pos:end_pos] += 1
mis_region = mis_region_MD + mis_region_I + mis_region_S
mis_region.sort()
edit_histogram[mis_region] += 1
# record the reads information
list_read_info.append((start_pos, end_pos, read_name, even_odd_flag, mis_region, cigar, read_SEQ))
if even_odd_flag == 1:
even_odd_flag = 2
else:
even_odd_flag = 1
return edit_histogram, cov_histogram, list_read_info
def haplotyping_link_graph(dict_link_graph, dict_var_weight, dict_link_outward, dict_link_inward, edit_region):
# sort the potential variants on the interested site, can only use these variants bases
list_pos_weight = []
print("Trimming the significant bases at interested site:")
print("Original site-base dict", dict_var_weight)
for key in sorted(dict_var_weight.keys()):
dict_part = dict_var_weight[key]
trim_dict(dict_part, 10)
list_pos_weight.append((key, sorted(dict_part.items(), key=lambda pair:pair[1], reverse=True)))
print("Final site-base list:", list_pos_weight)
eprint("#### max site-base variant #", max([len(ele[1]) for ele in list_pos_weight]))
if list_pos_weight == []:
print("There is no variant detected!")
return [], []
print("+++++++++++++++++++", "dict_link_graph", "+++++++++++++++++++")
for key in sorted(dict_link_graph.keys()):
print(key, dict_link_graph[key])
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++")
# initializing the haplotype list, the cursor, and the last_ext
haplotype_0 = [] # record the (position, base) pair of the haplotype
hap_cursor_0 = 0 # record the position got the linking information (still useless in this version)
break_flag_0 = False # the flag indicating of the haplotype is breaked
haplotype_1 = []
hap_cursor_1 = 0
break_flag_1 = False
pos_start_idx = 0
# find the first variant site with two variants
pos_start_idx, haplotype_0, haplotype_1, hap_cursor_0, hap_cursor_1 = find_double_pos(pos_start_idx, list_pos_weight, haplotype_0, haplotype_1, hap_cursor_0, hap_cursor_1)
# haplotyping from list_pos_weight:
for pos_idx in range(pos_start_idx, len(list_pos_weight)):
pos_weight = list_pos_weight[pos_idx]
position = pos_weight[0]
list_pos_base = pos_weight[1]
print("XXXXXXXXXXXXXX", position, "XXXXXXXXXXXXXXXX")
# deal with haplotype_0's outward link
dict_outward_0 = {}
if dict_link_outward.get(haplotype_0[hap_cursor_0]):
dict_outward_0 = dict_link_outward[haplotype_0[hap_cursor_0]]
trim_dict(dict_outward_0)
if position > get_farthest_ext(dict_outward_0, haplotype_0[hap_cursor_0]):
break_flag_0 = True
eprint("Haplotype 0 has a break at", haplotype_0[hap_cursor_0], "to", position)
print(dict_outward_0)
# deal with haplotype_1's outward link
print("--------------------")
dict_outward_1 = {}
if dict_link_outward.get(haplotype_1[hap_cursor_1]):
dict_outward_1 = dict_link_outward[haplotype_1[hap_cursor_1]]
trim_dict(dict_outward_1)
if position > get_farthest_ext(dict_outward_1, haplotype_1[hap_cursor_1]):
break_flag_1 = True
eprint("Haplotype 1 has a break at", haplotype_1[hap_cursor_1], "to", position)
print(dict_outward_1)
# deal with position's inward link
print("--------------------")
dict_inward_0 = {}
if dict_link_inward.get((position, list_pos_base[0][0])):
dict_inward_0 = dict_link_inward[(position, list_pos_base[0][0])]
trim_dict(dict_inward_0)
print(dict_inward_0)
#print(dict_link_graph[(position, list_pos_base[1][0])])
if len(list_pos_base) > 1:
print("--------------------")
dict_inward_1 = {}
if dict_link_inward.get((position, list_pos_base[1][0])):
dict_inward_1 = dict_link_inward[(position, list_pos_base[1][0])]
trim_dict(dict_inward_1)
print(dict_inward_1)
connect_info_0 = None
connect_info_1 = None
# There must be at least one kind of base in the position
for (outward_key, weight) in sorted(dict_outward_0.items(), key=lambda pair:pair[1], reverse=True):
if dict_inward_0.get(outward_key):
print("Potential Connect: ", outward_key, 0, 0)
connect_info_0 = (dict_outward_0[outward_key], (position, outward_key[1][1]))
break
for (outward_key, weight) in sorted(dict_outward_1.items(), key=lambda pair:pair[1], reverse=True):
if dict_inward_0.get(outward_key):
print("Potential Connect: ", outward_key, 1, 0)
connect_info_1 = (dict_outward_1[outward_key], (position, outward_key[1][1]))
break
# if there are two variants in the position
if len(list_pos_base) > 1:
for (outward_key, weight) in sorted(dict_outward_0.items(), key=lambda pair:pair[1], reverse=True):
if dict_inward_1.get(outward_key):
print("Potential Connect: ", outward_key, 0, 1)
if connect_info_0 == None or connect_info_0[0] < weight:
connect_info_0 = (dict_outward_0[outward_key], (position, outward_key[1][1]))
break
for (outward_key, weight) in sorted(dict_outward_1.items(), key=lambda pair:pair[1], reverse=True):
if dict_inward_1.get(outward_key):
print("Potential Connect: ", outward_key, 1, 1)
if connect_info_1 == None or connect_info_1[0] < weight:
connect_info_1 = (dict_outward_1[outward_key], (position, outward_key[1][1]))
break
# the case that two haplotypes may collapse into one
if connect_info_0 and connect_info_1:
if connect_info_0[1] == connect_info_1[1]: # two haplotypes are collapsed
record_info_0 = [connect_info_0[1]]
record_info_1 = [connect_info_1[1]]
for redouble_idx in range(pos_idx, len(list_pos_weight)):
rd_pos_weight = list_pos_weight[redouble_idx]
rd_position = rd_pos_weight[0]
rd_list_pos_base = rd_pos_weight[1]
if(len(rd_list_pos_base)) >= 2: # if there are two variants at the site
# call the potential connections
last_info_0 = haplotype_0[hap_cursor_0]
last_info_1 = haplotype_1[hap_cursor_1]
dict_info_0 = dict_link_graph[last_info_0]
dict_info_1 = dict_link_graph[last_info_1]
# connect them
rd_info_0 = None
rd_info_1 = None
for rd_link_info, rd_weight in sorted(dict_info_0.items(), key=lambda pair:pair[1], reverse=True):
variant_flag = False
for info_pair in rd_link_info[1]:
tmp_rd_info = []
if info_pair == connect_info_0[1]:
variant_flag = True
tmp_rd_info.append(info_pair)
if variant_flag:
tmp_rd_info.append(info_pair)
if info_pair[0] == rd_position:
rd_info_0 = tmp_rd_info
break
if rd_info_0:
break
for rd_link_info, rd_weight in sorted(dict_info_1.items(), key=lambda pair:pair[1], reverse=True):
    variant_flag = False
for info_pair in rd_link_info[1]:
tmp_rd_info = []
if info_pair == connect_info_1[1]:
variant_flag = True
tmp_rd_info.append(info_pair)
if variant_flag:
tmp_rd_info.append(info_pair)
if info_pair[0] == rd_position:
rd_info_1 = tmp_rd_info
break
if rd_info_1:
break
print("connect_info_0", record_info_0)
print("connect_info_1", record_info_1)
print("rd_info_0", rd_info_0)
print("rd_info_1", rd_info_1)
if rd_info_0:
record_info_0 += rd_info_0
if rd_info_1:
record_info_1 += rd_info_1
if rd_info_0 != rd_info_1:
if rd_info_0:
pass
else:
break_flag_0 = True
if rd_info_1:
pass
else:
break_flag_1 = True
break
haplotype_0 += record_info_0
hap_cursor_0 += len(record_info_0)
haplotype_1 += record_info_1
hap_cursor_1 += len(record_info_1)
print("Crossing the single base variant site...")
continue
# update the nodes if the connection is found
if connect_info_0:
haplotype_0.append(connect_info_0[1])
hap_cursor_0 += 1
if break_flag_1 and len(list_pos_base) >1:
for idx in range(2):
potential_base = list_pos_base[idx][0]
if potential_base != connect_info_0[1][1]:
eprint("Link rebuilt on Haplotype 1 at", haplotype_1[hap_cursor_1] , "to", position)
haplotype_1.append((position, potential_base))
hap_cursor_1 += 1
break_flag_1 = False
break
if connect_info_1:
haplotype_1.append(connect_info_1[1])
hap_cursor_1 += 1
if break_flag_0 and len(list_pos_base) >1:
for idx in range(2):
potential_base = list_pos_base[idx][0]
if potential_base != connect_info_1[1][1]:
eprint("Link rebuilt on Haplotype 0 at", haplotype_0[hap_cursor_0] , "to", position)
haplotype_0.append((position, potential_base))
hap_cursor_0 += 1
break_flag_0 = False
break
if break_flag_0 and break_flag_1:
eprint("BREAKING LINKS FOR BOTH HAPLOTYPE AT", position, "!!!!")
eprint("Breaking links cannot be resolved, we guess...")
haplotype_0.append((position, list_pos_base[0][0]))
hap_cursor_0 += 1
if len(list_pos_base) > 1:
haplotype_1.append((position, list_pos_base[1][0]))
hap_cursor_1 += 1
print(haplotype_0)
print(haplotype_1)
return haplotype_0, haplotype_1
if __name__ == '__main__':
args = parse_args()
fn_sam = args.fn_sam
fn_cluster_contig = args.fn_cluster_contig
fo_report = args.fo_report
fo_corrected_alleles = args.fo_corrected_alleles
dict_contig = cluster_separate(fn_cluster_contig, fn_sam)
for contig_name, contig_info in sorted(dict_contig.items()):
#parse the sam file and generate the edit/coverage histograms and per-read info
edit_histogram, cov_histogram, list_read_info = mark_edit_region(contig_name, contig_info)
#determine the regions that contain alternative flanking sequence
edit_region = []
for idx, ele in enumerate(edit_histogram):
print(str(idx) + ':\t' + str(cov_histogram[idx]) + '\t' + str(ele))
if ele > cov_histogram[idx]/4:
edit_region.append(idx)
print(contig_name, edit_region)
contig_SEQ = dict_contig[contig_name][2]
interest_region = "0-" + str(len(contig_SEQ))
interest_edit_region = edit_region
if interest_edit_region != [] and min(cov_histogram[1:]) > 20:
print("=========== allele correction ==============")
eprint("CORRECT", contig_name.split('|')[1], min(cov_histogram[1:]), interest_edit_region)
dict_link_graph, dict_var_weight, dict_link_outward, dict_link_inward = variant_link_graph(interest_edit_region, list_read_info)
haplotype_0, haplotype_1 = haplotyping_link_graph(dict_link_graph, dict_var_weight, dict_link_outward, dict_link_inward, interest_region)
#output_contig_correction(contig_SEQ, region_st, region_ed, haplotype_0, haplotype_1, contig_name, corrected_contig_output_file)
output_contig_correction(contig_SEQ, 0, len(contig_SEQ), haplotype_0, haplotype_1, contig_name, fo_corrected_alleles, "/novel")
elif interest_edit_region != []:
eprint("Deficient", contig_name.split('|')[1], min(cov_histogram[1:]), interest_edit_region)
print("=== cov not efficient:", min(cov_histogram[1:]), "=======")
else:
eprint("No variant", contig_name.split('|')[1], min(cov_histogram[1:]), interest_edit_region)
print("============ No novel allele ===============")
|
StarcoderdataPython
|
4818285
|
__author__ = 'mla'
# import pymysql.cursors
# 2 import mysql.connector
# 3 from fixture.db import DbFixture
# 4 CHECKING ORM for groups
from fixture.orm import ORMFixture
# 5 CHECKING ORM for contacts
# 6 CHECKING ORM for contacts in group
from model.group import Group
# 7 CHECKING ORM for get_contacts_not_in_group
# connection = pymysql.connect(host="127.0.0.1", database="addressbook", user="root", password="")
# 2 connection = mysql.connector.connect(host="127.0.0.1", database="addressbook", user="root", password="")
# 3 db = DbFixture(host="127.0.0.1", database="addressbook", user="root", password="")
db = ORMFixture(host="127.0.0.1", database="addressbook", user="root", password="")
try:
# 2 cursor = connection.cursor()
# 2 cursor.execute("select * from group_list")
# 2 for row in cursor.fetchall():
# 2 print(row)
# groups = db.get_group_list()
# for group in groups:
# print(group)
# print(len(groups))
# 3 contacts = db.get_contact_list()
# 3 for contact in contacts:
# 3 print(contact)
# 3 print(len(contacts))
# 4 l = db.get_group_list()
# 4 for item in l:
# 4 print(item)
# 4 print(len(l))
# 5 l = db.get_contact_list()
# 5 for item in l:
# 5 print(item)
# 5 print(len(l))
# 6 l = db.get_contacts_in_group(Group(id="136"))
# 6 for item in l:
# 6 print(item)
# 6 print(len(l))
l = db.get_contacts_not_in_group(Group(id="136"))
for item in l:
print(item)
print(len(l))
finally:
# 2 connection.close()
pass # db.destroy()
|
StarcoderdataPython
|
3326952
|
<reponame>khchine5/lino
"""
Deserves documentation.
"""
#~ import lino.changes
#~ from lino.utils import gendoc
#~ print [unicode(e) for e in gendoc.ENTRIES_LIST]
|
StarcoderdataPython
|
180312
|
<gh_stars>1-10
# Copyright 2021 The NPLinker Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
from numba import jit
class MSSpectrum(object):
def __init__(self, mgf_dict=None, spec=None):
self.compound = None
self.formula = None
self.ionisation = None
self.raw_parentmass = None
self.filename = None
self.id = None
self.raw_spectrum = None
self.output_spectrum = None
# self.filter is a function that can be used
# to filter the raw spectrum, e.g. denoising,
# removal of adduct, etc.
self.filter = None
self.normalise = False
self.correct_for_ionisation = False
if mgf_dict is not None:
self.init_from_mgf(mgf_dict)
if spec is not None:
self.init_from_spec(spec)
def init_from_spec(self, spec):
self.id = spec.id
self.raw_parentmass = spec.parent_mz
self.raw_spectrum = numpy.array(spec.peaks)
# TODO this is a temporary default for the Crusemann data
# should check for it in the mgf in metabolomics.py and store
# in the Spectrum object if found
# (TODO what is the MGF field name for this??)
self.ionisation = '[M+H]+'
def init_from_mgf(self, mgf_dict):
self.output_spectrum = None
self.filename = mgf_dict['params']['filename']
self.compound = None
self.formula = None
self.ionisation = mgf_dict['params']['charge']
self.raw_parentmass = mgf_dict['params']['pepmass'][0]
self.inchi = None
self.id = None
if 'smiles' in mgf_dict['params']:
self.smiles = mgf_dict['params']['smiles']
spec = []
for a in zip(mgf_dict['m/z array'], mgf_dict['intensity array']):
spec.append(a)
self.raw_spectrum = numpy.array(spec)
def load(self, filename):
self.output_spectrum = None
self.filename = filename
spectrum = []
with open(filename, 'r') as f:
for line in f.readlines():
line = line.strip()
if len(line) == 0:
pass
elif line.startswith('>compound'):
self.compound = strip_leading(line)
elif line.startswith('>formula'):
self.formula = strip_leading(line)
elif line.startswith('>ionization'):
self.ionisation = strip_leading(line)
elif line.startswith('>parentmass'):
self.raw_parentmass = float(strip_leading(line))
elif line.startswith('>'):
pass
elif line.startswith('#inchi'):
self.inchi = strip_leading(line)
elif line.startswith('#SpectrumID'):
self.id = strip_leading(line)
elif line.startswith('#'):
pass
else:
mass, charge = line.split()
mass = float(mass)
charge = float(charge)
spectrum.append((mass, charge))
self.raw_spectrum = numpy.array(spectrum)
@property
def parentmass(self):
if self.correct_for_ionisation:
return self.raw_parentmass - self.ionisation_mass
else:
return self.raw_parentmass
@property
def spectrum(self):
if self.normalise:
return _normalise_spectrum(self.unnormalised_spectrum)
else:
return self.unnormalised_spectrum
@property
def unnormalised_spectrum(self):
if self.filter is None:
if self.correct_for_ionisation:
return self.shifted_spectrum
else:
return self.raw_spectrum
else:
if self.output_spectrum is None:
filtered_spectrum = self.filter(self)
if len(filtered_spectrum) != 0:
self.output_spectrum = self.filter(self)
else:
self.output_spectrum = self.shifted_spectrum
return self.output_spectrum
@property
def shifted_spectrum(self):
return self.raw_spectrum - [self.ionisation_mass, 0]
@property
def ionisation_mass(self):
return IONISATION_MASSES[self.ionisation]
PROTON_MASS = 1.00727645199076
IONISATION_MASSES = {
"[M+H]+": PROTON_MASS,
"[M+H-H2O]+": PROTON_MASS - 18.01056468638,
"[M+K]+": 38.963158,
"[M+Na]+": 22.989218
}
def _normalise_spectrum(spectrum, peak=100.0):
_, max_peak = numpy.max(spectrum, axis=0)
return spectrum * [1, peak / max_peak]
def strip_leading(line):
return ' '.join(line.split()[1:])
def _ppk(i_peaks, j_peaks, sm, si):
X1 = i_peaks
X2 = j_peaks
# N1 = numpy.size(X1, 0); N2 = numpy.size(X2, 0)
N1 = X1.shape[0]
N2 = X2.shape[0]
if N1 == 0 or N2 == 0:
raise Exception("[ERROR]: No peaks when computing the kernel (try not cleaning the peaks).")
constant = 1.0/(N1*N2)*0.25/(numpy.pi*numpy.sqrt(sm*si))
mass_term = 1.0/sm * numpy.power(numpy.kron(X1[:, 0].flatten(), numpy.ones(N2)) - numpy.kron(numpy.ones(N1), X2[:, 0].flatten()), 2)
inte_term = 1.0/si * numpy.power(numpy.kron(X1[:, 1].flatten(), numpy.ones(N2)) - numpy.kron(numpy.ones(N1), X2[:, 1].flatten()), 2)
return constant*numpy.sum(numpy.exp(-0.25*(mass_term + inte_term)))
def ppk(*args):
# t0 = time.time()
# a = ppk_loop(*args)
# = _ppk(*args)
# t1 = time.time()
b = ppk_limit(*args)
# t2 = time.time()
# print(t1-t0, t2-t1)
return b
@jit(nopython=True)
def ppk_loop(spectrum_1, spectrum_2, sigma_mass, sigma_int):
# the inputs are really sigma^2, though
# sigma_mass = 0.00001
# sigma_int = 100000
sigma_array = numpy.array([[sigma_mass, 0], [0, sigma_int]])
sigma_inv = numpy.linalg.inv(sigma_array)
len_1 = spectrum_1.shape[0]
len_2 = spectrum_2.shape[0]
constant_term = 1.0 / (len_1 * len_2 * 4 * numpy.pi * numpy.sqrt(sigma_mass * sigma_int))
sum_term = 0
# for p_1, p_2 in itertools.product(spectrum_1, spectrum_2):
for p_1_idx in range(len_1):
p_1 = spectrum_1[p_1_idx, :]
for p_2_idx in range(len_2):
p_2 = spectrum_2[p_2_idx, :]
d = p_1 - p_2
sum_term += numpy.exp(-0.25 * numpy.sum(d * sigma_inv * d))
# print(sum_term)
# print(numpy.sum(sum_term))
return constant_term * sum_term
@jit(nopython=True)
def ppk_limit(spectrum_1, spectrum_2, sigma_mass, sigma_int):
# the inputs are really sigma^2, though
# sigma_mass = 0.00001
# sigma_int = 100000
sigma_array = numpy.array([[sigma_mass, 0], [0, sigma_int]])
sigma_inv = numpy.linalg.inv(sigma_array)
len_1 = spectrum_1.shape[0]
len_2 = spectrum_2.shape[0]
constant_term = 1.0 / (len_1 * len_2 * 4 * numpy.pi * numpy.sqrt(sigma_mass * sigma_int))
sum_term = 0
tol = 5 * numpy.sqrt(sigma_mass)
for p_1_idx, p_2_idx in find_pairs(spectrum_1, spectrum_2, tol):
p_1 = spectrum_1[p_1_idx, :]
p_2 = spectrum_2[p_2_idx, :]
d = p_1 - p_2
sum_term += numpy.exp(-0.25 * numpy.sum(d * sigma_inv * d))
# print(sum_term)
# print(numpy.sum(sum_term))
# print(constant_term, sum_term)
return constant_term * sum_term
@jit(nopython=True)
def find_pairs(spec1, spec2, tol, shift=0):
matching_pairs = []
spec2_lowpos = 0
spec2_length = len(spec2)
for idx in range(len(spec1)):
mz, intensity = spec1[idx, :]
while spec2_lowpos < spec2_length and spec2[spec2_lowpos][0] + shift < mz - tol:
spec2_lowpos += 1
if spec2_lowpos == spec2_length:
break
spec2_pos = spec2_lowpos
while spec2_pos < spec2_length and spec2[spec2_pos][0] + shift < mz + tol:
matching_pairs.append((idx, spec2_pos))
spec2_pos += 1
return matching_pairs
def ppk_nloss(spec1, spec2, prec1, prec2, sigma_mass, sigma_int):
spec1_loss = ([prec1, 0] - spec1) * [1, -1]
spec2_loss = ([prec2, 0] - spec2) * [1, -1]
k_nloss = ppk(spec1_loss[::-1], spec2_loss[::-1], sigma_mass, sigma_int)
return k_nloss
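if __name__ == "__main__":
    # Hedged smoke test (not part of the original module): compare two tiny synthetic
    # spectra with the probability product kernel above. The sigma values mirror the
    # commented defaults inside ppk_loop/ppk_limit; the peak lists and parent masses
    # are made up for illustration only.
    spec_a = numpy.array([[100.0, 50.0], [200.0, 100.0]])
    spec_b = numpy.array([[100.001, 45.0], [250.0, 80.0]])
    print(ppk(spec_a, spec_b, 0.00001, 100000.0))
    print(ppk_nloss(spec_a, spec_b, 300.0, 350.0, 0.00001, 100000.0))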
|
StarcoderdataPython
|
1795333
|
from query_filter_builder import v0_to_v01
def test_v0_to_v1():
v0_object = {
"col1": "asd",
"col2": "~asd",
"col3": [1, 2, 3],
"col4": "<5>=3.2"
}
correct_v1_object = {
"version": 0.1,
"filters": [
{
"col": "col1",
"value": "asd"
},
{
"col": "col2",
"value": "~asd",
},
{
"col": "col3",
"value": [1, 2, 3]
},
{
"col": "col4",
"value": "<5>=3.2"
}
]
}
test_v1_object = v0_to_v01(v0_object)
assert test_v1_object == correct_v1_object
|
StarcoderdataPython
|
14148
|
<filename>rnacentral_pipeline/rnacentral/r2dt/should_show.py
# -*- coding: utf-8 -*-
"""
Copyright [2009-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import csv
import enum
import logging
import typing as ty
from pathlib import Path
import joblib
from more_itertools import chunked
import pandas as pd
from pypika import Table, Query
import psycopg2
import psycopg2.extras
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
LOGGER = logging.getLogger(__name__)
SOURCE_MAP = {
"crw": 0,
"ribovision": 1,
"gtrnadb": 2,
"rnase_p": 3,
"rfam": 4,
}
@enum.unique
class Attributes(enum.Enum):
SourceIndex = "source_index"
SequenceLength = "sequence_length"
DiagramSequenceLength = "diagram_sequence_length"
ModelLength = "model_length"
ModelBasepairCount = "model_basepair_count"
DiagramBps = "diagram_bps"
DiagramModelLength = "diagram_model_length"
DiagramOverlapCount = "diagram_overlap_count"
@classmethod
def model_columns(cls) -> ty.List[str]:
return [attr.column_name() for attr in cls]
def column_name(self) -> str:
return self.value
MODEL_COLUMNS: ty.List[str] = Attributes.model_columns()
def chunked_query(
ids: ty.Iterable[str], query_builder, db_url: str, chunk_size=100
) -> ty.Iterable[ty.Dict[str, ty.Any]]:
conn = psycopg2.connect(db_url)
for chunk in chunked(ids, chunk_size):
sql = str(query_builder(chunk))
with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as cur:
cur.execute(sql)
for result in cur:
yield dict(result)
def fetch_modeled_data(
all_ids: ty.Iterable[str], db_url: str, chunk_size=100
) -> ty.Iterable[ty.Dict[str, ty.Any]]:
rna = Table("rna")
ss = Table("rnc_secondary_structure_layout")
sm = Table("rnc_secondary_structure_layout_models")
def build_query(ids):
return (
Query.from_(rna)
.select(
rna.upi.as_("urs"),
rna.len.as_("sequence_length"),
sm.model_source,
ss.sequence_start.as_("diagram_sequence_start"),
ss.sequence_stop.as_("diagram_sequence_stop"),
ss.basepair_count.as_("diagram_bps"),
ss.model_start.as_("diagram_model_start"),
ss.model_stop.as_("diagram_model_stop"),
sm.model_length,
sm.model_basepair_count,
ss.overlap_count.as_("diagram_overlap_count"),
)
.join(ss)
.on(ss.urs == rna.upi)
.join(sm)
.on(sm.id == ss.model_id)
.where(ss.urs.isin(ids))
)
seen: ty.Set[str] = set()
results = chunked_query(all_ids, build_query, db_url, chunk_size=chunk_size)
for result in results:
if any(v is None for v in result.values()):
continue
yield result
seen.add(result["urs"])
for urs in all_ids:
if urs not in seen:
LOGGER.warn("Missed loading %s", urs)
def infer_columns(frame: pd.DataFrame):
frame["diagram_sequence_length"] = (
frame["diagram_sequence_stop"] - frame["diagram_sequence_start"]
)
frame["diagram_model_length"] = (
frame["diagram_model_stop"] - frame["diagram_model_start"]
)
frame["source_index"] = frame.model_source.map(SOURCE_MAP)
if frame["source_index"].isnull().any():
raise ValueError("Could not build source_index for all training data")
def fetch_training_data(handle: ty.IO, db_url: str) -> pd.DataFrame:
ids = []
training = {}
for (urs, flag) in csv.reader(handle):
ids.append(urs)
if flag == "1":
training[urs] = True
elif flag == "0":
training[urs] = False
else:
raise ValueError(f"Unknown flag {flag}")
filled = []
for metadata in fetch_modeled_data(ids, db_url):
urs = metadata["urs"]
if urs not in training:
raise ValueError(f"Got an extra entry, somehow {metadata}")
metadata["valid"] = training[urs]
filled.append(metadata)
training = pd.DataFrame.from_records(filled)
infer_columns(training)
return training
def train(handle, db_url, cross_validation=5, test_size=0.4) -> RandomForestClassifier:
data = fetch_training_data(handle, db_url)
X_train, X_test, y_train, y_test = train_test_split(
data[MODEL_COLUMNS].to_numpy(), data["valid"].to_numpy(), test_size=test_size
)
clf = RandomForestClassifier(min_samples_split=5)
scores = cross_val_score(clf, X_train, y_train, cv=cross_validation)
LOGGER.info("%s fold cross validation scores: %s", cross_validation, scores)
clf.fit(X_train, y_train)
LOGGER.info("Test data (%f) scoring %s", test_size, clf.score(X_test, y_test))
return clf
def from_result(clf, result) -> bool:
predictable = {}
for attribute in Attributes:
value = attribute.r2dt_result_value(result)
predictable[attribute.column_name()] = [value]
predictable = pd.DataFrame.from_records(predictable)
return clf.predict(predictable)[0]
def write(model_path: Path, handle: ty.IO, db_url: str, output: ty.IO):
model = joblib.load(model_path)
ids = [r[0] for r in csv.reader(handle)]
modeled = fetch_modeled_data(ids, db_url)
frame = pd.DataFrame.from_records(modeled)
infer_columns(frame)
predicted = model.predict(frame[MODEL_COLUMNS].to_numpy())
to_write = pd.DataFrame()
to_write["urs"] = frame["urs"]
to_write["should_show"] = predicted.astype(int)
to_write.to_csv(output, index=False)
def write_model(handle: ty.IO, db_url: str, output: Path):
joblib.dump(train(handle, db_url), output)
def write_training_data(handle: ty.IO, db_url: str, output: ty.IO):
ids = []
for row in csv.reader(handle):
ids.append(row[0])
modeled = list(fetch_modeled_data(ids, db_url))
writer = csv.DictWriter(output, fieldnames=modeled[0].keys())
writer.writeheader()
writer.writerows(modeled)
def convert_sheet(handle: ty.IO, output: ty.IO):
converted = []
for row in csv.DictReader(handle):
urs = row["urs"]
raw_should_show = row["Labeled Should show"]
if not raw_should_show:
    LOGGER.info("No value for %s", urs)
    continue
raw_should_show = raw_should_show.lower()
if raw_should_show == "true":
should_show = "1"
elif raw_should_show == "false":
should_show = "0"
else:
LOGGER.warn("Unknown should show in %s", row)
continue
converted.append((urs, should_show))
converted.sort(key=lambda r: r[0])
writer = csv.writer(output)
writer.writerows(converted)
def inspect_data(data, db_url: str) -> ty.Iterable[ty.Dict[str, ty.Any]]:
def build_query(ids):
ss = Table("rnc_secondary_structure_layout")
sm = Table("rnc_secondary_structure_layout_models")
pre = Table("rnc_rna_precomputed")
return (
Query.from_(ss)
.join(sm)
.on(sm.id == ss.model_id)
.join(pre)
.on(pre.urs == sm.urs)
.select(
sm.model_source,
sm.model_name,
sm.model_so_term,
)
.where(ss.urs.isin(ids))
.where(pre.taxid.isnotnull)
)
mapping = {d[0]: d for d in data}
seen: ty.Set[str] = set()
results = chunked_query(data, build_query, db_url)
for result in results:
if any(v is None for v in result.values()):
continue
yield {
"urs": result["urs"],
"link": f"https://rnacentral.org/rna/{result['urs']}",
"model_source": result["model_source"],
"model_name": result["model_name"],
"model_so_term": result["model_so_term"],
"Labeled Should show": result["urs"],
}
seen.add(result["urs"])
for urs in mapping.keys():
if urs not in seen:
LOGGER.warn("Missed loading %s", urs)
def write_inspect_data(handle: ty.IO, db_url: str, output: ty.IO):
data = list(csv.reader(handle))
inspect = list(inspect_data(data, db_url))
writer = csv.DictWriter(output, fieldnames=inspect[0].keys())
writer.writeheader()
writer.writerows(inspect)
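# Rough end-to-end sketch of how these helpers chain together (hypothetical paths; the
# real orchestration lives in the pipeline's CLI wrappers, not in this module):
#
#   with open("training_labels.csv") as handle:
#       write_model(handle, db_url, Path("should_show.joblib"))
#   with open("urs_ids.csv") as handle, open("should_show.csv", "w") as out:
#       write(Path("should_show.joblib"), handle, db_url, out)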
|
StarcoderdataPython
|
1605325
|
#coding=utf-8
import requests
import argparse
parser = argparse.ArgumentParser(description="Zhihu_markdown_eq_converter")
parser.add_argument("--file", help="your md file path", default="rl.md")
args = parser.parse_args()
with open(args.file, 'r', encoding='utf-8') as f:
data = f.readlines()
data_str = ' '.join(data)
# $$..$$
data_str = data_str.replace("$$", "====")
# $..$
def parse_data(data_str, renderer=0):
index = 0
length = len(data_str)
modifications = {}
count = 0
while index < length:
char = data_str[index]
if char == '$' and data_str[index-1]!='\\':
start_index = index+1
index += 1
while data_str[index]!='$':
index += 1
end_index = index
equation = data_str[start_index:end_index].replace('\n','')
equation = requests.utils.quote(r"{}".format(equation))
href = f"https://www.zhihu.com/equation?tex={equation}"
if renderer==0:
img = f'''<img src="{href}" eeimg="1">'''
else:
img = f'''<p align="center"><img src="{href}" eeimg="1"></p>'''
modifications[count] = {'start_index':start_index, 'end_index':end_index, 'img':img}
count += 1
index += 1
else:
index += 1
modifications = dict(sorted(modifications.items(), key=lambda x: x[1]['start_index']))
new_data_str = ''
prev_index = 0
for _, key in enumerate(modifications):
start_index = modifications[key]['start_index']
end_index = modifications[key]['end_index']
img = modifications[key]['img']
new_data_str = new_data_str + data_str[prev_index:start_index-1] + img
prev_index = end_index + 1
new_data_str += data_str[prev_index: ]
return new_data_str
data_str = parse_data(data_str,0)
data_str = data_str.replace('====','$')
data_str = parse_data(data_str,1)
with open('zhihu_'+args.file, 'w', encoding='utf') as f:
f.write(data_str)
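# Worked example (hypothetical input): an inline equation "$E = mc^2$" in the source
# markdown becomes, after parse_data(), an inline Zhihu image tag roughly like
#   <img src="https://www.zhihu.com/equation?tex=E%20%3D%20mc%5E2" eeimg="1">
# while a display equation ($$...$$, temporarily rewritten to ====...====) is wrapped
# in a centered <p> tag by the second parse_data() pass.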
|
StarcoderdataPython
|
3332044
|
import time
from datetime import date
# Third-Party
from algoliasearch_django.decorators import disable_auto_indexing
from openpyxl import Workbook
from openpyxl.writer.excel import save_virtual_workbook
from phonenumber_field.validators import validate_international_phonenumber
# Django
from django.apps import apps
from django.db.models import Manager
from django.db.models import Q
from django.db.models import F
from django.db.models import Min
from django.db.models import Max
from django.db.models import When
from django.db.models import Subquery
from django.db.models import OuterRef
from django.db.models import CharField
from django.db.models import IntegerField
from django.db.models import DateField
from django.db.models import Case
from django.core.files.base import ContentFile
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
User = get_user_model()
class PersonManager(Manager):
def update_or_create_from_human(self, human):
# Extract
if isinstance(human, dict):
mc_pk = human['id']
first_name = human['first_name']
middle_name = human['middle_name']
last_name = human['last_name']
nick_name = human['nick_name']
email = human['email']
birth_date = human['birth_date']
home_phone = human['home_phone']
cell_phone = human['cell_phone']
work_phone = human['work_phone']
bhs_id = human['bhs_id']
gender = human['gender']
part = human['part']
mon = human['mon']
is_deceased = human['is_deceased']
is_honorary = human['is_honorary']
is_suspended = human['is_suspended']
is_expelled = human['is_expelled']
else:
mc_pk = str(human.id)
first_name = human.first_name
middle_name = human.middle_name
last_name = human.last_name
nick_name = human.nick_name
email = human.email
birth_date = human.birth_date
home_phone = human.home_phone
cell_phone = human.cell_phone
work_phone = human.work_phone
bhs_id = human.bhs_id
gender = human.gender
part = human.part
mon = human.mon
is_deceased = human.is_deceased
is_honorary = human.is_honorary
is_suspended = human.is_suspended
is_expelled = human.is_expelled
# Transform
inactive = any([
is_deceased,
is_honorary,
is_suspended,
is_expelled,
])
if inactive:
status = self.model.STATUS.inactive
else:
status = self.model.STATUS.active
prefix = first_name.rpartition('Dr.')[1].strip()
first_name = first_name.rpartition('Dr.')[2].strip()
# Split a recognised suffix off last_name; capture the suffix before truncating the
# name, and check 'III' before 'II' so the longer token wins.
suffix = ""
for token in ('III', 'II', 'DDS', 'Sr', 'Jr', 'M.D.'):
    if token in last_name:
        suffix = last_name.partition(token)[1].strip()
        last_name = last_name.partition(token)[0].strip()
        break
if nick_name == first_name:
nick_name = ""
try:
validate_international_phonenumber(home_phone)
except ValidationError:
home_phone = ""
try:
validate_international_phonenumber(cell_phone)
except ValidationError:
cell_phone = ""
try:
validate_international_phonenumber(work_phone)
except ValidationError:
work_phone = ""
if gender:
gender = getattr(self.model.GENDER, gender, None)
else:
gender = None
if part:
part = getattr(self.model.PART, part, None)
else:
part = None
try:
validate_international_phonenumber(home_phone)
except ValidationError:
home_phone = ""
try:
validate_international_phonenumber(cell_phone)
except ValidationError:
cell_phone = ""
try:
validate_international_phonenumber(work_phone)
except ValidationError:
work_phone = ""
is_deceased = bool(is_deceased)
defaults = {
'status': status,
'prefix': prefix,
'first_name': first_name,
'middle_name': middle_name,
'last_name': last_name,
'suffix': suffix,
'nick_name': nick_name,
'email': email,
'birth_date': birth_date,
'home_phone': home_phone,
'cell_phone': cell_phone,
'work_phone': work_phone,
'bhs_id': bhs_id,
'gender': gender,
'part': part,
'is_deceased': is_deceased,
'mon': mon,
}
# Update or create
person, created = self.update_or_create(
mc_pk=mc_pk,
defaults=defaults,
)
return person, created
def delete_orphans(self, humans):
# Delete Orphans
orphans = self.filter(
mc_pk__isnull=False,
).exclude(
mc_pk__in=humans,
)
t = orphans.count()
orphans.delete()
return t
def export_orphans(self, cursor=None):
ps = self.filter(
email__isnull=True,
user__isnull=False,
)
if cursor:
ps = ps.filter(
modified__gte=cursor,
)
return ps
def export_adoptions(self, cursor=None):
ps = self.filter(
email__isnull=False,
user__isnull=True,
)
if cursor:
ps = ps.filter(
modified__gte=cursor,
)
return ps
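# Hypothetical usage sketch (assumes a Person model whose default manager is the
# PersonManager above and a `human` record shaped like the dict handled in
# update_or_create_from_human):
#
#   person, created = Person.objects.update_or_create_from_human(human)
#   purged = Person.objects.delete_orphans(current_human_ids)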
class GroupManager(Manager):
def update_or_create_from_structure(self, structure):
# Extract
if isinstance(structure, dict):
mc_pk = structure['id']
name = structure['name']
kind = structure['kind']
gender = structure['gender']
division = structure['division']
bhs_id = structure['bhs_id']
legacy_code = structure['chapter_code']
website = structure['website']
email = structure['email']
main_phone = structure['phone']
fax_phone = structure['fax']
facebook = structure['facebook']
twitter = structure['twitter']
youtube = structure['youtube']
pinterest = structure['pinterest']
flickr = structure['flickr']
instagram = structure['instagram']
soundcloud = structure['soundcloud']
preferred_name = structure['preferred_name']
visitor_information = structure['visitor_information']
established_date = structure['established_date']
status_id = structure['status_id']
parent_pk = structure['parent_id']
else:
mc_pk = str(structure.id)
name = structure.name
kind = structure.kind
gender = structure.gender
division = structure.division
bhs_id = structure.bhs_id
legacy_code = structure.chapter_code
website = structure.website
email = structure.email
main_phone = structure.phone
fax_phone = structure.fax
facebook = structure.facebook
twitter = structure.twitter
youtube = structure.youtube
pinterest = structure.pinterest
flickr = structure.flickr
instagram = structure.instagram
soundcloud = structure.soundcloud
preferred_name = structure.preferred_name
visitor_information = structure.visitor_information
established_date = structure.established_date
status_id = structure.status_id
parent_pk = structure.parent_id
# Transform
status_map = {
'64ad817f-f3c6-4b09-a1b0-4bd569b15d03': self.model.STATUS.inactive, # revoked
'd9e3e257-9eca-4cbf-959f-149cca968349': self.model.STATUS.inactive, # suspended
'6e3c5cc6-0734-4edf-8f51-40d3a865a94f': self.model.STATUS.inactive, # merged
'bd4721e7-addd-4854-9888-8a705725f748': self.model.STATUS.inactive, # closed
'e04744e6-b743-4247-92c2-2950855b3a93': self.model.STATUS.inactive, # expired
'55a97973-02c3-414a-bbef-22181ad46e85': self.model.STATUS.active, # pending
'bb1ee6f6-a2c5-4615-b6ad-76130c37b1e6': self.model.STATUS.active, # pending voluntary
'd7102af8-013a-40e7-bc85-0b00766ed124': self.model.STATUS.active, # awaiting
'f3facc00-1990-4c68-9052-39e066906a38': self.model.STATUS.active, # prospective
'4bfee76f-3110-4c32-bade-e5044fdd5fa2': self.model.STATUS.active, # licensed
'7b9e5e34-a7c5-4f1e-9fc5-656caa74b3c7': self.model.STATUS.active, # active
}
status = status_map.get(status_id, None)
# Re-construct dangling article
name = name.strip() if name else ""
parsed = name.partition(", The")
name = "The {0}".format(parsed[0]) if parsed[1] else parsed[0]
preferred_name = "{0} (NAME APPROVAL PENDING)".format(preferred_name.strip()) if preferred_name else ''
name = name if name else preferred_name
if not name:
name = "(UNKNOWN)"
# AIC
aic_map = {
503061: "Signature",
500983: "After Hours",
501972: "Main Street",
501329: "Forefront",
500922: "Instant Classic",
304772: "Musical Island Boys",
500000: "Masterpiece",
501150: "Ringmasters",
317293: "Old School",
286100: "Storm Front",
500035: "Crossroads",
297201: "OC Times",
299233: "Max Q",
302244: "Vocal Spectrum",
299608: "Realtime",
6158: "Gotcha!",
2496: "Power Play",
276016: "Four Voices",
5619: "Michigan Jake",
6738: "Platinum",
3525: "FRED",
5721: "Revival",
2079: "Yesteryear",
2163: "Nightlife",
4745: "Marquis",
3040: "Joker's Wild",
1259: "Gas House Gang",
2850: "Keepsake",
1623: "The Ritz",
3165: "Acoustix",
1686: "Second Edition",
492: "Chiefs of Staff",
1596: "Interstate Rivals",
1654: "Rural Route 4",
406: "The New Tradition",
1411: "Rapscallions",
1727: "Side Street Ramblers",
545: "Classic Collection",
490: "Chicago News",
329: "Boston Common",
4034: "Grandma's Boys",
318: "Bluegrass Student Union",
362: "Most Happy Fellows",
1590: "Innsiders",
1440: "Happiness Emporium",
1427: "Regents",
627: "Dealer's Choice",
1288: "Golden Staters",
1275: "Gentlemen's Agreement",
709: "Oriole Four",
711: "Mark IV",
2047: "Western Continentals",
1110: "Four Statesmen",
713: "Auto Towners",
715: "Four Renegades",
1729: "Sidewinders",
718: "Town and Country 4",
719: "Gala Lads",
1871: "The Suntones",
722: "Evans Quartet",
724: "Four Pitchikers",
726: "Gaynotes",
729: "Lads of Enchantment",
731: "Confederates",
732: "Four Hearsemen",
736: "The Orphans",
739: "Vikings",
743: "Four Teens",
746: "Schmitt Brothers",
748: "Buffalo Bills",
750: "Mid-States Four",
753: "Pittsburghers",
756: "Doctors of Harmony",
759: "Garden State Quartet",
761: "Misfits",
764: "Harmony Halls",
766: "Four Harmonizers",
770: "Elastic Four",
773: "Chord Busters",
775: "Flat Foot Four",
776: "Bartlesville Barflies",
}
# Overwrite status for AIC
if bhs_id in aic_map:
status = -5
# Overwrite name for AIC
name = aic_map.get(bhs_id, name)
kind_map = {
'quartet': self.model.KIND.quartet,
'chorus': self.model.KIND.chorus,
'chapter': self.model.KIND.chapter,
'group': self.model.KIND.noncomp,
'district': self.model.KIND.district,
'organization': self.model.KIND.international,
}
kind = kind_map.get(kind, None)
legacy_code = legacy_code if legacy_code else ""
gender_map = {
'men': self.model.GENDER.male,
'women': self.model.GENDER.female,
'mixed': self.model.GENDER.mixed,
}
gender = gender_map.get(gender, self.model.GENDER.male)
division_map = {
'EVG Division I': self.model.DIVISION.evgd1,
'EVG Division II': self.model.DIVISION.evgd2,
'EVG Division III': self.model.DIVISION.evgd3,
'EVG Division IV': self.model.DIVISION.evgd4,
'EVG Division V': self.model.DIVISION.evgd5,
'FWD Arizona': self.model.DIVISION.fwdaz,
'FWD Northeast': self.model.DIVISION.fwdne,
'FWD Northwest': self.model.DIVISION.fwdnw,
'FWD Southeast': self.model.DIVISION.fwdse,
'FWD Southwest': self.model.DIVISION.fwdsw,
'LOL 10000 Lakes': self.model.DIVISION.lol10l,
'LOL Division One': self.model.DIVISION.lolone,
'LOL Northern Plains': self.model.DIVISION.lolnp,
'LOL Packerland': self.model.DIVISION.lolpkr,
'LOL Southwest': self.model.DIVISION.lolsw,
'MAD Central': self.model.DIVISION.madcen,
'MAD Northern': self.model.DIVISION.madnth,
'MAD Southern': self.model.DIVISION.madsth,
'NED Granite and Pine': self.model.DIVISION.nedgp,
'NED Mountain': self.model.DIVISION.nedmtn,
'NED Patriot': self.model.DIVISION.nedpat,
'NED Sunrise': self.model.DIVISION.nedsun,
'NED Yankee': self.model.DIVISION.nedyke,
'SWD Northeast': self.model.DIVISION.swdne,
'SWD Northwest': self.model.DIVISION.swdnw,
'SWD Southeast': self.model.DIVISION.swdse,
'SWD Southwest': self.model.DIVISION.swdsw,
}
division = division_map.get(division, None)
visitor_information = visitor_information.strip() if visitor_information else ''
if parent_pk:
parent = self.get(
mc_pk=parent_pk,
)
else:
parent = None
if parent:
if parent.kind == 'organization':
representing_raw = legacy_code
elif parent.kind == 'district':
representing_raw = parent.legacy_code
elif parent.kind == 'chapter':
representing_raw = parent.parent.legacy_code
else:
representing_raw = None
elif kind == 'organization':
representing_raw = 'BHS'
else:
representing_raw = None
representing_map = {
'BHS': self.model.REPRESENTING.bhs,
'CAR': self.model.REPRESENTING.car,
'CSD': self.model.REPRESENTING.csd,
'DIX': self.model.REPRESENTING.dix,
'EVG': self.model.REPRESENTING.evg,
'FWD': self.model.REPRESENTING.fwd,
'ILL': self.model.REPRESENTING.ill,
'JAD': self.model.REPRESENTING.jad,
'LOL': self.model.REPRESENTING.lol,
'MAD': self.model.REPRESENTING.mad,
'NED': self.model.REPRESENTING.ned,
'NSC': self.model.REPRESENTING.nsc,
'ONT': self.model.REPRESENTING.ont,
'PIO': self.model.REPRESENTING.pio,
'RMD': self.model.REPRESENTING.rmd,
'SLD': self.model.REPRESENTING.sld,
'SUN': self.model.REPRESENTING.sun,
'SWD': self.model.REPRESENTING.swd,
}
representing = representing_map.get(representing_raw, None)
defaults = {
'status': status,
'name': name,
'kind': kind,
'gender': gender,
'representing': representing,
'division': division,
'bhs_id': bhs_id,
'code': legacy_code,
'website': website,
'email': email,
'phone': main_phone,
'fax_phone': fax_phone,
'facebook': facebook,
'twitter': twitter,
'youtube': youtube,
'pinterest': pinterest,
'flickr': flickr,
'instagram': instagram,
'soundcloud': soundcloud,
'visitor_information': visitor_information,
'start_date': established_date,
'parent': parent,
}
# Load
group, created = self.update_or_create(
mc_pk=mc_pk,
defaults=defaults,
)
return group, created
def delete_orphans(self, structures):
# Delete Orphans
orphans = self.filter(
mc_pk__isnull=False,
).exclude(
mc_pk__in=structures,
)
t = orphans.count()
orphans.delete()
return t
def sort_tree(self):
self.all().update(tree_sort=None)
root = self.get(kind=self.model.KIND.international)
i = 1
root.tree_sort = i
with disable_auto_indexing(model=self.model):
root.save()
for child in root.children.order_by('kind', 'code', 'name'):
i += 1
child.tree_sort = i
with disable_auto_indexing(model=self.model):
child.save()
orgs = self.filter(
kind__in=[
self.model.KIND.chapter,
self.model.KIND.chorus,
self.model.KIND.quartet,
]
).order_by(
'kind',
'name',
)
for org in orgs:
i += 1
org.tree_sort = i
with disable_auto_indexing(model=self.model):
org.save()
return
def denormalize(self, cursor=None):
groups = self.filter(status=self.model.STATUS.active)
if cursor:
groups = groups.filter(
modified__gte=cursor,
)
for group in groups:
group.denormalize()
with disable_auto_indexing(model=self.model):
group.save()
return
def update_seniors(self):
quartets = self.filter(
kind=self.model.KIND.quartet,
status__gt=0,
mc_pk__isnull=False,
)
for quartet in quartets:
prior = quartet.is_senior
is_senior = quartet.get_is_senior()
if prior != is_senior:
quartet.is_senior = is_senior
with disable_auto_indexing(model=self.model):
quartet.save()
return
def get_quartets(self):
wb = Workbook()
ws = wb.active
fieldnames = [
'PK',
'Name',
'Kind',
'Organization',
'District',
'Chapter(s)',
            'Senior?',
            'Youth?',
            'BHS ID',
'Code',
'Status',
]
ws.append(fieldnames)
groups = self.filter(
status=self.model.STATUS.active,
kind=self.model.KIND.quartet,
).order_by('name')
for group in groups:
pk = str(group.pk)
name = group.name
kind = group.get_kind_display()
organization = "FIX"
district = group.district
chapters = group.chapters
is_senior = group.is_senior
is_youth = group.is_youth
bhs_id = group.bhs_id
code = group.code
status = group.get_status_display()
row = [
pk,
name,
kind,
organization,
district,
chapters,
is_senior,
is_youth,
bhs_id,
code,
status,
]
ws.append(row)
file = save_virtual_workbook(wb)
content = ContentFile(file)
return content
class AwardManager(Manager):
def sort_tree(self):
self.all().update(tree_sort=None)
awards = self.order_by(
'-status', # Actives first
'district', # Basic BHS Hierarchy
'-kind', # Quartet, Chorus
            'gender', # Male, mixed
F('age').asc(nulls_first=True), # Null, Senior, Youth
            'level', # Championship, qualifier
'is_novice',
'name', # alpha
)
i = 0
for award in awards:
i += 1
award.tree_sort = i
award.save()
return
def get_awards(self):
wb = Workbook()
ws = wb.active
fieldnames = [
'ID',
'District',
'Division',
'Name',
'Kind',
'Gender',
'Season',
'Level',
'Single',
'Spots',
'Threshold',
'Minimum',
'Advance',
]
ws.append(fieldnames)
awards = self.select_related(
# 'group',
).filter(
status__gt=0,
).order_by('tree_sort')
for award in awards:
pk = str(award.id)
district = award.get_district_display()
division = award.get_division_display()
name = award.name
kind = award.get_kind_display()
gender = award.get_gender_display()
season = award.get_season_display()
level = award.get_level_display()
single = award.is_single
spots = award.spots
threshold = award.threshold
minimum = award.minimum
advance = award.advance
row = [
pk,
district,
division,
name,
kind,
gender,
season,
level,
single,
spots,
threshold,
minimum,
advance,
]
ws.append(row)
file = save_virtual_workbook(wb)
content = ContentFile(file)
return content
class ChartManager(Manager):
def get_report(self):
wb = Workbook()
ws = wb.active
fieldnames = [
'PK',
'Title',
'Arrangers',
'Composers',
'Lyricists',
'Holders',
'Status',
]
ws.append(fieldnames)
charts = self.order_by('title', 'arrangers')
for chart in charts:
pk = str(chart.pk)
title = chart.title
arrangers = chart.arrangers
composers = chart.composers
lyricists = chart.lyricists
holders = chart.holders
status = chart.get_status_display()
row = [
pk,
title,
arrangers,
composers,
lyricists,
holders,
status,
]
ws.append(row)
file = save_virtual_workbook(wb)
content = ContentFile(file)
return content
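# A minimal usage sketch (assumed, not part of the original file): the manager methods
# above return a django ContentFile wrapping an xlsx workbook, which could be attached
# to a FileField or streamed in a response; the names below are placeholders.
#
#     content = Chart.objects.get_report()
#     report.file.save("charts.xlsx", content)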
|
StarcoderdataPython
|
3301400
|
import numpy as np
from matplotlib.axes import Axes
def align_yaxis(ax1: Axes, ax2: Axes) -> None:
"""Align zeros of the two axes, zooming them out by same ratio."""
axes = np.array([ax1, ax2])
extrema = np.array([ax.get_ylim() for ax in axes])
tops = extrema[:, 1] / (extrema[:, 1] - extrema[:, 0])
# Ensure that plots (intervals) are ordered bottom to top:
if tops[0] > tops[1]:
axes, extrema, tops = [a[::-1] for a in (axes, extrema, tops)]
# How much would the plot overflow if we kept current zoom levels?
tot_span = tops[1] + 1 - tops[0]
extrema[0, 1] = extrema[0, 0] + tot_span * (extrema[0, 1] - extrema[0, 0])
extrema[1, 0] = extrema[1, 1] + tot_span * (extrema[1, 0] - extrema[1, 1])
[axes[i].set_ylim(*extrema[i]) for i in range(2)]
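# A minimal usage sketch (assumed, not part of the original module): align the zero
# lines of a primary axis and a twinx() axis whose series live on different scales.
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    fig, ax_left = plt.subplots()
    ax_right = ax_left.twinx()
    ax_left.plot([0, 1, 2, 3], [-1, 2, 5, 3], color="tab:blue")
    ax_right.plot([0, 1, 2, 3], [-50, 10, 40, 20], color="tab:orange")
    align_yaxis(ax_left, ax_right)  # both y=0 gridlines now coincide
    plt.show()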
|
StarcoderdataPython
|
1705346
|
import logging
from pytorch_pretrained_bert import BertTokenizer
logger = logging.getLogger(__name__)
def get_tokenizer(tokenizer_name):
logger.info(f"Loading Tokenizer {tokenizer_name}")
    if tokenizer_name.startswith("bert"):
        do_lower_case = "uncased" in tokenizer_name
        tokenizer = BertTokenizer.from_pretrained(
            tokenizer_name, do_lower_case=do_lower_case
        )
    else:
        # Guard against an UnboundLocalError on the return below for unsupported names
        raise ValueError(f"Unsupported tokenizer name: {tokenizer_name}")
    return tokenizer
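# A minimal usage sketch (assumed, not part of the original module): load an uncased
# BERT tokenizer and tokenize a sentence. "bert-base-uncased" is a standard pretrained
# name shipped with pytorch_pretrained_bert; only names starting with "bert" are handled.
if __name__ == "__main__":
    tokenizer = get_tokenizer("bert-base-uncased")
    tokens = tokenizer.tokenize("Hello, world!")
    token_ids = tokenizer.convert_tokens_to_ids(tokens)
    print(tokens, token_ids)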
|
StarcoderdataPython
|
180945
|
<gh_stars>0
"""
Provides models and utilities for displaying different types of Twitter feeds.
"""
from mezzanine import __version__
|
StarcoderdataPython
|
1696758
|
<gh_stars>0
from django.db import models
# Create your models here.
class Program(models.Model):
key = models.IntegerField(primary_key=True)
class KeywordManager(models.Manager):
def keyword_array(self):
return self.all().distinct().values_list("text", flat=True)
class Keyword(models.Model):
program = models.ForeignKey(Program)
text = models.CharField(max_length=255)
relevancy = models.FloatField()
objects = KeywordManager()
def __unicode__(self):
return self.text
class Meta:
ordering = ('text', )
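# A minimal usage sketch (assumed, requires a configured Django project): the custom
# manager returns a flat, de-duplicated sequence of keyword strings, e.g. for autocomplete.
#
#     distinct_keywords = Keyword.objects.keyword_array()
#     print(list(distinct_keywords))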
|
StarcoderdataPython
|
3303573
|
<filename>meine_stadt_transparent/settings/__init__.py
import json
import logging
import os
import subprocess
import warnings
from importlib.util import find_spec
from logging import Filter, LogRecord
from subprocess import CalledProcessError
from typing import Dict, Union, Optional
from pathlib import Path
import sentry_sdk
from sentry_sdk import configure_scope
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import ignore_logger
from meine_stadt_transparent.settings.env import env, TESTING
from meine_stadt_transparent.settings.nested import (
INSTALLED_APPS,
MIDDLEWARE,
Q_CLUSTER,
)
from meine_stadt_transparent.settings.env import * # noqa F403
from meine_stadt_transparent.settings.nested import * # noqa F403
from meine_stadt_transparent.settings.security import * # noqa F403
# Mute irrelevant warnings
warnings.filterwarnings("ignore", message="`django-leaflet` is not available.")
# This comes from PGPy with enigmail keys
warnings.filterwarnings(
"ignore", message=".*does not have the required usage flag EncryptStorage.*"
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
REAL_HOST = env.str("REAL_HOST")
PRODUCT_NAME = env.str("PRODUCT_NAME", "Meine Stadt Transparent")
SITE_NAME = env.str("SITE_NAME", PRODUCT_NAME)
ABSOLUTE_URI_BASE = env.str("ABSOLUTE_URI_BASE", "https://" + REAL_HOST)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
ALLOWED_HOSTS = [REAL_HOST, "127.0.0.1", "localhost"]
ROOT_URLCONF = "meine_stadt_transparent.urls"
WSGI_APPLICATION = "meine_stadt_transparent.wsgi.application"
# forcing request.build_absolute_uri to return https
# os.environ["HTTPS"] = "on"
MAIL_PROVIDER = env.str("MAIL_PROVIDER", "smtp").lower()
if MAIL_PROVIDER != "smtp":
ANYMAIL = json.loads(env.str("ANYMAIL"))
# TODO: Validation of MAIL_PROVIDER
EMAIL_BACKEND = f"anymail.backends.{MAIL_PROVIDER}.EmailBackend"
elif "EMAIL_URL" in env:
# If EMAIL_URL is not configured, django's SMTP defaults will be used
EMAIL_CONFIG = env.email_url("EMAIL_URL")
vars().update(EMAIL_CONFIG)
EMAIL_FROM = env.str("EMAIL_FROM", "info@" + REAL_HOST)
EMAIL_FROM_NAME = env.str("EMAIL_FROM_NAME", SITE_NAME)
# required for django-allauth. See https://github.com/pennersr/django-allauth/blob/0.41.0/allauth/account/adapter.py#L95
DEFAULT_FROM_EMAIL = f"{EMAIL_FROM_NAME} <{EMAIL_FROM}>"
# Encrypted emails are currently plaintext only (html is just rendered as plaintext in thunderbird),
# which is why this feature is disabled by default
ENABLE_PGP = env.bool("ENABLE_PGP", False)
# The pgp keyserver, following the sks protocol
SKS_KEYSERVER = env.str("SKS_KEYSERVER", "gpg.mozilla.org")
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {"default": env.db()}
# https://stackoverflow.com/a/45233653/3549270
SILENCED_SYSTEM_CHECKS = ["mysql.E001"]
# https://docs.djangoproject.com/en/3.2/releases/3.2/#customizing-type-of-auto-created-primary-keys
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = env.str("LANGUAGE_CODE", "de-de")
TIME_ZONE = env.str("TIME_ZONE", "Europe/Berlin")
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Authentication
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_REQUIRED = True
LOGIN_REDIRECT_URL = "/profile/"
ACCOUNT_DEFAULT_HTTP_PROTOCOL = "https"
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_ADAPTER = "mainapp.account_adapter.AccountAdapter"
SOCIALACCOUNT_EMAIL_VERIFICATION = False
SOCIALACCOUNT_QUERY_EMAIL = True
ACCOUNT_MANAGEMENT_VISIBLE = env.bool("ACCOUNT_MANAGEMENT_VISIBLE", True)
# Needed by allauth
SITE_ID = 1
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
SOCIALACCOUNT_USE_FACEBOOK = env.bool("SOCIALACCOUNT_USE_FACEBOOK", False)
SOCIALACCOUNT_USE_TWITTER = env.bool("SOCIALACCOUNT_USE_TWITTER", False)
SOCIALACCOUNT_PROVIDERS = {}
if SOCIALACCOUNT_USE_FACEBOOK:
SOCIALACCOUNT_PROVIDERS["facebook"] = {
"EXCHANGE_TOKEN": True,
"VERIFIED_EMAIL": False,
"APP": {
"client_id": env.str("FACEBOOK_CLIENT_ID"),
"secret": env.str("FACEBOOK_SECRET_KEY"),
},
}
INSTALLED_APPS.append("allauth.socialaccount.providers.facebook")
if SOCIALACCOUNT_USE_TWITTER:
SOCIALACCOUNT_PROVIDERS["twitter"] = {
"APP": {
"client_id": env.str("TWITTER_CLIENT_ID"),
"secret": env.str("TWITTER_SECRET_KEY"),
}
}
INSTALLED_APPS.append("allauth.socialaccount.providers.twitter")
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = env.str("STATIC_ROOT", os.path.join(BASE_DIR, "static/"))
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "mainapp/assets"),
os.path.join(BASE_DIR, "node_modules/pdfjs-dist/viewer"), # See desgin.md
)
SERVE_STATIC_FILES = env.bool("SERVE_STATIC_FILES", DEBUG)
# Minio / S3 Settings
MINIO_PREFIX = env.str("MINIO_PREFIX", "meine-stadt-transparent-")
MINIO_ACCESS_KEY = env.str("MINIO_ACCESS_KEY", "meinestadttransparent")
MINIO_SECRET_KEY = env.str("MINIO_SECRET_KEY", "meinestadttransparent")
MINIO_REGION = env.str("MINIO_REGION", "us-east-1")
MINIO_HOST = env.str("MINIO_HOST", "localhost:9000")
MINIO_SECURE = env.bool("MINIO_SECURE", False)
MINIO_REDIRECT = env.bool("MINIO_REDIRECT", False)
MINIO_PUBLIC_HOST = env.str("MINIO_PUBLIC_HOST", None)
MINIO_PUBLIC_SECURE = env.bool("MINIO_PUBLIC_SECURE", True)
SCHEDULES_ENABLED = env.bool("SCHEDULES_ENABLED", False)
# When webpack compiles, it replaces the stats file contents with a compiling placeholder.
# If debug is False and the stats file is in the project root, this leads to a WebpackLoaderBadStatsError.
# So we place the file besides the assets, so it will be copied over by collectstatic
# only after the compilation finished, so that django only ever sees a finished stats file
if DEBUG or TESTING:
webpack_stats = "mainapp/assets/bundles/webpack-stats.json"
else:
webpack_stats = os.path.join(STATIC_ROOT, "bundles", "webpack-stats.json")
WEBPACK_LOADER = {
"DEFAULT": {"BUNDLE_DIR_NAME": "bundles/", "STATS_FILE": webpack_stats}
}
# Elastic
ELASTICSEARCH_ENABLED = env.bool("ELASTICSEARCH_ENABLED", True)
if ELASTICSEARCH_ENABLED and "django_elasticsearch_dsl" not in INSTALLED_APPS:
    INSTALLED_APPS.append("django_elasticsearch_dsl")
ELASTICSEARCH_URL = env.str("ELASTICSEARCH_URL", "localhost:9200")
ELASTICSEARCH_VERIFY_CERTS = env.bool("ELASTICSEARCH_VERIFY_CERTS", True)
if not ELASTICSEARCH_VERIFY_CERTS:
import urllib3
urllib3.disable_warnings()
ELASTICSEARCH_DSL = {
"default": {
"hosts": ELASTICSEARCH_URL,
"timeout": env.int("ELASTICSEARCH_TIMEOUT", 10),
"verify_certs": ELASTICSEARCH_VERIFY_CERTS,
}
}
ELASTICSEARCH_PREFIX = env.str("ELASTICSEARCH_PREFIX", "meine-stadt-transparent")
if not ELASTICSEARCH_PREFIX.islower():
raise ValueError("ELASTICSEARCH_PREFIX must be lowercase")
# Language use for stemming, stop words, etc.
ELASTICSEARCH_LANG = env.str("ELASTICSEARCH_LANG", "german")
ELASTICSEARCH_QUERYSET_PAGINATION = env.int("ELASTICSEARCH_QUERYSET_PAGINATION", 50)
# Valid values for GEOEXTRACT_ENGINE: Nominatim, Opencage, Mapbox
GEOEXTRACT_ENGINE = env.str("GEOEXTRACT_ENGINE", "Nominatim").lower()
if GEOEXTRACT_ENGINE not in ["nominatim", "mapbox", "opencage"]:
raise ValueError("Unknown Geocoder: " + GEOEXTRACT_ENGINE)
if GEOEXTRACT_ENGINE == "opencage":
OPENCAGE_KEY = env.str("OPENCAGE_KEY")
NOMINATIM_URL = env.str("NOMINATIM_URL", "https://nominatim.openstreetmap.org")
# Settings for Geo-Extraction
GEOEXTRACT_LANGUAGE = env.str("GEOEXTRACT_LANGUAGE", LANGUAGE_CODE.split("-")[0])
GEOEXTRACT_SEARCH_COUNTRY = env.str("GEOEXTRACT_SEARCH_COUNTRY", "Deutschland")
GEOEXTRACT_SEARCH_CITY = env.str("GEOEXTRACT_SEARCH_CITY", None)
SUBPROCESS_MAX_RAM = env.int("SUBPROCESS_MAX_RAM", 1024 * 1024 * 1024) # 1 GB
CITY_AFFIXES = env.list(
"CITY_AFFIXES",
default=[
"Stadt",
"Landeshauptstadt",
"Gemeinde",
"Kreisverwaltung",
"Landkreis",
"Kreis",
],
)
DISTRICT_REGEX = env.str("DISTRICT_REGEX", r"(^| )kreis|kreis( |$)")
TEXT_CHUNK_SIZE = env.int("TEXT_CHUNK_SIZE", 1024 * 1024)
OCR_AZURE_KEY = env.str("OCR_AZURE_KEY", None)
OCR_AZURE_LANGUAGE = env.str("OCR_AZURE_LANGUAGE", "de")
OCR_AZURE_API = env.str(
"OCR_AZURE_API", "https://westcentralus.api.cognitive.microsoft.com"
)
# Configuration regarding the city of choice
SITE_DEFAULT_BODY = env.int("SITE_DEFAULT_BODY", 1)
SITE_DEFAULT_ORGANIZATION = env.int("SITE_DEFAULT_ORGANIZATION", 1)
# Possible values: OSM, Mapbox
MAP_TILES_PROVIDER = env.str("MAP_TILES_PROVIDER", "OSM")
MAP_TILES_URL = env.str("MAP_TILES_URL", None)
MAPBOX_TOKEN = env.str("MAPBOX_TOKEN", None)
CUSTOM_IMPORT_HOOKS = env.str("CUSTOM_IMPORT_HOOKS", None)
PARLIAMENTARY_GROUPS_TYPE = (1, "parliamentary group")
COMMITTEE_TYPE = (2, "committee")
DEPARTMENT_TYPE = (3, "department")
ORGANIZATION_ORDER = env.list(
"ORGANIZATION_ORDER",
int,
[PARLIAMENTARY_GROUPS_TYPE, COMMITTEE_TYPE, DEPARTMENT_TYPE],
)
# Possible values: month, listYear, listMonth, listDay, basicWeek, basicDay, agendaWeek, agendaDay
CALENDAR_DEFAULT_VIEW = env.str("CALENDAR_DEFAULT_VIEW", "listMonth")
CALENDAR_HIDE_WEEKENDS = env.bool("CALENDAR_HIDE_WEEKENDS", True)
CALENDAR_MIN_TIME = env.str("CALENDAR_MIN_TIME", "08:00:00")
CALENDAR_MAX_TIME = env.str("CALENDAR_MAX_TIME", "21:00:00")
# Configuration regarding Search Engine Optimization
SITE_SEO_NOINDEX = env.bool("SITE_SEO_NOINDEX", False)
# Include the plain text of PDFs next to the PDF viewer, visible only for Screenreaders
# On by default to improve accessibility, deactivatable in case there are legal concerns
EMBED_PARSED_TEXT_FOR_SCREENREADERS = env.bool(
"EMBED_PARSED_TEXT_FOR_SCREENREADERS", True
)
SEARCH_PAGINATION_LENGTH = 20
SENTRY_DSN = env.str("SENTRY_DSN", None)
SENTRY_ENVIRONMENT = env.str(
"SENTRY_ENVIRONMENT", "development" if DEBUG else "production"
)
# SENTRY_HEADER_ENDPOINT is defined in security.py
if SENTRY_DSN:
if os.environ.get("DOCKER_GIT_SHA"):
version = os.environ.get("DOCKER_GIT_SHA")
else:
try:
version = (
subprocess.check_output(
["git", "rev-parse", "HEAD"], stderr=subprocess.DEVNULL
)
.strip()
.decode()
)
except CalledProcessError:
# Note however that logging isn't configured at this point
import importlib.metadata
try:
version = importlib.metadata.version("meine_stadt_transparent")
except importlib.metadata.PackageNotFoundError:
version = "unknown"
release = "meine-stadt-transparent@" + version
sentry_sdk.init(
SENTRY_DSN,
integrations=[DjangoIntegration()],
release=release,
ignore_errors=[KeyboardInterrupt],
environment=SENTRY_ENVIRONMENT,
        traces_sample_rate=env.float("SENTRY_TRACES_SAMPLE_RATE", 0.05),
)
ignore_logger("django.security.DisallowedHost")
with configure_scope() as scope:
scope.set_tag("real_host", REAL_HOST)
Q_CLUSTER["error_reporter"] = {"sentry": {"dsn": SENTRY_DSN}}
DJANGO_LOG_LEVEL = env.str("DJANGO_LOG_LEVEL", None)
MAINAPP_LOG_LEVEL = env.str("MAINAPP_LOG_LEVEL", None)
IMPORTER_LOG_LEVEL = env.str("IMPORTER_LOG_LEVEL", None)
# Anchoring this in this file is required for running tests from other directories
LOG_DIRECTORY = env.str(
"LOG_DIRECTORY", Path(__file__).parent.parent.parent.joinpath("log")
)
NO_LOG_FILES = env.bool("NO_LOG_FILES", False)
def make_handler(
log_name: str, level: Optional[str] = None
) -> Dict[str, Union[str, int]]:
if NO_LOG_FILES:
return {"class": "logging.NullHandler"}
handler = {
"class": "logging.handlers.RotatingFileHandler",
"filename": os.path.join(LOG_DIRECTORY, log_name),
"formatter": "extended",
"maxBytes": 8 * 1024 * 1024,
"backupCount": 2 if not DEBUG else 0,
}
if level:
handler["level"] = level
return handler
class WarningsFilter(Filter):
"""
Removes bogus warnings.
We handle the warnings through the logging module so they get properly
    tracked in the log files, but this also means we can't use the warnings
module to filter them.
"""
def filter(self, record: LogRecord) -> bool:
irrelevant = (
"Xref table not zero-indexed. ID numbers for objects will be corrected."
)
if irrelevant in record.getMessage():
return False
return True
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"extended": {"format": "%(asctime)s %(levelname)-8s %(name)-12s %(message)s"},
"with_time": {"format": "%(asctime)s %(message)s"},
},
"handlers": {
"console": {"class": "logging.StreamHandler", "formatter": "with_time"},
"django": make_handler("django.log"),
"django-error": make_handler("django-error.log", "WARNING"),
"importer": make_handler("importer.log"),
"importer-error": make_handler("importer-error.log", "WARNING"),
},
"filters": {"warnings-filters": {"()": WarningsFilter}},
"loggers": {
"mainapp": {
"handlers": ["console", "django-error", "django"],
"level": MAINAPP_LOG_LEVEL or "INFO",
"propagate": False,
},
"importer": {
"handlers": ["console", "importer-error", "importer"],
"level": IMPORTER_LOG_LEVEL or "INFO",
"propagate": False,
},
"django": {
"level": DJANGO_LOG_LEVEL or "WARNING",
"handlers": ["console", "django-error", "django"],
"propagate": False,
},
"django-q": {
"level": DJANGO_LOG_LEVEL or "WARNING",
"handlers": ["console", "django-error", "django"],
"propagate": False,
},
"py.warnings": {
"level": "WARNING",
"handlers": ["console", "django-error", "django"],
"propagate": False,
"filters": ["warnings-filters"],
},
},
}
LOGGING.update(env.json("LOGGING", {}))
# Not sure what is going on, but this make caplog work
if TESTING:
LOGGING["loggers"] = {}
logging.captureWarnings(True)
OPARL_INDEX = env.str("OPARL_INDEX", "https://mirror.oparl.org/bodies")
TEMPLATE_META = {
"logo_name": env.str("TEMPLATE_LOGO_NAME", "MST"),
"site_name": SITE_NAME,
"prototype_fund": "https://prototypefund.de/project/open-source-ratsinformationssystem",
"github": "https://github.com/meine-stadt-transparent/meine-stadt-transparent",
"contact_mail": EMAIL_FROM,
"main_css": env.str("TEMPLATE_MAIN_CSS", "mainapp"),
"location_limit_lng": 42,
"location_limit_lat": 23,
"sks_keyserver": SKS_KEYSERVER,
"enable_pgp": ENABLE_PGP,
"sentry_dsn": SENTRY_DSN,
}
FILE_DISCLAIMER = env.str("FILE_DISCLAIMER", None)
FILE_DISCLAIMER_URL = env.str("FILE_DISCLAIMER_URL", None)
SETTINGS_EXPORT = [
"TEMPLATE_META",
"FILE_DISCLAIMER",
"FILE_DISCLAIMER_URL",
"ABSOLUTE_URI_BASE",
"ACCOUNT_MANAGEMENT_VISIBLE",
]
# Mandatory but afaik unused value
WAGTAIL_SITE_NAME = SITE_NAME
# Workaround to avoid filling up disk space
PROXY_ONLY_TEMPLATE = env.str("PROXY_ONLY_TEMPLATE", None)
DEBUG_TOOLBAR_ACTIVE = False
DEBUG_TESTING = env.bool("DEBUG_TESTING", False)
if DEBUG and not TESTING:
# For some reason pycharm needs the latter condition (might just be misconfiguration)
if find_spec("debug_toolbar"):
# Debug Toolbar
if "debug_toolbar" not in INSTALLED_APPS:
INSTALLED_APPS.append("debug_toolbar")
MIDDLEWARE.append("debug_toolbar.middleware.DebugToolbarMiddleware")
DEBUG_TOOLBAR_CONFIG = {"JQUERY_URL": ""}
DEBUG_TOOLBAR_ACTIVE = True
else:
logger = logging.getLogger(__name__)
logger.warning(
"This is running in DEBUG mode, however the Django debug toolbar is not installed."
)
DEBUG_TOOLBAR_ACTIVE = False
if env.bool("DEBUG_SHOW_SQL", False):
LOGGING["loggers"]["django.db.backends"] = {
"level": "DEBUG",
"handlers": ["console"],
"propagate": False,
}
INTERNAL_IPS = ["127.0.0.1"]
# Make debugging css styles in firefox easier
DEBUG_STYLES = env.bool("DEBUG_STYLES", False)
if DEBUG_STYLES:
CSP_STYLE_SRC = ("'self'", "'unsafe-inline'")
# Just an additional host you might want
ALLOWED_HOSTS.append("meinestadttransparent.local")
|
StarcoderdataPython
|
1743166
|
#
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
layout = BoolScalar("layout", False) # NHWC
# TEST 1: ROI_POOLING_1, outputShape = [2, 2], spatialScale = [0.5, 0.5]
i1 = Input("in", "TENSOR_FLOAT32", "{1, 4, 4, 1}")
roi1 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
o1 = Output("out", "TENSOR_FLOAT32", "{5, 2, 2, 1}")
Model().Operation("ROI_POOLING", i1, roi1, [0, 0, 0, 0, 0], 2, 2, 2.0, 2.0, layout).To(o1)
quant8 = DataTypeConverter().Identify({
i1: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
roi1: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
o1: ("TENSOR_QUANT8_ASYMM", 0.25, 128)
})
# Instantiate an example
Example({
i1: [
-10, -1, 4, -5,
-8, -2, 9, 1,
7, -2, 3, -7,
-2, 10, -3, 5
],
roi1: [
2, 2, 4, 4,
0, 0, 6, 6,
2, 0, 4, 6,
0, 2, 6, 4,
8, 8, 8, 8 # empty region
],
o1: [
-2, 9, -2, 3,
-1, 9, 10, 5,
-1, 9, 10, 3,
-2, 9, 7, 3,
0, 0, 0, 0
]
}).AddNchw(i1, o1, layout).AddVariations("relaxed", quant8, "float16")
# TEST 2: ROI_POOLING_2, outputShape = [2, 3], spatialScale = 0.25
i2 = Input("in", "TENSOR_FLOAT32", "{4, 4, 8, 2}")
roi2 = Input("roi", "TENSOR_FLOAT32", "{4, 4}")
o2 = Output("out", "TENSOR_FLOAT32", "{4, 2, 3, 2}")
Model().Operation("ROI_POOLING", i2, roi2, [0, 0, 3, 3], 2, 3, 4.0, 4.0, layout).To(o2)
quant8 = DataTypeConverter().Identify({
i2: ("TENSOR_QUANT8_ASYMM", 0.04, 0),
roi2: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
o2: ("TENSOR_QUANT8_ASYMM", 0.04, 0)
})
# Instantiate an example
Example({
i2: [
8.84, 8.88, 7.41, 5.60, 9.95, 4.37, 0.10, 7.64, 6.50, 9.47,
7.55, 3.00, 0.89, 3.01, 6.30, 4.40, 1.64, 6.74, 6.16, 8.60,
5.85, 3.17, 7.12, 6.79, 5.77, 6.62, 5.13, 8.44, 5.08, 7.12,
2.84, 1.19, 8.37, 0.90, 7.86, 9.69, 1.97, 1.31, 4.42, 9.89,
0.18, 9.00, 9.30, 0.44, 5.05, 6.47, 1.09, 9.50, 1.30, 2.18,
2.05, 7.74, 7.66, 0.65, 4.18, 7.14, 5.35, 7.90, 1.04, 1.47,
9.01, 0.95, 4.07, 0.65,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00,
0.00, 0.00, 0.00, 0.00,
5.47, 2.64, 0.86, 4.86, 2.38, 2.45, 8.77, 0.06, 3.60, 9.28,
5.84, 8.97, 6.89, 1.43, 3.90, 5.91, 7.40, 9.25, 3.12, 4.92,
1.87, 3.22, 9.50, 6.73, 2.07, 7.30, 3.07, 4.97, 0.24, 8.91,
1.09, 0.27, 7.29, 6.94, 2.31, 6.88, 4.33, 1.37, 0.86, 0.46,
6.07, 3.81, 0.86, 6.99, 4.36, 1.92, 8.19, 3.57, 7.90, 6.78,
4.64, 6.82, 6.18, 9.63, 2.63, 2.33, 1.36, 2.70, 9.99, 9.85,
8.06, 4.80, 7.80, 5.43
],
roi2: [
4, 4, 24, 8,
4, 4, 28, 12,
7, 1, 25, 11, # test rounding
1, 7, 5, 11 # test roi with shape smaller than output
],
o2: [
6.16, 8.60, 7.12, 6.79, 5.13, 8.44, 7.86, 9.69, 4.42, 9.89, 9.30, 6.47,
7.86, 9.89, 9.30, 9.89, 9.30, 9.50, 7.86, 9.89, 9.30, 9.89, 9.30, 9.50,
9.50, 6.73, 9.50, 9.28, 6.89, 8.97, 6.18, 9.63, 9.99, 9.85, 9.99, 9.85,
7.29, 6.94, 7.29, 6.94, 2.31, 6.88, 7.90, 6.78, 7.90, 6.82, 4.64, 6.82
]
}).AddNchw(i2, o2, layout).AddVariations("relaxed", quant8, "float16")
# TEST 3: ROI_POOLING_3, outputShape = [2, 2], spatialScale = [0.5, 1]
i3 = Input("in", "TENSOR_FLOAT32", "{4, 4, 4, 1}")
roi3 = Input("roi", "TENSOR_FLOAT32", "{5, 4}")
o3 = Output("out", "TENSOR_FLOAT32", "{5, 2, 2, 1}")
Model().Operation("ROI_POOLING", i3, roi3, [2, 2, 2, 2, 2], 2, 2, 2.0, 1.0, layout).To(o3)
quant8 = DataTypeConverter().Identify({
i3: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
roi3: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
o3: ("TENSOR_QUANT8_ASYMM", 0.25, 128)
})
# Instantiate an example
Example({
i3: [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-10, -1, 4, -5,
-8, -2, 9, 1,
7, -2, 3, -7,
-2, 10, -3, 5,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
],
roi3: [
1, 2, 2, 4,
0, 0, 3, 6,
1, 0, 2, 6,
0, 2, 3, 4,
0, 0, 0, 0
],
o3: [
-2, 9, -2, 3,
-1, 9, 10, 5,
-1, 9, 10, 3,
-2, 9, 7, 3,
-10, -10, -10, -10
]
}).AddNchw(i3, o3, layout).AddVariations("relaxed", quant8, "float16")
|
StarcoderdataPython
|
1608228
|
# Generated by Django 3.1.5 on 2021-01-31 06:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('notes', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='note',
name='date_created',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='note',
name='date_uipdated',
field=models.DateTimeField(auto_now=True),
),
]
|
StarcoderdataPython
|
3368977
|
<reponame>inshadsajeev143/utube<filename>youtube_dl/version.py
from __future__ import unicode_literals
__version__ = '2019.04.17'
|
StarcoderdataPython
|
1621510
|
<filename>test/regression/daily/test_pte.py
######################################################################
# To execute:
# Install: sudo apt-get install python python-pytest
# Run on command line: py.test -v --junitxml results.xml ./test_pte.py
import unittest
import subprocess
TEST_PASS_STRING="RESULT=PASS"
######################################################################
### LEVELDB
######################################################################
class LevelDB_Perf_Stress(unittest.TestCase):
@unittest.skip("skipping")
def test_FAB3584_SkeletonQueries(self):
'''
FAB-2032,FAB-3584
Network: 1 Ord, 1 KB, 1 ZK, 2 Org, 2 Peers, 1 Chan, 1 CC
Launch skeleton network, use PTE in STRESS mode to continuously
send 10000 query transactions concurrently to 1 peer in both orgs,
calculate tps, and remove network and cleanup
'''
# Replace TestPlaceholder.sh with actual test name, something like:
# ../../tools/PTE/tests/runSkeletonQueriesLevel.sh
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3586_SkeletonInvokes(self):
'''
FAB-2032,FAB-3586
Network: 1 Ord, 1 KB, 1 ZK, 2 Org, 2 Peers, 1 Chan, 1 CC
Launch skeleton network, use PTE in STRESS mode to continuously
        send 10000 invoke transactions concurrently to 1 peer in both orgs,
query the ledger to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3593_Standard_basic_TLS(self):
'''
FAB-2032,FAB-3593
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3595_Standard_basic_1M(self):
'''
FAB-2032,FAB-3595
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3597_Standard_basic_Gossip(self):
'''
FAB-2032,FAB-3597
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3599_Standard_12Hr(self):
'''
FAB-2032,FAB-3599
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
######################################################################
### COUCHDB
######################################################################
class CouchDB_Perf_Stress(unittest.TestCase):
@unittest.skip("skipping")
def test_FAB3585_SkeletonQueries(self):
'''
FAB-2032,FAB-3585
Network: 1 Ord, 1 KB, 1 ZK, 2 Org, 2 Peers, 1 Chan, 1 CC
Launch skeleton network, use PTE in STRESS mode to continuously
send 10000 query transactions concurrently to 1 peer in both orgs,
calculate tps, and remove network and cleanup
'''
# Replace TestPlaceholder.sh with actual test name, something like:
# ../../tools/PTE/tests/runSkeletonQueriesCouch.sh
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3587_SkeletonInvokes(self):
'''
FAB-2032,FAB-3587
Network: 1 Ord, 1 KB, 1 ZK, 2 Org, 2 Peers, 1 Chan, 1 CC
Launch skeleton network, use PTE in STRESS mode to continuously
        send 10000 invoke transactions concurrently to 1 peer in both orgs,
query the ledger to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3588_Scaleup1(self):
'''
FAB-2032,FAB-3588
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 20 Chan, 2 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3589_Scaleup2(self):
'''
FAB-2032,FAB-3589
Network: 2 Ord, 5 KB, 3 ZK, 4 Org, 8 Peers, 40 Chan, 4 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3590_Scaleup3(self):
'''
FAB-2032,FAB-3590
Network: 2 Ord, 5 KB, 3 ZK, 8 Org, 16 Peers, 80 Chan, 8 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3591_Scaleup4(self):
'''
FAB-2032,FAB-3591
Network: 4 Ord, 5 KB, 3 ZK, 16 Org, 32 Peers, 160 Chan, 16 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3592_Scaleup5(self):
'''
FAB-2032,FAB-3592
Network: 4 Ord, 5 KB, 3 ZK, 32 Org, 64 Peers, 320 Chan, 32 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3594_Standard_basic_TLS(self):
'''
FAB-2032,FAB-3594
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3596_Standard_basic_1M(self):
'''
FAB-2032,FAB-3596
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3598_Standard_basic_Gossip(self):
'''
FAB-2032,FAB-3598
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send 100 invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
@unittest.skip("skipping")
def test_FAB3600_Standard_12Hr(self):
'''
FAB-2032,FAB-3600
Network: 2 Ord, 5 KB, 3 ZK, 2 Org, 4 Peers, 10 Chan, 10 CC
Launch network, use PTE stress mode to send invoke transactions
concurrently to all peers on all channels on all chaincodes,
query the ledger for each to ensure the last transaction was written,
calculate tps, remove network and cleanup
'''
result = subprocess.check_output("./TestPlaceholder.sh", shell=True)
self.assertIn(TEST_PASS_STRING, result)
|
StarcoderdataPython
|
3323655
|
<reponame>shiminasai/plataforma_FADCANIC
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import ckeditor.fields
class Migration(migrations.Migration):
dependencies = [
('biblioteca', '0002_auto_20160328_2122'),
]
operations = [
migrations.AlterModelOptions(
name='temas',
options={'verbose_name_plural': 'Temas'},
),
migrations.AddField(
model_name='biblioteca',
name='tipo_documento',
field=models.IntegerField(blank=True, null=True, choices=[(1, b'Documento para biblioteca'), (2, b'Informe privado')]),
),
migrations.AlterField(
model_name='biblioteca',
name='descripcion',
field=ckeditor.fields.RichTextField(null=True, verbose_name=b'Sinopsis', blank=True),
),
]
|
StarcoderdataPython
|
45442
|
#py_screener.py
class ComplexException(Exception):
    """Raised when the input parses as a complex number."""


class IntException(Exception):
    """Raised when the input parses as an integer."""


def screener(user_inp=None):
    """A function to square only floating points.
    Raises custom exceptions if an int or complex is encountered."""
    # make sure something was input
    if not user_inp:
        print("Ummm...did you type in ANYTHING?")
        return
    # If it *might* be a float (has a "."), try to type-cast it, square it and return
    if "." in user_inp:
        try:
            inp_as_float = float(user_inp)
        except ValueError:
            return
        square = inp_as_float ** 2
        print("You gave me {}. Its square is: {}".format(user_inp, square))
        return
    try:  # see if we need to raise the ComplexException
        if "(" in user_inp:  # it might be complex if it has a (
            raise ComplexException(complex(user_inp))
    except ValueError:  # it's not complex
        pass
    try:
        # we already tried to type-cast to float, let's try casting to int
        raise IntException(int(user_inp))
    except ValueError:
        pass
    # if we're here, the function hasn't returned anything or raised an exception
    print("Done processing {}".format(user_inp))
|
StarcoderdataPython
|
1635534
|
<reponame>fossabot/bevrand
class ErrorModel():
def __init__(self, valid, message, status_code):
self.valid = valid
self.message = message
self.status_code = status_code
class SuccessModelRedis():
def __init__(self, sorted_list):
self.sorted_list = sorted_list
|
StarcoderdataPython
|
4813540
|
<gh_stars>0
from __future__ import unicode_literals
import frappe
def second_totals(doc, method):
total = 0.0
for data in doc.optional_items:
data.amount = data.qty * data.rate
total += data.amount
doc.optional_total = total
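# Hypothetical wiring sketch (assumed, not part of the original file): in frappe apps a
# handler like this is normally registered in hooks.py via doc_events; the DocType name
# ("Quotation") and the dotted path below are placeholders for the real app layout.
#
#     doc_events = {
#         "Quotation": {
#             "validate": "my_app.my_module.second_totals",
#         }
#     }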
|
StarcoderdataPython
|
84484
|
<gh_stars>0
# flake8: noqa
from timeCalculator.calculator import add_time
|
StarcoderdataPython
|