max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5)
---|---|---|---|---|---|---
tabulate_deals.py | frostburn/puyotable | 0 | 12798751 | import argparse
import json
from multiprocessing import Pool
from puyotable.canonization import canonize_deals
def all_deals(num_deals, num_colors):
if not num_deals:
return [[]]
result = []
for c0 in range(num_colors):
for c1 in range(num_colors):
for deals in all_deals(num_deals - 1, num_colors):
result.append(deals + [(c0, c1)])
return result
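# A small worked example of all_deals (minimal sketch): with 2 colors and a
# single deal, every ordered pair of colors is enumerated, i.e.
# all_deals(1, 2) == [[(0, 0)], [(0, 1)], [(1, 0)], [(1, 1)]].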
def for_all_deals(num_deals, num_colors, callback, prefix=[]):
if not num_deals:
callback(prefix)
return
for c0 in range(num_colors):
for c1 in range(num_colors):
for_all_deals(
num_deals - 1,
num_colors,
callback,
prefix + [(c0, c1)]
)
def unique_deals(num_deals, num_colors, prefix_=[]):
canonized = set()
def callback(deals):
canonized.add(canonize_deals(deals, num_colors))
# Known symmetry reduction
prefix = [(0, 0)] + prefix_
for_all_deals(num_deals - 1, num_colors, callback, prefix)
prefix = [(0, 1)] + prefix_
for_all_deals(num_deals - 1, num_colors, callback, prefix)
return canonized
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Tabulate all opening sequences in Puyo Puyo.'
)
parser.add_argument(
'num_colors', metavar='colors', type=int,
help='Number of available colors'
)
parser.add_argument(
'depth', metavar='depth', type=int,
help='How many pieces deep to tabulate'
)
parser.add_argument(
'--outfile', metavar='f', type=str,
help='Filename for JSON output'
)
args = parser.parse_args()
canonized = set()
if args.depth > 1:
prefixes = [[(c0, c1)] for c0 in range(args.num_colors) for c1 in range(args.num_colors)] # noqa
process_args = [
(args.depth - 1, args.num_colors, prefix) for prefix in prefixes
]
pool = Pool()
subsets = pool.starmap(unique_deals, process_args)
for subset in subsets:
canonized.update(subset)
else:
canonized = unique_deals(args.depth, args.num_colors)
print("Found", len(canonized), "unique sequences.")
if args.outfile:
with open(args.outfile, 'w') as f:
json.dump(sorted(canonized), f)
print("Saved result to", args.outfile)
| 2.578125 | 3 |
src/pbn_api/migrations/0026_auto_20210816_0815.py | iplweb/django-bpp | 1 | 12798752 | <reponame>iplweb/django-bpp<gh_stars>1-10
# Generated by Django 3.0.14 on 2021-08-16 06:17
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("pbn_api", "0025_auto_20210809_0149"),
]
operations = [
migrations.AlterUniqueTogether(
name="oswiadczenieinstytucji",
unique_together=set(),
),
migrations.AlterUniqueTogether(
name="publikacjainstytucji",
unique_together=set(),
),
]
| 1.382813 | 1 |
bin/dump_lpcnet.py | nkari82/LPCNet | 0 | 12798753 | <gh_stars>0
#!/usr/bin/python3
'''Copyright (c) 2017-2018 Mozilla
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
- Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import lpcnet
import sys
import struct
import numpy as np
import h5py
import re
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.compat.v1.keras.layers import CuDNNGRU
from tensorflow.keras.layers import Layer, GRU, Dense, Conv1D, Embedding
from ulaw import ulaw2lin, lin2ulaw
from mdense import MDense
max_rnn_neurons = 1
max_conv_inputs = 1
max_mdense_tmp = 1
Activations = {
'LINEAR':0,
'SIGMOID':1,
'TANH':2,
'RELU':3,
'SOFTMAX':4
}
def printVector(f, vector, name, dtype='float32'):
print("name: {}, len: {}".format(name, len(vector)))
v = np.reshape(vector, (-1))
v = v.astype(dtype)
f.write(struct.pack('I', len(v)))
f.write(v.tobytes())
def printSparseVector(f, A, name):
N = A.shape[0]
W = np.zeros((0,))
diag = np.concatenate([np.diag(A[:,:N]), np.diag(A[:,N:2*N]), np.diag(A[:,2*N:])])
A[:,:N] = A[:,:N] - np.diag(np.diag(A[:,:N]))
A[:,N:2*N] = A[:,N:2*N] - np.diag(np.diag(A[:,N:2*N]))
A[:,2*N:] = A[:,2*N:] - np.diag(np.diag(A[:,2*N:]))
printVector(f, diag, name + '_diag')
idx = np.zeros((0,), dtype='int')
for i in range(3*N//16):
pos = idx.shape[0]
idx = np.append(idx, -1)
nb_nonzero = 0
for j in range(N):
if np.sum(np.abs(A[j, i*16:(i+1)*16])) > 1e-10:
nb_nonzero = nb_nonzero + 1
idx = np.append(idx, j)
W = np.concatenate([W, A[j, i*16:(i+1)*16]])
idx[pos] = nb_nonzero
printVector(f, W, name)
#idx = np.tile(np.concatenate([np.array([N]), np.arange(N)]), 3*N//16)
printVector(f, idx, name + '_idx', dtype='int')
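# Binary layout produced by printSparseVector: first the concatenated diagonals
# of the three NxN sub-matrices ('<name>_diag'), then the retained non-zero
# 16-column row blocks flattened in order ('<name>'), and finally an index
# stream ('<name>_idx') holding, for each 16-column block, the number of
# non-zero rows followed by their row indices.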
def dump_layer_ignore(self, f):
print("ignoring layer " + self.name + " of type " + self.__class__.__name__)
Layer.dump_layer = dump_layer_ignore
def dump_sparse_gru(self, f):
global max_rnn_neurons
name = 'sparse_' + self.name
print("printing layer " + name + " of type sparse " + self.__class__.__name__)
weights = self.get_weights()
printSparseVector(f, weights[1], name + '_recurrent_weights')
printVector(f, weights[-1], name + '_bias')
if hasattr(self, 'activation'):
activation = self.activation.__name__.upper()
else:
activation = 'TANH'
if hasattr(self, 'reset_after') and not self.reset_after:
reset_after = 0
else:
reset_after = 1
neurons = weights[0].shape[1]//3
max_rnn_neurons = max(max_rnn_neurons, neurons)
f.write(struct.pack('iii', weights[0].shape[1]//3, Activations[activation], reset_after))
def dump_gru_layer(self, f):
global max_rnn_neurons
name = self.name
print("printing layer " + name + " of type " + self.__class__.__name__)
weights = self.get_weights()
printVector(f, weights[0], name + '_weights')
printVector(f, weights[1], name + '_recurrent_weights')
printVector(f, weights[-1], name + '_bias')
if hasattr(self, 'activation'):
activation = self.activation.__name__.upper()
else:
activation = 'TANH'
if hasattr(self, 'reset_after') and not self.reset_after:
reset_after = 0
else:
reset_after = 1
neurons = weights[0].shape[1]//3
max_rnn_neurons = max(max_rnn_neurons, neurons)
f.write(struct.pack('iiii', weights[0].shape[0], weights[0].shape[1]//3, Activations[activation], reset_after))
CuDNNGRU.dump_layer = dump_gru_layer
GRU.dump_layer = dump_gru_layer
def dump_dense_layer_impl(name, weights, bias, activation, f):
printVector(f, weights, name + '_weights')
printVector(f, bias, name + '_bias')
f.write(struct.pack('iii', weights.shape[0], weights.shape[1], Activations[activation]))
def dump_dense_layer(self, f):
name = self.name
print("printing layer " + name + " of type " + self.__class__.__name__)
weights = self.get_weights()
activation = self.activation.__name__.upper()
dump_dense_layer_impl(name, weights[0], weights[1], activation, f)
Dense.dump_layer = dump_dense_layer
def dump_mdense_layer(self, f):
global max_mdense_tmp
name = self.name
print("printing layer " + name + " of type " + self.__class__.__name__)
weights = self.get_weights()
printVector(f, np.transpose(weights[0], (1, 2, 0)), name + '_weights')
printVector(f, np.transpose(weights[1], (1, 0)), name + '_bias')
printVector(f, np.transpose(weights[2], (1, 0)), name + '_factor')
activation = self.activation.__name__.upper()
max_mdense_tmp = max(max_mdense_tmp, weights[0].shape[0]*weights[0].shape[2])
f.write(struct.pack('iiii', weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], Activations[activation]))
MDense.dump_layer = dump_mdense_layer
def dump_conv1d_layer(self, f):
global max_conv_inputs
name = self.name
print("printing layer " + name + " of type " + self.__class__.__name__)
weights = self.get_weights()
printVector(f, weights[0], name + '_weights')
printVector(f, weights[-1], name + '_bias')
activation = self.activation.__name__.upper()
max_conv_inputs = max(max_conv_inputs, weights[0].shape[1]*weights[0].shape[0])
f.write(struct.pack('iiii', weights[0].shape[1], weights[0].shape[0], weights[0].shape[2], Activations[activation]))
Conv1D.dump_layer = dump_conv1d_layer
def dump_embedding_layer_impl(name, weights, f):
printVector(f, weights, name + '_weights')
f.write(struct.pack('ii', weights.shape[0], weights.shape[1]))
def dump_embedding_layer(self, f):
name = self.name
print("printing layer " + name + " of type " + self.__class__.__name__)
weights = self.get_weights()[0]
dump_embedding_layer_impl(name, weights, f)
Embedding.dump_layer = dump_embedding_layer
model, _, _ = lpcnet.new_lpcnet_model(rnn_units1=384, use_gpu=False)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['sparse_categorical_accuracy'])
model.load_weights(sys.argv[1])
bf = open('nnet_data.bin', 'wb')
embed_size = lpcnet.embed_size
E = model.get_layer('embed_sig').get_weights()[0]
W = model.get_layer('gru_a').get_weights()[0][:embed_size,:]
dump_embedding_layer_impl('gru_a_embed_sig', np.dot(E, W), bf)
W = model.get_layer('gru_a').get_weights()[0][embed_size:2*embed_size,:]
dump_embedding_layer_impl('gru_a_embed_pred', np.dot(E, W), bf)
W = model.get_layer('gru_a').get_weights()[0][2*embed_size:3*embed_size,:]
dump_embedding_layer_impl('gru_a_embed_exc', np.dot(E, W), bf)
W = model.get_layer('gru_a').get_weights()[0][3*embed_size:,:]
#FIXME: dump only half the biases
b = model.get_layer('gru_a').get_weights()[2]
dump_dense_layer_impl('gru_a_dense_feature', W, b, 'LINEAR', bf)
for i, layer in enumerate(model.layers):
layer.dump_layer(bf)
dump_sparse_gru(model.get_layer('gru_a'), bf)
bf.write(struct.pack('III', max_rnn_neurons, max_conv_inputs, max_mdense_tmp))
bf.close()
| 1.695313 | 2 |
straph/paths/meta_walks.py | busyweaver/Straph | 0 | 12798754 | import matplotlib.pyplot as plt
import numpy as np
import numpy.polynomial.polynomial as nppol
class Metawalk:
def __init__(self,
time_intervals=None,
nodes=None,
):
"""
        A basic constructor for a ``Metawalk`` object
        :param time_intervals: A list of couples of floats representing the time intervals
        :param nodes: A list of nodes (first node = source; last node = destination)
"""
self.time_intervals = time_intervals
self.nodes = nodes
def add_link(self, l, t):
self.time_intervals.append(t)
self.nodes.append(l)
def length(self):
return len(self.time_intervals)
def duration(self):
return self.time_intervals[-1][1] - self.time_intervals[0][0]
def clone(self):
return Metawalk(self.time_intervals[:],self.nodes[:])
def __hash__(self):
m = tuple(self.nodes)
n = tuple(self.time_intervals)
return hash((m,n))
def __str__(self):
s = ""
for i in range(0,self.length()):
s += " "
s += str(self.nodes[i])
s += " "
s += str(self.time_intervals[i])
s += " "
s += str(self.nodes[i+1])
s += " | volume = "
s += str(self.volume())
return s
def __repr__(self):
return self.__str__()
def __eq__(self, m):
if m == None:
return False
if m.length() != self.length():
return False
if (m.nodes == self.nodes) and (m.time_intervals == self.time_intervals):
return True
return False
def is_instantenous(self):
        #we check from the last interval because the algorithm that uses this adds new links to the end of the metawalk
b = True
if len(self.time_intervals) == 1:
return True
x = self.time_intervals[-1]
for i in range(-2,-len(self.time_intervals)-1,-1):
if self.time_intervals[i] != x:
return False
return True
def update_following_last(self,b):
#sometimes when adding a metaedge the metawalk has to be cut at some points because some paths are no longer valid.
if b == 0:
#last edge added ends at same time but starts before
self.time_intervals[-1][0] = self.time_intervals[-2][0]
else:
end = self.time_intervals[-1][1]
# last edge starts at same time but ends before
for i in range(-2,-len(self.time_intervals)-1,-1):
if self.time_intervals[i][1] > end:
self.time_intervals[i][1] = end
def volume(self):
"""Normally the link are either exactly the same or disjoint, need to check for inclusion, exclusion of intervals """
time_intervals = self.time_intervals[:]
time_intervals.append([-1,-1])
res = [0 for i in range(len(time_intervals)+ 1)]
last_x,last_y = time_intervals[0]
b = True
if len(time_intervals)==1:
last_x,last_y = time_intervals[0]
if last_x != last_y:
b = False
res[1] = np.around((last_y - last_x), decimals=2)
else:
if last_x == last_y:
degree = 0
else:
degree = 1
for i in range(1,len(time_intervals)):
if last_x != last_y:
b = False
x,y = time_intervals[i]
                #it should be enough to check one bound since there is no overlap in links in fragmented link streams, but maybe it's ok to generalise it and make it work whenever later on; update: false, [1,2],[1,1]
if x == last_x and y == last_y and degree > 0:
degree += 1
else:
res[degree] += np.around((last_y - last_x)/np.math.factorial(degree), decimals=2)
if x != y:
degree = 1
last_x = x
last_y = y
if b == True:
res[0] = 1
res = [np.around(e,decimals=2) for e in res]
return nppol.Polynomial(res)
def passes_through(self,t,v):
if v in self.nodes:
indice = self.nodes.index(v)
else:
return False
if indice == 0:
if t < self.time_intervals[0][0]:
return True
else:
return False
elif indice == len(self.nodes) -1:
if t >= self.time_intervals[-1][1]:
return True
else:
return False
else:
if t >= self.time_intervals[indice-1][1] and t < self.time_intervals[indice][0]:
return True
else:
return False
def passes_through_whole_interval(self,v,t1,t2):
return False
def passes_through_somewhere_interval(self,v,t1,t2):
#t1 included, but t2 not
return False
def add_interval_betweenness(self,t_max,interval_size):
res = []
for i in range(0,len(self.time_intervals)-1):
left_bound = self.time_intervals[i][1]
right_bound = self.time_intervals[i+1][0]
nb_interval_contributes_to = (left_bound - right_bound) // interval_size
fst_interval_left_bound = left_bound // interval_size
for j in range(1,nb_interval_contributes_to+1):
res.append((self.nodes[i+1], fst_interval_left_bound, fst_interval_left_bound + j * interval_size ))
fst_interval_left_bound = fst_interval_left_bound + j * interval_size
return res
def fastest_meta_walk(self):
if self.time_intervals[0] == self.time_intervals[-1]:
return self.clone()
else:
nodes = self.nodes[:]
time_intervals = self.time_intervals[:]
time_intervals[0] = (time_intervals[0][1],time_intervals[0][1])
time_intervals[-1] = (time_intervals[-1][0],time_intervals[-1][0])
for i in range(1,len(time_intervals)):
if time_intervals[i][0] < time_intervals[0][0]:
time_intervals[i] = (time_intervals[0][0],time_intervals[i][1])
if time_intervals[i][1] > time_intervals[-1][1]:
time_intervals[i] = (time_intervals[i][0],time_intervals[-1][1])
return Metawalk(time_intervals,nodes)
def first_time(self):
return self.time_intervals[0][0]
def last_departure(self):
return self.time_intervals[0][1]
def first_arrival(self):
return self.time_intervals[-1][0]
def first_node(self):
return self.nodes[0]
def last_node(self):
return self.nodes[-1]
def plot(self, S, color="#18036f",
markersize=10, dag=False, fig=None):
"""
Draw a path on the ``StreamGraph`` object *S*
:param S:
:param color:
:param markersize:
:param dag:
:param fig:
:return:
"""
if fig is None:
fig, ax = plt.subplots()
else:
ax = plt.gca()
if dag:
dag = S.condensation_dag()
dag.plot(node_to_label=S.node_to_label, ax=ax)
else:
S.plot(ax=ax)
# Plot Source
id_source = S.nodes.index(self.nodes[0])
plt.plot([self.time_intervals[0]], [id_source], color=color,
marker='o', alpha=0.8, markersize=markersize)
# Plot Destination
id_destination = S.nodes.index(self.nodes[-1])
plt.plot([self.time_intervals[-1]], [id_destination], color=color,
marker='o', alpha=0.8, markersize=markersize)
# Plot Path
for i in range(self.length()):
l = self.nodes[i]
l2 = self.nodes[i+1]
t = self.time_intervals[i][0]
t2 = self.time_intervals[i][1]
id1 = S.nodes.index(l)
id2 = S.nodes.index(l2)
idmax = max(id1, id2)
idmin = min(id1, id2)
# verts = [
# (idmin, t), # left, bottom
# (idmax, t), # left, top
# (idmax, t2), # right, top
# (idmin, t2), # right, bottom
# ]
plt.vlines(t, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)
plt.vlines(t2, ymin=idmin, ymax=idmax, linewidth=6, alpha=0.8, color=color)
if i != self.length() - 1:
plt.hlines(id2, xmin=t, xmax=t2,
linewidth=4, alpha=0.8, color=color)
plt.hlines(id1, xmin=t, xmax=t2,
linewidth=4, alpha=0.8, color=color)
# Plot marker
# if t != self.times[i + 1]:
# plt.plot([t], [id2], color=color,
# marker='>', alpha=0.8, markersize=markersize)
# if i != 0 and (t, id1) != (self.times[0], id_source) != (self.times[-1], id_destination):
# # Plot marker
# if id1 == idmin:
# plt.plot([t], [id1], color=color,
# marker='^', alpha=0.8, markersize=markersize)
# else:
# plt.plot([t], [id1], color=color,
# marker='v', alpha=0.8, markersize=markersize)
plt.tight_layout()
return fig
def check_coherence(self, S):
for i in range(self.length()):
l = (self.nodes[i],self.nodes[i+1])
inter = self.time_intervals[i]
            l_ = (self.nodes[i+1],self.nodes[i])  # Reverse the order of the link
if l not in S.links and l_ not in S.links:
raise ValueError("Link : " + str(l) + " does not exists in the Stream Graph !")
else:
t = inter[0]
t2 = inter[1]
if l in S.links:
id_link = S.links.index(l)
else:
id_link = S.links.index(l_)
is_present = False
for lt0, lt1 in zip(S.link_presence[id_link][::2], S.link_presence[id_link][1::2]):
if (lt0 <= t <= lt1) and (lt0 <= t2 <= lt1) and (t <= t2):
is_present = True
if not is_present:
raise ValueError("Link : " + str(l) + " does not exists at time " + str(t) + " !")
print("Check Path Coherence ok !")
return
| 3.09375 | 3 |
src/comment/models.py | xistadi/BookStore | 0 | 12798755 | from django.db import models
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db.models.signals import post_delete
from django.dispatch import receiver
from myauth import models as myauth_models
from products.models import Book
class CommentProducts(models.Model):
profile = models.ForeignKey(
myauth_models.Profile,
on_delete=models.PROTECT,
related_name='comments'
)
book = models.ForeignKey(
Book,
on_delete=models.PROTECT,
related_name='comments'
)
comment = models.TextField(
verbose_name='Комментарий',
default='',
blank=True,
null=True
)
date_add = models.DateTimeField(
auto_now=False,
auto_now_add=True,
verbose_name='Дата внесения в каталог'
)
date_last_change = models.DateTimeField(
auto_now=True,
auto_now_add=False,
verbose_name='Дата последнего изменения карточки'
)
stars = models.IntegerField(
validators=[
MinValueValidator(0),
MaxValueValidator(5)]
)
def __str__(self):
return f'Комментарий №{self.pk}, пользователя: {self.profile}, к книге: {self.book}'
class Meta:
verbose_name = 'Комментарий'
verbose_name_plural = 'Комментарии'
@receiver(post_delete, sender=CommentProducts)
def post_delete_review(sender, instance, **kwargs):
avr = instance.book.get_rating()
instance.book.avr_rating = avr
instance.book.save()
| 1.789063 | 2 |
examples/BioASQ/extra_modules/CoreMMR.py | paritoshgpt1/BOOM | 29 | 12798756 | <reponame>paritoshgpt1/BOOM
import glog as log
from boom.modules import Module
from multiprocessing import Pool
from .bioasq.coreMMR import CoreMMR as BioASQCoreMMR
def multi_process_helper(args):
questions, alpha = args
ranker = BioASQCoreMMR()
result = []
for question in questions:
if 'snippets' in question:
question['snippets'] = [s['text'] for s in question['snippets']]
result.append((ranker.getRankedList(question, alpha, 0), question['ideal_answer'][0]))
#log.debug(result)
return result
class CoreMMR(Module):
def __init__(self, module_id, name, exp_name, rabbitmq_host, pipeline_conf, module_conf, **kwargs):
super(CoreMMR, self).__init__(module_id, name, exp_name, rabbitmq_host, pipeline_conf, module_conf, **kwargs)
self.processes = module_conf['processes'] if 'processes' in module_conf else 1
self.pool = Pool(processes=self.processes)
    ## Override the cleanup function to make sure the process pool is closed.
def cleanup(self):
self.pool.close()
self.pool.join()
def process(self, job, data):
questions = data['questions']
N = len(questions)
step_size = int(N / float(self.processes))
slices = [(questions[i:i+step_size], job.params['alpha']) for i in range(0, N, step_size)]
tmp = self.pool.map(multi_process_helper, slices)
result = []
for x in tmp:
result += x
return result
| 2.140625 | 2 |
app/__init__.py | jamescurtin/ham-dash | 0 | 12798757 | <filename>app/__init__.py
"""Main application."""
| 0.996094 | 1 |
main.py | Wajahat-Mirza/Linear_Algebra_Project | 1 | 12798758 | <reponame>Wajahat-Mirza/Linear_Algebra_Project
import sys
import time
from help_functions import *
from Inverse_mat import *
from Matrix_mutliplication import *
from linear_system import *
from LU_factorization import *
from determinant import *
def input_mat(num):
rows = input("Enter number of rows for Matrix {}: ".format(num+1))
cols = input("Enter number of columns for Matrix {}: ".format(num+1))
while (rows.isnumeric() != True or cols.isnumeric() != True):
print("\nError: Invalid Input. Rows and Cols can only be positive. Try Again. \n")
rows = input("Enter number of rows for Matrix {}: ".format(num+1))
cols = input("Enter number of columns for Matrix {}: ".format(num+1))
rows = int(rows)
cols = int(cols)
final_matrix= []
for i in range(rows):
sub_matrix = []
for j in range(cols):
val = input("Please input Value of element {}{} of Matric {}: ".format(i+1,j+1, num+1))
while val.replace(".", "", 1).lstrip('-').isdigit() != True:
print("Error: Invalid! Try Again")
val = input("Please input Value of element {}{} of Matric {}: ".format(i+1,j+1, num+1))
val = float(val)
sub_matrix.append(val)
final_matrix.append(sub_matrix)
display_mat("\tMatrix_{}".format(num+1), None,final_matrix, None,None)
return final_matrix, rows, cols
def actions(action,matrix, rows, cols):
# final_matrix, rows, cols = input_mat(0)
while action.lower() != "n":
if action == "1":
# final_matrix, rows, cols = input_mat(0)
while rows != cols:
print("Mathematical Error: Non-Square Matrices Do not Have Inverse.\nPlease give Valid Inputs again")
matrix, rows, cols = input_mat(0)
ident = identity_mat(rows,cols)
new_ident, inverse_final = inverse(matrix, ident)
validity_input(matrix, rows, cols)
elif action == "2":
num_of_Mat = input("Num of Matrices to Multiply? ")
while num_of_Mat.isdigit() != True:
print("Error: Please give a positive numeric value")
num_of_Mat = input("Num of Matrices to Multiply? ")
num_of_Mat = int(num_of_Mat)
if num_of_Mat != 2:
print("Error: This Code can multiply Two Matrices at a time. \nPlease Input two matrices and mutliply their resultant with the other matrices if need be.\
\nSorry!")
validity_input(matrix, rows, cols)
mats = []
for num in range(num_of_Mat):
final_mat, rows, cols = input_mat(0)
mats.append(final_mat)
mat_multiplication(mats[0], mats[1])
display_mat("\nMatrix_Multiplied\n", None,mats[0], None,None)
display_mat("\nMatrix_Multiplied\n", None,matrix, None,None)
validity_input(matrix, rows, cols)
elif action == "3":
(A,P,L,U) = lu_decomposition(matrix)
mats = [A,P,L,U]
mats_names = ['A','P','L','U']
for i, j in zip(mats_names, mats):
display_mat("\n\tMatrix_{}\n".format(i), None,j, None,None)
print("\nCalculating... Please be patient with the machine :) ")
time.sleep(1)
validity_input(matrix, rows, cols)
elif action == "5":
A, B, C, total = lin_input()
print("\n\n\t\t\033[1mFirst, solve the system using Sympy!\033[0m \n\n")
sol = lin(C, total)
print("\n\n\t\t\033[1mNow, we solve the system using our program.\033[0m \n\n")
mat_A, mat_B, result = linear_system(A, B)
print("\nThus, \033[1m\033[4mSymPy output\033[0m is:\n\n\t\t{}\n\nWhereas \033[1m\033[4mOur program output\033[0m is:\n\n\t\t{}\n".format(sol, result))
validity_input(matrix, rows, cols)
elif action == "4":
det = determinant(matrix, det = 0)
print("\n\t\t\tDeterminant of the given Matrix is: \033[4m\033[1m{}\033[0m \n".format(det))
validity_input(matrix, rows, cols)
elif action =="6":
t = transpose(matrix)
validity_input(matrix, rows, cols)
elif action == "7":
print("You have Successfully Terminated the Program!")
sys.exit()
else:
print("Invalid input. Program Terminated.")
sys.exit()
return action
def validity_input(matrix, rows, cols):
action = input("Do you want to continue [y/n]? ")
while action != "y" and action != "n":
print("Error: Invalid Input. Give either 'y' or 'n'. Try again!")
action = input("Do you want to continue [y/n]? ")
if action == "y":
action = input("\n1. = Inverse \n2. = Multiplication\n3. = LU factorization\n4. = Finding Determinant \n\
5. = Solve Linear Equation System\n6. = Transpose\n7. = Quit\nPlease Input Numerical Value for Action. E.g. either '1' or '2': ")
actions(action, matrix, rows, cols)
else:
print("Program Successfully Terminated")
sys.exit()
return action
print("================================================================================================")
print('\n\t\t\t\033[1m \033[91m \033[4m' + "Matrix Calculator\n" + '\033[0m')
print("================================================================================================")
print("\nEnter your matrix on which you want the actions to execute: \n")
print("Directions to note \nThe Matrix that you are giving now can be used for finding \n\
1. Inverse\n2. LU factorization\n3. Finding Determinant \n4. Transpose\n\n\
This Matrix will not be used for computing \n1. Multiplication\n2. Solving Linear Equation System\nYou will have to give separate inputs, asked later\n\n\
If there is any error, please contact us! Have Fun! \n")
print("================================================================================================")
matrix, rows, cols = input_mat(0)
validity_input(matrix, rows, cols)
| 3.875 | 4 |
src/lib/twilio/request_validator.py | crucialwebstudio/unemployment-reminders | 0 | 12798759 | from urllib.parse import urlparse, urlunparse
from functools import wraps
from flask import abort, request, current_app
from lib.twilio import TwilioClient
def validate_twilio_request(f):
"""Validates that incoming requests genuinely originated from Twilio"""
# Adapted from https://www.twilio.com/docs/usage/tutorials/how-to-secure-your-flask-app-by-validating-incoming-twilio-requests?code-sample=code-custom-decorator-for-flask-apps-to-validate-twilio-requests-3&code-language=Python&code-sdk-version=6.x
@wraps(f)
def decorated_function(*args, **kwargs):
twilio_client = TwilioClient(
current_app.config['SECRETS'].TWILIO_ACCOUNT_SID,
current_app.config['SECRETS'].TWILIO_AUTH_TOKEN
)
# save variables from original request as we will be making transformations on it below
original_url = request.url
original_host_header = request.headers.get('X-Original-Host')
# the url parts to be transformed
twilio_url_parts = urlparse(original_url)
"""
Solve issues with NGROK
Twilio sees: http://somedomain.ngrok.io
App sees: http://localhost:5000
So we replace the domain our app sees with the X-Original-Host header
"""
if original_host_header:
twilio_url_parts = twilio_url_parts._replace(netloc=original_host_header)
"""
Solve issues with API Gateway custom domains
Twilio sees: https://custom-domain.com/bot/validate-next-alert
App sees: https://custom-domain.com/{stage}/bot/validate-next-alert
So we strip API_GATEWAY_BASE_PATH from the beginning of the path
"""
api_gateway_base_path = current_app.config['API_GATEWAY_BASE_PATH']
if api_gateway_base_path:
# Strip N chars from beginning of path.
chars_to_strip = len(f"/{api_gateway_base_path}")
new_path = twilio_url_parts.path[chars_to_strip:]
twilio_url_parts = twilio_url_parts._replace(path=new_path)
# Validate the request using its URL, POST data, and X-TWILIO-SIGNATURE header
request_valid = twilio_client.validate_request(
urlunparse(twilio_url_parts), request.form, request.headers.get('X-TWILIO-SIGNATURE', '')
)
# Continue processing the request if it's valid, return a 403 error if it's not
if request_valid:
return f(*args, **kwargs)
else:
return abort(403)
return decorated_function
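# Minimal usage sketch (the blueprint, route and handler names below are
# illustrative, not part of this module):
#
#   from flask import Blueprint
#   from lib.twilio.request_validator import validate_twilio_request
#
#   bot = Blueprint('bot', __name__)
#
#   @bot.route('/bot/validate-next-alert', methods=['POST'])
#   @validate_twilio_request
#   def validate_next_alert():
#       return '', 204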
| 2.859375 | 3 |
rpicam/utils/telegram_poster.py | LokiLuciferase/rpicam | 0 | 12798760 | #!/usr/bin/env python3
import os
from typing import Union
from pathlib import Path
import requests
from rpicam.utils.logging_utils import get_logger
class TelegramPoster:
"""
Bare-bones class to post videos to a Telegram chat.
    By default, uses credentials stored in the environment.
"""
API_URL = 'https://api.telegram.org'
API_TOKEN_ENV_VAR = 'RPICAM_TG_API_TOKEN'
CHAT_ID_ENV_VAR = 'RPICAM_TG_CHAT_ID'
def __init__(self, api_token: str = None, chat_id: str = None):
if api_token is not None and chat_id is not None:
self.api_token = api_token
self.chat_id = chat_id
else:
self.api_token = os.getenv(self.API_TOKEN_ENV_VAR, None)
self.chat_id = os.getenv(self.CHAT_ID_ENV_VAR, None)
self._logger = get_logger(self.__class__.__name__, verb=True)
if self.api_token is None or self.chat_id is None:
raise RuntimeError('Could not find Telegram credentials in environment.')
def send_video(self, p: Union[Path, str]):
"""Post the given video to Telegram using stored credentials."""
p = Path(str(p)).resolve()
if not p.is_file():
raise RuntimeError(f'file not found: {p}')
url = f'{self.API_URL}/bot{self.api_token}/sendVideo'
files = {
'chat_id': (None, self.chat_id),
'video': (str(p), open(p, 'rb'))
}
r = requests.post(url, files=files)
if r.status_code != 200:
self._logger.error(f'Could not upload file. Exit code was {r.status_code}')
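# Minimal usage sketch (assumes RPICAM_TG_API_TOKEN and RPICAM_TG_CHAT_ID are
# set in the environment, or that api_token/chat_id are passed explicitly; the
# video path is illustrative):
#
#   poster = TelegramPoster()
#   poster.send_video('/home/pi/videos/latest.mp4')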
| 2.578125 | 3 |
src/tracker/mtmct/wp/utils_for_mtmct.py | ToumaKazusa3/WP-MTMCT | 1 | 12798761 | <gh_stars>1-10
import json
import pickle
import numpy as np
import os
import errno
from geopy.distance import geodesic
from pathlib import Path
import glob
import cv2
def ham_to_dem(time):
'''
:param time: hour minute second
:return: second
'''
dam_time_list = []
if isinstance(time, list):
for i in time:
dem_time = int(i % 100) + 60 * (int(i % 10000) - int(i % 100)) / 100 + int(i / 10000) * 60 * 60
dam_time_list.append(dem_time)
return dam_time_list
else:
dem_time = int(time % 100) + 60 * (int(time % 10000) - int(time % 100)) / 100 + int(time / 10000) * 60 * 60
return int(dem_time)
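# Worked example: ham_to_dem(102630) = 30 + 26 * 60 + 10 * 3600 = 37590 seconds,
# i.e. 10:26:30 expressed in seconds since midnight.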
def get_time(time):
'''
:param time: hour minute second
:return: second from 102630
'''
now_time = ham_to_dem(time)
start_time = ham_to_dem(102630)
sub_time = now_time - start_time
return int(sub_time)
def trans_gps_diff_to_dist_v1(a, b):
'''
:param a: ndarray:[jd, wd]
:param b: ndarray:[jd, wd]
:return: distance
'''
dist = a - b
j_dist = dist[0] * 111000 * np.cos(a[1] / 180 * np.pi)
w_dist = dist[1] * 111000
return np.sqrt(np.power(j_dist, 2)+np.power(w_dist, 2))
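# Rough worked example: with a = [121.001, 31.001] and b = [121.0, 31.0],
# the longitude difference maps to about 111000 * cos(31 deg) * 0.001 ~ 95 m
# and the latitude difference to about 111 m, giving a distance of roughly
# sqrt(95**2 + 111**2) ~ 146 m.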
def trans_gps_diff_to_dist_v2(dist):
'''
:param dist: [jd_sub, wd_sub]
:return: distance
'''
j_dist = dist[0] * 111000 * np.cos(31 / 180 * np.pi)
w_dist = dist[1] * 111000
return np.sqrt(np.power(j_dist, 2)+np.power(w_dist, 2))
def trans_gps_diff_to_dist_v3(gps_1, gps_2):
'''
:param gps_1: [jd, wd]
:param gps_2: [jd, wd]
:return: distance between two gps
'''
    return geodesic((gps_1[1], gps_1[0]), (gps_2[1], gps_2[0])).m
def pixel_to_loc(data_pix2loc, traj_point, method='nearest', k=4):
    '''
    :param data_pix2loc: [pixel_x, pixel_y, jd, wd] size[n, 4]
    :param traj_point: [pixel_x, pixel_y] size[2]
    :param method: 'nearest' 'linear' 'nearest_k_mean'
    :param k: num of selected points
    :return: traj_point's jd and wd
    '''
    if method == 'nearest':
ex = data_pix2loc[:, 0] - traj_point[0]
ey = data_pix2loc[:, 1] - traj_point[1]
dist = ex ** 2 + ey ** 2
index = np.argsort(dist)[0]
jd = data_pix2loc[index, 2]
wd = data_pix2loc[index, 3]
elif method == 'linear':
        index = np.where((data_pix2loc[:, 0].astype(int) == traj_point[0]) & (data_pix2loc[:, 1].astype(int) == traj_point[1]))
jd = data_pix2loc[index, 2]
wd = data_pix2loc[index, 3]
elif method == 'nearest_k_mean':
ex = data_pix2loc[:, 0] - traj_point[0]
ey = data_pix2loc[:, 1] - traj_point[1]
dist = ex ** 2 + ey ** 2
indexs = np.argsort(dist)[:k]
jd, wd = np.mean(data_pix2loc[indexs, 2:], axis=0)
else:
        raise ValueError('Unknown method: {}'.format(method))
return jd, wd
def norm_data(a):
'''
:param a: feature distance N X N
:return: normalize feature
'''
a = a.copy()
a_min = np.min(a, axis=1, keepdims=True)
_range = np.max(a,axis=1,keepdims=True) - a_min
return (a - a_min) / _range
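# Small example: norm_data(np.array([[1., 2., 3.]])) returns [[0., 0.5, 1.]],
# i.e. each row is min-max scaled independently.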
def check_path(folder_dir, create=False):
'''
:param folder_dir: file path
:param create: create file or not
:return:
'''
folder_dir = Path(folder_dir)
if not folder_dir.exists():
if create:
try:
os.makedirs(folder_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
else:
raise IOError
return folder_dir
class DataPacker(object):
'''
    This class supplies four different data serialization formats: pickle, JSON, NumPy text, and NumPy binary
'''
@staticmethod
def dump(info, file_path):
check_path(Path(file_path).parent, create=True)
with open(file_path, 'wb') as f:
pickle.dump(info, f)
print('Store data ---> ' + str(file_path), flush=True)
@staticmethod
def load(file_path):
check_path(file_path)
with open(file_path, 'rb') as f:
info = pickle.load(f)
print('Load data <--- ' + str(file_path), flush=True)
return info
@staticmethod
def json_dump(info, file_path, encoding='UTF-8'):
check_path(Path(file_path).parent, create=True)
with open(file_path, 'w', encoding=encoding) as f:
json.dump(info, f)
print('Store data ---> ' + str(file_path), flush=True)
@staticmethod
def json_load(file_path, encoding='UTF-8', acq_print=True):
check_path(file_path)
with open(file_path, 'r', encoding=encoding) as f:
info = json.load(f)
if acq_print:
print('Load data <--- ' + str(file_path), flush=True)
return info
@staticmethod
def np_save_txt(info, file_path):
check_path(Path(file_path).parent, create=True)
        assert file_path.split('.')[-1] == 'txt', 'This file\'s suffix is not txt, please check file path.'
np.savetxt(file_path, info)
print('Store data ---> ' + str(file_path), flush=True)
@staticmethod
def np_load_txt(file_path, acq_print=True):
check_path(file_path)
        assert file_path.split('.')[-1] == 'txt', 'This file\'s suffix is not txt, please check file path.'
info = np.loadtxt(file_path)
if acq_print:
print('Load data <--- ' + str(file_path), flush=True)
return info
@staticmethod
def np_save(info, file_path):
check_path(Path(file_path).parent, create=True)
        assert file_path.split('.')[-1] == 'npy', 'This file\'s suffix is not npy, please check file path.'
np.save(file_path, info)
print('Store data ---> ' + str(file_path), flush=True)
@staticmethod
def np_load(file_path, acq_print=True):
check_path(file_path)
        assert file_path.split('.')[-1] == 'npy', 'This file\'s suffix is not npy, please check file path.'
info = np.load(file_path)
if acq_print:
print('Load data <--- ' + str(file_path), flush=True)
return info
class DataProcesser(object):
@staticmethod
def refine_v1(camid, locx, locy):
if camid == 1:
if locx + 30 * locy <= 3000 or locx - 8.235 * locy >= 1200:
return False
if camid == 2:
if locy <= 150 or locx >= 3750:
return False
if camid == 3:
if locy <= 200:
return False
if camid == 4:
if locy <= 600:
return False
if camid == 5:
if locy <= 150 or 0.1167 * locx + locy <= 350 or locx - 5.7143 * locy >= 2000:
return False
if camid == 6:
if locy <= 150 or locx - 2.5 * locy >= 2000:
return False
return True
@staticmethod
def refine_v2(camid, locx, locy):
if camid == 1:
if locx + 20 * locy <= 4000 or locx - 8.235 * locy >= 1200:
return False
if camid == 2:
if locy <= 200 or locx >= 3600:
return False
if camid == 3:
if locy <= 250:
return False
if camid == 4:
if locy <= 600 or (locx >= 2000 and 0.5833 * locx + locy <= 2340):
return False
if camid == 5:
if locy <= 150 or 0.1167 * locx + locy <= 350 or locx - 5.7143 * locy >= 2000:
return False
if camid == 6:
if locy <= 150 or locx - 2.5 * locy >= 2000:
return False
return True
@staticmethod
def refine_result(path, path_new, camid):
refineData = []
with open(path, 'r') as f:
lines = f.readlines()
for line in lines:
data = line.strip().split(',')
locx = float(data[2]) + float(data[4]) / 2
locy = float(data[3]) + float(data[5])
if DataProcesser.refine_v2(camid, locx, locy):
refineData.append(line)
with open(path_new, 'w') as f:
f.writelines(refineData)
@staticmethod
def crop_image(img_dir, result_dir, save_img_dir, camid):
img_paths = glob.glob(str(img_dir / '*.jpg'))
img_paths.sort()
result_path = result_dir / ('GPSReID0'+str(camid)+'_refine_v2.txt')
data_list = []
with open(result_path, 'r') as f:
total_data = f.readlines()
for data in total_data:
data = data.strip().split(',')
data_list.append([int(data[0]), int(data[1]), float(data[2]), float(data[3]), float(data[4]), float(data[5])])
data_arr = np.array(data_list)
for img_path in img_paths:
index = int(img_path.split('/')[-1].split('.')[0]) + 1
img0 = cv2.imread(img_path)
data_cur = data_arr[data_arr[:,0] == index]
for data in data_cur:
fid, tid, x1, y1, w, h = data
save_img_path = save_img_dir / '{:02d}'.format(camid) / '{:05d}'.format(int(tid))
check_path(save_img_path, create=True)
count = len(glob.glob(str(save_img_path / '*.jpg')))
y2 = y1 + h
x2 = x1 + w
if y1 <= 0:
y1 = 0
if x1 <= 0:
x1 = 0
if y2 > 3007:
y2 = 3007
if x2 > 4000:
x2 = 4000
img = img0[int(y1):int(y2),int(x1):int(x2)]
img_name = 'T{:05d}_C{:02d}_F{:06d}_I{:06d}.jpg'.format(int(tid), camid, int(fid), count+1)
try:
cv2.imwrite(str(save_img_path / img_name), img)
except:
print('ignore image -- ', save_img_path / img_name)
@staticmethod
def count_dataset_info(tracklet_dir, cam_num, verbose=False, save_info=False):
tracklet_dir = Path(tracklet_dir)
info = {'old2new':[{} for i in range(cam_num)], 'ignore':[],
'tracklets_info':[{} for i in range(cam_num)], 'tracklets':[]}
new_id = 0
for camid in range(cam_num):
tracklets_per_cam_dir = tracklet_dir / '{:02d}'.format(camid + 1)
tracklet_ids = os.listdir(tracklets_per_cam_dir)
tracklet_ids.sort()
tracklets_num = len(tracklet_ids)
count_ignore_tkl = 0
count_ava_tkl = 0
count_avg_tkl_num = 0
total_num = 0
for tracklet_id in tracklet_ids:
img_paths = glob.glob(str(tracklets_per_cam_dir / tracklet_id / '*.jpg'))
img_paths.sort()
if len(img_paths) <= 2:
count_ignore_tkl += 1
info['old2new'][camid][int(tracklet_id)] = None
info['ignore'].append(tracklets_per_cam_dir / tracklet_id)
else:
count_ava_tkl += 1
info['old2new'][camid][int(tracklet_id)] = new_id
new_id += 1
info['tracklets'].append((img_paths, new_id, camid))
total_num += len(img_paths)
count_avg_tkl_num += 1
info['tracklets_info'][camid]['total_tracklets_num'] = tracklets_num
info['tracklets_info'][camid]['ignore_tracklets_num'] = count_ignore_tkl
info['tracklets_info'][camid]['available_tracklets_num'] = count_ava_tkl
info['tracklets_info'][camid]['available_images_num'] = total_num
info['tracklets_info'][camid]['average_images_num'] = total_num // count_ava_tkl
assert info['tracklets_info'][camid]['total_tracklets_num'] == \
info['tracklets_info'][camid]['ignore_tracklets_num'] + \
info['tracklets_info'][camid]['available_tracklets_num']
if save_info:
DataPacker.dump(info, tracklet_dir / 'info.pkl')
return info
if verbose:
ti = info['tracklets_info']
to_tn = ti[1]['total_tracklets_num'] + ti[2]['total_tracklets_num'] + ti[3]['total_tracklets_num'] + \
ti[4]['total_tracklets_num'] + ti[5]['total_tracklets_num'] + ti[0]['total_tracklets_num']
to_itn = ti[1]['ignore_tracklets_num'] + ti[2]['ignore_tracklets_num'] + ti[3]['ignore_tracklets_num'] + \
ti[4]['ignore_tracklets_num'] + ti[5]['ignore_tracklets_num'] + ti[0]['ignore_tracklets_num']
to_atn = ti[1]['available_tracklets_num'] + ti[2]['available_tracklets_num'] + ti[3]['available_tracklets_num'] + \
ti[4]['available_tracklets_num'] + ti[5]['available_tracklets_num'] + ti[0]['available_tracklets_num']
to_avain = ti[1]['available_images_num'] + ti[2]['available_images_num'] + ti[3]['available_images_num'] + \
ti[4]['available_images_num'] + ti[5]['available_images_num'] + ti[0]['available_images_num']
to_avein = ti[1]['average_images_num'] + ti[2]['average_images_num'] + ti[3]['average_images_num'] + \
ti[4]['average_images_num'] + ti[5]['average_images_num'] + ti[0]['average_images_num']
print("=> GPSMOT loaded")
print("Dataset statistics:")
print(" ----------------------------------------------------------------------------------------------------------------")
print(" subset | # tkls num | # ignore tkls num | # available tkls num | # available images num | # average images num")
print(" ----------------------------------------------------------------------------------------------------------------")
print(" cam1 | {:10d} | {:17d} | {:20d} | {:22d} | {:20d} ".format(ti[0]['total_tracklets_num'], ti[0]['ignore_tracklets_num'], \
ti[0]['available_tracklets_num'], ti[0]['available_images_num'], ti[0]['average_images_num']))
print(" cam2 | {:10d} | {:17d} | {:20d} | {:22d} | {:20d} ".format(ti[1]['total_tracklets_num'], ti[1]['ignore_tracklets_num'], \
ti[1]['available_tracklets_num'], ti[1]['available_images_num'], ti[1]['average_images_num']))
print(" cam3 | {:10d} | {:17d} | {:20d} | {:22d} | {:20d} ".format(ti[2]['total_tracklets_num'], ti[2]['ignore_tracklets_num'], \
ti[2]['available_tracklets_num'], ti[2]['available_images_num'], ti[2]['average_images_num']))
print(" cam4 | {:10d} | {:17d} | {:20d} | {:22d} | {:20d} ".format(ti[3]['total_tracklets_num'], ti[3]['ignore_tracklets_num'], \
ti[3]['available_tracklets_num'], ti[3]['available_images_num'], ti[3]['average_images_num']))
print(" cam5 | {:10d} | {:17d} | {:20d} | {:22d} | {:20d} ".format(ti[4]['total_tracklets_num'], ti[4]['ignore_tracklets_num'], \
ti[4]['available_tracklets_num'], ti[4]['available_images_num'], ti[4]['average_images_num']))
print(" cam6 | {:10d} | {:17d} | {:20d} | {:22d} | {:20d} ".format(ti[5]['total_tracklets_num'], ti[5]['ignore_tracklets_num'], \
ti[5]['available_tracklets_num'], ti[5]['available_images_num'], ti[5]['average_images_num']))
print(" ----------------------------------------------------------------------------------------------------------------")
print(" total | {:10d} | {:17d} | {:20d} | {:22d} | {:20d} ".format(to_tn, to_itn, to_atn, to_avain, to_avein)) | 2.40625 | 2 |
ci/check_copyright_block.py | VishnuPrem/multi_robot_restaurant | 7 | 12798762 | <filename>ci/check_copyright_block.py
#!/usr/bin/env python3
from datetime import datetime
import re
import sys
file_path = sys.argv[1]
n_lines = open(file_path, 'r').readlines()
email_address = open('.git/copyrightemail', 'r').readlines()[0].strip()
if not email_address in "\n".join(n_lines[:5]):
print("{}: Your email, {}, is not found in the Copyright header!".format(file_path, email_address))
exit(-1)
date_idx = 0
if n_lines[date_idx].strip() == "#pragma once":
date_idx += 1
def fix_date(line):
if str(datetime.today().year) in line:
return line
date_string = line
range_match = re.match(r'.*([1-3][0-9]{3})( )*-( )*([1-3][0-9]{3})', date_string)
if range_match is not None:
date_string = date_string.replace(str(range_match.group(4)), str(datetime.today().year))
else:
single_match = re.match(r'.*([1-3][0-9]{3})', date_string)
if single_match is not None:
date_string = date_string.replace(str(single_match.group(1)), str(single_match.group(1)) + ' - ' + str(datetime.today().year))
return date_string
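# Worked example (assuming the current year is 2021): 'Copyright (c) 2018'
# becomes 'Copyright (c) 2018 - 2021', and 'Copyright (c) 2016 - 2019' becomes
# 'Copyright (c) 2016 - 2021'; lines already containing the current year are
# returned unchanged.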
old_date_line = n_lines[date_idx]
new_date_line = fix_date(old_date_line)
if old_date_line != new_date_line:
print("Updated date!")
print("Old date line:", old_date_line)
print("New date line:", new_date_line)
n_lines[date_idx] = new_date_line
f = open(file_path, 'w')
f.writelines(n_lines)
f.close()
| 2.765625 | 3 |
py/fingerboard.py | Takayoshi-Aoyagi/Jazz-Chords | 1 | 12798763 | # coding: UTF-8
from tone import Tone
class Fingerboard:
@classmethod
def getPos(cls):
_pos = {}
openTones = ["E", "B", "G", "D", "A", "E"]
tone = Tone()
for stringIndex, openTone in enumerate(openTones):
toneIndex = tone.getToneNumberByName(openTone)
arr = []
for i in range(13):
toneString = tone.getToneName(openTone, i)
arr.append(toneString)
_pos[stringIndex + 1] = arr
return _pos
@classmethod
def dump(cls, includes):
_pos = cls.getPos()
if len(includes) > 0:
for key in _pos.keys():
arr = _pos[key]
for i, tone in enumerate(arr):
if tone not in includes.keys():
arr[i] = " "
else:
arr[i] = "%2s(%3s)" % (tone, includes[tone])
flets = map(lambda x: " %7s " % x, range(13))
print " " + " ".join(flets)
for key in sorted(_pos.keys()):
tones = _pos[key]
tones = map(lambda x: " %7s " % x, tones)
print '%s弦: |%s|' % (key, "|".join(tones))
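# Minimal usage sketch (assumes Tone.getToneName returns plain note names such
# as 'C', 'E', 'G'): print the fretboard with a C major triad highlighted,
# labelling the chord tones.
#
#   Fingerboard.dump({'C': 'R', 'E': '3', 'G': '5'})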
| 2.8125 | 3 |
app/Account.py | dan-english/test3 | 1 | 12798764 | """Account Model."""
from masoniteorm.models import Model
class Account(Model):
"""Account Model."""
__fillable__ = ['email', 'access_token', 'scopes', 'type', 'valid', 'user_id', 'provider', 'nylas_account_id']
__auth__ = 'email'
| 2.453125 | 2 |
lib10x/lib10x.py | antonybholmes/lib10x | 0 | 12798765 | <filename>lib10x/lib10x.py
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 6 16:51:15 2018
@author: antony
"""
import matplotlib
# matplotlib.use('agg')
import matplotlib.pyplot as plt
import collections
import numpy as np
import scipy.sparse as sp_sparse
import tables
import pandas as pd
from sklearn.manifold import TSNE
import sklearn.preprocessing
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import silhouette_samples
from sklearn.neighbors import kneighbors_graph
from scipy.interpolate import griddata
import h5py
from scipy.interpolate import interp1d
from scipy.spatial import distance
import networkx as nx
import os
import phenograph
import libplot
import libcluster
import libtsne
import seaborn as sns
from libsparse.libsparse import SparseDataFrame
from lib10x.sample import *
from scipy.spatial import ConvexHull
from PIL import Image, ImageFilter
from scipy.stats import binned_statistic
import imagelib
TNSE_AX_Q = 0.999
MARKER_SIZE = 10
SUBPLOT_SIZE = 4
EXP_ALPHA = 0.8
# '#f2f2f2' #(0.98, 0.98, 0.98) #(0.8, 0.8, 0.8) #(0.85, 0.85, 0.85
BACKGROUND_SAMPLE_COLOR = [0.75, 0.75, 0.75]
EDGE_COLOR = None # [0.3, 0.3, 0.3] #'#4d4d4d'
EDGE_WIDTH = 0 # 0.25
ALPHA = 0.9
BLUE_YELLOW_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list(
'blue_yellow', ['#162d50', '#ffdd55'])
BLUE_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list(
'blue', ['#162d50', '#afc6e9'])
BLUE_GREEN_YELLOW_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list(
'bgy', ['#162d50', '#214478', '#217844', '#ffcc00', '#ffdd55'])
# BGY_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list('bgy', ['#002255', '#2ca05a', '#ffd42a'])
# BGY_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list('bgy', ['#002255', '#003380', '#2ca05a', '#ffd42a', '#ffdd55'])
# BGY_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list('bgy', ['#003366', '#339966', '#ffff66', '#ffff00')
# BGY_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list('bgy', ['#001a33', '#003366', '#339933', '#ffff66', '#ffff00'])
# BGY_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list('bgy', ['#00264d', '#003366', '#339933', '#e6e600', '#ffff33'])
# BGY_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list('bgy', ['#003366', '#40bf80', '#ffff33'])
BGY_ORIG_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list(
'bgy', ['#002255', '#003380', '#2ca05a', '#ffd42a', '#ffdd55'])
BGY_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list(
'bgy', ['#003366', '#004d99', '#40bf80', '#ffe066', '#ffd633'])
GRAY_PURPLE_YELLOW_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list(
'grey_purple_yellow', ['#e6e6e6', '#3333ff', '#ff33ff', '#ffe066'])
GYBLGRYL_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list(
'grey_blue_green_yellow', ['#e6e6e6', '#0055d4', '#00aa44', '#ffe066'])
OR_RED_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list(
'or_red', matplotlib.cm.OrRd(range(4, 256)))
BU_PU_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list(
'bu_pu', matplotlib.cm.BuPu(range(4, 256)))
# BGY_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list('bgy', ['#0066ff', '#37c871', '#ffd42a'])
# BGY_CMAP = matplotlib.colors.LinearSegmentedColormap.from_list('bgy', ['#003380', '#5fd38d', '#ffd42a'])
EXP_NORM = matplotlib.colors.Normalize(-1, 3, clip=True)
LEGEND_PARAMS = {'show': True, 'cols': 4, 'markerscale': 2}
CLUSTER_101_COLOR = (0.3, 0.3, 0.3)
np.random.seed(0)
GeneBCMatrix = collections.namedtuple(
'GeneBCMatrix', ['gene_ids', 'gene_names', 'barcodes', 'matrix'])
def decode(items):
return np.array([x.decode('utf-8') for x in items])
def get_matrix_from_h5(filename, genome):
with tables.open_file(filename, 'r') as f:
try:
dsets = {}
print(f.list_nodes('/'))
for node in f.walk_nodes('/' + genome, 'Array'):
dsets[node.name] = node.read()
# for node in f.walk_nodes('/matrix', 'Array'):
# dsets[node.name] = node.read()
print(dsets)
matrix = sp_sparse.csc_matrix(
(dsets['data'], dsets['indices'], dsets['indptr']), shape=dsets['shape'])
return GeneBCMatrix(decode(dsets['genes']), decode(dsets['gene_names']), decode(dsets['barcodes']), matrix)
except tables.NoSuchNodeError:
raise Exception("Genome %s does not exist in this file." % genome)
except KeyError:
raise Exception("File is missing one or more required datasets.")
#GeneBCMatrix = collections.namedtuple('FeatureBCMatrix', ['feature_ids', 'feature_names', 'barcodes', 'matrix'])
def get_matrix_from_h5_v2(filename, genome):
with h5py.File(filename, 'r') as f:
if u'version' in f.attrs:
if f.attrs['version'] > 2:
raise ValueError(
                    'Matrix HDF5 file format version (%d) is a newer version that is not supported by this function.' % f.attrs['version'])
else:
raise ValueError(
                'Matrix HDF5 file format version is missing or is an older version that is not supported by this function.')
feature_ids = [x.decode('ascii', 'ignore')
for x in f['matrix']['features']['id']]
feature_names = [x.decode('ascii', 'ignore')
for x in f['matrix']['features']['name']]
barcodes = list(f['matrix']['barcodes'][:])
matrix = sp_sparse.csc_matrix(
(f['matrix']['data'], f['matrix']['indices'], f['matrix']['indptr']), shape=f['matrix']['shape'])
return GeneBCMatrix(feature_ids, feature_names, decode(barcodes), matrix)
def save_matrix_to_h5(gbm, filename, genome):
flt = tables.Filters(complevel=1)
with tables.open_file(filename, 'w', filters=flt) as f:
try:
group = f.create_group(f.root, genome)
f.create_carray(group, 'genes', obj=gbm.gene_ids)
f.create_carray(group, 'gene_names', obj=gbm.gene_names)
f.create_carray(group, 'barcodes', obj=gbm.barcodes)
f.create_carray(group, 'data', obj=gbm.matrix.data)
f.create_carray(group, 'indices', obj=gbm.matrix.indices)
f.create_carray(group, 'indptr', obj=gbm.matrix.indptr)
f.create_carray(group, 'shape', obj=gbm.matrix.shape)
except:
raise Exception("Failed to write H5 file.")
def subsample_matrix(gbm, barcode_indices):
return GeneBCMatrix(gbm.gene_ids, gbm.gene_names, gbm.barcodes[barcode_indices], gbm.matrix[:, barcode_indices])
def get_expression(gbm, gene_name, genes=None):
if genes is None:
genes = gbm.gene_names
gene_indices = np.where(genes == gene_name)[0]
if len(gene_indices) == 0:
raise Exception("%s was not found in list of gene names." % gene_name)
return gbm.matrix[gene_indices[0], :].toarray().squeeze()
def gbm_to_df(gbm):
return pd.DataFrame(gbm.matrix.todense(), index=gbm.gene_names, columns=gbm.barcodes)
def get_barcode_counts(gbm):
ret = []
for i in range(len(gbm.barcodes)):
ret.append(np.sum(gbm.matrix[:, i].toarray()))
return ret
def df(gbm):
"""
Converts a GeneBCMatrix to a pandas dataframe (dense)
Parameters
----------
gbm : a GeneBCMatrix
Returns
-------
    object : Pandas DataFrame shape(n_genes, n_cells)
"""
df = pd.DataFrame(gbm.matrix.todense())
df.index = gbm.gene_names
df.columns = gbm.barcodes
return df
def to_csv(gbm, file, sep='\t'):
df(gbm).to_csv(file, sep=sep, header=True, index=True)
def sum(gbm, axis=0):
return gbm.matrix.sum(axis=axis)
def tpm(gbm):
m = gbm.matrix
s = 1 / m.sum(axis=0)
mn = m.multiply(s)
tpm = mn.multiply(1000000)
return GeneBCMatrix(gbm.gene_ids, gbm.gene_names, gbm.barcodes, tpm)
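# Note on tpm(): each column (cell) of the matrix is rescaled so that its
# counts sum to one million, e.g. a cell whose counts sum to 2,000 has every
# entry multiplied by 500.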
def create_cluster_plots(pca, labels, name, marker='o', s=MARKER_SIZE):
for i in range(0, pca.shape[1]):
for j in range(i + 1, pca.shape[1]):
create_cluster_plot(pca, labels, name, pc1=(
i + 1), pc2=(j + 1), marker=marker, s=s)
def pca_base_plots(pca, clusters, n=10, marker='o', s=MARKER_SIZE):
rows = libplot.grid_size(n)
w = 4 * rows
fig = libplot.new_base_fig(w=w, h=w)
si = 1
for i in range(0, n):
for j in range(i + 1, n):
ax = libplot.new_ax(fig, subplot=(rows, rows, si))
pca_plot_base(pca, clusters, pc1=(i + 1),
pc2=(j + 1), marker=marker, s=s, ax=ax)
si += 1
return fig
def pca_plot_base(pca,
clusters,
pc1=1,
pc2=2,
marker='o',
labels=False,
s=MARKER_SIZE,
w=8,
h=8,
fig=None,
ax=None):
colors = libcluster.get_colors()
if ax is None:
fig, ax = libplot.new_fig(w=w, h=h)
ids = list(sorted(set(clusters['Cluster'])))
for i in range(0, len(ids)):
l = ids[i]
#print('Label {}'.format(l))
indices = np.where(clusters['Cluster'] == l)[0]
n = len(indices)
label = 'C{} ({:,})'.format(l, n)
df2 = pca.iloc[indices, ]
x = df2.iloc[:, pc1 - 1]
y = df2.iloc[:, pc2 - 1]
if i in colors:
color = colors[i] # l]
else:
color = 'black'
ax.scatter(x, y, color=color, edgecolor=color, s=s,
marker=marker, alpha=libplot.ALPHA, label=label)
if labels:
l = pca.index.values
for i in range(0, pca.shape[0]):
print(pca.shape, pca.iloc[i, pc1 - 1], pca.iloc[i, pc2 - 1])
ax.text(pca.iloc[i, pc1 - 1], pca.iloc[i, pc2 - 1], pca.index[i])
return fig, ax
def pca_plot(pca,
clusters,
pc1=1,
pc2=2,
marker='o',
labels=False,
s=MARKER_SIZE,
w=8,
h=8,
legend=True,
fig=None,
ax=None):
fig, ax = pca_plot_base(pca,
clusters,
pc1=pc1,
pc2=pc2,
marker=marker,
labels=labels,
s=s,
w=w,
h=h,
fig=fig,
ax=ax)
#libtsne.tsne_legend(ax, labels, colors)
libcluster.format_simple_axes(ax, title="PC")
if legend:
libcluster.format_legend(ax, cols=6, markerscale=2)
return fig, ax
def create_pca_plot(pca,
clusters,
name,
pc1=1,
pc2=2,
marker='o',
labels=False,
legend=True,
s=MARKER_SIZE,
w=8,
h=8,
fig=None,
ax=None,
dir='.',
format='png'):
out = '{}/pca_{}_pc{}_vs_pc{}.{}'.format(dir, name, pc1, pc2, format)
fig, ax = pca_plot(pca,
clusters,
pc1=pc1,
pc2=pc2,
labels=labels,
marker=marker,
legend=legend,
s=s,
w=w,
h=h,
fig=fig,
ax=ax)
libplot.savefig(fig, out, pad=2)
plt.close(fig)
def set_tsne_ax_lim(tsne, ax):
"""
Set the t-SNE x,y limits to look pretty.
"""
d1 = tsne.iloc[:, 0]
d2 = tsne.iloc[:, 1]
xlim = [d1[d1 < 0].quantile(1 - TNSE_AX_Q),
d1[d1 >= 0].quantile(TNSE_AX_Q)]
ylim = [d2[d2 < 0].quantile(1 - TNSE_AX_Q),
d2[d2 >= 0].quantile(TNSE_AX_Q)]
#print(xlim, ylim)
# ax.set_xlim(xlim)
# ax.set_ylim(ylim)
def base_cluster_plot(d,
clusters,
markers=None,
s=libplot.MARKER_SIZE,
colors=None,
edgecolors=EDGE_COLOR,
linewidth=EDGE_WIDTH,
dim1=0,
dim2=1,
w=8,
h=8,
alpha=ALPHA, # libplot.ALPHA,
show_axes=True,
legend=True,
sort=True,
cluster_order=None,
fig=None,
ax=None):
"""
Create a tsne plot without the formatting
Parameters
----------
d : Pandas dataframe
t-sne, umap data
clusters : Pandas dataframe
n x 1 table of n cells with a Cluster column giving each cell a
cluster label.
s : int, optional
Marker size
w : int, optional
Plot width
h : int, optional
Plot height
alpha : float (0, 1), optional
        Transparency of markers.
show_axes : bool, optional, default true
Whether to show axes on plot
legend : bool, optional, default true
Whether to show legend.
"""
if ax is None:
fig, ax = libplot.new_fig(w=w, h=h)
libcluster.scatter_clusters(d.iloc[:, dim1].values,
d.iloc[:, dim2].values,
clusters,
colors=colors,
edgecolors=edgecolors,
linewidth=linewidth,
markers=markers,
alpha=alpha,
s=s,
ax=ax,
cluster_order=cluster_order,
sort=sort)
#set_tsne_ax_lim(tsne, ax)
# libcluster.format_axes(ax)
if not show_axes:
libplot.invisible_axes(ax)
legend_params = dict(LEGEND_PARAMS)
if isinstance(legend, bool):
legend_params['show'] = legend
elif isinstance(legend, dict):
legend_params.update(legend)
else:
pass
if legend_params['show']:
libcluster.format_legend(ax,
cols=legend_params['cols'],
markerscale=legend_params['markerscale'])
return fig, ax
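# Minimal usage sketch (assumes `tsne` is an n_cells x 2 DataFrame of
# coordinates and `clusters` is an n_cells x 1 DataFrame with a 'Cluster'
# column):
#
#   fig, ax = base_cluster_plot(tsne, clusters, markers='o', s=10)
#   libplot.savefig(fig, 'clusters.png')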
def base_cluster_plot_outline(out,
d,
clusters,
s=libplot.MARKER_SIZE,
colors=None,
edgecolors=EDGE_COLOR,
linewidth=EDGE_WIDTH,
dim1=0,
dim2=1,
w=8,
alpha=ALPHA, # libplot.ALPHA,
show_axes=True,
legend=True,
sort=True,
outline=True):
cluster_order = list(sorted(set(clusters['Cluster'])))
im_base = imagelib.new(w * 300, w * 300)
for i in range(0, len(cluster_order)):
print('index', i, cluster_order[i])
cluster = cluster_order[i]
if isinstance(colors, dict):
color = colors[cluster]
elif isinstance(colors, list):
if cluster < len(colors):
# np.where(clusters['Cluster'] == cluster)[0]]
color = colors[i]
else:
color = 'black'
else:
color = 'black'
fig, ax = separate_cluster(d,
clusters,
cluster,
color=color,
size=w,
s=s,
linewidth=linewidth,
add_titles=False)
# get x y lim
xlim = ax.get_xlim()
ylim = ax.get_ylim()
fig, ax = separate_cluster(d,
clusters,
cluster,
color=color,
size=w,
s=s,
linewidth=linewidth,
add_titles=False,
show_background=False)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if not show_axes:
libplot.invisible_axes(ax)
legend_params = dict(LEGEND_PARAMS)
if isinstance(legend, bool):
legend_params['show'] = legend
elif isinstance(legend, dict):
legend_params.update(legend)
else:
pass
if legend_params['show']:
libcluster.format_legend(ax,
cols=legend_params['cols'],
markerscale=legend_params['markerscale'])
libplot.invisible_axes(ax)
tmp = 'tmp{}.png'.format(i)
libplot.savefig(fig, tmp)
plt.close(fig)
# Open image
# im = imagelib.open(tmp)
# im_no_bg = imagelib.remove_background(im)
# im_smooth = imagelib.smooth_edges(im_no_bg)
# imagelib.paste(im_no_bg, im_smooth, inplace=True)
# imagelib.save(im_no_bg, 'smooth.png')
# imagelib.paste(im_base, im_no_bg, inplace=True)
im = imagelib.open(tmp)
if outline:
im_no_bg = imagelib.remove_background(im)
im_edges = imagelib.edges(im)
im_outline = imagelib.paste(im, im_edges) # im_no_bg
im_smooth = imagelib.smooth(im_outline)
imagelib.save(im_smooth, 'smooth.png') # im_smooth
imagelib.paste(im_base, im_smooth, inplace=True)
else:
imagelib.paste(im_base, im, inplace=True)
# # find gray areas and mask
# im_data = np.array(im1.convert('RGBA'))
#
# r = im_data[:, :, 0]
# g = im_data[:, :, 1]
# b = im_data[:, :, 2]
#
# grey_areas = (r < 255) & (r > 200) & (g < 255) & (g > 200) & (b < 255) & (b > 200)
#
# d = im_data[np.where(grey_areas)]
# d[:, :] = [255, 255, 255, 0]
# im_data[np.where(grey_areas)] = d
#
# im2 = Image.fromarray(im_data)
#
# # Edge detect on what is left (the clusters)
# im_edges = im2.filter(ImageFilter.FIND_EDGES)
#
# im_smooth = im_edges.filter(ImageFilter.SMOOTH)
#
# # paste outline onto clusters
# im2.paste(im_smooth, (0, 0), im_smooth)
#
# # overlay edges on top of original image to highlight cluster
# im_base.paste(im2, (0, 0), im2)
# break
imagelib.save(im_base, out)
def cluster_plot(tsne,
clusters,
dim1=0,
dim2=1,
markers='o',
s=libplot.MARKER_SIZE,
colors=None,
w=8,
h=8,
legend=True,
show_axes=False,
sort=True,
cluster_order=None,
fig=None,
ax=None,
out=None):
fig, ax = base_cluster_plot(tsne,
clusters,
markers=markers,
colors=colors,
dim1=dim1,
dim2=dim2,
s=s,
w=w,
h=h,
cluster_order=cluster_order,
legend=legend,
sort=sort,
show_axes=show_axes,
fig=fig,
ax=ax)
#libtsne.tsne_legend(ax, labels, colors)
#libcluster.format_simple_axes(ax, title="t-SNE")
#libcluster.format_legend(ax, cols=6, markerscale=2)
if out is not None:
libplot.savefig(fig, out)
return fig, ax
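# Illustrative usage sketch (assumed inputs: `tsne_df` is a cells x 2 embedding and
# `clusters_df` has a 'Cluster' column):
#
#   fig, ax = cluster_plot(tsne_df, clusters_df, legend=True, out='tsne_clusters.png')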
def create_cluster_plot(d,
clusters,
name,
dim1=0,
dim2=1,
method='tsne',
markers='o',
s=libplot.MARKER_SIZE,
w=8,
h=8,
colors=None,
legend=True,
sort=True,
show_axes=False,
ax=None,
cluster_order=None,
format='png',
dir='.',
out=None):
if out is None:
# libtsne.get_tsne_plot_name(name))
out = '{}/{}_{}.{}'.format(dir, method, name, format)
print(out)
return cluster_plot(d,
clusters,
dim1=dim1,
dim2=dim2,
markers=markers,
colors=colors,
s=s,
w=w,
h=h,
cluster_order=cluster_order,
show_axes=show_axes,
legend=legend,
sort=sort,
out=out)
def base_tsne_plot(tsne, marker='o', s=libplot.MARKER_SIZE, c='red', label=None, fig=None, ax=None):
"""
Create a tsne plot without the formatting
"""
if ax is None:
fig, ax = libplot.new_fig()
libplot.scatter(tsne['TSNE-1'], tsne['TSNE-2'], c=c,
marker=marker, label=label, s=s, ax=ax)
return fig, ax
def tsne_plot(tsne, marker='o', s=libplot.MARKER_SIZE, c='red', label=None, fig=None, ax=None):
fig, ax = base_tsne_plot(tsne, marker=marker, c=c,
s=s, label=label, fig=fig, ax=ax)
#libtsne.tsne_legend(ax, labels, colors)
libcluster.format_simple_axes(ax, title="t-SNE")
libcluster.format_legend(ax, cols=6, markerscale=2)
return fig, ax
def base_expr_plot(data,
exp,
dim=[1, 2],
cmap=plt.cm.plasma,
marker='o',
edgecolors=EDGE_COLOR,
linewidth=1,
s=MARKER_SIZE,
alpha=1,
w=libplot.DEFAULT_WIDTH,
h=libplot.DEFAULT_HEIGHT,
fig=None,
ax=None,
norm=None): # plt.cm.plasma):
"""
Base function for creating an expression plot for T-SNE/2D space
reduced representation of data.
Parameters
----------
data : Pandas dataframe
features x dimensions, e.g. rows are cells and columns are tsne dimensions
exp : numpy array
expression values for each data point so it must have the same number
of elements as data has rows.
    dim : list of int, optional
        The two (1-based) dimensions of `data` to plot on the x and y axes,
        e.g. [1, 2] plots the first dimension against the second.
fig : matplotlib figure, optional
Supply a figure object on which to render the plot, otherwise a new
one is created.
ax : matplotlib ax, optional
Supply an axis object on which to render the plot, otherwise a new
one is created.
norm : Normalize, optional
Specify how colors should be normalized
Returns
-------
fig : matplotlib figure
If fig is a supplied argument, return the supplied figure, otherwise
a new figure is created and returned.
ax : matplotlib axis
If ax is a supplied argument, return this, otherwise create a new
axis and attach to figure before returning.
"""
if ax is None:
fig, ax = libplot.new_fig(w=w, h=h)
# if norm is None and exp.min() < 0:
#norm = matplotlib.colors.Normalize(vmin=-3, vmax=3, clip=True)
if norm is None:
norm = libplot.NORM_3
# Sort by expression level so that extreme values always appear on top
idx = np.argsort(exp) #np.argsort(abs(exp)) # np.argsort(exp)
x = data.iloc[idx, dim[0] - 1].values # data['{}-{}'.format(t, d1)][idx]
y = data.iloc[idx, dim[1] - 1].values # data['{}-{}'.format(t, d2)][idx]
e = exp[idx]
# if (e.min() == 0):
#print('Data does not appear to be z-scored. Transforming now...')
# zscore
#e = (e - e.mean()) / e.std()
#print(e.min(), e.max())
# z-score
#e = (e - e.mean()) / e.std()
# limit to 3 std for z-scores
#e[e < -3] = -3
#e[e > 3] = 3
ax.scatter(x,
y,
c=e,
s=s,
marker=marker,
alpha=alpha,
cmap=cmap,
norm=norm,
edgecolors='none', # edgecolors,
linewidth=linewidth)
# for i in range(0, x.size):
# en = norm(e[i])
# color = cmap(int(en * cmap.N))
# color = np.array(color)
#
# c1 = color.copy()
# c1[-1] = 0.5
#
# #print(c1)
#
# ax.scatter(x[i],
# y[i],
# c=[c1],
# s=s,
# marker=marker,
# edgecolors='none', #edgecolors,
# linewidth=linewidth)
#
#
#
# mean = color.mean()
#
# #print(x[i], y[i], mean)
#
# #if mean > 0.5:
# ax.scatter(x[i],
# y[i],
# c='#ffffff00',
# s=s,
# marker=marker,
# norm=norm,
# edgecolors=[color],
# linewidth=linewidth)
#libcluster.format_axes(ax, title=t)
return fig, ax
def expr_plot(data,
exp,
dim=[1, 2],
cmap=plt.cm.magma,
marker='o',
s=MARKER_SIZE,
alpha=1,
edgecolors=EDGE_COLOR,
linewidth=EDGE_WIDTH,
w=libplot.DEFAULT_WIDTH,
h=libplot.DEFAULT_HEIGHT,
show_axes=False,
fig=None,
ax=None,
norm=None,
colorbar=False): # plt.cm.plasma):
"""
Creates a base expression plot and adds a color bar.
"""
is_first = False
if ax is None:
fig, ax = libplot.new_fig(w, h)
is_first = True
base_expr_plot(data,
exp,
dim=dim,
s=s,
marker=marker,
edgecolors=edgecolors,
linewidth=linewidth,
alpha=alpha,
cmap=cmap,
norm=norm,
w=w,
h=h,
ax=ax)
# if colorbar or is_first:
if colorbar:
libplot.add_colorbar(fig, cmap, norm=norm)
#libcluster.format_simple_axes(ax, title=t)
if not show_axes:
libplot.invisible_axes(ax)
return fig, ax
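# Illustrative usage sketch (assumes `exp` is a 1D numpy array of z-scored expression
# values, one per cell, aligned with the rows of `tsne_df`; names are hypothetical):
#
#   fig, ax = expr_plot(tsne_df, exp, cmap=plt.cm.magma, colorbar=True)
#   libplot.savefig(fig, 'gene_expr.png', pad=0)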
# def expr_plot(tsne,
# exp,
# d1=1,
# d2=2,
# x1=None,
# x2=None,
# cmap=BLUE_YELLOW_CMAP,
# marker='o',
# s=MARKER_SIZE,
# alpha=EXP_ALPHA,
# out=None,
# fig=None,
# ax=None,
# norm=None,
# w=libplot.DEFAULT_WIDTH,
# h=libplot.DEFAULT_HEIGHT,
# colorbar=True): #plt.cm.plasma):
# """
# Creates a basic t-sne expression plot.
#
# Parameters
# ----------
# data : pandas.DataFrame
# t-sne 2D data
# """
#
# fig, ax = expr_plot(tsne,
# exp,
# t='TSNE',
# d1=d1,
# d2=d2,
# x1=x1,
# x2=x2,
# cmap=cmap,
# marker=marker,
# s=s,
# alpha=alpha,
# fig=fig,
# ax=ax,
# norm=norm,
# w=w,
# h=h,
# colorbar=colorbar)
#
# set_tsne_ax_lim(tsne, ax)
#
# libplot.invisible_axes(ax)
#
# if out is not None:
# libplot.savefig(fig, out, pad=0)
#
# return fig, ax
def create_expr_plot(tsne,
exp,
dim=[1, 2],
cmap=None,
marker='o',
s=MARKER_SIZE,
alpha=EXP_ALPHA,
fig=None,
ax=None,
w=libplot.DEFAULT_WIDTH,
h=libplot.DEFAULT_HEIGHT,
edgecolors=EDGE_COLOR,
linewidth=EDGE_WIDTH,
norm=None,
method='tsne',
show_axes=False,
colorbar=True,
out=None): # plt.cm.plasma):
"""
Creates and saves a presentation tsne plot
"""
if out is None:
out = '{}_expr.pdf'.format(method)
fig, ax = expr_plot(tsne,
exp,
dim=dim,
cmap=cmap,
marker=marker,
s=s,
alpha=alpha,
fig=fig,
w=w,
h=h,
ax=ax,
show_axes=show_axes,
colorbar=colorbar,
norm=norm,
linewidth=linewidth,
edgecolors=edgecolors)
if out is not None:
libplot.savefig(fig, out, pad=0)
return fig, ax
def base_pca_expr_plot(data,
exp,
dim=[1, 2],
cmap=None,
marker='o',
s=MARKER_SIZE,
alpha=EXP_ALPHA,
fig=None,
ax=None,
norm=None): # plt.cm.plasma):
fig, ax = base_expr_plot(data,
exp,
t='PC',
dim=dim,
cmap=cmap,
marker=marker,
s=s,
fig=fig,
alpha=alpha,
ax=ax,
norm=norm)
return fig, ax
def pca_expr_plot(data,
expr,
name,
dim=[1, 2],
cmap=None,
marker='o',
s=MARKER_SIZE,
alpha=EXP_ALPHA,
fig=None,
ax=None,
norm=None): # plt.cm.plasma):
out = 'pca_expr_{}_t{}_vs_t{}.pdf'.format(name, 1, 2)
fig, ax = base_pca_expr_plot(data,
expr,
dim=dim,
cmap=cmap,
marker=marker,
s=s,
alpha=alpha,
fig=fig,
ax=ax,
norm=norm)
libplot.savefig(fig, out)
plt.close(fig)
return fig, ax
def expr_grid_size(x, size=SUBPLOT_SIZE):
"""
Auto size grid to look nice.
"""
if type(x) is int:
l = x
elif type(x) is list:
l = len(x)
elif type(x) is np.ndarray:
l = x.shape[0]
elif type(x) is pd.core.frame.DataFrame:
l = x.shape[0]
else:
return None
cols = int(np.ceil(np.sqrt(l)))
w = size * cols
rows = int(l / cols) + 2
if l % cols == 0:
# Assume we will add a row for a color bar
rows += 1
h = size * rows
return w, h, rows, cols
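# Worked example of the sizing arithmetic above (illustrative only): for 7 genes,
# cols = ceil(sqrt(7)) = 3 and rows = int(7 / 3) + 2 = 4 (7 % 3 != 0, so no extra
# color-bar row is added), giving w = 3 * size and h = 4 * size.
#
#   w, h, rows, cols = expr_grid_size(7)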
def get_gene_names(data):
if ';' in data.index[0]:
ids, genes = data.index.str.split(';').str
else:
genes = data.index
ids = genes
return ids.values, genes.values
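# Illustrative sketch of the index convention handled above (hypothetical data): an
# index entry such as 'ENSG00000136997;MYC' splits into the id 'ENSG00000136997' and
# the name 'MYC'; an index without ';' is used for both the ids and the names.
#
#   ids, names = get_gene_names(data)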
def get_gene_ids(data, genes, ids=None, gene_names=None):
"""
For a given gene list, get all of the transcripts.
Parameters
----------
data : DataFrame
        data table whose index contains the gene ids
genes : list
List of strings of gene ids
ids : Index, optional
Index of gene ids
gene_names : Index, optional
Index of gene names
Returns
-------
list
list of tuples of (index, gene_id, gene_name)
"""
if ids is None:
ids, gene_names = get_gene_names(data)
ret = []
for g in genes:
indexes = np.where(ids == g)[0]
if indexes.size > 0:
for index in indexes:
ret.append((index, ids[index], gene_names[index]))
else:
# if id does not exist, try the gene names
indexes = np.where(gene_names == g)[0]
for index in indexes:
ret.append((index, ids[index], gene_names[index]))
return ret
def get_gene_data(data, g, ids=None, gene_names=None):
if ids is None:
ids, gene_names = get_gene_names(data)
if isinstance(g, list):
g = np.array(g)
if isinstance(g, np.ndarray):
idx = np.where(np.isin(ids, g))[0]
if idx.size < 1:
# if id does not exist, try the gene names
idx = np.where(np.isin(gene_names, g))[0]
if idx.size < 1:
return None
else:
idx = np.where(ids == g)[0]
if idx.size > 0:
# if id exists, pick the first
idx = idx[0]
else:
# if id does not exist, try the gene names
idx = np.where(gene_names == g)[0]
if idx.size > 0:
idx = idx[0]
else:
return None
if isinstance(data, SparseDataFrame):
return data[idx, :].to_array()
else:
return data.iloc[idx, :].values
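# Illustrative usage sketch (hypothetical gene symbols; `data` is a genes x cells
# matrix as documented above). A single id returns a 1D array of per-cell values, a
# list/array of ids returns a 2D slice, and None is returned when nothing matches:
#
#   myc = get_gene_data(data, 'MYC')
#   panel = get_gene_data(data, ['MYC', 'PCNA'])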
def gene_expr_grid(data, tsne, genes, cmap=None, size=SUBPLOT_SIZE):
"""
Plot multiple genes on a grid.
Parameters
----------
data : Pandas dataframe
Genes x samples expression matrix
tsne : Pandas dataframe
        Cells x t-SNE dimensions. Columns should be labeled 'TSNE-1', 'TSNE-2' etc.
genes : array
List of gene names
Returns
-------
fig : Matplotlib figure
A new Matplotlib figure used to make the plot
"""
if type(genes) is pd.core.frame.DataFrame:
genes = genes['Genes'].values
ids, gene_names = get_gene_names(data)
gene_ids = get_gene_ids(data, genes, ids=ids, gene_names=gene_names)
w, h, rows, cols = expr_grid_size(gene_ids, size=size)
fig = libplot.new_base_fig(w=w, h=h)
for i in range(0, len(gene_ids)):
# gene id
gene_id = gene_ids[i][1]
gene = gene_ids[i][2]
print(gene, gene_id)
exp = get_gene_data(data, gene_id, ids=ids, gene_names=gene_names)
ax = libplot.new_ax(fig, rows, cols, i + 1)
expr_plot(tsne, exp, ax=ax, cmap=cmap, colorbar=False)
# if i == 0:
# libcluster.format_axes(ax)
# else:
# libplot.invisible_axes(ax)
ax.set_title('{} ({})'.format(gene_ids[i][2], gene_ids[i][1]))
libplot.add_colorbar(fig, cmap)
return fig
def genes_expr(data,
tsne,
genes,
prefix='',
dim=[1, 2],
index=None,
dir='GeneExp',
cmap=BGY_CMAP,
norm=None,
w=4,
h=4,
s=30,
alpha=ALPHA,
linewidth=EDGE_WIDTH,
edgecolors='none',
colorbar=True,
method='tsne',
format='png'):
"""
    Plot the expression of multiple genes, writing one plot per gene to `dir`.
    Parameters
    ----------
    data : Pandas dataframe
        Genes x samples expression matrix
    tsne : Pandas dataframe
        Cells x t-SNE dimensions. Columns should be labeled 'TSNE-1', 'TSNE-2' etc.
    genes : array
        List of gene names
"""
if dir[-1] == '/':
dir = dir[:-1]
if not os.path.exists(dir):
mkdir(dir)
if index is None:
index = data.index
if isinstance(genes, pd.core.frame.DataFrame):
genes = genes['Genes'].values
if norm is None:
norm = matplotlib.colors.Normalize(vmin=-3, vmax=3, clip=True)
#cmap = plt.cm.plasma
ids, gene_names = get_gene_names(data)
print(ids, gene_names, genes)
gene_ids = get_gene_ids(data, genes, ids=ids, gene_names=gene_names)
print(gene_ids)
for i in range(0, len(gene_ids)):
gene_id = gene_ids[i][1]
gene = gene_ids[i][2]
print(gene_id, gene)
exp = get_gene_data(data, gene_id, ids=ids, gene_names=gene_names)
#fig, ax = libplot.new_fig()
#expr_plot(tsne, exp, ax=ax)
#libplot.add_colorbar(fig, cmap)
fig, ax = expr_plot(tsne,
exp,
cmap=cmap,
dim=dim,
w=w,
h=h,
s=s,
colorbar=colorbar,
norm=norm,
alpha=alpha,
linewidth=linewidth,
edgecolors=edgecolors)
if gene_id != gene:
out = '{}/{}_expr_{}_{}.{}'.format(dir,
method, gene, gene_id, format)
else:
out = '{}/{}_expr_{}.{}'.format(dir, method, gene, format)
libplot.savefig(fig, 'tmp.png', pad=0)
libplot.savefig(fig, out, pad=0)
plt.close(fig)
im1 = Image.open('tmp.png')
# Edge detect on what is left (the clusters)
imageWithEdges = im1.filter(ImageFilter.FIND_EDGES)
im_data = np.array(imageWithEdges.convert('RGBA'))
#r = data[:, :, 0]
#g = data[:, :, 1]
#b = data[:, :, 2]
a = im_data[:, :, 3]
# (r < 255) | (g < 255) | (b < 255) #(r > 0) & (r == g) & (r == b) & (g == b)
black_areas = (a > 0)
d = im_data[np.where(black_areas)]
d[:, 0:3] = [64, 64, 64]
im_data[np.where(black_areas)] = d
im2 = Image.fromarray(im_data)
im2.save('edges.png', 'png')
# overlay edges on top of original image to highlight cluster
# enable if edges desired
im1.paste(im2, (0, 0), im2)
im1.save(out, 'png')
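# Illustrative usage sketch (hypothetical gene list and output directory; writes one
# '<method>_expr_<gene>.<format>' file per gene into `dir`):
#
#   genes_expr(data, tsne_df, ['MYC', 'PCNA', 'AICDA'], dir='GeneExp', format='png')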
def genes_expr_outline(data,
tsne,
genes,
prefix='',
index=None,
dir='GeneExp',
cmap=BGY_CMAP,
norm=None,
w=6,
s=30,
alpha=1,
linewidth=EDGE_WIDTH,
edgecolors='none',
colorbar=True,
method='tsne',
bins=10,
background=BACKGROUND_SAMPLE_COLOR):
"""
    Plot the expression of multiple genes, writing one outlined plot per gene to `dir`.
    Parameters
    ----------
    data : Pandas dataframe
        Genes x samples expression matrix
    tsne : Pandas dataframe
        Cells x t-SNE dimensions. Columns should be labeled 'TSNE-1', 'TSNE-2' etc.
    genes : array
        List of gene names
"""
if dir[-1] == '/':
dir = dir[:-1]
if not os.path.exists(dir):
mkdir(dir)
if index is None:
index = data.index
if isinstance(genes, pd.core.frame.DataFrame):
genes = genes['Genes'].values
if norm is None:
norm = matplotlib.colors.Normalize(vmin=-3, vmax=3, clip=True)
#cmap = plt.cm.plasma
ids, gene_names = get_gene_names(data)
gene_ids = get_gene_ids(data, genes, ids=ids, gene_names=gene_names)
for i in range(0, len(gene_ids)):
gene_id = gene_ids[i][1]
gene = gene_ids[i][2]
print(gene_id, gene)
exp = get_gene_data(data, gene_id, ids=ids, gene_names=gene_names)
bin_means, bin_edges, binnumber = binned_statistic(exp, exp, bins=bins)
print(binnumber.min(), binnumber.max())
iw = w * 300
im_base = imagelib.new(iw, iw)
for bin in range(0, bins):
bi = bin + 1
idx_bin = np.where(binnumber == bi)[0]
idx_other = np.where(binnumber != bi)[0]
tsne_other = tsne.iloc[idx_other, :]
fig, ax = libplot.new_fig(w, w)
x = tsne_other.iloc[:, 0]
y = tsne_other.iloc[:, 1]
libplot.scatter(x,
y,
c=[background],
ax=ax,
edgecolors='none', # bgedgecolor,
linewidth=linewidth,
s=s)
#fig, ax = libplot.new_fig()
#expr_plot(tsne, exp, ax=ax)
#libplot.add_colorbar(fig, cmap)
exp_bin = exp[idx_bin]
tsne_bin = tsne.iloc[idx_bin, :]
expr_plot(tsne_bin,
exp_bin,
cmap=cmap,
s=s,
colorbar=colorbar,
norm=norm,
alpha=alpha,
linewidth=linewidth,
edgecolors=edgecolors,
ax=ax)
tmp = 'tmp{}.png'.format(bin)
libplot.savefig(fig, tmp, pad=0)
plt.close(fig)
im = imagelib.open(tmp)
im_no_bg = imagelib.remove_background(im)
im_edges = imagelib.edges(im_no_bg)
im_smooth = imagelib.smooth(im_edges)
im_outline = imagelib.paste(im_no_bg, im_smooth)
imagelib.paste(im_base, im_outline, inplace=True)
# # find gray areas and mask
# im_data = np.array(im1.convert('RGBA'))
#
# r = im_data[:, :, 0]
# g = im_data[:, :, 1]
# b = im_data[:, :, 2]
#
# print(tmp, r.shape)
#
# grey_areas = (r < 255) & (r > 200) & (g < 255) & (g > 200) & (b < 255) & (b > 200)
#
#
# d = im_data[np.where(grey_areas)]
# d[:, :] = [255, 255, 255, 0]
# im_data[np.where(grey_areas)] = d
#
#
# #edges1 = feature.canny(rgb2gray(im_data))
#
# #print(edges1.shape)
#
# #skimage.io.imsave('tmp_canny_{}.png'.format(bin), edges1)
#
# im2 = Image.fromarray(im_data)
#
# im_no_gray, im_smooth = smooth_edges(im1, im1)
#
# # Edge detect on what is left (the clusters)
# im_edges = im2.filter(ImageFilter.FIND_EDGES)
#
#
# im_data = np.array(im_edges.convert('RGBA'))
#
# #r = data[:, :, 0]
# #g = data[:, :, 1]
# #b = data[:, :, 2]
# #a = im_data[:, :, 3]
#
# # Non transparent areas are edges
# #black_areas = (a > 0) #(r < 255) | (g < 255) | (b < 255) #(r > 0) & (r == g) & (r == b) & (g == b)
#
# #d = im_data[np.where(black_areas)]
# #d[:, 0:3] = [64, 64, 64]
# #im_data[np.where(black_areas)] = d
#
# #im3 = Image.fromarray(im_data)
# #im2.save('edges.png', 'png')
#
# im_smooth = im_edges.filter(ImageFilter.SMOOTH)
# im_smooth.save('edges.png', 'png')
#
# im2.paste(im_smooth, (0, 0), im_smooth)
#
# im_base.paste(im2, (0, 0), im2)
if gene_id != gene:
out = '{}/{}_expr_{}_{}.png'.format(dir, method, gene, gene_id)
else:
out = '{}/{}_expr_{}.png'.format(dir, method, gene)
print(out)
# overlay edges on top of original image to highlight cluster
#im_base.paste(im2, (0, 0), im2)
imagelib.save(im_base, out)
def avg_expr(data,
tsne,
genes,
cid,
clusters,
prefix='',
index=None,
dir='GeneExp',
cmap=OR_RED_CMAP, # BGY_CMAP,
norm=None,
w=libplot.DEFAULT_WIDTH,
h=libplot.DEFAULT_HEIGHT,
alpha=1.0,
colorbar=False,
method='tsne',
fig=None,
ax=None,
sdmax=0.5):
"""
    Plot the average (z-scored, min-max scaled) expression of a set of genes over the
    embedding and outline the cells of cluster `cid` with a convex hull.
    Parameters
    ----------
    data : Pandas dataframe
        Genes x samples expression matrix
    tsne : Pandas dataframe
        Cells x t-SNE dimensions. Columns should be labeled 'TSNE-1', 'TSNE-2' etc.
    genes : array
        List of gene names
"""
if dir[-1] == '/':
dir = dir[:-1]
if not os.path.exists(dir):
mkdir(dir)
if index is None:
index = data.index
if isinstance(genes, pd.core.frame.DataFrame):
genes = genes['Genes'].values
if norm is None:
norm = matplotlib.colors.Normalize(vmin=-3, vmax=3, clip=True)
#cmap = plt.cm.plasma
ids, gene_names = get_gene_names(data)
exp = get_gene_data(data, genes, ids=ids, gene_names=gene_names)
avg = exp.mean(axis=0)
avg = (avg - avg.mean()) / avg.std()
avg[avg < -1.5] = -1.5
avg[avg > 1.5] = 1.5
avg = (avg - avg.min()) / (avg.max() - avg.min()) # min_max_scale(avg)
create_expr_plot(tsne,
avg,
cmap=cmap,
w=w,
h=h,
colorbar=colorbar,
norm=norm,
alpha=alpha,
fig=fig,
ax=ax)
x = tsne.iloc[:, 0].values # data['{}-{}'.format(t, d1)][idx]
y = tsne.iloc[:, 1].values # data['{}-{}'.format(t, d2)][idx]
idx = np.where(clusters['Cluster'] == cid)[0]
nx = 500
ny = 500
xi = np.linspace(x.min(), x.max(), nx)
yi = np.linspace(y.min(), y.max(), ny)
x = x[idx]
y = y[idx]
#centroid = [x.sum() / x.size, y.sum() / y.size]
centroid = [(x * avg[idx]).sum() / avg[idx].sum(),
(y * avg[idx]).sum() / avg[idx].sum()]
d = np.array([distance.euclidean(centroid, (a, b)) for a, b in zip(x, y)])
sd = d.std()
m = d.mean()
print(m, sd)
z = (d - m) / sd
# find all points within 1 sd of centroid
idx = np.where(abs(z) < sdmax)[0] # (d > x1) & (d < x2))[0]
x = x[idx]
y = y[idx]
points = np.array([[p1, p2] for p1, p2 in zip(x, y)])
hull = ConvexHull(points)
#x1 = x[idx]
#y1 = y[idx]
# avg1 = np.zeros(x.size) #avg[idx]
#avg1[idx] = 1
# fx = interp1d(points[hull.vertices, 0], points[hull.vertices, 1], kind='cubic')
# fy = interp1d(points[hull.vertices, 1], points[hull.vertices, 0], kind='cubic')
#
# xt = np.linspace(x.min(), x.max(), 100, endpoint=True)
# yt = np.linspace(y.min(), y.max(), 100, endpoint=True)
#
#
xp = points[hull.vertices, 0]
yp = points[hull.vertices, 1]
xp = np.append(xp, xp[0])
yp = np.append(yp, yp[0])
ax.plot(xp, yp, 'k-')
#ax.plot(points[hull.vertices[0], 0], points[hull.vertices[[0, -1]], 1])
#points = np.array([[x, y] for x, y in zip(x1, y1)])
#hull = ConvexHull(points)
#ax.plot(points[hull.vertices,0], points[hull.vertices,1])
#zi = griddata((x, y), avg1, (xi, yi))
#ax.contour(xi, yi, z, levels=1)
def gene_expr(data, tsne, gene, fig=None, ax=None, cmap=plt.cm.plasma, out=None):
"""
    Plot the expression of a single gene over the embedding.
    Parameters
    ----------
    data : Pandas dataframe
        Genes x samples expression matrix
    tsne : Pandas dataframe
        Cells x t-SNE dimensions. Columns should be labeled 'TSNE-1', 'TSNE-2' etc.
    gene : str
        Gene name or id
"""
exp = get_gene_data(data, gene)
return expr_plot(tsne, exp, fig=fig, ax=ax, cmap=cmap, out=out)
def separate_cluster(tsne,
clusters,
cluster,
color='black',
background=BACKGROUND_SAMPLE_COLOR,
bgedgecolor='#808080',
show_background=True,
add_titles=True,
size=4,
alpha=ALPHA,
s=MARKER_SIZE,
edgecolors='white',
linewidth=EDGE_WIDTH,
fig=None,
ax=None):
"""
Plot a cluster separately to highlight where the samples are
Parameters
----------
    tsne : Pandas dataframe
        Cells x t-SNE dimensions. Columns should be labeled 'TSNE-1', 'TSNE-2' etc.
    clusters : Pandas dataframe
        Table with a 'Cluster' column assigning each cell to a cluster.
    cluster : int or str
        Id of the cluster to highlight.
    color : color, optional
        Color used for the highlighted cluster's points.
    add_titles : bool
        Whether to add titles to plots
    size : int, optional
        Width and height of a new figure, if one is created.
Returns
-------
fig : Matplotlib figure
A new Matplotlib figure used to make the plot
ax : Matplotlib axes
Axes used to render the figure
"""
if ax is None:
fig, ax = libplot.new_fig(size, size)
#print('Label {}'.format(l))
idx1 = np.where(clusters['Cluster'] == cluster)[0]
idx2 = np.where(clusters['Cluster'] != cluster)[0]
# Plot background points
if show_background:
x = tsne.iloc[idx2, 0]
y = tsne.iloc[idx2, 1]
libplot.scatter(x,
y,
c=[background],
ax=ax,
edgecolors='none', # bgedgecolor,
linewidth=linewidth,
s=s)
# Plot cluster over the top of the background
x = tsne.iloc[idx1, 0]
y = tsne.iloc[idx1, 1]
#print('sep', cluster, color)
color = color # + '7f'
libplot.scatter(x,
y,
c=color,
ax=ax,
edgecolors='none', # edgecolors,
linewidth=linewidth,
s=s)
if add_titles:
if isinstance(cluster, int):
prefix = 'C'
else:
prefix = ''
ax.set_title('{}{} ({:,})'.format(
prefix, cluster, len(idx1)), color=color)
ax.axis('off') # libplot.invisible_axes(ax)
return fig, ax
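# Illustrative usage sketch (assumed inputs as in the docstring; highlights cluster 3
# in red over the remaining cells drawn in the background colour):
#
#   fig, ax = separate_cluster(tsne_df, clusters_df, 3, color='red', size=4)
#   libplot.savefig(fig, 'cluster_3.png')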
def separate_clusters(tsne,
clusters,
name,
colors=None,
size=4,
add_titles=True,
type='tsne',
format='pdf'):
"""
Plot each cluster into its own plot file.
"""
ids = list(sorted(set(clusters['Cluster'])))
indices = np.array(list(range(0, len(ids))))
if colors is None:
colors = libcluster.get_colors()
for i in indices:
print('index', i)
cluster = ids[i]
if isinstance(colors, dict):
color = colors[cluster]
elif isinstance(colors, list):
if cluster < len(colors):
# np.where(clusters['Cluster'] == cluster)[0]]
color = colors[i]
else:
color = CLUSTER_101_COLOR
else:
color = 'black'
fig, ax = separate_cluster(tsne,
clusters,
cluster,
color=color,
add_titles=add_titles,
size=size)
out = '{}_sep_clust_{}_c{}.{}'.format(type, name, cluster, format)
print('Creating', out, '...')
libplot.savefig(fig, out)
libplot.savefig(fig, 'tmp.png')
plt.close(fig)
def cluster_grid(tsne,
clusters,
colors=None,
cols=-1,
size=SUBPLOT_SIZE,
add_titles=True,
cluster_order=None):
"""
Plot each cluster separately to highlight where the samples are
Parameters
----------
    tsne : Pandas dataframe
        Cells x t-SNE dimensions. Columns should be labeled 'TSNE-1', 'TSNE-2' etc.
    clusters : DataFrame
        Table with a 'Cluster' column assigning each cell to a cluster.
    colors : list or dict, optional
        Colors of points, indexed or keyed by cluster.
    add_titles : bool
        Whether to add titles to plots
    cluster_order : list, optional
        List of cluster ids in the order they should be rendered
Returns
-------
fig : Matplotlib figure
A new Matplotlib figure used to make the plot
"""
if cluster_order is None:
ids = np.array(list(sorted(set(clusters['Cluster']))))
cluster_order = np.array(list(range(0, len(ids)))) + 1
n = cluster_order.size
if cols == -1:
cols = int(np.ceil(np.sqrt(n)))
rows = int(np.ceil(n / cols))
w = size * cols
h = size * rows
fig = libplot.new_base_fig(w=w, h=h)
if colors is None:
colors = libcluster.get_colors()
# Where to plot figure
pc = 1
for c in cluster_order:
i = c - 1
cluster = ids[i]
# look up index for color purposes
#i = np.where(ids == cluster)[0][0]
print('index', i, cluster, colors)
if isinstance(colors, dict):
color = colors.get(cluster, 'black')
elif isinstance(colors, list):
#i = cluster - 1
if i < len(colors):
# colors[cid - 1] #colors[i] #np.where(clusters['Cluster'] == cluster)[0]]
color = colors[i]
else:
color = 'black'
else:
color = 'black'
ax = libplot.new_ax(fig, subplot=(rows, cols, pc))
separate_cluster(tsne,
clusters,
cluster,
color=color,
add_titles=add_titles,
ax=ax)
# idx1 = np.where(clusters['Cluster'] == cluster)[0]
# idx2 = np.where(clusters['Cluster'] != cluster)[0]
#
# # Plot background points
#
#
#
# x = tsne.iloc[idx2, 0]
# y = tsne.iloc[idx2, 1]
# libplot.scatter(x, y, c=BACKGROUND_SAMPLE_COLOR, ax=ax)
#
# # Plot cluster over the top of the background
#
# x = tsne.iloc[idx1, 0]
# y = tsne.iloc[idx1, 1]
#
# if isinstance(colors, dict):
# color = colors[cluster]
# elif isinstance(colors, list):
# color = colors[i]
# else:
# color = 'black'
#
# libplot.scatter(x, y, c=color, ax=ax)
#
# if add_titles:
# if isinstance(cluster, int):
# prefix = 'C'
# else:
# prefix = ''
#
# ax.set_title('{}{} ({:,})'.format(prefix, cluster, len(idx1)), color=color)
#
#
# libplot.invisible_axes(ax)
pc += 1
return fig
def create_cluster_grid(tsne,
clusters,
name,
colors=None,
cols=-1,
size=SUBPLOT_SIZE,
add_titles=True,
cluster_order=None,
method='tsne',
dir='.',
out=None):
fig = cluster_grid(tsne,
clusters,
colors=colors,
cols=cols,
size=size,
add_titles=add_titles,
cluster_order=cluster_order)
if out is None:
out = '{}/{}_{}_separate_clusters.png'.format(dir, method, name)
libplot.savefig(fig, out, pad=0)
#libplot.savefig(fig, '{}/tsne_{}separate_clusters.pdf'.format(dir, name))
#
#
# def tsne_cluster_sample_grid(tsne, clusters, samples, colors=None, size=SUBPLOT_SIZE):
# """
# Plot each cluster separately to highlight samples
#
# Parameters
# ----------
# tsne : Pandas dataframe
# Cells x tsne tsne data. Columns should be labeled 'TSNE-1', 'TSNE-2' etc
# clusters : DataFrame
# Clusters in
#
# Returns
# -------
# fig : Matplotlib figure
# A new Matplotlib figure used to make the plot
# """
#
#
# cids = list(sorted(set(clusters['Cluster'])))
#
# rows = int(np.ceil(np.sqrt(len(cids))))
#
# w = size * rows
#
# fig = libplot.new_base_fig(w=w, h=w)
#
# if colors is None:
# colors = libcluster.colors()
#
# for i in range(0, len(cids)):
# c = cids[i]
#
# #print('Label {}'.format(l))
# idx2 = np.where(clusters['Cluster'] != c)[0]
#
# # Plot background points
#
# ax = libplot.new_ax(fig, subplot=(rows, rows, i + 1))
#
# x = tsne.iloc[idx2, 0]
# y = tsne.iloc[idx2, 1]
#
# libplot.scatter(x, y, c=BACKGROUND_SAMPLE_COLOR, ax=ax)
#
# # Plot cluster over the top of the background
#
# sid = 0
#
# for sample in samples:
# id = '-{}'.format(sid + 1)
# idx1 = np.where((clusters['Cluster'] == c) & clusters.index.str.contains(id))[0]
#
# x = tsne.iloc[idx1, 0]
# y = tsne.iloc[idx1, 1]
#
# libplot.scatter(x, y, c=colors[sid], ax=ax)
#
# sid += 1
#
# ax.set_title('C{} ({:,})'.format(c, len(idx1)), color=colors[i])
# libplot.invisible_axes(ax)
#
# #set_tsne_ax_lim(tsne, ax)
#
# return fig
#
#
# def create_tsne_cluster_sample_grid(tsne, clusters, samples, name, colors=None, size=SUBPLOT_SIZE, dir='.'):
# """
# Plot separate clusters colored by sample
# """
# fig = tsne_cluster_sample_grid(tsne, clusters, samples, colors, size)
#
# libplot.savefig(fig, '{}/tsne_{}_sample_clusters.png'.format(dir, name))
# #libplot.savefig(fig, '{}/tsne_{}separate_clusters.pdf'.format(dir, name))
#
#
def load_clusters(pca, headers, name, cache=True):
file = libtsne.get_cluster_file(name)
if not os.path.isfile(file) or not cache:
print('{} was not found, creating it with...'.format(file))
# Find the interesting clusters
labels, graph, Q = phenograph.cluster(pca, k=20)
if min(labels) == -1:
new_label = 100
labels[np.where(labels == -1)] = new_label
labels += 1
libtsne.write_clusters(headers, labels, name)
cluster_map, data = libtsne.read_clusters(file)
labels = data # .tolist()
return cluster_map, labels
def umi_tpm(data):
# each column is a cell
reads_per_bc = data.sum(axis=0)
scaling_factors = 1000000 / reads_per_bc
scaled = data.multiply(scaling_factors) # , axis=1)
return scaled
def umi_log2(d):
if isinstance(d, SparseDataFrame):
print('UMI norm log2 sparse')
return d.log2(add=1)
else:
return (d + 1).apply(np.log2)
def umi_tpm_log2(data):
d = umi_tpm(data)
return umi_log2(d)
def umi_norm(data):
"""
Scale each library to its median size
Parameters
----------
data : Pandas dataframe
Matrix of umi counts
"""
# each column is a cell
reads_per_bc = data.sum(axis=0)
# int(np.round(np.median(reads_per_bc)))
median_reads_per_bc = np.median(reads_per_bc)
scaling_factors = median_reads_per_bc / reads_per_bc
scaled = data.multiply(scaling_factors) # , axis=1)
return scaled
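# Worked example of the median-library-size scaling above (illustrative numbers): if
# three cells have 2,000, 4,000 and 10,000 total UMIs, the median is 4,000, so the
# per-cell scaling factors are 2.0, 1.0 and 0.4 and every column sums to 4,000 after
# scaling.
#
#   normed = umi_norm(counts)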
def umi_norm_log2(data):
d = umi_norm(data)
print(type(d))
return umi_log2(d)
def scale(d, clip=None, min=None, max=None, axis=1):
if isinstance(d, SparseDataFrame):
print('UMI norm log2 scale sparse')
sd = StandardScaler(with_mean=False).fit_transform(d.T.matrix)
return SparseDataFrame(sd.T, index=d.index, columns=d.columns)
else:
# StandardScaler().fit_transform(d.T)
sd = sklearn.preprocessing.scale(d, axis=axis)
#sd = sd.T
if isinstance(clip, float) or isinstance(clip, int):
max = abs(clip)
min = -max
if isinstance(min, float) or isinstance(min, int):
print('z min', min)
sd[np.where(sd < min)] = min
if isinstance(max, float) or isinstance(max, int):
print('z max', max)
sd[np.where(sd > max)] = max
return pd.DataFrame(sd, index=d.index, columns=d.columns)
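# Illustrative usage sketch: z-score each row (axis=1) and clip extreme values to
# +/- 3 standard deviations (hypothetical input `log_counts`):
#
#   scaled = scale(log_counts, clip=3)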
def min_max_scale(d, min=0, max=1, axis=1):
#m = d.min(axis=1)
#std = (d - m) / (d.max(axis=1) - m)
#scaled = std * (max - min) + min
# return scaled
if axis == 0:
return pd.DataFrame(MinMaxScaler(feature_range=(min, max)).fit_transform(d), index=d.index, columns=d.columns)
else:
return pd.DataFrame(MinMaxScaler(feature_range=(min, max)).fit_transform(d.T).T, index=d.index, columns=d.columns)
def rscale(d, min=0, max=1, axis=1):
if axis == 0:
return pd.DataFrame(RobustScaler().fit_transform(d), index=d.index, columns=d.columns)
else:
return pd.DataFrame(RobustScaler().fit_transform(d.T).T, index=d.index, columns=d.columns)
def umi_norm_log2_scale(data, clip=None):
d = umi_norm_log2(data)
return scale(d, clip=clip)
def read_clusters(file):
print('Reading clusters from {}...'.format(file))
return pd.read_csv(file, sep='\t', header=0, index_col=0)
def silhouette(tsne, tsne_umi_log2, clusters, name):
# measure cluster worth
x1 = silhouette_samples(
tsne, clusters.iloc[:, 0].tolist(), metric='euclidean')
x2 = silhouette_samples(
tsne_umi_log2, clusters.iloc[:, 0].tolist(), metric='euclidean')
fig, ax = libplot.newfig(w=9, h=7, subplot=211)
df = pd.DataFrame({'Silhouette Score': x1, 'Cluster': clusters.iloc[:, 0].tolist(
), 'Label': np.repeat('tsne-10x', len(x1))})
libplot.boxplot(df, 'Cluster', 'Silhouette Score',
colors=libcluster.colors(), ax=ax)
ax.set_ylim([-1, 1])
ax.set_title('tsne-10x')
#libplot.savefig(fig, 'RK10001_10003_clust-phen_silhouette.pdf')
ax = fig.add_subplot(212) # libplot.newfig(w=9)
df2 = pd.DataFrame({'Silhouette Score': x2, 'Cluster': clusters.iloc[:, 0].tolist(
), 'Label': np.repeat('tsne-ah', len(x2))})
libplot.boxplot(df2, 'Cluster', 'Silhouette Score',
colors=libcluster.colors(), ax=ax)
ax.set_ylim([-1, 1])
ax.set_title('tsne-ah')
libplot.savefig(fig, '{}_silhouette.pdf'.format(name))
def node_color_from_cluster(clusters):
colors = libcluster.colors()
return [colors[clusters['Cluster'][i] - 1] for i in range(0, clusters.shape[0])]
# def network(tsne, clusters, name, k=5):
# A = kneighbors_graph(tsne, k, mode='distance', metric='euclidean').toarray()
#
# #A = A[0:500, 0:500]
#
# G=nx.from_numpy_matrix(A)
# pos=nx.spring_layout(G) #, k=2)
#
# #node_color = (c_phen['Cluster'][0:A.shape[0]] - 1).tolist()
# node_color = (clusters['Cluster'] - 1).tolist()
#
# fig, ax = libplot.newfig(w=10, h=10)
#
# nx.draw_networkx(G, pos=pos, with_labels=False, ax=ax, node_size=50, node_color=node_color, vmax=(clusters['Cluster'].max() - 1), cmap=libcluster.colormap())
#
# libplot.savefig(fig, 'network_{}.pdf'.format(name))
def plot_centroids(tsne, clusters, name):
c = centroids(tsne, clusters)
fig, ax = libplot.newfig(w=5, h=5)
ax.scatter(c[:, 0], c[:, 1], c=None)
libplot.format_axes(ax)
libplot.savefig(fig, '{}_centroids.pdf'.format(name))
def centroid_network(tsne, clusters, name):
c = centroids(tsne, clusters)
A = kneighbors_graph(c, 5, mode='distance', metric='euclidean').toarray()
G = nx.from_numpy_matrix(A)
pos = nx.spring_layout(G)
fig, ax = libplot.newfig(w=8, h=8)
# list(range(0, c.shape[0]))
node_color = libcluster.colors()[0:c.shape[0]]
cmap = libcluster.colormap()
labels = {}
for i in range(0, c.shape[0]):
labels[i] = i + 1
#nx.draw_networkx(G, pos=pos, with_labels=False, ax=ax, node_size=200, node_color=node_color, vmax=(c.shape[0] - 1), cmap=libcluster.colormap())
nx.draw_networkx(G, with_labels=True, labels=labels, ax=ax, node_size=800,
node_color=node_color, font_color='white', font_family='Arial')
libplot.format_axes(ax)
libplot.savefig(fig, '{}_centroid_network.pdf'.format(name))
def centroids(tsne, clusters):
cids = list(sorted(set(clusters['Cluster'].tolist())))
ret = np.zeros((len(cids), 2))
for i in range(0, len(cids)):
c = cids[i]
x = tsne.iloc[np.where(clusters['Cluster'] == c)[0], :]
centroid = (x.sum(axis=0) / x.shape[0]).tolist()
ret[i, 0] = centroid[0]
ret[i, 1] = centroid[1]
return ret
def knn_method_overlaps(tsne1, tsne2, clusters, name, k=5):
c1 = centroids(tsne1, clusters)
c2 = centroids(tsne2, clusters)
a1 = kneighbors_graph(c1, k, mode='distance', metric='euclidean').toarray()
a2 = kneighbors_graph(c2, k, mode='distance', metric='euclidean').toarray()
overlaps = []
for i in range(0, c1.shape[0]):
ids1 = np.where(a1[i, :] > 0)[0]
ids2 = np.where(a2[i, :] > 0)[0]
ids3 = np.intersect1d(ids1, ids2)
o = len(ids3) / 5 * 100
overlaps.append(o)
df = pd.DataFrame(
{'Cluster': list(range(1, c1.shape[0] + 1)), 'Overlap %': overlaps})
df.set_index('Cluster', inplace=True)
df.to_csv('{}_cluster_overlaps.txt'.format(name), sep='\t')
def mkdir(path):
"""
    Make dirs including any parents and avoid raising an exception, so it
    behaves more like mkdir -p.
Parameters
----------
path : str
directory to create.
"""
try:
os.makedirs(path)
except:
pass
def split_a_b(counts, samples, w=6, h=6, format='pdf'):
"""
Split cells into a and b
"""
cache = True
counts = libcluster.remove_empty_rows(counts)
# ['AICDA', 'CD83', 'CXCR4', 'MKI67', 'MYC', 'PCNA', 'PRDM1']
genes = pd.read_csv('../../../../expression_genes.txt', header=0)
mkdir('a')
a_barcodes = pd.read_csv('../a_barcodes.tsv', header=0, sep='\t')
idx = np.where(counts.columns.isin(a_barcodes['Barcode'].values))[0]
d_a = counts.iloc[:, idx]
d_a = libcluster.remove_empty_rows(d_a)
if isinstance(d_a, SparseDataFrame):
d_a = umi_norm_log2(d_a)
else:
d_a = umi_norm_log2_scale(d_a)
pca_a = libtsne.load_pca(d_a, 'a', cache=cache) # pca.iloc[idx,:]
tsne_a = libtsne.load_pca_tsne(pca_a, 'a', cache=cache)
c_a = libtsne.load_phenograph_clusters(pca_a, 'a', cache=cache)
create_pca_plot(pca_a, c_a, 'a', dir='a')
create_cluster_plot(tsne_a, c_a, 'a', dir='a')
create_cluster_grid(tsne_a, c_a, 'a', dir='a')
create_merge_cluster_info(d_a, c_a, 'a', sample_names=samples, dir='a')
create_cluster_samples(tsne_a, c_a, samples, 'a_sample', dir='a')
genes_expr(d_a, tsne_a, genes, prefix='a_BGY',
cmap=BLUE_GREEN_YELLOW_CMAP, w=w, h=h, dir='a/GeneExp', format=format)
fig, ax = cluster_plot(tsne_a, c_a, legend=False, w=w, h=h)
libplot.savefig(fig, 'a/a_tsne_clusters_med.pdf')
# b
mkdir('b')
b_barcodes = pd.read_csv('../b_barcodes.tsv', header=0, sep='\t')
idx = np.where(counts.columns.isin(b_barcodes['Barcode'].values))[0]
d_b = counts.iloc[:, idx]
d_b = libcluster.remove_empty_rows(d_b)
if isinstance(d_a, SparseDataFrame):
d_b = umi_norm_log2(d_b)
else:
d_b = umi_norm_log2_scale(d_b)
pca_b = libtsne.load_pca(d_b, 'b', cache=cache) # pca.iloc[idx_b,:]
tsne_b = libtsne.load_pca_tsne(pca_b, 'b', cache=cache)
c_b = libtsne.load_phenograph_clusters(pca_b, 'b', cache=cache)
create_pca_plot(pca_b, c_b, 'b', dir='b')
create_cluster_plot(tsne_b, c_b, 'b', dir='b')
create_cluster_grid(tsne_b, c_b, 'b', dir='b')
create_merge_cluster_info(d_b, c_b, 'b', sample_names=samples, dir='b')
create_cluster_samples(tsne_b, c_b, samples, 'b_sample', dir='b')
genes_expr(d_b, tsne_b, genes, prefix='b_BGY',
cmap=BLUE_GREEN_YELLOW_CMAP, w=w, h=h, dir='b/GeneExp', format=format)
fig, ax = cluster_plot(tsne_b, c_b, legend=False, w=w, h=h)
libplot.savefig(fig, 'b/b_tsne_clusters_med.pdf')
def sample_clusters(d, sample_names):
"""
    Create a cluster matrix by labelling cells by sample/batch.
"""
sc = np.array(['' for i in range(0, d.shape[0])], dtype=object)
c = 1
for s in sample_names:
id = '-{}'.format(c)
print(id)
print(np.where(d.index.str.contains(id))[0])
sc[np.where(d.index.str.contains(id))[0]] = s
c += 1
print(np.unique(d.index.values))
print(np.unique(sc))
df = pd.DataFrame(sc, index=d.index, columns=['Cluster'])
return df
def create_cluster_samples(tsne_umi_log2,
clusters,
sample_names,
name,
method='tsne',
format='png',
dir='.',
w=16,
h=16,
legend=True):
sc = sample_clusters(clusters, sample_names)
create_cluster_plot(tsne_umi_log2,
sc,
name,
method=method,
format=format,
dir=dir,
w=w,
h=w,
legend=legend)
| 1.726563 | 2 |
tensormonk/loss/adversarial_loss.py | Tensor46/TensorMONK | 29 | 12798766 | <gh_stars>10-100
"""TensorMONK :: loss :: AdversarialLoss"""
__all__ = ["AdversarialLoss"]
import torch
import numpy as np
eps = np.finfo(float).eps
def g_minimax(d_of_fake: torch.Tensor, invert_labels: bool = False):
r"""Minimax loss for generator. (`"Generative Adversarial Nets"
<https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf>`_).
Assumes, real label is 1 and fake label is 0 (use invert_labels to flip
real and fake labels). d_of_fake = D(G(z)).
loss = - log(σ( d_of_fake ))
Args:
d_of_fake (torch.Tensor, required): discriminator output of fake.
invert_labels (bool, optional): Inverts real and fake labels to 0 and 1
respectively (default: :obj:`"False"`).
"""
assert isinstance(d_of_fake, torch.Tensor)
assert isinstance(invert_labels, bool)
if invert_labels:
return - (1 - d_of_fake.sigmoid()).clamp(eps).log().mean()
return - d_of_fake.sigmoid().clamp(eps).log().mean()
def d_minimax(d_of_real: torch.Tensor, d_of_fake: torch.Tensor,
invert_labels: bool = False):
r"""Minimax loss for discriminator. (`"Generative Adversarial Nets"
<https://papers.nips.cc/paper/5423-generative-adversarial-nets.pdf>`_).
Assumes, real label is 1 and fake label is 0 (use invert_labels to flip
real and fake labels). d_of_fake = D(G(z)) and d_of_real = D(I).
loss = - log(σ( d_of_real )) - log(1 - σ( d_of_fake ))
Args:
d_of_real (torch.Tensor, required): discriminator output of real.
d_of_fake (torch.Tensor, required): discriminator output of fake.
invert_labels (bool, optional): Inverts real and fake labels to 0 and 1
respectively (default: :obj:`"False"`).
"""
assert isinstance(d_of_real, torch.Tensor)
assert isinstance(d_of_fake, torch.Tensor)
assert isinstance(invert_labels, bool)
if invert_labels:
rloss = - (1 - d_of_real.sigmoid()).clamp(eps).log()
floss = - d_of_fake.sigmoid().clamp(eps).log()
else:
rloss = - d_of_real.sigmoid().clamp(eps).log()
floss = - (1 - d_of_fake.sigmoid()).clamp(eps).log()
return (rloss + floss).mean() / 2
def g_least_squares(d_of_fake: torch.Tensor, invert_labels: bool = False):
r"""Least squares loss for generator. (`"Least Squares Generative
Adversarial Networks"<https://arxiv.org/abs/1611.04076>`_).
Assumes, real label is 1 and fake label is 0 (use invert_labels to flip
real and fake labels). d_of_fake = D(G(z)).
loss = (1 - σ( d_of_fake ))^2
Args:
d_of_fake (torch.Tensor, required): discriminator output of fake.
invert_labels (bool, optional): Inverts real and fake labels to 0 and 1
respectively (default: :obj:`"False"`).
"""
assert isinstance(d_of_fake, torch.Tensor)
assert isinstance(invert_labels, bool)
if invert_labels:
return d_of_fake.sigmoid().pow(2).mean()
return (1 - d_of_fake.sigmoid()).pow(2).mean()
def d_least_squares(d_of_real: torch.Tensor, d_of_fake: torch.Tensor,
invert_labels: bool = False):
r"""Least squares loss for generator. (`"Least Squares Generative
Adversarial Networks"<https://arxiv.org/abs/1611.04076>`_).
Assumes, real label is 1 and fake label is 0 (use invert_labels to flip
real and fake labels). d_of_fake = D(G(z)) and d_of_real = D(I).
loss = ((1 - σ( d_of_real ))^2 + σ( d_of_fake )^2) / 2
Args:
d_of_real (torch.Tensor, required): discriminator output of real.
d_of_fake (torch.Tensor, required): discriminator output of fake.
invert_labels (bool, optional): Inverts real and fake labels to 0 and 1
respectively (default: :obj:`"False"`).
"""
assert isinstance(d_of_real, torch.Tensor)
assert isinstance(d_of_fake, torch.Tensor)
assert isinstance(invert_labels, bool)
if invert_labels:
rloss = d_of_real.sigmoid().pow(2)
floss = (1 - d_of_fake.sigmoid()).pow(2)
else:
rloss = (1 - d_of_real.sigmoid()).pow(2)
floss = d_of_fake.sigmoid().pow(2)
return (rloss + floss).mean() / 2
def g_relativistic(d_of_real: torch.Tensor, d_of_fake: torch.Tensor,
invert_labels: bool = False):
r"""Relativistic loss for generator. (`"The relativistic discriminator: a
key element missing from standard GAN"<https://arxiv.org/abs/1807.00734>`_
). Assumes, real label is 1 and fake label is 0 (use invert_labels to flip
real and fake labels). d_of_fake = D(G(z)) and d_of_real = D(I).
loss = - log(1 - σ(d_of_fake - E[d_of_real]))
Args:
d_of_real (torch.Tensor, required): discriminator output of real.
d_of_fake (torch.Tensor, required): discriminator output of fake.
invert_labels (bool, optional): Inverts real and fake labels to 0 and 1
respectively (default: :obj:`"False"`).
"""
assert isinstance(d_of_real, torch.Tensor)
assert isinstance(d_of_fake, torch.Tensor)
assert isinstance(invert_labels, bool)
if invert_labels:
        return - (d_of_fake - d_of_real.mean()).sigmoid().clamp(eps).log().mean()
    return - (1 - (d_of_fake - d_of_real.mean()).sigmoid()).clamp(eps).log().mean()
def d_relativistic(d_of_real: torch.Tensor, d_of_fake: torch.Tensor,
invert_labels: bool = False):
r"""Relativistic loss for generator. (`"The relativistic discriminator: a
key element missing from standard GAN"<https://arxiv.org/abs/1807.00734>`_
). Assumes, real label is 1 and fake label is 0 (use invert_labels to flip
real and fake labels). d_of_fake = D(G(z)) and d_of_real = D(I).
loss = - log(1 - σ(d_of_real - E[d_of_fake])) -
log(σ(d_of_fake - E[d_of_real]))
Args:
d_of_real (torch.Tensor, required): discriminator output of real.
d_of_fake (torch.Tensor, required): discriminator output of fake.
invert_labels (bool, optional): Inverts real and fake labels to 0 and 1
respectively (default: :obj:`"False"`).
"""
assert isinstance(d_of_real, torch.Tensor)
assert isinstance(d_of_fake, torch.Tensor)
assert isinstance(invert_labels, bool)
dra_rf = (d_of_real - d_of_fake.mean()).sigmoid().clamp(eps)
dra_fr = (d_of_fake - d_of_real.mean()).sigmoid().clamp(eps)
if invert_labels:
return - (dra_rf.log() + (1 - dra_fr).log()).mean()
return - ((1 - dra_rf).log() + dra_fr.log()).mean()
class AdversarialLoss:
r"""Adversarial losses.
Assumes 1 is real and 0 is fake.
Fake --> D(G(z)) = d_of_fake = d_of_g_of_z
Real --> D(I) = d_of_real
"""
# Paper: Generative Adversarial Nets
# URL: https://arxiv.org/abs/1406.2661
g_minimax = g_minimax
d_minimax = d_minimax
# Paper: Least Squares Generative Adversarial Networks
# URL: https://arxiv.org/abs/1611.04076
g_least_squares = g_least_squares
d_least_squares = d_least_squares
    # Paper: The relativistic discriminator: a key element missing from
# standard GAN
# URL: https://arxiv.org/pdf/1807.00734.pdf
g_relativistic = g_relativistic
d_relativistic = d_relativistic
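# Minimal usage sketch: exercise each criterion with random discriminator logits.
# The (8, 1) shapes and the labels printed below are illustrative only.
if __name__ == "__main__":
    d_real, d_fake = torch.randn(8, 1), torch.randn(8, 1)
    print("minimax      g/d:", AdversarialLoss.g_minimax(d_fake),
          AdversarialLoss.d_minimax(d_real, d_fake))
    print("least sq     g/d:", AdversarialLoss.g_least_squares(d_fake),
          AdversarialLoss.d_least_squares(d_real, d_fake))
    print("relativistic g/d:", AdversarialLoss.g_relativistic(d_real, d_fake),
          AdversarialLoss.d_relativistic(d_real, d_fake))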
| 2.546875 | 3 |
autoarray/plot/fit_imaging_plotters.py | jonathanfrawley/PyAutoArray_copy | 0 | 12798767 | <filename>autoarray/plot/fit_imaging_plotters.py
from autoarray.plot import abstract_plotters
from autoarray.plot.mat_wrap import visuals as vis
from autoarray.plot.mat_wrap import include as inc
from autoarray.plot.mat_wrap import mat_plot as mp
from autoarray.fit import fit as f
from autoarray.structures.grids.two_d import grid_2d_irregular
class AbstractFitImagingPlotter(abstract_plotters.AbstractPlotter):
def __init__(self, fit, mat_plot_2d, visuals_2d, include_2d):
super().__init__(
mat_plot_2d=mat_plot_2d, include_2d=include_2d, visuals_2d=visuals_2d
)
self.fit = fit
@property
def visuals_with_include_2d(self):
return self.visuals_2d + self.visuals_2d.__class__(
origin=self.extract_2d(
"origin", grid_2d_irregular.Grid2DIrregular(grid=[self.fit.mask.origin])
),
mask=self.extract_2d("mask", self.fit.mask),
border=self.extract_2d("border", self.fit.mask.border_grid_sub_1.binned),
)
def figures_2d(
self,
image=False,
noise_map=False,
signal_to_noise_map=False,
model_image=False,
residual_map=False,
normalized_residual_map=False,
chi_squared_map=False,
):
"""Plot the model data of an analysis, using the *Fitter* class object.
The visualization and output type can be fully customized.
Parameters
-----------
fit : autolens.lens.fitting.Fitter
Class containing fit between the model data and observed lens data (including residual_map, chi_squared_map etc.)
output_path : str
The path where the data is output if the output_type is a file format (e.g. png, fits)
output_format : str
How the data is output. File formats (e.g. png, fits) output the data to harddisk. 'show' displays the data \
in the python interpreter window.
"""
if image:
self.mat_plot_2d.plot_array(
array=self.fit.data,
visuals_2d=self.visuals_with_include_2d,
auto_labels=mp.AutoLabels(title="Image", filename="image_2d"),
)
if noise_map:
self.mat_plot_2d.plot_array(
array=self.fit.noise_map,
visuals_2d=self.visuals_with_include_2d,
auto_labels=mp.AutoLabels(title="Noise-Map", filename="noise_map"),
)
if signal_to_noise_map:
self.mat_plot_2d.plot_array(
array=self.fit.signal_to_noise_map,
visuals_2d=self.visuals_with_include_2d,
auto_labels=mp.AutoLabels(
title="Signal-To-Noise Map", filename="signal_to_noise_map"
),
)
if model_image:
self.mat_plot_2d.plot_array(
array=self.fit.model_data,
visuals_2d=self.visuals_with_include_2d,
auto_labels=mp.AutoLabels(title="Model Image", filename="model_image"),
)
if residual_map:
self.mat_plot_2d.plot_array(
array=self.fit.residual_map,
visuals_2d=self.visuals_with_include_2d,
auto_labels=mp.AutoLabels(
title="Residual Map", filename="residual_map"
),
)
if normalized_residual_map:
self.mat_plot_2d.plot_array(
array=self.fit.normalized_residual_map,
visuals_2d=self.visuals_with_include_2d,
auto_labels=mp.AutoLabels(
title="Normalized Residual Map", filename="normalized_residual_map"
),
)
if chi_squared_map:
self.mat_plot_2d.plot_array(
array=self.fit.chi_squared_map,
visuals_2d=self.visuals_with_include_2d,
auto_labels=mp.AutoLabels(
title="Chi-Squared Map", filename="chi_squared_map"
),
)
def subplot(
self,
image=False,
noise_map=False,
signal_to_noise_map=False,
model_image=False,
residual_map=False,
normalized_residual_map=False,
chi_squared_map=False,
auto_filename="subplot_fit_imaging",
):
self._subplot_custom_plot(
image=image,
noise_map=noise_map,
signal_to_noise_map=signal_to_noise_map,
model_image=model_image,
residual_map=residual_map,
normalized_residual_map=normalized_residual_map,
chi_squared_map=chi_squared_map,
auto_labels=mp.AutoLabels(filename=auto_filename),
)
def subplot_fit_imaging(self):
return self.subplot(
image=True,
signal_to_noise_map=True,
model_image=True,
residual_map=True,
normalized_residual_map=True,
chi_squared_map=True,
)
class FitImagingPlotter(AbstractFitImagingPlotter):
def __init__(
self,
fit: f.FitImaging,
mat_plot_2d: mp.MatPlot2D = mp.MatPlot2D(),
visuals_2d: vis.Visuals2D = vis.Visuals2D(),
include_2d: inc.Include2D = inc.Include2D(),
):
super().__init__(
fit=fit,
mat_plot_2d=mat_plot_2d,
include_2d=include_2d,
visuals_2d=visuals_2d,
)
| 2.4375 | 2 |
sensehat/__init__.py | myDevicesIoT/cayennee-plugin-sensehat | 3 | 12798768 | """
This module provides a class for interfacing with the Sense HAT add-on board for Raspberry Pi.
"""
import os
from multiprocessing.managers import RemoteError
from myDevices.utils.logger import error, exception, info
from sensehat.manager import connect_client
class SenseHAT():
"""Class for interacting with a Sense HAT device"""
def __init__(self, use_emulator=False):
"""Initializes Sense HAT device.
Arguments:
use_emulator: True if the Sense HAT Emulator should be used. This requires the Emulator to be installed and running on the desktop.
"""
self.use_emulator = use_emulator
self.sense_hat = None
self.digital_value = 0
self.analog_value = 0.0
self.image_file = os.path.join('/etc/myDevices/plugins/cayenne-plugin-sensehat/data/image.png')
self.call_sense_hat_function('clear')
def init_sense_hat(self):
"""Initializes connection to Sense HAT service and gets a SenseHat shared object."""
if not self.sense_hat:
try:
self.manager = connect_client()
self.manager.use_emulator(self.use_emulator)
self.sense_hat = self.manager.SenseHat()
except ConnectionRefusedError as e:
info('Sense HAT service connection refused')
error(e)
except RemoteError as e:
error('Failed to connect to Sense HAT device')
def call_sense_hat_function(self, function_name, *args):
"""Calls a function of the SenseHat shared object.
Arguments:
function_name: Name of the function to call.
args: Arguments to pass to the function.
"""
self.init_sense_hat()
try:
if self.sense_hat is not None:
func = getattr(self.sense_hat, function_name)
value = func(*args)
return value
        except EOFError as e:
            error(e)
            self.sense_hat = None
        except AttributeError as e:
            error(e)
            self.sense_hat = None
def get_temperature(self):
"""Gets the temperature as a tuple with type and unit."""
return (self.call_sense_hat_function('get_temperature'), 'temp', 'c')
def get_humidity(self):
"""Gets the humidity as a tuple with type and unit."""
return (self.call_sense_hat_function('get_humidity'), 'rel_hum', 'p')
def get_pressure(self):
"""Gets the pressure as a tuple with type and unit."""
value = self.call_sense_hat_function('get_pressure')
if value is not None:
return (value * 100, 'bp', 'pa')
def get_acclerometer(self):
"""Gets the g-force as a tuple with type and unit."""
values = self.call_sense_hat_function('get_accelerometer_raw')
if values is not None:
g_force = []
g_force.append(values['x'])
g_force.append(values['y'])
g_force.append(values['z'])
return (g_force, 'accel', 'g')
def get_gyroscope(self):
"""Gets radians per second from the gyroscope."""
#Not currently supported in Cayenne
values = self.call_sense_hat_function('get_gyroscope_raw')
if values is not None:
rps = []
rps.append(values['x'])
rps.append(values['y'])
rps.append(values['z'])
return rps
def get_magnetometer(self):
"""Gets microteslas from the magnetometer."""
#Not currently supported in Cayenne
values = self.call_sense_hat_function('get_compass_raw')
if values is not None:
gyro = []
gyro.append(values['x'])
gyro.append(values['y'])
gyro.append(values['z'])
return gyro
def get_digital(self):
"""Gets the digital value as a tuple specifying this is a digital actuator."""
return (self.digital_value, 'digital_actuator')
def set_digital(self, value):
"""Displays an image on the Sense HAT LED matrix if the digital value is equal to True."""
self.digital_value = value
if self.digital_value:
self.call_sense_hat_function('load_image', self.image_file)
else:
self.call_sense_hat_function('clear')
def get_analog(self):
"""Gets the digital value as a tuple specifying this is an analog actuator."""
return (self.analog_value, 'analog_actuator')
def set_analog(self, value):
"""Displays the analog value on the Sense HAT LED matrix."""
self.analog_value = value
self.call_sense_hat_function('show_message', str(self.analog_value))
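# Minimal usage sketch (comments only; requires the Sense HAT service, or the
# emulator when use_emulator=True, to be running; printed values are illustrative):
#
#   hat = SenseHAT(use_emulator=True)
#   print(hat.get_temperature())   # e.g. (24.1, 'temp', 'c')
#   print(hat.get_pressure())      # e.g. (101325.0, 'bp', 'pa')
#   hat.set_analog(42)             # scrolls '42' across the LED matrix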
| 2.796875 | 3 |
support/cross/aio/gthread.py | pmp-p/python-wasm-plus | 3 | 12798769 | <filename>support/cross/aio/gthread.py
import aio
import inspect
# mark not started but no error
aio.error = None
aio.paused = False
aio.fd = {}
aio.pstab = {}
def _shutdown():
print(__file__, "_shutdown")
# https://docs.python.org/3/library/threading.html#threading.excepthook
# a green thread
# FIXME: fix wapy BUG 882 so target can be None too in preempt mode
# TODO: default granularity with https://docs.python.org/3/library/sys.html#sys.setswitchinterval
class Lock:
count = 0
def __enter__(self):
self.acquire()
def __exit__(self, *tb):
self.release()
def acquire(self, blocking=True, timeout=- 1):
self.count += 1
return True
def release(self):
self.count -= 1
def locked(self):
return self.count>0
class Condition:
def __init__(self, lock=None):
self.lock = lock or Lock()
def acquire(self, *args):
return self.lock.acquire()
def release(self):
self.lock.release()
    def wait(self, timeout=None):
        raise RuntimeError("wait not supported")
    def wait_for(self, predicate, timeout=None):
        raise RuntimeError("wait_for not supported")
class Thread:
def __init__(
self, group=None, target=None, name=None, args=(), kwargs={}, *, daemon=None
):
# def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
self.args = args
self.kwargs = kwargs
self.name = name
self.slice = 0
self.last = aio.rtclock()
if target:
if hasattr(target, "run"):
if name is None:
self.name = name or target.__class__.__name__
self.run = target.run
else:
self.run = target
if name is None:
try:
self.name = "%s-%s" % (self.run.__name__, id(self))
except:
pass
else:
target = self
if self.name is None:
self.name = "%s-%s" % (self.__class__.__name__, id(self))
self.status = None
async def wrap(self):
for idle in self.run(*self.args, **self.kwargs):
await aio.sleep(0)
async def runner(self, coro):
self.status = True
try:
# TODO: pass thread local context here
async with aio.ctx(self.slice).call(coro):
self.status = False
except Exception as e:
self.status = repr(e)
sys.print_exception(e, sys.stderr)
if __UPY__:
def __iter__(self):
if self.status is True:
rtc = aio.rtclock()
self.delta = (rtc - self.last) - self.slice
if self.delta < 0:
self.delta = 0
yield from aio.sleep_ms(self.slice - int(self.delta / 2))
self.last = rtc
__await__ = __iter__
else:
def __await__(self):
if self.status is True:
rtc = aio.rtclock()
self.delta = (rtc - self.last) - self.slice
if self.delta < 0:
self.delta = 0
# no sleep_ms on cpy
yield from aio.sleep_ms(
float(self.slice - int(self.delta / 2)) / 1_000
).__await__()
# return aio.sleep( float(self.slice - int(self.delta / 2)) / 1_000 )
self.last = rtc
def rt(self, slice):
self.slice = int(float(slice) * 1_000)
return self
def start(self):
aio.pstab.setdefault(self.name, [])
if self.run:
if not inspect.iscoroutinefunction(self.run):
self.status = True
aio.create_task(self.wrap())
else:
coro = self.run(*self.args, **self.kwargs)
pdb("168:", self.name, "starting", coro)
aio.create_task(self.runner(coro))
aio.pstab[self.name].append(self)
return self
def join(self):
embed.enable_irq()
while self.is_alive():
aio_suspend()
embed.disable_irq()
def __bool__(self):
return self.is_alive() and not aio.exit
def is_alive(self):
return self.status is True
def service(srv, *argv, **kw):
embed.log(f"starting green thread : {srv}")
thr = aio.Thread(group=None, target=srv, args=argv, kwargs=kw).start()
srv.__await__ = thr.__await__
return aio.pstab.setdefault(srv, thr)
aio.task = service
def proc(srv):
return aio.pstab.get(srv)
class Runnable:
def __await__(self):
yield from aio.pstab.get(self).__await__()
# replace with green threading
import sys
sys.modules["threading"] = sys.modules["aio.gthread"]
| 2.375 | 2 |
base16_theme_switcher/__init__.py | piotr-rusin/base16-theme-switcher | 0 | 12798770 | <gh_stars>0
"""
======================
base16-theme-switcher
======================
Base16-theme-switcher is an extensible color theme configuration tool
for applications that use colors provided in the X resource database
(configured in the ~/.Xresources file).
The application can be extended with support for different ways of
prompting the user for the name of the theme to set, or with theme
configuration support for other applications.
"""
__title__ = 'base16-theme-switcher'
__version__ = '1.0.0b2'
__author__ = '<NAME>'
__license__ = 'MIT'
__copyright__ = 'Copyright 2017 <NAME>'
| 1.007813 | 1 |
Examples/SimpleSNR-2015-10-07.py | scivision/isrutils | 1 | 12798771 | #!/usr/bin/env python
from isrutils.looper import simpleloop
# %% users param
P = {
"path": "~/data/2015-10-07/isr",
"beamid": 64157,
"acf": True,
"vlimacf": (18, 45),
"zlim_pl": [None, None],
"vlim_pl": [72, 90],
"flim_pl": [3.5, 5.5],
"odir": "out/2015-10-07",
"vlim": [25, 55],
"zlim": (90, None),
"verbose": True,
}
# %%
flist = ()
simpleloop(flist, P)
| 1.867188 | 2 |
turbotutorial/turbotutorial/urls.py | vitaliimelnychuk/django-hotwire-playground | 0 | 12798772 | """turbotutorial URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.views.generic import TemplateView
from django.urls import path
from chat import views
urlpatterns = [
path("admin/", admin.site.urls),
# path("", views.RoomList.as_view(), name="room_list"),
# path("<slug:pk>/", views.RoomDetail.as_view(), name="room_detail"),
# path("create_message/", views.TurboTest.as_view(), name="message_create"),
path("quickstart/", TemplateView.as_view(template_name="broadcast_example.html")),
]
| 2.5 | 2 |
services/movies_etl/postgres_to_es/config.py | svvladimir-ru/ugc_sprint_1 | 0 | 12798773 | import os
# ETL
ETL_MODE = os.environ.get('ETL_MODE')
ETL_CHUNK_SIZE = int(os.environ.get('ETL_CHUNK_SIZE'))
ETL_SYNC_DELAY = int(os.environ.get('ETL_SYNC_DELAY'))
ETL_FILE_STATE = os.environ.get('ETL_FILE_STATE')
ETL_DEFAULT_DATE = os.environ.get('ETL_DEFAULT_DATE')
# Postgres
POSTGRES_NAME = os.environ.get('POSTGRES_NAME')
POSTGRES_USER = os.environ.get('POSTGRES_USER')
POSTGRES_PASSWORD = os.environ.get('POSTGRES_PASSWORD')
POSTGRES_HOST = os.environ.get('POSTGRES_HOST')
POSTGRES_PORT = os.environ.get('POSTGRES_PORT')
# Elasticsearch
ELASTICSEARCH_HOST = os.environ.get('ELASTICSEARCH_HOST')
ELASTICSEARCH_PORT = os.environ.get('ELASTICSEARCH_PORT') | 1.773438 | 2 |
send_email.py | DGhambari/CompSys_Facial_Recognition_Assignment | 0 | 12798774 | <filename>send_email.py
import email, smtplib, ssl
from email import encoders
from email.mime.base import MIMEBase
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from dotenv import load_dotenv, find_dotenv
import os
from pathlib import Path
#load_dotenv(".env")
project_folder = os.path.expanduser('~/Pi-Backup-Files/facial_recognition') # adjust as appropriate
load_dotenv(os.path.join(project_folder, '.env'))
SENDER_EMAIL = os.getenv("SENDER_EMAIL")
RECEIVER_EMAIL = os.getenv("RECEIVER_EMAIL")
EMAIL_PASSWORD = os.getenv("EMAIL_PASSWORD")
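# The .env file is expected to define the three variables above; the values
# below are placeholders, not real credentials:
#   [email protected]
#   [email protected]
#   EMAIL_PASSWORD=<app-specific password>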
def send():
subject = "Intruder Detected!"
body = "Please see the attached picture."
sender_email = SENDER_EMAIL
receiver_email = RECEIVER_EMAIL
    password = EMAIL_PASSWORD
# Create a multipart message and set headers
message = MIMEMultipart()
message["From"] = sender_email
message["To"] = receiver_email
message["Subject"] = subject
#message["Bcc"] = receiver_email # Recommended for mass emails
# Add body to email
message.attach(MIMEText(body, "plain"))
filename = "image.jpg" # In same directory as script
    # Open the image attachment in binary mode
with open(filename, "rb") as attachment:
# Add file as application/octet-stream
# Email client can usually download this automatically as attachment
part = MIMEBase("application", "octet-stream")
part.set_payload(attachment.read())
# Encode file in ASCII characters to send by email
encoders.encode_base64(part)
# Add header as key/value pair to attachment part
part.add_header(
"Content-Disposition",
f"attachment; filename= {filename}",
)
# Add attachment to message and convert message to string
message.attach(part)
text = message.as_string()
# Log in to server using secure context and send email
context = ssl.create_default_context()
with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, text)
if __name__ == "__main__":
send() | 3.03125 | 3 |
Artificial Intelligence/Goal Stack Planning/gsp.py | Harjiwan/Python | 17 | 12798775 | class GSP:
def __init__(self):
self.start = []
self.goal = []
self.stack = []
self.actions = ['Stack','UnStack','Pick','Put']
self.predicate = ['On','OnTable']
self.prereq = ['Clear','Holding','ArmEmpty']
def accept(self):
self.start = input("Enter Start state : ").split("^")
self.goal = input("Enter Goal state : ").split("^")
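        # States are entered as clauses joined by '^'; a hypothetical input
        # could look like: On(A,B)^OnTable(B)^Clear(A)^ArmEmpty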
def contains(self,l1,l2,x):
if x in l2:
return True
else:
return False
def break_compound(self,l1):
for i in l1:
self.stack.append(i)
def process(self):
self.accept()
        self.stack.append(self.goal)
while len(self.stack) != 0:
#Break compound clause onto stack
if len(self.stack[-1]) > 1:
                self.break_compound(self.stack[-1])
| 3.140625 | 3 |
preprocess/TripleClassificationData.py | lualiu/GanforKGE | 0 | 12798776 | <gh_stars>0
import os
import numpy as np
import torch
from utils.readdata import read_dicts_from_file,read_triples_from_file,turn_triples_to_label_dict
class TripleClassificationData(object):
def __init__(self,data_path,train_data_name,valid_data_name,test_data_name,with_reverse=False):
self.entity_dict,self.relation_dict = read_dicts_from_file(
[os.path.join(data_path,train_data_name),
os.path.join(data_path,valid_data_name),
os.path.join(data_path,test_data_name)],
with_reverse=with_reverse
)
self.entity_numbers = len(self.entity_dict.keys())
self.relation_numbers = len(self.relation_dict.keys())
self.train_triples_with_reverse = read_triples_from_file(os.path.join(data_path, train_data_name),
self.entity_dict, self.relation_dict,
with_reverse=with_reverse)
self.valid_triples_with_reverse,self.valid_triples_for_classification = self.read_triple_from_file(os.path.join(data_path, valid_data_name),
self.entity_dict, self.relation_dict,
with_reverse=with_reverse)
self.test_triples_with_reverse,self.test_triples_for_classification = self.read_triple_from_file(os.path.join(data_path, test_data_name),
self.entity_dict, self.relation_dict,
with_reverse=with_reverse)
self.train_numbers = len(self.train_triples_with_reverse)
self.train_triples_dict = turn_triples_to_label_dict(self.train_triples_with_reverse)
self.valid_triples_dict = turn_triples_to_label_dict(self.valid_triples_with_reverse)
self.test_triples_dict = turn_triples_to_label_dict(self.test_triples_with_reverse)
self.gold_triples_dict = dict(list(self.train_triples_dict.items()) +
list(self.valid_triples_dict.items()) +
list(self.test_triples_dict.items()))
#del self.train_triples_with_reverse
del self.valid_triples_dict
del self.test_triples_dict
self.train_triples_numpy_array = np.array(self.train_triples_with_reverse).astype(np.int32)
self.valid_triples_for_classification = np.array(self.valid_triples_for_classification).astype(np.int32)
self.test_triples_for_classification = np.array(self.test_triples_for_classification).astype(np.int32)
def read_triple_from_file(self,filename,entity_dict,relation_dict,with_reverse):
triples_list = []
classification_triples_label = []
with open(filename) as file:
for line in file:
head, relation, tail, label = line.strip().split('\t')
if int(label) == 1:
triples_list.append([
entity_dict[head],
relation_dict[relation],
entity_dict[tail]
])
if with_reverse:
relation_reverse = relation + '_reverse'
triples_list.append([
entity_dict[tail],
relation_dict[relation_reverse],
entity_dict[head]
])
classification_triples_label.append([
entity_dict[head],
relation_dict[relation],
entity_dict[tail],
label
])
return triples_list,classification_triples_label
def get_batch(self,batch_size):
random_index = np.random.permutation(self.train_numbers)
random_train_triple = self.train_triples_numpy_array[random_index]
pointer = 0
while pointer < self.train_numbers:
start_index = pointer
end_index = start_index + batch_size
if end_index >= self.train_numbers:
end_index = self.train_numbers
pointer = end_index
current_batch_size = end_index - start_index
new_batch_train_triple_true = random_train_triple[start_index:end_index,:].copy()
new_batch_train_triple_fake = random_train_triple[start_index:end_index,:].copy()
random_words = np.random.randint(0,self.entity_numbers,current_batch_size)
for index in range(current_batch_size):
while (new_batch_train_triple_fake[index,0],
new_batch_train_triple_fake[index,1],
random_words[index]) in self.train_triples_dict:
random_words[index] = np.random.randint(0,self.entity_numbers)
new_batch_train_triple_fake[index,2] = random_words[index]
yield torch.tensor(new_batch_train_triple_true).long().cuda(),torch.tensor(new_batch_train_triple_fake).long().cuda()
| 2.578125 | 3 |
run.py | goonpug/goonpug-stats | 1 | 12798777 | #!/usr/bin/env python
from __future__ import absolute_import
from goonpug import app
def main():
app.run(debug=True)
if __name__ == '__main__':
main()
| 1.226563 | 1 |
swagger_client/models/get_universe_graphics_graphic_id_ok.py | rseichter/bootini-star | 0 | 12798778 | <filename>swagger_client/models/get_universe_graphics_graphic_id_ok.py
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online # noqa: E501
OpenAPI spec version: 0.8.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class GetUniverseGraphicsGraphicIdOk(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'graphic_id': 'int',
'graphic_file': 'str',
'sof_race_name': 'str',
'sof_fation_name': 'str',
'sof_dna': 'str',
'sof_hull_name': 'str',
'collision_file': 'str',
'icon_folder': 'str'
}
attribute_map = {
'graphic_id': 'graphic_id',
'graphic_file': 'graphic_file',
'sof_race_name': 'sof_race_name',
'sof_fation_name': 'sof_fation_name',
'sof_dna': 'sof_dna',
'sof_hull_name': 'sof_hull_name',
'collision_file': 'collision_file',
'icon_folder': 'icon_folder'
}
def __init__(self, graphic_id=None, graphic_file=None, sof_race_name=None, sof_fation_name=None, sof_dna=None, sof_hull_name=None, collision_file=None, icon_folder=None): # noqa: E501
"""GetUniverseGraphicsGraphicIdOk - a model defined in Swagger""" # noqa: E501
self._graphic_id = None
self._graphic_file = None
self._sof_race_name = None
self._sof_fation_name = None
self._sof_dna = None
self._sof_hull_name = None
self._collision_file = None
self._icon_folder = None
self.discriminator = None
self.graphic_id = graphic_id
if graphic_file is not None:
self.graphic_file = graphic_file
if sof_race_name is not None:
self.sof_race_name = sof_race_name
if sof_fation_name is not None:
self.sof_fation_name = sof_fation_name
if sof_dna is not None:
self.sof_dna = sof_dna
if sof_hull_name is not None:
self.sof_hull_name = sof_hull_name
if collision_file is not None:
self.collision_file = collision_file
if icon_folder is not None:
self.icon_folder = icon_folder
@property
def graphic_id(self):
"""Gets the graphic_id of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
graphic_id integer # noqa: E501
:return: The graphic_id of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:rtype: int
"""
return self._graphic_id
@graphic_id.setter
def graphic_id(self, graphic_id):
"""Sets the graphic_id of this GetUniverseGraphicsGraphicIdOk.
graphic_id integer # noqa: E501
:param graphic_id: The graphic_id of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:type: int
"""
if graphic_id is None:
raise ValueError("Invalid value for `graphic_id`, must not be `None`") # noqa: E501
self._graphic_id = graphic_id
@property
def graphic_file(self):
"""Gets the graphic_file of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
graphic_file string # noqa: E501
:return: The graphic_file of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:rtype: str
"""
return self._graphic_file
@graphic_file.setter
def graphic_file(self, graphic_file):
"""Sets the graphic_file of this GetUniverseGraphicsGraphicIdOk.
graphic_file string # noqa: E501
:param graphic_file: The graphic_file of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:type: str
"""
self._graphic_file = graphic_file
@property
def sof_race_name(self):
"""Gets the sof_race_name of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
sof_race_name string # noqa: E501
:return: The sof_race_name of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:rtype: str
"""
return self._sof_race_name
@sof_race_name.setter
def sof_race_name(self, sof_race_name):
"""Sets the sof_race_name of this GetUniverseGraphicsGraphicIdOk.
sof_race_name string # noqa: E501
:param sof_race_name: The sof_race_name of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:type: str
"""
self._sof_race_name = sof_race_name
@property
def sof_fation_name(self):
"""Gets the sof_fation_name of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
sof_fation_name string # noqa: E501
:return: The sof_fation_name of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:rtype: str
"""
return self._sof_fation_name
@sof_fation_name.setter
def sof_fation_name(self, sof_fation_name):
"""Sets the sof_fation_name of this GetUniverseGraphicsGraphicIdOk.
sof_fation_name string # noqa: E501
:param sof_fation_name: The sof_fation_name of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:type: str
"""
self._sof_fation_name = sof_fation_name
@property
def sof_dna(self):
"""Gets the sof_dna of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
sof_dna string # noqa: E501
:return: The sof_dna of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:rtype: str
"""
return self._sof_dna
@sof_dna.setter
def sof_dna(self, sof_dna):
"""Sets the sof_dna of this GetUniverseGraphicsGraphicIdOk.
sof_dna string # noqa: E501
:param sof_dna: The sof_dna of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:type: str
"""
self._sof_dna = sof_dna
@property
def sof_hull_name(self):
"""Gets the sof_hull_name of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
sof_hull_name string # noqa: E501
:return: The sof_hull_name of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:rtype: str
"""
return self._sof_hull_name
@sof_hull_name.setter
def sof_hull_name(self, sof_hull_name):
"""Sets the sof_hull_name of this GetUniverseGraphicsGraphicIdOk.
sof_hull_name string # noqa: E501
:param sof_hull_name: The sof_hull_name of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:type: str
"""
self._sof_hull_name = sof_hull_name
@property
def collision_file(self):
"""Gets the collision_file of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
collision_file string # noqa: E501
:return: The collision_file of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:rtype: str
"""
return self._collision_file
@collision_file.setter
def collision_file(self, collision_file):
"""Sets the collision_file of this GetUniverseGraphicsGraphicIdOk.
collision_file string # noqa: E501
:param collision_file: The collision_file of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:type: str
"""
self._collision_file = collision_file
@property
def icon_folder(self):
"""Gets the icon_folder of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
icon_folder string # noqa: E501
:return: The icon_folder of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:rtype: str
"""
return self._icon_folder
@icon_folder.setter
def icon_folder(self, icon_folder):
"""Sets the icon_folder of this GetUniverseGraphicsGraphicIdOk.
icon_folder string # noqa: E501
:param icon_folder: The icon_folder of this GetUniverseGraphicsGraphicIdOk. # noqa: E501
:type: str
"""
self._icon_folder = icon_folder
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GetUniverseGraphicsGraphicIdOk):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 1.703125 | 2 |
setup.py | itamar-otonomo/traj-dist-py3 | 0 | 12798779 | from setuptools import setup, find_packages
from Cython.Distutils.extension import Extension
from Cython.Build import cythonize, build_ext
import numpy
import os
from glob import glob
"""
ext_modules = [Extension("traj_dist.cydist.basic_geographical", ["traj_dist/cydist/basic_geographical.pyx"]),
Extension("traj_dist.cydist.basic_euclidean", ["traj_dist/cydist/basic_euclidean.pyx"]),
Extension("traj_dist.cydist.sspd", ["traj_dist/cydist/sspd.pyx"]),
Extension("traj_dist.cydist.dtw", ["traj_dist/cydist/dtw.pyx"]),
Extension("traj_dist.cydist.lcss", ["traj_dist/cydist/lcss.pyx"]),
Extension("traj_dist.cydist.hausdorff", ["traj_dist/cydist/hausdorff.pyx"]),
Extension("traj_dist.cydist.discret_frechet", ["traj_dist/cydist/discret_frechet.pyx"]),
Extension("traj_dist.cydist.frechet", ["traj_dist/cydist/frechet.pyx"]),
Extension("traj_dist.cydist.segment_distance", ["traj_dist/cydist/segment_distance.pyx"]),
Extension("traj_dist.cydist.sowd", ["traj_dist/cydist/sowd.pyx"]),
Extension("traj_dist.cydist.erp", ["traj_dist/cydist/erp.pyx"]),
Extension("traj_dist.cydist.edr", ["traj_dist/cydist/edr.pyx"])]
"""
sources = glob('traj_dist/cydist/*.pyx')
extensions = [
Extension(filename.split('.')[0].replace(os.path.sep, '.'),
sources=[filename],
)
for filename in sources]
setup(
name="trajectory_distance_py3",
version="1.0.1",
author="<NAME>",
author_email="<EMAIL>",
cmdclass={'build_ext': build_ext},
# ext_modules=ext_modules,
ext_modules=extensions,
include_dirs=[numpy.get_include()],
install_requires=["numpy>=1.14.0", "cython>=0.27.3", "shapely>=1.6.3", "geohash2>=1.1", 'pandas>=0.20.3',
'scipy>=0.19.1'],
description="Distance to compare 2D-trajectories in Cython",
packages=find_packages()
)
| 1.78125 | 2 |
ex034.py | olmirjunior/CursoemVideoPython-Exercicios-e-Aula | 0 | 12798780 | nome = str(input('Digite o nome do funcioário: '))
salario = float(input('Qual o salário do funcionário R$ '))
sal10 = (salario * 10 / 100)
sal15 = (salario * 15 / 100)
if salario >= 1250:
print('O novo salário do funcionário(a) {} com 10% de aumento será de {}'.format(nome, sal10 + salario))
else:
print('O novo salário do funcionário(a) {} com 15% de aumento será de {}'.format(nome, sal15 + salario))
# My code above, the teacher's version below
salario = float(input('Qual o salário do funcionário R$ '))
if salario <= 1250:
novo = salario + (salario * 15 / 100)
else:
novo = salario + (salario * 10 / 100)
print('Quem ganhava R${:.2f} passa a ganhar R${:.2f} agora'.format(salario, novo)) | 3.875 | 4 |
pyeccodes/defs/grib2/template_4_categorical_def.py | ecmwf/pyeccodes | 7 | 12798781 | import pyeccodes.accessors as _
def load(h):
h.add(_.Unsigned('numberOfCategories', 1))
with h.list('categories'):
for i in range(0, h.get_l('numberOfCategories')):
h.add(_.Codetable('categoryType', 1, "4.91.table", _.Get('masterDir'), _.Get('localDir')))
h.add(_.Unsigned('codeFigure', 1))
h.add(_.Unsigned('scaleFactorOfLowerLimit', 1))
h.add(_.Unsigned('scaledValueOfLowerLimit', 4))
h.add(_.Unsigned('scaleFactorOfUpperLimit', 1))
h.add(_.Unsigned('scaledValueOfUpperLimit', 4))
| 2.046875 | 2 |
genbank-fan/nwk_tree_parser.py | chnops/code | 2 | 12798782 | <filename>genbank-fan/nwk_tree_parser.py
import sys
from Bio import Phylo
names = {}
tree = Phylo.read(sys.argv[1], "newick")
for idx, clade in enumerate(tree.find_clades()):
if clade.name:
clade.name = '%d\t%s' % (idx, clade.name)
print clade.name
# else:
# clade.name = str(idx)
# names = clade.name
# print names
| 2.9375 | 3 |
HomieMQTT.py | RdeLange/skill-homey | 0 | 12798783 | <gh_stars>0
import paho.mqtt.client as mqtt
import threading
import time
class HomieMQTT:
""" Class for controlling Homie Convention.
The Homie Convention follows the following format:
root/system name/device class (optional)/zone (optional)/device name/capability/command """
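    # Example topic built from the format above (all segment names hypothetical):
    #   homie/homey/light/livingroom/ceiling_lamp/onoff/set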
DEVICES = []
messages = {}
homie_parent = ""
homie_device = ""
def __init__(self,host, port,root,authentication,user,password):
self.mq_host = host
self.mq_port = port
self.mq_root = root
self.mq_authentication = authentication
self.mq_user = user
self.mq_password = password
#self.reconnect(force=True)
threading.Thread(target=self.reconnect,args=(True,)).start()
# try:
# self.mqttc = mqtt.Client()
# if authentication == True:
# self.mqttc.username_pw_set(username=user, password=password)
# self.mqttc.on_message = self.on_message
# print("Homey discovery started.....")
# self.mqttc.connect(host, int(port), 60)
# self.mqttc.subscribe(root+"/#", 0)
# threading.Thread(target=self.startloop).start()
# except:
#print("No connection...")
# temp = 3
def reconnect(self, force=False):
if force == True:
self.mq_connected = False
while not self.mq_connected:
try:
self.mqttc = mqtt.Client(client_id='Homie Adapter')
if self.mq_authentication == True:
self.mqttc.username_pw_set(username=self.mq_user, password=self.mq_password)
self.mqttc.on_connect = self.on_connect
self.mqttc.on_message = self.on_message
self.mqttc.connect(host=self.mq_host,port=int(self.mq_port))
threading.Thread(target=self.startloop).start()
self.mq_connected = True
print("Connected to MQ!")
except Exception as ex:
print("Could not connect to MQ: {0}".format(ex))
print("Trying again in 5 seconds...")
time.sleep(5)
self.notify()
def startloop(self):
self.mqttc.loop_forever()
def on_message(self,mqttc, obj, msg,):
#INITIAL VALUES
payload = msg.payload.decode("utf-8")
topic = str(msg.topic)
temp = topic.split("/")
self.homie_parent = temp[0]
self.homie_device = temp[1]
self.messages[topic] = payload
def on_connect(self, client, userdata, flags, rc):
self.mqttc.subscribe(self.mq_root+"/#", 0)
def notify(self):
#self.reconnect()
threading.Thread(target=self.reconnect,args=(False,)).start()
def getmessages(self):
#print(self.mq_root)
temp = self.mq_root.split("/")
self.homie_parent = temp[0]
self.homie_device = temp[1]
return self.messages, self.homie_parent,self.homie_device
| 2.578125 | 3 |
LeetCode/852 Peak Index in a Mountain Array.py | gesuwen/Algorithms | 0 | 12798784 | # Binary Search
# Let's call an array A a mountain if the following properties hold:
#
# A.length >= 3
# There exists some 0 < i < A.length - 1 such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1]
# Given an array that is definitely a mountain, return any i such that A[0] < A[1] < ... A[i-1] < A[i] > A[i+1] > ... > A[A.length - 1].
#
# Example 1:
#
# Input: [0,1,0]
# Output: 1
# Example 2:
#
# Input: [0,2,1,0]
# Output: 1
# Note:
#
# 3 <= A.length <= 10000
# 0 <= A[i] <= 10^6
# A is a mountain, as defined above.
class Solution:
def peakIndexInMountainArray(self, A):
"""
:type A: List[int]
:rtype: int
"""
lowerBound = 0
upperBound = len(A) - 1
while lowerBound <= upperBound:
m = (lowerBound + upperBound) // 2
if A[m] > A[m-1] and A[m] > A[m+1]:
return m
if A[m] < A[m+1]:
lowerBound = m
else:
upperBound = m
return m
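# Usage sketch: Solution().peakIndexInMountainArray([0, 2, 1, 0]) returns 1,
# matching Example 2 above.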
| 3.765625 | 4 |
official/modules/example/example.py | jaklinger/nesta-toolbox | 0 | 12798785 | <reponame>jaklinger/nesta-toolbox<gh_stars>0
'''
example
This is an example of a python module. It contains all the required elements
for being promoted to nesta-toolbox.official.modules.
The specific example here simply demonstrates how to check the weather in London,
with the API key read from an configuration file.
'''
import requests
import configparser
class WeatherChecker(configparser.ConfigParser):
'''
Basic class to check weather at a given place. The class inherits from
ConfigParser, and expects to find a file api.config containing a variable
DEFAULT.API_KEY. Basic usage:
wc = WeatherChecker()
wc.get_weather_at_place("Some place name")
'''
def __init__(self):
'''
Wrapper to ConfigParser, to read config file and store
the parameters in self
'''
# Read the config file
super().__init__()
self.read('api.config')
# Make sure that the file exists & contains the relevant info
try:
self["DEFAULT"]["API_KEY"]
except KeyError as err:
raise KeyError("Couldn't find DEFAULT.API_KEY variable in a "
"file api.config in this directory.")
def get_weather_at_place(self,place):
'''
A wrapper to the OpenWeatherMap API.
:param place: the query string for OpenWeatherMap
:type place: str
'''
url="http://api.openweathermap.org/data/2.5/weather"
params=dict(q=place,appid=self["DEFAULT"]["API_KEY"])
r = requests.get(url,params)
r.raise_for_status()
return r.json()
# Write an example main routine in a __name__ == __main__ snippet
if __name__ == "__main__":
# Get and print London wind speeds
wc = WeatherChecker()
weather = wc.get_weather_at_place("London,UK")
print(weather)
| 3.203125 | 3 |
src/controller/mode.py | iivvoo-abandoned/most | 0 | 12798786 | <filename>src/controller/mode.py
#!/usr/bin/python
# $Id: mode.py,v 1.3 2002/02/05 17:44:25 ivo Exp $
"""
Class to parse modes. The class is initialized with a modestring,
the result methods will provide the parsed plus/min modes and
parameters.
TODO: banlist support?
"""
""" Handle messages like:
Vlads!^<EMAIL> MODE #foo +o Test
Test MODE Test :-i
[email protected] MODE #foo +bol b*!*@* MostUser 23
[email protected] MODE #foo +t-l
:struis.intranet.amaze.nl 324 Vads #foo +tnl 12
[email protected] MODE #foo +l 21
Nick: x
Origin: [email protected]
Target: #foo
Command: MODE
rest: +l
rest: 21
Alternative interface for this class: get params by mode
or, return plus/minmodes in (mode, param) pairs, i.e.
+ookl Foo Bar key 12 -> (o, Foo), (o, Bar), (k, key), (l, 12)
"""
from string import *
class mode:
def __init__(self, mode=None):
self._plusmodes = ""
self._minmodes = ""
self._plusparams = ""
self._minparams = ""
self.setmode(mode)
def setmode(self, mode):
if type(mode) in (type(()), type([])):
self.mode = mode
else:
self.mode = split(mode, ' ')
self.parse()
def parse(self):
modechars = self.mode[0]
modeparams = self.mode[1:]
plusmodes = ""
minmodes = ""
plusparams = []
minparams = []
        # XXX There's too much duplication here
#
# Are plusmodes always defined to come first?
pcount = 0
if modechars[0] == '+':
min_start = find(modechars, '-')
if min_start != -1:
plusmodes,minmodes = split(modechars[1:], '-')
else:
plusmodes = modechars[1:]
for i in plusmodes:
if i in 'volkbIe': # modes that require a parameter
if len(modeparams) > pcount:
plusparams.append(modeparams[pcount])
pcount = pcount + 1
else:
plusparams.append("") # or None?
else:
plusparams.append("")
for i in minmodes:
if i in 'vobIe': # modes that require a parameter
minparams.append(modeparams[pcount])
pcount = pcount + 1
else:
minparams.append("")
elif modechars[0] == '-':
plus_start = find(modechars, '+')
if plus_start != -1:
minmodes,plusmodes = split(modechars[1:], '+')
else:
minmodes = modechars[1:]
for i in minmodes:
if i in 'vobIe': # modes that require a parameter
minparams.append(modeparams[pcount])
pcount = pcount + 1
else:
minparams.append("")
for i in plusmodes:
if i in 'volkbIe': # modes that require a parameter
if len(modeparams) > pcount:
plusparams.append(modeparams[pcount])
pcount = pcount + 1
else:
plusparams.append("") # or None?
else:
plusparams.append("")
self._plusmodes = plusmodes
self._minmodes = minmodes
self._plusparams = plusparams
self._minparams = minparams
def plusmodes(self):
return self._plusmodes
def minmodes(self):
return self._minmodes
def plusmode(self, index):
return self._plusmodes[index]
def minmode(self, index):
return self._minmodes[index]
def plusparams(self):
return self._plusparams
def minparams(self):
return self._minparams
def plusparam(self, index):
return self._plusparams[index]
def minparam(self, index):
return self._minparams[index]
if __name__ == '__main__':
# you won't get them as complex as the first on irc.
tests = [ "+tnokb-lib VladDrac key *!*@* *!*@*.nl",
"+tnkl",
"+-",
"+l- 10",
"-oo+oo Foo1 Foo2 Foo3 Foo4"]
for i in tests:
m = mode(i)
print "String: %s" % i
print "Plusmodes: %s" % m.plusmodes()
print "Minmodes: %s" % m.minmodes()
print "Plusparams: "
print m.plusparams()
print "Minparams:"
print m.minparams()
| 3.203125 | 3 |
src/inmediag/__init__.py | TechFitU/MDSOM | 0 | 12798787 | <filename>src/inmediag/__init__.py
default_app_config = 'inmediag.apps.MDSOMConfig' | 1.117188 | 1 |
misc/pytorch_toolkit/chest_xray_screening/chest_xray_screening/train.py | e-ddykim/training_extensions | 256 | 12798788 | <reponame>e-ddykim/training_extensions<filename>misc/pytorch_toolkit/chest_xray_screening/chest_xray_screening/train.py
import numpy as np
import time
import os
import argparse
import torch
from torch.backends import cudnn
from torch import optim
import torch.nn.functional as tfunc
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import StepLR
from .utils.dataloader import RSNADataSet
from .utils.score import compute_auroc
from .utils.model import DenseNet121, DenseNet121Eff
from math import sqrt
import json
from tqdm import tqdm as tq
class RSNATrainer():
def __init__(self, model,
data_loader_train, data_loader_valid, data_loader_test,
class_count, checkpoint, device, class_names, lr):
self.gepoch_id = 0
self.device = device
self.model = model.to(self.device)
self.data_loader_train = data_loader_train
self.data_loader_valid = data_loader_valid
self.data_loader_test = data_loader_test
self.class_names = class_names
self.class_count = class_count
self.auroc_max = 0.0 # Setting maximum AUROC value as zero
self.optimizer = optim.Adam(self.model.parameters(), lr=lr)
if checkpoint is not None:
model_checkpoint = torch.load(checkpoint)
self.optimizer.load_state_dict(model_checkpoint['optimizer'])
else:
model_checkpoint = None
self.loss_fn = torch.nn.BCELoss()
def train(self, max_epoch, savepath):
train_loss_min = 1e+5 # A random very high number
valid_loss_min = 1e+5
for epoch_id in range(max_epoch):
print(f"Epoch {epoch_id+1}/{max_epoch}")
self.gepoch_id = epoch_id
train_loss, valid_loss, auroc_max = self.epoch_train()
self.current_train_loss = train_loss
self.current_valid_loss = valid_loss
timestamp_end = time.strftime("%H%M%S-%d%m%Y")
if train_loss < train_loss_min:
train_loss_min = train_loss
if valid_loss < valid_loss_min:
valid_loss_min = valid_loss
torch.save({'epoch': epoch_id + 1,
'state_dict': self.model.state_dict(),
'best_loss': valid_loss_min,
'optimizer' : self.optimizer.state_dict()},
os.path.join(savepath, f'm-epoch-{epoch_id}.pth'))
test_auroc = self.test()
print(f"Epoch:{epoch_id + 1}| EndTime:{timestamp_end}| TestAUROC: {test_auroc}| ValidAUROC: {auroc_max}")
def valid(self):
self.model.eval()
loss_valid_r = 0
valid_batches = 0 # Counter for valid batches
out_gt = torch.FloatTensor().to(self.device)
out_pred = torch.FloatTensor().to(self.device)
with torch.no_grad():
for (var_input, var_target) in tq(self.data_loader_valid):
var_target = var_target.to(self.device)
out_gt = torch.cat((out_gt, var_target), 0).to(self.device)
_, c, h, w = var_input.size()
var_input = var_input.view(-1, c, h, w)
var_output = self.model(var_input.to(self.device))
out_pred = torch.cat((out_pred, var_output), 0)
lossvalue = self.loss_fn(
var_output, tfunc.one_hot(var_target.squeeze(1).long(), num_classes=self.class_count).float())
loss_valid_r += lossvalue.item()
valid_batches += 1
valid_loss = loss_valid_r / valid_batches
auroc_individual = compute_auroc(
tfunc.one_hot(out_gt.squeeze(1).long()).float(),
out_pred, self.class_count)
print(len(auroc_individual))
auroc_mean = np.array(auroc_individual).mean()
return valid_loss, auroc_mean
def epoch_train(self):
loss_train_list = []
loss_valid_list = []
self.model.train()
scheduler = StepLR(self.optimizer, step_size=6, gamma=0.002)
for batch_id, (var_input, var_target) in tq(enumerate(self.data_loader_train)):
var_target = var_target.to(self.device)
var_input = var_input.to(self.device)
var_output= self.model(var_input)
trainloss_value = self.loss_fn(
var_output,
tfunc.one_hot(var_target.squeeze(1).long(), num_classes=self.class_count).float())
self.optimizer.zero_grad()
trainloss_value.backward()
self.optimizer.step()
train_loss_value = trainloss_value.item()
loss_train_list.append(train_loss_value)
if batch_id % (len(self.data_loader_train)-1) == 0 and batch_id != 0:
validloss_value, auroc_mean = self.valid()
loss_valid_list.append(validloss_value)
if auroc_mean > self.auroc_max:
print('Better auroc obtained')
self.auroc_max = auroc_mean
scheduler.step()
train_loss_mean = np.mean(loss_train_list)
valid_loss_mean = np.mean(loss_valid_list)
return train_loss_mean, valid_loss_mean, auroc_mean
def test(self):
cudnn.benchmark = True
out_gt = torch.FloatTensor().to(self.device)
out_pred = torch.FloatTensor().to(self.device)
self.model.eval()
with torch.no_grad():
for i, (var_input, var_target) in enumerate(self.data_loader_test):
var_target = var_target.to(self.device)
var_input = var_input.to(self.device)
out_gt = torch.cat((out_gt, var_target), 0).to(self.device)
_, c, h, w = var_input.size()
var_input = var_input.view(-1, c, h, w)
out = self.model(var_input)
out_pred = torch.cat((out_pred, out), 0)
auroc_individual = compute_auroc(tfunc.one_hot(out_gt.squeeze(1).long()).float(), out_pred, self.class_count)
auroc_mean = np.array(auroc_individual).mean()
print(f'AUROC mean:{auroc_mean}')
for i, auroc_val in enumerate(auroc_individual):
print(f"{self.class_names[i]}:{auroc_val}")
return auroc_mean
def main(args):
lr = args.lr
checkpoint = args.checkpoint
batch_size = args.bs
max_epoch = args.epochs
class_count = args.clscount #The objective is to classify the image into 3 classes
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use gpu if available
class_names = ['Lung Opacity', 'Normal', 'No Lung Opacity / Not Normal']
# Data Loader
dpath = args.dpath
img_pth = os.path.join(args.dpath, 'processed_data/')
numpy_path = os.path.join(args.dpath, 'data_split/')
with open(os.path.join(dpath, 'rsna_annotation.json')) as lab_file:
labels = json.load(lab_file)
# Place numpy file containing train-valid-test split on tools folder
tr_list = np.load(os.path.join(numpy_path,'train_list.npy')).tolist()
val_list = np.load(os.path.join(numpy_path,'valid_list.npy')).tolist()
test_list = np.load(os.path.join(numpy_path,'test_list.npy')).tolist()
dataset_train = RSNADataSet(tr_list, labels, img_pth, transform=True)
dataset_valid = RSNADataSet(val_list, labels, img_pth, transform=True)
data_loader_train = DataLoader(
dataset=dataset_train,
batch_size=batch_size,
shuffle=True,
num_workers=4,
pin_memory=False)
data_loader_valid = DataLoader(
dataset=dataset_valid,
batch_size=batch_size,
shuffle=False,
num_workers=4,
pin_memory=False)
dataset_test = RSNADataSet(test_list, labels, img_pth, transform=True)
data_loader_test = DataLoader(
dataset=dataset_test,
batch_size=1,
shuffle=False,
num_workers=4,
pin_memory=False)
# Construct Model
if args.optimised:
alpha = args.alpha
phi = args.phi
beta = args.beta
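        # Compound-scaling sketch (assumed EfficientNet-style convention): when
        # beta is not supplied it is chosen so that alpha * beta^2 is roughly 2,
        # and both coefficients are then raised to the power phi below.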
if beta is None:
beta = round(sqrt(2 / alpha), 3)
alpha = alpha ** phi
beta = beta ** phi
model = DenseNet121Eff(alpha, beta, class_count)
else:
model = DenseNet121(class_count)
# Train the Model
savepath = args.spath
rsna_trainer = RSNATrainer(
model, data_loader_train, data_loader_valid, data_loader_test,
class_count,checkpoint, device, class_names, lr)
rsna_trainer.train(max_epoch, savepath)
print("Model trained !")
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--lr",
required=False,
help="Learning rate",
default=1e-4,
type = float)
parser.add_argument("--checkpoint",
required=False,
help="Checkpoint model weight",
default= None,
type = str)
parser.add_argument("--bs",
required=False,
default=16,
help="Batchsize",
type=int)
parser.add_argument("--dpath",
required=True,
help="Path to folder containing all data",
type =str)
parser.add_argument("--epochs",
required=False,
default=15,
help="Number of epochs",
type=int)
parser.add_argument("--clscount",
required=False,
default=3,
help="Number of classes",
type=int)
parser.add_argument("--spath",
required=True,
help="Path to folder in which models should be saved",
type =str)
parser.add_argument("--optimised",
required=False, default=False,
help="enable flag->eff model",
action='store_true')
parser.add_argument("--alpha",
required=False,
help="alpha for the model",
default=(11 / 6),
type=float)
parser.add_argument("--phi",
required=False,
help="Phi for the model.",
default=1.0,
type=float)
parser.add_argument("--beta",
required=False,
help="Beta for the model.",
default=None,
type=float)
custom_args = parser.parse_args()
main(custom_args)
| 2.21875 | 2 |
NeuralStyleTransferSrc/overide_fun.py | vonlippmann/Deep-_Style_Transfer | 2 | 12798789 | <reponame>vonlippmann/Deep-_Style_Transfer
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class self_QlineEdit(QtWidgets.QLineEdit):
clicked = pyqtSignal()
def __init__(self, parent=None):
super(self_QlineEdit, self).__init__(parent)
def event(self, event):
if event.type() == QEvent.MouseButtonPress:
mouseEvent = QMouseEvent(event)
if mouseEvent.buttons() == Qt.LeftButton:
self.clicked.emit()
return QtWidgets.QLineEdit.event(self,event) | 2.1875 | 2 |
tmp/test.py | AeneasHe/eth-brownie-enhance | 1 | 12798790 | <reponame>AeneasHe/eth-brownie-enhance<filename>tmp/test.py
import os
import shutil
raw_path = "/Library/Frameworks/Python.framework/Versions/3.9/lib/python3.9/site-packages/eth_brownie-1.15.0-py3.9.egg/brownie/project/brownie_project"
install_path = "/Users/aeneas/.brownie/packages/brownie/[email protected]"
shutil.rmtree(install_path)
# os.makedirs(install_path)
shutil.copytree(raw_path, install_path) | 1.757813 | 2 |
cyberdb/extensions/nonce.py | Cyberbolt/CyberDB | 1 | 12798791 | <reponame>Cyberbolt/CyberDB<filename>cyberdb/extensions/nonce.py
import random
seed = '<KEY>'
def generate(num: int):
'''
    Generate a random string of num characters drawn from seed.
'''
text = ''
for i in range(num):
text += random.choice(seed)
return text
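# Usage sketch: generate(16) returns a single 16-character string drawn from `seed`.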
| 2.53125 | 3 |
publisher/conf.py | hongsups/scipy_proceedings | 1 | 12798792 | <filename>publisher/conf.py
import glob
import os
import io
excludes = ['vanderwalt', 'bibderwalt']
# status_file_root possible values: draft, conference, ready
status_file_base = 'draft'
status_file_name = ''.join([status_file_base, '.sty'])
work_dir = os.path.dirname(__file__)
papers_dir = os.path.join(work_dir, '../papers')
output_dir = os.path.join(work_dir, '../output')
template_dir = os.path.join(work_dir, '_templates')
static_dir = os.path.join(work_dir, '_static')
css_file = os.path.join(static_dir, 'scipy-proc.css')
toc_list = os.path.join(static_dir, 'toc.txt')
build_dir = os.path.join(work_dir, '_build')
pdf_dir = os.path.join(build_dir, 'pdfs')
html_dir = os.path.join(build_dir, 'html')
bib_dir = os.path.join(html_dir, 'bib')
toc_conf = os.path.join(build_dir, 'toc.json')
proc_conf = os.path.join(work_dir, '../scipy_proc.json')
xref_conf = os.path.join(build_dir, 'doi_batch.xml')
status_file = os.path.join(static_dir, status_file_name)
if os.path.isfile(toc_list):
with io.open(toc_list, 'r', encoding='utf-8') as f:
dirs = f.read().splitlines()
else:
dirs = sorted([os.path.basename(d)
for d in glob.glob('%s/*' % papers_dir)
if os.path.isdir(d) and not any(e in d for e in excludes)])
| 1.90625 | 2 |
run/packet_manager.py | insidus341/Packet-Capture-Analyzer | 0 | 12798793 | <filename>run/packet_manager.py
class PacketManager:
def __init__(self):
self.packets = []
    def add_packet(self, packet):
        # wrap the raw packet in a Packet object before storing it
        self.packets.append(Packet(packet))
    def read_packet(self, number):
        try:
            packet = self.packets[number]
            return packet.get_packet()
        except IndexError:
            return False
class Packet:
def __init__(self, packet):
self._packet = packet
def get_packet(self):
return self._packet
| 3.203125 | 3 |
demo/site/index.html.py | leafcoder/litefs | 2 | 12798794 | def handler(self):
self.start_response(200)
return ['Hello World'] | 1.601563 | 2 |
pygama/dsp/_processors/time_point_thresh.py | iguinn/pygama | 13 | 12798795 | import numpy as np
from numba import guvectorize
from pygama.dsp.errors import DSPFatal
@guvectorize(["void(float32[:], float32, float32, float32, float32[:])",
"void(float64[:], float64, float64, float64, float64[:])"],
"(n),(),(),()->()", nopython=True, cache=True)
def time_point_thresh(w_in, a_threshold, t_start, walk_forward, t_out):
"""
Find the index where the waveform value crosses the threshold,
walking either forward or backward from the starting index.
Parameters
----------
w_in : array-like
The input waveform
a_threshold : float
The threshold value
t_start : int
The starting index
walk_forward: int
The backward (0) or forward (1) search direction
t_out : float
The index where the waveform value crosses the threshold
Processing Chain Example
------------------------
"tp_0": {
"function": "time_point_thresh",
"module": "pygama.dsp.processors",
"args": ["wf_atrap", "bl_std", "tp_start", 0, "tp_0"],
"unit": "ns",
"prereqs": ["wf_atrap", "bl_std", "tp_start"]
}
"""
t_out[0] = np.nan
if np.isnan(w_in).any() or np.isnan(a_threshold) or np.isnan(t_start) or np.isnan(walk_forward):
return
if np.floor(t_start) != t_start:
raise DSPFatal('The starting index must be an integer')
if np.floor(walk_forward) != walk_forward:
raise DSPFatal('The search direction must be an integer')
if int(t_start) < 0 or int(t_start) >= len(w_in):
raise DSPFatal('The starting index is out of range')
if int(walk_forward) == 1:
for i in range(int(t_start), len(w_in) - 1, 1):
if w_in[i] <= a_threshold < w_in[i+1]:
t_out[0] = i
return
else:
for i in range(int(t_start), 1, -1):
if w_in[i-1] < a_threshold <= w_in[i]:
t_out[0] = i
return
| 2.6875 | 3 |
Numbers/natural.py | rohanrajkamal/pythonexamples | 0 | 12798796 | input1=int(input("enter a number:"))
print("prints the range of natural numbers")
for i in range(1, input1+1):
print("%d" % (i))`` | 4.03125 | 4 |
aiocometd/_metadata.py | robertmrk/aiocometd | 14 | 12798797 | <gh_stars>10-100
"""Package metadata"""
TITLE = "aiocometd"
DESCRIPTION = "CometD client for asyncio"
KEYWORDS = "asyncio aiohttp comet cometd bayeux push streaming"
URL = "https://github.com/robertmrk/aiocometd"
PROJECT_URLS = {
"CI": "https://travis-ci.org/robertmrk/aiocometd",
"Coverage": "https://coveralls.io/github/robertmrk/aiocometd",
"Docs": "http://aiocometd.readthedocs.io/"
}
VERSION = "0.4.5"
AUTHOR = "<NAME>"
AUTHOR_EMAIL = "<EMAIL>"
| 0.996094 | 1 |
handler/mlsklearn/model_selection/model_selection_handler.py | daliuzhen1/BIServiceForRest | 3 | 12798798 | import tornado
import json
import uuid
import pandas as pd
from handler.mlsklearn.util import regqeust_arg_to_sklearn_arg
from sklearn.model_selection import train_test_split
from data.persistence import *
from data.data_source import DataSource
from data.data_storage import DataStorage
class TrainTestSplitHandler(tornado.web.RequestHandler):
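    # Example request body (hypothetical IDs and column names):
    # {"dataID": "abc123", "dataColumnNames": "age,bmi", "targetColumnName": "label",
    #  "sklearn": {"test_size": 0.2, "random_state": 42}}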
def post(self):
try:
json_data = json.loads(self.request.body)
data_id = json_data.get('dataID', None)
if data_id == None:
raise Exception("please input data_id")
data_column_names = json_data.get('dataColumnNames', None)
if data_column_names == None:
raise Exception("please input dataColumnNames")
target_column_name = json_data.get('targetColumnName', None)
if target_column_name == None:
raise Exception("please input targetColumnName")
sklearn_arg = json_data.get('sklearn', None)
data_obj = DataStorage.get_data_obj_by_data_id(data_id)
if data_obj:
data_column_names = data_column_names.split(',')
data = data_obj.pandas_data[data_column_names]
target = data_obj.pandas_data[target_column_name]
data = data.values
target = target.values
sklearn_arg = regqeust_arg_to_sklearn_arg(sklearn_arg, ['test_size', 'random_state'])
arrays = [data, target]
X_train, X_test, y_train, y_test = train_test_split(*arrays, **sklearn_arg)
X_train = pd.DataFrame(X_train, columns=data_column_names)
data_obj_X_train = DataStorage.create_data_obj_by_pandas_data(X_train)
X_test = pd.DataFrame(X_test, columns=data_column_names)
data_obj_X_test = DataStorage.create_data_obj_by_pandas_data(X_test)
y_train = pd.DataFrame(y_train, columns=[target_column_name])
data_obj_y_train = DataStorage.create_data_obj_by_pandas_data(y_train)
y_test = pd.DataFrame(y_test, columns=[target_column_name])
data_obj_y_test = DataStorage.create_data_obj_by_pandas_data(y_test)
result = {}
result_X_train = {}
result_X_train['dataID'] = data_obj_X_train.data_id
result_X_train['columnNames'] = data_obj_X_train.column_names
result_X_test = {}
result_X_test['dataID'] = data_obj_X_test.data_id
result_X_test['columnNames'] = data_obj_X_test.column_names
result_y_train = {}
result_y_train['dataID'] = data_obj_y_train.data_id
result_y_train['columnNames'] = data_obj_y_train.column_names
result_y_test = {}
result_y_test['dataID'] = data_obj_y_test.data_id
result_y_test['columnNames'] = data_obj_y_test.column_names
result['X_train'] = result_X_train
result['X_test'] = result_X_test
result['y_train'] = result_y_train
result['y_test'] = result_y_test
self.write(json.dumps(result))
else:
raise Exception("invalid source_id")
except Exception as e:
self.write(str(e)) | 2.375 | 2 |
src/multipageforms/forms/multipageform.py | kaleissin/django-multipageforms | 4 | 12798799 | <reponame>kaleissin/django-multipageforms
from __future__ import unicode_literals
from collections import OrderedDict
import logging
import copy
LOGGER = logging.getLogger(__name__)
class MultiPageForm(object):
help_text = ''
percentage_done = 0.0
def __init__(self, data=None, files=None, initial=None, **kwargs):
self.initial = initial
self.kwargs = kwargs
self.pageclasses = OrderedDict([(page.slug, page) for page in self.pages])
self.pages = None
self.data = data
self.files = files
self.initialize(self.initial)
self.bind(data=data, files=files, initial=initial)
def file_fields(self):
if self.is_multipart():
for form in self.pages.values():
for field in form.file_fields():
yield field
def first_page(self):
if not self.is_initialized:
self.initialize()
first_slug = tuple(self.pages.keys())[0]
return self.pages[first_slug]
def last_page(self):
if not self.is_initialized:
self.initialize()
last_slug = tuple(self.pages.keys())[-1]
return self.pages[last_slug]
def next_page(self, slug):
"""
Returns next page if any, None otherwise.
        Raises ValueError if slug does not exist in the pages
"""
if not self.is_initialized:
self.initialize()
page_slugs = tuple(self.pages.keys())
current_index = page_slugs.index(slug) # ValueError
next_index = current_index + 1
if next_index > len(page_slugs) - 1:
# No more pages
return None
next_slug = page_slugs[next_index]
return self.pages[next_slug]
def prev_page(self, slug):
"""
Returns prev page if any, None otherwise.
        Raises ValueError if slug does not exist in the pages
"""
if not self.is_initialized:
self.initialize()
page_slugs = tuple(self.pages.keys())
current_index = page_slugs.index(slug) # ValueError
prev_index = current_index - 1
if prev_index < 0:
# No more pages
return None
prev_slug = page_slugs[prev_index]
return self.pages[prev_slug]
def initialize(self, initial=None, **kwargs):
pages = []
self.initial = self.initial or initial
page_initial = {}
kwargs = self.kwargs.copy()
if 'initial' in kwargs:
kwargs.pop('initial')
for slug, PageClass in self.pageclasses.items():
if self.initial:
page_initial = self.initial.get(slug, {})
page = PageClass().initialize(initial=page_initial, **kwargs)
pages.append((slug, page))
self.pages = OrderedDict(pages)
return self
def bind(self, data=None, files=None, initial=None):
pages = []
for slug, Page in self.pageclasses.items():
page = Page().bind(data=data, files=files, initial=None)
pages.append((slug, page))
self.pages = OrderedDict(pages)
self.data = data
self.files = files
self.initial = initial or self.initial
return self
@property
def is_initialized(self):
return self.pages is not None
@property
def is_bound(self):
if self.is_initialized:
return bool(self.data or self.files)
return False
def is_multipart(self):
return any(page.is_multipart() for page in self.pages.values())
def is_valid(self):
if self.is_initialized:
pages_done = sum(bool(page.is_valid()) for page in self.pages.values())
num_pages = len(self.pages)
self.percentage_done = 100 * pages_done / float(num_pages)
return pages_done == num_pages
return None
@property
def cleaned_data(self):
cleaned_data = {}
if self.is_bound:
for slug, page in self.pages.items():
page.is_valid()
cleaned_data[slug] = page.cleaned_data
return cleaned_data
def preview(self):
lines = []
for page in self.pages.values():
lines.append(page.preview())
return lines
def get_initial_data(self):
initial = {}
if self.is_initialized:
for slug, page in self.pages.items():
page.is_valid()
initial[slug] = page.get_initial_data()
return initial
def get_data(self):
return self.data
| 2.34375 | 2 |
Portfolio_Strategies/vectorized_backtesting.py | vhn0912/Finance | 441 | 12798800 | import numpy as np
import pandas as pd
import yfinance as yf
import matplotlib.pyplot as plt
import datetime
from yahoo_fin import stock_info as si
plt.rcParams['figure.figsize'] = (15, 10)
tickers = si.tickers_dow()
individual_stock = input(f"Which of the following stocks would you like to backtest \n{tickers}\n:")
num_of_years = 1
start = datetime.date.today() - datetime.timedelta(days = int(365.25*num_of_years))
yf_prices = yf.download(tickers, start=start)
# Individual Stock Strategy
prices = yf_prices['Adj Close'][individual_stock]
rs = prices.apply(np.log).diff(1).fillna(0)
w1 = 5
w2 = 22
ma_x = prices.rolling(w1).mean() - prices.rolling(w2).mean()
pos = ma_x.apply(np.sign)
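# The sign of the short-minus-long MA spread is the position signal:
# +1 (long) while the 5-day MA is above the 22-day MA, -1 (short) otherwise.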
fig, ax = plt.subplots(2,1)
ma_x.plot(ax=ax[0], title=f'{individual_stock} Moving Average Crossovers and Positions')
pos.plot(ax=ax[1])
plt.show()
my_rs = pos.shift(1)*rs
plt.subplots()
my_rs.cumsum().apply(np.exp).plot(title=f'{individual_stock} MA Strategy Performance')
rs.cumsum().apply(np.exp).plot()
plt.legend([f'{individual_stock} MA Performance', f'{individual_stock} Buy and Hold Performance'])
plt.show()
print (f'Performance Statistics for {individual_stock} ({num_of_years} years):')
print ('Moving Average Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Buy and Hold Return: ' + str(100 * round(rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
# Full Portfolio Strategy
prices = yf_prices['Adj Close']
rs = prices.apply(np.log).diff(1).fillna(0)
w1 = 5
w2 = 22
ma_x = prices.rolling(w1).mean() - prices.rolling(w2).mean()
pos = ma_x.apply(np.sign)
pos /= pos.abs().sum(1).values.reshape(-1,1)
fig, ax = plt.subplots(2,1)
ma_x.plot(ax=ax[0], title='Individual Moving Average Crossovers and Positions')
ax[0].legend(bbox_to_anchor=(1.1, 1.05))
pos.plot(ax=ax[1])
ax[1].legend(bbox_to_anchor=(1.1, 1.05))
plt.show()
my_rs = (pos.shift(1)*rs)
my_rs.cumsum().apply(np.exp).plot(title='Individual Stocks Strategy Performance')
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {num_of_years} years:')
for i in range(len(tickers)):
print (f'Moving Average Return for {tickers[i]}: ' + str(100 * round(my_rs.cumsum().apply(np.exp)[tickers[i]].tolist()[-1], 4)) + '%')
i = i + 1
plt.subplots()
my_rs = (pos.shift(1)*rs).sum(1)
my_rs.cumsum().apply(np.exp).plot(title='Full Portfolio Strategy Performance')
rs.mean(1).cumsum().apply(np.exp).plot()
plt.legend(['Portfolio MA Performance', 'Buy and Hold Performance'])
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('Moving Average Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Buy and Hold Return: ' + str(100 * round(rs.mean(1).cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
# Portfolio Tests
# Look-Ahead Bias
my_rs1 = (pos*rs).sum(1)
my_rs2 = (pos.shift(1)*rs).sum(1)
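# Shifting positions by one day means each day's return is earned on the previous
# day's signal, so the strategy never trades on information it did not yet have.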
plt.subplots()
my_rs1.cumsum().apply(np.exp).plot(title='Full Portfolio Performance')
my_rs2.cumsum().apply(np.exp).plot()
plt.legend(['With Look-Ahead Bias', 'Without Look-Ahead Bias'])
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('With Look-Ahead Bias: ' + str(100 * round(my_rs1.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('Without Look-Ahead Bias: ' + str(100 * round(my_rs2.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
# Signal Lags
lags = range(1, 11)
lagged_rs = pd.Series(dtype=float, index=lags)
print ('-' * 60)
print (f'Lag Performance Statistics for {tickers} ({num_of_years} years):')
for lag in lags:
my_rs = (pos.shift(lag)*rs).sum(1)
my_rs.cumsum().apply(np.exp).plot()
lagged_rs[lag] = my_rs.sum()
print (f'Lag {lag} Return: ' + str(100 * round(my_rs.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
plt.title('Full Portfolio Strategy Performance with Lags')
plt.legend(lags, bbox_to_anchor=(1.1, 0.95))
plt.show()
# Transaction Costs
tc_pct = 0.01
delta_pos = pos.diff(1).abs().sum(1)
my_tcs = tc_pct*delta_pos
my_rs1 = (pos.shift(1)*rs).sum(1)
my_rs2 = (pos.shift(1)*rs).sum(1) - my_tcs
plt.subplots()
my_rs1.cumsum().apply(np.exp).plot()
my_rs2.cumsum().apply(np.exp).plot()
plt.title('Full Portfolio Performance')
plt.legend(['Without Transaction Costs', 'With Transaction Costs'])
plt.show()
print ('-' * 60)
print (f'Performance Statistics for {tickers} ({num_of_years} years):')
print ('Without Transaction Costs: ' + str(100 * round(my_rs1.cumsum().apply(np.exp).tolist()[-1], 4)) + '%')
print('With Transaction Costs: ' + str(100 * round(my_rs2.cumsum().apply(np.exp).tolist()[-1], 4)) + '%') | 3.0625 | 3 |
horseModuleCore/ar_logger.py | TNO/horse-module-core | 0 | 12798801 | # -*- coding: utf-8 -*-
"""
Created on Tue May 22 14:07:42 2018
@author: HORSE
"""
import logging
import logging.handlers
import os
def ARLogger(log_filename = 'log.txt'):
# if not os.path.exists('logs'):
# os.makedirs('logs')
fmt = '%(asctime)s %(levelname)s %(message)s'
datefmt = '%Y-%m-%d %H:%M:%S'
ar_logger = logging.getLogger('ARLogger')
ar_logger.setLevel(logging.INFO)
handler = logging.handlers.RotatingFileHandler(
log_filename, maxBytes=10000*4, backupCount=5)
formatter = logging.Formatter(fmt, datefmt)
handler.setFormatter(formatter)
ar_logger.addHandler(handler)
return ar_logger | 2.796875 | 3 |
integrations/pinger/pinger.py | hamptons/alerta-contrib | 114 | 12798802 | <filename>integrations/pinger/pinger.py
import sys
import platform
import time
import subprocess
import threading
import Queue
import re
import logging
import yaml
from alertaclient.api import Client
__version__ = '3.3.0'
LOG = logging.getLogger('alerta.pinger')
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
PING_FILE = 'alert-pinger.targets'
PING_MAX_TIMEOUT = 15 # seconds
PING_MAX_RETRIES = 2
PING_SLOW_WARNING = 200 # ms
PING_SLOW_CRITICAL = 500 # ms
SERVER_THREAD_COUNT = 20
LOOP_EVERY = 30
_PING_ALERTS = [
'PingFailed',
'PingSlow',
'PingOK',
'PingError',
]
PING_OK = 0 # all ping replies received within timeout
PING_FAILED = 1 # some or all ping replies not received or did not respond within timeout
PING_ERROR = 2 # unspecified error with ping
# Initialise Rules
def init_targets():
targets = list()
LOG.info('Loading Ping targets...')
try:
targets = yaml.load(open(PING_FILE))
except Exception as e:
LOG.error('Failed to load Ping targets: %s', e)
LOG.info('Loaded %d Ping targets OK', len(targets))
return targets
class WorkerThread(threading.Thread):
def __init__(self, api, queue):
threading.Thread.__init__(self)
LOG.debug('Initialising %s...', self.getName())
self.last_event = {}
self.queue = queue # internal queue
self.api = api # message broker
def run(self):
while True:
LOG.debug('Waiting on input queue...')
item = self.queue.get()
if not item:
LOG.info('%s is shutting down.', self.getName())
break
environment, service, resource, retries, queue_time = item
if time.time() - queue_time > LOOP_EVERY:
LOG.warning('Ping request to %s expired after %d seconds.', resource, int(time.time() - queue_time))
self.queue.task_done()
continue
LOG.info('%s pinging %s...', self.getName(), resource)
if retries > 1:
rc, rtt, loss, stdout = self.pinger(resource, count=2, timeout=5)
else:
rc, rtt, loss, stdout = self.pinger(resource, count=5, timeout=PING_MAX_TIMEOUT)
if rc != PING_OK and retries:
LOG.info('Retrying ping %s %s more times', resource, retries)
self.queue.put((environment, service, resource, retries - 1, time.time()))
self.queue.task_done()
continue
if rc == PING_OK:
avg, max = rtt
if avg > PING_SLOW_CRITICAL:
event = 'PingSlow'
severity = 'critical'
text = 'Node responded to ping in %s ms avg (> %s ms)' % (avg, PING_SLOW_CRITICAL)
elif avg > PING_SLOW_WARNING:
event = 'PingSlow'
severity = 'warning'
text = 'Node responded to ping in %s ms avg (> %s ms)' % (avg, PING_SLOW_WARNING)
else:
event = 'PingOK'
severity = 'normal'
text = 'Node responding to ping avg/max %s/%s ms.' % tuple(rtt)
value = '%s/%s ms' % tuple(rtt)
elif rc == PING_FAILED:
event = 'PingFailed'
severity = 'major'
text = 'Node did not respond to ping or timed out within %s seconds' % PING_MAX_TIMEOUT
value = '%s%% packet loss' % loss
elif rc == PING_ERROR:
event = 'PingError'
severity = 'warning'
text = 'Could not ping node %s.' % resource
value = stdout
else:
LOG.warning('Unknown ping return code: %s', rc)
continue
# Defaults
resource += ':icmp'
group = 'Ping'
correlate = _PING_ALERTS
raw_data = stdout
try:
self.api.send_alert(
resource=resource,
event=event,
correlate=correlate,
group=group,
value=value,
severity=severity,
environment=environment,
service=service,
text=text,
event_type='serviceAlert',
raw_data=raw_data,
)
except Exception as e:
LOG.warning('Failed to send alert: %s', e)
self.queue.task_done()
LOG.info('%s ping %s complete.', self.getName(), resource)
self.queue.task_done()
@staticmethod
def pinger(node, count=1, interval=1, timeout=5):
if timeout <= count * interval:
timeout = count * interval + 1
if timeout > PING_MAX_TIMEOUT:
timeout = PING_MAX_TIMEOUT
if sys.platform == "darwin":
cmd = "ping -q -c %s -i %s -t %s %s" % (count, interval, timeout, node)
else:
cmd = "ping -q -c %s -i %s -w %s %s" % (count, interval, timeout, node)
ping = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout = ping.communicate()[0].rstrip('\n')
rc = ping.returncode
LOG.debug('Ping %s => %s (rc=%d)', cmd, stdout, rc)
m = re.search('(?P<loss>\d+(\.\d+)?)% packet loss', stdout)
if m:
loss = m.group('loss')
else:
loss = 'n/a'
m = re.search('(?P<min>\d+\.\d+)/(?P<avg>\d+\.\d+)/(?P<max>\d+\.\d+)/(?P<mdev>\d+\.\d+)\s+ms', stdout)
if m:
rtt = (float(m.group('avg')), float(m.group('max')))
else:
rtt = (0, 0)
if rc == 0:
LOG.info('%s: is alive %s', node, rtt)
else:
LOG.info('%s: not responding', node)
return rc, rtt, loss, stdout
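    # Illustrative parse (hypothetical output from a Linux `ping -q`):
    #   "5 packets transmitted, 5 received, 0% packet loss, time 4004ms"
    #   "rtt min/avg/max/mdev = 0.045/0.051/0.062/0.008 ms"
    # would yield rc=0, rtt=(0.051, 0.062) (avg, max) and loss='0'.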
class PingerDaemon(object):
def __init__(self):
self.shuttingdown = False
def run(self):
self.running = True
# Create internal queue
self.queue = Queue.Queue()
self.api = Client()
# Initialiase ping targets
ping_list = init_targets()
# Start worker threads
LOG.debug('Starting %s worker threads...', SERVER_THREAD_COUNT)
for i in range(SERVER_THREAD_COUNT):
w = WorkerThread(self.api, self.queue)
try:
w.start()
except Exception as e:
LOG.error('Worker thread #%s did not start: %s', i, e)
continue
LOG.info('Started worker thread: %s', w.getName())
while not self.shuttingdown:
try:
for p in ping_list:
if 'targets' in p and p['targets']:
for target in p['targets']:
environment = p['environment']
service = p['service']
retries = p.get('retries', PING_MAX_RETRIES)
self.queue.put((environment, service, target, retries, time.time()))
LOG.debug('Send heartbeat...')
try:
origin = '{}/{}'.format('pinger', platform.uname()[1])
self.api.heartbeat(origin, tags=[__version__])
except Exception as e:
LOG.warning('Failed to send heartbeat: %s', e)
time.sleep(LOOP_EVERY)
LOG.info('Ping queue length is %d', self.queue.qsize())
except (KeyboardInterrupt, SystemExit):
self.shuttingdown = True
LOG.info('Shutdown request received...')
self.running = False
for i in range(SERVER_THREAD_COUNT):
self.queue.put(None)
w.join()
def main():
pinger = PingerDaemon()
pinger.run()
if __name__ == '__main__':
main()
| 2.328125 | 2 |
desky/layout/grid.py | noelbenz/desky | 0 | 12798803 |
import unittest
from desky.rect import Rect
from desky.panel import Panel
from enum import Enum
from functools import reduce, partial
from toolz.dicttoolz import valfilter
# | Type of sizing | Maximum extra width allocation
# --------------------------------------------------------------
# | Fixed (200 px) | 0px
# | Child (use child size) | 0px
# | Percentage (30% of width) | 1px
# | Custom (custom function) | configurable
# | Even (equally divide) | 1px
# | Fill (use remaining space) | any
#
# The types of sizing in the table above are ordered in evaluation priority.
# Fixed, Child, and Percentage sizings are evaluated first. Custom is then
# evaluated and is given the total area size and the remaining size as its arguments. Even is
# evaluated next. Even panels will split remaining space evenly between
# themselves. Fill evaluates last and will take the remaining space.
#
# If the resulting layout exceeds the bounds of the parent, it is up to the
# parent to decide if it should resize.
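#
# A minimal illustration of mixing these sizing types (hypothetical panel and sizes):
#
#   grid = GridLayout(column_count=3, row_count=1)
#   grid.set_fixed_column_sizing(0, 120)        # evaluated first
#   grid.set_percentage_column_sizing(1, 0.25)  # evaluated in the first group as well
#   grid.set_fill_column_sizing(2)              # takes whatever width remains
#   grid.layout(parent)                         # parent is any Panel whose children were added via grid.add(...)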
def zero_func():
return 0
class GridLayout:
FIXED = 0
CHILD = 1
PERCENTAGE = 2
CUSTOM = 3
EVEN = 4
FILL = 5
def __init__(self, *, column_count = 1, row_count = 1, spacing = 0):
self.panels = dict()
self.column_sizings = dict()
self.row_sizings = dict()
self.column_count = column_count
self.row_count = row_count
self.spacing = spacing
def add(self, panel, column, row, column_count=1, row_count=1):
self.add_rect(panel, Rect(column, row, column_count, row_count))
def add_rect(self, panel, rect):
assert(rect.x >= 0)
assert(rect.y >= 0)
assert(rect.right <= self.column_count)
assert(rect.bottom <= self.row_count)
assert(self.area_empty(rect))
self.panels[rect.frozen_copy()] = panel
def remove(self, panel):
self.panels = valfilter(lambda p: p != panel, self.panels)
def clear(self, *, remove_panels):
if remove_panels:
for panel in self.panels.values():
panel.remove()
self.panels = dict()
def area_empty(self, rect):
for rect_other in self.panels.keys():
if rect.intersects(rect_other):
return False
return True
def set_fixed_column_sizing(self, column, size):
self.column_sizings[column] = (self.FIXED, size)
def set_fixed_row_sizing(self, row, size):
self.row_sizings[row] = (self.FIXED, size)
def set_child_column_sizing(self, column):
self.column_sizings[column] = (self.CHILD,)
def set_child_row_sizing(self, row):
self.row_sizings[row] = (self.CHILD,)
def set_percentage_column_sizing(self, column, percentage):
self.column_sizings[column] = (self.PERCENTAGE, percentage)
def set_percentage_row_sizing(self, row, percentage):
self.row_sizings[row] = (self.PERCENTAGE, percentage)
def set_custom_column_sizing(self, column, sizing_func, extra_func=zero_func):
self.column_sizings[column] = (self.CUSTOM, sizing_func, extra_func)
def set_custom_row_sizing(self, row, sizing_func, extra_func=zero_func):
self.row_sizings[row] = (self.CUSTOM, sizing_func, extra_func)
def set_even_column_sizing(self, column):
self.column_sizings[column] = (self.EVEN,)
def set_even_row_sizing(self, row):
self.row_sizings[row] = (self.EVEN,)
def set_fill_column_sizing(self, column):
self.column_sizings[column] = (self.FILL,)
def set_fill_row_sizing(self, row):
self.row_sizings[row] = (self.FILL,)
def widest_child_in_column(self, column):
column_rect = Rect(column, 0, 1, self.row_count)
rect_panel_tuples_that_intersect_column = list(
filter(
lambda rect_panel_tuple: rect_panel_tuple[0].intersects(column_rect),
self.panels.items()))
def calculate_width(rect_panel_tuple):
rect, panel = rect_panel_tuple
            # In case a panel spans multiple columns, determine the width as a
            # proportional amount.
return int((panel.rect_outer.w - (rect.w - 1) * self.spacing) / rect.w)
return reduce(max, map(calculate_width, rect_panel_tuples_that_intersect_column), 0)
def tallest_child_in_row(self, row):
row_rect = Rect(0, row, self.column_count, 1)
rect_panel_tuples_that_intersect_row = list(
filter(
lambda rect_panel_tuple: rect_panel_tuple[0].intersects(row_rect),
self.panels.items()))
def calculate_height(rect_panel_tuple):
rect, panel = rect_panel_tuple
# In case a panel spans multiple rows, determine the height as a
# proportional amount.
return int((panel.rect_outer.h - (rect.h - 1) * self.spacing) / rect.h)
return reduce(max, map(calculate_height, rect_panel_tuples_that_intersect_row), 0)
def layout(self, panel):
area = (panel.rect_inner
.move(-panel.x, -panel.y)
.shrink(
0,
0,
(self.column_count - 1) * self.spacing,
(self.row_count - 1) * self.spacing)
)
column_sizings_by_type = dict()
row_sizings_by_type = dict()
# Group columns and rows by their sizing types while preserving the order.
for column in range(self.column_count):
sizing = self.column_sizings.get(column, (self.EVEN,))
group = column_sizings_by_type.get(sizing[0], list())
group.append((column, sizing))
column_sizings_by_type[sizing[0]] = group
for row in range(self.row_count):
sizing = self.row_sizings.get(row, (self.EVEN,))
group = row_sizings_by_type.get(sizing[0], list())
group.append((row, sizing))
row_sizings_by_type[sizing[0]] = group
# Determine column widths and row heights.
column_widths = [0 for _ in range(self.column_count)]
row_heights = [0 for _ in range(self.row_count)]
def calculate_fixed_sizes(sizings_by_type, sizes):
for sizing_tuple in sizings_by_type.get(self.FIXED, []):
column_or_row, sizing = sizing_tuple
sizes[column_or_row] = sizing[1]
calculate_fixed_sizes(column_sizings_by_type, column_widths)
calculate_fixed_sizes(row_sizings_by_type, row_heights)
def calculate_child_sizes(sizings_by_type, sizes, largest_func):
for sizing_tuple in sizings_by_type.get(self.CHILD, []):
column_or_row, _ = sizing_tuple
sizes[column_or_row] = largest_func(column_or_row)
calculate_child_sizes(column_sizings_by_type, column_widths, self.widest_child_in_column)
calculate_child_sizes(row_sizings_by_type, row_heights, self.tallest_child_in_row)
def calculate_percentage_sizes(sizings_by_type, sizes, area_size):
for sizing_tuple in sizings_by_type.get(self.PERCENTAGE, []):
column_or_row, sizing = sizing_tuple
sizes[column_or_row] = int(area_size * sizing[1])
calculate_percentage_sizes(column_sizings_by_type, column_widths, area.w)
calculate_percentage_sizes(row_sizings_by_type, row_heights, area.h)
def calculate_custom_sizes(sizings_by_type, sizes, area_size, remaining_size):
for sizing_tuple in sizings_by_type.get(self.CUSTOM, []):
column_or_row, sizing = sizing_tuple
sizes[column_or_row] = int(sizing[1](area_size, remaining_size))
calculate_custom_sizes(column_sizings_by_type, column_widths, area.w, area.w - sum(column_widths))
calculate_custom_sizes(row_sizings_by_type, row_heights, area.h, area.h - sum(row_heights))
def calculate_even_sizes(sizings_by_type, sizes, remaining_size):
size = int(remaining_size / len(sizings_by_type.get(self.EVEN, [1])))
for sizing_tuple in sizings_by_type.get(self.EVEN, []):
column_or_row, _ = sizing_tuple
sizes[column_or_row] = size
calculate_even_sizes(
column_sizings_by_type,
column_widths,
area.w - sum(column_widths))
calculate_even_sizes(
row_sizings_by_type,
row_heights,
area.h - sum(row_heights))
fill_columns = column_sizings_by_type.get(self.FILL, [])
if fill_columns:
column_widths[fill_columns[0][0]] = area.w - sum(column_widths)
fill_rows = row_sizings_by_type.get(self.FILL, [])
if fill_rows:
row_heights[fill_rows[0][0]] = area.h - sum(row_heights)
# Allocate extra width and height to columns and rows.
extra_width = max(area.w - sum(column_widths), 0)
extra_height = max(area.h - sum(row_heights), 0)
def allocate_extra_percentage(sizings_by_type, sizes, extra):
for sizing_tuple in sizings_by_type.get(self.PERCENTAGE, []):
column_or_row, _ = sizing_tuple
amount = min(extra, 1)
sizes[column_or_row] += amount
extra -= amount
return extra
extra_width = allocate_extra_percentage(column_sizings_by_type, column_widths, extra_width)
extra_height = allocate_extra_percentage(row_sizings_by_type, row_heights, extra_height)
def allocate_extra_custom(sizings_by_type, sizes, extra):
for sizing_tuple in sizings_by_type.get(self.CUSTOM, []):
column_or_row, sizing = sizing_tuple
amount = int(sizing[2](extra))
sizes[column_or_row] += amount
extra -= amount
return extra
extra_width = allocate_extra_custom(column_sizings_by_type, column_widths, extra_width)
extra_height = allocate_extra_custom(row_sizings_by_type, row_heights, extra_height)
def allocate_extra_even(sizings_by_type, sizes, extra):
for sizing_tuple in sizings_by_type.get(self.EVEN, []):
column_or_row, _ = sizing_tuple
amount = min(extra, 1)
sizes[column_or_row] += amount
extra -= amount
return extra
extra_width = allocate_extra_even(column_sizings_by_type, column_widths, extra_width)
extra_height = allocate_extra_even(row_sizings_by_type, row_heights, extra_height)
# Save column widths and row heights for users to access.
self.column_widths = column_widths
self.row_heights = row_heights
# Position child panels.
for rect, panel in self.panels.items():
x = area.x + sum(column_widths[:rect.x]) + rect.x * self.spacing
y = area.y + sum(row_heights[:rect.y]) + rect.y * self.spacing
width = sum(column_widths[rect.x:rect.right]) + (rect.w - 1) * self.spacing
height = sum(row_heights[rect.y:rect.bottom]) + (rect.h - 1) * self.spacing
panel.rect_outer = Panel.Rect(x, y, width, height)
class GridLayoutTest(unittest.TestCase):
def setUp(self):
from desky.gui import Gui
self.gui = Gui()
self.parent = self.gui.create(Panel)
self.parent.size = (200, 300)
self.parent.padding = (2, 3, 4, 5)
self.parent.margins = (20, 30, 40, 50)
def test_tallest_child_in_column_or_row_1(self):
grid = GridLayout(column_count=5, row_count=5, spacing=3)
for column in range(grid.column_count):
grid.set_child_column_sizing(column)
for row in range(grid.row_count):
grid.set_child_row_sizing(row)
self.assertEqual(0, grid.widest_child_in_column(2))
self.assertEqual(0, grid.tallest_child_in_row(2))
def test_tallest_child_in_column_or_row_2(self):
def create_grid():
grid = GridLayout(column_count=5, row_count=5, spacing=3)
for column in range(grid.column_count):
grid.set_child_column_sizing(column)
for row in range(grid.row_count):
grid.set_child_row_sizing(row)
return grid
with self.subTest("column"):
grid = create_grid()
child = self.gui.create(Panel)
child.size = (60, 38)
grid.add(child, 2, 1)
self.assertEqual(60, grid.widest_child_in_column(2))
        with self.subTest("row"):
grid = create_grid()
child = self.gui.create(Panel)
child.size = (38, 60)
grid.add(child, 1, 2)
self.assertEqual(60, grid.tallest_child_in_row(2))
def test_tallest_child_in_column_or_row_3(self):
def create_grid():
grid = GridLayout(column_count=5, row_count=5, spacing=3)
for column in range(grid.column_count):
grid.set_child_column_sizing(column)
for row in range(grid.row_count):
grid.set_child_row_sizing(row)
return grid
with self.subTest("column"):
grid = create_grid()
child = self.gui.create(Panel)
child.size = (66, 38)
grid.add_rect(child, Rect(2, 1, 3, 2))
self.assertEqual(20, grid.widest_child_in_column(2))
self.assertEqual(20, grid.widest_child_in_column(3))
self.assertEqual(20, grid.widest_child_in_column(4))
with self.subTest("row"):
grid = create_grid()
child = self.gui.create(Panel)
child.size = (38, 66)
grid.add_rect(child, Rect(1, 2, 2, 3))
self.assertEqual(20, grid.tallest_child_in_row(2))
self.assertEqual(20, grid.tallest_child_in_row(3))
self.assertEqual(20, grid.tallest_child_in_row(4))
def test_area_empty(self):
scenarios = [
(Rect(2, 0, 4, 2), Rect(1, 1, 9, 9), False),
(Rect(10, 2, 4, 4), Rect(1, 1, 9, 9), True),
]
for rect_left, rect_right, empty in scenarios:
with self.subTest(rect_left=rect_left, rect_right=rect_right, empty=empty):
grid = GridLayout(column_count=20, row_count=20, spacing=5)
grid.add_rect(self.gui.create(Panel), rect_left)
self.assertEqual(empty, grid.area_empty(rect_right))
def test_single_fixed(self):
grid = GridLayout(spacing=5)
grid.set_fixed_column_sizing(0, 90)
grid.set_fixed_row_sizing(0, 120)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
self.assertEqual(Panel.Rect(13, 19, 71, 102), child.rect)
def test_multiple_fixed_1(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_fixed_column_sizing(0, 101)
grid.set_fixed_column_sizing(1, 58)
grid.set_fixed_row_sizing(0, 33)
grid.set_fixed_row_sizing(1, 93)
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
self.assertEqual(Panel.Rect( 2, 3, 101, 33), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(108, 3, 58, 33), child_1_0.rect_outer)
self.assertEqual(Panel.Rect( 2, 41, 101, 93), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(108, 41, 58, 93), child_1_1.rect_outer)
def test_multiple_fixed_2(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_fixed_column_sizing(0, 101)
grid.set_fixed_column_sizing(1, 0)
grid.set_fixed_row_sizing(0, 0)
grid.set_fixed_row_sizing(1, 93)
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
self.assertEqual(Panel.Rect( 2, 3, 101, 18), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(108, 3, 19, 18), child_1_0.rect_outer)
self.assertEqual(Panel.Rect( 2, 8, 101, 93), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(108, 8, 19, 93), child_1_1.rect_outer)
def test_single_child(self):
grid = GridLayout(spacing=5)
grid.set_child_column_sizing(0)
grid.set_child_row_sizing(0)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
self.assertEqual(Panel.Rect(13, 19, 53, 81), child.rect)
def test_multiple_child_1(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_child_column_sizing(0)
grid.set_child_column_sizing(1)
grid.set_child_row_sizing(0)
grid.set_child_row_sizing(1)
def create_child(rect, size):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = size
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1), (58, 39))
child_1_0 = create_child(Rect(1, 0, 1, 1), (25, 71))
child_0_1 = create_child(Rect(0, 1, 1, 1), (61, 62))
child_1_1 = create_child(Rect(1, 1, 1, 1), (54, 20))
grid.layout(self.parent)
self.assertEqual(Panel.Rect( 2, 3, 80, 89), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(87, 3, 73, 89), child_1_0.rect_outer)
self.assertEqual(Panel.Rect( 2, 97, 80, 80), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(87, 97, 73, 80), child_1_1.rect_outer)
def test_multiple_child_2(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_child_column_sizing(0)
grid.set_child_column_sizing(1)
grid.set_child_row_sizing(0)
grid.set_child_row_sizing(1)
def create_child(rect, size):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = size
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1), (58, 31))
child_0_1 = create_child(Rect(0, 1, 1, 1), (61, 31))
child_1_0 = create_child(Rect(1, 0, 1, 2), (25, 87))
grid.layout(self.parent)
self.assertEqual(Panel.Rect( 2, 3, 80, 50), child_0_0.rect_outer)
self.assertEqual(Panel.Rect( 2, 58, 80, 50), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(87, 3, 44, 105), child_1_0.rect_outer)
def test_single_percentage(self):
grid = GridLayout(spacing=5)
grid.set_percentage_column_sizing(0, 0.333)
grid.set_percentage_row_sizing(0, 0.8)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
width = int(self.parent.rect_inner.w * 0.333) - 19 + 1
height = int(self.parent.rect_inner.h * 0.8) - 18 + 1
self.assertEqual(Panel.Rect(13, 19, width, height), child.rect)
def test_multiple_percentage(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_percentage_column_sizing(0, 0.333)
grid.set_percentage_column_sizing(1, 0.333)
grid.set_percentage_row_sizing(0, 0.8139)
grid.set_percentage_row_sizing(1, 1 - 0.8139)
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
width_0 = int(189 * 0.333) + 1
width_1 = width_0
height_0 = int(287 * 0.8139) + 1
height_1 = int(287 * (1 - 0.8139))
self.assertEqual(Panel.Rect(2, 3, width_0, height_0), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(2, 8 + height_0, width_0, height_1), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 3, width_1, height_0), child_1_0.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 8 + height_0, width_1, height_1), child_1_1.rect_outer)
def test_single_custom(self):
def custom_sizing(area_size, remaining_size):
return area_size ** 0.5
def custom_extra(extra):
return extra / 2
grid = GridLayout(spacing=5)
grid.set_custom_column_sizing(0, custom_sizing, custom_extra)
grid.set_custom_row_sizing(0, custom_sizing, custom_extra)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
root_width = int(194 ** 0.5)
root_height = int(292 ** 0.5)
final_width = root_width + int((194 - root_width) / 2) - 19
final_height = root_height + int((292 - root_height) / 2) - 18
self.assertEqual(Panel.Rect(13, 19, final_width, final_height), child.rect)
def test_multiple_custom(self):
def custom_sizing_1(area_size, remaining_size):
return area_size ** 0.8
def custom_sizing_2(area_size, remaining_size):
return area_size - area_size ** 0.8
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_custom_column_sizing(0, custom_sizing_1, partial(min, 1))
grid.set_custom_column_sizing(1, custom_sizing_2, partial(min, 1))
grid.set_custom_row_sizing(0, custom_sizing_2, partial(min, 1))
grid.set_custom_row_sizing(1, custom_sizing_1, partial(min, 1))
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
width_0 = int(custom_sizing_1(189, None)) + 1
width_1 = int(custom_sizing_2(189, None))
height_0 = int(custom_sizing_2(287, None)) + 1
height_1 = int(custom_sizing_1(287, None))
self.assertEqual(Panel.Rect(2, 3, width_0, height_0), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(2, 8 + height_0, width_0, height_1), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 3, width_1, height_0), child_1_0.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 8 + height_0, width_1, height_1), child_1_1.rect_outer)
def test_single_even(self):
# Since even sizing is the default we should make sure it works even
        # when we don't explicitly set the columns and rows to even sizing.
for default in (True, False):
with self.subTest(default=default):
grid = GridLayout(spacing=5)
if not default:
grid.set_even_column_sizing(0)
grid.set_even_row_sizing(0)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
self.assertEqual(Panel.Rect(13, 19, 175, 274), child.rect)
def test_multiple_even(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_even_column_sizing(0)
grid.set_even_column_sizing(1)
grid.set_even_row_sizing(0)
grid.set_even_row_sizing(1)
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
width_0 = int(189 * 0.5) + 1
width_1 = int(189 * 0.5)
height_0 = int(287 * 0.5) + 1
height_1 = int(287 * 0.5)
self.assertEqual(Panel.Rect(2, 3, width_0, height_0), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(2, 8 + height_0, width_0, height_1), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 3, width_1, height_0), child_1_0.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 8 + height_0, width_1, height_1), child_1_1.rect_outer)
def test_single_fill(self):
grid = GridLayout(spacing=5)
grid.set_fill_column_sizing(0)
grid.set_fill_row_sizing(0)
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (53, 81)
grid.add(child, 0, 0)
grid.layout(self.parent)
self.assertEqual(Panel.Rect(13, 19, 175, 274), child.rect)
def test_multiple_fill(self):
grid = GridLayout(column_count=2, row_count=2, spacing=5)
grid.set_percentage_column_sizing(0, 0.3333)
grid.set_fill_column_sizing(1)
grid.set_fill_row_sizing(0)
grid.set_fixed_row_sizing(1, 100)
def create_child(rect):
child = self.gui.create(Panel)
child.parent = self.parent
child.padding = (10, 8, 6, 4)
child.margins = (11, 16, 8, 2)
child.size = (9999, 9999)
grid.add_rect(child, rect)
return child
child_0_0 = create_child(Rect(0, 0, 1, 1))
child_0_1 = create_child(Rect(0, 1, 1, 1))
child_1_0 = create_child(Rect(1, 0, 1, 1))
child_1_1 = create_child(Rect(1, 1, 1, 1))
grid.layout(self.parent)
width_0 = int(189 * 0.3333)
width_1 = 189 - int(189 * 0.3333)
height_0 = 287 - 100
height_1 = 100
self.assertEqual(Panel.Rect(2, 3, width_0, height_0), child_0_0.rect_outer)
self.assertEqual(Panel.Rect(2, 8 + height_0, width_0, height_1), child_0_1.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 3, width_1, height_0), child_1_0.rect_outer)
self.assertEqual(Panel.Rect(7 + width_0, 8 + height_0, width_1, height_1), child_1_1.rect_outer)
def grid_example(gui):
panel = gui.create(Panel)
panel.rect = (50, 50, 500, 500)
panel.padding = (8, 16, 24, 32)
grid = GridLayout(column_count = 3, row_count = 4, spacing = 4)
for row in range(0, grid.row_count):
for column in range(0, grid.column_count):
child = gui.create(Panel)
child.parent = panel
grid.add(child, column, row)
grid.layout(panel)
def main():
from desky.gui import example
#example(grid_example)
unittest.main()
if __name__ == "__main__":
main()
| 2.515625 | 3 |
seqrepc/setup.py | ednilsonlomazi/SeqrepC | 1 | 12798804 | from distutils.core import setup, Extension
def main():
setup(name="seqrepc",
version="beta1.0",
description="SeqrepC is a module for fundamental operations related to numerical representations of genomic sequences.",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/ednilsonlomazi/SeqrepC",
license="BSD 3-Clause License",
ext_modules=[Extension("seqrepc", ["./src/seqrepc.c"])])
if __name__ == "__main__":
main()
| 1.265625 | 1 |
python_modules/dagma/dagma_tests/test_lambda_engine.py | vishvananda/dagster | 0 | 12798805 | <filename>python_modules/dagma/dagma_tests/test_lambda_engine.py<gh_stars>0
import logging
import uuid
from collections import namedtuple
import boto3
# import numpy
import pytest
import dagster.check as check
import dagster.core.types as types
from dagster import (
DependencyDefinition,
ExecutionContext,
InputDefinition,
lambda_solid,
PipelineContextDefinition,
PipelineDefinition,
ExecutionMetadata,
ResourceDefinition,
)
from dagster.core.execution import (
create_execution_plan_core,
create_environment_config,
yield_pipeline_execution_context,
)
from dagma import execute_plan, define_dagma_resource
def create_lambda_context():
return PipelineContextDefinition(
context_fn=lambda _: ExecutionContext.console_logging(log_level=logging.DEBUG),
resources={'dagma': define_dagma_resource()},
)
context_definitions = {'lambda': create_lambda_context()}
@lambda_solid
def solid_a():
return 1
@lambda_solid(inputs=[InputDefinition('arg_a')])
def solid_b(arg_a):
return arg_a * 2
@lambda_solid(inputs=[InputDefinition('arg_a')])
def solid_c(arg_a):
return arg_a * 3
@lambda_solid(inputs=[InputDefinition('arg_b'), InputDefinition('arg_c')])
def solid_d(arg_b, arg_c):
return arg_b + arg_c / 2.0
# return numpy.mean([arg_b, arg_c])
# TODO: figure out how to deal with installing packages like numpy where the target
# architecture on lambda is not the architecture running the pipeline
def define_diamond_dag_pipeline():
return PipelineDefinition(
name='actual_dag_pipeline',
context_definitions=context_definitions,
solids=[solid_a, solid_b, solid_c, solid_d],
dependencies={
'solid_b': {'arg_a': DependencyDefinition('solid_a')},
'solid_c': {'arg_a': DependencyDefinition('solid_a')},
'solid_d': {
'arg_b': DependencyDefinition('solid_b'),
'arg_c': DependencyDefinition('solid_c'),
},
},
)
def define_single_solid_pipeline():
return PipelineDefinition(
name='single_solid_pipeline',
context_definitions=context_definitions,
solids=[solid_a],
dependencies={},
)
TEST_ENVIRONMENT = {
'context': {'lambda': {'resources': {'dagma': {'config': {'aws_region_name': 'us-east-2'}}}}}
}
def run_test_pipeline(pipeline):
execution_metadata = ExecutionMetadata(run_id=str(uuid.uuid4()))
with yield_pipeline_execution_context(
pipeline, TEST_ENVIRONMENT, execution_metadata
) as context:
execution_plan = create_execution_plan_core(context)
return execute_plan(context, execution_plan)
@pytest.mark.skip('Skipping pending pickling issues in lambda engine. Issue #491')
def test_execution_diamond():
pipeline = define_diamond_dag_pipeline()
results = run_test_pipeline(pipeline)
assert len(results) == 4
@pytest.mark.skip('Skipping pending pickling issues in lambda engine. Issue #491')
def test_execution_single():
pipeline = define_single_solid_pipeline()
results = run_test_pipeline(pipeline)
assert len(results) == 1
assert results[('solid_a.transform', 'result')][0] is True
assert results[('solid_a.transform', 'result')][1].output_name == 'result'
assert results[('solid_a.transform', 'result')][1].value == 1
assert results[('solid_a.transform', 'result')][2] is None
| 1.984375 | 2 |
link/test/test_tcp_connection.py | pretty-wise/link | 0 | 12798806 | <filename>link/test/test_tcp_connection.py
import sys
import os
import subprocess
import threading
import signal
import urllib2
import json
import time
sys.path.append('/home/dashboard/codebase/link/utils')
sys.path.append('/Users/prettywise/Codebase/codebase/link/utils')
import parse
import link
def http_local_request(port, command, data):
url = "http://127.0.0.1:" + str(port) + command
content = None
print("local request: " + url)
try:
response = urllib2.urlopen(url, data)
content = response.read()
print(url + " resulted in (" + str(response.getcode()) + "): " + content)
if response.getcode() != 200:
raise Exception(url + " failed with: " + str(response.getcode()))
except:
print("404 probably")
return content
def get_command(port, command):
response = http_local_request(port, "/command-list", None)
if response == None:
return None
commands_json = json.loads(response)
for cmd in commands_json['commands']:
if command in cmd:
return cmd
return None
if __name__ == "__main__":
print("pre main")
print(sys.argv)
# main()
link_A, parser_A = link.run("A", sys.argv[1:])
link_B, parser_B = link.run("B", sys.argv[1:])
# endpoint_A_port = parser_A.FindTcpPort("tcp_connect")
# print("port for endpoint A: " + str(endpoint_A_port))
endpoint_A_rest_port = parser_A.FindRestPort()
print("rest port for endpoint A: " + str(endpoint_A_rest_port))
tcp_connect_B_port = parser_B.FindTcpPort("tcp_connect")
print("tcp_connect port on endpoint B: " + str(tcp_connect_B_port))
endpoint_B_rest_port = parser_B.FindRestPort()
print("rest port for endpoint B: " + str(endpoint_B_rest_port))
parser_A.ReadToLine("^.*(command register registration succeeded).*$")
parser_A.ReadToLine("^.*(command connect registration succeeded).*$")
parser_A.ReadToLine("^.*(command command-list registration succeeded).*$")
parser_B.ReadToLine("^.*(command register registration succeeded).*$")
parser_B.ReadToLine("^.*(command connect registration succeeded).*$")
parser_B.ReadToLine("^.*(command command-list registration succeeded).*$")
print("go!")
register_cmd_A = get_command(endpoint_A_rest_port, "/register")
if register_cmd_A:
print("A's register command: " + register_cmd_A)
connect_cmd_A = get_command(endpoint_A_rest_port, "/connect")
print("A's connect command: " + connect_cmd_A)
# register B's tcp connect in A.
name = "tcp_connect"
version = "1.0"
pid = parser_B.process.pid
json_request = "{ \"hostname\": \"127.0.0.1\", \"port\": " + str(tcp_connect_B_port) + ", \"name\": \"" + name + "\", \"version\": \"" + version + "\", \"pid\": " + str(pid) + " }"
ret = http_local_request(endpoint_A_rest_port, register_cmd_A, json_request)
print("B's tcp_connect registered in A: " + ret)
json_content = json.loads(ret)
tcp_connect_B_handle_in_A = json_content['handle']
# run connection A -> B
json_request_2 = "{\"handle\": " + str(tcp_connect_B_handle_in_A) + "}"
ret = http_local_request(endpoint_A_rest_port, connect_cmd_A, json_request_2)
print("A's tcp_connect connects with B's tcp_connect: " + ret)
parser_A.ReadToLine("^.*(sent all data, closing connection).*$")
print("all data sent")
parser_B.ReadToLine("^.*(all data received, closing connection).*$")
print("all data received")
# time.sleep(2)
parser_A.Kill()
parser_B.Kill()
parser_A.Wait();
parser_B.Wait();
link_A.join(10)
link_B.join(10)
print("post main")
| 2.65625 | 3 |
model/framework.py | LiGaoJi/DegreEmbed | 7 | 12798807 | <gh_stars>1-10
#!/usr/bin/env python
# -*-coding:utf-8 -*-
# @file : framework.py
# @brief : Framework for training, evaluating and saving models.
# @author : <NAME>
# @email : <EMAIL>
import os
from typing import List, Tuple
from tqdm import tqdm
import numpy as np
import torch
from torch import optim, nn
from torch.nn import functional as F
from dataloader import RTDataLoader
from utils import in_top_k
from utils import get_sample_prediction
class RTLoss(nn.Module):
def __init__(self,
threshold: float
) -> None:
super(RTLoss, self).__init__()
self.threshold = nn.parameter.Parameter(
torch.tensor(threshold)
)
def forward(self,
logits: torch.FloatTensor,
targets: torch.LongTensor
) -> torch.Tensor:
targ = F.one_hot(targets, logits.size(1))
# `logits`: (batch_size, num_entity)
loss = torch.maximum(logits, self.threshold)
return torch.mean(-torch.sum(torch.log(loss) * targ, dim=1), dim=0)
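# Illustrative use of RTLoss on dummy data (shapes assumed: batch of 2, 5 candidate entities):
#   loss_fn = RTLoss(threshold=1e-20)
#   logits = torch.softmax(torch.randn(2, 5), dim=1)   # probability-like scores over entities
#   targets = torch.tensor([3, 0])
#   loss = loss_fn(logits, targets)                    # scalar: mean -log score of the target, clamped at the threshold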
class RTFramework(object):
def __init__(self,
miner: nn.Module,
optimizer: optim.Optimizer,
dataloader: RTDataLoader,
loss_fn=nn.CrossEntropyLoss(),
device='cpu',
ckpt_file=None,
ckpt_save_dir=None
) -> None:
"""
Args:
`miner`: a nn.Module instance for logic rule mining
`optimizer`: an Optimizer instance
`dataloader`: Data loader
`loss_fn`: loss function
`device`: 'cpu' | 'cuda'
`ckpt_file`: path to saved model checkpoints
`ckpt_save_dir`: directory to save best model checkpoint
"""
self.loss_fn = loss_fn
self.device = device
self.ckpt_dir = ckpt_save_dir
self.miner = miner
self.optimizer = optimizer
self.dataloader = dataloader
self.start_epoch = 0
if ckpt_file:
self._load_checkpoint(ckpt_file)
def train(self,
top_k: int,
batch_size: int,
num_sample_batches: int,
epochs=20,
valid_freq: int = 1
) -> List:
'''Train `self.miner` on given training data loaded from `self.dataloader`.
Args:
`top_k`: for computing Hit@k
`batch_size`: mini batch size
`num_sample_batches`: max number of batches for one epoch
`epochs`: max training epochs in total
`valid_freq`: `self.miner` will be evaluated on validation dataset
every `valid_freq` epochs.
Returns:
Traning loss and valid accuracies.
'''
num_train = len(self.dataloader.train)
if self.dataloader.query_include_reverse:
num_train *= 2
accuracies = []
losses = []
best_cnt = 0
best_acc = 0.
for epoch in range(self.start_epoch, self.start_epoch + epochs):
self.miner.train()
print("{:=^100}".format(f"Training epoch {epoch+1}"))
running_acc = []
running_loss = 0.
num_trained = 0
for bid, (qq, hh, tt, trips) in enumerate(
self.dataloader.one_epoch("train", batch_size, num_sample_batches, True)
):
qq = torch.from_numpy(qq).to(self.device)
hh = torch.from_numpy(hh).to(self.device)
tt = torch.from_numpy(tt).to(self.device)
logits = self.miner(qq, hh, trips)
loss = self.loss_fn(logits, tt)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
bool_top_k = in_top_k(tt, logits, top_k)
running_acc += bool_top_k.tolist()
running_loss += loss.item() * qq.size(0)
num_trained += qq.size(0)
if (bid+1) % 10 == 0:
loss_val = running_loss / num_trained
acc = np.mean(running_acc)
print("loss: {:>7f}\tacc: {:>4.2f}% [{:>6}/{:>6d}]".format(
loss_val, 100 * acc, num_trained, num_train
))
loss = running_loss / num_train
acc = np.mean(running_acc) * 100
print(f"[epoch {epoch+1}] loss: {loss:>7f}, acc: {acc:>4.2f}")
losses.append(loss)
if (epoch + 1) % valid_freq == 0:
acc = self.eval("valid", batch_size, top_k)
accuracies.append(acc)
if acc > best_acc:
best_acc = acc
best_cnt = 0
print(f"Best checkpoint reached at best accuracy: {(acc * 100):.2f}%")
self._save_checkpoint("checkpoint", epoch)
else:
best_cnt += 1
# Early stopping.
if best_cnt > 2:
break
print("\n[Training finished] epochs: {} best_acc: {:.2f}".format(
epoch - self.start_epoch, best_acc
))
return losses, accuracies
def eval(self,
dataset_name: str,
             batch_size: int,
             top_k: int,
             prediction_file=None
             ) -> float:
"""Evaluate `self.miner` on dataset specified by `dataset_name`.
Args:
`dataset_name`: "train" | "valid" | "test"
`prediction_file`: file path for output prediction results
Returns:
            Mean top-k (Hit@k) accuracy over the dataset.
"""
if prediction_file:
file_obj = open(prediction_file, 'w')
accuracies = []
self.miner.eval()
# Do not accumulate gradient along propagation chain.
with torch.no_grad():
for _, (qq, hh, tt, trips) in enumerate(tqdm(
self.dataloader.one_epoch(
dataset_name, batch_size
),
desc='Predict'
)):
qq = torch.from_numpy(qq).to(self.device)
hh = torch.from_numpy(hh).to(self.device)
tt = torch.from_numpy(tt)
logits = self.miner(qq, hh, trips).cpu()
bool_top_k = in_top_k(tt, logits, top_k)
accuracies += bool_top_k.tolist()
if prediction_file:
qq = qq.cpu().numpy()
hh = hh.cpu().numpy()
tt = tt.cpu().numpy()
for bid, (q, h, t) in enumerate(zip(qq, hh, tt)):
q_str = self.dataloader.id2rel[q]
h_str = self.dataloader.id2ent[h]
prediction = get_sample_prediction(
t, logits[bid].cpu().numpy(),
self.dataloader.id2ent
)
t_str = prediction[-1]
to_write = [q_str, h_str, t_str] + prediction
file_obj.write(",".join(to_write) + "\n")
if prediction_file:
file_obj.close()
return np.mean(accuracies)
def _load_checkpoint(self, ckpt: str) -> None:
'''Load model checkpoint.'''
if os.path.isfile(ckpt):
checkpoint = torch.load(ckpt)
print(f"Successfully loaded checkpoint '{ckpt}'")
self.miner.load_state_dict(checkpoint['model'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.start_epoch = checkpoint['start_epoch']
else:
raise Exception(f"No checkpoint found at '{ckpt}'")
def _save_checkpoint(self,
name: str,
end_epoch: int
) -> None:
"""Save model checkpoint."""
if not self.ckpt_dir:
return
state_dict = {
'start_epoch': end_epoch,
'model': self.miner.state_dict(),
'optimizer': self.optimizer.state_dict(),
}
ckpt_file = name + ".pth.tar"
ckpt_file = os.path.join(self.ckpt_dir, ckpt_file)
torch.save(state_dict, ckpt_file)
| 2.234375 | 2 |
src/pydas_metadata/migrations/versions/f73a9aa46c77_add_isenabled_column_to_eventhandlerbase.py | bvanfleet/pydas | 0 | 12798808 | # pylint: disable=no-member,invalid-name,line-too-long,trailing-whitespace
"""Add IsEnabled column to EventHandlerBASE
Revision ID: <KEY>
Revises: 6b5369ab5224
Create Date: 2021-02-17 20:15:42.776190
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '6b5369ab5224'
branch_labels = None
depends_on = None
def upgrade():
op.add_column('EventHandlerBASE',
sa.Column('IsEnabled', sa.Boolean, nullable=False, default=False))
def downgrade():
op.drop_column('EventHandlerBASE', 'IsEnabled')
| 1.085938 | 1 |
stacks/XIAOMATECH/1.0/services/MYSQL/package/scripts/mysql_client.py | tvorogme/dataops | 3 | 12798809 | from resource_management.libraries.script.script import Script
from resource_management.core.resources.packaging import Package
class Client(Script):
def install(self, env):
packages = ['percona-server-client']
Package(packages)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
def start(self, env):
import params
env.set_params(params)
def stop(self, env):
import params
env.set_params(params)
def status(self, env):
import params
env.set_params(params)
if __name__ == "__main__":
Client().execute()
| 1.984375 | 2 |
Vokeur/website/migrations/0036_auto_20190719_1236.py | lsdr1999/Project | 0 | 12798810 | # Generated by Django 2.2.1 on 2019-07-19 12:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('website', '0035_auto_20190625_0900'),
]
operations = [
migrations.RenameField(
model_name='verenigingen',
old_name='ontgroening',
new_name='introductietijd',
),
]
| 1.640625 | 2 |
proxy/init.py | chrissvec/py-proxy | 0 | 12798811 | #!/usr/bin/python
# Flask is used to create a somewhat lightweight listening server
from flask import Flask
from requests import get
def spawn_proxy():
myproxy = Flask('__name__')
# Quick health check override
@myproxy.route('/healthcheck', methods=['GET'])
def health():
return "OK"
# Let's not spam google if we don't get a query, and return a Bad Request.
@myproxy.route('/', methods=['GET'])
def empty():
return "Empty search string", 400
# This is a very dumb proxy, we're only doing GET.
@myproxy.route('/<path:req>', methods=['GET'])
def proxy(req):
# We're only going to google here, so let's just keep it in the proxy settings for now.
target = 'https://www.google.com/'
            return get(f'{target}search?q={req}').content
return myproxy
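# Illustrative usage (assumed entry point, not defined elsewhere in this module):
#   app = spawn_proxy()
#   app.run(host='0.0.0.0', port=8080)   # then e.g. GET http://localhost:8080/some+query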
| 3.015625 | 3 |
Q926_Flip-String-to-Monotone-Increasing.py | xiaosean/leetcode_python | 0 | 12798812 | <filename>Q926_Flip-String-to-Monotone-Increasing.py<gh_stars>0
class Solution:
def minFlipsMonoIncr(self, S: str) -> int:
n = len(S)
min_diff = sum([a != b for a, b in zip(S, n * "1")])
dp_ = [min_diff] * (n+1)
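        # dp_[i] = flips needed if the first i characters become '0' and the rest become '1';
        # dp_[0] starts as the cost of flipping every '0' in S to '1'.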
for zero_idx in range(n):
if "0" == S[zero_idx]:
dp_[zero_idx+1] = dp_[zero_idx] - 1
else:
dp_[zero_idx+1] = dp_[zero_idx] + 1
return min(dp_) | 2.890625 | 3 |
pecli/plugins/strings.py | kirk-sayre-work/pecli | 0 | 12798813 | #! /usr/bin/env python
import pefile
import datetime
import os
import re
from pecli.plugins.base import Plugin
from pecli.lib.utils import cli_out
ASCII_BYTE = b" !\"#\$%&\'\(\)\*\+,-\./0123456789:;<=>\?@ABCDEFGHIJKLMNOPQRSTUVWXYZ\[\]\^_`abcdefghijklmnopqrstuvwxyz\{\|\}\\\~\t"
class PluginStrings(Plugin):
name = "strings"
description = "Extract strings from the PE file"
def add_arguments(self, parser):
parser.add_argument('--ascii', '-a', action="store_true", help="ASCII strings only")
parser.add_argument('--wide', '-w', action="store_true", help="Wide strings only")
parser.add_argument('-n', '--min-len', type=int, default=4, help='Print sequences of ' +
'characters that are at least min-len characters long, instead of ' +
'the default 4.')
self.parser = parser
def get_results(self, data, min_len=4, wide_only=False, ascii_only=False, cli_mode=False):
# regular expressions from flare-floss:
# https://github.com/fireeye/flare-floss/blob/master/floss/strings.py#L7-L9
re_narrow = re.compile(b'([%s]{%d,})' % (ASCII_BYTE, min_len))
re_wide = re.compile(b'((?:[%s]\x00){%d,})' % (ASCII_BYTE, min_len))
strings = []
# print ascii strings unless we only want wide strings
if not wide_only:
for match in re_narrow.finditer(data):
s = match.group().decode('ascii')
strings.append(s)
cli_out(s, cli_mode)
# print wide strings unless we only want ascii strings
if not ascii_only:
for match in re_wide.finditer(data):
try:
s = match.group().decode('utf-16')
cli_out(s, cli_mode)
strings.append(s)
except UnicodeDecodeError:
pass
return {"strings": strings}
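    # Illustrative call (hypothetical bytes):
    #   PluginStrings().get_results(b"ab\x00\x01hello world\x00")
    #   -> {"strings": ["hello world"]}   # "ab" is shorter than the default min_len of 4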
def run_cli(self, args, pe, data):
if args.ascii and args.wide:
print("to print both ascii and wide strings, omit both")
else:
self.get_results(data, args.min_len, args.wide, args.ascii, cli_mode=True)
| 2.453125 | 2 |
polyglot/utils.py | UniversalDevicesInc/Polyglot | 27 | 12798814 | <filename>polyglot/utils.py
""" Generic utilities used by Polyglot. """
# pylint: disable=import-error, unused-import, invalid-name, undefined-variable
# flake8: noqa
import sys
import threading
# Uniform Queue and Empty locations b/w Python 2 and 3
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
# Uniform ProcessLookupError b/w Python 2 and 3
if sys.version_info[0] == 2:
MyProcessLookupError = OSError
else:
MyProcessLookupError = ProcessLookupError
class AsyncFileReader(threading.Thread):
'''
Helper class to implement asynchronous reading of a file
in a separate thread. Pushes read lines on a queue to
be consumed in another thread.
Source:
http://stefaanlippens.net/python-asynchronous-subprocess-pipe-reading
'''
def __init__(self, fd, handler):
assert callable(handler)
assert callable(fd.readline)
threading.Thread.__init__(self)
self.daemon = True
self._fd = fd
self._handler = handler
def run(self):
'''The body of the thread: read lines and put them on the queue.'''
for line in iter(self._fd.readline, ''):
self._handler(line.replace('\n', ''))
class LockQueue(Queue):
""" Python queue with a locking utility """
def __init__(self, *args, **kwargs):
Queue.__init__(self, *args, **kwargs)
# Queue is old style class in Python 2.x
self.locked = True
def put(self, *args, **kwargs):
""" Put item into queue """
if not self.locked:
Queue.put(self, *args, **kwargs)
def put_nowait(self, *args, **kwargs):
""" Put item into queue without waiting """
if not self.locked:
Queue.put_nowait(self, *args, **kwargs)
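    # Illustrative behaviour (hypothetical): a new LockQueue starts locked, so puts are
    # silently dropped until `locked` is set to False:
    #   q = LockQueue()
    #   q.put(1)            # ignored while q.locked is True
    #   q.locked = False
    #   q.put(1)            # now enqueued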
| 1.984375 | 2 |
connectortest.py | waysys/BCGen | 0 | 12798815 | <filename>connectortest.py
# -------------------------------------------------------------------------------
#
# Copyright (c) 2021 Waysys LLC
#
# -------------------------------------------------------------------------------
# -------------------------------------------------------------------------------
__author__ = '<NAME>'
__version__ = "2021-09-02"
"""
This module tests the Connector class.
"""
import unittest
from datetime import datetime
import xmlrunner
from base.connector import Connector
from configuration.config import ConnectorTestConfiguration
from queries.policycenterqueries import PolicyCenterQueries
# -------------------------------------------------------------------------------
# Test of the Connector class
# -------------------------------------------------------------------------------
class TestConnector(unittest.TestCase):
"""
Test the ability to connect to the DataHub database.
"""
# -------------------------------------------------------------------------------
# Class Variables
# -------------------------------------------------------------------------------
configuration = ConnectorTestConfiguration()
# -------------------------------------------------------------------------------
# Support Methods
# -------------------------------------------------------------------------------
def setUp(self):
"""
Initialize the connector.
"""
self.cnx = Connector.create_connector(self.configuration.data_source)
return
def tearDown(self):
self.cnx.close()
return
# -------------------------------------------------------------------------------
# Tests
# -------------------------------------------------------------------------------
def test_02_database_connection(self):
"""
Test that the connector actually connects to the database.
"""
cursor = self.cnx.cursor()
self.assertEqual(-1, cursor.rowcount, "Cursor did not return -1")
return
def test_03_database_connection_with_odbc_datasource(self):
"""
        Test connection through ODBC data source. Perform a query.
"""
pc_queries = PolicyCenterQueries(self.cnx)
selection_start = datetime(2021, 9, 1)
selection_end = datetime(2021, 9, 3)
results = pc_queries.query_accounts(selection_start, selection_end)
count = len(results)
self.assertTrue(count > 0, "Accounts were not found: " + str(count))
return
if __name__ == '__main__':
report_file = TestConnector.configuration.report_file
with open(report_file, 'w') as output:
unittest.main(
testRunner=xmlrunner.XMLTestRunner(output=output),
failfast=False, buffer=False, catchbreak=False)
| 1.726563 | 2 |
j2fa/forms.py | kajala/django-j2fa | 0 | 12798816 | <filename>j2fa/forms.py
from django import forms
from django.utils.translation import gettext_lazy as _
class TwoFactorForm(forms.Form):
code = forms.CharField(label=_("two.factor.code.label"), max_length=8, min_length=1)
| 1.921875 | 2 |
components/fileLoader.py | WangCHEN9/fsPreprocess | 0 | 12798817 | <gh_stars>0
from abc import abstractmethod, ABC
from pathlib import Path
import logging
import streamlit as st
import librosa
import pandas as pd
import numpy as np
from fs_preprocess.tdmsReader import tdmsReader
class fileLoader(ABC):
"""fileLoader meta class"""
def __init__(self, multiple_files: bool = False):
self.multiple_files = multiple_files
self.y = None
self.sr = None
self.get_uploaded()
@abstractmethod
def get_uploaded(self):
pass
class wavFileLoader(fileLoader):
def __init__(self):
super().__init__()
self.suffix = ".wav"
def get_uploaded(self):
logging.info("Refreshing uploaded files...")
self.wav_file = st.file_uploader(
"Choose a wav file", type=["wav"], accept_multiple_files=self.multiple_files
)
if self.wav_file:
self.title = self.wav_file.name
self._load_audio()
self.cut_audio_in_second(left_cut=0.5, right_cut=0.5)
self._show_audio(self.wav_file)
def cut_audio_in_second(self, left_cut: float, right_cut: float) -> None:
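        # Trim `left_cut`/`right_cut` seconds from each end; e.g. at a 22050 Hz sample
        # rate, left_cut=0.5 drops the first 11025 samples.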
num_of_points_to_drop_left = int(left_cut * self.sr)
num_of_points_to_drop_right = int(right_cut * self.sr)
self.y = self.y[num_of_points_to_drop_left:-num_of_points_to_drop_right]
    def _load_audio(self, offset: float = None) -> None:
        """Load y and sr from the wav file and store them on this object.
        Because self.sr starts as None, librosa keeps the file's native sampling rate
        (librosa's 22050 Hz default only applies when the sr argument is left unspecified).
:param offset: [time for start to read, in the audio file. in second ], defaults to None
:type offset: float, optional
"""
self.y, self.sr = librosa.load(
self.wav_file, sr=self.sr, offset=offset, mono=True
)
def _show_audio(self, wav_file):
st.audio(wav_file, format="audio/wav")
class tdmsFileLoader(fileLoader):
def __init__(self):
super().__init__()
def get_uploaded(self):
logging.info("Refreshing uploaded files...")
self.tdms_file = st.file_uploader(
"Choose a tdms file",
type=["tdms"],
accept_multiple_files=self.multiple_files,
)
if self.tdms_file:
self._load_tdms()
self._selector_for_measures()
self.title = f"{Path(self.tdms_file.name).stem}_{self.selected_measure}"
def _load_tdms(self):
obj = tdmsReader(self.tdms_file)
measures = obj.prepare_output_per_tdms()
# * measures is List[dict], dict keys = ['direction', 'load', 'y','sr']
self.df = pd.DataFrame(data=measures, index=range(len(measures)))
def add_select_options(row):
return f"{row.name}_{row.direction}_{row.load}"
self.df["select_options"] = self.df.apply(add_select_options, axis=1)
def _selector_for_measures(self):
self.selected_measure = st.selectbox(
label=f"select measure for preprocess",
options=self.df["select_options"],
key="seleced_tdms_measure",
)
df_row_selected = self.df[self.df["select_options"] == self.selected_measure]
logging.info(f"{self.selected_measure} selected !")
self.y = df_row_selected.y.to_list()[0]
self.sr = df_row_selected.sr.to_list()[0]
| 2.859375 | 3 |
problem_20.py | mc10/project-euler | 0 | 12798818 | '''
Problem 20
@author: <NAME>
'''
def sum_of_digits(number):
'''Sum the digits of a number by taking the last digit and continually
dividing by 10.'''
digit_sum = 0
while number:
digit_sum += number % 10
number //= 10
return digit_sum
def factorial(number):
factorial = 1
cur_num = 2
while cur_num <= number:
factorial *= cur_num
cur_num += 1
return factorial
print(sum_of_digits(factorial(100)))
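# Sanity check on a smaller case: factorial(10) == 3628800, and its digits sum to 27.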
| 4.0625 | 4 |
oauth/oauth.py | hasibulkabir/Google | 15 | 12798819 | <reponame>hasibulkabir/Google
# Copyright (c) 2017 The TelegramGoogleBot Authors (see AUTHORS)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from oauth2client.client import OAuth2WebServerFlow, FlowExchangeError
from oauth2client.file import Storage
import os
from config import *
SCOPES = ['https://www.googleapis.com/auth/calendar',
'https://www.googleapis.com/auth/drive',
'profile']
flow = OAuth2WebServerFlow(client_id=GOOGLE_OAUTH_CLIENT_ID,
client_secret=GOOGLE_OAUTH_CLIENT_SECRET,
scope=SCOPES,
redirect_uri=GOOGLE_OAUTH_REDIRECT_URI,
access_type='offline',
prompt='consent'
)
def get_url():
return flow.step1_get_authorize_url()
def save(usr, code):
try:
credentials = flow.step2_exchange(code)
except FlowExchangeError:
return False
os.chdir(os.path.dirname(os.path.realpath(__file__)).replace('/oauth', '') + '/oauth/credentials')
file_name = '{id}.json'.format(id=usr.id)
open(file_name, 'a').close()
storage = Storage(file_name)
storage.put(credentials)
return True
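# Illustrative flow (hypothetical web handler):
#   url = get_url()        # redirect the Telegram user here to authorize
#   ok = save(user, code)  # later, exchange the `code` query parameter they come back with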
| 1.851563 | 2 |
ex_mn/from_indiv/topo1.py | Oeufhp/seniorProject-SDN | 3 | 12798820 | <gh_stars>1-10
from mininet.topo import Topo
class My_Topo(Topo):
    def __init__(self):
        "Create a simple two-switch, four-host topology."
# Initialize topology
Topo.__init__(self)
# Add hosts and switches
H1 = self.addHost('h1',ip='10.0.0.1')
H2 = self.addHost('h2',ip='10.0.0.2')
H3 = self.addHost('h3',ip='10.0.0.3')
H4 = self.addHost('h4',ip='10.0.0.4')
S1 = self.addSwitch('s1')
S2 = self.addSwitch('s2')
# S3 = self.addSwitch('s3')
# S4 = self.addSwitch('s4')
# c0 = self.addController('c0')
# Add links
self.addLink(H1, S1)
self.addLink(H2, S1)
self.addLink(S1, S2)
self.addLink(S2, H3)
self.addLink(S2, H4)
topos = {
'myTopo': (lambda: My_Topo())
}
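# Typically launched with Mininet's --custom flag (assumed invocation):
#   sudo mn --custom topo1.py --topo myTopo --controller=remote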
| 2.828125 | 3 |
api/management/commands/set_in_search_init.py | IFRCGo/ifrcgo-api | 11 | 12798821 | from django.core.management.base import BaseCommand
from api.models import Country
from django.db import transaction
from django.db.models import Q
from api.logger import logger
class Command(BaseCommand):
help = 'Update Countries initially to set/revoke their in_search field (probably one-time run only)'
@transaction.atomic
def handle(self, *args, **options):
try:
# Update countries which should appear in search
inc_c = Country.objects.filter(independent=True, is_deprecated=False, record_type=1).update(in_search=True)
# Update countries which should NOT appear in search
# independent can be null too thus why negated check
exc_c = Country.objects.filter(~Q(independent=True) | Q(is_deprecated=True) | ~Q(record_type=1)).update(in_search=False)
logger.info('Successfully set in_search for Countries')
except Exception as ex:
logger.error(f'Failed to set in_search for Countries. Error: {str(ex)}')
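# Run through Django's manage.py; given the management/commands/ path above, this
# registers (under the assumed project layout) as:
#   python manage.py set_in_search_init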
| 2.09375 | 2 |
code/python/10.regular-expression-matching.py | ANYALGO/ANYALGO | 1 | 12798822 | class Solution:
def isMatch(self, s: str, p: str) -> bool:
if not p:
return not s
if s == p:
return True
if len(p) > 1 and p[1] == "*":
if s and (s[0] == p[0] or p[0] == "."):
return self.isMatch(s, p[2:]) or self.isMatch(s[1:], p)
else:
return self.isMatch(s, p[2:])
elif s and p and (s[0] == p[0] or p[0] == "."):
return self.isMatch(s[1:], p[1:])
return False
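# Illustrative check with the classic LeetCode example (driver code is assumed):
#   Solution().isMatch("aab", "c*a*b")  # True: "c*" matches zero chars, "a*" matches "aa"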
| 3.609375 | 4 |
bot/cogs/lock.py | connor-ford/random-discord-bot | 0 | 12798823 | <gh_stars>0
import logging
from discord.errors import NotFound
from discord.ext import commands
from discord_slash import cog_ext
from discord_slash.model import SlashCommandOptionType
from discord_slash.utils.manage_commands import create_choice, create_option
from discord import VoiceState, Member, VoiceChannel, ChannelType
from data.lock_manager import lock_manager
logger = logging.getLogger(__name__)
class LockCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_voice_state_update(
self, member: Member, before: VoiceState, after: VoiceState
):
updates = lock_manager.check(member, after)
for update in updates:
if update[0] == "mute":
await member.edit(
mute=update[1],
reason=f"Locked as {'muted' if update[1] else 'unmuted'}.",
)
elif update[0] == "deafen":
await member.edit(
deafen=update[1],
reason=f"Locked as {'deafened' if update[1] else 'undeafened'}.",
)
elif update[0] == "channel":
channel = await self.bot.fetch_channel(update[1])
await member.edit(
voice_channel=channel,
reason=f"Locked in channel {channel} (ID: {channel.id}).",
)
@cog_ext.cog_subcommand(
name="mute",
description="Lock the specified user as muted or unmuted. This command can only be run in servers.",
base="lock",
options=[
create_option(
name="user",
description="The user to lock.",
option_type=SlashCommandOptionType.USER,
required=True,
),
create_option(
name="muted",
description="Determines if the user should be locked as muted or unmuted.",
option_type=SlashCommandOptionType.BOOLEAN,
required=True,
),
],
)
async def _lock_mute(self, ctx, user: Member, muted: bool):
if not ctx.guild:
await ctx.send("This command can only be run in servers.", hidden=True)
return
lock_manager.add(ctx, user.id, lock_type="mute", lock_value=muted)
await ctx.send(
f"User `{user}` locked as {'muted' if muted else 'unmuted'}.",
hidden=True,
)
@cog_ext.cog_subcommand(
name="deafen",
description="Lock the specified user as deafened or undeafened. This command can only be run in servers.",
base="lock",
options=[
create_option(
name="user",
description="The user to lock.",
option_type=SlashCommandOptionType.USER,
required=True,
),
create_option(
name="deafened",
description="Determines if the user should be locked as deafened or undeafened.",
option_type=SlashCommandOptionType.BOOLEAN,
required=True,
),
],
)
async def _lock_deafen(self, ctx, user: Member, deafened: bool):
if not ctx.guild:
await ctx.send("This command can only be run in servers.", hidden=True)
return
lock_manager.add(ctx, user.id, lock_type="deafen", lock_value=deafened)
await ctx.send(
f"User `{user}` locked as {'deafened' if deafened else 'undeafened'}.",
hidden=True,
)
@cog_ext.cog_subcommand(
name="channel",
description="Lock the specified user in a specified voice channel. This command can only be run in servers.",
base="lock",
options=[
create_option(
name="user",
description="The user to lock.",
option_type=SlashCommandOptionType.USER,
required=True,
),
create_option(
name="channel",
description="The voice channel to lock the user in.",
option_type=SlashCommandOptionType.CHANNEL,
required=True,
),
],
)
async def _lock_channel(self, ctx, user: Member, channel: VoiceChannel):
if not ctx.guild:
await ctx.send("This command can only be run in servers.", hidden=True)
return
if channel.type != ChannelType.voice:
await ctx.send("The selected channel must be a voice channel.", hidden=True)
return
lock_manager.add(ctx, user.id, lock_type="channel", lock_value=str(channel.id))
await ctx.send(
f"User `{user}` locked in `{channel}` (ID: {channel.id}).",
hidden=True,
)
@cog_ext.cog_subcommand(
base="lock",
name="list",
description="Lists all locked users. This command can only be run in servers.",
)
async def _list_locks(self, ctx):
if not ctx.guild:
await ctx.send("This command can only be run in servers.", hidden=True)
return
users = lock_manager.list(str(ctx.guild.id))
if not users:
await ctx.send("There are no locked users in this server.")
return
message = (
f"Listing {len(users)} locked user{'s' if len(users) > 1 else ''}:\n```"
)
for user_id, locks in users.items():
try:
user = await self.bot.fetch_user(user_id)
            except NotFound:
                logger.info(f"User {user_id} not found.")
                continue
message += f"{user}\n"
for lock_type, lock_value in locks.items():
if lock_type == "mute":
message += f" - {'Muted' if lock_value else 'Unmuted'}\n"
elif lock_type == "deafen":
message += f" - {'Deafened' if lock_value else 'Undeafened'}\n"
elif lock_type == "channel":
channel = await self.bot.fetch_channel(lock_value)
message += f" - Channel: {channel} (ID: {channel.id})\n"
message += "```"
await ctx.send(message)
@cog_ext.cog_subcommand(
base="lock",
name="remove",
description="Unlock the specified user. This command can only be run in servers.",
options=[
create_option(
name="user",
description="The user to unlock.",
option_type=SlashCommandOptionType.USER,
required=True,
),
create_option(
name="lock_type",
description="The type of lock to remove from the user (defaults to All)",
option_type=SlashCommandOptionType.STRING,
required=False,
choices=[
create_choice(
name="Mute",
value="mute",
),
create_choice(
name="Deafen",
value="deafen",
),
create_choice(
name="Channel",
value="channel"
),
create_choice(
name="All",
value="all"
)
]
)
],
)
async def _unlock(self, ctx, user: Member, lock_type: str = "all"):
if not ctx.guild:
await ctx.send("This command can only be run in servers.")
return
lock_manager.remove(ctx, user.id, lock_type)
await ctx.send(
f"Removed {lock_type} lock{'s' if lock_type == 'all' else ''} from {user}.",
hidden=True,
)
def setup(bot):
bot.add_cog(LockCog(bot))
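# Loaded like any discord.py extension, e.g. bot.load_extension("cogs.lock");
# the exact module path is an assumption about the surrounding project layout.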
| 2.375 | 2 |
setup.py | jlnerd/JLpy_Utilities | 0 | 12798824 | <reponame>jlnerd/JLpy_Utilities<gh_stars>0
import setuptools
from distutils.version import LooseVersion
from pathlib import Path
import os
import re
import codecs
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with codecs.open(os.path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def get_requirements(requirements='requirements.txt'):
"""Get the list of requirements from the pip `requirements` file.
Args:
requirements (str): path to the pip requirements file.
Examples:
['django==1.5.1', 'mezzanine==1.4.6']
Returns:
List[str]: the list of requirements
"""
# pip v1.5 started requiring the 'session'
# pip v10.0.0 moved these wonderful methods to ._internal. Boo!
from pip import __version__ as pip_version
if LooseVersion(pip_version) < LooseVersion('1.5'):
from pip.req import parse_requirements
do_parse = lambda path: parse_requirements(path) # noqa: E731
elif LooseVersion(pip_version) < LooseVersion('10.0.0'):
from pip.req import parse_requirements
from pip.download import PipSession
do_parse = lambda path: parse_requirements(path, session=PipSession()) # noqa: E731
else:
# We're in the bold new future of using internals... yeah
from pip._internal.req import parse_requirements
from pip._internal.download import PipSession
do_parse = lambda path: parse_requirements(path, session=PipSession()) # noqa: E731
# Cool trick to automatically pull in install_requires values directly from
# your requirements.txt file but you need to have pip module available
install_reqs = do_parse(os.path.join(here, requirements))
return [str(ir.req) for ir in install_reqs]
def _determine_requirements_txt_location():
this_dir = Path(__file__).parent
if Path(this_dir, 'requirements.txt').exists():
return 'requirements.txt'
elif Path(this_dir, 'pyDSlib.egg-info', 'requires.txt').exists():
return 'pyDSlib.egg-info/requires.txt'
else:
raise FileExistsError('Unable to find a requirements.txt file')
#define directories to exclude from setup
exclude_dirs = ['ez_setup', 'examples', 'tests', 'venv']
# fetch long description from readme
with open("README.md", "r") as fh:
README = fh.read()
#fetch scripts
try:
scripts = ['scripts/%s' % f for f in os.listdir('scripts') if f != "dummy.py"]
except OSError:
scripts = []
setuptools.setup(
name = 'pyDSlib',
version= find_version('pyDSlib', '__init__.py'),
author="<NAME>",
author_email="<EMAIL>",
description='General utilities to streamline data science and machine learning routines in python',
long_description= README,
long_description_content_type="text/markdown",
url="https://github.com/jlnerd/pyDSlib.git",
packages= setuptools.find_packages(exclude=exclude_dirs),
include_package_data=True,
scripts=scripts,
setup_requires=["pep8", "setuptools>=30"],
dependency_links=[],
#test_suite='ci_scripts.run_nose.run',
#tests_require=['nose>=1.3.7', 'coverage'],
zip_safe=False,
install_requires=get_requirements(_determine_requirements_txt_location()),
entry_points={},
)
| 2.3125 | 2 |
tos/templatetags/tos_tags.py | SocialGouv/ecollecte | 0 | 12798825 | from django import template
from tos.models import CGUItem
register = template.Library()
@register.simple_tag
def get_cgu_items():
return CGUItem.objects.filter(deleted_at__isnull=True)
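# Template usage sketch (the template wiring is an assumption):
#   {% load tos_tags %}
#   {% get_cgu_items as cgu_items %}
#   ... then iterate over cgu_items to render the non-deleted CGU entries.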
| 1.71875 | 2 |
egs/sawtooth-detection-baseline/v2/sawtooth_detection.py | dev0x13/globus-plasma | 0 | 12798826 | <gh_stars>0
import numpy as np
import os
import matplotlib.pyplot as plt
import sys
#####################
# SCRIPT PARAMETERS #
#####################
stage = 0
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_dir, "../../../tools"))
from base import get_globus_version
pyglobus_root = os.path.join(current_dir, "../../..", "_stage-%s" % get_globus_version(), "python")
sys.path.append(pyglobus_root)
try:
import pyglobus
except ImportError as e:
print("Cannot import pyglobus from %s, exiting" % pyglobus_root)
sys.exit(1)
output_dir = os.path.join(current_dir, "output", "sawtooth_detection")
########################
# ALGORITHM PARAMETERS #
########################
SHT_FILE = "../../../data/test/sht-reader/sht38515.sht"
SIGNAL_SAMPLING_RATE = int(1e6)
HIGH_PASS_CUTOFF = 400
SMOOTHED_DD1_ORDER = 30
LOW_PASS_CUTOFF = 5000
SAWTOOTH_DETECTION_THRESHOLD = 0.0005
ROI_DETECTOR_MEAN_SCALE = 1
NUM_SIGNAL_FROM_SHT = 26
####################
# HELPER FUNCTIONS #
####################
# Plot data and save the figure to a PNG file
def plot(x, y, label_x, label_y, color="k", new_fig=True, flush=True):
global stage
if new_fig:
plt.figure(figsize=(15, 10))
plt.plot(x, y, color)
plt.xlabel(label_x, fontsize=25)
plt.ylabel(label_y, fontsize=25)
if flush:
out = os.path.join(output_dir, "#%i.png" % stage)
plt.savefig(out)
print("Stage %i result:" % stage, out)
stage += 1
if __name__ == "__main__":
font = {"size": 22}
plt.rc("font", **font)
os.makedirs(output_dir, exist_ok=True)
print("Stage %i: Data loading" % stage)
sht_reader = pyglobus.util.ShtReader(SHT_FILE)
signal = sht_reader.get_signal(NUM_SIGNAL_FROM_SHT)
data = np.array((signal.get_data_x(), signal.get_data_y()))
print("Loaded %s" % SHT_FILE)
plot(data[0], data[1], "Время, с", "U, В")
print("Stage %i: ROI extracting" % stage)
roi = pyglobus.sawtooth.get_signal_roi(data[1], mean_scale=1)
x = np.copy(data[0, roi[0]:roi[1]])
y = np.copy(data[1, roi[0]:roi[1]])
plot(x, y, "Время, с", "U, В")
print("Stage %i: High pass filtering" % stage)
pyglobus.dsp.high_pass_filter(y, HIGH_PASS_CUTOFF, SIGNAL_SAMPLING_RATE)
plot(x, y, "Время, с", "U, В")
print("Stage %i: Smoothed differentiation" % stage)
y = pyglobus.dsp.first_order_diff_filter(y, SMOOTHED_DD1_ORDER)
plot(x, y, "Время, с", "U', В/с")
print("Stage %i: Taking absolute value" % stage)
y = np.abs(y)
plot(x, y, "Время, с", "|U'|, В/с")
print("Stage %i: Low pass filtering" % stage)
pyglobus.dsp.low_pass_filter(y, LOW_PASS_CUTOFF, SIGNAL_SAMPLING_RATE)
plot(x, y, "Время, с", "|U'|, В/с", flush=False)
plot(x, [SAWTOOTH_DETECTION_THRESHOLD] * len(x), "Время, с", "|U'|, В/с", color="r", new_fig=False)
print("Stage %i: Sawtooth detection" % stage)
start_ind, end_ind = pyglobus.sawtooth.get_sawtooth_indexes(y, SAWTOOTH_DETECTION_THRESHOLD)
plt.figure(figsize=(15, 10))
plt.axvline(x[start_ind], color="r")
plt.axvline(x[end_ind], color="r")
plot(data[0], data[1], "Время, с", "U, В", new_fig=False)
print("Done!")
| 2.09375 | 2 |
py/test/pytests/brightness/brightness.py | arccode/factory | 3 | 12798827 | # Copyright 2016 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This is a factory test to check the brightness of LCD backlight or LEDs."""
from cros.factory.device import device_utils
from cros.factory.test.i18n import arg_utils as i18n_arg_utils
from cros.factory.test import test_case
from cros.factory.test import test_ui
from cros.factory.utils.arg_utils import Arg
class BrightnessTest(test_case.TestCase):
ARGS = [
i18n_arg_utils.I18nArg('msg', 'Message HTML'),
Arg('timeout_secs', int, 'Timeout value for the test in seconds.',
default=10),
Arg('levels', list, 'A sequence of brightness levels.'),
Arg('interval_secs', (int, float),
'Time for each brightness level in seconds.')
]
def setUp(self):
self.dut = device_utils.CreateDUTInterface()
self.ui.ToggleTemplateClass('font-large', True)
self.ui.BindStandardKeys()
self.ui.SetState([self.args.msg, test_ui.PASS_FAIL_KEY_LABEL])
def runTest(self):
"""Starts an infinite loop to change brightness."""
self.ui.StartFailingCountdownTimer(self.args.timeout_secs)
while True:
for level in self.args.levels:
self._SetBrightnessLevel(level)
self.Sleep(self.args.interval_secs)
def _SetBrightnessLevel(self, level):
raise NotImplementedError
| 2.40625 | 2 |
finat/finiteelementbase.py | connorjward/FInAT | 14 | 12798828 | <filename>finat/finiteelementbase.py
from abc import ABCMeta, abstractproperty, abstractmethod
from itertools import chain
import numpy
import gem
from gem.interpreter import evaluate
from gem.optimise import delta_elimination, sum_factorise, traverse_product
from gem.utils import cached_property
from finat.quadrature import make_quadrature
class FiniteElementBase(metaclass=ABCMeta):
@abstractproperty
def cell(self):
'''The reference cell on which the element is defined.'''
@abstractproperty
def degree(self):
'''The degree of the embedding polynomial space.
In the tensor case this is a tuple.
'''
@abstractproperty
def formdegree(self):
'''Degree of the associated form (FEEC)'''
@abstractmethod
def entity_dofs(self):
'''Return the map of topological entities to degrees of
freedom for the finite element.'''
@property
def entity_permutations(self):
'''Returns a nested dictionary that gives, for each dimension,
for each entity, and for each possible entity orientation, the
DoF permutation array that maps the entity local DoF ordering
to the canonical global DoF ordering.
The entity permutations `dict` for the degree 4 Lagrange finite
element on the interval, for instance, is given by:
.. code-block:: python3
{0: {0: {0: [0]},
1: {0: [0]}},
1: {0: {0: [0, 1, 2],
1: [2, 1, 0]}}}
Note that there are two entities on dimension ``0`` (vertices),
each of which has only one possible orientation, while there is
a single entity on dimension ``1`` (interval), which has two
possible orientations representing non-reflected and reflected
intervals.
'''
raise NotImplementedError(f"entity_permutations not yet implemented for {type(self)}")
@cached_property
def _entity_closure_dofs(self):
# Compute the nodes on the closure of each sub_entity.
entity_dofs = self.entity_dofs()
return {dim: {e: sorted(chain(*[entity_dofs[d][se]
for d, se in sub_entities]))
for e, sub_entities in entities.items()}
for dim, entities in self.cell.sub_entities.items()}
def entity_closure_dofs(self):
'''Return the map of topological entities to degrees of
freedom on the closure of those entities for the finite
element.'''
return self._entity_closure_dofs
@cached_property
def _entity_support_dofs(self):
esd = {}
for entity_dim in self.cell.sub_entities.keys():
beta = self.get_indices()
zeta = self.get_value_indices()
entity_cell = self.cell.construct_subelement(entity_dim)
quad = make_quadrature(entity_cell, (2*numpy.array(self.degree)).tolist())
eps = 1.e-8 # Is this a safe value?
result = {}
for f in self.entity_dofs()[entity_dim].keys():
# Tabulate basis functions on the facet
vals, = self.basis_evaluation(0, quad.point_set, entity=(entity_dim, f)).values()
# Integrate the square of the basis functions on the facet.
ints = gem.IndexSum(
gem.Product(gem.IndexSum(gem.Product(gem.Indexed(vals, beta + zeta),
gem.Indexed(vals, beta + zeta)), zeta),
quad.weight_expression),
quad.point_set.indices
)
evaluation, = evaluate([gem.ComponentTensor(ints, beta)])
ints = evaluation.arr.flatten()
assert evaluation.fids == ()
result[f] = [dof for dof, i in enumerate(ints) if i > eps]
esd[entity_dim] = result
return esd
def entity_support_dofs(self):
'''Return the map of topological entities to degrees of
freedom that have non-zero support on those entities for the
finite element.'''
return self._entity_support_dofs
@abstractmethod
def space_dimension(self):
'''Return the dimension of the finite element space.'''
@abstractproperty
def index_shape(self):
'''A tuple indicating the number of degrees of freedom in the
element. For example a scalar quadratic Lagrange element on a triangle
would return (6,) while a vector valued version of the same element
would return (6, 2)'''
@abstractproperty
def value_shape(self):
'''A tuple indicating the shape of the element.'''
@property
def fiat_equivalent(self):
'''The FIAT element equivalent to this FInAT element.'''
raise NotImplementedError(
f"Cannot make equivalent FIAT element for {type(self).__name__}"
)
def get_indices(self):
'''A tuple of GEM :class:`Index` of the correct extents to loop over
the basis functions of this element.'''
return tuple(gem.Index(extent=d) for d in self.index_shape)
def get_value_indices(self):
'''A tuple of GEM :class:`~gem.Index` of the correct extents to loop over
the value shape of this element.'''
return tuple(gem.Index(extent=d) for d in self.value_shape)
@abstractmethod
def basis_evaluation(self, order, ps, entity=None, coordinate_mapping=None):
'''Return code for evaluating the element at known points on the
reference element.
:param order: return derivatives up to this order.
:param ps: the point set object.
:param entity: the cell entity on which to tabulate.
:param coordinate_mapping: a
:class:`~.physically_mapped.PhysicalGeometry` object that
provides physical geometry callbacks (may be None).
'''
@abstractmethod
def point_evaluation(self, order, refcoords, entity=None):
'''Return code for evaluating the element at an arbitrary points on
the reference element.
:param order: return derivatives up to this order.
:param refcoords: GEM expression representing the coordinates
on the reference entity. Its shape must be
a vector with the correct dimension, its
free indices are arbitrary.
:param entity: the cell entity on which to tabulate.
'''
@property
def dual_basis(self):
'''Return a dual evaluation gem weight tensor Q and point set x to dual
evaluate a function fn at.
The general dual evaluation is then Q * fn(x) (the contraction of Q
        with fn(x) along the indices of x and any shape introduced by fn).
If the dual weights are scalar then Q, for a general scalar FIAT
element, is a matrix with dimensions
.. code-block:: text
(num_nodes, num_points)
If the dual weights are tensor valued then Q, for a general tensor
valued FIAT element, is a tensor with dimensions
.. code-block:: text
(num_nodes, num_points, dual_weight_shape[0], ..., dual_weight_shape[n])
If the dual basis is of a tensor product or FlattenedDimensions element
with N factors then Q in general is a tensor with dimensions
.. code-block:: text
(num_nodes_factor1, ..., num_nodes_factorN,
num_points_factor1, ..., num_points_factorN,
dual_weight_shape[0], ..., dual_weight_shape[n])
where num_points_factorX are made free indices that match the free
indices of x (which is now a TensorPointSet).
If the dual basis is of a tensor finite element with some shape
(S1, S2, ..., Sn) then the tensor element tQ is constructed from the
base element's Q by taking the outer product with appropriately sized
identity matrices:
.. code-block:: text
tQ = Q ⊗ 𝟙ₛ₁ ⊗ 𝟙ₛ₂ ⊗ ... ⊗ 𝟙ₛₙ
.. note::
When Q is returned, the contraction indices of the point set are
already free indices rather than being left in its shape (as either
``num_points`` or ``num_points_factorX``). This is to avoid index
labelling confusion when performing the dual evaluation
contraction.
.. note::
FIAT element dual bases are built from their ``Functional.pt_dict``
properties. Therefore any FIAT dual bases with derivative nodes
represented via a ``Functional.deriv_dict`` property does not
currently have a FInAT dual basis.
'''
raise NotImplementedError(
f"Dual basis not defined for element {type(self).__name__}"
)
def dual_evaluation(self, fn):
'''Get a GEM expression for performing the dual basis evaluation at
the nodes of the reference element. Currently only works for flat
elements: tensor elements are implemented in
:class:`TensorFiniteElement`.
:param fn: Callable representing the function to dual evaluate.
Callable should take in an :class:`AbstractPointSet` and
return a GEM expression for evaluation of the function at
those points.
:returns: A tuple ``(dual_evaluation_gem_expression, basis_indices)``
where the given ``basis_indices`` are those needed to form a
return expression for the code which is compiled from
``dual_evaluation_gem_expression`` (alongside any argument
multiindices already encoded within ``fn``)
'''
Q, x = self.dual_basis
expr = fn(x)
# Apply targeted sum factorisation and delta elimination to
# the expression
sum_indices, factors = delta_elimination(*traverse_product(expr))
expr = sum_factorise(sum_indices, factors)
# NOTE: any shape indices in the expression are because the
# expression is tensor valued.
assert expr.shape == Q.shape[len(Q.shape)-len(expr.shape):]
shape_indices = gem.indices(len(expr.shape))
basis_indices = gem.indices(len(Q.shape) - len(expr.shape))
Qi = Q[basis_indices + shape_indices]
expri = expr[shape_indices]
evaluation = gem.IndexSum(Qi * expri, x.indices + shape_indices)
# Now we want to factorise over the new contraction with x,
# ignoring any shape indices to avoid hitting the sum-
# factorisation index limit (this is a bit of a hack).
# Really need to do a more targeted job here.
evaluation = gem.optimise.contraction(evaluation, shape_indices)
return evaluation, basis_indices
@abstractproperty
def mapping(self):
'''Appropriate mapping from the reference cell to a physical cell for
all basis functions of the finite element.'''
def entity_support_dofs(elem, entity_dim):
'''Return the map of entity id to the degrees of freedom for which
the corresponding basis functions take non-zero values.
:arg elem: FInAT finite element
:arg entity_dim: Dimension of the cell subentity.
'''
return elem.entity_support_dofs()[entity_dim]
| 2.234375 | 2 |
simulation/experiments/simple/sa_strategy.py | smartarch/recodex-dataset | 0 | 12798829 | <reponame>smartarch/recodex-dataset
from interfaces import AbstractSelfAdaptingStrategy
class SimpleSelfAdaptingStrategy(AbstractSelfAdaptingStrategy):
"""Represents a simple SA controller for activation/deactivation of queues based on current workload.
Activates suspended worker queues when the system gets staturated, deactivates queues that are idle.
"""
def init(self, ts, dispatcher, workers):
# At the beginning, make only the first worker active
for worker in workers:
worker.set_attribute("active", False)
workers[0].set_attribute("active", True)
def do_adapt(self, ts, dispatcher, workers, job=None):
# analyse the state of the worker queues
active = 0
overloaded = 0
empty = [] # active yet empty work queues
inactive = [] # inactive work queues
for worker in workers:
if worker.get_attribute("active"):
active += 1
if worker.jobs_count() == 0:
empty.append(worker)
elif worker.jobs_count() > 1:
overloaded += 1
else:
inactive.append(worker)
# take an action if necessary
        if len(empty) > 1 and overloaded == 0:
empty[0].set_attribute("active", False) # put idle worker to sleep
elif inactive and overloaded > 0:
inactive[0].set_attribute("active", True) # wake inactive worker
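# Sketch of how a simulation loop might drive this controller (the loop and the
# job_stream iterable are assumptions; ts/dispatcher/workers follow the interface above):
#   strategy = SimpleSelfAdaptingStrategy()
#   strategy.init(ts0, dispatcher, workers)
#   for ts, job in job_stream:
#       strategy.do_adapt(ts, dispatcher, workers, job=job)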
| 2.984375 | 3 |
app/port_data_analysis.py | btr260/freestyle-project | 1 | 12798830 | import pandas as pd
import os
import dotenv
from dotenv import load_dotenv
import datetime
import plotly
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from app.other_data_pull import spy_pull, fred_pull
from app.port_data_pull import port_data_pull
from app.portfolio_import import portfolio_import
from app import APP_ENV
# -------------------------------------------------------------------------------------
# FUNCTIONS ---------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
def to_pct(dec):
'''
Converts a numeric value to formatted string for printing and display purposes.
Param: dec (int or float) like 0.403321
Example: to_pct(0.403321)
Returns: 40.33%
'''
return f'{dec:.2%}'
def two_dec(dec):
'''
Converts a numeric value to formatted string for printing and display purposes.
Param: dec (int or float) like 4000.444444
Example: two_dec(4000.444444)
Returns: 4,000.44
'''
return f'{dec:,.2f}'
def pd_describe(mon_len):
'''
Converts a specified number of months to a text description of years and months.
Param: mon_len (int) like 17
    Example: pd_describe(17)
Returns: 1 Year and 5 Months
'''
full_years = int(mon_len / 12)
resid_months = mon_len % 12
if (full_years > 0 and resid_months > 0):
join_str = ' and '
else:
join_str = ''
if full_years == 0:
yr_str = ''
elif full_years == 1:
yr_str = f'{full_years} Year'
else:
yr_str = f'{full_years} Years'
if resid_months == 0:
mon_str = ''
elif resid_months == 1:
mon_str = f'{resid_months} Month'
else:
        mon_str = f'{resid_months} Months'
pd_detail=f'{yr_str}{join_str}{mon_str}'
return pd_detail
def returns(dataset, period_length, min_start, max_end):
'''
    Calculates portfolio performance measures (average and cumulative returns, volatility,
    beta, and Sharpe ratios versus the S&P 500) over the analysis period, and returns a
    summary dict, a cumulative-return dict for plotting, and the monthly portfolio DataFrame.
'''
# Calculate percent returns of individual portfolio positions
working_data = dataset
working_data['mret'] = working_data.groupby('ticker')['adj close'].pct_change()
working_data['mretp1'] = working_data['mret'] + 1
# Calculate share values over time (used to pull analysis period starting portfolio values)
working_data['sh val'] = working_data['qty'] * working_data['close']
# Define analysis period length. For now, analysis period start date is
# based on the data availability of the individual positions. The most recent
# first monthly data point for a given stock in the portfolio becomes the analysis
# start date. This is a limitation of the data/API.
pd_len = period_length
pd_end = max_end
pd_start = max(max_end - (pd_len * 12), min_start)
# Create dataset of asset values by position at the analysis start date
pd_start_val = working_data.loc[working_data['month'] == pd_start]
pd_start_val = pd_start_val.set_index('ticker')
pd_start_val = pd_start_val['sh val'].rename('start val')
    # Calculate cumulative returns and corresponding monthly values of individual
# portfolio positions over time
cum_ret_set = working_data.loc[(working_data['month'] > pd_start) & (working_data['month'] <= pd_end)]
cum_ret_set = cum_ret_set.set_index('ticker')
cum_ret_set['cumret'] = cum_ret_set.groupby('ticker')['mretp1'].cumprod()
cum_ret_set = cum_ret_set.join(pd_start_val, on='ticker')
cum_ret_set['mon val'] = cum_ret_set['start val'] * cum_ret_set['cumret']
# Calculate monthly returns on the total portfolio over time
port_ret = cum_ret_set.groupby('month')[['start val', 'mon val']].sum()
port_ret['cum ret'] = port_ret['mon val'] / port_ret['start val']
port_ret['mon ret'] = port_ret['mon val'].pct_change()
# Replace analysis period start month portfolio return (was na due to
# pct_change() function)
port_ret.loc[pd_start + 1,
'mon ret'] = port_ret.loc[pd_start + 1, 'cum ret'] - 1
# Merge in S&P 500 data from other_data_pull module
port_ret = port_ret.join(spy_join)
# Merge in 1Y constant maturity treasury data from other_data_pull module
port_ret = port_ret.join(fred_join)
# Calculate S&P 500 returns and cumulative return over analysis period
port_ret['spretp1'] = port_ret['spret'] + 1
port_ret['cum spret'] = port_ret['spretp1'].cumprod()
port_ret = port_ret.drop(columns=['spretp1'])
# Calculate portfolio and S&P 500 excess returns over risk free rate
port_ret['exret'] = port_ret['mon ret'] - port_ret['rate']
port_ret['exspret'] = port_ret['spret'] - port_ret['rate']
# Calculate average annual and monthly returns
months = len(port_ret)
years = months / 12
avg_ann_ret = (port_ret.loc[pd_end, 'cum ret'])**(1 / years) - 1
avg_mon_ret = (port_ret.loc[pd_end, 'cum ret'])**(1 / months) - 1
avg_ann_spret = (port_ret.loc[pd_end, 'cum spret'])**(1 / years) - 1
avg_mon_spret = (port_ret.loc[pd_end, 'cum spret'])**(1 / months) - 1
    # Calculate return standard deviations
mon_sdev = port_ret['mon ret'].std()
ann_sdev = mon_sdev * (12 ** .5)
mon_sp_sdev = port_ret['spret'].std()
ann_sp_sdev = mon_sp_sdev * (12 ** .5)
# Calculate portfolio beta (covariance of portfolio and S&P 500 divided by
# volatility of S&P 500)
beta = port_ret.cov().loc['mon ret', 'spret'] / port_ret.cov().loc['spret', 'spret']
# Calculate sharpe ratios
sharpe_port = (port_ret['exret'].mean() / port_ret['exret'].std()) * (12 ** .5)
sharpe_sp = (port_ret['exspret'].mean() / port_ret['exspret'].std()) * (12 ** .5)
# Assemble dictionary of calculation results
ret_calc = {'years_tgt': pd_len, 'years_act': years, 'months_act': months, 'st_date': pd_start.strftime('%Y-%m'),
'end_date': pd_end.strftime('%Y-%m'), 'ann_ret': avg_ann_ret, 'mon_ret': avg_mon_ret, 'ann_sdev': ann_sdev, 'mon_sdev': mon_sdev, 'ann_spret': avg_ann_spret, 'mon_spret': avg_mon_spret, 'ann_sp_sdev': ann_sp_sdev, 'mon_sp_sdev': mon_sp_sdev, 'beta': beta, 'sharpe_port': sharpe_port, 'sharpe_sp': sharpe_sp}
# Create total (cumulative) returns dataset for data visualization
tot_ret_data = port_ret[['cum ret', 'cum spret']] - 1
app_df = pd.DataFrame([[tot_ret_data.index.min() - 1, 0, 0]], columns=['month', 'cum ret', 'cum spret']).set_index('month')
tot_ret_data=tot_ret_data.append(app_df).sort_index()
tot_ret_data.index = tot_ret_data.index.to_series().astype(str)
tot_ret_dict = tot_ret_data.reset_index().to_dict(orient='list')
return ret_calc, tot_ret_dict, port_ret
# -------------------------------------------------------------------------------------
# CODE --------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
if __name__ == '__main__':
    # Load environment variables
load_dotenv()
port_file_name = os.environ.get('PORTFOLIO_FILE_NAME')
ap_api_key = os.environ.get('ALPHAVANTAGE_API_KEY')
fred_api_key = os.environ.get('FRED_API_KEY')
portfolio = portfolio_import(port_file_name)
if APP_ENV == 'development':
# Requires that each of other_data_pull and port_data_pull modules be
# run separately/individually (i.e., not called from within this program)
sub = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(
__file__)), '..', 'data', "working_port.csv"), parse_dates=['timestamp', 'month'])
sub['month']=sub['month'].dt.to_period('M')
spy_join = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(
__file__)), '..', 'data', "working_spy.csv"), parse_dates=['month'])
spy_join['month'] = spy_join['month'].dt.to_period('M')
spy_join=spy_join.set_index('month')
fred_join = pd.read_csv(os.path.join(os.path.dirname(os.path.abspath(
__file__)), '..', 'data', "working_fred.csv"), parse_dates=['month'])
fred_join['month'] = fred_join['month'].dt.to_period('M')
fred_join=fred_join.set_index('month')
maxomin = sub['month'].min()
minomax = sub['month'].max()
else:
# Call on other_data_pull module for S&P 500 and risk free rate data from
# Alpha Vantage and FRED (Federal Reserve Economic Data) APIs
spy_join = spy_pull(ap_api_key)
fred_join = fred_pull(fred_api_key)
# Call on port_data_pull module for monthly data on individual portfolio stocks
# from Alpha Vantage API
sub, minomax, maxomin=port_data_pull(portfolio,ap_api_key)
# Collect and store results, datasets, and chart elements for 1, 2, 3, and 5 year analysis periods
# (but only if sufficient data exists for all portfolio positions). If data are insufficient,
# only store results for complete or near complete periods. For example, if the longest data
# sampling period for one stock in the portfolio is 2 years and 7 months, then the 3-year
# analysis period will record results for a period of 2 years and 7 months, and the loop
# will not bother with the 5-year calculations.
results = []
tot_ret=[]
x = 0
keep = []
figs = []
for i in [1,2,3,5]:
if x==0:
temp_returns, temp_tot, temp_review = returns(sub, i, maxomin, minomax)
results.append(temp_returns)
tot_ret.append(temp_tot)
keep.append(i)
figs.append({'port line': go.Scatter(x=temp_tot['month'], y=temp_tot['cum ret'], name='Portfolio Cumulative Return', line=dict(color='firebrick', width=4)), 'sp line': go.Scatter(x=temp_tot['month'], y=temp_tot['cum spret'], name='S&P 500 Cumulative Return', line=dict(color='royalblue', width=4))})
if temp_returns['years_tgt'] != temp_returns['years_act']:
x = 1
# MAKE CHARTS/TABLES!
axis_font = dict(size=16, family='Times New Roman')
tick_font = dict(size=12, family='Times New Roman')
for i in range(len(figs)):
fig = make_subplots(rows=2, cols=1, vertical_spacing=0.03, row_width=[0.75,0.25], specs=[[{'type':'table'}], [{'type':'scatter'}]])
fig.add_trace(figs[i]['port line'], row=2, col=1)
fig.add_trace(figs[i]['sp line'], row=2, col=1,)
pd_months = results[i]['months_act']
fig.update_layout(title=dict(text=f'Portfolio Performance Report: Monthly Returns over Last {pd_describe(pd_months)}', font=dict(family='Times New Roman', size=20)))
fig.update_layout(xaxis=dict(title=dict(text='Month', font=axis_font), ticks='outside', tickfont=tick_font))
fig.update_layout(yaxis=dict(title=dict(text='Cumulative Monthly Returns (%)', font=axis_font), ticks='outside', tickfont=tick_font, tickformat='.1%'))
fig.update_layout(legend=dict(orientation='h', font=axis_font))
col1 = ['Avg. Annual Return', 'Std. Dev. (Ann.)', 'Sharpe Ratio', 'Beta']
col2 = [to_pct(results[i]['ann_ret']), to_pct(results[i]['ann_sdev']), two_dec(results[i]['sharpe_port']), two_dec(results[i]['beta'])]
col3 = [to_pct(results[i]['ann_spret']), to_pct(results[i]['ann_sp_sdev']), two_dec(results[i]['sharpe_sp']), two_dec(1.00)]
fig.add_trace(go.Table(header=dict(values=['Statistic', 'Portfolio', 'S&P 500']), cells=dict(values=[col1, col2, col3])),row=1,col=1)
fig.show()
| 2.453125 | 2 |
aiomatrix/types/events/base.py | Forden/aiomatrix | 2 | 12798831 | <reponame>Forden/aiomatrix<gh_stars>1-10
import datetime
from pydantic import BaseModel, Extra, root_validator
from ...utils.mixins import ContextVarMixin
class MatrixObject(BaseModel, ContextVarMixin):
raw: dict
class Config:
allow_mutation = True
json_encoders = {datetime.datetime: lambda dt: int(dt.timestamp())}
validate_assignment = True
extra = Extra.ignore
@root_validator(pre=True)
def _parse_raw(cls, values: dict):
values['raw'] = values.copy()
return values
| 2.296875 | 2 |
src/utils/common.py | neerajbafila/pytorch-CNN | 0 | 12798832 | <gh_stars>0
import yaml
import os
import logging
"""Used to perform common tasks
"""
def read_yaml(yaml_path):
with open(yaml_path, 'r') as yaml_file:
content = yaml.safe_load(yaml_file)
logging.info('yaml file loaded')
return content
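# Illustrative call (hypothetical path): config = read_yaml("configs/config.yaml")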
def create_directories(path_to_dir: list):
"""Creates directories if they don't exist
"""
full_path = ""
for path in path_to_dir:
full_path = os.path.join(full_path, path)
os.makedirs(full_path, exist_ok=True)
logging.info(f"Directories created: {full_path}") | 3.03125 | 3 |
experiments/KS/Bayes/.ipynb_checkpoints/experiment_code-checkpoint.py | GJBoth/MultiTaskPINN | 0 | 12798833 | <filename>experiments/KS/Bayes/.ipynb_checkpoints/experiment_code-checkpoint.py<gh_stars>0
from multitaskpinn.utils.tensorboard import Tensorboard
from multitaskpinn.utils.output import progress
from multitaskpinn.model.deepmod import DeepMoD
from typing import Optional
import torch
import time
import numpy as np
from torch.distributions.studentT import StudentT
from sklearn.linear_model import BayesianRidge
def train(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
reg_weight,
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""Stops training when it reaches minimum MSE.
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
# Training
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum(MSE + reg_weight * Reg)
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if iteration % write_iterations == 0:
# ================== Validation costs ================
prediction_test, coordinates = model.func_approx(data_test)
time_derivs_test, thetas_test = model.library((prediction_test, coordinates))
with torch.no_grad():
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
Reg_test = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum(MSE_test + reg_weight * Reg_test)
# ====================== Logging =======================
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test)
# ================== Sparsity update =============
# Updating sparsity and or convergence
if iteration % write_iterations == 0:
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
break
board.close()
def train_bayes_MSE_optim(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""Stops training when it reaches minimum MSE.
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
# Training
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
loss = torch.sum(torch.exp(-model.s[:, 0]) * MSE + torch.sum(model.s[:, 0]))
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if iteration % write_iterations == 0:
# ================== Validation costs ================
prediction_test, coordinates = model.func_approx(data_test)
time_derivs_test, thetas_test = model.library((prediction_test, coordinates))
with torch.no_grad():
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
Reg_test = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum(MSE_test + Reg_test)
# ====================== Logging =======================
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test)
# ================== Sparsity update =============
# Updating sparsity and or convergence
if iteration % write_iterations == 0:
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
break
board.close()
def train_bayes_full_optim(model: DeepMoD,
data: torch.Tensor,
target: torch.Tensor,
optimizer,
sparsity_scheduler,
split: float = 0.8,
log_dir: Optional[str] = None,
max_iterations: int = 10000,
write_iterations: int = 25,
**convergence_kwargs) -> None:
"""Stops training when it reaches minimum MSE.
Args:
model (DeepMoD): [description]
data (torch.Tensor): [description]
target (torch.Tensor): [description]
optimizer ([type]): [description]
sparsity_scheduler ([type]): [description]
log_dir (Optional[str], optional): [description]. Defaults to None.
max_iterations (int, optional): [description]. Defaults to 10000.
"""
start_time = time.time()
board = Tensorboard(log_dir) # initializing tb board
# Splitting data, assumes data is already randomized
n_train = int(split * data.shape[0])
n_test = data.shape[0] - n_train
data_train, data_test = torch.split(data, [n_train, n_test], dim=0)
target_train, target_test = torch.split(target, [n_train, n_test], dim=0)
# Training
print('| Iteration | Progress | Time remaining | Loss | MSE | Reg | L1 norm |')
for iteration in np.arange(0, max_iterations + 1):
# ================== Training Model ============================
prediction, time_derivs, thetas = model(data_train)
MSE = torch.mean((prediction - target_train)**2, dim=0) # loss per output
t = time_derivs[0]
Theta = thetas[0]
if iteration == 0:
sk_reg = BayesianRidge(fit_intercept=False, compute_score=True, alpha_1=0, alpha_2=0, lambda_1=0, lambda_2=0)
sk_reg.fit(thetas[0].cpu().detach().numpy(), time_derivs[0].cpu().detach().numpy())
model.s.data[:, 0] = torch.log(1 / MSE.data)
model.s.data[:, 1] = torch.log(torch.tensor(sk_reg.lambda_))
model.s.data[:, 2] = torch.log(torch.tensor(sk_reg.alpha_))
tau = torch.exp(model.s[:, 0])#torch.exp(-model.s[:, 0])
alpha = torch.exp(model.s[:, 1])#torch.exp(-model.s[:, 1])#torch.exp(model.s[:, 1]) #1 / MSE[0].data
beta = torch.exp(model.s[:, 2])#torch.exp(-model.s[:, 2])#torch.exp(model.s[:, 2]) #torch.tensor(1e-5).to(Theta.device)
M = Theta.shape[1]
N = Theta.shape[0]
# Posterior std and mean
A = torch.eye(M).to(Theta.device) * alpha + beta * Theta.T @ Theta
mn = beta * torch.inverse(A) @ Theta.T @ t
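        # loss_reg below is (up to sign) the log marginal likelihood ("evidence") of the
        # Bayesian linear model t ~ N(Theta @ w, 1/beta) with prior w ~ N(0, 1/alpha);
        # it follows the standard Bayesian ridge evidence expression, with
        # torch.trace(torch.log(A)) standing in for log det A.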
loss_reg = -1/2 * (M * torch.log(alpha)
+ N * torch.log(beta)
- beta * (t - Theta @ mn).T @ (t - Theta @ mn) - alpha * mn.T @ mn
- torch.trace(torch.log(A))
- N * np.log(2*np.pi))
Reg = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs, thetas, model.constraint_coeffs(scaled=False, sparse=True))])
print(N / 2 * tau * MSE - N / 2 * torch.log(tau), loss_reg)
loss = torch.sum((N / 2 * tau * MSE - N / 2 * torch.log(tau)) + loss_reg)
# Optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if iteration % write_iterations == 0:
# ================== Validation costs ================
prediction_test, coordinates = model.func_approx(data_test)
time_derivs_test, thetas_test = model.library((prediction_test, coordinates))
with torch.no_grad():
MSE_test = torch.mean((prediction_test - target_test)**2, dim=0) # loss per output
Reg_test = torch.stack([torch.mean((dt - theta @ coeff_vector)**2)
for dt, theta, coeff_vector in zip(time_derivs_test, thetas_test, model.constraint_coeffs(scaled=False, sparse=True))])
loss_test = torch.sum(MSE_test + Reg_test)
# ====================== Logging =======================
_ = model.sparse_estimator(thetas, time_derivs) # calculating l1 adjusted coeffs but not setting mask
estimator_coeff_vectors = model.estimator_coeffs()
l1_norm = torch.sum(torch.abs(torch.cat(model.constraint_coeffs(sparse=True, scaled=True), dim=1)), dim=0)
progress(iteration, start_time, max_iterations, loss.item(),
torch.sum(MSE).item(), torch.sum(Reg).item(), torch.sum(l1_norm).item())
board.write(iteration, loss, MSE, Reg, l1_norm, model.constraint_coeffs(sparse=True, scaled=True), model.constraint_coeffs(sparse=True, scaled=False), estimator_coeff_vectors, MSE_test=MSE_test, Reg_test=Reg_test, loss_test=loss_test, s=model.s)
# ================== Sparsity update =============
# Updating sparsity and or convergence
if iteration % write_iterations == 0:
sparsity_scheduler(iteration, torch.sum(MSE_test), model, optimizer)
if sparsity_scheduler.apply_sparsity is True:
with torch.no_grad():
model.constraint.sparsity_masks = model.sparse_estimator(thetas, time_derivs)
break
board.close() | 2.21875 | 2 |
Python3/872.py | rakhi2001/ecom7 | 854 | 12798834 | <filename>Python3/872.py
__________________________________________________________________________________________________
sample 24 ms submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from collections import deque
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
def get_leafs(root):
if not root:
return []
if not root.left and not root.right:
return [root.val]
return get_leafs(root.left) + get_leafs(root.right)
return get_leafs(root1) == get_leafs(root2)
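# Both recorded submissions collect the leaf values left-to-right (recursively here,
# iteratively in the variant below) and compare the two sequences positionally.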
__________________________________________________________________________________________________
sample 12908 kb submission
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def leafSimilar(self, root1: TreeNode, root2: TreeNode) -> bool:
return self.getleaf(root1) == self.getleaf(root2)
def getleaf(self, root):
if not root: return []
stack = [root]
out = []
while root.left:
stack.append(root.left)
root = root.left
while stack:
p = stack.pop()
if not p.left and not p.right:
out.append(p.val)
if p.right:
stack.append(p.right)
p = p.right
while p.left:
stack.append(p.left)
p = p.left
return out
__________________________________________________________________________________________________
| 3.703125 | 4 |
neurogenesis/demux.py | juliusf/Neurogenesis | 3 | 12798835 | <filename>neurogenesis/demux.py
import itertools
import hashlib
import os
import stat
from neurogenesis.base import SimulationRun
from neurogenesis.util import Logger
class DynamicLine(): # TODO better name?
def __init__(self):
self.head_part = ""
self.dynamic_part = ""
self.tail_part = ""
self.current_print_representation = ""
self.parameter_representation = {}
def get_current_value_tuple(self):
return (self.head_part, self.current_print_representation, self.tail_part)
def __str__(self):
return self.head_part + self.current_print_representation + self.tail_part
def __repr__(self):
return self.__str__()
def demux_and_write_simulation(args):
ini_file = args['inifile']
out_dir = args['outdir']
config = args['configName']
lines = []
dynamic_lines = []
simulation_runs = {}
config_line = "# This is config %s\n" % (config)
lines.append(config_line)
with open(ini_file) as input_file:
for row in input_file:
if '{' in row.split('#')[0]: # ignore comments (T)
line = create_dynamic_line(row.split('#')[0] + '\n')
dynamic_lines.append(line)
lines.append(line)
else:
lines.append(row)
all_dynamic_lines = [ line.dynamic_part for line in dynamic_lines if len(line.dynamic_part) > 0]
for perm in itertools.product(*all_dynamic_lines):
run = SimulationRun()
for idx, val in enumerate(perm):
dynamic_lines[idx].current_print_representation = val
#run.parameters.append((dynamic_lines[idx].head_part.split()[0], val.strip()))
run.parameters[dynamic_lines[idx].head_part.split()[0]] = val.strip()
hash = create_file_hash(lines)
target_file = "run.sh"
write_sim_data(args, lines, hash, target_file)
run.hash = hash
[run.config.append(line.get_current_value_tuple()) for line in dynamic_lines]
run.path = out_dir + hash + "/"
run.executable_path = out_dir + hash + "/" + target_file
run.config_name= config
simulation_runs[hash] = run
Logger.info("Generated %s simulation configs." % (len(simulation_runs)))
return simulation_runs
def write_sim_data(args, lines, hash, target_file):
full_folder_path = check_and_create_folder(args['outdir'], hash)
write_ini(full_folder_path, lines)
create_bash_script(args, full_folder_path,target_file)
write_additional_files(args, full_folder_path)
def create_file_hash(lines):
hash = hashlib.md5()
[hash.update(str(line).encode('utf-8')) for line in lines]
return hash.hexdigest()
def write_additional_files(args, folder_path):
files = args['additionalFiles'].split()
for file in files:
base_name = os.path.basename(file)
new_file_path = folder_path + '/'+ base_name
f = check_and_create_file(new_file_path)
with open(file) as input_file:
for row in input_file:
f.write(row)
f.close()
input_file.close()
def write_ini(folder_path, file):
full_path = folder_path + "/omnetpp.ini"
if os.path.exists(full_path):
os.remove(full_path)
f = open (full_path, "a")
for line in file:
f.write(str(line))
f.close()
def check_and_create_folder(base_path, folder_name):
full_path = base_path + folder_name
if not os.path.exists(full_path):
os.makedirs(full_path)
return full_path
def check_and_create_file(full_path):
if os.path.exists(full_path):
os.remove(full_path)
f = open (full_path, "a")
return f
def create_bash_script(args, target_folder, target_file):
omnet_exec = args['omnetdir']
inet_dir = args['inetdir']
config_name = args['configName']
script = """
#!/bin/bash
DIR=%s
TARGET=%s
CONFIG=%s
cd $DIR
%s -u Cmdenv -l $DIR/INET -c $CONFIG -n $DIR/inet:$DIR/../tutorials:$DIR/../examples:$DIR/../examples:$TARGET/ $TARGET/omnetpp.ini > /dev/null
rc=$?
if [ $rc -gt 0 ]; then
exit $rc
fi
""" % (inet_dir[:-1], target_folder, config_name, omnet_exec)
full_path = target_folder + "/" + target_file
if os.path.exists(full_path):
os.remove(full_path)
f = open (full_path, "a")
f.write(script)
f.close()
file_handle = os.stat(full_path)
os.chmod(full_path, file_handle.st_mode | stat.S_IEXEC)
def create_dynamic_line(line):
dline = DynamicLine()
head_tokens = line.split('{',1)
if head_tokens[0][-1] == "$":
dline.head_part = head_tokens[0] + '{'
tail_tokens = head_tokens[1].split('}',1)
        if '=' in head_tokens[1]:  # assignment
            assignment = head_tokens[1].split('=')
            dline.head_part += assignment[0] + " = "  # puts the variable name at the beginning
            dynamic_parts = assignment[1].split(",")
dynamic_parts[-1] = dynamic_parts[-1].split('}',1)[0]
dline.dynamic_part = dynamic_parts
dline.tail_part = '}' + tail_tokens[1]
else:
dline.head_part = line
dline.dynamic_part = []
dline.tail_part = ""
else: #legacy config syntax
dline.head_part = head_tokens[0]
tail_tokens = head_tokens[1].split('}')
dline.dynamic_part = tail_tokens[0].split(',')
dline.tail_part = tail_tokens[1]
return dline
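# Illustrative parse of the "${name = v1, v2, ...}" sweep syntax (example values are
# assumptions): for a line like "**.numApps = ${n = 1, 2, 4}\n", head_part keeps the
# text up to the swept variable name (plus an equals sign), dynamic_part holds the
# comma-separated candidate values, and tail_part holds the closing brace plus the rest
# of the line; demux_and_write_simulation then enumerates every combination of all such
# lines via itertools.product.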
| 2.703125 | 3 |
test/test_idseqs_to_mask.py | ulf1/keras-tweaks | 0 | 12798836 | from keras_tweaks import idseqs_to_mask
import tensorflow as tf
class AllTests(tf.test.TestCase):
def test1(self):
idseqs = [[1, 1, 0, 0, 2, 2, 3], [1, 3, 2, 1, 0, 0, 2]]
target = tf.sparse.SparseTensor(
indices=(
[0, 0, 1],
[0, 1, 1],
[0, 2, 0],
[0, 3, 0],
[0, 4, 2],
[0, 5, 2],
[1, 0, 1],
[1, 2, 2],
[1, 3, 1],
[1, 4, 0],
[1, 5, 0]),
values=[True for _ in range(11)],
dense_shape=(2, 6, 3))
masks = idseqs_to_mask(
idseqs, n_seqlen=6, n_vocab_sz=3, ignore=[3], dense=False)
self.assertAllEqual(
tf.sparse.to_dense(masks), tf.sparse.to_dense(target))
self.assertAllEqual(masks.dtype, target.dtype)
self.assertAllEqual(masks.shape, target.shape)
def test2(self):
idseqs = [[1, 1, 0, 0, 2, 2, 3], [1, 3, 2, 1, 0, 0, 2]]
target = tf.sparse.SparseTensor(
indices=(
[0, 0, 1],
[0, 1, 1],
[0, 2, 0],
[0, 3, 0],
[0, 4, 2],
[0, 5, 2],
[1, 0, 1],
[1, 2, 2],
[1, 3, 1],
[1, 4, 0],
[1, 5, 0]),
values=[1 for _ in range(11)],
dense_shape=(2, 6, 3))
masks = idseqs_to_mask(
idseqs, n_seqlen=6, n_vocab_sz=3, ignore=[3],
dense=False, dtype=tf.uint8)
self.assertAllEqual(
tf.sparse.to_dense(masks), tf.sparse.to_dense(target))
# self.assertAllEqual(masks.dtype, target.dtype)
self.assertAllEqual(masks.shape, target.shape)
def test3(self):
idseqs = [[1, 1, 0, 0, 2, 2, 3], [1, 3, 2, 1, 0, 0, 2]]
target = tf.sparse.SparseTensor(
indices=(
[0, 0, 1],
[0, 1, 1],
[0, 2, 0],
[0, 3, 0],
[0, 4, 2],
[0, 5, 2],
[1, 0, 1],
[1, 2, 2],
[1, 3, 1],
[1, 4, 0],
[1, 5, 0]),
values=[1.0 for _ in range(11)],
dense_shape=(2, 6, 3))
masks = idseqs_to_mask(
idseqs, n_seqlen=6, n_vocab_sz=3, ignore=[3],
dense=False, dtype=tf.float64)
self.assertAllEqual(
tf.sparse.to_dense(masks), tf.sparse.to_dense(target))
# self.assertAllEqual(masks.dtype, target.dtype)
self.assertAllEqual(masks.shape, target.shape)
def test4(self):
idseqs = [[1, 1, 0, 0, 2, 2, 3], [1, 3, 2, 1, 0, 0, 2]]
target = tf.sparse.SparseTensor(
indices=(
[0, 2, 0],
[0, 3, 0],
[0, 4, 1],
[0, 5, 1],
[1, 1, 2],
[1, 2, 1],
[1, 4, 0],
[1, 5, 0]),
values=[True for _ in range(8)],
dense_shape=(2, 6, 3))
masks = idseqs_to_mask(
idseqs, n_seqlen=6, ignore=[1],
dense=False, dtype=tf.bool)
self.assertAllEqual(
tf.sparse.to_dense(masks), tf.sparse.to_dense(target))
self.assertAllEqual(masks.dtype, target.dtype)
self.assertAllEqual(masks.shape, target.shape)
def test5(self):
idseqs = [[1, 1, 0, 0, 2, 2, 3], [1, 3, 2, 1, 0, 0, 2]]
target = tf.sparse.SparseTensor(
indices=(
[0, 2, 0],
[0, 3, 0],
[0, 4, 1],
[0, 5, 1],
[1, 1, 2],
[1, 2, 1],
[1, 4, 0],
[1, 5, 0]),
values=[True for _ in range(8)],
dense_shape=(2, 6, 3))
masks = idseqs_to_mask(
idseqs, n_seqlen=6, ignore=[1],
dense=True, dtype=tf.bool)
self.assertAllEqual(masks, tf.sparse.to_dense(target))
self.assertAllEqual(masks.dtype, target.dtype)
self.assertAllEqual(masks.shape, target.shape)
if __name__ == "__main__":
tf.test.main()
| 2.375 | 2 |
project_dir/urls.py | cybrvybe/FactorBeats-Platform | 0 | 12798837 | <reponame>cybrvybe/FactorBeats-Platform
"""project_dir URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework import routers
import artists.views as artist_views
import beats.views as beat_views
from rest_framework_jwt.views import obtain_jwt_token
from users.views import current_app_user, AppUserList
router = routers.DefaultRouter()
router_item_list = [
{
"plural": r"artists",
"view": artist_views.ArtistView,
"singular": "artist"
},
{
"plural": r"social-links",
"view": artist_views.SocialLinkView,
"singular": "social-link"
},
{
"plural": r"instrumentals",
"view": beat_views.InstrumentalView,
"singular": "instrumental"
},
{
"plural": r"instrumental-collections",
"view": beat_views.InstrumentalCollectionView,
"singular": "instrumental-collection"
}
]
for router_item in router_item_list:
router.register(
router_item["plural"],
router_item["view"],
router_item["singular"]
)
urlpatterns = [
path('admin/', admin.site.urls),
path(
"api/",
include(router.urls)
),
path(
"infinite-api/",
beat_views.ReactInfiniteInstrumentalView.as_view(),
name = "infinite-react"
),
path('token-auth/', obtain_jwt_token),
path('current_user/', current_app_user),
path('users/', AppUserList.as_view())
]
| 2.40625 | 2 |
skbio/sequence/__init__.py | Kleptobismol/scikit-bio | 0 | 12798838 | <reponame>Kleptobismol/scikit-bio
r"""
Biological sequences (:mod:`skbio.sequence`)
============================================
.. currentmodule:: skbio.sequence
This module provides functionality for working with biological sequences,
including generic sequences, nucleotide sequences, DNA sequences, and RNA
sequences. Class methods and attributes are also available to obtain valid
character sets, complement maps for different sequence types, and for
obtaining degenerate character definitions. Additionally, this module defines the
``GeneticCode`` class, which represents an immutable object that translates RNA
or DNA strings to amino acid sequences.
Classes
-------
.. autosummary::
:toctree: generated/
BiologicalSequence
NucleotideSequence
DNASequence
RNASequence
ProteinSequence
GeneticCode
Functions
---------
.. autosummary::
:toctree: generated/
genetic_code
Exceptions
----------
.. autosummary::
:toctree: generated/
BiologicalSequenceError
GeneticCodeError
GeneticCodeInitError
InvalidCodonError
Examples
--------
>>> from skbio.sequence import DNASequence, RNASequence
New sequences are created with optional id and description fields.
>>> d1 = DNASequence('ACC--G-GGTA..')
>>> d1 = DNASequence('ACC--G-GGTA..',id="seq1")
>>> d1 = DNASequence('ACC--G-GGTA..',id="seq1",description="GFP")
New sequences can also be created from existing sequences, for example as their
reverse complement or degapped (i.e., unaligned) version.
>>> d2 = d1.degap()
>>> d1
<DNASequence: ACC--G-GGT... (length: 13)>
>>> d2
<DNASequence: ACCGGGTA (length: 8)>
>>> d3 = d2.reverse_complement()
>>> d3
<DNASequence: TACCCGGT (length: 8)>
It's also straight-forward to compute distances between sequences (optionally
using user-defined distance metrics, default is Hamming distance) for use in
sequence clustering, phylogenetic reconstruction, etc.
>>> d4 = DNASequence('GACCCGCT')
>>> d5 = DNASequence('GACCCCCT')
>>> d3.distance(d4)
0.25
>>> d3.distance(d5)
0.375
Class-level methods contain information about the molecule types.
>>> DNASequence.iupac_degeneracies()['B']
set(['C', 'T', 'G'])
>>> RNASequence.iupac_degeneracies()['B']
set(['C', 'U', 'G'])
>>> DNASequence.is_gap('-')
True
Creating and using a ``GeneticCode`` object
>>> from skbio.sequence import genetic_code
>>> from pprint import pprint
>>> sgc = genetic_code(1)
>>> sgc
GeneticCode(FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG)
>>> sgc['UUU'] == 'F'
True
>>> sgc['TTT'] == 'F'
True
>>> sgc['F'] == ['TTT', 'TTC'] #in arbitrary order
True
>>> sgc['*'] == ['TAA', 'TAG', 'TGA'] #in arbitrary order
True
Retrieving the anticodons of the object
>>> pprint(sgc.anticodons)
{'*': ['TTA', 'CTA', 'TCA'],
'A': ['AGC', 'GGC', 'TGC', 'CGC'],
'C': ['ACA', 'GCA'],
'D': ['ATC', 'GTC'],
'E': ['TTC', 'CTC'],
'F': ['AAA', 'GAA'],
'G': ['ACC', 'GCC', 'TCC', 'CCC'],
'H': ['ATG', 'GTG'],
'I': ['AAT', 'GAT', 'TAT'],
'K': ['TTT', 'CTT'],
'L': ['TAA', 'CAA', 'AAG', 'GAG', 'TAG', 'CAG'],
'M': ['CAT'],
'N': ['ATT', 'GTT'],
'P': ['AGG', 'GGG', 'TGG', 'CGG'],
'Q': ['TTG', 'CTG'],
'R': ['ACG', 'GCG', 'TCG', 'CCG', 'TCT', 'CCT'],
'S': ['AGA', 'GGA', 'TGA', 'CGA', 'ACT', 'GCT'],
'T': ['AGT', 'GGT', 'TGT', 'CGT'],
'V': ['AAC', 'GAC', 'TAC', 'CAC'],
'W': ['CCA'],
'Y': ['ATA', 'GTA']}
NucleotideSequences can be translated using a ``GeneticCode`` object.
>>> d6 = DNASequence('ATGTCTAAATGA')
>>> from skbio.sequence import genetic_code
>>> gc = genetic_code(11)
>>> gc.translate(d6)
<ProteinSequence: MSK* (length: 4)>
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from ._exception import (BiologicalSequenceError, GeneticCodeError,
GeneticCodeInitError, InvalidCodonError)
from ._sequence import (BiologicalSequence, NucleotideSequence, DNASequence,
RNASequence, ProteinSequence, DNA, RNA, Protein)
from ._genetic_code import GeneticCode, genetic_code
__all__ = ['BiologicalSequenceError', 'GeneticCodeError',
'GeneticCodeInitError', 'InvalidCodonError', 'BiologicalSequence',
'NucleotideSequence', 'DNASequence', 'RNASequence',
'ProteinSequence', 'DNA', 'RNA', 'Protein', 'GeneticCode',
'genetic_code']
from numpy.testing import Tester
test = Tester().test
| 2.53125 | 3 |
pyheom/noise_decomposition.py | tatsushi-ikeda/pyheom | 5 | 12798839 | <gh_stars>1-10
#
# LibHEOM: Copyright (c) <NAME>
# This library is distributed under BSD 3-Clause License.
# See LINCENSE.txt for licence.
# ------------------------------------------------------------------------
import numpy as np
import scipy as sp
import scipy.sparse
import itertools
from collections import OrderedDict
from .predefined_noise import *
from .summation_over_poles import *
from .commuting_matrix import *
from .pade_spectral_decomposition import *
fsd_coeffs = {
100.0: [[1, 1.35486, 1.34275],
[2, 5.50923, 0.880362],
[3, 0.553793, -0.965783]],
1000.0: [[1, 79.1394, 0.637942],
[1, 0.655349, 0.666920],
[2, 11.6632, 0.456271],
[2, 1.54597, 0.740457],
[3, 3.39011, 0.626892]],
}
def calc_S_msd(gamma_k, a_k, T, n_ltc):
def cot(x):
return 1/np.tan(x)
n_m = a_k.shape[0]
n_k = n_ltc
nu_k = np.zeros(n_k)
s_k = np.zeros(n_m + n_k, dtype=a_k.dtype)
S_delta = 0.0
for k in range(n_k):
nu_k[k] = 2*np.pi*(k + 1)*T
if np.any(np.abs(gamma_k - nu_k[k]) < (np.finfo(float).eps)):
            raise Exception('[Error] Bath frequency #{} is degenerate.'.format(k))
# r_k[m] -->
for m in range(n_m):
s_k[m] = -2*a_k[m]*cot(gamma_k[m]/(2*T))/2.0
for k in range(n_k):
s_k[n_m+k] = 0.0
for m in range(n_m):
s_k[n_m+k] += -4*a_k[m]/(nu_k[k]**2 - gamma_k[m]**2)
s_k[n_m+k] *= T*nu_k[k]
for m in range(n_m):
inner = 1/gamma_k[m]**2 - 1/(2*T*gamma_k[m])*cot(gamma_k[m]/(2*T))
for k in range(n_k):
inner -= 2/(nu_k[k]**2 - gamma_k[m]**2)
S_delta += -2*T*a_k[m]*inner
result = OrderedDict()
def put_coeff(a, m, coeff):
if (a, m) in result:
result[(a, m)] += coeff
else:
result[(a, m)] = coeff
put_coeff(np.inf, 0, S_delta)
for k in range(n_m):
put_coeff(gamma_k[k], 0, s_k[k])
for k in range(n_k):
put_coeff(nu_k[k], 0, s_k[k + n_m])
return result
def calc_noise_time_domain(J, T, type_ltc, **kwargs):
if (type_ltc == 'none'):
n_list = [[0, T, 1, 0]]
return calc_S_from_poles(J.poles, n_list), calc_A_from_poles(J.poles)
elif (type_ltc == 'msd'):
n_msd = kwargs['n_msd']
A = calc_A_from_poles(J.poles)
gamma_k = np.zeros(len(A), dtype=np.complex128)
a_k = np.zeros(len(A), dtype=np.complex128)
for k, (gamma, l) in enumerate(A.keys()):
if l != 0:
raise Exception('[Error] msd accepts only first-order poles')
gamma_k[k] = gamma
a_k[k] = A[(gamma, 0)]
return calc_S_msd(gamma_k, a_k, T, n_msd), A
elif (type_ltc == 'psd' or type_ltc == 'psd+fsd' ):
coeffs = []
coeff_0 = 0
if type_ltc == 'psd+fsd':
n_fsd_rec = kwargs['n_fsd_rec']
chi_fsd = kwargs['chi_fsd']
# calc fsd coeffs
T_n = T
for i in range(n_fsd_rec):
T_np1 = T_n*chi_fsd
coeff_0 += T_n - T_np1
T_n = T_np1
for j, a, b in fsd_coeffs[chi_fsd]:
coeffs.append([j, a, b, T_n])
T_0 = T_n
else:
T_0 = T
# calc psd coeffs
n_psd = kwargs['n_psd']
type_psd = kwargs['type_psd']
xi, eta, R_1, T_3 = psd(n_psd, type_psd)
# collect poles
poles = OrderedDict()
## psd poles
poles[(0, 1, 0)] = T_0
if (R_1 != 0):
poles[(0, 0, 0)] = R_1
if (T_3 != 0):
poles[(0, 0, 1)] = T_3
for p in range(n_psd):
poles[(T_0*xi[p], 1, 0)] = 2*eta[p]*T_0
## fsd poles
poles[(0, 1, 0)] += coeff_0
for j, a, b, T_n in coeffs:
poles[(T_n/a, j, 0)] = b*(T_n/a)**(2*j-1)
n_list = [[a, b, m, n] for (a, m, n), b in poles.items()]
return calc_S_from_poles(J.poles, n_list), calc_A_from_poles(J.poles)
else:
raise Exception('[Error] Unknown ltc')
def calc_noise_params(S, A):
# Calculate Basis Degeneracy
phi_deg_dict = OrderedDict()
for gamma, n in itertools.chain(S.keys(), A.keys()):
if (gamma == np.inf):
continue
if gamma in phi_deg_dict:
phi_deg_dict[gamma] = max(phi_deg_dict[gamma], n + 1)
else:
phi_deg_dict[gamma] = n + 1
phi_dim = sum((n for n in phi_deg_dict.values()))
#
phi = []
phi_0 = np.zeros((phi_dim), np.complex128)
gamma = sp.sparse.lil_matrix((phi_dim, phi_dim), dtype=np.complex128)
sigma = np.ones((phi_dim), np.complex128)
s_vec = np.zeros((phi_dim), np.complex128)
a_vec = np.zeros((phi_dim), np.complex128)
s_mat = sp.sparse.lil_matrix((phi_dim, phi_dim), dtype=np.complex128)
a_mat = sp.sparse.lil_matrix((phi_dim, phi_dim), dtype=np.complex128)
ctr = 0
for gamma_n, deg_max in phi_deg_dict.items():
for deg in range(deg_max):
phi.append((gamma_n, deg))
phi_0[ctr] = 1 if deg == 0 else 0
gamma[ctr,ctr] = gamma_n
if deg > 0:
gamma[ctr,ctr-1] = -deg
if ((gamma_n, deg) in S):
s_vec[ctr] = S[(gamma_n, deg)]
if ((gamma_n, deg) in A):
a_vec[ctr] = A[(gamma_n, deg)]
ctr += 1
block_size = deg+1
s_mat[ctr-block_size:ctr, ctr-block_size:ctr] \
= get_commuting_matrix(s_vec[ctr-block_size:ctr],
gamma[ctr-block_size:ctr, ctr-block_size:ctr].todense(),
sigma[ctr-block_size:ctr])
a_mat[ctr-block_size:ctr, ctr-block_size:ctr] \
= get_commuting_matrix(a_vec[ctr-block_size:ctr],
gamma[ctr-block_size:ctr, ctr-block_size:ctr].todense(),
sigma[ctr-block_size:ctr])
S_delta = 0.0
if (np.inf, 0) in S:
S_delta = S[(np.inf, 0)]
return dict(gamma = gamma,
sigma = sigma,
phi_0 = phi_0,
s = s_mat,
S_delta = S_delta,
a = a_mat)
def noise_decomposition(J, T, type_ltc, **kwargs):
return calc_noise_params(*calc_noise_time_domain(J, T, type_ltc, **kwargs))
# noise = calc_noise_params(*calc_noise_time_domain(None, T, 'psd+fsd', n_psd = 1, type_psd = 'N/N', n_fsd_rec=1, chi_fsd=100.0))
# noise = calc_noise_params(*calc_noise_time_domain(J, T, 'psd+fsd',
# n_psd = 1, type_psd = 'N/N',
# n_fsd_rec=1, chi_fsd=100.0))
# noise = calc_noise_params(*calc_noise_time_domain(J, T, 'psd',
# n_psd = 1, type_psd = 'N-1/N'))
# noise = calc_noise_params(*calc_noise_time_domain(J, T, 'msd',
# n_msd = 10))
# noise = calc_noise_params(*calc_noise_time_domain(J, T, 'NONE'))
| 1.65625 | 2 |
snippets/streaming_indicators_app.py | MarcSkovMadsen/panel-visuals | 0 | 12798840 | <filename>snippets/streaming_indicators_app.py
import numpy as np
import pandas as pd
import panel as pn
pn.extension(sizing_mode='stretch_width')
layout = pn.layout.FlexBox(*(
pn.indicators.Trend(
data={'x': list(range(10)), 'y': np.random.randn(10).cumsum()},
width=150,
height=100,
plot_type=pn.indicators.Trend.param.plot_type.objects[i%4]
) for i in range(28)
))
def stream():
for trend in layout:
trend.stream(
{
'x': [trend.data['x'][-1]+1],
'y': [trend.data['y'][-1]+np.random.randn()]
}, rollover=20)
cb = pn.state.add_periodic_callback(stream, 500)
pn.template.FastListTemplate(
site="Panel",
title="Streaming Trend Indicators",
main=[layout,],
header_background="#428bca"
).servable() | 2.359375 | 2 |
test_cast/19-10-17/Animal-shelter/solution.py | qinggniq/Algorithm-Practice | 0 | 12798841 | <gh_stars>0
class Animal:
def __init__(self, id: int, isDog: bool):
self.id = id
self.isDog = isDog
class Solution:
def __init__(self):
self.catQueue = []
self.dogQueue = []
self.time = 0
def enqueue(self, animal: list):
if animal[1]:
self.dogQueue.append((animal, self.time))
else:
self.catQueue.append((animal, self.time))
self.time += 1
    def dequeueAny(self) -> list:
        # Pop the oldest animal overall; guard against empty queues so we
        # never index or pop from an empty list.
        if not self.catQueue and not self.dogQueue:
            return [-1, -1]
        if not self.dogQueue or (self.catQueue and self.catQueue[0][1] <= self.dogQueue[0][1]):
            return self.catQueue.pop(0)[0]
        return self.dogQueue.pop(0)[0]
def dequeueDog(self) -> Animal:
if len(self.dogQueue) != 0:
return self.dogQueue.pop(0)[0]
return [-1, -1]
def dequeueCat(self) -> Animal:
if len(self.catQueue) != 0:
return self.catQueue.pop(0)[0]
return [-1, -1]
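
# Illustrative usage (added example, not part of the original solution).
# Animals are passed as [id, isDog] lists, matching how enqueue() indexes
# its argument.
if __name__ == '__main__':
    shelter = Solution()
    shelter.enqueue([0, 0])      # cat 0
    shelter.enqueue([1, 1])      # dog 1
    shelter.enqueue([2, 0])      # cat 2
    print(shelter.dequeueAny())  # [0, 0] -- the oldest animal is cat 0
    print(shelter.dequeueDog())  # [1, 1]
    print(shelter.dequeueCat())  # [2, 0]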
| 3.296875 | 3 |
composer/models/resnet9_cifar10/__init__.py | ajaysaini725/composer | 0 | 12798842 | <reponame>ajaysaini725/composer<filename>composer/models/resnet9_cifar10/__init__.py
# Copyright 2021 MosaicML. All Rights Reserved.
from composer.models.resnet9_cifar10.model import CIFAR10_ResNet9 as CIFAR10_ResNet9
from composer.models.resnet9_cifar10.resnet9_cifar10_hparams import CIFARResNet9Hparams as CIFARResNet9Hparams
_task = 'Image Classification'
_dataset = 'CIFAR10'
_name = 'ResNet9'
_quality = '92.9'
_metric = 'Top-1 Accuracy'
_ttt = '5m'
_hparams = 'resnet9_cifar10.yaml'
| 1.171875 | 1 |
movielens-ml.py | oFwano/Movielens-Datascience-Project | 0 | 12798843 | <gh_stars>0
#!/usr/bin/env python
# coding: utf-8
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csr_matrix
from sklearn.neighbors import NearestNeighbors
from sklearn.svm import SVC
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
def get_movie_idx_from_name(movie_title,movie_to_idx):
for title,idx in movie_to_idx.items():
if title == movie_title:
return idx
return None
def main():
ratings_path = "Data/ml-latest-small/ratings.csv"
movies_path = "Data/ml-latest-small/movies.csv"
#change the movie title to recommend a different movie
movie_title = "Thor (2011)"
df_movies = pd.read_csv(movies_path,usecols=['movieId','title'])
df_ratings = pd.read_csv(ratings_path,usecols=['userId','movieId','rating'])
df_movierating = pd.merge(df_ratings, df_movies, on='movieId')
df_plt = pd.DataFrame(df_ratings.groupby('movieId').size(), columns=['count'])
df_plt = df_plt.sort_values('count',ascending=False)
df_plt = df_plt.reset_index(drop=True)
plt1 = df_plt.plot(figsize=(30,15),fontsize=15)
plt1.set_xlabel("movieId")
plt1.set_ylabel("Number of ratings")
plt1.set_title("Number of Ratings per Movie")
plt1 = plt1.get_figure()
plt1.savefig('plot/ratingfq',bbox_inches='tight')
#--------------- item based recommender system ---------------#
df_rating_mean = df_ratings.groupby(['movieId'],as_index = False, sort = False).mean()
df_rating_mean = df_rating_mean.drop(columns=['userId'])
df_rating_mean = df_rating_mean.rename(columns = {'rating': 'rating_mean'})
df_rating_norm = pd.merge(df_rating_mean,df_ratings,on='movieId')
df_rating_norm['rate_norm'] = df_rating_norm['rating']-df_rating_norm['rating_mean']
df_movies2 = df_rating_mean.merge(df_movies,on='movieId')
#-- reshape the data for indexing --#
pivot = df_ratings.pivot(index='movieId',columns='userId', values = 'rating').fillna(0)
csr_pivot = csr_matrix(pivot.values)
movie_list = list(df_movies.set_index('movieId').loc[pivot.index].title)
movie_to_idx = dict(map(lambda t: (t[1],t[0]), enumerate(movie_list))) #dictionary
idx_to_movie = dict((v, k) for k, v in movie_to_idx.items()) # reverse dictionary
idx = get_movie_idx_from_name(movie_title,movie_to_idx) # movie index
#-- use machine learning (knn) to get closest movies to recommend --#
knn_model_recommender = NearestNeighbors(metric='cosine',algorithm='brute',n_neighbors=20,n_jobs=-1)
knn_model_recommender.fit(pivot)
distances, indices = knn_model_recommender.kneighbors(csr_pivot[idx], n_neighbors=11)
tuple_dist_idx = sorted(list(zip(indices.squeeze().tolist(), distances.squeeze().tolist())), key=lambda x: x[1])[:0:-1]
print('Recommendations for {} using a knn approach:'.format(movie_title))
for i, (idx, dist) in enumerate(tuple_dist_idx):
df_avg = df_movies2[df_movies2['title'] == idx_to_movie[idx]]
avgr = round(df_avg, 3)
print('{0}: {1}, with distance of {2} and average rating of {3}'.format(i+1, idx_to_movie[idx], dist, avgr['rating_mean'].iloc[0]))
#--------------- machine learning ---------------#
X_input = df_rating_norm[['movieId','userId']].values
y_input = df_rating_norm['rate_norm'].values
y_input = y_input.astype('int')
X_train, X_test, y_train, y_test = train_test_split(X_input, y_input, test_size=0.3)
#-- commenting out SVC because it takes too long to run, accuracy score for SVC is written in comment below --#
X_scaled = preprocessing.scale(X_train)
Xtest_scaled = preprocessing.scale(X_test)
#svc_model = SVC()
#svc_model.fit(X_scaled, y_train)
#y_predicted = svc_model.predict(Xtest_scaled)
#print("\nml model SVC")
#print(accuracy_score(y_test, y_predicted)) # 0.760760046015312
bayes_model = MultinomialNB()
bayes_model.fit(X_train, y_train)
y_predicted = bayes_model.predict(X_test)
print("\n ml model MultinomialNB")
print(accuracy_score(y_test, y_predicted)) # 0.044858021222438926
knnc_model = KNeighborsClassifier(n_neighbors=5)
knnc_model.fit(X_train, y_train)
y_predicted = knnc_model.predict(X_test)
print("\n ml model KNN Classifier")
print(accuracy_score(y_test, y_predicted)) # 0.7298601699117384
gnb_model = GaussianNB()
gnb_model.fit(X_train, y_train)
y_predicted = gnb_model.predict(X_test)
print("\n ml model Gaussian NB")
print(accuracy_score(y_test, y_predicted)) # 0.7639416878780867
if __name__ == '__main__':
main()
| 3.03125 | 3 |
apis_v1/tests/test_views_voter_email_address_retrieve.py | ecluster/WeVoteServer | 0 | 12798844 | # apis_v1/test_views_voter_email_address_save.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from django.urls import reverse
from django.test import TestCase
from email_outbound.models import EmailAddress, EmailManager
import json
class WeVoteAPIsV1TestsVoterEmailAddressRetrieve(TestCase):
databases = ["default", "readonly"]
def setUp(self):
self.generate_voter_device_id_url = reverse("apis_v1:deviceIdGenerateView")
self.voter_create_url = reverse("apis_v1:voterCreateView")
self.voter_email_address_save_url = reverse("apis_v1:voterEmailAddressSaveView")
self.voter_email_address_retrieve_url = reverse("apis_v1:voterEmailAddressRetrieveView")
def test_retrieve_with_no_voter_device_id(self):
response = self.client.get(self.voter_email_address_retrieve_url)
json_data = json.loads(response.content.decode())
self.assertEqual('status' in json_data, True, "status expected in the json response, and not found")
self.assertEqual(json_data['status'],
"VALID_VOTER_DEVICE_ID_MISSING",
"status = {status} Expected status VALID_VOTER_DEVICE_ID_MISSING"
"voter_device_id: {voter_device_id}".format(status=json_data['status'],
voter_device_id=json_data['voter_device_id']))
self.assertEqual(len(json_data["email_address_list"]), 0,
"Expected email_address_list to have length 0, "
"actual length = {length}".format(length=len(json_data['email_address_list'])))
def test_retrieve_with_voter_device_id(self):
response = self.client.get(self.generate_voter_device_id_url)
json_data = json.loads(response.content.decode())
voter_device_id = json_data['voter_device_id'] if 'voter_device_id' in json_data else ''
# Create a voter so we can test retrieve
response2 = self.client.get(self.voter_create_url, {'voter_device_id': voter_device_id})
json_data2 = json.loads(response2.content.decode())
self.assertEqual('status' in json_data2, True,
"status expected in the voterEmailAddressRetrieveView json response but not found")
self.assertEqual('voter_device_id' in json_data2, True,
"voter_device_id expected in the voterEmailAddressRetrieveView json response but not found")
| 2.34375 | 2 |
src/SpartanTicTacToe.py | SpartanEngineer/SpartanTicTacToe | 0 | 12798845 | <gh_stars>0
import copy, random, tkFont, time, webbrowser, os
from Tkinter import *
from functools import partial
from PIL import ImageTk
#-------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------
#Author: <NAME>
#Date: 01/29/17
#Description: This code uses machine learning to train a tic tac toe game AI.
# It also includes code to allow playing against the trained AI via a Tkinter
# GUI.
#-------------------------------------------------------------------------------------
#Run via Python 2.7
#REQUIRES: pillow (python imaging library)
#-------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------
#-------------------------------------------------------------------------------------
#the board is a list of size 9 that contains information about the current tic
# tac toe board
#the board maps as follows:
# 0 == top left space
# 1 == top center space
# 2 == top right space
# 3 == center left space
# 4 == center center space
# 5 == center right space
# 6 == bottom left space
# 7 == bottom center space
# 8 == bottom right space
#value mapping for a space in each board:
# 0 == empty space
# 1 == x space
# 2 == o space
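# Illustrative example (added): the board
#   [1, 1, 0,
#    0, 2, 0,
#    0, 0, 0]
# encodes X in the top left and top center spaces, O in the center space, and
# every other space empty.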
rootDirectory = os.path.dirname(os.path.realpath(__file__))
print('----------------------------------------------------')
print('----------------------------------------------------')
print('https://www.spartanengineer.com')
print('----------------------------------------------------')
print('----------------------------------------------------')
learnConstant = 0.1 # learning constant
def getCheckLists(board):
b = []
b.append(board[0:3]) #horizontal
b.append(board[3:6])
b.append(board[6:9])
b.append([board[y] for y in range(9) if(y == 0 or y == 3 or y == 6)]) #vertical
b.append([board[y] for y in range(9) if(y == 1 or y == 4 or y == 7)])
b.append([board[y] for y in range(9) if(y == 2 or y == 5 or y == 8)])
b.append([board[y] for y in range(9) if(y == 0 or y == 4 or y == 8)]) #diagonal
b.append([board[y] for y in range(9) if(y == 2 or y == 4 or y == 6)])
return b
#true if there is three in a row, false if not
#x parameter should be 1(x) or 2(o)
def threeInARow(board, x, a=[]):
if(a == []):
a = getCheckLists(board)
for c in a:
if(c.count(x) == 3):
return True
return False
#returns the # of places where there is 2 x/o's & an empty space in a row/col/diagonal
def twoInARow(board, x, a=[]):
result = 0
if(a == []):
a = getCheckLists(board)
for c in a:
if(c.count(x) == 2 and c.count(0) == 1):
result += 1
return result
#returns the # of places where there is an x/o and 2 empty spaces in a row/col/diagonal
def openOne(board, x, a=[]):
result = 0
if(a == []):
a = getCheckLists(board)
for c in a:
if(c.count(x) == 1 and c.count(0) == 2):
result += 1
return result
#this function determines if the game is: ongoing, a draw, a win for x, a win for o
def getGameState(board):
checkLists = getCheckLists(board)
if(threeInARow(board, 1, checkLists)):
return 1 #x win
elif(threeInARow(board, 2, checkLists)):
return 2 #o win
elif(board.count(0) != 0):
return 3 #game still going
else:
return 0 #draw
conv = {True:1, False:0}
#returns a list of the features used to evaluate the value of a tic tac toe board
#the features are as follows:
# is there 3 x's in a row (0 or 1)
# is there 3 o's in a row (0 or 1)
# # of places where there is 2 x's beside an empty space
# # of places where there is 2 o's beside an empty space
# # of places where there is an x and two empty spaces in a row/col/diagonal
# # of places where there is an o and two empty spaces in a row/col/diagonal
def getFeatures(board):
a = board
checkLists = getCheckLists(a)
return [conv[threeInARow(a, 1, checkLists)],
conv[threeInARow(a, 2, checkLists)],
twoInARow(a, 1, checkLists),
twoInARow(a, 2, checkLists),
openOne(a, 1, checkLists),
openOne(a, 2, checkLists)]
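# Worked example (added for illustration): for the board
#   [1, 1, 0, 0, 2, 0, 0, 0, 0]   (X at positions 0 and 1, O at position 4)
# getFeatures returns [0, 0, 1, 0, 1, 2]: no three-in-a-row for either side,
# one line with two X's next to an empty space, none for O, one line holding a
# lone X with two empty spaces, and two such lines for O.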
#returns the value of a board based on the features (of the board) and their respective weights
def estimateMoveValue(features, weights):
result = 0
for i in range(len(features)):
result += (features[i] * weights[i])
return result
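# Note (added): this is simply the dot product of the feature vector and the
# weight vector, i.e. value(board) = sum_i weights[i] * features[i].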
#makes the best possible move amongst all of the possible moves
def makeBestMove(board, weights, x):
boards, values = [], []
positions = [i for i in range(9) if(board[i] == 0)]
for i in range(len(positions)):
position = positions[i]
newBoard = copy.deepcopy(board)
newBoard[position] = x
features = getFeatures(newBoard)
value = estimateMoveValue(features, weights)
boards.append(newBoard)
values.append(value)
mValue = values[0]
mPosition = positions[0]
for i in range(1, len(positions)):
if(values[i] > mValue):
mValue = values[i]
mPosition = positions[i]
board[mPosition] = x
#makes a random move
def makeRandomMove(board, x):
a = [i for i in range(9) if(board[i] == 0)]
randomNum = random.randint(0, len(a)-1)
board[a[randomNum]] = x
#plays a tic-tac-toe game between the X and O AI
#we pit AI's against each other in order to train our AI's
def playGame(xWeights, oWeights, xTrain, oTrain):
turn = 1
board = [0 for x in range(9)]
gameState = 3
while(gameState == 3):
if(turn == 1):
makeBestMove(board, xWeights, turn)
xTrain.append(copy.deepcopy(board))
else:
makeBestMove(board, oWeights, turn)
oTrain.append(copy.deepcopy(board))
if(turn == 1):
turn = 2
else:
turn = 1
gameState = getGameState(board)
return gameState
#update our weights based upon the training data from the last played game
#the weights are updated by comparing the estimated move value with the actual move value
#values of 0, 100, & -100 are used for a draw, win, and loss
def updateWeights(weights, train, result, x):
values = [0 for i in range(len(train))]
if(result == 0):
values[len(values)-1] = 0
elif(result == x):
values[len(values)-1] = 100
else:
values[len(values)-1] = -100
for i in range(len(values)-1):
values[i] = estimateMoveValue(getFeatures(train[i+1]), weights)
for i in range(len(values)):
board = train[i]
features = getFeatures(board)
value = values[i]
estimate = estimateMoveValue(features, weights)
#update our weights
for j in range(len(weights)):
weights[j] = weights[j] +(learnConstant*(value-estimate)*features[j])
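# Note (added): the inner loop above is the LMS (least mean squares) update
#   w_j <- w_j + learnConstant * (V_train(b) - V_estimate(b)) * x_j
# where V_train(b) is the estimate of the successor board in the game trace
# (or 100 / -100 / 0 for a win / loss / draw on the final board) and x_j is the
# j-th feature of board b.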
#initialize our weights, the value of 0.5 is arbitrarily picked
initialWeight = 0.5
n_features = 6
oWeights = [initialWeight for i in range(n_features)]
xWeights = [initialWeight for i in range(n_features)]
trainingIterations = 10000
print("training our tic tac toe AI for %d games (this may take a minute or two...)" % trainingIterations)
for i in range(trainingIterations):
xTrain, oTrain = [], []
result = playGame(xWeights, oWeights, xTrain, oTrain)
updateWeights(xWeights, xTrain, result, 1)
updateWeights(oWeights, oTrain, result, 2)
print("finished training our tic tac toe AI!!!")
print("final weights: ")
print(xWeights)
print(oWeights)
print("launching the GUI, this allows the user to play against the AI")
#----------------------------------------------------
#------------------GUI Code--------------------------
#----------------------------------------------------
#determines if the game is still ongoing and updates the label correctly
def determineWinner():
state = getGameState(theBoard)
if(state == 0):
winnerLabel['text'] = 'draw'
elif(state == playerSide):
winnerLabel['text'] = 'player wins'
elif(state == computerSide):
winnerLabel['text'] = 'computer wins'
if(state != 3):
for i in range(9):
buttons[i]['state'] = 'disabled'
return state
#update our tic tac toe board's graphical display
def updateButtons():
for i in range(9):
if(theBoard[i] != 0):
b = buttons[i]
if(theBoard[i] == 1):
b['text'] = 'X'
else:
b['text'] = 'O'
b['state'] = 'disabled'
#makes a computer move
def makeMove():
if(computerSide == 1):
makeBestMove(theBoard, xWeights, 1)
else:
makeBestMove(theBoard, oWeights, 2)
updateButtons()
winnerLabel['text'] = 'player turn'
determineWinner()
#this function is called when one of the board buttons is clicked
def buttonClick(n):
theBoard[n] = playerSide
updateButtons()
winnerLabel['text'] = 'computer turn'
state = determineWinner()
if(state == 3):
makeMove()
#this function starts a new tic tac toe game vs the AI
def newGameClick():
global playerSide, computerSide, theBoard
playerSide = playAsWhich.get()
if(playerSide == 1):
computerSide = 2
else:
computerSide = 1
winnerLabel['text'] = 'game started'
theBoard = [0 for x in range(9)]
for b in buttons:
b['text'] = '-'
b['state'] = 'normal'
if(computerSide == 1):
makeMove()
#this function loads up the spartan engineer website in a browser
def openWebsite(event):
webbrowser.open_new('http://www.spartanengineer.com')
#the following code sets up the Tkinter GUI for playing against the AI
root = Tk()
Grid.rowconfigure(root, 0, weight=1)
Grid.columnconfigure(root, 0, weight=1)
root.minsize(width=700, height=700)
root.wm_title("SpartanTicTacToe")
spartanImage = ImageTk.PhotoImage(file=rootDirectory + '/resources/spartan-icon-small.png')
root.call('wm', 'iconphoto', root._w, spartanImage)
frame = Frame(root)
frame.grid(row=0, column=0, sticky=N+S+E+W)
buttonFont = tkFont.Font(family='Helvetica', size=72, weight='bold')
buttons = []
i = 0
for r in range(3):
for c in range(3):
button = Button(frame, text="-", command=partial(buttonClick, i))
button.grid(row=r, column=c, sticky=N+S+E+W)
button['font'] = buttonFont
button['state'] = 'disabled'
buttons.append(button)
i += 1
newGameButton = Button(frame, command=newGameClick, text="New Game?")
newGameButton.grid(row=3, column=0, sticky=N+S+E+W)
playAsWhich = IntVar()
radioFrame = Frame(frame)
radioFrame.grid(row=3, column=1, sticky=N+S+E+W)
Grid.rowconfigure(radioFrame, 0, weight=1)
Grid.columnconfigure(radioFrame, 0, weight=1)
Grid.columnconfigure(radioFrame, 1, weight=1)
r1 = Radiobutton(radioFrame, text="X?", variable=playAsWhich, value=1)
r1.grid(row=0, column=0, sticky=N+S+E+W)
r1.invoke()
r2 = Radiobutton(radioFrame, text="O?", variable=playAsWhich, value=2)
r2.grid(row=0, column=1, sticky=N+S+E+W)
winnerLabel = Label(frame, text="new game")
winnerLabel.grid(row=3, column=2, sticky=N+S+W+E)
spartanFrame = Frame(frame)
spartanFrame.grid(columnspan=3, row=4, column=0)
spartanLabel = Label(spartanFrame, image=spartanImage, cursor='hand2')
spartanLabel.pack()
spartanTextLink = Label(spartanFrame, text='www.spartanengineer.com', fg='blue',
cursor='hand2')
spartanTextLink.bind("<Button-1>", openWebsite)
spartanLabel.bind("<Button-1>", openWebsite)
spartanTextLink.pack()
for r in range(5):
Grid.rowconfigure(frame, r, weight=1)
for c in range(3):
Grid.columnconfigure(frame, c, weight=1)
root.mainloop()
| 2.421875 | 2 |
AC_tools/obsolete/misc_REDUNDANT.py | tsherwen/AC_tools | 7 | 12798846 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Redundant misc. functions to be eventually removed from AC_tools.
"""
import os
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from pandas import DataFrame
# time
import time
import datetime as datetime
# math
from math import radians, sin, cos, asin, sqrt, pi, atan2
def get_arr_edge_indices(arr, res='4x5', extra_points_point_on_edge=None,
verbose=True, debug=False):
"""
Find indices in a lon, lat (2D) grid, where value does not equal a given
value ( e.g. the edge )
"""
if verbose:
print(('get_arr_edge_indices for arr of shape: ', arr.shape))
# initialise variables
lon_c, lat_c, NIU = get_latlonalt4res(res=res, centre=True)
lon_e, lat_e, NIU = get_latlonalt4res(res=res, centre=False)
lon_diff = lon_e[-5]-lon_e[-6]
lat_diff = lat_e[-5]-lat_e[-6]
nn, n, = 0, 0
last_lat_box = arr[nn, n]
coords = []
last_lon_box = arr[nn, n]
need_lon_outer_edge, need_lat_outer_edge = False, False
if debug:
print((lon_e, lat_e))
# ---- Loop X dimension ( lon )
for nn, lon_ in enumerate(lon_c):
# Loop Y dimension ( lat ) and store edges
for n, lat_ in enumerate(lat_c):
if debug:
print((arr[nn, n], last_lat_box, last_lon_box,
arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box))
if arr[nn, n] != last_lat_box:
                # If 1st lat, select bottom of box
point_lon = lon_e[nn]+lon_diff/2
if need_lat_outer_edge:
point_lat = lat_e[n+1]
else:
point_lat = lat_e[n]
need_lat_outer_edge = True
need_lat_outer_edge = False
                # Add mid point to coordinates list
if isinstance(extra_points_point_on_edge, type(None)):
mid_point = [point_lon, point_lat]
coords += [mid_point]
# Add given number of points along edge
else:
coords += [[lon_e[nn]+(lon_diff*i), point_lat] for i in
np.linspace(0, 1, extra_points_point_on_edge,
endpoint=True)]
                # temporarily save the previous box's value
last_lat_box = arr[nn, n]
# ---- Loop Y dimension ( lat )
for n, lat_ in enumerate(lat_c):
if debug:
print((arr[nn, n], last_lat_box, last_lon_box,
arr[nn, n] == last_lat_box, arr[nn, n] == last_lon_box))
# Loop X dimension ( lon ) and store edges
for nn, lon_ in enumerate(lon_c):
            # If change in value, add to list
if arr[nn, n] != last_lon_box:
point_lat = lat_e[n]+lat_diff/2
# Make sure we select the edge lon
if need_lon_outer_edge:
point_lon = lon_e[nn+1]
else:
point_lon = lon_e[nn]
need_lon_outer_edge = True
need_lon_outer_edge = False
# Add mid point to coordinates list
if isinstance(extra_points_point_on_edge, type(None)):
mid_point = [point_lon, point_lat]
coords += [mid_point]
# Add given number of points along edge
else:
coords += [[point_lon, lat_e[n]+(lat_diff*i)] for i in
np.linspace(0, 1, extra_points_point_on_edge,
endpoint=True)]
                # temporarily save the previous box's value
last_lon_box = arr[nn, n]
return coords
def split_data_by_days(data=None, dates=None, day_list=None,
verbose=False, debug=False):
"""
Takes a list of datetimes and data and returns a list of data and
the bins ( days )
"""
if verbose:
print('split_data_by_days called')
# Create DataFrame of Data and dates
df = DataFrame(data, index=dates, columns=['data'])
# Add list of dates ( just year, month, day ) <= this is mappable, update?
df['days'] = [datetime.datetime(*i.timetuple()[:3]) for i in dates]
if debug:
print(df)
# Get list of unique days
if isinstance(day_list, type(None)):
day_list = sorted(set(df['days'].values))
# Loop unique days and select data on these days
data4days = []
for day in day_list:
print((day, df[df['days'] == day]))
data4days += [df['data'][df['days'] == day]]
# Just return the values ( i.e. not pandas array )
data4days = [i.values.astype(float) for i in data4days]
print([type(i) for i in data4days])
# print data4days[0]
# sys.exit()
if debug:
print(('returning data for {} days, with lengths: '.format(
len(day_list)), [len(i) for i in data4days]))
# Return as list of days (datetimes) + list of data for each day
return data4days, day_list
def obs2grid(glon=None, glat=None, galt=None, nest='high res global',
sites=None, debug=False):
"""
    Get grid indices for locations (e.g. observation sites) with a given lat, lon and alt
Notes
-------
- Function flagged for removal
"""
if isinstance(glon, type(None)):
glon, glat, galt = get_latlonalt4res(nest=nest, centre=False,
debug=debug)
# Assume use of known CAST sites... unless others given.
if isinstance(sites, type(None)):
loc_dict = get_loc(rtn_dict=True)
sites = list(loc_dict.keys())
    # Pull out site location indices
indices_list = []
for site in sites:
lon, lat, alt = loc_dict[site]
vars = get_xy(lon, lat, glon, glat)
indices_list += [vars]
return indices_list
| 2.671875 | 3 |
douban/test.py | mcxiaoke/python-labs | 7 | 12798847 | <gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2018-01-27 09:47:26
from __future__ import print_function, unicode_literals, absolute_import
import requests
import json
import os
import sys
import hashlib
import time
import argparse
import logging
from lxml import etree, html
sys.path.insert(1, os.path.dirname(
os.path.dirname(os.path.realpath(__file__))))
from lib import commons
from lib.utils import distinct_list, read_file
def parse_doulist():
url = 'https://www.douban.com/doulist/39822487/?sort=time&sub_type=12'
root = etree.HTML(commons.get(url).text)
links = root.xpath('//a[contains(@href,"/photos/album/")]')
return distinct_list([l.attrib['href'] for l in links])
def parse_douban_captcha():
text = read_file(sys.argv[1])
root = etree.HTML(text)
captcha_image = root.xpath('//img[@id="captcha_image"]/@src')
captcha_id = root.xpath('//input[@name="captcha-id"]/@value')
if captcha_image and captcha_id:
print(captcha_image[0])
print(captcha_id[0])
if __name__ == '__main__':
parse_douban_captcha() | 2.625 | 3 |
wsma/base.py | aradford123/wsma_python | 4 | 12798848 | <reponame>aradford123/wsma_python
# -*- coding: utf-8 -*-
"""
This defines the base class for the WSMA Python module.
"""
from abc import ABCMeta, abstractmethod
from jinja2 import Template
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
import xmltodict
import json
import time
import logging
class _Schema(object):
def __init__(self):
"""
:rtype : basestring
"""
self.begin_schema = """<?xml version="1.0" encoding="UTF-8"?>
<SOAP:Envelope xmlns:SOAP="http://schemas.xmlsoap.org/soap/envelope/"
xmlns:SOAP-ENC="http://schemas.xmlsoap.org/soap/encoding/"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<SOAP:Header>
<wsse:Security xmlns:wsse="http://schemas.xmlsoap.org/ws/2002/04/secext" SOAP:mustUnderstand="false">
<wsse:UsernameToken>
<wsse:Username>{{Username}}</wsse:Username>
<wsse:Password>{{Password}}</wsse:Password>
</wsse:UsernameToken>
</wsse:Security>
</SOAP:Header>
<SOAP:Body> """
self.end_schema = """ </request>
</SOAP:Body>
</SOAP:Envelope>"""
class _ExecTemplate(_Schema):
def __init__(self):
"""
:type self: object
"""
_Schema.__init__(self)
self.body = """<request xmlns="urn:cisco:wsma-exec"
correlator="{{CORRELATOR}}">
<execCLI maxWait="PT{{TIMEOUT}}S" xsd="false" {{FORMAT}}>
<cmd>{{EXEC_CMD}}</cmd>
</execCLI> """
self.template = Template("{0}{1}{2}".
format(self.begin_schema,
self.body,
self.end_schema))
class _ConfigTemplate(_Schema):
def __init__(self):
_Schema.__init__(self)
self.body = """<request xmlns="urn:cisco:wsma-config"
correlator="{{CORRELATOR}}">
<configApply details="all" {{ACTION_ON_FAIL}}>
<config-data>
<cli-config-data-block>{{CONFIG_CMD}}</cli-config-data-block>
</config-data>
</configApply>"""
self.template = Template("{0}{1}{2}".
format(self.begin_schema,
self.body,
self.end_schema))
class _ConfigPersistTemplate(_Schema):
def __init__(self):
"""
:type self: object
"""
_Schema.__init__(self)
self.body = """<request xmlns="urn:cisco:wsma-config"
correlator="{{CORRELATOR}}">
<configPersist>
</configPersist>"""
self.template = Template("{0}{1}{2}".
format(self.begin_schema,
self.body,
self.end_schema))
class Base(object):
'''The base class for all WSMA transports.
Provides the groundwork for specified transports.
WSMA defines the following transports:
- SSH
- HTTP / HTTPS
- TLS
this is the WSMA :class:`Base <Base>` class
:param host: hostname of the WSMA server
:param username: username to use
    :param password: password to use
:param port: port to connect to
:param timeout: timeout for transport
'''
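    # Illustrative usage sketch (added; 'WSMA_HTTP' is a hypothetical concrete
    # subclass name used only for illustration, since Base itself is abstract):
    #
    #   with WSMA_HTTP('10.0.0.1', 'admin', 'secret', port=443) as dev:
    #       if dev and dev.execCLI('show version'):
    #           print(dev.output)
    #       dev.config('interface Loopback99')
    #       dev.configPersist()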
__metaclass__ = ABCMeta
def __init__(self, host, username, password, port, timeout=60):
super(Base, self).__init__()
if not host:
raise ValueError("host argument may not be empty")
self.timeout = timeout
self.host = host
self.username = username
self.password = password
self.port = port
self.success = False
self.output = ''
self.data = None
# session holds the transport session
self._session = None
# count is used for the correlator over
# the existence of the session
self._count = 0
def __enter__(self):
logging.debug('WITH/AS connect session')
self.connect()
return self if self._ping() else None
def __exit__(self, exc_type, exc_val, exc_tb):
logging.debug('WITH/AS disconnect session')
self.disconnect()
def _ping(self):
'''Test the connection, does it make sense to continue?
it is assumed that
1) wsma is configured on the device (how could we connect otherwise?)
2) this is a priv-lvl-1 command
3) this command is platform independent for platforms supporting WSMA
an alternative would be "show version"
:rtype: bool
'''
return self.execCLI("show wsma id")
def _buildCorrelator(self, command):
'''Build a correlator for each command. Consists of
- command to be sent -and-
- timestamp
:param command: used to make a unique string to return as a correlator
:rtype: str
'''
result = time.strftime("%H%M%S")
result += "-%s" % str(self._count)
result += ''.join(command.split())
self._count += 1
return result
def _process(self, xml_data):
'''Process the given data dict and populate instance vars:
- success: was the call successful (bool)
- output: holds CLI output (e.g. for show commands), (string)
          if the call was successful.
it holds the error message (typos, config and exec)
if not successful
- xml_data: holds the XML data received from the device
:param data: dictionary with response data
:rtype: bool
'''
self.data = self.parseXML(xml_data)
# did the parsing yield an error?
if self.data.get('error') is not None:
return False
logging.info("JSON data: %s", json.dumps(self.data, indent=4))
# was it successful?
try:
self.success = bool(int(self.data['response']['@success']))
except KeyError:
self.output = 'unknown error / key error'
return False
# exec mode?
if self.data['response']['@xmlns'] == "urn:cisco:wsma-exec":
if self.success:
try:
t = self.data['response']['execLog'][
'dialogueLog']['received']['text']
except KeyError:
t = None
t = '' if t is None else t
self.output = t
return True
if not self.success:
e = self.data['response']['execLog'][
'errorInfo']['errorMessage']
self.output = e
return False
# config mode?
if self.data['response']['@xmlns'] == "urn:cisco:wsma-config":
if self.success:
t = 'config mode / not applicable'
self.output = t
return True
if not self.success:
re = self.data['response']['resultEntry']
# multi line config input returns list
if type(re) is list:
results = re
else:
results = list()
results.append(re)
# look for first failed element
for line in results:
if line.get('failure'):
self.output = line.get('text')
break
return False
# catch all
return False
@abstractmethod
def communicate(self, template_data):
'''Needs to be overwritten in subclass, it should process
provided template_data by sending it using the selected
transport. Essentially:
return self._process(send(data))
Assuming that send(template_data) returns XML from the device.
:param template_data: XML string to be sent in transaction
:rtype: bool
'''
        self.success = True
        self.output = ''
        self.data = None
        # make sure we have a session
        if self._session is None:
            self.output = 'no established session!'
            self.success = False
            return self.success
@abstractmethod
def connect(self):
'''Connects to the WSMA host via a specific transport.
The specific implementation has to be provided by
the subclass. tls, ssh and http(s) are usable in IOS.
'''
logging.info("connect to {} as {}/{}".format(self.url,
self.username,
self.password))
@abstractmethod
def disconnect(self):
'''Disconnects the transport
'''
logging.info("disconnect from {}".format(self.url))
self._session = None
@property
def odmFormatResult(self):
'''When using format specifications (e.g. structured data
instead of unstructured CLI output) then this property
holds the structured data as an object.
:rtype dict:
'''
try:
return self.data['response']['execLog']['dialogueLog']['received']['tree']
except KeyError:
return None
@property
def hasSession(self):
'''checks whether we have a valid session or not.
:rtype bool:
'''
return self._session is not None and self._ping()
def execCLI(self, command, format_spec=None):
'''Run given command in exec mode, return JSON response. The
On success, self.output and self.success will be updated.
If format_spec is given (and valid), odmFormatResult will
contain the dictionary with the result data.
:param command: command string to be run in exec mode on device
:param format_spec: if there is a ODM spec file for the command
:rtype: bool
'''
correlator = self._buildCorrelator("exec" + command)
if format_spec is not None:
format_text = 'format="%s"' % format_spec
else:
format_text = ""
etmplate = _ExecTemplate()
template_data = etmplate.template.render(EXEC_CMD=command,
TIMEOUT=self.timeout,
CORRELATOR=correlator,
FORMAT=format_text,
Username=self.username,
                                                 Password=self.password)
logging.debug("Template {}".format(template_data))
return self.communicate(template_data)
def config(self, command, action_on_fail="stop"):
'''Execute given commands in configuration mode.
On success, self.output and self.success will be updated.
:param command: config block to be applied to the device
:param action_on_fail, can be "stop", "continue", "rollback"
:rtype: bool
'''
correlator = self._buildCorrelator("config")
fail_str = 'action-on-fail="%s"' % action_on_fail
self._count += 1
etmplate = _ConfigTemplate()
template_data = etmplate.template.render(CONFIG_CMD=command,
CORRELATOR=correlator,
ACTION_ON_FAIL=fail_str,
Username=self.username,
Password=self.password)
logging.debug("Template {0:s}".format(template_data))
return self.communicate(template_data)
def configPersist(self):
'''Makes configuration changes persistent.
:rtype: bool
'''
correlator = self._buildCorrelator("config-persist")
etmplate = _ConfigPersistTemplate()
template_data = etmplate.template.render(CORRELATOR=correlator,
Username=self.username,
Password=self.password)
logging.debug("Template {0:s}".format(template_data))
return self.communicate(template_data)
@staticmethod
def parseXML(xml_text):
'''Parses given XML string and returns the 'response' child within the
XML tree. If no response is found, the SOAP 'Envelope' is returned.
If an empty string is used or an error occurs during parsing then
dict(error='some error string') is returned.
This still assumes that IF an XML string is passed into
this function then it should have a valid SOAP Envelope.
:param xml_text: XML string to be converted
:rtype: dict
'''
if xml_text is None:
return dict(error='XML body is empty')
"""
from lxml import etree
etree.register_namespace("SOAP", "http://schemas.xmlsoap.org/soap/envelope/")
element = etree.fromstring(xml_text.encode('utf-8'))
print('#' * 40)
print(etree.tostring(element, pretty_print=True).decode('utf-8'))
print(json.dumps(xmltodict.parse(xml_text), indent=4))
"""
logging.debug("XML string: {}".format(xml_text))
try:
dom = parseString(xml_text)
except ExpatError as e:
return dict(error='%s' % e)
logging.debug("XML tree:{}".format(dom.childNodes[-1].toprettyxml()))
response = dom.getElementsByTagName('response')
if len(response) > 0:
return xmltodict.parse(response[0].toxml())
return xmltodict.parse(
dom.getElementsByTagNameNS(
"http://schemas.xmlsoap.org/soap/envelope/",
"Envelope")[0].toxml())
| 2.359375 | 2 |
postprocessing.py | loftwah/chatscript_generator | 1 | 12798849 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Module with functions used in post processing phase"""
import os
import glob
import re
def save_topic_file(topic, botdir):
"""This method saves a topic in a file and returns its name.
Args:
topic (str): Topic content.
botdir (str): Path to dir where to create topic file.
"""
top_name = '{}.top'.format(topic.name)
filename = os.path.join(botdir, top_name)
with open(filename, 'w') as arq:
arq.write(topic.__str__())
def save_control_knowledge_files(map_values, botdir):
"""Save CS control and knowledges files.
Args:
map_values (dict): Keys are values in template files and values
are the actual field values in generated files.
botdir (str): Path to dir where to create files.
"""
dirname = os.path.dirname(os.path.abspath(__file__))
templates_dir = os.path.join(dirname, 'templates')
templates_filenames = glob.glob('{}*'.format(templates_dir+os.sep))
for temp_path in templates_filenames:
temp_name = temp_path.split(os.sep)[-1]
result_path = os.path.join(botdir, temp_name)
with open(temp_path, 'r') as template, open(result_path, 'w') as result:
content = template.read()
for temp_value, actual_value in map_values.items():
content = content.replace(temp_value, actual_value)
result.write(content)
def save_chatbot_files(botname, topics, cs_path='../ChatScript'):
"""This method creates the chatbot files in the passed path. If
    any of the necessary directories do not exist, they will be created.
Args:
botname (str): Name of the chatbot.
topics (list): List of chatbot's topics content.
cs_path (str): Path of ChatScript base directory.
"""
rawdata = os.path.join(cs_path, 'RAWDATA')
botname_formal = '_'.join(botname.lower().split())
botdir = os.path.join(rawdata, botname_formal)
if not os.path.isdir(botdir):
os.mkdir(botdir)
descriptions = list()
for top in topics:
save_topic_file(top, botdir)
try:
if top.beauty_name:
descriptions.append(top.beauty_name.lower())
except AttributeError:
continue
map_values = {
'BOTNAME': botname.capitalize(),
'FALAR_SOBRE': ', '.join(descriptions)
}
save_control_knowledge_files(map_values, botdir)
botfiles_content = os.path.join(*botdir.split(os.sep)[-2:]) + os.sep
botfiles = os.path.join(rawdata, 'files{}.txt'.format(botname_formal))
with open(botfiles, 'w') as arq:
arq.write(botfiles_content)
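# Illustrative note (added): assuming the default cs_path and a bot named
# "Alice", the calls above create (with a POSIX path separator)
#   ../ChatScript/RAWDATA/alice/          -- one .top file per topic plus the
#                                            rendered control/knowledge files
#   ../ChatScript/RAWDATA/filesalice.txt  -- containing the line "RAWDATA/alice/"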
| 3.03125 | 3 |
nanome/_internal/_structure/_serialization/_residue_serializer.py | rramji/nanome-lib | 0 | 12798850 | from nanome._internal._util._serializers import _ArraySerializer, _StringSerializer, _ColorSerializer
from . import _AtomSerializerID
from . import _BondSerializer
from .. import _Residue
from nanome.util import Logs
from nanome._internal._util._serializers import _TypeSerializer
class _ResidueSerializer(_TypeSerializer):
def __init__(self, shallow = False):
self.shallow = shallow
self.array = _ArraySerializer()
self.atom = _AtomSerializerID()
self.bond = _BondSerializer()
self.color = _ColorSerializer()
self.string = _StringSerializer()
def version(self):
#Version 0 corresponds to Nanome release 1.10
return 1
def name(self):
return "Residue"
def serialize(self, version, value, context):
context.write_long(value._index)
self.array.set_type(self.atom)
if (self.shallow):
context.write_using_serializer(self.array, [])
else:
context.write_using_serializer(self.array, value._atoms)
self.array.set_type(self.bond)
if (self.shallow):
context.write_using_serializer(self.array, [])
else:
context.write_using_serializer(self.array, value._bonds)
context.write_bool(value._ribboned)
context.write_float(value._ribbon_size)
context.write_int(value._ribbon_mode)
context.write_using_serializer(self.color, value._ribbon_color)
if (version > 0):
context.write_bool(value._labeled)
context.write_using_serializer(self.string, value._label_text)
context.write_using_serializer(self.string, value._type)
context.write_int(value._serial)
context.write_using_serializer(self.string, value._name)
context.write_int(value._secondary_structure.value)
def deserialize(self, version, context):
residue = _Residue._create()
residue._index = context.read_long()
self.array.set_type(self.atom)
residue._set_atoms(context.read_using_serializer(self.array))
self.array.set_type(self.bond)
residue._set_bonds(context.read_using_serializer(self.array))
residue._ribboned = context.read_bool()
residue._ribbon_size = context.read_float()
residue._ribbon_mode = _Residue.RibbonMode.safe_cast(context.read_int())
residue._ribbon_color = context.read_using_serializer(self.color)
if (version > 0):
residue._labeled = context.read_bool()
residue._label_text = context.read_using_serializer(self.string)
residue._type = context.read_using_serializer(self.string)
residue._serial = context.read_int()
residue._name = context.read_using_serializer(self.string)
residue._secondary_structure = _Residue.SecondaryStructure.safe_cast(context.read_int())
return residue | 1.875 | 2 |