id | text | dataset_id
---|---|---|
1771801
|
import configparser
import psycopg2
import boto3
from sql_queries import copy_table_queries, insert_table_queries, counting_queries
def load_staging_tables(cur, conn):
""" Copy data from S3 into staging tables.
"""
for query in copy_table_queries:
try:
print("Loading staging table.")
cur.execute(query)
print("Successfully loaded staging table.")
except psycopg2.Error as e:
print("Error loading staging tables.")
print(e)
def insert_tables(cur, conn):
""" Insert data from staging tables into analytic tables (star schema).
"""
for query in insert_table_queries:
try:
cur.execute(query)
print("Inserted data to table.")
except psycopg2.Error as e:
print("Error inserting into tables.")
print(e)
def count_queries(cur, conn):
""" Function to get the row counts in the specified tables for data
integrity
"""
for query in counting_queries:
try:
print("Rows in table: ")
cur.execute(query)
row = cur.fetchone()
if row is not None:
print(row)
except psycopg2.Error as e:
print("Error inserting into tables.")
print(e)
def main():
""" Main function to load staging tables from S3 buckets, insert the staged data
into analytic tables, and count the rows for data integrity.
"""
# Get config data
config = configparser.ConfigParser()
config.read('../config/dwh.cfg')
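# Note: the positional unpacking into the connection string below assumes the
# [CLUSTER] section of dwh.cfg lists host, dbname, user, password and port in
# exactly that order, e.g. (key names are illustrative):
#   [CLUSTER]
#   HOST=...
#   DB_NAME=...
#   DB_USER=...
#   DB_PASSWORD=...
#   DB_PORT=...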
try:
# Connect to dwh
print("Create dwh connection...")
conn = psycopg2.connect("host={} dbname={} user={} password={} port={}".format(
*config['CLUSTER'].values()))
conn.set_session(autocommit=True)
cur = conn.cursor()
print("Redshift connection completed")
except psycopg2.Error as e:
print("Error connecting and/or creating a cursor with db. ")
print(e)
# Load data from S3 into staging, insert the data into analytic tables,
# and count the rows in the tables.
load_staging_tables(cur, conn)
insert_tables(cur, conn)
count_queries(cur, conn)
# Close connection to dwh
try:
conn.close()
print("Connection closed. ")
except psycopg2.Error as e:
print("Error closing connection to dwh. ")
print(e)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3343405
|
<filename>decode.py
#!/usr/bin/env python3
def decode(data):
# convert 36 bits to long integer
value=int(data,2)
# remove 12 least significant bits by shifting >> 12
value=value >> 12
# remove the preamble - only the 12 least significant bits are important
value=value & 0xfff
# let's check whether the temperature is negative by looking at the top bits
# 0xe00 is 111000000000: we test the 3 most significant bits of the 12-bit value
# if they are 111 the temperature is negative; if 000 it is positive
if (value & 0xe00) == 0xe00:
# Negative temperature algorithm
return ((value & 0xff) - 256) / 10.0
else:
# Positive temperature algorithm
return (value & 0x1ff)/10.0
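# Worked example for the test vector below:
#   int(DATA, 2) >> 12 drops the 12 trailing bits, & 0xfff keeps 0xF73
#   0xF73 & 0xe00 == 0xe00, so the temperature is negative
#   (0xF73 & 0xff) - 256 = 115 - 256 = -141  ->  -141 / 10.0 = -14.1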
DATA="101001101000111101110011111100000000"
VALUE=-14.1
if VALUE == decode(DATA):
print("Algorytm poprawny:\n{} ->{}".format(DATA, VALUE))
else:
print("Próbuj dalej ..")
|
StarcoderdataPython
|
1715869
|
<reponame>ChristianSi/phoneng<filename>setup.py<gh_stars>1-10
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='lytspel',
version='2.0.7',
author='<NAME>',
author_email='<EMAIL>',
description='A Simple Phonetic Respelling for the English Language',
entry_points = {
'console_scripts': ['lytspel=lytspel:main'],
},
long_description=long_description,
long_description_content_type='text/markdown',
license='ISC',
url='https://www.lytspel.org/',
packages=setuptools.find_packages(),
install_requires=[
'lxml >= 4.0.0',
'setuptools >= 34.3.3',
'spacy >= 2.0.0',
],
# Keep minimum version listed here in sync with the one in INSTALL-USE.md
python_requires='>=3.5',
package_data={'lytspel': ['lytspel-dict.csv']},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: ISC License (ISCL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Communications',
'Topic :: Text Processing :: Linguistic',
],
keywords='english spelling reform lytspel',
project_urls={
'Source': 'https://github.com/ChristianSi/lytspel',
'Tracker': 'https://github.com/ChristianSi/lytspel/issues',
},
)
|
StarcoderdataPython
|
3289303
|
<gh_stars>0
import pytest
import falcon
from falcon import MEDIA_TEXT
def test_response_set_content_type_set():
resp = falcon.Response()
resp._set_media_type(MEDIA_TEXT)
assert resp._headers['content-type'] == MEDIA_TEXT
def test_response_set_content_type_not_set():
resp = falcon.Response()
assert 'content-type' not in resp._headers
def test_response_get_headers():
resp = falcon.Response()
resp.append_header('x-things1', 'thing-1')
resp.append_header('x-things2', 'thing-2')
resp.append_header('X-Things3', 'Thing-3')
resp.set_cookie('Chocolate', 'Chip')
headers = resp.headers
assert headers['x-things1'] == 'thing-1'
assert headers['x-things2'] == 'thing-2'
assert headers['x-things3'] == 'Thing-3'
assert 'set-cookie' not in headers
def test_response_attempt_to_set_read_only_headers():
resp = falcon.Response()
resp.append_header('x-things1', 'thing-1')
resp.append_header('x-things2', 'thing-2')
resp.append_header('x-things3', 'thing-3a')
resp.append_header('X-Things3', 'thing-3b')
with pytest.raises(AttributeError):
resp.headers = {'x-things4': 'thing-4'}
headers = resp.headers
assert headers['x-things1'] == 'thing-1'
assert headers['x-things2'] == 'thing-2'
assert headers['x-things3'] == 'thing-3a, thing-3b'
|
StarcoderdataPython
|
25588
|
<filename>models.py
import ipdb
import math
import numpy as np
import tensorflow as tf
# N_DIM_STATE = 4
# N_DIM_ACTIONS = 2
N_DIM_STATE = 210*160
N_DIM_ACTIONS = 9
def batch_norm_init(inits, size, name):
return tf.Variable(inits * tf.ones([size]), name=name)
def weight_init(shape, name):
return tf.Variable(tf.random_normal(shape, stddev=math.sqrt(shape[0])), name=name)
def batch_normalization(batch, mean=None, var=None):
if mean is None or var is None:
mean, var = tf.nn.moments(batch, axes=[0])
return (batch - mean) / tf.sqrt(var + tf.constant(1e-9))
def update_batch_normalization(batch, l, bn_assigns, running_mean, running_var, ewma):
mean, var = tf.nn.moments(batch, axes=[0])
assign_mean = running_mean[l - 1].assign(mean)
assign_var = running_var[l - 1].assign(var)
bn_assigns.append(ewma.apply([running_mean[l - 1], running_var[l - 1]]))
with tf.control_dependencies([assign_mean, assign_var]):
return (batch - mean) / tf.sqrt(var + 1e-10)
def ddqn(s1, a1, r1, s2, discount, learning_rate, layers, q_values_fun_builder):
training = tf.placeholder(tf.bool)
n_data = tf.shape(s1)[0]
# DDQN - pick the best action with the up-to-date Q function, but estimate its value with the target Q function.
targets, _, bn_assigns, target_weights, _ = q_values_fun_builder(s2, training)
best_action = tf.argmax(targets, axis=1)
# Cases when the second action is picked
second_action_is_best = tf.cast(best_action, dtype=bool)
# DDQN Pick action with Q_1, score with Q_target
ddqn_target_scores, _, _, ddqn_target_weights, _ = q_values_fun_builder(s2, training)
target_scores = tf.where(
second_action_is_best,
discount*ddqn_target_scores[:, 1],
discount*ddqn_target_scores[:, 0])
# Remove future score prediction if end of episode
future_score = tf.where(
tf.equal(r1, -1*tf.ones(tf.shape(r1))),
tf.zeros(tf.shape(r1)),
tf.reshape(target_scores, [-1, 1]))
target_q_valuez = tf.concat([r1 + future_score for _ in range(N_DIM_ACTIONS)], 1)
all_ones = tf.concat([tf.ones([n_data, 1]) for _ in range(N_DIM_ACTIONS)], 1)
predicted_q_values, _, _, online_weights, _ = q_values_fun_builder(s1, training)
target_q_values = tf.where(
tf.equal(a1, all_ones),
target_q_valuez,
predicted_q_values)
best_action_picker, u_loss, bn_assigns, _, tf_debug_var = q_values_fun_builder(s1, training, online_weights)
u_loss = (u_loss * tf.constant(1/100))
supervised_loss = tf.reduce_mean(tf.square(tf.stop_gradient(target_q_values) - predicted_q_values))
loss = supervised_loss + u_loss
training_vars = []
for w_key, weights in online_weights.items():
training_vars = training_vars + weights
opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = opt.minimize(loss, var_list=training_vars)
target_updaters = []
for w_key, weights in online_weights.items():
for w_index in range(len(weights)):
target_updaters.append(
tf.assign(target_weights[w_key][w_index],
online_weights[w_key][w_index]))
updaters = []
for w_key, weights in online_weights.items():
for w_index in range(len(weights)):
updaters.append(
tf.assign(ddqn_target_weights[w_key][w_index],
online_weights[w_key][w_index]))
def updater(sess):
for u in updaters:
sess.run(u)
# add the updates of batch normalization statistics to train_step
network_updates = tf.group(*(bn_assigns + target_updaters))
with tf.control_dependencies([train_op]):
train_op = tf.group(network_updates)
return loss, \
train_op, \
best_action_picker, \
updater, \
training, \
None
def ddqn_mlp(s1, a1, r1, s2, discount, learning_rate, layer_sizes):
n_data = tf.shape(s1)[0]
# Q-Values from a ladder network
def q_values(state1, training, weights=None):
L = len(layer_sizes) - 1 # number of layers
shapes = [s for s in zip(layer_sizes[:-1], layer_sizes[1:])] # shapes of linear layers
if weights is None:
weights = {
'Encoder_w': [weight_init(s, 'Encoder_w') for s in shapes], # Encoder weights
'beta': [batch_norm_init(0.0, layer_sizes[l+1], 'beta') for l in range(L)],
'gamma': [batch_norm_init(1.0, layer_sizes[l+1], 'gamma') for l in range(L)]
}
# Running averages of the batch mean and variance, used for batch normalization at evaluation time
running_mean = [tf.Variable(tf.constant(0.0, shape=[l]), name='running_mean', trainable=False)
for l in layer_sizes[1:]]
running_var = [tf.Variable(tf.constant(1.0, shape=[l]), name='running_var', trainable=False)
for l in layer_sizes[1:]]
ewma = tf.train.ExponentialMovingAverage(decay=0.99) # to calculate the moving averages of mean and variance
bn_assigns = [] # this list stores the updates to be made to average mean and variance
# to store the pre-activation, activation, mean and variance for each layer
d = {'z': {}, 'm': {}, 'v': {}, 'h': {}}
h = state1
d['z'][0] = h
for l in range(1, L + 1):
print("Layer ", l, ": ", layer_sizes[l - 1], " -> ", layer_sizes[l])
d['h'][l - 1] = h
z_pre = tf.matmul(h, weights['Encoder_w'][l - 1]) # pre-activation
m, v = tf.nn.moments(z_pre, axes=[0])
# if training:
def training_batch_norm():
return update_batch_normalization(z_pre, l, bn_assigns, running_mean, running_var, ewma)
# else:
def eval_batch_norm():
mean = ewma.average(running_mean[l - 1])
var = ewma.average(running_var[l - 1])
z = batch_normalization(z_pre, mean, var)
return z
z = tf.cond(training, training_batch_norm, eval_batch_norm)
if l == L:
h = tf.nn.softmax(weights['gamma'][l - 1] * (z + weights["beta"][l - 1]))
else:
h = tf.nn.relu(z + weights["beta"][l - 1])
d['z'][l] = z
d['m'][l], d['v'][l] = m, v
d['h'][l] = h
return h, tf.Variable(tf.constant(0.0)), bn_assigns, weights, None
return ddqn(s1, a1, r1, s2, discount, learning_rate, layer_sizes, q_values)
# https://github.com/rinuboney/ladder/blob/master/ladder.py
def ladder_mlp(s1, a1, r1, s2, discount, learning_rate, layer_sizes, denoising_cost):
# Q-Values from a ladder network
def q_values(state1, training, weights=None):
L = len(layer_sizes) - 1 # number of layers
shapes = [s for s in zip(layer_sizes[:-1], layer_sizes[1:])] # shapes of linear layers
if weights is None:
weights = {
'Encoder_w': [weight_init(s, 'Encoder_w') for s in shapes], # Encoder weights
'Decoder_w': [weight_init(s[::-1], 'Decoder_w') for s in shapes], # Decoder weights
'beta': [batch_norm_init(0.0, layer_sizes[l+1], 'beta') for l in range(L)],
'gamma': [batch_norm_init(1.0, layer_sizes[l+1], 'gamma') for l in range(L)]
}
# Running averages of the batch mean and variance, used for batch normalization at evaluation time
running_mean = [tf.Variable(tf.constant(0.0, shape=[l]), name='running_mean', trainable=False)
for l in layer_sizes[1:]]
running_var = [tf.Variable(tf.constant(1.0, shape=[l]), name='running_var', trainable=False)
for l in layer_sizes[1:]]
ewma = tf.train.ExponentialMovingAverage(decay=0.99) # to calculate the moving averages of mean and variance
bn_assigns = [] # this list stores the updates to be made to average mean and variance
def encoder(inputs, noise_std):
# add noise to input
h = inputs + tf.random_normal(tf.shape(inputs)) * noise_std
# to store the pre-activation, activation, mean and variance for each layer
d = {'z': {}, 'm': {}, 'v': {}, 'h': {}}
d['z'][0] = h
for l in range(1, L + 1):
print("Layer ", l, ": ", layer_sizes[l - 1], " -> ", layer_sizes[l])
d['h'][l - 1] = h
z_pre = tf.matmul(h, weights['Encoder_w'][l - 1]) # pre-activation
m, v = tf.nn.moments(z_pre, axes=[0])
# if training:
def training_batch_norm():
# Training batch normalization
# batch normalization for labeled and unlabeled examples is performed separately
if noise_std > 0:
# Corrupted encoder
# batch normalization + noise
z = batch_normalization(z_pre, m, v)
z += tf.random_normal(tf.shape(z_pre)) * noise_std
else:
# Clean encoder
# batch normalization + update the average mean and variance using batch
# mean and variance of labeled examples
z = update_batch_normalization(z_pre, l, bn_assigns, running_mean, running_var, ewma)
return z
# else:
def eval_batch_norm():
# Evaluation batch normalization
# obtain average mean and variance and use it to normalize the batch
mean = ewma.average(running_mean[l - 1])
var = ewma.average(running_var[l - 1])
z = batch_normalization(z_pre, mean, var)
return z
# perform batch normalization according to value of boolean "training" placeholder:
z = tf.cond(training, training_batch_norm, eval_batch_norm)
if l == L:
# use softmax activation in output layer
h = tf.nn.softmax(weights['gamma'][l - 1] * (z + weights["beta"][l - 1]))
else:
# use ReLU activation in hidden layers
h = tf.nn.relu(z + weights["beta"][l - 1])
d['z'][l] = z
d['m'][l], d['v'][l] = m, v # save mean and variance of unlabeled examples for decoding
d['h'][l] = h
return h, d
print("=== Corrupted Encoder ===")
y_c, corr = encoder(state1, 0.1)
print("=== Clean Encoder ===")
y, clean = encoder(state1, 0.0) # 0.0 -> do not add noise
print("=== Decoder ===")
def g_gauss(z_c, u, size):
wi = lambda inits, name: tf.Variable(inits * tf.ones([size]), name=name)
a1 = wi(0., 'a1')
a2 = wi(1., 'a2')
a3 = wi(0., 'a3')
a4 = wi(0., 'a4')
a5 = wi(0., 'a5')
a6 = wi(0., 'a6')
a7 = wi(1., 'a7')
a8 = wi(0., 'a8')
a9 = wi(0., 'a9')
a10 = wi(0., 'a10')
mu = a1 * tf.sigmoid(a2 * (u + tf.constant(1e-9)) + a3) + a4 * u + a5
v = a6 * tf.sigmoid(a7 * (u + tf.constant(1e-9)) + a8) + a9 * u + a10
z_est = (z_c - mu) * v + mu
return z_est
# Decoder
z_est = {}
d_cost = [] # to store the denoising cost of all layers
for l in range(L, -1, -1):
print("Layer ", l, ": ", layer_sizes[l+1] if l+1 < len(layer_sizes) else None,
" -> ", layer_sizes[l], ", denoising cost: ", denoising_cost[l])
z, z_c = clean['z'][l], corr['z'][l]
m = clean['m'].get(l, 0)
v = clean['v'].get(l, 1-1e-10) + tf.constant(1e-9)
if l == L:
u = y_c
else:
u = tf.matmul(z_est[l+1], weights['Decoder_w'][l])
u = batch_normalization(u)
z_est[l] = g_gauss(z_c, u, layer_sizes[l])
z_est_bn = (z_est[l] - m) / v
# append the cost of this layer to d_cost
d_cost.append((tf.reduce_mean(tf.reduce_sum(tf.square(z_est_bn - z), 1)) / layer_sizes[l]) * denoising_cost[l])
# calculate total unsupervised cost by adding the denoising cost of all layers
unsupervised_cost = tf.add_n(d_cost)
return y, unsupervised_cost, bn_assigns, weights, None
return ddqn(s1, a1, r1, s2, discount, learning_rate, layer_sizes, q_values)
|
StarcoderdataPython
|
1714373
|
# Generated by Django 3.2 on 2021-05-05 03:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library_api', '0007_auto_20210505_0316'),
]
operations = [
migrations.AlterField(
model_name='meminjam',
name='tanggal_peminjaman',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='meminjam',
name='tanggal_pengembalian',
field=models.DateTimeField(auto_now=True, null=True),
),
]
|
StarcoderdataPython
|
3261470
|
from flask_wtf import FlaskForm
from wtforms import (BooleanField, StringField, HiddenField, PasswordField,
validators, IntegerField, SubmitField)
from wtforms.fields.html5 import DateTimeField
from wtforms.widgets.html5 import DateTimeInput
from ..common.constants import MAX_DESCRIPTION_LEN, STRING_LEN
from ..common.helpers import get_current_time, get_nearest_time, get_nearest_time_plus
class NewEventForm(FlaskForm):
event_name = StringField('event_name', [validators.DataRequired(), validators.Length(min=1, max=STRING_LEN)])
date_fmt = '%Y-%m-%d %H:%M:%S'
start_dt = DateTimeField('start_dt', [validators.DataRequired()], default=get_nearest_time(), format=date_fmt)
end_dt = DateTimeField('end_dt', [validators.DataRequired()], default=get_nearest_time_plus(minutes=30),
format=date_fmt)
show_as = StringField('show_as', [validators.DataRequired(), validators.AnyOf(['Free', 'Busy'])])
description = StringField('description', [validators.Length(max=MAX_DESCRIPTION_LEN)], default=str())
private = BooleanField('private', default=True)
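# Note: the DateTimeField defaults above are evaluated once, when the class is
# defined; passing the callables themselves (e.g. default=get_nearest_time)
# would let WTForms recompute the default for every new form instance.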
|
StarcoderdataPython
|
3385417
|
"""
The ``display`` taxon groups applets implementing display interfaces, that is, interfaces for
sending commands to a device that alters its transmittance and/or reflectance in response.
Although some devices may receive periodic commands that embed 2d arrays of samples, they are
still classified under the ``display`` taxon, unless that is the only possible mode of operation,
in which case the ``video`` taxon is appropriate.
Examples: HD44780 character LCD, SPI raster LCD, SPI LCD with integrated microcontroller that can
draw geometric primitives.
Counterexamples: RGB TFT LCD (use taxon ``video``).
"""
|
StarcoderdataPython
|
143572
|
<reponame>bychkovav/gradient_descent
import simpleGD as simple
from mpl_toolkits import mplot3d
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
# Gradient descent will be performed for Rosenbrock function: (a-x)**2+ b(y-x**2)**2
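# For a=1, b=100 (the arguments passed to get_f below) the gradient that the
# descent presumably follows is:
#   df/dx = -2*(a - x) - 4*b*x*(y - x**2)
#   df/dy = 2*b*(y - x**2)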
history = simple.process(100, 0.00001, -3, -3, 1, 100)
X = np.arange(-3, 3, 0.1)
Y = np.arange(-3, 3, 0.1)
xx, yy = np.meshgrid(X, Y)
ax = plt.axes(projection='3d')
surf = ax.plot_surface(xx, yy, simple.get_f(xx, yy, 1, 100), cmap=cm.coolwarm,
linewidth=3, antialiased=False)
ax.plot3D([w[0] for w in history], [w[1] for w in history], [w[2] for w in history], '-ok',color='red')
# Customize the z axis.
# ax.plot3D([w[0] for w in history], [w[1] for w in history], [w[2] for w in history])
plt.show()
# print(res)
|
StarcoderdataPython
|
94743
|
<filename>scrapy-template/spider/{{class_prefix}}.py
# -*- coding: utf-8 -*-
import scrapy
class {{class_prefix}}Spider(scrapy.Spider):
name = '{{spider_name}}'
allowed_domains = ['{{spider_name}}']
custom_settings = {
'CONCURRENT_REQUESTS': 2,
'DOWNLOAD_DELAY': 0.25
}
defaultHeaders = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
}
def start_requests(self):
urls = [
'https://xxxx.com',
]
for url in urls:
yield scrapy.Request(url=url, callback=self.parseList, headers=self.defaultHeaders)
def parseList(self, response):
blocks = response.css('.pic-txt')
for b in blocks:
url = 'https:' + b.css('.tit a::attr(href)')[0].extract()
#yield {'url': url}
yield scrapy.Request(url=url, callback=self.parseArticle, headers=self.defaultHeaders)
def parseArticle(self, response):
yield {
'title': response.css('.artTit::text')[0].extract(),
'content': "".join(response.css('.artText *::text').extract()),
'tag': " ".join(response.css('.artLabel a::text').extract()),
}
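# Usage sketch: once the {{class_prefix}} and {{spider_name}} placeholders are
# rendered by the template tooling, the spider runs with the standard
# `scrapy crawl <spider_name>` command; the CSS selectors above are
# site-specific placeholders and need adjusting for a real target.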
|
StarcoderdataPython
|
3337261
|
<gh_stars>0
#! /usr/bin/env python3
if __name__=='__main__':
a = 3
if a == 0:
print("a==0")
elif a > 0:
print("a>0")
else:
print("a<0")
b = 2
print("b >= 0") if b >= 0 else print("b < 0")
if a > 0 and b > 0:
print("a > 0 and b > 0")
|
StarcoderdataPython
|
1754525
|
class NewsPaper:
def __init__(self, name):
self.name = name
class Book:
BOOK_TYPES = ("PAPERBACK", "HARDCOVER", "EBOOK")
def __init__(self, title, author, pages, price, booktype):
self.title = title
self.author = author
self.pages = pages
self.price = price
if (booktype not in Book.BOOK_TYPES):
raise ValueError(f"{booktype} is not a valid book type")
else:
self.booktype = booktype
self.__secret = 123 # name mangling hides this attribute
__booklist = None
@staticmethod
def getBookList():
if Book.__booklist is None:
Book.__booklist = []
return Book.__booklist
@classmethod
def getBookTypes(cls):
return cls.BOOK_TYPES
def setTitle(self, newTitle):
self.title = newTitle
def getPrice(self):
if hasattr(self, "_discount"):
return self.price - (self.price * self._discount)
else:
return self.price
def setDiscount(self, amount):
self._discount = amount
print(Book.getBookTypes())
b1 = Book("Hello New world", "<NAME>", 123, 345.50, "HARDCOVER")
b1 = Book("Hello", "<NAME>", 123, 345.50, "PAPERBOOK")
theBooks = Book.getBookList()
theBooks.append(b1)
# b1 = Book("Hello New world", "<NAME>", 123, 345.50)
# b2 = Book("I am Angad", "<NAME>", 342, 690.75)
# n1 = NewsPaper("The Hindu")
# print(b1.getPrice())
# print(b2.getPrice())
# print(b2.setDiscount(0.25))
# print(b2.getPrice())
# print(type(b1))
# print(type(n1))
# print(isinstance(b1, Book))
# print(isinstance(n1, Book))
#print(b2.__secret) # Will give error
#print(b2._Book__secret)
|
StarcoderdataPython
|
1763297
|
from data import BayStars, Dragons, Eagles, Hawks, Carp, Tigers, Marines, Buffaloes, Swallows, Giants, Lions
# Set aliases for each team's name
Buffaloes_name=["オリックス","おりっくす","バファローズ","ばふぁろーず","オリックスバファローズ","おりっくすばふぁろーず"]
Swallows_name=["ヤクルト","やくると","スワローズ","すわろーず","ヤクルトスワローズ","やくるとすわろーず","東京ヤクルトスワローズ","とうきょうやくるとすわろーず"]
Tigers_name=["阪神","はんしん","タイガース","たいがーす","阪神タイガース","はんしんたいがーす"]
Marines_name=["ロッテ","ろって","マリーンズ","まりーんず","千葉ロッテマリーンズ","ちばろってまりーんず"]
Giants_name=["巨人","きょじん","ジャイアンツ","じゃいあんつ","読売ジャイアンツ","よみうりじゃいあんつ"]
Eagles_name=["楽天","らくてん","イーグルス","いーぐるす","東北楽天ゴールデンイーグルス","とうほくらくてんごーるでんいーぐるす"]
Carp_name=["広島","ひろしま","カープ","かーぷ","広島カープ","ひろしまかーぷ","広島東洋カープ","ひろしまとうようかーぷ"]
Hawks_name=["ソフトバンク","そふとばんく","ホークス","ほーくす","福岡ソフトバンクホークス","ふくおかそふとばんくほーくす"]
Dragons_name=["中日","ちゅうにち","ドラゴンズ","どらごんず","中日ドラゴンズ","ちゅうにちどらごんず"]
Fighters_name=["日ハム","にちはむ","ファイターズ","ふぁいたーず","北海道日本ハムファイターズ","ほっかいどうにっぽんはむふぁいたーず"]
BayStars_name=["横浜","よこはま","ベイスターズ","べいすたーず","横浜DeNAベイスターズ","よこはまでぃーえぬえーべいすたーず"]
Lions_name=["西武","せいぶ","ライオンズ","らいおんず","埼玉西武ライオンズ","さいたませいぶらいおんず"]
# team data list
team_list={
"BayStars":[BayStars.data,BayStars_name],
"Dragons":[Dragons.data,Dragons_name],
"Eagles":[Eagles.data,Eagles_name],
"Hawks":[Hawks.data,Hawks_name],
"Carp":[Carp.data,Carp_name],
"Tigers":[Tigers.data,Tigers_name],
"Marines":[Marines.data,Marines_name],
"Buffaloes":[Buffaloes.data,Buffaloes_name],
"Swallows":[Swallows.data,Swallows_name],
"Giants":[Giants.data,Giants_name],
"Lions":[Lions.data,Lions_name]
}
# find and return a team's registered players
def team_member(team_name):
team_member_list=team_name+"の応援歌登録済み選手一覧です。\n"
for team_member in team_list[team_name][0].keys():
team_member_list+="\n"+team_member
return team_member_list
# find and return a player's cheer-song lyrics
def search_player_to_lylic(player_name):
lylic=""
for team_name in team_list.keys():
for player in team_list[team_name][0].keys():
if player_name in player:
if len(lylic)>0:
lylic+="\n\n\n"
lylic+=team_name+":"+player+"\n"+team_list[team_name][0][player]
if len(lylic)==0:
lylic="選手が見つかりませんでした。\nチーム名を入れると、応援歌登録済み選手一覧を表示します。"
return lylic
|
StarcoderdataPython
|
1650011
|
<gh_stars>1-10
import patent
if __name__ == '__main__':
google_patent = patent.get('CN1045110B', 'en')
print(google_patent.title)
print(google_patent.number)
print(google_patent.inventors)
print(google_patent.language)
print(google_patent.claims)
|
StarcoderdataPython
|
123217
|
<filename>python/wiki.py
class WikiPage:
def __init__(self, title, uri=None, text=None, tags=None):
self.title = title
self.text = text or ""
self.tags = tags or {}
self.uri = uri or title
self.parents = []
self.children = []
def add_child(self, page):
self.children.append(page)
page.add_parent(self)
def add_parent(self, page):
self.parents.append(page)
if page.uri == "/":
self.uri = "/" + self.uri
else:
self.uri = page.uri + "/" + self.uri
|
StarcoderdataPython
|
3366295
|
<gh_stars>1000+
# Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from cassandra import cluster
from cassandra.cluster import ContinuousPagingOptions
from cassandra.datastax.graph.fluent import DseGraph
from cassandra.graph import VertexProperty
from tests.integration import greaterthanorequaldse68
from tests.integration.advanced.graph import (
GraphUnitTestCase, ClassicGraphSchema, CoreGraphSchema,
VertexLabel, GraphTestConfiguration
)
from tests.integration import greaterthanorequaldse60
from tests.integration.advanced.graph.fluent import (
BaseExplicitExecutionTest, create_traversal_profiles, check_equality_base)
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
@greaterthanorequaldse60
@GraphTestConfiguration.generate_tests(traversal=True)
class BatchStatementTests(BaseExplicitExecutionTest):
def setUp(self):
super(BatchStatementTests, self).setUp()
self.ep_graphson2, self.ep_graphson3 = create_traversal_profiles(self.cluster, self.graph_name)
def _test_batch_with_schema(self, schema, graphson):
"""
Sends a Batch statement and verifies it has succeeded with a schema created
@since 1.1.0
@jira_ticket PYTHON-789
@expected_result the batch is executed successfully and the inserted data can be read back
@test_category dse graph
"""
self._send_batch_and_read_results(schema, graphson)
def _test_batch_without_schema(self, schema, graphson):
"""
Sends a Batch statement and verifies it has succeeded without a schema created
@since 1.1.0
@jira_ticket PYTHON-789
@expected_result the batch is executed successfully and the inserted data can be read back
@test_category dse graph
"""
if schema is not ClassicGraphSchema:
raise unittest.SkipTest('schema-less is only for classic graphs')
self._send_batch_and_read_results(schema, graphson, use_schema=False)
def _test_batch_with_schema_add_all(self, schema, graphson):
"""
Sends a Batch statement and verifies it has succeeded with a schema created.
Uses :method:`dse_graph.query._BatchGraphStatement.add_all` to add the statements
instead of :method:`dse_graph.query._BatchGraphStatement.add`
@since 1.1.0
@jira_ticket PYTHON-789
@expected_result the batch is executed successfully and the inserted data can be read back
@test_category dse graph
"""
self._send_batch_and_read_results(schema, graphson, add_all=True)
def _test_batch_without_schema_add_all(self, schema, graphson):
"""
Sends a Batch statement and verifies it has succeeded without a schema created
Uses :method:`dse_graph.query._BatchGraphStatement.add_all` to add the statements
instead of :method:`dse_graph.query._BatchGraphStatement.add`
@since 1.1.0
@jira_ticket PYTHON-789
@expected_result the batch is executed successfully and the inserted data can be read back
@test_category dse graph
"""
if schema is not ClassicGraphSchema:
raise unittest.SkipTest('schema-less is only for classic graphs')
self._send_batch_and_read_results(schema, graphson, add_all=True, use_schema=False)
def test_only_graph_traversals_are_accepted(self):
"""
Verifies that ValueError is raised if the parameter passed to add is not a traversal
@since 1.1.0
@jira_ticket PYTHON-789
@expected_result ValueError is raised
@test_category dse graph
"""
batch = DseGraph.batch()
self.assertRaises(ValueError, batch.add, '{"@value":{"step":[["addV","poc_int"],'
'["property","bigint1value",{"@value":12,"@type":"g:Int32"}]]},'
'"@type":"g:Bytecode"}')
another_batch = DseGraph.batch()
self.assertRaises(ValueError, batch.add, another_batch)
def _send_batch_and_read_results(self, schema, graphson, add_all=False, use_schema=True):
traversals = []
datatypes = schema.fixtures.datatypes()
values = {}
g = self.fetch_traversal_source(graphson)
ep = self.get_execution_profile(graphson)
batch = DseGraph.batch(session=self.session,
execution_profile=self.get_execution_profile(graphson, traversal=True))
for data in six.itervalues(datatypes):
typ, value, deserializer = data
vertex_label = VertexLabel([typ])
property_name = next(six.iterkeys(vertex_label.non_pk_properties))
values[property_name] = value
if use_schema or schema is CoreGraphSchema:
schema.create_vertex_label(self.session, vertex_label, execution_profile=ep)
traversal = g.addV(str(vertex_label.label)).property('pkid', vertex_label.id).property(property_name, value)
if not add_all:
batch.add(traversal)
traversals.append(traversal)
if add_all:
batch.add_all(traversals)
self.assertEqual(len(datatypes), len(batch))
batch.execute()
vertices = self.execute_traversal(g.V(), graphson)
self.assertEqual(len(vertices), len(datatypes), "g.V() returned {}".format(vertices))
# Iterate over all the vertices and check that they match the original input
for vertex in vertices:
schema.ensure_properties(self.session, vertex, execution_profile=ep)
key = [k for k in list(vertex.properties.keys()) if k != 'pkid'][0].replace("value", "")
original = values[key]
self._check_equality(original, vertex)
def _check_equality(self, original, vertex):
for key in vertex.properties:
if key == 'pkid':
continue
value = vertex.properties[key].value \
if isinstance(vertex.properties[key], VertexProperty) else vertex.properties[key][0].value
check_equality_base(self, original, value)
class ContinuousPagingOptionsForTests(ContinuousPagingOptions):
def __init__(self,
page_unit=ContinuousPagingOptions.PagingUnit.ROWS, max_pages=1, # max_pages=1
max_pages_per_second=0, max_queue_size=4):
super(ContinuousPagingOptionsForTests, self).__init__(page_unit, max_pages, max_pages_per_second,
max_queue_size)
def reset_paging_options():
cluster.ContinuousPagingOptions = ContinuousPagingOptions
@greaterthanorequaldse68
@GraphTestConfiguration.generate_tests(schema=CoreGraphSchema)
class GraphPagingTest(GraphUnitTestCase):
def setUp(self):
super(GraphPagingTest, self).setUp()
self.addCleanup(reset_paging_options)
self.ep_graphson2, self.ep_graphson3 = create_traversal_profiles(self.cluster, self.graph_name)
def _setup_data(self, schema, graphson):
self.execute_graph(
"schema.vertexLabel('person').ifNotExists().partitionBy('name', Text).property('age', Int).create();",
graphson)
for i in range(100):
self.execute_graph("g.addV('person').property('name', 'batman-{}')".format(i), graphson)
def _test_cont_paging_is_enabled_by_default(self, schema, graphson):
"""
Test that graph paging is automatically enabled with a >=6.8 cluster.
@jira_ticket PYTHON-1045
@expected_result the default continuous paging options are used
@test_category dse graph
"""
# with traversals... I don't have access to the response future... so this is a hack to ensure paging is on
cluster.ContinuousPagingOptions = ContinuousPagingOptionsForTests
ep = self.get_execution_profile(graphson, traversal=True)
self._setup_data(schema, graphson)
self.session.default_fetch_size = 10
g = DseGraph.traversal_source(self.session, execution_profile=ep)
results = g.V().toList()
self.assertEqual(len(results), 10) # only 10 results due to our hack
def _test_cont_paging_can_be_disabled(self, schema, graphson):
"""
Test that graph paging can be disabled.
@jira_ticket PYTHON-1045
@expected_result the default continuous paging options are not used
@test_category dse graph
"""
# with traversals... I don't have access to the response future... so this is a hack to ensure paging is on
cluster.ContinuousPagingOptions = ContinuousPagingOptionsForTests
ep = self.get_execution_profile(graphson, traversal=True)
ep = self.session.execution_profile_clone_update(ep, continuous_paging_options=None)
self._setup_data(schema, graphson)
self.session.default_fetch_size = 10
g = DseGraph.traversal_source(self.session, execution_profile=ep)
results = g.V().toList()
self.assertEqual(len(results), 100) # 100 results since paging is disabled
def _test_cont_paging_with_custom_options(self, schema, graphson):
"""
Test that we can specify custom paging options.
@jira_ticket PYTHON-1045
@expected_result we get only the desired number of results
@test_category dse graph
"""
ep = self.get_execution_profile(graphson, traversal=True)
ep = self.session.execution_profile_clone_update(ep,
continuous_paging_options=ContinuousPagingOptions(max_pages=1))
self._setup_data(schema, graphson)
self.session.default_fetch_size = 10
g = DseGraph.traversal_source(self.session, execution_profile=ep)
results = g.V().toList()
self.assertEqual(len(results), 10) # only 10 results since max_pages=1 caps the paging
|
StarcoderdataPython
|
45779
|
<reponame>Steven-Wilson/pyweek25
import model
import pyxelen
from view import *
from sounds import *
from utils import *
def set_scene(state, **kwargs):
return state.set(scene=state.scene.set(**kwargs))
def selection(state):
return state.scene.selection
def select_next(state):
state = set_scene(state, selection=selection(state).next())
return state.play_effect(FX_BLIP)
def select_prev(state):
state = set_scene(state, selection=selection(state).prev())
return state.play_effect(FX_BLIP)
def select(state):
if selection(state) == model.MainMenuSelection.PLAY:
return state.set(scene=model.ACT1).play_effect(FX_SELECT)
elif selection(state) == model.MainMenuSelection.OPTIONS:
return state.set(
scene=model.Settings(
selection=model.SettingsSelection.MUSIC_VOLUME
)
).play_effect(FX_SELECT)
elif selection(state) == model.MainMenuSelection.CREDITS:
return state.set(scene=model.CREDITS)
else:
return state
def on_key_down(key, state):
if key == pyxelen.Key.DOWN:
return select_next(state)
elif key == pyxelen.Key.UP:
return select_prev(state)
elif key == pyxelen.Key.RETURN:
return select(state)
else:
return state
def on_update(state):
return state.set_music(MUSIC_MENU)
def view(renderer, state):
renderer.draw_sprite(MENU_BACKGROUND, FULLSCREEN)
for i, s in enumerate(model.MainMenuSelection):
renderer.draw_text(MAIN_FONT, s.value, 210, 144 + i * 20, False)
if selection(state) == s:
renderer.draw_sprite(RIGHT_ARROW, Box(190, 140 + i * 20, 16, 16))
|
StarcoderdataPython
|
3202381
|
<reponame>EduardEdiJerkovic/Apr2
def hooke_jeeves(f, x0, dx=0.5, e=10 ** -6):
iterations = 0
xp = x0.copy()
xb = x0.copy()
while True:
iterations += 1
xn = find(f, xp, dx)
if f.value_of(xn) < f.value_of(xb):
xp = list(map(lambda n, b: 2 * n - b, xn, xb))
xb = xn.copy()
else:
dx /= 2
xp = xb.copy()
if dx <= e:
return xb, iterations
def find(f, xp, dx):
x = xp.copy()
for i in range(len(xp)):
P = f.value_of(x)
x[i] += dx
N = f.value_of(x)
if N > P:
x[i] -= 2 * dx
N = f.value_of(x)
if N > P:
x[i] += dx
return x
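# Minimal usage sketch (assumption: `f` is any object exposing value_of(x)
# for a list-like point x). The quadratic below is hypothetical, with its
# minimum at (1, 2), so the search should converge near [1.0, 2.0].
class _Quadratic:
    def value_of(self, x):
        return (x[0] - 1) ** 2 + (x[1] - 2) ** 2

if __name__ == "__main__":
    best, iterations = hooke_jeeves(_Quadratic(), [0.0, 0.0])
    print(best, iterations)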
|
StarcoderdataPython
|
3296536
|
#!/usr/bin/env python2
import urllib2, json, os, sys
HEADER = ''' Redirect = {
image: function(board, filename) {
switch (board) {
'''
POST = ''' }
},
post: function(board, postID) {
switch (board) {
'''
TO = ''' }
},
to: function(data) {
var board, threadID, url;
if (!data.isSearch) {
threadID = data.threadID;
}
board = data.board;
switch (board) {
'''
BOTTOM = ''' default:
if (threadID) {
url = "//boards.4chan.org/" + board + "/";
}
}
return url || null;
},
'''
CASE = " case '%s':\n"
RETURN_IMAGE = ' return "%s/" + board + "/full_image/" + filename;\n'
RETURN_POST = ' return "%s/_/api/chan/post/?board=" + board + "&num=" + postID;\n'
RETURN_REDIRECT = """ url = Redirect.path('%s', '%s', data);
break;
"""
ARCHIVES_URL = "https://4chenz.github.io/archives.json/archives.json"
ARCHIVES_JSON = os.path.join(os.path.dirname(os.path.abspath(__file__)), "archives.json")
PRIORITIES_JSON = os.path.join(os.path.dirname(os.path.abspath(__file__)), "priorities.json")
ARCHIVE_HIDDEN = [29,32,35]
def jsonloadf(filename):
with open(filename) as f:
data = json.load(f, 'utf-8')
return data
def jsonsavef(filename, data):
with open(filename, 'wb') as f:
json.dump(data, f, sort_keys=True, indent=4, separators=(',', ': '), encoding='utf-8')
def http_protocol(a):
dom = a['domain']
if a['https'] and a['http']:
return '//' + dom
elif a['https']:
return 'https://' + dom
elif a['http']:
return 'http://' + dom
class Build:
def __init__(self, outstream=sys.stdout, msgstream=sys.stderr):
self.out = outstream
self.msg = msgstream
self.files = {}
self.boards = {}
self.data = None
self.priorities = jsonloadf(PRIORITIES_JSON)
def page_dl(self):
request = urllib2.Request(ARCHIVES_URL)
response = urllib2.urlopen(request)
data = response.read()
response.close()
self.data = json.loads(data)
def boards_list(self):
f = []
b = []
for a in self.data:
f += a['files']
b += a['boards']
self.archivedfiles = list(set(f))
self.archivedboards = list(set(b))
def find_redundant(self):
f = {}
b = {}
for n, a in enumerate(self.data):
for e in a['files']:
if e in f:
f[e].append(n)
else:
f[e] = [n]
for e in a['boards']:
if e in b:
b[e].append(n)
else:
b[e] = [n]
def filterhidden(value):
return filter(lambda x: not (self.data[x]['uid'] in ARCHIVE_HIDDEN and len(value) > 1), value)
self.singleboards = {}
self.redundantboards = {}
for k, v in b.iteritems():
v2 = filterhidden(v)
if len(v2) == 1:
self.singleboards[k] = v2[0]
if len(v2) > 1:
self.redundantboards[k] = v2
self.singlefiles = {}
self.redundantfiles = {}
for k, v in f.iteritems():
v2 = filterhidden(v)
if len(v2) == 1:
self.singlefiles[k] = v2[0]
if len(v2) > 1:
self.redundantfiles[k] = v2
def pprint(self, t):
print >>self.msg, "%s:" % t
if t == 'files':
it = self.redundantfiles.iteritems()
else:
it = self.redundantboards.iteritems()
for k, v in it:
print >>self.msg, "%s --> " % k,
sel = None
selfound = None
if k in self.priorities[t]:
sel = self.priorities[t][k]
for x in v:
if self.data[x]['uid'] == sel:
forstr = "{%s}"
selfound = x
else:
forstr = '"%s"'
print >>self.msg, forstr % self.data[x]['name'],
if sel == None or selfound == None:
print >>self.msg, "NOT SELECTED!"
else:
print >>self.msg
if t == 'files':
self.files[k] = selfound
else:
self.boards[k] = selfound
def prioprint(self):
self.separator()
print >>self.msg, "archives:"
for a in self.data:
if a['uid'] in ARCHIVE_HIDDEN:
print >>self.msg, "HIDDEN:",
print >>self.msg, a['uid'], a['name']
self.separator()
self.pprint('boards')
self.separator()
self.pprint('files')
self.separator()
def merge(self):
self.boards.update(self.singleboards)
self.files.update(self.singlefiles)
def separator(self):
if self.msg == sys.stderr:
print >>self.msg, "-" * 80
def build(self):
if not self.data:
self.page_dl()
#add empty "files" if missing
for d in self.data:
if not "files" in d:
d.update({"files" : []})
#do stuff
self.boards_list()
self.find_redundant()
self.prioprint()
self.merge()
#image
self.out.write(HEADER)
for n, a in enumerate(self.data):
filefound = False
for b in a['files']:
if b in self.files and n == self.files[b]:
filefound = True
self.out.write(CASE % b)
if filefound:
self.out.write(RETURN_IMAGE % http_protocol(a))
self.out.write(POST)
#post
for n, a in enumerate(self.data):
if a['software'] != 'foolfuuka':
continue
boardfound = False
for b in a['boards']:
if b in self.boards and n == self.boards[b]:
boardfound = True
self.out.write(CASE % b)
if boardfound:
self.out.write(RETURN_POST % http_protocol(a))
self.out.write(TO)
#redirect
for n, a in enumerate(self.data):
boardfound = False
for b in a['boards']:
if b in self.boards and n == self.boards[b]:
boardfound = True
self.out.write(CASE % b)
if boardfound:
self.out.write(RETURN_REDIRECT % (http_protocol(a), a['software']))
self.out.write(BOTTOM)
if __name__ == "__main__":
builder = Build()
if len(sys.argv) == 2:
builder.data = jsonloadf(sys.argv[1])
builder.build()
|
StarcoderdataPython
|
1623820
|
import logging
import re
from django.apps import apps
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib.admin import AdminSite
from django.contrib.admin.models import LogEntry
from django.contrib.admin.views.main import ChangeList
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError, FieldDoesNotExist
from django.db import connection
from django.db.models.functions import Lower
from django.db.utils import OperationalError
from django.forms import modelform_factory
from django.utils.html import mark_safe, format_html
from django.views.decorators.cache import never_cache
from import_export.admin import ExportMixin
from social_django.models import Association, Nonce, UserSocialAuth
from taggit.models import Tag
from taggit.apps import TaggitAppConfig
from collaborative.export import collaborative_modelresource_factory
from collaborative.filters import TagListFilter
from django_models_from_csv.admin import AdminAutoRegistration
from django_models_from_csv.forms import create_taggable_form
from django_models_from_csv.models import DynamicModel, CredentialStore
logger = logging.getLogger(__name__)
class NewUserAdmin(UserAdmin):
list_display = ("username", "email", "first_name", "last_name")
add_form_template = 'admin/auth/user/add_form.html'
def add_view(self, request, *args, **kwargs):
if request.method != "POST":
return super().add_view(request, *args, **kwargs)
password1 = request.POST.get("password1")
password2 = request.POST.get("password2")
if not password1 and not password2:
newpass = User.objects.make_random_password(length=32)
request.POST._mutable = True
request.POST["password1"] = <PASSWORD>
request.POST["password2"] = <PASSWORD>
request.POST._mutable = False
return super().add_view(request, *args, **kwargs)
def widget_for_object_field(obj, field_name):
FieldForm = modelform_factory(
obj.source_dynmodel().get_model(),
fields=(field_name,)
)
widget = FieldForm().fields[field_name].widget
return widget
def make_getter(rel_name, attr_name, getter_name, field=None):
"""
Build a reverse lookup getter, to be attached to the custom
dynamic lookup admin class.
"""
def getter(self):
if not hasattr(self, rel_name):
return None
rel = getattr(self, rel_name).first()
if not rel:
return None
fieldname = "%s__%s" % (rel_name, attr_name)
content_type_id = ContentType.objects.get_for_model(self).id
# handle tagging separately
if attr_name == "tags":
all_tags = rel.tags.all()
tags_html = []
for t in all_tags:
name = t.name
html = (
"<span class='tag-bubble'>"
"<span class='remtag'>x</span>"
"%s</span>"
) % (name)
tags_html.append(html)
return mark_safe(format_html(
"".join(tags_html)
))
# try to lookup choices for field
choices = getattr(
rel, "%s_CHOICES" % attr_name.upper(), []
)
value = getattr(rel, attr_name)
for pk, txt in choices:
if pk == value:
widget = widget_for_object_field(rel, attr_name)
html = widget.render(fieldname, value)
return mark_safe(format_html(
"<span content_type_id='{}' class='inline-editable'>{}</span>",
content_type_id,
html,
))
# no choice found, return field value
widget = widget_for_object_field(rel, attr_name)
html = widget.render(fieldname, value)
return mark_safe(format_html(
"<span content_type_id='{}' class='inline-editable'>{}</span>",
content_type_id,
html,
))
# the header in django admin is named after the function name. if
# this line is removed, the header will be "GETTER" for all derived
# reverse lookup columns
getter.__name__ = getter_name
return getter
class ReimportMixin(ExportMixin):
"""
Mixin for displaying re-import button on admin list view, alongside the
export button (from import_export module).
"""
change_list_template = 'django_models_from_csv/change_list_dynmodel.html'
class CaseInsensitiveChangeList(ChangeList):
"""
Provides case-insensitive ordering for admin list view.
"""
def get_ordering(self, request, queryset):
ordering = super().get_ordering(request, queryset)
for i in range(len(ordering)):
desc = False
fieldname = ordering[i]
if fieldname.startswith("-"):
fieldname = fieldname[1:]
desc = True
try:
field = queryset.model()._meta.get_field(
"id" if fieldname == "pk" else fieldname
)
except FieldDoesNotExist:
continue
f_type = field.db_type(connection)
if f_type != "text":
continue
if desc:
ordering[i] = Lower(fieldname).desc()
else:
ordering[i] = Lower(fieldname)
return ordering
class ReverseFKAdmin(admin.ModelAdmin):
def __init__(self, *args, **kwargs):
"""
Build relations lookup methods, like metadata__status, but
for the reverse foreignkey direction.
"""
super().__init__(*args, **kwargs)
Model, site = args
if "DynamicModel" == Model._meta.object_name:
return
# setup reverse related attr getters so we can do things like
# metadata__status in the reverse direction
for rel in Model._meta.related_objects:
rel_name = rel.get_accessor_name() # "metadata", etc, related_name
rel_model = rel.related_model
if not rel_model:
logger.warning("No related model found!")
continue
for rel_field in rel_model._meta.get_fields():
# build a getter for this relation attribute
attr_name = rel_field.name
# remove auto fields and other fields of that nature; we
# only want the directly accessible fields of this model
if attr_name != "tags":
if rel_field.is_relation: continue
if not hasattr(rel_field, "auto_created"): continue
if rel_field.auto_created: continue
getter_name = "%s_%s" % (rel_name, attr_name)
short_desc = re.sub(r"[\-_]+", " ", attr_name).replace(
"assignee", "assigned to"
)
getter = make_getter(
rel_name, attr_name, getter_name, field=rel_field
)
setattr(self, getter_name, getter)
getattr(self, getter_name).short_description = short_desc
getattr(
self, getter_name
).admin_order_field = "%s__%s" % (rel_name, attr_name)
def get_view_label(self, obj):
return "View"
get_view_label.short_description = 'Records'
def get_changelist(self, request, **kwargs):
# This controls how the admin list view works. Override the
# ChangeList to modify ordering, template, etc
return CaseInsensitiveChangeList
class DynamicModelAdmin(admin.ModelAdmin):
def get_queryset(self, request):
return DynamicModel.objects.exclude(name__icontains="metadata")
def get_full_deletion_set(self, queryset, only_meta=False):
"""
This is called when a user selects some dynamic models to be
deleted. Since the admin queryset only displays the main models,
not the metadata models, each item in the queryset can be
assumed to be a primary data source model. Here, we want to
also add the corresponding meta models.
"""
pks = []
for model in queryset:
name = model.name
meta = "%smetadata" % (name)
contact_meta = "%scontactmetadata" % (name)
names = (meta, contact_meta)
if not only_meta:
names = (name, meta, contact_meta)
for dynmodel in DynamicModel.objects.filter(name__in=names):
pks.append(dynmodel.pk)
# order this by descending id, since the original model gets
# created first, and we need to delete the reverse fk attached
# models first to avoid a cascade
return DynamicModel.objects.filter(
pk__in=pks
).order_by("-id")
def get_deleted_objects(self, queryset, request):
extended_queryset = self.get_full_deletion_set(queryset)
return super().get_deleted_objects(extended_queryset, request)
def delete_queryset(self, request, queryset):
# for model in queryset:
# for model in self.get_full_deletion_set(queryset):
for model in queryset:
Model = model.get_model()
model_qs = DynamicModel.objects.filter(pk=model.pk)
# wipe all relations, by truncating table
for related in self.get_full_deletion_set(model_qs, only_meta=True):
RelatedModel = related.get_model()
for obj in RelatedModel.objects.all():
obj.delete()
model.delete()
# NOTE: we have to delete these *after* we wipe the original.
# otherwise django throws all kinds of errors or will gracefully
# succeed but throw errors later during normal admin operation
for metamodel in self.get_full_deletion_set(model_qs, only_meta=True):
metamodel.delete()
class AdminMetaAutoRegistration(AdminAutoRegistration):
def should_register_admin(self, Model):
# metadata models get admin created along with the base model
name = Model._meta.object_name
if name.endswith("metadata"):
return False
return super().should_register_admin(Model)
def create_dynmodel_admin(self, Model):
name = Model._meta.object_name
inheritance = (DynamicModelAdmin,)
return type("%sAdmin" % name, inheritance, {})
def create_admin(self, Model):
name = Model._meta.object_name
if "metadata" in name:
return
if name == "DynamicModel":
return self.create_dynmodel_admin(Model)
meta = []
# find the Metadata model corresponding to the
# csv-backed model we're creating admin for.
# this will end up as an inline admin
for MetaModel in apps.get_models():
meta_name = MetaModel._meta.object_name
# all our additional related models are in this pattern:
# [model-name][contact|]metadata
if not meta_name.startswith(name) or \
not meta_name.endswith("metadata"):
continue
dynmodel_meta = MetaModel.source_dynmodel(MetaModel)
# for contact log, always show a blank one for easy access
extra = 0
if meta_name.endswith("contactmetadata"):
extra = 1
meta_attrs = {
"model": MetaModel,
"extra": extra,
}
if not meta_name.endswith("contactmetadata"):
fields_meta = self.get_fields(MetaModel, dynmodel=dynmodel_meta)
try:
form_meta = create_taggable_form(MetaModel, fields=fields_meta)
meta_attrs["form"] = form_meta
# no tags on this model
except FieldError:
pass
MetaModelInline = type(
"%sInlineAdmin" % meta_name,
(admin.StackedInline,), meta_attrs)
meta.append(MetaModelInline)
# get searchable and filterable (from column attributes)
# should we order by something? number of results?
try:
model_desc = DynamicModel.objects.get(name=name)
except OperationalError:
return None
except DynamicModel.DoesNotExist:
logger.warning("Model with name: %s doesn't exist. Skipping" % name)
# return super().create_admin(Model)
return None
cols = list(reversed(model_desc.columns))
searchable = [c.get("name") for c in cols if c.get("searchable")]
filterable = [c.get("name") for c in cols if c.get("filterable")]
# Build our CSV-backed admin, attaching inline meta model
dynmodel = Model.source_dynmodel(Model)
fields = self.get_fields(Model, dynmodel=dynmodel)
associated_fields = ["get_view_label"]
if name != "DynamicModel":
test_item = Model.objects.first()
if test_item and hasattr(test_item, "metadata"):
associated_fields.append("metadata_status")
filterable.append("metadata__status")
test_metadata = test_item.metadata.first()
if hasattr(test_metadata, "assigned_to"):
associated_fields.append("metadata_assigned_to")
filterable.append("metadata__assigned_to")
elif hasattr(test_metadata, "assignee"):
associated_fields.append("metadata_assignee")
filterable.append("metadata__assignee")
if test_metadata and hasattr(test_metadata, "tags"):
associated_fields.append("metadata_tags")
filterable.append(TagListFilter)
list_display = associated_fields + fields[:5]
exporter = collaborative_modelresource_factory(
model=Model,
)
# Note that ExportMixin needs to be declared before ReverseFKAdmin
inheritance = (ReimportMixin, ReverseFKAdmin,)
return type("%sAdmin" % name, inheritance, {
"inlines": meta,
"readonly_fields": fields,
"list_display": list_display,
"search_fields": searchable,
"list_filter": filterable,
"resource_class": exporter,
})
# Hide "taggit" name
TaggitAppConfig.verbose_name = "Tagging"
# Remove tagged item inline
class TagAdmin(admin.ModelAdmin):
list_display = ["name", "slug"]
ordering = ["name", "slug"]
search_fields = ["name"]
prepopulated_fields = {"slug": ["name"]}
class Meta:
verbose_name = "Tags"
verbose_name_plural = "Tags"
app_label = "Tags"
@never_cache
def login(*args, **kwargs):
"""
Override login view to hide Google Sign In button if no
OAuth credentials added.
"""
extra_context = kwargs.get("extra_context", {})
have_oauth_creds = CredentialStore.objects.filter(
name="google_oauth_credentials"
).count()
extra_context["google_oauth_credentials"] = have_oauth_creds > 0
if "first_login" in extra_context:
extra_context["first_login"] = False
kwargs["extra_context"] = extra_context
return AdminSite().login(*args, **kwargs)
admin.site.login = login
admin.site.site_header = "Collaborate"
admin.site.index_title = "Welcome"
admin.site.site_title = "Collaborate"
# Remove the "view site" link from the admin header
admin.site.site_url = None
# unregister django social auth from admin
admin.site.unregister(Association)
admin.site.unregister(UserSocialAuth)
admin.site.unregister(Nonce)
admin.site.unregister(User)
admin.site.unregister(Tag)
admin.site.register(Tag, TagAdmin)
admin.site.register(LogEntry)
admin.site.register(User, NewUserAdmin)
def register_dynamic_admins(*args, **kwargs):
AdminMetaAutoRegistration(include="django_models_from_csv.models").register()
# Register the ones that exist ...
register_dynamic_admins()
# ... and register new ones that get created. Otherwise, we'd
# have to actually restart the Django process post-model create
if register_dynamic_admins not in DynamicModel._POST_SAVE_SIGNALS:
DynamicModel._POST_SAVE_SIGNALS.append(register_dynamic_admins)
|
StarcoderdataPython
|
1696282
|
# fa19-516-160
# E.Cloudmesh.Common.2
# Task : Develop a program that demonstrates the use of dotdict.
# Imports
from cloudmesh.common.dotdict import dotdict
# Sample data
data = [
{
"name": "Shreyans",
"course": "e516",
"address" : {
"city": "Bloomington" ,
"state": "IN"
}
},
{
"name": "Ronak",
"course": "e260",
"address" : {
"city": "Indianapolis",
"state": "IL"
}
}
]
# dotdict conversion and Print data
data_1 = dotdict(data[1])
print(type(data_1))
print(data_1)
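# Hedged note: cloudmesh's dotdict exposes top-level keys as attributes, so
# attribute-style access is equivalent to plain dict access here:
# print(data_1.name)       # same as data_1["name"]
# print(data_1["course"])  # regular dict access still works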
#Code reference Introduction to python : <NAME>.: section 6.4.1
|
StarcoderdataPython
|
3234331
|
<filename>ml/ml_models/tumor_detection.py
import os
import sys
import cv2
import matplotlib.pyplot as plt
import numpy as np
from keras import layers
from keras.applications.vgg16 import VGG16
from keras.callbacks import EarlyStopping
from keras.models import Model, Sequential, load_model
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
absp = os.path.abspath("")
sys.path.append(absp)
# print(keras.backend.tensorflow_backend._get_available_gpus())
RANDOM_SEED = 123
IMG_SIZE = (224, 224)
NUM_CLASSES = 1
EPOCHS = 30
class Classification_Model:
def __init__(self):
self.aug = None
base_model = VGG16(include_top=False, input_shape=IMG_SIZE + (3,))
self.model = Sequential()
self.model.add(base_model)
self.model.add(layers.Flatten())
self.model.add(layers.Dropout(0.5))
self.model.add(layers.Dense(NUM_CLASSES, activation='sigmoid'))
self.model.layers[0].trainable = False
self.model.compile(
loss='binary_crossentropy',
optimizer=RMSprop(lr=1e-4),
metrics=['accuracy']
)
self.model.summary()
def train_model(self):
self.aug = ImageDataGenerator(
rotation_range=15,
width_shift_range=0.05,
height_shift_range=0.05,
rescale=1./255,
shear_range=0.05,
brightness_range=[0.1, 1.5],
horizontal_flip=True,
vertical_flip=True
)
train_generator = self.aug.flow_from_directory(
absp + "/data/train/", seed=RANDOM_SEED, target_size=(224, 224), class_mode='binary')
test_generator = self.aug.flow_from_directory(
absp + "/data/test/", seed=RANDOM_SEED, target_size=(224, 224), class_mode='binary')
es = EarlyStopping(
monitor='val_acc',
mode='max',
patience=6
)
history = self.model.fit_generator(
train_generator,
steps_per_epoch=50,
epochs=EPOCHS,
validation_data=test_generator,
validation_steps=25,
callbacks=[es]
)
def load_model(self, path_to_model):
self.model = load_model(path_to_model)
def predict_image(self, img):
"""Predicts prb of an image having a tumor
Arguments:
img {[np.array]} -- [array representation of image of size (224,224,3); could be got by load_image()]
"""
pred = self.model.predict(img)
if pred[0][0] > 0.5:
return f"Tumor detected with a probability: {pred[0][0]}", pred[0][0]
else:
return f"NO tumors detected with a probability: {1- pred[0][0]}", 1 - pred[0][0]
def save_model(self, model_name):
self.model.save(model_name)
@staticmethod
def load_image(im_path):
img = np.array(cv2.imread(im_path))
img = np.array([cv2.resize(img, dsize=IMG_SIZE)])
return img
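# Minimal usage sketch (file and model paths are hypothetical):
# clf = Classification_Model()
# clf.train_model()  # or: clf.load_model("tumor_vgg16.h5")
# img = Classification_Model.load_image("sample_scan.jpg")
# message, probability = clf.predict_image(img)
# print(message)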
|
StarcoderdataPython
|
3224016
|
<reponame>jfitz/code-stat
import string
import math
from codestat_token import Token
from codestat_tokenizer import Tokenizer
from token_builders import (
InvalidTokenBuilder,
WhitespaceTokenBuilder,
NewlineTokenBuilder,
EscapedStringTokenBuilder,
PrefixedStringTokenBuilder,
IntegerTokenBuilder,
IntegerExponentTokenBuilder,
RealTokenBuilder,
RealExponentTokenBuilder,
IdentifierTokenBuilder,
CaseInsensitiveListTokenBuilder,
CaseSensitiveListTokenBuilder,
SingleCharacterTokenBuilder,
LeadToEndOfLineTokenBuilder
)
from cx_token_builders import (
SlashSlashCommentTokenBuilder,
SlashStarCommentTokenBuilder
)
from examiner import Examiner
class CsharpExaminer(Examiner):
@staticmethod
def __escape_z__():
InvalidTokenBuilder.__escape_z__()
WhitespaceTokenBuilder.__escape_z__()
NewlineTokenBuilder.__escape_z__()
EscapedStringTokenBuilder.__escape_z__()
PrefixedStringTokenBuilder.__escape_z__()
IntegerTokenBuilder.__escape_z__()
IntegerExponentTokenBuilder.__escape_z__()
RealTokenBuilder.__escape_z__()
RealExponentTokenBuilder.__escape_z__()
IdentifierTokenBuilder.__escape_z__()
CaseInsensitiveListTokenBuilder.__escape_z__()
CaseSensitiveListTokenBuilder.__escape_z__()
SingleCharacterTokenBuilder.__escape_z__()
LeadToEndOfLineTokenBuilder.__escape_z__()
SlashSlashCommentTokenBuilder.__escape_z__()
SlashStarCommentTokenBuilder.__escape_z__()
return 'Escape ?Z'
def __init__(self, code):
super().__init__()
operand_types = []
whitespace_tb = WhitespaceTokenBuilder()
newline_tb = NewlineTokenBuilder()
integer_tb = IntegerTokenBuilder(None)
integer_exponent_tb = IntegerExponentTokenBuilder(None)
real_tb = RealTokenBuilder(False, False, None)
real_exponent_tb = RealExponentTokenBuilder(False, False, 'E', None)
operand_types.append('number')
leads = '_'
extras = '_'
identifier_tb = IdentifierTokenBuilder(leads, extras)
operand_types.append('identifier')
quotes = ['"', "'", "’"]
string_tb = EscapedStringTokenBuilder(quotes, 10)
prefixed_string_tb = PrefixedStringTokenBuilder('@', False, ['"'])
operand_types.append('string')
slash_slash_comment_tb = SlashSlashCommentTokenBuilder()
slash_star_comment_tb = SlashStarCommentTokenBuilder()
directives = [
'#if', '#else', '#elif', '#endif',
'#define', '#undef',
'#line', '#pragma'
]
preprocessor_tb = CaseSensitiveListTokenBuilder(directives, 'preprocessor', False)
c_warning_tb = LeadToEndOfLineTokenBuilder('#warning', True, 'preprocessor')
c_error_tb = LeadToEndOfLineTokenBuilder('#error', True, 'preprocessor')
c_region_tb = LeadToEndOfLineTokenBuilder('#region', True, 'preprocessor')
c_endregion_tb = LeadToEndOfLineTokenBuilder('#endregion', True, 'preprocessor')
terminators_tb = SingleCharacterTokenBuilder(';', 'statement terminator', False)
known_operators = [
'+', '-', '*', '/', '%',
'=', '==', '!=', '>', '>=', '<', '<=',
'+=', '-=', '*=', '/=', '%=', '&=', '|=', '^=', '<<=', '>>=',
'!', '&', '|', '~', '<<', '>>',
'^',
'.',
'++', '--', '->', '&&', '||',
'?', '??', '?.', '?[',
'=>',
'as', 'is', 'await', 'sizeof',
'typeof', 'new'
]
self.unary_operators = [
'+', '-',
'!', '~',
'++', '--',
'new', 'sizeof', 'typeof'
]
self.postfix_operators = [
'++', '--'
]
known_operator_tb = CaseSensitiveListTokenBuilder(known_operators, 'operator', False)
groupers = ['(', ')', ',', '[', ']', '{', '}', ':']
group_starts = ['(', '[', ',', '{']
group_ends = [')', ']', '}']
group_mids = [',', ':']
groupers_tb = CaseInsensitiveListTokenBuilder(groupers, 'group', False)
keywords = [
'abstract', 'break',
'case', 'catch', 'checked', 'class', 'const',
'continue', 'default', 'delegate', 'do',
'else', 'enum', 'event', 'explicit', 'extern',
'finally', 'fixed', 'for', 'foreach', 'goto',
'if', 'implicit', 'in', 'interface', 'internal',
'lock', 'namespace', 'operator',
'out', 'override', 'params', 'partial', 'private', 'protected', 'public',
'readonly', 'ref', 'return', 'sealed',
'stackalloc', 'static', 'struct', 'switch',
'throw', 'try',
'unchecked', 'unsafe', 'using', 'using static',
'virtual', 'volatile', 'while'
]
keyword_tb = CaseSensitiveListTokenBuilder(keywords, 'keyword', False)
types = [
'bool', 'byte', 'char', 'decimal', 'double', 'float', 'int', 'long', 'object',
'sbyte', 'short', 'string', 'uint', 'ulong', 'ushort', 'void'
]
types_tb = CaseSensitiveListTokenBuilder(types, 'type', True)
operand_types.append('type')
values = [
'base', 'false', 'null', 'this', 'true'
]
values_tb = CaseSensitiveListTokenBuilder(values, 'value', True)
operand_types.append('value')
invalid_token_builder = InvalidTokenBuilder()
tokenbuilders = [
newline_tb,
whitespace_tb,
terminators_tb,
integer_tb,
integer_exponent_tb,
real_tb,
real_exponent_tb,
keyword_tb,
types_tb,
values_tb,
known_operator_tb,
groupers_tb,
identifier_tb,
string_tb,
prefixed_string_tb,
slash_slash_comment_tb,
slash_star_comment_tb,
preprocessor_tb,
c_error_tb,
c_warning_tb,
c_region_tb,
c_endregion_tb,
self.unknown_operator_tb,
invalid_token_builder
]
tokenizer = Tokenizer(tokenbuilders)
tokens = tokenizer.tokenize(code)
tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid operator')
tokens = Examiner.combine_adjacent_identical_tokens(tokens, 'invalid')
tokens = Examiner.combine_identifier_colon(tokens, ['statement terminator', 'newline'], ['{'], ['whitespace', 'comment'])
self.tokens = tokens
self.convert_identifiers_to_labels()
number_suffixes = ['f', 'F', 'd', 'D', 'm', 'M']
self.tokens = self.combine_tokens_and_adjacent_types(tokens, 'number', 'identifier', number_suffixes)
self.calc_statistics()
tokens = self.source_tokens()
tokens = Examiner.join_all_lines(tokens)
self.calc_token_confidence()
self.calc_token_2_confidence()
num_operators = self.count_my_tokens(['operator', 'invalid operator'])
if num_operators > 0:
self.calc_operator_confidence(num_operators)
allow_pairs = []
self.calc_operator_2_confidence(tokens, num_operators, allow_pairs)
self.calc_operator_3_confidence(tokens, num_operators, group_ends, allow_pairs)
self.calc_operator_4_confidence(tokens, num_operators, group_starts, allow_pairs)
self.calc_group_confidence(tokens, group_mids)
operand_types_2 = ['number', 'string', 'symbol']
self.calc_operand_n_confidence(tokens, operand_types_2, 2)
self.calc_operand_n_confidence(tokens, operand_types, 4)
self.calc_keyword_confidence()
self.calc_preprocessor_confidence()
self.calc_paired_blockers_confidence(['{'], ['}'])
self.calc_line_length_confidence(code, self.max_expected_line)
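# A minimal usage sketch (assumption: this module is run directly; the C# snippet is illustrative).
if __name__ == '__main__':
  sample_code = 'class Foo { static int Add(int a, int b) { return a + b; } }'
  examiner = CsharpExaminer(sample_code)
  print(len(examiner.tokens), 'tokens recognized')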
|
StarcoderdataPython
|
12880
|
<reponame>DITGO/2021.1-PC-GO1-Archives
from rest_framework import serializers
from archives_app.documents_models import (FrequencyRelation, BoxArchiving,
AdministrativeProcess, OriginBox,
FrequencySheet, DocumentTypes)
class FrequencySupport(serializers.ModelSerializer):
def get_document_type(self, obj):
if obj.document_type_id is not None:
return obj.document_type_id.document_name
return None
class BoxArchivingSerializer(serializers.ModelSerializer):
def get_shelf_number(self, obj):
if obj.shelf_id is not None:
return obj.shelf_id.number
return None
def get_rack_number(self, obj):
if obj.rack_id is not None:
return obj.rack_id.number
return None
def get_abbreviation_name(self, obj):
if obj.abbreviation_id is not None:
return obj.abbreviation_id.name
return ""
def get_sender_unity(self, obj):
if obj.sender_unity is not None:
return obj.sender_unity.unity_name
return ""
def get_doc_types(self, obj):
if obj.document_types is not None:
doc_types = []
for obj in obj.document_types.all():
doc_types.append(obj.document_type_id.document_name)
return doc_types
return ""
def get_temporalities(self, obj):
if obj.document_types is not None:
doc_types = []
for obj in obj.document_types.all():
doc_types.append(obj.temporality_date)
return doc_types
return None
shelf_number = serializers.SerializerMethodField('get_shelf_number')
rack_number = serializers.SerializerMethodField('get_rack_number')
abbreviation_name = serializers.SerializerMethodField('get_abbreviation_name')
sender_unity_name = serializers.SerializerMethodField('get_sender_unity')
document_type_name = serializers.SerializerMethodField('get_doc_types')
temporality_date = serializers.SerializerMethodField('get_temporalities')
class Meta:
model = BoxArchiving
fields = (
"id",
"process_number",
"sender_unity",
"notes",
"received_date",
"document_url",
"cover_sheet",
"filer_user",
"abbreviation_name",
"shelf_number",
"rack_number",
"origin_box_id",
"abbreviation_id",
"shelf_id",
"rack_id",
"document_types",
"sender_unity_name",
"document_type_name",
"temporality_date"
)
class FrequencyRelationSerializer(FrequencySupport):
def get_sender_unity(self, obj):
if obj.sender_unity is not None:
return obj.sender_unity.unity_name
return ""
document_type_name = serializers.SerializerMethodField(
'get_document_type'
)
sender_unity_name = serializers.SerializerMethodField('get_sender_unity')
class Meta:
model = FrequencyRelation
fields = (
"id",
"process_number",
"notes",
"document_date",
"received_date",
"temporality_date",
"reference_period",
"filer_user",
"sender_unity",
"document_type_id",
"document_type_name",
"sender_unity_name"
)
class AdministrativeProcessSerializer(serializers.ModelSerializer):
def get_document_subject(self, obj):
if obj.subject_id is not None:
return obj.subject_id.subject_name
return None
def get_sender_unity(self, obj):
if obj.sender_unity is not None:
return obj.sender_unity.unity_name
return ""
def get_sender_user(self, obj):
if obj.sender_user is not None:
return obj.sender_user.name
return ""
sender_unity_name = serializers.SerializerMethodField('get_sender_unity')
sender_user_name = serializers.SerializerMethodField('get_sender_user')
document_subject_name = serializers.SerializerMethodField(
'get_document_subject'
)
class Meta:
model = AdministrativeProcess
fields = ("id",
"process_number",
"notes",
"filer_user",
"notice_date",
"interested",
"cpf_cnpj",
"reference_month_year",
"sender_user",
"sender_user_name",
"archiving_date",
"is_filed",
"is_eliminated",
"temporality_date",
"send_date",
"administrative_process_number",
"sender_unity",
"subject_id",
"dest_unity_id",
"unity_id",
"document_subject_name",
"sender_unity_name"
)
class OriginBoxSerializer(serializers.ModelSerializer):
class Meta:
model = OriginBox
fields = '__all__'
class DocumentTypesSerializer(serializers.ModelSerializer):
class Meta:
model = DocumentTypes
fields = '__all__'
class FrequencySheetSerializer(FrequencySupport):
def get_person_name(self, obj):
if obj.person_id is not None:
return obj.person_id.name
return ""
document_type_name = serializers.SerializerMethodField(
'get_document_type'
)
person_name = serializers.SerializerMethodField('get_person_name')
class Meta:
model = FrequencySheet
fields = ("id",
"person_id",
"person_name",
"cpf",
"role",
"category",
"workplace",
"municipal_area",
"reference_period",
"notes",
"process_number",
"document_type_id",
"temporality_date",
"document_type_name"
)
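# A minimal usage sketch (standard DRF pattern; assumes migrated models with data, shown here
# only as comments):
# boxes = BoxArchiving.objects.all()
# payload = BoxArchivingSerializer(boxes, many=True).data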
|
StarcoderdataPython
|
174283
|
import numpy as np
import segyio
import pyvds
VDS_FILE = 'test_data/small.vds'
SGY_FILE = 'test_data/small.sgy'
def compare_inline_ordinal(vds_filename, sgy_filename, lines_to_test, tolerance):
with pyvds.open(vds_filename) as vdsfile:
with segyio.open(sgy_filename) as segyfile:
for line_ordinal in lines_to_test:
slice_segy = segyfile.iline[segyfile.ilines[line_ordinal]]
slice_vds = vdsfile.iline[vdsfile.ilines[line_ordinal]]
assert np.allclose(slice_vds, slice_segy, rtol=tolerance)
def compare_inline_number(vds_filename, sgy_filename, lines_to_test, tolerance):
with pyvds.open(vds_filename) as vdsfile:
with segyio.open(sgy_filename) as segyfile:
for line_number in lines_to_test:
slice_segy = segyfile.iline[line_number]
slice_vds = vdsfile.iline[line_number]
assert np.allclose(slice_vds, slice_segy, rtol=tolerance)
def compare_inline_slicing(vds_filename):
slices = [slice(1, 5, 2), slice(1, 2, None), slice(1, 3, None), slice(None, 3, None), slice(3, None, None)]
with pyvds.open(vds_filename) as vdsfile:
for slice_ in slices:
slices_slice = np.asarray(vdsfile.iline[slice_])
start = slice_.start if slice_.start is not None else 1
stop = slice_.stop if slice_.stop is not None else 6
step = slice_.step if slice_.step is not None else 1
slices_concat = np.asarray([vdsfile.iline[i] for i in range(start, stop, step)])
assert np.array_equal(slices_slice, slices_concat)
def test_inline_accessor():
compare_inline_ordinal(VDS_FILE, SGY_FILE, [0, 1, 2, 3, 4], tolerance=1e-5)
compare_inline_number(VDS_FILE, SGY_FILE, [1, 2, 3, 4, 5], tolerance=1e-5)
compare_inline_slicing(VDS_FILE)
def compare_crossline_ordinal(vds_filename, sgy_filename, lines_to_test, tolerance):
with pyvds.open(vds_filename) as vdsfile:
with segyio.open(sgy_filename) as segyfile:
for line_ordinal in lines_to_test:
slice_segy = segyfile.xline[segyfile.xlines[line_ordinal]]
slice_vds = vdsfile.xline[vdsfile.xlines[line_ordinal]]
assert np.allclose(slice_vds, slice_segy, rtol=tolerance)
def compare_crossline_number(vds_filename, sgy_filename, lines_to_test, tolerance):
with pyvds.open(vds_filename) as vdsfile:
with segyio.open(sgy_filename) as segyfile:
for line_number in lines_to_test:
slice_segy = segyfile.xline[line_number]
slice_vds = vdsfile.xline[line_number]
assert np.allclose(slice_vds, slice_segy, rtol=tolerance)
def compare_crossline_slicing(vds_filename):
slices = [slice(20, 21, 2), slice(21, 23, 1), slice(None, 22, None), slice(22, None, None)]
with pyvds.open(vds_filename) as vdsfile:
for slice_ in slices:
slices_slice = np.asarray(vdsfile.xline[slice_])
start = slice_.start if slice_.start is not None else 20
stop = slice_.stop if slice_.stop is not None else 25
step = slice_.step if slice_.step is not None else 1
slices_concat = np.asarray([vdsfile.xline[i] for i in range(start, stop, step)])
assert np.array_equal(slices_slice, slices_concat)
def test_crossline_accessor():
compare_crossline_ordinal(VDS_FILE, SGY_FILE, [0, 1, 2, 3, 4], tolerance=1e-5)
compare_crossline_number(VDS_FILE, SGY_FILE, [20, 21, 22, 23, 24], tolerance=1e-5)
compare_crossline_slicing(VDS_FILE)
def compare_zslice(vds_filename, tolerance):
with pyvds.open(vds_filename) as vdsfile:
with segyio.open(SGY_FILE) as segyfile:
for line_number in range(50):
slice_vds = vdsfile.depth_slice[line_number]
slice_segy = segyfile.depth_slice[line_number]
assert np.allclose(slice_vds, slice_segy, rtol=tolerance)
def test_zslice_accessor():
compare_zslice(VDS_FILE, tolerance=1e-5)
def test_trace_accessor():
with pyvds.open(VDS_FILE) as vdsfile:
with segyio.open(SGY_FILE) as segyfile:
for trace_number in range(-5, 25, 1):
vds_trace = vdsfile.trace[trace_number]
segy_trace = segyfile.trace[trace_number]
assert np.allclose(vds_trace, segy_trace, rtol=1e-5)
def test_read_bin_header():
with pyvds.open(VDS_FILE) as vdsfile:
with segyio.open(SGY_FILE) as segyfile:
assert vdsfile.bin == segyfile.bin
def test_read_trace_header():
with pyvds.open(VDS_FILE) as vdsfile:
with segyio.open(SGY_FILE) as sgyfile:
for trace_number in range(-5, 25, 1):
vds_header = vdsfile.header[trace_number]
sgy_header = sgyfile.header[trace_number]
assert vds_header == sgy_header
def test_read_trace_header_slicing():
slices = [slice(0, 5, None), slice(0, None, 2), slice(5, None, -1), slice(None, None, 10), slice(None, None, None)]
with pyvds.open(VDS_FILE) as vdsfile:
with segyio.open(SGY_FILE) as sgyfile:
for slice_ in slices:
sgy_headers = sgyfile.header[slice_]
vds_headers = vdsfile.header[slice_]
for vds_header, sgy_header in zip(vds_headers, sgy_headers):
assert vds_header == sgy_header
def test_header_is_iterable():
with pyvds.open(VDS_FILE) as vdsfile:
with segyio.open(SGY_FILE) as sgy_file:
for vds_header, sgy_header in zip(vdsfile.header, sgy_file.header):
assert vds_header == sgy_header
def compare_cube(vds_filename, sgy_filename, tolerance):
vol_sgy = segyio.tools.cube(sgy_filename)
vol_vds = pyvds.tools.cube(vds_filename)
assert np.allclose(vol_vds, vol_sgy, rtol=tolerance)
def compare_dt(vds_filename, sgy_filename):
with segyio.open(sgy_filename) as sgy_file:
dt_sgy = segyio.tools.dt(sgy_file)
with pyvds.open(vds_filename) as vds_file:
dt_vds = pyvds.tools.dt(vds_file)
assert dt_sgy == dt_vds
def test_tools_functions():
compare_cube(VDS_FILE, SGY_FILE, tolerance=1e-5)
compare_dt(VDS_FILE, SGY_FILE)
|
StarcoderdataPython
|
3231538
|
<gh_stars>0
# Copyright 2022 Google
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from . import readout_unfolding as ru
DISTRIBUTIONS = [
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
[0.1, 0.9, 0, 0],
[0, 0, 0.1, 0.9],
[0.25, 0.25, 0.25, 0.25],
[0.1, 0.2, 0.3, 0.4],
]
@pytest.mark.parametrize("use_uniform_prior", [False, True])
@pytest.mark.parametrize("distribution", DISTRIBUTIONS)
def test_initial_guess(use_uniform_prior, distribution):
guess = ru._get_initial_guess(distribution, use_uniform_prior)
if use_uniform_prior:
assert np.allclose(guess, [0.25, 0.25, 0.25, 0.25])
else:
assert np.allclose(guess, distribution)
@pytest.mark.parametrize("use_uniform_prior", [False, True])
@pytest.mark.parametrize("distribution", DISTRIBUTIONS)
def test_correct_with_identity_matrix(distribution, use_uniform_prior):
distribution = np.array(distribution)
corrected = ru.correct_readout_distribution(
np.eye(4), distribution, use_uniform_prior=use_uniform_prior
)
assert np.allclose(corrected, distribution)
@pytest.mark.parametrize("use_uniform_prior", [False, True])
@pytest.mark.parametrize("ideal", DISTRIBUTIONS)
def test_error_on_one_qubit(use_uniform_prior, ideal):
ideal = np.array(ideal)
# Use order 00, 10, 01, 11 and put error on the right qubit
error_0 = 0.05
error_1 = 0.1
readout_matrix = np.array(
[
[1 - error_0, 0, error_0, 0], # prepare |00)
[0, 1 - error_0, 0, error_0], # prepare |10)
[error_1, 0, 1 - error_1, 0], # prepare |01)
[0, error_1, 0, 1 - error_1], # prepare |11)
]
)
measured = readout_matrix.transpose() @ ideal
corrected = ru.correct_readout_distribution(
readout_matrix, measured, use_uniform_prior=use_uniform_prior
)
assert np.allclose(corrected, ideal, atol=0.01)
@pytest.mark.parametrize("use_uniform_prior", [False, True])
@pytest.mark.parametrize("ideal", DISTRIBUTIONS)
def test_correlated_error(use_uniform_prior, ideal):
ideal = np.array(ideal)
readout_matrix = np.array(
[
[0.95, 0.02, 0.02, 0.01], # prepare |00)
[0.05, 0.90, 0.03, 0.02], # prepare |10)
[0.05, 0.03, 0.90, 0.02], # prepare |01)
[0.15, 0.05, 0.05, 0.75], # prepare |11)
]
)
measured = readout_matrix.transpose() @ ideal
corrected = ru.correct_readout_distribution(
readout_matrix, measured, use_uniform_prior=use_uniform_prior
)
assert np.allclose(corrected, ideal, atol=0.02)
|
StarcoderdataPython
|
176009
|
import libsbml
import importlib
import amici
import os
import sys
import pandas as pd
import petab.sbml
# SBML model we want to import
sbml_file = 'CS_Signalling_ERBB_RAS_AKT_petab.xml'
# Name of the model that will also be the name of the python module
model_name = 'ERBB_RAS_AKT_Drugs'
# Directory to which the generated model code is written
model_output_dir = model_name
sbml_importer = amici.SbmlImporter(sbml_file)
petab.sbml.constant_species_to_parameters(sbml_importer.sbml)
libsbml.writeSBMLToFile(sbml_importer.sbml_doc,
'CS_Signalling_ERBB_RAS_AKT_modified.xml')
# extract observable definition from sbml
observables = amici.assignmentRules2observables(
sbml_importer.sbml,
filter_function=lambda p: p.getName() in ['observable_proliferation']
)
condition_table = pd.read_csv('conditions_petab.tsv', sep='\t')
# condition parameters should be everything that is defined in conditions and
# also specified in the model
constantParameters = [
par for par in condition_table.columns
if sbml_importer.sbml.getParameter(par)
]
sbml_importer.sbml2amici(model_name,
model_output_dir,
verbose=False,
observables=observables,
constantParameters=constantParameters)
sys.path.insert(0, os.path.abspath(model_output_dir))
model_module = importlib.import_module(model_name)
model = model_module.getModel()
print("Model parameters:", model.getParameterIds())
print("Model outputs: ", model.getObservableIds())
print("Model states: ", model.getStateIds())
|
StarcoderdataPython
|
35397
|
<reponame>nkmrohit/python
# Generated by Django 2.1.4 on 2019-01-27 04:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('customerauth', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='customers',
name='address',
field=models.TextField(blank=True),
),
]
|
StarcoderdataPython
|
1754932
|
import datetime
from plt_pack.project_utils.save_opts import SaveOpts
def test_freeze_time(freeze_time):
assert datetime.datetime.now() == freeze_time
def test_save_opts_time_format(freeze_time):
date_fmt = '%d-%b-%H-%M-%S'
save_opts = SaveOpts(datefmt=date_fmt)
assert save_opts.time_str == freeze_time.strftime(date_fmt)
|
StarcoderdataPython
|
106941
|
<reponame>ChrisCrossCrash/SetOriginInEditMode
import bpy
import bmesh
from mathutils import Vector
# Meta Info
# https://wiki.blender.org/wiki/Process/Addons/Guidelines/metainfo
bl_info = {
"name": "Set Origin in Edit Mode",
"description": "Adds a `Set Origin to Selected` option to the right-click menu in Edit Mode",
"author": "<NAME>",
"version": (0, 1),
# TODO: Test the minimum version
"blender": (2, 93, 0),
"location": "View3D (Edit Mode) > right-click > Set Origin to Selected",
"warning": "", # used for warning icon and text in add-ons panel
"wiki_url": "https://github.com/ChrisCrossCrash/SetOriginInEditMode",
"tracker_url": "https://github.com/ChrisCrossCrash/SetOriginInEditMode/issues",
"support": "COMMUNITY",
"category": "Object",
}
def get_avg_location(*verts: bmesh.types.BMVert):
"""Return the average location of one or more vertices."""
result = Vector((0, 0, 0))
for v in verts:
result += v.co
result /= len(verts)
return result
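# Worked example (hypothetical inputs): vertices at (0, 0, 0), (2, 0, 0) and (1, 3, 0)
# average to Vector((1.0, 1.0, 0.0)).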
def set_3d_cursor_to_active_verts(context):
"""Moves the object origin to the average location of the vertices selected in edit mode."""
# FIXME: Undoing this action takes 3 or more undos when it should take one. Also, it can't be fully re-done
# after it has been undone.
edit_object = context.edit_object
mesh = edit_object.data
bm = bmesh.from_edit_mesh(mesh)
# active_verts are the vertices that are selected in edit mode.
active_verts = [v for v in bm.verts if v.select]
if not len(active_verts):
# TODO: Find a more graceful way of informing the user than raising an exception.
raise Exception("You must select at least one vertex to change the object origin.")
# Make a copy of the 3D cursor location so that we can set it back after using it.
cursor_start = context.scene.cursor.location.copy()
rotation = edit_object.rotation_euler
scale = edit_object.scale
avg_location = get_avg_location(*active_verts)
# Apply the rotation and scale to avg_location. This makes it possible to add avg_location to the
# object location in the world space to get the average location relative to the world origin.
avg_location.rotate(rotation)
avg_location *= scale
# Move the 3D cursor to the average location (relative to the world origin).
bpy.context.scene.cursor.location = edit_object.location + avg_location
# Switch to object mode, set the object origin to the 3D cursor location, then switch back to edit mode.
# (I call this trick the "Fastest Gun in the West")
bpy.ops.object.mode_set(mode='OBJECT')
bpy.ops.object.origin_set(type='ORIGIN_CURSOR')
bpy.ops.object.mode_set(mode='EDIT')
# Move the 3D cursor back to its starting location.
context.scene.cursor.location = cursor_start
# noinspection PyMethodMayBeStatic
class SET_ORIGIN_IN_EDIT_MODE_OT_main_operator(bpy.types.Operator):
"""Moves the object origin to the average location of the vertices selected in edit mode."""
bl_idname = "set_origin_in_edit_mode.main_operator"
bl_label = "Set Origin to Selected"
@classmethod
def poll(cls, context):
return context.mode == 'EDIT_MESH'
# noinspection PyUnusedLocal
def execute(self, context):
set_3d_cursor_to_active_verts(context)
return {'FINISHED'}
# noinspection PyUnusedLocal
def draw_menu_item(self, context):
layout = self.layout
layout.operator("set_origin_in_edit_mode.main_operator")
def register():
bpy.utils.register_class(SET_ORIGIN_IN_EDIT_MODE_OT_main_operator)
bpy.types.VIEW3D_MT_edit_mesh_context_menu.append(draw_menu_item)
def unregister():
bpy.utils.unregister_class(SET_ORIGIN_IN_EDIT_MODE_OT_main_operator)
bpy.types.VIEW3D_MT_edit_mesh_context_menu.remove(draw_menu_item)
if __name__ == "__main__":
register()
|
StarcoderdataPython
|
3333377
|
<filename>play.py
import numpy as np
from env import lumpy, reward, discount
from agnt import rand, min, ffnn
from plt import plotting_fools
plot = True
num_games = 10
max_iter = 40
#actor1 = rand.agent()
#actor2 = min.agent()
actor = ffnn.agent()
rwrd = reward.score()
dcount = discount.disc()
for game in range(num_games):
env = lumpy.world(gridsize=10)
state = env.initial_state
cursor = env.cursor
local_reward = rwrd.initial_reward
running_reward = [0]
discounted_reward = dcount.expcoef(running_reward)
print('game number:', game)
for i in range(max_iter):
#actor = np.random.choice([actor1, actor2])
actor = actor
action = actor.decision(state, cursor, env.end, [local_reward])
state, cursor = env.get_state(state, cursor, action)
local_reward = rwrd.dontstandstill(cursor, env.end)
#local_reward = rwrd.positivemoves(cursor, env.end)
running_reward.append(local_reward)
if plot:
plotting_fools.plot1(state, running_reward, i, cursor, action)
if np.amin(cursor == env.end) == True:
print(running_reward)
break
if i == max_iter - 1:
print(running_reward)
|
StarcoderdataPython
|
1743769
|
import os
import argparse
from sklearn.feature_selection import (
SelectKBest,
mutual_info_classif,
)
from imblearn.over_sampling import RandomOverSampler
from sklearn.metrics import roc_curve
import matplotlib.pyplot as plt
import classifiers as clfs
import utils
def main(args):
print("Loading and preparing datasets...")
# Get the selected features from our training data
train_X, train_y, good_features = recover_good_features(args.data_path)
# Prepare all of the classifiers
lr = clfs.LogRegClassifier.from_file(args.data_path)
rf = clfs.RandForestClassifier.from_file(args.data_path)
nb = clfs.NaiveBayesClassifier.from_file(args.data_path)
svm = clfs.SVMClassifier.from_file(args.data_path)
nn = clfs.NeuralNetClassifier.from_file(args.data_path)
print("Fitting saved model configs...")
# Re-fitting the saved model configurations
lr.model.fit(train_X, train_y)
rf.model.fit(train_X, train_y)
nb.model.fit(train_X, train_y)
svm.model.fit(train_X, train_y)
nn.model.fit(train_X, train_y)
if args.dev:
print("Evaluating on the DEV set")
dev_file = args.data_path + "bank-full_dev.csv"
dev_X, dev_y = load_eval_dataset(dev_file, good_features)
# Perform metric evaluation and retrieve ROC curve data
lr_fpr, lr_tpr = evaluate_classifier(lr, dev_X, dev_y)
rf_fpr, rf_tpr = evaluate_classifier(rf, dev_X, dev_y)
nb_fpr, nb_tpr = evaluate_classifier(nb, dev_X, dev_y)
svm_fpr, svm_tpr = evaluate_classifier(svm, dev_X, dev_y)
nn_fpr, nn_tpr = evaluate_classifier(nn, dev_X, dev_y)
# Plot the ROC curves for all models
roc_plot(
"development",
[
(lr, lr_fpr, lr_tpr),
(rf, rf_fpr, rf_tpr),
(nb, nb_fpr, nb_tpr),
(svm, svm_fpr, svm_tpr),
(nn, nn_fpr, nn_tpr),
],
)
if args.test:
print("Evaluating on the TEST set")
test_file = args.data_path + "bank-full_test.csv"
test_X, test_y = load_eval_dataset(test_file, good_features)
# Perform metric evaluation and retrieve ROC curve data
lr_fpr, lr_tpr = evaluate_classifier(lr, test_X, test_y)
rf_fpr, rf_tpr = evaluate_classifier(rf, test_X, test_y)
nb_fpr, nb_tpr = evaluate_classifier(nb, test_X, test_y)
svm_fpr, svm_tpr = evaluate_classifier(svm, test_X, test_y)
nn_fpr, nn_tpr = evaluate_classifier(nn, test_X, test_y)
# Plot the ROC curves for all models
roc_plot(
"test",
[
(lr, lr_fpr, lr_tpr),
(rf, rf_fpr, rf_tpr),
(nb, nb_fpr, nb_tpr),
(svm, svm_fpr, svm_tpr),
(nn, nn_fpr, nn_tpr),
],
)
plt.show()
def evaluate_classifier(classifier, X, y):
"""Compute predictions for this classifier and then use those predictions (and prediction probabilities) to compute all key metrics and the ROC curve.
Args:
classifier (AbstractClassifier): the classifier to be evaluated
        X (DataFrame): the feature data to be used for evaluation
y (DataFrame): the label data to be used for evaluating predictions
Returns:
Tuple[List[float], List[float]]: The FPR and TPR values of the ROC curve
"""
print(f"Evaluating {classifier.name.replace('_', ' ')}")
preds = classifier.model.predict(X)
utils.compute_metrics(y, preds)
probs = classifier.model.predict_proba(X)
fpr, tpr, _ = roc_curve(y, probs[:, 1], drop_intermediate=False)
return fpr, tpr
def load_eval_dataset(data_filepath, selected_features):
"""Loads a dataset to be used for evaluation. The dataset is reformed to only use the selected features from KBest selection.
Args:
data_filepath (str): path to the dataset to be loaded
selected_features (ndarray): the indices of features to be selected from the dataset DataFrame
Returns:
Tuple[DataFrame, DataFrame]: The features and label DataFrames for this evaluation dataset
"""
X, y = utils.load_Xy_dataset(data_filepath)
X = X.iloc[:, selected_features]
return X, y
def recover_good_features(data_path):
"""Returns the selected features from KBest selection and train datasets to refit the saved model configs.
Args:
data_path (str): The base path to the training dataset
Returns:
Tuple[DataFrame, DataFrame, ndarray]: All data needed to refit and evaluate with the dev or test datasets
"""
train_file = data_path + "bank-full_train.csv"
train_X, train_y = utils.load_Xy_dataset(train_file)
selector = SelectKBest(mutual_info_classif, k=20)
selector.fit_transform(train_X, train_y)
good_features = selector.get_support(indices=True)
train_X = train_X.iloc[:, good_features]
ros = RandomOverSampler(random_state=17)
train_X, train_y = ros.fit_resample(train_X, train_y)
return train_X, train_y, good_features
def roc_plot(dataset_id, classifier_data):
"""Generates an ROC curve plot for classifier evaluation data on some dataset.
NOTE: this function also plots the line y=x for easy interpretation of the ROC curve results.
Args:
dataset_id (str): a descriptive name of the dataset used to compute the current ROC curve data
classifier_data (Tuple[AbstractClassifier, ndarray, ndarray]): the classifier that has been evaluated along with the TPR/FPR values of the ROC curve to be plotted
"""
plt.figure()
plt.title(f"ROC Curves for the {dataset_id} dataset")
plt.xlabel("False Positive Rate (FPR)")
plt.ylabel("True Positive Rate (TPR)")
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
lw = 2
plt.plot([0, 1], [0, 1], color="black", lw=lw, linestyle="--")
for (clf, fpr, tpr) in classifier_data:
plt.plot(fpr, tpr, lw=lw, label=clf.name.replace("_", " "))
plt.legend(loc="lower right")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Evaluate the bank telemarketing classifiers on a given dataset"
)
parser.add_argument(
"-d",
"--dev",
action="store_true",
help="Perform evaluation on the development dataset",
)
parser.add_argument(
"-t",
"--test",
action="store_true",
help="Perform evaluation on the test dataset",
)
parser.add_argument(
"-p",
"--data_path",
default="../data/",
help="Base path to the dataset to use",
)
args = parser.parse_args()
main(args)
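# Hedged usage examples (the script filename is an assumption; the flags match the parser above):
#   python evaluate.py --dev
#   python evaluate.py --test --data_path ../data/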
|
StarcoderdataPython
|
3227094
|
"""Unit tests for Norm"""
from tests.utils import NormTestCase
class UnquotingTestCase(NormTestCase):
def test_dynamic_projection(self):
self.execute("test(a: String, b: Integer);")
self.execute("test := ('test', 1)"
" | ('here', 2)"
" | ('there', 3)"
" ;")
query = """
with(test), [('(.*)s', 's'), ('(.*)e', 'e'), ('th(.*)', 'th')]?(p,n) & extract(p, a)?pattern_{n};
"""
data = self.execute(query)
self.assertTrue(data is not None)
self.assertTrue(all(data['pattern_e'].dropna() == ['t', 'her', 'ther']))
self.assertTrue(all(data['pattern_s'].dropna() == ['te']))
self.assertTrue(all(data['pattern_th'].dropna() == ['ere']))
def test_single_exist_context(self):
self.execute("tmp := read('./data/norm/packed_alarms.parquet', ext='parq');")
self.execute("alarms(event:String, ip:String, time:Datetime, tally:Integer);")
self.execute("alarms := tmp(event?, ip?, time?, tally?);")
result = self.execute("with(alarms), foreach(event, ip), tally.sum() > 1000 ?event_{event};")
self.assertTrue(result is not None)
self.assertTrue(len(result.columns) == 16)
def test_dynamic_code_execution(self):
self.execute("test(a: Integer, b: String);")
self.execute("test := (1, 'test(a > 1?);')"
" | (2, 'test(a > 2?);')"
" | (3, 'test(a > 3?);')"
" ;")
result = self.execute("with(test), {b}?r;")
self.assertTrue(result is not None)
self.assertTrue(len(result) == 3)
|
StarcoderdataPython
|
98439
|
#coding=utf-8
import json
from app.util.messageque.http_request import RequestApi
import logging
from django.conf import settings
class MessageSender:
Host = settings.Message_Tornado_host
@classmethod
def send_bottle_message(cls, from_id, desc):
body = {}
body["from_id"] = from_id
body["desc"] = desc
path = "/tecent/bottle"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
result = json.loads(data)
print result.get("status_code")
if result.get("status_code")==200:
return 200
elif result.get("status_code") ==400:
logging.error("send bottle message to mq error")
return 400
@classmethod
def send_bottle_message_v3(cls, from_id, desc, gender):
body = {}
body["from_id"] = from_id
body["desc"] = desc
body["gender"] = gender
path = "/tecent/bottle_v3"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send bottle message to mq error")
return 400
@classmethod
def send_block_user_message(cls, user_id, desc):
body = {}
body["block_id"] = user_id
body["desc"] = desc
path = "/tecent/bottle"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
result = json.loads(data)
print result.get("status_code")
print 1234566
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send blocker_user message to mq error")
return 400
@classmethod
def send_charge_bottle_message(cls, from_id):
body = {}
body["from_id"] = from_id
desc = u"我已经成为土豪,快来撩我吧~"
body["desc"] = desc
path = "/tecent/charge_bottle"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send charge bottle message to mq error")
return 400
@classmethod
def send_system_message(cls, to_user_id, desc):
body = {}
body["to_user_id"] = to_user_id
body["desc"] = desc
path = "/tecent/system_message"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send system message to mq error")
return
@classmethod
def send_big_gift_info_message(cls, sender_id, sender_name, receiver_id, receiver_name, gift_id, gift_name,
gift_count):
body = {
"sender_id": sender_id,
"sender_name": sender_name,
"receiver_id": receiver_id,
"receiver_name": receiver_name,
"gift_id": gift_id,
"gift_count": gift_count,
"gift_name": gift_name
}
path = "/tecent/information/big_gift"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
print data
print cls.Host
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send big gift message to mq error")
return 400
@classmethod
def send_charge_info_message(cls, user_id, user_name, money):
body = {
"user_id": user_id,
"user_name": user_name,
"money": money
}
path = "/tecent/information/charge"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
print data
print cls.Host
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send charge info message to mq error")
return 400
@classmethod
def send_video_auth_info_message(cls, user_id, user_name):
body = {
"user_id": user_id,
"user_name": user_name,
}
path = "/tecent/information/video_auth"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
print data
print cls.Host
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send video auth info message to mq error")
return 400
@classmethod
def send_activity_info_message(cls, user_id, user_name, desc):
body = {
"user_id": user_id,
"user_name": user_name,
"desc": desc
}
path = "/tecent/information/activity"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
print data
print cls.Host
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send activity info message to mq error")
return 400
@classmethod
def send_withdraw_info_message(cls, user_id, user_name, money):
body = {
"user_id": user_id,
"user_name": user_name,
"money": money,
}
path = "/tecent/information/withdraw"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
print data
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send withdraw info message to mq error")
return 400
@classmethod
def send_porn_check(cls, file_id, pic_url, room_id, user_id, join_id, room_user_id):
body = {
"file_id": file_id,
"pic_url": pic_url,
"room_id": room_id,
"user_id": user_id,
"join_id": join_id,
"room_user_id": room_user_id
}
path = "/audit/porn_check"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
print data
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send porn_check message to mq error")
return 400
@classmethod
def send_picture_detect(cls, pic_url="", user_id=0, pic_channel=0, source=0, obj_id=None, pic_type=1):
if source == 1:
body = {
"pic_url": pic_url,
"user_id": user_id,
"pic_channel": pic_channel,
"source": source
}
elif source == 2:
            # 2: community post (feed) image
body = {
"obj_id": obj_id,
"source": source
}
elif source == 3:
            # 3: personal photo album
body = {
"pic_urls": pic_url, # 多个用逗号分隔
"source": source,
"user_id": user_id,
"pic_type": pic_type
}
elif source == 4:
            # 4: chat image moderation
body = {
"pic_url": pic_url,
"source": source,
"obj_id": obj_id
}
else:
return 400
path = "/audit/pic_check"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
print data
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send pic_check message to mq error")
return 400
@classmethod
def send_text_check(cls, text, user_id, text_channel, ip):
body = {
"text": text,
"user_id": user_id,
"text_channel": text_channel,
"ip": ip
}
path = "/audit/text_check"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
print data
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send text_check message to mq error")
return 400
@classmethod
def send_about_me_message(cls, user_id):
body = {}
body["user_id"] = user_id
path = "/tecent/about_me"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
result = json.loads(data)
print result.get("status_code")
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send about me message to mq error")
return 400
@classmethod
def send_return_tool(cls, from_id, to_id, close_type):
body = {}
body["from_id"] = from_id
body["to_id"] = to_id
body["close_type"] = close_type
path = "/tecent/return_tool"
print close_type, "=======================cloase_type"
data = RequestApi.post_body_request_http(path=path, body=json.dumps(body), headers={}, host=cls.Host)
result = json.loads(data)
if result.get("status_code") == 200:
return 200
elif result.get("status_code") == 400:
logging.error("send_return_tool to mq error")
return 400
|
StarcoderdataPython
|
3327418
|
from flask import Flask, jsonify, request, send_from_directory
import requests
from threading import Thread
import os
import shutil
app = Flask(__name__)
app.config['UPLOAD_FOLDER']='usersContent'
def verifyJson(dadosNext):
tempHTML = ''
if (len(list(dadosNext)) > 0):
for dado in dadosNext:
tempHTML = tempHTML + recursivityJson(dado)
return tempHTML
def recursivityJson(dados):
dadosK = list(dados.keys())[0]
if "property" in list(dados[dadosK]):
propsHTML = ''
styleProps = 'style=\"'
for propName in list(dados[dadosK]['property']):
propsHTML = propsHTML + ' {}=\'{}\''.format(propName, dados[dadosK]['property'][propName])
if "style" in list(dados[dadosK]):
try:
for styleName in list(dados[dadosK]['style']):
styleProps = styleProps + '{}: {};'.format(styleName, dados[dadosK]['style'][styleName])
styleProps = styleProps + '\"'
except:
pass
if type(dados[dadosK]['content']) in [list, dict]:
return "<{} {} {} componentId={}>{}</{}>".format(dados[dadosK]['html'], styleProps, propsHTML, dados[dadosK]['id'], verifyJson(dados[dadosK]['content']), dados[dadosK]['html'])
else:
return "<{} {} {} componentId={}>{}</{}>".format(dados[dadosK]['html'], styleProps, propsHTML, dados[dadosK]['id'], dados[dadosK]['content'], dados[dadosK]['html'])
else:
if (type(dados[dadosK]['content']) in [list, dict]):
return "<{} {} componentId={}>{}</{}>".format(dados[dadosK]['html'], propsHTML, dados[dadosK]['id'], verifyJson(dados[dadosK]['content']), dados[dadosK]['html'])
else:
return "<{} {} componentId={}>{}</{}>".format(dados[dadosK]['html'], propsHTML, dados[dadosK]['id'], dados[dadosK]['content'], dados[dadosK]['html'])
else:
if "style" in list(dados[dadosK]):
styleProps = "style=\""
try:
for styleName in list(dados[dadosK]['style']):
styleProps = styleProps + '{}: {};'.format(styleName, dados[dadosK]['style'][styleName])
styleProps = styleProps + '\"'
except:
pass
if type(dados[dadosK]['content']) in [list, dict]:
return "<{} {} componentId={}>{}</{}>".format(dados[dadosK]['html'], styleProps, dados[dadosK]['id'], verifyJson(dados[dadosK]['content']), dados[dadosK]['html'])
else:
return "<{} {} componentId={}>{}</{}>".format(dados[dadosK]['html'], styleProps, dados[dadosK]['id'], dados[dadosK]['content'], dados[dadosK]['html'])
else:
if type(dados[dadosK]['content']) in [list, dict]:
return "<{} componentId={}>{}</{}>\n".format(dados[dadosK]['html'], dados[dadosK]['id'], verifyJson(dados[dadosK]['content']), dados[dadosK]['html'])
else:
return "<{} componentId={}>{}</{}>\n".format(dados[dadosK]['html'], dados[dadosK]['id'], dados[dadosK]['content'], dados[dadosK]['html'])
return ''
def verifyJs(dPage):
if ('js' in list(dPage)):
return dPage['js']
def verifyCss(dPage):
if ('css' in list(dPage)):
return dPage['css']
def gerarExe(req):
if os.path.exists('usersContent/{}'.format(req['user']['_id'])):
contentE = os.path.join(app.root_path, r'usersContent\{}'.format(req['user']["_id"]))
shutil.rmtree(contentE)
if not os.path.exists('usersContent/{}'.format(req['user']["_id"])):
os.makedirs('usersContent/{}'.format(req['user']["_id"]))
if not os.path.exists('usersContent/{}/content'.format(req['user']["_id"])):
os.makedirs('usersContent/{}/content'.format(req['user']["_id"]))
if not os.path.exists('usersContent/{}/exe'.format(req['user']["_id"])):
os.makedirs('usersContent/{}/exe'.format(req['user']["_id"]))
for page in req['appCode']['pages']:
html = ''
for component in page['pageComponents']:
html = html + recursivityJson(component)
with open('usersContent/{}/content/{}'.format(req['user']["_id"], page['href']), 'w+') as f:
f.write("""<html>
<head>
<meta charset="utf-8">
<style>
{}
</style>
</head>
<body>
{}
</body>
<script>
{}
</script>
</html>""".format(verifyCss(page), html, verifyJs(page)))
fMain = open("base.js", "r")
with open('usersContent/{}/content/main.js'.format(req['user']["_id"]), 'w+') as arquivo:
arquivo.write('page = \"{}\";\n'.format(req['appCode']['pages'][0]['href']))
arquivo.write(fMain.read())
if not os.path.exists('usersContent/{}/content/package.json'.format(req['user']["_id"])):
os.system('cd usersContent/{}/content && npm init -y'.format(req['user']["_id"]))
os.system('cd usersContent/{}/content && npm install --save-dev electron'.format(req['user']["_id"]))
os.system('cd usersContent/{}/content && npm install electron-builder --save-dev'.format(req['user']["_id"]))
os.system('cd usersContent/{}/content && electron-builder'.format(req['user']["_id"]))
original = os.path.join(app.root_path, r'usersContent\{}\content\dist\content Setup 1.0.0.exe'.format(req['user']["_id"]))
target = os.path.join(app.root_path, r'usersContent\{}\exe'.format(req['user']["_id"]))
contentD = os.path.join(app.root_path, r'usersContent\{}\content'.format(req['user']["_id"]))
print(original)
print(target)
shutil.copy2(original, target)
shutil.rmtree(contentD)
@app.route('/download/<path:id>/<path:filename>', methods=['GET', 'POST'])
def download(id, filename):
full_path = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'], r'{}\exe'.format(id))
return send_from_directory(full_path, filename)
@app.route('/gExe/', methods=['POST'])
def gerarAPP():
req_data = request.get_json()
thread1 = Thread(target = gerarExe, args = (req_data,))
thread1.start()
return 'ok'
app.run(debug=True)
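# A client-side sketch for the /gExe/ endpoint (separate script; the payload keys mirror what
# gerarExe() reads, while the ids and page content are purely illustrative):
# import requests
# payload = {
#     "user": {"_id": "demo-user"},
#     "appCode": {"pages": [{"href": "index.html", "pageComponents": [], "css": "", "js": ""}]},
# }
# requests.post("http://localhost:5000/gExe/", json=payload)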
|
StarcoderdataPython
|
146477
|
from .constants import BTCMOVE
from .controllers import ftx_move, ftx_trades
__all__ = ["BTCMOVE", "ftx_trades", "ftx_move"]
|
StarcoderdataPython
|
56882
|
from flask_wtf import Form
from wtforms import (TextField, StringField, BooleanField,PasswordField,
validators)
from .utils import Unique
from .models import User
from .constants import (USER_LEN_MSG, USER_REQ_MSG, USER_DUPL_MSG,
EMAIL_FORMAT_MSG, EMAIL_REQ_MSG, EMAIL_DUPL_MSG,
PWD_REQ_MSG, PWD_LEN_MSG, PWD_MATCH_MSG, INCORRECT_PWD)
class LoginForm(Form):
username = TextField('Username',
[validators.Length(max=25,message=USER_LEN_MSG),
validators.Required(message=USER_REQ_MSG)])
password = PasswordField('Password',
[validators.Required(message=PWD_REQ_MSG)])
class SignupForm(Form):
username = TextField('Username', [validators.Length(max=25,
message=USER_LEN_MSG),
validators.Required(message=USER_REQ_MSG),
Unique(User,User.username, message=USER_DUPL_MSG)])
email = TextField('Email', [validators.Email(message=EMAIL_FORMAT_MSG),
validators.Required(message=EMAIL_REQ_MSG),
Unique(User, User.email, message=EMAIL_DUPL_MSG)])
password = PasswordField('Password', [validators.Length(max=25,
                                                            message=PWD_LEN_MSG),
validators.InputRequired(message=PWD_REQ_MSG),
validators.EqualTo('confirm',
message=PWD_MATCH_MSG)])
confirm = PasswordField('Repeat Password')
|
StarcoderdataPython
|
3224707
|
#!/usr/bin/env python
import rospy
def helloworld():
#Initialize node with a default name
rospy.init_node('default_node_name', anonymous=True)
#Prints to INFO log
rospy.loginfo("HELLOOO WOOORRLD")
if __name__ =='__main__':
try:
helloworld()
except rospy.ROSInterruptException:
pass
|
StarcoderdataPython
|
101189
|
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
class Cors(MiddlewareMixin):
def process_response(self, request, response):
response['Access-Control-Allow-Origin'] = ','.join(settings.CORS_ORIGIN_LIST)
if request.method == 'OPTIONS':
response['Access-Control-Allow-Methods'] = ','.join(settings.CORS_METHOD_LIST)
response['Access-Control-Allow-Headers'] = ','.join(settings.CORS_HEADER_LIST)
response['Access-Control-Allow-Credentials'] = 'true'
return response
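# A settings sketch (assumption: only the list names are taken from this middleware; the values
# and module path are illustrative and belong in settings.py):
# CORS_ORIGIN_LIST = ["https://example.com"]
# CORS_METHOD_LIST = ["GET", "POST", "OPTIONS"]
# CORS_HEADER_LIST = ["Content-Type", "Authorization"]
# MIDDLEWARE = [..., "path.to.middleware.Cors", ...]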
|
StarcoderdataPython
|
1786689
|
# AI server; the display side acts as the client
# server.py
import flask
from flask import request, make_response, jsonify
from PIL import Image
import time
import cv2
import numpy as np
import core.utils as utils
import tensorflow as tf
from PIL import Image
from IPython.display import display
import requests
num_seats = input("Number of seats: ")
num_seats = int(num_seats)
global empty_num_seats
empty_num_seats = num_seats
app = flask.Flask(__name__)
return_elements = ["input/input_data:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0", "pred_lbbox/concat_2:0"]
pb_file = "./yolov3_coco.pb"
image_path = "./images/capture.jpg"
num_classes = 80
input_size = 128
graph = tf.Graph()
# IP = "172.29.148.144"
# PORT = 8899
@app.route('/api/calculateEmptySeats', methods=['POST'])
def match():
image = request.files['file']
if image:
with open(image_path, "wb") as fw:
# with open('images/test{}.jpg'.format(time.time()), 'wb') as fw:
fw.write(image.read())
start = time.time()
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)
original_image_size = original_image.shape[:2]
image_data = utils.image_preporcess(np.copy(original_image), [input_size, input_size])
image_data = image_data[np.newaxis, ...]
pred_sbbox, pred_mbbox, pred_lbbox = sess.run([return_tensors[1], return_tensors[2], return_tensors[3]],feed_dict={ return_tensors[0]: image_data})
pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)),np.reshape(pred_mbbox, (-1, 5 + num_classes)),np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0)
bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.5)
bboxes = utils.nms(bboxes, 0.4, method='nms')
image = utils.draw_bbox(original_image, bboxes)
image = Image.fromarray(image)
print("좌석 수: {}".format(num_seats))
print("찾은 사람 수: {}".format(len(bboxes)))
# requests.post(url="http://{}:{}/api/displayEmptySeats".format(IP,PORT), data={"num_empty_seats": num_seats})
image.show()
# display(image)
end = time.time()
print("{}seconds".format(end-start))
else:
print('Image empty!')
return 'OK'
@app.route("/api/displayEmptySeats", methods=["POST"])
def display():
global empty_num_seats
if empty_num_seats < 9:
empty_num_seats += 1
else:
empty_num_seats = 0
return make_response(jsonify({"num_empty_seats": empty_num_seats}))
return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements)
with tf.Session(graph=graph) as sess:
app.run('0.0.0.0', port=5001, threaded=True)
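# A client-side sketch for the /api/calculateEmptySeats endpoint (separate script; the image
# path is illustrative):
# import requests
# with open("capture.jpg", "rb") as f:
#     requests.post("http://localhost:5001/api/calculateEmptySeats", files={"file": f})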
|
StarcoderdataPython
|
152725
|
from django.template import Library
from evap.evaluation.models import Semester
from evap.settings import DEBUG, LANGUAGES
register = Library()
@register.inclusion_tag("navbar.html")
def include_navbar(user, language):
return {
"user": user,
"current_language": language,
"languages": LANGUAGES,
"published_result_semesters": Semester.get_all_with_published_unarchived_results(),
"result_semesters": Semester.get_all_with_unarchived_results(),
"grade_document_semesters": Semester.objects.filter(grade_documents_are_deleted=False),
"debug": DEBUG,
}
|
StarcoderdataPython
|
1653889
|
#!/usr/bin/python
import time
import cv2
import numpy as np
from process import findColor
from ftplib import FTP
from bebop import *
drone = Bebop()
drone.videoDisable() # disable video stream
drone.moveCamera( tilt=-100, pan=0 )
ftp = FTP('192.168.42.1') # connect to host, default port
ftp.login()
ftp.cwd('internal_000/Bebop_2/media')
ftp.retrlines('LIST')
filenames_all = ftp.nlst() # get filenames within the directory
for filename in filenames_all:
ftp.delete(filename) # clear past files/screenshots/videos
try:
drone.takeoff()
drone.takePicture()
for i in xrange(5):
drone.wait(1)
print 'round',i
try:
drone.takePicture()
filenames_all = ftp.nlst() # get filenames within the directory
filenames = [k for k in filenames_all if '.jpg' in k]
#print filenames
filename = filenames[-1]
print filename
ftp.retrbinary("RETR " + filename ,open(filename, 'wb').write)
frame = cv2.imread(filename)
            cv2.imshow('frame', frame)
            cv2.waitKey(1)  # needed so the preview window actually refreshes
if findColor(frame)==False:
print "no object found"
drone.update( cmd=movePCMDCmd( True, 0, 5, 0, 0 ) )
else:
print "object found"
drone.hover()
# drone.flyToAltitude(1.5,2)
drone.wait(2)
drone.land()
break
#does it need to login and quit ftp everytime?
except Exception, e:
print 'download error'
ftp.quit()
drone.land()
ftp.quit()
drone.land()
except (ManualControlException,Exception), e:
print "Emergency Landing"
ftp.quit()
drone.land()
def flyBackwards():
drone.wait(1)
for i in xrange(3):
drone.update( cmd=movePCMDCmd( True, 0, -10, 0, 0 ) )
drone.wait(1)
# use pcmd commands+ drone.update() to set up video stream and more functions?
# cv2.imshow('image',output)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
|
StarcoderdataPython
|
171703
|
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: © 2014 The glucometerutils Authors
# SPDX-License-Identifier: MIT
"""Tests for the LifeScan OneTouch Ultra Easy driver."""
# pylint: disable=protected-access,missing-docstring
from absl.testing import absltest
from glucometerutils.drivers import otultraeasy
class ConstructTest(absltest.TestCase):
def test_make_packet_ack(self):
self.assertEqual(
b"\x02\x06\x08\x03\xc2\x62",
otultraeasy._make_packet(b"", False, False, False, True),
)
def test_make_packet_version_request(self):
self.assertEqual(
b"\x02\x09\x03\x05\x0d\x02\x03\x08\x9f",
otultraeasy._make_packet(b"\x05\x0d\x02", True, True, False, False),
)
|
StarcoderdataPython
|
3340476
|
<reponame>nekoumei/dtreeplt
import numpy as np
import pandas as pd
from sklearn import tree
from sklearn.preprocessing import MinMaxScaler
import matplotlib.patches as mpatch
import matplotlib.pyplot as plt
import matplotlib.cm as cm
class dtreeplt():
'''
Parameters
---------------
    model: sklearn.tree.DecisionTreeClassifier object
        You need to prepare a trained model.
        If it is None, the iris data is used and a model is fitted automatically.
    feature_names: array-like object (e.g. numpy.array)
        List of feature names.
    target_names: array-like object (e.g. numpy.array)
        List of target names.
    filled: Bool
        If True, paint nodes to indicate the majority class, like sklearn.
    X: numpy array or pandas DataFrame object
        It is necessary for interactive mode.
    y: numpy array object
        It is necessary for interactive mode.
    cmap: matplotlib cm object
        You can choose the colormap used to draw the decision tree.
    eval: Bool
        If True, hold out a stratified 9:1 split and compute validation accuracy.
        The evaluation runs only in interactive mode.
    disp_values: Bool
        If True, show the class value counts in each node's text.
'''
def __init__(self, model=None, X=None, y=None, feature_names=None, target_names=None,
filled=True, cmap=cm.Accent, eval=True, disp_values=True):
if model is None:
print('Use Iris Datasets.')
model = tree.DecisionTreeClassifier(min_samples_leaf=.1)
X, y, feature_names, target_names = self._get_iris_data()
model.fit(X, y)
if type(X) == pd.core.frame.DataFrame:
X = X.values
elif type(X) == np.ndarray:
pass
elif type(X) == list:
X = np.array(X)
elif X is None:
pass
else:
assert False, 'X must be pandas DataFrame, numpy array or list'
self.model = model
self.X = X
self.y = y
self.feature_names = feature_names
self.target_names = target_names
self.filled = filled
self.cmap = cmap
self.eval = eval
self.disp_values = disp_values
def _get_iris_data(self):
from sklearn.datasets import load_iris
data = load_iris()
X = data.data
y = data.target
feature_names = data.feature_names
target_names = data.target_names
return X, y, feature_names, target_names
def _get_tree_infomation(self):
tree_info_dict = {}
tree_info_dict['samples'] = self.model.tree_.n_node_samples
tree_info_dict['values'] = self.model.tree_.value
tree_info_dict['features'] = [self.feature_names[i] if i >=0 else 'target' for i in self.model.tree_.feature]
tree_info_dict['thresholds'] = self.model.tree_.threshold
tree_info_dict['impurities'] = self.model.tree_.impurity
tree_info_dict['criterion'] = self.model.criterion
tree_info_dict['node_count'] = self.model.tree_.node_count
tree_info_dict['children_left'] = self.model.tree_.children_left
tree_info_dict['children_right'] = self.model.tree_.children_right
tree_info_dict['max_depth'] = self.model.tree_.max_depth
return tree_info_dict
def _get_class_names(self, values):
class_ids = []
for value in values:
class_ids.append(np.argmax(value))
classes = [self.target_names[i] for i in class_ids]
return classes, class_ids
def _calc_nodes_relation(self):
tree_info_dict = self._get_tree_infomation()
self.classes, self.class_ids = self._get_class_names(tree_info_dict['values'])
links = []
links_left = []
links_right = []
link = {}
for i, child_left in enumerate(tree_info_dict['children_left']):
if child_left != -1:
link['source'] = i
link['target'] = child_left
links.append(link.copy())
links_left.append(link.copy())
for i, child_right in enumerate(tree_info_dict['children_right']):
if child_right != -1:
link['source'] = i
link['target'] = child_right
links.append(link.copy())
links_right.append(link.copy())
tree_info_dict['links'] = links
tree_info_dict['nodes_height'] = self._calc_nodes_height(
tree_info_dict['node_count'],
tree_info_dict['max_depth'],
tree_info_dict['links']
)
        # Count how many child nodes sit directly under each parent node
child_counts = []
for i in range(tree_info_dict['node_count']):
child_count = 0
for link in tree_info_dict['links']:
if link['source'] == i:
child_count += 1
child_counts.append(child_count)
        # Adjust the x-axis placement of the nodes
base_distance = 0.6
append_coordinate = [base_distance * -1, base_distance]
x_dict = {}
for i in range(tree_info_dict['node_count']):
x_dict[i] = None
x_dict[0] = 0
for i in range(tree_info_dict['node_count']):
tmp = 0
for link in tree_info_dict['links']:
if link['source'] == i:
x = round(x_dict[link['source']] + append_coordinate[tmp], 5)
height = tree_info_dict['nodes_height'][link['target']]
for j, node_height in enumerate(tree_info_dict['nodes_height']):
try:
if (round(height, 1) == round(node_height, 1)) \
and (round(x_dict[j], 1) == round(x, 1)):
x += base_distance * 2
x = round(x, 5)
except TypeError:
                            # Ignore comparisons against None
pass
x_dict[link['target']] = round(x, 5)
tmp += 1
        # If a parent node and its child are too far apart on the x-axis, adjust
for link in tree_info_dict['links']:
diff = x_dict[link['source']] - x_dict[link['target']]
if round(diff, 1) < base_distance * -2:
height = tree_info_dict['nodes_height'][link["target"]]
for heigh in range(height + 1):
nodes = [i for i, x in enumerate(tree_info_dict['nodes_height']) if x == heigh]
for node in nodes:
x_dict[node] -= base_distance
return x_dict, tree_info_dict
def _calc_nodes_height(self, node_count, max_depth, links):
heights_list = list(range(node_count))
heights_list[0] = max_depth
k = [0]
for i in range(node_count):
tmp = []
for link in links:
if link['source'] in k:
heights_list[link['target']] = heights_list[link['source']] - 1
tmp.append(link['target'])
k = tmp.copy()
return heights_list
def _get_texts(self, tree_info_dict):
texts = []
values = ''
for i in range(tree_info_dict['node_count']):
if not tree_info_dict['features'][i] == 'target':
text = f'{tree_info_dict["features"][i]} <= {tree_info_dict["thresholds"][i]:,.2f}\n'
else:
text = ''
if self.disp_values:
values = f'values = {tree_info_dict["values"][i]}\n'
text += f'{tree_info_dict["criterion"]} = {tree_info_dict["impurities"][i]:.2f}\n\
samples = {tree_info_dict["samples"][i]}\n' + values + \
f'class = {self.classes[i]}'
texts.append(text)
return texts
def draw_figure(self, x_dict, tree_info_dict):
fig = plt.figure(
figsize=[
(max(x_dict.values()) - min(x_dict.values())) * 6,
tree_info_dict['nodes_height'][0] * 5
]
)
ax = fig.add_subplot(111)
        # Remove unneeded frame lines and axes
fig.patch.set_alpha(0)
ax.patch.set_alpha(0)
ax.tick_params(labelbottom=False, bottom=False)
ax.tick_params(labelleft=False, left=False)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.set_xlim(min(x_dict.values()), max(x_dict.values()))
viz_x = {}
viz_y = {}
for i in range(tree_info_dict['node_count']):
viz_x[i] = x_dict[i]
viz_y[i] = tree_info_dict['nodes_height'][i]
rect_width = 1
rect_height = 0.7
texts = self._get_texts(tree_info_dict)
if self.filled:
colors = [self.cmap(self.class_ids[i]) for i in range(len(self.class_ids))]
else:
colors = ['white' for i in range(len(self.class_ids))]
if tree_info_dict["criterion"] == 'gini':
alphas = 1 - tree_info_dict['impurities']
else:
alphas = 1 - MinMaxScaler((0, 0.9)).fit_transform(tree_info_dict['impurities'].reshape(-1, 1)).flatten()
for i, text in enumerate(texts):
            # Draw the rectangle representing the node
rectangle = mpatch.Rectangle(
(viz_x[i], viz_y[i]),
rect_width,
rect_height,
color=colors[i],
alpha=alphas[i],
ec='#000000'
)
ax.add_artist(rectangle)
            # Draw the text inside the node
rx, ry = rectangle.get_xy()
cx = rx + rectangle.get_width() / 2.0
cy = ry + rectangle.get_height() / 2.0
ax.annotate(text, (cx, cy), color='black',
fontsize=20, ha='center', va='center')
        # Draw the arrows (edges between nodes)
for link in tree_info_dict['links']:
x = x_dict[link['source']] + rect_width / 2
y = tree_info_dict['nodes_height'][link['source']]
dx = x_dict[link['target']] + rect_width / 2
dy = tree_info_dict['nodes_height'][link['target']] + rect_height
ax.annotate(s='', xy=(dx, dy), xytext=(x, y),
xycoords='data',
arrowprops=dict(arrowstyle='->', color='black')
)
ax.set_xlim(min(x_dict.values()), max(x_dict.values()) + rect_width)
_ = ax.set_ylim(min(tree_info_dict['nodes_height']), max(tree_info_dict['nodes_height']) + rect_height)
return fig
def view(self, interactive=False):
'''
Parameters
---------------
interactive: Bool
return
--------------
if interactive:
fig: ipywidgets.VBox object
else:
fig: matplotlib.figure object
'''
x_dict, tree_info_dict = self._calc_nodes_relation()
if interactive:
from . import interactive as it
return it.view_interactive(self.feature_names, self.target_names, self.X, self.y, self.model, self.eval,
self.disp_values)
else:
fig = self.draw_figure(x_dict, tree_info_dict)
return fig
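# Usage sketch (added for illustration, not part of the original module); it
# assumes a scikit-learn classifier fitted on matching data, and 'tree.png'
# is just a placeholder output path.
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier
    data = load_iris()
    clf = DecisionTreeClassifier(max_depth=3).fit(data.data, data.target)
    viz = dtreeplt(model=clf, X=data.data, y=data.target,
                   feature_names=data.feature_names,
                   target_names=data.target_names)
    fig = viz.view()         # plain matplotlib figure
    fig.savefig('tree.png')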
|
StarcoderdataPython
|
1714639
|
import sys
import cadquery as cq
# obj = cq.Workplane("YZ").circle(1).extrude(10)
# show_object(obj, "obj.1")
# obj = obj.copyWorkplane(cq.Workplane("XZ")).circle(2).extrude(15)
# show_object(obj, "obj.2")
# obj = obj.copyWorkplane(cq.Workplane("XY")).box(10, 5, 1)
# show_object(obj, "obj.3")
obj = cq.Workplane("YZ").circle(1).extrude(10)
show_object(obj, "obj.1")
print(f"obj.1.all()={obj.all()}")
print(f"obj.1.parent.all()={obj.parent.all()}")
obj = obj.copyWorkplane(cq.Workplane("XZ")).circle(2).extrude(15)
show_object(obj, "obj.2")
print(f"obj.2.all()={obj.all()}")
print(f"obj.2.parent.all()={obj.parent.all()}")
obj = obj.copyWorkplane(cq.Workplane("XY"))
show_object(obj, "obj.3")
print(f"obj.3.all()={obj.all()}")
print(f"obj.3.parent.all()={obj.parent.all()}")
objx = obj.circle(2)
show_object(objx, "objx")
print(f"objx.all()={objx.all()}")
print(f"objx.parent.all()={objx.parent.all()}")
objy = objx.union()
show_object(objy, "objy")
print(f"objy.all()={objy.all()}")
print(f"objy.parent.all()={objy.parent.all()}")
#objy = objx.extrude(5, combine=False)
#show_object(objy, "objy")
#print(f"objy.all()={objy.all()}")
#print(f"objy.parent.all()={objy.parent.all()}")
|
StarcoderdataPython
|
3233801
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import locale
from decimal import Decimal
from django import template
register = template.Library()
@register.filter
def to_json(value):
return json.dumps(value)
@register.filter
def to_abs(value):
return abs(value)
@register.filter
def amount_split(value):
""" 用逗号分隔数据 """
return '{:,}'.format(float(value))
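# Quick demo (added for illustration, not part of the original file); in a
# template these filters would be used as ``{{ value|amount_split }}`` etc.
if __name__ == "__main__":
    print(to_json({"a": 1}))         # {"a": 1}
    print(to_abs(-3))                # 3
    print(amount_split(1234567.89))  # 1,234,567.89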
|
StarcoderdataPython
|
3277895
|
# Find the sum of the only eleven primes that are both truncatable from left to right and right to left.
import prime
class Solve(object):
def __init__(self):
self.such_primes = []
self.nr_such_prime = 11
def solve(self):
def contain_even(x):
for i in str(x):
if i in '0468':
return True
return False
def gen_truncates(x):
s = str(x)
for i in range(1, len(s)):
yield int(s[:i])
for i in range(len(s)-1, 0, -1):
yield int(s[i:])
primes = [2, 3, 5, 7]
for p in prime.primes_between(start=10):
primes.append(p)
if not contain_even(p):
for i in gen_truncates(p):
if i not in primes:
break
else:
                    print(len(self.such_primes), 'Found', p)
self.such_primes.append(p)
if len(self.such_primes) >= self.nr_such_prime:
break
return sum(self.such_primes)
s = Solve()
print(s.solve())
|
StarcoderdataPython
|
88892
|
<filename>monday/resources/items.py
from monday.resources.base import BaseResource
from monday.query_joins import mutate_item_query, get_item_query, update_item_query, get_item_by_id_query, \
update_multiple_column_values_query, mutate_subitem_query, add_file_to_column_query
class ItemResource(BaseResource):
def __init__(self, token):
super().__init__(token)
def create_item(self, board_id, group_id, item_name, column_values=None,
create_labels_if_missing=False):
query = mutate_item_query(board_id, group_id, item_name, column_values,
create_labels_if_missing)
return self.client.execute(query)
def create_subitem(self, parent_item_id, subitem_name, column_values=None,
create_labels_if_missing=False):
query = mutate_subitem_query(parent_item_id, subitem_name, column_values,
create_labels_if_missing)
return self.client.execute(query)
def fetch_items_by_column_value(self, board_id, column_id, value):
query = get_item_query(board_id, column_id, value)
return self.client.execute(query)
def fetch_items_by_id(self, ids, limit=25):
query = get_item_by_id_query(ids, limit)
return self.client.execute(query)
def change_item_value(self, board_id, item_id, column_id, value):
query = update_item_query(board_id, item_id, column_id, value)
return self.client.execute(query)
def change_multiple_column_values(self, board_id, item_id, column_values):
query = update_multiple_column_values_query(board_id, item_id, column_values)
return self.client.execute(query)
def add_file_to_column(self, item_id, column_id, file):
query = add_file_to_column_query(item_id, column_id)
return self.file_upload_client.execute(query, variables={'file': file})
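# Usage sketch (added for illustration, not part of the original file). The
# token environment variable, board id, group id and column values below are
# placeholders; running this performs real API requests against monday.com.
if __name__ == "__main__":
    import os

    items = ItemResource(os.environ["MONDAY_API_TOKEN"])
    new_item = items.create_item(board_id=1234567890, group_id="topics",
                                 item_name="Example task",
                                 column_values={"status": "Working on it"})
    print(new_item)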
|
StarcoderdataPython
|
3322841
|
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
class MissionSearchForm(Form):
'''
Form to perform the search functionality
    in the /search route, where one needs to
    type in a mission name
'''
mission = TextField('Field', validators=[validators.DataRequired()])
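# Usage sketch (added for illustration, not part of the original file); the
# surrounding Flask-style view and the search helper are assumptions:
#
#     form = MissionSearchForm(request.form)
#     if request.method == 'POST' and form.validate():
#         query = form.mission.data
#         results = search_missions(query)   # hypothetical helper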
|
StarcoderdataPython
|
1656644
|
import time
import sips
from sips.h import attach
from sips.h import hot
from sips.h import helpers as h
from sips.h import serialize as s
from sips.macros import macros as m
from sips.macros import bov as bm
def test_get_and_window():
columns = ["a_pts", "h_pts", "quarter", "secs"]
dfs = h.get_dfs(m.PARENT_DIR + "data/lines/lines/")
sets = [
h.seq_windows(df.values, df[columns].values, history_size=10, target_size=10)
for df in dfs
]
first = sets[0]
X, y = first
print(f"X.shape : {X.shape}")
print(f"y.shape : {y.shape}")
return X, y
def test_attach_wins():
# get dataframes and attach the win/losses
dfs = h.get_dfs()
w_wins = attach.wins(dfs)
return w_wins
def test_get_filter_and_serialize():
dfs = h.get_dfs()
data = s.serialize_dfs(dfs)
sdfs = s.serialize_dfs(
dfs, label_cols=["a_pts", "h_pts", "a_ml", "h_ml"], to_numpy=False
)
zipped = list(zip(sdfs[0], sdfs[1]))
print(len(zipped))
print(zipped[0])
return data, sdfs
def df_filtering_commutative_time_delta():
"""
Tests whether applying a min game length then filtering for wins
is faster than filtering the other way around.
Both are slow and the filter needs to be done in one iteration of the dfs.
"""
all_dfs = h.get_dfs()
num_dfs_initial = len(all_dfs)
start1 = time.time()
apply_then_filter = h.apply_min_then_filter(all_dfs, verbose=True)
end1 = time.time()
start2 = time.time()
filter_then_apply_min = h.filter_then_apply_min(all_dfs, verbose=True)
end2 = time.time()
delta1 = end1 - start1
delta2 = end2 - start2
if len(filter_then_apply_min) > 0:
print(f"df: {filter_then_apply_min[0]}")
print(f"df: {filter_then_apply_min[0].status}")
print(f"delta 1: {delta1}")
print(f"delta 2: {delta2}")
return delta1, delta2
def test_sdfs():
dfs = h.get_dfs()
cols = bm.TO_SERIALIZE
maps = hot.all_hot_maps()
numbers = s.serialize_dfs(
dfs, in_cols=None, label_cols=None, hot_maps=maps, to_numpy=False
)
print(numbers)
return numbers
def test_heat():
dfs = h.get_dfs()
df = dfs[0]
hot_maps = hot.all_hot_maps(output="dict")
hotted = hot.hot(df, hot_maps=hot_maps)
return hotted
if __name__ == "__main__":
X, y = test_get_and_window()
w_wins = test_attach_wins()
data, sdfs = test_get_filter_and_serialize()
d1, d2 = df_filtering_commutative_time_delta()
numbers = test_sdfs()
hotted = test_heat()
|
StarcoderdataPython
|
1735346
|
<reponame>wtsnjp/nlp100
#
# usage: python k36.py {file name}
#
import sys
import collections
from k30 import load_mecab
def frequency_ranking(data):
cd = collections.Counter([m['base'] for s in data for m in s])
return [[k, v] for k,v in cd.most_common()]
if __name__ == '__main__':
fn = sys.argv[1]
data = load_mecab(fn)
for v in frequency_ranking(data):
print(v[0], v[1])
|
StarcoderdataPython
|
4836996
|
<reponame>davbre/rotki
from eth_utils import is_checksum_address
from rotkehlchen.constants.ethereum import EthereumConstants
def test_ethereum_contracts():
"""Test that all ethereum contract entries have legal data"""
for _, entry in EthereumConstants().contracts.items():
assert len(entry) == 3
assert is_checksum_address(entry['address'])
assert entry['deployed_block'] > 0
assert isinstance(entry['abi'], list)
def test_ethereum_abi():
"""Test that the ethereum abi entries have legal data"""
for _, entry in EthereumConstants().abi_entries.items():
assert isinstance(entry, list)
|
StarcoderdataPython
|
1763138
|
#coding=utf-8
'''
Created on January 12, 2016
@author: hadoop
'''
class UrlManager(object):
    def __init__(self):  # constructor
        self.new_urls = set()
        self.old_urls = set()
    def add_new_url(self, url):  # add a single new url to the manager
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:  # the url is neither among the new urls nor among the already-crawled urls
            self.new_urls.add(url)
    '''def add_new_urls(self, urls):  # add new urls to the manager
        print("Adding new urls...")
        if urls is None or len(urls) == 0:  # None or empty list
            return
        for url in urls:
            self.add_new_url(url)'''
    def add_all_urls(self, urls):  # add all urls to be downloaded to the manager
        print("Adding new urls...")
        if urls is None or len(urls) == 0:  # None or empty list
            return
        for url in urls:
            self.add_new_url(url)
    def has_new_url(self):  # whether the manager still holds new urls waiting to be crawled
        return len(self.new_urls) != 0
    def get_new_url(self):  # fetch one new url to crawl from the manager
        print("Fetching a url to crawl...")
        # print("before pop", self.new_urls)
        new_url = self.new_urls.pop()  # pop from the set
        # print("after pop", self.new_urls)
        self.old_urls.add(new_url)
        return new_url
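# Usage sketch (added for illustration, not part of the original file).
if __name__ == '__main__':
    manager = UrlManager()
    manager.add_all_urls(["http://example.com/a", "http://example.com/b"])
    while manager.has_new_url():
        print(manager.get_new_url())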
|
StarcoderdataPython
|
3227776
|
<reponame>foerstner-lab/GRADitude
import pandas as pd
def exclude_the_min_row_sum(feature_count_table,
feature_count_start_column, feature_count_end_column, min_row, output_file):
feature_count_table_df = pd.read_table(feature_count_table)
matrix_value = _extract_value_matrix(feature_count_table_df,
feature_count_start_column, feature_count_end_column)
colum_with_gene_name = _extract_gene_matrix(feature_count_table_df)
attribute_matrix = _extract_attributes(feature_count_table_df, feature_count_start_column)
min_row_sum(matrix_value, attribute_matrix, colum_with_gene_name, min_row, output_file)
def _extract_value_matrix(feature_count_table_df, feature_count_start_column,
feature_count_end_column):
return feature_count_table_df.iloc[:, feature_count_start_column:(
feature_count_end_column)]
def _extract_gene_matrix(feature_count_table_df):
gene_column = feature_count_table_df[list(filter(
lambda col: col.startswith("Attributes"), feature_count_table_df.columns))]
return gene_column
def _extract_attributes(feature_count_table_df,
feature_count_start_column):
return feature_count_table_df.iloc[:, : feature_count_start_column]
def min_row_sum(value_matrix, attribute_matrix, gene_column, min_row, output_file):
gene_table_final = []
combined_df_ext = pd.concat([attribute_matrix, value_matrix], axis=1)
summed_values = value_matrix.sum(axis=1)
combined_df = pd.concat([gene_column, summed_values], axis=1)
combined_df.columns = ['Attributes', 'sum_of_values']
selected_df = combined_df[~(combined_df['sum_of_values'] <= min_row)]
selected_df.reset_index(drop=True, inplace=True)
my_keys = selected_df['Attributes'].tolist()
for index, row in combined_df_ext.iterrows():
gene = row["Attributes"]
if gene in my_keys:
gene_table_final.append(row)
df_with_min_row_samples = pd.DataFrame(gene_table_final)
df_with_min_row_samples.reset_index(drop=True, inplace=True)
df_with_min_row_samples.to_csv(output_file, sep='\t', index=0)
|
StarcoderdataPython
|
83495
|
import pandas as pd
from .. import config
def process(filename, is_continuous=False, threshold=1.96):
"""
Parameters
----------
filename: :str
tab separated file in which the first row contains gene name/entrez gene id combined
and patient ids. The rest of the rows are the genes and their expressions on each patient.
is_continuous: :bool if true returns normalized z scores
threshold: :float 1.96
defaults to two standard deviation thresholding for under/over expressed for discrete calculation
Returns
-------
gene_expressions: if is_continuous is True:
a dataframe indicating the normalized values of expressions of genes where
genes with entrez gene id are on rows and patient ids are on columns
otherwise: a dataframe indicating the over- (1) and under-expressed (-1) genes where
genes with entrez gene id are on rows and patient ids are on columns
gene_name_map: a dataframe indicating the name of the genes of entrez gene ids
"""
fpath = config.get_safe_data_file(filename)
data = pd.read_csv(fpath, sep="\t")
# data = data.set_index(['Gene Name', 'Entrez Gene ID'])
# data = data.set_index('Entrez Gene ID')
data[["gene_name", "entrez_gene_id"]] = data["#probe"].str.split(
"|", n=1, expand=True
)
data = data.set_index(["entrez_gene_id"])
data = data.drop(columns=["#probe"])
# IF THERE ARE ADDITIONAL PROBLEMS TO BE CONSIDERED, PREPROCESS THE DATA ACCORDINGLY
# drop genes with name '?'
tmp = data.index[data["gene_name"] == "?"]
data = data.drop(tmp)
# sort the dataframe based on both gene name and patient id
data = data.sort_index(axis=1)
data = data.sort_values("gene_name")
gene_name_map = data["gene_name"]
data = data.drop(columns=["gene_name"])
# delete patients with non-solid tumor
tmp = [row[3][0:2] == "01" for row in data.columns.str.split("-").tolist()]
ind = [
        i for i, x in enumerate(tmp) if not x
] # find the indices of non-solid tumors
data = data.drop(columns=data.columns[ind]) # drop them from the data frame
# drop the genes which are not expressed more than half of the samples
genes_to_drop = data.index[(data == 0).T.sum().values > (len(data.columns) / 2)]
data = data.drop(genes_to_drop)
# calculate z-scores
mean_exp = data.mean(axis=1, numeric_only=True)
std_exp = data.std(axis=1, numeric_only=True)
z_scores = data.subtract(mean_exp, axis=0)
z_scores = z_scores.div(std_exp, axis=0)
if is_continuous:
gene_expression = pd.DataFrame(z_scores, index=data.index, columns=data.columns)
else:
# find differentially expressed genes
gene_expression = pd.DataFrame(0, index=data.index, columns=data.columns)
# 1 for over-expressed, -1 for under-expressed
gene_expression[z_scores > threshold] = 1
gene_expression[z_scores < -threshold] = -1
return gene_expression, gene_name_map
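# Usage sketch (added for illustration, not part of the original module). The
# file name is a placeholder and must resolve through config.get_safe_data_file
# to a tab-separated expression matrix as described in the docstring:
#
#     z_scores, name_map = process("expression_matrix.tsv", is_continuous=True)
#     flags, name_map = process("expression_matrix.tsv", threshold=1.96)  # -1/0/1 matrix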
|
StarcoderdataPython
|
176365
|
# -*- coding: utf-8 -*-
"""jira_lex.py: Django datatableview_advanced_search"""
from __future__ import unicode_literals
from __future__ import print_function
import sys
import logging
from datetime import date
__author__ = '<NAME>'
__date__ = '2/28/18 9:20 AM'
__copyright__ = 'Copyright 2018 IC Manage. All rights reserved.'
__credits__ = ['<NAME>', ]
log = logging.getLogger(__name__)
reserved = {
'IN': 'IN',
'AND': 'AND',
'OR': 'OR',
'NOT': 'NOT'
}
class AdvancedSearchLexer(object):
tokens = ['WORD', 'SINGLE_QUOTE_WORD', 'DOUBLE_QUOTE_WORD', 'DATE', 'FLOAT', 'INT',
              'COMPARE', 'LBRACK', 'RBRACK', 'COMMA', 'LPAREN', 'RPAREN'] + list(reserved.values())
t_COMPARE = r'!?=|[<>]=?|~='
t_COMMA = r','
t_LBRACK = r'\['
t_RBRACK = r'\]'
t_LPAREN = r'\('
t_RPAREN = r'\)'
def __init__(self, **kwargs):
import ply.lex as lex
self.lexer = lex.lex(module=self, **kwargs)
self.lexer.linestart = 0
def __iter__(self):
return iter(self.lexer)
# dates are in the following format: /mm/dd/yyyy
def t_DATE(self, t):
r'(?P<month>\d{1,2})/(?P<day>\d{1,2})/(?P<year>\d{4})'
day = int(t.lexer.lexmatch.group('day'))
month = int(t.lexer.lexmatch.group('month'))
year = int(t.lexer.lexmatch.group('year'))
t.value = date(year, month, day)
return t
def t_SINGLE_QUOTE_WORD(self, t):
r"(')(?P<word>[a-zA-Z_0-9][a-zA-Z_0-9:\.\\'\" ]*)(')"
t.value = t.lexer.lexmatch.group('word')
return t
def t_DOUBLE_QUOTE_WORD(self, t):
r'(")(?P<word>[a-zA-Z_0-9][a-zA-Z_0-9:\.\\"\' ]*)(")'
t.value = t.lexer.lexmatch.group('word')
return t
def t_WORD(self, t):
r'[a-zA-Z_][a-zA-Z_0-9:\.]*|\d+[a-zA-Z_:]+[a-zA-Z_0-9:\.]*'
# This allows for words beginning with numbers but you must have a letter in there somewhere
t.type = reserved.get(t.value, 'WORD') # Check for reserved words
return t
def t_FLOAT(self, t):
r'[-+]?\d+\.(\d+)?([eE][-+]?\d+)?'
t.value = float(t.value)
return t
def t_INT(self, t):
r'[-+]?\d+'
t.value = int(t.value)
return t
# A string containing ignored characters (spaces and tabs)
t_ignore = " \t"
def t_newline(self, t):
r'\n+'
t.lexer.lineno += len(t.value)
def t_error(self, t):
log.error("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Test it output
def test(self, data, print_output=True):
self.lexer.input(data)
while True:
tok = self.lexer.token()
if not tok:
break
if print_output:
print(tok)
return True
def main(args):
"""Main - $<description>$"""
logging.basicConfig(
level=logging.DEBUG, datefmt="%H:%M:%S", stream=sys.stdout,
format="%(asctime)s %(levelname)s [%(filename)s] (%(name)s) %(message)s")
# Test it out
data = '''
(foo='bar\'s' AND x=1) OR (y NOT IN [2, 3, -3.5]) AND datestamp >= 1/25/2018 AND X="THe OTH3R"
'''
m = AdvancedSearchLexer()
m.test(data) # Test it
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="$<description>$")
sys.exit(main(parser.parse_args()))
|
StarcoderdataPython
|
1672095
|
<filename>umpnet/unet_parts.py
""" Parts of the U-Net model """
import torch
import torch.nn as nn
import torch.nn.functional as F
class Conv(nn.Module):
"""convolution => [BN] => ReLU"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
)
def forward(self, x):
return self.conv(x)
class DoubleConv(nn.Module):
"""(convolution => [BN] => ReLU) * 2"""
def __init__(self, in_channels, out_channels, mid_channels=None):
super().__init__()
if not mid_channels:
mid_channels = out_channels
self.double_conv = nn.Sequential(
nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True)
)
def forward(self, x):
return self.double_conv(x)
class Down(nn.Module):
"""Downscaling with maxpool then double conv"""
def __init__(self, in_channels, out_channels):
super().__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(2),
DoubleConv(in_channels, out_channels)
)
def forward(self, x):
return self.maxpool_conv(x)
class Up(nn.Module):
"""Upscaling then double conv"""
def __init__(self, in_channels, out_channels, bilinear=True):
super().__init__()
# if bilinear, use the normal convolutions to reduce the number of channels
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
else:
self.up = nn.ConvTranspose2d(in_channels , in_channels // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_channels, out_channels)
def forward(self, x1, x2):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
diffY // 2, diffY - diffY // 2])
# if you have padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
class OutConv(nn.Module):
def __init__(self, in_channels, out_channels):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)
def forward(self, x):
return self.conv(x)
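# Minimal composition sketch (added for illustration, not part of the original
# file): two Down steps and two Up steps wired into a tiny encoder/decoder to
# show how the skip connections and channel counts line up with bilinear=True.
if __name__ == "__main__":
    inc = DoubleConv(3, 16)
    down1, down2 = Down(16, 32), Down(32, 32)  # bottleneck keeps 32 channels for bilinear upsampling
    up1, up2 = Up(64, 16), Up(32, 16)          # in_channels = concatenated channel count
    outc = OutConv(16, 1)

    x = torch.randn(1, 3, 64, 64)
    x1 = inc(x)             # [1, 16, 64, 64]
    x2 = down1(x1)          # [1, 32, 32, 32]
    x3 = down2(x2)          # [1, 32, 16, 16]
    y = up1(x3, x2)         # [1, 16, 32, 32]
    y = up2(y, x1)          # [1, 16, 64, 64]
    print(outc(y).shape)    # torch.Size([1, 1, 64, 64])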
|
StarcoderdataPython
|
141506
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import reusables
from box import __version__ as box_version
from qtpy import API, QtCore, QtGui, QtWidgets
from fastflix.language import t
from fastflix.shared import base_path, link, pyinstaller
from fastflix.version import __version__
__all__ = ["About"]
class About(QtWidgets.QWidget):
def __init__(self, parent=None):
super(About, self).__init__(parent)
layout = QtWidgets.QGridLayout()
self.setMinimumSize(QtCore.QSize(400, 400))
build_file = Path(base_path, "build_version")
build = t("Build")
label = QtWidgets.QLabel(
f"<b>FastFlix</b> v{__version__}<br>"
f"{f'{build}: {build_file.read_text().strip()}<br>' if build_file.exists() else ''}"
f"<br>{t('Author')}: {link('https://github.com/cdgriffith', '<NAME>')}"
f"<br>{t('Dual License')}: MIT (Code) / {'L' if API == 'pyside2' else ''}GPL (Release)"
)
label.setFont(QtGui.QFont("Arial", 14))
label.setAlignment(QtCore.Qt.AlignCenter)
label.setOpenExternalLinks(True)
label.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
layout.addWidget(label)
support_label = QtWidgets.QLabel(
f'{link("https://github.com/cdgriffith/FastFlix/wiki/Support-FastFlix", t("Support FastFlix"))}<br><br>'
)
support_label.setOpenExternalLinks(True)
support_label.setFont(QtGui.QFont("Arial", 12))
support_label.setAlignment((QtCore.Qt.AlignCenter | QtCore.Qt.AlignTop))
layout.addWidget(support_label)
bundle_label = QtWidgets.QLabel(
f"Conversion suites: {link('https://www.ffmpeg.org/download.html', 'FFmpeg')} ({t('Various')}), "
f"{link('https://github.com/rigaya/NVEnc', 'NVEncC')} (MIT)<br><br>"
f"Encoders: <br> {link('https://github.com/rigaya/NVEnc', 'NVEncC')} (MIT), "
f"SVT AV1 (MIT), rav1e (MIT), aom (MIT), x265 (GPL), x264 (GPL), libvpx (BSD)"
)
bundle_label.setAlignment(QtCore.Qt.AlignCenter)
bundle_label.setOpenExternalLinks(True)
layout.addWidget(bundle_label)
supporting_libraries_label = QtWidgets.QLabel(
"Supporting libraries<br>"
f"{link('https://www.python.org/', t('Python'))}{reusables.version_string} (PSF LICENSE), "
f"{link('https://github.com/cdgriffith/Box', t('python-box'))} {box_version} (MIT), "
f"{link('https://github.com/cdgriffith/Reusables', t('Reusables'))} {reusables.__version__} (MIT)<br>"
"mistune (BSD), colorama (BSD), coloredlogs (MIT), Requests (Apache 2.0)<br>"
"appdirs (MIT), iso639-lang (MIT), psutil (BSD), qtpy (MIT), pathvalidate (MIT) <br>"
)
supporting_libraries_label.setAlignment(QtCore.Qt.AlignCenter)
supporting_libraries_label.setOpenExternalLinks(True)
layout.addWidget(supporting_libraries_label)
if pyinstaller:
pyinstaller_label = QtWidgets.QLabel(
f"Packaged with: {link('https://www.pyinstaller.org/index.html', 'PyInstaller')}"
)
pyinstaller_label.setAlignment(QtCore.Qt.AlignCenter)
pyinstaller_label.setOpenExternalLinks(True)
layout.addWidget(QtWidgets.QLabel())
layout.addWidget(pyinstaller_label)
license_label = QtWidgets.QLabel(
link("https://github.com/cdgriffith/FastFlix/blob/master/docs/build-licenses.txt", t("LICENSES"))
)
license_label.setAlignment(QtCore.Qt.AlignCenter)
license_label.setOpenExternalLinks(True)
layout.addWidget(QtWidgets.QLabel())
layout.addWidget(license_label)
self.setLayout(layout)
|
StarcoderdataPython
|
4836323
|
<filename>moneysocket/nexus/transact/provider.py
# Copyright (c) 2020 <NAME>
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
import logging
from moneysocket.nexus.nexus import Nexus
from moneysocket.message.notification.invoice import NotifyInvoice
from moneysocket.message.notification.preimage import NotifyPreimage
from moneysocket.message.notification.provider import NotifyProvider
LAYER_REQUESTS = {"REQUEST_PAY", "REQUEST_INVOICE"}
class ProviderTransactNexus(Nexus):
def __init__(self, below_nexus, layer):
super().__init__(below_nexus, layer)
self.handleinvoicerequest = None
self.handlepayrequest = None
def handle_layer_request(self, msg):
if msg['request_name'] == "REQUEST_INVOICE":
assert self.handleinvoicerequest
self.handleinvoicerequest(self, msg['msats'], msg['request_uuid'])
elif msg['request_name'] == "REQUEST_PAY":
assert self.handlepayrequest
self.handlepayrequest(self, msg['bolt11'], msg['request_uuid'])
def is_layer_message(self, msg):
if msg['message_class'] != "REQUEST":
return False
return msg['request_name'] in LAYER_REQUESTS
def on_message(self, below_nexus, msg):
if not self.is_layer_message(msg):
super().on_message(below_nexus, msg)
return
self.handle_layer_request(msg)
def on_bin_message(self, below_nexus, msg_bytes):
pass
def notify_invoice(self, bolt11, request_reference_uuid):
self.send(NotifyInvoice(bolt11, request_reference_uuid))
def notify_preimage(self, preimage, request_reference_uuid):
self.send(NotifyPreimage(preimage, None, request_reference_uuid))
def notify_provider_info(self, shared_seed):
assert self.layer.handleproviderinforequest
pi = self.layer.handleproviderinforequest(shared_seed)
logging.debug("NOTIFY PROVIDER: %s" % pi['wad'])
m = NotifyProvider(pi['account_uuid'], payer=pi['payer'],
payee=pi['payee'], wad=pi['wad'])
self.send(m)
|
StarcoderdataPython
|
1721470
|
"""Evented dictionary"""
import sys
from typing import (
Any,
Dict,
Iterator,
Mapping,
MutableMapping,
Sequence,
Type,
TypeVar,
Union,
)
_K = TypeVar("_K")
_T = TypeVar("_T")
class TypedMutableMapping(MutableMapping[_K, _T]):
"""Dictionary mixin that enforces item type."""
def __init__(
self,
data: Mapping[_K, _T] = None,
basetype: Union[Type[_T], Sequence[Type[_T]]] = (),
):
if data is None:
data = {}
self._dict: Dict[_K, _T] = dict()
self._basetypes = (
basetype if isinstance(basetype, Sequence) else (basetype,)
)
self.update(data)
# #### START Required Abstract Methods
    def __setitem__(self, key: _K, value: _T):  # noqa: F811
self._dict[key] = self._type_check(value)
def __delitem__(self, key: _K) -> None:
del self._dict[key]
def __getitem__(self, key: _K) -> _T:
return self._dict[key]
def __len__(self) -> int:
return len(self._dict)
    def __iter__(self) -> Iterator[_K]:
return iter(self._dict)
def __repr__(self):
return str(self._dict)
if sys.version_info < (3, 8):
def __hash__(self):
# We've explicitly added __hash__ for python < 3.8 because otherwise
# nested evented dictionaries fail tests.
# This can be removed once we drop support for python < 3.8
# see: https://github.com/napari/napari/pull/2994#issuecomment-877105434
return hash(frozenset(self))
def _type_check(self, e: Any) -> _T:
if self._basetypes and not any(
isinstance(e, t) for t in self._basetypes
):
raise TypeError(
f"Cannot add object with type {type(e)} to TypedDict expecting type {self._basetypes}",
)
return e
def __newlike__(self, iterable: MutableMapping[_K, _T]):
new = self.__class__()
# separating this allows subclasses to omit these from their `__init__`
new._basetypes = self._basetypes
new.update(**iterable)
return new
    def copy(self) -> "TypedMutableMapping[_K, _T]":
"""Return a shallow copy of the dictionary."""
return self.__newlike__(self)
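# Small demonstration (added for illustration, not part of the original file):
# the basetype check rejects values that are not instances of the given type.
if __name__ == "__main__":
    d = TypedMutableMapping({"a": 1, "b": 2}, basetype=int)
    d["c"] = 3
    try:
        d["oops"] = "not an int"
    except TypeError as err:
        print(err)
    print(d.copy())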
|
StarcoderdataPython
|
146234
|
import requests
from .BITBOX import REST_URL
class Transaction:
def details(txid):
if type(txid) is str:
response = requests.get(REST_URL+"transaction/details/"+txid)
return response.json()
elif type(txid) is list:
response = requests.post(REST_URL+"transaction/details", data={"txids": txid})
return response.json()
else:
raise TypeError("Input txid must be a string or array of strings.")
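# Usage sketch (added for illustration, not part of the original file); the
# transaction ids are placeholders and each call issues a live HTTP request:
#
#     single = Transaction.details("a1b2c3...txid")
#     several = Transaction.details(["txid_one", "txid_two"])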
|
StarcoderdataPython
|
164582
|
<reponame>Kwongrf/pytorch-retinanet
from __future__ import print_function, division
import sys
import os
import torch
import numpy as np
import random
import math
import csv
from six import raise_from
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from torch.utils.data.sampler import Sampler
from pycocotools.coco import COCO
import skimage.io
import skimage.transform
import skimage.color
import skimage
from PIL import Image
import pydicom
from collections import Counter
from transform import *
from image import (
TransformParameters,
adjust_transform_for_image,
apply_transform,
preprocess_image,
resize_image
)
#######################################################KRF CREATED 2018/11/14######################################################################
class MyDataset(Dataset):
"""My dataset.类似于CSV dataset,差异在于CSV格式稍有不同,以及图片处理过程不同"""
def __init__(self, train_file, class_list, transform=None):
"""
Args:
train_file (string): CSV file with training annotations
annotations (string): CSV file with class list
test_file (string, optional): CSV file with testing annotations
"""
self.train_file = train_file
self.class_list = class_list
self.transform = transform
# parse the provided class file
try:
with self._open_for_csv(self.class_list) as file:
self.classes = self.load_classes(csv.reader(file, delimiter=','))
except ValueError as e:
raise_from(ValueError('invalid CSV class file: {}: {}'.format(self.class_list, e)), None)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
# csv with img_path, x1, y1, x2, y2, class_name
try:
with self._open_for_csv(self.train_file) as file:
self.image_data = self._read_annotations(csv.reader(file, delimiter=','), self.classes)
except ValueError as e:
raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(self.train_file, e)), None)
self.image_names = list(self.image_data.keys())
def _parse(self, value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
        Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _open_for_csv(self, path):
"""
Open a file with flags suitable for csv.reader.
This is different for python2 it means with mode 'rb',
for python3 this means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def load_classes(self, csv_reader):
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = self._parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def __len__(self):
return len(self.image_names)
def __getitem__(self, idx):
img = self.load_image(idx)
annot = self.load_annotations(idx)
sample = {'img': img, 'annot': annot}
if self.transform:
sample = self.transform(sample)
return sample
def load_image(self, image_index):
# img = skimage.io.imread(self.image_names[image_index])
ds = pydicom.read_file(self.image_names[image_index])
img = ds.pixel_array
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
return img.astype(np.float32)/255.0
def load_annotations(self, image_index):
# get ground truth annotations
annotation_list = self.image_data[self.image_names[image_index]]
annotations = np.zeros((0, 5))
# some images appear to miss annotations (like image with id 257034)
if len(annotation_list) == 0:
return annotations
# parse annotations
for idx, a in enumerate(annotation_list):
# some annotations have basically no width / height, skip them
x1 = a['x1']
x2 = a['x2']
y1 = a['y1']
y2 = a['y2']
            # allow (0, 0, 0, 0) boxes to pass through (images without findings)
# if (x2-x1) < 1 or (y2-y1) < 1:
# continue
annotation = np.zeros((1, 5))
annotation[0, 0] = x1
annotation[0, 1] = y1
annotation[0, 2] = x2
annotation[0, 3] = y2
annotation[0, 4] = self.name_to_label(a['class'])
annotations = np.append(annotations, annotation, axis=0)
return annotations
def _read_annotations(self, csv_reader, classes):
result = {}
for line, row in enumerate(csv_reader):
line += 1
try:
img_file, x1, y1, x2, y2, class_name = row[:6]
except ValueError:
raise_from(ValueError('line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''.format(line)), None)
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
            if (x1, y1, x2, y2) == ('', '', '', ''):  # !!!! the original code did not load the normal (annotation-free) images here
result[img_file].append({'x1': 0, 'x2': 0, 'y1': 0, 'y2': 0, 'class': class_name})
else:
##################change int to float by KRF######################################
x1 = self._parse(x1, float, 'line {}: malformed x1: {{}}'.format(line))
y1 = self._parse(y1, float, 'line {}: malformed y1: {{}}'.format(line))
x2 = self._parse(x2, float, 'line {}: malformed x2: {{}}'.format(line))
y2 = self._parse(y2, float, 'line {}: malformed y2: {{}}'.format(line))
# Check that the bounding box is valid.
if x2 <= x1:
raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))
if y2 <= y1:
raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))
# check if the current class name is correctly present
if class_name not in classes:
raise ValueError('line {}: unknown class name: \'{}\' (classes: {})'.format(line, class_name, classes))
result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})
return result
def name_to_label(self, name):
return self.classes[name]
def label_to_name(self, label):
return self.labels[label]
def num_classes(self):
return max(self.classes.values()) + 1
#####################################Modified by KRF######################
def image_aspect_ratio(self, image_index):
#image = Image.open(self.image_names[image_index])
ds = pydicom.read_file(self.image_names[image_index])
img_arr = ds.pixel_array
image = Image.fromarray(img_arr).convert('RGB')
return float(image.width) / float(image.height)
###############################################################################################################################################
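# Annotation CSV sketch (added for illustration, not part of the original file).
# MyDataset expects one row per box, with empty coordinates for images that
# have no findings; the file names and class names below are placeholders:
#
#     images/patient_0001.dcm,264.0,152.0,477.0,531.0,pneumonia
#     images/patient_0002.dcm,,,,,normal
#
# and the class-list CSV maps each class name to an integer id, e.g.:
#
#     pneumonia,0
#     normal,1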
#######################################################KRF CREATED 2018/11/15###################################################
class TestDataset(Dataset):
"""Test dataset.类似于MyDataset,不过没有csv和label"""
def __init__(self, test_fp, transform=None):
"""
Args:
            test_fp (string): directory containing the test set files
"""
self.test_fp = test_fp
self.transform = transform
        self.image_names = os.listdir(self.test_fp)  # (original note: only test 100 images)
for i in range(len(self.image_names)):
self.image_names[i] = os.path.join(test_fp,self.image_names[i])
def __len__(self):
return len(self.image_names)
def __getitem__(self, idx):
img = self.load_image(idx)
sample = {'img': img,'name' : self.image_names[idx]}
if self.transform:
sample = self.transform(sample)
return sample
def load_image(self, image_index):
# img = skimage.io.imread(self.image_names[image_index])
ds = pydicom.read_file(self.image_names[image_index])
img = ds.pixel_array
if len(img.shape) == 2:
img = skimage.color.gray2rgb(img)
return img.astype(np.float32)/255.0
#####################################Modified by KRF######################
def image_aspect_ratio(self, image_index):
#image = Image.open(self.image_names[image_index])
ds = pydicom.read_file(self.image_names[image_index])
img_arr = ds.pixel_array
image = Image.fromarray(img_arr).convert('RGB')
return float(image.width) / float(image.height)
###################################################################################################################################
# overrides the other Normalizer
class NormalizerTest(object):
def __init__(self):
self.mean = np.array([[[0.485, 0.456, 0.406]]])
self.std = np.array([[[0.229, 0.224, 0.225]]])
def __call__(self, sample):
image, name = sample['img'], sample['name']
#######################################################
        # image = HistEqu(image, mode='gray')  # apply histogram equalization
#######################################################
return {'img':((image.astype(np.float32)-self.mean)/self.std), 'name': name}
class ResizerTest(object):
"""Convert ndarrays in sample to Tensors."""
#def __call__(self, sample, min_side=608, max_side=1024):
    ###########################################KRF Modified###########################################################
def __call__(self, sample, min_side=512, max_side=1024):
image, name = sample['img'], sample['name']
rows, cols, cns = image.shape
smallest_side = min(rows, cols)
# rescale the image so the smallest side is min_side
scale = min_side / smallest_side
# check if the largest side is now greater than max_side, which can happen
# when images have a large aspect ratio
largest_side = max(rows, cols)
if largest_side * scale > max_side:
scale = max_side / largest_side
# resize the image with the computed scale
image = skimage.transform.resize(image, (int(round(rows*scale)), int(round((cols*scale)))))
rows, cols, cns = image.shape
pad_w = 32 - rows%32
pad_h = 32 - cols%32
new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
new_image[:rows, :cols, :] = image.astype(np.float32)
#annots[:, :4] *= scale
return {'img': torch.from_numpy(new_image), 'name': name, 'scale': scale}
#retinanet.load_state_dict('weights/RSNA_retinanet_3.pt')
def collaterTest(data):
imgs = [s['img'] for s in data]
names = [s['name'] for s in data]
scales = [s['scale'] for s in data]
widths = [int(s.shape[0]) for s in imgs]
heights = [int(s.shape[1]) for s in imgs]
batch_size = len(imgs)
max_width = np.array(widths).max()
max_height = np.array(heights).max()
padded_imgs = torch.zeros(batch_size, max_width, max_height, 3)
for i in range(batch_size):
img = imgs[i]
padded_imgs[i, :int(img.shape[0]), :int(img.shape[1]), :] = img
# max_num_annots = max(annot.shape[0] for annot in annots)
# if max_num_annots > 0:
# annot_padded = torch.ones((len(annots), max_num_annots, 5)) * -1
# if max_num_annots > 0:
# for idx, annot in enumerate(annots):
# #print(annot.shape)
# if annot.shape[0] > 0:
# annot_padded[idx, :annot.shape[0], :] = annot
# else:
# annot_padded = torch.ones((len(annots), 1, 5)) * -1
padded_imgs = padded_imgs.permute(0, 3, 1, 2)
return {'img': padded_imgs, 'names': names, 'scale': scales}
class RandomTransformer(object):
"""Transformation to sample"""
def __call__(self,sample):
image, annots = sample['img'], sample['annot']
trs = random_transform(
min_rotation=-0.05,
max_rotation=0.05,
min_translation=(-0.1, -0.1),
max_translation=(0.1, 0.1),
# min_shear=-0.5,
# max_shear=0.5,
min_scaling=(0.9, 0.9),
max_scaling=(1.1, 1.1),
flip_x_chance=0.5,
#flip_y_chance=0.5,
)
tr = adjust_transform_for_image(trs, image, True)
image = apply_transform(tr, image, None)#TransformParameters())
# Transform the bounding boxes in the annotations.
annotations = annots.copy()
if not (annotations[0,2] == 0 or annotations[0,3] == 0):
for index in range(annotations.shape[0]):
annotations[index, :4] = transform_aabb(tr, annotations[index, :4])
sample = {'img':image,'annot':annotations}
return sample
class MyAugmenter(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample, flip_x=0.15, shift = 0.3, scale = 0.4, rotate = 0.5, noise = 0.6):
rand = np.random.rand()
image, annots = sample['img'], sample['annot']
if rand < flip_x:
#print("flip_x")
            image = image[:, ::-1, :]  # horizontal flip, applied with probability flip_x
rows, cols, channels = image.shape
x1 = annots[:, 0].copy()
x2 = annots[:, 2].copy()
x_tmp = x1.copy()
annots[:, 0] = cols - x2
annots[:, 2] = cols - x_tmp
sample = {'img': image, 'annot': annots}
        elif rand < shift:  # translation (shift)
#print("shift")
delta_x = 10
delta_y = 10
rows, cols, channels = image.shape
x1 = annots[:, 0].copy()
y1 = annots[:, 1].copy()
x2 = annots[:, 2].copy()
y2 = annots[:, 3].copy()
emptyImage = image.copy()
            if np.random.rand() < 0.25:  # shift in the +x direction
annots[:,0] = x1 + delta_x
annots[:,2] = x2 + delta_x
for i in range(rows):
if i>=delta_x:
emptyImage[i,:]=image[i-delta_x,:]
else:
emptyImage[i,:]=(0,0,0)
            else:  # shift in the -x direction
annots[:,0] = x1 - delta_x
annots[:,2] = x2 - delta_x
for i in range(rows):
if i< rows-delta_x:
emptyImage[i,:]=image[i+delta_x,:]
else:
emptyImage[i,:]=(0,0,0)
            if np.random.rand() < 0.5:  # shift in the +y direction
annots[:,1] = y1 + delta_y
annots[:,3] = y2 + delta_y
for j in range(cols):
if j>=delta_y:
emptyImage[:,j]=image[:,j-delta_x]
else:
emptyImage[:,j]=(0,0,0)
            else:  # shift in the -y direction
annots[:,1] = y1 - delta_y
annots[:,3] = y2 - delta_y
for j in range(cols):
if j<cols - delta_y:
emptyImage[:,j]=image[:,j+delta_x]
else:
emptyImage[:,j]=(0,0,0)
sample = {'img': emptyImage, 'annot': annots}
elif rand < scale:
#print("scale")
rows, cols, channels = image.shape
x1 = annots[:, 0].copy()
y1 = annots[:, 1].copy()
x2 = annots[:, 2].copy()
y2 = annots[:, 3].copy()
emptyImage = image.copy()
rand_tmp = np.random.rand()
            if rand_tmp < 0.5:  # choose the scale factor (0.9 here, 1.1 otherwise)
degree = 0.9
else:
degree = 1.1
annots[:, 0] = (x1 * [degree,]).astype(np.uint8)
annots[:, 1] = (y1 * [degree,]).astype(np.uint8)
annots[:, 2] = (x2 * [degree,]).astype(np.uint8)
annots[:, 3] = (y2 * [degree,]).astype(np.uint8)
for j in range(cols):
for i in range(rows):
if j/degree > 0 and j/degree < cols and i/degree > 0 and i/degree < rows:
emptyImage[i,j] = image[int(i/degree),int(j/degree)]
else:
emptyImage[i,j] = (0,0,0)
sample = {'img': emptyImage, 'annot': annots}
elif rand < rotate:##TODO
#print("rotate")
rand_tmp = np.random.rand()
if rand_tmp < 0.25:
angle = 15
elif rand_tmp < 0.5:
angle = -15
elif rand_tmp < 0.75:
angle = 30
else:
angle = -30
h, w, channels = image.shape
x1 = annots[:, 0].copy()
y1 = annots[:, 1].copy()
x2 = annots[:, 2].copy()
y2 = annots[:, 3].copy()
anglePi = angle * math.pi / 180.0
cosA = math.cos(anglePi)
sinA = math.sin(anglePi)
X1 = math.ceil(abs(0.5 * h * cosA + 0.5 * w * sinA))
X2 = math.ceil(abs(0.5 * h * cosA - 0.5 * w * sinA))
Y1 = math.ceil(abs(-0.5 * h * sinA + 0.5 * w * cosA))
Y2 = math.ceil(abs(-0.5 * h * sinA - 0.5 * w * cosA))
hh = int(2 * max(Y1, Y2))
ww = int(2 * max(X1, X2))
# X1 = math.ceil(abs( x1 * cosA + y1 * sinA))
# X2 = math.ceil(abs(x2 * cosA + y2 * sinA))
# Y1 = math.ceil(abs(-1 * x1 * sinA + y1 * cosA))
# Y2 = math.ceil(abs(-1 * x2 * sinA + y2 * cosA))
X1 = abs(x1 * cosA + y1 * sinA)
X2 = abs(x2 * cosA + y2 * sinA)
Y1 = abs(-1 * x1 * sinA + y1 * cosA)
Y2 = abs(-1 * x2 * sinA + y2 * cosA)
for i in range(annots.shape[0]):
annots[i,0] = int(min(X1[i], X2[i]))
annots[i,1] = int(min(Y1[i], Y2[i]))
annots[i,2] = int(max(X1[i], X2[i]))
annots[i,3] = int(max(Y1[i], Y2[i]))
emptyImage = np.zeros((hh, ww, channels), np.uint8)
for i in range(hh):
for j in range(ww):
x = cosA * i + sinA * j - 0.5 * ww * cosA - 0.5 * hh * sinA + 0.5 * w
y = cosA * j- sinA * i+ 0.5 * ww * sinA - 0.5 * hh * cosA + 0.5 * h
x = int(x)
y = int(y)
if x > -1 and x < h and y > -1 and y < w :
emptyImage[i, j] = image[x, y]
sample = {'img': emptyImage, 'annot': annots}
# return emptyImage
elif rand < noise:
#print("noise")
rows, cols, channels = image.shape
param=10
            # grayscale range
grayscale=256
newimg=np.zeros((rows,cols,channels),np.uint8)
for x in range(rows):
for y in range(0,cols,2):
r1=np.random.random_sample()
r2=np.random.random_sample()
z1=param*np.cos(2*np.pi*r2)*np.sqrt((-2)*np.log(r1))
z2=param*np.sin(2*np.pi*r2)*np.sqrt((-2)*np.log(r1))
fxy=int(image[x,y,0]+z1)
fxy1=int(image[x,y+1,0]+z2)
#f(x,y)
if fxy<0:
fxy_val=0
elif fxy>grayscale-1:
fxy_val=grayscale-1
else:
fxy_val=fxy
#f(x,y+1)
if fxy1<0:
fxy1_val=0
elif fxy1>grayscale-1:
fxy1_val=grayscale-1
else:
fxy1_val=fxy1
for c in range(channels):
newimg[x,y,c]=fxy_val
newimg[x,y+1,c]=fxy1_val
sample = {'img': newimg, 'annot': annots}
return sample
def HistEqu(img,level=256,mode='RGB'):
'''
:param img: image array
    :param level: number of gray levels (per channel for color images)
    :param mode: 'rgb' for color images, 'gray' for grayscale images
    :return: the histogram-equalized image array
'''
if mode == 'RGB' or mode == 'rgb':
r, g, b = [], [], []
width, height,channels = img.shape
sum_pix = width * height
pix = img.copy()
for x in range(width):
for y in range(height):
r.append(pix[x, y][0])
g.append(pix[x, y][1])
b.append(pix[x, y][2])
r_c = dict(Counter(r))
g_c = dict(Counter(g))
b_c = dict(Counter(b))
r_p,g_p,b_p = [],[],[]
for i in range(level):
if i in r_c :
r_p.append(float(r_c[i]) / sum_pix)
else:
r_p.append(0)
if i in g_c :
g_p.append(float(g_c[i])/sum_pix)
else:
g_p.append(0)
if i in b_c :
b_p.append(float(b_c[i])/sum_pix)
else:
b_p.append(0)
temp_r,temp_g,temp_b = 0,0,0
for i in range(level):
temp_r += r_p[i]
r_p[i] = int(temp_r * (level-1))
temp_b += b_p[i]
b_p[i] = int(temp_b *(level-1))
temp_g += g_p[i]
g_p[i] = int(temp_g*(level -1))
# new_photo = Image.new('RGB',(width,height))
new_photo=np.zeros((width, height,channels),np.uint8)
for x in range(width):
for y in range(height):
new_photo[x,y] = [r_p[pix[x,y][0]],g_p[pix[x,y][1]],b_p[pix[x,y][2]]]
#new_photo.save(outfile)
elif mode == 'gray' or mode == 'GRAY':
width, height = img.shape
sum_pix = width * height
pix = img.copy()
pb = []
for x in range(width):
for y in range(height):
pb.append(pix[x,y])
pc = dict(Counter(pb))
pb = []
for i in range(level):
if i in pc :
pb.append(float(pc[i]) / sum_pix)
else:
pb.append(0)
temp = 0
for i in range(level):
temp += pb[i]
pb[i] = int(temp * (level-1))
new_photo=np.zeros((width,height),np.uint8)
for x in range(width):
for y in range(height):
new_photo[x,y] = pb[pix[x,y]]
#new_photo.save(outfile)
return new_photo
|
StarcoderdataPython
|
3243233
|
<filename>6 kyu/Sequence classifier.py
def sequence_classifier(arr):
check=sorted(arr[i]-arr[i-1] for i in range(1, len(arr)))
low=min(check)
high=max(check)
if low==high==0:
return 5
elif low>=0:
return 2 if low==0 else 1
elif high<=0:
return 4 if high==0 else 3
return 0
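# Quick self-check (added for illustration, not part of the original kata solution).
if __name__ == "__main__":
    assert sequence_classifier([3, 5, 8, 8]) == 2   # non-decreasing (not strictly)
    assert sequence_classifier([3, 5, 8, 9]) == 1   # strictly increasing
    assert sequence_classifier([7, 7, 7]) == 5      # constant
    assert sequence_classifier([9, 5, 5, 1]) == 4   # non-increasing (not strictly)
    assert sequence_classifier([9, 5, 4, 1]) == 3   # strictly decreasing
    assert sequence_classifier([3, 9, 2]) == 0      # unsorted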
|
StarcoderdataPython
|
1788323
|
<reponame>Azure/MachineLearning-MusicGeneration<filename>MusicGeneration/train.py
# Spark configuration and packages specification. The dependencies defined in
# this file will be automatically provisioned for each run that uses Spark.
from __future__ import print_function
import numpy as np
from keras.models import Sequential
from keras.layers.recurrent import LSTM
from keras.layers.core import *
from keras.layers.normalization import *
from keras.callbacks import EarlyStopping, History
from keras.layers import TimeDistributed
from keras.models import model_from_json
import time
from download_data import download_grocery_data
from midi_io import get_data, createSeqNetInputs
from config import cfg
import sys
import os
from azureml.logging import get_azureml_logger
from azure.storage.blob import BlockBlobService
try:
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
except ImportError:
print("Library matplotlib missing. Can't plot")
block_blob_service = BlockBlobService(account_name= cfg.AZURE.ACCOUNT_NAME, account_key=cfg.AZURE.ACCOUNT_KEY)
from azure.storage.blob import PublicAccess
block_blob_service.create_container('musicmodels', public_access=PublicAccess.Container)
#Global parameters
time_per_time_slice = cfg.CONST.TIME_PER_TIME_SLICE #0.02 #200ms #time-unit for each column in the piano roll
highest_note = cfg.CONST.HIGHEST_NOTE #81 # A_6
lowest_note = cfg.CONST.LOWEST_NOTE #33 # A_2
input_dim = cfg.CONST.INPUT_DIM #highest_note - lowest_note + 1
output_dim = cfg.CONST.OUTPUT_DIM #highest_note - lowest_note + 1
MICROSECONDS_PER_MINUTE = cfg.CONST.MICROSECONDS_PER_MINUTE #60000000
#Model parameters
num_units = cfg.MODEL_PARAMS.NUM_UNITS #64
x_seq_length = cfg.MODEL_PARAMS.X_SEQ_LENGTH #50
y_seq_length = cfg.MODEL_PARAMS.Y_SEQ_LENGTH #50
loss_function = cfg.MODEL_PARAMS.LOSS_FUNCTION #'categorical_crossentropy'
optimizer = cfg.MODEL_PARAMS.OPTIMIZER #Adam() #lr=0.0001
batch_size = cfg.MODEL_PARAMS.BATCH_SIZE #64
num_epochs = cfg.MODEL_PARAMS.NUM_EPOCHS #100
# initialize the logger
logger = get_azureml_logger()
# This is how you log scalar metrics
logger.log("X_Seq_length", x_seq_length )
logger.log("y_Seq_length", y_seq_length )
logger.log("Loss Function", loss_function )
logger.log("Batch Size", batch_size )
logger.log("No Epochs", num_epochs )
def createSeq2Seq():
#seq2seq model
#encoder
model = Sequential()
model.add(LSTM(input_dim = input_dim, output_dim = num_units, activation= 'tanh', return_sequences = True ))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(LSTM(num_units, activation= 'tanh'))
#decoder
model.add(RepeatVector(y_seq_length))
num_layers= 2
for _ in range(num_layers):
model.add(LSTM(num_units, activation= 'tanh', return_sequences = True))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(TimeDistributed(Dense(output_dim, activation= 'softmax')))
return model
#Prepare data
dataset_folder = download_grocery_data()
pianoroll_data = get_data(dataset_folder)
input_data, target_data = createSeqNetInputs(pianoroll_data, x_seq_length, y_seq_length)
input_data = input_data.astype(np.bool)
target_data = target_data.astype(np.bool)
#Model
model = createSeq2Seq()
model.summary()
model.compile(loss=loss_function, optimizer = optimizer)
earlystop = EarlyStopping(monitor='loss', patience= 10, min_delta = 0.01 , verbose=0, mode= 'auto')
history = History()
hist = model.fit(input_data, target_data, batch_size = batch_size, nb_epoch=num_epochs, callbacks=[ earlystop, history ])
#print("History:", hist.history )
#Save model and weights to Blob storage
weights_file = 'LSTM_weights_%s' %(time.strftime("%Y%m%d_%H_%M"))
weights_path = '%s/%s' %(cfg.DATA.WEIGHTS_DIR, weights_file)
model.save_weights(weights_path)
print ("Weights saved to: ", weights_path)
block_blob_service.create_blob_from_path('musicmodels', weights_file, weights_path)
model_file = 'LSTM_model_%s' %(time.strftime("%Y%m%d_%H_%M"))
model_path = '%s/%s' %(cfg.DATA.MODEL_DIR, model_file)
json_string= model.to_json()
open(model_path, 'w').write(json_string)
print ("Model saved to: ", model_path)
block_blob_service.create_blob_from_path('musicmodels', model_file, model_path)
# Create the outputs folder - save any outputs you want managed by AzureML here
os.makedirs('./outputs', exist_ok=True)
fig = plt.figure(figsize=(6, 5), dpi=75)
plt.plot(hist.history['loss'])
fig.savefig("./outputs/Loss.png", bbox_inches='tight')
|
StarcoderdataPython
|
117041
|
<filename>src/lgr_advanced/lgr_editor/views/codepoints/list.py<gh_stars>1-10
#! /bin/env python
# -*- coding: utf-8 -*-
"""
list.py -
"""
import logging
from io import StringIO
from django.contrib import messages
from django.core.cache import cache
from django.http import Http404, JsonResponse
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from django.views.generic import TemplateView, FormView
from django.views.generic.base import View, RedirectView
from lgr.char import RangeChar
from lgr.exceptions import LGRException, LGRFormatException, CharInvalidContextRule
from lgr.utils import format_cp
from lgr.validate import check_symmetry, check_transitivity
from lgr_advanced import unidb
from lgr_advanced.lgr_editor.forms import (AddCodepointForm,
EditCodepointsForm)
from lgr_advanced.lgr_editor.utils import slug_to_cp, render_char
from lgr_advanced.lgr_editor.views.codepoints.mixins import CodePointMixin
from lgr_advanced.lgr_editor.views.mixins import LGRHandlingBaseMixin, LGREditMixin
from lgr_advanced.lgr_exceptions import lgr_exception_to_text
from lgr_advanced.utils import (make_lgr_session_key,
LGR_REPERTOIRE_CACHE_KEY,
cp_to_slug,
render_name,
LGR_CACHE_TIMEOUT)
logger = logging.getLogger(__name__)
class CodePointsViewMixin:
def get_context_data(self, **kwargs):
ctx = super().get_context_data(**kwargs)
has_range = False
for char in self.lgr_info.lgr.repertoire.all_repertoire():
if isinstance(char, RangeChar):
has_range = True
break
rule_names = (('', ''),) + tuple((v, v) for v in self.lgr_info.lgr.rules)
cp_form = None
edit_codepoints_form = None
form = ctx.get('form')
if form:
if isinstance(form, AddCodepointForm):
cp_form = form
elif isinstance(form, EditCodepointsForm):
edit_codepoints_form = form
cp_form = cp_form or AddCodepointForm(prefix='add_cp')
edit_codepoints_form = edit_codepoints_form or EditCodepointsForm(prefix='edit_codepoints',
rule_names=rule_names,
tags=((v, v) for v in
self.lgr_info.lgr.all_tags()))
ctx.update({
'cp_form': cp_form,
'edit_codepoints_form': edit_codepoints_form,
'lgr': self.lgr_info.lgr,
'lgr_id': self.lgr_id,
'is_set': self.lgr_info.is_set or self.lgr_set_id is not None,
'has_range': has_range,
})
if self.lgr_set_id:
lgr_set_info = self.session.select_lgr(self.lgr_set_id)
ctx['lgr_set'] = lgr_set_info.lgr
ctx['lgr_set_id'] = self.lgr_set_id
return ctx
class ListCodePointsView(LGRHandlingBaseMixin, CodePointsViewMixin, TemplateView):
"""
List the codepoints defined in an LGR.
"""
template_name = 'lgr_editor/codepoint_list.html'
def post(self, request, *args, **kwargs):
if 'add_cp' in request.POST:
view = AddCodePointView.as_view()
elif 'add-rules' in request.POST or 'add-tags' in request.POST:
view = EditCodePointView.as_view()
else:
raise Http404
return view(request, *args, **kwargs)
class AddCodePointView(LGREditMixin, CodePointsViewMixin, FormView):
form_class = AddCodepointForm
template_name = 'lgr_editor/codepoint_list.html'
def get_prefix(self):
return 'add_cp'
def get_success_url(self):
return reverse('codepoint_list', kwargs={'lgr_id': self.lgr_id})
def form_valid(self, form):
logger.debug("Add CP")
# form was submitted, we parse the value from the form field
cp_or_sequence = form.cleaned_data['codepoint']
override_repertoire = form.cleaned_data['override_repertoire']
try:
self.lgr_info.lgr.add_cp(cp_or_sequence,
validating_repertoire=self.lgr_info.validating_repertoire,
override_repertoire=override_repertoire)
self.session.save_lgr(self.lgr_info)
messages.success(self.request, _('New code point %s added') % format_cp(cp_or_sequence))
except LGRException as ex:
messages.add_message(self.request, messages.ERROR, lgr_exception_to_text(ex))
# do nothing to redirect to myself (success url) to refresh display
# Note: cannot add code point in set mode
return super().form_valid(form)
class EditCodePointView(LGREditMixin, CodePointsViewMixin, FormView):
form_class = EditCodepointsForm
template_name = 'lgr_editor/codepoint_list.html'
def get_success_url(self):
return reverse('codepoint_list', kwargs={'lgr_id': self.lgr_id})
def get_prefix(self):
return 'edit_codepoints'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
rule_names = (('', ''),) + tuple((v, v) for v in self.lgr_info.lgr.rules)
kwargs['rule_names'] = rule_names
kwargs['tags'] = tuple((v, v) for v in self.lgr_info.lgr.all_tags())
return kwargs
def form_valid(self, form):
logger.debug('Edit codepoints')
cd = form.cleaned_data
when = cd['when'] or None
not_when = cd['not_when'] or None
tags = cd['tags']
edited = cd['cp_id']
invalid = []
for cp in [slug_to_cp(c) for c in edited]:
char = self.lgr_info.lgr.get_char(cp)
new_tags = char.tags + tags
try:
if isinstance(char, RangeChar):
# Delete codepoint range from LGR, then add it
self.lgr_info.lgr.del_range(char.first_cp, char.last_cp)
self.lgr_info.lgr.add_range(char.first_cp,
char.last_cp,
comment=char.comment,
when=when or char.when, not_when=not_when or char.not_when,
ref=char.references,
tag=new_tags)
else:
# Delete codepoint from LGR, then add it + its variants
self.lgr_info.lgr.del_cp(char.cp)
self.lgr_info.lgr.add_cp(char.cp,
comment=char.comment,
ref=char.references,
tag=new_tags,
when=when or char.when, not_when=not_when or char.not_when)
for variant in char.get_variants():
self.lgr_info.lgr.add_variant(char.cp,
variant.cp,
variant_type=variant.type,
when=variant.when, not_when=variant.not_when,
comment=variant.comment, ref=variant.references)
except (LGRFormatException, CharInvalidContextRule) as e:
logger.warning('Cannot update char tags/wle:', exc_info=e)
invalid.append(char)
# Need to revert the deletion
if isinstance(char, RangeChar):
self.lgr_info.lgr.add_range(char.first_cp,
char.last_cp,
comment=char.comment,
when=char.when, not_when=char.not_when,
ref=char.references,
tag=char.tags)
else:
self.lgr_info.lgr.add_cp(char.cp,
comment=char.comment,
ref=char.references,
tag=char.tags,
when=char.when, not_when=char.not_when)
for variant in char.get_variants():
self.lgr_info.lgr.add_variant(char.cp,
variant.cp,
variant_type=variant.type,
when=variant.when, not_when=variant.not_when,
comment=variant.comment, ref=variant.references)
self.session.save_lgr(self.lgr_info)
operation = _('Rule') if 'add-rules' in self.request.POST else _('Tag(s)')
operation_lowercase = _('rule') if 'add-rules' in self.request.POST else _('tag(s)')
if len(edited) - len(invalid):
messages.add_message(self.request,
messages.SUCCESS,
_("%(operation)s successfully added to %(nb_cp)s code point(s)") % {
'operation': operation,
'nb_cp': len(edited) - len(invalid)})
if invalid:
messages.add_message(self.request,
messages.WARNING,
_("%(nb_cp)s code points were not updated to avoid invalid %(operation)s") % {
'operation': operation_lowercase,
'nb_cp': len(invalid)})
return super().form_valid(form)
class ListCodePointsJsonView(LGRHandlingBaseMixin, View):
def get(self, request, *args, **kwargs):
udata = unidb.manager.get_db_by_version(self.lgr_info.lgr.metadata.unicode_version)
repertoire_cache_key = make_lgr_session_key(LGR_REPERTOIRE_CACHE_KEY,
request,
self.lgr_id)
repertoire = cache.get(repertoire_cache_key)
if repertoire is None:
# Generate repertoire
repertoire = []
for char in self.lgr_info.lgr.repertoire:
cp_slug = cp_to_slug(char.cp)
kwargs = {'lgr_id': self.lgr_id, 'codepoint_id': cp_slug}
if self.lgr_set_id is not None:
kwargs['lgr_set_id'] = self.lgr_set_id
cp_view_url = reverse('codepoint_view', kwargs=kwargs)
actions = [cp_view_url]
is_range = isinstance(char, RangeChar)
if is_range:
expand_url = reverse('expand_range', kwargs={'lgr_id': self.lgr_id,
'codepoint_id': cp_slug})
actions.append(expand_url)
repertoire.append({
'codepoint_id': cp_slug,
'cp_disp': render_char(char),
'comment': char.comment or '',
'name': render_name(char, udata),
'tags': char.tags,
'variant_number': len(list(char.get_variants())),
'is_range': is_range,
'actions': actions
})
cache.set(repertoire_cache_key, repertoire, LGR_CACHE_TIMEOUT)
response = {'data': repertoire}
return JsonResponse(response)
class ExpandRangesView(LGREditMixin, RedirectView):
"""
Expand all ranges into code points.
"""
pattern_name = 'codepoint_list'
def get(self, request, *args, **kwargs):
try:
self.lgr_info.lgr.expand_ranges()
except LGRException as ex:
messages.add_message(request, messages.ERROR,
lgr_exception_to_text(ex))
self.session.save_lgr(self.lgr_info)
return super().get(request, *args, **kwargs)
class ExpandRangeView(LGREditMixin, CodePointMixin, View):
"""
Expand a range into code points.
"""
def get(self, request, *args, **kwargs):
char = self.lgr_info.lgr.get_char(self.codepoint)
if not isinstance(char, RangeChar):
logger.error("Cannot expand non-range code point")
return redirect('codepoint_list', lgr_id=self.lgr_id)
try:
self.lgr_info.lgr.expand_range(char.first_cp, char.last_cp)
except LGRException as ex:
messages.add_message(request, messages.ERROR,
lgr_exception_to_text(ex))
self.session.save_lgr(self.lgr_info)
return redirect('codepoint_list', lgr_id=self.lgr_id)
class PopulateVariantsView(LGRHandlingBaseMixin, RedirectView):
"""
Automatically populate variants to achieve transitivity and symmetry.
"""
pattern_name = 'codepoint_list'
def get(self, request, *args, **kwargs):
lgr = self.lgr_info.lgr
if 'test' in request.GET:
return JsonResponse({
'result': check_symmetry(lgr, None)[0] and check_transitivity(lgr, None)[0]
})
log_output = StringIO()
ch = logging.StreamHandler(log_output)
ch.setLevel(logging.INFO)
populate_logger = logging.getLogger('lgr.populate')
# Configure module logger - since user may have disabled the 'lgr' logger,
# reset its level
populate_logger.addHandler(ch)
populate_logger.setLevel('INFO')
lgr.populate_variants()
messages.add_message(request, messages.INFO, log_output.getvalue())
messages.add_message(request, messages.SUCCESS, _("Variants populated"))
populate_logger.removeHandler(ch)
log_output.close()
self.session.save_lgr(self.lgr_info)
return super().get(request, *args, **kwargs)
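# For orientation only: the views above are resolved by URL name (e.g. 'codepoint_list',
# 'codepoint_view', 'expand_range') via reverse()/redirect(). A hypothetical urls.py
# wiring -- names and path patterns assumed here, not taken from the project's actual
# configuration -- could look like:
#
#   urlpatterns = [
#       path('<lgr_id>/codepoints/', ListCodePointsView.as_view(), name='codepoint_list'),
#       path('<lgr_id>/codepoints/json/', ListCodePointsJsonView.as_view(), name='codepoint_list_json'),
#       path('<lgr_id>/codepoints/<codepoint_id>/expand/', ExpandRangeView.as_view(), name='expand_range'),
#   ]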
|
StarcoderdataPython
|
13173
|
from .utils import (get_prescription, get_attributes, get_group)
from .models import Disease, Result, Score, Question, SurveyResponse
from .analysis import cardio_risk_group, diabetes_risk_group, stroke_risk_group
from statistics import mean
from celery import shared_task
@shared_task
def worker(session_id):
df, attributes = get_attributes(session_id)
diseases = list(Disease.objects.all())
supported_methods = {
'cardiovascular disease': cardio_risk_group,
'diabetes': diabetes_risk_group,
'stroke': stroke_risk_group
}
question_region = Question.objects.get(label='region')
session_region = (list(SurveyResponse.objects.filter(
session_id=session_id,
question_id=question_region.id))[0]).answer
results = []
for disease in diseases:
illness = disease.illness
result_kwargs = {
'session_id': session_id,
'disease': disease,
'region': session_region
}
if illness not in supported_methods:
result_kwargs['risk_factor'] = 0
result_kwargs['prescription'] = 'Method is currently not supported'
else:
method = supported_methods[illness]
score = method(df, attributes[illness])
result_kwargs['risk_factor'] = float(score)
result_kwargs['label'] = get_group(score)
result_kwargs['prescription'] = get_prescription(score)
result_obj = Result.objects.update_or_create(
session_id=session_id, disease=disease,
defaults=result_kwargs
)
results.append(result_obj[0])
score = (1 - mean([res.risk_factor for res in results])) * 100
Score.objects.create(session_id=session_id, score=score)
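# Worked example of the aggregate score above (illustrative numbers only): with
# per-disease risk factors of 0.2, 0.4 and 0.0, mean(...) = 0.2, so the stored
# Score is (1 - 0.2) * 100 = 80.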
|
StarcoderdataPython
|
4803392
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace:
class Array(object):
NONE = 0
ArrayInt = 1
ArrayLong = 2
ArrayDouble = 3
ArrayFloat = 4
|
StarcoderdataPython
|
3253086
|
<gh_stars>0
from django.views.generic import ListView, DetailView
from django.views.generic.base import TemplateView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin
from .models import MainGenre, Subgenre, Game
### Index ###
class IndexView(ListView):
"""Return the Index page."""
context_object_name = ''
template_name = 'database/index.html'
queryset = Game.objects.all()
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
context['all_games'] = Game.objects.all()
context['all_main_genres'] = MainGenre.objects.all()
context['all_subgenres'] = Subgenre.objects.all()
return context
### API ###
class MainAPIView(LoginRequiredMixin, TemplateView):
"""Return basic information on APIs."""
template_name = 'api/api_main.html'
### Game ###
class GameListView(ListView):
"""Return a list of all Game objects."""
template_name = 'database/game_list.html'
context_object_name = 'all_games'
def get_queryset(self):
return Game.objects.all()
class GameDetailView(DetailView):
"""Return a datail page of the specific Game object."""
model = Game
template_name = 'database/game_detail.html'
class GameAddView(LoginRequiredMixin, CreateView):
"""Create a new Game object."""
model = Game
fields = ['name', 'main_genre', 'subgenres', 'image']
template_name_suffix = '_add'
class GameEditView(LoginRequiredMixin, UpdateView):
"""Edit a Game object."""
model = Game
fields = ['name', 'main_genre', 'subgenres', 'image']
template_name_suffix = '_edit'
class GameDeleteView(LoginRequiredMixin, DeleteView):
"""Delete a Game object."""
model = Game
success_url = reverse_lazy('database:list-game')
### Main genre ###
class MainGenreListView(ListView):
"""Return a list of all MainGenre objects."""
template_name = 'database/main_genre_list.html'
context_object_name = 'all_main_genres'
def get_queryset(self):
return MainGenre.objects.all()
class MainGenreDetailView(DetailView):
"""Return a datail page of the specific MainGenre object."""
model = MainGenre
template_name = 'database/main_genre_detail.html'
class MainGenreAddView(LoginRequiredMixin, CreateView):
"""Create a new MainGenre object."""
model = MainGenre
fields = ['name', 'image']
template_name_suffix = '_add'
class MainGenreEditView(LoginRequiredMixin, UpdateView):
"""Edit a Main genre object."""
model = MainGenre
fields = ['name', 'image']
template_name_suffix = '_edit'
class MainGenreDeleteView(LoginRequiredMixin, DeleteView):
"""Delete a Main genre object."""
model = MainGenre
success_url = reverse_lazy('database:list-main-genre')
### Subgenre ###
class SubgenreListView(ListView):
"""Return a list of all Subgenre objects."""
template_name = 'database/subgenre_list.html'
context_object_name = 'all_subgenres'
def get_queryset(self):
return Subgenre.objects.all()
class SubgenreDetailView(DetailView):
"""Return a datail page of the specific Subgenre object."""
model = Subgenre
template_name = 'database/subgenre_detail.html'
class SubgenreAddView(LoginRequiredMixin, CreateView):
"""Create a new Subgenre object."""
model = Subgenre
fields = ['name', 'main_genre', 'image']
template_name_suffix = '_add'
class SubgenreEditView(LoginRequiredMixin, UpdateView):
"""Edit a Subgenre object."""
model = Subgenre
fields = ['name', 'main_genre', 'image']
template_name_suffix = '_edit'
class SubgenreDeleteView(LoginRequiredMixin, DeleteView):
"""Delete a Subgenre object."""
model = Subgenre
success_url = reverse_lazy('database:list-subgenre')
class ContactView(LoginRequiredMixin, TemplateView):
"""Contact information."""
template_name = 'database/contact.html'
|
StarcoderdataPython
|
102935
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
tkRAD - tkinter Rapid Application Development library
(c) 2013+ <NAME> <<EMAIL>>
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program.
If not, see: http://www.gnu.org/licenses/
"""
# lib imports
from ..widgets import rad_window as RW
from . import rad_xml_frame as XF
class RADXMLWindow (RW.RADWindow):
r"""
general purpose tkRAD Toplevel Window class implementing
tkinter XML widget building;
"""
def _init_mainframe (self, **kw):
r"""
inherited from RADWindowBase class;
"""
# widget inits
self.mainframe = kw.get("mainframe") or XF.RADXMLFrame(self, **kw)
if hasattr(self.mainframe, "set_xml_filename"):
self.mainframe.set_xml_filename(
kw.get("xml_filename") or "mainwindow"
)
# end if
# shortcut inits
self.tk_children = self.mainframe.winfo_children
self.mainframe.quit_app = self._slot_quit_app
# end def
# end class RADXMLWindow
|
StarcoderdataPython
|
46259
|
<reponame>liaohongdong/IPProxy
import time
import json
import random
if __name__ == '__main__':
# a = 10
# while '172.16.17.32:1080':
# a -= 1
# print(a)
# if a <= 0:
# break
# a = ['a', 'b', 'c', 'd']
# a = []
# while a:
# print(time.gmtime().tm_sec)
# time.sleep(3)
# a = {'d': 'd', 'c': 'c'}
# print(json.dumps(a))
# q = '{"d": "d", "c": "c"}'
# aa = json.loads(q)
# print(aa['c'])
a = -1
if -1 and a < 1:
print('1')
else:
print('2')
|
StarcoderdataPython
|
1660519
|
from django.conf import settings
from django.contrib import auth, messages
from django.contrib.auth.decorators import login_required, user_passes_test
from django.db.models import Case, Value, When
from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.http import urlsafe_base64_decode
from django.views.generic import ListView
from django_tables2 import SingleTableMixin
from app.mixins import TabsViewMixin
from app.utils import reverse
from applications.models import Application, DraftApplication
from sponsors.models import Sponsor
from user import forms, models, tokens, providers
from user.forms import SetPasswordForm, PasswordResetForm
from user.mixins import IsOrganizerMixin
from user.models import User
from user.tokens import account_activation_token, password_reset_token
from .tables import OnDutyListTable
def login(request):
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('root'))
# if this is a POST request we need to process the form data
if request.method == 'POST':
form = forms.LoginForm(request.POST)
next_ = request.GET.get('next', '/')
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
user = auth.authenticate(email=email, password=password)
if user and user.is_active:
auth.login(request, user)
resp = HttpResponseRedirect(next_)
c_domain = getattr(settings, 'LOGGED_IN_COOKIE_DOMAIN', getattr(settings, 'HACKATHON_DOMAIN', None))
c_key = getattr(settings, 'LOGGED_IN_COOKIE_KEY', None)
if c_domain and c_key:
try:
resp.set_cookie(c_key, 'biene', domain=c_domain, max_age=settings.SESSION_COOKIE_AGE)
except:
# We don't care if this is not set, we are being cool here!
pass
return resp
else:
form.add_error(None, 'Incorrect username or password. Please try again.')
else:
form = forms.LoginForm()
return render(request, 'login.html', {'form': form})
def signup(request):
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('root'))
# if this is a POST request we need to process the form data
if request.method == 'POST':
form = forms.RegisterForm(request.POST)
if form.is_valid():
email = form.cleaned_data['email']
password = form.cleaned_data['password']
name = form.cleaned_data['name']
if models.User.objects.filter(email=email).first() is not None:
messages.error(request, 'An account with this email already exists')
else:
user = models.User.objects.create_user(email=email, password=password, name=name)
user = auth.authenticate(email=email, password=password)
auth.login(request, user)
return HttpResponseRedirect(reverse('root'))
else:
form = forms.RegisterForm()
return render(request, 'signup.html', {'form': form})
def logout(request):
auth.logout(request)
messages.success(request, 'Successfully logged out!')
resp = HttpResponseRedirect(reverse('account_login'))
c_domain = getattr(settings, 'LOGGED_IN_COOKIE_DOMAIN', None) or getattr(settings, 'HACKATHON_DOMAIN', None)
c_key = getattr(settings, 'LOGGED_IN_COOKIE_KEY', None)
if c_domain and c_key:
try:
resp.delete_cookie(c_key, domain=c_domain)
except:
# We don't care if this is not deleted, we are being cool here!
pass
return resp
def activate(request, uid, token, backend="django.contrib.auth.backends.ModelBackend"):
try:
uid = force_text(urlsafe_base64_decode(uid))
user = User.objects.get(pk=uid)
if request.user.is_authenticated and request.user != user:
messages.warning(request, "Trying to verify wrong user. Log out please!")
return redirect('root')
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
messages.warning(request, "This user no longer exists. Please sign up again!")
return redirect('root')
if account_activation_token.check_token(user, token):
messages.success(request, "Email verified!")
user.email_verified = True
# CHECKING IF THE USER IS A SPONSOR
user_email_domain = user.email.split('@')[1] # Getting the domain of the user's email i.e @ugahacks.com
if Sponsor.objects.filter(email_domain=user_email_domain):
user.is_sponsor = True
user.save()
auth.login(request, user, backend="django.contrib.auth.backends.ModelBackend")
else:
messages.error(request, "Email verification url has expired. Log in so we can send it again!")
return redirect('root')
def password_reset(request):
if request.method == "POST":
form = PasswordResetForm(request.POST, )
if form.is_valid():
email = form.cleaned_data.get('email')
user = User.objects.get(email=email)
msg = tokens.generate_pw_reset_email(user, request)
msg.send()
return HttpResponseRedirect(reverse('password_reset_done'))
else:
return TemplateResponse(request, 'password_reset_form.html', {'form': form})
else:
form = PasswordResetForm()
context = {
'form': form,
}
return TemplateResponse(request, 'password_reset_form.html', context)
def password_reset_confirm(request, uid, token):
"""
View that checks the hash in a password reset link and presents a
form for entering a new password.
"""
try:
uid = force_text(urlsafe_base64_decode(uid))
user = User.objects.get(pk=uid)
except (TypeError, ValueError, OverflowError, User.DoesNotExist):
return TemplateResponse(request, 'password_reset_confirm.html', {'validlink': False})
if password_reset_token.check_token(user, token):
if request.method == 'POST':
form = SetPasswordForm(request.POST)
if form.is_valid():
form.save(user)
return HttpResponseRedirect(reverse('password_reset_complete'))
form = SetPasswordForm()
else:
return TemplateResponse(request, 'password_reset_confirm.html', {'validlink': False})
return TemplateResponse(request, 'password_reset_confirm.html', {'validlink': True, 'form': form})
def password_reset_complete(request):
return TemplateResponse(request, 'password_reset_complete.html', None)
def password_reset_done(request):
return TemplateResponse(request, 'password_reset_done.html', None)
@login_required
def verify_email_required(request):
if request.user.email_verified:
messages.warning(request, "Your email has already been verified")
return HttpResponseRedirect(reverse('root'))
return TemplateResponse(request, 'verify_email_required.html', None)
@login_required
def set_password(request):
if request.user.has_usable_password():
return HttpResponseRedirect(reverse('root'))
if request.method == 'GET':
return TemplateResponse(request, 'callback.html', {'form': SetPasswordForm(), 'email': request.user.email})
else:
form = SetPasswordForm(request.POST)
if form.is_valid():
user = request.user
form.save(user)
auth.login(request, user, backend="django.contrib.auth.backends.ModelBackend")
messages.success(request, 'Password correctly set')
return HttpResponseRedirect(reverse('root'))
return TemplateResponse(request, 'callback.html', {'form': form, 'email': request.user.email})
@login_required
def send_email_verification(request):
if request.user.email_verified:
messages.warning(request, "Your email has already been verified")
return HttpResponseRedirect(reverse('root'))
msg = tokens.generate_verify_email(request.user)
msg.send()
messages.success(request, "Verification email successfully sent")
return HttpResponseRedirect(reverse('root'))
def callback(request, provider=None):
if not provider:
messages.error(request, 'Invalid URL')
return HttpResponseRedirect(reverse('root'))
if request.user.is_authenticated:
return HttpResponseRedirect(reverse('root'))
code = request.GET.get('code', '')
if not code:
messages.error(request, 'Invalid URL')
return HttpResponseRedirect(reverse('root'))
try:
access_token = providers.auth_mlh(code, request)
mlhuser = providers.get_mlh_user(access_token)
except ValueError as e:
messages.error(request, str(e))
return HttpResponseRedirect(reverse('root'))
user = User.objects.filter(mlh_id=mlhuser.get('id', -1)).first()
if user:
auth.login(request, user, backend="django.contrib.auth.backends.ModelBackend")
elif User.objects.filter(email=mlhuser.get('email', None)).first():
messages.error(request, 'An account with this email already exists. Sign in using your password.')
else:
user = User.objects.create_mlhuser(
email=mlhuser.get('email', None),
            name=mlhuser.get('first_name', '') + ' ' + mlhuser.get('last_name', ''),
mlh_id=mlhuser.get('id', None),
)
auth.login(request, user, backend="django.contrib.auth.backends.ModelBackend")
# Save extra info
draft = DraftApplication()
draft.user = user
mlhgender = mlhuser.get('gender','')
if mlhgender == "I prefer not to say" or mlhgender not in dict(Application.GENDERS).values():
mlhgender = "Prefer not to answer"
mlhyear = mlhuser.get('level_of_study','')
if mlhyear == "Not Currently a Student":
mlhyear = Application.C_GRADUATED
elif mlhyear == "University (Master's / Doctoral)":
mlhyear = Application.C_GRAD
else:
mlhyear = Application.C_FRESHMAN
draft.save_dict({
'degree': mlhuser.get('major', ''),
'university': mlhuser.get('school', {}).get('name', ''),
'class_status': mlhyear,
'phone_number': '(' + mlhuser.get('phone_number', '')[2:5] + ') ' + mlhuser.get('phone_number', '')[5:8] + '-' + mlhuser.get('phone_number', '')[8:],
'gender': [k for k, v in Application.GENDERS if v == mlhgender][0],
})
draft.save()
return HttpResponseRedirect(reverse('root'))
class OnDutyListView(TabsViewMixin, SingleTableMixin, ListView, IsOrganizerMixin):
template_name = 'duty_list.html'
table_class = OnDutyListTable
table_pagination = {'per_page': 50}
def get_queryset(self):
return User.objects.all().filter(on_duty=True)
|
StarcoderdataPython
|
1713196
|
#%%
from os import sep
with open("data/data_4.txt") as file:
data = [line for line in file]
data[:5]
#%%
#Part 1
#? 1. Split the passport
join_passport = []
join_passport = "".join(data)
split_passport = join_passport.split(sep="\n\n")
#%%
#? 2. Split the components
total_valid = 0
for fields in split_passport:
split_field = fields.split()
if len(split_field) == 8:
total_valid += 1
elif len(split_field) == 7 and any(s.startswith('cid') for s in split_field) == False:
total_valid += 1
print(total_valid)
# %%
#Part 2
def valid_byr(byr):
return len(str(byr)) == 4 and 1920 <= int(byr) <= 2002
def valid_iyr(iyr):
return len(str(iyr)) == 4 and 2010 <= int(iyr) <= 2020
def valid_eyr(eyr):
return len(str(eyr)) == 4 and 2020 <= int(eyr) <= 2030
def valid_hgt(hgt):
return hgt[-2:] == 'cm' and 150 <= int(hgt[:-2]) <= 193 or hgt[-2:] == 'in'and 59 <= int(hgt[:-2]) <= 76
def valid_hcl(hcl):
accepted_alpha = ['a','b','c','d','e','f']
for character in hcl[1:]:
if (character.isnumeric() or (character in accepted_alpha)) == False:
return False
if hcl[0] == '#' and len(hcl) == 7:
return True
else:
return False
def valid_ecl(ecl):
return ecl in ['amb','blu','brn','gry','grn','hzl','oth']
def valid_pid(pid):
return len(str(pid)) == 9 and pid.isnumeric() == True
def full_valid(any):
if any[:3] == 'byr':
return valid_byr(any[4:])
if any[:3] == 'iyr':
return valid_iyr(any[4:])
if any[:3] == 'eyr':
return valid_eyr(any[4:])
if any[:3] == 'hgt':
return valid_hgt(any[4:])
if any[:3] == 'hcl':
return valid_hcl(any[4:])
if any[:3] == 'ecl':
return valid_ecl(any[4:])
if any[:3] == 'pid':
return valid_pid(any[4:])
    else:
        return False
#%%
valid1 = []
for fields in split_passport:
split_field = fields.split()
if len(split_field) == 8:
valid1.append(split_field)
elif len(split_field) == 7 and any(s.startswith('cid') for s in split_field) == False:
valid1.append(split_field)
#%%
count_valid2 = 0
for passport in valid1:
    list_check = []
    for check2 in passport:
        valid = full_valid(check2)
        list_check.append(valid)
    # only count the passport once all of its fields have been validated
    if False not in list_check:
        count_valid2 += 1
count_valid2
|
StarcoderdataPython
|
1695096
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect
from .models import Question,Answers
from django.template import loader
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.shortcuts import redirect
from django.db.models import Sum
from autentication.models import perfil
@login_required
def form1(request):
    # learning styles questionnaire
# form = MyForm(request.POST)
# if form.is_valid():
answer=Answers.objects.filter(user__username=request.user.get_username())
questions=Question.objects.exclude(form=2).exclude(answers__id__in=answer)[:5]
countanswer=answer.filter(question__form=1).count()
if countanswer >=80 :
return HttpResponseRedirect(reverse('questions:respondido'))
print(countanswer)
print(questions)
# for q in questions:
# print(q.id)
for q in questions:
print(q.form)
porcentagem=(countanswer/80.0)*100.0
porcentagem_completada="width: " + str(porcentagem)+ "%;"
print(porcentagem_completada)
valor_completada=porcentagem
context = {
'questions':questions,
'porcentagem_completada':porcentagem_completada,
'valor_completada':valor_completada,
}
return render(request,'questions/perguntaForm1.html',context)
@login_required
def form2(request):
    # multiple intelligences questionnaire
answer=Answers.objects.filter(user__username=request.user.get_username())
questions=Question.objects.exclude(form=1).exclude(answers__id__in=answer)[:5]
countanswer=answer.filter(question__form=2).count()
if countanswer>=80:
return HttpResponseRedirect(reverse('questions:respondido'))
porcentagem=(countanswer/80.0)*100.0
porcentagem_completada="width: " + str(porcentagem)+ "%;"
print(porcentagem_completada)
valor_completada=porcentagem
context = {
'questions':questions,
'porcentagem_completada':porcentagem_completada,
'valor_completada':valor_completada,
}
return render(request,'questions/perguntaForm2.html',context)
@login_required
def resposta(request):
formulario=request.POST.get('questionform')
print("formulario")
print(formulario)
print("-")
answer=Answers.objects.filter(user__username=request.user.get_username())
countanswer=answer.filter(question__form=int(float(formulario))).count()
if(countanswer>=80):
return HttpResponseRedirect(reverse('pagina_do_usuario:pagina_inicial'))
qid=[]
resp=[]
if request.method == "POST":
qid.append(request.POST.get('questionid1'))
qid.append(request.POST.get('questionid2'))
qid.append(request.POST.get('questionid3'))
qid.append(request.POST.get('questionid4'))
qid.append(request.POST.get('questionid5'))
print("id values")
for i in qid:
print(i)
resp.append(request.POST.get('slider1'))
resp.append(request.POST.get('slider2'))
resp.append(request.POST.get('slider3'))
resp.append(request.POST.get('slider4'))
resp.append(request.POST.get('slider5'))
print("resp values")
for r in resp:
print(r)
for index,i in enumerate(qid):
answer= Answers.objects.filter(question=int(float(i))).order_by('answers_value')
answer=answer[int(float(resp[index]))]
answer.user.add(User.objects.get(username=request.user.get_username()))
answer=Answers.objects.filter(user__username=request.user.get_username())
countanswer=answer.filter(question__form=int(float(formulario))).count()
        if(countanswer>=80):  # if the user has answered all questions of this questionnaire
if int(float(formulario)) == 1:
calculo_estilos_de_aprendizagem(request, answer.filter(question__form=int(float(formulario))) )
pass
if int(float(formulario)) == 2:
calculo_inteligencias_multiplas(request, answer.filter(question__form=int(float(formulario))) )
pass
return HttpResponseRedirect(reverse('questions:respondido'))
else:
if(int(float(formulario))==1):
return redirect(reverse('questions:form1'))
else:
return redirect(reverse('questions:form2'))
return HttpResponseRedirect(reverse('questions:respondido'))
@login_required
def respondido(request):
context={
}
return render(request,'questions/respondido.html',context)
@login_required
def calculo_inteligencias_multiplas(request,answers):
a=answers.filter(question__number__gte=1,question__number__lte=10)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
vl=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=11,question__number__lte=20)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
lm=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=21,question__number__lte=30)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
ve=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=31,question__number__lte=40)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
i=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=41,question__number__lte=50)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
cc=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=51,question__number__lte=60)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
rm=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=61,question__number__lte=70)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
intra=(float(a)/30.0)*100.0
print(a)
a=answers.filter(question__number__gte=71,question__number__lte=80)
a=a.aggregate(Sum('answers_value'))['answers_value__sum']
n=(float(a)/30.0)*100.0
print(a)
request.user.perfil.int_verbal_linguistica=vl
request.user.perfil.int_musical=rm
request.user.perfil.int_logico_matematica=lm
request.user.perfil.int_cinestesico_corporal=cc
request.user.perfil.int_espacial_visual=ve
request.user.perfil.int_intrapessoal=intra
request.user.perfil.int_naturalista=n
request.user.perfil.int_interpessoal=i
request.user.perfil.f_int=True
request.user.perfil.save()
@login_required
def calculo_estilos_de_aprendizagem(request,answers):
    # this could be done better:
    # the questions should be ordered when inserted into the database so that we only
    # need to query ranges instead of issuing 80 separate queries
ativo=[3,5,7,9,13,20,26,27,35,37,41,43,46,48,51,61,67,74,75,77]
reflexivo=[10,16,18,19,28,31,32,34,36,39,42,44,49,55,58,63,65,69,70,79]
teorico=[2,4,6,11,15,17,21,23,25,29,33,45,50,54,60,64,66,71,78,80]
pragmatico=[1,8,12,14,22,24,30,38,40,47,52,53,56,57,59,62,68,72,73,76]
cont=0.0
for i in ativo:
a=answers.get(question__number=i)
cont=cont+a.answers_value
at=(cont/60)*100
cont=0.0
for i in reflexivo:
a=answers.get(question__number=i)
cont=cont + a.answers_value
re=(cont/60)*100
cont=0.0
for i in teorico:
a=answers.get(question__number=i)
cont=cont+a.answers_value
te=(cont/60)*100
cont=0.0
for i in pragmatico:
a=answers.get(question__number=i)
cont=cont+a.answers_value
pr=(cont/60)*100
request.user.perfil.ea_ativo=at
request.user.perfil.ea_reflexivo=re
request.user.perfil.ea_pragmatico=pr
request.user.perfil.ea_teorico=te
request.user.perfil.f_est=True
request.user.perfil.save()
pass
|
StarcoderdataPython
|
4842202
|
# Created by <NAME>.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
def solution(number: int) -> int:
"""
If we list all the natural numbers below 10 that are
multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of
these multiples is 23.
Finish the solution so that it returns the sum of all
the multiples of 3 or 5 below the number passed in.
:param number:
:return:
"""
result = 0
for n in range(1, number):
if n % 3 == 0 or n % 5 == 0:
result += n
return result
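# Quick self-check against the worked example in the docstring (assumed usage):
# the multiples of 3 or 5 below 10 are 3, 5, 6 and 9, which sum to 23.
if __name__ == '__main__':
    assert solution(10) == 23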
|
StarcoderdataPython
|
3339507
|
<gh_stars>1-10
#!/usr/bin/env python3
def say_hi(name, age):
return f"Hi. My name is {name} and I'm {age} years old"
if __name__ == '__main__':
assert say_hi("Kitty", 28) == "Hi. My name is Kitty and I'm 28 years old"
assert say_hi("Peter", 42) == "Hi. My name is Peter and I'm 42 years old"
|
StarcoderdataPython
|
4809700
|
<reponame>lixiaobo230665/ITMO_ICT_WebProgramming_2020-2021_d3310<filename>sutdents D33102 lixiaobo/sutdent's D33102 lixiaobo lr2/Stupid_monkey/Stupid_monkey/urls.py
"""Stupid_monkey URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from Fat_Ape import views
urlpatterns = [
path('admin/', admin.site.urls),
path('Car/<int:Car_id>',views.detail),
    path('getdriver/<int:driver_id>', views.detail),  # use <int:xxx> to pass a value into the view
path('driver_list/', views.driver_list.as_view()),
path('driver_deta/<int:pk>/', views.driver_deta.as_view()),
path('car_deta/<int:pk>/', views.Car_deta.as_view()),
path('delete_dirver_deta/<int:pk>/',views.delete_driver_deta),
path('create_Car/',views.create_view),
path('delete_Car_deta/<int:pk>/',views.delete_Car_data),
path('update_Car/<int:pk>/',views.Update_Car.as_view()),
path('Cars/',views.Cars.as_view())
]
|
StarcoderdataPython
|
173137
|
<gh_stars>1-10
"""
strenum contains an enum that inherits from the plain enum and string.
"""
from enum import Enum
# pylint: disable=too-few-public-methods
class StrEnum(str, Enum):
"""
An enum that has string values.
"""
# see https://docs.python.org/3/library/enum.html?highlight=strenum#others
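# A minimal usage sketch (not part of the library): members are both Enum members and
# plain strings, so they compare equal to their string values.
if __name__ == "__main__":
    class Color(StrEnum):
        """Example enum for the sketch."""
        RED = "red"

    assert Color.RED == "red"
    assert isinstance(Color.RED, str)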
|
StarcoderdataPython
|
3303276
|
from django.contrib.auth import views as auth_views
from django.urls import path
from . import views
urlpatterns = [
path('<int:R_id>', views.dds,name='dds')
]
|
StarcoderdataPython
|
3222730
|
import discord
intents = discord.Intents.default()
intents.members = True
client = discord.Client(
intents=intents
)
ready = False
@client.event
async def on_ready():
    # rebind the module-level flag; a bare assignment here would only create a local name
    global ready
    ready = True
# Note: client.run() below blocks until the bot stops, so a busy-wait on `ready` before
# calling run() would never finish; any "after ready" logic belongs inside on_ready().
bot = client
bot.run('token')
|
StarcoderdataPython
|
140882
|
def FourierPower(db1Trace, inSampleRate, varargin):
'''
STILL NOT IMPLEMENTED IN PYTHON!!!
[DBWINLENSEC, INNSTEP, BLDOPLOT]) computes the spectral power of DB1TRACE
over time. DB1TRACE is divided into segments whose length is specified by
DBWINLENSEC and INSAMPLERATE. INNSTEP controls the overlap between
segments (e.g. if INNSTEP is 4, then segments are spaced with 1/4 of the
    length of the window). Default for DBWINLENSEC is 0.5 sec. Default for
INNSTEP is 4. The power spectrum is returned in DB2POWER. In addition, the
indices of the center of each segment on DB1TRACE are returned in
IN1CNTRIDX.
The code is adapted from the help page 'power spectral density estimate
using fft' and is the same as DoFourierPowerSpectrum
    2016-11-15 QP: Transcribed from DoFourierPowerSpectrum
'''
switch length(varargin)
case 0
dbWinLenSec = 0.5;
inNStep = 5;
blDoPlot = false;
case 1
dbWinLenSec = varargin{1};
inNStep = 5;
blDoPlot = false;
case 2
dbWinLenSec = varargin{1};
inNStep = varargin{2};
blDoPlot = false;
otherwise
dbWinLenSec = varargin{1};
inNStep = varargin{2};
blDoPlot = varargin{3};
end
%Checks that step divides the traces without remainder
inWinLen = round(dbWinLenSec * inSampleRate);
inStepLen = inWinLen / inNStep;
if mod(inStepLen, 1) ~= 0
error('The step taken does not divide the length into segments of equal length');
end
# Checks for the maximal number of chunks that can fit the trace and removes
# the points that might not be used
inNChunk = floor(length(db1Trace) / inStepLen);
db1Trace = db1Trace(1:inNChunk * inStepLen); %Removes unused points
inNChunk = inNChunk - (inNStep - 1);
# Substract the mean of the the trace in order to remove the zeros frequency
# componant
db1Trace = db1Trace - mean(db1Trace);
# Create a matrix, whose columns are samples of the signal whose length is
# windowLengthSec. Successive samples (columns) are overlapping and spaced
# by windowLength/step seconds. Each sample is then multiplied by a Hamming
# window.
in1ChunkBegIdx = ((1:inNChunk) * inStepLen) - (inStepLen - 1);
in1ChunkIdx = ((1:inWinLen) - 1)';
in2ChunkIdx = repmat(in1ChunkBegIdx, inWinLen, 1) + repmat(in1ChunkIdx, 1, inNChunk);
# Convolutes with a hamming taper
db1Taper = hamming(size(in2ChunkIdx, 1), 'periodic');
if size(in2ChunkIdx, 2) ~= 1
db2TaperTrace = db1Trace(in2ChunkIdx).*repmat(db1Taper , 1, size(in2ChunkIdx, 2));
else
db2TaperTrace = db1Trace(in2ChunkIdx)'.*repmat(db1Taper, 1, size(in2ChunkIdx, 2));
end
# Computes the FFT and calculate the spectrum as the modulus of the FFT
myFFT=fft(db2TaperTrace);
myFFT=myFFT(1:size(db2TaperTrace,1)/2+1,:);
# db2Power = (1/(inSampleRate*size(db2TaperTrace,1))).*abs(myFFT).^2;
db2Power = (1/(inSampleRate*sum(db1Taper))).*abs(myFFT).^2;
db2Power(2:end-1,:) = 2*db2Power(2:end-1,:);
# Gets the central index of each window
in1CntrIdx = ceil(median(in2ChunkIdx));
# Plots the result if needed
if blDoPlot
dbTraceLenSec = length(db1Trace)/inSampleRate;
inTopFreq = 120;
figure
imagesc(dbWinLenSec/2: dbWinLenSec/inNStep : dbTraceLenSec-(dbWinLenSec/2), 0:1/dbWinLenSec:inTopFreq, ...
10*log10(db2Power(1:inTopFreq*dbWinLenSec+1,:)));
xlabel('Time (s)')
ylabel('Frequency (Hz)')
set(gca, 'YDir', 'normal')
colorbar
end
return db2Power, in1CntrIdx
|
StarcoderdataPython
|
3350568
|
<gh_stars>0
import time
import numpy as np
"""
Code flow:
item_score = []
for item in candidate_items_to_recommend:
    for item_like in items_the_user_likes:
        score = get_score_from_similar_items()
        item_score.append([item,score])
"""
def build_i2s_i2n(user_item_score_path, item_name_path):
    # read the movie-id to movie-name file
item2name = {}
with open(item_name_path, encoding="ISO-8859-1") as lines:
for line in lines:
item_id, item_name = line.split('|')[:2]
item_id = int(item_id)
item2name[item_id] = item_name
movie_num = len(item2name)
    # count the number of users
user_dict = {}
with open(user_item_score_path, encoding="ISO-8859-1") as lines:
for line in lines:
user_id, item_id, score, timestamp = line.split('\t')
user_id, item_id, score = int(user_id), int(item_id), int(score)
if user_id not in user_dict:
user_dict[user_id] = user_id
user_num = len(user_dict)
    # read the user-id / movie-id / score file
item2score = {}
with open(user_item_score_path, encoding="ISO-8859-1") as lines:
for line in lines:
user_id, item_id, score, timestamp = line.split('\t')
user_id, item_id, score = int(user_id), int(item_id), int(score)
if item_id not in item2score:
item2score[item_id] = [0] * user_num
item2score[item_id][user_id - 1] = score
return item2score, item2name
def cosine(v1, v2):
v1dv2 = v1.dot(v2)
v1n = np.sqrt(np.sum(np.square(v1)))
v2n = np.sqrt(np.sum(np.square(v2)))
return v1dv2 / (v1n * v2n)
def find_similar_item(item2score):
item2sitem = {}
for item, score in item2score.items():
similar_item = []
for item_temp, score_temp in item2score.items():
            if item == item_temp or item > 500 or item_temp > 500:  # skip items with id greater than 500
continue
similarity = cosine(np.array(score), np.array(score_temp))
similar_item.append([item_temp, similarity])
similar_item = sorted(similar_item, reverse=True, key=lambda x: x[1])
item2sitem[item] = similar_item
return item2sitem
def item_cf(user_id, item_id, item2sitem, item2score, topk):
pred_score, count = 0, 0
for similar_item, similarity in item2sitem[item_id][:topk]:
score_from_similar_item = item2score[similar_item][user_id - 1]
pred_score += score_from_similar_item * similarity
if score_from_similar_item != 0:
count += 1
pred_score /= count + 1e-5
return pred_score
def movie_recommand(user_id, item2sitem, item2score, item2name, topk=16):
possible_items = [item_id for item_id, user_score_list in item2score.items() \
if item_id < 500 and user_score_list[user_id - 1] == 0]
result = []
for item_id in possible_items:
score = item_cf(user_id, item_id, item2sitem, item2score, topk)
result.append([item2name[item_id], score])
result = sorted(result, reverse=True, key=lambda x: x[1])
return result[:topk]
if __name__ == '__main__':
user_item_score_path = "./data/ml-100k/u1.base"
item_name_path = "./data/ml-100k/u.item"
item2score, item2name = build_i2s_i2n(user_item_score_path, item_name_path)
s = time.time()
item2sitem = find_similar_item(item2score)
print("计算相似用户耗时:%s s" % (time.time() - s))
    # get movie recommendations for a given user
while True:
        user_id = int(input('Enter the user id to recommend for: '))
recommands = movie_recommand(user_id, item2sitem, item2score, item2name)
for recommand, score in recommands:
print("%.4f | %s" % (score, recommand))
|
StarcoderdataPython
|
1754105
|
import os
import cv2
import pickle
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.keras.applications import InceptionResNetV2, VGG16
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import LSTM, Dense, Dropout, concatenate, GlobalAveragePooling2D, Reshape
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from weights_transfer import load_weights
os.environ["CUDA_VISIBLE_DEVICES"]="1"
def lr_schedule(epoch):
"""Learning Rate Schedule
    Learning rate is scheduled to be reduced after 200, 400, 600 and 800 epochs.
Called automatically every epoch as part of callbacks during training.
# Arguments
epoch (int): The number of epochs
# Returns
lr (float32): learning rate
"""
lr = 0.0003
if epoch > 800:
lr *= 0.5e-3
elif epoch > 600:
lr *= 1e-3
elif epoch > 400:
lr *= 1e-2
elif epoch > 200:
lr *= 1e-1
print('Learning rate: ', lr)
return lr
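# Note: lr_schedule above is defined but never attached to training in this script.
# A minimal way to use it (an assumption, not the original setup) is a Keras
# LearningRateScheduler callback, which could be appended to the callbacks list
# passed to model.fit() further below.
from tensorflow.keras.callbacks import LearningRateScheduler
lr_scheduler_callback = LearningRateScheduler(lr_schedule)  # e.g. callbacks=[checkpoint, lr_scheduler_callback]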
# helper for reading the image data
def get_images(path, size=(250,250,3), normalized=True):
"""
    Load the data needed for model training;
    images have shape (num_images, width, height)
    labels have shape (num_images, width, height) with pixel values 0/1
    Note: the training data directory is structured as follows
path/
0/ - neg
1/ - pos
"""
files_neg = os.listdir(os.path.join(path, '0'))
files_neg.sort()
files_pos = os.listdir(os.path.join(path, '1'))
files_pos.sort()
images = np.zeros([len(files_neg)+len(files_pos),size[0],size[1],size[2]])
for i, file in enumerate(files_neg):
img = cv2.imread(os.path.join(path, '0', file))
img = cv2.resize(img, (size[0], size[1]), cv2.INTER_AREA)
if normalized:
images[i] = img/255
else:
images[i] = img
for i, file in enumerate(files_pos):
img = cv2.imread(os.path.join(path, '1', file))
img = cv2.resize(img, (size[0], size[1]), cv2.INTER_AREA)
if normalized:
images[i+len(files_neg)] = img/255
else:
images[i+len(files_neg)] = img
return images
# DNN model for the clinical data
def create_mlp(input_dim, nlogits=1536):
model = Sequential()
model.add(Dense(32, input_dim=input_dim, activation="relu", name="dense_clinic_1"))
model.add(Dense(nlogits, activation="relu", name="dense_clinic_2"))
model.add(Reshape((1, nlogits), name="reshape_clinic_1"))
return model
# clinical data branch
input_dim = 3
nlogits = 1536
mlp = create_mlp(input_dim,nlogits)
#mlp.summary()
# CNN feature-input branches
XSIZE,YSIZE,ZSIZE = 250,250,3
# transfer learning setting
# 0 - no initial weights
# 1 - imagenet pretrained weights
# 2 - transfer learning weights from SMC-net
#
transfer_learning_style = 2
# gray-image input
if transfer_learning_style==0:
ir2_gray = InceptionResNetV2(include_top=False,input_shape=(XSIZE,YSIZE,ZSIZE))
elif transfer_learning_style==1:
gray_weight_file = "./weights/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5"
ir2_gray = InceptionResNetV2(include_top=False, weights=gray_weight_file, input_shape=(XSIZE, YSIZE, ZSIZE))
else:
ir2_gray = InceptionResNetV2(include_top=False,input_shape=(XSIZE,YSIZE,ZSIZE))
gray_weight_file = "./saved_models/InceptionResNetV2_GRAY.554.h5"
gray_weight = load_weights(gray_weight_file)
ir2_gray.set_weights(gray_weight)
model_gray = GlobalAveragePooling2D(name='GlobalAverage2D_gray')(ir2_gray.output)
model_gray = Reshape((1,-1),name="reshape_all_gray")(model_gray)
cnn_gray = Model(inputs=ir2_gray.input, outputs=model_gray, name="model_gray")
for layer in ir2_gray.layers:
layer.trainable = True
layer._name = layer._name + str("_gray")
# color-image input
if transfer_learning_style==0:
ir2_color = InceptionResNetV2(include_top=False,input_shape=(XSIZE,YSIZE,ZSIZE))
elif transfer_learning_style==1:
color_weight_file = "./weights/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5"
    ir2_color = InceptionResNetV2(include_top=False, weights=color_weight_file, input_shape=(XSIZE, YSIZE, ZSIZE))
else:
ir2_color = InceptionResNetV2(include_top=False,input_shape=(XSIZE,YSIZE,ZSIZE))
color_weight_file = "./saved_models/InceptionResNetV2_COLOR.549.h5"
color_weight = load_weights(color_weight_file)
ir2_color.set_weights(color_weight)
model_color = GlobalAveragePooling2D(name='GlobalAverage2D_color')(ir2_color.output)
model_color = Reshape((1,-1),name="reshape_all_color")(model_color)
cnn_color = Model(inputs=ir2_color.input, outputs=model_color,name="model_color")
for layer in ir2_color.layers:
layer.trainable = True
layer._name = layer._name + str("_color")
# fuse the three input branches
combinedInput = concatenate(axis=1, inputs=[mlp.output, cnn_gray.output, cnn_color.output], name="concatenate_all")
# LSTM model
outputs = LSTM(128,dropout=0.25,input_shape=combinedInput.shape,name="LSTM_all")(combinedInput)
outputs = Dense(128, activation="relu", name="dense_output_1")(outputs)
outputs = Dropout(0.5, name="dropout_output_1")(outputs)
outputs = Dense(32, activation="relu", name="dense_output_2")(outputs)
outputs = Dense(1, activation="sigmoid", name="dense_output_3")(outputs)
model = Model(inputs=[mlp.input, cnn_gray.input, cnn_color.input], outputs=outputs)
#model.summary()
# compile the model
model.compile(loss='binary_crossentropy', optimizer=Adam(lr=0.0003), metrics=['acc'])
# load the clinical data
pos = pd.read_excel('./data/MultiModality/training/clinic/positive.xlsx')
neg = pd.read_excel('./data/MultiModality/training/clinic/negative.xlsx')
# clinical data preprocessing
#
# male   - 1
# female - 0
#
neg['sex'][neg['sex'] == '女'] = 0
neg['sex'][neg['sex'] == '男'] = 1
pos['sex'][pos['sex'] == '女'] = 0
pos['sex'][pos['sex'] == '男'] = 1
# combine the clinical data into features and labels
posv = pos.values.astype(np.float32)
negv = neg.values.astype(np.float32)
x = np.concatenate((negv, posv))
y_train_clinic = np.concatenate( (np.zeros(len(negv)), np.ones(len(posv))) )
x_train_clinic = StandardScaler().fit_transform(x)
print("用于训练的临床数据为", len(x_train_clinic), " 标签为", len(y_train_clinic))
# 黑白图像数据
x_train_gray = get_images("./data/MultiModality/training/B-mode/")
x_train_color = get_images("./data/MultiModality/training/CDFI/")
# size of the training data
print("Grayscale training images:", x_train_gray.shape)
print("Color training images:", x_train_color.shape)
# number of training epochs
epochs = 1000
batch_size = 32
# save model weights
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'MMC-Net.{epoch:03d}.h5'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)
checkpoint = ModelCheckpoint(filepath=filepath,
monitor='val_acc',
verbose=1,
save_best_only=True,
save_weights_only=True)
callbacks = [checkpoint]
history = model.fit([x_train_clinic, x_train_gray, x_train_color],y_train_clinic,batch_size=batch_size,
validation_split=0.15, epochs=epochs, callbacks=callbacks, shuffle=True)
history_saved = True
if history_saved:
history_file = os.path.join('./history', 'mm_history-epoch-'+str(epochs)+'.dat')
with open(history_file,'wb') as f:
pickle.dump(history.history, f)
print("Network training history successfully saved!")
|
StarcoderdataPython
|
4816910
|
<gh_stars>0
# Copyright 2021 MosaicML. All Rights Reserved.
from composer.algorithms.scale_schedule.scale_schedule import ScaleSchedule as ScaleSchedule
|
StarcoderdataPython
|
95425
|
# coding: utf-8
import re
import sre_constants
from ..roles import Resolvable
def validate_regex(regex):
"""
:param str regex: A regular expression to validate.
:raises: ValueError
"""
try:
re.compile(regex)
except sre_constants.error as e:
raise ValueError('Invalid regular expression: {0}'.format(e))
def validate(value_or_var, validator):
if isinstance(value_or_var, Resolvable):
for value in value_or_var.iter_possible_values():
validator(value)
else:
validator(value_or_var)
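# Usage sketch (illustrative, not part of the module): validating a plain value
# against the regex validator defined above.
#
#   validate(r'^[a-z]+$', validate_regex)       # passes silently
#   validate(r'([unbalanced', validate_regex)   # raises ValueError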
|
StarcoderdataPython
|
1644521
|
import asyncio
import aioredis
from zdppy_redis import AsyncRedis
r = AsyncRedis(host="localhost", port=6379, database=0)
async def set():
"""
测试设置和获取
:return:
"""
await r.set("my-key", "value")
value = await r.get("my-key")
print(value)
async def hset():
"""
测试设置和获取
:return:
"""
await r.hset("hash", {"key1": "value1", "key2": "value2", "key3": 123})
value = await r.hgetall("hash")
print(value)
if __name__ == "__main__":
# asyncio.run(set())
asyncio.run(hset())
|
StarcoderdataPython
|
3258321
|
# -*- coding: utf-8 -*-
"""
Description:
Global fixtures for unittests
Author:
<NAME>
Date:
12/14/20
"""
|
StarcoderdataPython
|
1701118
|
#misc
import sys
#data processing
import numpy as np
import pandas as pd
#homemade
sys.path.append('../../utils')
from helpers import load_tsv
def clean_beehive():
filename = 'Beehive_Logger'
df = load_tsv(filename)
print("Now cleaning Beehive Geyser Data")
print("Initial Shape of df: {0}".format(df.shape))
#drop rows with duplicate eruption_time_epoch records
df = df.drop_duplicates(subset='eruption_time_epoch')
print("Shape of df after dropping duplicates: {0}".format(df.shape))
#drop rows with nan in eruption_time_epoch
df = df.dropna(subset=['eruption_time_epoch'],axis=0)
print("Shape of df after dropping nan: {0}".format(df.shape))
if 'data' not in filename:
filename = '../../data/' + filename
df['eruption_time_epoch'] = pd.to_numeric(df['eruption_time_epoch'])
#add datetime objects
df['eruption_time'] = pd.to_datetime(df['eruption_time_epoch'],unit='s')
    assert (df['eruption_time_epoch']>=0).all(), "Shouldn't be negative numbers"
df.to_pickle(filename+'.pkl')
return df
|
StarcoderdataPython
|
3398390
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/1/29 11:20 AM
# @Author : hysrc
# @File : into_elastic.py
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
from config import ELASTICSEARCH_HOSTS
from pipeline.elastic import Ips, Domains
import json
from datetime import datetime
def save_ip():
filename = "/Users/boyhack/Desktop/ips.result.txt"
with open(filename) as f:
data = json.load(f)
# ip = elastic.Ips()
for ip_data in data:
ip = Ips(**ip_data)
ip.published_from = datetime.now()
a = ip.save()
print(a)
def save_domains():
filename = "/Users/boyhack/Desktop/domain.result.txt"
with open(filename) as f:
data = json.load(f)
# ip = elastic.Ips()
for domain in data:
dd = Domains(**domain)
a = dd.save()
print(a)
if __name__ == '__main__':
# es = Elasticsearch(ELASTICSEARCH_HOSTS)
# s = Search(using=es, index='w12scan', doc_type="ips").query("match", target="192.168.127.12")
# print(s.count())
# for hit in s:
# d = hit.to_dict()
# print(d)
save_domains()
save_ip()
pass
|
StarcoderdataPython
|
3329402
|
"""
Trading-Technical-Indicators (tti) python library
File name: _machine_learning_mlp.py
Implements a Multilayer Perceptron classification model for the Machine
Learning features of the tti library.
"""
import time
import datetime
import math
from ._machine_learning_api import MachineLearningAPI
from ._machine_learning_data import MachineLearningData
from ..utils.constants import ALL_TI_FEATURES
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
class MachineLearningMLP(MachineLearningAPI):
"""
Machine Learning MLP class implementation.
"""
def __init__(self):
super().__init__(model_type='MLP')
def mlTrainModel(self, input_data, pool_size=None, verbose=False):
"""
Trains a MLP machine learning model for prices direction predictions.
Args:
input_data (pandas.DataFrame): The input data. Required input
columns are ``high``, ``low``, ``open``, ``close`` and
``volume``. The index is of type ``pandas.DatetimeIndex``. The
minimum number of data required is 60 periods.
pool_size (int, default=None): Pool size for parallel computing
when concurrency applies. When None, then the created
processes are equal to the number of the available cpu cores.
verbose (bool, default=False): If set to True, processing
information is sent to the console.
Raises:
WrongTypeForInputParameter: Input argument has wrong type
WrongValueForInputParameter: Unsupported value for input argument
NotEnoughInputData: Not enough data for calculating the indicator
TypeError: Type error occurred when validating the ``input_data``
ValueError: Value error occurred when validating the ``input_data``
NoFeaturesSelectedForMLData: No features selected for ML data
NotEnoughDataForMachineLearningTraining: Not enough data for ML.
"""
if verbose:
print('\nTrain MLP model for tti features')
# Create ML data
start_time = time.time()
data = MachineLearningData(
input_data=input_data,
ti_features=ALL_TI_FEATURES,
include_close_feature=True,
include_volume_feature=True,
verbose=verbose).createMLData().values
if verbose:
print('- ml data creation time:',
datetime.timedelta(seconds=int(time.time() - start_time)))
self._model_details['number_of_training_instances'] = data.shape
# Adapt value to sklearn n_jobs values
if pool_size is None:
pool_size = -1
# Standardize data
self._model_details['scaler_used'] = True
self._scaler = StandardScaler()
self._scaler.fit(X=data[:, :-1], y=data[:, -1])
data[:, :-1] = self._scaler.transform(X=data[:, :-1])
# Split data to training and test set
x_train, x_test, y_train, y_test = train_test_split(
data[:, :-1], data[:, -1], test_size=0.20, train_size=None,
random_state=None, shuffle=False, stratify=None)
# MLP model tuning, look for the best performing model
if verbose:
print('- searching for the best performing MLP model')
start_time = time.time()
hidden_layer_sizes = []
# Number of hidden nodes: geometric pyramid rule proposed by Masters
# (1993). For a three layer network with n input and m output neurons,
# the hidden layer would have sqrt(n*m) neurons.
size = int(math.sqrt((data.shape[1] - 1) * 2))
hidden_layer_sizes.append((size,))
# The number of hidden neurons should be between the size of the input
# layer and the size of the output layer.
size = int(((data.shape[1] - 1) + 2) / 2)
hidden_layer_sizes.append((size,))
# The number of hidden neurons should be 2/3 the size of the input
# layer, plus the size of the output layer.
size = int(((2/3.) * (data.shape[1] - 1)) + 2)
hidden_layer_sizes.append((size,))
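        # Worked example (illustrative): if the ML data had 60 feature columns and 2
        # output classes, the three candidate sizes above would be
        # int(sqrt(60 * 2)) = 10, int((60 + 2) / 2) = 31 and int((2/3.) * 60 + 2) = 42.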
model_grid_search = GridSearchCV(
estimator=MLPClassifier(
max_iter=20000, batch_size=min(128, x_train.shape[0]),
learning_rate='constant'),
param_grid={
'hidden_layer_sizes': hidden_layer_sizes,
'solver': ['sgd', 'adam'],
'activation': ['identity', 'tanh', 'relu'],
'learning_rate_init': [1e-1, 1e-2, 1e-3, 1e-4],
},
n_jobs=pool_size,
refit=False,
verbose=0,
return_train_score=True)
model_grid_search.fit(X=data[:, :-1], y=data[:, -1])
best_parameters = model_grid_search.best_params_
if verbose:
print('- model tuning finished in:',
datetime.timedelta(seconds=int(time.time() - start_time)))
print('- model tuning best score:', model_grid_search.best_score_)
print('- model tuning best parameters:', best_parameters)
# Selected MLP model, calculate test score
if verbose:
print('- calculating training and test score for the selected ' +
'MLP model')
selected_model = MLPClassifier(
max_iter=20000, batch_size=min(128, x_train.shape[0]),
learning_rate='constant', **best_parameters)
selected_model.fit(X=x_train, y=y_train)
self._model_details['training_score'] = selected_model.score(
X=x_train, y=y_train)
self._model_details['test_score'] = selected_model.score(
X=x_test, y=y_test)
if verbose:
print('- selected model training score:',
self._model_details['training_score'])
print('- selected model test score:',
self._model_details['test_score'])
# Selected MLP model, train on full input data (final model)
if verbose:
print('- training selected model on the whole dataset')
self._model = MLPClassifier(
max_iter=20000, batch_size=min(128, data.shape[0]),
learning_rate='constant', **best_parameters)
self._model.fit(X=data[:, :-1], y=data[:, -1])
if verbose:
print('- model details:', self._model_details)
|
StarcoderdataPython
|
3329956
|
<reponame>zengaorong/yahaha
# coding:utf8
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import xlwt
import MySQLdb
def export(host,user,password,dbname,table_name,outputpath):
conn = MySQLdb.connect(host,user,password,dbname,charset='utf8')
cursor = conn.cursor()
count = cursor.execute('select * from '+table_name)
    print(count)
    # Reset the cursor position
    cursor.scroll(0,mode='absolute')
    # Fetch all results
    results = cursor.fetchall()
    # Get the column names from MySQL
    fields = cursor.description
    workbook = xlwt.Workbook()
    sheet = workbook.add_sheet('table_'+table_name,cell_overwrite_ok=True)
    # Write the header row with the column names
    for field in range(0,len(fields)):
        sheet.write(0,field,fields[field][0])
    # Fetch and write the data rows
    for row in range(1,len(results)+1):
        for col in range(0,len(fields)):
            sheet.write(row,col,u'%s'%results[row-1][col])
    workbook.save(outputpath)
    cursor.close()
    conn.close()
# Note: xlwt writes legacy .xls workbooks, so use an .xls extension:
# export('localhost','root','mysql','test','datetest',r'datetest.xls')
|
StarcoderdataPython
|
69649
|
<filename>2020/day15_test.py
import day15
ITERS = 2020
def test_example1():
intro_seq = [0, 3, 6]
final_move = day15.play_game(intro_seq, ITERS)
assert final_move == 436, f"Error: {final_move}"
def test_example2():
intro_seq = [1, 3, 2]
final_move = day15.play_game(intro_seq, ITERS)
assert final_move == 1, f"Error: {final_move}"
def test_example3():
intro_seq = [2, 1, 3]
final_move = day15.play_game(intro_seq, ITERS)
assert final_move == 10, f"Error: {final_move}"
def test_example4():
intro_seq = [1, 2, 3]
final_move = day15.play_game(intro_seq, ITERS)
assert final_move == 27, f"Error: {final_move}"
def test_example5():
intro_seq = [2, 3, 1]
final_move = day15.play_game(intro_seq, ITERS)
assert final_move == 78, f"Error: {final_move}"
def test_example6():
intro_seq = [3, 2, 1]
final_move = day15.play_game(intro_seq, ITERS)
assert final_move == 438, f"Error: {final_move}"
def test_example7():
intro_seq = [3, 1, 2]
final_move = day15.play_game(intro_seq, ITERS)
assert final_move == 1836, f"Error: {final_move}"
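# Reference sketch (an assumption, not the real day15 module): these tests
# exercise the 2020 "memory game", where each turn speaks 0 if the previous
# number was new, otherwise the gap since that number was last spoken. A
# minimal implementation of the expected interface could look like this.
def _reference_play_game(intro_seq, iters):
    last_seen = {n: turn + 1 for turn, n in enumerate(intro_seq[:-1])}
    last = intro_seq[-1]
    for turn in range(len(intro_seq), iters):
        spoken = turn - last_seen[last] if last in last_seen else 0
        last_seen[last] = turn
        last = spoken
    return last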
|
StarcoderdataPython
|
3216929
|
<filename>audio_zen/model/module/causal_conv.py
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
from audio_zen.fvcore.nn import FlopCountAnalysis, flop_count_str
class Chomp1d(nn.Module):
def __init__(self, chomp_size):
super(Chomp1d, self).__init__()
self.chomp_size = chomp_size
def forward(self, x):
return x[:, :, :-self.chomp_size].contiguous()
class TemporalBlock(nn.Module):
def __init__(self, n_inputs, n_outputs, kernel_size, stride, dilation, padding, dropout=0.2):
super(TemporalBlock, self).__init__()
self.conv1 = weight_norm(
nn.Conv1d(n_inputs, n_outputs, kernel_size, stride=stride, padding=padding, dilation=dilation)
)
self.chomp1 = Chomp1d(padding)
self.relu1 = nn.ReLU()
self.dropout1 = nn.Dropout(dropout)
self.conv2 = weight_norm(nn.Conv1d(n_outputs, n_outputs, kernel_size,
stride=stride, padding=padding, dilation=dilation))
self.chomp2 = Chomp1d(padding)
self.relu2 = nn.ReLU()
self.dropout2 = nn.Dropout(dropout)
self.net = nn.Sequential(self.conv1, self.chomp1, self.relu1, self.dropout1,
self.conv2, self.chomp2, self.relu2, self.dropout2)
self.downsample = nn.Conv1d(n_inputs, n_outputs, kernel_size=1) if n_inputs != n_outputs else None
self.relu = nn.ReLU()
self.init_weights()
def init_weights(self):
self.conv1.weight.data.normal_(0, 0.01)
self.conv2.weight.data.normal_(0, 0.01)
if self.downsample is not None:
self.downsample.weight.data.normal_(0, 0.01)
def forward(self, x):
out = self.net(x)
res = x if self.downsample is None else self.downsample(x)
return self.relu(out + res)
class TemporalConvNet(nn.Module):
def __init__(self, num_inputs, num_channels, kernel_size=2, dropout=0.2):
"""
Args:
            num_inputs: number of input channels.
            num_channels: list with the number of output channels of each
                temporal block; its length sets the number of levels.
            kernel_size: kernel size shared by all convolutions.
            dropout: dropout probability inside each temporal block.
        Inputs: x
            - x: tensor of shape [B, C, T]
"""
super(TemporalConvNet, self).__init__()
layers = []
num_levels = len(num_channels)
for i in range(num_levels):
dilation_size = 2 ** i
in_channels = num_inputs if i == 0 else num_channels[i - 1]
out_channels = num_channels[i]
layers += [TemporalBlock(in_channels, out_channels, kernel_size, stride=1, dilation=dilation_size,
padding=(kernel_size-1) * dilation_size, dropout=dropout)]
self.network = nn.Sequential(*layers)
def forward(self, x):
return self.network(x)
class CausalConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, encoder_activate_function, **kwargs):
"""
Args:
            in_channels: number of input channels.
            out_channels: number of output channels.
            encoder_activate_function: name of a torch.nn activation class,
                e.g. "ReLU" or "ELU".
            **kwargs: extra keyword arguments forwarded to nn.Conv2d.
"""
super().__init__()
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 2),
stride=(2, 1),
padding=(0, 1),
            **kwargs  # note: padding=(0, 1) pads the frequency axis by 0 and the time axis by 1 on each side
)
self.norm = nn.BatchNorm2d(out_channels)
self.activation = getattr(nn, encoder_activate_function)()
def forward(self, x):
"""
2D Causal convolution.
Args:
x: [B, C, F, T]
Returns:
[B, C, F, T]
"""
x = self.conv(x)
x = x[:, :, :, :-1] # chomp size
x = self.norm(x)
x = self.activation(x)
return x
class CausalTransConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, is_last=False, output_padding=(0, 0)):
super().__init__()
self.conv = nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 2),
stride=(2, 1),
output_padding=output_padding
)
self.norm = nn.BatchNorm2d(out_channels)
if is_last:
self.activation = nn.ReLU()
else:
self.activation = nn.ELU()
def forward(self, x):
"""
2D Causal convolution.
Args:
x: [B, C, F, T]
Returns:
[B, C, F, T]
"""
x = self.conv(x)
x = x[:, :, :, :-1] # chomp size
x = self.norm(x)
x = self.activation(x)
return x
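# Minimal shape-check sketch (illustrative only; the tensor sizes are assumed,
# not taken from the original project). It shows that the causal blocks keep
# the time dimension unchanged while transforming channels/frequency.
if __name__ == "__main__":
    tcn = TemporalConvNet(num_inputs=16, num_channels=[32, 32, 64])
    x = torch.randn(4, 16, 100)            # [B, C, T]
    print(tcn(x).shape)                    # torch.Size([4, 64, 100]), T preserved
    enc = CausalConvBlock(1, 16, "ReLU")
    spec = torch.randn(4, 1, 161, 200)     # [B, C, F, T]
    print(enc(spec).shape)                 # frequency downsampled, T still 200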
|
StarcoderdataPython
|
1674140
|
<filename>yardstick/benchmark/scenarios/networking/vnf_generic.py
# Copyright (c) 2016-2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" NSPerf specific scenario definition """
from __future__ import absolute_import
import logging
import yaml
from yardstick.benchmark.scenarios import base
from yardstick.common.utils import import_modules_from_package, itersubclasses
from yardstick.network_services.collector.subscriber import Collector
from yardstick.network_services.vnf_generic import vnfdgen
from yardstick.network_services.vnf_generic.vnf.base import GenericVNF
from yardstick.network_services.traffic_profile.base import TrafficProfile
from yardstick import ssh
LOG = logging.getLogger(__name__)
class SSHError(Exception):
"""Class handles ssh connection error exception"""
pass
class SSHTimeout(SSHError):
"""Class handles ssh connection timeout exception"""
pass
class IncorrectConfig(Exception):
"""Class handles incorrect configuration during setup"""
pass
class IncorrectSetup(Exception):
"""Class handles incorrect setup during setup"""
pass
class SshManager(object):
def __init__(self, node):
super(SshManager, self).__init__()
self.node = node
self.conn = None
def __enter__(self):
"""
args -> network device mappings
returns -> ssh connection ready to be used
"""
try:
self.conn = ssh.SSH.from_node(self.node)
self.conn.wait()
except SSHError as error:
LOG.info("connect failed to %s, due to %s", self.node["ip"], error)
# self.conn defaults to None
return self.conn
def __exit__(self, exc_type, exc_val, exc_tb):
if self.conn:
self.conn.close()
class NetworkServiceTestCase(base.Scenario):
"""Class handles Generic framework to do pre-deployment VNF &
Network service testing """
__scenario_type__ = "NSPerf"
def __init__(self, scenario_cfg, context_cfg): # Yardstick API
super(NetworkServiceTestCase, self).__init__()
self.scenario_cfg = scenario_cfg
self.context_cfg = context_cfg
# fixme: create schema to validate all fields have been provided
with open(scenario_cfg["topology"]) as stream:
self.topology = yaml.load(stream)["nsd:nsd-catalog"]["nsd"][0]
self.vnfs = []
self.collector = None
self.traffic_profile = None
@classmethod
def _get_traffic_flow(cls, scenario_cfg):
try:
with open(scenario_cfg["traffic_options"]["flow"]) as fflow:
flow = yaml.load(fflow)
except (KeyError, IOError, OSError):
flow = {}
return flow
@classmethod
def _get_traffic_imix(cls, scenario_cfg):
try:
with open(scenario_cfg["traffic_options"]["imix"]) as fimix:
imix = yaml.load(fimix)
except (KeyError, IOError, OSError):
imix = {}
return imix
@classmethod
def _get_traffic_profile(cls, scenario_cfg, context_cfg):
traffic_profile_tpl = ""
private = {}
public = {}
try:
with open(scenario_cfg["traffic_profile"]) as infile:
traffic_profile_tpl = infile.read()
except (KeyError, IOError, OSError):
raise
return [traffic_profile_tpl, private, public]
def _fill_traffic_profile(self, scenario_cfg, context_cfg):
traffic_profile = {}
flow = self._get_traffic_flow(scenario_cfg)
imix = self._get_traffic_imix(scenario_cfg)
traffic_mapping, private, public = \
self._get_traffic_profile(scenario_cfg, context_cfg)
traffic_profile = vnfdgen.generate_vnfd(traffic_mapping,
{"imix": imix, "flow": flow,
"private": private,
"public": public})
return TrafficProfile.get(traffic_profile)
@classmethod
def _find_vnf_name_from_id(cls, topology, vnf_id):
return next((vnfd["vnfd-id-ref"]
for vnfd in topology["constituent-vnfd"]
if vnf_id == vnfd["member-vnf-index"]), None)
def _resolve_topology(self, context_cfg, topology):
for vld in topology["vld"]:
if len(vld["vnfd-connection-point-ref"]) > 2:
raise IncorrectConfig("Topology file corrupted, "
"too many endpoint for connection")
node_0, node_1 = vld["vnfd-connection-point-ref"]
node0 = self._find_vnf_name_from_id(topology,
node_0["member-vnf-index-ref"])
node1 = self._find_vnf_name_from_id(topology,
node_1["member-vnf-index-ref"])
if0 = node_0["vnfd-connection-point-ref"]
if1 = node_1["vnfd-connection-point-ref"]
try:
nodes = context_cfg["nodes"]
nodes[node0]["interfaces"][if0]["vld_id"] = vld["id"]
nodes[node1]["interfaces"][if1]["vld_id"] = vld["id"]
nodes[node0]["interfaces"][if0]["dst_mac"] = \
nodes[node1]["interfaces"][if1]["local_mac"]
nodes[node0]["interfaces"][if0]["dst_ip"] = \
nodes[node1]["interfaces"][if1]["local_ip"]
nodes[node1]["interfaces"][if1]["dst_mac"] = \
nodes[node0]["interfaces"][if0]["local_mac"]
nodes[node1]["interfaces"][if1]["dst_ip"] = \
nodes[node0]["interfaces"][if0]["local_ip"]
except KeyError:
raise IncorrectConfig("Required interface not found,"
"topology file corrupted")
@classmethod
def _find_list_index_from_vnf_idx(cls, topology, vnf_idx):
return next((topology["constituent-vnfd"].index(vnfd)
for vnfd in topology["constituent-vnfd"]
if vnf_idx == vnfd["member-vnf-index"]), None)
def _update_context_with_topology(self, context_cfg, topology):
for idx in topology["constituent-vnfd"]:
vnf_idx = idx["member-vnf-index"]
nodes = context_cfg["nodes"]
node = self._find_vnf_name_from_id(topology, vnf_idx)
list_idx = self._find_list_index_from_vnf_idx(topology, vnf_idx)
nodes[node].update(topology["constituent-vnfd"][list_idx])
def map_topology_to_infrastructure(self, context_cfg, topology):
""" This method should verify if the available resources defined in pod.yaml
match the topology.yaml file.
:param topology:
:return: None. Side effect: context_cfg is updated
"""
for node, node_dict in context_cfg["nodes"].items():
cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
with SshManager(node_dict) as conn:
exit_status = conn.execute(cmd)[0]
if exit_status != 0:
raise IncorrectSetup("Node's %s lacks ip tool." % node)
for interface in node_dict["interfaces"]:
network = node_dict["interfaces"][interface]
keys = ["vpci", "local_ip", "netmask",
"local_mac", "driver", "dpdk_port_num"]
missing = set(keys).difference(network)
if missing:
raise IncorrectConfig("Require interface fields '%s' "
"not found, topology file "
"corrupted" % ', '.join(missing))
# 3. Use topology file to find connections & resolve dest address
self._resolve_topology(context_cfg, topology)
self._update_context_with_topology(context_cfg, topology)
@classmethod
def get_vnf_impl(cls, vnf_model):
""" Find the implementing class from vnf_model["vnf"]["name"] field
:param vnf_model: dictionary containing a parsed vnfd
:return: subclass of GenericVNF
"""
import_modules_from_package(
"yardstick.network_services.vnf_generic.vnf")
expected_name = vnf_model['id']
impl = (c for c in itersubclasses(GenericVNF)
if c.__name__ == expected_name)
try:
return next(impl)
except StopIteration:
raise IncorrectConfig("No implementation for %s", expected_name)
def load_vnf_models(self, context_cfg):
""" Create VNF objects based on YAML descriptors
:param context_cfg:
:return:
"""
vnfs = []
for node in context_cfg["nodes"]:
LOG.debug(context_cfg["nodes"][node])
with open(context_cfg["nodes"][node]["VNF model"]) as stream:
vnf_model = stream.read()
vnfd = vnfdgen.generate_vnfd(vnf_model, context_cfg["nodes"][node])
vnf_impl = self.get_vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
vnf_instance = vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
vnf_instance.name = node
vnfs.append(vnf_instance)
return vnfs
def setup(self):
""" Setup infrastructure, provission VNFs & start traffic
:return:
"""
# 1. Verify if infrastructure mapping can meet topology
self.map_topology_to_infrastructure(self.context_cfg, self.topology)
# 1a. Load VNF models
self.vnfs = self.load_vnf_models(self.context_cfg)
# 1b. Fill traffic profile with information from topology
self.traffic_profile = self._fill_traffic_profile(self.scenario_cfg,
self.context_cfg)
# 2. Provision VNFs
try:
for vnf in self.vnfs:
LOG.info("Instantiating %s", vnf.name)
vnf.instantiate(self.scenario_cfg, self.context_cfg)
except RuntimeError:
for vnf in self.vnfs:
vnf.terminate()
raise
# 3. Run experiment
# Start listeners first to avoid losing packets
traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
for traffic_gen in traffic_runners:
traffic_gen.listen_traffic(self.traffic_profile)
# register collector with yardstick for KPI collection.
self.collector = Collector(self.vnfs, self.traffic_profile)
self.collector.start()
# Start the actual traffic
for traffic_gen in traffic_runners:
LOG.info("Starting traffic on %s", traffic_gen.name)
traffic_gen.run_traffic(self.traffic_profile)
def run(self, result): # yardstick API
""" Yardstick calls run() at intervals defined in the yaml and
produces timestamped samples
:param result: dictionary with results to update
:return: None
"""
for vnf in self.vnfs:
# Result example:
# {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
LOG.debug("vnf")
result.update(self.collector.get_kpi(vnf))
def teardown(self):
""" Stop the collector and terminate VNF & TG instance
:return
"""
self.collector.stop()
for vnf in self.vnfs:
LOG.info("Stopping %s", vnf.name)
vnf.terminate()
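# Illustrative sketch of the inputs this scenario consumes (assumed structure
# inferred from the accessors above; file names and values are placeholders):
#   scenario_cfg = {
#       "topology": "vnf_topology.yaml",            # nsd:nsd-catalog descriptor
#       "traffic_profile": "traffic_profile.yaml",  # template fed to vnfdgen
#       "traffic_options": {"flow": "flow.yaml", "imix": "imix.yaml"},
#   }
#   context_cfg = {
#       "nodes": {
#           "vnf__1": {
#               "ip": "10.0.0.2",
#               "VNF model": "vnf_model.yaml",
#               "interfaces": {"xe0": {"vpci": "...", "local_ip": "...",
#                                      "netmask": "...", "local_mac": "...",
#                                      "driver": "...", "dpdk_port_num": "..."}},
#           },
#       },
#   }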
|
StarcoderdataPython
|
1753031
|
<filename>mi/instrument/seabird/sbe26plus/ooicore/driver.py
from mi.instrument.seabird.sbe26plus.driver import SBE26PlusInstrumentDriver
class InstrumentDriver(SBE26PlusInstrumentDriver):
"""
Specialization for this version of the 26 driver
"""
|
StarcoderdataPython
|
1692865
|
<reponame>tanglef/geomloss
"""
Optimization routines
============================================
"""
import os
import torch
import matplotlib
import matplotlib.pyplot as plt
from math import isnan
import numpy as np
from scipy.optimize import minimize
import warnings
warnings.filterwarnings("ignore",".*GUI is implemented.*") # annoying warning with pyplot and pause...
def mypause(interval):
"""Pause matplotlib without stealing focus."""
backend = plt.rcParams['backend']
if backend in matplotlib.rcsetup.interactive_bk:
figManager = matplotlib._pylab_helpers.Gcf.get_active()
if figManager is not None:
canvas = figManager.canvas
if canvas.figure.stale:
canvas.draw()
canvas.start_event_loop(interval)
return
def model_to_numpy(model, grad=False) :
"""
The fortran routines used by scipy.optimize expect float64 vectors
instead of the gpu-friendly float32 matrices: we need conversion routines.
"""
if not all( param.is_contiguous() for param in model.parameters() ) :
raise ValueError("Scipy optimization routines are only compatible with parameters given as *contiguous* tensors.")
if grad :
tensors = [param.grad.data.view(-1).cpu().numpy() for param in model.parameters()]
else :
tensors = [param.data.view(-1).cpu().numpy() for param in model.parameters()]
return np.ascontiguousarray( np.hstack(tensors) , dtype='float64' )
def numpy_to_model(model, vec) :
i = 0
for param in model.parameters() :
offset = param.numel()
param.data = torch.from_numpy(vec[i:i+offset]).view(param.data.size()).type(param.data.type())
i += offset
if i != len(vec) :
raise ValueError("The total number of variables in model is not the same as in 'vec'.")
def fit_model(Model, method = "L-BFGS", tol = 1e-10, nits = 500, nlogs = 10,
lr = .1, eps = .01, maxcor = 10, gtol = 1e-10,
display = False, **params) :
"""
"""
# Load parameters =====================================================================================================
# We'll minimize the model's cost
# with respect to the model's parameters using a standard gradient-like
# descent scheme. As we do not perform any kind of line search,
# this algorithm may diverge if the learning rate is too large !
# For robust optimization routines, you may consider using
# the scipy.optimize API with a "parameters <-> float64 vector" wrapper.
use_scipy = False
if method == "Adam" :
optimizer = torch.optim.Adam(Model.parameters(), lr=lr, eps=eps)
elif method == "L-BFGS" :
optimizer = torch.optim.SGD(Model.parameters(), lr=1.) # We'll just use its "zero_grad" method...
use_scipy = True
method = 'L-BFGS-B'
options = dict( maxiter = nits,
ftol = tol, # Don't bother fitting the shapes to float precision
gtol = gtol,
maxcor = maxcor # Number of previous gradients used to approximate the Hessian
)
else :
raise NotImplementedError('Optimization method not supported : "'+method+'". '\
'Available values are "Adam" and "L-BFGS".')
costs = []
# Define the "closures" associated to our model =======================================================================
fit_model.nit = -1 ; fit_model.breakloop = False
def closure(final_it=False):
"""
Encapsulates a problem + display iteration into a single callable statement.
This wrapper is needed if you choose to use LBFGS-like algorithms, which
(should) implement a careful line search along the gradient's direction.
"""
fit_model.nit += 1 ; it = fit_model.nit
# Minimization loop --------------------------------------------------------------------
optimizer.zero_grad() # Reset the gradients (PyTorch syntax...).
cost = Model.forward()
costs.append(cost.item()) # Store the "cost" for plotting.
cost.backward() # Backpropagate to compute the gradient.
# Break the loop if the cost's variation is below the tolerance param:
if ( len(costs)>1 and abs(costs[-1]-costs[-2]) < tol ) or fit_model.nit == nits-1:
fit_model.breakloop = True
if display:
Model.plot(nit=fit_model.nit, cost=cost.item())
# print("{}: {:2.4f}".format(fit_model.nit, cost.item()))
return cost
# Scipy-friendly wrapper ------------------------------------------------------------------------------------------------
def numpy_closure(vec, final_it=False) :
"""
Wraps the PyTorch closure into a 'float64'-vector routine,
as expected by scipy.optimize.
"""
vec = lr * vec.astype('float64') # scale the vector, and make sure it's given as float64
numpy_to_model(Model, vec) # load this info into Model's parameters
c = closure(final_it).item() # compute the cost and accumulate the gradients wrt. the parameters
dvec_c = lr * model_to_numpy(Model, grad=True) # -> return this gradient, as a properly rescaled numpy vector
return (c, dvec_c)
# Actual minimization loop ===============================================================================================
if use_scipy :
res = minimize( numpy_closure, # function to minimize
model_to_numpy(Model), # starting estimate
method = method,
jac = True, # matching_problems also returns the gradient
options = options )
numpy_closure(res.x, final_it=True)
# print(res.message)
else :
for i in range(nits+1) : # Fixed number of iterations
optimizer.step(closure) # "Gradient descent" step.
if fit_model.breakloop :
closure(final_it=True)
break
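# Hypothetical usage sketch (not part of the original module): fit_model expects
# an nn.Module whose forward() takes no argument and returns a scalar cost and,
# when display=True, a plot(nit=..., cost=...) method.
if __name__ == "__main__":
    class _Quadratic(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.x = torch.nn.Parameter(torch.tensor([3.0, -2.0]))
        def forward(self):
            return ((self.x - 1.0) ** 2).sum()
    toy = _Quadratic()
    fit_model(toy, method="Adam", lr=0.1, nits=200, tol=1e-12)
    print(toy.x.data)  # should move close to tensor([1., 1.])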
|
StarcoderdataPython
|
4809857
|
<gh_stars>10-100
from django.core.management.base import BaseCommand
from public_project.models import SearchTag
from public_project.tag_cache_creator import rebuild_cache_for_tag
class Command(BaseCommand):
args = ''
help = 'Rebuilding of all search tag cache entries for the project'
def handle(self, *args, **options):
tags = SearchTag.objects.all()
for tag in tags:
            print(tag)
rebuild_cache_for_tag(tag)
|
StarcoderdataPython
|
84300
|
<filename>nbs/dl2/selfmade/exp/nb_05.py
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/05_anneal.ipynb
from exp.nb_04 import *
def create_learner(model_func, loss_func, data):
return Learner(*model_func(data), loss_func, data)
class Recorder(Callback):
def begin_fit(self): self.lrs, self.losses = [], []
def after_batch(self):
if not self.in_train: return
self.lrs.append(self.opt.param_groups[-1]['lr'])
self.losses.append(self.loss.detach().cpu())
def plot_lr(self): plt.plot(self.lrs)
def plot_loss(self): plt.plot(self.losses)
class ParamScheduler(Callback):
_order = 1
def __init__(self, pname, sched_func): self.pname, self.sched_func = pname, sched_func
def set_param(self):
for pg in self.opt.param_groups:
pg[self.pname] = self.sched_func(self.n_epochs / self.epochs)
def begin_batch(self):
if self.in_train: self.set_param()
def sched_lin(start, end):
def _inner(start, end, pos): return start + pos * (end-start)
return partial(_inner, start, end)
def annealer(f):
def _inner(start, end): return partial(f, start, end)
return _inner
@annealer
def sched_lin(start, end, pos): return start + pos * (end - start)
@annealer
def sched_cos(start, end, pos): return start + (1 + math.cos(math.pi*(1-pos))) * (end - start) / 2
@annealer
def sched_no(start, end, pos): return start
@annealer
def sched_exp(start, end, pos): return start * (end/start) ** pos
def cos_1cycle_anneal(start, high, end):
return [sched_cos(start, high), sched_cos(high, end)]
# This monkey-patch is there to be able to plot tensors
torch.Tensor.ndim = property(lambda x: len(x.shape))
def combine_scheds(pcts, scheds): # combine_scheds([0.3, 0.7], [sched_cos(0.3, 0.6), sched_cos(0.6, 0.2)])
assert sum(pcts) == 1.
pcts = tensor([0] + listify(pcts))
assert torch.all(pcts >= 0)
pcts = torch.cumsum(pcts, 0)
# print(pcts) # pcts should be [0, 0.3, 1] now
def _inner(pos): # with pos = 0.5
idx = (pos >= pcts).nonzero().max()
# print(idx) # should be 1 here
        if idx == 2: idx = 1 # clamp for pos == 1.0, where pos >= every entry of pcts
actual_pos = (pos - pcts[idx]) / (pcts[idx+1] - pcts[idx]) # should be (0.5 - 0.3) / (1 - 0.3) = 0.2 / 0.7
# print(actual_pos)
return scheds[idx](actual_pos)
return _inner
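# Quick sanity check of combine_scheds (illustrative, not from the notebook):
# the combined schedule starts at 0.3, peaks at 0.6 after 30% of training and
# anneals back down to 0.2 by the end.
def _check_combine_scheds():
    sched = combine_scheds([0.3, 0.7], [sched_cos(0.3, 0.6), sched_cos(0.6, 0.2)])
    return sched(0.0), sched(0.3), sched(1.0)  # ~0.3, ~0.6, ~0.2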
|
StarcoderdataPython
|
158087
|
from typing import Union
import numpy as np
def bspline_basis_manual(
knot_vector_t: Union[list, tuple],
knot_i: int = 0,
p: int = 0,
nti: int = 1,
verbose: bool = False,
):
"""Computes the B-spline polynomial basis,
currently limited to degree constant, linear, or quadratic.
Args:
knot_vector_t (float array): [t0, t1, t2, ... tI]
len(knot_vector_t) = (I + 1)
(I + 1) knots with (I) knot spans
must have length of two or more
must be a non-decreasing sequence
knot_i (int): index in the list of
possible knot_index values = [0, 1, 2, ... I]
p (int): polynomial degree
(p=0: constant, p=1: linear, p=2: quadratic, p=3: cubic, etc.),
currently limited to p = [0, 1, 2].
nti (int): number of time intervals for t in per-knot-span
interval [t_i, t_{i+1}],
nti = 1 is default
verbose (bool): prints polynomial or error checking
Returns:
tuple: arrays of (t, f(t)) as time t and polynomial evaluated at t; or,
AssertionError: if input is out of range
"""
num_knots = len(knot_vector_t)
MAX_DEGREE = 2
try:
assert (
len(knot_vector_t) >= 2
), "Error: knot vector length must be two or larger."
assert knot_i >= 0, "Error: knot index knot_i must be non-negative."
assert p >= 0, "Error: polynomial degree p must be non-negative."
assert (
p <= MAX_DEGREE
), f"Error: polynomial degree p exceeds maximum of {MAX_DEGREE}"
assert nti >= 1, "Error: number of time intervals nti must be 1 or greater."
assert knot_i <= (
num_knots - 1
), "Error: knot index knot_i exceeds knot vector length minus 1."
num_knots_i_to_end = len(knot_vector_t[knot_i:])
assert (
num_knots_i_to_end >= p + 1
), "Error: insufficient remaining knots for local support."
except AssertionError as error:
if verbose:
print(error)
return error
knots_lhs = knot_vector_t[0:-1] # left-hand-side knot values
knots_rhs = knot_vector_t[1:] # right-hand-side knot values
knot_spans = np.array(knots_rhs) - np.array(knots_lhs)
dt = knot_spans / nti
# assert all([dti >= 0 for dti in dt]), "Error: knot vector is decreasing."
if not all([dti >= 0 for dti in dt]):
raise ValueError("Error: knot vector is decreasing.")
# improve index notation
# t = [knots_lhs[i] + k * dt[i] for i in np.arange(num_knots-1) for k in np.arange(nti)]
t = [
knots_lhs[k] + j * dt[k]
for k in np.arange(num_knots - 1)
for j in np.arange(nti)
]
t.append(knot_vector_t[-1])
t = np.array(t)
# y = np.zeros((num_knots - 1) * nti + 1)
# y = np.zeros(len(t))
f_of_t = np.zeros(len(t))
if verbose:
print(f"Knot vector: {knot_vector_t}")
print(f"Number of knots = {num_knots}")
print(f"Knot index: {knot_i}")
print(f"Left-hand-side knot vector values: {knots_lhs}")
print(f"Right-hand-side knot vector values: {knots_rhs}")
print(f"Knot spans: {knot_spans}")
print(f"Number of time intervals per knot span: {nti}")
print(f"Knot span deltas: {dt}")
if p == 0:
f_of_t[knot_i * nti : knot_i * nti + nti] = 1.0
if verbose:
print(f"t = {t}")
print(f"f(t) = {f_of_t}")
if p == 1:
for (eix, te) in enumerate(t): # e for evaluations, ix for index
if te >= knot_vector_t[knot_i] and te < knot_vector_t[knot_i + 1]:
f_of_t[eix] = (te - knot_vector_t[knot_i]) / (
knot_vector_t[knot_i + 1] - knot_vector_t[knot_i]
)
elif te >= knot_vector_t[knot_i + 1] and te < knot_vector_t[knot_i + 2]:
f_of_t[eix] = (knot_vector_t[knot_i + 2] - te) / (
knot_vector_t[knot_i + 2] - knot_vector_t[knot_i + 1]
)
if p == 2:
for (eix, te) in enumerate(t): # e for evaluations, ix for index
if te >= knot_vector_t[knot_i] and te < knot_vector_t[knot_i + 1]:
a_1 = (te - knot_vector_t[knot_i]) / (
knot_vector_t[knot_i + 2] - knot_vector_t[knot_i]
)
a_2 = (te - knot_vector_t[knot_i]) / (
knot_vector_t[knot_i + 1] - knot_vector_t[knot_i]
)
f_of_t[eix] = a_1 * a_2
elif te >= knot_vector_t[knot_i + 1] and te < knot_vector_t[knot_i + 2]:
b_1 = (te - knot_vector_t[knot_i]) / (
knot_vector_t[knot_i + 2] - knot_vector_t[knot_i]
)
b_2 = (knot_vector_t[knot_i + 2] - te) / (
knot_vector_t[knot_i + 2] - knot_vector_t[knot_i + 1]
)
b_3 = (knot_vector_t[knot_i + 3] - te) / (
knot_vector_t[knot_i + 3] - knot_vector_t[knot_i + 1]
)
b_4 = (te - knot_vector_t[knot_i + 1]) / (
knot_vector_t[knot_i + 2] - knot_vector_t[knot_i + 1]
)
f_of_t[eix] = (b_1 * b_2) + (b_3 * b_4)
elif te >= knot_vector_t[knot_i + 2] and te < knot_vector_t[knot_i + 3]:
c_1 = (knot_vector_t[knot_i + 3] - te) / (
knot_vector_t[knot_i + 3] - knot_vector_t[knot_i + 1]
)
c_2 = (knot_vector_t[knot_i + 3] - te) / (
knot_vector_t[knot_i + 3] - knot_vector_t[knot_i + 2]
)
f_of_t[eix] = c_1 * c_2
return t, f_of_t
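if __name__ == "__main__":
    # Illustrative demo (an assumption, not part of the original module):
    # evaluate the quadratic basis function N_{0,2}(t) on the uniform knot
    # vector [0, 1, 2, 3] with ten evaluation intervals per knot span.
    t_demo, f_demo = bspline_basis_manual(
        [0.0, 1.0, 2.0, 3.0], knot_i=0, p=2, nti=10, verbose=True
    )
    print(t_demo)
    print(f_demo)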
|
StarcoderdataPython
|
178446
|
<reponame>DavidNemeskey/pytorch_lm<gh_stars>0
#!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
"""Implements a very basic version of LSTM."""
import logging
import torch
import torch.nn as nn
from torch.autograd import Variable
from pytorch_lm.dropout import create_hidden_dropout
from pytorch_lm.utils.lang import public_dict
class InitHidden(object):
"""
Provides init_hidden to subclasses. Requires that self.hidden size is
available.
"""
def init_hidden(self, batch_size):
"""Returns the Variables for the hidden states."""
weight = next(self.parameters()).data
dims = (1, batch_size, self.hidden_size)
ret = (Variable(weight.new_full(dims, 0)),
Variable(weight.new_full(dims, 0)))
return ret
class LstmLayer(nn.Module, InitHidden):
"""
A reimplementation of the LSTM cell. (Actually, a layer of LSTM cells.)
As a reminder: input size is batch_size x seq_len x input_features.
"""
def __init__(self, input_size, hidden_size, forget_bias=1):
"""
Args:
- input_size: the number of input features
- hidden_size: the number of cells
- forget_bias: the value of the forget bias [1]
"""
super(LstmLayer, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.w_i = nn.Parameter(torch.Tensor(input_size, 4 * hidden_size))
self.w_h = nn.Parameter(torch.Tensor(hidden_size, 4 * hidden_size))
self.b = nn.Parameter(torch.Tensor(4 * hidden_size))
        # f_bias is cleared by the forward pre-hook so that it runs only once
# The forget gate is initialized before the first forward call,
# because the weights are initialized after __init__()
self.f_bias = self.forget_bias = forget_bias
self.register_forward_pre_hook(LstmLayer.initialize_f)
logging.getLogger('pytorch_lm.rnn.lstm').info(
'LSTM layer {} with input size {} and hidden size {}, '
'forget bias {}'.format(self.__class__.__name__, self.input_size,
self.hidden_size, self.forget_bias))
@classmethod
def initialize_f(cls, module, _):
"""Initializes the forget gate."""
if module.f_bias is not None:
_, b_f, _, _ = module.b.data.chunk(4, 0)
b_f.fill_(module.f_bias)
module.f_bias = None
def forward(self, input, hidden):
"""
Runs the layer on the sequence input. Calls forward_one in a loop.
"""
outputs = []
seq_dim = 1 # if self.batch_first else 0
h_t, c_t = (h.squeeze(0) for h in hidden)
# chunk() cuts batch_size x 1 x input_size chunks from input
for input_t in input.chunk(input.size(1), dim=seq_dim):
values = input_t.squeeze(seq_dim) # From input to output
h_t, c_t = self.forward_one(values, (h_t, c_t))
values = h_t
outputs.append(values)
return (torch.stack(outputs, seq_dim),
(h_t.unsqueeze(0), c_t.unsqueeze(0)))
def forward_one(self, input, hidden):
"""Of course, forward must be implemented in subclasses."""
raise NotImplementedError()
def __repr__(self):
return '{}({})'.format(self.__class__.__name__, public_dict(self))
class PytorchLstmLayer(nn.LSTM, InitHidden):
"""
Wraps the PyTorch LSTM object. Only a few parameters are supported; most
have fixed values.
"""
keys = ['input_size', 'hidden_size', 'num_layers', 'bias',
'batch_first', 'dropout', 'bidirectional']
fixed_values = {'num_layers': 1, 'batch_first': True,
'dropout': 0, 'bidirectional': False}
def __init__(self, *args, **kwargs):
kwargs.update(zip(self.keys, args))
for key, value in self.fixed_values.items():
if key in kwargs and kwargs[key] != value:
                logging.getLogger('pytorch_lm.rnn.lstm').warning(
'Key {} is overridden in {} to {}'.format(
key, self.__class__.__name__, value))
kwargs[key] = value
try:
super(PytorchLstmLayer, self).__init__(
kwargs.pop('input_size'), kwargs.pop('hidden_size'), **kwargs)
except KeyError as ke:
raise TypeError(
'__init__() missing 1 required positional argument: {}'.format(ke))
class DefaultLstmLayer(LstmLayer):
"""
The default LSTM implementation. No dropout, as that is handled outside.
"""
def forward_one(self, input, hidden):
h_t, c_t = hidden
ifgo = input.matmul(self.w_i) + h_t.matmul(self.w_h)
ifgo += self.b
i, f, g, o = ifgo.chunk(4, 1)
i = torch.sigmoid(i)
f = torch.sigmoid(f)
g = torch.tanh(g)
o = torch.sigmoid(o)
c_t = f * c_t + i * g
h_t = o * torch.tanh(c_t)
return h_t, c_t
class DropoutLstmLayer(LstmLayer):
"""An LstmLayer with H->H dropout."""
def __init__(self, input_size, hidden_size, forget_bias=1, hh_dropout=0):
super(DropoutLstmLayer, self).__init__(
input_size, hidden_size, forget_bias)
self.hh_dropout = hh_dropout
self.do = self.create_dropouts()
for i, do in enumerate(self.do):
self.add_module('do{}'.format(i), do)
def create_dropouts(self):
"""
Creates the ``list`` of :class:`Dropout` objects used by the cell.
This is one method to be implemented; this default implementation
returns a single :class:`Dropout` object created with
:func:`create_dropout`.
"""
return [create_hidden_dropout(self.hh_dropout)]
def forward(self, inputs, hidden):
"""
Runs the layer on the sequence inputs. Initializes the :class:`Dropout`
objects and calls forward_one in a loop.
"""
for do in self.do:
do.reset_noise()
return super(DropoutLstmLayer, self).forward(inputs, hidden)
class MoonLstmLayer(DropoutLstmLayer):
"""
Following Moon et al. (2015), dropout (with a per-sequence mask) is applied
on c_t. Note: this sucks.
"""
def forward_one(self, input, hidden):
h_t, c_t = hidden
ifgo = input.matmul(self.w_i) + h_t.matmul(self.w_h)
ifgo += self.b
i, f, g, o = ifgo.chunk(4, 1)
i = torch.sigmoid(i)
f = torch.sigmoid(f)
g = torch.tanh(g)
o = torch.sigmoid(o)
c_t = self.do[0](f * c_t + i * g)
h_t = o * torch.tanh(c_t)
return h_t, c_t
class TiedGalLstmLayer(DropoutLstmLayer):
"""
Following Gal and Ghahramani (2016), per-sequence dropout is applied on
both the input and h_t. Also known as VD-LSTM. With tied gates.
"""
def forward_one(self, input, hidden):
h_t, c_t = hidden
ifgo = input.matmul(self.w_i) + self.do[0](h_t).matmul(self.w_h)
ifgo += self.b
i, f, g, o = ifgo.chunk(4, 1)
i = torch.sigmoid(i)
f = torch.sigmoid(f)
g = torch.tanh(g)
o = torch.sigmoid(o)
c_t = f * c_t + i * g
h_t = o * torch.tanh(c_t)
return h_t, c_t
class UntiedGalLstmLayer(DropoutLstmLayer):
"""
Following Gal and Ghahramani (2016), per-sequence dropout is applied on
both the input and h_t. Also known as VD-LSTM. With untied weights.
"""
def create_dropouts(self):
return [create_hidden_dropout(self.hh_dropout) for _ in range(4)]
def forward_one(self, input, hidden):
h_t, c_t = hidden
w_ii, w_if, w_ig, w_io = self.w_i.chunk(4, 1)
w_hi, w_hf, w_hg, w_ho = self.w_h.chunk(4, 1)
b_i, b_f, b_g, b_o = self.b.chunk(4, 0)
i = torch.sigmoid(input.matmul(w_ii) +
self.do[0](h_t).matmul(w_hi) + b_i)
f = torch.sigmoid(input.matmul(w_if) +
self.do[1](h_t).matmul(w_hf) + b_f)
g = torch.tanh(input.matmul(w_ig) +
self.do[2](h_t).matmul(w_hg) + b_g)
o = torch.sigmoid(input.matmul(w_io) +
self.do[3](h_t).matmul(w_ho) + b_o)
c_t = f * c_t + i * g
h_t = o * torch.tanh(c_t)
return h_t, c_t
class SemeniutaLstmLayer(DropoutLstmLayer):
"""Following Semeniuta et al. (2016), dropout is applied on g_t."""
def forward_one(self, input, hidden):
h_t, c_t = hidden
ifgo = input.matmul(self.w_i) + h_t.matmul(self.w_h)
ifgo += self.b
i, f, g, o = ifgo.chunk(4, 1)
i = torch.sigmoid(i)
f = torch.sigmoid(f)
g = torch.tanh(g)
o = torch.sigmoid(o)
c_t = f * c_t + i * self.do[0](g)
h_t = o * torch.tanh(c_t)
return h_t, c_t
class MerityLstmLayer(DropoutLstmLayer):
"""
Following Merity et al. (2018): uses DropConnect instead of Dropout, on
the hidden-to-hidden matrices. The parameter of the DropConnect probability
is still called dropout, unfortunately.
"""
def forward_one(self, input, hidden):
h_t, c_t = hidden
ifgo = input.matmul(self.w_i) + h_t.matmul(self.do[0](self.w_h))
ifgo += self.b
i, f, g, o = ifgo.chunk(4, 1)
i = torch.sigmoid(i)
f = torch.sigmoid(f)
g = torch.tanh(g)
o = torch.sigmoid(o)
c_t = f * c_t + i * g
h_t = o * torch.tanh(c_t)
return h_t, c_t
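# Minimal shape-check sketch (an illustration; in the real project the weights
# are initialized by the surrounding model, so a stand-in init is used here).
if __name__ == '__main__':
    layer = DefaultLstmLayer(input_size=10, hidden_size=20)
    for p in layer.parameters():
        p.data.uniform_(-0.1, 0.1)     # stand-in initialization
    hidden = layer.init_hidden(batch_size=4)
    x = torch.randn(4, 35, 10)         # batch_size x seq_len x input_features
    output, hidden = layer(x, hidden)
    print(output.shape)                # torch.Size([4, 35, 20])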
|
StarcoderdataPython
|
122195
|
# Python modules
# 3rd party modules
# Our modules
import vespa.analysis.block_prep_fidsum as block_prep_fidsum
from vespa.common.constants import Deflate
class BlockPrepEditFidsum(block_prep_fidsum.BlockPrepFidsum):
"""
Building block to hold the state of a step in an MRS processing chain.
Includes the functionality to save/recall this object to/from an XML node.
Contains inputs/results for preprocessing of the raw data from the previous
block ('raw') in the dataset.blocks list. This step modifies coil/average
data into a single summed FID array for one dataset.
"""
# The XML_VERSION enables us to change the XML output format in the future
XML_VERSION = "1.0.0"
def __init__(self, attributes=None):
""" Set up standard functionality of the base class """
        super().__init__(attributes)
##### Standard Methods and Properties #####################################
def __str__(self):
lines = super().__str__().split('\n')
lines[0] = "------- {0} Object -------".format(self.__class__.__name__)
return '\n'.join(lines)
def deflate(self, flavor=Deflate.ETREE):
        e = super().deflate(flavor)
if flavor == Deflate.ETREE:
# Alter the tag name & XML version info
e.tag = "block_prep_edit_fidsum"
e.set("version", self.XML_VERSION)
return e
elif flavor == Deflate.DICTIONARY:
return e
##### Private Methods #####################################
|
StarcoderdataPython
|