max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5)
---|---|---|---|---|---|---
BaseStation/src/rocket_packet/rocket_packet_parser.py | ul-gaul/Avionique_Software | 3 | 12786651 |
import abc
from src.rocket_packet.rocket_packet import RocketPacket
class RocketPacketParser(abc.ABC):
def __init__(self, version: int, packet_format: str, num_bytes: int):
self.version = version
self.format = packet_format
self.num_bytes = num_bytes
def get_number_of_bytes(self):
return self.num_bytes
def get_version(self) -> int:
return self.version
@abc.abstractmethod
def parse(self, data: bytes) -> RocketPacket:
pass
@abc.abstractmethod
def get_field_names(self):
pass
@abc.abstractmethod
def to_list(self, packet: RocketPacket) -> list:
pass
@abc.abstractmethod
def from_list(self, data: list) -> RocketPacket:
pass
| 2.796875 | 3 |
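A minimal sketch of how a concrete parser might subclass RocketPacketParser above. The struct layout ("<fB"), the version/num_bytes values, and the RocketPacket attributes (altitude, state, no-argument constructor) are illustrative assumptions, not the project's real packet format.

import struct

class ExampleRocketPacketParser(RocketPacketParser):
    def __init__(self):
        # hypothetical 5-byte packet: one float (altitude) + one unsigned byte (state)
        super().__init__(version=1, packet_format="<fB", num_bytes=5)

    def parse(self, data: bytes) -> RocketPacket:
        altitude, state = struct.unpack(self.format, data)
        packet = RocketPacket()
        packet.altitude = altitude
        packet.state = state
        return packet

    def get_field_names(self):
        return ["altitude", "state"]

    def to_list(self, packet: RocketPacket) -> list:
        return [packet.altitude, packet.state]

    def from_list(self, data: list) -> RocketPacket:
        packet = RocketPacket()
        packet.altitude, packet.state = data
        return packet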
test/read_from_url.py | egemenzeytinci/readmrz | 0 | 12786652 |
from readmrz import MrzDetector, MrzReader
from unittest import TestCase
class ReadFromUrlTest(TestCase):
BASE = 'https://raw.githubusercontent.com/egemenzeytinci'
VALID_URL = f'{BASE}/readmrz/master/images/example.jpg'
def test_valid(self):
detector = MrzDetector()
reader = MrzReader()
# expected result as a dict
expected = {
'surname': 'STEARNE',
'name': '<NAME>',
'country': 'CAN',
'nationality': 'CAN',
'birth_date': '580702',
'expiry_date': '240904',
'sex': 'M',
'document_type': 'P',
'document_number': 'GA302922',
'optional_data': '',
'birth_date_hash': '0',
'expiry_date_hash': '3',
'document_number_hash': '0',
'final_hash': '2'
}
# read image from given url
image = detector.read_from_url(self.VALID_URL)
# crop machine readable zone
cropped = detector.crop_area(image)
# extract mrz code
result = reader.process(cropped)
self.assertDictEqual(expected, result)
| 3.0625 | 3 |
b2validators/document.py | math-s/b2bit-validators | 0 | 12786653 |
import re
from b2validators.exceptions import ValidationError
def validate_cnpj(value):
cnpj = re.sub("[^0-9]", "", value)
if len(cnpj) < 14:
raise ValidationError("O CNPJ precisa ter 14 dígitos.")
expected_cnpj = [int(digit) for digit in cnpj[:12] if digit.isdigit()]
cnpj_test = [int(digit) for digit in cnpj if digit.isdigit()]
weights = [5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2]
result = []
for idx, w in enumerate(weights):
x = w*expected_cnpj[idx]
result.append(x)
resul_sum = sum(result)
remainder = resul_sum % 11
if remainder < 2:
expected_cnpj.append(0)
else:
expected_cnpj.append(11 - remainder)
weights = [6] + weights
result = []
for idx, w in enumerate(weights):
x = w*expected_cnpj[idx]
result.append(x)
resul_sum = sum(result)
remainder = resul_sum % 11
if remainder < 2:
expected_cnpj.append(0)
else:
expected_cnpj.append(11 - remainder)
if cnpj_test != expected_cnpj:
raise ValidationError("CNPJ inválido")
return value
def validate_cpf(value):
cpf = re.sub("[^0-9]", "", value)
if len(cpf) != 11:
raise ValidationError("O CPF deve ter 11 dígitos.")
expected_cpf = [int(digit) for digit in cpf][:9]
cpf_test = [int(digit) for digit in cpf]
weights = [10, 9, 8, 7, 6, 5, 4, 3, 2]
result = []
for idx, w in enumerate(weights):
x = w*expected_cpf[idx]
result.append(x)
resul_sum = sum(result)
remainder = resul_sum % 11
if remainder < 2:
expected_cpf.append(0)
else:
expected_cpf.append(11 - remainder)
weights = [11] + weights
result = []
for idx, w in enumerate(weights):
x = w*expected_cpf[idx]
result.append(x)
resul_sum = sum(result)
remainder = resul_sum % 11
if remainder < 2:
expected_cpf.append(0)
else:
expected_cpf.append(11 - remainder)
if cpf_test != expected_cpf:
raise ValidationError("CPF inválido.")
return value
| 2.71875 | 3 |
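A short usage sketch for the validators above; the document number is a placeholder, not a real CPF.

from b2validators.document import validate_cpf
from b2validators.exceptions import ValidationError

try:
    # formatting characters are stripped before the check digits are verified
    validate_cpf("123.456.789-00")
except ValidationError as exc:
    print(exc)  # "CPF inválido." when the check digits do not match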
examples/basic_shapes.py | abey79/lines | 39 | 12786654 |
from lines import Cube, Cylinder, Pyramid, Scene
def main():
# Setup the scene
scene = Scene()
scene.add(Cube(translate=(2, 0, 0)))
scene.add(Pyramid())
scene.add(Cylinder(scale=(0.5, 0.5, 1), translate=(-2, 0, 0)))
scene.look_at((2, 6, 1.5), (0, 0, 0))
scene.perspective(70, 0.1, 10)
# Render and display the scene
scene.render().show(show_hidden=True)
if __name__ == "__main__":
main()
| 2.921875 | 3 |
daemon/wakeserver/network.py | opiopan/wakeserver | 1 | 12786655 |
import os
import sys
import time
import json
import socket
import subprocess
import requests
import monitoring
MASTER_SERVICE = '_wakeserver._tcp'
SLAVE_SERVICE = '_wakeserver_slave._tcp'
LISTSERVICE = '/var/www/wakeserver/bin/listservice'
HOSTNAME = socket.gethostname() + '.local'
MASTER_PORT = ':8080'
SLAVE_PORT = ':8081'
HOST_KEY = 'host'
SERVERS_KEY = 'servers'
NAME_KEY = 'name'
ISON_KEY = 'isOn'
HTTPTIMEOUT = 10
isMaster = True
remotes = []
def applyRemote(data):
global remotes
if HOST_KEY in data:
newhost = data[HOST_KEY]
needToAppend = True
for host in remotes:
if newhost == host:
needToAppend = False
break
if needToAppend:
            print('NETWORK: new remote: {}'.format(newhost))
remotes.append(newhost)
else:
return None
if SERVERS_KEY in data:
        print('NETWORK: apply data from: {}'.format(newhost))
for server in data[SERVERS_KEY]:
name = server[NAME_KEY] if NAME_KEY in server else None
status = server[ISON_KEY] if ISON_KEY in server else None
if monitoring.monitor:
monitoring.monitor.setStatus(name, status)
return makeSyncData()
def makeSyncData(server = None):
global isMaster
data = {HOST_KEY: HOSTNAME + (MASTER_PORT if isMaster else SLAVE_PORT)}
servers = [server] if server else monitoring.monitor.servers
if not isMaster:
hosts = []
for server in servers:
sdata = {NAME_KEY: server['name'],
ISON_KEY: server['status'] == 'on'}
hosts.append(sdata)
data[SERVERS_KEY] = hosts
return data
def syncRemote(server = None):
global remotes
global isMaster
body = makeSyncData(server)
for remote in remotes:
try:
url = 'http://' + remote + '/remote'
            print('NETWORK: synchronizing with {0}'.format(remote))
resp = requests.post(url, json = body, timeout = HTTPTIMEOUT)
if resp.status_code == requests.codes.ok and isMaster:
applyRemote(resp.json())
except:
            print('NETWORK: error while accessing {0}'.format(remote))
def initNetwork(ismaster):
global remotes
global isMaster
isMaster = ismaster
proc = subprocess.Popen([LISTSERVICE,
SLAVE_SERVICE if isMaster else MASTER_SERVICE],
stdout = subprocess.PIPE)
while proc:
line = proc.stdout.readline()
if len(line) == 0:
proc.wait()
break
remotes.append(line[:-1])
    print('NETWORK: detected {0} remotes:'.format(len(remotes)))
for name in remotes:
        print(' {0}'.format(name))
| 2.515625 | 3 |
36_Valid Sudoku.py | Alvin1994/leetcode-python3- | 0 | 12786656 | from typing import List
class Solution:
def isValidSudoku(self, board: List[List[str]]) -> bool:
if not board or not board[0]:
return
tmp = []
for r in range(len(board)):
for c in range(len(board[0])):
if board[r][c] != ".":
tmp += [(board[r][c], r), (c,board[r][c]), (r//3,c//3,board[r][c])]
        return len(tmp)==len(set(tmp))
| 3.5 | 4 |
tests/nfs_module_test.py | web-sys1/NFSyndication | 0 | 12786657 |
import os, glob
import pytest
import subprocess
from NFSyndication import init as NFS_init
from NFSyndication.core import args
# test@
# Change the action associated with your option to action='store'
def test_conf():
#We use these conditions to check the statement
args.outputJSON = "feed-output.json"
subscriptions = [
'http://feedpress.me/512pixels',
'http://www.leancrew.com/all-this/feed/',
'http://ihnatko.com/feed/',
'http://blog.ashleynh.me/feed']
with open(f'feeds.txt', 'w', encoding='utf8') as f:
f.write(",".join(subscriptions).replace(',', '\n'))
return NFS_init()
def test_entrypoint():
#Then initialize code
return test_conf()
| 2.09375 | 2 |
translate2/numberout.py | sdytkht/se2se | 0 | 12786658 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 28 00:02:08 2017
@author: kht
"""
import tensorflow as tf
import translate as tl
import numpy as np
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape = shape)
return tf.Variable(initial)
einputs,dinputs,res_logits,all_attens=tl.self_decode()
einputs_t=[]
dinputs_t=[]
res_logits_t=[]
num_exp=len(res_logits)
for i in range(100):
einputs_t.append(einputs[num_exp-i-1])
dinputs_t.append(dinputs[num_exp-i-1])
res_logits_t.append(res_logits[num_exp-i-1])
batch_size=32
maxlen=13
sess = tf.InteractiveSession()
w_fc2 = weight_variable([128, 20])
b_fc2 = bias_variable([20])
x=tf.placeholder(tf.float32,[None,128])
y_=tf.placeholder(tf.float32,[None,20])
y_conv = tf.nn.softmax(tf.matmul(x, w_fc2) + b_fc2)
# train and evaluate the model
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
res=tf.argmax(y_conv, 1)
resreal=tf.argmax(y_, 1)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
init=tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
saver = tf.train.Saver()
saver.restore(sess, "train/NumAdd.ckpt")
for i in range(len(res_logits_t)):
din=dinputs_t[i]
dlogit=res_logits_t[i]
'''
for j in range(batch_size):
batch_x=[]
batch_y=np.zeros([13,20],dtype=np.float32)
for k in range(maxlen):
batch_y[k][din[k][j]]=1
dx=dlogit[k][j]
batch_x.append(dx)
print(sess.run(correct_prediction,feed_dict={x: batch_x, y_: batch_y}))
print('-----------------------------------------------------------------------')
print("**************************************************************************************")
'''
for j in range(batch_size):
batch_x=[]
batch_y=np.zeros([13,20],dtype=np.float32)
for k in range(maxlen):
batch_y[k][din[k][j]]=1
dx=dlogit[k][j]
batch_x.append(dx)
print(sess.run(res,feed_dict={x: batch_x, y_: batch_y}))
print(sess.run(resreal,feed_dict={x: batch_x, y_: batch_y}))
print('-----------------------------------------------------------------------')
| 2.546875 | 3 |
publiapp_api/migrations/0010_auto_20200925_0300.py | KevinPercy/PubliAppAPI | 0 | 12786659 | # Generated by Django 3.0.7 on 2020-09-25 03:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('publiapp_api', '0009_auto_20200922_0303'),
]
operations = [
migrations.CreateModel(
name='Ubigeo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('codigo_ubigeo', models.CharField(max_length=6)),
('departamento', models.CharField(max_length=50)),
('provincia', models.CharField(max_length=50)),
('distrito', models.CharField(max_length=50)),
],
),
migrations.AlterField(
model_name='precio',
name='anuncio',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='precios', to='publiapp_api.Anuncio'),
),
]
| 1.539063 | 2 |
ai_covid_19/transformers/nlp.py | nestauk/ai_covid_19 | 0 | 12786660 | #Various functions and utilities that we use to work with text
import re
import string
from string import punctuation
from string import digits
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from gensim import corpora, models
from nltk.stem import *
nltk.download("stopwords", quiet=True)
nltk.download("punkt", quiet=True)
stop_words = set(
stopwords.words("english") + list(string.punctuation) + ["\\n"] + ["quot"]
)
regex_str = [
r"http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|" r"[!*\(\),](?:%[0-9a-f][0-9a-f]))+",
r"(?:\w+-\w+){2}",
r"(?:\w+-\w+)",
r"(?:\\\+n+)",
r"(?:@[\w_]+)",
r"<[^>]+>",
r"(?:\w+'\w)",
r"(?:[\w_]+)",
r"(?:\S)",
]
# Create the tokenizer which will be case insensitive and will ignore space.
tokens_re = re.compile(r"(" + "|".join(regex_str) + ")", re.VERBOSE | re.IGNORECASE)
stemmer = PorterStemmer()
def tokenize_document(text, remove_stops=False):
"""Preprocess a whole raw document.
Args:
text (str): Raw string of text.
remove_stops (bool): Flag to remove english stopwords
Return:
List of preprocessed and tokenized documents
"""
return [
clean_and_tokenize(sentence, remove_stops)
for sentence in nltk.sent_tokenize(text)
]
def clean_and_tokenize(text, remove_stops):
"""Preprocess a raw string/sentence of text.
Args:
text (str): Raw string of text.
remove_stops (bool): Flag to remove english stopwords
Return:
tokens (list, str): Preprocessed tokens.
"""
tokens = tokens_re.findall(text)
_tokens = [t.lower() for t in tokens]
filtered_tokens = [
token.replace("-", "_")
for token in _tokens
if not (remove_stops and len(token) <= 2)
and (not remove_stops or token not in stop_words)
and not any(x in token for x in string.digits)
and any(x in token for x in string.ascii_lowercase)
]
return filtered_tokens
def tfidf_vectors(data, max_features):
"""Transforms text to tfidf vectors.
Args:
data (pandas.Series)
Returns:
(`scipy.sparse`): Sparse TFIDF matrix.
"""
vectorizer = TfidfVectorizer(
stop_words="english", analyzer="word", max_features=max_features
)
return vectorizer.fit_transform(data)
#Characters to drop
drop_characters = re.sub('-','',punctuation)+digits
#Stopwords
from nltk.corpus import stopwords
stop = stopwords.words('english')
#Stem functions
from nltk.stem import *
stemmer = PorterStemmer()
def clean_tokenise(string,drop_characters=drop_characters,stopwords=stop_words):
'''
Takes a string and cleans (makes lowercase and removes stopwords)
'''
#Lowercase
str_low = string.lower()
#Remove symbols and numbers
str_letters = re.sub('[{drop}]'.format(drop=drop_characters),'',str_low)
#Remove stopwords
clean = [x for x in str_letters.split(' ') if (x not in stop) & (x!='')]
return(clean)
class CleanTokenize():
'''
This class takes a list of strings and returns a tokenised, clean list of token lists ready
to be processed with the LdaPipeline
It has a clean method to remove symbols and stopwords
It has a bigram method to detect collocated words
It has a stem method to stem words
'''
def __init__(self,corpus):
'''
Takes a corpus (list where each element is a string)
'''
#Store
self.corpus = corpus
def clean(self,drop=drop_characters,stopwords=stop):
'''
Removes strings and stopwords,
'''
cleaned = [clean_tokenise(doc,drop_characters=drop,stopwords=stop) for doc in self.corpus]
self.tokenised = cleaned
return(self)
def stem(self):
'''
Optional: stems words
'''
#Stems each word in each tokenised sentence
stemmed = [[stemmer.stem(word) for word in sentence] for sentence in self.tokenised]
self.tokenised = stemmed
return(self)
def bigram(self,threshold=10):
'''
Optional Create bigrams.
'''
#Colocation detector trained on the data
phrases = models.Phrases(self.tokenised,threshold=threshold)
bigram = models.phrases.Phraser(phrases)
self.tokenised = bigram[self.tokenised]
return(self)
def salient_words_per_category(token_df,corpus_freqs,thres=100,top_words=50):
'''
Create a list of salient terms in a df (salient terms normalised by corpus frequency).
Args:
tokens (list or series) a list where every element is a tokenised abstract
corpus_freqs (df) are the frequencies of terms in the whole corpus
thres (int) is the number of occurrences of a term in the subcorpus
top_words (int) is the number of salient words to output
'''
subcorpus_freqs = flatten_freq(token_df,freq=True)
merged= pd.concat([pd.DataFrame(subcorpus_freqs),corpus_freqs],axis=1,sort=True)
merged['salience'] = (merged.iloc[:,0]/merged.iloc[:,1])
results = merged.loc[merged.iloc[:,0]>thres].sort_values('salience',ascending=False).iloc[:top_words]
results.columns = ['sub_corpus','corpus','salience']
return results
def get_term_salience(df,sel_var,sel_term,corpus_freqs,thres=100,top_words=50):
'''
Returns a list of salient terms per SDG
Args:
df (df) is a df of interest
sel_var (str) is the variable we use to select
sel_term (str) is the term we use to select
corpus_freqs (df) is a df with corpus frequencies
thres (int) is the min number of word occurrences
top_words (int) is the number of words to report
'''
rel_corp = df.loc[df[sel_var]==sel_term].drop_duplicates('project_id')['tokenised_abstract']
salient_rel = salient_words_per_category(list(rel_corp),corpus_freqs,thres,top_words)
salient_rel.rename(columns={'sub_corpus':f'{str(sel_term)}_freq','corpus':'all_freq',
'salience':f'{str(sel_term)}_salience'},inplace=True)
return(salient_rel)
class LdaPipeline():
'''
This class processes lists of keywords.
How does it work?
-It is initialised with a list where every element is a collection of keywords
-It has a method to filter keywords removing those that appear less than a set number of times
-It has a method to process the filtered df into an object that gensim can work with
-It has a method to train the LDA model with the right parameters
-It has a method to predict the topics in a corpus
'''
def __init__(self,corpus):
'''
Takes the list of terms
'''
#Store the corpus
self.tokenised = corpus
def filter(self,minimum=5):
'''
Removes keywords that appear less than 5 times.
'''
#Load
tokenised = self.tokenised
#Count tokens
token_counts = pd.Series([x for el in tokenised for x in el]).value_counts()
#Tokens to keep
keep = token_counts.index[token_counts>minimum]
#Filter
tokenised_filtered = [[x for x in el if x in keep] for el in tokenised]
#Store
self.tokenised = tokenised_filtered
self.empty_groups = np.sum([len(x)==0 for x in tokenised_filtered])
return(self)
def clean(self):
'''
Remove symbols and numbers
'''
def process(self):
'''
This creates the bag of words we use in the gensim analysis
'''
#Load the list of keywords
tokenised = self.tokenised
#Create the dictionary
dictionary = corpora.Dictionary(tokenised)
#Create the Bag of words. This converts keywords into ids
corpus = [dictionary.doc2bow(x) for x in tokenised]
self.corpus = corpus
self.dictionary = dictionary
return(self)
def tfidf(self):
'''
This is optional: We extract the term-frequency inverse document frequency of the words in
the corpus. The idea is to identify those keywords that are more salient in a document by normalising over
their frequency in the whole corpus
'''
#Load the corpus
corpus = self.corpus
#Fit a TFIDF model on the data
tfidf = models.TfidfModel(corpus)
#Transform the corpus and save it
self.corpus = tfidf[corpus]
return(self)
def fit_lda(self,num_topics=20,passes=5,iterations=75,random_state=1803):
'''
This fits the LDA model taking a set of keyword arguments.
#Number of passes, iterations and random state for reproducibility. We will have to consider
reproducibility eventually.
'''
#Load the corpus
corpus = self.corpus
#Train the LDA model with the parameters we supplied
lda = models.LdaModel(corpus,id2word=self.dictionary,
num_topics=num_topics,passes=passes,iterations=iterations,random_state=random_state)
#Save the outputs
self.lda_model = lda
self.lda_topics = lda.show_topics(num_topics=num_topics)
return(self)
def predict_topics(self):
'''
This predicts the topic mix for every observation in the corpus
'''
#Load the attributes we will be working with
lda = self.lda_model
corpus = self.corpus
#Now we create a df
predicted = lda[corpus]
#Convert this into a dataframe
predicted_df = pd.concat([pd.DataFrame({x[0]:x[1] for x in topics},
index=[num]) for num,topics in enumerate(predicted)]).fillna(0)
self.predicted_df = predicted_df
return(self)
| 3.4375 | 3 |
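A minimal end-to-end sketch of the CleanTokenize and LdaPipeline classes above (assumes both are in scope); the two-document corpus is made up for illustration.

corpus = [
    "Deep learning methods for protein structure prediction",
    "Agent based models of epidemic spread and policy response",
]

# clean/tokenise, detect collocations, stem
tokens = CleanTokenize(corpus).clean().bigram().stem().tokenised

# bag of words -> tfidf -> LDA -> per-document topic mix
pipeline = LdaPipeline(tokens).filter(minimum=0).process().tfidf()
pipeline.fit_lda(num_topics=2, passes=2)
print(pipeline.predict_topics().predicted_df)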
EnlightenGAN/data/unaligned_dataset.py | chenwydj/dynamic_light_unfolding | 0 | 12786661 | import torch
from torch import nn
import os.path
import torchvision.transforms as transforms
from EnlightenGAN.data.base_dataset import BaseDataset, get_transform
from EnlightenGAN.data.image_folder import make_dataset
import random
from PIL import Image
import PIL
from pdb import set_trace as st
import numpy as np
from skimage import color, feature
from skimage.filters import gaussian
def pad_tensor(input):
height_org, width_org = input.shape[2], input.shape[3]
divide = 16
if width_org % divide != 0 or height_org % divide != 0:
width_res = width_org % divide
height_res = height_org % divide
if width_res != 0:
width_div = divide - width_res
pad_left = int(width_div / 2)
pad_right = int(width_div - pad_left)
else:
pad_left = 0
pad_right = 0
if height_res != 0:
height_div = divide - height_res
pad_top = int(height_div / 2)
pad_bottom = int(height_div - pad_top)
else:
pad_top = 0
pad_bottom = 0
padding = nn.ReflectionPad2d((pad_left, pad_right, pad_top, pad_bottom))
input = padding(input).data
else:
pad_left = 0
pad_right = 0
pad_top = 0
pad_bottom = 0
height, width = input.shape[2], input.shape[3]
assert width % divide == 0, 'width cant divided by stride'
assert height % divide == 0, 'height cant divided by stride'
return input, pad_left, pad_right, pad_top, pad_bottom
def pad_tensor_back(input, pad_left, pad_right, pad_top, pad_bottom):
height, width = input.shape[2], input.shape[3]
return input[:,:, pad_top: height - pad_bottom, pad_left: width - pad_right]
class UnalignedDataset(BaseDataset):
def _reinit_A_paths(self):
self.A_paths = self.pos_names# + np.random.choice(self.neg_names_all, int(948/(10/1)), replace=False).tolist()
random.shuffle(self.A_paths)
self.B_paths = list(self.A_paths)
self.A_size = len(self.A_paths)
self.B_size = len(self.B_paths)
def initialize(self, opt):
self.opt = opt
self.root = opt.dataroot
##############################
# self.dir_A = os.path.join(opt.dataroot)#, opt.phase + 'A')
# self.dir_B = os.path.join(opt.dataroot)#, opt.phase + 'B')
if not 'images' in self.opt.name:
self.dir_A = os.path.join("/ssd1/chenwy/bdd100k/seg_luminance/0_100/", opt.phase)
self.dir_B = os.path.join("/ssd1/chenwy/bdd100k/seg_luminance/100_255/", opt.phase)
# self.dir_A = os.path.join("/ssd1/chenwy/bdd100k/seg_luminance/0_75/", opt.phase)
# self.dir_B = os.path.join("/ssd1/chenwy/bdd100k/seg_luminance/100_105/", opt.phase)
else:
self.dir_A = os.path.join("/ssd1/chenwy/bdd100k/images_luminance/100k/0_100/", opt.phase)
self.dir_B = os.path.join("/ssd1/chenwy/bdd100k/images_luminance/100k/100_255/", opt.phase)
# self.dir_A = os.path.join("/ssd1/chenwy/bdd100k/images_luminance/100k/0_75/", opt.phase)
# self.dir_B = os.path.join("/ssd1/chenwy/bdd100k/images_luminance/100k/100_105/", opt.phase)
##############################
self.A_paths = make_dataset(self.dir_A)
self.B_paths = make_dataset(self.dir_B)
self.A_paths = sorted(self.A_paths)
self.B_paths = sorted(self.B_paths)
self.A_size = len(self.A_paths)
self.B_size = len(self.B_paths)
self.transform = get_transform(opt)
##### load image2reward to resample dataset ############################
# image2reward = np.load("/home/chenwy/DynamicLightEnlighten/image2reward.npy").item()
# self.pos = []; self.pos_names = []; self.neg_names_all = []
# for k, v in image2reward.items():
# if v > 0:
# self.pos.append(v)
# self.pos_names.append(k)
# elif v < 0:
# self.neg_names_all.append(k)
# self.pos_names = [k for v,k in sorted(zip(self.pos, self.pos_names), reverse=True)]
# self._reinit_A_paths()
#################################
self.low_range = range(55, 70)
self.high_range = range(110, 125)
self.N_TRY = 20
def __getitem__(self, index_A):
A_path = self.A_paths[index_A % self.A_size]
index_B = random.randint(0, self.B_size - 1)
B_path = self.B_paths[index_B % self.B_size]
A_image = Image.open(A_path).convert('RGB')
B_image = Image.open(B_path).convert('RGB')
# A_size = A_img.size
# B_size = B_img.size
# A_size = A_size = (A_size[0]//16*16, A_size[1]//16*16)
# B_size = B_size = (B_size[0]//16*16, B_size[1]//16*16)
# A_img = A_img.resize(A_size, Image.BICUBIC)
# B_img = B_img.resize(B_size, Image.BICUBIC)
# A_gray = A_img.convert('LA')
# A_gray = 255.0-A_gray
w, h = A_image.size
# without luminance selection #####################
# x1 = random.randint(0, w - self.opt.fineSize)
# y1 = random.randint(0, h - self.opt.fineSize)
# A_img = A_image.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))
# B_img = B_image.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))
# A_npy = np.array(A_img)
# B_npy = np.array(B_img)
# r,g,b = A_npy[:, :, 0], A_npy[:, :, 1], A_npy[:, :, 2]
# value_A = (0.299*r+0.587*g+0.114*b) / 255.
# value_A = np.sort(value_A.flatten())
# length = value_A.shape[0]
# value_A = value_A[int(np.round(length * 0.1)) : int(np.round(length * 0.9))].mean()
# if not 'images' in self.opt.name:
# # mask = Image.open(os.path.join("/ssd1/chenwy/bdd100k/seg/labels/", "train", os.path.splitext(A_path.split("/")[-1])[0] + '_train_id.png'))
# mask = Image.open(os.path.join("/ssd1/chenwy/bdd100k/seg/labels/", self.opt.phase, os.path.splitext(A_path.split("/")[-1])[0] + '_train_id.png'))
# mask = np.array(mask.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))).astype('int32') # cropped mask for light_enhance_AB/seg
# mask = self._mask_transform(mask)
# else:
# mask = torch.zeros(1)
###################################################
# patch luminance & mask class diversity selection ###########################
n_try = 0
while n_try < self.N_TRY:
x1 = random.randint(0, w - self.opt.fineSize)
y1 = random.randint(0, h - self.opt.fineSize)
A_img = A_image.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))
B_img = B_image.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))
A_npy = np.array(A_img)
B_npy = np.array(B_img)
r,g,b = A_npy[:, :, 0], A_npy[:, :, 1], A_npy[:, :, 2]
value_A = (0.299*r+0.587*g+0.114*b) / 255.
value_A = np.sort(value_A.flatten())
length = value_A.shape[0]
value_A = value_A[int(np.round(length * 0.1)) : int(np.round(length * 0.9))].mean()
if int(np.round(value_A*255)) not in self.low_range: n_try += 1; continue
r,g,b = B_npy[:, :, 0], B_npy[:, :, 1], B_npy[:, :, 2]
value_B = (0.299*r+0.587*g+0.114*b) / 255.
value_B = np.sort(value_B.flatten())
length = value_B.shape[0]
value_B = value_B[int(np.round(length * 0.1)) : int(np.round(length * 0.9))].mean()
if int(np.round(value_B*255)) not in self.high_range: n_try += 1; continue
if not 'images' in self.opt.name:
# mask = Image.open(os.path.join("/ssd1/chenwy/bdd100k/seg/labels/", "train", os.path.splitext(A_path.split("/")[-1])[0] + '_train_id.png'))
mask = Image.open(os.path.join("/ssd1/chenwy/bdd100k/seg/labels/", self.opt.phase, os.path.splitext(A_path.split("/")[-1])[0] + '_train_id.png'))
mask = np.array(mask.crop((x1, y1, x1+self.opt.fineSize, y1+self.opt.fineSize))).astype('int32') # cropped mask for light_enhance_AB/seg
unique, counts = np.unique(mask, return_counts=True)
if len(unique) < 2 or (counts / counts.sum()).max() > 0.7: n_try += 1; continue
mask = self._mask_transform(mask)
else:
mask = torch.zeros(1)
break
if n_try == self.N_TRY:
# if int(np.round(value_A)) not in self.low_range:
# self.A_paths.pop(index_A % self.A_size)
# self.A_size -= 1
# if int(np.round(value_B)) not in self.high_range:
# self.B_paths.pop(index_B % self.B_size)
# self.B_size -= 1
index_A = random.randint(0, self.__len__())
return self.__getitem__(index_A)
##########################################################################
gray_mask = torch.ones(1, self.opt.fineSize, self.opt.fineSize) * value_A
A_img_border = A_image.crop((x1-self.opt.fineSize//2, y1-self.opt.fineSize//2, x1+2*self.opt.fineSize, y1+2*self.opt.fineSize))
A_Lab = torch.Tensor(color.rgb2lab(A_npy) / 100).permute([2, 0, 1])
A_npy = gaussian(A_npy, sigma=2, multichannel=True)
r,g,b = A_npy[:, :, 0], A_npy[:, :, 1], A_npy[:, :, 2]
A_npy = 0.299*r+0.587*g+0.114*b
edges_A = torch.unsqueeze(torch.from_numpy(feature.canny(A_npy, sigma=2).astype("float32")), 0)
A_img = self.transform(A_img)
A_img_border = self.transform(A_img_border)
B_img = self.transform(B_img)
if self.opt.resize_or_crop == 'no':
r,g,b = A_img[0]+1, A_img[1]+1, A_img[2]+1
A_gray = 1. - (0.299*r+0.587*g+0.114*b)/2.
A_gray = torch.unsqueeze(A_gray, 0)
input_img = A_img
# A_gray = (1./A_gray)/255.
r,g,b = A_img_border[0]+1, A_img_border[1]+1, A_img_border[2]+1
A_gray_border = 1. - (0.299*r+0.587*g+0.114*b)/2.
A_gray_border = torch.unsqueeze(A_gray_border, 0)
else:
w = A_img.size(2)
h = A_img.size(1)
# A_gray = (1./A_gray)/255.
if (not self.opt.no_flip) and random.random() < 0.5:
idx = [i for i in range(A_img.size(2) - 1, -1, -1)]
idx = torch.LongTensor(idx)
A_img = A_img.index_select(2, idx)
B_img = B_img.index_select(2, idx)
if (not self.opt.no_flip) and random.random() < 0.5:
idx = [i for i in range(A_img.size(1) - 1, -1, -1)]
idx = torch.LongTensor(idx)
A_img = A_img.index_select(1, idx)
B_img = B_img.index_select(1, idx)
if self.opt.vary == 1 and (not self.opt.no_flip) and random.random() < 0.5:
times = random.randint(self.opt.low_times,self.opt.high_times)/100.
input_img = (A_img+1)/2./times
input_img = input_img*2-1
else:
input_img = A_img
if self.opt.lighten:
B_img = (B_img + 1)/2.
B_img = (B_img - torch.min(B_img))/(torch.max(B_img) - torch.min(B_img))
B_img = B_img*2. -1
r,g,b = input_img[0]+1, input_img[1]+1, input_img[2]+1
A_gray = 1. - (0.299*r+0.587*g+0.114*b)/2.
A_gray = torch.unsqueeze(A_gray, 0)
return {'A': A_img, 'B': B_img, 'A_gray': A_gray, 'input_img': input_img,
'A_paths': A_path, 'B_paths': B_path, 'mask': mask,
'A_border': A_img_border, 'A_gray_border': A_gray_border,
'A_Lab': A_Lab, 'gray_mask': gray_mask, 'edges_A': edges_A
}
def __len__(self):
return max(self.A_size, self.B_size)
def name(self):
return 'UnalignedDataset'
def _mask_transform(self, mask):
target = np.array(mask).astype('int32')
target[target == 255] = -1
        return torch.from_numpy(target).long()
| 2.234375 | 2 |
wrappers/tensorflow/example5 - denoise.py | NobuoTsukamoto/librealsense | 6,457 | 12786662 |
import pyrealsense2 as rs
import numpy as np
import cv2
from tensorflow import keras
import time, sys
# Configure depth and color streams
pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.depth, 848, 480, rs.format.z16, 30)
config.enable_stream(rs.stream.infrared, 1, 848, 480, rs.format.y8, 30) # 1 for left frame
# Start streaming
pipeline.start(config)
channels = 2
cropped_w, cropped_h = 480, 480
test_model_name = ""
if (len(sys.argv) > 1):
test_model_name = str(sys.argv[1])
t1 = time.perf_counter()
model = keras.models.load_model(test_model_name)
t2 = time.perf_counter()
print('model loading : ', t2 - t1, 'seconds')
def predict(noisy_image, ir_image):
t1 = time.perf_counter()
ir_image = np.array(ir_image).astype("uint16")
cropped_ir , cropped_noisy = [], []
width, height = 848, 480
w, h = cropped_w, cropped_h
for col_i in range(0, width, w):
for row_i in range(0, height, h):
cropped_ir.append(ir_image[row_i:row_i+h, col_i:col_i+w])
cropped_noisy.append(noisy_image[row_i:row_i+h, col_i:col_i+w])
# fill with zero to get size 480x480 for both images
fill = np.zeros((h, w - cropped_ir[-1].shape[1]), dtype="uint16")
cropped_ir[-1] = np.hstack((cropped_ir[-1], fill))
cropped_noisy[-1] = np.hstack((cropped_noisy[-1], fill))
t2 = time.perf_counter()
print('image cropping : ', t2 - t1, 'seconds')
cropped_image_offsets = [(0,0), (0,480)]
whole_image = np.zeros((height, width, channels), dtype="float32")
for i in range(len(cropped_ir)):
t1 = time.perf_counter()
noisy_images_plt = cropped_noisy[i].reshape(1, cropped_w, cropped_h, 1)
ir_images_plt = cropped_ir[i].reshape(1, cropped_w, cropped_h, 1)
im_and_ir = np.stack((noisy_images_plt, ir_images_plt), axis=3)
im_and_ir = im_and_ir.reshape(1, cropped_w, cropped_h, channels)
img = np.array(im_and_ir)
# Parse numbers as floats
img = img.astype('float32')
# Normalize data : remove average then devide by standard deviation
img = img / 65535
sample = img
row, col = cropped_image_offsets[i]
t2 = time.perf_counter()
print('image channeling : ', t2 - t1, 'seconds')
t1 = time.perf_counter()
denoised_image = model.predict(sample)
t2 = time.perf_counter()
print('prediction only : ', t2 - t1, 'seconds')
row_end = row + cropped_h
col_end = col + cropped_w
denoised_row = cropped_h
denoised_col = cropped_w
if row + cropped_h >= height:
row_end = height - 1
denoised_row = abs(row - row_end)
if col + cropped_w >= width:
col_end = width - 1
denoised_col = abs(col - col_end)
# combine tested images
whole_image[row:row_end, col:col_end] = denoised_image[:, 0:denoised_row, 0:denoised_col, :]
return whole_image[:, :, 0]
#=============================================================================================================
def convert_image(i):
m = np.min(i)
M = np.max(i)
i = np.divide(i, np.array([M - m], dtype=np.float)).astype(np.float)
i = (i - m).astype(np.float)
i8 = (i * 255.0).astype(np.uint8)
if i8.ndim == 3:
i8 = cv2.cvtColor(i8, cv2.COLOR_BGRA2GRAY)
i8 = cv2.equalizeHist(i8)
colorized = cv2.applyColorMap(i8, cv2.COLORMAP_JET)
colorized[i8 == int(m)] = 0
font = cv2.FONT_HERSHEY_SIMPLEX
m = float("{:.2f}".format(m))
M = float("{:.2f}".format(M))
colorized = cv2.putText(colorized, str(m) + " .. " + str(M) + "[m]", (20, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
return colorized
try:
c = rs.colorizer()
while True:
print("==============================================================")
t0 = time.perf_counter()
# Wait for a coherent pair of frames: depth and ir
t1 = time.perf_counter()
frames = pipeline.wait_for_frames()
depth_frame = frames.get_depth_frame()
ir_frame = frames.get_infrared_frame()
t2 = time.perf_counter()
print('getting depth + ir frames : ', t2 - t1, 'seconds')
if not depth_frame or not ir_frame:
continue
# Convert images to numpy arrays
t1 = time.perf_counter()
depth_image = np.asanyarray(depth_frame.get_data())
ir_image = np.asanyarray(ir_frame.get_data())
t2 = time.perf_counter()
print('convert frames to numpy arrays : ', t2 - t1, 'seconds')
t1 = time.perf_counter()
predicted_image = predict(depth_image, ir_image)
t2 = time.perf_counter()
print('processing + prediction : ', t2 - t1, 'seconds')
# Stack both images horizontally
# depth_image = convert_image(depth_image)
t1 = time.perf_counter()
depth_image = np.asanyarray(c.process(depth_frame).get_data())
predicted_image = convert_image(predicted_image)
red = depth_image[:, :, 2].copy()
blue = depth_image[:, :, 0].copy()
depth_image[:, :, 0] = red
depth_image[:, :, 2] = blue
images = np.hstack((depth_image, predicted_image))
# Show images
cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
cv2.imshow('RealSense', images)
cv2.waitKey(1)
t2 = time.perf_counter()
print('show image : ', t2 - t1, 'seconds')
print('TOTAL TIME : ', t2 - t0, 'seconds')
finally:
# Stop streaming
pipeline.stop()
| 2.453125 | 2 |
pydantic_models.py | OmarThinks/flask_encryption_endpoint | 0 | 12786663 |
from pydantic import BaseModel, constr
message_constraint = constr(max_length=1000000000000)
original_constraint = constr(max_length=1000000)
passphrase_constraint = constr(min_length=2, max_length=10000)
class DecryptionInputs(BaseModel):
message : str
passphrase : passphrase_constraint
class EncryptionInputs(BaseModel):
original : original_constraint
passphrase : passphrase_constraint
| 2.625 | 3 |
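A short usage sketch for the request models above; the field values are placeholders.

from pydantic import ValidationError

inputs = EncryptionInputs(original="hello world", passphrase="correct horse")
print(inputs.original, inputs.passphrase)

try:
    DecryptionInputs(message="...", passphrase="x")  # passphrase shorter than min_length=2
except ValidationError as exc:
    print(exc)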
rebalance.py | hainingpan/inverse_volatility_caculation | 3 | 12786664 | from datetime import datetime, date
import math
import numpy as np
import time
import sys
import requests
import re
from ortools.linear_solver import pywraplp
# if len(sys.argv) == 1:
# symbols = ['UPRO', 'TMF']
# else:
# symbols = sys.argv[1].split(',')
# for i in range(len(symbols)):
# symbols[i] = symbols[i].strip().upper()
symbols = ['TMF', 'UPRO']
num_trading_days_per_year = 252
window_size = 20
date_format = "%Y-%m-%d"
end_timestamp = int(time.time())
start_timestamp = int(end_timestamp - (1.4 * (window_size + 1) + 4) * 86400)
def get_volatility_and_performance(symbol,cookie,crumb):
download_url = "https://query1.finance.yahoo.com/v7/finance/download/{}?period1={}&period2={}&interval=1d&events=history".format(symbol, start_timestamp, end_timestamp)
lines = requests.get(
download_url,
headers={
'User-Agent':
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2866.71 Safari/537.36'
}).text.strip().split('\n')
assert lines[0].split(',')[0] == 'Date'
assert lines[0].split(',')[4] == 'Close'
prices = []
for line in lines[1:]:
prices.append(float(line.split(',')[4]))
prices.reverse()
volatilities_in_window = []
for i in range(window_size):
volatilities_in_window.append(math.log(prices[i] / prices[i+1]))
most_recent_date = datetime.strptime(lines[-1].split(',')[0], date_format).date()
assert (date.today() - most_recent_date).days <= 4, "today is {}, most recent trading day is {}".format(date.today(), most_recent_date)
return np.std(volatilities_in_window, ddof = 1) * np.sqrt(num_trading_days_per_year), prices[0] / prices[window_size] - 1.0, prices[0]
def get_cookie():
url = 'https://finance.yahoo.com/quote/VOO/history?p=VOO'
r = requests.get(url)
txt = r.text
cookie = r.cookies['B']
pattern = re.compile('.*"CrumbStore":\{"crumb":"(?P<crumb>[^"]+)"\}')
for line in txt.splitlines():
m = pattern.match(line)
if m is not None:
crumb = m.groupdict()['crumb']
return cookie,crumb
def get_data():
#cookie,crumb=get_cookie()
cookie='9mev4idf68vgk&b=3&s=g9'
crumb='Xpr8Z7BQn4W'
volatilities = []
performances = []
current_prices = []
sum_inverse_volatility = 0.0
for symbol in symbols:
volatility, performance, current_price = get_volatility_and_performance(symbol,cookie,crumb)
sum_inverse_volatility += 1 / volatility
volatilities.append(volatility)
performances.append(performance)
current_prices.append(current_price)
alpha=1/(np.array(volatilities) * sum_inverse_volatility)
print ("Portfolio: {}, as of {} (window size is {} days)".format(str(symbols), date.today().strftime('%Y-%m-%d'), window_size))
for i in range(len(symbols)):
print ('{} allocation ratio: {:.2f}% (anualized volatility: {:.2f}%, performance: {:.2f}%)'.format(symbols[i], 100*(alpha[i]), float(volatilities[i] * 100), float(performances[i] * 100)))
return alpha,current_prices
def create_model(epsilon=0.01):
alpha[0]/alpha[1]
data={}
data['constraint_coeffs']=[
[current_prices[0],-(epsilon+alpha[0]/alpha[1])*current_prices[1],current_prices[0],-(epsilon+alpha[0]/alpha[1])*current_prices[1]],
[current_prices[0],-(alpha[0]/alpha[1]-epsilon)*current_prices[1],current_prices[0],-(alpha[0]/alpha[1]-epsilon)*current_prices[1]],
[current_prices[0],current_prices[1],current_prices[0],current_prices[1]],
[current_prices[0],current_prices[1],0,0],
[0,0,current_prices[0],current_prices[1]],
[1,0,0,0],
[0,1,0,0],
[1,1,1,1]
]
data['lb']=[-np.inf, 0,0,0,0,N_Tax_T,N_Tax_U,1]
data['ub']=[0, np.inf,S,S_Tax,S_IRA,np.inf,np.inf,np.inf]
data['obj_coeffs']=[current_prices[0],current_prices[1],current_prices[0],current_prices[1]]
data['xub']=[np.floor(S_Tax/current_prices[0]),np.floor(S_Tax/current_prices[1]),np.floor(S_IRA/current_prices[0]),np.floor(S_IRA/current_prices[1])]
data['num_vars']=len(data['obj_coeffs'])
data['num_constraints']=len(data['constraint_coeffs'])
return data
def findsol(epsilon=0.01):
data = create_model(epsilon)
solver = pywraplp.Solver.CreateSolver('CBC')
x={}
for j in range(data['num_vars']):
x[j] = solver.IntVar(0, data['xub'][j], 'x[%i]' % j)
for i in range(data['num_constraints']):
constraint = solver.RowConstraint(data['lb'][i], data['ub'][i], '')
for j in range(data['num_vars']):
constraint.SetCoefficient(x[j], data['constraint_coeffs'][i][j])
objective = solver.Objective()
for j in range(data['num_vars']):
objective.SetCoefficient(x[j], data['obj_coeffs'][j])
objective.SetMaximization()
status = solver.Solve()
if status==pywraplp.Solver.OPTIMAL:
sol=[x[i].solution_value() for i in range(4)]
else:
sol=[0,0,0,0]
return sol,status
alpha,current_prices=get_data()
N_Tax_T=float(input("Current shares of "+symbols[0]+" in taxable: "))
N_Tax_U=float(input("Current shares of "+symbols[1]+" in taxable: "))
Tax_C=float(input("Current cash in taxable: "))
N_IRA_T=float(input("Current shares of "+symbols[0]+" in IRA: "))
N_IRA_U=float(input("Current shares of "+symbols[1]+" in IRA: "))
IRA_C=float(input("Current cash in IRA: "))
Tax_T=N_Tax_T*current_prices[0]
Tax_U=N_Tax_U*current_prices[1]
IRA_T=N_IRA_T*current_prices[0]
IRA_U=N_IRA_U*current_prices[1]
S_Tax=Tax_T+Tax_U+Tax_C
S_IRA=IRA_T+IRA_U+IRA_C
S=S_Tax+S_IRA
epsilon=0.01
sol,status=findsol(epsilon)
while status != pywraplp.Solver.OPTIMAL:
epsilon=epsilon+0.01
sol,status=findsol(epsilon)
N_Tax_T2,N_Tax_U2,N_IRA_T2,N_IRA_U2=sol
print('-'*10+'result'+'-'*10)
Tax_C2=S_Tax-N_Tax_T2*current_prices[0]-N_Tax_U2*current_prices[1]
IRA_C2=S_IRA-N_IRA_T2*current_prices[0]-N_IRA_U2*current_prices[1]
S_T2=(N_Tax_T2+N_IRA_T2)*current_prices[0]
S_U2=(N_Tax_U2+N_IRA_U2)*current_prices[1]
print('Cash in Taxable %f' % Tax_C2)
print('Cash in IRA %f' % IRA_C2)
print('Achievable balance of TMF/UPRO: ({:.2f}%/{:.2f}%), target ({:.2f}%/{:.2f}%)'.format(100*S_T2/(S_T2+S_U2),100*S_U2/(S_T2+S_U2),100*alpha[0],100*alpha[1]))
print('-'*10+'action'+'-'*10)
print(('buy'*(N_Tax_T2-N_Tax_T>=0)+'sell'*(N_Tax_T2-N_Tax_T<0))+' TMF in Taxable: '+str(int(abs(N_Tax_T2-N_Tax_T)))+' at price '+str(current_prices[0]))
print(('buy'*(N_Tax_U2-N_Tax_U>=0)+'sell'*(N_Tax_U2-N_Tax_U<0))+' UPRO in Taxable: '+str(int(abs(N_Tax_U2-N_Tax_U)))+' at price '+str(current_prices[1]))
print(('buy'*(N_IRA_T2-N_IRA_T>=0)+'sell'*(N_IRA_T2-N_IRA_T<0))+' TMF in IRA: '+str(int(abs(N_IRA_T2-N_IRA_T)))+' at price '+str(current_prices[0]))
print(('buy'*(N_IRA_U2-N_IRA_U>=0)+'sell'*(N_IRA_U2-N_IRA_U<0))+' UPRO in IRA: '+str(int(abs(N_IRA_U2-N_IRA_U)))+' at price '+str(current_prices[1]))
| 2.578125 | 3 |
models/admin_control.py | chaoannricardo/NTU_CARDO_Database | 1 | 12786665 |
# -*- coding: utf8 -*-
from time import sleep as t_sleep
import configuration as conf
from models import data_processing, database_management, file_management
import pymysql
from views import view_CLI
def admin_control():
print("【管理員模式】")
print("0. 產生主表(請使用專用表格)")
command = input("# 請輸入您所需要的功能,或輸入'exit'返回主選單: ")
if command == 'exit':
print("# 返回主選單")
t_sleep(1)
elif command == "0":
# "C:\Users\ricardo\Desktop\Data\0311_藍天百腦匯報名清單(登陸出席).csv"
while True:
account = input("# 請輸入帳號: ")
password = input("# 請輸入密碼: ")
try:
config = conf.get_config(account, password)
# 身分驗證
print('# 登入中....')
conn = database_management.pymysql_connect(**config)
print("# 登入成功,歡迎回來", account, '\n\n')
t_sleep(1)
break
except pymysql.err.OperationalError:
print("# 您輸入的帳號或密碼錯誤,請再輸入一次。\n\n")
# 12. 【活動結束後資料建檔】「已登記出席統計表」生成「計算完成統計表」並「輸入資料庫」"
# "C:\Users\ricardo\Desktop\Data\0311_藍天百腦匯報名清單(登陸出席).csv"
# Produce csv file after processing
path, sem, semester_first, semester_second, fc, sc, date = view_CLI.get_information("10")
file_source = file_management.File(path, sem, semester_first, semester_second, fc, sc, date)
file_source.get_file()
data_source = data_processing.Data(file_source.year,
file_source.semester,
file_source.file_path,
file_source.first_cat,
file_source.second_cat)
data, produced_df_path = data_source.data_processing()
file_management.remove_temp()
print('# 成功生成CSV')
print('# 開始將生成csv輸入資料庫...')
# set name of the table
db_connection = database_management.DataConnection(data, config, fc, sc, date)
# create new table for the data
db_connection.create_table("主資料表")
'''
To tackle 'The MySQL server is running with the --secure-file-priv option so it cannot execute this statement' error
reference: https://blog.csdn.net/fdipzone/article/details/78634992
'''
# insert data into mysql table
db_connection.insert_table("主資料表")
db_connection.create_table("黑名單統計表")
db_connection.insert_table("黑名單統計表")
print("# 資料輸入資料庫成功,返回主選單")
t_sleep(1)
file_management.remove_temp()
if __name__ == '__main__':
admin_control()
| 2.328125 | 2 |
papers/CS-F-LTR/src/decision_tree_semi.py | mindspore-ai/contrib | 2 | 12786666 |
"""[summary]
"""
import pickle
import os
import numpy as np
from sklearn.tree import DecisionTreeClassifier
from utils import evaluation
from scipy.stats import mode
class DecisionTreeSemi:
"""[summary]
"""
def __init__(self, train_relevance_labels, train_features,
test_relevance_labels, test_features, test_query_ids, train_features_u):
"""[summary]
Args:
train_relevance_labels ([type]): [description]
train_features ([type]): [description]
test_relevance_labels ([type]): [description]
test_features ([type]): [description]
test_query_ids ([type]): [description]
train_features_u ([type]): [description]
"""
self.y_labeled2 = train_relevance_labels
self.x_labeled = train_features
self.x_unlabeled = train_features_u
self.test_labels = test_relevance_labels
self.test_features = test_features
self.test_ids = test_query_ids
x = self.x_labeled
y = self.y_labeled2.reshape(-1, 1)
x_y = np.concatenate((x, y), axis=1)
np.random.seed(1)
np.random.shuffle(x_y)
self.x_labeled = x_y[:, :-1]
self.y_labeled2 = x_y[:, -1].reshape(-1,)
def fit(self, fed_num, file_path):
"""[summary]
Args:
fed_num ([type]): [description]
file_path ([type]): [description]
"""
clfs = []
for i in range(fed_num):
clfs.append(
pickle.load(
open(
os.path.join(
file_path,
"decision_tree%d" %
i),
"rb")))
res = np.zeros([fed_num, len(self.x_unlabeled)])
for i in range(fed_num):
res[i] = clfs[i].predict(self.x_unlabeled)
# for i in range(len(self.x_unlabeled)):
# res[i] = res[i] // fed_num
res = mode(res)[0][0]
print(res)
x_aug = np.concatenate((self.x_labeled, self.x_unlabeled))
y_aug = np.concatenate((self.y_labeled2, res))
clf = DecisionTreeClassifier().fit(x_aug, y_aug)
result = clf.predict(self.test_features)
# avg_err, avg_ndcg, avg_full_ndcg, avg_map, avg_auc = \
_, _, _, _, _ = \
evaluation(
result,
self.test_labels,
self.test_ids,
self.test_features)
# pickle.dump(clf, open(os.path.join(file_path, "decision_tree%d" % fed_id), "wb"))
| 2.515625 | 3 |
pennylane/templates/subroutines/hardware_efficient.py | pearcandy/pennylane | 0 | 12786667 | '''
hardware_efficient.py
This code is distributed under the constitution of GNU-GPL.
(c) PearCandy
Log of hardware_efficient
2021/01/06 Released by PearCandy
'''
#coding:utf-8
#-------------------------------------------------------------
from pennylane import numpy as np
from pennylane.templates import template #import the decorator
from pennylane.ops import CNOT, RX, RY, RZ, Hadamard, CZ
@template
def HardwareEfficient(weights, wires, depth=1):
for d in range(depth):
for i in range(len(wires)):
RY(weights[2 * i + 2 * len(wires) * d], wires=i)
RZ(weights[2 * i + 1 + 2 * len(wires) * d], wires=i)
for i in range(len(wires) // 2):
CZ(wires=[2 * i, 2 * i + 1])
for i in range(len(wires) // 2 - 1):
CZ(wires=[2 * i + 1, 2 * i + 2])
for i in range(len(wires)):
RY(weights[2 * i + 2 * len(wires) * depth], wires=i)
RZ(weights[2 * i + 1 + 2 * len(wires) * depth], wires=i)
| 2.296875 | 2 |
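A minimal usage sketch for the ansatz above (assumes HardwareEfficient is importable and a standard PennyLane simulator backend); following the weight indexing in the template, it expects 2 * n_wires * (depth + 1) parameters.

import pennylane as qml
from pennylane import numpy as np

n_wires, depth = 2, 1
dev = qml.device("default.qubit", wires=n_wires)

@qml.qnode(dev)
def circuit(weights):
    HardwareEfficient(weights, wires=range(n_wires), depth=depth)
    return qml.expval(qml.PauliZ(0))

weights = np.random.uniform(0, np.pi, 2 * n_wires * (depth + 1))
print(circuit(weights))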
ex106.py | raphael-abrantes/exercises-python | 0 | 12786668 |
import sys, os
caminho = os.path.dirname(__file__)
sys.path.append(caminho[:caminho.find('exs')])
from time import sleep
def printlin(txt):
print(f'~'*int(len(txt) + 4),flush=False)
print(f' {txt} ',flush=False)
print(f'~'*int(len(txt) + 4), flush=False)
def pyhelp():
while True:
printlin('SISTEMA DE AJUDA PyHELP')
pesquisa = str(input('Função ou Biblioteca > ')).strip().lower()
if pesquisa.upper() == 'END':
break
printlin(f'Acessando o Manual de "{pesquisa}"')
sleep(1)
print(f'{help(pesquisa)}')
sleep(2)
printlin(f'ATÉ LOGO')
#PROGRAMA
pyhelp()
| 3.0625 | 3 |
paradigm/Selection.py | Paradigm-shift-AI/paradigm-brain | 0 | 12786669 |
import random
import operator
class Selection:
def __init__(self, preprocessed_question, list_of_questions, token=False):
"""
list_of_questions:
[
questionTypeID: [<question_object>],
]
"""
self.preprocessed_question = preprocessed_question
self.list_of_questions = list_of_questions
self.token = token
self.final_question = []
def __get_proper_noun(self):
if self.token:
return self.preprocessed_question["tag-intersection"]
jk = set()
for j in self.preprocessed_question["processed-sentences"]:
if "NNP" in j:
for k in j["NNP"]:
jk.add(k)
return list(jk)
def __select_fill_in_blanks(self):
for i in self.list_of_questions[1]:
if i["answer"] in self.__get_proper_noun():
insert = True
for k in self.final_question:
if i["question"] == k["question"]:
insert = False
break
if insert:
self.final_question.append(i)
def __select_true_or_false(self, type):
if self.token:
question_rank = {}
for i in self.list_of_questions[type]:
rating = 0
for j in self.preprocessed_question["tag-intersection"]:
if str(j) in i["question"]:
rating += 1
question_rank[i["question"]] = rating
sorted_tuple = sorted(question_rank.items(), key=operator.itemgetter(1), reverse=True)
for i in sorted_tuple[0:3]:
for j in self.list_of_questions[type]:
if i[0] == j["question"]:
insert = True
for k in self.final_question:
if j["question"] == k["question"]:
insert = False
break
if insert:
j["question"] = str(j["question"])
j["answer"] = str(j["answer"])
self.final_question.append(j)
else:
for i in self.list_of_questions[type]:
for j in self.preprocessed_question["tag"][0:5]:
if j in i["question"]:
j["question"] = str(j["question"])
j["answer"] = str(j["answer"])
self.final_question.append(i)
def __select_multiple_correct(self):
for i in self.list_of_questions[3]:
if i["answer1"] in self.__get_proper_noun():
if i["answer2"] in self.__get_proper_noun():
insert = True
for k in self.final_question:
if i["question"] == k["question"]:
insert = False
break
if insert:
self.final_question.append(i)
def __select_relevant_question(self):
def f(questionType):
return {
1: self.__select_fill_in_blanks(),
2: self.__select_true_or_false(2),
3: self.__select_multiple_correct(),
4: self.__select_true_or_false(4)
}[questionType]
for questionType in [2, 3, 4, 5]:
if questionType in self.list_of_questions:
f(questionType)
if len(self.final_question) > 2:
random.shuffle(self.final_question)
self.final_question = self.final_question[0:3]
def get_final_question(self):
self.__select_relevant_question()
return self.final_question
| 3.09375 | 3 |
pi7db/tests/csvtest/test.py | shivjeetbhullar/pi7db | 4 | 12786670 |
import os
import pandas as pd
class tabledb:
def __init__(self,db_name):
self.db_name = db_name
if not os.path.exists(db_name):os.mkdir(db_name)
def create_table(self,**kwargs):
if 'name' in kwargs and 'colums' in kwargs and isinstance(kwargs['name'],str) and isinstance(kwargs['colums'],list):
file_path = os.path.join(self.db_name,f"{kwargs['name']}.csv");kwargs['colums'].insert(0,'un_id')
if not os.path.exists(file_path):
df = pd.DataFrame(columns = kwargs['colums'])
df.to_csv(file_path,index=False, sep=',',encoding='utf-8')
else:
return "PATH ALREADY EXIST"
else:
return "NOT PROPER METHOD"
def re_config_table(self,**kwargs):
if 'name' in kwargs:
if isinstance(kwargs['name'],dict):
file_path = os.path.join(self.db_name,f"{list(kwargs['name'])[0]}.csv")
if os.path.exists(file_path):os.rename(file_path,os.path.join(self.db_name,f"{kwargs['name'][list(kwargs['name'])[0]]}.csv"))
if isinstance(kwargs['name'],str):
file_path = os.path.join(self.db_name,f"{kwargs['name']}.csv");df=pd.read_csv(file_path)
if 'colums' in kwargs and isinstance(kwargs['colums'],dict):
df = df.rename(kwargs['colums'], axis='columns')
df.to_csv(file_path,index=False,mode='w', sep=',',encoding='utf-8')
else:return "TABLE NOT FOUND"
db = tabledb('shivjeet')
#db.create_table(name="yes",colums=["naam","no","yo"])
db.re_config_table(name="yes",colums={"NAME":"Name","ADM-NUMBER":"Admission-no","YEAR":"Year"})
| 2.90625 | 3 |
ftpclient.py | ryanshim/cpsc558-minimal-ftp | 0 | 12786671 |
""" Simple implementation of a FTP client program used for pedagogical
purposes. Current commands supported:
get <filename>: retrieve the file specified by filename.
put <filename>: send the file to the server specified by filename.
cd <path>: change the current working directory to the specified path.
    ls: list the files in the current working directory on the server.
    pwd: print the current working directory on the server.
"""
import socket
import protocol
import argparse
import subprocess
import hashlib
class FTPClient:
def __init__(self, host, port):
""" Initializes the client socket for command connection and attempts to
connect to the server specified by the host and port.
@param host: server ip addr
@param port: port to communicate on
"""
self.host = host
self.port = port
self.client_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to server and start listener
try:
self.connect((self.host, self.port))
self.start()
except socket.error as e:
print(e) # use logging later
def __del__(self):
self.client_sock.close()
def connect(self, server):
""" Establish a connection with the client socket
@param server: tuple that contains the host IP and port.
"""
self.client_sock.connect(server)
def start(self):
""" Main driver of the FTP client, which continuously parses any
user args and calls the necessary member functions.
"""
while True:
tokens = self.parse()
cmd = tokens[0]
if cmd == 'put' and len(tokens) == 2:
filename = tokens[1]
if self.is_valid_file(filename):
protocol.send_msg(self.client_sock, cmd.encode())
data_port = protocol.recv_msg(self.client_sock).decode()
self.send_file(filename, int(data_port))
else:
print("File does not exist")
elif cmd == 'get' and len(tokens) == 2:
filename = tokens[1]
protocol.send_msg(self.client_sock, cmd.encode())
protocol.send_msg(self.client_sock, filename.encode())
self.recv_file()
elif cmd == 'ls' and len(tokens) == 1:
protocol.send_msg(self.client_sock, cmd.encode())
self.list_files()
elif cmd == 'cd' and len(tokens) == 2:
path = tokens[1]
protocol.send_msg(self.client_sock, cmd.encode())
protocol.send_msg(self.client_sock, path.encode())
elif cmd == 'pwd' and len(tokens) == 1:
protocol.send_msg(self.client_sock, cmd.encode())
self.get_pwd()
elif cmd == 'exit':
protocol.send_msg(self.client_sock, cmd.encode())
self.client_sock.close()
break
def parse(self):
""" Asks for user input and parses the command to extract tokens.
"""
tokens = input(">>> ").split(' ')
return tokens
def get_pwd(self):
""" Receives the output of cwd from the server.
"""
ephem_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ephem_sock.bind(('', 0))
ephem_sock.listen(1)
ephem_name = ephem_sock.getsockname()
protocol.send_msg(self.client_sock, str(ephem_name[1]).encode())
conn, addr = ephem_sock.accept()
pwd_output = protocol.recv_msg(conn).decode()
print(pwd_output)
conn.close()
ephem_sock.close()
def list_files(self):
""" Receives the output of ls in the cwd from the server.
"""
# Create an ephemeral socket
ephem_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ephem_sock.bind(('', 0))
ephem_sock.listen(1)
# Send the ephemeral port number to server
ephem_name = ephem_sock.getsockname()
protocol.send_msg(self.client_sock, str(ephem_name[1]).encode())
# Accept any incoming connections on the ephemeral socket
conn, addr = ephem_sock.accept()
# Receive the ls output from server
ls_output = protocol.recv_msg(conn).decode()
print(ls_output)
conn.close() # close the ephem socket conn
ephem_sock.close()
def send_file(self, filename, ephem_port):
""" Create an ephemeral socket and send file.
@param filename: path to the file to send.
"""
data = open(filename, 'rb').read()
ephem_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ephem_sock.connect((self.host, ephem_port))
print('Sending {} to {}'.format(filename, self.host))
try:
protocol.send_msg(ephem_sock, filename.encode())
protocol.send_msg(ephem_sock, data)
# send md5 hash
md5_send = hashlib.md5(data).hexdigest()
protocol.send_msg(ephem_sock, md5_send.encode())
except Exception as e:
print('Error: {}'.format(e))
print('Unsuccessful transfer of {}'.format(filename))
ephem_sock.close()
return
print('Transfer complete.')
ephem_sock.close()
def recv_file(self):
""" Receive a file through an ephemeral socket from the client.
"""
# Create ephemeral socket
ephem_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ephem_sock.bind(('', 0))
ephem_sock.listen(1)
# Send the ephemeral port number to server
ephem_name = ephem_sock.getsockname()
protocol.send_msg(self.client_sock, str(ephem_name[1]).encode())
# Accept any incoming connections on the ephemeral socket
conn, addr = ephem_sock.accept()
# Receive the file and store in cwd
filename = protocol.recv_msg(conn).decode()
if filename == 'NXFILE':
print('File does not exist.')
else:
print('Receiving {} from {}'.format(filename, self.host))
try:
filedata = protocol.recv_msg(conn).decode()
# Check file integrity
md5_recv = protocol.recv_msg(conn).decode()
md5_local = hashlib.md5(filedata.encode()).hexdigest()
if md5_recv != md5_local:
print('Corrupt file data during transfer.')
return
except Exception as e:
print(e)
print('Error receiving file {}'.format(filename))
return
with open(filename, 'w') as outfile:
outfile.write(filedata)
print('Transfer complete.')
# Close the ephemeral socket
conn.close()
ephem_sock.close()
def is_valid_file(self, filename):
""" Checks if the path is valid and if the file exists.
        @param filename: name of the file, including its path
"""
if subprocess.os.path.exists(filename):
return True
return False
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("ip")
args = parser.parse_args()
client = FTPClient(args.ip, 12000)
| 3.859375 | 4 |
scripts/modules/tests/test_reweighting.py | andrrizzi/tfep-revisited-2021 | 7 | 12786672 | #!/usr/bin/env python
# =============================================================================
# MODULE DOCSTRING
# =============================================================================
"""
Test objects and function in the module reweighting.
"""
# =============================================================================
# GLOBAL IMPORTS
# =============================================================================
import os
import tempfile
import numpy as np
from numpy.random import RandomState
import pint
from ..reweighting import DatasetReweighting
# =============================================================================
# GLOBAL VARIABLES
# =============================================================================
# Makes random test cases deterministic.
_random_state = RandomState(0)
_ureg = pint.UnitRegistry()
# =============================================================================
# TEST UTILITIES
# =============================================================================
class DummyStdReweighting(DatasetReweighting):
"""Dummy implementation of standard reweighting for testing."""
U0 = 0.0
def compute_potentials(self, batch_positions):
kJ_mol = _ureg.kJ / _ureg.mol
return (self.U0 + _random_state.rand(len(batch_positions))) * kJ_mol
def get_traj_info(self):
kJ_mol = _ureg.kJ / _ureg.mol
cvs = np.array(range(len(self.dataset)))
reference_potentials = _random_state.rand(len(cvs)) * kJ_mol
metad_rbias = np.zeros(len(cvs)) * kJ_mol
return cvs, reference_potentials, metad_rbias
# =============================================================================
# TESTS
# =============================================================================
def test_standard_reweighting_potentials_cache():
"""Test that DatasetReweighting caches and reuses the potentials correctly."""
import MDAnalysis.coordinates
from ..data import TrajectoryDataset, TrajectorySubset
def _get_potentials(dataset, file_path, u0, indices, batch_size, write_interval):
subset = TrajectorySubset(dataset, indices=indices)
DummyStdReweighting.U0 = u0
reweighting = DummyStdReweighting(
subset, n_bins=len(subset), temperature=300*_ureg.kelvin,
potentials_file_path=file_path)
return reweighting.compute_dataset_potentials(
batch_size=batch_size, write_interval=write_interval)
# Load the test PDB file.
pdb_file_path = os.path.join(os.path.dirname(__file__), 'data', 'chloro-fluoromethane.pdb')
with MDAnalysis.coordinates.PDB.PDBReader(pdb_file_path) as trajectory:
dataset = TrajectoryDataset(trajectory, return_batch_index=True)
# Cache the potentials in a temporary file.
with tempfile.TemporaryDirectory() as tmp_dir:
file_path = os.path.join(tmp_dir, 'potentials.npz')
# Cache a first value for the potentials of some of the frames.
u1 = 10
potentials1 = _get_potentials(dataset, file_path, u1, indices=[0, 2, 4],
batch_size=1, write_interval=2)
assert np.all((0 <= potentials1.magnitude - u1) & (potentials1.magnitude - u1 < 1))
# Check that what we have just computed does not get re-computed.
u2 = 20
potentials2 = _get_potentials(dataset, file_path, u2, indices=[1, 3, 4],
batch_size=5, write_interval=2)
assert potentials1[-1] == potentials2[-1]
assert np.all((0 <= potentials2.magnitude[:-1] - u2) & (potentials2.magnitude[:-1] - u2 < 1))
# The cache should be up-to-date.
times, potentials = DummyStdReweighting.load_cached_potentials_from_file(file_path)
assert not np.isnan(potentials).any()
| 2.34375 | 2 |
test/test_jump.py | mind-owner/Cyberbrain | 2,440 | 12786673 | from cyberbrain import Binding, InitialValue, Symbol
def test_jump(tracer, check_golden_file):
a = []
b = "b"
c = "c"
tracer.start()
if a: # POP_JUMP_IF_FALSE
pass # JUMP_FORWARD
else:
x = 1
if not a: # POP_JUMP_IF_TRUE
x = 2
x = a != b != c # JUMP_IF_FALSE_OR_POP
x = a == b or c # JUMP_IF_TRUE_OR_POP
# TODO: Test JUMP_ABSOLUTE. This requires loop instructions to be Implemented.
tracer.stop()
| 2.578125 | 3 |
Python/Essential things/testing.py | honchardev/Fun | 0 | 12786674 | <filename>Python/Essential things/testing.py<gh_stars>0
import unittest
class TestUM(unittest.TestCase):
def setUp(self):
"""This method executes BEFORE each test"""
pass
def tearDown(self):
"""This method executes AFTER each test"""
pass
"""
    @classmethod
    def setUpClass(cls):
        # This method executes once, BEFORE ALL tests
        print('Testing begins.')
    @classmethod
    def tearDownClass(cls):
        # This method executes once, AFTER ALL tests
        print('Testing complete.')
"""
def test_numbers_3_4(self):
self.assertEqual(3*4, 12)
def test_strings_a_3(self):
self.assertEqual('a'*3, 'aaa')
"""
List of different checks:
testAssertTrue | Invoke error, if argument != True
testFailUnless | (Outdated) Invoke error, if argument != True
testAssertFalse | Invoke error, if argument != False
testFailIf | (Outdated) Invoke error, if argument != False
testEqual | Check if two arguments are equal.
testEqualFail | (Outdated) Invoke error, if arguments are equal
testNotEqual | Check if two arguments aren't equal
testNotEqualFail | (Outdated) Invoke error, if arguments aren't equal
assertNotAlmostEqual | Compare two arguments with rounding. Invoke error if arguments are equal.
testNotAlmostEqual | (Outdated) Same as assertNotAlmostEqual
assertAlmostEqual | Compare two arguments with rounding. Invoke error if arguments aren't equal.
testAlmostEqual | (Outdated) Same as assertAlmostEqual
"""
if __name__ == '__main__':
unittest.main()
# Pretty interesting: http://www.drdobbs.com/testing/unit-testing-with-python/240165163
| 3.875 | 4 |
src/config.py | muzilli/azure-batch-ffmpeg | 0 | 12786675 | <reponame>muzilli/azure-batch-ffmpeg<filename>src/config.py
# -------------------------------------------------------------------------
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
# ----------------------------------------------------------------------------------
# The example companies, organizations, products, domain names,
# e-mail addresses, logos, people, places, and events depicted
# herein are fictitious. No association with any real company,
# organization, product, domain name, email address, logo, person,
# places, or events is intended or should be inferred.
# --------------------------------------------------------------------------
# Global constant variables (Azure Storage account/Batch details)
# import "config.py" in "batch_python_tutorial_ffmpeg.py"
# Update the Batch and Storage account credential strings below with the values
# unique to your accounts. These are used when constructing connection strings
# for the Batch and Storage client objects.
_BATCH_ACCOUNT_NAME = 'batrseus001' # Your batch account name
_BATCH_ACCOUNT_KEY = '<KEY>' # Your batch account key
_BATCH_ACCOUNT_URL = 'https://batrseus001.eastus.batch.azure.com' # Your batch account URL
_STORAGE_ACCOUNT_NAME = 'strseus001' # Your storage account name
_STORAGE_ACCOUNT_KEY = '<KEY>' # Your storage account key
_STANDARD_OUT_FILE_NAME = 'stdout.txt' # Standard Output file
_POOL_ID = 'LinuxFfmpegPool'
_DEDICATED_POOL_NODE_COUNT = 0
_LOW_PRIORITY_POOL_NODE_COUNT = 5
_POOL_VM_SIZE = 'STANDARD_A1_v2'
_JOB_ID = 'LinuxFfmpegJob'
| 1.445313 | 1 |
setup.py | inmagik/django-rest-admin | 15 | 12786676 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import django_rest_admin
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = django_rest_admin.__version__
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
os.system('python setup.py bdist_wheel upload')
sys.exit()
if sys.argv[-1] == 'tag':
print("Tagging the version on github:")
os.system("git tag -a %s -m 'version %s'" % (version, version))
os.system("git push --tags")
sys.exit()
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
def get_install_requires():
"""
parse requirements.txt, ignore links, exclude comments
"""
requirements = []
    for line in open('requirements.txt').readlines():
        line = line.strip()
        # skip to next iteration if comment, empty line or direct link
        if line.startswith('#') or line == '' or line.startswith('http') or line.startswith('git'):
            continue
        # add line to requirements
        requirements.append(line)
return requirements
setup(
name='django-rest-admin',
version=version,
description="""REST endpoints for administering django models.""",
long_description=readme + '\n\n' + history,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/inmagik/django-rest-admin',
packages=[
'django_rest_admin',
],
include_package_data=True,
install_requires=get_install_requires(),
license="BSD",
zip_safe=False,
keywords='django-rest-admin',
classifiers=[
'Development Status :: 3 - Alpha',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| 1.804688 | 2 |
screenshooter/__main__.py | guilherme-kenzo/auto-lovelace-screenshooter | 0 | 12786677 | from .client import ScreenshotClient
import schedule
import click
from .schedule import run_screenshot_every
@click.command()
@click.option("--xpath", type=click.STRING, required=True, help="The XPATH of the element to be screenshot.")
@click.option("--output-file", type=click.STRING, required=True, help="The path of the output screenshot.")
@click.option("--url", type=click.STRING, required=True, help="The URL of the page you want to shoot.")
@click.option("--driver-dir", type=click.STRING, required=False, default="Directory where the driver binary is located (or is to be downloaded).")
@click.option("--every", type=click.STRING, required=False, help="Period of time between updates (in minutes or hours")
def main(**kwargs):
run_screenshot_every(
kwargs.get("every"),
kwargs.get("url"),
xpath=kwargs.get("xpath"),
driver_dir=kwargs.get("driver_dir")
)
if __name__ == "__main__":
main() | 2.8125 | 3 |
sardine/lang/parser/objects.py | JavierLuna/sardine | 0 | 12786678 | class RepositoryDeclaration:
__slots__ = ('name', 'alias')
def __init__(self, name: str, alias: str):
self.name = name
self.alias = alias
def __repr__(self):
return f"<Repository {self.alias} ('{self.name}')>"
class StackDeclaration:
__slots__ = ('name', 'repository_name', 'alias', 'aliased_repository')
def __init__(self, stack_name: str, repository_name: str, alias: str, aliased_repository: bool):
self.name = stack_name
self.repository_name = repository_name
self.alias = alias
self.aliased_repository = aliased_repository
def __repr__(self):
return f"<Stack {self.alias} ('{self.repository_name}/{self.name}')"
| 3.140625 | 3 |
structural_patterns/flyweight_pattern/app.py | Stihotvor/python3_patterns | 0 | 12786679 | class Grade(object):
_instances = {}
def __new__(cls, percent):
percent = max(50, min(99, percent))
letter = 'FDCBA'[(percent - 50) // 10]
self = cls._instances.get(letter)
if self is None:
self = cls._instances[letter] = object.__new__(Grade)
self.letter = letter
return self
def __repr__(self):
return 'Grade {!r}'.format(self.letter)
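# Note (added for clarity): percents 90-99 all map to the letter 'A' and 100 is
# clamped to 99, so Grade(95) and Grade(100) below share one cached flyweight instance.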
print(Grade(55), Grade(85), Grade(95), Grade(100))
print(len(Grade._instances)) # number of instances
print(Grade(95) is Grade(100)) # ask for ‘A’ two more times
print(len(Grade._instances)) # number stayed the same? | 3.4375 | 3 |
experiments/ukf_baseball.py | VladPodilnyk/Kalman-and-Bayesian-Filters-in-Python | 12,315 | 12786680 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 8 09:55:24 2015
@author: rlabbe
"""
from math import radians, sin, cos, sqrt, exp, atan2
from numpy import array, asarray
from numpy.random import randn
import numpy as np
import math
import matplotlib.pyplot as plt
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.common import runge_kutta4
class BaseballPath(object):
def __init__(self, x0, y0, launch_angle_deg, velocity_ms,
noise=(1.0,1.0)):
""" Create 2D baseball path object
(x = distance from start point in ground plane,
y=height above ground)
x0,y0 initial position
launch_angle_deg angle ball is travelling respective to
ground plane
velocity_ms speeed of ball in meters/second
noise amount of noise to add to each position
in (x,y)
"""
omega = radians(launch_angle_deg)
self.v_x = velocity_ms * cos(omega)
self.v_y = velocity_ms * sin(omega)
self.x = x0
self.y = y0
self.noise = noise
def drag_force(self, velocity):
""" Returns the force on a baseball due to air drag at
the specified velocity. Units are SI
"""
B_m = 0.0039 + 0.0058 / (1. + exp((velocity-35.)/5.))
return B_m * velocity
def update(self, dt, vel_wind=0.):
""" compute the ball position based on the specified time
step and wind velocity. Returns (x,y) position tuple
"""
# Euler equations for x and y
self.x += self.v_x*dt
self.y += self.v_y*dt
# force due to air drag
v_x_wind = self.v_x - vel_wind
v = sqrt(v_x_wind**2 + self.v_y**2)
F = self.drag_force(v)
# Euler's equations for velocity
self.v_x = self.v_x - F*v_x_wind*dt
self.v_y = self.v_y - 9.81*dt - F*self.v_y*dt
return (self.x, self.y)
radar_pos = (100,0)
omega = 45.
def radar_sense(baseball, noise_rng, noise_brg):
x, y = baseball.x, baseball.y
rx, ry = radar_pos[0], radar_pos[1]
rng = ((x-rx)**2 + (y-ry)**2) ** .5
bearing = atan2(y-ry, x-rx)
rng += randn() * noise_rng
bearing += radians(randn() * noise_brg)
return (rng, bearing)
ball = BaseballPath(x0=0, y0=1, launch_angle_deg=45,
velocity_ms=60, noise=[0,0])
'''
xs = []
ys = []
dt = 0.05
y = 1
while y > 0:
x,y = ball.update(dt)
xs.append(x)
ys.append(y)
plt.plot(xs, ys)
plt.axis('equal')
plt.show()
'''
dt = 1/30.
def hx(x):
global radar_pos
dx = radar_pos[0] - x[0]
dy = radar_pos[1] - x[2]
rng = (dx*dx + dy*dy)**.5
bearing = atan2(-dy, -dx)
#print(x)
#print('hx:', rng, np.degrees(bearing))
return array([rng, bearing])
def fx(x, dt):
fx.ball.x = x[0]
fx.ball.y = x[2]
    fx.ball.v_x = x[1]
    fx.ball.v_y = x[3]
N = 10
ball_dt = dt/float(N)
for i in range(N):
fx.ball.update(ball_dt)
#print('fx', fx.ball.x, fx.ball.v_x, fx.ball.y, fx.ball.v_y)
return array([fx.ball.x, fx.ball.v_x, fx.ball.y, fx.ball.v_y])
fx.ball = BaseballPath(x0=0, y0=1, launch_angle_deg=45,
velocity_ms=60, noise=[0,0])
y = 1.
x = 0.
theta = 35. # launch angle
v0 = 50.
ball = BaseballPath(x0=x, y0=y, launch_angle_deg=theta,
velocity_ms=v0, noise=[.3,.3])
kf = UKF(dim_x=4, dim_z=2, dt=dt, hx=hx, fx=fx, kappa=0)
#kf.R *= r
kf.R[0,0] = 0.1
kf.R[1,1] = radians(0.2)
omega = radians(omega)
vx = cos(omega) * v0
vy = sin(omega) * v0
kf.x = array([x, vx, y, vy])
kf.R*= 0.01
#kf.R[1,1] = 0.01
kf.P *= 10
f1 = kf
t = 0
xs = []
ys = []
while y > 0:
t += dt
x,y = ball.update(dt)
z = radar_sense(ball, 0, 0)
#print('z', z)
#print('ball', ball.x, ball.v_x, ball.y, ball.v_y)
f1.predict()
f1.update(z)
xs.append(f1.x[0])
ys.append(f1.x[2])
f1.predict()
p1 = plt.scatter(x, y, color='r', marker='o', s=75, alpha=0.5)
p2, = plt.plot (xs, ys, lw=2, marker='o')
#p3, = plt.plot (xs2, ys2, lw=4)
#plt.legend([p1,p2, p3],
# ['Measurements', 'Kalman filter(R=0.5)', 'Kalman filter(R=10)'],
# loc='best', scatterpoints=1)
plt.show()
| 2.9375 | 3 |
tests/helpers.py | Pineirin/invenio-theme | 7 | 12786681 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest helpers."""
from __future__ import absolute_import, print_function
import os
import shutil
import tempfile
def make_fake_template(content=""):
"""Create fake template for testing.
:param content: File content.
    :returns: The temporary directory.
"""
temp_dir = tempfile.mkdtemp()
invenio_theme_dir = os.path.join(temp_dir, 'invenio_theme')
os.mkdir(invenio_theme_dir)
fake_file = open(os.path.join(invenio_theme_dir, 'fake.html'), 'w+')
fake_file.write(content)
fake_file.close()
return temp_dir
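# Illustrative usage (hypothetical content and app object): create a fake
# 'invenio_theme/fake.html' and point the test app's template search path at it.
# temp_dir = make_fake_template('{% block css %}{% endblock %}')
# app.jinja_loader.searchpath.append(temp_dir)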
| 1.734375 | 2 |
devspace/commands/render.py | d12y12/DevSpace | 0 | 12786682 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import devspace
from devspace.commands import DevSpaceCommand
from devspace.exceptions import UsageError
from devspace.utils.misc import walk_modules
from devspace.servers import DevSpaceServer
import inspect
def _get_server_from_module(module_name, server_name, project_setting):
for module in walk_modules(module_name):
for obj in vars(module).values():
if inspect.isclass(obj) and \
issubclass(obj, DevSpaceServer) and \
obj.__module__ == module.__name__ and \
not obj == DevSpaceServer and \
server_name == obj.type:
return obj(project_setting)
class Command(DevSpaceCommand):
requires_project = True
def syntax(self):
return "[Options]"
def short_desc(self):
return "Render servers"
def add_options(self, parser):
DevSpaceCommand.add_options(self, parser)
parser.add_option("--all", dest="render_all", action="store_true",
help="Render all servers")
parser.add_option("--server", dest="server_name",
help="Render server by its name")
parser.add_option("--host", dest="host", action="store_true",
help="Render project file host ip")
def run(self, args, opts):
if len(args) > 0:
raise UsageError()
if opts.render_all:
print("render all server")
return
if opts.host:
return
print("render_server")
# print(self.settings.attributes)
server_name = opts.server_name
servers = self.settings.get("servers")
if not servers or server_name not in servers.keys():
print("No servers found please check your project configuration file")
self.exitcode = 1
return
server = _get_server_from_module('devspace.servers', server_name, self.settings)
server.render()
server.update_docker_compose()
@property
def templates_dir(self):
return self.settings['TEMPLATES_DIR'] or \
os.path.join(devspace.__path__[0], 'templates')
| 2.46875 | 2 |
scaffold/generators/common.py | CaravelKit/saas-base | 189 | 12786683 | <reponame>CaravelKit/saas-base<filename>scaffold/generators/common.py
# Functions used by all the generators
import errno
import os
# Check if file and path exist, if not, create them. Then rewrite file or add content at the
# beginning, commenting the existing part.
def create_write_file(file_path, new_content, rewrite = False, comment_start = '<!--', comment_end = '-->',
ignore_existing_files = False):
    if os.path.exists(file_path):
        if ignore_existing_files:
            # Ignore existing file and return
            print('Ignore: ', file_path)
            return
        else:
            # Open for read/write so the existing content can be kept below
            file_param = 'r+'
    else:
        # The file does not exist yet, so create it
        file_param = 'w+'
if not os.path.exists(os.path.dirname(file_path)):
try:
os.makedirs(os.path.dirname(file_path))
except OSError as exc:
if exc.errno != errno.EEXIST:
raise Exception('Path cannot be created, please try again.')
with open(file_path, file_param) as output_file:
if not rewrite:
output_file.seek(0)
content = output_file.read()
content = content.replace(comment_start, '').replace(comment_end, '')
content = comment_start + content
content += comment_end
content = new_content + content
else:
content = new_content
output_file.seek(0)
output_file.truncate()
output_file.write(content)
output_file.close()
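# Illustrative usage (hypothetical paths and markers): prepend a freshly
# generated block while keeping the old content commented out below it, or
# rewrite the file entirely.
# create_write_file('out/index.html', '<p>new</p>')
# create_write_file('out/app.js', 'console.log(1);', rewrite=True,
#                   comment_start='/*', comment_end='*/')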
| 2.53125 | 3 |
src/authentication/mailchimp/__init__.py | pykulytsky/freelance-service | 0 | 12786684 | from authentication.mailchimp.client import AppMailchimp
from authentication.mailchimp.http import MailchimpHTTPException
from authentication.mailchimp.member import MailchimpMember
__all__ = [
AppMailchimp,
MailchimpMember,
MailchimpHTTPException,
]
| 1.382813 | 1 |
commands/room.py | pariahsoft/Dennis | 1 | 12786685 | <gh_stars>1-10
########################################
## Adventure Bot "Dennis" ##
## commands/room.py ##
## Copyright 2012-2013 PariahSoft LLC ##
########################################
## **********
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to
## deal in the Software without restriction, including without limitation the
## rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
## sell copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
## IN THE SOFTWARE.
## **********
################
# Room Command #
################
from helpers import *
from database import get, put
from database import escape as E
from help import C_HELP
from look import C_LOOK
def C_ROOM(S, DB, sender, args):
if len(args) == 0:
C_LOOK(S, DB, sender, [])
elif len(args) >= 3 and args[0].lower() == "set": # Modify the room.
roomid = getroom(DB, sender)
roominfo = roomstat(DB, roomid)
if roominfo["owner"] == sender or roominfo["locked"] == 0: # Do we have permission to edit the room?
if args[1].lower() == "name": # Set name.
if goodname(" ".join(args[2:])):
put(DB, "UPDATE rooms SET name='{0}' WHERE id='{1}'".format(E(" ".join(args[2:])), roomid))
send(S, sender, "Name updated.")
else:
send(S, sender, "Invalid name.")
elif args[1].lower() == "desc": # Set description.
if args[2].startswith("\\\\"): # Append for long description.
curr = get(DB, "SELECT desc FROM rooms WHERE id='{0}'".format(roomid))
newdesc = "{0}\n{1}".format(E(curr[0][0]), E(" ".join(args[2:])[2:]))
put(DB, "UPDATE rooms SET desc='{0}' WHERE id='{1}'".format(newdesc, roomid))
else:
put(DB, "UPDATE rooms SET desc='{0}' WHERE id='{1}'".format(E(" ".join(args[2:])), roomid))
send(S, sender, "Description updated.")
elif args[1].lower() == "lock": # Set lock flag.
if roominfo["owner"] == sender: # Do we have permission to lock the room?
if args[2].lower() in ["1", "true", "yes", "on"]:
put(DB, "UPDATE rooms SET locked='1' WHERE id='{0}'".format(roomid))
send(S, sender, "Room set to locked.")
else:
put(DB, "UPDATE rooms SET locked='0' WHERE id='{0}'".format(roomid))
send(S, sender, "Room set to unlocked.")
else:
send(S, sender, "Only the owner can lock or unlock a room.")
elif args[1].lower() == "owner": # Change room ownership.
if roominfo["owner"] == sender: # Do we currently own the room?
check = get(DB, "SELECT username FROM players WHERE username='{0}'".format(args[2].lower()))
if check:
put(DB, "UPDATE rooms SET owner='{0}' WHERE id='{1}'".format(args[2].lower(), roomid))
send(S, sender, "Room ownership given to {0}.".format(args[2].lower()))
else:
send(S, sender, "User \"{0}\" does not exist.".format(args[2].lower()))
else:
send(S, sender, "Only the owner can change ownership of a room.")
else:
C_HELP(S, DB, sender, ["room set"])
else:
send(S, sender, "The room is set to locked and you are not the owner.")
elif len(args) == 1 and args[0].lower() == "unlink": # Unlink the room.
roomid = getroom(DB, sender)
roominfo = roomstat(DB, roomid)
if roominfo["owner"] == sender: # Do we have permission to unlink the room?
rooms = get(DB, "SELECT exits,id FROM rooms") # Get list of exits from every room.
for n, room in enumerate(rooms): # Find and delete linked exits from rooms.
for exit in room[0]:
if room[0][exit] == roomid:
del rooms[n][0][exit]
for room in rooms: # Delete exits from database.
put(DB, "UPDATE rooms SET exits='{0}' WHERE id='{1}'".format(obj2str(room[0]), room[1]))
put(DB, "UPDATE rooms SET name='{0}' WHERE id='{1}'".format(roominfo["name"]+" (UNLINKED)", roomid)) # Mark room unlinked.
else:
send(S, sender, "Only the owner can unlink a room.")
elif args[0].lower() == "set":
C_HELP(S, DB, sender, ["room set"])
elif args[0].lower() == "unlink":
C_HELP(S, DB, sender, ["room unlink"])
else:
C_HELP(S, DB, sender, ["room"])
| 1.820313 | 2 |
to_html.py | ImportTaste/AIDScrapper | 5 | 12786686 | import os
import json
from pathlib import Path
from jinja2 import Environment, FileSystemLoader
from aids.app.settings import BASE_DIR
class toHtml:
def __init__(self):
self.env = Environment(loader=FileSystemLoader(BASE_DIR / 'templates'))
self.out_path = Path().cwd()
self.scen_out_file = 'scenario.json'
self.story_out_file = 'story.json'
def new_dir(self, folder):
if folder:
try:
os.mkdir(self.out_path / folder)
except FileExistsError:
pass
with open(BASE_DIR / 'static/style.css', 'r') as file:
style = file.read()
with open(self.out_path / f'{folder}/style.css', 'w') as file:
file.write(style)
def story_to_html(self, infile: str=None):
infile = infile or self.out_path / self.story_out_file
self.new_dir('stories')
with open(infile) as file:
stories = json.load(file)
story_templ = self.env.get_template('story.html')
story_number = {}
for story in reversed(stories):
if story['title']:
story['title'] = story['title'].replace('/', '-')
try:
story_number[story["title"]]
except KeyError:
# new story
story_number = {story["title"]: ""}
if not os.path.exists(
self.out_path /
f'stories/{story["title"]}{story_number[story["title"]]}.html'
):
htmlfile = open(self.out_path / f'stories/{story["title"]}.html', 'w')
else:
# story from same scenario
if story_number[story["title"]]:
story_number[story["title"]] += 1
htmlfile = open(
self.out_path /
f'stories/{story["title"]}{story_number[story["title"]]}.html',
'w'
)
else:
story_number[story["title"]] = 2
htmlfile = open(
self.out_path /
f'stories/{story["title"]}{story_number[story["title"]]}.html',
'w'
)
htmlfile.write(
story_templ.render({
'story': story,
'story_number': story_number
})
)
htmlfile.close()
index = self.env.get_template('index.html')
with open(self.out_path / 'story_index.html', 'w') as outfile:
outfile.write(
index.render(
{'objects': stories, 'content_type': 'stories'
})
)
print('Stories successfully formatted')
def scenario_to_html(self, infile: str=None):
infile = infile or self.out_path / self.scen_out_file
self.new_dir('scenarios')
with open(infile) as file:
scenarios = json.load(file)
subscen_paths = {}
parent_scen = []
for scenario in reversed(scenarios):
scenario['title'] = scenario['title'].replace('/', '-')
if 'isOption' not in scenario or not scenario['isOption']:
# base scenario, initializing the path
scenario['path'] = 'scenarios/'
with open(
self.out_path /
f'{scenario["path"] + scenario["title"]}.html',
'w'
) as file:
scen_templ = self.env.get_template('scenario.html')
file.write(
scen_templ.render({
'scenario': scenario,
'content_type': 'scenario'
})
)
parent_scen.append(scenario)
else:
scenario['path'] = subscen_paths[scenario['title']]
with open(
self.out_path /
f'{scenario["path"]}/{scenario["title"]}.html',
'w'
) as file:
scen_templ = self.env.get_template('scenario.html')
file.write(
scen_templ.render({
'scenario': scenario,
'content_type': 'scenario'
})
)
if "options" in scenario and any(scenario['options']):
for subscen in scenario['options']:
if subscen and "title" in subscen:
subscen['title'] = subscen['title'].replace('/', '-')
subscen['path'] = f'{scenario["path"]}{scenario["title"]}'
subscen_paths[subscen['title']] = subscen['path'] + '/'
self.new_dir(subscen['path'])
index = self.env.get_template('index.html')
with open(self.out_path / 'scen_index.html', 'w') as outfile:
outfile.write(
index.render(
{'objects': parent_scen, 'content_type': 'scenarios'
})
)
print('Scenarios successfully formatted')
| 2.40625 | 2 |
tests/test_utils.py | kirillskor/dedoc | 0 | 12786687 | import os
def get_full_path(path, file=__file__):
dir_path = os.path.dirname(file)
return os.path.join(dir_path, path)
| 2.5625 | 3 |
starter.py | osanchez42/Device42_HPSM_Sync | 0 | 12786688 | # -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf8')
import imp
import csv
import StringIO
from hpsm import HpsmApi
from device42 import Device42Doql
conf = imp.load_source('conf', 'conf')
device42 = {
'host': conf.d42_host,
'username': conf.d42_username,
'password': <PASSWORD>
}
hpsm = {
'host': conf.hpsm_host,
'protocol': conf.hpsm_protocol,
'port': conf.hpsm_port,
'username': conf.hpsm_username,
'password': <PASSWORD>,
'api_version': conf.hpsm_api_version
}
options = {
'debug': conf.opt_debug,
'dry_run': conf.opt_dry_run
}
hpsm_rest = HpsmApi(hpsm, options)
device42_doql = Device42Doql(device42, options)
class Integration:
def __init__(self):
pass
@staticmethod
def get_d42_devices():
f = StringIO.StringIO(device42_doql.get_devices().encode('utf-8'))
devices = []
for item in csv.DictReader(f, delimiter=','):
devices.append(item)
return devices
@staticmethod
def get_d42_hardware(hw_id):
f = StringIO.StringIO(device42_doql.get_hardware(hw_id).encode('utf-8'))
for item in csv.DictReader(f, delimiter=','):
return item
@staticmethod
def get_d42_vendor(vn_id):
f = StringIO.StringIO(device42_doql.get_vendor(vn_id).encode('utf-8'))
for item in csv.DictReader(f, delimiter=','):
return item
@staticmethod
def get_d42_os(os_id):
f = StringIO.StringIO(device42_doql.get_os(os_id).encode('utf-8'))
for item in csv.DictReader(f, delimiter=','):
return item
@staticmethod
def get_d42_subnet(subnet_id):
f = StringIO.StringIO(device42_doql.get_subnet(subnet_id).encode('utf-8'))
for item in csv.DictReader(f, delimiter=','):
return item
@staticmethod
def get_d42_macs(device_id):
f = StringIO.StringIO(device42_doql.get_macs(device_id).encode('utf-8'))
macs = []
for item in csv.DictReader(f, delimiter=','):
macs.append(item)
return macs
@staticmethod
def get_d42_ips(device_id):
f = StringIO.StringIO(device42_doql.get_ips(device_id).encode('utf-8'))
ips = []
for item in csv.DictReader(f, delimiter=','):
ips.append(item)
return ips
def main():
integration = Integration()
devices = integration.get_d42_devices()
hpsm_computers = hpsm_rest.get_d42_items('computers', 'computer')
hpsm_networkdevices = hpsm_rest.get_d42_items('networkdevices', 'networkcomponents')
hpsm_d42_computers = []
hpsm_d42_networkdevices = []
if 'content' in hpsm_computers:
hpsm_computers = hpsm_computers['content']
hpsm_d42_computers = {str(x['Computer']['device42.id']): x['Computer'] for x
in hpsm_computers if 'device42.id' in x['Computer']}
else:
hpsm_computers = []
if 'content' in hpsm_networkdevices:
hpsm_networkdevices = hpsm_networkdevices['content']
hpsm_d42_networkdevices = {str(x['NetworkDevice']['device42.id']): x['NetworkDevice'] for x
in hpsm_networkdevices if 'device42.id' in x['NetworkDevice']}
else:
hpsm_networkdevices = []
for device in devices:
hardware = integration.get_d42_hardware(device['hardware_fk']) if device['hardware_fk'] else None
vendor = integration.get_d42_vendor(hardware['vendor_fk']) if hardware and hardware['vendor_fk'] else None
os = integration.get_d42_os(device['os_fk']) if device['os_fk'] else None
os_vendor = integration.get_d42_vendor(os['vendor_fk']) if os and os['vendor_fk'] else None
macs = integration.get_d42_macs(device['device_pk'])
ips = integration.get_d42_ips(device['device_pk'])
if device['network_device'] == 't':
root = 'NetworkDevice'
endpoint = 'networkdevices'
ctype = 'networkcomponents'
hpsm_mapping = hpsm_d42_networkdevices
hpsm_devices = hpsm_networkdevices
else:
root = 'Computer'
endpoint = 'computers'
ctype = 'computer'
hpsm_mapping = hpsm_d42_computers
hpsm_devices = hpsm_computers
# check locked
if len(hpsm_mapping) > 0 and device['device_pk'] in hpsm_mapping:
device['name'] = hpsm_mapping[device['device_pk']]['logical.name']
else:
# check by serial if not locked
for hpsm_device in hpsm_devices:
hpsm_device = hpsm_device[root]
if 'serial.no.' in hpsm_device and hpsm_device['serial.no.'] == device['serial_no']:
device['name'] = hpsm_device['logical.name']
break
device['name'] = device['name'].replace('/', '_')
hpsm_rest.insert_item({
root: {
'logical.name': device['name'],
'machine.name': device['name'],
'istatus': 'In Service' if device['in_service'] == 't' else 'Missing',
'type': ctype,
'subtype': device['type'].upper(),
'assignment': 'Hardware',
'environment': device['service_level'],
'asset.tag': device['asset_no'],
'manufacturer': vendor['name'] if vendor and 'name' in vendor else '',
'model': hardware['name'] if hardware and 'name' in hardware else '',
'serial.no.': device['serial_no'],
'description': device['notes'],
'os.name': os['name'] if os else '',
'os.version': device['os_version_no'],
'os.manufacturer': os_vendor['name'] if os_vendor and 'name' in os_vendor else '',
'physical.mem.total': float(device['hard_disk_size']) * 1048576 if device['hard_disk_size'] else '',
'mac.address': macs[0]['hwaddress'] if len(macs) > 0 else '',
'ip.address': ips[0]['ip_address'] if len(ips) > 0 else '',
'network.name': integration.get_d42_subnet(ips[0]['subnet_fk'])['name'] if len(ips) > 0 else '',
'device42.id': device['device_pk']
}
}, root, endpoint)
if len(macs) > 1:
hpsm_rest.update_item({
root: {
'logical.name': device['name'],
'addlMacAddress': macs[1]['hwaddress']
}
}, root, endpoint)
if len(ips) > 1:
hpsm_rest.update_item({
root: {
'logical.name': device['name'],
'addlIPAddr': [{
'addlIPAddress': x['ip_address'],
'addlSubnet': integration.get_d42_subnet(x['subnet_fk'])['name']
} for x in ips[1:]]
}
}, root, endpoint)
if __name__ == '__main__':
main()
print '\n Finished'
| 2.34375 | 2 |
advances/lambda.py | tienduy-nguyen/python-learning | 0 | 12786689 | # lambda Parameter: Operation(parameter)
lambda x: x+1
# Equivalence
def plus(x):
return x +1
lambda x, y: x*y
# Equivalence
def mul(x, y):
return x*y
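# Both forms are called the same way, e.g. (lambda x, y: x*y)(3, 4) == mul(3, 4) == 12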
# Example
nums = [[10, 20, 11], [3, 9, 6], [8, 14, 3]]
# Sort by the second value in each sub-list, descending
sorted(nums, key=lambda x: x[1], reverse = True)
# Ex2: sort the dictionaries by the 'age' value, descending
dic = [{'name': 'Hana', 'age': 20}, {'name': 'John', 'age': 30}, {'name': 'Karin', 'age': 22}]
sorted(dic, key=lambda x: x['age'], reverse= True)
# Ex3: get the max value in a mixed list of numbers and numeric strings
num3 = ['1', '100', '111', '2', 2, 2.57]
max(num3, key=lambda x: int(x))
# lambda with filter
# Ex filter the odd value in tuple
lst = (10,22,37,41,100,123,29)
oddList = tuple(filter(lambda x: (x%2 !=0), lst))
print(oddList)
# lambda with map
lst2 = (10,20,30,40,50,60)
square_list = list(map(lambda x: x**2, lst2))
print(square_list) | 3.953125 | 4 |
aiokraken/rest/tests/strats/st_assets.py | asmodehn/aiokraken | 0 | 12786690 |
from aiokraken.model.tests.strats.st_asset import AssetStrategy
from aiokraken.rest.assets import Assets
import hypothesis.strategies as st
@st.composite
def st_assets(draw):
apl = draw(st.lists(elements=AssetStrategy(), max_size=5, unique_by=lambda x: x.restname))
return Assets(assets_as_dict={ap.restname: ap for ap in apl})
if __name__ == '__main__':
for n in range(1, 10):
print(repr(st_assets().example()))
| 2.34375 | 2 |
pyvo/utils/http.py | theresadower/pyvo | 29 | 12786691 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
HTTP utils
"""
import requests
from ..version import version
DEFAULT_USER_AGENT = 'python-pyvo/{}'.format(version)
def use_session(session):
"""
Return the session passed in, or create a default
session to use for this network request.
"""
if session:
return session
else:
return create_session()
def create_session():
"""
Create a new empty requests session with a pyvo
user agent.
"""
session = requests.Session()
session.headers['User-Agent'] = DEFAULT_USER_AGENT
return session
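# Illustrative usage (hypothetical URL): every request made through the returned
# session carries the pyvo User-Agent header set above.
# session = create_session()
# response = session.get('https://example.org/tap/tables')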
| 2.1875 | 2 |
canary/tasks/taskflow/driver.py | rackerlabs/canary | 5 | 12786692 | # Copyright (c) 2015 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from taskflow.jobs import backends as job_backends
from taskflow.persistence import backends as persistence_backends
from canary.tasks import base
from canary.openstack.common import log
LOG = log.getLogger(__name__)
TASKFLOW_OPTIONS = [
cfg.StrOpt('jobboard_backend_type', default='zookeeper',
help='Default jobboard backend type'),
cfg.StrOpt('persistent_backend_type', default='zookeeper',
help='Default jobboard persistent backend type'),
cfg.ListOpt('jobboard_backend_host', default=['localhost'],
help='Default jobboard backend server host'),
cfg.IntOpt('jobboard_backend_port', default=2181,
help='Default jobboard backend server port (e.g: ampq)'),
cfg.ListOpt('persistent_backend_host', default=['localhost'],
help='Default persistent backend server host'),
cfg.IntOpt('persistent_backend_port', default=2181,
help='Default persistent backend server port (e.g: ampq)'),
cfg.StrOpt('canary_worker_path',
default='/taskflow/jobs/canary_jobs',
help='Default Zookeeper path for canary jobs'),
cfg.StrOpt('canary_worker_jobboard',
default='canary_jobs',
help='Default jobboard name associated with canary worker jobs'),
]
TASKFLOW_GROUP = 'tasks:taskflow'
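# Illustrative oslo.config snippet (assumed values) matching the options above:
#   [tasks:taskflow]
#   jobboard_backend_type = zookeeper
#   jobboard_backend_host = zk1,zk2
#   jobboard_backend_port = 2181
#   persistent_backend_type = zookeeper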
class TaskFlowDistributedTaskDriver(base.Driver):
"""TaskFlow distributed task Driver."""
def __init__(self, conf):
super(TaskFlowDistributedTaskDriver, self).__init__(conf)
conf.register_opts(TASKFLOW_OPTIONS, group=TASKFLOW_GROUP)
self.distributed_task_conf = conf[TASKFLOW_GROUP]
job_backends_hosts = ','.join(['%s:%s' % (
host, self.distributed_task_conf.jobboard_backend_port)
for host in
self.distributed_task_conf.jobboard_backend_host])
self.jobboard_backend_conf_worker = {
# This topic could become more complicated
"board": self.distributed_task_conf.jobboard_backend_type,
"hosts": job_backends_hosts,
"path": self.distributed_task_conf.canary_worker_path,
}
persistence_backends_hosts = ','.join(['%s:%s' % (
            host, self.distributed_task_conf.persistent_backend_port)
            for host in
            self.distributed_task_conf.persistent_backend_host])
self.persistence_backend_conf = {
# This topic could become more complicated
"connection": self.distributed_task_conf.persistent_backend_type,
"hosts": persistence_backends_hosts,
}
def is_alive(self):
"""Health check for TaskFlow worker."""
return True
def persistence(self):
return persistence_backends.backend(
self.persistence_backend_conf.copy())
def job_board(self, conf, persistence, **kwargs):
return job_backends.backend(
self.distributed_task_conf.canary_worker_jobboard,
conf.copy(), persistence=persistence)
@property
def vendor_name(self):
"""storage name.
:returns 'TaskFlow'
"""
return 'TaskFlow'
| 1.59375 | 2 |
app.py | roh-kan/summarize-wordpress-blog | 0 | 12786693 | from flask import Flask, render_template, flash, request
from flask_bootstrap import Bootstrap
from flask_wtf import FlaskForm
from wtforms import SubmitField
from wtforms.fields.html5 import URLField, IntegerField
from wtforms.validators import DataRequired, Optional
from wtforms.widgets import html5 as h5widgets
from utils import fetch, get_query_url
app = Flask(__name__)
# Flask-WTF requires an encryption key - the string can be anything
app.config['SECRET_KEY'] = '<KEY>'
# Flask-Bootstrap requires this line
Bootstrap(app)
DEFAULT_MAX_PAGE = 10
@app.route('/', methods=['GET', 'POST'])
def home():
header = "Summarize WordPress Blog"
form = NameForm()
site_url = request.args.get('url')
base_url = request.base_url
if request.method == 'GET' and site_url != None:
number_of_pages = request.args.get('pages')
if number_of_pages != None:
try:
number_of_pages = int(number_of_pages)
except:
number_of_pages = 1
form.number_of_pages.data = number_of_pages
form.name.data = site_url
lines = fetch(site_url, number_of_pages)
query_url = get_query_url(base_url, site_url, number_of_pages)
return render_template('search.html', pairs=lines, the_title=header, form=form, query_url=query_url)
elif request.method == 'POST' and form.validate_on_submit():
site_url = form.name.data
number_of_pages = form.number_of_pages.data
if number_of_pages is None:
number_of_pages = DEFAULT_MAX_PAGE
lines = fetch(site_url, number_of_pages)
query_url = get_query_url(base_url, site_url, number_of_pages)
return render_template('search.html', pairs=lines, the_title=header, form=form, query_url=query_url)
return render_template('search.html', the_title=header, form=form)
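# Illustrative requests (hypothetical blog URL): the view accepts either
#   GET  /?url=https://www.xyz.com/blog&pages=3
# or a POST of the form below, which falls back to DEFAULT_MAX_PAGE pages.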
class NameForm(FlaskForm):
name = URLField('Enter a url for wordpress blog',
validators=[DataRequired()], description="e.g. https://www.xyz.com/articles or https://www.xyz.com/blogs")
number_of_pages = IntegerField('Enter number of pages you want to see',
widget=h5widgets.NumberInput(
min=1, max=100),
validators=[Optional()])
submit = SubmitField('Submit')
if __name__ == '__main__':
app.run()
| 2.390625 | 2 |
tests/test_inutils.py | buteco/inutils | 6 | 12786694 | import pytest
from inutils import chunkify
@pytest.mark.parametrize(
"iterable, expected",
(
([1], [[1]]),
([1, 2], [[1, 2]]),
([1, 2, 3], [[1, 2], [3]]),
([1, 2, 3, 4, 5], [[1, 2], [3, 4], [5]]),
(range(1, 7), [[1, 2], [3, 4], [5, 6]]),
),
)
def test_chunkify_size_2(iterable, expected):
assert list(chunkify(iterable, chunk_size=2)) == expected
@pytest.mark.parametrize(
"iterable, expected",
(
([1, 2], [[1, 2]]),
([1, 2, 3], [[1, 2, 3]]),
([1, 2, 3, 4, 5], [[1, 2, 3], [4, 5]]),
(range(1, 7), [[1, 2, 3], [4, 5, 6]]),
([1, 2, 3, 4, 5, 6, 7], [[1, 2, 3], [4, 5, 6], [7]]),
),
)
def test_chunkify_size_3(iterable, expected):
assert list(chunkify(iterable, chunk_size=3)) == expected
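# Minimal reference sketch (assumption; the real implementation lives in
# inutils) of the chunkify behaviour exercised above:
# def chunkify(iterable, chunk_size):
#     chunk = []
#     for item in iterable:
#         chunk.append(item)
#         if len(chunk) == chunk_size:
#             yield chunk
#             chunk = []
#     if chunk:
#         yield chunk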
| 2.453125 | 2 |
setup.py | lzrvch/pyspikelib | 3 | 12786695 | """pyspikelib: A set of tools for neuronal spiking data mining"""
import os
import re
import codecs
import setuptools
here = os.path.abspath(os.path.dirname(__file__))
with open('README.md', 'r') as fh:
LONG_DESCRIPTION = fh.read()
def read(*parts):
with codecs.open(os.path.join(here, *parts), 'r') as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError('Unable to find version string.')
DISTNAME = 'pyspikelib'
DESCRIPTION = 'pyspikelib: A set of tools for neuronal spiking data mining'
MAINTAINER = '<NAME>'
MAINTAINER_EMAIL = '<EMAIL>'
URL = 'https://github.com/vanyalzr/pyspikelib'
DOWNLOAD_URL = 'https://github.com/vanyalzr/pyspikelib'
VERSION = find_version(os.path.join(here, 'pyspikelib/version.py'))
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
INSTALL_REQUIRES = [
'addict',
'pathos',
'quantities',
'neo',
'matplotlib',
'numpy',
'seaborn',
'tqdm',
'pandas',
'elephant',
'tsfresh',
'scikit_learn',
'psutil'
]
EXTRAS_REQUIRE = {'tests': ['pytest'], 'data': ['fastparquet']}
setuptools.setup(
name=DISTNAME,
version=VERSION,
author=MAINTAINER,
author_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
url=URL,
download_url=DOWNLOAD_URL,
packages=setuptools.find_packages(exclude=['data', 'examples', 'experiments']),
classifiers=[
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
],
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
)
| 2.1875 | 2 |
myfuc/__init__.py | kamomehz/waveletCodingCNN | 0 | 12786696 | <filename>myfuc/__init__.py
from .makeData import *
from .makeImg import *
from .makeNet import *
from .makeWT import *
from .makePlot import *
from .trainScript import * | 1.304688 | 1 |
cellwars.py | mfontanini/cellwars-python | 0 | 12786697 | <filename>cellwars.py
'''
cellwars.py
===========
The cellwars python bot package.
'''
from enum import Enum
import sys
# Internal classes start here
class InputProcessingException(Exception):
pass
class CommandType(Enum):
INITIALIZE = 0
SPAWN = 1
DIE = 2
SET_CELL_PROPERTIES = 3
CONFLICTING_ACTIONS = 4
RUN_ROUND = 5
END_GAME = 6
class ActionType(Enum):
ATTACK = "ATTACK"
MOVE = "MOVE"
EXPLODE = "EXPLODE"
ROUND_END = "ROUND_END"
INITIALIZED = "INITIALIZED"
class Direction(Enum):
NORTH = (0, -1)
SOUTH = (0, 1)
EAST = (1, 0)
WEST = (-1, 0)
class Action:
def __init__(self, action_type, parameters):
self.action_type = action_type
self.parameters = parameters
def serialize(self):
return '{} {}'.format(self.action_type.value, ' '.join(map(str, self.parameters)))
@classmethod
def attack(cls, cell_id, position):
return Action(ActionType.ATTACK, [cell_id, position.x, position.y])
@classmethod
def move(cls, cell_id, position):
return Action(ActionType.MOVE, [cell_id, position.x, position.y])
@classmethod
def explode(cls, cell_id):
return Action(ActionType.EXPLODE, [cell_id])
@classmethod
def initialized(cls):
return Action(ActionType.INITIALIZED, [])
class Command:
def __init__(self, command_type, parameters):
self.command_type = command_type
self.parameters = parameters
def _get_callback(self, game_coordinator):
if self.command_type == CommandType.INITIALIZE:
return game_coordinator.initialize
elif self.command_type == CommandType.SPAWN:
return game_coordinator.spawn_cell
elif self.command_type == CommandType.DIE:
return game_coordinator.kill_cell
elif self.command_type == CommandType.SET_CELL_PROPERTIES:
return game_coordinator.set_cell_properties
elif self.command_type == CommandType.CONFLICTING_ACTIONS:
# Not handled yet
return lambda x, y: None
else:
raise Exception("Unexpected command")
def apply(self, game_coordinator):
callback = self._get_callback(game_coordinator)
callback(*self.parameters)
class Communicator:
REQUIRED_ARGS = {
CommandType.INITIALIZE: 5,
CommandType.SPAWN: 6,
CommandType.DIE: 1,
CommandType.SET_CELL_PROPERTIES: 5,
CommandType.CONFLICTING_ACTIONS: 2,
CommandType.RUN_ROUND: 0,
CommandType.END_GAME: 0,
}
def __init__(self, input_stream, output_stream):
self._input_stream = input_stream
self._output_stream = output_stream
def emit_action(self, action, flush=False):
self._output_stream.write('{}\n'.format(action.serialize()))
if flush:
self._output_stream.flush()
def end_round(self):
self.emit_action(Action(ActionType.ROUND_END, []))
self._output_stream.flush()
def read_command(self):
line = self._input_stream.readline().strip()
if not line:
return None
tokens = line.split(' ')
command_type, raw_parameters = (tokens[0], tokens[1:])
if command_type not in CommandType.__members__:
print(repr(command_type))
raise InputProcessingException(
"Unknown command {}".format(command_type)
)
command_type = CommandType[command_type]
expected_args = self.REQUIRED_ARGS[command_type]
if len(raw_parameters) != expected_args:
raise InputProcessingException(
"Found {} parameters for command {}, expected {}".format(
len(raw_parameters),
command_type,
expected_args
)
)
try:
parameters = list(map(int, raw_parameters))
except Exception as ex:
raise InputProcessingException("Non integer parameter found")
return Command(command_type, parameters)
class GameCoordinator:
LOOP_DONE_COMMANDS = set([
CommandType.RUN_ROUND,
CommandType.END_GAME
])
def __init__(self, communicator, bot):
self.communicator = communicator
self._bot = bot
self._state = None
def state(self):
return self._state
def initialize(self, width, height, player_team_id, my_column, enemy_column):
self._state = WorldState(width, height, player_team_id, my_column, enemy_column)
def spawn_cell(self, cell_id, x, y, health, team_id, age):
is_enemy = team_id != self._state.my_team_id
self._state.cells[cell_id] = Cell(
self,
cell_id,
Position(x, y),
health,
team_id,
age,
is_enemy
)
def set_cell_properties(self, cell_id, x, y, health, age):
cell = self._state.cells[cell_id]
cell.position = Position(x, y)
cell.health = health
cell.age = age
def kill_cell(self, cell_id):
del self._state.cells[cell_id]
def loop(self):
self.communicator.emit_action(Action.initialized(), flush=True)
while True:
command = self.communicator.read_command()
while command and command.command_type not in self.LOOP_DONE_COMMANDS:
command.apply(self)
command = self.communicator.read_command()
if not command or command.command_type == CommandType.END_GAME:
break
self._bot.run_round(self._state)
self.communicator.end_round()
# End of internal classes
class Position:
'''
Represents a position, identified by an x and y components.
'''
def __init__(self, x, y):
self.x = x
self.y = y
def translated_by_offset(self, x_offset, y_offset):
'''
Constructs a new position which is translated by the given x and y offsets.
Args:
x_offset (int): the x offset to apply.
y_offset (int): the y offset to apply.
Returns:
A new Position which is the result of translating the x and
y coordinates of this instance by the given offsets.
'''
return Position(self.x + x_offset, self.y + y_offset)
def translated_by_direction(self, direction):
'''
Constructs a new position which is translated by the given direction.
Args:
direction (Direction): the direction to translate this position to.
Returns:
A new Position which is the result of translating the x and
y coordinates of this instance by the given direction.
'''
        return self.translated_by_offset(*direction.value)
def distance(self, other_position):
'''
Returns the manhattan distance to the provided position.
Args:
other_position (Position): the position to get the distance to.
Returns:
int. The manhattan distance to the provided position.
'''
return abs(other_position.x - self.x) + abs(other_position.y - self.y)
def is_adjacent(self, other_position):
'''
Indicates whether this position is adjacent to the provided one.
A position is considered to be adjacent to another one iff the
manhattan distance to it is 1.
Args:
other_position (Position): the position to be checked.
Returns:
bool: true iff the position is adjacent to this one.
'''
return self.distance(other_position) == 1
def is_surrounding(self, other_position):
'''
        Indicates whether this position is surrounding the provided one.
A position is considered to be surrounding another one if they're at
most one step away in the X axis and at most one in the Y one.
Args:
other_position (Position): the position to be checked.
Returns:
            bool: true iff this position is surrounding the provided one.
'''
return self != other_position and \
abs(other_position.x - self.x) <= 1 and \
abs(other_position.y - self.y) <= 1
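    # Example (added for clarity): (1, 1) is adjacent to and surrounds (1, 2),
    # while it surrounds the diagonal neighbour (2, 2) but is not adjacent to
    # it, since their manhattan distance is 2.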
def __eq__(self, other_position):
'''
Checks positions for equality
'''
return (self.x, self.y) == (other_position.x, other_position.y)
def __ne__(self, other_position):
'''
Checks positions for equality
'''
return not (self == other_position)
def __hash__(self):
return hash((self.x, self.y))
class Cell:
'''
Represents a cell that either you or your enemy control.
'''
def __init__(self, coordinator, cell_id, position, health, team_id, age, is_enemy):
self._coordinator = coordinator
self.cell_id = cell_id
self.position = position
self.health = health
self.team_id = team_id
self.age = age
self.is_enemy = is_enemy
def _is_in_bounds(self, position):
return position.x >= 0 and position.x < self._coordinator.state().width and \
position.y >= 0 and position.y < self._coordinator.state().height
def _is_valid_position(self, position):
return self._is_in_bounds(position) and self.position.is_adjacent(position)
def can_move_to_position(self, target_position):
'''
Indicates whether this cell can move to this position.
This simply checks if the target position is valid distance-wise and
whether it is inside the map's bounds: it will not validate if there's
a cell in the target position.
Args:
target_position (Position): the position to be checked.
Returns:
bool: whether this cell can move to the target position.
'''
return self._is_in_bounds(target_position) and \
self.position.is_adjacent(target_position)
def can_move_in_direction(self, direction):
'''
Indicates whether this cell can move in the specified direction.
This simply checks if moving in the specified direction would be inside
the map's bounds: it will not validate if there's a cell in the target
position.
Args:
direction (Direction): the direction to be checked.
Returns:
bool: whether this cell can move in the specified direction.
'''
position = self.position.translated_by_offset(*direction.value)
return self.can_move_to_position(position)
def can_attack_position(self, target_position):
'''
Indicates whether this cell can attack this position.
This simply checks if the target position is valid distance-wise and
whether it is inside the map's bounds: it will not validate if there's
a cell in the target position.
Args:
target_position (Position): the position to be checked.
Returns:
bool: whether this cell can attack the target position.
'''
return self._is_in_bounds(target_position) and \
self.position.is_surrounding(target_position)
def can_attack_cell(self, target_cell):
'''
Indicates whether this cell can attack the target cell's position.
This checks if the target cell's position is within reach and whether
it is inside the map's bounds.
Args:
target_cell (Cell): the cell to be checked.
Returns:
bool: whether this cell can attack the target position.
'''
return self.can_attack_position(target_cell.position)
def attack_position(self, target_position):
'''
Causes this cell to attack the target position.
The position should be valid, as checked by a call
to Cell.can_attack_position.
Args:
target_position (Position): the position to be attacked.
'''
self._coordinator.communicator.emit_action(
Action.attack(self.cell_id, target_position)
)
def attack_cell(self, target_cell):
'''
Causes this cell to attack the target cell.
The cell should be within reach, as checked by a call
to Cell.can_attack_cell.
Args:
target_cell (Cell): the cell to be attacked.
'''
self._coordinator.communicator.emit_action(
Action.attack(self.cell_id, target_cell.position)
)
def explode(self):
'''
Causes this cell to explode.
Explosing causes this cell to die, inflicting damage in every position
in the map surrounding this cell's position.
'''
self._coordinator.communicator.emit_action(Action.explode(self.cell_id))
def move_to_position(self, target_position):
'''
Causes this cell to move to the target position.
The position should be valid, as checked by a call
to Cell.can_move_to_position.
Args:
target_position (Position): the position to move to.
'''
self._coordinator.communicator.emit_action(
Action.move(self.cell_id, target_position)
)
def move_in_direction(self, direction):
'''
Causes this cell to move in the specified direction.
The position should be valid, as checked by a call
to Cell.can_move_in_direction.
Args:
direction (Direction): the direction to move to.
'''
position = self.position.translated_by_offset(*direction.value)
self.move_to_position(position)
class WorldState:
'''
Represents the state of the world.
The world contains:
* Cells, both yours and the enemy ones.
* Map properties, like width and height.
'''
def __init__(self, width, height, my_team_id, my_column, enemy_column):
self.width = width
self.height = height
self.my_team_id = my_team_id
self.my_column = my_column
self.enemy_column = enemy_column
self.cells = {}
def my_cells(self):
'''
Retrieves the cells that belong to your team.
Returns:
(list[Cell]): The list of cells that belong to you.
'''
return list(filter(
lambda cell: cell.team_id == self.my_team_id,
self.cells.values()
))
def enemy_cells(self):
'''
Retrieves the cells that belong to the enemy.
Returns:
(list[Cell]): The list of cells that belong to the enemy.
'''
return list(filter(
lambda cell: cell.team_id != self.my_team_id,
self.cells.values()
))
def cell_by_id(self, cell_id):
'''
Finds a cell by id.
Args:
cell_id (int): the cell id to be looked up
Returns:
Cell: the cell, if found. Otherwise None.
'''
return self.cells.get(cell_id)
def my_starting_column(self):
'''
Gets the column in the grid in which your cells spawn. This is typically either 0
or the grid's width - 1
Returns:
int: The column in which your cells spawn
'''
return self.my_column
def enemy_starting_column(self):
'''
Gets the column in the grid in which the enemy cells spawn. This is typically either 0
or the grid's width - 1
Returns:
int: The column in which the enemy cells spawn
'''
return self.enemy_column
class BotBase:
'''
The base class for any bot.
Create a class named Bot in your code that inherits from this one
and override the BotBase.run_round method with whatever your bot's logic is.
'''
def __init__(self, *args):
super().__init__(*args)
def run_round(self, world_state):
'''
Runs a round of the game.
This method must be implemented by derived classes and should implement
the bot's logic. Use the world_state to find your cells and emit an
action for each of them.
Args:
world_state (WorldState): the current world state.
'''
        raise NotImplementedError("Bot.run_round not implemented")
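# A minimal example bot, as a sketch only (kept commented out so this file's
# behaviour is unchanged). It assumes the Direction enum exposes a member such
# as Direction.RIGHT; adjust the member name to the real enum values.
#
#   class Bot(BotBase):
#       def run_round(self, world_state):
#           for cell in world_state.my_cells():
#               if cell.can_move_in_direction(Direction.RIGHT):
#                   cell.move_in_direction(Direction.RIGHT)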
if __name__ == "__main__":
communicator = Communicator(sys.stdin, sys.stdout)
bot = Bot()
coordinator = GameCoordinator(communicator, bot)
coordinator.loop()
app/question/translation/question_translation_api_models.py | hmajid2301/banter-bus-management-api | 0 | 12786698 | <reponame>hmajid2301/banter-bus-management-api
from typing import Optional
from pydantic import BaseModel
from app.core.models import QuestionGroup
class QuestionTranslationIn(BaseModel):
content: str
class QuestionTranslationOut(BaseModel):
question_id: str
game_name: str
language_code: str
round_: Optional[str]
enabled: bool = True
content: str
group: Optional[QuestionGroup] = None
class Config:
allow_population_by_field_name = True
fields = {"round_": "round"}
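    # Illustrative only (values are made up): because of the alias configured above,
    # the "round_" field can be populated through the "round" key, e.g.
    #   QuestionTranslationOut(question_id="q1", game_name="fibbing_it",
    #                          language_code="en", content="example",
    #                          **{"round": "opinion"})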
chatbot_tutorial/routing.py | joseseb91/django-chat-bot | 0 | 12786699 | <reponame>joseseb91/django-chat-bot
from .wsgi import * # add this line to top of your code
from channels.auth import AuthMiddlewareStack
from channels.routing import ProtocolTypeRouter, URLRouter
from django.urls import path
from chatbot_tutorial import consumer
application = ProtocolTypeRouter({
'websocket': AuthMiddlewareStack(
URLRouter([
path('chat/stream/<str:room_name>/', consumer.ChatRoomConsumer.as_asgi()),
])
),
})
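# For reference (sketch only): with the route above, a browser client would open a
# WebSocket to ws://<host>/chat/stream/<room_name>/ to reach ChatRoomConsumer.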
platform/hwconf_data/zgm13/modules/BULBPWM_COLOR/BULBPWM_COLOR_behavior.py | lenloe1/v2.7 | 0 | 12786700 | from . import ExporterModel
from . import BULBPWM_COLOR_model
from . import RuntimeModel
class BULBPWM_COLOR(ExporterModel.Module):
def __init__(self, name=None):
if not name:
name = self.__class__.__name__
super(BULBPWM_COLOR, self).__init__(name, visible=True)
        self.model = BULBPWM_COLOR_model
TFRegression9.py | neuromorphs/telluride-decoding-toolbox | 3 | 12786701 | <reponame>neuromorphs/telluride-decoding-toolbox<filename>TFRegression9.py
# -*- coding: utf8 -*-
"""EEG Regression - Core TensorFlow implementation code.
July 2016
March 2017 - update Enea, integrated the queue.
"""
import gc
import math
import re
import resource
import sys
import time
import matplotlib.pyplot as plot
import numpy as np
import numpy.matlib
import scipy.io as sio
import tensorflow as tf
from tensorflow.contrib.rnn import BasicRNNCell
from tensorflow.contrib.rnn import GRUCell
from tensorflow.contrib.rnn import LSTMCell
from tensorflow.contrib.rnn import MultiRNNCell
from tensorflow.python.ops.rnn import dynamic_rnn
def LagData(data, lags):
"""Add temporal context to an array of observations. The observation data has
a size of N observations by D feature dimensions. This routine returns the new
data with context, and also array of good rows from the original data. This list
is important because depending on the desired temporal lags, some of the
original rows are dropped because there is not enough data.
Using negative lags grab data from earlier (higher) in the data array. While
positive lags are later (lower) in the data array.
"""
num_samples = data.shape[0] # Number of samples in input data
orig_features_count = data.shape[1]
new_features_count = orig_features_count * len(lags)
# We reshape the data into a array of size N*D x 1. And we enhance the lags
# to include all the feature dimensions which are stretched out in "time".
unwrapped_data = data.reshape((-1, 1))
expanded_lags = (lags * orig_features_count).reshape(1, -1).astype(int)
# Now expand the temporal lags array to include all the new feature dimensions
offsets = numpy.matlib.repmat(expanded_lags, orig_features_count, 1) + \
numpy.matlib.repmat(np.arange(orig_features_count).reshape(orig_features_count,
1), 1,
expanded_lags.shape[0])
offsets = offsets.T.reshape(1, -1)
indices = numpy.matlib.repmat(offsets, num_samples, 1)
hops = np.arange(0, num_samples).reshape(-1, 1) * orig_features_count
hops = numpy.matlib.repmat(hops, 1, hops.shape[1])
if 0:
print "Offsets for unwrapped features:", offsets
print "Offset indices:", indices
print "Starting hops:", hops
print "Indices to extract from original:", indices + hops
new_indices = offsets + hops
good_rows = numpy.where(numpy.all((new_indices >= 0) &
(new_indices < unwrapped_data.shape[0]),
axis=1))[0]
new_indices = new_indices[good_rows, :]
new_data = unwrapped_data[new_indices].reshape((-1, new_features_count))
return new_data, good_rows
def TestLagData():
"""Use some simple data to make sure that the LagData routine is working."""
input_data = np.arange(20).reshape((10, 2))
print "Input array:", input_data
(new_data, good_rows) = LagData(input_data, np.arange(-1, 2))
print "Output array:", new_data
print "Good rows:", good_rows
# Use TF to compute the Pearson Correlation of a pair of 1-dimensional vectors.
# From: https://en.wikipedia.org/wiki/Pearson_product-moment_correlation_coefficient
def PearsonCorrelationTF(x, y, prefix='pearson'):
"""Create a TF network that calculates the Pearson Correlation on two input
vectors. Returns a scalar tensor with the correlation [-1:1]."""
with tf.name_scope(prefix):
n = tf.to_float(tf.shape(x)[0])
x_sum = tf.reduce_sum(x)
y_sum = tf.reduce_sum(y)
xy_sum = tf.reduce_sum(tf.multiply(x, y))
x2_sum = tf.reduce_sum(tf.multiply(x, x))
y2_sum = tf.reduce_sum(tf.multiply(y, y))
r_num = tf.subtract(tf.multiply(n, xy_sum), tf.multiply(x_sum, y_sum))
r_den_x = tf.sqrt(tf.subtract(tf.multiply(n, x2_sum), tf.multiply(x_sum, x_sum)))
r_den_y = tf.sqrt(tf.subtract(tf.multiply(n, y2_sum), tf.multiply(y_sum, y_sum)))
r = tf.div(r_num, tf.multiply(r_den_x, r_den_y), name='r')
return r
def ComputePearsonCorrelation(x, y):
"""Compute the Pearson's correlation between two numpy vectors (1D only)"""
n = x.shape[0]
x_sum = np.sum(x)
y_sum = np.sum(y)
xy_sum = np.sum(x * y)
x2_sum = np.sum(x * x)
y2_sum = np.sum(y * y)
r_num = n * xy_sum - x_sum * y_sum
r_den_x = math.sqrt(n * x2_sum - x_sum * x_sum)
r_den_y = math.sqrt(n * y2_sum - y_sum * y_sum)
return r_num / (r_den_x * r_den_y)
# Code to check the Pearson Correlation calculation. Create random data,
# calculate its correlation, and output the data in a form that is easy to
# paste into Matlab. Also compute the correlation with numpy so we can compare.
# Values should be identical.
def TestPearsonCorrelation(n=15):
x = tf.to_float(tf.random_uniform([n], -10, 10, tf.int32))
y = tf.to_float(tf.random_uniform([n], -10, 10, tf.int32))
init = tf.initialize_all_variables()
r = PearsonCorrelationTF(x, y)
borg_session = '' # 'localhost:' + str(FLAGS.brain_port)
with tf.Session(borg_session) as sess:
sess.run(init)
x_data, y_data, r_data = sess.run([x, y, r], feed_dict={})
print 'x=', x_data, ';'
print 'y=', y_data, ';'
print 'r=', r_data, ';'
print 'Expected r is', ComputePearsonCorrelation(x_data, y_data)
# noinspection PyAttributeOutsideInit
class RegressionNetwork:
"""Basic class implementing TF Regression."""
def __init__(self):
self.Clear()
def Clear(self):
self.g = None
self.num_hidden = 0
self.training_steps = 0
self.x1 = None
self.W1 = None
self.b1 = None
self.y1 = None
self.W2 = None
self.b2 = None
self.y2 = None
self.ytrue = None
self.loss = None
self.optimizer = None
self.train = None
self.init = None
        self.saver = None
        self.merged_summaries = None
self.session = None
self.queue = None
self.enqueue_op = None
self.x_input = None
self.y_input = None
def CreatePrefetchGraph(self):
"""Create the pieces of the graph we need to fetch the data. The two
primary outputs of this routine are stored in the class variables:
self.x1 (the input data) and self.ytrue (the predictions)"""
# From the first answer at:
# http://stackoverflow.com/questions/34594198/how-to-prefetch-data-using-a-custom-python-function-in-tensorflow
# Input features are length input_d vectors of floats.
# Output data are length output_d vectors of floats.
self.x_input, self.y_input = tf.py_func(self.FindRandomData,
[], [tf.float32, tf.float32])
self.queue = tf.FIFOQueue(10, [tf.float32, tf.float32],
shapes=[[self.batch_size, self.n_steps, self.input_d], [self.batch_size, 1]])
self.enqueue_op = self.queue.enqueue([self.x_input, self.y_input])
# Insert data here if feeding the network with a feed_dict.
self.x1, self.ytrue = self.queue.dequeue()
def CreateEvalGraph(self, output_d, correlation_loss, learning_rate=0.01):
"""Given the true and predicted y values, add all the evaluation and
summary pieces to the end of the graph."""
if correlation_loss:
if output_d > 1:
raise ValueError("Can't do correlation on multidimensional output")
# Compute the correlation
r = PearsonCorrelationTF(self.ytrue, self.ypredict)
self.loss = tf.negative(r, name='loss_pearson')
else:
# Minimize the mean squared errors.
self.loss = tf.reduce_mean(tf.square(self.ypredict - self.ytrue),
name='loss_euclidean')
tf.summary.scalar('loss', self.loss)
#
# https://www.quora.com/Which-optimizer-in-TensorFlow-is-best-suited-for-learning-regression
# optimizer = tf.train.AdadeltaOptimizer(learning_rate)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate)
self.optimizer = tf.train.AdamOptimizer(learning_rate)
self.train = self.optimizer.minimize(self.loss)
# Before starting, initialize the variables. We will 'run' this first.
self.init = tf.initialize_all_variables()
self.saver = tf.train.Saver()
self.merged_summaries = tf.summary.merge_all()
# Create a TF network to find values with a single level network that predicts
# y_data from x_data. Only set the correlation_loss argument to true if
# predicting one-dimensional data.
def CreateEntireNetwork(self, input_d, output_d, n_steps, num_hidden=20,
learning_rate=0.01, correlation_loss=False,
batch_size=128):
self.input_d = input_d
self.num_hidden = num_hidden
self.batch_size = batch_size
self.n_steps = n_steps
self.g = tf.Graph()
print "Creating a RegressionNetwork with %d hidden units." % num_hidden
with self.g.as_default():
self.CreatePrefetchGraph()
            # Pass the requested hidden size through; otherwise CreateComputation's
            # default of 20 would silently be used.
            self.ypredict = self.CreateComputation(self.x1, input_d, output_d,
                                                   num_hidden=num_hidden)
self.CreateEvalGraph(output_d, correlation_loss)
def CreateComputation(self, x1, input_d, output_d, num_hidden=20):
# W1 needs to be input_d x num_hidden
self.W1 = tf.Variable(tf.random_uniform([input_d, num_hidden], -1.0, 1.0),
name='W1')
self.b1 = tf.Variable(tf.zeros([num_hidden]), name='bias1')
# y1 will be batch_size x num_hidden
self.y1 = tf.nn.relu(tf.nn.bias_add(tf.matmul(x1, self.W1), self.b1),
name='y1')
self.W2 = tf.Variable(tf.random_uniform([num_hidden, output_d], -1.0, 1.0),
name='W2')
self.b2 = tf.Variable(tf.zeros([output_d]), name='b2')
# Output y2 Will be batch_size x output_d
self.y2 = tf.nn.bias_add(tf.matmul(self.y1, self.W2), self.b2, name='y2')
return self.y2
def CreateSession(self, session_target=''):
if not self.session:
self.session = tf.Session(session_target, graph=self.g)
def FindRandomData(self):
"""Find some data for training. Make sure that these three class varibles
are set up before this runs: training_batch_size, training_x_data, and
training_y_data. This code is called at run time, directly by tensorflow
to get some new data for training. This will be used in a py_func."""
ri = numpy.floor(numpy.random.rand(
self.training_batch_size) *
self.training_x_data.shape[0]).astype(int)
training_x = self.training_x_data[ri, :]
training_y = self.training_y_data[ri, :]
return training_x.astype(np.float32), training_y.astype(np.float32)
def TrainFromQueue(self, x_data, y_data, batch_size=40, training_steps=6000,
reporting_fraction=0.1, session_target='',
tensorboard_dir='/tmp/tf'):
"""Train a DNN Regressor, using the x_data to predict the y_data."""
self.training_x_data = x_data
self.training_y_data = y_data
self.training_batch_size = batch_size
qr = tf.train.QueueRunner(self.queue, [self.enqueue_op] * 4)
self.CreateSession(session_target)
coord = tf.train.Coordinator()
enqueue_threads = qr.create_threads(self.session, coord=coord, start=True)
train_writer = tf.summary.FileWriter(tensorboard_dir + '/train', self.g)
self.session.run(self.init)
total_time = 0.0
loss_value = 0.0
for step in xrange(training_steps):
if coord.should_stop():
break
tic = time.time()
_, loss_value, summary_values = self.session.run([self.train, self.loss,
self.merged_summaries])
total_time += time.time() - tic
if step % int(training_steps * reporting_fraction) == 0:
print step, loss_value
train_writer.add_summary(summary_values, step)
self.training_steps = training_steps
print "TrainFromQueue: %d steps averaged %gms per step." % \
(training_steps, total_time / training_steps * 1000)
coord.request_stop()
coord.join(enqueue_threads)
return 0, loss_value
def TrainWithFeed(self, x_data, y_data, batch_size=40, training_steps=6000,
reporting_fraction=0.1, session_target='',
tensorboard_dir='/tmp/tf'):
"""Train a DNN Regressor, using the x_data to predict the y_data.
Report periodically, every
training_steps*reporting_fraction
epochs. The loss function is Euclidean (L2) unless the
correlation_loss parameter is true. Output the final model
to the model_save_file. """
self.CreateSession(session_target)
train_writer = tf.summary.FileWriter(tensorboard_dir + '/train', self.g)
self.session.run(self.init)
total_time = 0.0
loss_value = 0.0
y2_value = 0.0
for step in xrange(training_steps):
# Choose some data at random to feed the graph.
ri = numpy.floor(numpy.random.rand(batch_size) * x_data.shape[0]).astype(int)
training_x = x_data[ri, :]
training_y = y_data[ri, :]
tic = time.time()
_, loss_value, y2_value, summary_values = \
self.session.run([self.train, self.loss, self.y2, self.merged_summaries],
feed_dict={self.x1: training_x, self.ytrue: training_y})
total_time += time.time() - tic
if step % int(training_steps * reporting_fraction) == 0:
                print step, loss_value # , training_x.shape, training_y.shape
train_writer.add_summary(summary_values, step)
self.training_steps = training_steps
print "Average time per training session is %gms." % \
(1000 * total_time / training_steps)
return y2_value, loss_value
def SaveState(self, model_save_file, session_target=''):
self.CreateSession(session_target)
self.saver.save(self.session, model_save_file)
def RestoreState(self, model_save_file=None, session_target=''):
self.CreateSession(session_target)
if model_save_file:
print "RegressionNetwork.Eval: Restoring the model from:", model_save_file
self.saver.restore(self.session, model_save_file)
def Eval(self, x_data, y_truth=None, session_target='', tensorboard_dir='/tmp/tf'):
self.CreateSession(session_target)
if y_truth is None:
[y2_value] = self.session.run([self.y2], feed_dict={self.x1: x_data})
else:
print "Evaluating the eval loss to", tensorboard_dir
eval_writer = tf.summary.FileWriter(tensorboard_dir + '/eval', self.g)
[y2_value, summary_values, loss] = \
self.session.run([self.y2, self.merged_summaries, self.loss],
feed_dict={self.x1: x_data, self.ytrue: y_truth})
print "loss is:", loss, "y2_value is:", y2_value, "summary_values are:", summary_values
eval_writer.add_summary(summary_values, self.training_steps)
return y2_value
def __enter__(self):
return self
def Close(self):
print "RegressorNetwork::Close called."
if self.session:
print " Closing the session too."
self.session.close()
self.session = None
tf.reset_default_graph()
self.g = None
self.training_x_data = None
self.training_y_data = None
def __del__(self):
"""Called when the GC finds no references to this object."""
print "RegressorNetwork::__del__ called."
self.Close()
def __exit__(self, exc_type, exc_value, traceback):
"""Called when the variable goes out of scope (like in a with)"""
print "RegressorNetwork::__exit__ called."
self.Close()
# noinspection PyAttributeOutsideInit
class DNNRegressionNetwork(RegressionNetwork):
def CreateEntireNetwork(self, input_d, output_d, num_hidden=6, learning_rate=0.0,
rnn_unit='gru', out_layers='[]',
activation='sigmoid', bi_rnn=False,
opt='adam', init='glorot', n_layers=1,
n_steps=32, correlation_loss=False, batch_size=128):
self.num_hidden = num_hidden
self.input_d = input_d
self.batch_size = batch_size
self.n_steps = n_steps
self.g = tf.Graph()
print "Creating a RegressionNetwork with %d hidden units." % num_hidden
with self.g.as_default():
self.CreatePrefetchGraph()
# Inputs
# x = tf.placeholder(tf.float32, [None, dataset.n_steps, dataset.n_features_in])
# Keeping probability for dropout
keep_prob = tf.placeholder(tf.float32)
# Weights for the output layers
levels = [int(a) for a in re.findall(r"[\w']+", out_layers)]
levels.append(output_d)
weights, biases, weights_to_merge = {}, {}, []
for k in xrange(len(levels)):
                if k == 0:
if bi_rnn:
if init == 'uni':
weights['hidden{}'.format(k)] = \
tf.Variable(tf.random_uniform([num_hidden * 2,
levels[k]], -.1, .1))
elif init == 'gauss':
weights['hidden{}'.format(k)] = \
tf.Variable(tf.random_normal([num_hidden * 2,
levels[k]], stddev=.1))
elif init == 'glorot':
weights['hidden{}'.format(k)] = \
tf.get_variable('hidden{}'.format(k),
shape=[num_hidden * 2, levels[k]],
initializer=tf.contrib.layers.xavier_initializer())
else:
if init == 'uni':
weights['hidden{}'.format(k)] = \
tf.Variable(tf.random_uniform([num_hidden * n_steps,
levels[k]], -.1, .1))
elif init == 'gauss':
weights['hidden{}'.format(k)] = \
tf.Variable(tf.random_normal([num_hidden * n_steps,
levels[k]], stddev=.1))
elif init == 'glorot':
weights['hidden{}'.format(k)] = \
tf.get_variable('hidden{}'.format(k),
shape=[num_hidden * n_steps, levels[k]],
initializer=tf.contrib.layers.xavier_initializer())
biases['hidden{}'.format(k)] = tf.Variable(tf.zeros([levels[k]]))
else:
if init == 'uni':
weights['hidden{}'.format(k)] = \
tf.Variable(tf.random_uniform([levels[k - 1], levels[k]], -.1, .1))
elif init == 'gauss':
weights['hidden{}'.format(k)] = \
tf.Variable(tf.random_normal([levels[k - 1], levels[k]], stddev=.1))
elif init == 'glorot':
weights['hidden{}'.format(k)] = \
tf.get_variable('hidden{}'.format(k),
shape=[levels[k - 1], levels[k]],
initializer=tf.contrib.layers.xavier_initializer())
biases['hidden{}'.format(k)] = tf.Variable(tf.zeros([levels[k]]))
weights_to_merge.append(tf.summary.histogram("weight_hidden{}".format(k),
weights['hidden{}'.format(k)]))
weights_to_merge.append(tf.summary.histogram("bias_hidden{}".format(k),
biases['hidden{}'.format(k)]))
# Register weights to be monitored by tensorboard
# Let's define the training and testing operations
self.ypredict, summ_outputs = self.rnn_step(self.x1, weights, biases,
_keep_prob=keep_prob,
num_hidden=num_hidden,
rnn_unit=rnn_unit,
n_layers=n_layers,
activation=activation)
weights_to_merge += summ_outputs
# self.ypredict = self.CreateComputation(self.x1, input_d, output_d)
self.CreateEvalGraph(output_d, correlation_loss)
def rnn_step(self, _input, _weights, _biases, _keep_prob, num_hidden,
rnn_unit, n_layers, activation):
"""
:param _input: a 'Tensor' of shape [batch_size x n_steps x n_features] representing
the input to the network
:param _weights: Dictionary of weights to calculate the activation of the
fully connected layer(s) after the RNN
:param _biases: Dictionary of weights to calculate the activation of the
fully connected layer(s) after the RNN
:param _keep_prob: float in [0, 1], keeping probability for Dropout
:param num_hidden: int, number of units in each layer
:param rnn_unit: string, type of unit can be 'gru', 'lstm' or 'simple'
:param n_layers: int, number of layers in the RNN
:param activation: string, activation for the fully connected layers:
'linear', 'relu, 'sigmoid', 'tanh'
:return: output of the network
"""
hist_outputs = []
prev_rec = _input
if rnn_unit == 'lstm':
cell = MultiRNNCell([LSTMCell(num_hidden, use_peepholes=True)] * n_layers)
elif rnn_unit == 'gru':
cell = MultiRNNCell([GRUCell(num_hidden)] * n_layers)
else:
cell = MultiRNNCell([BasicRNNCell(num_hidden)] * n_layers)
prev_act, prev_hid = dynamic_rnn(cell, inputs=prev_rec, dtype=tf.float32)
prev_act = tf.reshape(prev_act, (-1, self.num_hidden * self.n_steps))
for k in xrange(len(_weights) - 1):
hidden = tf.nn.relu(tf.nn.bias_add(tf.matmul(prev_act,
_weights['hidden{}'.format(k)]),
_biases['hidden{}'.format(k)]))
hist_outputs.append(tf.summary.histogram("FC_{}".format(k), hidden))
hid_drop = tf.nn.dropout(hidden, _keep_prob)
prev_act = hid_drop
last_act = tf.nn.bias_add(tf.matmul(prev_act,
_weights['hidden{}'.format(len(_weights) - 1)]),
_biases['hidden{}'.format(len(_weights) - 1)])
hist_outputs.append(tf.summary.histogram("last_act", last_act))
if activation == "linear":
ret_act = last_act
elif activation == 'relu':
ret_act = tf.nn.relu(last_act)
elif activation == 'sigmoid':
ret_act = tf.nn.sigmoid(last_act)
elif activation == 'tanh':
ret_act = tf.nn.tanh(last_act)
else:
raise ValueError("Activation requested not yet implemented, choose between "
"'linear', "
"'relu', "
"'sigmoid' or "
"'tanh'")
return ret_act, hist_outputs
"""Polynomial Fitting.
Now generate some fake test data and make sure we can properly regress the data.
This is just a polynomial fitting.
"""
def TestPolynomialFitting():
# Create 8000 phony x and y data points with NumPy
x_data = 1.2 * np.random.rand(8000, 1).astype("float32") - 0.6
# Create point-wise 3rd order non-linearity
y_data = (x_data - .5) * x_data * (x_data + 0.5) + 0.3
# First, regress with Euclidean loss function
with DNNRegressionNetwork() as regressor:
regressor.CreateEntireNetwork(x_data.shape[1], y_data.shape[1],
learning_rate=0.01,
correlation_loss=False,
batch_size=128)
_, _ = regressor.TrainWithFeed(x_data, y_data) # could return loss value
y_prediction = regressor.Eval(x_data)
plot.clf()
plot.plot(x_data, y_prediction, '.', x_data, y_data, '.')
plot.xlabel('Input Variable')
plot.ylabel('Output Variable')
plot.legend(('Prediction', 'Truth'))
plot.title('DNN Regression with Euclidean Loss')
plot.show()
# Second, regress with a correlation loss function
with RegressionNetwork() as regressor:
regressor.CreateEntireNetwork(x_data.shape[1], y_data.shape[1],
learning_rate=0.01,
correlation_loss=True,
n_steps=1)
_, _ = regressor.TrainWithFeed(x_data, y_data) # could return loss value
y_prediction = regressor.Eval(x_data)
plot.clf()
plot.plot(x_data, y_prediction, '.', x_data, y_data, '.')
plot.xlabel('Input Variable')
plot.ylabel('Output Variable')
plot.legend(('Prediction', 'Truth'))
plot.title('DNN Regression with Correlation Loss')
plot.show()
def LoadTellurideDemoData(demo_data_loc):
demodata = sio.loadmat(demo_data_loc)
print "Data keys in Matlab input file:", sorted(demodata.keys())
fsample = demodata['fsample'][0][0]
eeg_data = (demodata['eeg'].reshape((32)))
for i in xrange(eeg_data.shape[0]):
eeg_data[i] = eeg_data[i].astype(np.float32)
audio_data = demodata['wav'].reshape((4))
for i in xrange(audio_data.shape[0]):
audio_data[i] = audio_data[i].astype(np.float32)
print "Audio data has size:", audio_data.shape, audio_data[0].shape
print "EEG data has size:", eeg_data.shape, eeg_data[0].shape
return audio_data, eeg_data, fsample
def AssembleDemoData(audio_data, eeg_data, trials, max_lag):
lags = np.arange(0, int(max_lag))
all_eeg = None
all_audio = None
for t in trials:
# don't wanna lag all the data
(laggedEEG, good_eeg_rows) = LagData(eeg_data[t], lags)
laggedAudio = audio_data[t % 4][good_eeg_rows, 0].reshape((-1, 1))
if all_eeg is None:
all_eeg = laggedEEG
else:
all_eeg = np.append(all_eeg, laggedEEG, 0)
if all_audio is None:
all_audio = laggedAudio
else:
all_audio = np.append(all_audio, laggedAudio, 0)
print "AssembleDemoData:", all_audio.shape, all_eeg.shape
return all_audio, all_eeg
def TestNumberHidden(audio_data, eeg_data, fsample, hidden_list=None,
session_target='', training_steps=5000, batch_size=1000,
tensorboard_base='/tmp/tf', n_steps=32):
if hidden_list is None:
hidden_list = [6]
num_trials = eeg_data.shape[0]
frac_correct = np.zeros((num_trials, max(hidden_list) + 1))
train_loss = np.zeros((num_trials, max(hidden_list) + 1))
for hidden in hidden_list:
for t in range(num_trials):
test_set = [t]
train_set = list(set(range(num_trials)).difference(test_set))
# lag after not now
all_audio, all_eeg = AssembleDemoData(audio_data, eeg_data,
np.array(train_set), fsample * 0.25)
test_audio, test_eeg = AssembleDemoData(audio_data, eeg_data,
np.array(test_set), fsample * 0.25)
print "Before DNNRegression:", all_eeg.shape, all_audio.shape, \
test_eeg.shape, test_audio.shape
with RegressionNetwork() as regressor:
tensorboard_dir = '%s/telluride-%03d' % (tensorboard_base, t)
regressor.CreateEntireNetwork(all_eeg.shape[1], all_audio.shape[1], n_steps=n_steps,
learning_rate=1, correlation_loss=True,
num_hidden=hidden, batch_size=batch_size)
(y_prediction, loss_value) = regressor.TrainFromQueue(all_eeg, all_audio,
batch_size=batch_size,
training_steps=training_steps,
tensorboard_dir=tensorboard_dir,
session_target=session_target)
audio_predict = regressor.Eval(test_eeg, test_audio,
tensorboard_dir=tensorboard_dir)
gc.collect()
print "At end of TestNumberHidden: MaxRSS is", \
resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
c = np.corrcoef(audio_predict.T, test_audio[0:audio_predict.shape[0], :].T)
frac_correct[t, hidden] = c[0, 1]
train_loss[t, hidden] = loss_value
print frac_correct
sys.stdout.flush()
return frac_correct, train_loss
def RunDemoDataTest(num_trials=32, hidden_number=6, batch_size=1000,
demo_data_loc='DemoForTF.mat',
session_target='', training_steps=5000,
tensorboard_base='/tmp/tf', n_steps=32):
(audio_data, eeg_data, fsample) = LoadTellurideDemoData(demo_data_loc)
frac_correct, train_loss = TestNumberHidden(audio_data, eeg_data[0:num_trials],
fsample, [hidden_number],
batch_size=batch_size,
session_target=session_target,
training_steps=training_steps,
tensorboard_base=tensorboard_base,
n_steps=n_steps)
numpy.set_printoptions(linewidth=100000000)
print frac_correct
# Save the data across all trials into two files so we can plot them later.
frac_name = 'frac_correct_%02d.txt' % hidden_number
np.savetxt(frac_name, frac_correct)
loss_name = 'train_loss_%02d.txt' % hidden_number
np.savetxt(loss_name, train_loss)
return frac_name, loss_name
def RunSaveModelTest(demo_data_loc='testing/DemoDataForTensorFlow.mat',
training_steps=1000, num_trials=3):
(audio_data, eeg_data, fsample) = LoadTellurideDemoData(demo_data_loc)
max_lags = fsample * 0.25
all_audio, all_eeg = AssembleDemoData(audio_data, eeg_data,
np.array([0]), max_lags)
eeg_data = eeg_data[0:num_trials] # We don't need all the data to test this.
model_file = '/tmp/regression_model.tf'
num_hidden = 6
with RegressionNetwork() as regressor:
regressor.CreateEntireNetwork(all_eeg.shape[1], all_audio.shape[1],
learning_rate=1, correlation_loss=True,
num_hidden=num_hidden,
n_steps=1)
_, _ = regressor.TrainWithFeed(all_eeg, all_audio,
batch_size=1000,
training_steps=training_steps) # could return loss value
regressor.SaveState(model_file)
with RegressionNetwork() as regressor:
regressor.CreateEntireNetwork(all_eeg.shape[1], all_audio.shape[1],
learning_rate=1, correlation_loss=True,
num_hidden=num_hidden,
n_steps=1)
regressor.RestoreState(model_file)
audio_predict2 = regressor.Eval(all_eeg)
print "Variance of predictions is:", np.var(audio_predict2)
print
def regression_main(argv):
print "Testing the PearsonCorrelation TF Graph (#6)"
if 0:
print "Testing basic line fitting."
TestPearsonCorrelation()
if 1:
print "\nTesting the Polynomial fitting TF Graph"
TestPolynomialFitting()
if 0:
print "\nRunning the TFRegression.regression_main() function."
RunDemoDataTest(hidden_number=3, num_trials=3,
batch_size=1000, training_steps=1000, n_steps=32)
if 0:
print "\nRunning the TFRegression.regression_main() function."
RunSaveModelTest(training_steps=1000)
if __name__ == "__main__":
regression_main(sys.argv)
custom_components/afvalwijzer/const/const.py | kcleong/homeassistant-config | 0 | 12786702 | import logging
from datetime import timedelta
API = "api"
NAME = "afvalwijzer"
VERSION = "2021.05.01"
ISSUE_URL = "https://github.com/xirixiz/homeassistant-afvalwijzer/issues"
SENSOR_PROVIDER_TO_URL = {
"afvalwijzer_data_default": [
"https://api.{0}.nl/webservices/appsinput/?apikey=5ef443e778f41c4f75c69459eea6e6ae0c2d92de729aa0fc61653815fbd6a8ca&method=postcodecheck&postcode={1}&street=&huisnummer={2}&toevoeging={3}&app_name=afvalwijzer&platform=phone&afvaldata={4}&langs=nl&"
],
}
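# Illustrative only (all values below are made up): the template above is filled
# positionally with provider, postal code, street number, suffix and date, e.g.
#   SENSOR_PROVIDER_TO_URL["afvalwijzer_data_default"][0].format(
#       "mijnafvalwijzer", "1234AB", "10", "", "2021-05-01")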
CONF_PROVIDER = "provider"
CONF_API_TOKEN = "api_token"
CONF_POSTAL_CODE = "postal_code"
CONF_STREET_NUMBER = "street_number"
CONF_SUFFIX = "suffix"
CONF_DATE_FORMAT = "date_format"
CONF_INCLUDE_DATE_TODAY = "include_date_today"
CONF_DEFAULT_LABEL = "default_label"
CONF_ID = "id"
CONF_EXCLUDE_LIST = "exclude_list"
SENSOR_PREFIX = "afvalwijzer "
SENSOR_ICON = "mdi:recycle"
ATTR_LAST_UPDATE = "last_update"
ATTR_IS_COLLECTION_DATE_TODAY = "is_collection_date_today"
ATTR_IS_COLLECTION_DATE_TOMORROW = "is_collection_date_tomorrow"
ATTR_IS_COLLECTION_DATE_DAY_AFTER_TOMORROW = "is_collection_date_day_after_tomorrow"
ATTR_DAYS_UNTIL_COLLECTION_DATE = "days_until_collection_date"
ATTR_YEAR_MONTH_DAY_DATE = "year_month_day_date"
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(hours=1)
PARALLEL_UPDATES = 1
SCAN_INTERVAL = timedelta(seconds=30)
DOMAIN = "afvalwijzer"
DOMAIN_DATA = "afvalwijzer_data"
STARTUP_MESSAGE = f"""
-------------------------------------------------------------------
Afvalwijzer
This is a custom integration!
If you have any issues with this you need to open an issue here:
https://github.com/xirixiz/homeassistant-afvalwijzer/issues
-------------------------------------------------------------------
"""
src/dhapi/lib/auth.py | rayleighko/dhlottery-api | 4 | 12786703 | import copy
import requests
class AuthController:
_REQ_HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.77 Safari/537.36",
"Connection": "keep-alive",
"Cache-Control": "max-age=0",
"sec-ch-ua": '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
"sec-ch-ua-mobile": "?0",
"Upgrade-Insecure-Requests": "1",
"Origin": "https://dhlottery.co.kr",
"Content-Type": "application/x-www-form-urlencoded",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Referer": "https://dhlottery.co.kr/",
"Sec-Fetch-Site": "same-site",
"Sec-Fetch-Mode": "navigate",
"Sec-Fetch-User": "?1",
"Sec-Fetch-Dest": "document",
"Accept-Language": "ko,en-US;q=0.9,en;q=0.8,ko-KR;q=0.7",
}
_AUTH_CRED = ""
def login(self, user_id: str, password: str):
assert type(user_id) == str
assert type(password) == str
default_auth_cred = (
self._get_default_auth_cred()
        )  # get a default JSESSIONID first; the login below attaches authentication to it
headers = self._generate_req_headers(default_auth_cred)
data = self._generate_body(user_id, password)
        _res = self._try_login(headers, data)  # a new JSESSIONID comes back here, but it cannot be used to log in
self._update_auth_cred(default_auth_cred)
def add_auth_cred_to_headers(self, headers: dict) -> str:
assert type(headers) == dict
copied_headers = copy.deepcopy(headers)
copied_headers["Cookie"] = f"JSESSIONID={self._AUTH_CRED}"
return copied_headers
def _get_default_auth_cred(self):
res = requests.get(
"https://dhlottery.co.kr/gameResult.do?method=byWin&wiselog=H_C_1_1"
)
return self._get_j_session_id_from_response(res)
def _get_j_session_id_from_response(self, res: requests.Response):
assert type(res) == requests.Response
for cookie in res.cookies:
if cookie.name == "JSESSIONID":
return cookie.value
raise KeyError("JSESSIONID cookie is not set in response")
def _generate_req_headers(self, j_session_id: str):
assert type(j_session_id) == str
copied_headers = copy.deepcopy(self._REQ_HEADERS)
copied_headers["Cookie"] = f"JSESSIONID={j_session_id}"
return copied_headers
def _generate_body(self, user_id: str, password: str):
assert type(user_id) == str
assert type(password) == str
return {
"returnUrl": "https://dhlottery.co.kr/common.do?method=main",
"userId": user_id,
"password": password,
"checkSave": "on",
"newsEventYn": "",
}
def _try_login(self, headers: dict, data: dict):
assert type(headers) == dict
assert type(data) == dict
res = requests.post(
"https://www.dhlottery.co.kr/userSsl.do?method=login",
headers=headers,
data=data,
)
return res
def _update_auth_cred(self, j_session_id: str) -> None:
assert type(j_session_id) == str
# TODO: judge whether login is success or not
        # The JSESSIONID is refreshed even when the login fails, so success should
        # probably be judged another way, e.g. by visiting the "My Page" screen.
        # + After 5 wrong password attempts, the login fails even if the password is correct.
self._AUTH_CRED = j_session_id
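# A minimal usage sketch (credentials and header values are placeholders, not part
# of this module):
#
#   auth = AuthController()
#   auth.login("my_user_id", "my_password")
#   headers = auth.add_auth_cred_to_headers({"Accept": "application/json"})
#   # 'headers' now carries "Cookie: JSESSIONID=..." for authenticated requests.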
libtiff/tests/test_tiff_array.py | IanZhao/pylibtiff | 4 | 12786704 |
import os
import atexit
from tempfile import mktemp
from numpy import *
from libtiff import TIFF
from libtiff import TIFFfile, TIFFimage
def test_simple_slicing():
for planar_config in [1,2]:
for compression in [None, 'lzw']:
for itype in [uint8, uint16, uint32, uint64,
int8, int16, int32, int64,
float32, float64,
complex64, complex128]:
image = random.randint(0, 100, size=(10,6,7)).astype (itype)
fn = mktemp('.tif')
if 0:
if planar_config==2:
continue
tif = TIFF.open(fn,'w')
tif.write_image(image, compression=compression)
tif.close()
else:
tif = TIFFimage(image)
tif.write_file(fn, compression=compression, planar_config=planar_config)
del tif
tif = TIFFfile(fn)
arr = tif.get_tiff_array()
data = arr[:]
assert len(data)==len (image), `len(data)`
assert image.dtype==data.dtype, `image.dtype, data[0].dtype`
assert (image==data).all()
assert arr.shape==image.shape
indices = [0, slice(None), slice (0,2), slice (0,5,2)]
for i0 in indices[:1]:
for i1 in indices:
for i2 in indices:
sl = (i0, i1, i2)
assert (arr[sl]==image[sl]).all(),`sl`
#os.remove(fn)
atexit.register(os.remove, fn)
pcrc/connection/patch.py | YehowahLiu/PCRC | 0 | 12786705 | <gh_stars>0
from threading import Lock
from typing import Set
from redbaron import RedBaron, IfelseblockNode, AssignmentNode, WithNode
from pcrc.utils import redbaron_util
__patch_lock = Lock()
__patched = False
def patch_pycraft():
global __patched
with __patch_lock:
if __patched:
return
__patched = True
print('Patching PyCraft...')
__extends_protocol_version_range()
__player_position_fix()
__custom_s2c_packet_registering()
__network_thread_running_state_hook()
__default_proto_version_inject()
__playing_reactor_switch_listener()
__raw_packet_recording()
print('Patched PyCraft')
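    # Intended usage (sketch): call patch_pycraft() once, before any pycraft
    # Connection is created, so the patched methods are in place; the
    # __patch_lock / __patched guard above makes repeated calls no-ops.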
def __extends_protocol_version_range():
import minecraft
minecraft.KNOWN_MINECRAFT_VERSION_RECORDS.append(minecraft.Version('1.18.2', 758, True))
minecraft.initglobals(use_known_records=True)
def __player_position_fix():
from minecraft.networking.connection import PlayingReactor
from minecraft.networking.packets import PositionAndLookPacket
def patched_PlayingReactor_react(self, packet):
original_react(self, packet)
if packet.packet_name == "player position and look" and not self.connection.context.protocol_later_eq(107):
position_response = PositionAndLookPacket()
position_response.x = packet.x
position_response.feet_y = packet.y
position_response.z = packet.z
position_response.yaw = packet.yaw
position_response.pitch = packet.pitch
position_response.on_ground = True
self.connection.write_packet(position_response)
original_react = PlayingReactor.react
PlayingReactor.react = patched_PlayingReactor_react
def __custom_s2c_packet_registering():
from pcrc.packets import s2c
from minecraft.networking.connection import PlayingReactor
from minecraft.networking.packets import Packet
def patched_get_packets(context):
packets: Set[Packet] = original_get_packets(context)
packets |= s2c.PACKETS
return packets
original_get_packets = PlayingReactor.get_clientbound_packets
PlayingReactor.get_clientbound_packets = staticmethod(patched_get_packets)
def __network_thread_running_state_hook():
from minecraft.networking.connection import NetworkingThread
from pcrc.connection.pcrc_connection import PcrcConnection
def patched_network_thread_run(self):
if isinstance(self, PcrcConnection):
self.add_running_networking_thread_amount(1)
try:
original_network_thread_run(self)
finally:
if isinstance(self, PcrcConnection):
self.add_running_networking_thread_amount(-1)
original_network_thread_run = NetworkingThread.run
NetworkingThread.run = patched_network_thread_run
def __default_proto_version_inject():
"""
modified the value to default_proto_version if there are multiple allow version
"""
import minecraft.networking.connection as connection
from minecraft.networking.connection import Connection
red, connection_class = redbaron_util.read_class(Connection)
connect_method = redbaron_util.get_def(connection_class, 'connect')
main_with = redbaron_util.get_node(connect_method, node_type=WithNode)
idx = redbaron_util.get_node_index(main_with, node_type=AssignmentNode, predicate=lambda n: str(n.target) == 'self.spawned')
redbaron_util.insert_nodes(main_with, idx, [
RedBaron('''self.recorder.logger.info('Allow versions of the server: {}'.format(self.allowed_proto_versions))'''),
RedBaron('''if len(self.allowed_proto_versions) > 1: self.context.protocol_version = self.default_proto_version''')
])
patched_class_source = connect_method.dumps()
globals_ = dict(connection.__dict__)
exec(patched_class_source, globals_)
PatchedConnection = globals_['Connection']
Connection.connect = PatchedConnection.connect
def __playing_reactor_switch_listener():
from minecraft.networking.connection import LoginReactor, PlayingReactor
from pcrc.connection.pcrc_connection import PcrcConnection
def patched_network_thread_run(self, packet):
original_login_reactor_react(self, packet)
if isinstance(self.connection.reactor, PlayingReactor) and isinstance(self.connection, PcrcConnection):
self.connection.pcrc.on_switched_to_playing_reactor()
original_login_reactor_react = LoginReactor.react
LoginReactor.react = patched_network_thread_run
def __raw_packet_recording():
import minecraft.networking.connection as connection
from minecraft.networking.connection import PacketReactor
red, packet_reactor_class = redbaron_util.read_class(PacketReactor)
packet_reactor_class.name = 'PatchedPacketReactor'
read_packet_method = redbaron_util.get_def(packet_reactor_class, 'read_packet')
main_if_else_node = redbaron_util.get_node(read_packet_method, node_type=IfelseblockNode, error_msg='Cannot found if-else block in PacketReactor#read_packet')
main_if_body_nodes = main_if_else_node.value[0]
for i, node in enumerate(main_if_body_nodes):
if isinstance(node, AssignmentNode) and node.target.value == 'packet_id':
main_if_body_nodes.insert(i, 'packet_raw = copy.deepcopy(packet_data.bytes.getvalue())')
main_if_body_nodes.insert(i, 'import copy')
break
else:
raise Exception('Cannot found packet_id assignment node in PacketReactor#read_packet')
main_if_body_nodes.insert(len(main_if_body_nodes) - 1, 'packet.raw_data = packet_raw')
patched_class_source = red.dumps()
# print(patched_class_source)
globals_ = dict(connection.__dict__)
exec(patched_class_source, globals_)
PatchedPacketReactor = globals_['PatchedPacketReactor']
PacketReactor.read_packet = PatchedPacketReactor.read_packet
pr_flow/gen_route.py | vagrantxiao/dirc_riscv | 0 | 12786706 | #!/usr/bin/env python
import os
import xml.etree.ElementTree
class direct_int:
def __init__(self, prflow_params):
self.prflow_params = prflow_params
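        # The generator below reads these keys from prflow_params (all values shown
        # here are illustrative only): 'dest_dir', 'input_file_name', 'payload_bits',
        # 'num_in_ports', 'num_out_ports', 'l1_up_num', 'sub_tree_num', 'addr_bits',
        # 'l0_pp' and 'l1_pp', e.g.
        #   {'dest_dir': './output', 'input_file_name': './input/arch.xml',
        #    'payload_bits': '32', 'num_in_ports': '2', 'num_out_ports': '2',
        #    'l1_up_num': '8', 'sub_tree_num': '4', 'addr_bits': '5',
        #    'l0_pp': '2', 'l1_pp': '1'}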
def gen_routing(self):
os.system("rm -rf "+self.prflow_params['dest_dir'])
os.system("mkdir "+self.prflow_params['dest_dir'])
os.system("cp ./input/RelayStation.v " + self.prflow_params['dest_dir'])
sw_level_0 = open(self.prflow_params["dest_dir"] + '/switch_level_0_0_route.v', 'w')
sw_level_1_list = []
for i in range(4):
sw_level_1_list.append(open(self.prflow_params["dest_dir"]+ '/switch_level_1_'+str(i)+'_route.v', 'w'))
sw_level_0.write('module switch_level_0_0_route#(\n')
sw_level_0.write(' parameter PAYLOAD_BITS = '+self.prflow_params['payload_bits']+',\n')
sw_level_0.write(' parameter NUM_IN_PORTS = '+self.prflow_params['num_in_ports']+',\n')
sw_level_0.write(' parameter NUM_OUT_PORTS = '+self.prflow_params['num_out_ports']+',\n')
sw_level_0.write(' parameter NUM_UP = '+self.prflow_params['l1_up_num']+'\n')
sw_level_0.write(' )(\n')
sw_level_0.write(' input clk,\n')
sw_level_0.write(' input reset,\n')
sw_level_0.write(' \n')
sw_level_0.write(' input [PAYLOAD_BITS*NUM_UP-1:0] din_0,\n')
sw_level_0.write(' input [PAYLOAD_BITS*NUM_UP-1:0] din_1,\n')
sw_level_0.write(' input [PAYLOAD_BITS*NUM_UP-1:0] din_2,\n')
sw_level_0.write(' input [PAYLOAD_BITS*NUM_UP-1:0] din_3,\n')
sw_level_0.write(' \n')
sw_level_0.write(' input [NUM_UP-1:0] val_in_0,\n')
sw_level_0.write(' input [NUM_UP-1:0] val_in_1,\n')
sw_level_0.write(' input [NUM_UP-1:0] val_in_2,\n')
sw_level_0.write(' input [NUM_UP-1:0] val_in_3,\n')
sw_level_0.write(' \n')
sw_level_0.write(' output [NUM_UP-1:0] ready_upward_0,\n')
sw_level_0.write(' output [NUM_UP-1:0] ready_upward_1,\n')
sw_level_0.write(' output [NUM_UP-1:0] ready_upward_2,\n')
sw_level_0.write(' output [NUM_UP-1:0] ready_upward_3,\n')
sw_level_0.write(' \n')
sw_level_0.write(' output [PAYLOAD_BITS*NUM_UP-1:0] dout_0,\n')
sw_level_0.write(' output [PAYLOAD_BITS*NUM_UP-1:0] dout_1,\n')
sw_level_0.write(' output [PAYLOAD_BITS*NUM_UP-1:0] dout_2,\n')
sw_level_0.write(' output [PAYLOAD_BITS*NUM_UP-1:0] dout_3,\n')
sw_level_0.write(' \n')
sw_level_0.write(' output [NUM_UP-1:0] val_out_0,\n')
sw_level_0.write(' output [NUM_UP-1:0] val_out_1,\n')
sw_level_0.write(' output [NUM_UP-1:0] val_out_2,\n')
sw_level_0.write(' output [NUM_UP-1:0] val_out_3,\n')
sw_level_0.write(' \n')
sw_level_0.write(' input [NUM_UP-1:0] ready_downward_0,\n')
sw_level_0.write(' input [NUM_UP-1:0] ready_downward_1,\n')
sw_level_0.write(' input [NUM_UP-1:0] ready_downward_2,\n')
sw_level_0.write(' input [NUM_UP-1:0] ready_downward_3 \n')
sw_level_0.write(' );\n')
for i in range(4):
sw_level_1_list[i].write('module switch_level_1_'+str(i)+'_route # (\n')
sw_level_1_list[i].write(' parameter PAYLOAD_BITS = '+self.prflow_params['payload_bits']+',\n')
sw_level_1_list[i].write(' parameter NUM_IN_PORTS = '+self.prflow_params['num_in_ports']+',\n')
sw_level_1_list[i].write(' parameter NUM_OUT_PORTS = '+self.prflow_params['num_out_ports']+',\n')
sw_level_1_list[i].write(' parameter NUM_UP = '+self.prflow_params['l1_up_num']+'\n')
sw_level_1_list[i].write(' )(\n')
sw_level_1_list[i].write(' input clk,\n')
sw_level_1_list[i].write(' input reset,\n')
sw_level_1_list[i].write(' \n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS*PAYLOAD_BITS-1:0] din_0,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS*PAYLOAD_BITS-1:0] din_1,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS*PAYLOAD_BITS-1:0] din_2,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS*PAYLOAD_BITS-1:0] din_3,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS*PAYLOAD_BITS-1:0] din_4,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS*PAYLOAD_BITS-1:0] din_5,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS*PAYLOAD_BITS-1:0] din_6,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS*PAYLOAD_BITS-1:0] din_7,\n')
sw_level_1_list[i].write(' \n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS-1:0] val_in_0,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS-1:0] val_in_1,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS-1:0] val_in_2,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS-1:0] val_in_3,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS-1:0] val_in_4,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS-1:0] val_in_5,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS-1:0] val_in_6,\n')
sw_level_1_list[i].write(' input [NUM_IN_PORTS-1:0] val_in_7,\n')
sw_level_1_list[i].write(' \n')
sw_level_1_list[i].write(' output [NUM_IN_PORTS-1:0] ready_upward_0,\n')
sw_level_1_list[i].write(' output [NUM_IN_PORTS-1:0] ready_upward_1,\n')
sw_level_1_list[i].write(' output [NUM_IN_PORTS-1:0] ready_upward_2,\n')
sw_level_1_list[i].write(' output [NUM_IN_PORTS-1:0] ready_upward_3,\n')
sw_level_1_list[i].write(' output [NUM_IN_PORTS-1:0] ready_upward_4,\n')
sw_level_1_list[i].write(' output [NUM_IN_PORTS-1:0] ready_upward_5,\n')
sw_level_1_list[i].write(' output [NUM_IN_PORTS-1:0] ready_upward_6,\n')
sw_level_1_list[i].write(' output [NUM_IN_PORTS-1:0] ready_upward_7,\n')
sw_level_1_list[i].write(' \n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS*PAYLOAD_BITS-1:0] dout_0,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS*PAYLOAD_BITS-1:0] dout_1,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS*PAYLOAD_BITS-1:0] dout_2,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS*PAYLOAD_BITS-1:0] dout_3,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS*PAYLOAD_BITS-1:0] dout_4,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS*PAYLOAD_BITS-1:0] dout_5,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS*PAYLOAD_BITS-1:0] dout_6,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS*PAYLOAD_BITS-1:0] dout_7,\n')
sw_level_1_list[i].write(' \n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS-1:0] val_out_0,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS-1:0] val_out_1,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS-1:0] val_out_2,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS-1:0] val_out_3,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS-1:0] val_out_4,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS-1:0] val_out_5,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS-1:0] val_out_6,\n')
sw_level_1_list[i].write(' output [NUM_OUT_PORTS-1:0] val_out_7,\n')
sw_level_1_list[i].write(' \n')
sw_level_1_list[i].write(' input [NUM_OUT_PORTS-1:0] ready_downward_0,\n')
sw_level_1_list[i].write(' input [NUM_OUT_PORTS-1:0] ready_downward_1,\n')
sw_level_1_list[i].write(' input [NUM_OUT_PORTS-1:0] ready_downward_2,\n')
sw_level_1_list[i].write(' input [NUM_OUT_PORTS-1:0] ready_downward_3,\n')
sw_level_1_list[i].write(' input [NUM_OUT_PORTS-1:0] ready_downward_4,\n')
sw_level_1_list[i].write(' input [NUM_OUT_PORTS-1:0] ready_downward_5,\n')
sw_level_1_list[i].write(' input [NUM_OUT_PORTS-1:0] ready_downward_6,\n')
sw_level_1_list[i].write(' input [NUM_OUT_PORTS-1:0] ready_downward_7,\n')
sw_level_1_list[i].write('\n')
sw_level_1_list[i].write(' input [NUM_UP*PAYLOAD_BITS-1:0] up_din, \n')
sw_level_1_list[i].write(' input [NUM_UP-1:0] up_val_in, \n')
sw_level_1_list[i].write(' output [NUM_UP-1:0] up_ready_upward,\n')
sw_level_1_list[i].write(' \n')
sw_level_1_list[i].write(' output [NUM_UP*PAYLOAD_BITS-1:0] up_dout,\n')
sw_level_1_list[i].write(' output [NUM_UP-1:0] up_val_out,\n')
sw_level_1_list[i].write(' input [NUM_UP-1:0] up_ready_downward\n')
sw_level_1_list[i].write(' \n')
sw_level_1_list[i].write(' );\n')
root = xml.etree.ElementTree.parse(self.prflow_params['input_file_name'])
links = root.findall('link')
pp_level_1_num = [0 for index in range(int(self.prflow_params['sub_tree_num']))]
pp_level_1_unit = int(self.prflow_params['l1_pp'])
pp_level_0_num = 0
pp_level_0_unit = int(self.prflow_params['l0_pp'])
in_port_used_level_1 = [0 for index in range(int(self.prflow_params['sub_tree_num']))]
out_port_used_level_1 = [0 for index in range(int(self.prflow_params['sub_tree_num']))]
in_port_used_level_0 = [0 for index in range(int(self.prflow_params['sub_tree_num']))]
out_port_used_level_0 = [0 for index in range(int(self.prflow_params['sub_tree_num']))]
in_page_ports_used = [0 for index in range(2**int(self.prflow_params['addr_bits']))]
out_page_ports_used = [0 for index in range(2**int(self.prflow_params['addr_bits']))]
for child in links:
src = child.get('source')
dest = child.get('destination')
src_page_num = int(src.split('.')[0].replace('Function', ''))
src_port_num = int(src.split('.')[1])
dest_page_num = int(dest.split('.')[0].replace('Function', ''))
dest_port_num = int(dest.split('.')[1])
            sub_tree_src = int(src_page_num)//8
            sub_tree_dest = int(dest_page_num)//8
same_sub_tree = (sub_tree_src == sub_tree_dest)
in_page_ports_used[src_page_num] += 1
out_page_ports_used[dest_page_num] += 1
if same_sub_tree:
# if source and dest are in the same sub_tree
# we do not need to go up level
sw_level_1_list[sub_tree_src].write('\n\n// ' + src + '=>' + dest + '\n')
for i in range(pp_level_1_num[sub_tree_src], pp_level_1_num[sub_tree_src] + pp_level_1_unit):
sw_level_1_list[sub_tree_src].write('\n\n wire ready_downward'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' wire val_out'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' wire ready_upward'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' wire val_in'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' wire [PAYLOAD_BITS-1:0] din'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' wire [PAYLOAD_BITS-1:0] dout'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' \n')
if (i == pp_level_1_num[sub_tree_src]):
sw_level_1_list[sub_tree_src].write(' assign din'+str(i)+' = din_'+ str(src_page_num%8) + '['\
+ str(int(self.prflow_params['payload_bits'])*(src_port_num+1)-1)\
+ ':'\
+ str(int(self.prflow_params['payload_bits'])*src_port_num) + '];\n')
sw_level_1_list[sub_tree_src].write(' assign val_in'+str(i)+' = val_in_' + str(src_page_num%8) + '['\
+ str(src_port_num)+'];\n')
sw_level_1_list[sub_tree_src].write(' assign ready_upward_' + str(src_page_num%8) + '['\
+ str(src_port_num)+'] = ready_upward'+str(i)+';\n')
else:
sw_level_1_list[sub_tree_src].write(' assign din'+str(i)+' = dout'+str(i-1)+';\n')
sw_level_1_list[sub_tree_src].write(' assign val_in'+str(i)+' = val_out'+str(i-1)+';\n')
sw_level_1_list[sub_tree_src].write(' assign ready_downward'+str(i-1)+' = ready_upward'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' \n')
sw_level_1_list[sub_tree_src].write(' RelayStation #(\n')
sw_level_1_list[sub_tree_src].write(' .PAYLOAD_BITS(PAYLOAD_BITS)\n')
sw_level_1_list[sub_tree_src].write(' )RelayStation'+str(i)+'(\n')
sw_level_1_list[sub_tree_src].write(' .clk(clk),\n')
sw_level_1_list[sub_tree_src].write(' .reset(reset),\n')
sw_level_1_list[sub_tree_src].write(' .ready_downward(ready_downward'+str(i)+'),\n')
sw_level_1_list[sub_tree_src].write(' .val_out(val_out'+str(i)+'),\n')
sw_level_1_list[sub_tree_src].write(' .ready_upward(ready_upward'+str(i)+'),\n')
sw_level_1_list[sub_tree_src].write(' .val_in(val_in'+str(i)+'),\n')
sw_level_1_list[sub_tree_src].write(' .din(din'+str(i)+'),\n')
sw_level_1_list[sub_tree_src].write(' .dout(dout'+str(i)+')\n')
sw_level_1_list[sub_tree_src].write(' );\n')
sw_level_1_list[sub_tree_src].write(' assign dout_'+ str(dest_page_num%8) + '['\
+ str(int(self.prflow_params['payload_bits'])*(dest_port_num+1)-1)\
+ ':'\
+ str(int(self.prflow_params['payload_bits'])*dest_port_num) + '] = dout'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' assign val_out_' + str(dest_page_num%8) + '['\
+ str(dest_port_num)+'] = val_out'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' assign ready_downward'+str(i)+' = ready_downward_' + str(dest_page_num%8) + '['\
+ str(dest_port_num)+'];\n')
# update pipeline number
pp_level_1_num[sub_tree_src] += pp_level_1_unit
else:
# construct relay stations for source sub_tree input
sw_level_1_list[sub_tree_src].write('\n\n// ' + src + '=>' + dest + '\n')
for i in range(pp_level_1_num[sub_tree_src], pp_level_1_num[sub_tree_src] + pp_level_1_unit):
sw_level_1_list[sub_tree_src].write('\n\n wire ready_downward'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' wire val_out'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' wire ready_upward'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' wire val_in'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' wire [PAYLOAD_BITS-1:0] din'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' wire [PAYLOAD_BITS-1:0] dout'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' \n')
if (i == pp_level_1_num[sub_tree_src]):
sw_level_1_list[sub_tree_src].write(' assign din'+str(i)+' = din_'+ str(src_page_num%8) + '['\
+ str(int(self.prflow_params['payload_bits'])*(src_port_num+1)-1)\
+ ':'\
+ str(int(self.prflow_params['payload_bits'])*src_port_num) + '];\n')
sw_level_1_list[sub_tree_src].write(' assign val_in'+str(i)+' = val_in_' + str(src_page_num%8) + '['\
+ str(src_port_num)+'];\n')
sw_level_1_list[sub_tree_src].write(' assign ready_upward_' + str(src_page_num%8) + '['\
+ str(src_port_num)+'] = ready_upward'+str(i)+';\n')
else:
sw_level_1_list[sub_tree_src].write(' assign din'+str(i)+' = dout'+str(i-1)+';\n')
sw_level_1_list[sub_tree_src].write(' assign val_in'+str(i)+' = val_out'+str(i-1)+';\n')
sw_level_1_list[sub_tree_src].write(' assign ready_downward'+str(i-1)+' = ready_upward'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' \n')
sw_level_1_list[sub_tree_src].write(' RelayStation #(\n')
sw_level_1_list[sub_tree_src].write(' .PAYLOAD_BITS(PAYLOAD_BITS)\n')
sw_level_1_list[sub_tree_src].write(' )RelayStation'+str(i)+'(\n')
sw_level_1_list[sub_tree_src].write(' .clk(clk),\n')
sw_level_1_list[sub_tree_src].write(' .reset(reset),\n')
sw_level_1_list[sub_tree_src].write(' .ready_downward(ready_downward'+str(i)+'),\n')
sw_level_1_list[sub_tree_src].write(' .val_out(val_out'+str(i)+'),\n')
sw_level_1_list[sub_tree_src].write(' .ready_upward(ready_upward'+str(i)+'),\n')
sw_level_1_list[sub_tree_src].write(' .val_in(val_in'+str(i)+'),\n')
sw_level_1_list[sub_tree_src].write(' .din(din'+str(i)+'),\n')
sw_level_1_list[sub_tree_src].write(' .dout(dout'+str(i)+')\n')
sw_level_1_list[sub_tree_src].write(' );\n')
sw_level_1_list[sub_tree_src].write(' assign up_dout['\
+ str(int(self.prflow_params['payload_bits'])*(out_port_used_level_1[sub_tree_src]+1)-1)\
+ ':'\
+ str(int(self.prflow_params['payload_bits'])*out_port_used_level_1[sub_tree_src]) + '] = dout'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' assign up_val_out['\
+ str(out_port_used_level_1[sub_tree_src])+'] = val_out'+str(i)+';\n')
sw_level_1_list[sub_tree_src].write(' assign ready_downward'+str(i)+' = up_ready_downward['\
+ str(out_port_used_level_1[sub_tree_src])+'];\n')
# construct relay_station for destination sub_tree
sw_level_1_list[sub_tree_dest].write('\n\n// ' + src + '=>' + dest + '\n')
for i in range(pp_level_1_num[sub_tree_dest], pp_level_1_num[sub_tree_dest] + pp_level_1_unit):
sw_level_1_list[sub_tree_dest].write('\n\n wire ready_downward'+str(i)+';\n')
sw_level_1_list[sub_tree_dest].write(' wire val_out'+str(i)+';\n')
sw_level_1_list[sub_tree_dest].write(' wire ready_upward'+str(i)+';\n')
sw_level_1_list[sub_tree_dest].write(' wire val_in'+str(i)+';\n')
sw_level_1_list[sub_tree_dest].write(' wire [PAYLOAD_BITS-1:0] din'+str(i)+';\n')
sw_level_1_list[sub_tree_dest].write(' wire [PAYLOAD_BITS-1:0] dout'+str(i)+';\n')
sw_level_1_list[sub_tree_dest].write(' \n')
if (i == pp_level_1_num[sub_tree_dest]):
sw_level_1_list[sub_tree_dest].write(' assign din'+str(i)+' = up_din['\
+ str(int(self.prflow_params['payload_bits'])*(in_port_used_level_1[sub_tree_dest]+1)-1)\
+ ':'\
+ str(int(self.prflow_params['payload_bits'])*in_port_used_level_1[sub_tree_dest]) + '];\n')
sw_level_1_list[sub_tree_dest].write(' assign val_in'+str(i)+' = up_val_in['\
+ str(in_port_used_level_1[sub_tree_dest])+'];\n')
sw_level_1_list[sub_tree_dest].write(' assign up_ready_upward['\
+ str(in_port_used_level_1[sub_tree_dest])+'] = ready_upward'+str(i)+';\n')
else:
sw_level_1_list[sub_tree_dest].write(' assign din'+str(i)+' = dout'+str(i-1)+';\n')
sw_level_1_list[sub_tree_dest].write(' assign val_in'+str(i)+' = val_out'+str(i-1)+';\n')
sw_level_1_list[sub_tree_dest].write(' assign ready_downward'+str(i-1)+' = ready_upward'+str(i)+';\n')
sw_level_1_list[sub_tree_dest].write(' \n')
sw_level_1_list[sub_tree_dest].write(' RelayStation #(\n')
sw_level_1_list[sub_tree_dest].write(' .PAYLOAD_BITS(PAYLOAD_BITS)\n')
sw_level_1_list[sub_tree_dest].write(' )RelayStation'+str(i)+'(\n')
sw_level_1_list[sub_tree_dest].write(' .clk(clk),\n')
sw_level_1_list[sub_tree_dest].write(' .reset(reset),\n')
sw_level_1_list[sub_tree_dest].write(' .ready_downward(ready_downward'+str(i)+'),\n')
sw_level_1_list[sub_tree_dest].write(' .val_out(val_out'+str(i)+'),\n')
sw_level_1_list[sub_tree_dest].write(' .ready_upward(ready_upward'+str(i)+'),\n')
sw_level_1_list[sub_tree_dest].write(' .val_in(val_in'+str(i)+'),\n')
sw_level_1_list[sub_tree_dest].write(' .din(din'+str(i)+'),\n')
sw_level_1_list[sub_tree_dest].write(' .dout(dout'+str(i)+')\n')
sw_level_1_list[sub_tree_dest].write(' );\n')
sw_level_1_list[sub_tree_dest].write(' assign dout_'+ str(dest_page_num%8) + '['\
+ str(int(self.prflow_params['payload_bits'])*(dest_port_num+1)-1)\
+ ':'\
+ str(int(self.prflow_params['payload_bits'])*dest_port_num) + '] = dout'+str(i)+';\n')
sw_level_1_list[sub_tree_dest].write(' assign val_out_' + str(dest_page_num%8) + '['\
+ str(dest_port_num)+'] = val_out'+str(i)+';\n')
sw_level_1_list[sub_tree_dest].write(' assign ready_downward'+str(i)+' = ready_downward_' + str(dest_page_num%8) + '['\
+ str(dest_port_num)+'];\n')
# construct relay_station for swtich box level 0
sw_level_0.write('\n\n// ' + src + '=>' + dest + '\n')
for i in range(pp_level_0_num, pp_level_0_num + pp_level_0_unit):
sw_level_0.write('\n\n wire ready_downward'+str(i)+';\n')
sw_level_0.write(' wire val_out'+str(i)+';\n')
sw_level_0.write(' wire ready_upward'+str(i)+';\n')
sw_level_0.write(' wire val_in'+str(i)+';\n')
sw_level_0.write(' wire [PAYLOAD_BITS-1:0] din'+str(i)+';\n')
sw_level_0.write(' wire [PAYLOAD_BITS-1:0] dout'+str(i)+';\n')
sw_level_0.write(' \n')
if (i == pp_level_0_num):
sw_level_0.write(' assign din'+str(i)+' = din_'+str(sub_tree_src)+'['\
+ str(int(self.prflow_params['payload_bits'])*(in_port_used_level_0[sub_tree_src]+1)-1)\
+ ':'\
+ str(int(self.prflow_params['payload_bits'])*in_port_used_level_0[sub_tree_src]) + '];\n')
sw_level_0.write(' assign val_in'+str(i)+' = val_in_'+str(sub_tree_src)+'['\
+ str(in_port_used_level_0[sub_tree_src])+'];\n')
sw_level_0.write(' assign ready_upward_'+str(sub_tree_src)+'['\
+ str(in_port_used_level_0[sub_tree_src])+'] = ready_upward'+str(i)+';\n')
else:
sw_level_0.write(' assign din'+str(i)+' = dout'+str(i-1)+';\n')
sw_level_0.write(' assign val_in'+str(i)+' = val_out'+str(i-1)+';\n')
sw_level_0.write(' assign ready_downward'+str(i-1)+' = ready_upward'+str(i)+';\n')
sw_level_0.write(' \n')
sw_level_0.write(' RelayStation #(\n')
sw_level_0.write(' .PAYLOAD_BITS(PAYLOAD_BITS)\n')
sw_level_0.write(' )RelayStation'+str(i)+'(\n')
sw_level_0.write(' .clk(clk),\n')
sw_level_0.write(' .reset(reset),\n')
sw_level_0.write(' .ready_downward(ready_downward'+str(i)+'),\n')
sw_level_0.write(' .val_out(val_out'+str(i)+'),\n')
sw_level_0.write(' .ready_upward(ready_upward'+str(i)+'),\n')
sw_level_0.write(' .val_in(val_in'+str(i)+'),\n')
sw_level_0.write(' .din(din'+str(i)+'),\n')
sw_level_0.write(' .dout(dout'+str(i)+')\n')
sw_level_0.write(' );\n')
sw_level_0.write(' assign dout_'+ str(sub_tree_dest) + '['\
+ str(int(self.prflow_params['payload_bits'])*(out_port_used_level_0[sub_tree_dest]+1)-1)\
+ ':'\
+ str(int(self.prflow_params['payload_bits'])*out_port_used_level_0[sub_tree_dest]) + '] = dout'+str(i)+';\n')
sw_level_0.write(' assign val_out_' + str(sub_tree_dest) + '['\
+ str(out_port_used_level_0[sub_tree_dest])+'] = val_out'+str(i)+';\n')
sw_level_0.write(' assign ready_downward'+str(i)+' = ready_downward_' + str(sub_tree_dest) + '['\
+ str(out_port_used_level_0[sub_tree_dest])+'];\n')
# update pipeline number and used port number
pp_level_0_num += pp_level_0_unit
pp_level_1_num[sub_tree_src] += pp_level_1_unit
pp_level_1_num[sub_tree_dest] += pp_level_1_unit
out_port_used_level_0[sub_tree_dest] += 1
in_port_used_level_0[sub_tree_src] += 1
out_port_used_level_1[sub_tree_src] += 1
in_port_used_level_1[sub_tree_dest] += 1
sw_level_0.write('\n\n// assignment for used pins in switch_level_0_route.v\n')
for i in range(len(in_port_used_level_0)):
sw_level_0.write('\n')
for j in range(in_port_used_level_0[i], int(self.prflow_params['l1_up_num'])):
sw_level_0.write(' assign ready_upward_'+str(i)+'['+str(j)+'] = 0;\n')
for j in range(out_port_used_level_0[i], int(self.prflow_params['l1_up_num'])):
sw_level_0.write(' assign dout_'+str(i)+'['\
+ str((j+1)*(int(self.prflow_params['payload_bits']))-1)+':'\
+ str(j*int(self.prflow_params['payload_bits']))+ '] = 0;\n')
sw_level_0.write(' assign val_out_'+str(i)+'['+str(j)+']=0;\n')
for i in range(len(in_port_used_level_1)):
sw_level_1_list[i].write('\n')
for j in range(in_port_used_level_1[i], int(self.prflow_params['l1_up_num'])):
sw_level_1_list[i].write(' assign up_ready_upward['+str(j)+'] = 0;\n')
for j in range(out_port_used_level_1[i], int(self.prflow_params['l1_up_num'])):
sw_level_1_list[i].write(' assign up_dout['\
+ str((j+1)*(int(self.prflow_params['payload_bits']))-1)+':'\
+ str(j*int(self.prflow_params['payload_bits']))+ '] = 0;\n')
sw_level_1_list[i].write(' assign up_val_out['+str(j)+']=0;\n')
        # tie off unused ports connected to pages
for i in range(len(in_page_ports_used)):
sub_tree_num = i/8
            # for switch_level_1, input and output are swapped relative to the page
for j in range(in_page_ports_used[i], int(self.prflow_params['num_out_ports'])):
# find all unused input ports in switch_level_1
sw_level_1_list[sub_tree_num].write(' assign ready_upward_'+str(i%8)+'['+str(j)+'] = 0;\n')
for j in range(out_page_ports_used[i], int(self.prflow_params['num_in_ports'])):
# find all unused output ports in switch_level_1
sw_level_1_list[sub_tree_num].write(' assign dout_'+str(i%8)+'['\
+ str((j+1)*(int(self.prflow_params['payload_bits']))-1)+':'\
+ str(j*int(self.prflow_params['payload_bits']))+ '] = 0;\n')
sw_level_1_list[sub_tree_num].write(' assign val_out_'+str(i%8)+'['+str(j)+']=0;\n')
sw_level_0.write('\n\nendmodule\n')
sw_level_0.close()
for i in range(4):
sw_level_1_list[i].write('\n\nendmodule\n')
sw_level_1_list[i].close()
if __name__ == '__main__':
prflow_params = {
"dest_dir": "./output",
"input_file_name": "./input/architecture.xml",
"sub_tree_num": "4",
"num_in_ports": '2',
"num_out_ports": '2',
"l1_up_num": '4',
"l0_pp": "8",
"l1_pp": "4",
"payload_bits": '32',
"addr_bits": '5'
}
direc_inst = direct_int(prflow_params)
direc_inst.gen_routing()
print "The output verilog files are under ./output"
| 2.4375 | 2 |
trs_filer/ga4gh/trs/endpoints/utils.py | zagganas/trs-filer | 8 | 12786707 | <filename>trs_filer/ga4gh/trs/endpoints/utils.py<gh_stars>1-10
"""Utility functions for endpoint controllers."""
from random import choice
import string
def generate_id(
charset: str = ''.join([string.ascii_letters, string.digits]),
length: int = 6,
) -> str:
"""Generate random string based on allowed set of characters.
Args:
charset: String of allowed characters.
length: Length of returned string.
Returns:
Random string of specified length and composed of defined set of
allowed characters.
"""
return ''.join(choice(charset) for __ in range(length))
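# Illustrative usage sketch (the charset below is an arbitrary choice, not a TRS
# requirement; the default call relies on the signature defined above):
if __name__ == '__main__':
    print(generate_id())                                        # e.g. 'aB3kP9'
    print(generate_id(charset=string.ascii_lowercase, length=12))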
| 2.546875 | 3 |
radiobear/Constituents/h2o/h2o_ddb.py | david-deboer/radiobear | 3 | 12786708 | import math
import os.path
from radiobear.constituents import parameters
# Some constants
T0 = 300.0 # reference temperature in K
AMU_H2O = 18.015
R = 8.314462E7
# Set data arrays
f0 = []
Ei = []
A = []
GH2 = []
GHe = []
GH2O = []
x_H2 = []
x_He = []
x_H2O = []
def readInputFiles(par):
"""If needed this reads in the data files for h2o"""
useLinesUpTo = 10 # index number
global nlin
nlin = 0
if par.verbose:
print("Reading h2o lines")
filename = os.path.join(par.path, 'h2od.lin')
ifp = open(filename, 'r')
for line in ifp:
if nlin >= useLinesUpTo:
break
nlin += 1
data = line.split()
if len(data) == 9:
f0.append(float(data[0]))
Ei.append(float(data[1]))
A.append(float(data[2]))
GH2.append(float(data[3]))
GHe.append(float(data[4]))
GH2O.append(float(data[5]))
x_H2.append(float(data[6]))
x_He.append(float(data[7]))
x_H2O.append(float(data[8]))
else:
break
ifp.close()
if par.verbose:
print(' ' + str(nlin) + ' lines')
return nlin
def alpha(freq, T, P, X, P_dict, other_dict, **kwargs):
# Read in data if needed
par = parameters.setpar(kwargs)
if len(f0) == 0:
readInputFiles(par)
P_h2 = P*X[P_dict['H2']]
P_he = P*X[P_dict['HE']]
P_h2o = P*X[P_dict['H2O']]
n_int = 3.0/2.0
rho = 1.0E12*AMU_H2O*P_h2o/(R*T)
Pa = 0.81*P_h2 + 0.35*P_he
alpha_h2o = []
for f in freq:
f2 = f**2
alpha = 0.0
for i in range(nlin):
gamma = pow((T0/T), x_H2[i])*GH2[i]*P_h2
gamma += pow((T0/T), x_He[i])*GHe[i]*P_he
gamma += pow((T0/T), x_H2O[i])*GH2O[i]*P_h2o
g2 = gamma**2
ITG = A[i]*math.exp(-Ei[i]/T)
shape = gamma/((f0[i]**2 - f2)**2 + 4.0*f2*g2)
alpha += shape*ITG
GR1971 = 1.08E-11*rho*pow((T0/T), 2.1)*Pa*f2
a = 2.0*f2*rho*pow((T0/T), n_int)*alpha/434294.5 + GR1971/434294.5
if par.units == 'dBperkm':
a *= 434294.5
alpha_h2o.append(a)
return alpha_h2o
| 2.40625 | 2 |
IO_helper.py | begab/mamus | 13 | 12786709 | import re
import gzip
import numpy as np
from zipfile import ZipFile
def load_corpus(corpus_file, load_tags=False):
if corpus_file.endswith('.gz'):
corpus = []
with gzip.open(corpus_file, 'r') as f:
for line in f:
corpus.append(line.decode("utf-8").split())
elif corpus_file.endswith('.conllu'):
corpus = read_conllUD_file(corpus_file, load_tags)
return corpus
def read_conllUD_file(location, load_tags):
sentences = []
tokens = []
with open(location) as f:
for l in f:
if not(l.strip().startswith('#')):
s = l.split('\t')
if len(s) == 10 and not('-' in s[0]):
if load_tags:
tokens.append((s[1], s[3]))
else:
tokens.append(s[1])
elif len(l.strip())==0 and len(tokens) > 0:
sentences.append(tokens)
tokens = []
return enforce_unicode(sentences)
def enforce_unicode(sentences):
"""
In Python3 we should check for str class instead of unicode according to
https://stackoverflow.com/questions/19877306/nameerror-global-name-unicode-is-not-defined-in-python-3
"""
if len(sentences) == 0 or type(sentences[0][0][0]) == str: # if the first token is already unicode, there seems nothing to be done
return sentences
return [[(unicode(t[0], "utf8"), unicode(t[1], "utf8")) for t in s] for s in sentences]
def load_embeddings(filename, max_words=-1):
if filename.endswith('.gz'):
lines = gzip.open(filename)
elif filename.endswith('.zip'):
myzip = ZipFile(filename) # we assume only one embedding file to be included in a zip file
lines = myzip.open(myzip.namelist()[0])
else:
lines = open(filename)
data, words = [], []
for counter, line in enumerate(lines):
if len(words) == max_words:
break
if type(line) == bytes:
try:
line = line.decode("utf-8")
except UnicodeDecodeError:
print('Error at line {}: {}'.format(counter, line))
continue
tokens = line.rstrip().split(' ')
if len(words) == 0 and len(tokens) == 2 and re.match('[1-9][0-9]*', tokens[0]):
# the first line might contain the number of embeddings and dimensionality of the vectors
continue
try:
values = [float(i) for i in tokens[1:]]
if sum([v**2 for v in values]) > 0: # only embeddings with non-zero norm are kept
data.append(values)
words.append(tokens[0])
except:
print('Error while parsing input line #{}: {}'.format(counter, line))
i2w = dict(enumerate(words))
return np.array(data), {v:k for k,v in i2w.items()}, i2w
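# Illustrative usage (the file paths and limits below are assumptions for
# demonstration only; they are not part of the original project):
if __name__ == '__main__':
    sentences = load_corpus('corpus/en-ud-train.conllu', load_tags=True)
    vectors, w2i, i2w = load_embeddings('embeddings/vectors.txt.gz', max_words=100000)
    print(len(sentences), vectors.shape, i2w[0])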
| 2.953125 | 3 |
application.py | bgalde-dev/dinism | 0 | 12786710 | from flask import (Flask, render_template, jsonify, request, redirect)
import os
import logging
# Get the logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
#################################################
# Flask Setup
#################################################
application = Flask(__name__)
#################################################
# Routes
#################################################
# create route that renders index.html template
@application.route("/")
def home():
return render_template("index.html")
#################################################
# Run the application
#################################################
# run the app.
if __name__ == "__main__":
# Setting debug to True enables debug output. This line should be
# removed before deploying a production app.
application.debug = True
application.run() | 2.59375 | 3 |
big_data/python_tools/big_data_tools/bokeh_tools/line_plot.py | paulhtremblay/big-data | 0 | 12786711 | <filename>big_data/python_tools/big_data_tools/bokeh_tools/line_plot.py
from bokeh.io import show
from bokeh.plotting import figure
import datetime
import argparse
from bokeh.models import NumeralTickFormatter
def line_plot(p, x, y, line_width = 2, legend=None):
p.line(x,y, line_width =line_width,legend=legend )
p.yaxis.formatter=NumeralTickFormatter(format="0,")
show(p)
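# Illustrative call (the data below is made up for demonstration; the module
# itself never constructs a figure):
if __name__ == '__main__':
    dates = [datetime.datetime(2021, 1, day) for day in range(1, 8)]
    counts = [1200, 1350, 1280, 1500, 1420, 1600, 1580]
    fig = figure(x_axis_type='datetime', title='Daily counts')
    line_plot(fig, dates, counts, legend='counts')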
| 2.828125 | 3 |
Project 7 -- Melanoma Cancer Detection with Deep Learning, CNN using TensorFlow/Inception.py | Vauke/Deep-Neural-Networks-HealthCare | 2 | 12786712 | <filename>Project 7 -- Melanoma Cancer Detection with Deep Learning, CNN using TensorFlow/Inception.py<gh_stars>1-10
import tensorflow as tf
import numpy as np
from tensorflow.python.framework import tensor_shape
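# Assumed module-level constants: the module below references names that are not
# defined in the file as shown. The tensor names and input sizes are the ones used
# by the stock Inception-v3 retrain graph; the two FINAL_MINUS_* layer widths are
# placeholders and may differ from the original project.
BOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'
BOTTLENECK_TENSOR_SIZE = 2048
JPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'
DECODED_JPEG_DATA_TENSOR_NAME = 'DecodeJpeg:0'
RESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'
MODEL_INPUT_WIDTH = 299
MODEL_INPUT_HEIGHT = 299
MODEL_INPUT_DEPTH = 3
FINAL_MINUS_2_LAYER_SIZE = 1024  # assumed width of the first added FC layer
FINAL_MINUS_1_LAYER_SIZE = 512   # assumed width of the second added FC layer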
class InceptionV3:
bottleneckTensor = None
finalTensor = None
groundTruthInput = None
trainStep = None
evaluationStep = None
bottleneckInput = None
inceptionGraph = None
jpeg_data_tensor = None
distortion_image_data_input_placeholder = None
distort_image_data_operation = None
keep_rate = 0.9
learning_rate = None
global_step = None
is_training = None
def __init__(self,modelPath):
self._create_inception_graph(modelPath)
def _create_inception_graph(self,modelPath):
with tf.Graph().as_default() as self.inceptionGraph:
with tf.gfile.FastGFile(modelPath, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
self.bottleneckTensor, self.jpeg_data_tensor, resized_input_tensor, self.decoded_jpeg_data_tensor = (tf.import_graph_def(graph_def, name='', return_elements=[BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,RESIZED_INPUT_TENSOR_NAME,DECODED_JPEG_DATA_TENSOR_NAME]))
def create_learning_rate(self,FLAGS,global_step,num_batches_per_epoch):
if FLAGS.learning_rate_type == "const":
print "Setting up a constant learning rate:"+str(FLAGS.learning_rate)
self.learning_rate = FLAGS.learning_rate
elif FLAGS.learning_rate_type == "exp_decay":
decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)
print "Setting up an exponentially decaying learning rate:"+str(FLAGS.learning_rate)+":"+str(decay_steps)+":"+str(FLAGS.learning_rate_decay_factor)
self.learning_rate = tf.train.exponential_decay(FLAGS.learning_rate,global_step,decay_steps,FLAGS.learning_rate_decay_factor,staircase=True)
else:
raise ValueError('Incorrect Learning Rate Type...')
def _add_non_bn_fully_connected_layer(self,input_to_layer,input_size,output_size,layer_name,keep_rate):
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value_weights = tf.truncated_normal([input_size, output_size],stddev=0.001)
layer_weights = tf.Variable(initial_value_weights, name='final_weights')
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([output_size]), name='final_biases')
with tf.name_scope('Wx_plus_b'):
logits_bn = tf.matmul(input_to_layer, layer_weights) + layer_biases
logits_bn = tf.nn.relu(logits_bn)
logits_bn = tf.nn.dropout(logits_bn, keep_rate)
return logits_bn
def _add_batch_norm(self,scope,x,is_training,reuse=None,epsilon=0.001,decay=0.99):
with tf.variable_scope(scope,reuse=reuse):
input_last_dimension = x.get_shape().as_list()[-1]
#BN Hyperparams
scale = tf.get_variable("scale", input_last_dimension, initializer=tf.constant_initializer(1.0), trainable=True)
beta = tf.get_variable("beta", input_last_dimension, initializer=tf.constant_initializer(0.0), trainable=True)
#Population Mean/Variance to be used while testing
pop_mean = tf.get_variable("pop_mean",input_last_dimension, initializer=tf.constant_initializer(0.0), trainable=False)
pop_var = tf.get_variable("pop_var", input_last_dimension, initializer=tf.constant_initializer(1.0), trainable=False)
if is_training:
#Mean and Variance of the logits
batch_mean, batch_var = tf.nn.moments(x,range(len(x.get_shape().as_list())-1))
train_mean = tf.assign(pop_mean,pop_mean * decay + batch_mean * (1 - decay))
train_var = tf.assign(pop_var,pop_var * decay + batch_var * (1 - decay))
with tf.control_dependencies([train_mean, train_var]):
logits_bn = tf.nn.batch_normalization(x,batch_mean, batch_var, beta, scale, epsilon)
else:
logits_bn = tf.nn.batch_normalization(x,pop_mean, pop_var, beta, scale, epsilon)
return logits_bn
def _add_contrib_batch_norm_layer(self,scope,x,is_training,decay=0.99):
return tf.contrib.layers.batch_norm(x,decay=decay, is_training=is_training,updates_collections=None,scope=scope,reuse=True,center=True)
def _add_contrib_bn_fully_connected_layer(self,input_to_layer,input_size,output_size,layer_name,keep_rate,is_training):
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value_weights = tf.truncated_normal([input_size, output_size],stddev=0.001)
layer_weights = tf.Variable(initial_value_weights, name='final_weights')
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([output_size]), name='final_biases')
with tf.name_scope('Wx_plus_b'):
#Calculate the logits
logits = tf.matmul(input_to_layer, layer_weights)
with tf.name_scope('batch_norm') as scope:
#Batch Normalization
logits_bn = self._add_contrib_batch_norm_layer(scope,logits,is_training)
#Non Linearity
logits_bn = tf.nn.relu(logits_bn)
#Dropout
logits_bn = tf.nn.dropout(logits_bn, keep_rate)
return logits_bn
# def _add_bn_fully_connected_layer(self,input_to_layer,input_size,output_size,layer_name,keep_rate,is_training):
# with tf.name_scope(layer_name):
# with tf.name_scope('weights'):
# initial_value_weights = tf.truncated_normal([input_size, output_size],stddev=0.001)
# layer_weights = tf.Variable(initial_value_weights, name='final_weights')
# with tf.name_scope('biases'):
# layer_biases = tf.Variable(tf.zeros([output_size]), name='final_biases')
# with tf.name_scope('Wx_plus_b'):
# #Calculate the logits
# logits = tf.matmul(input_to_layer, layer_weights)
# with tf.name_scope('batch_norm') as scope:
# #Batch Normalization
# logits_bn = tf.cond(is_training,
# lambda: self._add_batch_norm(scope,logits,True,None),
# lambda: self._add_batch_norm(scope,logits,False,True))
# #Non Linearity
# logits_bn = tf.nn.relu(logits_bn)
# #Dropout
# logits_bn = tf.nn.dropout(logits_bn, keep_rate)
# return logits_bn
def _add_fully_connected_layer(self,input_to_layer,input_size,output_size,layer_name,keep_rate,is_training, FLAGS):
if FLAGS.use_batch_normalization:
print "Batch normalization is turned on..."
return self._add_contrib_bn_fully_connected_layer(input_to_layer,input_size,output_size,layer_name,keep_rate,is_training)
else:
print "Batch normalization is turned off..."
return self._add_non_bn_fully_connected_layer(input_to_layer,input_size,output_size,layer_name,keep_rate)
def add_final_training_ops(self,class_count, final_tensor_name, optimizer_name, num_batches_per_epoch, FLAGS):
with self.inceptionGraph.as_default():
with tf.name_scope('input'):
self.bottleneckInput = tf.placeholder_with_default(self.bottleneckTensor, shape=[None, BOTTLENECK_TENSOR_SIZE],name='BottleneckInputPlaceholder')
self.groundTruthInput = tf.placeholder(tf.float32,[None, class_count],name='GroundTruthInput')
self.keep_rate = tf.placeholder(tf.float32, name='dropout_keep_rate')
self.is_training_ph = tf.placeholder(tf.bool, name='is_training_ph')
layer_name = 'final_minus_2_training_ops'
logits_final_minus_2 = self._add_fully_connected_layer(self.bottleneckInput,BOTTLENECK_TENSOR_SIZE,FINAL_MINUS_2_LAYER_SIZE,layer_name,self.keep_rate,self.is_training_ph,FLAGS)
layer_name = 'final_minus_1_training_ops'
logits_final_minus_1 = self._add_fully_connected_layer(logits_final_minus_2,FINAL_MINUS_2_LAYER_SIZE,FINAL_MINUS_1_LAYER_SIZE,layer_name,self.keep_rate,self.is_training_ph,FLAGS)
layer_name = 'final_training_ops'
with tf.name_scope(layer_name):
with tf.name_scope('weights'):
initial_value = tf.truncated_normal([FINAL_MINUS_1_LAYER_SIZE, class_count],stddev=0.001)
layer_weights = tf.Variable(initial_value, name='final_weights')
with tf.name_scope('biases'):
layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
with tf.name_scope('Wx_plus_b'):
logits = tf.matmul(logits_final_minus_1, layer_weights) + layer_biases
self.finalTensor = tf.nn.softmax(logits, name=final_tensor_name)
with tf.name_scope('cross_entropy'):
self.cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=self.groundTruthInput, logits=logits)
with tf.name_scope('total'):
self.cross_entropy_mean = tf.reduce_mean(self.cross_entropy)
self.global_step = tf.Variable(0, name='global_step', trainable=False)
self.create_learning_rate(FLAGS,self.global_step,num_batches_per_epoch)
with tf.name_scope('train'):
if optimizer_name == "sgd":
optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate)
elif optimizer_name == "adam":
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
elif optimizer_name == "rmsprop":
optimizer = tf.train.RMSPropOptimizer(self.learning_rate,FLAGS.rmsprop_decay,momentum=FLAGS.rmsprop_momentum,epsilon=FLAGS.rmsprop_epsilon)
else:
raise ValueError('Incorrect Optimizer Type...')
self.trainStep = optimizer.minimize(self.cross_entropy_mean,global_step=self.global_step)
def add_evaluation_step(self):
with self.inceptionGraph.as_default():
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
prediction = tf.argmax(self.finalTensor, 1)
correctPrediction = tf.equal(prediction, tf.argmax(self.groundTruthInput, 1))
with tf.name_scope('accuracy'):
self.evaluationStep = tf.reduce_mean(tf.cast(correctPrediction, tf.float32))
return self.evaluationStep, prediction
def train_step(self,sess,train_bottlenecks,train_ground_truth,dropout_keep_rate):
#print self.global_step.eval()
return sess.run([self.trainStep,self.cross_entropy_mean],feed_dict={self.bottleneckInput: train_bottlenecks,self.groundTruthInput: train_ground_truth, self.keep_rate:dropout_keep_rate, self.is_training_ph:True})
def evaluate(self,sess,data_bottlenecks,data_ground_truth):
accuracy, crossEntropyValue = sess.run([self.evaluationStep, self.cross_entropy_mean],feed_dict={self.bottleneckInput: data_bottlenecks,self.groundTruthInput: data_ground_truth, self.keep_rate:1, self.is_training_ph:False})
return accuracy,crossEntropyValue
def run_bottleneck_on_image(self,sess, image_data):
#bottleneck_values = sess.run(self.bottleneckTensor,{self.jpeg_data_tensor: image_data})
bottleneck_values = sess.run(self.bottleneckTensor,{self.decoded_jpeg_data_tensor: image_data})
bottleneck_values = np.squeeze(bottleneck_values)
return bottleneck_values
def distort_image(self,sess,image_data):
return sess.run(self.distort_image_data_operation ,{self.distortion_image_data_input_placeholder: image_data})
def add_input_distortions(self, flip_left_right, random_crop, random_scale,
random_brightness):
"""Creates the operations to apply the specified distortions.
During training it can help to improve the results if we run the images
through simple distortions like crops, scales, and flips. These reflect the
kind of variations we expect in the real world, and so can help train the
model to cope with natural data more effectively. Here we take the supplied
parameters and construct a network of operations to apply them to an image.
Cropping
~~~~~~~~
Cropping is done by placing a bounding box at a random position in the full
image. The cropping parameter controls the size of that box relative to the
input image. If it's zero, then the box is the same size as the input and no
cropping is performed. If the value is 50%, then the crop box will be half the
width and height of the input. In a diagram it looks like this:
< width >
+---------------------+
| |
| width - crop% |
| < > |
| +------+ |
| | | |
| | | |
| | | |
| +------+ |
| |
| |
+---------------------+
Scaling
~~~~~~~
Scaling is a lot like cropping, except that the bounding box is always
centered and its size varies randomly within the given range. For example if
the scale percentage is zero, then the bounding box is the same size as the
input and no scaling is applied. If it's 50%, then the bounding box will be in
a random range between half the width and height and full size.
Args:
flip_left_right: Boolean whether to randomly mirror images horizontally.
random_crop: Integer percentage setting the total margin used around the
crop box.
random_scale: Integer percentage of how much to vary the scale by.
random_brightness: Integer range to randomly multiply the pixel values by.
graph.
Returns:
The jpeg input layer and the distorted result tensor.
"""
print "Setting up image distortion operations..."
#jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
#decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)
with self.inceptionGraph.as_default():
decoded_image_as_float = tf.placeholder('float', [None,None,MODEL_INPUT_DEPTH])
decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
margin_scale = 1.0 + (random_crop / 100.0)
resize_scale = 1.0 + (random_scale / 100.0)
margin_scale_value = tf.constant(margin_scale)
resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
minval=1.0,
maxval=resize_scale)
scale_value = tf.multiply(margin_scale_value, resize_scale_value)
precrop_width = tf.multiply(scale_value, MODEL_INPUT_WIDTH)
precrop_height = tf.multiply(scale_value, MODEL_INPUT_HEIGHT)
precrop_shape = tf.stack([precrop_height, precrop_width])
precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
precropped_image = tf.image.resize_bilinear(decoded_image_4d,
precrop_shape_as_int)
precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
cropped_image = tf.random_crop(precropped_image_3d,
[MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,
MODEL_INPUT_DEPTH])
if flip_left_right:
flipped_image = tf.image.random_flip_left_right(cropped_image)
else:
flipped_image = cropped_image
brightness_min = 1.0 - (random_brightness / 100.0)
brightness_max = 1.0 + (random_brightness / 100.0)
brightness_value = tf.random_uniform(tensor_shape.scalar(),
minval=brightness_min,
maxval=brightness_max)
distort_result = tf.multiply(flipped_image, brightness_value)
#distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')
self.distortion_image_data_input_placeholder = decoded_image_as_float
self.distort_image_data_operation = distort_result
| 2.765625 | 3 |
DataStructurePython/stack.py | xnth97/Data-Structure-Notes | 0 | 12786713 | <reponame>xnth97/Data-Structure-Notes<filename>DataStructurePython/stack.py
class Stack:
def __init__(self):
self.array = []
def is_empty(self):
return len(self.array) == 0
def push(self, item):
self.array.append(item)
def pop(self):
if self.is_empty():
return
return self.array.pop()
def peek(self):
if self.is_empty():
return
return self.array[-1] | 3.421875 | 3 |
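# Illustrative usage of the Stack class above:
if __name__ == '__main__':
    s = Stack()
    for item in (1, 2, 3):
        s.push(item)
    print(s.peek())      # 3
    print(s.pop())       # 3
    print(s.pop())       # 2
    print(s.is_empty())  # False: 1 is still on the stack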
funcs.py | DhruvPatel01/NotAeroCalc | 0 | 12786714 | <reponame>DhruvPatel01/NotAeroCalc
univariate_funcs = {
'log', 'exp', 'log10',
'sqrt', 'abs',
'cos', 'sin', 'tan', 'arccos', 'arcsin', 'arctan',
} | 1.25 | 1 |
agsearch/text.py | Viva-Lambda/agsearch-python | 0 | 12786715 | # simple text object
from typing import List, Dict
import os
import pdb
import re
from agsearch.textinfo import TextInfo
from agsearch.terminfo import TermInfo
from agsearch.utils import DATA_DIR
from agsearch.utils import PUNCTUATIONS
from greek_accentuation.characters import base
from cltk.stop.greek.stops import STOPS_LIST
from cltk.corpus.greek.alphabet import filter_non_greek
class Text:
def __init__(
self, chunks: List[str], has_chunks: bool, is_clean: bool, text_id: str
):
self.chunks = chunks
self.has_chunks = has_chunks
self.is_clean = is_clean
self.text_id = text_id
self.term_freq: Dict[str, int] = {}
@classmethod
def read_text(cls, path: str) -> str:
txt = None
with open(path, "r", encoding="utf-8") as f:
txt = f.read()
return txt
@classmethod
def to_lower(cls, txt: str) -> str:
return txt.lower()
@classmethod
def remove_stop_words(cls, txt: str) -> str:
"remove stop words starting from longer"
text = txt
slist = STOPS_LIST.copy()
slist.sort(key=lambda x: len(x), reverse=True)
for word in slist:
text = text.replace(word, " ")
return text
@classmethod
def remove_punk(cls, txt: str) -> str:
""
text = txt
for punk in PUNCTUATIONS:
text = text.replace(punk, " ")
return text
@classmethod
def remove_accent(cls, txt: str) -> str:
"remove accents from chars"
txts: List[str] = []
for t in txt:
tclean = base(t)
txts.append(tclean)
return "".join(txts)
@classmethod
def remove_non_greek(cls, txt: str) -> str:
""
return filter_non_greek(txt)
@classmethod
def remove_multiple_space(cls, txt: str):
""
return re.sub(r"\s+", " ", txt)
@classmethod
def clean_chunk(cls, txt: str):
txt = cls.remove_non_greek(txt)
return cls.remove_multiple_space(txt)
@classmethod
def clean_text(cls, text: str) -> str:
txt = cls.to_lower(text)
txt = cls.remove_stop_words(txt)
txt = cls.remove_punk(txt)
txt = cls.remove_accent(txt)
return txt
@classmethod
def get_terms(cls, chunks: List[str], sep: str) -> Dict[str, int]:
""
terms: Dict[str, int] = {}
for chunk in chunks:
chunk_terms = [t.strip() for t in chunk.split(sep) if t]
for t in chunk_terms:
if t in terms:
terms[t] += 1
else:
terms[t] = 1
return terms
@classmethod
def from_info(cls, info: TextInfo, chunk_sep: str = " "):
"create text from text info"
text_id = info.text_id
text_path = os.path.join(DATA_DIR, info.local_path)
text = cls.read_text(text_path)
text = cls.clean_text(text)
terms: Dict[str, int] = {}
chunks: List[str] = []
if info.has_chunks:
chunks = text.split(info.chunk_separator)
chunks = [cls.clean_chunk(c) for c in chunks if c]
terms = cls.get_terms(chunks, chunk_sep)
else:
chunks = [text]
chunks = [cls.clean_chunk(c) for c in chunks if c]
terms = cls.get_terms(chunks, chunk_sep)
#
text_obj = Text(
chunks=chunks, has_chunks=info.has_chunks, is_clean=True, text_id=text_id
)
text_obj.term_freq = terms
return text_obj
def to_doc_counts(self) -> Dict[str, Dict[str, int]]:
""
term_doc_id_counts: Dict[str, Dict[str, int]] = {}
for term, count in self.term_freq.items():
doc_id_count = {self.text_id: count}
term_doc_id_counts[term] = doc_id_count
return term_doc_id_counts
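# Illustrative, self-contained check of the classmethod helpers (no corpus files
# involved; assumes the module's own imports resolve):
if __name__ == "__main__":
    sample_chunks = ["α β α", "β γ"]
    print(Text.get_terms(sample_chunks, " "))      # {'α': 2, 'β': 2, 'γ': 1}
    print(Text.remove_multiple_space("α  β   γ"))  # 'α β γ'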
| 2.59375 | 3 |
migrations/versions/452faf7b38da_.py | alijafer/familyTree | 0 | 12786716 | <filename>migrations/versions/452faf7b38da_.py<gh_stars>0
"""empty message
Revision ID: 452faf7b38da
Revises:
Create Date: 2020-07-04 00:50:28.003698
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '452faf7b38da'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('Relations', sa.Column('person', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'Relations', 'Persons', ['person'], ['person_id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'Relations', type_='foreignkey')
op.drop_column('Relations', 'person')
# ### end Alembic commands ###
| 1.484375 | 1 |
misc/greenlet_workers.py | establishment/django-establishment | 1 | 12786717 | import gevent
import gevent.queue
from establishment.misc.command_processor import BaseProcessor
from establishment.misc.threading_helper import ThreadHandler
from establishment.funnel.redis_stream import RedisStreamPublisher, RedisStreamSubscriber, RedisQueue, \
redis_response_to_json
class GreenletWorker(gevent.Greenlet):
def __init__(self, logger=None, context=None):
gevent.Greenlet.__init__(self)
self.running = False
self.logger = logger
self.context = context
def _run(self):
self.running = True
self.init()
while self.running:
try:
self.tick()
gevent.sleep(0)
except Exception:
self.log_exception("Error in worker " + str(self.__class__.__name__))
self.cleanup()
self.init()
def init(self):
pass
def cleanup(self):
pass
def tick(self):
pass
def stop(self):
self.running = False
def log_exception(self, message):
if self.logger:
self.logger.exception(message)
def log_error(self, message):
if self.logger:
self.logger.error(message)
class GreenletQueueWorker(GreenletWorker):
def __init__(self, job_queue=None, result_queue=None, logger=None, context=None):
super().__init__(logger=logger, context=context)
self.job_queue = job_queue
self.result_queue = result_queue
def tick(self):
try:
command = self.job_queue.get(timeout=1)
except gevent.queue.Empty:
return
result = self.process_command(command)
if result:
self.result_queue.put(result)
def process_command(self, command):
return None
class GreenletRedisQueueListener(GreenletQueueWorker):
def __init__(self, job_queue, redis_queue_name, redis_connection=None, logger=None, context=None,
job_queue_max_size=1024, bulk_size=128):
super().__init__(job_queue=job_queue, logger=logger, context=context)
self.redis_queue_name = redis_queue_name
self.redis_queue = None
self.redis_connection = redis_connection
self.job_queue_max_size = job_queue_max_size
self.bulk_size = bulk_size
self.activate_bulk_retrieval = False
def init(self):
if not self.redis_queue:
self.redis_queue = RedisQueue(self.redis_queue_name, connection=self.redis_connection)
def cleanup(self):
self.redis_queue = None
def tick(self):
if self.job_queue.qsize() >= self.job_queue_max_size:
gevent.sleep(0.5)
return
if self.activate_bulk_retrieval:
jobs = self.redis_queue.bulk_pop(self.bulk_size)
if len(jobs) == 0:
self.activate_bulk_retrieval = False
else:
job = self.redis_queue.pop(timeout=1)
if job:
self.activate_bulk_retrieval = True
jobs = [job]
for job in jobs:
job = redis_response_to_json(job)
if job:
self.job_queue.put(job)
class GreenletRedisStreamListener(GreenletQueueWorker):
def __init__(self, job_queue, redis_stream_name, logger=None, context=None):
super().__init__(job_queue=job_queue, logger=logger, context=context)
self.redis_stream_name = redis_stream_name
self.redis_stream_subscriber = None
def init(self):
if not self.redis_stream_subscriber:
self.redis_stream_subscriber = RedisStreamSubscriber()
self.redis_stream_subscriber.subscribe(self.redis_stream_name)
def cleanup(self):
self.redis_stream_subscriber = None
def tick(self):
message, stream_name = self.redis_stream_subscriber.next_message()
message = redis_response_to_json(message)
if message:
self.job_queue.put(message)
class GreenletRedisStreamPublisher(GreenletQueueWorker):
def __init__(self, result_queue, redis_stream_name, logger=None, context=None):
super().__init__(result_queue=result_queue, logger=logger, context=context)
self.redis_stream_name = redis_stream_name
self.redis_stream_publisher = None
def init(self):
if not self.redis_stream_publisher:
self.redis_stream_publisher = RedisStreamPublisher(self.redis_stream_name, raw=True)
def cleanup(self):
self.redis_stream_publisher = None
def tick(self):
try:
result = self.result_queue.get(timeout=1)
except gevent.queue.Empty:
return
if not result:
return
self.redis_stream_publisher.publish_json(result)
class GreenletRedisQueueCommandProcessor(BaseProcessor):
def __init__(self, logger_name, WorkerClass, redis_queue_name_in, redis_stream_name_out=None, num_workers=10,
job_queue_max_size=1024):
super().__init__(logger_name=logger_name)
self.workers = []
self.job_queue = None
self.result_queue = None
self.num_workers = num_workers
self.job_queue_max_size = job_queue_max_size
self.redis_queue_name_in = redis_queue_name_in
self.redis_stream_name_out = redis_stream_name_out
self.WorkerClass = WorkerClass
self.worker_context = None
def main(self):
self.workers = []
self.job_queue = gevent.queue.Queue()
self.result_queue = gevent.queue.Queue()
self.workers.append(GreenletRedisQueueListener(job_queue=self.job_queue, logger=self.logger,
redis_queue_name=self.redis_queue_name_in,
job_queue_max_size=self.job_queue_max_size))
if self.redis_stream_name_out:
self.workers.append(GreenletRedisStreamPublisher(result_queue=self.result_queue, logger=self.logger,
redis_stream_name=self.redis_stream_name_out))
for i in range(self.num_workers):
self.workers.append(self.WorkerClass(job_queue=self.job_queue, result_queue=self.result_queue,
logger=self.logger, context=self.worker_context))
for worker in self.workers:
worker.start()
gevent.joinall(self.workers)
self.workers = []
self.job_queue = None
self.result_queue = None
self.logger.info("Gracefully stopped to process commands " + str(self.__class__.__name__))
def start(self):
self.background_thread = ThreadHandler("Command processor " + str(self.__class__.__name__), self.process,
daemon=False)
def stop(self):
super().stop()
for worker in self.workers:
worker.stop()
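# Minimal wiring sketch (the worker class, queue and stream names below are
# invented for illustration; they are not part of this module):
class EchoWorker(GreenletQueueWorker):
    def process_command(self, command):
        return {"echo": command}


class EchoProcessor(GreenletRedisQueueCommandProcessor):
    def __init__(self):
        super().__init__("echo_logger", EchoWorker,
                         redis_queue_name_in="echo.jobs",
                         redis_stream_name_out="echo.results",
                         num_workers=4)
# An EchoProcessor().start() call would then run the queue listener, four workers
# and the stream publisher on a background thread.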
| 2.140625 | 2 |
setup.py | downneck/mothership | 4 | 12786718 | #!/usr/bin/python
import setuptools
from setuptools import setup, find_packages
install_requires = [
'cmdln',
'SQLAlchemy == 0.5.5'
]
extras_require = {
'ldap' : 'python-ldap',
'postgres' : 'psycopg2'
}
setup(name='Mothership',
author='Gilt SA team',
author_email='<EMAIL>',
description='Mothership - asset managment',
packages=find_packages(),
scripts=['ship',
'ship_readonly'
],
url='http://mothership.sf.net',
version='0.0.28',
)
| 1.226563 | 1 |
programming-laboratory-I/1wge/FC.py | MisaelAugusto/computer-science | 0 | 12786719 | f = float(raw_input())
c = (f - 32)/1.8
k = c + 273.15
print "Fahrenheit: %.3f F" %f
print "Celsius: %.3f C" %c
print "Kelvin: %.3f K" %k
| 3.5625 | 4 |
html_export.py | slookin/sparx2web | 0 | 12786720 | <reponame>slookin/sparx2web
import shutil
import traceback
from sparx_lib import logger
from sparx_lib import open_repository
import argparse
import yaml
config_file = open("config.yaml","r")
config = yaml.load(config_file, Loader=yaml.FullLoader)
config_file.close()
#print(config)
#exit(1)
parser = argparse.ArgumentParser(description='Rebuild eaWeb')
parser.add_argument('models', metavar='Model', type=str, nargs='+', help='model name, "all" means - all models', default="Sprint 1.4")
args = parser.parse_args()
logger.debug("arg models: "+str(args.models))
if args.models[0]=="all":
export_tasks = config["models"]
else:
export_tasks=[]
for element in config["models"]:
if element.get("model") in args.models:
export_tasks.append(element)
logger.debug("export_tasks: "+str(export_tasks))
#logger.debug("args: " + args)
#logger.debug("export_tasks: " + export_tasks)
try:
# shutil.copy(SOURCE_MODEL, MODEL_PATH)
# eaRep = open_repository(MODEL_PATH)
eaRep = open_repository(config["repository"], config["login"], config["password"])
logger.debug("eap opened")
models = eaRep.Models
# model search begin
    # looks like a clumsy way to find a specific Model, but no better solution was found for now
for element in export_tasks:
GUID = ""
logger.debug("model: "+element["model"])
for i in range(0,models.Count):
if models.getAt(i).Name == element["model"]:
GUID = models.getAt(i).PackageGUID
if GUID == "":
raise Exception("Couldn't find model")
# /model search end
# sync
# RootPackage = eaRep.GetPackageByGuid(GUID)
# recursivePackageSVNUpdate(RootPackage, 0)
# logger.debug("updated from svn")
eaRep.GetProjectInterface().RunHTMLReport(PackageGUID=GUID,
ExportPath=element["path"], ImageFormat='PNG', Style='t-magic', Extension='.html')
logger.debug("html report created")
except Exception as err:
traceback.print_exc()
logger.exception("eaRep fails")
#RunHTMLReport
#def RunHTMLReport(self, PackageGUID=defaultNamedNotOptArg, ExportPath=defaultNamedNotOptArg,
# ImageFormat=defaultNamedNotOptArg, Style=defaultNamedNotOptArg
# , Extension=defaultNamedNotOptArg):
try:
eaRep.Exit()
except:
pass
| 2.234375 | 2 |
scrape-twitter.py | Lackshan/crypto-tweet-tracker | 1 | 12786721 | <reponame>Lackshan/crypto-tweet-tracker
# This will be a script or part of a script
from bs4 import BeautifulSoup as soup
import requests
URL = "https://twitter.com/elonmusk"
# 1. Make a request to twitter
def request_page():
    try:
        response = requests.get(URL)
    except Exception as e:
        print(e)
        return None
    if response.status_code != 200:
        print("Non success status code returned " + str(response.status_code))
    return soup(response.text, "html.parser")
# 2. Give beautiful soup the response html page from twitter
def request_last_tweet(twitter_page):
tweets = twitter_page.find_all("li", {"data-item-type": "tweet"})
for tweet in tweets:
print(tweet)
#Extract tweet
twitter_page = request_page()
request_last_tweet(twitter_page)
# 3. Get the latest tweet
| 3.453125 | 3 |
testproject/urls.py | Camille-cmd/django-rosetta | 0 | 12786722 | <filename>testproject/urls.py
from django.conf.urls import include, re_path
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
admin.autodiscover()
urlpatterns = [
re_path(r'^admin/', admin.site.urls),
re_path(r'^rosetta/', include('rosetta.urls')),
]
urlpatterns += staticfiles_urlpatterns()
| 1.890625 | 2 |
models/dynamic_memory/memory_retention_vector.py | zankner/DNC | 3 | 12786723 | <filename>models/dynamic_memory/memory_retention_vector.py
import tensorflow as tf
import numpy as np
def mem_retention(free_gates, read_weightings):
return tf.linalg.matvec(free_gates, read_weightings, transpose_a=True)
| 2.671875 | 3 |
adv_ant/retroNet/urls.py | ayushxx7/retroNet | 40 | 12786724 | <reponame>ayushxx7/retroNet<filename>adv_ant/retroNet/urls.py<gh_stars>10-100
from django.urls import path, include, re_path
from . import views
urlpatterns = [
path("register", views.register, name="register"),
path('', include("django.contrib.auth.urls")),
re_path(r'^$', views.my_profile, name='profile_two'),
path('post', views.createpost, name='post'),
path('posted', views.index, name='index'),
path('home/', views.home, name='home'),
path('updateprofile', views.update_profile, name='updateprofile'),
path('updateprofiledisplay', views.update_profile_display,
name='updateprofiledisplay'),
path('profile', views.my_profile, name="my_profile"),
path('logout', views.logout_page, name="logout_test"),
path('logout_confirm', views.logout_request, name="logout_done"),
path('account_deletion', views.account_deletion, name='account_deletion'),
path('create_tweet', views.create_tweet, name="create_tweet"),
path('view_tweet', views.view_tweet, name="view_tweet")
]
| 1.953125 | 2 |
modules/expense_manage/models.py | xuhuiliang-maybe/ace_office | 1 | 12786725 | # coding=utf-8
from django.contrib.auth.models import User
from django.db import models
from modules.dict_table.models import ExpenseType
from modules.employee_management.employee_info.models import Employee
from modules.project_manage.models import Project
APPLY_STATUS_CHOICES = (
('1', u'待审批'),
('2', u'通过'),
('3', u'拒绝'),
)
INOROUT_CHOICES = (
('1', u'支出'),
('2', u'收入'),
)
class Expense(models.Model):
"""费用信息 """
emplyid = models.ForeignKey(Employee, verbose_name=u"员工编号", related_name="expense_emp", blank=True, null=True)
projectid = models.ForeignKey(Project, verbose_name=u"项目名称", related_name="expense_project", blank=True,
null=True)
userid = models.ForeignKey(User, verbose_name=u"费用负责人", related_name="expense_user", blank=True, null=True)
expensetype = models.ForeignKey(ExpenseType, verbose_name=u"费用类型", blank=True, null=True)
inorout = models.CharField(u"收支类型", max_length=100, choices=INOROUT_CHOICES, default='1')
note = models.CharField(u"申请说明", max_length=100)
apply_user = models.ForeignKey(User, verbose_name=u"申请人")
created = models.DateTimeField(u"申请时间", auto_now_add=True)
handle_user = models.CharField(u"审批人", max_length=100, blank=True)
handle_date = models.DateTimeField(u"审批时间", blank=True, null=True)
reason = models.CharField(u"审批回复", max_length=100, blank=True)
status = models.CharField(u"审批状态", max_length=100, choices=APPLY_STATUS_CHOICES, default='1')
remark1 = models.CharField(u"备注1", max_length=256, blank=True)
remark2 = models.CharField(u"备注2", max_length=256, blank=True)
remark3 = models.CharField(u"备注3", max_length=256, blank=True)
remark4 = models.CharField(u"备注4", max_length=256, blank=True)
remark5 = models.CharField(u"备注5", max_length=256, blank=True)
def __str__(self):
return self.emplyid
class Meta:
verbose_name = u"费用信息"
        ordering = ['-id'] # order by id, descending
        index_together = ["emplyid", "projectid"] # composite index on these fields
permissions = (
("browse_expense", u"浏览 费用信息"),
)
def get_absolute_url(self):
return "/expense/list"
| 2.109375 | 2 |
_site/tomat/apps/checkout/models.py | Lisaveta-K/lisaveta-k.github.io | 0 | 12786726 | <filename>_site/tomat/apps/checkout/models.py
# -*- coding: utf-8 -*-
import datetime
from django.db import models
from users.models import User, Address
from products.models import Product
from shops.models import Delivery, Shop, Discount
from adverts.models import Coupon
class OrderItem(models.Model):
order = models.ForeignKey('checkout.Order')
product = models.ForeignKey(Product, null=True, blank=True, verbose_name=u'Товар')
product_legacy_id = models.PositiveIntegerField(editable=False, default=0)
amount = models.PositiveIntegerField(u'Количество', default=1)
per_item = models.DecimalField(u'Цена за штуку', max_digits=8, decimal_places=2)
sign = models.TextField(u'Подпись к товару', blank=True)
@property
def quantity(self):
return self.amount
@property
def net(self):
return self.amount * self.per_item
class Order(models.Model):
STATUS_RECEIVED = 0
STATUS_PAYMENT_WAITING = 1
STATUS_PAYED = 2
STATUS_PROCESSING = 3
STATUS_SENDED = 4
STATUS_CANCELED = 5
STATUS_CHOICES = (
(STATUS_RECEIVED, u'Принят'),
(STATUS_PAYMENT_WAITING, u'Ожидает оплаты'),
(STATUS_PAYED, u'Оплачен'),
(STATUS_PROCESSING, u'В обработке'),
(STATUS_SENDED, u'Отправлен'),
(STATUS_CANCELED, u'Отменен'),
)
    PAYMENT_ROBOKASSA = 0 # Payment via Robokassa
    PAYMENT_CASHPICK = 1 # Cash on pickup
    PAYMENT_COURIER = 2 # Pay the courier
    PAYMENT_CASHLESS = 3 # Cashless (bank transfer)
PAYMENT_CHOICES = (
(PAYMENT_ROBOKASSA, u'Робокасса'),
(PAYMENT_CASHPICK, u'Наличными'),
(PAYMENT_COURIER, u'Курьеру'),
(PAYMENT_CASHLESS, u'Безналичный расчет'),
)
user = models.ForeignKey(User, related_name='orders', null=True, blank=True, verbose_name=u'Пользователь')
address = models.ForeignKey(Address, related_name='orders', null=True, blank=True, verbose_name=u'Адрес')
delivery_date = models.DateField(u'Дата доставки', default=None, null=True, blank=True)
delivery_time = models.TimeField(u'Время доставки', default=None, null=True, blank=True)
shop = models.ForeignKey(Shop, related_name='orders', null=True, blank=True, verbose_name=u'Магазин')
status = models.PositiveSmallIntegerField(u'Статус', choices=STATUS_CHOICES, default=STATUS_RECEIVED)
products_price = models.DecimalField(u'Стоимость товаров без скидок', default=0, max_digits=8, decimal_places=2)
    net = models.DecimalField(u'Сумма заказа', default=0, max_digits=8, decimal_places=2) # including delivery and discounts
code = models.CharField(max_length=10)
delivery = models.ForeignKey(Delivery, null=True, blank=True, verbose_name=u'Доставка')
delivery_cost = models.PositiveIntegerField(u'Стоимость доставки', default=0)
payment_type = models.PositiveSmallIntegerField(u'Оплата', choices=PAYMENT_CHOICES, null=True, blank=True)
discount = models.ForeignKey(Discount, null=True, blank=True, verbose_name=u'Скидка')
coupon = models.ForeignKey(Coupon, null=True, blank=True, verbose_name=u'Промо-код')
comment = models.TextField(u'Комментарий', blank=True)
products = models.ManyToManyField(Product, through=OrderItem)
created = models.DateTimeField(u'Дата создания', default=datetime.datetime.now)
is_emails_sended = models.BooleanField(default=False)
class Meta:
verbose_name = u'заказ'
verbose_name_plural = u'заказы'
def __unicode__(self):
return u'Заказ №%s' % self.id
@property
def products_cost(self):
return self.net - self.delivery_cost
@property
def coupon_price(self):
return self.products_price - self.net
class CourierCity(models.Model):
title = models.CharField(u'Название', max_length=255)
price = models.PositiveIntegerField(u'Цена')
class Meta:
verbose_name = u'город курьерской доставки'
verbose_name_plural = u'город курьерской доставки'
def __unicode__(self):
return self.title
| 2.046875 | 2 |
DepthGAN/losses/base_generator_loss.py | sharanramjee/single-image-stereo-depth-estimation | 0 | 12786727 | import torch
import torch.nn as nn
import torch.nn.functional as F
from losses.bilinear_sampler import apply_disparity
from .ssim import ssim_gauss, ssim_godard
class BaseGeneratorLoss(nn.modules.Module):
def __init__(self, args):
super(BaseGeneratorLoss, self).__init__()
self.which_ssim = args.which_ssim
self.ssim_window_size = args.ssim_window_size
def scale_pyramid(self, img, num_scales):
scaled_imgs = [img]
s = img.size()
h = s[2]
w = s[3]
for i in range(num_scales - 1):
ratio = 2 ** (i + 1)
nh = h // ratio
nw = w // ratio
scaled_imgs.append(nn.functional.interpolate(img, [nh, nw], mode='bilinear', align_corners=False))
return scaled_imgs
def gradient_x(self, img):
# Pad input to keep output size consistent
img = F.pad(img, (0, 1, 0, 0), mode="replicate")
gx = img[:, :, :, :-1] - img[:, :, :, 1:] # NCHW
return gx
def gradient_y(self, img):
# Pad input to keep output size consistent
img = F.pad(img, (0, 0, 0, 1), mode="replicate")
gy = img[:, :, :-1, :] - img[:, :, 1:, :] # NCHW
return gy
def generate_image_left(self, img, disp):
return apply_disparity(img, -disp)
def generate_image_right(self, img, disp):
return apply_disparity(img, disp)
def SSIM(self, x, y):
if self.which_ssim == 'godard':
return ssim_godard(x, y)
elif self.which_ssim == 'gauss':
return ssim_gauss(x, y, window_size=self.ssim_window_size)
else:
raise ValueError('{} version not implemented'.format(self.which_ssim))
def disp_smoothness(self, disp, pyramid):
disp_gradients_x = [self.gradient_x(d) for d in disp]
disp_gradients_y = [self.gradient_y(d) for d in disp]
image_gradients_x = [self.gradient_x(img) for img in pyramid]
image_gradients_y = [self.gradient_y(img) for img in pyramid]
weights_x = [torch.exp(-torch.mean(torch.abs(g), 1, keepdim=True)) for g in image_gradients_x]
weights_y = [torch.exp(-torch.mean(torch.abs(g), 1, keepdim=True)) for g in image_gradients_y]
smoothness_x = [disp_gradients_x[i] * weights_x[i] for i in range(self.n)]
smoothness_y = [disp_gradients_y[i] * weights_y[i] for i in range(self.n)]
return smoothness_x + smoothness_y
def forward(self, input, target):
pass
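# Quick shape sanity check for the pyramid helper (illustrative; `args` is faked
# here with a SimpleNamespace exposing the two fields read in __init__, and the
# sibling loss modules of this package are assumed to be importable):
if __name__ == '__main__':
    from types import SimpleNamespace
    loss_helper = BaseGeneratorLoss(SimpleNamespace(which_ssim='godard', ssim_window_size=11))
    images = torch.randn(2, 3, 256, 512)
    print([tuple(p.shape) for p in loss_helper.scale_pyramid(images, 4)])
    # -> [(2, 3, 256, 512), (2, 3, 128, 256), (2, 3, 64, 128), (2, 3, 32, 64)]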
| 1.765625 | 2 |
euler59.py | dchourasia/euler-solutions | 0 | 12786728 | <filename>euler59.py<gh_stars>0
'''
Your task has been made easy, as the encryption key consists of three lower case characters. Using p059_cipher.txt (right click and 'Save Link/Target As...'), a file containing the encrypted ASCII codes, and the knowledge that the plain text must contain common English words, decrypt the message and find the sum of the ASCII values in the original text.
'''
data = open('p059_cipher.txt').read().split(',')
dec = []
import re
alphabet = 'abcdefghijklmnopqrstuvwxyz'  # candidate key characters: all 26 lowercase letters
import itertools
keys = itertools.permutations(alphabet, 3)
finalKey = ''
for key in keys:
s = ''
for i, c in enumerate(data[:50]):
s += chr(int(c)^ord(key[i%3]))
m = re.match('^[a-zA-Z ]{50}$', s)
if m:
print(key)
finalKey = key
break
#dec.append(s)
s = ''
sm = 0
for i, c in enumerate(data):
d = chr(int(c)^ord(finalKey[i%3]))
s += d
sm += ord(d)
print(s, sm)
#[print(s) for s in dec]
#print(s)
#print(65^42)
| 3.875 | 4 |
StockNest/login/funcs.py | vaibhavantil2/Stock-Price-Forecasting-Using-Artificial-Intelligence | 29 | 12786729 | from essentials.views import randomString,getCurrentTime,errorResp
from models import authToken,verificationCode
from constants import AUTH_EXPIRY_MINS
from StockNest.settings import LOGIN_URL
from django.http import HttpResponseRedirect,JsonResponse,HttpRequest
from django.db.models import Q
from school.funcs import isDemoUser, demoError
# server = "https://eduhubweb.com"
# if DEBUG:
# server = "http://localhost:8000"
def getNewAuth():
while(1):
new_auth = randomString(50)
existing = authToken.objects.filter(Q(mauth = new_auth)|Q(wauth = new_auth)|Q(pmauth = new_auth)|Q(pwauth = new_auth)).count()
if existing == 0:
return new_auth
def getUserAuth(typeVal,u):
if typeVal == 'm':
try:
at = authToken.objects.get(user=u)
at.misExpired = False
at.mlastUpdated = getCurrentTime()
at.pmauth = at.mauth
at.mauth = getNewAuth()
at.save()
return at.mauth
except authToken.DoesNotExist:#first login
at = authToken.objects.create(user=u, mauth = getNewAuth(),wauth= getNewAuth(),pmauth = getNewAuth(),pwauth= getNewAuth())
return at.mauth
elif typeVal == 'w':
try:
at = authToken.objects.get(user=u)
at.wisExpired = False
at.wlastUpdated = getCurrentTime()
at.pwauth = at.wauth
at.wauth = getNewAuth()
at.save()
return at.wauth
except authToken.DoesNotExist:#first login
at = authToken.objects.create(user=u, mauth = getNewAuth(),wauth= getNewAuth(),pmauth = getNewAuth(),pwauth= getNewAuth())
return at.wauth
### authentication decorative for our website
### time based
def stocknestAPI(loginRequired=False,function=None):
def _dec(view_func):
def _view(request, *args, **kwargs):
request.user = None
headers = request.META
if loginRequired:#if required, return 401/412
if 'HTTP_AUTHORIZATION' in headers:
value = headers['HTTP_AUTHORIZATION'] #format keyw/m=auth
elements = value.split('=')
if len(elements) != 2:
return errorResp(401)
auth_val = elements[1]
if elements[0] == 'keym':
try:
obj = authToken.objects.get(mauth=auth_val)
if not checkAuthTimestamp(obj.mlastUpdated):
obj.misExpired = True
obj.save()
return errorResp(412,"Auth expired")
request.user = obj.user
if request.method in ['POST','PUT','PATCH','DELETE']:
if isDemoUser(request.user):
return demoError()
return view_func(request, *args, **kwargs)
except authToken.DoesNotExist:
try:
obj = authToken.objects.get(pmauth=auth_val)
if not checkAuthTimestamp(obj.mlastUpdated):
obj.misExpired = True
obj.save()
return errorResp(412,"Auth expired")
request.user = obj.user
if request.method in ['POST','PUT','PATCH','DELETE']:
if isDemoUser(request.user):
return demoError()
return view_func(request, *args, **kwargs)
except authToken.DoesNotExist:
return errorResp(401,"Token not found")
elif elements[0] == 'keyw':
try:
obj = authToken.objects.get(wauth=auth_val)
if not checkAuthTimestamp(obj.wlastUpdated):
obj.wisExpired = True
obj.save()
return errorResp(412,"Auth expired")
request.user = obj.user
if request.method in ['POST','PUT','PATCH','DELETE']:
if isDemoUser(request.user):
return demoError()
return view_func(request, *args, **kwargs)
except authToken.DoesNotExist:
try:
obj = authToken.objects.get(pwauth=auth_val)
if not checkAuthTimestamp(obj.wlastUpdated):
obj.wisExpired = True
obj.save()
return errorResp(412,"Auth expired")
request.user = obj.user
if request.method in ['POST','PUT','PATCH','DELETE']:
if isDemoUser(request.user):
return demoError()
return view_func(request, *args, **kwargs)
except authToken.DoesNotExist:
return errorResp(401,"Token not found")
else:
return errorResp(401)
else:
return errorResp(401)
            else:  # login not required: only validate a token if one was supplied
if 'HTTP_AUTHORIZATION' in headers:
value = headers['HTTP_AUTHORIZATION'] #format key=auth
elements = value.split('=')
if len(elements) != 2:
return errorResp(401)
auth_val = elements[1]
if elements[0] == 'keym':
try:
obj = authToken.objects.get(mauth=auth_val)
if not checkAuthTimestamp(obj.mlastUpdated):
obj.misExpired = True
obj.save()
return errorResp(412,"Auth expired")
request.user = obj.user
if request.method in ['PATCH','DELETE']:
if isDemoUser(request.user):
return demoError()
return view_func(request, *args, **kwargs)
except authToken.DoesNotExist:
try:
obj = authToken.objects.get(pmauth=auth_val)
if not checkAuthTimestamp(obj.mlastUpdated):
obj.misExpired = True
obj.save()
return errorResp(412,"Auth expired")
request.user = obj.user
if request.method in ['PATCH','DELETE']:
if isDemoUser(request.user):
return demoError()
return view_func(request, *args, **kwargs)
except authToken.DoesNotExist:
return errorResp(401,"Token not found")
elif elements[0] == 'keyw':
try:
obj = authToken.objects.get(wauth=auth_val)
if not checkAuthTimestamp(obj.wlastUpdated):
obj.wisExpired = True
obj.save()
return errorResp(412,"Auth expired")
request.user = obj.user
if request.method in ['PATCH','DELETE']:
if isDemoUser(request.user):
return demoError()
return view_func(request, *args, **kwargs)
except authToken.DoesNotExist:
try:
obj = authToken.objects.get(pwauth=auth_val)
if not checkAuthTimestamp(obj.wlastUpdated):
obj.wisExpired = True
obj.save()
return errorResp(412,"Auth expired")
request.user = obj.user
if request.method in ['PATCH','DELETE']:
if isDemoUser(request.user):
return demoError()
return view_func(request, *args, **kwargs)
except authToken.DoesNotExist:
return errorResp(401,"Token not found")
return view_func(request, *args, **kwargs)
_view.__name__ = view_func.__name__
_view.__dict__ = view_func.__dict__
_view.__doc__ = view_func.__doc__
return _view
if function is None:
return _dec
else:
return _dec(function)
def checkAuthTimestamp(timestamp):
current_time = getCurrentTime()
return ((current_time - timestamp).days == 0 and (current_time - timestamp).seconds < AUTH_EXPIRY_MINS*60)
def getRequestUser(request):
user = None
if 'wauth' in request.COOKIES :
wauth = request.COOKIES['wauth']
try:
obj = authToken.objects.get(wauth=wauth)
user = obj.user
except authToken.DoesNotExist:
try:
obj = authToken.objects.get(pwauth=wauth)
user = obj.user
except authToken.DoesNotExist:
pass
return user
def getVerificationToken(u,ty):
if ty == 'ev' or ty == 'pr':
try:
vc = verificationCode.objects.get(user= u,ctype=ty)
except verificationCode.DoesNotExist:
code = randomString(6)
token = randomString(50)
vc = verificationCode.objects.create(code = code,user= u,token=token,ctype=ty)
return vc
else:
raise ValueError | 2.21875 | 2 |
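# Illustrative view using the decorator above (the view name and response payload
# are made up for demonstration; they are not part of the original project):
@stocknestAPI(loginRequired=True)
def portfolio_summary(request):
    # request.user has already been resolved from the "keym=<token>" /
    # "keyw=<token>" Authorization header by the decorator
    return JsonResponse({"username": request.user.username})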
nca47/objects/firewall/fw_addrobj_info.py | WosunOO/nca_xianshu | 0 | 12786730 | from nca47.db import api as db_api
from nca47.objects import base
from nca47.objects import fields as object_fields
from nca47.db.sqlalchemy.models.firewall import ADDROBJ
class FwAddrObjInfo(base.Nca47Object):
VERSION = '1.0'
fields = {
'id': object_fields.StringField(),
'name': object_fields.StringField(),
'ip': object_fields.StringField(),
'expip': object_fields.StringField(),
'vfwname': object_fields.StringField(),
'vfw_id': object_fields.StringField(),
'operation_fro': object_fields.StringField()
}
def __init__(self, context=None, **kwarg):
self.db_api = db_api.get_instance()
super(FwAddrObjInfo, self).__init__(context=None, **kwarg)
@staticmethod
def _from_db_object(fw_addrobj_info, db_fw_addrobj_info):
"""Converts a database entity to a formal :class:`ADDROBJ` object.
:param fw_addrobj_info: An object of :class:`ADDROBJ`.
:param fw_addrobj_info: A DB model of a ADDROBJ.
:return: a :class:`ADDROBJ` object.
"""
for field in fw_addrobj_info.fields:
fw_addrobj_info[field] = db_fw_addrobj_info[field]
fw_addrobj_info.obj_reset_changes()
return fw_addrobj_info
def create(self, context, values):
addrobj = self.db_api.create(ADDROBJ, values)
return addrobj
def delete(self, context, id_):
addrobj = self.db_api.delete_object(ADDROBJ, id_)
return addrobj
def get_object(self, context, **values):
addrobj = self.db_api.get_object(ADDROBJ, **values)
return addrobj
def get_objects(self, context, **values):
addrobj = self.db_api.get_objects(ADDROBJ, **values)
return addrobj
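# Illustrative usage sketch (not part of the original module): it assumes a
# configured nca47 database backend and a valid request context, and the field
# values below are made up for the example.
#
#   info = FwAddrObjInfo()
#   info.create(context, {"name": "web-srv", "ip": "10.0.0.5", "vfwname": "vfw1"})
#   info.get_objects(context, vfwname="vfw1")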
| 2.359375 | 2 |
conta/main/tests/views/test_InformesView.py | osso73/contabilidad | 0 | 12786731 | from pytest_django.asserts import assertTemplateUsed
from fixtures_views import *
class TestInformesView:
@pytest.fixture
def form_parametros(self, django_app):
resp = django_app.get(reverse('main:informes'), user='username')
return resp.forms['parametros']
@pytest.fixture
def populate_db_informes(self, populate_database):
_, cuentas, _ = populate_database
adicionales = [
[5, '2021-01-28', 'Compra del pan', 2.50, 0, cuentas[0]],
[5, '2021-01-28', 'Compra del pan', 0, 2.50, cuentas[3]],
[6, '2021-02-15', 'Compra de fruta', 10.75, 0, cuentas[0]],
[6, '2021-02-15', 'Compra de fruta', 0, 10.75, cuentas[3]],
[7, '2021-03-18', 'Calcetines y calzoncillos', 15.85, 0, cuentas[1]],
[7, '2021-03-18', 'Calcetines y calzoncillos', 0, 15.85, cuentas[3]],
[8, '2021-04-20', 'Abrigo de invierno', 54, 0, cuentas[1]],
[8, '2021-04-20', 'Abrigo de invierno', 0, 54, cuentas[3]],
]
for num, fecha, descripcion, debe, haber, cuenta in adicionales:
Movimiento.objects.create(num=num, fecha=fecha,
descripcion=descripcion, debe=debe, haber=haber, cuenta=cuenta)
@pytest.mark.parametrize('page', ['/informes/', reverse('main:informes')])
def test_redirect_if_not_logged_in(self, page, django_app):
resp = django_app.get(page)
assert resp.status_code == 302
assert resp.url.startswith('/accounts/login/')
@pytest.mark.parametrize('page', ['/informes/', reverse('main:informes')])
def test_view_url_exists_at_desired_location(self, page, django_app):
resp = django_app.get(page, user='username')
assert resp.status_code == 200
@pytest.mark.parametrize('page', ['/informes/', reverse('main:informes')])
def test_view_uses_correct_template(self, page, django_app):
resp = django_app.get(page, user='username')
assertTemplateUsed(resp, 'main/informes.html')
def test_parametros_form_attributes(self, form_parametros):
form = form_parametros
assert form.id == 'parametros'
assert form.method == 'post'
assert form.action == '/informes/'
assert form.action == reverse('main:informes')
fields = form.fields.keys()
for f in ['f_fecha_inicial', 'f_fecha_final', 'f_tipo', 'f_cuenta', 'f_etiqueta']:
assert f in fields
@pytest.mark.parametrize('tipo, fecha_col', [
('diario', 'Fecha'), ('semanal', 'Semana'), ('mensual', 'Mes'),
('trimestral', 'Trimestre'), ('anual', 'Año')
])
def test_parametros_form_attributes_tipo(self, form_parametros, populate_db_informes, tipo, fecha_col):
populate_db_informes
form = form_parametros
form['f_tipo'].select(text=tipo)
resp = form.submit()
# check title and subtitle
for text in ['Todas las cuentas', f'Informe {tipo}, todas las fechas']:
assert text in resp.text
# check columns of table
for col in [fecha_col, 'Debe', 'Haber', 'Total']:
assert col in resp.text
@pytest.mark.parametrize('fecha_ini, fecha_fin, expected_subtitle', [
('', '2022-01-29', 'Informe diario, desde el principio hasta 2022-01-29'),
('2022-01-29', '', 'Informe diario, desde 2022-01-29 hasta el final'),
('2022-01-01', '2022-01-31', 'Informe diario, desde 2022-01-01 hasta 2022-01-31'),
], ids=['fecha-inicial', 'fecha-final', 'ambas-fechas'])
def test_form_fechas(self, form_parametros, populate_db_informes, fecha_ini, fecha_fin, expected_subtitle):
populate_db_informes
form = form_parametros
form['f_fecha_inicial'] = fecha_ini
form['f_fecha_final'] = fecha_fin
resp = form.submit()
# check title and subtitle
for text in ['Todas las cuentas', expected_subtitle]:
assert text in resp.text
@pytest.mark.parametrize('cuenta, etiqueta, expected_title', [
('100: Caja', '', 'Cuenta 100: Caja'),
('', 'gastos', 'Cuentas del tipo: Gastos corrientes'),
('100: Caja', 'gastos', 'Cuenta 100: Caja'),
], ids=['cuenta-solo', 'etiqueta-solo', 'cuenta-y-etiqueta'])
def test_form_cuentas(self, form_parametros, populate_db_informes, cuenta, etiqueta, expected_title):
populate_db_informes
form = form_parametros
form['f_cuenta'] = cuenta
form['f_etiqueta'] = etiqueta
resp = form.submit()
# check title and subtitle
for text in [expected_title, 'Informe diario, todas las fechas']:
assert text in resp.text
| 2.109375 | 2 |
vvcontrollers/__init__.py | yarmenti/py_vvcontrollers | 0 | 12786732 | __author__ = "<NAME>"
__version__ = "0.1.1"
"""
Helpers for using the voila-vuetify template
of the Jupyter Voila dashboard project.
"""
from .application import ApplicationVoilaVuetify
from .menu import MenuController
from .core import CoreController
from .abstract import AbstrController
from .dialog import DialogController
| 1.070313 | 1 |
wagtail_unsplash/forms.py | zerolab/wagtail-unsplash | 0 | 12786733 | # class UnsplashSearchForm(Form):
# query = | 1.15625 | 1 |
api_config.py | eduardoltorres/the-debug-ducky | 0 | 12786734 | import tweepy
from logger_config import logger
from secrets import *
def create_api():
auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
try:
api.verify_credentials()
except Exception as e:
logger.error(f"Error {e} creating API.", exc_info=True)
logger.info("API successfully created.")
return api
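if __name__ == "__main__":
    # Hedged usage sketch (assumes valid Twitter credentials in secrets.py):
    # build the client and print the authenticated account's screen name.
    api = create_api()
    print(api.verify_credentials().screen_name)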
| 2.703125 | 3 |
src/constants.py | ari-bou/symro | 0 | 12786735 | # Script Flags
# ----------------------------------------------------------------------------------------------------------------------
SPECIAL_COMMAND_MODEL = "MODEL"
SPECIAL_COMMAND_ADDITIONAL_MODEL = "ADDITIONAL_MODELS"
SPECIAL_COMMAND_INIT_DATA = "INIT_DATA"
SPECIAL_COMMAND_SETUP = "SETUP"
SPECIAL_COMMAND_NOEVAL = "NOEVAL"
SPECIAL_COMMAND_EVAL = "EVAL"
SPECIAL_COMMAND_OMIT_DECLARATIONS = "OMIT_DECL"
SPECIAL_COMMAND_INCLUDE_DECLARATIONS = "INCL_DECL"
SPECIAL_COMMAND_FSPROBLEM = "FSPROBLEM"
SPECIAL_COMMAND_SUBPROBLEM = "SUBPROBLEM"
SPECIAL_COMMAND_MASTER_PROBLEM = "MASTER_PROBLEM"
SPECIAL_COMMAND_PRIMAL_SUBPROBLEM = "PRIMAL_SUBPROBLEM"
SPECIAL_COMMAND_FEASIBILITY_SUBPROBLEM = "FEASIBILITY_SUBPROBLEM"
SPECIAL_COMMAND_INITIALIZATION = "INITIALIZATION"
SPECIAL_COMMAND_GBD_ALGORITHM = "GBD_ALGORITHM"
SPECIAL_COMMAND_OUTPUT = "OUTPUT"
SPECIAL_COMMAND_SYMBOLS = [SPECIAL_COMMAND_MODEL,
SPECIAL_COMMAND_ADDITIONAL_MODEL,
SPECIAL_COMMAND_INIT_DATA,
SPECIAL_COMMAND_SETUP,
SPECIAL_COMMAND_NOEVAL,
SPECIAL_COMMAND_EVAL,
SPECIAL_COMMAND_OMIT_DECLARATIONS,
SPECIAL_COMMAND_INCLUDE_DECLARATIONS,
SPECIAL_COMMAND_FSPROBLEM,
SPECIAL_COMMAND_SUBPROBLEM,
SPECIAL_COMMAND_MASTER_PROBLEM,
SPECIAL_COMMAND_PRIMAL_SUBPROBLEM,
SPECIAL_COMMAND_FEASIBILITY_SUBPROBLEM,
SPECIAL_COMMAND_INITIALIZATION,
SPECIAL_COMMAND_GBD_ALGORITHM,
SPECIAL_COMMAND_OUTPUT]
| 1.5625 | 2 |
src/annalist_root/annalist/views/fields/render_ref_image.py | gklyne/annalist | 18 | 12786736 | <filename>src/annalist_root/annalist/views/fields/render_ref_image.py
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function
"""
Renderer and value mapper for URI value displayed as an image.
"""
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2014, <NAME>"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import logging
log = logging.getLogger(__name__)
from annalist.views.fields.render_base import RenderBase
from annalist.views.fields.render_fieldvalue import (
RenderFieldValue,
get_context_field_value,
get_field_edit_value,
get_field_view_value
)
from django.template import Template, Context
# ----------------------------------------------------------------------------
#
# Image reference value mapping
#
# ----------------------------------------------------------------------------
class RefImageValueMapper(RenderBase):
"""
Value mapper class for image resource reference
"""
@classmethod
def encode(cls, data_value):
"""
Encodes image reference as a string
"""
return data_value or ""
@classmethod
def decode(cls, field_value):
"""
Decodes a URI value as an image reference.
"""
return field_value or ""
# ----------------------------------------------------------------------------
#
# Image reference field renderers
#
# ----------------------------------------------------------------------------
class ref_image_view_renderer(object):
def render(self, context):
"""
Render reference in entity view as referenced image.
"""
linkval = RefImageValueMapper.encode(get_context_field_value(context, "target_value_link", ""))
return (
'''<a href="%s" target="_blank">'''+
'''<img src="%s" alt="Image at '%s'" />'''+
'''</a>''')%(linkval, linkval, linkval)
class ref_image_edit_renderer(object):
def __init__(self):
self._template = Template(
'''<input type="text" size="64" name="{{repeat_prefix}}{{field.description.field_name}}" '''+
'''placeholder="{{field.description.field_placeholder}}" '''+
'''value="{{field.field_edit_value}}" />'''
)
return
def render(self, context):
"""
Render image URI for editing
"""
return self._template.render(context)
def get_ref_image_renderer():
"""
    Return field renderer object for image reference values
"""
return RenderFieldValue("ref_image",
view_renderer=ref_image_view_renderer(),
edit_renderer=ref_image_edit_renderer(),
)
# End.
| 2.046875 | 2 |
refactorings/increase_field_visibility.py | ashrafizahra81/CodART | 1 | 12786737 | <reponame>ashrafizahra81/CodART
import logging
from refactorings.utils.utils2 import parse_and_walk
try:
import understand as und
except ImportError as e:
print(e)
from antlr4.TokenStreamRewriter import TokenStreamRewriter
from gen.javaLabeled.JavaParserLabeled import JavaParserLabeled
from gen.javaLabeled.JavaParserLabeledListener import JavaParserLabeledListener
logger = logging.getLogger()
__author__ = "<NAME>"
class IncreaseFieldVisibilityListener(JavaParserLabeledListener):
def __init__(self, source_class, source_field, rewriter: TokenStreamRewriter):
self.source_class = source_class
self.source_field = source_field
self.in_class = False
self.in_field = False
self.detected_field = False
self.rewriter = rewriter
def enterClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
if ctx.IDENTIFIER().getText() == self.source_class:
self.in_class = True
def exitClassDeclaration(self, ctx: JavaParserLabeled.ClassDeclarationContext):
if ctx.IDENTIFIER().getText() == self.source_class:
self.in_class = False
def enterFieldDeclaration(self, ctx: JavaParserLabeled.FieldDeclarationContext):
self.in_field = True
def exitFieldDeclaration(self, ctx: JavaParserLabeled.FieldDeclarationContext):
self.in_field = False
def enterVariableDeclaratorId(self, ctx: JavaParserLabeled.VariableDeclaratorIdContext):
if ctx.IDENTIFIER().getText() == self.source_field and self.in_field:
self.detected_field = True
def exitClassBodyDeclaration2(self, ctx: JavaParserLabeled.ClassBodyDeclaration2Context):
if self.detected_field:
self.rewriter.replaceSingleToken(
token=ctx.modifier(0).start,
text="public"
)
self.detected_field = False
def main(udb_path, source_package, source_class, source_field, *args, **kwargs):
db = und.open(udb_path)
field_ent = db.lookup(f"{source_package}.{source_class}.{source_field}", "Variable")
if len(field_ent) == 0:
logger.error("Invalid inputs.")
return
field_ent = field_ent[0]
if field_ent.simplename() != source_field:
logger.error("Invalid entity.")
return
if not field_ent.kind().check("Private"):
logger.error("Field is not private.")
return
parent = field_ent.parent()
while parent.parent() is not None:
parent = parent.parent()
main_file = parent.longname()
parse_and_walk(
file_path=main_file,
listener_class=IncreaseFieldVisibilityListener,
has_write=True,
source_class=source_class,
source_field=source_field
)
db.close()
if __name__ == '__main__':
main(
udb_path="D:\Dev\JavaSample\JavaSample\JavaSample.und",
source_package="source_package",
source_class="Sample",
source_field="privateField"
)
| 2.15625 | 2 |
problems/765.py | mengshun/Leetcode | 0 | 12786738 | """
765. Couples Holding Hands
N couples sit in 2N consecutively arranged seats and want to hold their
partner's hand. Compute the minimum number of seat swaps needed so that every
couple ends up sitting side by side. A swap picks any two people, who stand up
and exchange seats.
"""
def minSwapsCouples(row):
    n = len(row)  # total number of people
    N = n >> 1  # number of couples
    # union-find (disjoint set) over couple ids
parent = list(range(N))
size = [1] * N
    # find (with path compression)
def find(x):
if x != parent[x]:
parent[x] = find(parent[x])
return parent[x]
    # union (by size)
def merge(x, y):
x, y = find(x), find(y)
if x == y:
return
        # attach the smaller tree to the larger one
        if size[x] > size[y]:
            x, y = y, x
        # grow the size of the larger root
        size[y] += size[x]
        # link the roots
        parent[x] = y
for i in range(N):
        # couple ids of the two people sharing this pair of seats
x = row[2 * i] >> 1
y = row[2 * i + 1] >> 1
if x != y:
merge(x, y)
    # count each connected group only once
groups = {}
for i in range(N):
x = find(i)
if x not in groups:
groups[x] = size[x]
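    # Each connected group of k couples can be untangled with k - 1 swaps, so
    # the answer is the total couple count minus the number of groups.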
return sum(groups.values()) - len(groups)
# greedy approach: fix the seats pair by pair, from left to right
def tanxin(row):
n = len(row)
res = 0
for i in range(0, n-1, 2):
if row[i] == row[i+1] ^ 1:
            # already a couple, move on to the next pair of seats
continue
for j in range(i+1, n):
if row[i] == row[j] ^ 1:
row[i+1], row[j] = row[j], row[i+1]
break
res += 1
return res
print(minSwapsCouples([0, 2, 1, 3])) # 1
print(tanxin([0, 2, 1, 3])) # 1
print(minSwapsCouples([3, 2, 0, 1])) # 0
print(tanxin([3, 2, 0, 1])) # 0
print(minSwapsCouples([9,12,2,10,11,0,13,6,4,5,3,8,1,7])) # 5
print(tanxin([9,12,2,10,11,0,13,6,4,5,3,8,1,7])) # 5
| 3.53125 | 4 |
python_backend/covidManager/migrations/0001_initial.py | KedarKshatriya/HackOn_Hackathon | 0 | 12786739 | # Generated by Django 3.0.2 on 2020-04-13 17:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('mob', models.CharField(max_length=100)),
('fever', models.IntegerField()),
('tired', models.BooleanField(default=False)),
('throat', models.BooleanField(default=False)),
('bodypain', models.BooleanField(default=False)),
('invisit', models.BooleanField(default=False)),
('pubvisit', models.BooleanField(default=False)),
('hivisit', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| 1.890625 | 2 |
vaquero/pipeline.py | jbn/vaquero | 1 | 12786740 | import ast
import imp
import inspect
import types
from .invocation_tools import _name_of
class PipelineCommand:
def __init__(self, name):
self._name = name
class SkipTo(PipelineCommand):
# TODO: Singleton?
def __init__(self, resuming_function_name):
self.function_name = _name_of(resuming_function_name)
Done = PipelineCommand('DONE')
def collect_func_ordering(file_path_or_module):
"""
Collect all top-level functions in a file or module, in order.
:param file_path_or_module: the path to the python file, or a module.
:return: the ordered top-level function names.
"""
if isinstance(file_path_or_module, types.ModuleType):
file_path = inspect.getsourcefile(file_path_or_module)
else:
file_path = file_path_or_module
with open(file_path, 'r') as fp:
root = ast.parse(fp.read())
names = []
for node in ast.iter_child_nodes(root):
if isinstance(node, ast.FunctionDef):
names.append(node.name)
return names
def collect_pipeline(module, skip_private=True, reloading=True):
"""
Load the functions in a module in their definition order.
:param module: a python module
:param skip_private: ignore functions starting with '_'
:param reloading: reload the module prior to collection
:return: the functions in their definition order
"""
if reloading:
module = imp.reload(module)
pipeline = []
env = vars(module)
for name in collect_func_ordering(module):
if skip_private and name.startswith('_'):
continue
pipeline.append(env[name])
return pipeline
class Pipeline(object):
"""
A sequence of functions for data processing.
"""
def __init__(self):
self._pipeline = []
self._captures = []
def __iter__(self):
return iter(self._captures)
def __call__(self, *args, **kwargs):
await_f = None
for f in self._pipeline:
if await_f is not None:
if _name_of(f) != await_f:
continue
else:
await_f = None
cmd = f(*args, **kwargs)
if isinstance(cmd, PipelineCommand):
if cmd is Done:
break
elif isinstance(cmd, SkipTo):
await_f = cmd.function_name
if await_f is not None:
raise NameError("Function {} never visited".format(await_f))
class ModulePipeline(Pipeline):
"""
Extract a pipeline from a Python module.
This executes each function in the order laid out by the file.
"""
def __init__(self, module, skip_private_applications=True,
include_private_captures=True, reloading=True):
"""
:param module: the python module to load
:param skip_private_applications: if True, then functions prefixed with
'_' are not included in the transformation pipeline
:param include_private_captures: if True, then functions prefixed with
'_' ARE captured for error analysis.
:param reloading: if True, reloads the module when calling `reload()`
"""
self._module = module
self._skip_private_applications = skip_private_applications
self._include_private_captures = include_private_captures
self._reloading = reloading
super(ModulePipeline, self).__init__()
self.reload(force=True)
def reload(self, force=False):
"""
Reload the underlying module.
:param force: if True, reloads the module, even if reloading is false.
"""
# Note: reloading the module on the initial load actually makes sense
# given how it's used. In a notebook, you import the module, then
# pass it to the constructor. It's easy to step over that
# constructor again, passing the old module reference.
if force or self._reloading:
self._pipeline = []
self._captures = []
for f in collect_pipeline(self._module, skip_private=False):
is_private = f.__name__.startswith('_')
if not self._skip_private_applications or not is_private:
self._pipeline.append(f)
if self._include_private_captures or not is_private:
self._captures.append(f)
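if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original module: the functions
    # below are made-up cleaning steps; in normal use you would define them in
    # a module and hand that module to ModulePipeline instead of filling
    # Pipeline._pipeline by hand.
    def _strip_name(record):
        record["name"] = record["name"].strip()

    def _skip_empty(record):
        if not record["name"]:
            return Done

    def _title_case(record):
        record["name"] = record["name"].title()

    demo = Pipeline()
    demo._pipeline = [_strip_name, _skip_empty, _title_case]

    record = {"name": "  ada lovelace  "}
    demo(record)
    print(record)  # {'name': 'Ada Lovelace'}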
| 2.375 | 2 |
conf_site/proposals/tests/test_exporting_submissions.py | pydata/conf_site | 13 | 12786741 | <filename>conf_site/proposals/tests/test_exporting_submissions.py
from random import randint
from conf_site.core.tests.test_csv_view import StaffOnlyCsvViewTestCase
from conf_site.proposals.tests.factories import ProposalFactory
from conf_site.proposals.views import ExportSubmissionsView
class ExportSubmissionsViewTestCase(StaffOnlyCsvViewTestCase):
view_class = ExportSubmissionsView
view_name = "submission_export"
def test_all_proposals_are_included(self):
proposals = ProposalFactory.create_batch(size=randint(2, 4))
response = ExportSubmissionsView().get()
for proposal in proposals:
self.assertContains(response, proposal.speaker.name)
self.assertContains(response, proposal.title)
self.assertContains(response, proposal.affiliation)
self.assertContains(response, proposal.code_url)
self.assertContains(response, proposal.kind.name)
| 2.265625 | 2 |
projecto1/aula 16.py | Rachidomar1523/pythonExercicios | 0 | 12786742 | <reponame>Rachidomar1523/pythonExercicios
#lanche = 'rachid', 'omar', 'mersson', 'govnahica'
#print(lanche[-4:4:2])
a = 1, 2, 5, 8
b = 7, 9, 3
print(a+b) | 3.546875 | 4 |
prefect_ds/task_runner.py | AndrewRook/prefect_ds | 22 | 12786743 | <filename>prefect_ds/task_runner.py
from prefect.core import Edge
from prefect.engine.state import State
from prefect.engine.task_runner import TaskRunner
from typing import Dict, Any
class DSTaskRunner(TaskRunner):
def run(
self,
state: State = None,
upstream_states: Dict[Edge, State] = None,
context: Dict[str, Any] = None,
executor: "prefect.engine.executors.Executor" = None,
) -> State:
"""
See the documentation for ``prefect.engine.task_runner.TaskRunner.run()``.
"""
self.upstream_states = upstream_states
return super().run(state=state, upstream_states=upstream_states, context=context, executor=executor)
| 2.3125 | 2 |
toollib/__init__.py | atpuxiner/toollib | 113 | 12786744 | <filename>toollib/__init__.py
"""
@author axiner
@version v1.0.0
@created 2021/12/12 13:14
@abstract This is a tool library.
@description
@history
"""
from pathlib import Path
here = Path(__file__).absolute().parent
__version__ = '2022.05.11'
| 1.28125 | 1 |
settings.py | chenke91/ckPermission | 0 | 12786745 | <reponame>chenke91/ckPermission
#coding:utf-8
bind = 'unix:/var/run/gunicorn.sock'
workers = 4
# change this to a dedicated, non-root user in production
user = 'root'
# log level; 'error' may be preferable in production
loglevel = 'debug'
errorlog = '-'
logfile = '/var/log/gunicorn/debug.log'
timeout = 300
secure_scheme_headers = {
'X-SCHEME': 'https',
}
x_forwarded_for_header = 'X-FORWARDED-FOR'
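# Usage sketch (assumption: the WSGI application is exposed as `app` in a
# module named `wsgi`):
#   gunicorn -c settings.py wsgi:app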
| 1.117188 | 1 |
tests/test_regression.py | weninc/bitshuffle-1 | 162 | 12786746 | """
Test that data encoded with earlier versions can still be decoded correctly.
"""
from __future__ import absolute_import, division, print_function
import pathlib
import unittest
import numpy as np
import h5py
TEST_DATA_DIR = pathlib.Path(__file__).parent / "data"
OUT_FILE_TEMPLATE = "regression_%s.h5"
VERSIONS = [
"0.1.3",
]
class TestAll(unittest.TestCase):
def test_regression(self):
for version in VERSIONS:
file_name = TEST_DATA_DIR / (OUT_FILE_TEMPLATE % version)
f = h5py.File(file_name, "r")
g_orig = f["origional"]
g_comp = f["compressed"]
for dset_name in g_comp.keys():
self.assertTrue(np.all(g_comp[dset_name][:] == g_orig[dset_name][:]))
if __name__ == "__main__":
unittest.main()
| 2.40625 | 2 |
tests/test_enhance.py | RaphaelOlivier/pyaudlib | 26 | 12786747 | <gh_stars>10-100
"""Test enhancement functions."""
from audlib.quickstart import welcome
from audlib.sig.window import hamming
WELCOME, SR = welcome()
HOP = .25
WIND = hamming(SR*.025, HOP, synth=True)
def test_SSFEnhancer():
from audlib.enhance import SSFEnhancer
enhancer = SSFEnhancer(SR, WIND, HOP, 512)
sigssf = enhancer(WELCOME, .4) # sounds okay
return
if __name__ == '__main__':
test_SSFEnhancer()
| 2.421875 | 2 |
examples/echo_server.py | nickovs/pypssst | 0 | 12786748 | #!/usr/bin/env python
import socket
from contextlib import closing
import pssst
import click
from cryptography.hazmat.primitives.asymmetric.x25519 import X25519PrivateKey
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
@click.command()
@click.option('-k', '--key-file', help="File containing hex encoded private key")
@click.option('-p', '--port', type=int, help="Port on which to listen", default=45678)
def main(key_file, port):
if key_file:
private_key_text = open(key_file).readline().strip()
private_key = X25519PrivateKey.from_private_bytes(bytes.fromhex(private_key_text))
else:
private_key = X25519PrivateKey.generate()
print("Server public key: ",
private_key.public_key().public_bytes(encoding=Encoding.Raw, format=PublicFormat.Raw).hex())
server_handler = pssst.PSSSTServer(private_key)
with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as server_socket:
server_socket.bind(('127.0.0.1', port))
while True:
packet, client_addr = server_socket.recvfrom(2048)
try:
data, client_key, reply_handler = server_handler.unpack_request(packet)
reply_packet = reply_handler(data)
server_socket.sendto(reply_packet, client_addr)
except pssst.PSSSTException as e:
print("Server Exception: {}".format(e))
if __name__ == "__main__":
main()
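# Example invocations (sketch; `server_key.hex` is a hypothetical file holding
# the hex-encoded X25519 private key read by --key-file):
#   python echo_server.py --port 45678
#   python echo_server.py -k server_key.hex -p 45678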
| 2.78125 | 3 |
blueprint/py/bp/entity_gen/dates.py | andrey-mishchenko/blueprint-oss | 7 | 12786749 | <filename>blueprint/py/bp/entity_gen/dates.py
from typing import Tuple
from ..entity import Date, Entity, Text
from .type_scoring import date_likeness
MINIMUM_SCORE = 0.7
def get_dates(entities: Tuple[Entity, ...]) -> Tuple[Date, ...]:
"""Get date-like entities from the given Entities.
Args:
entities: Should be Words or Phrases.
"""
def make_date(E: Entity) -> Date:
assert isinstance(E, Text)
score, _ = date_likeness(E.text)
return Date(E.bbox, E.text, tuple(E.entity_words()), score)
return tuple(filter(lambda E: (E.likeness_score or 0) >= MINIMUM_SCORE,
map(make_date, entities)))
| 3.046875 | 3 |
tests/test_config.py | isidentical/unimport | 147 | 12786750 | <reponame>isidentical/unimport
import re
from pathlib import Path
from unittest import TestCase
from unimport import constants as C
from unimport import utils
from unimport.config import Config, DefaultConfig
TEST_DIR = Path(__file__).parent / "configs"
pyproject = TEST_DIR / "pyproject.toml"
setup_cfg = TEST_DIR / "setup.cfg"
no_unimport_pyproject = TEST_DIR / "no_unimport" / "pyproject.toml"
no_unimport_setup_cfg = TEST_DIR / "no_unimport" / "setup.cfg"
class ConfigTestCase(TestCase):
include = "test|test2|tests.py"
exclude = "__init__.py|tests/"
sources = [Path("path1"), Path("path2")]
def test_toml_parse(self):
config = Config(config_file=pyproject).parse()
self.assertEqual(self.include, config.include)
self.assertEqual(self.exclude, config.exclude)
self.assertEqual(self.sources, config.sources)
self.assertTrue(config.gitignore)
self.assertTrue(config.requirements)
self.assertFalse(config.remove)
self.assertTrue(config.diff)
self.assertTrue(config.ignore_init)
def test_cfg_parse(self):
config = Config(config_file=setup_cfg).parse()
self.assertEqual(self.include, config.include)
self.assertEqual(self.exclude, config.exclude)
self.assertEqual(self.sources, config.sources)
self.assertTrue(config.gitignore)
self.assertTrue(config.requirements)
self.assertFalse(config.remove)
self.assertTrue(config.diff)
self.assertTrue(config.ignore_init)
def test_cfg_merge(self):
config = Config(config_file=setup_cfg).parse()
console_configuration = {
"include": "tests|env",
"remove": True,
"diff": False,
"include_star_import": True,
}
gitignore_exclude = utils.get_exclude_list_from_gitignore()
exclude = "|".join(
[config.exclude] + gitignore_exclude + [C.INIT_FILE_IGNORE_REGEX]
)
config = config.merge(**console_configuration)
self.assertEqual("tests|env", config.include)
self.assertEqual(exclude, config.exclude)
self.assertEqual(self.sources, config.sources)
self.assertTrue(config.gitignore)
self.assertTrue(config.requirements)
self.assertTrue(config.remove)
self.assertFalse(config.diff)
self.assertTrue(config.ignore_init)
class DefaultCommandTestCase(TestCase):
def setUp(self):
self.config = DefaultConfig()
def test_there_is_no_command(self):
self.assertEqual(
self.config.merge(there_is_no_command=True), self.config.merge()
)
def test_same_with_default_config(self):
self.assertEqual(
self.config.merge(exclude=self.config.exclude).exclude,
self.config.merge().exclude,
)
def test_check(self):
self.assertTrue(self.config.merge().check)
self.assertTrue(self.config.merge(check=True).check)
self.assertTrue(self.config.merge(gitignore=True).check)
self.assertFalse(self.config.merge(diff=True).check)
self.assertFalse(self.config.merge(remove=True).check)
self.assertFalse(self.config.merge(permission=True).check)
def test_diff(self):
self.assertFalse(self.config.merge().diff)
self.assertFalse(self.config.merge(remove=True).diff)
self.assertTrue(self.config.merge(diff=True).diff)
self.assertTrue(self.config.merge(permission=True).diff)
class TomlCommandTestCase(TestCase):
def setUp(self):
self.config = Config(pyproject).parse()
self.exclude = "__init__.py|tests/"
def test_same_with_toml_config(self):
self.assertEqual(
self.config.merge(exclude=self.exclude).exclude,
self.config.merge().exclude,
)
def test_check(self):
self.assertTrue(self.config.merge(check=True).check)
self.assertTrue(self.config.merge(diff=False).check)
self.assertTrue(self.config.merge(diff=False, permission=False).check)
self.assertFalse(self.config.merge().check)
self.assertFalse(self.config.merge(gitignore=True).check)
self.assertFalse(self.config.merge(diff=True).check)
self.assertFalse(self.config.merge(remove=True).check)
self.assertFalse(self.config.merge(permission=True).check)
class NoUnimportSectionTestCase(TestCase):
def setUp(self):
self.default_config = DefaultConfig()
def test_toml_parse(self):
config = Config(config_file=no_unimport_pyproject).parse()
self.assertEqual(self.default_config.include, config.include)
self.assertEqual(self.default_config.exclude, config.exclude)
self.assertEqual(self.default_config.sources, config.sources)
self.assertFalse(config.gitignore)
self.assertFalse(config.requirements)
self.assertFalse(config.remove)
self.assertFalse(config.diff)
self.assertFalse(config.ignore_init)
def test_cfg_parse(self):
config = Config(config_file=no_unimport_setup_cfg).parse()
self.assertEqual(self.default_config.include, config.include)
self.assertEqual(self.default_config.exclude, config.exclude)
self.assertEqual(self.default_config.sources, config.sources)
self.assertFalse(config.gitignore)
self.assertFalse(config.requirements)
self.assertFalse(config.remove)
self.assertFalse(config.diff)
self.assertFalse(config.ignore_init)
def test_cfg_merge(self):
config = Config(config_file=no_unimport_setup_cfg).parse()
console_configuration = {
"include": "tests|env",
"remove": True,
"diff": False,
"include_star_import": True,
}
config = config.merge(**console_configuration)
self.assertEqual("tests|env", config.include)
self.assertEqual(self.default_config.exclude, config.exclude)
self.assertEqual(self.default_config.sources, config.sources)
self.assertTrue(config.remove)
self.assertTrue(config.include_star_import)
self.assertFalse(config.gitignore)
self.assertFalse(config.requirements)
self.assertFalse(config.diff)
class InitFileIgnoreRegexTestCase(TestCase):
exclude_regex = re.compile(C.INIT_FILE_IGNORE_REGEX)
def test_match(self):
self.assertIsNotNone(self.exclude_regex.search("path/to/__init__.py"))
self.assertIsNotNone(self.exclude_regex.search("to/__init__.py"))
self.assertIsNotNone(self.exclude_regex.search("__init__.py"))
def test_not_match(self):
self.assertIsNone(self.exclude_regex.search("path/to/_init_.py"))
self.assertIsNone(
self.exclude_regex.search("path/to/__init__/test.py")
)
self.assertIsNone(self.exclude_regex.search("__init__"))
self.assertIsNone(self.exclude_regex.search("__init__py"))
self.assertIsNone(self.exclude_regex.search("__init__bpy"))
| 2.578125 | 3 |