max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---|
apps/trac/migrations/0017_checkpoint_distance_units.py | tractiming/trac-gae | 3 | 12789151 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('trac', '0016_auto_20160303_2353'),
]
operations = [
migrations.AddField(
model_name='checkpoint',
name='distance_units',
field=models.CharField(default=b'mi', max_length=2, choices=[(b'm', b'meters'), (b'km', b'kilometers'), (b'mi', b'miles')]),
),
]
| 1.773438 | 2 |
linuxTutorial.py | kumarashit/ExploreTheTux | 1 | 12789152 |
#!/usr/bin/env python3
import os
import re
#import sys
commands = {"list" : 'ls', 'long-list': 'ls -ltr', 'pwd' : '<PASSWORD>', 'change-dir':'cd', 'create-dir': 'mkdir', 'delete': 'rm',
'display':'cat', 'rename': 'mv', 'copy' : 'cp', 'modify_perm':'chmod'}
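# Maps the tutorial's friendly action names to the shell commands that the
# exercises below expect the user to type.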
commands_learned = []
separator = "\n-------------------------------------------------------------------------\n[testconsole]:$ "
class colors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\x1b[6;30;42m'#92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class Navigation:
def listing(self):
try:
print("First command is 'ls'\n")
ri = input("Type 'ls' " + separator + colors.OKGREEN)
if ri == commands['list']:
os.system(ri)
else:
print(colors.ENDC+"Please check the command you typed\n")
raise()
print(colors.ENDC)
except:
print(colors.ENDC + "Something went wrong!\n")
while(1):
ri=input("If you want to continue exploring last command, press 'Yes' else 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
self.listing()
else:
pass
def long_listing(self):
try:
ri = input("Type 'ls -ltr' " + separator + colors.OKGREEN)
if ri == commands['long-list']:
os.system(ri)
print(colors.ENDC + "\n\nDid you notice, the 'd' in front of the listing? This means they are directories.\nBut in Linux everything is file.\nMore on this in Advanced Linux :) !!!\n")
else:
print(colors.ENDC + "Please check the command you typed")
raise()
except:
print(colors.ENDC + "Something went wrong!\n")
while(1):
ri=input("If you want to continue exploring last command, press 'Yes' else 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
self.long_listing()
else:
pass
def pwd(self):
try:
ri = input("Type 'pwd'" + separator + colors.OKGREEN)
if ri == commands['pwd']:
os.system(ri)
print(colors.ENDC)
else:
print(colors.ENDC + "Please check the command you typed")
raise()
except:
print(colors.ENDC+"Something went wrong!\n")
while(1):
ri=input("If you want to continue exploring last command, press 'Yes' else 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
self.pwd()
else:
pass
def chdir(self):
try:
ri = input("Type 'cd' followed by the directory you want to check into! Example 'cd Downloads'"+ separator + colors.OKGREEN)
search_str = ri
res = re.search('(cd)\s+(\w*)', search_str)
if res.group(1) != commands['change-dir']:
raise
os.system(ri)
print(colors.ENDC)
if res.group(2) == '':
print(colors.ENDC+"\nHave you been pushed into the root directory!! :)\nCheck if you entered the directory name or not!!\n")
except:
print(colors.ENDC+"Something went wrong!\n")
while(1):
ri=input("If you want to continue exploring last command, press 'Yes' else 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
self.chdir()
else:
pass
def create_dir(self):
try:
ri = input("Type 'mkdir' followed by the directory you want to create. ex: 'mkdir foo'"+ separator + colors.OKGREEN)
search_str = ri
res = re.search('(mkdir)\s+(\w+)', search_str)
if res.group(1) != commands['create-dir'] or res.group(2) == '':
raise
os.system(ri)
print(colors.ENDC)
except:
print(colors.ENDC + "Something went wrong!\n")
while(1):
ri=input("If you want to continue exploring last command, press 'Yes' else 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
self.create_dir()
else:
pass
def remove_file(self):
try:
ri = input("Type 'rm' followed by the directory/file you want to delete. Use -r for removing a directory. Example 'rm x.txt' for removing a file. and 'rm -r foo' for removing a directory."+ separator + colors.OKGREEN)
search_str = ri
res = re.search('(rm)\s+(\w*)', search_str)
if res.group(1) != commands['delete']:
raise
os.system(ri)
print(colors.ENDC)
except:
print(colors.ENDC+"Something went wrong!\n")
while(1):
ri=input("If you want to continue exploring last command, press 'Yes' else 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
self.remove_file()
else:
pass
def all(self):
self.listing()
commands_learned.append('ls')
print("\n\nNow let's check the time of creation of these files, permissions and more...")
self.long_listing()
commands_learned.append('ls -ltr')
print("\n\nCheck your current working directory")
self.pwd()
commands_learned.append('pwd')
print("\n\nYou want to change the directory?")
self.chdir()
commands_learned.append('cd')
print("\n\nLet's create a directory!!")
self.create_dir()
        commands_learned.append('mkdir')
print("\nNow you can use 'cd' command to go into this directory\n")
self.remove_file()
commands_learned.append('rm')
class FileManipulations:
print("Ok, so you learnt how to navigate into the Linux files!!\n")
print("Let's learn some file manipulations")
def cat_file(self):
print("You want to check the content of the file?\n")
try:
ri = input("Type 'cat' followed by the filename. Ex: 'cat foo.txt'"+ separator + colors.OKGREEN)
search_str = ri
res = re.search('(cat)\s+(\w+)', search_str)
if res.group(1) != commands['display']:
print(colors.ENDC+"Wrong command!")
raise()
os.system(ri)
print(colors.ENDC)
except:
print(colors.ENDC + "Something went wrong!\n")
while(1):
ri=input("If you want to continue exploring last command, press 'Yes' else 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
self.cat_file()
else:
pass
def copy_file(self):
print("\n\nYou want to edit the file? Use any of the system editor like 'vi' or 'nano' or 'emacs'")
print("To know the usage of the editors, check the man pages. Ex: man nano or man vi\n")
print("Let's try to copy one file to another.")
try:
ri=input("Type 'cp <source file name> <destination file name>' Ex: cp foo.txt bar.txt"+ separator + colors.OKGREEN)
search_str = ri
#res = re.search('(cp)\s+(\W+)\s+(\W+)', search_str)
#if res.group(1) != commands['copy']:
# print(colors.ENDC+"Wrong command!")
# raise()
os.system(ri)
print(colors.ENDC)
except:
print(colors.ENDC + "Something went wrong!\n")
while(1):
ri=input("If you want to continue exploring last command, press 'Yes' else 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
self.copy_file()
else:
pass
def rename_file(self):
print("\n\nLet's try to rename file.")
try:
ri=input("Type 'mv <original file name> <new file name>' Ex: mv foo.txt bar.txt"+ separator + colors.OKGREEN)
search_str = ri
res = re.search('(mv)\s+(\w+)\s+(\w+)', search_str)
if res.group(1) != commands['rename']:
print(colors.ENDC+"Wrong command!")
raise()
os.system(ri)
print(colors.ENDC)
except:
print(colors.ENDC + "Something went wrong!\n")
while(1):
ri=input("If you want to continue exploring last command, press 'Yes' else 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
self.rename_file()
else:
pass
def change_file_permission(self):
print("\n\nChange the file permissions")
print("Please check the man page of 'chmod'. [man chmod]")
print("Let's change the file permissions and make the file as executable")
try:
ri = input("Type 'chmod +x <File name>'. Ex. chmod +x foo.py"+ separator + colors.OKGREEN)
search_str = ri
res = re.search('(chmod)\s+', search_str)
if res.group(1) != commands['modify_perm']:
print(colors.ENDC+"Wrong command!")
raise()
os.system(ri)
print(colors.ENDC)
except:
print(colors.ENDC + "Something went wrong!\n")
while(1):
ri=input("If you want to continue exploring last command, press 'Yes' else 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
self.change_file_permission()
else:
pass
try:
while(1):
ri = input("You want to check the permission for your file? Press 'Yes' or 'No'\n")
if not (ri.lower() == 'yes' or ri.lower() == 'no'):
print("[%s] You didn't enter Yes or no, Please try again", ri)
else:
break
if ri.lower() == 'yes':
n = Navigation()
n.long_listing()
else:
pass
except:
print("Oops!!")
pass
def all(self):
self.cat_file()
commands_learned.append('cat')
self.copy_file()
        commands_learned.append('cp')
self.rename_file()
commands_learned.append('mv')
self.change_file_permission()
commands_learned.append('chmod')
#
def main():
print("===============================================================================\n")
print("\tThis is very basic hands-on tutorial of Linux.\n")
print("\tIt will just make you understand initial basic commands.\n")
print("\tBest way to learn is explore. If you want to explore more about a command\n")
print("\tsimply use \"man <command name>\". And yes, you have \"man man\" too!!!")
print("\tHave Fun!!!!\n")
print("===============================================================================\n")
print("After you log into your system, open 'Terminal'\n")
print("Hopefully you will be landed into your 'home' sweet home'\n")
n = Navigation()
n.all()
m = FileManipulations()
m.all()
print("Hope you enjoyed and learnt some basics!!")
print("Just to recap, here is the list of commands you learnt:\n", commands_learned)
if __name__ == "__main__":
main()
| 3.359375 | 3 |
utils/nico_spliter.py | Luodian/Learning-Invariant-Representations-and-Risks | 17 | 12789153 |
import os
# used to remove all .DS_Store files from mac os.
def file_fliter(root_path):
for home, dirs, files in os.walk(root_path):
for file_name in files:
if file_name.startswith("."):
print(os.path.join(home, file_name))
try:
os.remove(os.path.join(home, file_name))
except:
print('Wrong path...')
def delete_blank(root_path):
for category in os.listdir(root_path):
for domain in os.listdir(os.path.join(root_path, category)):
if "on grass" in domain:
os.rename(os.path.join(root_path, category, "on grass"), os.path.join(root_path, category, "grass"))
if "on snow" in domain:
os.rename(os.path.join(root_path, category, "on snow"), os.path.join(root_path, category, "snow"))
def fill_blank(root_path):
for home, dirs, files in os.walk(root_path):
for file_name in files:
if " " in file_name:
print(os.path.join(home, file_name))
try:
os.rename(os.path.join(home, file_name), os.path.join(home, file_name.replace(" ", '_')))
# os.remove(os.path.join(home, file_name))
except:
print('Wrong path...')
def read_dataset(root_path):
ds = {}
domain_freq = {}
for category in os.listdir(root_path):
ds[category] = {}
# new a dict to store each category's domain information.
in_cate = {}
for domain in os.listdir(os.path.join(root_path, category)):
if domain not in domain_freq:
domain_freq[domain] = 0
domain_freq[domain] += 1
# print(domain)
in_cate[domain] = []
for file in os.listdir(os.path.join(root_path, category, domain)):
in_cate[domain].append(os.path.join(category, domain, file))
# print(in_cate[domain][-1])
ds[category] = in_cate
return ds
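# read_dataset returns a nested mapping of the form
#   {category: {domain: [relative image paths]}}
# which domain_spliter below consumes to build the source list and the
# labeled / unlabeled / validation target splits.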
def domain_spliter(ds, source_domain, target_domain, pct_lists):
source = []
for category in ds:
sub_area_images = ds[category][source_domain]
sub_area_image_ext = []
for item in sub_area_images:
sub_area_image_ext.append((item, category))
source.extend(sub_area_image_ext)
pass
import numpy as np
prepared_target = {}
def select_by_category(domain, pct_list: list):
labeled = {}
for n_pct in pct_list:
labeled[n_pct] = []
unlabed = []
val = []
def rp_list(arr, cat):
ret_arr = []
for index, item in enumerate(arr):
ret_arr.append((item, cat))
return ret_arr
for category in ds:
domain_arr = ds[category][domain]
np.random.shuffle(domain_arr)
for n_pct in pct_list:
if int(len(ds[category][domain]) * 0.01 * n_pct) >= 1:
selection = int(len(ds[category][domain]) * 0.01 * n_pct)
else:
selection = 1
cat_labeled_train = domain_arr[:selection]
labeled[n_pct].extend(rp_list(cat_labeled_train, category))
cat_unlabeled_train = domain_arr[:int(len(domain_arr) * 0.7)]
cat_val = domain_arr[int(len(domain_arr) * 0.7):]
unlabed.extend(rp_list(cat_unlabeled_train, category))
val.extend(rp_list(cat_val, category))
return labeled, unlabed, val
prepared_target = {}
prepared_target['labeled'], prepared_target['unlabeled'], prepared_target[
'validation'] = select_by_category(target_domain, pct_lists)
return source, prepared_target
def write_to_txt(source: list, save_path: str, save_name: str):
def refiner(line: str):
if 'on snow' in line:
line = line.replace('on snow', 'snow')
if 'on grass' in line:
line = line.replace('on grass', 'grass')
return line
with open(os.path.join(save_path, save_name), 'w') as fp:
for item in source:
fp.writelines(refiner(item[0]) + ' ' + item[1] + '\n')
if __name__ == '__main__':
# file_fliter("/home/v-boli4/codebases/external_datasets/NICO-Traffic")
fill_blank("/home/v-boli4/codebases/external_datasets/NICO-ANIMAL")
ds = read_dataset('/home/v-boli4/codebases/external_datasets/NICO-ANIMAL')
ds.pop('bear', None)
ds.pop('bird', None)
pct_lists = [1, 3, 5, 10]
def pipeline(source_domain, target_domain):
source, prepared_target = domain_spliter(ds, source_domain, target_domain, pct_lists)
write_to_txt(source, '/home/v-boli4/codebases/DA_Codebase/datasets/convention/nico/source',
'{}.txt'.format(source_domain))
for pct in pct_lists:
write_to_txt(prepared_target['labeled'][pct],
'/home/v-boli4/codebases/DA_Codebase/datasets/convention/nico/target',
'{}_labeled_{}.txt'.format(target_domain, pct))
write_to_txt(prepared_target['unlabeled'],
'/home/v-boli4/codebases/DA_Codebase/datasets/convention/nico/target',
'{}_{}.txt'.format(target_domain, 'unlabeled'))
write_to_txt(prepared_target['validation'],
'/home/v-boli4/codebases/DA_Codebase/datasets/convention/nico/target',
'{}_{}.txt'.format(target_domain, 'validation'))
pipeline('grass', 'snow')
pipeline('snow', 'grass')
| 3.09375 | 3 |
or_testbed/solvers/tabusearch/multistart.py | Fynardo/or-testbed | 1 | 12789154 |
# -*- coding:utf-8 -*-
import or_testbed.solvers.base.solver as base_solver
class MultiStartTabuSearch(base_solver.MultiStartSolver):
"""
MultiStart version of Tabu Search.
This is just an extension of base multistart solver. It works fine out of the box.
"""
def __init__(self, iters, inner_tabusearch_factory, debug=True, log_file=None):
super().__init__(iters, inner_tabusearch_factory, debug=debug, log_file=log_file)
self.name = 'MultiStart Tabu Search'
| 2.078125 | 2 |
app/__init__.py | ryzencool/flask_arch | 0 | 12789155 |
from flask import Flask
from app.api import user, trans
app_starter = Flask(__name__)
app_starter.register_blueprint(user, url_prefix="/user")
app_starter.register_blueprint(trans, url_prefix="/trans") | 2.03125 | 2 |
src/psiz/keras/layers/similarities/inverse.py | greenfieldvision/psiz | 21 | 12789156 |
# -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Module of TensorFlow kernel layers.
Classes:
InverseSimilarity: A parameterized inverse similarity layer.
"""
import tensorflow as tf
from tensorflow.python.keras import backend as K
import psiz.keras.constraints as pk_constraints
@tf.keras.utils.register_keras_serializable(
package='psiz.keras.layers', name='InverseSimilarity'
)
class InverseSimilarity(tf.keras.layers.Layer):
"""Inverse-distance similarity function.
The inverse-distance similarity function is parameterized as:
s(x,y) = 1 / (d(x,y)**tau + mu),
where x and y are n-dimensional vectors.
"""
def __init__(
self, fit_tau=True, fit_mu=True, tau_initializer=None,
mu_initializer=None, **kwargs):
"""Initialize.
Arguments:
fit_tau (optional): Boolean indicating if variable is
trainable.
            fit_mu (optional): Boolean indicating if variable is
                trainable.
            tau_initializer (optional): Initializer for the tau variable.
            mu_initializer (optional): Initializer for the mu variable.
"""
super(InverseSimilarity, self).__init__(**kwargs)
self.fit_tau = fit_tau
if tau_initializer is None:
tau_initializer = tf.random_uniform_initializer(1., 2.)
self.tau_initializer = tf.keras.initializers.get(tau_initializer)
tau_trainable = self.trainable and self.fit_tau
with tf.name_scope(self.name):
self.tau = self.add_weight(
shape=[], initializer=self.tau_initializer,
trainable=tau_trainable, name="tau", dtype=K.floatx(),
constraint=pk_constraints.GreaterEqualThan(min_value=1.0)
)
self.fit_mu = fit_mu
if mu_initializer is None:
mu_initializer = tf.random_uniform_initializer(0.0000000001, .001)
        self.mu_initializer = tf.keras.initializers.get(mu_initializer)
mu_trainable = self.trainable and self.fit_mu
with tf.name_scope(self.name):
self.mu = self.add_weight(
                shape=[], initializer=self.mu_initializer,
trainable=mu_trainable,
name="mu", dtype=K.floatx(),
constraint=pk_constraints.GreaterEqualThan(
min_value=2.2204e-16
)
)
def call(self, inputs):
"""Call.
Arguments:
inputs: A tensor of distances.
Returns:
A tensor of similarities.
"""
return 1 / (tf.pow(inputs, self.tau) + self.mu)
def get_config(self):
"""Return layer configuration."""
config = super().get_config()
config.update({
'fit_tau': self.fit_tau,
'fit_mu': self.fit_mu,
'tau_initializer': tf.keras.initializers.serialize(
self.tau_initializer
),
'mu_initializer': tf.keras.initializers.serialize(
self.mu_initializer
),
})
return config
| 2.5625 | 3 |
fixtitle.py | qingzma/cnd | 1 | 12789157 |
import math
fname1 = "paper.csv"
fname2 = "paper_fixed.csv"
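# The loop below appears to repair rows whose title field itself contains
# commas: the first column and the last two columns are kept as-is, and
# everything in between is re-joined with ';' so each output row again has
# exactly four fields.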
with open(fname2, 'w') as f2:
with open(fname1, 'r') as f1:
for line in f1.readlines():
spl = line.split(",")
if len(spl) < 4:
continue
f2.write(spl[0])
f2.write(",")
f2.write(";".join(spl[1:-2]))
f2.write(",")
f2.write(spl[-2])
f2.write(",")
f2.write(spl[-1]) | 2.6875 | 3 |
bpprosdk/websockets/orderbook/orderbook.py | Tibi-Bitpanda/bitpanda-pro-sdk-py | 17 | 12789158 |
"""Order Book model"""
import json
from decimal import Decimal
class OrderBook:
"""Stores the state of the order book"""
def __init__(self):
self.asks = dict()
self.bids = dict()
self.instrument_code = None
def get_bids(self):
"""
All bids of the order book
"""
return self.bids
def get_asks(self):
"""
All asks of the order book
"""
return self.asks
def init_with_snapshot(self, json_data: json):
"""
Initialize order book with snapshot. Previous data is discarded.
"""
self.instrument_code = json_data["instrument_code"]
# discard old data
self.asks = dict()
self.bids = dict()
for price, amount in json_data["asks"]:
self.asks.update({price: amount})
for price, amount in json_data["bids"]:
self.bids.update({price: amount})
def update(self, json_update: json):
"""
Updates bids + asks of order book
"""
changes = json_update["changes"]
for side, price, amount in changes:
if side == "BUY":
if Decimal(amount) > Decimal(0):
# add bid
self.bids.update({price: amount})
elif Decimal(amount) <= Decimal(0):
# remove bid
if self.bids.get(price) is not None:
self.bids.pop(price)
elif side == "SELL":
if Decimal(amount) > Decimal(0):
# add ask
self.asks.update({price: amount})
elif Decimal(amount) <= Decimal(0):
# remove ask
if self.asks.get(price) is not None:
self.asks.pop(price)
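# Minimal usage sketch (the message shapes below are inferred from
# init_with_snapshot/update above, not taken from the exchange docs):
#
#   book = OrderBook()
#   book.init_with_snapshot({"instrument_code": "BTC_EUR",
#                            "bids": [["9999.0", "0.5"]],
#                            "asks": [["10001.0", "0.3"]]})
#   book.update({"changes": [["BUY", "9998.5", "1.2"]]})
#   book.get_bids()  # {'9999.0': '0.5', '9998.5': '1.2'}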
| 2.78125 | 3 |
llama/tests/ping_test.py | anirudh-ramesh/llama | 0 | 12789159 | """Unittests for metrics lib."""
from llama import ping
from llama import util
import pytest
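# fake_runcmd stands in for util.runcmd in the test below: it returns the
# (exit_code, stdout, stderr) triple that ping.hping3 is expected to parse
# into a (loss, avg_rtt, target) result such as ('0', '0.1', 'somehost').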
def fake_runcmd(cmd):
stderr = '''
--- shelby hping statistic ---
5 packets transmitted, 5 packets received, 0% packet loss
round-trip min/avg/max = 0.1/0.1/0.2 ms
'''
stdout = '''
HPING shelby (eth0 192.168.3.11): S set, 40 headers + 0 data bytes
len=46 ip=1.1.7.5 ttl=61 DF id=4696 sport=0 flags=RA seq=0 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4699 sport=0 flags=RA seq=1 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4701 sport=0 flags=RA seq=2 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4702 sport=0 flags=RA seq=3 win=0 rtt=0.1 ms
len=46 ip=1.1.7.5 ttl=61 DF id=4704 sport=0 flags=RA seq=4 win=0 rtt=0.1 ms
'''
return 0, stdout, stderr
class TestHping3(object):
def silence_pyflakes(self):
"""PyFlakes complains because we don't explicitly use the module."""
dir(pytest)
def test_good(self, monkeypatch):
monkeypatch.setattr(util, 'runcmd', fake_runcmd)
assert ping.hping3('somehost', count=5) == ('0', '0.1', 'somehost')
| 2.265625 | 2 |
src/scottbrian_throttle/throttle.py | ScottBrian/scottbrian_throttle | 0 | 12789160 | """Module throttle.
========
Throttle
========
The throttle allows you to limit the rate at which a function is
executed. This is helpful to avoid exceeding a limit, such as when
sending requests to an internet service that specifies a limit as to the
number of requests that can be sent in a specific time interval.
The throttle package include four different algorithms for the limiting
control, each provided as a decorator or as a class:
1. **@throttle_sync** decorator and **ThrottleSync** class provide a
synchronous algorithm.
For synchronous throttling, you specify the *requests* and
*seconds* which determine the send rate limit. The throttle
keeps track of the intervals between each request and will block
only as needed to ensure the send rate limit is not exceeded.
This algorithm provides a strict adherence to the send rate limit
for those cases that need it.
2. **@throttle_sync_ec** decorator and **ThrottleSyncEc** class
provide an early arrival algorithm.
For synchronous throttling with the early arrival algorithm, you
specify the *requests* and *seconds* which determine the send
rate limit. You also specify an *early_count*, the number of
requests the throttle will send immediately without delay. Once
the *early_count* is reached, the throttle kicks in and, if
needed, delays the next request by a cumulative amount that
reflects the current request and the requests that were sent
early. This will ensure that the average send rate for all
requests stay within the send rate limit. This algorithm is best
used when you have a steady stream of requests within the send
rate limit, and an occasional burst of requests that the target
service will tolerate.
3. **@throttle_sync_lb** decorator and **ThrottleSyncLb** class
provide a leaky bucket algorithm.
For synchronous throttling with the leaky bucket algorithm, you
specify the *requests* and *seconds* which determine the send
rate limit. You also specify an *lb_threshold* value, the number
of requests that will fit into a conceptual bucket. As each
request is received, if it fits, it is placed into the bucket and
is sent. The bucket leaks at a fixed rate that reflects the send
rate limit such that each new request will fit given it does
not exceed the send rate limit. If the bucket becomes full, the
next request will be delayed until the bucket has leaked enough
to hold it, at which time it will be sent. Unlike the early count
algorithm, the leaky bucket algorithm results in an average send
rate that slightly exceeds the send rate limit. This algorithm is
best used when you have a steady stream of requests within the
send rate limit, and an occasional burst of requests that the
target service will tolerate.
4. **@throttle_async** decorator and **ThrottleAsync** class provide
an asynchronous algorithm.
With asynchronous throttling, you specify the *requests* and
*seconds* which determine the send rate limit. As each request is
received, it is placed on a queue and control returns to the
caller. A separate request scheduler thread pulls the requests
from the queue and sends them at a steady interval to achieve the
specified send rate limit. You may also specify an *async_q_size*
that determines the number of requests that can build up on the
queue before the caller is blocked while trying to add requests.
This algorithm provides a strict adherence to the send rate limit
without having to delay the user (unless the queue becomes full).
This is best used when you have a steady stream of requests
within the send rate limit, and an occasional burst of requests
that you do not want to be delayed for. It has an added
responsibility that you need to perform a shutdown of the
throttle when your program ends to ensure that the request scheduler
thread is properly ended.
:Example: 1) Wrapping a function with the **@throttle_sync** decorator
Here we are using the **@throttle_sync** decorator to wrap a function
that needs to be limited to no more than 2 executions per second. In the
following code, make_request will be called 10 times in rapid
succession. The **@throttle_sync** keeps track of the time for each
invocation and will insert a wait as needed to stay within the limit.
The first execution of make_request will be done immediately while the
remaining executions will each be delayed by 1/2 second as seen in the
output messages.
>>> from scottbrian_throttle.throttle import throttle_sync
>>> import time
>>> @throttle_sync(requests=2, seconds=1)
... def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> start_time = time.time()
>>> for i in range(10):
... make_request(i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.5
request 2 sent at elapsed time: 1.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 2.0
request 5 sent at elapsed time: 2.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.5
request 8 sent at elapsed time: 4.0
request 9 sent at elapsed time: 4.5
:Example: 2) Using the **ThrottleSync** class
Here's the same example from above, but instead of the decorator we use
the **ThrottleSync** class. Note that the loop now calls send_request,
passing in the make_request function and its arguments:
>>> from scottbrian_throttle.throttle import ThrottleSync
>>> import time
>>> def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> a_throttle = ThrottleSync(requests=2, seconds=1)
>>> start_time = time.time()
>>> for i in range(10):
... a_throttle.send_request(make_request, i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.5
request 2 sent at elapsed time: 1.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 2.0
request 5 sent at elapsed time: 2.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.5
request 8 sent at elapsed time: 4.0
request 9 sent at elapsed time: 4.5
:Example: 3) Wrapping a function with the **@throttle_sync_ec**
decorator
Here we continue with the same example, only this time using the
**@throttle_sync_ec** decorator to see how its algorithm in action.
We will use the same *requests* of 2 and *seconds* of 1, and an
*early_count* of 2. The make_request function will again be called 10
times in rapid succession. The **@throttle_sync_ec** will allow the
first request to proceed immediately. The next two requests are
considered early, so they will be allowed to proceed as well. The fourth
request will then be delayed to allow the throttle to catch up to where we
should be, and then the process will repeat with some requests going
early followed by a catch-up delay. We can see this behavior in the
messages that show the intervals.
>>> from scottbrian_throttle.throttle import throttle_sync_ec
>>> import time
>>> @throttle_sync_ec(requests=2, seconds=1, early_count=2)
... def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> start_time = time.time()
>>> for i in range(10):
... make_request(i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.0
request 2 sent at elapsed time: 0.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 1.5
request 5 sent at elapsed time: 1.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.0
request 8 sent at elapsed time: 3.0
request 9 sent at elapsed time: 4.5
:Example: 4) Using the **ThrottleSyncEc** class
Here we show the early count with the **ThrottleSyncEc** class:
>>> from scottbrian_throttle.throttle import ThrottleSyncEc
>>> import time
>>> def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> a_throttle = ThrottleSyncEc(requests=2, seconds=1, early_count=2)
>>> start_time = time.time()
>>> for i in range(10):
... a_throttle.send_request(make_request, i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.0
request 2 sent at elapsed time: 0.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 1.5
request 5 sent at elapsed time: 1.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.0
request 8 sent at elapsed time: 3.0
request 9 sent at elapsed time: 4.5
:Example: 5) Wrapping a function with the **@throttle_sync_lb**
decorator
We now take the early count example from above and switch in the leaky
bucket algorithm instead. We will use the *requests* of 2, *seconds* of
1, and *lb_threshold* of 3. The make_request function will again be
called 10 times in rapid succession. The **@throttle_sync_lb** will
be able to fit the first three requests into the bucket and send them
immediately. The fourth request will not fit into the bucket which now
causes the throttle to delay to allow the bucket to leak out one of the
requests. After the delay, the fourth request is placed into the bucket
and sent, followed by the fifth and subsequent requests, each
of which is delayed to allow the bucket to accommodate them. We can see
this behavior in the messages that show the intervals.
>>> from scottbrian_throttle.throttle import throttle_sync_lb
>>> import time
>>> @throttle_sync_lb(requests=2, seconds=1, lb_threshold=3)
... def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> start_time = time.time()
>>> for i in range(10):
... make_request(i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.0
request 2 sent at elapsed time: 0.0
request 3 sent at elapsed time: 0.5
request 4 sent at elapsed time: 1.0
request 5 sent at elapsed time: 1.5
request 6 sent at elapsed time: 2.0
request 7 sent at elapsed time: 2.5
request 8 sent at elapsed time: 3.0
request 9 sent at elapsed time: 3.5
:Example: 6) Using the **ThrottleSyncLb** class
Here we show the leaky bucket example using the **ThrottleSyncLb**
class:
>>> from scottbrian_throttle.throttle import ThrottleSyncLb
>>> import time
>>> def make_request(request_number, time_of_start):
... print(f'request {request_number} sent at elapsed time: '
... f'{time.time() - time_of_start:0.1f}')
>>> a_throttle = ThrottleSyncLb(requests=2, seconds=1, lb_threshold=3)
>>> start_time = time.time()
>>> for i in range(10):
... a_throttle.send_request(make_request, i, start_time)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.0
request 2 sent at elapsed time: 0.0
request 3 sent at elapsed time: 0.5
request 4 sent at elapsed time: 1.0
request 5 sent at elapsed time: 1.5
request 6 sent at elapsed time: 2.0
request 7 sent at elapsed time: 2.5
request 8 sent at elapsed time: 3.0
request 9 sent at elapsed time: 3.5
:Example: 7) Wrapping a function with the **@throttle_async** decorator
We now continue with the same setup from above, only now we are using
the **@throttle_async** decorator. We will again specify *requests* of
2 and *seconds* of 1. The make_request function will be called 10
times in rapid succession. The **@throttle_async** decorator will queue the
requests to the request queue and the schedule_request method running
under a separate thread will dequeue and execute them at the send rate
interval determined by the requests and seconds arguments (in this case,
1/2 second). This will have similar behavior to the throttle_sync
algorithm, except that the requests are executed from a separate thread.
>>> from scottbrian_throttle.throttle import throttle_async
>>> import time
>>> @throttle_async(requests=2, seconds=1)
... def make_request(request_number, time_of_start):
... results.append(f'request {request_number} sent at elapsed time:'
... f' {time.time() - time_of_start:0.1f}')
>>> results = []
>>> start_time = time.time()
>>> for i in range(10):
... _ = make_request(i, start_time)
>>> shutdown_throttle_funcs(make_request)
>>> for line in results:
... print(line)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.5
request 2 sent at elapsed time: 1.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 2.0
request 5 sent at elapsed time: 2.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.5
request 8 sent at elapsed time: 4.0
request 9 sent at elapsed time: 4.5
:Example: 8) Using the **ThrottleAsync** class
Here we continue with the same setup, only now using the
**ThrottleAsync** class:
>>> from scottbrian_throttle.throttle import ThrottleAsync
>>> import time
>>> def make_request(request_number, time_of_start):
... results.append(f'request {request_number} sent at elapsed time:'
... f' {time.time() - time_of_start:0.1f}')
>>> a_throttle = ThrottleAsync(requests=2, seconds=1)
>>> results = []
>>> start_time = time.time()
>>> for i in range(10):
... _ = a_throttle.send_request(make_request, i, start_time)
>>> _ = a_throttle.start_shutdown()
>>> for line in results:
... print(line)
request 0 sent at elapsed time: 0.0
request 1 sent at elapsed time: 0.5
request 2 sent at elapsed time: 1.0
request 3 sent at elapsed time: 1.5
request 4 sent at elapsed time: 2.0
request 5 sent at elapsed time: 2.5
request 6 sent at elapsed time: 3.0
request 7 sent at elapsed time: 3.5
request 8 sent at elapsed time: 4.0
request 9 sent at elapsed time: 4.5
The throttle module contains:
1) Throttle class object factory:
2) Error exception classes:
3) @throttle decorator
"""
########################################################################
# Standard Library
########################################################################
import functools
import logging
import queue
import threading
import time
from typing import (Any, Callable, cast, Final, NamedTuple, Optional,
overload, Protocol, TYPE_CHECKING, Type, TypeVar, Union)
from typing_extensions import TypeAlias
########################################################################
# Third Party
########################################################################
from scottbrian_utils.pauser import Pauser
from wrapt.decorators import decorator # type: ignore
########################################################################
# Local
########################################################################
########################################################################
# type aliases and TypeVars
########################################################################
IntFloat: TypeAlias = Union[int, float]
OptIntFloat: TypeAlias = Optional[IntFloat]
# T = TypeVar('T', bound=Throttle)
########################################################################
# Throttle class exceptions
########################################################################
class ThrottleError(Exception):
"""Base class for exceptions in this module."""
pass
class IllegalSoftShutdownAfterHard(ThrottleError):
"""Throttle exception for illegal soft shutdown after hard."""
pass
class IncorrectAsyncQSizeSpecified(ThrottleError):
"""Throttle exception for incorrect async_q_size specification."""
pass
class IncorrectEarlyCountSpecified(ThrottleError):
"""Throttle exception for incorrect early_count specification."""
pass
class IncorrectLbThresholdSpecified(ThrottleError):
"""Throttle exception for incorrect lb_threshold specification."""
pass
class IncorrectModeSpecified(ThrottleError):
"""Throttle exception for incorrect mode specification."""
pass
class IncorrectRequestsSpecified(ThrottleError):
"""Throttle exception for incorrect requests specification."""
pass
class IncorrectSecondsSpecified(ThrottleError):
"""Throttle exception for incorrect seconds specification."""
pass
class IncorrectShutdownTypeSpecified(ThrottleError):
"""Throttle exception for incorrect shutdown_type specification."""
pass
class MissingEarlyCountSpecification(ThrottleError):
"""Throttle exception for missing early_count specification."""
pass
class MissingLbThresholdSpecification(ThrottleError):
"""Throttle exception for missing lb_threshold specification."""
pass
########################################################################
# get_throttle
########################################################################
# def get_throttle(
# *,
# requests: int,
# seconds: IntFloat,
# mode: int,
# async_q_size: Optional[int] = None,
# early_count: Optional[int] = None,
# lb_threshold: OptIntFloat = None
# ) -> Any:
# """Create and return the throttle object given the input mode.
#
# Args:
# requests: The number of requests that can be made in
# the interval specified by seconds.
# seconds: The number of seconds in which the number of
# requests specified in requests can be made.
# mode: Specifies one of four modes for the throttle:
#
# 1) **mode=Throttle.MODE_ASYNC** specifies asynchronous
# mode. With asynchronous throttling, each request is
# placed on a queue and control returns to the caller.
# A separate thread then executes each request at a
# steady interval to achieve the specified number of
# requests per the specified number of seconds. Since
# the caller is given back control, any return values
# from the request must be handled by an established
# protocol between the caller and the request, (e.g.,
# a callback method).
# 2) **mode=Throttle.MODE_SYNC** specifies synchronous
# mode. For synchronous throttling, the caller may be
# blocked to delay the request in order to achieve the
# specified number of requests per the specified number
# of seconds. Since the request is handled
# synchronously, a return value from the request will
# be returned to the caller when the request completes.
# 3) **mode=Throttle.MODE_SYNC_EC** specifies synchronous
# mode using an early arrival algorithm. For
# synchronous throttling with the early arrival
# algorithm, an *early_count* number of requests are
# sent immediately without delay before the throttling
# becomes active. The objective is to allow a bursts of
# requests while also ensuring that the average arrival
# rate is within the limit as specified by the
# *requests* and *seconds* arguments.
# 4) **mode=Throttle.MODE_SYNC_LB** specifies synchronous
# mode using a leaky bucket algorithm. For synchronous
# throttling with the leaky bucket algorithm, some
# number of requests are sent immediately without delay
# even though they may have arrived at a quicker pace
# than that allowed by the requests and seconds
# specification. A lb_threshold specification is
# required when mode Throttle.MODE_SYNC_LB is
# specified. See the lb_threshold parameter for
# details.
# async_q_size: Specifies the size of the request
# queue for async requests. When the request
# queue is totally populated, any additional
# calls to send_request will be delayed
# until queued requests are removed and
# scheduled. The default is 4096 requests.
# early_count: Specifies the number of requests that are
# allowed to proceed immediately without delay
# for **mode=Throttle.MODE_SYNC_EC**.
# Note that a specification of 0 for
# *early_count* will result in the same
# behavior as if **mode=Throttle.MODE_SYNC**
# had been specified.
# lb_threshold: Specifies the threshold for the leaky bucket
# when Throttle.MODE_SYNC_LB is specified for
# mode. This is the number of requests that
# can be in the bucket such that the next
# request is allowed to proceed without delay.
# That request is added to the bucket, and
# then the bucket leaks out the requests.
# When the next request arrives, it will be
# delayed by whatever amount of time is
# needed for the bucket to have leaked enough
# to be at the threshold. Note that a
# specification of 1 for *lb_threshold* will
# result in the same behavior as if
# **mode=Throttle.MODE_SYNC** had been
# specified.
#
# .. # noqa: DAR101
#
# Returns:
# The throttle class for the specified mode.
#
# Raises:
# IncorrectModeSpecified: The *mode* specification must be an
# integer with a value of 1, 2, 3, or 4. Use
# Throttle.MODE_ASYNC, Throttle.MODE_SYNC,
# Throttle.MODE_SYNC_EC, or Throttle.MODE_SYNC_LB.
#
#
# :Example: instantiate an async throttle for 1 request per second
#
# >>> from scottbrian_throttle.throttle import Throttle
# >>> request_throttle = ThrottleAsync(requests=1,
# ... seconds=1)
#
#
# :Example: instantiate an async throttle for 5 requests per 1/2
# second with an async queue size of 256
#
# >>> from scottbrian_throttle.throttle import Throttle
# >>> from threading import Event
# >>> request_throttle = ThrottleAsync(requests=5,
# ... seconds=0.5,
# ... async_q_size=256)
#
#
# :Example: instantiate a throttle for 20 requests per 2 minutes
# using the early count algorithm
#
# >>> from scottbrian_throttle.throttle import Throttle
# >>> request_throttle = ThrottleSyncEc(requests=5,
# ... seconds=120,
# ... early_count=3)
#
#
# :Example: instantiate a throttle for 3 requests per second
# using the leaky bucket algorithm
#
# >>> from scottbrian_throttle.throttle import Throttle
# >>> request_throttle = ThrottleSyncLb(requests=5,
# ... seconds=120,
# ... lb_threshold=5)
#
#
# """
# if mode == Throttle.MODE_SYNC:
# return ThrottleSync(requests=requests,
# seconds=seconds)
# elif mode == Throttle.MODE_ASYNC:
# return ThrottleAsync(requests=requests,
# seconds=seconds,
# async_q_size=async_q_size)
# elif mode == Throttle.MODE_SYNC_EC:
# if early_count is None:
# raise MissingEarlyCountSpecification(
# 'An argument for early_count must be specified '
# 'for mode=Throttle.MODE_SYNC_EC.'
# )
# return ThrottleSyncEc(requests=requests,
# seconds=seconds,
# early_count=early_count)
# elif mode == Throttle.MODE_SYNC_LB:
# if lb_threshold is None:
# raise MissingLbThresholdSpecification(
# 'An argument for lb_threshold must be specified '
# 'for mode=Throttle.MODE_SYNC_LB.'
# )
# return ThrottleSyncLb(requests=requests,
# seconds=seconds,
# lb_threshold=lb_threshold)
# else:
# raise IncorrectModeSpecified(
# 'The mode specification must be an '
# 'integer with value 1, 2, 3, or 4.')
########################################################################
# Throttle Base class
########################################################################
class Throttle:
"""Throttle base class."""
DEFAULT_ASYNC_Q_SIZE: Final[int] = 4096
TYPE_SHUTDOWN_NONE: Final[int] = 0
TYPE_SHUTDOWN_SOFT: Final[int] = 4
TYPE_SHUTDOWN_HARD: Final[int] = 8
RC_OK: Final[int] = 0
RC_SHUTDOWN: Final[int] = 4
class Request(NamedTuple):
"""NamedTuple for the request queue item."""
request_func: Callable[..., Any]
args: tuple[Any, ...]
kwargs: dict[str, Any]
arrival_time: float
SECS_2_NS: Final[int] = 1000000000
NS_2_SECS: Final[float] = 0.000000001
__slots__ = ('requests', 'seconds', '_target_interval',
'_target_interval_ns', 'sync_lock', '_arrival_time',
'_next_target_time', 'logger', 'pauser')
####################################################################
# __init__
####################################################################
def __init__(self, *,
requests: int,
seconds: IntFloat
) -> None:
"""Initialize an instance of the Throttle class.
Args:
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of
requests specified in requests can be made.
Raises:
IncorrectRequestsSpecified: The *requests* specification
must be a positive integer greater than zero.
IncorrectSecondsSpecified: The *seconds* specification must
be a positive int or float greater than zero.
"""
################################################################
# determine whether we are throttle decorator
################################################################
# self.decorator = False
# frame = inspect.currentframe()
# if frame is not None:
# if frame.f_back.f_code.co_name == 'throttle':
# self.decorator = True
# else:
# self.decorator = False
################################################################
# requests
################################################################
if (isinstance(requests, int)
and (0 < requests)):
self.requests = requests
else:
raise IncorrectRequestsSpecified('The requests '
'specification must be a '
'positive integer greater '
'than zero.')
################################################################
# seconds
################################################################
if isinstance(seconds, (int, float)) and (0 < seconds):
self.seconds = seconds # timedelta(seconds=seconds)
else:
raise IncorrectSecondsSpecified('The seconds specification '
'must be an integer or '
'float greater than zero.')
################################################################
# Set remainder of vars
################################################################
self._target_interval = seconds / requests
self._target_interval_ns: float = (self._target_interval
* Throttle.SECS_2_NS)
self.sync_lock = threading.Lock()
self._arrival_time = 0.0
self._next_target_time: float = time.perf_counter_ns()
self.logger = logging.getLogger(__name__)
self.pauser = Pauser()
####################################################################
# send_request
####################################################################
def send_request(self,
func: Callable[..., Any],
*args: Any,
**kwargs: Any
) -> Any:
"""Send the request.
Args:
func: the request function to be run
args: the request function positional arguments
kwargs: the request function keyword arguments
Returns:
The return code from the request function (may be None)
Raises:
Exception: An exception occurred in the request target. It
will be logged and re-raised.
"""
################################################################
# SYNC_MODE
################################################################
################################################################
# The SYNC_MODE Throttle algorithm works as follows:
# 1) during throttle instantiation:
# a) a target interval is calculated as seconds/requests.
# For example, with a specification of 4 requests per 1
# second, the target interval will be 0.25 seconds.
# b) _next_target_time is set to a current time reference via
# time.perf_counter_ns
# 2) as each request arrives, it is checked against the
# _next_target_time and:
# a) if it arrived at or after _next_target_time, it is
# allowed to proceed without delay
# b) if it arrived before the _next_target_time the request
# is delayed until _next_target_time is reached
# 3) _next_target_time is increased by the target_interval
#
################################################################
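        # For example, with requests=4 and seconds=1 the target interval
        # is 0.25 seconds. If the previous request left _next_target_time
        # at t+0.25 and the next request arrives at t+0.10, it is paused
        # for 0.15 seconds; a request arriving at or after t+0.25 is sent
        # without delay.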
with self.sync_lock:
# set the time that this request is being made
self._arrival_time = time.perf_counter_ns()
if self._arrival_time < self._next_target_time:
wait_time = (self._next_target_time
- self._arrival_time) * Throttle.NS_2_SECS
self.pauser.pause(wait_time)
############################################################
# Update the expected arrival time for the next request by
# adding the request interval to our current time or the
# next arrival time, whichever is later. Note that we update
# the target time before we send the request which means we
# face a possible scenario where we send a request that gets
            # delayed en route to the service, but our next request
# arrives at the updated expected arrival time and is sent
# out immediately, but it now arrives early relative to the
# previous request, as observed by the service. If we update
# the target time after sending the request we avoid that
# scenario, but we would then be adding in the request
# processing time to the throttle delay with the undesirable
# effect that all requests will now be throttled more than
# they need to be.
############################################################
self._next_target_time = (time.perf_counter_ns()
+ self._target_interval_ns)
############################################################
# Call the request function and return with the request
# return value. We use try/except to log and re-raise any
# unhandled errors.
############################################################
try:
return func(*args, **kwargs)
except Exception as e:
self.logger.debug('throttle send_request unhandled '
f'exception in request: {e}')
raise
####################################################################
# get_interval
####################################################################
def get_interval_secs(self) -> float:
"""Calculate the interval between requests in seconds.
Returns:
The target interval in seconds.
"""
return self._target_interval
####################################################################
# get_interval
####################################################################
def get_interval_ns(self) -> float:
"""Calculate the interval between requests in nanoseconds.
Returns:
The target interval in nanoseconds.
"""
return self._target_interval_ns
####################################################################
# get_completion_time
####################################################################
def get_completion_time_secs(self,
requests: int,
from_start: bool) -> float:
"""Calculate completion time secs for given number requests.
Args:
requests: number of requests to do
from_start: specifies whether the calculation should be done
for a series that is starting fresh where the
first request has no delay
Returns:
The estimated number of elapsed seconds for the number
of requests specified
"""
if from_start:
return (requests - 1) * self._target_interval
else:
return requests * self._target_interval
####################################################################
# get_completion_time
####################################################################
def get_completion_time_ns(self,
requests: int,
from_start: bool) -> float:
"""Calculate completion time ns for given number requests.
Args:
requests: number of requests to do
from_start: specifies whether the calculation should be done
for a series that is starting fresh where the
first request has no delay
Returns:
The estimated number of elapsed seconds for the number
of requests specified
"""
if from_start:
return (requests - 1) * self._target_interval_ns
else:
return requests * self._target_interval_ns
########################################################################
# ThrottleSync class
########################################################################
class ThrottleSync(Throttle):
"""Throttle class for sync mode."""
####################################################################
# __init__
####################################################################
def __init__(self, *,
requests: int,
seconds: IntFloat
) -> None:
"""Initialize an instance of the Throttle class.
Args:
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of
requests specified in requests can be made.
"""
super().__init__(requests=requests,
seconds=seconds)
####################################################################
# repr
####################################################################
def __repr__(self) -> str:
"""Return a representation of the class.
Returns:
The representation as how the class is instantiated
        :Example: instantiate a throttle for 1 request every 2 seconds
>>> from scottbrian_throttle.throttle import Throttle
>>> request_throttle = ThrottleSync(requests=1,
... seconds=2)
>>> repr(request_throttle)
'ThrottleSync(requests=1, seconds=2.0)'
"""
if TYPE_CHECKING:
__class__: Type[ThrottleSync]
classname = self.__class__.__name__
parms = (f'requests={self.requests}, '
f'seconds={float(self.seconds)}')
return f'{classname}({parms})'
####################################################################
# send_request
####################################################################
def send_request(self,
func: Callable[..., Any],
*args: Any,
**kwargs: Any
) -> Any:
"""Send the request.
Args:
func: the request function to be run
args: the request function positional arguments
kwargs: the request function keyword arguments
Returns:
The return code from the request function (may be None)
Raises:
Exception: An exception occurred in the request target. It
will be logged and re-raised.
"""
################################################################
# SYNC_MODE
################################################################
################################################################
# The SYNC_MODE Throttle algorithm works as follows:
# 1) during throttle instantiation:
# a) a target interval is calculated as seconds/requests.
# For example, with a specification of 4 requests per 1
# second, the target interval will be 0.25 seconds.
# b) _next_target_time is set to a current time reference via
# time.perf_counter_ns
# 2) as each request arrives, it is checked against the
# _next_target_time and:
# a) if it arrived at or after _next_target_time, it is
# allowed to proceed without delay
# b) if it arrived before the _next_target_time the request
# is delayed until _next_target_time is reached
# 3) _next_target_time is increased by the target_interval
#
################################################################
with self.sync_lock:
# set the time that this request is being made
self._arrival_time = time.perf_counter_ns()
if self._arrival_time < self._next_target_time:
wait_time = (self._next_target_time
- self._arrival_time) * Throttle.NS_2_SECS
self.pauser.pause(wait_time)
############################################################
# Update the expected arrival time for the next request by
# adding the request interval to our current time or the
# next arrival time, whichever is later. Note that we update
# the target time before we send the request which means we
# face a possible scenario where we send a request that gets
            # delayed en route to the service, but our next request
# arrives at the updated expected arrival time and is sent
# out immediately, but it now arrives early relative to the
# previous request, as observed by the service. If we update
# the target time after sending the request we avoid that
# scenario, but we would then be adding in the request
# processing time to the throttle delay with the undesirable
# effect that all requests will now be throttled more than
# they need to be.
############################################################
self._next_target_time = (time.perf_counter_ns()
+ self._target_interval_ns)
############################################################
# Call the request function and return with the request
# return value. We use try/except to log and re-raise any
# unhandled errors.
############################################################
try:
return func(*args, **kwargs)
except Exception as e:
self.logger.debug('throttle send_request unhandled '
f'exception in request: {e}')
raise
########################################################################
# Throttle class
########################################################################
class ThrottleSyncEc(ThrottleSync):
"""Throttle class with early count algo."""
__slots__ = ('early_count', '_early_arrival_count')
####################################################################
# __init__
####################################################################
def __init__(self, *,
requests: int,
seconds: IntFloat,
early_count: int
) -> None:
"""Initialize an instance of the early count Throttle class.
Args:
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of
requests specified in requests can be made.
early_count: Specifies the number of requests that are
allowed to proceed immediately without delay
for **mode=Throttle.MODE_SYNC_EC**.
Note that a specification of 0 for the
*early_count* will result in the same
behavior as if **mode=Throttle.MODE_SYNC**
had been chosen.
Raises:
IncorrectEarlyCountSpecified: *early_count* must be an
integer greater than zero.
"""
################################################################
# early_count
################################################################
super().__init__(requests=requests,
seconds=seconds)
if isinstance(early_count, int) and (0 < early_count):
self.early_count = early_count
else:
raise IncorrectEarlyCountSpecified('early_count must be '
                                               'an integer greater '
'than zero.')
################################################################
# Set remainder of vars
################################################################
self._early_arrival_count = 0
####################################################################
# repr
####################################################################
def __repr__(self) -> str:
"""Return a representation of the class.
Returns:
The representation as how the class is instantiated
:Example: instantiate a throttle for 2 requests per second
>>> from scottbrian_throttle.throttle import Throttle
>>> request_throttle = ThrottleSyncEc(requests=2,
... seconds=1,
... early_count=3)
>>> repr(request_throttle)
'ThrottleSyncEc(requests=2, seconds=1.0, early_count=3)'
.. # noqa: W505, E501
"""
if TYPE_CHECKING:
__class__: Type[ThrottleSyncEc]
classname = self.__class__.__name__
parms = (f'requests={self.requests}, '
f'seconds={float(self.seconds)}, '
f'early_count={self.early_count}')
return f'{classname}({parms})'
####################################################################
# send_request
####################################################################
def send_request(self,
func: Callable[..., Any],
*args: Any,
**kwargs: Any
) -> Any:
"""Send the request.
Args:
func: the request function to be run
args: the request function positional arguments
kwargs: the request function keyword arguments
Returns:
The return code from the request function (may be None)
Raises:
Exception: An exception occurred in the request target. It
will be logged and re-raised.
"""
################################################################
# SYNC_MODE_EC
################################################################
################################################################
# The SYNC_MODE_EC (sync mode with early count) Throttle
# algorithm works as follows:
# 1) during throttle instantiation:
# a) a target interval is calculated as seconds/requests.
# For example, with a specification of 4 requests per 1
# second, the target interval will be 0.25 seconds.
# b) _next_target_time is set to a current time reference via
# time.perf_counter_ns
# c) the specified early_count is saved
# d) _early_arrival_count is set to zero
# 2) as each request arrives, it is checked against the
# _next_target_time and:
# a) if it arrived at or after _next_target_time, it is
# allowed to proceed without delay and the
# _early_arrival_count is reset
# b) if it arrived before the _next_target_time, the
# _early_arrival_count is increased by 1 and:
# 1) if _early_arrival_count is less than or equal to
# early_count, the request is allowed to proceed
# without delay
# 2) if _early_arrival_count is greater than early_count,
# _early_arrival_count is reset and the request is
# delayed until _next_target_time is reached
# 3) _next_target_time is increased by the target_interval
#
# Note that as each request is sent, the _next_target_time is
# increased. This means that once the early count is exhausted,
# the next request will be delayed for the sum of target
# intervals of the requests that were sent without delay. This
# allows short bursts of requests to go immediately while also
        # ensuring that the average interval is not less than the
        # target interval.
################################################################
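        ################################################################
        # Worked example (illustrative, with assumed values): with
        # requests=1, seconds=1 (target interval 1 second) and
        # early_count=2, four requests arriving at t=0.0, 0.1, 0.2 and
        # 0.3 behave as follows:
        #   - t=0.0: on time, sent immediately; next target t=1.0
        #   - t=0.1: early, count=1 <= 2, sent immediately; next
        #     target t=2.0
        #   - t=0.2: early, count=2 <= 2, sent immediately; next
        #     target t=3.0
        #   - t=0.3: early, count=3 > 2, so the count is reset and the
        #     request is paused until t=3.0, restoring the average
        #     interval
        ################################################################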
with self.sync_lock:
# set the time that this request is being made
self._arrival_time = time.perf_counter_ns()
if self._next_target_time <= self._arrival_time:
self._early_arrival_count = 0
else:
self._early_arrival_count += 1
if self.early_count < self._early_arrival_count:
self._early_arrival_count = 0 # reset the count
                    # delay the request until _next_target_time is
                    # reached
wait_time = (self._next_target_time
- self._arrival_time) * Throttle.NS_2_SECS
self.pauser.pause(wait_time)
############################################################
# Update the expected arrival time for the next request by
# adding the request interval to our current time or the
# next arrival time, whichever is later. Note that we update
# the target time before we send the request which means we
# face a possible scenario where we send a request that gets
            # delayed en route to the service, but our next request
# arrives at the updated expected arrival time and is sent
# out immediately, but it now arrives early relative to the
# previous request, as observed by the service. If we update
# the target time after sending the request we avoid that
# scenario, but we would then be adding in the request
# processing time to the throttle delay with the undesirable
# effect that all requests will now be throttled more than
# they need to be.
############################################################
self._next_target_time = (max(float(time.perf_counter_ns()),
self._next_target_time
)
+ self._target_interval_ns)
############################################################
# Call the request function and return with the request
# return value. We use try/except to log and re-raise any
# unhandled errors.
############################################################
try:
return func(*args, **kwargs)
except Exception as e:
self.logger.debug('throttle send_request unhandled '
f'exception in request: {e}')
raise
########################################################################
# Throttle class
########################################################################
class ThrottleSyncLb(ThrottleSync):
"""Throttle class with leaky bucket algo."""
__slots__ = ('lb_threshold', '_next_target_time', 'lb_adjustment',
'lb_adjustment_ns')
####################################################################
# __init__
####################################################################
def __init__(self, *,
requests: int,
seconds: IntFloat,
lb_threshold: IntFloat
) -> None:
"""Initialize an instance of the leaky bucket Throttle class.
Args:
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of
requests specified in requests can be made.
lb_threshold: Specifies the threshold for the leaky bucket
when Throttle.MODE_SYNC_LB is specified for
mode. This is the number of requests that
can be in the bucket such that the next
request is allowed to proceed without delay.
That request is added to the bucket, and
then the bucket leaks out the requests.
When the next request arrives, it will be
delayed by whatever amount of time is
needed for the bucket to have leaked enough
to be at the threshold. A specification of
zero for the lb_threshold will effectively
cause all requests that are early to be
delayed.
Raises:
IncorrectLbThresholdSpecified: *lb_threshold* must be an
integer or float greater than zero.
"""
################################################################
# lb_threshold
################################################################
super().__init__(requests=requests,
seconds=seconds)
if (isinstance(lb_threshold, (int, float))
and (0 < lb_threshold)):
self.lb_threshold = float(lb_threshold)
else:
raise IncorrectLbThresholdSpecified(
'lb_threshold must be an integer or float greater than '
'zero.')
################################################################
# Set remainder of vars
################################################################
self.lb_adjustment: float = max(0.0,
(self._target_interval
* self.lb_threshold)
- self._target_interval)
self.lb_adjustment_ns: float = self.lb_adjustment * Throttle.SECS_2_NS
# adjust _next_target_time for lb algo
self._next_target_time = time.perf_counter_ns() - self.lb_adjustment_ns
####################################################################
# repr
####################################################################
def __repr__(self) -> str:
"""Return a representation of the class.
Returns:
The representation as how the class is instantiated
:Example: instantiate a throttle for 20 requests per 1/2 minute
>>> from scottbrian_throttle.throttle import Throttle
>>> request_throttle = ThrottleSyncLb(requests=20,
... seconds=30,
... lb_threshold=4)
>>> repr(request_throttle)
'ThrottleSyncLb(requests=20, seconds=30.0, lb_threshold=4.0)'
.. # noqa: W505, E501
"""
if TYPE_CHECKING:
__class__: Type[ThrottleSyncLb]
classname = self.__class__.__name__
parms = (f'requests={self.requests}, '
f'seconds={float(self.seconds)}, '
f'lb_threshold={self.lb_threshold}')
return f'{classname}({parms})'
####################################################################
# MODE_SYNC_LB send_request
####################################################################
def send_request(self,
func: Callable[..., Any],
*args: Any,
**kwargs: Any
) -> Any:
"""Send the request.
Args:
func: the request function to be run
args: the request function positional arguments
kwargs: the request function keyword arguments
Returns:
The return value from the request function (perhaps None)
Raises:
Exception: An exception occurred in the request target. It
will be logged and re-raised.
"""
################################################################
# Leaky Bucket
################################################################
with self.sync_lock:
# set the time that this request is being made
self._arrival_time = time.perf_counter_ns()
############################################################
            # The leaky bucket algorithm uses a virtual bucket into which
# arriving requests are placed. As time progresses, the
# bucket leaks the requests out at the rate of the target
# interval. If the bucket has room for an arriving request,
# the request is placed into the bucket and is sent
# immediately. If, instead, the bucket does not have room
# for the request, the request is delayed until the bucket
# has leaked enough of the preceding requests such that the
# new request can fit and be sent. The effect of the bucket
# is to allow a burst of requests to be sent immediately at
# a faster rate than the target interval, acting as a
# shock absorber to the flow of traffic. The number of
# requests allowed to go immediately is controlled by the
# size of the bucket which in turn is specified by the
# lb_threshold argument when the throttle is instantiated.
#
# Note that by allowing short bursts to go immediately,
# the overall effect is that the average interval will be
# less than the target interval.
#
# The actual implementation does not employ a bucket, but
# instead sets a target time for the next request by adding
# the target interval and subtracting the size of the
# bucket. This has the effect of making it appear as if
# requests are arriving after the target time and are thus
# in compliance with the target interval, but eventually
# the next target time will exceed the size of the bucket
            # and requests will get delayed, allowing the target time
# to catch up.
############################################################
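            ############################################################
            # Worked example (illustrative, with assumed values): with
            # requests=1, seconds=1 (target interval 1 second) and
            # lb_threshold=3, the adjustment is 2 seconds. A burst of
            # four requests arriving at roughly t=0 behaves as
            # follows: the first three fit in the bucket and are sent
            # immediately, while the fourth is paused until about
            # t=1.0, when the bucket has leaked enough to accept it.
            ############################################################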
if self._arrival_time < self._next_target_time:
wait_time = (self._next_target_time
- self._arrival_time) * Throttle.NS_2_SECS
self.pauser.pause(wait_time)
############################################################
# Update the expected arrival time for the next request by
# adding the request interval to our current time or the
# next arrival time, whichever is later. Note that we update
# the target time before we send the request which means we
# face a possible scenario where we send a request that gets
            # delayed en route to the service, but our next request
# arrives at the updated expected arrival time and is sent
# out immediately, but it now arrives early relative to the
# previous request, as observed by the service. If we update
# the target time after sending the request we avoid that
# scenario, but we would then be adding in the request
# processing time to the throttle delay with the undesirable
# effect that all requests will now be throttled more than
# they need to be.
############################################################
self._next_target_time = (max(float(time.perf_counter_ns()),
self._next_target_time
+ self.lb_adjustment_ns
)
- self.lb_adjustment_ns
+ self._target_interval_ns)
############################################################
# Call the request function and return with the request
# return value. We use try/except to log and re-raise any
# unhandled errors.
############################################################
try:
return func(*args, **kwargs)
except Exception as e:
self.logger.debug('throttle send_request unhandled '
f'exception in request: {e}')
raise
########################################################################
# Throttle class
########################################################################
class ThrottleAsync(Throttle):
"""An asynchronous throttle mechanism."""
__slots__ = ('async_q_size', 'shutdown_lock', '_shutdown',
'do_shutdown', 'hard_shutdown_initiated',
'_check_async_q_time', '_check_async_q_time2',
'shutdown_start_time', 'shutdown_elapsed_time',
'async_q', 'request_scheduler_thread')
####################################################################
# __init__
####################################################################
def __init__(self, *,
requests: int,
seconds: IntFloat,
async_q_size: Optional[int] = None,
) -> None:
"""Initialize an instance of the ThrottleAsync class.
Args:
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of
requests specified in requests can be made.
async_q_size: Specifies the size of the request
queue for async requests. When the request
queue is totally populated, any additional
calls to send_request will be delayed
until queued requests are removed and
scheduled. The default is 4096 requests.
Raises:
IncorrectAsyncQSizeSpecified: *async_q_size* must be an
integer greater than zero.
"""
################################################################
# States and processing for mode Throttle.MODE_ASYNC:
#
# The Throttle is initialized with an empty async_q and the
# scheduler thread is started and ready to receive work. The
# starting state is 'active'.
#
# 1) state: active
# a) send_request called (directly or via decorated func
# call):
# 1) request is queued to the async_q
# 2) state remains 'active'
# b) start_shutdown called:
# 1) state is changed to 'shutdown'
# 2) Any new requests are rejected. For "soft"
# shutdown, scheduler schedules the remaining requests
# currently queued on the async_q with the normal
# interval. With "hard" shutdown, the scheduler
# removes and discards the requests on the async_q.
# 3) scheduler exits
# 4) control returns after scheduler thread returns
# 2) state: shutdown
# a) send_request called (directly or via decorated func
# call):
# 1) request is ignored (i.e, not queued to async_q)
# b) start_shutdown called (non-decorator only):
# 1) state remains 'shutdown'
# 2) control returns immediately
################################################################
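        ################################################################
        # Typical lifecycle (illustrative sketch; fetch and
        # uris_to_fetch are assumed names, not part of this module):
        #     a_throttle = ThrottleAsync(requests=5, seconds=1)
        #     for uri in uris_to_fetch:
        #         a_throttle.send_request(fetch, uri)
        #     a_throttle.start_shutdown()
        ################################################################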
################################################################
# async_q_size
################################################################
super().__init__(requests=requests,
seconds=seconds)
if async_q_size is not None:
if (isinstance(async_q_size, int) and
(0 < async_q_size)):
self.async_q_size = async_q_size
else:
raise IncorrectAsyncQSizeSpecified('async_q_size '
'must be an '
                                                   'integer greater '
'than zero.')
else:
self.async_q_size = Throttle.DEFAULT_ASYNC_Q_SIZE
################################################################
# Set remainder of vars
################################################################
self.shutdown_lock = threading.Lock()
self._shutdown = False
self.do_shutdown = Throttle.TYPE_SHUTDOWN_NONE
self.hard_shutdown_initiated = False
self._check_async_q_time = 0.0
self._check_async_q_time2 = 0.0
self.shutdown_start_time = 0.0
self.shutdown_elapsed_time = 0.0
self.async_q: queue.Queue[Throttle.Request] = queue.Queue(
maxsize=self.async_q_size)
self.request_scheduler_thread: threading.Thread = threading.Thread(
target=self.schedule_requests)
self.request_scheduler_thread.start()
####################################################################
# len
####################################################################
def __len__(self) -> int:
"""Return the number of items in the async_q.
Returns:
The number of entries in the async_q as an integer
        Calls to send_request add request items to the async_q
for mode Throttle.MODE_ASYNC. The request items are
eventually removed and scheduled. The len of Throttle is the
number of request items on the async_q when the len function
is called. Note that the returned queue size is the approximate
size as described in the documentation for the python threading
queue.
:Example: instantiate a throttle for 1 request per second
>>> from scottbrian_throttle.throttle import Throttle
>>> import time
>>> def my_request():
... pass
>>> request_throttle = ThrottleAsync(requests=1,
... seconds=1)
>>> for i in range(3): # quickly queue up 3 items
... _ = request_throttle.send_request(my_request)
>>> time.sleep(0.5) # allow first request to be dequeued
>>> print(len(request_throttle))
2
>>> request_throttle.start_shutdown()
"""
return self.async_q.qsize()
####################################################################
# repr
####################################################################
def __repr__(self) -> str:
"""Return a representation of the class.
Returns:
The representation as how the class is instantiated
        :Example: instantiate a throttle for 30 requests per 1/2 minute
>>> from scottbrian_throttle.throttle import Throttle
>>> request_throttle = ThrottleAsync(requests=30,
... seconds=30)
>>> repr(request_throttle)
'ThrottleAsync(requests=30, seconds=30.0, async_q_size=4096)'
>>> request_throttle.start_shutdown()
"""
if TYPE_CHECKING:
__class__: Type[ThrottleAsync]
classname = self.__class__.__name__
parms = (f'requests={self.requests}, '
f'seconds={float(self.seconds)}, '
f'async_q_size={self.async_q_size}')
return f'{classname}({parms})'
####################################################################
# ASYNC_MODE send_request
####################################################################
def send_request(self,
func: Callable[..., Any],
*args: Any,
**kwargs: Any
) -> int:
"""Send the request.
Args:
func: the request function to be run
args: the request function positional arguments
kwargs: the request function keyword arguments
Returns:
* ``Throttle.RC_OK`` (0) request scheduled
* ``Throttle.RC_SHUTDOWN`` (4) - the request was rejected
because the throttle was shut down.
"""
if self._shutdown:
return Throttle.RC_SHUTDOWN
# TODO: use se_lock
# We obtain the shutdown lock to protect against the following
# scenario:
# 1) send_request is entered for async mode and sees at
# the while statement that we are *not* in shutdown
# 2) send_request proceeds to the try statement just
# before the request will be queued to the async_q
        #  3) shutdown is requested and is detected by
        #     schedule_requests
        #  4) schedule_requests cleans up the async_q and exits
        #  5) back here in send_request, we put our request on the
        #     async_q - this request will never be processed
with self.shutdown_lock:
request_item = Throttle.Request(func,
args,
kwargs,
time.perf_counter_ns())
while not self._shutdown:
try:
self.async_q.put(request_item,
block=True,
timeout=0.5)
return Throttle.RC_OK
except queue.Full:
continue # no need to wait since we already did
return Throttle.RC_SHUTDOWN
####################################################################
# schedule_requests
####################################################################
def schedule_requests(self) -> None:
"""Get tasks from queue and run them.
Raises:
Exception: re-raise any throttle schedule_requests unhandled
exception in request
"""
# Requests will be scheduled from the async_q at the interval
# calculated from the requests and seconds arguments when the
# throttle was instantiated. If shutdown is indicated,
# the async_q will be cleaned up with any remaining requests
# either processed (Throttle.TYPE_SHUTDOWN_SOFT) or dropped
# (Throttle.TYPE_SHUTDOWN_HARD). Note that async_q.get will only
# wait for a second to allow us to detect shutdown in a timely
# fashion.
while True:
try:
request_item = self.async_q.get(block=True,
timeout=1)
self._next_target_time = (time.perf_counter_ns()
+ self._target_interval_ns)
except queue.Empty:
if self.do_shutdown != Throttle.TYPE_SHUTDOWN_NONE:
return
continue # no need to wait since we already did
############################################################
# Call the request function.
# We use try/except to log and re-raise any unhandled
# errors.
############################################################
try:
if self.do_shutdown != Throttle.TYPE_SHUTDOWN_HARD:
self._arrival_time = request_item.arrival_time
request_item.request_func(*request_item.args,
**request_item.kwargs)
except Exception as e:
self.logger.debug('throttle schedule_requests unhandled '
f'exception in request: {e}')
raise
############################################################
# wait (i.e., throttle)
# Note that the wait time could be anywhere from a fraction
# of a second to several seconds. We want to be responsive
# in case we need to bail for shutdown, so we wait in 1
# second or fewer increments and bail if we detect shutdown.
############################################################
while True:
# handle shutdown
if self.do_shutdown != Throttle.TYPE_SHUTDOWN_NONE:
if self.async_q.empty():
return # we are done with shutdown
if self.do_shutdown == Throttle.TYPE_SHUTDOWN_HARD:
break # don't sleep for hard shutdown
# Use min to ensure we don't sleep too long and appear
# slow to respond to a shutdown request
sleep_seconds = (self._next_target_time
- time.perf_counter_ns()) * Throttle.NS_2_SECS
if sleep_seconds > 0: # if still time to go
self.pauser.pause(min(1.0, sleep_seconds))
else: # we are done sleeping
break
####################################################################
# start_shutdown
####################################################################
def start_shutdown(self,
shutdown_type: int = Throttle.TYPE_SHUTDOWN_SOFT,
timeout: OptIntFloat = None
) -> bool:
"""Shutdown the async throttle request scheduling.
Shutdown is used to stop and clean up any pending requests on
the async request queue for a throttle created with
mode Throttle.MODE_ASYNC. This should be done during normal
application shutdown or when an error occurs. Once the throttle
has completed shutdown it can no longer be used. If a throttle
is once again needed after shutdown, a new one will need to be
instantiated to replace the old one.
Note that a soft shutdown can be started and eventually be
followed by a hard shutdown to force shutdown to complete
quickly. A hard shutdown, however, can not be followed by a
soft shutdown since there is no way to retrieve and run any
of the requests that were already removed and tossed by the
hard shutdown.
Args:
shutdown_type: specifies whether to do a soft or a hard
shutdown:
* A soft shutdown
(Throttle.TYPE_SHUTDOWN_SOFT),
the default, stops any additional
requests from being queued and cleans up
the request queue by scheduling any
remaining requests at the normal interval
as calculated by the *seconds* and
*requests* arguments specified during
throttle instantiation.
* A hard shutdown
(Throttle.TYPE_SHUTDOWN_HARD) stops any
additional requests from being queued and
cleans up the request queue by quickly
removing any remaining requests without
executing them.
timeout: number of seconds to allow for shutdown to
complete. If the shutdown times out, control is
returned with a return value of False. The
shutdown will continue and a subsequent call to
start_shutdown, with or without a timeout value,
may eventually return control with a return value
of True to indicate that the shutdown has
completed. Note that a *timeout* value of zero or
                 less is handled as if a *timeout* of None had been
                 specified, whether explicitly or by default, in
                 which case the shutdown will not time out and
                 control will be returned if and when the shutdown
completes. A very small value, such as 0.001,
can be used to start the shutdown and then get
back control to allow other cleanup activities
to be performed and eventually issue a second
shutdown request to ensure that it is completed.
.. # noqa: DAR101
Returns:
* ``True`` if *timeout* was not specified, or if it was
specified and the ``start_shutdown()`` request completed
within the specified number of seconds.
* ``False`` if *timeout* was specified and the
``start_shutdown()`` request did not complete within the
specified number of seconds, or a soft shutdown was
terminated by a hard shutdown.
Raises:
IllegalSoftShutdownAfterHard: A shutdown with shutdown_type
Throttle.TYPE_SHUTDOWN_SOFT was requested after a
shutdown with shutdown_type Throttle.TYPE_SHUTDOWN_HARD
had already been initiated. Once a hard shutdown has
been initiated, a soft shutdown is not allowed.
IncorrectShutdownTypeSpecified: For start_shutdown,
*shutdownType* must be specified as either
Throttle.TYPE_SHUTDOWN_SOFT or
Throttle.TYPE_SHUTDOWN_HARD
"""
if shutdown_type not in (Throttle.TYPE_SHUTDOWN_SOFT,
Throttle.TYPE_SHUTDOWN_HARD):
raise IncorrectShutdownTypeSpecified(
'For start_shutdown, shutdownType must be specified as '
'either Throttle.TYPE_SHUTDOWN_SOFT or '
'Throttle.TYPE_SHUTDOWN_HARD')
################################################################
# We are good to go for shutdown
################################################################
self._shutdown = True # tell send_request to reject requests
# There is only one shutdown per throttle instantiation, so we
# will capture the shutdown length of time starting with the
# first shutdown request. Any subsequent shutdown requests will
# not affect the total shutdown time.
if self.shutdown_start_time == 0.0:
self.shutdown_start_time = time.time()
# We use the shutdown lock to block us until any in progress
# send_requests are complete
# TODO: use se_lock
with self.shutdown_lock:
# It is OK to start a soft shutdown and follow that with
# a hard shutdown, but not OK to start a hard shutdown
# and then follow that with a soft shutdown. The reason is
# that a soft shutdown finishes the queued requests while
# also doing the throttling, meaning that a soft shutdown
# is done when the queued requests are important and must be
# done. Following a soft shutdown with a hard shutdown
# would indicate that the soft shutdown was taking too long
# and there was a decision to end it with the hard shutdown
# for the more dire need to bring the system down quickly.
# A hard shutdown, on the other hand, is initially
# done when the requests are not required to complete. So,
# following a hard shutdown with a soft shutdown would
# indicate conflict, and in this case it will be impossible
# to retrieve the requests that have already been tossed.
# We tell the caller via the exception that the soft request
# after a hard request is a conflict that may not have been
# intended.
if shutdown_type == Throttle.TYPE_SHUTDOWN_HARD:
self.hard_shutdown_initiated = True
# if soft shutdown in progress
if self.do_shutdown == Throttle.TYPE_SHUTDOWN_SOFT:
self.logger.debug('Hard shutdown request detected soft '
'shutdown in progress - soft shutdown '
'will terminate.')
elif self.hard_shutdown_initiated: # soft after hard
raise IllegalSoftShutdownAfterHard(
'A shutdown with shutdown_type '
'Throttle.TYPE_SHUTDOWN_SOFT was requested after a '
'shutdown with shutdown_type '
'Throttle.TYPE_SHUTDOWN_HARD had already been '
'initiated. Once a hard shutdown has been '
'initiated, a soft shutdown is not allowed.')
# now that we are OK with the shutdown type, set do_shutdown
# with the type of shutdown to tell the schedule_requests
# method how to handle the queued requests (toss for hard
# shutdown, complete normally with throttling for soft
# shutdown)
self.do_shutdown = shutdown_type
# join the schedule_requests thread to wait for the shutdown
if timeout and (timeout > 0):
self.request_scheduler_thread.join(timeout=timeout)
if self.request_scheduler_thread.is_alive():
self.logger.debug('start_shutdown request timed out '
f'with {timeout=:.4f}')
return False # we timed out
else:
self.request_scheduler_thread.join()
with self.shutdown_lock:
if (shutdown_type == Throttle.TYPE_SHUTDOWN_SOFT
and self.hard_shutdown_initiated):
self.logger.debug('Soft shutdown request detected hard '
'shutdown initiated - soft shutdown '
'returning False.')
return False # the soft shutdown was terminated
# indicate shutdown no longer in progress
self.do_shutdown = Throttle.TYPE_SHUTDOWN_NONE
if self.shutdown_elapsed_time == 0.0:
self.shutdown_elapsed_time = (time.time()
- self.shutdown_start_time)
self.logger.debug('start_shutdown request successfully completed '
f'in {self.shutdown_elapsed_time:.4f} seconds')
return True # shutdown was successful
########################################################################
# Pie Throttle Decorator
########################################################################
F = TypeVar('F', bound=Callable[..., Any])
########################################################################
# FuncWithThrottleSyncAttr class
########################################################################
class FuncWithThrottleSyncAttr(Protocol[F]):
"""Class to allow type checking on function with attribute."""
throttle: ThrottleSync
__call__: F
def add_throttle_sync_attr(func: F) -> FuncWithThrottleSyncAttr[F]:
"""Wrapper to add throttle attribute to function.
Args:
func: function that has the attribute added
Returns:
input function with throttle attached as attribute
"""
return cast(FuncWithThrottleSyncAttr[F], func)
########################################################################
# FuncWithThrottleSyncEcAttr class
########################################################################
class FuncWithThrottleSyncEcAttr(Protocol[F]):
"""Class to allow type checking on function with attribute."""
throttle: ThrottleSyncEc
__call__: F
def add_throttle_sync_ec_attr(func: F) -> FuncWithThrottleSyncEcAttr[F]:
"""Wrapper to add throttle attribute to function.
Args:
func: function that has the attribute added
Returns:
input function with throttle attached as attribute
"""
return cast(FuncWithThrottleSyncEcAttr[F], func)
########################################################################
# FuncWithThrottleSyncLbAttr class
########################################################################
class FuncWithThrottleSyncLbAttr(Protocol[F]):
"""Class to allow type checking on function with attribute."""
throttle: ThrottleSyncLb
__call__: F
def add_throttle_sync_lb_attr(func: F) -> FuncWithThrottleSyncLbAttr[F]:
"""Wrapper to add throttle attribute to function.
Args:
func: function that has the attribute added
Returns:
input function with throttle attached as attribute
"""
return cast(FuncWithThrottleSyncLbAttr[F], func)
########################################################################
# FuncWithThrottleAsyncAttr class
########################################################################
class FuncWithThrottleAsyncAttr(Protocol[F]):
"""Class to allow type checking on function with attribute."""
throttle: ThrottleAsync
__call__: F
def add_throttle_async_attr(func: F) -> FuncWithThrottleAsyncAttr[F]:
"""Wrapper to add throttle attribute to function.
Args:
func: function that has the attribute added
Returns:
input function with throttle attached as attribute
"""
return cast(FuncWithThrottleAsyncAttr[F], func)
########################################################################
# @throttle_sync
########################################################################
@overload
def throttle_sync(wrapped: F, *,
requests: int,
seconds: IntFloat
) -> FuncWithThrottleSyncAttr[F]:
pass
@overload
def throttle_sync(*,
requests: int,
seconds: IntFloat
) -> Callable[[F], FuncWithThrottleSyncAttr[F]]:
pass
def throttle_sync(wrapped: Optional[F] = None, *,
requests: int,
seconds: Any
) -> Union[F, FuncWithThrottleSyncAttr[F]]:
"""Decorator to wrap a function in a sync throttle.
The throttle wraps code around a function that is typically used to
issue requests to an online service. Some services state a limit as
to how many requests can be made per some time interval (e.g., 3
requests per second). The throttle code ensures that the limit is
not exceeded.
Args:
wrapped: Any callable function that accepts optional positional
and/or optional keyword arguments, and optionally
returns a value. The default is None, which will be
the case when the pie decorator version is used with
any of the following arguments specified.
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of requests
specified in requests can be made.
Returns:
        A callable function that delays the request as needed in
        accordance with the specified limits.
    :Example: wrap a function with a sync throttle for 1 request
per second
>>> from scottbrian_throttle.throttle import Throttle
>>> @throttle_sync(requests=1, seconds=1)
... def f1() -> None:
... print('example 1 request function')
"""
# ==================================================================
# The following code covers cases where throttle is used with or
# without the pie character, where the decorated function has or
# does not have parameters.
#
# Here's an example of throttle with a function that has no
# args:
# @throttle(requests=1, seconds=1, mode=Throttle.MODE_SYNC)
# def aFunc():
# print('42')
#
# This is what essentially happens under the covers:
# def aFunc():
# print('42')
# aFunc = throttle(requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)(aFunc)
#
# The call to throttle results in a function being returned that
# takes as its first argument the aFunc specification that we
# see in parens immediately following the throttle call.
#
# Note that we can also code the above as shown and get the same
# result.
#
# Also, we can code the following and get the same result:
# def aFunc():
# print('42')
# aFunc = throttle(aFunc,
# requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)
#
# What happens is throttle gets control and tests whether aFunc
# was specified, and if not returns a call to functools.partial
# which is the function that accepts the aFunc
# specification and then calls throttle with aFunc as the first
# argument with the other args for requests, seconds, and mode).
#
# One other complication is that we are also using the
# wrapt.decorator for the inner wrapper function which does some
# more smoke and mirrors to ensure introspection will work as
# expected.
# ==================================================================
if wrapped is None:
return cast(FuncWithThrottleSyncAttr[F],
functools.partial(throttle_sync,
requests=requests,
seconds=seconds))
a_throttle_sync = ThrottleSync(requests=requests,
seconds=seconds)
@decorator # type: ignore
def wrapper(func_to_wrap: F, instance: Optional[Any],
args: tuple[Any, ...],
kwargs2: dict[str, Any]) -> Any:
return a_throttle_sync.send_request(func_to_wrap,
*args,
**kwargs2)
wrapper = wrapper(wrapped)
wrapper = add_throttle_sync_attr(wrapper)
wrapper.throttle = a_throttle_sync
return cast(FuncWithThrottleSyncAttr[F], wrapper)
########################################################################
# @throttle_sync_ec
########################################################################
@overload
def throttle_sync_ec(wrapped: F, *,
requests: int,
seconds: IntFloat,
early_count: int
) -> FuncWithThrottleSyncEcAttr[F]:
pass
@overload
def throttle_sync_ec(*,
requests: int,
seconds: IntFloat,
early_count: int
) -> Callable[[F], FuncWithThrottleSyncEcAttr[F]]:
pass
def throttle_sync_ec(wrapped: Optional[F] = None, *,
requests: int,
seconds: Any, # : IntFloat,
early_count: int
) -> Union[F, FuncWithThrottleSyncEcAttr[F]]:
"""Decorator to wrap a function in a sync ec throttle.
The throttle wraps code around a function that is typically used to
issue requests to an online service. Some services state a limit as
to how many requests can be made per some time interval (e.g., 3
requests per second). The throttle code ensures that the limit is
not exceeded.
Args:
wrapped: Any callable function that accepts optional positional
and/or optional keyword arguments, and optionally
returns a value. The default is None, which will be
the case when the pie decorator version is used with
any of the following arguments specified.
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of requests
specified in requests can be made.
early_count: Specifies the number of requests that are allowed
to proceed that arrive earlier than the
allowed interval. The count of early requests
is incremented, and when it exceeds the
early_count, the request will be delayed to
align it with its expected arrival time. Any
request that arrives at or beyond the
allowed interval will cause the count to be
                     reset (including the request that was delayed
since it will now be sent at the allowed
interval). A specification of zero for the
*early_count* will effectively cause all requests
that are early to be delayed.
Returns:
        A callable function that delays the request as needed in
        accordance with the specified limits, using the early count
        algorithm.
    :Example: wrap a function with a throttle for 5 requests per 2
minutes using the early count algo
>>> from scottbrian_throttle.throttle import Throttle
>>> @throttle_sync_ec(requests=5,
... seconds=120,
... early_count=3)
... def f3(b=3) -> int:
... print(f'example 3 request function with arg {b}')
... return b * 5
"""
# ==================================================================
# The following code covers cases where throttle is used with or
# without the pie character, where the decorated function has or
# does not have parameters.
#
# Here's an example of throttle with a function that has no
# args:
# @throttle(requests=1, seconds=1, mode=Throttle.MODE_SYNC)
# def aFunc():
# print('42')
#
# This is what essentially happens under the covers:
# def aFunc():
# print('42')
# aFunc = throttle(requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)(aFunc)
#
# The call to throttle results in a function being returned that
# takes as its first argument the aFunc specification that we
# see in parens immediately following the throttle call.
#
# Note that we can also code the above as shown and get the same
# result.
#
# Also, we can code the following and get the same result:
# def aFunc():
# print('42')
# aFunc = throttle(aFunc,
# requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)
#
# What happens is throttle gets control and tests whether aFunc
# was specified, and if not returns a call to functools.partial
# which is the function that accepts the aFunc
# specification and then calls throttle with aFunc as the first
# argument with the other args for requests, seconds, and mode).
#
# One other complication is that we are also using the
# wrapt.decorator for the inner wrapper function which does some
# more smoke and mirrors to ensure introspection will work as
# expected.
# ==================================================================
if wrapped is None:
return cast(FuncWithThrottleSyncEcAttr[F],
functools.partial(throttle_sync_ec,
requests=requests,
seconds=seconds,
early_count=early_count))
a_throttle_sync_ec = ThrottleSyncEc(requests=requests,
seconds=seconds,
early_count=early_count)
@decorator # type: ignore
def wrapper(func_to_wrap: F, instance: Optional[Any],
args: tuple[Any, ...],
kwargs2: dict[str, Any]) -> Any:
return a_throttle_sync_ec.send_request(func_to_wrap,
*args,
**kwargs2)
wrapper = wrapper(wrapped)
wrapper = add_throttle_sync_ec_attr(wrapper)
wrapper.throttle = a_throttle_sync_ec
return cast(FuncWithThrottleSyncEcAttr[F], wrapper)
########################################################################
# @throttle_sync_lb
########################################################################
@overload
def throttle_sync_lb(wrapped: F, *,
requests: int,
seconds: IntFloat,
lb_threshold: float
) -> FuncWithThrottleSyncLbAttr[F]:
pass
@overload
def throttle_sync_lb(*,
requests: int,
seconds: IntFloat,
lb_threshold: float
) -> Callable[[F], FuncWithThrottleSyncLbAttr[F]]:
pass
def throttle_sync_lb(wrapped: Optional[F] = None, *,
requests: int,
seconds: Any, # : IntFloat,
lb_threshold: float
) -> Union[F, FuncWithThrottleSyncLbAttr[F]]:
"""Decorator to wrap a function in a sync lb throttle.
The throttle wraps code around a function that is typically used to
issue requests to an online service. Some services state a limit as
to how many requests can be made per some time interval (e.g., 3
requests per second). The throttle code ensures that the limit is
not exceeded.
Args:
wrapped: Any callable function that accepts optional positional
and/or optional keyword arguments, and optionally
returns a value. The default is None, which will be
the case when the pie decorator version is used with
any of the following arguments specified.
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of requests
specified in requests can be made.
lb_threshold: Specifies the threshold for the leaky bucket when
Throttle.MODE_SYNC_LB is specified for mode.
This is the number of requests that can be in
the bucket such that the next request is allowed
to proceed without delay. That request is
added to the bucket, and then the bucket leaks
out the requests. When the next request arrives,
it will be delayed by whatever amount of time is
needed for the bucket to have leaked enough to
be at the threshold. A specification of zero for
the *lb_threshold* will effectively cause all
requests that are early to be delayed.
Returns:
        A callable function that delays the request as needed in
        accordance with the specified limits, using the leaky bucket
        algorithm.
    :Example: wrap a function with a throttle for 5 requests per 2
        minutes using the leaky bucket algo
>>> from scottbrian_throttle.throttle import Throttle
>>> @throttle_sync_lb(requests=5,
... seconds=120,
... lb_threshold=5)
... def f4(a, *, b=4) -> int:
... print(f'example request function with args {a} and {b}')
... return b * 7
"""
# ==================================================================
# The following code covers cases where throttle is used with or
# without the pie character, where the decorated function has or
# does not have parameters.
#
# Here's an example of throttle with a function that has no
# args:
# @throttle(requests=1, seconds=1, mode=Throttle.MODE_SYNC)
# def aFunc():
# print('42')
#
# This is what essentially happens under the covers:
# def aFunc():
# print('42')
# aFunc = throttle(requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)(aFunc)
#
# The call to throttle results in a function being returned that
# takes as its first argument the aFunc specification that we
# see in parens immediately following the throttle call.
#
# Note that we can also code the above as shown and get the same
# result.
#
# Also, we can code the following and get the same result:
# def aFunc():
# print('42')
# aFunc = throttle(aFunc,
# requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)
#
# What happens is throttle gets control and tests whether aFunc
# was specified, and if not returns a call to functools.partial
# which is the function that accepts the aFunc
# specification and then calls throttle with aFunc as the first
# argument with the other args for requests, seconds, and mode).
#
# One other complication is that we are also using the
# wrapt.decorator for the inner wrapper function which does some
# more smoke and mirrors to ensure introspection will work as
# expected.
# ==================================================================
if wrapped is None:
return cast(FuncWithThrottleSyncLbAttr[F],
functools.partial(throttle_sync_lb,
requests=requests,
seconds=seconds,
lb_threshold=lb_threshold))
a_throttle_sync_lb = ThrottleSyncLb(requests=requests,
seconds=seconds,
lb_threshold=lb_threshold)
@decorator # type: ignore
def wrapper(func_to_wrap: F, instance: Optional[Any],
args: tuple[Any, ...],
kwargs2: dict[str, Any]) -> Any:
return a_throttle_sync_lb.send_request(func_to_wrap,
*args,
**kwargs2)
wrapper = wrapper(wrapped)
wrapper = add_throttle_sync_lb_attr(wrapper)
wrapper.throttle = a_throttle_sync_lb
return cast(FuncWithThrottleSyncLbAttr[F], wrapper)
########################################################################
# @throttle_async
########################################################################
@overload
def throttle_async(wrapped: F, *,
requests: int,
seconds: IntFloat,
async_q_size: Optional[int] = None
) -> FuncWithThrottleAsyncAttr[F]:
pass
@overload
def throttle_async(*,
requests: int,
seconds: IntFloat,
async_q_size: Optional[int] = None
) -> Callable[[F], FuncWithThrottleAsyncAttr[F]]:
pass
def throttle_async(wrapped: Optional[F] = None, *,
requests: int,
seconds: Any, # : IntFloat,
async_q_size: Optional[int] = None
) -> Union[F, FuncWithThrottleAsyncAttr[F]]:
"""Decorator to wrap a function in an async throttle.
The throttle wraps code around a function that is typically used to
issue requests to an online service. Some services state a limit as
to how many requests can be made per some time interval (e.g., 3
requests per second). The throttle code ensures that the limit is
not exceeded.
Args:
wrapped: Any callable function that accepts optional positional
and/or optional keyword arguments, and optionally
returns a value. The default is None, which will be
the case when the pie decorator version is used with
any of the following arguments specified.
requests: The number of requests that can be made in
the interval specified by seconds.
seconds: The number of seconds in which the number of requests
specified in requests can be made.
async_q_size: Specifies the size of the request
queue for async requests. When the request
                      queue is totally populated, any additional
calls to send_request will be delayed
until queued requests are removed and
scheduled. The default is 4096 requests.
Returns:
A callable function that, for mode Throttle.MODE_ASYNC, queues
the request to be scheduled in accordance with the specified
limits, or, for all other modes, delays the request as needed in
accordance with the specified limits.
:Example: wrap a function with an async throttle for 1 request
per second
>>> from scottbrian_throttle.throttle import Throttle
>>> @throttle_async(requests=1, seconds=1)
... def f1() -> None:
... print('example 1 request function')
>>> shutdown_throttle_funcs(f1)
"""
# ==================================================================
# The following code covers cases where throttle is used with or
# without the pie character, where the decorated function has or
# does not have parameters.
#
# Here's an example of throttle with a function that has no
# args:
# @throttle(requests=1, seconds=1, mode=Throttle.MODE_SYNC)
# def aFunc():
# print('42')
#
# This is what essentially happens under the covers:
# def aFunc():
# print('42')
# aFunc = throttle(requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)(aFunc)
#
# The call to throttle results in a function being returned that
# takes as its first argument the aFunc specification that we
# see in parens immediately following the throttle call.
#
# Note that we can also code the above as shown and get the same
# result.
#
# Also, we can code the following and get the same result:
# def aFunc():
# print('42')
# aFunc = throttle(aFunc,
# requests=1,
# seconds=1,
# mode=Throttle.MODE_SYNC)
#
# What happens is throttle gets control and tests whether aFunc
# was specified, and if not returns a call to functools.partial
# which is the function that accepts the aFunc
# specification and then calls throttle with aFunc as the first
# argument with the other args for requests, seconds, and mode).
#
# One other complication is that we are also using the
# wrapt.decorator for the inner wrapper function which does some
# more smoke and mirrors to ensure introspection will work as
# expected.
# ==================================================================
if wrapped is None:
return cast(FuncWithThrottleAsyncAttr[F],
functools.partial(throttle_async,
requests=requests,
seconds=seconds,
async_q_size=async_q_size))
a_throttle_async = ThrottleAsync(requests=requests,
seconds=seconds,
async_q_size=async_q_size)
@decorator # type: ignore
def wrapper(func_to_wrap: F, instance: Optional[Any],
args: tuple[Any, ...],
kwargs2: dict[str, Any]) -> Any:
return a_throttle_async.send_request(func_to_wrap,
*args,
**kwargs2)
wrapper = wrapper(wrapped)
wrapper = add_throttle_async_attr(wrapper)
wrapper.throttle = a_throttle_async
return cast(FuncWithThrottleAsyncAttr[F], wrapper)
########################################################################
# shutdown_throttle_funcs
########################################################################
def shutdown_throttle_funcs(
*args: FuncWithThrottleAsyncAttr[Callable[..., Any]],
# *args: FuncWithThrottleAttr[Protocol[F]],
shutdown_type: int = Throttle.TYPE_SHUTDOWN_SOFT,
timeout: OptIntFloat = None
) -> bool:
"""Shutdown the throttle request scheduling for decorated functions.
    The shutdown_throttle_funcs function is used to shut down one or
    more functions that were decorated with the throttle. The
    arguments apply to each of the functions specified to be shut
    down. If timeout is specified, then True is returned iff all of
    the functions shut down within the specified number of seconds.
Args:
args: one or more functions to be shutdown
shutdown_type: specifies whether to do a soft or a hard
shutdown:
* A soft shutdown
(Throttle.TYPE_SHUTDOWN_SOFT), the default,
stops any additional requests from being
queued and cleans up the request queue by
scheduling any remaining requests at the
normal interval as calculated by the seconds
and requests that were specified during
instantiation.
* A hard shutdown (Throttle.TYPE_SHUTDOWN_HARD)
stops any additional requests from being
queued and cleans up the request queue by
quickly removing any remaining requests
without executing them.
timeout: number of seconds to allow for shutdown to complete for
all functions specified to be shutdown.
Note that a *timeout* of zero or less is equivalent
to a *timeout* of None, meaning start_shutdown will
return when the shutdown is complete without a
timeout.
.. # noqa: DAR101
Returns:
* ``True`` if *timeout* was not specified, or if it was
specified and all of the specified functions completed
shutdown within the specified number of seconds.
* ``False`` if *timeout* was specified and at least one of the
functions specified to shutdown did not complete within the
specified number of seconds.
"""
start_time = time.time() # start the clock
####################################################################
# get all shutdowns started
####################################################################
for func in args:
func.throttle.start_shutdown(
shutdown_type=shutdown_type,
timeout=0.01)
####################################################################
# check each shutdown
# Note that if timeout was not specified, then we simply call
# shutdown for each func and hope that each one eventually
# completes. If timeout was specified, then we will call each
# shutdown with whatever timeout time remains and bail on the first
# timeout we get.
####################################################################
if timeout is None or timeout <= 0:
for func in args:
func.throttle.start_shutdown(shutdown_type=shutdown_type)
else: # timeout specified and is a non-zero positive value
for func in args:
            # use max to ensure a non-zero positive timeout value
if not func.throttle.start_shutdown(
shutdown_type=shutdown_type,
timeout=max(0.01, start_time + timeout - time.time())):
func.throttle.logger.debug('timeout of '
'shutdown_throttle_funcs '
f'with timeout={timeout}')
return False # we timed out
# if we are here then all shutdowns are complete
return True
| 3.375 | 3 |
data_parallel_nn.py | jbram22/blackjack_parallel_neural_network | 0 | 12789161 | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# # # # This is a data-parallelized Neural Network # # # #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
############################################################################################################
########################################### IMPORT PACKAGES ################################################
############################################################################################################
# General
import os
import functools
import time
import numpy as np
import pandas as pd
import random
import math
import warnings
# Parallelization
from mpi4py import MPI
##############################################################################################################
########################################## HELPER FUNCTIONS ##################################################
##############################################################################################################
def sigmoid(x):
return 1/(1+np.exp(-x))
def sigmoid_der(x):
    return sigmoid(x) * (1 - sigmoid(x))
def train_network(wh,wo,epochs,train_X,train_Y):
for epoch in range(epochs):
# slice data
sliced_inputs = np.asarray(np.split(train_X, comm.size))
sliced_labels = np.asarray(np.split(train_Y, comm.size))
size = int(len(train_X)/comm.size)
inputs_buf = np.zeros((size,hidden_layer_size))
labels_buf = np.zeros(len(train_Y),dtype='i')
# send data to each process
comm.Scatter(sliced_inputs, inputs_buf, root=0)
comm.Scatter(sliced_labels, labels_buf, root=0)
### neural network iterations ###
## feedforward ##
# hidden layer
zh = np.dot(train_X, wh)
ah = sigmoid(zh)
# output layer
zo = np.dot(ah, wo)
ao = sigmoid(zo)
# error calculation
error_out = ((1 / (2*len(train_X))) * (np.power((ao - train_Y), 2)))
        ## backpropagation ##
        # backpropagation from output layer to hidden layer
dcost_dao = ao - train_Y
dao_dzo = sigmoid_der(zo)
dzo_dwo = ah
dcost_wo = np.dot(dzo_dwo.T, (dcost_dao * dao_dzo))
# backpropogate from hidden layer to input layer
dcost_dzo = dcost_dao * dao_dzo
dzo_dah = wo
dcost_dah = np.dot(dcost_dzo , dzo_dah.T)
dah_dzh = sigmoid_der(zh)
dzh_dwh = train_X
dcost_wh = np.dot(dzh_dwh.T, dah_dzh * dcost_dah)
comm.Barrier()
# average error for all processes
error_buf = [0] * comm.size
try:
error_buf = comm.gather(error_out)
error_out = sum(error_buf) / len(error_buf)
except TypeError as e:
pass
# if comm.rank == 0:
# print(f'error at iteration {epoch}: {error_out.sum()}')
# gather gradients of weights for hidden layer from all processes
dcost_wh_buf = np.asarray([np.zeros_like(dcost_wh)] * comm.size)
comm.Gather(dcost_wh, dcost_wh_buf)
comm.Barrier()
dcost_wh = functools.reduce(np.add, dcost_wh_buf) / comm.size # average gradients across all processes
# gather gradients of weights for output layer
dcost_wo_buf = np.asarray([np.zeros_like(dcost_wo)] * comm.size)
comm.Gather(dcost_wo, dcost_wo_buf)
comm.Barrier()
dcost_wo = functools.reduce(np.add, dcost_wo_buf) / comm.size # average gradients across all processes
# update weights
wh -= lr * dcost_wh
wo -= lr * dcost_wo
# send updated weights to processes
comm.Bcast([wh, MPI.DOUBLE])
comm.Bcast([wo, MPI.DOUBLE])
return wh,wo
def predict(theta1,theta2, inputs):
a2 = np.dot(inputs, theta1)
a2 = sigmoid(a2)
a3 = np.dot(a2, theta2)
a3 = pd.Series(sigmoid(a3).reshape(-1))
predictions = np.where(a3 >= 0.5,1,-1)
return pd.Series(predictions)
def accuracy_measures(predictions,actual):
df = pd.concat([predictions,actual],axis = 1) # concatenate predicitons & actual labels into single dataframe
df.columns = ['predictions','actual']
df['correct'] = np.where(df.predictions == df.actual,1,0)
# true positives
positives = df.loc[df.actual == 1]
true_positives = positives.correct.sum()
# false negatives
false_negatives = (positives.predictions == -1).sum()
    # true negatives
negatives = df.loc[df.actual == -1]
true_negatives = negatives.correct.sum()
    # false positives
    false_positives = (negatives.predictions == 1).sum()
# overall accuracy
accuracy = (true_positives + true_negatives)/(true_positives + true_negatives + false_positives + false_negatives)
# precision
precision = true_positives/(true_positives + false_positives)
# recall (sensitivity)
sensitivity = true_positives/(true_positives+false_negatives)
# specificity
specificity = true_negatives/(true_negatives + false_positives)
return accuracy,precision, sensitivity, specificity
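# Illustrative helper (added for clarity, not in the original script): a tiny
# self-contained check of accuracy_measures() on hand-made labels, using the
# same +1/-1 encoding that predict() produces.
def _demo_accuracy_measures():
    preds = pd.Series([1, -1, 1, 1, -1])
    actual = pd.Series([1, -1, -1, 1, -1])
    acc, prec, sens, spec = accuracy_measures(preds, actual)
    print(f'accuracy={acc:.2f} precision={prec:.2f} '
          f'sensitivity={sens:.2f} specificity={spec:.2f}')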
############################################################################################################
######################################## EXECUTION & PERFORMANCE ###########################################
############################################################################################################
if __name__ == '__main__':
#suppress warnings
warnings.filterwarnings('ignore')
####################################################
############ DATA IMPORT & FORMATTING ##############
####################################################
model_df = pd.read_csv('blackjack.csv')
X = np.array(model_df[[i for i in model_df.columns if i not in {'correct_action','outcome'}]])
train_X = np.array(model_df[['player_initial_total', 'has_ace', 'dealer_card','count']])
train_Y = np.array(model_df['correct_action']).reshape(-1,1)
####################################################
############### MPI INITIALIZATION #################
####################################################
# Init MPI
comm = MPI.COMM_WORLD
# structure of the 3-layer neural network
hidden_layer_size = 10
output_layer_size = 1
lr = 1 # learning rate
epochs = 50 # iterations
# randomly initialize weights
if comm.rank == 0:
wh = np.random.rand(train_X.shape[1],hidden_layer_size) # weights for hidden layer
wo = np.random.rand(hidden_layer_size, 1) # weights for output layer
else:
wh = np.random.rand(train_X.shape[1],hidden_layer_size)
wo = np.random.rand(hidden_layer_size, 1)
comm.Barrier()
# communicate weight vectors
comm.Bcast([wh, MPI.DOUBLE])
comm.Bcast([wo, MPI.DOUBLE])
#################################################
############ NEURAL NETWORK TRAINING ############
#################################################
if comm.rank == 0:
start = time.time()
wh,wo = train_network(wh,wo,epochs,train_X,train_Y)
if comm.rank == 0:
end = time.time()
train_time = round(end-start,2)
print(f'\nEND OF TRAINING, took {train_time} seconds\n')
# write training time to file for plotting
out_filename = f'nn_train_{comm.size}.txt'
outfile = open(out_filename, "w")
outfile.write(str(train_time))
################################################
############ PREDICTIONS & RESULTS #############
################################################
# generate predictions
predictions = predict(wh,wo,train_X)
actual = pd.Series(train_Y.reshape(-1))
# compute & display results
accuracy,precision, sensitivity, specificity = accuracy_measures(predictions,actual)
print('PERFORMANCE RESULTS:')
print(f'accuracy: {100*round(accuracy,2)}%')
print(f'precision: {100*round(precision,2)}%')
print(f'sensitivity: {100*round(sensitivity,2)}%')
print(f'specificity: {100*round(specificity,2)}%\n')
| 2.453125 | 2 |
oauth2_backend/migrations/0001_initial.py | practian-reusable-applications/django-oauth2-backend | 0 | 12789162 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-29 03:12
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import oauth2_backend.models.user
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('last_hierarchy_id', models.CharField(blank=True, max_length=50, null=True)),
('last_module_id', models.CharField(blank=True, max_length=50, null=True)),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
],
options={
'verbose_name': 'User',
'permissions': (('list_user', 'Can list user'), ('get_user', 'Can get user')),
'verbose_name_plural': 'Users',
},
managers=[
('objects', oauth2_backend.models.user.UserManager()),
],
),
migrations.CreateModel(
name='Hierarchy',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('logo', models.ImageField(blank=True, default='logo/default.png', null=True, upload_to='logos', verbose_name='Logo')),
('code', models.CharField(blank=True, max_length=60, null=True, verbose_name='Code')),
('name', models.CharField(max_length=60, verbose_name='Name')),
('name_short', models.CharField(blank=True, max_length=40, null=True, verbose_name='Name short')),
('fiscal_creation_date', models.DateField(blank=True, null=True, verbose_name='fiscal creation date')),
('fiscal_address', models.CharField(blank=True, max_length=40, null=True, verbose_name='Fiscal address')),
('is_active', models.BooleanField(default=True, verbose_name='Active')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'hierarchy',
'permissions': (('list_hierarchy', 'Can list hierarchy'), ('get_hierarchy', 'Can get hierarchy')),
'verbose_name_plural': 'hierarchys',
},
),
migrations.CreateModel(
name='HierarchyType',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('hierarchy_type', models.CharField(choices=[('INSTITUCION', 'Institucion'), ('FILIAL', 'Filial'), ('FACULTAD', 'Facultad'), ('ESCUELA', 'Escuela'), ('CARRERA', 'Carrera'), ('DEPARTAMENTO_ACAD', 'Departamento acad.'), ('OTHER', 'Other')], max_length=50)),
('name', models.CharField(max_length=60, verbose_name='Name')),
('level', models.BigIntegerField(verbose_name='Level')),
('is_active', models.BooleanField(default=True, verbose_name='Active')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'hierarchy type',
'db_table': 'oauth2_backend_hierarchy_type',
'permissions': (('list_hierarchytype', 'Can list hierarchytype'), ('get_hierarchytype', 'Can get hierarchytype')),
'verbose_name_plural': 'hierarchy types',
},
),
migrations.CreateModel(
name='Menu',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('module', models.CharField(choices=[('WEB', 'Web informativa'), ('ADMISION', 'Admisión'), ('BACKEND', 'Backend Manager'), ('OTHER', 'Other')], default='BACKEND', max_length=50, verbose_name='module')),
('state', models.CharField(help_text='state or section (estado o grupo de estados)', max_length=50, verbose_name='State or section')),
('title', models.CharField(max_length=50, verbose_name='Title')),
('url', models.CharField(default='#', max_length=150, verbose_name='Url')),
('template_url', models.CharField(default='#', max_length=250, verbose_name='Template url')),
('pos', models.IntegerField(default=1, verbose_name='position')),
('icon', models.CharField(blank=True, default='', max_length=50, null=True, verbose_name='icon')),
('is_active', models.BooleanField(default=True, verbose_name='Active')),
('is_abstract', models.BooleanField(default=False, verbose_name='Is_abstract')),
('description', models.TextField(blank=True, null=True, verbose_name='description')),
('router_json', models.TextField(blank=True, null=True, verbose_name='router json')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='childrens', to='oauth2_backend.Menu', verbose_name='parent')),
('permission', models.ForeignKey(blank=True, help_text='NULL if is root', null=True, on_delete=django.db.models.deletion.CASCADE, to='auth.Permission', verbose_name='permission')),
],
options={
'verbose_name': 'menu',
'permissions': (('list_menu', 'Can list menu'), ('get_menu', 'Can get menu')),
'verbose_name_plural': 'menus',
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('national_id_doc', models.CharField(blank=True, max_length=20, null=True, verbose_name='National identity document')),
('first_name', models.CharField(help_text='primer nombre', max_length=50, verbose_name='First name')),
('other_names', models.CharField(blank=True, help_text='otros nombres', max_length=50, null=True, verbose_name='Other names')),
('last_name', models.CharField(blank=True, help_text='apellido paterno', max_length=50, null=True, verbose_name='Last name')),
('mother_last_name', models.CharField(blank=True, help_text='apellido materno', max_length=50, null=True, verbose_name="Mother's last name")),
('birth_date', models.DateField(blank=True, null=True, verbose_name='birth date')),
('photo', models.ImageField(blank=True, default='persons/default.png', null=True, upload_to='persons', verbose_name='Photo')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
],
options={
'verbose_name': 'Person',
'verbose_name_plural': 'Persons',
},
),
migrations.CreateModel(
name='UserHierarchyGroup',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('access_info', models.TextField(blank=True, null=True)),
('start_date', models.DateTimeField(blank=True, null=True, verbose_name='start date')),
('end_date', models.DateTimeField(blank=True, null=True, verbose_name='end date')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.Group', verbose_name='group')),
('hierarchy', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oauth2_backend.Hierarchy', verbose_name='hierarchy')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'user hierarchy group',
'db_table': 'oauth2_backend_user_hierarchy_group',
'verbose_name_plural': 'user hierarchy group',
},
),
migrations.CreateModel(
name='UserHierarchyPermission',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('access_info', models.TextField(blank=True, null=True)),
('start_date', models.DateTimeField(blank=True, null=True, verbose_name='start date')),
('end_date', models.DateTimeField(blank=True, null=True, verbose_name='end date')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='created at')),
('updated_at', models.DateTimeField(auto_now=True, null=True, verbose_name='updated at')),
('registered_by', models.TextField(blank=True, null=True)),
('hierarchy', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oauth2_backend.Hierarchy', verbose_name='hierarchy')),
('permission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='auth.Permission', verbose_name='permission')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user')),
],
options={
'verbose_name': 'user hierarchy permission',
'db_table': 'oauth2_backend_user_hierarchy_permission',
'verbose_name_plural': 'user hierarchy permission',
},
),
migrations.AddField(
model_name='hierarchy',
name='hierarchy_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hierarchy_set', to='oauth2_backend.HierarchyType'),
),
migrations.AddField(
model_name='hierarchy',
name='immediate_parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='immediate_childrens', to='oauth2_backend.Hierarchy'),
),
migrations.AddField(
model_name='hierarchy',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='childrens', to='oauth2_backend.Hierarchy'),
),
migrations.AddField(
model_name='user',
name='person',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='oauth2_backend.Person', verbose_name='Person'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
| 2 | 2 |
scraper/storage_spiders/giadungsmartcom.py | chongiadung/choinho | 0 | 12789163 | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='tensp']/h2",
'price' : "//div[@class='pd-right fr']/p[@class='p-price']",
'category' : "//ul[@class='breadcrumb all']/li/a",
'description' : "//div[@class='p-introduct all']/div[@class='content_tab_all']",
'images' : "//ul[@class='list_small']/li/a/@href",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'giadungsmart.<EMAIL>'
allowed_domains = ['giadungsmart.com']
start_urls = ['http://giadungsmart.com/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/[a-zA-Z0-9-]+\d.*\.html$']), 'parse_item'),
Rule(LinkExtractor(allow=['/[a-zA-Z-]+\.html($|\?Page=\d+$)']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 2.125 | 2 |
aiopyarr/models/common.py | yoki31/aiopyarr | 1 | 12789164 | <filename>aiopyarr/models/common.py
"""Common Models."""
# pylint: disable=invalid-name, too-many-instance-attributes
from __future__ import annotations
from dataclasses import dataclass
from aiopyarr.models.base import APIResponseType, BaseModel
@dataclass(init=False)
class Diskspace(BaseModel):
"""Radarr diskspace attributes."""
_responsetype = APIResponseType.LIST
freeSpace: int | None = None
label: str | None = None
path: str | None = None
totalSpace: int | None = None
@dataclass(init=False)
class _CommonAttrs(BaseModel):
"""Common attributes."""
audioBitrate: int | None = None
audioChannels: float | None = None
audioCodec: str | None = None
audioLanguages: str | None = None
audioStreamCount: int | None = None
resolution: str | None = None
runTime: str | None = None
scanType: str | None = None
subtitles: str | None = None
videoBitDepth: int | None = None
videoBitrate: int | None = None
videoCodec: str | None = None
videoFps: float | None = None
| 2.359375 | 2 |
course_02_python_data_structures/week_5/give_most_mails.py | RicardoRodriguesCosta/python_for_everybody | 0 | 12789165 | name = input("Enter file:")
if len(name) < 1 : name = "mbox-short.txt"
handle = open(name)
emails = dict()
for line in handle:
list_line = line.split()
if len(list_line) > 1 and list_line[0] == "From":
emails[list_line[1]] = emails.get(list_line[1], 0) + 1
else:
continue
mostemails_key = None
mostemails_value = 0
for x,y in emails.items():
if y > mostemails_value:
mostemails_value = y
mostemails_key = x
else:
continue
print(mostemails_key, mostemails_value)
| 3.546875 | 4 |
cgbind/add_substrate.py | duartegroup/cgbind | 7 | 12789166 | from cgbind.log import logger
from copy import deepcopy
import numpy as np
from cgbind.constants import Constants
from rdkit.Chem import AllChem
from scipy.optimize import minimize, Bounds
from scipy.spatial import distance_matrix
from cgbind import geom
from cgbind.atoms import get_vdw_radii
from cgbind.geom import rotation_matrix
from cgbind.geom import calc_com
from cgbind.utils import copy_func
def cage_subst_repulsion_func(cage, substrate, cage_coords, subst_coords, with_attraction=True):
"""
Determine the energy using two-body atom-atom repulsion derived from noble
gas dimers where
V_rep(r) = exp(- r/b + a)
where a and b are parameters determined by the atom pairs. Parameters are
suitable to generate V_rep in kcal mol-1
:param cage: (Cage object)
:param substrate: (Substrate object)
:param cage_coords: (list(np.ndarray)) Cage coordinates
:param subst_coords: (list(np.ndarray)) Substrate coordinates
:param with_attraction: (bool) do or don't return the energy with a
constant attractive term based on the number of
substrate atoms in the structure
:return: energy: (float) Potential energy (V_rep) in kcal mol-1
"""
dist_mat = distance_matrix(cage_coords, subst_coords)
# Matrix with the pairwise additions of the vdW radii
sum_vdw_radii = np.add.outer(np.array(cage.vdw_radii),
np.array(substrate.vdw_radii))
# Magic numbers derived from fitting potentials to noble gas dimers and
# plotting against the sum of vdw radii
b_mat = 0.083214 * sum_vdw_radii - 0.003768
a_mat = 11.576415 * (0.175541 * sum_vdw_radii + 0.316642)
exponent_mat = -(dist_mat / b_mat) + a_mat
energy_mat = np.exp(exponent_mat)
energy = np.sum(energy_mat)
# E is negative for favourable binding but this is a purely repulsive
# function so subtract a number.. which is determined from the best
# classifier for 102 binding affinities (see cgbind paper) 0.4 kcal mol-1
if with_attraction:
return energy - 0.4 * substrate.n_atoms
return energy
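# Illustrative sketch (added, not part of cgbind): the same pair potential as
# above written out for a single atom pair.  For two atoms whose vdW radii sum
# to sum_vdw (in Angstrom) at separation r, the repulsion in kcal mol-1 is:
def _pair_repulsion_example(r=3.4, sum_vdw=3.4):
    b = 0.083214 * sum_vdw - 0.003768
    a = 11.576415 * (0.175541 * sum_vdw + 0.316642)
    return np.exp(-(r / b) + a)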
def cage_subst_repulsion_and_electrostatic_func(cage, substrate, cage_coords, subst_coords):
"""
Determine the energy of adding a substrate to a cage based on V_rep + V_att
where the attractive term is electrostatic and uses the sum of
q_i q_j / r_ij interaction energies where q_i is the partial atomic charge
on atom i.
:param cage: (Cage object)
:param substrate: (Substrate object)
:param cage_coords: (list(np.ndarray)) Cage coordinates
:param subst_coords: (list(np.ndarray)) Substrate coordinates
:return:
"""
# Calculate the distance matrix in Bohr (a0) so the energies are in au
dist_mat = Constants.ang2a0 * distance_matrix(cage_coords, subst_coords)
# Charges are already in units of e
prod_charge_mat = np.outer(cage.charges, substrate.charges)
# Compute the pairwise iteration energies as V = q1 q2 / r in atomic units
energy_mat = prod_charge_mat / dist_mat
electrostatic_energy = Constants.ha2kcalmol * np.sum(energy_mat)
repulsive_energy = cage_subst_repulsion_func(cage, substrate, cage_coords, subst_coords)
return electrostatic_energy + repulsive_energy
def add_substrate_com(cagesubt):
"""
Add a substrate the centre of a cage defined by its centre of mass (com)
will minimise the energy with respect to rotation of the substrate and the
substrate conformer using cagesubt.energy_func. Will rotate cagesubt.n_init_geom
times and use cagesubt.n_subst_confs number of substrate conformers
:param cagesubt: (CageSubstrateComplex object)
:return: xyzs: (list(list))
"""
logger.info(f'Adding substrate to the cage COM and minimising the energy '
f'with {cagesubt.energy_func.__name__}')
# Minimum energy initialisation and the x parameter array (angles to
# rotate about the x, y, z axes)
min_energy, curr_x = 9999999999.9, np.zeros(3)
# Optimum (minimum energy) conformer
best_coords = None
c, s = cagesubt.cage, cagesubt.substrate
cage_coords = get_centered_cage_coords(c)
c.vdw_radii = [get_vdw_radii(atom) for atom in c.atoms]
if cagesubt.n_subst_confs > 1:
try:
s.gen_confs(n_confs=cagesubt.n_subst_confs)
except (ValueError, RuntimeError):
logger.error('Could not generate substrate conformers')
return None
for i, substrate in enumerate(s.conformers):
subst_coords = get_centered_substrate_coords(substrate)
s.vdw_radii = [get_vdw_radii(atom) for atom in s.atoms]
if s.mol_obj is not None:
s.volume = AllChem.ComputeMolVolume(s.mol_obj, confId=i)
for _ in range(cagesubt.n_init_geom):
            rot_angles = 2.0 * np.pi * np.random.rand(3)  # rand generates in [0, 1] so multiply by 2*pi to cover a full rotation
# Minimise the energy with a BFGS minimiser supporting bounds on
# the values (rotation is periodic)
result = minimize(get_energy, x0=np.array(rot_angles),
args=(c, s, cagesubt.energy_func, cage_coords, subst_coords),
method='L-BFGS-B',
bounds=Bounds(lb=0.0, ub=2*np.pi), tol=0.01)
energy = result.fun
logger.info(f'Energy = {energy:.4f}')
if energy < min_energy:
min_energy = energy
best_coords = get_rotated_subst_coords(result.x, subst_coords)
logger.info(f'Min energy = {min_energy:.4f} kcal mol-1')
cagesubt.binding_energy_kcal = min_energy
if best_coords is not None:
s.set_atoms(coords=best_coords)
c.set_atoms(coords=cage_coords)
return c.atoms + s.atoms
else:
return None
def get_centered_cage_coords(cage):
"""Get the cage coordinates that had been translated to the cage centroid"""
cage_coords = cage.get_coords()
centroid = cage.get_centroid()
return np.array([coord - centroid for coord in cage_coords])
def get_centered_substrate_coords(substrate):
"""Get the substrate coordinates that have been translated to its center of mass"""
substrate.centre()
return substrate.get_coords()
def cat_cage_subst_coords(cage, substrate, cage_coords, substrate_coords):
"""
Concatenate some coordinates into a set of xyzs by adding back the atom
labels from the original xyzs
:param cage:
:param substrate:
:param cage_coords:
:param substrate_coords:
:return:
"""
logger.info('Appending substrate coordinates to cage coordinates')
xyzs = [[cage.xyzs[n][0]] + cage_coords[n].tolist() for n in range(len(cage.xyzs))]
cage.substrate_atom_ids = list(range(len(xyzs), len(xyzs) + len(substrate.xyzs)))
xyzs += [[substrate.xyzs[n][0]] + substrate_coords[n].tolist() for n in range(len(substrate.xyzs))]
return xyzs
def get_rotated_subst_coords(x, subst_coords):
"""Get substrate coordinates that have been rotated by x[0] radians in the
x axis etc."""
x_rot, y_rot, z_rot = x
rot_matrix = np.identity(3)
rot_matrix = np.matmul(rot_matrix, rotation_matrix(axis=geom.i, theta=x_rot))
rot_matrix = np.matmul(rot_matrix, rotation_matrix(axis=geom.j, theta=y_rot))
rot_matrix = np.matmul(rot_matrix, rotation_matrix(axis=geom.k, theta=z_rot))
return np.array([np.matmul(rot_matrix, coord) for coord in deepcopy(subst_coords)])
def get_energy(x, cage, substrate, energy_func, cage_coords, subst_coords):
"""
Calculate the energy in kcal mol-1 for a particular x, which contains the
rotations in x, y, z cartesian directions
"""
rot_substrate_coords = get_rotated_subst_coords(x, subst_coords)
energy = energy_func(cage, substrate, cage_coords, rot_substrate_coords)
return energy
cage_subst_repulsion_func.__name__ = 'repulsion'
cage_subst_repulsion_and_electrostatic_func.__name__ = 'electrostatic'
cage_subst_repulsion_and_electrostatic_func_est = copy_func(cage_subst_repulsion_and_electrostatic_func)
cage_subst_repulsion_and_electrostatic_func_est.__name__ = 'electrostatic_fast'
energy_funcs = [cage_subst_repulsion_func,
cage_subst_repulsion_and_electrostatic_func,
cage_subst_repulsion_and_electrostatic_func_est]
| 2.40625 | 2 |
src/old_encoder_decoder.py | suhasjs/upgraded-system | 1 | 12789167 | <reponame>suhasjs/upgraded-system<filename>src/old_encoder_decoder.py
import collections
import csv
import random
import numpy as np
import tensorflow as tf
def _read_words(filename):
with tf.gfile.GFile(filename, "r") as f:
return filter(None, f.read().decode("utf-8").replace(".", " <eos>").
replace(", ", " <comma> ").replace("\n", " <eop> ").split())
def _read_sentences(filename):
with tf.gfile.GFile(filename, "r") as f:
s = f.read().decode("utf-8").replace(".", " <eos>").replace(", ", " <comma> ").replace("\n", " <eop><EOP_TAG>")
return filter(None, s.split("<EOP_TAG>"))
def build_vocab(filename):
data = _read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(1, len(words) + 1)))
return word_to_id
def _split(data):
xs = []
ys = []
for (x, y) in data:
xs.append(x)
ys.append(y)
return xs, ys
def get_raw_data(xs, ys, train_frac=0.7, val_frac=0.2, test_frac=0.1):
vocab = build_vocab(ys)
sentences = _read_sentences(ys)
ret_data = []
xs = list(csv.reader(open(xs, "r"), delimiter=' '))
for n in range(len(sentences)):
words = filter(None, sentences[n].split())
cur_ys = []
for word in words:
cur_ys.append(vocab[word])
x = map(float, xs[n])
ret_data.append((x, cur_ys))
# Randomly shuffle data
random.shuffle(ret_data)
# Compute split points
tr_end = int(train_frac * len(ret_data))
val_end = tr_end + int(val_frac * len(ret_data))
return _split(ret_data[:tr_end]), _split(ret_data[tr_end:val_end]), _split(ret_data[val_end:]), vocab
# Input - seqs: num_samples*3, labels: num_samples*[list]
# Return X:maxlen*num_samples*3, X_mask: max_len*num_samples, labels: maxlen*num_samples
def prepare_data(seqs, labels, maxlen=None, xdim=3):
"""Create the matrices from the datasets.
This pad each sequence to the same length: the length of the
longest sequence or maxlen.
if maxlen is set, we will cut all sequence to this maximum
length.
This swap the axis!
"""
# Trim all output seqs to have only maxlen steps
if maxlen is not None:
Iseqs = []
Oseqs = []
for i_seq, o_seq in zip(seqs, labels):
if len(o_seq) < maxlen:
Iseqs.append(i_seq)
Oseqs.append(o_seq)
seqs = Iseqs
labels = Oseqs
else:
maxlen = 40
# Pad and compute masks
ret_X = np.zeros((maxlen, len(seqs), xdim))
mask_X = np.zeros((maxlen, len(seqs)))
labels_X = np.zeros((maxlen, len(seqs)))
for k in range(len(seqs)):
mask_X[:len(labels[k]), k] = 1
ret_X[:len(labels[k]), k] = np.asarray(seqs[k])
labels_X[:len(labels[k]), k] = labels[k]
return ret_X, mask_X, labels_X
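# Illustrative helper (added for clarity, not in the original file): running
# prepare_data on a toy batch of two samples, each with a 3-dim input vector
# and a variable-length label sequence.  Shapes come back time-major as the
# docstring notes: (maxlen, num_samples, 3), (maxlen, num_samples) and
# (maxlen, num_samples).
def _demo_prepare_data():
    seqs = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]]
    labels = [[1, 2, 3], [4, 5]]
    X, mask, y = prepare_data(seqs, labels, maxlen=4)
    print(X.shape, mask.shape, y.shape)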
if __name__ == "__main__":
train, val, test, vocab = get_raw_data("../data/xs1000.txt",
"../data/targets1000.txt")
c = _read_sentences("../data/targets1000.txt")
    print(np.array(train[0]))
# print val;
# print test;
print( len(vocab) )
print(build_vocab("../data/targets1000.txt"))
| 2.453125 | 2 |
python/parsers/modules/molecular_formula.py | ajmaurais/ms2_annotator | 4 | 12789168 | <gh_stars>1-10
import re
from collections import Counter
from modules import atom_table
class MolecularFormula(object):
_FORMULA_RESIDUE_ORDER = ['C', '(13)C', 'H', 'D', 'Br', 'Cl', 'N',
'(15)N', 'O', '(18)O', 'P', 'S', 'Se']
def __init__(self, seq='', n_term=True, c_term=True):
self.seq = seq
self.formula = self._seq_to_counter(seq, n_term=n_term, c_term=c_term)
def __str__(self):
ret = ''
# Keep track of already printed AAs
printed = {aa: False for aa in self.formula.keys()}
for aa in self._FORMULA_RESIDUE_ORDER:
if self.formula[aa] == 0:
printed[aa] = True
continue
if self.formula[aa] == 1:
ret += aa
else:
ret += '{}{}'.format(aa, self.formula[aa])
printed[aa] = True
for aa, p in printed.items():
if not p:
ret += '{}{}'.format(aa, self.formula[aa])
return ret
def __repr__(self):
return str('MolecularFormula({})'.format(dict(self.formula)))
@staticmethod
def _seq_to_counter(seq, n_term=True, c_term=True):
'''
Convert `seq` to formula.
Parameters
----------
seq: str
Amino acid sequence as string.
n_term: str
Should n terminus be added to formula?
c_term: str
Should c terminus be added to formula?
Returns
-------
formula: Counter
Counter of atoms in formula.
'''
formula = Counter()
for c in seq:
try:
formula += atom_table.BASE_RESIDUE_ATOMS[c]
except KeyError:
raise KeyError('Unknown amino acid in sequence: {}, {}'.format(seq, c.upper()))
if seq != '':
if n_term:
formula += atom_table.BASE_RESIDUE_ATOMS['N_TERM']
if c_term:
formula += atom_table.BASE_RESIDUE_ATOMS['C_TERM']
return formula
def set_seq(self, seq):
'''
Reset peptide sequence and formula.
Parameters
----------
seq: str
Peptide sequence to add.
'''
self.seq = seq
self.formula = self._seq_to_counter(seq)
def add_mod(self, name, residue):
'''
Add residue modificaton to formula.
Parameters
----------
name: str
Name of modification.
residue: str
Single letter aa code on which modification occurred.
Raises
------
KeyError if modification is not known.
'''
temp_mod = atom_table.get_mod(name, residue)
self.formula.update(temp_mod)
def __iadd__(self, rhs):
'''
Add another MolecularFormula. seq member is unmodified.
Parameters
----------
rhs: MolecularFormula
Another formula to add.
'''
self.formula.update(rhs.formula)
return self
def calc_mass(self, **kwargs):
'''
Calculate monoisotopic or avg mass.
Parameters
----------
mono_avg: int
One of (0, 1). 0 for average, 1 for monoisotopic.
Returns
-------
mass: float
mass of formula.
'''
return atom_table.calc_mass(self.formula, **kwargs)
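# Hedged usage sketch (added, not part of the original module): construct a
# formula for a short peptide sequence, then print its composition and mass.
# Whether calc_mass() needs an explicit mono/average argument depends on
# atom_table.calc_mass, so the bare call below is an assumption.
def _demo_molecular_formula():
    formula = MolecularFormula('PEPTIDE')
    print(str(formula))          # elemental composition string
    print(formula.calc_mass())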
| 2.796875 | 3 |
api/test.py | mingweiarthurli/CMPUT-404-Project | 0 | 12789169 | <reponame>mingweiarthurli/CMPUT-404-Project
import re
# text = ["http://127.0.0.1:5454/author/de305d54-75b4-431b-adb2-eb6b9e546013",
# "http://127.0.0.1:5454/author/ae345d54-75b4-431b-adb2-fb6b9e547891",]
text = "https://cmput404-socialdistribution.herokuapp.com/posts/da61a3f4-8cfb-4046-881f-50e12bfb4a4d"
# for t in text:
r = re.findall(r"(https?://[-A-Za-z0-9+&@#%?=~_|!:,.;]+/)?", text, re.I)
print(r[0])
test2 = "Github Activity"
print("github" not in test2.lower()) | 2.734375 | 3 |
tensorcv/train/lr_policy.py | afcarl/tensorcv | 1 | 12789170 | import tensorflow as tf
def fixed(global_step, params):
assert 'base_lr' in params, 'base_lr must in params'
lr = tf.constant(params['base_lr'])
    tf.summary.scalar('learning_rate', lr)
return lr
def exponential_decay(global_step, params):
assert 'base_lr' in params, 'base_lr must in params'
assert 'decay_steps' in params, 'decay_steps must in params'
assert 'decay_rate' in params, 'decay_rate must in params'
lr = tf.train.exponential_decay(
learning_rate=params['base_lr'],
global_step=global_step,
decay_steps=params['decay_steps'],
decay_rate=params['decay_rate'],
staircase=params.get('staircase', True),
name='learning_rate')
    tf.summary.scalar('learning_rate', lr)
return lr
def polynomial_decay(global_step, params):
assert 'base_lr' in params, 'base_lr must in params'
assert 'decay_steps' in params, 'decay_steps must in params'
assert 'end_learning_rate' in params, 'end_learning_rate must in params'
assert 'power' in params, 'power must in params'
    lr = tf.train.polynomial_decay(
learning_rate=params['base_lr'],
global_step=global_step,
decay_steps=params['decay_steps'],
end_learning_rate=params['end_learning_rate'],
power=params['power'],
name='learning_rate')
    tf.summary.scalar('learning_rate', lr)
return lr
LR_POLICY_MAP = {
'fixed': fixed,
'exponential_decay': exponential_decay,
'polynomial_decay': polynomial_decay,
}
def get_lr_policy_fn(config):
if config.lr_policy not in LR_POLICY_MAP:
raise ValueError('{} is not a valid lr policy type'.format(config.lr_policy))
return LR_POLICY_MAP[config.lr_policy]
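# Hedged usage sketch (added): wiring a policy up to a TF1-style global step.
# The config object is a stand-in exposing only the attribute this module
# reads; the parameter values are arbitrary examples.
def _demo_lr_policy():
    class _Cfg:
        lr_policy = 'exponential_decay'
    global_step = tf.train.get_or_create_global_step()
    lr_fn = get_lr_policy_fn(_Cfg())
    return lr_fn(global_step, {'base_lr': 0.1,
                               'decay_steps': 1000,
                               'decay_rate': 0.96})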
| 2.203125 | 2 |
scripts/generate_mo.py | SiKreuz/discord-birthday-bot | 1 | 12789171 | import os
from pythongettext import msgfmt
LOCALE_PATH = os.path.join('..', 'discord_birthday_bot', 'locale')
for subdir, dirs, files in os.walk(LOCALE_PATH):
for filename in files:
if filename.endswith('.po'):
path = os.path.join(subdir, filename)
mo_str = msgfmt.Msgfmt(path).get()
mo = open(os.path.splitext(path)[0] + '.mo', 'wb')
mo.write(mo_str)
mo.flush()
mo.close()
print('Translated', path)
| 2.796875 | 3 |
classification_aromatic_substitution/predict_desc/post_process.py | coleygroup/QM-augmented_GNN | 11 | 12789172 | from rdkit import Chem
import pandas as pd
import numpy as np
from tqdm import tqdm
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import RobustScaler
tqdm.pandas()
GLOBAL_SCALE = ['partial_charge', 'fukui_neu', 'fukui_elec']
ATOM_SCALE = ['NMR']
def check_chemprop_out(df):
invalid = []
for _,r in df.iterrows():
for c in ['partial_charge', 'fukui_neu', 'fukui_elec', 'NMR', 'bond_order', 'bond_length']:
if np.any(pd.isna(r[c])):
invalid.append(r['smiles'])
break
return invalid
def modify_scaled_df(df, scalers):
for index in df.index:
if "H-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(27.7189 - scalers['NMR']["H"].data_min_[0]) / (scalers['NMR']["H"].data_max_[0] - scalers['NMR']["H"].data_min_[0])])
elif "F-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(481.6514 - scalers['NMR']["F"].data_min_[0]) / (scalers['NMR']["F"].data_max_[0] - scalers['NMR']["F"].data_min_[0])])
elif "Cl-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(1150.4265 - scalers['NMR']["Cl"].data_min_[0]) / (scalers['NMR']["Cl"].data_max_[0] - scalers['NMR']["Cl"].data_min_[0])])
elif "Br-" in df.loc[index, "smiles"]:
df.loc[index, "partial_charge"] = np.array([(-1 - scalers['partial_charge'].data_min_[0]) / (scalers['partial_charge'].data_max_[0] - scalers['partial_charge'].data_min_[0])])
df.loc[index, "NMR"] = np.array([(3126.8978 - scalers['NMR']["Br"].data_min_[0]) / (scalers['NMR']["Br"].data_max_[0] - scalers['NMR']["Br"].data_min_[0])])
return df
def min_max_normalize(df, scalers=None, train_smiles=None):
if train_smiles is not None:
ref_df = df[df.smiles.isin(train_smiles)]
else:
ref_df = df.copy()
if scalers is None:
scalers = get_scaler(ref_df)
for column in GLOBAL_SCALE:
scaler = scalers[column]
df[column] = df[column].apply(lambda x: scaler.transform(x.reshape(-1, 1)).reshape(-1))
def min_max_by_atom(atoms, data, scaler):
data = [scaler[a].transform(np.array([[d]]))[0][0] for a, d in zip(atoms, data)]
return np.array(data)
if ATOM_SCALE:
print('postprocessing atom-wise scaling')
df['atoms'] = df.smiles.apply(lambda x: get_atoms(x))
for column in ATOM_SCALE:
df[column] = df.progress_apply(lambda x: min_max_by_atom(x['atoms'], x[column], scalers[column]), axis=1)
df['bond_order_matrix'] = df.apply(lambda x: bond_to_matrix(x['smiles'], x['bond_order']), axis=1)
df['distance_matrix'] = df.apply(lambda x: bond_to_matrix(x['smiles'], x['bond_length']), axis=1)
df = modify_scaled_df(df, scalers)
df = df[['smiles', 'partial_charge', 'fukui_neu', 'fukui_elec', 'NMR', 'bond_order_matrix', 'distance_matrix']]
df = df.set_index('smiles')
return df, scalers
def get_scaler(df):
scalers = {}
for column in GLOBAL_SCALE:
scaler = MinMaxScaler()
data = np.concatenate(df[column].tolist()).reshape(-1, 1)
scaler.fit(data)
scalers[column] = scaler
if ATOM_SCALE:
atoms = df.smiles.apply(lambda x: get_atoms(x))
atoms = np.concatenate(atoms.tolist())
for column in ATOM_SCALE:
data = np.concatenate(df[column].tolist())
data = pd.DataFrame({'atoms': atoms, 'data': data})
data = data.groupby('atoms').agg({'data': lambda x: list(x)})['data'].apply(lambda x: np.array(x)).to_dict()
scalers[column] = {}
for k, d in data.items():
scaler = MinMaxScaler()
scalers[column][k] = scaler.fit(d.reshape(-1, 1))
return scalers
def bond_to_matrix(smiles, bond_vector):
m = Chem.MolFromSmiles(smiles)
m = Chem.AddHs(m)
bond_matrix = np.zeros([len(m.GetAtoms()), len(m.GetAtoms())])
for i, bp in enumerate(bond_vector):
b = m.GetBondWithIdx(i)
bond_matrix[b.GetBeginAtomIdx(), b.GetEndAtomIdx()] = bond_matrix[b.GetEndAtomIdx(), b.GetBeginAtomIdx()] = bp
return bond_matrix
def get_atoms(smiles):
m = Chem.MolFromSmiles(smiles)
m = Chem.AddHs(m)
atoms = [x.GetSymbol() for x in m.GetAtoms()]
return atoms
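# Illustrative check (added): because AddHs appends explicit hydrogens after
# the heavy atoms, ethanol gives
#     get_atoms('CCO')  ->  ['C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H']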
def minmax_by_element(r, minmax, target):
target = r[target]
elements = r['atoms']
for i, a in enumerate(elements):
target[i] = (target[i] - minmax[a][0]) / (minmax[a][1] - minmax[a][0] + np.finfo(float).eps)
return target
| 2.453125 | 2 |
Modulo-03/ex112/utilidadescev/moeda/__init__.py | Matheus-Henrique-Burey/Curso-de-Python | 0 | 12789173 | def almentar(preco, taxa, formato=False):
res = preco + preco * taxa / 100
return res if not formato else moeda(res)
def diminuir(preco, taxa, formato=False):
res = preco - preco * taxa / 100
return res if not formato else moeda(res)
def dobro(preco, formato=False):
res = preco * 2
return res if not formato else moeda(res)
def metade(preco, formato=False):
res = preco / 2
return res if not formato else moeda(res)
def moeda(preco):
return f'R$ {preco:.2f}'.replace('.', ',')
def resumo(preco, taxadim, taxaalm):
print('-' * 35)
print(f"{'RESUMO DO VALOR':^35}")
print('-' * 35)
    print(f'Preço analisado: \t{moeda(preco)}')
print(f'O dobro do preço: \t{dobro(preco, True)}')
print(f'A metade do preço: \t{metade(preco, True)}')
print(f'{taxaalm}% de almentando: \t{almentar(preco, taxaalm, True)}')
print(f'{taxadim}% de desconto: \t{diminuir(preco, taxadim, True)}')
print('-' * 35)
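# Hedged usage sketch (added): summary table for a price of R$ 100.00 with a
# 10% discount rate and a 20% increase rate.
#     resumo(100, 10, 20)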
| 3.375 | 3 |
vega/algorithms/nas/adelaide_ea/adelaide_trainer_callback.py | Lzc06/vega | 12 | 12789174 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""The trainer program for Adelaide_EA."""
import logging
import vega
from vega.core.common.class_factory import ClassFactory, ClassType
from vega.core.metrics import calc_model_flops_params
from vega.core.trainer.callbacks import Callback
if vega.is_torch_backend():
import torch
elif vega.is_tf_backend():
import tensorflow as tf
logger = logging.getLogger(__name__)
@ClassFactory.register(ClassType.CALLBACK)
class AdelaideEATrainerCallback(Callback):
"""Construct the trainer of Adelaide-EA."""
def before_train(self, logs=None):
"""Be called before the training process."""
self.config = self.trainer.config
if vega.is_torch_backend():
count_input = torch.FloatTensor(1, 3, 192, 192).cuda()
elif vega.is_tf_backend():
tf.reset_default_graph()
count_input = tf.random_uniform([1, 192, 192, 3], dtype=tf.float32)
flops_count, params_count = calc_model_flops_params(self.trainer.model, count_input)
self.flops_count, self.params_count = flops_count * 1e-9, params_count * 1e-3
logger.info("Flops: {:.2f} G, Params: {:.1f} K".format(self.flops_count, self.params_count))
if self.flops_count > self.config.flops_limit:
logger.info("Flop too large!")
self.trainer.skip_train = True
def after_epoch(self, epoch, logs=None):
"""Update gflops and kparams."""
summary_perfs = logs.get('summary_perfs', {})
summary_perfs.update({'gflops': self.flops_count, 'kparams': self.params_count})
logs.update({'summary_perfs': summary_perfs})
def make_batch(self, batch):
"""Make batch for each training step."""
input = batch["data"]
target = batch["mask"]
if self.config.cuda:
input = input.cuda()
target = target.cuda()
return input, target
| 2.15625 | 2 |
pymesm.py | x86128/pymesm | 2 | 12789175 | <gh_stars>1-10
#!/usr/bin/env python3
import argparse
import sys
from mesm_devs import Bus, RamDevice, Printer
from mesm_cpu import CPU
from mesm_utils import load_oct
argp = argparse.ArgumentParser()
argp.add_argument("-i", required=True, dest="input", help="Input *.oct file")
argp.add_argument("-c", default=100, type=int, help="Number of commands to execute")
argp.add_argument("-t", required=False, dest="trace", action="store_true", help="Print trace")
args = argp.parse_args()
if __name__ == '__main__':
irom = RamDevice("IROM0", 32768)
dram = RamDevice("DRAM0", 32767)
printer = Printer("PRN0")
ibus = Bus("IBUS")
dbus = Bus("DBUS")
ibus.attach(irom, 0)
dbus.attach(dram, 0)
dbus.attach(printer, 32767)
load_oct(args.input, ibus, dbus)
cpu = CPU(ibus, dbus)
if args.trace:
cpu.set_trace()
cpu.run(args.c)
while cpu.running:
cpu.step()
print("Simulation finished.")
if cpu.failure:
sys.exit(1)
else:
sys.exit(0)
| 2.421875 | 2 |
Designning the System Latest/LLF_Schedule.py | JoHussien/rl_mc_scheduler | 1 | 12789176 | <filename>Designning the System Latest/LLF_Schedule.py
from env.job_generator import create_workload
import numpy as np
def compute_SCP_MBS(num_jobs, total_load, lo_per, job_density, time):
# We schedule here using LLF and compute the MBS of the generated Schedule
workload = create_workload(num_jobs, total_load, lo_per, job_density)
print("Generated Workload")
print(workload)
print("LLF-Schedule")
lax = np.subtract(workload[:, 1], workload[:, 2])
min_critical_points = np.argsort(lax, axis=0)
print(min_critical_points)
chosen=workload[min_critical_points]
print(chosen)
Hi_index=np.where(chosen[:,3]==1)
chosen_Hi=chosen[Hi_index]
    total_processing = np.sum(chosen_Hi[:, 2], axis=0)
    MBS = total_processing / max(workload[:, 1])
print("MBS of this Schedule: ", 1 / MBS)
compute_SCP_MBS(10, 0.5, 0.3, 5, 0)
| 2.671875 | 3 |
app/v1/authentication.py | arnxun/Space | 0 | 12789177 | import os
import re
from flask import g, jsonify, request
from flask_httpauth import HTTPTokenAuth # HTTPBasicAuth
from app.models import User
from app.v1 import api
from app.v1.errors import forbidden, unauthorized
from config import config
auth = HTTPTokenAuth()
@auth.verify_token
def verify_token(token):
g.current_user = None
url = request.path
for i in config[os.getenv('Flask_config') or 'default'].White_list:
if re.match(i, url):
return True
user = User.verify_auth_token(token)
if not user:
return False
g.current_user = user
return True
@api.route('/login', methods=['POST'])
def login():
print('ok')
print(request.form)
username = request.form.get('username')
password = request.form.get('password')
print(username)
print('username:' + username if username else '')
    print('password:' + password if password else '')
user = User.query.filter_by(username=username).first()
if not user or not user.verify_password(password):
return jsonify({'error': 'Unauthorized Access'})
g.user = user
token = user.generate_auth_token(3600)
return jsonify({'token': token.decode('ascii'), 'code': 20000})
@api.before_request
@auth.login_required
def before_request():
url = request.path
for i in config[os.getenv('Flask_config') or 'default'].White_list:
if re.match(i, url):
return
if not g.current_user:
return forbidden('Unconfirmed account')
# if not g.current_user or not g.current_user.confirmed:
# return forbidden('Unconfirmed account')
@auth.error_handler
def auth_error():
return unauthorized('Invalid credentials')
@api.route('/hello')
@auth.login_required
def hello():
return jsonify({'k': 'hello'})
# auth = HTTPBasicAuth()
#
#
# @auth.verify_password
# def verify_password(username_or_token, password):
# # first try to authenticate by token
# user = User.verify_auth_token(username_or_token)
# if not user:
# # try to authenticate with username/password
# user = User.query.filter_by(username=username_or_token).first()
# if not user or not user.verify_password(password):
# return False
# g.user = user
# return True
#
#
# @api.route('/token')
# @auth.login_required
# def get_auth_token():
# token = g.user.generate_auth_token()
# return jsonify({'token': token.decode('ascii')})
#
#
| 2.859375 | 3 |
scripts/debugging.py | falkben/zoo-checks | 0 | 12789178 | """to be run from root directory
"""
import os
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
django.setup()
from django.db.models.functions import Cast
from django.db.models.fields import DateField
from zoo_checks.models import AnimalCount, Enclosure, GroupCount, SpeciesCount
from django.db.models.functions import TruncDate
from django.utils import timezone
from zoo_checks.helpers import today_time
enclosures = Enclosure.objects.filter(name__in=["Australia", "Barramundi (ARG 1A)"])
tzinfo = "America/New York"
num_days = 1
animal_counts = (
AnimalCount.objects.filter(
enclosure__in=enclosures,
datetimecounted__lte=timezone.localtime(),
datetimecounted__gt=today_time() - timezone.timedelta(num_days),
)
.annotate(dateonlycounted=TruncDate("datetimecounted", tzinfo=tzinfo))
.order_by("dateonlycounted", "animal_id")
.distinct("dateonlycounted", "animal_id")
)
group_counts = (
GroupCount.objects.filter(
enclosure__in=enclosures,
datetimecounted__lte=timezone.localtime(),
datetimecounted__gt=today_time() - timezone.timedelta(num_days),
)
.annotate(dateonlycounted=TruncDate("datetimecounted", tzinfo=tzinfo))
.order_by("dateonlycounted", "group_id")
.distinct("dateonlycounted", "group_id")
)
species_counts = (
SpeciesCount.objects.filter(
enclosure__in=enclosures,
datetimecounted__lte=timezone.localtime(),
datetimecounted__gt=today_time() - timezone.timedelta(num_days),
)
.annotate(dateonlycounted=TruncDate("datetimecounted", tzinfo=tzinfo))
.order_by("dateonlycounted", "species_id")
.distinct("dateonlycounted", "species_id")
)
animal_dict = animal_counts.values()[0]
group_dict = group_counts.values()[0]
species_dict = species_counts.values()[0]
print(animal_dict)
print(animal_dict.keys())
print(group_dict)
print(group_dict.keys())
print(species_dict)
print(species_dict.keys())
| 2.125 | 2 |
tests/highlevel/transformer-redefine.py | sjdv1982/seamless | 15 | 12789179 | <reponame>sjdv1982/seamless
import os, tempfile
from seamless.highlevel import Context, Cell
import json
ctx = Context()
ctx.transform = lambda a,b: a + b
ctx.transform.a = 2
ctx.transform.b = 3
ctx.translate()
ctx.transform.example.a = 0
ctx.transform.example.b = 0
ctx.result = ctx.transform
ctx.result.celltype = "plain"
ctx.transform.result.example = 0.0 #example, just to fill the schema
ctx.transform.language = "cpp"
ctx.compute()
print(ctx.transform.exception)
print("*" * 80)
print(ctx.transform.header.value)
print("*" * 80)
ctx.code = ctx.transform.code.pull()
ctx.code = """
extern "C" int transform(int a, int b, double *result) {
*result = a + b;
return 0;
}"""
ctx.compute()
print(ctx.result.value)
print(ctx.status)
print(ctx.transform.exception)
del ctx.transform # required!
# else, the following line
# will try to re-use the existing transformer
ctx.transform = lambda a,b: a + b
ctx.transform.a = 12
ctx.transform.b = 13
ctx.result = ctx.transform
ctx.compute()
print(ctx.result.value)
| 2.09375 | 2 |
tests/test_workers.py | iawn/live2p | 0 | 12789180 | <reponame>iawn/live2p
import logging
from glob import glob
from queue import Queue
import sys
from pathlib import Path
from live2p.utils import ptoc, tic, get_true_mm3d_range
from live2p.offline import prepare_init
from live2p.workers import RealTimeQueue
# logging setup
# change for more or less information...
caiman_loglevel = logging.ERROR
live2p_loglevel = logging.ERROR
# changes how logs/updates are printed
logformat = '{relativeCreated:08.0f} - {levelname:8} - [{module}:{funcName}:{lineno}] - {message}'
logging.basicConfig(level=caiman_loglevel, format=logformat, style='{') #sets caiman loglevel
logger = logging.getLogger('live2p')
logger.setLevel(live2p_loglevel) # sets live2p debug level
# experiment info
# put the makeMasks3D image mat file in the folder with your data
if sys.platform == 'linux':
tiff_folder = '/mnt/e/caiman_scratch/test_data/ori/data'
else:
tiff_folder = 'e:/caiman_scratch/test_data/ori/data'
nplanes = 1 # for running multiple planes
plane = 0 # index starts at 0 (for single plane)
fr = 6.36
# x_start and x_end need to be the same or larger than what is in mm3d
# x_start = 110
# x_end = 512-110
# we can auto-determine them now...
# but note: if the stim/vis artifact is in the cropped range, there will be problems
# with motion correction and/or F extraction
mm3d_path = glob(tiff_folder + '/*.mat')[0]
x_start, x_end = get_true_mm3d_range(mm3d_path)
print(f'makeMasks3D range determine to be: {x_start} to {x_end} (pixels)')
# pre-allocated frame buffer, per plane
max_frames = 30000
n_init = 500
params = {
'fr': fr,
'p': 1, # deconv 0 is off, 1 is slow, 2 is fast
'nb': 3, # background compenents -> nb: 3 for complex
'decay_time': 1.0, # sensor tau
'gSig': (7, 7), # expected half size of neurons in pixels, very important for proper component detection
'init_method': 'seeded',
'motion_correct': True,
'expected_comps': 300,
'update_num_comps': False,
'update_freq': 100,
'niter_rig': 2,
'pw_rigid': False,
'dist_shape_update': False,
'normalize': True,
'sniper_mode': False,
'test_both': False,
'ring_CNN': False,
'simultaneously': True,
'use_cuda': False,
}
def _prepare_test_init(plane):
q = Queue()
tiff_files = Path(tiff_folder).glob('*.tif*')
init_list, nchannels, nplanes, _ = prepare_init(plane, n_init, tiff_files)
return init_list, plane, nchannels, nplanes, params, q
init = _prepare_test_init(plane)
def test_no_init():
try:
worker = RealTimeQueue(*init, num_frames_max=max_frames,
Ain_path=mm3d_path, xslice=slice(x_start, x_end), no_init=True)
print('OK... RealTimeWorker ran __init__ without OnACID initialization.')
except:
print('*** FAILED: pycuda ImportError ***')
raise
def test_class_init():
try:
worker = RealTimeQueue(*init, num_frames_max=max_frames,
Ain_path=mm3d_path, xslice=slice(x_start, x_end), no_init=False)
print('OK... RealTimeWorker default initialized from tiffs successfullly.')
except:
print('*** FAILED: RealTimeWorker did not initialize or OnACID initialization failed. ***')
raise
def test_onacid_init_from_tiffs():
try:
worker = RealTimeQueue(*init, num_frames_max=max_frames,
Ain_path=mm3d_path, xslice=slice(x_start, x_end), no_init=True)
init_mmap = worker.make_init_mmap()
acid = worker.initialize(init_mmap)
print('OK... RealTimeWorker initialized from tiffs successfullly.')
except:
print('*** FAILED: RealTimeWorker OnACID initialization from tiffs failed. ***')
raise
def test_onacid_init_from_file():
try:
worker = RealTimeQueue(*init, num_frames_max=max_frames,
Ain_path=mm3d_path, xslice=slice(x_start, x_end), no_init=True)
acid = worker.initialize_from_file()
print('OK... RealTimeWorker initialized from hdf5 file (previous init) successfullly.')
except:
print('*** FAILED: RealTimeWorker OnACID initialization from hdf5 file (previous init) failed. ***')
raise
def test_all():
test_no_init()
test_class_init()
test_onacid_init_from_tiffs()
# test_onacid_init_from_file()
if __name__ == '__main__':
    test_all()
| 2.0625 | 2
metnet_pytorch/model.py | tcapelle/metnet_pytorch | 15 | 12789181 | # AUTOGENERATED! DO NOT EDIT! File to edit: 01_model.ipynb (unless otherwise specified).
__all__ = ['DownSampler', 'TemporalEncoder', 'condition_time', 'ConditionTime', 'feat2image', 'MetNet',
'metnet_splitter']
# Cell
from .layers import *
from fastai.vision.all import *
# Cell
def DownSampler(in_channels):
return nn.Sequential(nn.Conv2d(in_channels, 160, 3, padding=1),
nn.MaxPool2d((2,2), stride=2),
nn.BatchNorm2d(160),
nn.Conv2d(160, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.Conv2d(256, 256, 3, padding=1),
nn.BatchNorm2d(256),
nn.Conv2d(256, 256, 3, padding=1),
nn.MaxPool2d((2,2), stride=2)
)
# Cell
class TemporalEncoder(Module):
def __init__(self, in_channels, out_channels=384, ks=3, n_layers=1):
self.rnn = ConvGRU(in_channels, out_channels, (ks, ks), n_layers, batch_first=True)
def forward(self, x):
x, h = self.rnn(x)
return (x, h[-1])
# Cell
def condition_time(x, i=0, size=(12, 16), seq_len=15):
"create one hot encoded time image-layers, i in [1, seq_len]"
assert i<seq_len
times = (torch.eye(seq_len, dtype=x.dtype, device=x.device)[i]).unsqueeze(-1).unsqueeze(-1)
ones = torch.ones(1,*size, dtype=x.dtype, device=x.device)
return times * ones
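# Illustrative check (added): for a horizon of 15 steps, asking for step i=2
# returns a (15, H, W) stack that is all ones in channel 2 and zeros elsewhere:
#     x = torch.zeros(1, 1, 1, 12, 16)
#     ct = condition_time(x, i=2, size=(12, 16), seq_len=15)   # shape (15, 12, 16)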
# Cell
class ConditionTime(Module):
"Condition Time on a stack of images, adds `horizon` channels to image"
def __init__(self, horizon, ch_dim=2):
self.horizon = horizon
self.ch_dim = ch_dim
def forward(self, x, fstep=0):
"x stack of images, fsteps"
bs, seq_len, ch, h, w = x.shape
ct = condition_time(x, fstep, (h,w), seq_len=self.horizon).repeat(bs, seq_len, 1,1,1)
x = torch.cat([x,ct], dim=self.ch_dim)
assert x.shape[self.ch_dim] == (ch + self.horizon) #check if it makes sense
return x
# Cell
def feat2image(x, target_size=(128,128)):
"This idea comes from MetNet"
x = x.transpose(1,2)
return x.unsqueeze(-1).unsqueeze(-1) * x.new_ones(1,1,1,*target_size)
# Cell
from axial_attention import AxialAttention
# Cell
class MetNet(Module):
def __init__(self, image_encoder, hidden_dim, ks=3, n_layers=1, n_att_layers=1,
head=None, horizon=3, n_feats=0, p=0.2, debug=False):
self.horizon = horizon
self.n_feats = n_feats
self.drop = nn.Dropout(p)
nf = 256 #from the simple image encoder
self.image_encoder = TimeDistributed(image_encoder)
self.ct = ConditionTime(horizon)
self.temporal_enc = TemporalEncoder(nf, hidden_dim, ks=ks, n_layers=n_layers)
self.temporal_agg = nn.Sequential(*[AxialAttention(dim=hidden_dim, dim_index=1, heads=8, num_dimensions=2) for _ in range(n_att_layers)])
if head is None:
self.head = Noop()
else:
self.head = head
self.debug = debug
def encode_timestep(self, x, fstep=1):
if self.debug: print(f'Encode Timestep:(i={fstep})')
if self.debug: print(f' input shape: {x.shape}')
#Condition Time
x = self.ct(x, fstep)
if self.debug: print(f' CondTime->x.shape: {x.shape}')
##CNN
x = self.image_encoder(x)
if self.debug: print(f' encoded images shape: {x.shape}')
#Temporal Encoder
_, state = self.temporal_enc(self.drop(x))
if self.debug: print(f' temp_enc out shape: {state.shape}')
return self.temporal_agg(state)
def forward(self, imgs, feats):
"""It takes a rank 5 tensor
- imgs [bs, seq_len, channels, h, w]
- feats [bs, n_feats, seq_len]"""
if self.debug: print(f' Input -> (imgs: {imgs.shape}, feats: {feats.shape})')
#stack feature as images
if self.n_feats>0:
feats = feat2image(feats, target_size=imgs.shape[-2:])
imgs = torch.cat([imgs, feats], dim=2)
if self.debug: print(f' augmented imgs: {imgs.shape}')
#Compute all timesteps, probably can be parallelized
res = []
for i in range(self.horizon):
x_i = self.encode_timestep(imgs, i)
out = self.head(x_i)
res.append(out)
res = torch.stack(res, dim=1).squeeze()
if self.debug: print(f'{res.shape=}')
return res
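# Usage sketch (illustrative; the channel counts are assumptions, not from the notebook):
# the image encoder must accept image channels + n_feats + horizon channels, because
# ConditionTime appends `horizon` extra channels. For 3-channel images, 2 scalar
# features and horizon=3 one could build:
#   model = MetNet(DownSampler(3 + 2 + 3), hidden_dim=128, horizon=3, n_feats=2)
#   out = model(imgs, feats)  # imgs: [bs, seq_len, 3, h, w], feats: [bs, 2, seq_len]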
# Cell
def metnet_splitter(m):
"A simple param splitter for MetNet"
    # Note: the original returned params(m.te), but MetNet defines no attribute `te`;
    # `temporal_enc` appears to be the intended parameter group.
    return [params(m.image_encoder), params(m.temporal_enc)+params(m.head)] | 2.28125 | 2 |
main.py | pannoi/cdn-dns-controller | 1 | 12789182 | <reponame>pannoi/cdn-dns-controller
from flask import Flask, jsonify
from flask import make_response
from flask import request
import logging
from src.route53 import Route53
from src.cloudfront import CloudFront
app = Flask(__name__)
# Route53 routes
@app.route('/zones/', methods=['GET'])
def list_hosted_zones():
""" Function lists all hosted zones in Route53. """
route53 = Route53()
return route53.list_hosted_zones()
@app.route('/zones/<string:zone_id>', methods=['GET'])
def get_hosted_zone(zone_id):
"""
Function return the hosted zone information
:param zone_id: Id of hosted zone to GET record sets.
"""
route53 = Route53()
return route53.get_hosted_zone(zone_id=zone_id)
@app.route('/zones/', methods=['POST'])
def create_hosted_zone():
""" Function creates new hosted zone under Route53 domain. """
route53 = Route53()
data = request.get_json()
hz_name = data['Name']
    comment = data.get('Comment', "")
    is_private = data.get('Private', False)
return route53.create_hosted_zone(domain_name=hz_name, comment=comment, is_private=is_private)
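# Example request body for POST /zones/ (illustrative):
# {"Name": "example.com.", "Comment": "primary zone", "Private": false}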
@app.route('/zones/<string:zone_id>', methods=['POST'])
def change_resource_record(zone_id):
    """
    Function changes a resource record set in the specified hosted zone.
    :param zone_id: Id of the targeted hosted zone
    """
route53 = Route53()
data = request.get_json()
    if data['RecordType'] == 'Alias':
return route53.change_resource_record_alias(
zone_id=zone_id,
comment=data['Comment'],
action=data['Action'],
type=data['Type'],
hosted_zone=data['HostedZone'],
dns_name=data['DnsName'],
name=data['Name']
)
elif data['RecordType'] == 'Set':
return route53.change_resource_record_set(
zone_id=zone_id,
comment=data['Comment'],
action=data['Action'],
name=data['Name'],
type=data['Type'],
ttl=data['TTL'],
target=data['Target']
)
else:
return make_response(jsonify({'error': 'Bad Request: RecordType not found, should be "Set" or "Alias"'}), 400)
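# Example request body for POST /zones/<zone_id> with RecordType "Set" (illustrative):
# {"RecordType": "Set", "Comment": "www record", "Action": "UPSERT", "Name": "www.example.com.",
#  "Type": "A", "TTL": 300, "Target": "203.0.113.10"}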
@app.route('/zones/<string:zone_id>', methods=['DELETE'])
def delete_zone(zone_id):
""" Deletes hosted zone. """
route53 = Route53()
    data = request.get_json(silent=True) or {}
force = False
if 'force' in data:
force = True
return route53.delete_hosted_zone(zone_id, force=force)
# CloudFront routes
@app.route('/distributions/', methods=['GET'])
def list_distributions():
""" Lists infromation about all CDN distribution. """
cloudfront = CloudFront()
return cloudfront.list_distirbutions()
@app.route('/distributions/<string:distribution_id>', methods=['GET'])
def get_distribution(distribution_id):
"""
Lists inforamtion about specific distribution by id.
:param distribution_id: Id of CDN distribution
"""
cloudfront = CloudFront()
return cloudfront.get_distribution(distribution_id=distribution_id)
@app.route('/distributions/', methods=['POST'])
def create_distribution():
""" Creates new CDN distribution. """
cloudfront = CloudFront()
data = request.get_json()
return cloudfront.create_distribution(
comment=data['Comment'],
origin_id=data['OriginId'],
domain_name=data['DomainName'],
hosted_zone=data['HostedZone'],
endpoint=data['Endpoint']
)
@app.route('/distributions/<string:distribution_id>', methods=['DELETE'])
def delete_distribution(distribution_id):
"""
Deletes CDN distribution
:param distribution_id: Id of CDN distribution
"""
cloudfront = CloudFront()
return cloudfront.delete_distribution(distribution_id=distribution_id)
@app.errorhandler(404)
def not_found(error):
    """ Return a JSON 404 response for routes that are not defined on the backend. """
return make_response(jsonify({'error': 'Not found'}), 404)
if __name__ == "__main__":
    app.logger.info('cdn-dns-controller is ready to use')
    app.run(host='0.0.0.0')
| 2.671875 | 3 |
game/upl/upl_scripts/storage.py | Sipondo/ulix-dexflow | 5 | 12789183 | <filename>game/upl/upl_scripts/storage.py
"""function
Opens the battler storage.
Opens the battler storage.
in:
None
"""
class Storage:
def __init__(self, act, src, user):
act.funcs.append(self)
self.init_time = act.current_time
self.act = act
self.src = src
self.user = user
self.act.game.m_gst.switch_state("storage")
def on_tick(self, time=None, frame_time=None):
return True
def on_read(self):
return None
| 2.515625 | 3 |
utility/__init__.py | Krishna10798/Multi-User-Blog | 5 | 12789184 | <reponame>Krishna10798/Multi-User-Blog<filename>utility/__init__.py
from utility import hash_str, check_secure_val, make_secure_val,\
valid_email, valid_username, valid_password
from filters import filterKey, showCount
| 1.125 | 1 |
logging/logging_api/logging_api_example_2_new_format.py | levs72/pyneng-examples | 11 | 12789185 | import logging
logger = logging.getLogger("__name__")
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
formatter = logging.Formatter(
"{asctime} - {name} - {levelname} - {message}", datefmt="%H:%M:%S", style="{"
)
console.setFormatter(formatter)
logger.addHandler(console)
## messages
logger.debug("Сообщение уровня debug: %s", "SOS")
logger.info("Сообщение уровня info")
logger.warning("Сообщение уровня warning")
| 2.75 | 3 |
Controllers/CommonModules/authfunc.py | SoulfulArt/BugTracker | 0 | 12789186 | <gh_stars>0
def login_test(request, User):
    """Return True if the request user is authenticated and has the 'Admin' user type."""
    user = request.user
    user_session = User.objects.filter(user_username__exact=user.username)
    # Guard against an empty queryset so unknown usernames do not raise an IndexError.
    if user.is_authenticated and user_session.exists() and user_session[0].user_type == "Admin":
        return True
    else:
        return False
| 2.390625 | 2 |
phr/establecimiento/api/urls.py | richardqa/django-ex | 0 | 12789187 | <filename>phr/establecimiento/api/urls.py
# coding=utf-8
from django.conf.urls import include, url
from phr.establecimiento.api.views import (
DetalleEstablecimientoAPIView, DiresaDetalleAPI, ListaDiresaAPI, ListaEstablecimientoAPI,
ListaEstablecimientoGrupoAPI, ListaEstablecimientoPorCategoriaAPI, ListaEstablecimientosCercanosAPI,
ListaEstablecimientosPorSectorAPI, ListaEstablecimientosSectoresAPI, ListaEstablecimientoUbigeoAPI,
ListaMicroredAPI, ListaRedAPI, MicroredDetalleAPI, RedDetalleAPI, SectorDetalleAPI, SectorRetrieveAPIView,
ServicioAPI, UbigeoDiresaListAPI, UbigeoMicroredesListAPI, UbigeoRedesListAPI,
)
urlpatterns = [
url(r'^v1/', include([
url(r'^establecimiento/', include([
url(r'^(?P<cod_renaes>[-\w]+)/detalle/$', DetalleEstablecimientoAPIView.as_view()),
url(r'^(?P<cod_renaes>[-\w]+)/$', DetalleEstablecimientoAPIView.as_view()),
], namespace='establecimiento')),
url(r'^establecimientos/', include([
url(r'^$', ListaEstablecimientoAPI.as_view()),
url(r'^sectores/$', ListaEstablecimientosSectoresAPI.as_view()),
url(r'^sectores/(?P<codigo>\d+)/$', SectorRetrieveAPIView.as_view()),
url(r'^sector/(?P<pk>\d+)/$', ListaEstablecimientosPorSectorAPI.as_view()),
url(r'^sector/detalle/(?P<pk>\d+)/$', SectorDetalleAPI.as_view()),
url(r'^cercanos/$', ListaEstablecimientosCercanosAPI.as_view()),
url(r'^ubigeo/(?P<cod_ubigeo>\w{2,6})/$', ListaEstablecimientoUbigeoAPI.as_view()),
url(r'^diresa/(?P<diresa_codigo>\w{1,6})/$',
ListaEstablecimientoGrupoAPI.as_view()),
url(r'^diresa/(?P<diresa_codigo>\w{1,6})/red/(?P<red_codigo>\d+)/$',
ListaEstablecimientoGrupoAPI.as_view()),
url(r'^diresa/(?P<diresa_codigo>\w{1,6})/red/(?P<red_codigo>\d+)/microred/(?P<microred_codigo>\d+)/$',
ListaEstablecimientoGrupoAPI.as_view()),
url(r'^categoria/(?P<nombre_categoria>[\w-]+)/$',
ListaEstablecimientoPorCategoriaAPI.as_view()),
], namespace='establecimientos')),
url(r'^diresa/', include([
url(r'^$', ListaDiresaAPI.as_view()),
url(r'^(?P<diresa_codigo>\d+)/$', DiresaDetalleAPI.as_view()),
url(r'^(?P<diresa_codigo>\d+)/red/$', ListaRedAPI.as_view()),
url(r'^(?P<diresa_codigo>\d+)/red/(?P<red_codigo>\d+)/$', RedDetalleAPI.as_view()),
url(r'^(?P<diresa_codigo>\d+)/red/(?P<red_codigo>\d+)/microred/$', ListaMicroredAPI.as_view()),
url(r'^(?P<diresa_codigo>\d+)/red/(?P<red_codigo>\d+)/microred/(?P<microred_codigo>\d+)/$',
MicroredDetalleAPI.as_view()),
], namespace='diresa')),
url(r'^ubigeo/', include([
url(r'^(?P<cod_ubigeo>\d{2,6})/diresas/$', UbigeoDiresaListAPI.as_view()),
url(r'^(?P<cod_ubigeo>\d{2,6})/redes/$', UbigeoRedesListAPI.as_view()),
url(r'^(?P<cod_ubigeo>\d{2,6})/microredes/$', UbigeoMicroredesListAPI.as_view()),
], namespace='ubigeo-diresa')),
url(r'^servicio/', include([
url(r'^(?P<codigo>\d+)/$', ServicioAPI.as_view()),
], namespace='establecimiento-servicio')),
], namespace='v1')),
]
| 1.84375 | 2 |
web_app/__init__.py | yalyakoob/TwitOff | 0 | 12789188 | """Entry point for TwitOff Flask application."""
from web_app.app import create_app
APP = create_app() | 1.273438 | 1 |
SSF_mip/evaluation/evaluation.py | Sijie-umn/SSF-MIP | 6 | 12789189 | import pickle
import numpy as np
import pandas as pd
from numpy import linalg as LA
from scipy import stats
from sklearn.metrics import mean_squared_error  # used by compute_rmse below
import sys
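# Note: load_results() is called in eval_forecast() below but was not defined or
# imported in this file; a minimal pickle-based loader is assumed here.
def load_results(filename):
    """Load a pickled results dictionary saved by the forecasting scripts."""
    with open(filename, 'rb') as f:
        return pickle.load(f)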
def compute_rmse(target, prediction):
"""Compute rmse between the ground truth and forecasts
Args:
target: a numpy array with ground truth
forecasts: a numpy array with forecasted values
Returns: rmse between the ground truth and forecasts
"""
return np.sqrt(mean_squared_error(target, prediction))
def compute_cosine(target, prediction):
"""Compute cosine simialrity between the ground truth and forecasts
Args:
target: a numpy array with ground truth
forecasts: a numpy array with forecasted values
Returns: cosine simialrity between the ground truth and forecasts
"""
result = np.dot(target, prediction) / (LA.norm(target) * LA.norm(prediction))
return result
def r_squared(y_true, y_pred, y_mean=None):
"""Compute relative R^2 between the ground truth and forecasts
Args:
target: a numpy array with ground truth
forecasts: a numpy array with forecasted values
Returns: relative R^2 between the ground truth and forecasts
"""
if y_mean is None:
y_mean = np.zeros(y_true.shape[0]) * np.mean(y_true)
rss = np.sum((y_true - y_pred)**2)
tss = np.sum((y_true - y_mean)**2)
rsq = 1 - rss / tss
return rsq
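# Worked example (default zero baseline): y_true=[1, 2], y_pred=[1, 1]
# gives rss=1, tss=5 and hence r_squared = 1 - 1/5 = 0.8.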
def print_eval_stats(eval_result):
"""Print the mean(se), median(se), 0.25 quantile(se), and 0.75 quantile (se) of the array, where se represents standard deviation
Args:
eval_result: a numpy array with evluation results
"""
print('mean: {:.4f} ({:.4f}) median {:.4f} ({:.4f})'.format(np.mean(eval_result),
stats.sem(eval_result),
np.median(eval_result),
quantile_se(eval_result, p=50)))
print('0.25 quantile: {:.4f} ({:.4f}) 0.75 quantile: {:.4f} ({:.4f})'.format(np.quantile(eval_result, 0.25),
quantile_se(eval_result, p=25),
np.quantile(eval_result, 0.75),
quantile_se(eval_result, p=75)))
def quantile_se(x, p=50):
# compute the standard error for different quantiles
# Source: <NAME>, "Mathematical Statistics". Springer Texts in Statistics, 1999. Page 306: Theorem 5.10
# p: quantile: int between 0-100
# x: data sequence
n = len(x) # number of samples
q = np.percentile(x, p)
density = stats.gaussian_kde(x) # density estimate of x
Fp = density(q).item()
p = p / 100.
sF = np.sqrt(p * (1 - p)) / Fp
se = sF / np.sqrt(n)
return se
def eval_forecast(model_name, rootpath, test_years, month_range, rep=False, num_rep=10):
"""Evalute the forecasts on training and test sets
Args:
model_name: a string indicating the name of a model
rootpath: the path where the forecasts are saved
test_years: a list of years in the test set
month_range: a list of months in the test set
rep: True or False, indicating if the reults include repeated runs
num_rep: the number of repetition
Returns:
result_train: the forecasting performance (temporal/spatial cosine/r2) on training set
result_test: the forecasting performance (temporal/spatial cosine/r2) on test set
"""
target_train = []
target_test = []
prediction_train = []
prediction_test = []
for year in test_years:
if year == 2020:
month_range = range(1, 7)
elif year == 2017:
month_range = range(7, 13)
else:
month_range = range(1, 13)
for month_id in month_range:
result_temp = load_results(rootpath + 'forecast_results/results_{}_{}_{}.pkl'.format(model_name, year, month_id))
target_train.append(result_temp['target_train'])
target_test.append(result_temp['target_test'])
if rep is True:
prediction_train_temp = np.zeros(result_temp['target_train'].shape)
prediction_test_temp = np.zeros(result_temp['target_test'].shape)
for i in range(num_rep):
prediction_train_temp += result_temp['prediction_train'][i]
prediction_test_temp += result_temp['prediction_test'][i]
prediction_train.append(prediction_train_temp / float(num_rep))
prediction_test.append(prediction_test_temp / float(num_rep))
else:
prediction_train.append(result_temp['prediction_train'])
prediction_test.append(result_temp['prediction_test'])
# test set evaluation
prediction_test = np.concatenate(prediction_test, axis=0)
target_test = np.concatenate(target_test, axis=0)
temporal_cos = np.zeros(prediction_test.shape[0])
spatial_cos = np.zeros(prediction_test.shape[1])
temporal_r2 = np.zeros(prediction_test.shape[0])
spatial_r2 = np.zeros(prediction_test.shape[1])
for i in range(prediction_test.shape[0]):
temporal_cos[i] = compute_cosine(target_test[i, :], prediction_test[i, :])
temporal_r2[i] = r_squared(target_test[i, :], prediction_test[i, :])
for i in range(prediction_test.shape[1]):
spatial_cos[i] = compute_cosine(target_test[:, i], prediction_test[:, i])
spatial_r2[i] = r_squared(target_test[:, i], prediction_test[:, i])
result_test = {}
result_test['temporal_cos'] = temporal_cos
result_test['spatial_cos'] = spatial_cos
result_test['temporal_r2'] = temporal_r2
result_test['spatial_r2'] = spatial_r2
# training set evaluation
prediction_train = np.concatenate(prediction_train, axis=0)
target_train = np.concatenate(target_train, axis=0)
temporal_cos_train = np.zeros(prediction_train.shape[0])
spatial_cos_train = np.zeros(prediction_train.shape[1])
temporal_r2_train = np.zeros(prediction_train.shape[0])
spatial_r2_train = np.zeros(prediction_train.shape[1])
for i in range(prediction_train.shape[0]):
temporal_cos_train[i] = compute_cosine(target_train[i, :], prediction_train[i, :])
temporal_r2_train[i] = r_squared(target_train[i, :], prediction_train[i, :])
for i in range(prediction_train.shape[1]):
spatial_cos_train[i] = compute_cosine(target_train[:, i], prediction_train[:, i])
spatial_r2_train[i] = r_squared(target_train[:, i], prediction_train[:, i])
result_train = {}
result_train['temporal_cos'] = temporal_cos_train
result_train['spatial_cos'] = spatial_cos_train
result_train['temporal_r2'] = temporal_r2_train
result_train['spatial_r2'] = spatial_r2_train
return result_train, result_test
| 3.359375 | 3 |
lib/python2.7/site-packages/whitenoise/storage_backport.py | vipulkanade/EventbriteDjango | 1 | 12789190 | <filename>lib/python2.7/site-packages/whitenoise/storage_backport.py
from __future__ import absolute_import, unicode_literals
import json
from django.contrib.staticfiles.storage import CachedStaticFilesStorage
class ManifestStaticFilesStorage(CachedStaticFilesStorage):
"""
Basic emulation of ManifestStaticFilesStorage from Django 1.7
"""
manifest_name = 'staticfiles.json'
def __init__(self, *args, **kwargs):
super(ManifestStaticFilesStorage, self).__init__(*args, **kwargs)
self.cache = ManifestCache(self.path(self.manifest_name))
def cache_key(self, name):
return name
class ManifestCache(object):
"""
Acts enough like a cache backend to be used with CachedStaticFilesStorage
from Django < 1.7, but stores data in a manifest file like Django 1.7+
"""
def __init__(self, manifest_file):
self.manifest_file = manifest_file
try:
with open(self.manifest_file) as f:
self.manifest = json.load(f)['paths']
except IOError:
self.manifest = {}
# Wire up the get method directly to the dict getter
self.get = self.manifest.get
def set(self, key, value, **kwargs):
self.manifest[key] = value
def set_many(self, values, **kwargs):
self.manifest.update(values)
payload = {'paths': self.manifest, 'version': '1.0'}
with open(self.manifest_file, 'w') as f:
json.dump(payload, f)
| 2.5 | 2 |
qatrack/faults/migrations/0006_auto_20210317_1651.py | crcrewso/qatrackplus | 20 | 12789191 | <reponame>crcrewso/qatrackplus
# Generated by Django 2.2.18 on 2021-03-17 20:51
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0011_update_proxy_permissions'),
('faults', '0005_auto_20210317_1538'),
]
operations = [
migrations.CreateModel(
name='FaultReviewGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('required', models.BooleanField(default=True, help_text='Is review by this group required in order to consider a fault reviewed', verbose_name='required')),
('group', models.OneToOneField(help_text='Select the group responsible for reviewing a fault', on_delete=django.db.models.deletion.PROTECT, to='auth.Group', verbose_name='group')),
],
),
migrations.RemoveField(
model_name='fault',
name='reviewed',
),
migrations.RemoveField(
model_name='fault',
name='reviewed_by',
),
migrations.CreateModel(
name='FaultReviewInstance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('reviewed', models.DateTimeField(auto_now_add=True, verbose_name='review date & time')),
('fault', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='faults.Fault', verbose_name='fault')),
('fault_review_group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='faults.FaultReviewGroup', verbose_name='fault review group instance')),
('reviewed_by', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.PROTECT, related_name='faults_reviewed', to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.828125 | 2 |
prompt_tuning/data/preprocessors_test.py | dumpmemory/prompt-tuning | 108 | 12789192 | <reponame>dumpmemory/prompt-tuning
# Copyright 2022 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for preprocessors."""
import os
import textwrap
import unittest.mock as mock
from absl.testing import parameterized
import numpy as np
from prompt_tuning.data import preprocessors
import seqio
from seqio import test_utils
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
TEST_DATA = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "test_data")
INPUTS_SIZE = 10
TARGETS_SIZE = 5
TEXT_SIZE = 10
TEST_T5_FEATURES = {
"inputs": seqio.Feature(
vocabulary=seqio.SentencePieceVocabulary(
os.path.join(TEST_DATA, "t5_vocab"), 100),
add_eos=True,
required=False),
"targets": seqio.Feature(
vocabulary=seqio.SentencePieceVocabulary(
os.path.join(TEST_DATA, "t5_vocab"), 100),
add_eos=True)
}
def create_fake_text_dataset(examples: int = 10, text_size: int = TEXT_SIZE):
text = np.reshape(
# Start at 2 so we skip EOS=1 which could be a problem on any tests that
# actually decode the fake inputs.
np.arange(2, examples * text_size + 2),
(-1, text_size)).astype(np.int32)
return tf.data.Dataset.from_tensor_slices({"targets": text})
class PreprocessorsTest(tf.test.TestCase):
def test_remove_first_text_token(self):
input_strings = ["This is my first example", "The second"]
gold_strings = [" ".join(s.split()[1:]) for s in input_strings]
ds = tf.data.Dataset.from_tensor_slices({"inputs": input_strings})
processed_ds = preprocessors.remove_first_text_token(ds)
for res, gold in zip(processed_ds, gold_strings):
self.assertEqual(res["inputs"].numpy().decode("utf-8"), gold)
def test_add_sentinel_to_beginning(self):
vocab_size = 100
offset = 0
field = "targets"
ds = tf.data.Dataset.from_tensor_slices({
field: tf.zeros([3, 4], dtype=tf.int32),
})
output_features = {
field: mock.MagicMock(vocabulary=mock.MagicMock(vocab_size=vocab_size))
}
processed_ds = preprocessors.add_sentinel_to_beginning(
ds, output_features, field, offset)
for ex in processed_ds:
self.assertEqual(ex[field][0].numpy().item(), vocab_size - (offset + 1))
def test_tsv_to_qa(self):
fake_data = textwrap.dedent("""
id\tcontext\tquestion\tanswer\tanswers
0\tThe capital of France is Paris\tWhat is the capital of France?\tParis\tParis|||paris
1\tAn ant can carry many times it's body weight making it v strong.\tAre ants strong?\tYes\tYes
""".strip("\n"))
ds = tf.data.Dataset.from_tensor_slices(fake_data.split("\n")[1:-1])
ds = preprocessors.preprocess_tsv_to_qa(ds)
gold_data = [{
"id": "0",
"question": "What is the capital of France ? ",
"answer": "Paris",
"answers": ["Paris", "paris"],
"context": "The capital of France is Paris",
"inputs":
"question: What is the capital of France ? context: The capital of"
" France is Paris",
"targets": "Paris"
}, {
"id":
"1",
"question":
"Are ants strong ? ",
"answer":
"Yes",
"answers": ["Yes"],
"context":
"An ant can carry many times it ' s body weight making it v strong . ",
"inputs":
"question: Are ants strong ? context: An ant can carry many times "
"it ' s body weight making it v strong . ",
"targets":
"Yes"
}]
for ex, gold in zip(ds, gold_data):
self.assertEqual(ex["id"].numpy().decode("utf-8"), gold["id"])
self.assertEqual(ex["question"].numpy().decode("utf-8"), gold["question"])
self.assertEqual(ex["answer"].numpy().decode("utf-8"), gold["answer"])
self.assertEqual(ex["context"].numpy().decode("utf-8"), gold["context"])
self.assertEqual(ex["targets"].numpy().decode("utf-8"), gold["targets"])
for answer, gold_answer in zip(ex["answers"].numpy(), gold["answers"]):
self.assertEqual(answer.decode("utf-8"), gold_answer)
def test_preprocess_text_generation(self):
example = tf.data.Dataset.from_tensor_slices({
"source_aligned": {
"en": ["english input"],
"es": ["spanish input"]
},
"target_aligned": {
"en": ["english target"],
"es": ["spanish target"]
}
})
processed_example = preprocessors.preprocess_text_generation(
example,
source_key="source_aligned",
target_key="target_aligned",
task_name=None,
prefix="summarize:",
source_nested_key="en",
target_nested_key="es",
)
test_utils.assert_dataset(processed_example, {
"inputs": "summarize: english input",
"targets": "spanish target"
})
class BARTTaskTest(parameterized.TestCase):
@parameterized.named_parameters(
dict(testcase_name="text_infilling",
preprocessor=preprocessors.text_infilling),
dict(testcase_name="token_deletion",
preprocessor=preprocessors.token_deletion))
def test_inputs_shorter_than_targets(self, preprocessor):
ds = create_fake_text_dataset()
ds = preprocessor(ds,
{"inputs": INPUTS_SIZE + 1, "targets": TARGETS_SIZE + 1},
TEST_T5_FEATURES,
noise_density=0.5)
for ex in tfds.as_numpy(ds):
self.assertLess(ex["inputs"].shape[0], ex["targets"].shape[0])
@parameterized.named_parameters(
dict(testcase_name="text_infilling",
preprocessor=preprocessors.text_infilling),
dict(testcase_name="token_deletion",
preprocessor=preprocessors.token_deletion))
def test_extra_id_not_in_targets(self, preprocessor):
ds = create_fake_text_dataset()
ds = preprocessor(ds,
{"inputs": INPUTS_SIZE + 1, "targets": TARGETS_SIZE + 1},
TEST_T5_FEATURES,
noise_density=0.5)
vocab = TEST_T5_FEATURES["targets"].vocabulary
for ex in tfds.as_numpy(ds):
targets_text = vocab.decode(ex["targets"].tolist())
self.assertNotIn("extra_id", targets_text)
@parameterized.named_parameters(
dict(testcase_name="text_infilling",
preprocessor=preprocessors.text_infilling),
dict(testcase_name="token_deletion",
preprocessor=preprocessors.token_deletion))
def test_target_tokens_match_original_tokens(self, preprocessor):
ds = create_fake_text_dataset()
processed_ds = preprocessor(
ds,
{"inputs": INPUTS_SIZE + 1, "targets": TARGETS_SIZE + 1},
TEST_T5_FEATURES,
noise_density=0.5)
for processed_ex, ex in zip(tfds.as_numpy(processed_ds), tfds.as_numpy(ds)):
np.testing.assert_array_equal(processed_ex["targets"], ex["targets"])
def test_extra_id_not_in_token_deletion_inputs(self):
ds = create_fake_text_dataset()
ds = preprocessors.token_deletion(
ds,
{"inputs": INPUTS_SIZE + 1, "targets": TARGETS_SIZE + 1},
TEST_T5_FEATURES,
noise_density=0.5)
vocab = TEST_T5_FEATURES["inputs"].vocabulary
for ex in tfds.as_numpy(ds):
inputs_text = vocab.decode(ex["inputs"].tolist())
self.assertNotIn("extra_id", inputs_text)
def test_extra_id_in_text_infilling_inputs(self):
ds = create_fake_text_dataset()
ds = preprocessors.text_infilling(
ds,
{"inputs": INPUTS_SIZE + 1, "targets": TARGETS_SIZE + 1},
TEST_T5_FEATURES,
noise_density=0.5)
vocab = TEST_T5_FEATURES["inputs"].vocabulary
for ex in tfds.as_numpy(ds):
inputs_text = vocab.decode(ex["inputs"].tolist())
self.assertIn("extra_id", inputs_text)
if __name__ == "__main__":
tf.test.main()
| 2.140625 | 2 |
src/visualize_data.py | h1yung/PyUpBit | 3 | 12789193 | <reponame>h1yung/PyUpBit
import sys
import numpy as np
import matplotlib.pyplot as plt
data = np.load(sys.argv[1])
plt.hist(data, edgecolor='k')
plt.show()
| 2.28125 | 2 |
iot/amqp/header/impl/AMQPAttach.py | mobius-software-ltd/iotbroker.cloud-python-client | 2 | 12789194 | <filename>iot/amqp/header/impl/AMQPAttach.py
"""
# Mobius Software LTD
# Copyright 2015-2018, Mobius Software LTD
#
# This is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation; either version 2.1 of
# the License, or (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this software; if not, write to the Free
# Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA, or see the FSF site: http://www.fsf.org.
"""
from iot.amqp.avps.AMQPType import *
from iot.amqp.avps.HeaderCode import *
from iot.amqp.avps.ReceiveCode import *
from iot.amqp.avps.RoleCode import *
from iot.amqp.avps.SendCode import *
from iot.amqp.constructor.DescribedConstructor import *
from iot.amqp.header.api.AMQPHeader import *
from iot.amqp.header.api.AMQPUnwrapper import *
from iot.amqp.header.api.AMQPWrapper import *
from iot.amqp.terminus.AMQPSource import *
from iot.amqp.terminus.AMQPTarget import *
from iot.amqp.tlv.api.TLVAmqp import *
from iot.amqp.tlv.impl.TLVFixed import *
from iot.amqp.tlv.impl.TLVList import *
from iot.amqp.wrappers.AMQPSymbol import *
class AMQPAttach(AMQPHeader):
def __init__(self,code,doff,type,channel,name,handle,role,sndSettleMode,rcvSettleMode,source,target,unsettled,incompleteUnsettled,initialDeliveryCount,maxMessageSize,offeredCapabilities,desiredCapabilities,properties):
if code is not None:
self.code = code
else:
self.code = HeaderCode.ATTACH
if doff is not None:
self.doff = doff
else:
self.doff = 2
if type is not None:
self.type = type
else:
self.type = 0
if channel is not None:
self.channel = channel
else:
self.channel = 0
self.name = name
self.handle = handle
self.role = role
self.sndSettleMode = sndSettleMode
self.rcvSettleMode = rcvSettleMode
self.source = source
self.target = target
self.unsettled = unsettled
self.incompleteUnsettled = incompleteUnsettled
self.initialDeliveryCount = initialDeliveryCount
self.maxMessageSize = maxMessageSize
self.offeredCapabilities = offeredCapabilities
self.desiredCapabilities = desiredCapabilities
self.properties = properties
def toArgumentsList(self):
list = TLVList(None,None)
wrapper = AMQPWrapper()
if self.name is None:
raise ValueError("Attach header's name can't be null")
list.addElement(0, wrapper.wrapString(self.name))
if self.handle is None:
raise ValueError("Attach header's handle can't be null")
list.addElement(1, wrapper.wrap(self.handle))
if self.role is None:
raise ValueError("Attach header's role can't be null")
list.addElement(2, wrapper.wrap(self.role.value))
if self.sndSettleMode is not None:
list.addElement(3, wrapper.wrap(self.sndSettleMode))
if self.rcvSettleMode is not None:
list.addElement(4, wrapper.wrap(self.rcvSettleMode))
if self.source is not None and isinstance(self.source,AMQPSource):
list.addElement(5,self.source.toArgumentsList())
if self.target is not None and isinstance(self.target,AMQPTarget):
list.addElement(6,self.target.toArgumentsList())
if self.unsettled is not None and len(self.unsettled) > 0:
list.addElement(7, wrapper.wrapMap(self.unsettled))
if self.incompleteUnsettled is not None:
list.addElement(8,wrapper.wrap(self.incompleteUnsettled))
if self.initialDeliveryCount is not None:
list.addElement(9,wrapper.wrap(self.initialDeliveryCount))
elif self.role == RoleCode.SENDER:
raise ValueError("Sender's attach header must contain a non-null initial-delivery-count value")
if self.maxMessageSize is not None:
list.addElement(10,wrapper.wrap(self.maxMessageSize))
if self.offeredCapabilities is not None and len(self.offeredCapabilities) > 0:
list.addElement(11, wrapper.wrapArray(self.offeredCapabilities))
if self.desiredCapabilities is not None and len(self.desiredCapabilities) > 0:
list.addElement(12, wrapper.wrapArray(self.desiredCapabilities))
if self.properties is not None and len(self.properties) > 0:
list.addElement(13, wrapper.wrapMap(self.properties))
constructor = DescribedConstructor(list.getCode(),TLVFixed(AMQPType.SMALL_ULONG, self.code.value))
list.setConstructor(constructor)
return list
def fromArgumentsList(self, list):
unwrapper = AMQPUnwrapper()
if isinstance(list, TLVList):
size = len(list.getList())
if size < 3:
raise ValueError('Received malformed Attach header: mandatory fields name, handle and role must not be null')
if size > 14:
raise ValueError('Received malformed Attach header. Invalid number of arguments: ' + str(size))
if size > 0:
element = list.getList()[0]
if element is not None and not element.isNull():
self.name = unwrapper.unwrapString(element)
else:
raise ValueError("Received malformed Attach header: name can't be null")
if size > 1:
element = list.getList()[1]
if element is not None and not element.isNull():
self.handle = unwrapper.unwrapUInt(element)
else:
raise ValueError("Received malformed Attach header: handle can't be null")
if size > 2:
element = list.getList()[2]
if element is not None and not element.isNull():
self.role = RoleCode(unwrapper.unwrapBool(element))
else:
raise ValueError("Received malformed Attach header: role can't be null")
if size > 3:
element = list.getList()[3]
if element is not None and not element.isNull():
self.sndSettleMode = SendCode(unwrapper.unwrapUByte(element))
if size > 4:
element = list.getList()[4]
if element is not None and not element.isNull():
self.rcvSettleMode = ReceiveCode(unwrapper.unwrapUByte(element))
if size > 5:
element = list.getList()[5]
if element is not None and not element.isNull() and isinstance(element,TLVAmqp):
code = element.getCode()
if code not in (AMQPType.LIST_0,AMQPType.LIST_8,AMQPType.LIST_32):
raise ValueError('Expected type SOURCE - received: ' + str(element.getCode()))
self.source = AMQPSource(None,None,None,None,None,None,None,None,None,None,None)
self.source.fromArgumentsList(element)
if size > 6:
element = list.getList()[6]
if element is not None and not element.isNull() and isinstance(element, TLVAmqp):
code = element.getCode()
if code not in (AMQPType.LIST_0, AMQPType.LIST_8, AMQPType.LIST_32):
raise ValueError('Expected type TARGET - received: ' + str(element.getCode()))
self.target = AMQPTarget(None, None, None, None, None, None, None)
self.target.fromArgumentsList(element)
if size > 7:
element = list.getList()[7]
if element is not None and not element.isNull():
self.unsettled = unwrapper.unwrapMap(element)
if size > 8:
element = list.getList()[8]
if element is not None and not element.isNull():
self.incompleteUnsettled = unwrapper.unwrapBool(element)
if size > 9:
element = list.getList()[9]
if element is not None and not element.isNull():
self.initialDeliveryCount = unwrapper.unwrapUInt(element)
elif self.role == RoleCode.SENDER:
raise ValueError('Received an attach header with a null initial-delivery-count')
if size > 10:
element = list.getList()[10]
if element is not None and not element.isNull():
self.maxMessageSize = unwrapper.unwrapULong(element)
if size > 11:
element = list.getList()[11]
if element is not None and not element.isNull():
self.offeredCapabilities = unwrapper.unwrapArray(element)
if size > 12:
element = list.getList()[12]
if element is not None and not element.isNull():
self.desiredCapabilities = unwrapper.unwrapArray(element)
if size > 13:
element = list.getList()[13]
if element is not None and not element.isNull():
self.properties = unwrapper.unwrapMap(element)
def toString(self):
return "AMQPAttach [name=" + str(self.name) + ", handle=" + str(self.handle) + ", role=" + str(self.role) + ", sndSettleMode=" + str(self.sndSettleMode) + ", rcvSettleMode=" + str(self.rcvSettleMode) + ", source=" + str(self.source) + ", target=" + str(self.target) + ", unsettled=" + str(self.unsettled) + ", incompleteUnsettled=" + str(self.incompleteUnsettled) + ", initialDeliveryCount=" + str(self.initialDeliveryCount) + ", maxMessageSize=" + str(self.maxMessageSize) + ", offeredCapabilities=" + str(self.offeredCapabilities) + ", desiredCapabilities=" + str(self.desiredCapabilities) + ", properties=" + str(self.properties) + ", code=" + str(self.code) + ", doff=" + str(self.doff) + ", type=" + str(self.type) + ", channel=" + str(self.channel) + "]"
def setName(self, name):
self.name = name
def getName(self):
return self.name
def setHandle(self, handle):
self.handle = handle
def getHandle(self):
return self.handle
def setRole(self, role):
self.role = role
def getRole(self):
return self.role
def setSndSettleMode(self, sndSettleMode):
self.sndSettleMode = sndSettleMode
def getSndSettleMode(self):
return self.sndSettleMode
def setRcvSettleMode(self, rcvSettleMode):
self.rcvSettleMode = rcvSettleMode
def getRcvSettleMode(self):
return self.rcvSettleMode
def setSource(self, source):
self.source = source
def getSource(self):
return self.source
def setTarget(self, target):
self.target = target
def getTarget(self):
return self.target
def setUnsettled(self, unsettled):
self.unsettled = unsettled
def getUnsettled(self):
return self.unsettled
def setIncompleteUnsettled(self, incompleteUnsettled):
self.incompleteUnsettled = incompleteUnsettled
def getIncompleteUnsettled(self):
return self.incompleteUnsettled
def setInitialDeliveryCount(self, initialDeliveryCount):
self.initialDeliveryCount = initialDeliveryCount
def getInitialDeliveryCount(self):
return self.initialDeliveryCount
def setMaxMessageSize(self, maxMessageSize):
self.maxMessageSize = maxMessageSize
def getMaxMessageSize(self):
return self.maxMessageSize
def setOfferedCapabilities(self, offeredCapabilities):
self.offeredCapabilities = offeredCapabilities
def getOfferedCapabilities(self):
return self.offeredCapabilities
def setDesiredCapabilities(self, desiredCapabilities):
self.desiredCapabilities = desiredCapabilities
def getDesiredCapabilities(self):
return self.desiredCapabilities
def setProperties(self, properties):
self.properties = properties
def getProperties(self):
return self.properties
| 1.515625 | 2 |
multi_dimensional_clustering/__init__.py | PieterMey/multi_dimensional_clustering | 1 | 12789195 | from .multi_D_clustering import MD_clustering
| 0.996094 | 1 |
_solutions/pandas/series/pandas_series_attributes.py | sages-pl/2022-01-pythonsqlalchemy-aptiv | 0 | 12789196 | <gh_stars>0
result = {
'number of dimensions': DATA.ndim,
'number of elements': DATA.size,
'data type': DATA.dtype,
'shape': DATA.shape,
}
| 1.859375 | 2 |
src/dashboard/settings/travis.py | travishen/aprp | 1 | 12789197 | from os import environ
from .base import *
# provided via the Travis CI dashboard
DAILYTRAN_BUILDER_API['amis'] = environ['AMIS_URL']
DAILYTRAN_BUILDER_API['apis'] = environ['APIS_URL']
DAILYTRAN_BUILDER_API['efish'] = environ['EFISH_URL']
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'travis_ci_test',
'USER': 'postgres',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '',
}
}
| 1.453125 | 1 |
neural_compressor/ux/components/model/shape.py | intel/neural-compressor | 172 | 12789198 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data shape class."""
from typing import Any, Dict, List, Optional, Union
from neural_compressor.ux.utils.json_serializer import JsonSerializer
class Shape(JsonSerializer):
"""Data shape definition."""
def __init__(self, shape: Optional[str] = "", trusted: bool = False) -> None:
"""Object construction."""
super().__init__()
self.shape = shape
self.trusted = trusted
def serialize(
self,
serialization_type: str = "default",
) -> Union[Dict[str, Any], List[Dict[str, Any]]]:
"""Serialize Shape class to dict."""
result = {}
for key, value in self.__dict__.items():
if key in self._skip:
continue
result.update({key: value})
return result
| 2.53125 | 3 |
tests/test_default.py | jonhealy1/stac-validator | 2 | 12789199 | """
Description: Test the default which validates core and extensions
"""
__authors__ = "<NAME>", "<NAME>"
from stac_validator import stac_validator
def test_default_v070():
stac_file = "https://radarstac.s3.amazonaws.com/stac/catalog.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"version": "0.7.0",
"path": "https://radarstac.s3.amazonaws.com/stac/catalog.json",
"asset_type": "CATALOG",
"validation_method": "default",
"schema": ["https://cdn.staclint.com/v0.7.0/catalog.json"],
"valid_stac": True,
}
]
def test_default_item_local_v080():
stac_file = "tests/test_data/v080/items/sample-full.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"version": "0.8.0",
"path": "tests/test_data/v080/items/sample-full.json",
"schema": [
"https://cdn.staclint.com/v0.8.0/extension/eo.json",
"https://cdn.staclint.com/v0.8.0/item.json",
],
"asset_type": "ITEM",
"validation_method": "default",
"valid_stac": True,
}
]
def test_default_v090():
stac = stac_validator.StacValidate("tests/test_data/v090/items/good_item_v090.json")
stac.run()
print(stac.message)
assert stac.message == [
{
"version": "0.9.0",
"path": "tests/test_data/v090/items/good_item_v090.json",
"schema": [
"https://cdn.staclint.com/v0.9.0/extension/eo.json",
"https://cdn.staclint.com/v0.9.0/extension/view.json",
"https://cdn.staclint.com/v0.9.0/item.json",
],
"asset_type": "ITEM",
"validation_method": "default",
"valid_stac": True,
}
]
def test_default_v1beta1():
stac_file = "tests/test_data/1beta1/sentinel2.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"path": "tests/test_data/1beta1/sentinel2.json",
"asset_type": "COLLECTION",
"version": "1.0.0-beta.1",
"validation_method": "default",
"schema": ["https://cdn.staclint.com/v1.0.0-beta.1/collection.json"],
"valid_stac": True,
}
]
def test_default_proj_v1b2():
stac_file = "https://earth-search.aws.element84.com/v0/collections/sentinel-s2-l1c/items/S2A_51SXT_20210415_0_L1C"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"version": "1.0.0-beta.2",
"path": "https://earth-search.aws.element84.com/v0/collections/sentinel-s2-l1c/items/S2A_51SXT_20210415_0_L1C",
"schema": [
"https://cdn.staclint.com/v1.0.0-beta.1/extension/eo.json",
"https://cdn.staclint.com/v1.0.0-beta.1/extension/view.json",
"https://cdn.staclint.com/v1.0.0-beta.1/extension/projection.json",
"https://schemas.stacspec.org/v1.0.0-beta.2/item-spec/json-schema/item.json",
],
"asset_type": "ITEM",
"validation_method": "default",
"valid_stac": True,
}
]
def test_default_simple_v1rc2():
stac_file = "tests/test_data/1rc2/simple-item.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"path": "tests/test_data/1rc2/simple-item.json",
"asset_type": "ITEM",
"version": "1.0.0-rc.2",
"validation_method": "default",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.2/item-spec/json-schema/item.json"
],
"valid_stac": True,
}
]
def test_default_extended_v1rc2():
stac_file = "tests/test_data/1rc2/extended-item.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"version": "1.0.0-rc.2",
"path": "tests/test_data/1rc2/extended-item.json",
"schema": [
"https://stac-extensions.github.io/eo/v1.0.0/schema.json",
"https://stac-extensions.github.io/projection/v1.0.0/schema.json",
"https://stac-extensions.github.io/scientific/v1.0.0/schema.json",
"https://stac-extensions.github.io/view/v1.0.0/schema.json",
"https://stac-extensions.github.io/remote-data/v1.0.0/schema.json",
"https://schemas.stacspec.org/v1.0.0-rc.2/item-spec/json-schema/item.json",
],
"asset_type": "ITEM",
"validation_method": "default",
"valid_stac": True,
}
]
def test_default_catalog_v1rc2():
stac_file = "tests/test_data/1rc2/catalog.json"
stac = stac_validator.StacValidate(stac_file)
stac.run()
assert stac.message == [
{
"path": "tests/test_data/1rc2/catalog.json",
"asset_type": "CATALOG",
"version": "1.0.0-rc.2",
"validation_method": "default",
"schema": [
"https://schemas.stacspec.org/v1.0.0-rc.2/catalog-spec/json-schema/catalog.json"
],
"valid_stac": True,
}
]
| 2.265625 | 2 |
code/deep-high-resolution-net.pytorch/lib/core/function.py | SomaKishimoto/AwA-Pose | 12 | 12789200 | <filename>code/deep-high-resolution-net.pytorch/lib/core/function.py
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import logging
import os
import numpy as np
import torch
from core.evaluate import accuracy, accuracy_bbox
from core.inference import get_final_preds
from utils.transforms import flip_back
from utils.vis import save_debug_images, save_debug_images_w_bbox
logger = logging.getLogger(__name__)
def train(config, train_loader, model, criterion, optimizer, epoch,
output_dir, tb_log_dir, writer_dict):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target, target_weight, meta) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# compute output
outputs = model(input)
target = target.cuda(non_blocking=True)
target_weight = target_weight.cuda(non_blocking=True)
if isinstance(outputs, list):
loss = criterion(outputs[0], target, target_weight)
for output in outputs[1:]:
loss += criterion(output, target, target_weight)
else:
output = outputs
loss = criterion(output, target, target_weight)
# loss = criterion(output, target, target_weight)
# compute gradient and do update step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure accuracy and record loss
losses.update(loss.item(), input.size(0))
#_, avg_acc, cnt, pred = accuracy(output.detach().cpu().numpy(), target.detach().cpu().numpy())
bbox = meta['bbox'].numpy()
bbox_w = bbox[:, 1,0] - bbox[:, 0,0]
bbox_h = bbox[:, 1,1] - bbox[:, 0,1]
diagonal = np.sqrt(bbox_w * bbox_w + bbox_h * bbox_h)
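        # PCK-style threshold: a predicted keypoint counts as correct if it falls
        # within 0.1% of the ground-truth bounding-box diagonal (thr=0.001*diagonal).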
_, avg_acc, cnt, pred = accuracy_bbox(output.detach().cpu().numpy(),
target.detach().cpu().numpy(), scale = None, thr=0.001*diagonal)
acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % config.PRINT_FREQ == 0:
msg = 'Epoch: [{0}][{1}/{2}]\t' \
'Time {batch_time.val:.3f}s ({batch_time.avg:.3f}s)\t' \
'Speed {speed:.1f} samples/s\t' \
'Data {data_time.val:.3f}s ({data_time.avg:.3f}s)\t' \
'Loss {loss.val:.5f} ({loss.avg:.5f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
speed=input.size(0)/batch_time.val,
data_time=data_time, loss=losses, acc=acc)
logger.info(msg)
writer = writer_dict['writer']
global_steps = writer_dict['train_global_steps']
writer.add_scalar('train_loss', losses.val, global_steps)
writer.add_scalar('train_acc', acc.val, global_steps)
writer_dict['train_global_steps'] = global_steps + 1
prefix = '{}_{}'.format(os.path.join(output_dir, 'train'), i)
save_debug_images(config, input, meta, target, pred*4, output,
prefix)
def validate_old(config, val_loader, val_dataset, model, criterion, output_dir,
tb_log_dir, writer_dict=None, thresh = 0.0005, save_pickle=True):
batch_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# switch to evaluate mode
model.eval()
num_samples = len(val_dataset)
all_preds = np.zeros(
(num_samples, config.MODEL.NUM_JOINTS, 3),
dtype=np.float32
)
all_boxes = np.zeros((num_samples, 6))
all_orig_boxes = np.zeros((num_samples, 2,2))
image_path = []
filenames = []
imgnums = []
idx = 0
ind_accs = []
animal_list = [
'antelope',
'bobcat',
'buffalo',
'chihuahua',
'collie',
'cow',
'dalmatian',
'deer',
'elephant',
'fox',
'german+shepherd',
'giant+panda',
'giraffe',
'grizzly+bear',
'hippopotamus',
'horse',
'leopard',
'lion',
'moose',
'otter',
'ox',
'persian+cat',
'pig',
'polar+bear',
'rabbit',
'raccoon',
'rhinoceros',
'sheep',
'siamese+cat',
'skunk',
'squirrel',
'tiger',
'weasel',
'wolf',
'zebra']
animal_ids = list(range(8,36))
crnt_animal_id = 8
with torch.no_grad():
end = time.time()
for i, (input, target, target_weight, meta) in enumerate(val_loader):
# compute output
if crnt_animal_id > 0 and animal_list[crnt_animal_id - 1] not in meta['image'][0]:
continue
outputs = model(input)
if isinstance(outputs, list):
output = outputs[-1]
else:
output = outputs
if config.TEST.FLIP_TEST:
# this part is ugly, because pytorch has not supported negative index
# input_flipped = model(input[:, :, :, ::-1])
input_flipped = np.flip(input.cpu().numpy(), 3).copy()
input_flipped = torch.from_numpy(input_flipped).cuda()
outputs_flipped = model(input_flipped)
if isinstance(outputs_flipped, list):
output_flipped = outputs_flipped[-1]
else:
output_flipped = outputs_flipped
output_flipped = flip_back(output_flipped.cpu().numpy(),
val_dataset.flip_pairs)
output_flipped = torch.from_numpy(output_flipped.copy()).cuda()
# feature is not aligned, shift flipped heatmap for higher accuracy
if config.TEST.SHIFT_HEATMAP:
output_flipped[:, :, :, 1:] = \
output_flipped.clone()[:, :, :, 0:-1]
output = (output + output_flipped) * 0.5
target = target.cuda(non_blocking=True)
target_weight = target_weight.cuda(non_blocking=True)
loss = criterion(output, target, target_weight)
c = meta['center'].numpy()
s = meta['scale'].numpy()
score = meta['score'].numpy()
bbox = meta['bbox'].numpy()
#import ipdb; ipdb.set_trace()
#exit(0)
num_images = input.size(0)
# measure accuracy and record loss
losses.update(loss.item(), num_images)
#w_h_original = (s[:, [1, 0]] * 200.0) / 1.25
#diagonal = np.sqrt(s[:, 0] * s[:, 0] + s[:, 1] * s[:, 1])
bbox_w = bbox[:, 1,0] - bbox[:, 0,0]
bbox_h = bbox[:, 1,1] - bbox[:, 0,1]
diagonal = np.sqrt(bbox_w * bbox_w + bbox_h * bbox_h)
ind_acc, avg_acc, cnt, pred = accuracy_bbox(output.cpu().numpy(),
target.cpu().numpy(), scale = s[:, [1, 0]], thr=thresh*diagonal) # swapping (w,h) to (h,w)
ind_accs.append(ind_acc[1:])
#print(avg_acc)
acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
preds, maxvals = get_final_preds(
config, output.clone().cpu().numpy(), c, s)
all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
all_preds[idx:idx + num_images, :, 2:3] = maxvals
# double check this all_boxes parts
all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1)
all_boxes[idx:idx + num_images, 5] = score
image_path.extend(meta['image'])
#saving pickles
#if save_pickle:
# save_pred_pickle(config, input, meta, target, pred*4, output, prefix)
idx += num_images
if False:#i % config.PRINT_FREQ == 0:
msg = 'Test: [{0}/{1}]\t' \
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time,
loss=losses, acc=acc)
logger.info(msg)
prefix = '{}_{}'.format(
os.path.join(output_dir, 'val'), i
)
#save_debug_images(config, input, meta, target, pred*4, output, prefix)
save_debug_images_w_bbox(config, input, meta, target, pred*4, output, prefix)
#import ipdb; ipdb.set_trace()
#exit(0)
if False:
name_values, perf_indicator = val_dataset.evaluate(
config, all_preds, output_dir, all_boxes, image_path,
filenames, imgnums
)
model_name = config.MODEL.NAME
if isinstance(name_values, list):
for name_value in name_values:
_print_name_value(name_value, model_name)
else:
_print_name_value(name_values, model_name)
if writer_dict:
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
writer.add_scalar(
'valid_loss',
losses.avg,
global_steps
)
writer.add_scalar(
'valid_acc',
acc.avg,
global_steps
)
if isinstance(name_values, list):
for name_value in name_values:
writer.add_scalars(
'valid',
dict(name_value),
global_steps
)
else:
writer.add_scalars(
'valid',
dict(name_values),
global_steps
)
writer_dict['valid_global_steps'] = global_steps + 1
else:
perf_indicator = 100
#return perf_indicator
print('Average PCK @ '+ str(thresh) +'= ', acc.avg)
ind_acc = [0]*len(ind_acc[1:])
ind_acc_count = [0]*len(ind_acc)
for ind_batch in ind_accs:
for i, val in enumerate(ind_batch):
if val >= 0:
ind_acc_count[i] += 1
ind_acc[i] += val
for i in range(len(ind_acc)):
#import ipdb; ipdb.set_trace()
#exit(0)
if ind_acc_count[i] == 0:
ind_acc[i] = -1
else:
ind_acc[i] = ind_acc[i] / float(ind_acc_count[i])
# import ipdb; ipdb.set_trace()
# exit(0)
print("Independent keypoint accuracy: ",ind_acc)
with open('Individual_Keypoint_Accuracy.txt', 'w') as f:
for item in ind_acc:
f.write("%s\n" % item)
return acc.avg
def validate(config, val_loader, val_dataset, model, criterion, output_dir,
tb_log_dir, writer_dict=None, thresh = 0.0005, save_pickle=True):
animal_list = [
'antelope',
'bobcat',
'buffalo',
'chihuahua',
'collie',
'cow',
'dalmatian',
'deer',
'elephant',
'fox',
'german+shepherd',
'giant+panda',
'giraffe',
'grizzly+bear',
'hippopotamus',
'horse',
'leopard',
'lion',
'moose',
'otter',
'ox',
'persian+cat',
'pig',
'polar+bear',
'rabbit',
'raccoon',
'rhinoceros',
'sheep',
'siamese+cat',
'skunk',
'squirrel',
'tiger',
'weasel',
'wolf',
'zebra',
'all']
animal_ids = list(range(1,36))
#crnt_animal_id = 8
for crnt_animal_id in animal_ids:
batch_time = AverageMeter()
losses = AverageMeter()
acc = AverageMeter()
# switch to evaluate mode
model.eval()
num_samples = len(val_dataset)
all_preds = np.zeros(
(num_samples, config.MODEL.NUM_JOINTS, 3),
dtype=np.float32
)
all_boxes = np.zeros((num_samples, 6))
all_orig_boxes = np.zeros((num_samples, 2,2))
image_path = []
filenames = []
imgnums = []
idx = 0
ind_accs = []
with torch.no_grad():
end = time.time()
for i, (input, target, target_weight, meta) in enumerate(val_loader):
# compute output
# import ipdb; ipdb.set_trace()
# exit(0)
if crnt_animal_id > 0 and not meta['image'][0].split('/')[-1].startswith(animal_list[crnt_animal_id - 1]):
continue
outputs = model(input)
if isinstance(outputs, list):
output = outputs[-1]
else:
output = outputs
if config.TEST.FLIP_TEST:
# this part is ugly, because pytorch has not supported negative index
# input_flipped = model(input[:, :, :, ::-1])
input_flipped = np.flip(input.cpu().numpy(), 3).copy()
input_flipped = torch.from_numpy(input_flipped).cuda()
outputs_flipped = model(input_flipped)
if isinstance(outputs_flipped, list):
output_flipped = outputs_flipped[-1]
else:
output_flipped = outputs_flipped
output_flipped = flip_back(output_flipped.cpu().numpy(),
val_dataset.flip_pairs)
output_flipped = torch.from_numpy(output_flipped.copy()).cuda()
# feature is not aligned, shift flipped heatmap for higher accuracy
if config.TEST.SHIFT_HEATMAP:
output_flipped[:, :, :, 1:] = \
output_flipped.clone()[:, :, :, 0:-1]
output = (output + output_flipped) * 0.5
target = target.cuda(non_blocking=True)
target_weight = target_weight.cuda(non_blocking=True)
loss = criterion(output, target, target_weight)
c = meta['center'].numpy()
s = meta['scale'].numpy()
score = meta['score'].numpy()
bbox = meta['bbox'].numpy()
num_images = input.size(0)
# measure accuracy and record loss
losses.update(loss.item(), num_images)
#w_h_original = (s[:, [1, 0]] * 200.0) / 1.25
#diagonal = np.sqrt(s[:, 0] * s[:, 0] + s[:, 1] * s[:, 1])
bbox_w = bbox[:, 1,0] - bbox[:, 0,0]
bbox_h = bbox[:, 1,1] - bbox[:, 0,1]
diagonal = np.sqrt(bbox_w * bbox_w + bbox_h * bbox_h)
ind_acc, avg_acc, cnt, pred = accuracy_bbox(output.cpu().numpy(),
target.cpu().numpy(), scale = s[:, [1, 0]], thr=thresh*diagonal) # swapping (w,h) to (h,w)
ind_accs.append(ind_acc[1:])
#print(avg_acc)
acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
preds, maxvals = get_final_preds(
config, output.clone().cpu().numpy(), c, s)
all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
all_preds[idx:idx + num_images, :, 2:3] = maxvals
# double check this all_boxes parts
all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1)
all_boxes[idx:idx + num_images, 5] = score
image_path.extend(meta['image'])
#saving pickles
#if save_pickle:
# save_pred_pickle(config, input, meta, target, pred*4, output, prefix)
idx += num_images
if avg_acc < 0.35:#i % config.PRINT_FREQ == 0:
msg = 'Test: [{0}/{1}]\t' \
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
'Loss {loss.val:.4f} ({loss.avg:.4f})\t' \
'Accuracy {acc.val:.3f} ({acc.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time,
loss=losses, acc=acc)
logger.info(msg)
prefix = '{}_{}'.format(
os.path.join(output_dir, 'bad', animal_list[crnt_animal_id - 1]), i
)
#save_debug_images(config, input, meta, target, pred*4, output, prefix)
# output heatmap size becomes 4 times smaller than the input image
save_debug_images_w_bbox(config, input, meta, target, pred*4, output, prefix)
# import ipdb; ipdb.set_trace()
# exit(0)
if False:
name_values, perf_indicator = val_dataset.evaluate(
config, all_preds, output_dir, all_boxes, image_path,
filenames, imgnums
)
model_name = config.MODEL.NAME
if isinstance(name_values, list):
for name_value in name_values:
_print_name_value(name_value, model_name)
else:
_print_name_value(name_values, model_name)
if writer_dict:
writer = writer_dict['writer']
global_steps = writer_dict['valid_global_steps']
writer.add_scalar(
'valid_loss',
losses.avg,
global_steps
)
writer.add_scalar(
'valid_acc',
acc.avg,
global_steps
)
if isinstance(name_values, list):
for name_value in name_values:
writer.add_scalars(
'valid',
dict(name_value),
global_steps
)
else:
writer.add_scalars(
'valid',
dict(name_values),
global_steps
)
writer_dict['valid_global_steps'] = global_steps + 1
else:
perf_indicator = 100
#return perf_indicator
print('Average PCK @ '+ str(thresh) +'= ', acc.avg)
ind_acc = [0]*len(ind_acc[1:])
ind_acc_count = [0]*len(ind_acc)
for ind_batch in ind_accs:
for i, val in enumerate(ind_batch):
if val >= 0:
ind_acc_count[i] += 1
ind_acc[i] += val
for i in range(len(ind_acc)):
#import ipdb; ipdb.set_trace()
#exit(0)
if ind_acc_count[i] == 0:
ind_acc[i] = -1
else:
ind_acc[i] = ind_acc[i] / float(ind_acc_count[i])
# import ipdb; ipdb.set_trace()
# exit(0)
print("Independent keypoint accuracy of " + animal_list[crnt_animal_id - 1] +": ",ind_acc)
with open('Individual_Keypoint_Accuracy_'+ animal_list[crnt_animal_id - 1] +'.txt', 'w') as f:
for item in ind_acc:
f.write("%s\n" % item)
return acc.avg
def generate_predited_keypoints_and_vis(config, img, model, output_dir,
tb_log_dir):
# switch to evaluate mode
model.eval()
num_samples = len(val_dataset)
all_preds = np.zeros(
(num_samples, config.MODEL.NUM_JOINTS, 3),
dtype=np.float32
)
all_boxes = np.zeros((num_samples, 6))
all_orig_boxes = np.zeros((num_samples, 2,2))
image_path = []
filenames = []
imgnums = []
idx = 0
ind_accs = []
with torch.no_grad():
end = time.time()
for i, (input, target, target_weight, meta) in enumerate(val_loader):
# compute output
outputs = model(input)
if isinstance(outputs, list):
output = outputs[-1]
else:
output = outputs
target = target.cuda(non_blocking=True)
target_weight = target_weight.cuda(non_blocking=True)
loss = criterion(output, target, target_weight)
c = meta['center'].numpy()
s = meta['scale'].numpy()
score = meta['score'].numpy()
bbox = meta['bbox'].numpy()
num_images = input.size(0)
# measure accuracy and record loss
losses.update(loss.item(), num_images)
#w_h_original = (s[:, [1, 0]] * 200.0) / 1.25
#diagonal = np.sqrt(s[:, 0] * s[:, 0] + s[:, 1] * s[:, 1])
bbox_w = bbox[:, 1,0] - bbox[:, 0,0]
bbox_h = bbox[:, 1,1] - bbox[:, 0,1]
diagonal = np.sqrt(bbox_w * bbox_w + bbox_h * bbox_h)
ind_acc, avg_acc, cnt, pred = accuracy_bbox(output.cpu().numpy(),
target.cpu().numpy(), scale = s[:, [1, 0]], thr=thresh*diagonal) # swapping (w,h) to (h,w)
ind_accs.append(ind_acc[1:])
#print(avg_acc)
acc.update(avg_acc, cnt)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
preds, maxvals = get_final_preds(
config, output.clone().cpu().numpy(), c, s)
all_preds[idx:idx + num_images, :, 0:2] = preds[:, :, 0:2]
all_preds[idx:idx + num_images, :, 2:3] = maxvals
# double check this all_boxes parts
all_boxes[idx:idx + num_images, 0:2] = c[:, 0:2]
all_boxes[idx:idx + num_images, 2:4] = s[:, 0:2]
all_boxes[idx:idx + num_images, 4] = np.prod(s*200, 1)
all_boxes[idx:idx + num_images, 5] = score
image_path.extend(meta['image'])
#saving pickles
#if save_pickle:
# save_pred_pickle(config, input, meta, target, pred*4, output, prefix)
idx += num_images
save_debug_images_w_bbox(config, input, meta, target, pred*4, output, prefix)
#import ipdb; ipdb.set_trace()
#exit(0)
# markdown format output
def _print_name_value(name_value, full_arch_name):
names = name_value.keys()
values = name_value.values()
num_values = len(name_value)
logger.info(
'| Arch ' +
' '.join(['| {}'.format(name) for name in names]) +
' |'
)
logger.info('|---' * (num_values+1) + '|')
if len(full_arch_name) > 15:
full_arch_name = full_arch_name[:8] + '...'
logger.info(
'| ' + full_arch_name + ' ' +
' '.join(['| {:.3f}'.format(value) for value in values]) +
' |'
)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count if self.count != 0 else 0
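# Illustrative arithmetic for the running mean above (not part of the pipeline):
# m = AverageMeter(); m.update(2, n=3); m.update(4)
# -> m.sum == 10, m.count == 4, m.avg == 2.5 (a sample-count-weighted mean).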
| 2.046875 | 2 |
ta_lab5/src/utils.py | vivekchand/TA_example_labs | 13 | 12789201
#!/usr/bin/env python
import rospy
import numpy as np
from std_msgs.msg import Header
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Point, Pose, PoseStamped, PoseArray, Quaternion, PolygonStamped,Polygon, Point32, PoseWithCovarianceStamped, PointStamped
import tf.transformations
import tf
import matplotlib.pyplot as plt
import time
class CircularArray(object):
""" Simple implementation of a circular array.
You can append to it any number of times but only "size" items will be kept
"""
def __init__(self, size):
self.arr = np.zeros(size)
self.ind = 0
self.num_els = 0
def append(self, value):
if self.num_els < self.arr.shape[0]:
self.num_els += 1
self.arr[self.ind] = value
self.ind = (self.ind + 1) % self.arr.shape[0]
def mean(self):
return np.mean(self.arr[:self.num_els])
def median(self):
return np.median(self.arr[:self.num_els])
class Timer:
""" Simple helper class to compute the rate at which something is called.
"smoothing" determines the size of the underlying circular array, which averages
out variations in call rate over time.
use timer.tick() to record an event
use timer.fps() to report the average event rate.
"""
def __init__(self, smoothing):
self.arr = CircularArray(smoothing)
self.last_time = time.time()
def tick(self):
t = time.time()
self.arr.append(1.0 / (t - self.last_time))
self.last_time = t
def fps(self):
return self.arr.mean()
def angle_to_quaternion(angle):
"""Convert an angle in radians into a quaternion _message_."""
return Quaternion(*tf.transformations.quaternion_from_euler(0, 0, angle))
def quaternion_to_angle(q):
"""Convert a quaternion _message_ into an angle in radians.
The angle represents the yaw.
This is not just the z component of the quaternion."""
x, y, z, w = q.x, q.y, q.z, q.w
roll, pitch, yaw = tf.transformations.euler_from_quaternion((x, y, z, w))
return yaw
def rotation_matrix(theta):
c, s = np.cos(theta), np.sin(theta)
return np.matrix([[c, -s], [s, c]])
def particle_to_pose(particle):
pose = Pose()
pose.position.x = particle[0]
pose.position.y = particle[1]
pose.orientation = angle_to_quaternion(particle[2])
return pose
def particles_to_poses(particles):
return map(particle_to_pose, particles)
def make_header(frame_id, stamp=None):
if stamp == None:
stamp = rospy.Time.now()
header = Header()
header.stamp = stamp
header.frame_id = frame_id
return header
def point(npt):
pt = Point32()
pt.x = npt[0]
pt.y = npt[1]
return pt
def points(arr):
return map(point, arr)
# converts map space coordinates to world space coordinates
def map_to_world_slow(x,y,t,map_info):
scale = map_info.resolution
angle = quaternion_to_angle(map_info.origin.orientation)
rot = rotation_matrix(angle)
trans = np.array([[map_info.origin.position.x],
[map_info.origin.position.y]])
map_c = np.array([[x],
[y]])
world = (rot*map_c) * scale + trans
return world[0,0],world[1,0],t+angle
def map_to_world(poses,map_info):
scale = map_info.resolution
angle = quaternion_to_angle(map_info.origin.orientation)
# rotate
# rotation
c, s = np.cos(angle), np.sin(angle)
# we need to store the x coordinates since they will be overwritten
temp = np.copy(poses[:,0])
poses[:,0] = c*poses[:,0] - s*poses[:,1]
poses[:,1] = s*temp + c*poses[:,1]
# scale
poses[:,:2] *= float(scale)
# translate
poses[:,0] += map_info.origin.position.x
poses[:,1] += map_info.origin.position.y
poses[:,2] += angle
def world_to_map(poses, map_info):
# equivalent to map_to_grid(world_to_map(poses))
# operates in place
scale = map_info.resolution
angle = -quaternion_to_angle(map_info.origin.orientation)
# translation
poses[:,0] -= map_info.origin.position.x
poses[:,1] -= map_info.origin.position.y
# scale
poses[:,:2] *= (1.0/float(scale))
# rotation
c, s = np.cos(angle), np.sin(angle)
# we need to store the x coordinates since they will be overwritten
temp = np.copy(poses[:,0])
poses[:,0] = c*poses[:,0] - s*poses[:,1]
poses[:,1] = s*temp + c*poses[:,1]
poses[:,2] += angle
# converts world space coordinates to map space coordinates
def world_to_map_slow(x,y,t, map_info):
scale = map_info.resolution
angle = quaternion_to_angle(map_info.origin.orientation)
rot = rotation_matrix(-angle)
trans = np.array([[map_info.origin.position.x],
[map_info.origin.position.y]])
world = np.array([[x],
[y]])
map_c = rot*((world - trans) / float(scale))
return map_c[0,0],map_c[1,0],t-angle
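# Minimal usage sketch for the pure-Python helpers above (illustrative only; the
# ROS/tf imports at the top must still resolve for this module to be imported).
if __name__ == "__main__":
    buf = CircularArray(3)
    for v in [1.0, 2.0, 3.0, 4.0]:  # once full, the oldest value (1.0) is overwritten
        buf.append(v)
    print("mean=%.1f median=%.1f" % (buf.mean(), buf.median()))  # mean=3.0 median=3.0
    timer = Timer(smoothing=10)
    for _ in range(5):
        time.sleep(0.01)
        timer.tick()
    print("approx tick rate: %.1f Hz" % timer.fps())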
| 2.734375 | 3 |
gallery/models.py | reddevilcero/SchoolWeb | 0 | 12789202 | from django.db import models
from core.models import BaseModel
from django.utils.translation import gettext as _
# Create your models here.
class GalleryImage(BaseModel):
name = models.CharField(_("Picture Name"), max_length=50)
picture = models.ImageField(_("Image"), upload_to="img/gallery")
def __str__(self):
return self.name
class Meta:
db_table = ''
managed = True
verbose_name = 'Gallery Image'
verbose_name_plural = 'Gallery Images'
| 2.25 | 2 |
hinkaponka/__init__.py | shinjiniray/testpackage | 0 | 12789203 | from . import helloworld
__all__ = [
'helloworld'
]
| 1.0625 | 1 |
pyCWD/mesh.py | rdzotz/Coda-Analysis | 7 | 12789204 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 4 11:19:57 2018
@author: rwilson
"""
import pygmsh
import meshio
import numpy as np
import pickle
class utilities():
'''A collection of functions for interacting with the mesh object
'''
def meshOjfromDisk(meshObjectPath='cly.Mesh'):
'''Read the entire mesh object from disk.
Parameters
----------
meshObjectPath : str (default='cly.Mesh')
'''
with open(meshObjectPath, 'rb') as clyMesh_file:
return pickle.load(clyMesh_file)
class mesher():
    '''Mesh generator class using pygmsh to generate gmsh code.
Parameters
----------
mesh_param : dict
Expected parameters defining the mesh, char_len, height, radius
cell_data : dict
Contains line, tetra, triangle, vertex 'gmsh:physical' and
'gmsh:geometrical'.
cells : dict
        Contains line, tetra, triangle, vertex of the point indices as defined in
``points``.
points : array(float)
Matrix of xyz coords for each point in the mesh domain
Notes
-----
Understanding the mesh structure
    Points are a list of each point or vertex in x,y,z positions.
cell_data['tetra']['gmsh:physical'] : the physical values of each tetra
    * cell['tetra'] : list of lists of each tetrahedron's vertex indices referencing
the coords inside the points. [points[i1],
points[i2],
points[i3],
points[i4]]
'''
def __init__(self, mesh_param):
self.mesh_param = mesh_param
self.cell_data = None
self.points = None
self.cells = None
self.cell_cent = None
def meshIt(self):
'''Produces the mesh.
'''
self._cylinderMesh()
self._cellCent()
def _cylinderMesh(self):
''' Produce a cylindrical mesh
'''
# The geometry object
geom = pygmsh.opencascade.Geometry()
# Positions
btm_face = [0.0, 0.0, 0.0]
axis = [0.0, 0.0, self.mesh_param['height']]
# create the cylinder with open cascade
geom.add_cylinder(btm_face, axis, self.mesh_param['radius'],
char_length=self.mesh_param['char_len']
)
# Make the mesh
self.points, self.cells, _, self.cell_data, _ = pygmsh.generate_mesh(geom)
def _cellCent(self):
''' Calculate the centre of each tetra.
'''
        # The vertices in cart coords
tetra_verts = [ np.array([self.points[vert[0]], self.points[vert[1]],
self.points[vert[2]], self.points[vert[3]]])
for vert in self.cells['tetra']]
# The centre of tetra in cart coords
self.cell_cent = [np.array([vert[:,0].sum()/4, vert[:,1].sum()/4, vert[:,2].sum()/4])
for vert in tetra_verts]
def saveMesh(self, name):
'''Save the mesh to file.
Parameters
----------
name : str
Name of the mesh file saved to the current directory.
'''
mesh = meshio.Mesh(self.points, self.cells, cell_data=self.cell_data)
meshio.write('%s.vtu' % name, mesh)
# meshio.write('%s.vtu' % name, self.points, self.cells, cell_data=self.cell_data)
# meshio.write('%s.msh4' % name, self.points, self.cells, cell_data=self.cell_data)
# meshio.gmsh_io.write('%s.msh' % name, self.points, self.cells, cell_data=self.cell_data)
def setCellsVal(self, cell_values):
'''Set each cell physical value.
Parameters
----------
cell_values : array/list
physical values of each tetra cell within the mesh domain in order
corresponding to ``points``.
'''
self.cell_data['tetra']['gmsh:physical'] = cell_values
def meshOjtoDisk(self):
'''Save the entire mesh object to disk
'''
with open('cly.Mesh', 'wb') as clyMesh_file:
pickle.dump(self, clyMesh_file)
def meshOjfromDisk(self):
        '''Read the entire mesh object back from disk
TODO
----
        Should likely deprecate this function and simply use the one stored in the utility class
'''
with open('cly.Mesh', 'rb') as clyMesh_file:
return pickle.load(clyMesh_file)
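# Illustrative usage sketch (requires pygmsh/meshio; the parameter values below
# are arbitrary examples, not defaults taken from this project).
if __name__ == '__main__':
    params = {'char_len': 0.1, 'height': 1.0, 'radius': 0.5}
    cyl = mesher(params)
    cyl.meshIt()                                        # build the cylinder mesh and cell centres
    cyl.setCellsVal([1.0] * len(cyl.cells['tetra']))    # assign dummy physical values
    cyl.saveMesh('cylinder')                            # writes cylinder.vtu
    cyl.meshOjtoDisk()                                  # pickles the object to cly.Mesh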
| 3.15625 | 3 |
dstl/translate/__init__.py | kabirkhan/dstl | 2 | 12789205
from .azure import AzureTranslator
from .google import GoogleTranslator
from .transformers import TransformersMarianTranslator
| 0.859375 | 1 |
src/ising_animate/__init__.py | davifeliciano/ising_model | 2 | 12789206 | """
A Python Package to easily generate animations of the Ising Model using the Metropolis Algorithm,
the most commonly used Markov Chain Monte Carlo method to calculate estimations for this system.
"""
from .ising import Ising, AnimatedIsing, CoolingAnimatedIsing, DynamicAnimatedIsing
| 2.484375 | 2 |
src/test_who_likes_it.py | hcodydibble/code-katas | 0 | 12789207 | """Test function for who_likes_it module."""
import pytest
TEST_DATA = [([], "no one likes this"),
(["Peter"], "Peter likes this"),
(["Jacob", "Alex"], "Jacob and Alex like this"),
(["Max", "John", "Mark"], "Max, John and Mark like this"),
(["Alex", "Jacob", "Mark", "Max"], "Alex, Jacob and 2 others like this"),
(['Ryan', 'Jonathon', 'Alexandra', 'Jeffery', 'Elizabeth', 'Gina',
'Kristina', 'Hannah', 'Crystal', 'Patrick', 'Brandon', 'Daniel',
'Christian'], "Ryan, Jonathon and 11 others like this"),
(['Ernest', 'Stephanie'], "Ernest and Stephanie like this"),
(['Angelica', 'Karen', 'Kevin', 'William', 'Michaela', 'Kelly',
'Ashley', 'Maria', 'Edward', 'Gregory', 'Sarah', 'Robert',
'Sergio', 'Marvin', 'Nicole', 'Jeremy', 'Charles', 'Sandra',
'Cindy', 'Thomas', 'Dan', 'Karla', 'Travis', 'Pamela',
'Kimberly', 'Robert', 'James', 'David', 'Geoffrey', 'Patrick',
'Nicole', 'Mitchell', 'Angela', 'Kayla', 'Madeline', 'Joann',
'Maria', 'Ryan', 'Michelle', 'William', 'Johnny', 'Michael',
'Patricia'], "Angelica, Karen and 41 others like this"),
(['Lisa', 'Katrina', 'Kelly', 'Kyle', 'Catherine', 'Kimberly',
'Mason', 'Diana', 'Samantha', 'Kimberly', 'Sherry', 'Joseph',
'Allison', 'Mark', 'Virginia', 'Christopher', 'Manuel',
'Michelle', 'Adam', 'Brenda', 'Bradley', 'Marissa', 'Carmen',
'Carol', 'Kathleen', 'Brandon', 'Richard', 'Tara', 'Bonnie',
'Richard', 'Bianca', 'Donald', 'Jonathan', 'Amanda', 'Jennifer',
'Veronica', 'Alison', 'Diane', 'Olivia', 'Joe', 'Janet',
'Stephanie', 'Scott', 'Dale', 'Natasha', 'Stephen', 'Laura',
'Brian', 'Lynn', 'Kurt', 'Julia', 'Janet', 'Cory', 'Cody',
'Mark', 'Elizabeth', 'Leslie', 'Bruce', 'Cindy', 'William',
'Devin', 'Michael', 'Paul', 'Lindsey', 'Julie', 'Michelle',
'Carla', 'Ian', 'Dennis', 'Lindsay', 'Rose', 'Emily', 'Jessica',
'Jerry', 'Riley', 'Jeffery', 'Steven', 'Alisha', 'Mark',
'Joseph', 'Andrew', 'Joshua', 'Nathan'], "Lisa, Katrina and 81 others like this")]
@pytest.mark.parametrize("string, result", TEST_DATA)
def test_likes(string, result):
"""Test for likes function."""
from who_likes_it import likes
assert likes(string) == result
| 3.1875 | 3 |
src/ml_preprocessing/encoding_helpers.py | mjasieczko/new_offer_success_predictor | 0 | 12789208 | import itertools
from typing import List, DefaultDict, Tuple
import numpy as np
import pandas as pd
from sklearn.base import clone
from sklearn.metrics import roc_auc_score
# from sklearn.metrics import recall_score, accuracy_score, confusion_matrix
from sklearn.model_selection import KFold
from .categorical_encoders import LeaveOneOutEncoder
class LOOGridSearchCV:
"""
Specially prepared class to do grid search with cross-validation on our loo encoded
DataFrame.
    Scores should be approximately ok, although I have no proof for that :)
"""
def __init__(self,
train_df: pd.DataFrame,
model,
params_grid: DefaultDict,
columns_to_encode: List,
columns_to_drop_from_training: List,
Xs_train: List[pd.DataFrame] = None,
ys_train: List[pd.DataFrame] = None,
Xs_val: List[pd.DataFrame] = None,
ys_val: List[pd.DataFrame] = None,
ohe_emails: bool = True,
mean: int = 1,
std: int = 0.05,
n_folds: int = 5,
encoded_df: pd.DataFrame = pd.DataFrame(),
) -> None:
"""
        :param train_df: train_df (will then be split into train and val sets n_folds times)
:param model: model to train
:param params_grid: param_grid to search
:param columns_to_encode: categorical columns, which you want to encode using loo
:param columns_to_drop_from_training: columns to drop from training phase
:param ohe_emails: if set to True, performs OHE on emails column
:param Xs_train:
:param mean: mean to regularization part of the encoding
:param std: std to regularization part of the encoding
:param n_folds: n_folds to validate
:param encoded_df: if task was done before, just pass here already encoded_df
"
"""
self.processed_train_df = (train_df.copy(deep=True)
.reset_index()
.drop(columns='name'))
self.model = model
self.params_grid = params_grid
self.columns_to_encode = columns_to_encode
self.columns_to_drop_from_training = columns_to_drop_from_training
self.ohe_emails = ohe_emails
self.mean = mean
self.std = std
self.n_folds = n_folds
if not Xs_train:
self.Xs_train, self.ys_train, self.Xs_val, self.ys_val = ([] for i in range(4))
else:
self.Xs_train = Xs_train
self.ys_train = ys_train
self.Xs_val = Xs_val
self.ys_val = ys_val
self.encoded_df_ = encoded_df
# self.best_accuracy_estimator = None
# self.best_recall_estimator = None
self.best_roc_auc_estimator = None
def _ohe_emails(self) -> pd.DataFrame:
"""
internal method for one hot encoding emails column
"""
email_ohe_names = {0: '0_emails',
1: '1_email',
2: '2_emails',
3: '3_emails',
4: '4_emails',
5: '5_emails'}
self.processed_train_df = (pd.concat([self.processed_train_df, pd.get_dummies(
self.processed_train_df['emails'])], axis=1)
.rename(columns=email_ohe_names))
self.columns_to_drop_from_training.append('emails')
return self.processed_train_df
def _prepare_train_val_dfs(self):
"""
Internal method
bunch of code to prepare train and validation dataframes for given n_folds, to make
grid search and cross validation processes much faster: for each n_folds you will need
to compute encoded_df only once, same for validation and train DataFrames
"""
if self.ohe_emails:
X = self._ohe_emails()
else:
X = self.processed_train_df
if 'emails' in X.columns:
X['emails'] = X['emails'].astype(int)
y = self.processed_train_df[['target']]
X.drop(columns=self.columns_to_drop_from_training, inplace=True)
"""
to have each sample exactly once in validation set
"""
kf = KFold(n_splits=self.n_folds, shuffle=False, random_state=None)
splits = kf.split(X)
dfs_to_mean = []
for train_index, val_index in splits:
X_train, y_train = X.iloc[train_index], y.iloc[train_index]
X_val, y_val = X.iloc[val_index], y.iloc[val_index]
X_val.drop(columns=['target'], inplace=True)
enc = LeaveOneOutEncoder(train_df=X_train,
test_df=X_val,
columns_to_encode=self.columns_to_encode,
target_column='target',
random_state=42,
mean=self.mean,
std=self.std)
X_train, X_val = enc.fit()
encoded_cols = [col for col in X_train.columns if 'encoded_' in col]
dfs_to_mean.append(X_train[encoded_cols])
train_to_drop = self.columns_to_encode.copy()
train_to_drop.extend(['target'])
X_train.drop(columns=train_to_drop, inplace=True)
test_to_drop = self.columns_to_encode.copy()
X_val.drop(columns=test_to_drop, inplace=True)
self.Xs_train.append(X_train)
self.ys_train.append(y_train)
self.Xs_val.append(X_val)
self.ys_val.append(y_val)
"""
        here we compute the mean over the folds, excluding each fold's own
        'currently serving as validation, not training' part, which seems the most
        appropriate way to stay consistent with the cross-validation approach
"""
for df in dfs_to_mean:
zeros = [0 for col in df.columns]
for index in range(len(self.processed_train_df)):
if index not in df.index:
df.loc[index, :] = zeros
df.sort_index(inplace=True)
mean_df = dfs_to_mean[0].copy(deep=True)
mean_df = mean_df * 0
for num in range(self.n_folds):
mean_df = mean_df + dfs_to_mean[num]
self.encoded_df_ = mean_df.divide(self.n_folds - 1)
def best_roc_auc_estimator_(self, best_roc_auc_estimator):
self.best_roc_auc_estimator = best_roc_auc_estimator
"""
def best_accuracy_estimator_(self, best_accuracy_estimator):
self.best_accuracy_estimator = best_accuracy_estimator
def best_recall_estimator_(self, best_recall_estimator):
self.best_recall_estimator = best_recall_estimator
"""
    def grid_search(self) -> List:
        """
        performs GridSearchCV
        :return: list of mean cross-validated ROC AUC scores, one per parameter
        combination
"""
if self.encoded_df_.empty:
self._prepare_train_val_dfs()
models_roc_auc_scores = []
# models_accuracies, models_recalls, models_parameters, models_cms = ([] for i in range(4))
for p in itertools.product(*self.params_grid.values()):
model_params = self.params_grid.copy()
for counter, key in enumerate(model_params.keys()):
model_params[key] = p[counter]
# models_parameters.append(model_params.items())
clf = clone(self.model)
clf = clf.set_params(**model_params)
cv_roc_auc_scores = []
# cv_accuracies, cv_recalls, cv_cms = ([] for i in range(3))
"""
            fitting and predicting for all folds, then scoring them by ROC AUC
            (accuracy, recall and confusion-matrix scoring is kept commented out)
"""
for index in range(self.n_folds):
clf.fit(self.Xs_train[index], self.ys_train[index])
predictions = clf.predict(self.Xs_val[index])
cv_roc_auc_scores.append(roc_auc_score(self.ys_val[index], predictions))
# cv_accuracies.append(accuracy_score(self.ys_val[index], predictions))
# cv_recalls.append(recall_score(self.ys_val[index], predictions))
# cv_cms.append(confusion_matrix(self.ys_val[index], predictions))
"""
            final evaluation of scores (means over all folds); for the confusion
            matrix this can yield non-integer values, so treat it as informative
            rather than strict - in any case, it points to which model to choose
"""
models_roc_auc_scores.append(np.mean(cv_roc_auc_scores))
# models_accuracies.append(np.mean(cv_accuracies))
# models_recalls.append(np.mean(cv_recalls))
# models_cms.append(np.mean(cv_cms, axis=0))
# if max(models_accuracies) == np.mean(cv_accuracies):
# self.best_accuracy_estimator_(clf)
# if max(models_recalls) == np.mean(cv_recalls):
# self.best_recall_estimator_(clf)
if max(models_roc_auc_scores) == np.mean(cv_roc_auc_scores):
self.best_roc_auc_estimator_(clf)
return models_roc_auc_scores
# return models_accuracies, models_parameters, models_recalls, models_cms
def processed_train(self):
"""
:return: processed train DataFrame with added encoded columns
"""
train = self.processed_train_df.copy(deep=True)
encoded = self.encoded_df_.copy(deep=True)
train = train.drop(columns=self.columns_to_encode+['target'])
processed_train = pd.concat([train, encoded], axis=1)
return processed_train
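# Illustrative usage sketch (hypothetical column names; the frame is expected to
# contain a 'target' column, the categorical columns passed in columns_to_encode,
# an 'emails' column when ohe_emails=True, and an index named 'name'):
#
#   from sklearn.linear_model import LogisticRegression
#   gs = LOOGridSearchCV(train_df=train_df,
#                        model=LogisticRegression(),
#                        params_grid={'C': [0.1, 1.0]},
#                        columns_to_encode=['offer_type'],
#                        columns_to_drop_from_training=['offer_date'])
#   roc_auc_scores = gs.grid_search()
#   best_model = gs.best_roc_auc_estimator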
| 2.78125 | 3 |
solution/operators/create_users_0.0.1/content/files/vflow/subengines/com/sap/python36/operators/create_users/create_users.py | thhapke/workshop_registration | 0 | 12789209 | import sdi_utils.gensolution as gs
import subprocess
import io
import logging
import os
import string
import secrets
import base64
try:
api
except NameError:
class api:
queue = list()
class Message:
def __init__(self, body=None, attributes=""):
self.body = body
self.attributes = attributes
def send(port, msg):
api.queue.append(msg.body)
class config:
## Meta data
config_params = dict()
version = "0.0.1"
tags = {'': ''}
operator_name = 'create_users'
operator_description = "Create Users Credentials"
operator_description_long = "Create User Credentials"
num_users = 90
config_params['num_users'] = {'title': 'Number of Users', 'description': 'Number of users', 'type': 'integer'}
root_name = 'TED_'
config_params['root_name'] = {'title': 'Root name', 'description': 'Root name for numbering', 'type': 'string'}
pwd_length = 6
config_params['pwd_length'] = {'title': 'Password Length', 'description': 'Password Length', 'type': 'integer'}
logger = logging.getLogger(name='distribute_users')
# set logging
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
log_stream = io.StringIO()
sh = logging.StreamHandler(stream=log_stream)
#sh.setFormatter(logging.Formatter('%(asctime)s ; %(levelname)s ; %(name)s ; %(message)s', datefmt='%H:%M:%S'))
api.logger.addHandler(sh)
def generate():
# base set for pwd generation
baseset = string.ascii_letters + string.digits
baseset = [i for i in baseset if not i in 'Il0O']
# User and pwd
idx_users_pwd = [[str(i-1),api.config.root_name + str(i), ''.join(secrets.choice(baseset) for n in range(api.config.pwd_length))]
for i in range(1, api.config.num_users + 1)]
#tenant = 'default'
#idx_users_pwd_base64 = [[u[0],u[1],u[2],str(base64.b64encode('{}\\{}:{}'.format(tenant,u[0],u[1]).encode('ascii')))[2:-1]] for u in users_pwd]
header = 'index,user,password\n'
users_csv_str = header + '\n'.join([','.join(elem) for elem in idx_users_pwd])
attributes = {"file": {"connection": {"configurationType": "Connection Management", "connectionID": ""},"path": "", "size": 0}}
msg = api.Message(attributes=attributes,body=users_csv_str)
api.send(outports[0]['name'],msg)
outports = [{'name': 'users', 'type': 'message.file',"description":"new user"}]
api.add_generator(generate)
def test_operator() :
api.config.num_users = 90
api.config.root_name = 'ted_'
api.config.pwd_length = 6
filename = 'DAT262_2.csv'
generate()
with open(os.path.join("/Users/Shared/data/registration",filename), 'w') as file:
for m in api.queue :
file.write(m)
| 1.992188 | 2 |
tohu/v7/foreach.py | maxalbert/tohu | 1 | 12789210 | import ast
import inspect
import textwrap
from .base import TohuBaseGenerator
from .ipython_support import get_ast_node_for_classes_defined_interactively_in_ipython
__all__ = ["Placeholder", "placeholder", "foreach"]
class Placeholder:
def __init__(self, name):
self.name = name
placeholder = Placeholder("<generic>")
def get_ast_node_for_classes_defined_in_source_files(cls):
orig_cls_source = textwrap.dedent(inspect.getsource(cls))
orig_cls_ast_node = ast.parse(orig_cls_source)
return orig_cls_ast_node
def get_cls_compiled_ast_node(cls):
try:
orig_cls_ast_node = get_ast_node_for_classes_defined_in_source_files(cls)
except TypeError as exc:
if exc.args[0] == "<module '__main__'> is a built-in class":
orig_cls_ast_node = get_ast_node_for_classes_defined_interactively_in_ipython(cls)
else:
# unexpected error; re-raise the exception
raise
orig_cls_compiled = compile(orig_cls_ast_node, "<string>", "exec")
return orig_cls_compiled
def reevaluate_class_definition(
orig_cls_compiled_ast_node, *, orig_cls_name, global_vars, local_vars, **custom_var_defs
):
my_global_vars = global_vars.copy()
my_global_vars.update(custom_var_defs)
my_global_vars.update(local_vars)
my_local_vars = {}
exec(orig_cls_compiled_ast_node, my_global_vars, my_local_vars)
# Sanity check to ensure the code only evaluated the expected class definition
assert list(my_local_vars.keys()) == [orig_cls_name], "Unexpected object(s) found during code evaluation."
reevaluated_cls = my_local_vars[orig_cls_name]
return reevaluated_cls
def restore_globals(global_vars, names, clashes):
for name in names:
if name in clashes:
# restore items that were previously defined
global_vars[name] = clashes[name]
else:
# remove items which didn't exist before
global_vars.pop(name)
def foreach(**var_defs):
new_names = var_defs.keys()
parent_frame = inspect.currentframe().f_back
global_vars = parent_frame.f_globals
local_vars = parent_frame.f_locals
clashes = {name: global_vars[name] for name in new_names if name in global_vars}
global_vars.update(var_defs)
def make_foreach_closure(cls):
if not inspect.isclass(cls):
raise TypeError(
f"Foreach decorator must be applied to a tohu generator class, not an object of type {type(cls)}."
)
if not issubclass(cls, TohuBaseGenerator):
raise TypeError("Decorated class must be a subclass of TohuBaseGenerator.")
orig_cls_compiled_ast_node = get_cls_compiled_ast_node(cls)
orig_cls_name = cls.__name__
class ForeachWrapper:
def __init__(self, *args, **kwargs):
self.init_args = args
self.init_kwargs = kwargs
def foreach(self, **custom_var_defs):
custom_var_names = list(custom_var_defs.keys())
missing_params = list(set(new_names).difference(custom_var_names))
extra_params = list(set(custom_var_names).difference(new_names))
if missing_params:
raise ValueError(f"Missing parameter(s): {', '.join(missing_params)!r}")
if extra_params:
raise ValueError(f"Extra parameter(s) provided: {', '.join(extra_params)!r}")
# Re-evaluate the class definition, including the previously missing
# variable values to replace the placeholders.
rewritten_cls = reevaluate_class_definition(
orig_cls_compiled_ast_node,
orig_cls_name=orig_cls_name,
global_vars=global_vars,
local_vars=local_vars,
**custom_var_defs,
)
return rewritten_cls(*self.init_args, **self.init_kwargs)
restore_globals(global_vars, new_names, clashes)
return ForeachWrapper
return make_foreach_closure
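# Illustrative usage sketch (CustomGenerator and Integer stand in for concrete
# tohu generators and are not defined in this module):
#
#   @foreach(max_value=placeholder)
#   class QuuxGenerator(CustomGenerator):
#       number = Integer(0, max_value)
#
#   wrapper = QuuxGenerator()                  # constructor arguments are stored, not used yet
#   gen = wrapper.foreach(max_value=100)       # class body is re-evaluated with max_value=100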
| 2.140625 | 2 |
astr-119-session-4/python_use_module.py | jjohnst6260/astr-119 | 0 | 12789211
# this uses the module
import test_module as tm
tm.hello_world() | 1.390625 | 1 |
venv/lib/python2.7/site-packages/sqlalchemy/testing/config.py | banianlabs/Stash-Prototype | 5 | 12789212
requirements = None
db = None
| 0.910156 | 1 |
generate_encryption_keys.py | dan-omniscience/private-secure-cloud-sorage | 0 | 12789213
import os.path, errno, Crypto
from getpass import getpass
from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP
from Crypto import Random
private_key_filename = "s3-encrypt-sync_private-key.pem"
public_key_filename = "s3-encrypt-sync_public-key.pem"
class KeysExists(IOError):
errno = errno.EEXIST
message = "Keys Already Exists"
def generate_keys(certs_paths, password_file = ".secret"):
private_key_path = os.path.join(certs_paths, private_key_filename)
public_key_path = os.path.join(certs_paths, public_key_filename)
if os.path.isfile(public_key_path) and os.path.isfile(private_key_path):
raise KeysExists()
else:
random_generator = Random.new().read
privatekey = RSA.generate(2048, random_generator) #generate pub and priv key
with open(private_key_path, "w") as f:
f.write(privatekey.exportKey("PEM"))
publickey = privatekey.publickey() # pub key export for exchange
with open(public_key_path, "w") as f:
f.write(publickey.exportKey("PEM"))
os.chmod(private_key_path, 0400)
os.chmod(public_key_path, 0644)
if not os.path.isfile(password_file):
secret = getpass("Enter a 32 byte password:")
encryptionCipher = PKCS1_OAEP.new(publickey)
with open(password_file, "w") as f:
enc_secret = encryptionCipher.encrypt(secret)
f.write(enc_secret) | 2.59375 | 3 |
hpopt/examples/dexter.py | knowledge-learning/hp-optimization | 4 | 12789214 | # coding: utf-8
import random
from hpopt.datasets.uci.dexter import load_corpus
from ..sklearn import SklearnClassifier
def main():
X, y = load_corpus()
random.seed(0)
classifier = SklearnClassifier(popsize=100, select=20, iters=10, timeout=10, verbose=True)
classifier.fit(X, y)
if __name__ == "__main__":
main()
| 2.25 | 2 |
models/__init__.py | mihamerstan/lidar_ordered_points | 1 | 12789215
import importlib
import os
import torch.nn as nn
MODEL_REGISTRY = {}
def build_model(args):
return MODEL_REGISTRY[args.model].build_model(args)
def build_model_gan(args):
return MODEL_REGISTRY[args.modelG].build_model(args), MODEL_REGISTRY[args.modelD].build_model(args)
def register_model(name):
"""Decorator to register a new model"""
def register_model_cls(cls):
if name in MODEL_REGISTRY:
raise ValueError('Cannot register duplicate model {}'.format(name))
if not issubclass(cls, nn.Module):
raise ValueError('Model {} must extend {}'.format(name, cls.__name__))
MODEL_REGISTRY[name] = cls
return cls
return register_model_cls
# Automatically import any Python files in the models/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and file[0].isalpha():
module = file[:file.find('.py')]
importlib.import_module('models.' + module)
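# Illustrative registration sketch (hypothetical file models/my_net.py):
#
#   @register_model('my_net')
#   class MyNet(nn.Module):
#       @classmethod
#       def build_model(cls, args):
#           return cls()
#
# build_model(args) then returns a MyNet instance whenever args.model == 'my_net'.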
| 2.421875 | 2 |
xuan wang/test.py | weruioghvn/alphalens | 0 | 12789216 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 16:12:40 2021
@author: Administrator
"""
import alphalens
import pandas as pd
import numpy as np
# import warnings
# warnings.filterwarnings('ignore')
ticker_sector = {
"ACN" : 0, "ATVI" : 0, "ADBE" : 0, "AMD" : 0, "AKAM" : 0, "ADS" : 0, "GOOGL" : 0, "GOOG" : 0,
"APH" : 0, "ADI" : 0, "ANSS" : 0, "AAPL" : 0, "AMAT" : 0, "ADSK" : 0, "ADP" : 0, "AVGO" : 0,
"AMG" : 1, "AFL" : 1, "ALL" : 1, "AXP" : 1, "AIG" : 1, "AMP" : 1, "AON" : 1, "AJG" : 1, "AIZ" : 1, "BAC" : 1,
"BK" : 1, "BBT" : 1, "BRK.B" : 1, "BLK" : 1, "HRB" : 1, "BHF" : 1, "COF" : 1, "CBOE" : 1, "SCHW" : 1, "CB" : 1,
"ABT" : 2, "ABBV" : 2, "AET" : 2, "A" : 2, "ALXN" : 2, "ALGN" : 2, "AGN" : 2, "ABC" : 2, "AMGN" : 2, "ANTM" : 2,
"BCR" : 2, "BAX" : 2, "BDX" : 2, "BIIB" : 2, "BSX" : 2, "BMY" : 2, "CAH" : 2, "CELG" : 2, "CNC" : 2, "CERN" : 2,
"MMM" : 3, "AYI" : 3, "ALK" : 3, "ALLE" : 3, "AAL" : 3, "AME" : 3, "AOS" : 3, "ARNC" : 3, "BA" : 3, "CHRW" : 3,
"CAT" : 3, "CTAS" : 3, "CSX" : 3, "CMI" : 3, "DE" : 3, "DAL" : 3, "DOV" : 3, "ETN" : 3, "EMR" : 3, "EFX" : 3,
"AES" : 4, "LNT" : 4, "AEE" : 4, "AEP" : 4, "AWK" : 4, "CNP" : 4, "CMS" : 4, "ED" : 4, "D" : 4, "DTE" : 4,
"DUK" : 4, "EIX" : 4, "ETR" : 4, "ES" : 4, "EXC" : 4, "FE" : 4, "NEE" : 4, "NI" : 4, "NRG" : 4, "PCG" : 4,
"ARE" : 5, "AMT" : 5, "AIV" : 5, "AVB" : 5, "BXP" : 5, "CBG" : 5, "CCI" : 5, "DLR" : 5, "DRE" : 5,
"EQIX" : 5, "EQR" : 5, "ESS" : 5, "EXR" : 5, "FRT" : 5, "GGP" : 5, "HCP" : 5, "HST" : 5, "IRM" : 5, "KIM" : 5,
"APD" : 6, "ALB" : 6, "AVY" : 6, "BLL" : 6, "CF" : 6, "DWDP" : 6, "EMN" : 6, "ECL" : 6, "FMC" : 6, "FCX" : 6,
"IP" : 6, "IFF" : 6, "LYB" : 6, "MLM" : 6, "MON" : 6, "MOS" : 6, "NEM" : 6, "NUE" : 6, "PKG" : 6, "PPG" : 6,
"T" : 7, "CTL" : 7, "VZ" : 7,
"MO" : 8, "ADM" : 8, "BF.B" : 8, "CPB" : 8, "CHD" : 8, "CLX" : 8, "KO" : 8, "CL" : 8, "CAG" : 8,
"STZ" : 8, "COST" : 8, "COTY" : 8, "CVS" : 8, "DPS" : 8, "EL" : 8, "GIS" : 8, "HSY" : 8, "HRL" : 8,
"AAP" : 9, "AMZN" : 9, "APTV" : 9, "AZO" : 9, "BBY" : 9, "BWA" : 9, "KMX" : 9, "CCL" : 9,
"APC" : 10, "ANDV" : 10, "APA" : 10, "BHGE" : 10, "COG" : 10, "CHK" : 10, "CVX" : 10, "XEC" : 10, "CXO" : 10,
"COP" : 10, "DVN" : 10, "EOG" : 10, "EQT" : 10, "XOM" : 10, "HAL" : 10, "HP" : 10, "HES" : 10, "KMI" : 10
}
import pandas_datareader.data as web
pan = web.DataReader(list(ticker_sector.keys()), "yahoo", '2014-12-01', '2017-07-01')
# pan.to_pickle(r'xuan wang\pan.pkl')
# pan = pd.read_pickle(r'xuan wang\pan.pkl')
lookahead_bias_days = 5
pan = pan.transpose(2,1,0)
predictive_factor = pan.loc[:,:,'Open']
predictive_factor = predictive_factor.pct_change(lookahead_bias_days)
# introduce look-ahead bias and make the factor predictive
predictive_factor = predictive_factor.shift(-lookahead_bias_days)
predictive_factor = predictive_factor.stack()
predictive_factor.index = predictive_factor.index.set_names(['date', 'asset'])
pricing = pan.loc[:,:,'Open'].iloc[1:]
pricing.head()
sector_names = {
0 : "information_technology",
1 : "financials",
2 : "health_care",
3 : "industrials",
4 : "utilities",
5 : "real_estate",
6 : "materials",
7 : "telecommunication_services",
8 : "consumer_staples",
9 : "consumer_discretionary",
10 : "energy"
}
factor_data = alphalens.utils.get_clean_factor_and_forward_returns(predictive_factor,
pricing,
quantiles=5,
bins=None,
groupby=ticker_sector,
groupby_labels=sector_names) | 1.867188 | 2 |
bttn.py | yunojuno/bttn-webhook | 0 | 12789217
# -*- coding: utf-8 -*-
"""bttn webhook handler."""
from os import getenv
import flask
import hipchat
app = flask.Flask(__name__)
# set of valid channels
CHANNELS = ('hipchat', 'sms')
# set of required form keys
FORM_KEYS = ('channel', 'recipient', 'message')
HIPCHAT_API_TOKEN = getenv('HIPCHAT_API_TOKEN')
TWILIO_ACCOUNT_SID = getenv('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = getenv('TWILIO_AUTH_TOKEN')
@app.route('/', methods=['POST'])
def bttn_pressed():
"""Respond to a bttn press event."""
form = flask.request.form
for key in FORM_KEYS:
if key not in form:
return "Request form must include a '%s' key." % key, 400
if form['channel'] == 'hipchat':
return send_to_hipchat(form['recipient'], form['message'])
elif form['channel'] == 'sms':
return send_sms(form['recipient'], form['message'])
else:
return "Unknown channel: '%s'" % form['channel'], 400
def send_to_hipchat(room, message):
"""Forward the button message to HipChat."""
if not HIPCHAT_API_TOKEN:
return "Missing HIPCHAT_API_TOKEN environment variable.", 400
try:
response = hipchat.send_room_message(room, message, message_format='text')
return "Message sent successfully", response.status_code
except hipchat.HipChatError as ex:
return ex.error_message, ex.status_code
def send_sms(number, message):
"""Forward the message via SMS."""
if not TWILIO_ACCOUNT_SID:
return "Missing TWILIO_ACCOUNT_SID environment variable.", 400
if not TWILIO_AUTH_TOKEN:
return "Missing TWILIO_AUTH_TOKEN environment variable.", 400
return "Not yet implemented", 200
| 2.53125 | 3 |
adapter/acumos/setup.py | onap/dcaegen2-platform | 0 | 12789218 | # ============LICENSE_START====================================================
# org.onap.dcae
# =============================================================================
# Copyright (c) 2019 AT&T Intellectual Property. All rights reserved.
# =============================================================================
# Copyright (c) 2021 highstreet technologies GmbH. All rights reserved.
# =============================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END======================================================
from setuptools import setup, find_packages
setup(
name="aoconversion",
version="1.0.6-SNAPSHOT",
packages=find_packages(exclude=["tests.*", "tests"]),
author="<NAME>, <NAME>",
author_email="<EMAIL>, <EMAIL>",
description="Service to create DCAE artifacts from acumos models",
url="",
install_requires=["docker>=4.0.0,<5.0.0", "jsonschema", "PyYAML", "requests"],
package_data={'aoconversion': ['index.html']},
entry_points={
"console_scripts": [
"acumos-adapter=aoconversion.adapter:adapter"
]
}
)
| 1.484375 | 1 |
test/integ/test_s3_checkpoint_save_timeout.py | jmazanec15/sagemaker-tensorflow-containers | 1 | 12789219
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
from sagemaker import fw_utils
from test.integ.docker_utils import train
from test.integ.utils import create_config_files, file_exists
import uuid
from test.integ.conftest import SCRIPT_PATH
# https://github.com/tensorflow/tensorflow/issues/15868
def test_s3_checkpoint_save_timeout(docker_image, opt_ml, sagemaker_session, processor):
resource_path = os.path.join(SCRIPT_PATH, '../resources/python_sdk')
default_bucket = sagemaker_session.default_bucket()
s3_source_archive = fw_utils.tar_and_upload_dir(session=sagemaker_session.boto_session,
bucket=default_bucket,
s3_key_prefix='test_job',
script='rand_model_emb.py',
directory=resource_path)
checkpoint_s3_path = 's3://{}/integ-s3-timeout/checkpoints-{}'.format(default_bucket,
uuid.uuid4())
hyperparameters = dict(
training_steps=3,
evaluation_steps=3,
checkpoint_path=checkpoint_s3_path
)
create_config_files('rand_model_emb.py', s3_source_archive.s3_prefix, opt_ml, hyperparameters)
train(docker_image, opt_ml, processor)
assert file_exists(opt_ml, 'output/success'), 'Success file was not created'
assert not file_exists(opt_ml, 'output/failure'), 'Failure happened'
| 1.546875 | 2 |
goodread/helpers.py | frictionlessdata/goodread | 2 | 12789220 | import os
import shutil
import tempfile
def ensure_dir(path):
dirpath = os.path.dirname(path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
def move_file(source, target):
ensure_dir(target)
shutil.move(source, target)
def copy_file(source, target):
if isinstance(source, (tuple, list)):
source = os.path.join(*source)
if isinstance(target, (tuple, list)):
target = os.path.join(*target)
ensure_dir(target)
shutil.copy(source, target)
def write_file(path, text):
with tempfile.NamedTemporaryFile("wt", delete=False, encoding="utf-8") as file:
file.write(text)
file.flush()
move_file(file.name, path)
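# A minimal, illustrative sketch of the helpers above (standard library only).
if __name__ == "__main__":
    base = tempfile.mkdtemp()
    target = os.path.join(base, "nested", "note.txt")
    write_file(target, "hello")              # creates nested/ and writes via a temp file
    copy_file(target, (base, "copy.txt"))    # tuple targets are joined into a path
    with open(os.path.join(base, "copy.txt"), encoding="utf-8") as f:
        print(f.read())                      # -> hello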
| 3.15625 | 3 |
Advanced/Exercises/Multi_Dimentional_Lists_Exercise_2/4_easter_bunny.py | tankishev/Python | 2 | 12789221 | # On the first line, you will be given a number representing the size of the field. On the following few lines, you will be given a field with:
# • One bunny - randomly placed in it and marked with the symbol "B"
# • Number of eggs placed at different positions of the field and traps marked with "X"
# Your job is to determine the direction in which the bunny should go to collect the maximum number of eggs. The directions that should be considered as possible are up, down, left, and right. If you reach a trap while checking some of the directions, you should not consider the fields after the trap in this direction. For more clarifications, see the examples below.
# Note: Consider ONLY the paths from which the bunny has collected 1 or more eggs.
# Input
# • A number representing the size of the field
# • The matrix representing the field (each position separated by a single space)
# Output
# • The direction which should be considered as best (lowercase)
# • The field positions from which we are collecting eggs as lists
# • The total number of eggs collected
move = {
'up': lambda x: [x[0]-1, x[1]],
'down': lambda x: [x[0]+1, x[1]],
'left': lambda x: [x[0], x[1]-1],
'right': lambda x: [x[0], x[1]+1]
}
eggs = {}
field = [[x for x in input().split()] for _ in range(int(input()))]
starting_position = next((x, y) for x in range(len(field)) for y in range(len(field)) if field[x][y]=='B')
for direction in move.keys():
position = starting_position
while True:
position = move[direction](position)
row, col = position
if min(row,col) < 0 or max(row, col) == len(field):
break
elif field[row][col] == 'X':
break
else:
if direction not in eggs:
eggs[direction] = {'cells': [], 'sum': 0}
eggs[direction]['cells'].append(position)
eggs[direction]['sum'] += int(field[row][col])
max_key = [k for k, v in sorted(eggs.items(), key=lambda item: item[1]['sum'], reverse=True)][0]
print(max_key)
print(*eggs[max_key]['cells'], sep='\n')
print(eggs[max_key]['sum'])
| 4.4375 | 4 |
syscall_gen/generator.py | Globidev/strace | 0 | 12789222
#!/usr/bin/env python3
from urllib.request import urlopen
from bs4 import BeautifulSoup
from gzip import GzipFile
from os.path import join
import sys
DATABASE_URL = 'https://filippo.io/linux-syscall-table'
CONST_QUALIFIER = 'const'
SYMBOLS = {
'int': 'int_',
's32': 'int_',
'key_serial_t': 'int_',
'clockid_t': 'int_',
'pid_t': 'int_',
'key_t': 'int_',
'unsigned': 'uint_',
'unsigned int': 'uint_',
'u32': 'uint_',
'uid_t': 'uint_',
'qid_t': 'uint_',
'gid_t': 'uint_',
'umode_t': 'uint_',
'void': 'long_', # Not handling voids in strace
'long': 'long_',
'off_t': 'long_',
'loff_t': 'long_',
'ssize_t': 'long_',
'unsigned long': 'ulong_',
'u64': 'ulong_',
'size_t': 'ulong_',
'time_t': 'ulong_',
'timer_t': 'ulong_',
'caddr_t': 'ulong_',
'clock_t': 'ulong_',
'mqd_t': 'ulong_',
'aio_context_t': 'ulong_',
'void *': 'pointer_',
'cap_user_header_t':'pointer_',
'cap_user_data_t': 'pointer_',
'char *': 'string_',
'char **': 'array_',
}
UNKNOWN_SYM = '__unknown_type'
def strip_type(type):
# Handle pointers
if '*' in type:
if 'char' in type:
return 'char {}'.format('*' * type.count('*'))
else:
return 'void *'
# Remove const qualifiers
if type.startswith(CONST_QUALIFIER):
type = type[len(CONST_QUALIFIER) + 1:]
# Remove __
if type.startswith('__'):
type = type[2:]
return type
def type_to_sym(type):
stripped = strip_type(type)
try:
return SYMBOLS[stripped]
except KeyError:
print('unmapped type: {} ({})'.format(stripped, type), file=sys.stderr)
return UNKNOWN_SYM
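# Illustrative sanity checks for the pure helpers above (no man pages or network
# access needed); the expected symbols follow directly from the SYMBOLS table.
assert type_to_sym('const char *') == 'string_'
assert type_to_sym('unsigned long') == 'ulong_'
assert type_to_sym('pid_t') == 'int_'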
MAN2_PATH = '/man2'
def fetch_return_type(name):
try:
with GzipFile(join(MAN2_PATH, '{}.2.gz'.format(name)), 'r') as gz:
lines = [
line.decode(sys.getdefaultencoding())
for line in gz.readlines()
]
func_name = '{}('.format(name)
for line in lines:
if line.startswith('.BI') and func_name in line:
name_idx = line.index(func_name)
return line[len(".BI \""):name_idx].strip()
except IOError:
pass
except UnicodeDecodeError:
pass
return 'long' # Defaulting to long
with urlopen(DATABASE_URL) as response:
html = response.read()
soup = BeautifulSoup(html, 'html.parser')
table = soup.find(class_='tbls-table')
entries = table.find_all(class_='tbls-entry-collapsed')
for entry in entries:
defines = entry.find_all('td')
name = defines[1].string
return_type = fetch_return_type(name)
args_row = entry.next_sibling
args = []
if 'tbls-arguments-collapsed' in args_row.attrs.get('class'):
types_row = args_row.find_all('tr')[1]
type_defines = types_row.find_all('td')
for define in type_defines:
type = define.find('strong')
args.append(type.string)
syscall = '{{ "{}", {}, {}, {{ {} }} }},'.format(
name,
type_to_sym(return_type),
len(args),
', '.join(map(type_to_sym, args))
)
print(syscall)
| 2 | 2 |
test/run_all_tests.py | wjchen84/rapprentice | 23 | 12789223 | import rapprentice, os, os.path as osp
from rapprentice.call_and_print import call_and_print
assert osp.basename(os.getcwd()) == "test"
call_and_print("python tps_unit_tests.py")
call_and_print("python ../scripts/download_sampledata.py ~/Data --use_rsync")
call_and_print("python ../scripts/generate_h5.py ~/Data/sampledata/overhand/overhand.yaml")
call_and_print("python test_registration_synthetic.py --plotting=0")
| 1.992188 | 2 |
pycloud/pycloud/network/finder.py | SEI-AMS/pycloud | 14 | 12789224 | # KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, <NAME>
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
from zeroconf import ServiceBrowser, Zeroconf
import time
import socket
#######################################################################################################################
# Finds cloudlets through Zeroconf.
#######################################################################################################################
class CloudletFinder(object):
# DNS-SD identifier for cloudlets.
CLOUDLET_SERVICE_DNS = '_cloudlet._tcp.local.'
####################################################################################################################
# Constructor.
####################################################################################################################
def __init__(self):
self.services = {}
####################################################################################################################
# Finds cloudlets and returns a list.
####################################################################################################################
def find_cloudlets(self, seconds_to_wait=3):
print 'Started looking for cloudlets'
self.services = {}
zeroconf = Zeroconf()
browser = ServiceBrowser(zeroconf, CloudletFinder.CLOUDLET_SERVICE_DNS, listener=self)
# Wait to find cloudlets.
print 'Waiting for results'
time.sleep(seconds_to_wait)
# Stop looking for cloudlets.
browser.cancel()
# Return the list of cloudlets found.
print 'Cloudlets found: '
print self.services
return self.services
####################################################################################################################
# Called when a new service is found, adds it to a list of services.
####################################################################################################################
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
if info is None:
print("Empty service was registered; ignoring it.")
return
address_string = socket.inet_ntoa(info.address)
print "Service added, service name: {}, and ip {}".format(name, address_string)
self.services[info.server] = info
# Move encryption state to specific property for easier access.
encryption = ''
if 'encryption' in info.properties:
encryption = info.properties['encryption']
self.services[info.server].encryption = encryption
self.services[info.server].address_string = address_string
####################################################################################################################
# Called when a service is removed, removes it from a list of services.
####################################################################################################################
def remove_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
print("Service %s removed, service info: %s" % (name, info))
del self.services[info.server]
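# Illustrative usage sketch:
#
#   finder = CloudletFinder()
#   cloudlets = finder.find_cloudlets(seconds_to_wait=3)
#   for hostname, info in cloudlets.items():
#       print '%s -> %s (encryption: %s)' % (hostname, info.address_string, info.encryption)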
| 0.953125 | 1 |
src/pygrambank/commands/issue1797.py | glottobank/pygrambank | 2 | 12789225
"""
"""
import itertools
import functools
# flake8: noqa
DATAPOINTS = """\
GB146 | Double check with Daria if there really is no concstruction of this type. | Mishchenko, Daria (p.c. 2013) | HS_loma1260.tsv
GB146 | Double check with Elena if there really is no such construction | Perekhvalskaya, Elena (p.c 2013) | HS_mwan1250.tsv
GB146 | Double check with Daria if there really is no concstruction of this type. | Mishchenko, Daria (p.c. 2013) | HS_toma1245.tsv
GB171 | Double check page source | Harvey (2002:194-195) | JLA_gaga1251.tsv
GB333 | Double check with team on weds | Omar 1983:203 | JLA_rung1259.tsv
GB334 | Double check with team on weds | Omar 1983:203 | JLA_rung1259.tsv
GB335 | Double check with team on weds | Omar 1983:203 | JLA_rung1259.tsv
GB333 | Double check with team on weds | Omar 1983:233-234 | JLA_tamb1254.tsv
GB334 | Double check with team on weds | Omar 1983:233-234 | JLA_tamb1254.tsv
GB335 | Double check with team on weds | Omar 1983:233-234 | JLA_tamb1254.tsv
GB304 | I think so? Double check this with Jeremy. Someone killed himself a deer./ A deer got himself killed. | Saxton (1982:144) | JLA_toho1245.tsv
GB026 | Double check figure 4. | Boxwell (1990:160) | JLA_weri1253.tsv
GB158 | Acc. to Wals no, but double check Sapir (1922), then source entire grammar and delete this comment. | Sapir (1922:215-234); Nevin (1976:237-247) | JLA_yana1271.tsv
GB159 | Acc. to Wals no, but double check Sapir (1922), then source entire grammar and delete this comment. | Sapir (1922:215-234); Nevin (1976:237-247) | JLA_yana1271.tsv
GB160 | Acc. to Wals no, but double check Sapir (1922), then source entire grammar and delete this comment. | Sapir (1922:215-234); Nevin (1976:237-247) | JLA_yana1271.tsv
GB401 | Double check this page against the wiki. Labile verb mentioned. | Gijn (2006: 145) | JLA_yura1255.tsv
GB117 | double-check use of pa, p. 253 | Lee 1975:253 | MD-GR-RSI_kosr1238.tsv
GB074 | I think that all of the adpositions are postpositions, but I found something recently that looks like a preposition. I have to double-check. For now, though, let's go with no. | Robinson 2007 | MD-GR-RSI_roto1249.tsv
GB257 | Requires further research to double-check. | <NAME> p.c. (2018) | NP_wiru1244.tsv
GB312 | Requires further research to double-check. | <NAME> p.c. (2018) | NP_wiru1244.tsv
GB330 | double check this | Heath 1999:195 | RB_koyr1240.tsv
GB021 | I think. According to the grammar there are different markers for definite and indefinite non-topics ('objects'), but the examples are confusing. | Wolfenden 1971 | MD-GR-RSI_hili1240.tsv
GB118 | I think | Haiman 1980 | MD-GR-RSI_huaa1250.tsv
GB105 | [XXX CHECK] The dative is used for animates, and recipients are a priori animate I think; GR: I would think that DAT is not Direct Object | Donohue 2004 | MD-GR-RSI_kris1246.tsv
GB075 | I think so [e.g. aape in the dictionary] | Hurd and Hurd 1966; Hurd and Hurd 1970; Hurd 1977 | MD-GR-RSI_naas1242.tsv
GB053 | I'm not sure, but I think it might. It seems that everything in the fourth noun class is animate. | Robinson 2007 | MD-GR-RSI_roto1249.tsv
GB115 | I'm not sure about reflexive vs. reciprocal. I think they share the same marker. Is that a yes for this one then? Agree | Robinson 2007 | MD-GR-RSI_roto1249.tsv
GB146 | I think | NA | MD-GR-RSI_savo1255.tsv
GB160 | adjectives I think. Several word classes show reduplication. See the long note in the separate file TelefolComm.doc' | Healey 1964; Healey 1965; Healey 1965; Healey 1965; Healey 1966; Healey 1964; Healey 1977; Healey 1962; Healey 1974 | MD-GR-RSI_tele1256.tsv
GB086 | I think so, anyway, with iemo | NA | MD-GR-RSI_touo1238.tsv
GB038 | I think they are | Sagna 2008:118 | SVM_band1340.tsv
GB081 | I think it's not a real infix | Strom 2013:248 | SVM_nden1248.tsv
GB105 | [XXX CHECK] The dative is used for animates, and recipients are a priori animate I think; GR: I would think that DAT is not Direct Object | Donohue 2004 | MD-GR-RSI_kris1246.tsv
GB133 | CHECK GRAMMAR In irrealis clauses with the verb 'lack' SOV is the only order possible, and hence pragmatically unmarked p. 750. But the example given might as well be interpreted as a possessive construction: nia ina=n lalek (3sg mother=GEN lack) 'she has no mother'. In the questionnaire for ENUS II, Van Klinken describes these constructions as object incorporation giving apparent SOV order. | van Engelenhoven and Williams-van-Klinken 2005 | MD-GR-RSI_tetu1245.tsv
GB113 | MIINA CHECKS | Miina Norvik, p.c. | RK-MN_livv1244.tsv
GB123 | check | Churchward 1953:217 | HW_tong1325.tsv
GB103 | check | Rasoloson and Rubino 2005 | MD-GR-RSI_plat1254.tsv
GB134 | check | Rasoloson and Rubino 2005 | MD-GR-RSI_plat1254.tsv
GB119 | check | Van Staden 2000 | MD-GR-RSI_tido1248.tsv
GB135 | check and get back | Agbetsoamedo 2014 | HS_sele1249.tsv
GB105 | check grammar | van Engelenhoven and Williams-van-Klinken 2005 | MD-GR-RSI_tetu1245.tsv
GB027 | check grammar. | van Engelenhoven and Williams-van-Klinken 2005 | MD-GR-RSI_tetu1245.tsv
GB069 | check marking of adjectives; this may just be a matter of analysis or frequency: argument cross-referencing is optional; if affixed the adjective could always be interpreted as heading a relative clause, but it could also be the simple absence of marking. | Van Staden 2000 | MD-GR-RSI_tido1248.tsv
GB401 | check pg 73 | Churchward 1953:73 | HW_tong1325.tsv
GB026 | Double check figure 4. | Boxwell (1990:160) | JLA_weri1253.tsv
GB171 | Double check page source | Harvey (2002:194-195) | JLA_gaga1251.tsv
GB330 | double check this | Heath 1999:195 | RB_koyr1240.tsv
GB401 | Double check this page against the wiki. Labile verb mentioned. | Gijn (2006: 145) | JLA_yura1255.tsv
GB146 | Double check with Daria if there really is no concstruction of this type. | Mishchenko, Daria (p.c. 2013) | HS_loma1260.tsv
GB146 | Double check with Daria if there really is no concstruction of this type. | <NAME> (p.c. 2013) | HS_toma1245.tsv
GB146 | Double check with Elena if there really is no such construction | Perekhvalskaya, Elena (p.c 2013) | HS_mwan1250.tsv
GB333 | Double check with team on weds | Omar 1983:203 | JLA_rung1259.tsv
GB334 | Double check with team on weds | Omar 1983:203 | JLA_rung1259.tsv
GB335 | Double check with team on weds | Omar 1983:203 | JLA_rung1259.tsv
GB333 | Double check with team on weds | Omar 1983:233-234 | JLA_tamb1254.tsv
GB334 | Double check with team on weds | Omar 1983:233-234 | JLA_tamb1254.tsv
GB335 | Double check with team on weds | Omar 1983:233-234 | JLA_tamb1254.tsv
GB070 | GS: You gave N. I suggest Y, since nae-ba a-m-uw-e (I-focus him-give-I indic) ‘I give him’ is distinguished from nae-‘pa na-m-iy-e (I-focus me-give-he) ‘he gives me’ not only in the verb, but also how the focal marker is formed: class N when subject, class Q when benefactor. I see no such clear example in Scott 1978, but check out p.104 #170 Oblique cases). GR: but this still doesn't count as case marking in my view; it is only a focus marker that changes. | Scott 1978:104 | MD-GR-RSI_fore1270.tsv
GB304 | I think so? Double check this with Jeremy. Someone killed himself a deer./ A deer got himself killed. | Saxton (1982:144) | JLA_toho1245.tsv
GB150 | Jeremy will check in collection of narratives | Luo 2008 | JC_yong1276.tsv
GB159 | KM: Don't think so. (I'll check on it.) | McElhanon 1970; McElhanon 1972 | MD-GR-RSI_sele1250.tsv
GB057 | MD: check that this isn't NA | Davies 1992 | MD-GR-RSI_ramo1244.tsv
GB058 | MD: check that this isn't NA | Davies 1992 | MD-GR-RSI_ramo1244.tsv
GB082 | Nick said - checking with Hedvig re Mawng productivity | Pym and Larrimore 1979 | RSI_iwai1244.tsv
GB257 | Requires further research to double-check. | <NAME> p.c. (2018) | NP_wiru1244.tsv
GB312 | Requires further research to double-check. | <NAME> p.c. (2018) | NP_wiru1244.tsv
GB313 | SY: check for pages | Barth (2019) | SY_matu1261.tsv
GB171 | Check 160 | Guo (2010:1-196) | MY_youn1235.tsv
GB074 | Check in locative and instrumental enclitics which seem to mark oblique cases, count here for prep and post. | Campbell (2006:v-115) | JLA_urni1239.tsv
GB075 | Check in locative and instrumental enclitics which seem to mark oblique cases, count here for prep and post. | Campbell (2006:v-115) | JLA_urni1239.tsv
GB150 | Check other sources. | Deal (2010:1-460); Crook (1999:2-498); Cash (2004:1-72) | JLA_nezp1238.tsv
GB327 | Check pp. 38-39. Relative clause is mentioned there but the gloss is rather useless. | Piau (1985:1-147) | ER_kuma1280.tsv
GB025 | Check the example under interrogative: savai malamalapopore nake (...) | Ray (1926:373) | DB_akei1237.tsv
GB171 | Check this. | Hayward (1984:191-192) | JLA_arbo1245.tsv
GB329 | Check with Jeremy. | Gerner (2013:65) | JLA_sich1238.tsv
GB330 | Check with Jeremy. | Gerner (2013:92) | JLA_sich1238.tsv
GB331 | Check with Jeremy. | Gerner (2013:93) | JLA_sich1238.tsv
GB327 | Check! | Reichle (1981:91-94) | RHA_bawm1236.tsv
GB037 | Checking required | <NAME>, <NAME> and <NAME> p.c. (2018) | NP_wiru1244.tsv
GB252 | Checking required | <NAME>, <NAME> and <NAME> p.c. (2018) | NP_wiru1244.tsv
GB253 | Checking required | <NAME>, <NAME> and <NAME> p.c. (2018) | NP_wiru1244.tsv
GB256 | Checking required | <NAME>, <NAME> and <NAME> p.c. (2018) | NP_wiru1244.tsv
GB265 | Not mentioned in the Deal grammar. Check others. | Deal (2010:1-460); Crook (1999:2-498); Cash (2004:1-72) | JLA_nezp1238.tsv
GB266 | Not mentioned in the Deal grammar. Check others. | Deal (2010:1-460); Crook (1999:2-498); Cash (2004:1-72) | JLA_nezp1238.tsv
GB270 | Not mentioned in the Deal grammar. Check others. | Deal (2010:1-460); Crook (1999:2-498); Cash (2004:1-72) | JLA_nezp1238.tsv
GB273 | Not mentioned in the Deal grammar. Check others. | Deal (2010:1-460); Crook (1999:2-498); Cash (2004:1-72) | JLA_nezp1238.tsv
GB275 | Not mentioned in the Deal grammar. Check others. | Deal (2010:1-460); Crook (1999:2-498); Cash (2004:1-72) | JLA_nezp1238.tsv
GB276 | Not mentioned in the Deal grammar. Check others. | Deal (2010:1-460); Crook (1999:2-498); Cash (2004:1-72) | JLA_nezp1238.tsv
GB285 | Not mentioned.. requires only a question particle? Check this. | <NAME> and <NAME>. 1994. Cantonese: A Comprehensive Grammar | JLA_yuec1235.tsv"""
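# Turn the raw dump above into {sheet filename: set of flagged Feature_IDs}.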
DATAPOINTS = sorted((i.split('|')[-1].strip(), i.split('|')[0].strip()) for i in DATAPOINTS.split('\n'))
DATAPOINTS = {fname: set(i[1] for i in dps) for fname, dps in itertools.groupby(DATAPOINTS, lambda i: i[0])}
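# Row visitor: for a flagged Feature_ID, reset the Value to '?' and clear the Comment.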
def fixer(fids, row):
if row['Feature_ID'] in fids:
fids.remove(row['Feature_ID'])
if row['Value'] != '?':
row['Value'] = '?'
print('Value changed')
row['Comment'] = ''
return row
def run(args):
for sheet in args.repos.iter_sheets():
if sheet.path.name in DATAPOINTS:
sheet.visit(
row_visitor=functools.partial(
fixer,
DATAPOINTS[sheet.path.name],
))
for gc, dps in DATAPOINTS.items():
assert not dps
| 2.03125 | 2 |
api_demo/mydate.py | ntlinh16/api-demo | 0 | 12789226 | <filename>api_demo/mydate.py<gh_stars>0
from datetime import datetime
def countdown(date=None):
    # evaluate "now" at call time; datetime.now() as a default argument is frozen at import time
    if date is None:
        date = datetime.now()
    new_year = datetime(date.year + 1, 1, 1)
    days_left = (new_year - date).days
    return days_left
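# e.g. countdown(datetime(2021, 12, 25)) -> 7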
| 3.109375 | 3 |
mutations/mutation_role/mutation.py | akarapun/elearning | 1 | 12789227 | <reponame>akarapun/elearning
import graphene
from mutation_role.createRole import CreateRoleMutation
from mutation_role.updateRole import UpdateRoleMutation
from mutation_role.deleteRole import DeleteRoleMutation
class RoleMutation(
CreateRoleMutation,
UpdateRoleMutation,
DeleteRoleMutation,
graphene.ObjectType):
pass
| 1.476563 | 1 |
pollbot/PollBot.py | Breee/pokemon-raid-bot | 0 | 12789228 | <filename>pollbot/PollBot.py
"""
MIT License
Copyright (c) 2018 Breee@github
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from config.Configuration import Configuration
from poll.PollFactory import PollFactory
from poll.MultiPoll import MultiPoll
from poll.SinglePoll import SinglePoll
from discord.ext import commands
from utils import replace_quotes
from poll.emoji_storage import EmojiStorage
import discord
import datetime
import aiohttp
import os
from globals.globals import LOGGER
from database.dbhandler import DbHandler
from database.dbmodels import Poll
import asyncio
import sys
import traceback
if os.path.isfile('help_msg.txt'):
    with open('help_msg.txt', 'r') as helpfile:
        HELP_MSG = helpfile.read()
else:
    HELP_MSG = ""  # fallback so later references to HELP_MSG do not raise NameError
BANNED_USERS = []
if os.path.isfile('banned_users.txt'):
with open('banned_users.txt', 'r') as banned_users:
        for line in banned_users:
            # strip the trailing newline so membership checks against str(message.author) can match
            BANNED_USERS.append(line.strip())
class PollBot(commands.Bot):
def __init__(self, prefixes, description, config_file):
super().__init__(command_prefix=prefixes, description=description, pm_help=None, help_attrs=dict(hidden=True), fetch_offline_members=False, case_insensitive=True)
self.config = Configuration(config_file)
self.poll_factory = PollFactory()
self.add_command(self.ping)
self.add_command(self.poll)
self.add_command(self.uptime)
self.add_command(self.readpoll)
self.add_command(self.update)
self.add_command(self.ready)
self.start_time = 0.0
self.session = aiohttp.ClientSession(loop=self.loop)
self.use_custom_emojies = True
self.db_handler = DbHandler(host=self.config.db_host, user=self.config.db_user, password=self.config.db_password,
port=self.config.db_port, database=self.config.db_name, dialect=self.config.db_dialect, driver=self.config.db_driver)
self.ready = False
async def on_ready(self):
LOGGER.info("Bot initializing")
# make mentionable.
self.command_prefix.extend([f'<@!{self.user.id}> ', f'<@{self.user.id}> '])
self.start_time = datetime.datetime.utcnow()
await self.init_custom_emojies()
self.ready = False
LOGGER.info("Restoring messages")
await self.restore_messages_and_polls(days=1)
self.ready = True
LOGGER.info("Bot ready")
async def init_custom_emojies(self):
LOGGER.info("Init custom emojies")
if self.use_custom_emojies:
EmojiStorage.PEOPLE_EMOJI_TO_NUMBER = dict()
server_emojis = self.emojis
print(server_emojis)
for emoji in server_emojis:
number = None
if "rq_plus_one" in emoji.name:
number = 1
elif "rq_plus_two" in emoji.name:
number = 2
elif "rq_plus_three" in emoji.name:
number = 3
elif "rq_plus_four" in emoji.name:
number = 4
if number is not None and emoji.id not in EmojiStorage.PEOPLE_EMOJI_TO_NUMBER:
EmojiStorage.PEOPLE_EMOJI_TO_NUMBER[emoji.id] = number
if len(EmojiStorage.PEOPLE_EMOJI_TO_NUMBER) != 4:
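            # fall back to the default emoji set when the server lacks all four custom +1..+4 emojis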
EmojiStorage.PEOPLE_EMOJI_TO_NUMBER = EmojiStorage.DEFAULT_PEOPLE_EMOJI_TO_NUMBER
LOGGER.info("Done.")
def run(self):
super().run(self.config.token, reconnect=True)
async def close(self):
await super().close()
await self.session.close()
LOGGER.info("Closing")
@commands.command(help="Pings the Bot.")
async def ping(self, ctx):
await ctx.send("pong!, ready: %s" % self.ready)
@commands.command(help="Ready the Bot.", hidden=True)
@commands.is_owner()
async def ready(self, ctx):
self.ready = not self.ready
await ctx.send("ready: %s" % self.ready)
@commands.command(hidden=True)
async def uptime(self, ctx):
await ctx.send("Online for %s" % str(datetime.datetime.utcnow() - self.start_time))
@commands.command()
async def help(self, ctx, here=None):
await ctx.send(HELP_MSG)
@commands.command()
async def update(self, ctx, days):
self.ready = False
await ctx.send("Updating polls...")
await self.restore_messages_and_polls(days)
await ctx.send("Ready!")
self.ready = True
@commands.command(help="Creates a poll.\n"
"Usage:\n"
" <prefix> \"<poll_title>\" <option_1> <option_2> ... \n"
"Example:\n"
" <prefix> \"Rayquaza Jump & Twist\" 9:00 9:15 9:30 9:40")
async def poll(self, ctx, poll_title, *vote_options):
if str(ctx.message.author) in BANNED_USERS:
LOGGER.warning("Denied creation of poll, User %s is banned" % ctx.message.author)
return
with ctx.typing():
await self.create_multi_poll(ctx=ctx, trigger_message=ctx.message, poll_title=poll_title, vote_options=vote_options)
@commands.command(hidden=True)
@commands.is_owner()
async def readpoll(self, ctx, trigger_id, msg_id, *vote_options):
msg = await self.get_message_if_exists(channel_id=ctx.channel.id,message_id=msg_id)
trigger = await self.get_message_if_exists(channel_id=ctx.channel.id, message_id=trigger_id)
title = msg.content.replace("Poll for", "")
poll = self.poll_factory.create_multi_poll(poll_title=title, vote_options=vote_options)
poll.update_embed()
self.db_handler.add_poll(poll, trigger, msg)
self.db_handler.update_poll(poll)
await msg.edit(content=msg.content, embed=poll.embed)
await self.update_poll_after_restart(msg_id,msg.reactions)
async def create_multi_poll(self, ctx, trigger_message, poll_title, vote_options):
"""
Function that creates a new MultiPoll and posts it.
:param trigger_message: Message which triggered the creation of the poll
:param poll_title: Title of the poll
:param vote_options: List of string which contains vote options.
:return:
"""
LOGGER.info("Creating Multipoll: "
"trigger_message: %s,"
"poll_title: %s,"
"vote_options: %s" % (trigger_message.content, poll_title, vote_options))
# Create a new poll and post it.
poll = self.poll_factory.create_multi_poll(poll_title=poll_title, vote_options=vote_options)
poll_message = await ctx.send(content="Poll for **%s**" % (poll_title),embed=poll.embed)
self.db_handler.add_poll(poll, trigger_message, poll_message)
# add vote emoji as reaction
sorted_emoji = [(k, EmojiStorage.LETTEREMOJI_TO_NUMBER[k]) for k in sorted(EmojiStorage.LETTEREMOJI_TO_NUMBER, key=EmojiStorage.LETTEREMOJI_TO_NUMBER.get)]
for emoji, n in sorted_emoji:
if n <= len(vote_options) - 1:
await poll_message.add_reaction(emoji)
# add people emoji as reaction
sorted_people_emoji = [(k, EmojiStorage.PEOPLE_EMOJI_TO_NUMBER[k]) for k in
sorted(EmojiStorage.PEOPLE_EMOJI_TO_NUMBER, key=EmojiStorage.PEOPLE_EMOJI_TO_NUMBER.get)]
for emoji, n in sorted_people_emoji:
await poll_message.add_reaction(self.get_emoji(emoji))
async def create_single_poll(self,trigger_message, poll_title):
"""
        Function that creates a new SinglePoll and posts it.
:param trigger_message: Message which triggered the creation of the poll
:param poll_title: Title of the poll
:return:
"""
LOGGER.info("Creating SinglePoll.\n"
"trigger_message: %s" % trigger_message.content)
# Create a new poll and post it.
poll = self.poll_factory.create_single_poll(poll_title=poll_title)
poll.create_summary_message()
poll_message = await trigger_message.channel.send(content=poll.summary_message)
self.db_handler.add_poll(poll, trigger_message, poll_message)
        # add people emoji as reaction
sorted_people_emoji = [(k, EmojiStorage.EMOJI_TO_NUMBER[k]) for k in
sorted(EmojiStorage.EMOJI_TO_NUMBER, key=EmojiStorage.EMOJI_TO_NUMBER.get)]
for emoji, n in sorted_people_emoji:
if n < 4:
await poll_message.add_reaction(emoji)
async def on_raw_reaction_add(self, ctx):
if not self.ready:
return
data = {'count' : 1, 'me': ctx.user_id == self.user.id, 'emoji' : {'id': ctx.emoji.id, 'name': ctx.emoji.name}}
channel = self.get_channel(ctx.channel_id)
message = await channel.get_message(ctx.message_id)
reaction = discord.Reaction(message=message, data=data)
user = self.get_user(ctx.user_id)
if user != self.user:
# reaction has to be part of the vote emojis/ people emojis
if str(reaction.emoji) in EmojiStorage.LETTEREMOJI_TO_NUMBER or EmojiStorage.is_people_emoji(reaction.emoji) or reaction.emoji in EmojiStorage.EMOJI_TO_NUMBER:
# get poll
poll_db = self.db_handler.get_poll_with_message_id(message_id=reaction.message.id)
# add reactions
if poll_db:
poll = self.poll_factory.polls[poll_db.external_id]
poll.reactions.append((reaction, user))
# edit poll
if isinstance(poll, MultiPoll):
poll.update_embed()
await reaction.message.edit(content=reaction.message.content, embed=poll.embed)
elif isinstance(poll, SinglePoll):
poll.create_summary_message()
await reaction.message.edit(content=poll.summary_message)
# update poll in DB
self.db_handler.update_poll(poll)
async def on_raw_reaction_remove(self, ctx):
if not self.ready:
return
data = {'count' : 1, 'me': ctx.user_id == self.user.id, 'emoji' : {'id': ctx.emoji.id, 'name': ctx.emoji.name}}
channel = self.get_channel(ctx.channel_id)
message = await channel.get_message(ctx.message_id)
reaction = discord.Reaction(message=message, data=data)
user = self.get_user(ctx.user_id)
if user != self.user:
if str(reaction.emoji) in EmojiStorage.LETTEREMOJI_TO_NUMBER or EmojiStorage.is_people_emoji(
reaction.emoji) or reaction.emoji in EmojiStorage.EMOJI_TO_NUMBER:
poll_db = self.db_handler.get_poll_with_message_id(message_id=reaction.message.id)
if poll_db:
# add reactions
poll = self.poll_factory.polls[poll_db.external_id]
poll.reactions.remove((reaction, user))
# edit poll
if isinstance(poll, MultiPoll):
poll.update_embed()
await reaction.message.edit(content=reaction.message.content, embed=poll.embed)
elif isinstance(poll, SinglePoll):
poll.create_summary_message()
await reaction.message.edit(content=poll.summary_message)
# update poll in DB
self.db_handler.update_poll(poll)
async def on_raw_message_delete(self, message : discord.raw_models.RawMessageDeleteEvent):
"""
Function which handles messages that have been deleted.
:param message: deleted message.
:return:
"""
if isinstance(message, discord.raw_models.RawMessageDeleteEvent):
poll = self.db_handler.disable_poll_via_id(message_id=message.message_id)
if poll:
message = await self.get_message_if_exists(poll.channel, poll.poll_message)
if message:
await message.delete()
async def update_poll_after_restart(self, pollmessage_id, reactions):
"""
Function which is used to update polls after a restart of the bot,
The function will read the reactions of a poll message and update the poll accordingly,
this enables voting, even if the bot is offline.
:param pollmessage_id: id of the pollmessage, which shall be updated
:param reactions: reactions of the pollmessage.
:return:
"""
# get poll
poll_model = self.db_handler.get_poll_with_message_id(pollmessage_id)
if poll_model:
poll_message = await self.get_message_if_exists(channel_id=poll_model.channel, message_id=poll_model.poll_message)
poll = self.poll_factory.polls[poll_model.external_id]
# add reactions
poll.reactions = []
for reaction in reactions:
users = reaction.users()
async for user in users:
if self.user != user:
poll.reactions.append((reaction, user))
# edit poll
if isinstance(poll, MultiPoll):
poll.update_embed()
await poll_message.edit(content=poll_message.content, embed=poll.embed)
elif isinstance(poll, SinglePoll):
poll.create_summary_message()
await poll_message.edit(content=poll.summary_message)
self.db_handler.update_poll(poll)
def is_multi_poll_command(self, message_content):
"""
Function which checks whether a message is a command that triggers the creation of a MultiPoll object.
:param message_content: content of a discord message (a string)
:return:
"""
poll_command = '%spoll' % self.command_prefix
return message_content.startswith(poll_command)
def is_single_poll_command(self, message_content):
"""
Function which checks whether a message is a command that triggers the creation of a SinglePoll object.
:param message_content: content of a discord message (a string)
:return:
"""
poll_command = 'raid '
return message_content.lower().startswith(poll_command)
async def on_message(self,message):
"""
Function which handles posted messages by anyone.
Used to check whether a message triggers the creation of a SinglePoll object.
Falls back to the parent method of commands.Bot if not.
:param message: posted message
:return:
"""
if message.content.lower().startswith("raid "):
if message.author != self.user:
await self.create_single_poll(trigger_message=message, poll_title=message.content)
else:
await super().on_message(message)
@asyncio.coroutine
async def process_commands(self, message):
"""|coro|
This function processes the commands that have been registered
to the bot and other groups. Without this coroutine, none of the
commands will be triggered.
By default, this coroutine is called inside the :func:`.on_message`
event. If you choose to override the :func:`.on_message` event, then
you should invoke this coroutine as well.
This is built using other low level tools, and is equivalent to a
call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.
This also checks if the message's author is a bot and doesn't
call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.
Parameters
-----------
message: :class:`discord.Message`
The message to process commands for.
"""
if message.author.bot:
return
message.content = replace_quotes(message.content)
ctx = await self.get_context(message)
await self.invoke(ctx)
async def update_polls(self, days=1):
LOGGER.info("Updating Polls of the last %d day(s)" % days)
# get enabled polls
polls = self.db_handler.get_polls(age=days)
for poll in polls:
try:
LOGGER.info("Updating poll %s" % poll.external_id)
# Check if triggermessage exists.
trigger_message = await self.get_message_if_exists(channel_id=poll.channel,
message_id=poll.trigger_message)
# Check if poll exists.
poll_message = await self.get_message_if_exists(channel_id=poll.channel,
message_id=poll.poll_message)
# Case 1: triggermessage + poll exists. ---> Update polls.
# Case 2: triggermessage exists, poll exists not.
# Case 3: triggermessage does not exist, poll exists.
# Case 4: triggermessage + poll do not exist anymore
if trigger_message and poll_message:
await self.update_poll_after_restart(poll_message.id, poll_message.reactions)
elif trigger_message is None and poll_message:
LOGGER.debug("trigger_message does not exist anymore")
await poll_message.delete()
self.db_handler.disable_poll_via_id(poll_message.id)
elif trigger_message is None and poll_message is None:
LOGGER.debug("trigger_message and poll do not exist anymore")
except Exception as err:
LOGGER.critical("Error. %s" % err)
LOGGER.info("Polls Updated.")
async def restore_messages_and_polls(self, days):
polls = self.db_handler.get_polls()
self.poll_factory.restore_polls(polls=polls)
await self.update_polls(days)
async def get_message_if_exists(self, channel_id, message_id):
try:
channel = self.get_channel(int(channel_id))
if channel:
message = await channel.get_message(message_id)
return message
except discord.NotFound:
return None
async def on_command_error(self, ctx, error):
if isinstance(error, commands.NoPrivateMessage):
await ctx.author.send('This command cannot be used in private messages.')
elif isinstance(error, commands.DisabledCommand):
await ctx.author.send('Sorry. This command is disabled and cannot be used.')
elif isinstance(error, commands.CommandInvokeError):
LOGGER.critical(f'In {ctx.command.qualified_name}:')
traceback.print_tb(error.original.__traceback__)
LOGGER.critical(f'{error.original.__class__.__name__}: {error.original}')
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.author.send('Sorry. This command is not how this command works.\n %s' % HELP_MSG)
else:
LOGGER.critical(error)
| 1.945313 | 2 |
src/data/1238.py | NULLCT/LOMC | 0 | 12789229 | from collections import deque
def main():
N, Q = map(int, input().split())
path_dat = [list(map(int, input().split())) for _ in range(N - 1)]
queries = [list(map(int, input().split())) for _ in range(Q)]
paths = [[] for _ in range(N)]
for a, b in path_dat:
a -= 1
b -= 1
paths[a].append(b)
paths[b].append(a)
dist = [-1] * N
dist[0] = 0
queue = deque([0])
while queue:
now = queue.popleft()
for nxt in paths[now]:
if dist[nxt] != -1:
continue
dist[nxt] = dist[now] + 1
queue.append(nxt)
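    # dist[c] + dist[d] has the same parity as the length of the c-d path in the tree,
    # so an even sum means the walkers meet at a town, an odd sum means they meet on a road.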
for c, d in queries:
c -= 1
d -= 1
tmp = dist[c] + dist[d]
if tmp % 2 == 0:
print('Town')
else:
print('Road')
main()
| 3.234375 | 3 |
lib/models/gpt.py | learning-at-home/clip_hivemind | 3 | 12789230 | <filename>lib/models/gpt.py
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch GPT modules that do not hog your GPU memory """
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.activations import ACT2FN
from transformers.modeling_outputs import CausalLMOutputWithCrossAttentions
from transformers.modeling_utils import PreTrainedModel
from transformers.utils import logging
from lib.models.transformer import GradientCheckpointingMixin, LeanTransformer, LeanTransformerConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "LeanGPTConfig"
_TOKENIZER_FOR_DOC = "GPT2Tokenizer"
class LeanGPTConfig(LeanTransformerConfig):
def __init__(
self,
*args,
vocab_size: int = 50257,
embedding_size: int = 1024,
type_vocab_size: int = 2,
pad_token_id: int = 0,
bos_token_id: int = 2,
eos_token_id: int = 3,
**kwargs
):
super().__init__(
*args,
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
type_vocab_size=type_vocab_size,
tie_word_embeddings=True,
**kwargs
)
self.vocab_size = vocab_size
self.embedding_size = embedding_size
self.type_vocab_size = type_vocab_size
class LeanGPTEmbeddings(nn.Module):
"""
Construct the embeddings from word, position and token_type embeddings. These embeddigns double as logits.
"""
def __init__(self, config: LeanTransformerConfig):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
self.token_type_embeddings = config.get_token_type_embeddings()
self.position_embeddings = config.get_input_position_embeddings()
self.layer_norm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
if config.embedding_size != config.hidden_size:
self.embedding_hidden_mapping = nn.Linear(config.embedding_size, config.hidden_size)
if self.position_embeddings is not None:
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
# Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
        if token_type_ids is None:
            # self.position_ids may not exist (no absolute position embeddings), so take the device from the inputs
            device = input_ids.device if input_ids is not None else inputs_embeds.device
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embeddings is not None:
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.layer_norm(embeddings)
embeddings = self.dropout(embeddings)
if hasattr(self, "embedding_hidden_mapping"):
embeddings = self.embedding_hidden_mapping(embeddings)
return embeddings
class TiedMLMHead(nn.Module):
def __init__(self, config, embeddings: LeanGPTEmbeddings):
super().__init__()
self.embeddings = embeddings
if config.embedding_size != config.hidden_size:
self.hidden_bias = nn.Parameter(torch.zeros(config.embedding_size))
self.layer_norm = nn.LayerNorm(config.embedding_size)
self.activation = ACT2FN[config.hidden_act]
self.logits_bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
if hasattr(self, "hidden_bias"):
weight = self.embeddings.embedding_hidden_mapping.weight.t()
hidden_states = F.linear(input=hidden_states, weight=weight, bias=self.hidden_bias)
hidden_states = self.activation(hidden_states)
hidden_states = self.layer_norm(hidden_states)
logits = F.linear(input=hidden_states, weight=self.embeddings.word_embeddings.weight, bias=self.logits_bias)
return logits
class LeanGPTForPreTraining(GradientCheckpointingMixin, PreTrainedModel):
config_class = LeanGPTConfig
base_model_prefix = "lean_gpt"
def __init__(self, config: config_class):
PreTrainedModel.__init__(self, config)
self.config = config
self.embeddings = LeanGPTEmbeddings(config)
self.transformer = LeanTransformer(config)
self.lm_head = TiedMLMHead(config, self.embeddings)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, new_embeddings: nn.Embedding):
assert isinstance(new_embeddings, nn.Embedding)
self.embeddings.word_embeddings = new_embeddings
prev_bias = self.lm_head.logits_bias
intersection_size = min(len(prev_bias), new_embeddings.num_embeddings)
self.lm_head.logits_bias = nn.Parameter(torch.zeros(new_embeddings.num_embeddings, dtype=prev_bias.dtype,
device=prev_bias.device, layout=prev_bias.layout))
with torch.no_grad():
self.lm_head.logits_bias[:intersection_size] = prev_bias[:intersection_size]
def _init_weights(self, module: nn.Module):
return self.config.init_weights(module)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
assert head_mask is None and output_attentions is None and output_hidden_states is None, "not implemented"
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
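        # Lower-triangular causal mask: each position may attend only to itself and earlier
        # positions; future positions receive a large negative additive bias.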
causal_attention_mask = torch.ones(seq_length, seq_length, dtype=self.dtype, device=device)
causal_attention_mask = torch.tril(causal_attention_mask).view(1, 1, seq_length, seq_length)
causal_attention_mask = (1.0 - causal_attention_mask) * -10000.0
embedding_output = self.embeddings(
input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
)
transformer_outputs = self.transformer(embedding_output, (extended_attention_mask, causal_attention_mask))
lm_logits = self.lm_head(transformer_outputs.last_hidden_state)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
ignored_labels = torch.full_like(labels[..., :1], fill_value=-100)
shift_labels = torch.cat([labels[..., 1:], ignored_labels], dim=1)
loss = F.cross_entropy(lm_logits.view(-1, lm_logits.shape[-1]), shift_labels.view(-1),
reduction='mean', ignore_index=-100)
# note: masked labels have index -100 so they will be ignored when computing cross-entropy
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=lm_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
| 1.789063 | 2 |
tools/build/v2/test/testing_primitives.py | mike-code/boost_1_38_0 | 130 | 12789231 | #!/usr/bin/python
# Copyright 2002 <NAME>
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
import re
def match_re(actual, expected):
return re.match(expected, actual, re.DOTALL) != None
t = BoostBuild.Tester(match=match_re)
t.set_tree('testing-primitives')
# We expect t5 and t7's output to be dumped to stdout.
t.run_build_system(stdout=r'''.*failing t5.*failing t7''')
t.expect_addition('t2.txt')
t.expect_addition('t3.txt')
t.expect_addition('t5.out')
t.expect_addition('t6.out')
t.expect_addition('t6.txt')
t.expect_addition('t7.out')
t.expect_addition('t7.txt')
t.expect_addition('t8.out')
t.expect_nothing_more()
t.cleanup()
| 2.171875 | 2 |
tests/conftest.py | DenMaslov/dz4 | 0 | 12789232 | import pytest
from dz4.calculator.calculator import Calculator
@pytest.fixture()
def calculator() -> Calculator:
    """Provide a fresh Calculator instance to each test."""
    return Calculator()
| 1.664063 | 2 |
message_creator/actions_pb2.py | jameshp/deviceadminserver | 0 | 12789233 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: actions.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='actions.proto',
package='Proto.JSON',
syntax='proto3',
serialized_pb=b'\n\ractions.proto\x12\nProto.JSON\"\xaf\x01\n\raction_object\x12\x0e\n\x06\x61\x63tion\x18\x01 \x01(\t\x12\x45\n\nparameters\x18\x02 \x03(\x0b\x32\x31.Proto.JSON.action_object.action_parameter_object\x1aG\n\x17\x61\x63tion_parameter_object\x12\x14\n\x0cparameterKey\x18\x01 \x01(\t\x12\x16\n\x0eparameterValue\x18\x02 \x01(\tB/\n\x1enet.ktc.miles.model.proto.JSONB\x0b\x41\x63tionsJSONH\x01\x62\x06proto3'
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ACTION_OBJECT_ACTION_PARAMETER_OBJECT = _descriptor.Descriptor(
name='action_parameter_object',
full_name='Proto.JSON.action_object.action_parameter_object',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parameterKey', full_name='Proto.JSON.action_object.action_parameter_object.parameterKey', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parameterValue', full_name='Proto.JSON.action_object.action_parameter_object.parameterValue', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=134,
serialized_end=205,
)
_ACTION_OBJECT = _descriptor.Descriptor(
name='action_object',
full_name='Proto.JSON.action_object',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='action', full_name='Proto.JSON.action_object.action', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parameters', full_name='Proto.JSON.action_object.parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_ACTION_OBJECT_ACTION_PARAMETER_OBJECT, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=30,
serialized_end=205,
)
_ACTION_OBJECT_ACTION_PARAMETER_OBJECT.containing_type = _ACTION_OBJECT
_ACTION_OBJECT.fields_by_name['parameters'].message_type = _ACTION_OBJECT_ACTION_PARAMETER_OBJECT
DESCRIPTOR.message_types_by_name['action_object'] = _ACTION_OBJECT
action_object = _reflection.GeneratedProtocolMessageType('action_object', (_message.Message,), dict(
action_parameter_object = _reflection.GeneratedProtocolMessageType('action_parameter_object', (_message.Message,), dict(
DESCRIPTOR = _ACTION_OBJECT_ACTION_PARAMETER_OBJECT,
__module__ = 'actions_pb2'
# @@protoc_insertion_point(class_scope:Proto.JSON.action_object.action_parameter_object)
))
,
DESCRIPTOR = _ACTION_OBJECT,
__module__ = 'actions_pb2'
# @@protoc_insertion_point(class_scope:Proto.JSON.action_object)
))
_sym_db.RegisterMessage(action_object)
_sym_db.RegisterMessage(action_object.action_parameter_object)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), b'\n\036net.ktc.miles.model.proto.JSONB\013ActionsJSONH\001')
# @@protoc_insertion_point(module_scope)
| 1.171875 | 1 |
Diena_8_dictionaries/u2_g2.py | edzya/Python_RTU_08_20 | 8 | 12789234 | # def replace_dict_value(d, bad_val, good_val):
# myDict = d
# for x in myDict: # same as going through x in myDict.keys()
# if myDict[x] == bad_val:
# myDict[x] = good_val
# return myDict
# If I do not want to mutate the original dictionary,
# then a dictionary comprehension is the easiest and fastest approach.
# 2. Dictionary value replacer ("Vārdnīcu labotājs")
def replace_dict_value(d, bad_val, good_val):
return {k: good_val if v == bad_val else v for k, v in d.items()}
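# replace_dict_value({'a': 5, 'b': 6, 'c': 5}, 5, 10) -> {'a': 10, 'b': 6, 'c': 10}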
print(replace_dict_value({'a':5,'b':6,'c':5}, 5, 10)) | 3.84375 | 4 |
python version/katona/katona.py | psychology-experiments/Five-square-problem | 0 | 12789235 | from typing import Optional
from psychopy import core, visual
from katona.logic import eventsmeasures
from katona.logic.datasaver import DataSaver
from katona.logic.movement import StickMover
from katona.logic import eventhandler
from katona.logic import essential
from katona.visual import optional
from katona.visual.grid import VisualGrid
class Katona:
def __init__(self,
window: visual.Window,
stick_length: float,
stick_width: float,
field_size: int,
grid_color: str,
data_fp: str,
experiment_info: dict,
special_event_finder: str,
event_threshold: int = 30,
solution=None,
time_limit: int = 15,
background_color="white",
screen_cover_color="white",
time_to_cover_grid=None,
feedback=None,
):
self._window = window
self._clock = core.Clock()
self._feedback_type = feedback
self._stop_questioning_stick_color = False
self._time_limit = time_limit * 60
self._moves_made = 0
self._correct_answers = 0
self.solved: Optional[bool] = None
self.grid_color = grid_color
self.grid = VisualGrid(window=window,
stick_length=stick_length,
stick_width=stick_width,
field_size=field_size,
grid_color=self.grid_color,
)
self.stick_mover = StickMover(window=window)
if special_event_finder == "ObjectiveImpasseFinder":
self._special_event_finder = eventsmeasures.ObjectiveImpasseFinder(
minimum_data_to_identify=event_threshold)
self.data_saver = DataSaver(save_folder=data_fp,
experiment_info=experiment_info,
special_event_finder=self._special_event_finder,
)
button_x_pos = (self.grid.outer_border + window.size[0] / 2) / 2
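        # Button labels are Russian: "Обновить" = "Reset", "Я застрял" = "I am stuck".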
self._reset_button = optional.Button(win=window,
event_name="default place",
label_size=40,
label_text="Обновить",
pos=(button_x_pos, 0),
button_enabled=True,
button_color=background_color,
)
self._impasse_button = optional.Button(win=window,
event_name="impasse",
label_size=40,
label_text="Я застрял",
pos=(-button_x_pos, 0),
button_enabled=True,
button_color=background_color,
)
self._scree_cover = optional.ScreenCover(window=window,
start_position=(0, self.grid.outer_border),
size=self.grid.outer_border,
cover_color=screen_cover_color,
grow_rate=0.5,
time_to_cover=time_to_cover_grid)
solution_absolute_idx = {}
for solution_name in solution:
solution_absolute_idx[solution_name] = {}
for solution_info_name, solution_info in solution[solution_name].items():
solution_absolute_idx[solution_name][solution_info_name] = \
tuple([self.grid.extract_grid_element_by_grid_idx(grid_idx).grid_idx
for grid_idx in solution_info])
self._answer_checker = essential.AnswerChecker(solution_absolute_idx)
if feedback == "sound":
sound_path = f"sounds/{experiment_info['feedback.type']}.wav"
if experiment_info['feedback.type'] == "positive":
self._event_feedback = eventhandler.PositiveSoundFeedback(sound_path)
else:
self._event_feedback = eventhandler.NegativeSoundFeedback(sound_path)
elif feedback == "image":
image_path = f"images/{experiment_info['feedback.type']}.jpg"
if experiment_info['feedback.type'] == "positive":
self._event_feedback = eventhandler.PositiveImageFeedback(window, image_path)
else:
self._event_feedback = eventhandler.NegativeImageFeedback(window, image_path)
elif feedback == "phrases":
import csv
phrases_fp = "text/phrases.csv"
column = experiment_info['feedback.type']
phrases = []
with open(file=phrases_fp, mode="r", encoding="utf-8") as csv_file:
phrases_file = csv.DictReader(f=csv_file)
for row in phrases_file:
phrases.append(row[column])
max_width = window.size[0] / 2 - self.grid.outer_border
self._event_feedback = eventhandler.TextTimeHandler(window,
phrases_list=phrases,
phrase_time_showed=10,
time_between_phrases=60,
position=(-button_x_pos, 0),
width=max_width * 0.8)
self._impasse_button = optional.FakeButton()
elif feedback is None:
self._event_feedback = eventhandler.ZeroHandler()
self._color_group = experiment_info['feedback.type']
self._chosen_colors = {}
def create_movable_sticks(self,
grid_indexes,
movable_stick_color,
color_positions: str = "all"):
        # store the stick colour information when the experimental condition colours the sticks
if isinstance(self._event_feedback, eventhandler.ZeroHandler):
self._chosen_colors[movable_stick_color[1]] = f"neutral.{movable_stick_color[1]}"
if self._color_group == "important likable":
self._chosen_colors[movable_stick_color[0]] = f"like.{movable_stick_color[0]}"
self._chosen_colors[movable_stick_color[2]] = f"dislike.{movable_stick_color[2]}"
elif self._color_group == "unimportant likable":
self._chosen_colors[movable_stick_color[2]] = f"like.{movable_stick_color[2]}"
self._chosen_colors[movable_stick_color[0]] = f"dislike.{movable_stick_color[0]}"
elif self._color_group == "control":
self._chosen_colors[movable_stick_color[0]] = f"like.{movable_stick_color[0]}"
self._chosen_colors[movable_stick_color[2]] = f"dislike.{movable_stick_color[2]}"
movable_stick_color = movable_stick_color[1]
self.grid.create_movable_sticks(grid_indexes=grid_indexes,
movable_stick_color=movable_stick_color,
color_positions=color_positions)
def run(self):
if self._clock.getTime() >= self._time_limit:
self.solved = False
self.data_saver.save_failure_to_solve(9999)
return
if self._event_feedback.is_in_progress():
return
if self._moves_made != 3:
self.stick_mover.check_and_execute_moves(movable_sticks=self.grid.movable_elements,
grid_elements=self.grid.grid_elements)
if not self._stop_questioning_stick_color and \
self.stick_mover.chosen_stick is not None:
stick_color_hex = getattr(self.stick_mover.chosen_stick.visual_element, "fillColor")
stick_color_like = self._chosen_colors[stick_color_hex]
self.data_saver.get_stick_color(stick_color_like)
self._stop_questioning_stick_color = True
mouse_last_click = self.stick_mover.last_click
solving = self._answer_checker.is_approach_solution(mouse_last_click)
if self._feedback_type == "phrases":
self._event_feedback.on_event(solving)
if not self.stick_mover.move_made:
self.data_saver.get_click(mouse_last_click)
else:
self._correct_answers += solving
self._event_feedback.on_event(solving)
self.data_saver.get_event_feedback(self._event_feedback.is_new_event())
self.data_saver.get_click(mouse_last_click)
self._moves_made += 1
if self._correct_answers == 3:
self.solved = True
self._stop_questioning_stick_color = False
if self.stick_mover.chosen_stick is None and self._reset_button.button_pressed():
self.data_saver.get_click(self._reset_button.last_click)
self.stick_mover.release_stick()
self.return_to_default()
self._answer_checker.reset()
self._scree_cover.resize()
if self._impasse_button.button_pressed():
self.data_saver.get_click(self._impasse_button.last_click)
def get_moves_made(self):
return self._moves_made
def return_to_default(self):
self._moves_made = 0
self._correct_answers = 0
self.grid.return_to_default_positions()
def start_time(self):
self.stick_mover.reset_time()
self._reset_button.reset_time()
self._impasse_button.reset_time()
self._clock.reset()
self._event_feedback.reset_time()
def draw(self):
self._reset_button.draw()
self._impasse_button.draw()
self.grid.draw()
if self._moves_made == 3 and not self.solved and not self._event_feedback.is_in_progress():
self._scree_cover.draw()
class TrainingOnGrid:
def __init__(self,
window: visual.Window,
stick_length: float,
stick_width: float,
field_size: int,
grid_color: str,
movable_stick_color: str,
):
self.training_finished = False
self.grid_color = grid_color
self.movable_stick_color = movable_stick_color
self._window = window
target_grid_positions = ((7,), (1, 15), (),)
self._target_grid_positions = iter(target_grid_positions)
self._target_grid_marks = []
self._current_target_grid_positions = None
self.grid = VisualGrid(window=self._window,
stick_length=stick_length,
stick_width=stick_width,
field_size=field_size,
grid_color=self.grid_color,
)
self._training_sticks_position = iter((((0, -1),), ((0, 0),), ((0, 1), (0, 2))))
self._training_sticks_marks = []
conditions = (self.first_condition, self.second_condition, lambda _, __: None)
self._conditions = iter(conditions)
self._current_condition = None
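        # The three training prompts below are in Russian:
        #  1) "Click the stick marked with a dot and place it into the cell marked with a cross."
        #  2) "Sticks can be rotated: pick one up and scroll the mouse wheel. Now place the dotted sticks into the marked cells."
        #  3) "Besides what has already been shown, the screen may contain buttons; just click them. Press the button on the screen."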
training_messages = ("Нажмите на палочку c точкой и поставьте её в ячейку с крестиком",
"Палочки можно вращать, для этого возьмите её и покрутите колесо мыши.\n"
"Теперь расположите палочки с точками в ячейках с крестиками.",
"Кроме уже сделаного на экране могут быть кнопки, на них нужно просто нажимать.\n"
"Нажмите на кнопку на экране.")
self._training_messages = iter(training_messages)
self.stick_mover = StickMover(window=self._window)
text_y_pos = (self.grid.outer_border + window.size[1] / 2) / 2
self._training_instruction = visual.TextStim(win=self._window,
text="",
pos=(0, text_y_pos),
color="black",
height=39,
wrapWidth=self._window.size[0],
)
button_y_pos = -text_y_pos
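        # "Нажми меня" = "Press me".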
self._training_button = optional.Button(win=self._window,
event_name="training",
label_size=40,
label_text="Нажми меня",
pos=(0, button_y_pos),
button_enabled=False)
self._last_animation = ((row, col) for col in range(-2, 3)
for row in range(-5, 6, 2))
self._last_animation_created = False
self._clock = core.Clock()
self._last_animation_update = 0
self._next_training_stage()
@staticmethod
def first_condition(movable, grid):
movable_x, movable_y = movable[0].visual_element.pos
grid_x, grid_y = grid[7].visual_element.pos
return movable_x == grid_x and movable_y == grid_y
@staticmethod
def second_condition(movable, grid):
wanted_positions = (tuple(grid[1].visual_element.pos), tuple(grid[15].visual_element.pos))
first_stick_in_place = tuple(movable[0].visual_element.pos) in wanted_positions
second_stick_in_place = tuple(movable[1].visual_element.pos) in wanted_positions
return first_stick_in_place and second_stick_in_place
def _next_training_stage(self):
self._target_grid_marks = []
add_sticks = next(self._training_sticks_position)
self.grid.create_movable_sticks(grid_indexes=add_sticks,
movable_stick_color=self.movable_stick_color)
for grid_element in self.grid.movable_elements:
stick_pos = grid_element.visual_element.pos
circle = visual.Circle(win=self._window,
fillColor="yellow",
pos=stick_pos,
size=7)
self._training_sticks_marks.append(circle)
if self._current_target_grid_positions is not None:
self.grid.set_grid_color(self.grid_color)
# TODO: remove old way
# for grid_element_idx in self._current_target_grid_positions:
# self.grid.grid_elements[grid_element_idx].visual_element.color = self.grid_color
self._current_target_grid_positions = next(self._target_grid_positions)
for grid_element_idx in self._current_target_grid_positions:
# self.grid.grid_elements[grid_element_idx].visual_element.color = "red"
target_pos = self.grid.grid_elements[grid_element_idx].visual_element.pos
cross = visual.ShapeStim(win=self._window,
vertices="cross",
fillColor="yellow",
pos=target_pos,
size=12,
ori=45)
self._target_grid_marks.append(cross)
self._training_instruction.text = next(self._training_messages)
self._current_condition = next(self._conditions)
if len(self.grid.movable_elements) == 4:
self._training_button.buttonEnabled = True
self.grid.return_to_default_positions()
def _is_training_stage_completed(self):
if self._training_button.button_pressed():
self._training_button.buttonEnabled = False
self.training_finished = True
self.grid.movable_elements = []
self._training_sticks_marks = []
            # "Training is finished. Press space to continue."
            self._training_instruction.text = "Тренировка закончена. Чтобы продолжить нажмите пробел"
return self._current_condition(self.grid.movable_elements, self.grid.grid_elements)
def run(self):
self.stick_mover.check_and_execute_moves(movable_sticks=self.grid.movable_elements,
grid_elements=self.grid.grid_elements)
if self._is_training_stage_completed():
self._next_training_stage()
def draw(self):
if not self._last_animation_created and self.training_finished:
try:
if self._clock.getTime() - self._last_animation_update >= 0.1:
self._last_animation_update = self._clock.getTime()
idx_of_grid_for_stick = (next(self._last_animation),)
self.grid.create_movable_sticks(grid_indexes=idx_of_grid_for_stick,
movable_stick_color="brown")
except StopIteration:
self._last_animation_created = True
if self._training_button.buttonEnabled:
self._training_button.draw()
# TODO: remove old way
# for grid_element in self.grid.grid_elements:
# grid_element.visual_element.draw()
self.grid.draw()
for target_mark in self._target_grid_marks:
target_mark.draw()
positions = []
for movable_element in self.grid.movable_elements:
movable_element.visual_element.draw()
pos = movable_element.visual_element.pos
positions.append(pos)
for pos, stick_mark in zip(positions, self._training_sticks_marks):
stick_mark.pos = pos
stick_mark.draw()
self._training_instruction.draw()
| 2.21875 | 2 |
recipes/site_listers/seriouseats.py | cfeenstra67/recipes | 0 | 12789236 | from recipes.site_listers.base import SitemapLister
class SeriousEatsLister(SitemapLister):
""" """
start_url = "https://www.seriouseats.com/sitemap_1.xml"
| 1.359375 | 1 |
Udacity_Course_Homeworks/HW1-4/Like_Lenet-5_1_cnn.py | ZLake/Deep_Learning_on_Tensorflow | 0 | 12789237 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 8 22:09:19 2017
@author: LinZhang
"""
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
import numpy as np
def reformat(dataset, labels):
dataset = dataset.reshape(
(-1, image_size, image_size, num_channels)).astype(np.float32)
labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
return dataset, labels
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
# def initial variables:
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
# def basic operation in cnn:
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
image_size = 28
num_labels = 10
num_channels = 1 # grayscale
batch_size = 16
#patch_size = 5 # not really used, finetune your network for fun!
#depth = 16 # not really used, finetune your network for fun!
num_hidden = 1024
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# a small network with two convolutional layers, followed by one fully connected layer
graph = tf.Graph()
with graph.as_default():
# Input data.
tf_train_dataset = tf.placeholder(
tf.float32, shape=(batch_size, image_size, image_size, num_channels))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
keep_prob = tf.placeholder(tf.float32)
# Variables.
layer1_weights = weight_variable([3, 3, num_channels, 8]) # conv kernel
layer1_biases = bias_variable([8])
layer2_weights = weight_variable([3, 3, 8, 16]) # conv kernel
layer2_biases = bias_variable([16])
layer3_weights = weight_variable([image_size // 4 * image_size // 4 * 16, num_hidden])
layer3_biases = bias_variable([num_hidden])
layer4_weights = weight_variable([num_hidden, num_labels])
layer4_biases = bias_variable([num_labels])
# Model.
def model(data,use_dropout = False):
# convolution layer 1
conv1 = conv2d(data, layer1_weights)
hidden1 = tf.nn.relu(conv1 + layer1_biases)
hidden1_pool = max_pool_2x2(hidden1)
# convolution layer 2
conv2 = conv2d(hidden1_pool, layer2_weights)
hidden2 = tf.nn.relu(conv2 + layer2_biases)
hidden2_pool = max_pool_2x2(hidden2)
# full connection layer
shape = hidden2_pool.get_shape().as_list()
reshape = tf.reshape(hidden2_pool, [shape[0], shape[1] * shape[2] * shape[3]])
hidden3 = tf.nn.relu(tf.matmul(reshape, layer3_weights) + layer3_biases)
# dropout
if (use_dropout):
return tf.matmul(tf.nn.dropout(hidden3,keep_prob), layer4_weights) + layer4_biases
else:
return tf.matmul(hidden3, layer4_weights) + layer4_biases
# Training computation.
logits = model(tf_train_dataset,use_dropout = True) # only training uses dropout
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))
# learning rate decay
global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = 0.05
    learning_rate = tf.train.exponential_decay(starter_learning_rate,
                                               global_step, 100, 0.95, staircase=True)
    # Optimizer. Pass global_step so minimize() increments it and the decay schedule actually advances.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(model(tf_valid_dataset))
test_prediction = tf.nn.softmax(model(tf_test_dataset))
num_steps = 1001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
batch_data = train_dataset[offset:(offset + batch_size), :, :, :]
batch_labels = train_labels[offset:(offset + batch_size), :]
feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels, keep_prob:0.5}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 100 == 0):
print('Minibatch loss at step %d: %f' % (step, l))
print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
| 2.546875 | 3 |
setup.py | Dyend/django-mercadopago | 30 | 12789238 | <gh_stars>10-100
#!/usr/bin/env python
from setuptools import find_packages
from setuptools import setup
setup(
name="django-mercadopago",
description="MercadoPago integration for django",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/WhyNotHugo/django-mercadopago",
license="ISC",
packages=find_packages(),
long_description=open("README.rst").read(),
install_requires=[
"wheel>=0.26.0",
"Django>=2.2.0",
"mercadopago<1.0.0",
"setuptools-git>=1.1",
"setuptools-scm>=1.8.0",
],
use_scm_version={"version_scheme": "post-release"},
setup_requires=["setuptools_scm"],
extras_require={"fixtures": ["factory-boy"]},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.0",
"Framework :: Django :: 3.1",
"Framework :: Django",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| 1.320313 | 1 |
class12pythoncbse-master/Practical3/studentrecordqueue.py | SubrataSarkar32/college3rdsem3035 | 0 | 12789239 | class Student:
def __init__(self,name,classs,section,rollno):
self.name=name
self.classs=classs
self.section=section
self.rollno=rollno
def __str__(self):
        string = ('Student Name:' + str(self.name) +
                  '\nStudent Class:' + str(self.classs) +
                  '\nStudent Section:' + str(self.section) +
                  '\nStudent Roll No.:' + str(self.rollno))
return string
class queue:
'''Implementing queue with list'''
def __init__(self,limit):
self.L=[]
self.limit=limit
self.fp=-1
self.rp=0
for i in range(self.limit):
self.L+=[None]
def insert(self,element):
if self.fp+1<self.limit:
self.fp+=1
self.L[self.fp]=element
else:
print 'OVERFLOW!!'
def delete(self):
if self.rp<=self.fp:
print 'Element removed ',self.L[self.rp]
self.rp+=1
if self.rp==self.limit:
self.fp=-1
self.rp=0
else:
print 'UNDERFLOW!!'
def display(self):
for j in range(self.rp,self.fp+1):
print self.L[j]
print '-'*30
if self.fp<self.rp:
print 'Queue is empty',
print
print '#'*30
#--------------------------main----------------------------------
while True:
print 'Creating new record dataset'
limit=input('''Enter number of student's record you want to store:''')
st1=queue(limit)
while True:
print '1.Enqueue student'
print '2.Dequeue student'
print '3.Display record'
print '4.Create new record dataset'
print '5.Quit'
res=raw_input('Enter your choice: ')
if res=='1':
rollno=input("Enter roll no: ")
name=raw_input("Enter name: ")
classs=raw_input("Enter class: ")
section=raw_input("Enter section: ")
stu=Student(name,classs,section,rollno)
from copy import deepcopy
st1.insert(deepcopy(stu))
elif res=='2':
st1.delete()
elif res=='3':
st1.display()
elif res=='4':
break
elif res=='5':
import sys
sys.exit()
else:
print 'Invalid command'
| 3.953125 | 4 |
scripts/parsers/clover_parser.py | patrickjchap/Static-Bug-Detectors-ASE-Artifact | 1 | 12789240 | import os
import subprocess
import sys
from bs4 import BeautifulSoup
class CloverCoveredLine:
def __init__(self, image_tag, filepath, filename, line_number):
self.image_tag = image_tag
self.filepath = filepath
self.filename = filename
self.line_number = line_number
def __members(self):
return (self.image_tag, self.filepath, self.filename, self.line_number)
def __eq__(self, other):
if type(other) is type(self):
return self.__members() == other.__members()
else:
return False
def to_CSV(self):
return '{},{},{},{}'.format(self.image_tag, self.filepath, self.filename, self.line_number)
def main(argv=None):
argv = argv or sys.argv
reports_dir, is_bugswarm, image_tag = _validate_input(argv)
image_tags = get_image_tag_list(reports_dir) if image_tag is None else [image_tag]
covered_lines = []
for image_tag in image_tags:
img_tag_covered_lines = {}
# Parse the POM.
clover_reports = get_clover_reports(reports_dir, image_tag, is_bugswarm)
if clover_reports is None:
continue
for report in clover_reports:
if report == '' or report is None:
continue
soup = BeautifulSoup(open(report), 'lxml-xml')
# Get all packages for the source and test code
project_packages = soup.project.find_all('package')
testproject_packages = soup.testproject.find_all('package')
            # Iterate through all project packages collecting lines with counts greater than 0
for package in project_packages:
for file in package.find_all('file'):
for line in file.find_all('line'):
line_count = line.get('count')
line_count = int(line.get('count')) if line_count is not None else 0
# if line_count is None:
# continue
# else:
# line_count = int(line_count)
if line_count > 0:
clover_line = CloverCoveredLine(image_tag, file.get('path'), file.get('name'), line.get('num'))
# if clover_line.to_CSV() not in img_tag_covered_lines:
img_tag_covered_lines[clover_line.to_CSV()] = 1
for test_package in testproject_packages:
for file in test_package.find_all('file'):
for line in file.find_all('line'):
line_count = line.get('count')
if line_count is None:
continue
else:
line_count = int(line_count)
if line_count > 0:
clover_line = CloverCoveredLine(image_tag, file.get('path'), file.get('name'), line.get('num'))
# if clover_line.to_CSV() not in img_tag_covered_lines:
img_tag_covered_lines[clover_line.to_CSV()] = 1
covered_lines.extend(list(img_tag_covered_lines.keys()))
with open('clover-covered-lines.csv', 'w+') as file:
for covered_line in covered_lines:
file.write('{}\n'.format(covered_line))
def _run_command(command):
process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
stdout, stderr = process.communicate()
stdout = stdout.decode('utf-8').strip()
stderr = stderr.decode('utf-8').strip()
ok = process.returncode == 0
return process, stdout, stderr, ok
def _print_error(msg, stdout=None, stderr=None):
print('Error: ' + msg)
if stdout is not None:
print('stdout:\n{}'.format(stdout))
if stderr is not None:
print('stderr:\n{}'.format(stderr))
def get_clover_reports(reports_dir, image_tag, is_bugswarm):
bs_cmd = 'find {}/{}/failed/targetsite -name "clover.xml"'.format(reports_dir, image_tag)
d4j_cmd = 'find {}/{}/b -name "coverage.xml"'.format(reports_dir, image_tag)
cmd = bs_cmd if is_bugswarm else d4j_cmd
print(cmd)
_, stdout, stderr, ok = _run_command(cmd)
if not ok:
_print_error('Error getting clover-reports', stdout, stderr)
return None
return stdout.split('\n')
def get_image_tag_list(directory):
cmd = 'ls {}'.format(directory)
_, stdout, stderr, ok = _run_command(cmd)
if not ok:
_print_error('Error getting list of image_tags', stdout ,stderr)
image_tags = [x.strip() for x in stdout.split('\n')]
if 'from_host' in image_tags:
image_tags.remove('from_host')
return image_tags
def _print_usage():
    print('Usage: python3 clover_parser.py <reports_dir> <is_bugswarm> [image_tag]')
    print('reports_dir: Path to the directory of reports')
    print('is_bugswarm: "true" if the reports come from BugSwarm artifacts')
def _validate_input(argv):
if len(argv) != 3 and len(argv) != 4:
_print_usage()
sys.exit(1)
reports_dir = argv[1]
    is_bugswarm = True if argv[2] == 'true' else False
image_tag = argv[3] if len(argv) == 4 else None
    if not os.path.isdir(reports_dir):
        print('The reports_dir argument is not a directory or does not exist. Exiting.')
_print_usage()
sys.exit(1)
return reports_dir, is_bugswarm, image_tag
if __name__ == '__main__':
sys.exit(main())
| 2.671875 | 3 |
scikeras/utils/random_state.py | metasyn/scikeras | 1 | 12789241 | import os
import random
from contextlib import contextmanager
from typing import Generator
import numpy as np
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import config, ops
DIGITS = frozenset(str(i) for i in range(10))
@contextmanager
def tensorflow_random_state(seed: int) -> Generator[None, None, None]:
# Save values
origin_gpu_det = os.environ.get("TF_DETERMINISTIC_OPS", None)
orig_random_state = random.getstate()
orig_np_random_state = np.random.get_state()
if context.executing_eagerly():
tf_random_seed = context.global_seed()
else:
tf_random_seed = ops.get_default_graph().seed
determism_enabled = config.is_op_determinism_enabled()
config.enable_op_determinism()
# Set values
os.environ["TF_DETERMINISTIC_OPS"] = "1"
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
yield
# Reset values
if origin_gpu_det is not None:
os.environ["TF_DETERMINISTIC_OPS"] = origin_gpu_det
else:
os.environ.pop("TF_DETERMINISTIC_OPS")
random.setstate(orig_random_state)
np.random.set_state(orig_np_random_state)
tf.random.set_seed(tf_random_seed)
if not determism_enabled:
config.disable_op_determinism()
| 2.46875 | 2 |
thumt/scripts/merge_translate_option.py | Glaceon31/NMTPhraseDecoding | 0 | 12789242 | #!/usr/bin/env python
# coding=utf-8
# Copyright 2018 The THUMT Authors
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import operator
import os
import json
import math
def parseargs():
msg = "Merge translation options"
usage = "merge_translation_option.py [<args>] [-h | --help]"
parser = argparse.ArgumentParser(description=msg, usage=usage)
parser.add_argument("--input", type=str, required=True, nargs="+",
help="translation options")
parser.add_argument("--output", type=str, help="output path")
return parser.parse_args()
if __name__ == "__main__":
args = parseargs()
result = {}
tos = [json.load(open(i, 'r')) for i in args.input]
num_options = len(args.input)
for i in range(len(tos)):
print('option', i, ':', len(tos[i]), 'phrases')
for i in range(len(tos)):
for key in tos[i].keys():
            if key in result:
continue
tmp_options = {}
for j in range(len(tos)):
                if key in tos[j]:
for item in tos[j][key]:
                        if item[0] in tmp_options:
tmp_options[item[0]] += item[1]
else:
tmp_options[item[0]] = item[1]
tmp_options = [list(k) for k in tmp_options.items()]
tmp_options = [[k[0], k[1]/num_options] for k in tmp_options]
result[key] = tmp_options
if len(result) % 10000 == 0:
print(len(result))
for j in range(len(tos)):
                    if key in tos[j]:
print(tos[j][key])
print(tmp_options)
print('total:', len(result))
json.dump(result ,open(args.output, 'w'))
| 2.890625 | 3 |
python/032-LongestValidParentheses.py | vermouth1992/Leetcode | 0 | 12789243 | """
Given a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.
For "(()", the longest valid parentheses substring is "()", which has length = 2.
Another example is ")()())", where the longest valid parentheses substring is "()()", which has length = 4.
"""
"""
How to use dynamic programming technique?
The DP idea is:
If s[i] is '(', set longest[i] to 0, because any string ending with '(' cannot be a valid one.
Else if s[i] is ')'
If s[i-1] is '(', longest[i] = longest[i-2] + 2
Else if s[i-1] is ')' and s[i-longest[i-1]-1] == '(', longest[i] = longest[i-1] + 2 + longest[i-longest[i-1]-2]
For example, input "()(())", at i = 5, longest array is [0,2,0,0,2,0], longest[5] = longest[4] + 2 + longest[1] = 6.
"""
class Solution(object):
def longestValidParentheses(self, s):
"""
:type s: str
:rtype: int
>>> Solution().longestValidParentheses(')()())')
4
>>> Solution().longestValidParentheses('(()()(')
4
>>> Solution().longestValidParentheses('(()())')
6
>>> Solution().longestValidParentheses("()(()")
2
>>> Solution().longestValidParentheses('())()()(())((()(()()(((()))((((())((()(())()())(()((((()))()(()))(())()(())(()((' + \
'(((())((((((()())())(()(()((())()))(()))))))()(()))((((())()()()))()()()(((()(()())(()()(()(()' + \
'()(((()))))))()()))())())((()()))))))((()))(((()((())()(()()))((())))()()())))))))()))))(()))))' + \
'()))()))()((())))((()))(()))))))(((()))))))))()(()()()(())((())()))()()(())))()()))(()())()))(((()' + \
'())()))((())((((()))(()(()(()()()(((())()(((((()))((()(((((())(()()))((((((((()(()(()(()(())))(())' + \
'(()())())(()((((()(())((()(())))(())))()(((((()(()()(())))))))())(())(())(()()(((())))((()))(((((()))))())))()((()))())' + \
')))())))))((())(((((()()))((((())))(((()(()(())())(((()(()(()()()())))())()))((()((())())()()()(((())(((((()((((((()((()()))' + \
')((((())((()(((((((()(()((()()()(()(()())(()(()()((((())))()(((()())))(()()))()(()()()()(((((())(()))))((()))())))()((((((()))' + \
'())))()(()))(())))((((()())(((((()()())(((((())(()())(()))))()(()()))()))))))())))(((())(()(()()))(()))()(((())))())((((()(((()' + \
'))))))()(()(()))()()(()()))))))))((()))))))(())((()((()))()))((((((()())))))(()((())((((()))))(()(()()()()(()))()()(()(()))(()()' + \
'(((((((()())(())(()())((())())()(()())((())()())())(()())))())))(())())())(())((()())(((()()))()))()()))()(()(())((((((((())))()' + \
'((())((()((((((((((()))))(()(((((())(()(()())())))((())())))))()))(()((()()))((()((())()()()((()(())())((())())(()()(((())))))())' + \
'()()(()))()())(()(()((())))((((()()(())))())(())(()(()(())())())(()()())()(())())))(()()(((())))((()()(((())()()(()())((((()()(()()' + \
')(()((((()(()()(()(()(((()((()())(()()))(()((((()(((((()))))()()))(((()((((((()(()()()()())()))(()(())))))((()(((()())())))(((()()))(()' + \
'(()(((((((()()))(()(())))())()(())())(())(()))(())(()))()()(()()())))))()))()((())(((()((((((((())()()))())))((()())(')
310
"""
if len(s) == 0:
return 0
current_index = 0
num_left_parenthese = 0
current_max_length = 0
start_index = 0
while current_index < len(s):
if num_left_parenthese == 0:
if s[current_index] == ')':
current_index += 1
start_index = current_index
else:
num_left_parenthese += 1
current_index += 1
else:
if s[current_index] == ')':
current_index += 1
num_left_parenthese -= 1
if num_left_parenthese == 0:
current_max_length = max(current_max_length, current_index - start_index)
else:
current_index += 1
num_left_parenthese += 1
# need to deal with end of string case, use stack
if num_left_parenthese != 0:
stack = []
for i in range(start_index, len(s)):
if s[i] == '(':
stack.append(i)
else:
stack.pop()
# the left index in stack is the unmatched
right = len(s)
while len(stack) != 0:
left = stack.pop()
if right - left - 1 > current_max_length:
current_max_length = right - left - 1
right = left
return current_max_length
| 3.859375 | 4 |
src/sleuthdeck/plugins/sleuth/locks.py | sleuth-io/sleuth-deck | 0 | 12789244 | import threading
from os import path
from os.path import dirname
from typing import Optional
from sleuthdeck.deck import Deck
from sleuthdeck.deck import Key
from sleuthdeck.plugins.sleuth import Sleuth
class RepositoryLockKey(Key):
def __init__(self, sleuth: Sleuth, project: str, deployment: Optional[str] = None):
super().__init__()
self.sleuth = sleuth
self.project = project
self.deployment = deployment
self._thread = threading.Thread(target=self._update)
def connect(self, deck: Deck):
image = path.join(dirname(__file__), "lock.jpg")
self.image = image
super().connect(deck)
def _update(self):
# todo: add periodic updates from sleuth to update actions and icon
pass
| 2.25 | 2 |
modules/utils.py | MinionAttack/conll-shared-task-tool | 0 | 12789245 | # -*- coding: utf-8 -*-
from csv import reader, writer
from pathlib import Path
from random import sample
from typing import Any, List
import requests as requests
from resources.constants import BROWSER_USER_AGENT
def handle_request(url: str) -> Any:
headers = {'User-Agent': BROWSER_USER_AGENT, 'Upgrade-Insecure-Requests': '1', 'DNT': '1'}
try:
request = requests.get(url, headers=headers)
request.raise_for_status()
return request
except requests.exceptions.HTTPError as http_error:
print(f"Http Error: {http_error}")
except requests.exceptions.ConnectionError as connection_error:
print(f"Error Connecting: {connection_error}")
except requests.exceptions.TooManyRedirects as redirects_error:
print(f"Too Many Redirects: {redirects_error}")
except requests.exceptions.Timeout as timeout_error:
print(f"Timeout Error: {timeout_error}")
except requests.exceptions.RequestException as request_exception:
print(f"Error: {request_exception}")
return None
def select_subsets(language_set: List[str], treebank_set_size: int, sampling_size: int, cache_samples: bool) -> List[List[str]]:
if cache_samples:
cached_samples = load_cached_samples(treebank_set_size, sampling_size)
if cached_samples:
results = cached_samples
else:
samples = generate_samples(language_set, treebank_set_size, sampling_size)
save_samples(samples, treebank_set_size, sampling_size)
results = samples
else:
results = generate_samples(language_set, treebank_set_size, sampling_size)
return results
def load_cached_samples(treebank_set_size: int, sampling_size: int) -> List[List[str]]:
print(f"INFO: Loading {sampling_size} subset(s) of size {treebank_set_size} from disk")
file_path = Path(__file__).absolute()
root_folder = file_path.parent.parent
path_cache_folder = Path(root_folder).joinpath("cache")
cache_file = Path(path_cache_folder, f"{treebank_set_size}-{sampling_size}.csv")
if cache_file.exists():
samples = read_csv_file(cache_file)
return samples
else:
print(f"WARNING: There is no file of {sampling_size} subset(s) of size {treebank_set_size} that has been previously saved")
return []
def read_csv_file(file: Path) -> List[List[str]]:
print(f"INFO: Reading {file.name} file")
samples = []
with open(file, 'rt', encoding="utf-8", newline='') as csv_file:
csv_reader = reader(csv_file)
for row in csv_reader:
samples.append(row)
return samples
def generate_samples(language_set: List[str], treebank_set_size: int, sampling_size: int) -> List[List[str]]:
print(f"INFO: Selecting {sampling_size} subset(s) of size {treebank_set_size}")
results = []
if language_set:
while len(results) < sampling_size:
print(f"Number of subsets selected: {len(results)}/{sampling_size}", end="\r")
result = sample(language_set, k=treebank_set_size)
if result not in results:
results.append(result)
return results
def save_samples(samples: List[List[str]], treebank_set_size: int, sampling_size: int) -> None:
print(f"INFO: Saving {sampling_size} subset(s) of size {treebank_set_size} to disk")
file_path = Path(__file__).absolute()
root_folder = file_path.parent.parent
path_cache_folder = Path(root_folder).joinpath("cache")
path_cache_folder.mkdir(parents=True, exist_ok=True)
file_name = Path(path_cache_folder, f"{treebank_set_size}-{sampling_size}.csv")
with open(file_name, 'wt', encoding="utf-8", newline='') as cache_file:
csv_writer = writer(cache_file, dialect='unix')
csv_writer.writerows(samples)
| 2.671875 | 3 |
training/src/training/logger.py | raman-nbg/model-serving | 0 | 12789246 | import logging
def get_logger(module_name: str):
suffix = ""
if module_name != "__main__":
suffix = "." + module_name
# logger = logging.getLogger("serving.model-server" + suffix)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
logger.setLevel("INFO")
return logger
| 2.421875 | 2 |
src/models/loss_metric.py | shehel/traffic_forecasting | 0 | 12789247 | #!/usr/bin/env python3
from typing import Sequence, Union
import pdb
import torch
from ignite.exceptions import NotComputableError
from ignite.metrics.metric import Metric, reinit__is_reduced, sync_all_reduce
class ForwardLoss(Metric):
r"""Loss metric that simply records the loss calculated
in forward pass
"""
@reinit__is_reduced
def reset(self) -> None:
self._sum_of_squared_errors = torch.tensor(0.0, device=self._device)
self._num_examples = 0
@reinit__is_reduced
def update(self, output: Sequence[torch.Tensor]) -> None:
self._sum_of_squared_errors += torch.sum(output).to(self._device)
self._num_examples += 1
@sync_all_reduce("_sum_of_squared_errors", "_num_examples")
def compute(self) -> Union[float, torch.Tensor]:
if self._num_examples == 0:
raise NotComputableError("MeanSquaredError must have at least one example before it can be computed.")
return self._sum_of_squared_errors.item() / self._num_examples
| 2.28125 | 2 |
mmcls/models/heads/am_head.py | ramosmy/open-speaker-verification | 31 | 12789248 | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import normal_init
from ..builder import HEADS
from .cls_head import ClsHead
@HEADS.register_module()
class AMSoftmaxClsHead(ClsHead):
"""AMSoftmax classifier head.
Args:
num_classes (int): Number of categories excluding the background
category.
in_channels (int): Number of channels in the input feature map.
scale (int): scale with normalized cosine scores.
margin (float): margin of AmSoftmax
loss (dict): Config of classification loss.
""" # noqa: W605
def __init__(self,
num_classes,
in_channels,
scale=30,
margin=0.2,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
topk=(1, )):
super(AMSoftmaxClsHead, self).__init__(loss=loss, topk=topk)
self.in_channels = in_channels
self.num_classes = num_classes
self.s = scale
self.m = margin
if self.num_classes <= 0:
raise ValueError(
f'num_classes={num_classes} must be a positive integer')
self._init_layers()
def _init_layers(self):
self.W = nn.Parameter(torch.randn(self.num_classes, self.in_channels))
def cosine_sim(self, x1, x2, dim=1, eps=1e-8):
# print(x1, x2)
ip = torch.mm(x1, x2.T)
w1 = torch.norm(x1, 2, dim)
w2 = torch.norm(x2, 2, dim)
return ip / torch.ger(w1, w2).clamp(min=eps)
def init_weights(self):
nn.init.xavier_uniform_(self.W)
def simple_test(self, img):
"""Test without augmentation."""
cls_score = self.s * self.cosine_sim(img, self.W)
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
pred = F.softmax(cls_score, dim=1) if cls_score is not None else None
if torch.onnx.is_in_onnx_export():
return pred
pred = list(pred.detach().cpu().numpy())
return pred
def forward_train(self, x, gt_label):
# print(x)
# compute cosine linear
cosine = self.cosine_sim(x, self.W)
# label mapping
one_hot = torch.zeros_like(cosine)
one_hot.scatter_(1, gt_label.view(-1, 1), 1.0)
cls_score = self.s * (cosine - one_hot * self.m)
losses = self.loss(cls_score, gt_label)
return losses
| 2.671875 | 3 |
tests/losses/test_neuralndcg.py | almajo/allRank | 473 | 12789249 | <filename>tests/losses/test_neuralndcg.py<gh_stars>100-1000
import math
from functools import partial
from pytest import approx
from allrank.data.dataset_loading import PADDED_Y_VALUE
from tests.losses.utils import neuralNDCG_wrap, ndcg_wrap
test_cases = [{"stochastic": False, "transposed": False},
{"stochastic": True, "transposed": False},
{"stochastic": False, "transposed": True},
{"stochastic": True, "transposed": True}]
def test_neuralNDCG_simple():
for tc in test_cases:
neuralNDCG_simple(partial(neuralNDCG_wrap, **tc))
def neuralNDCG_simple(fun):
y_pred = [0.5, 0.2]
y_true = [1.0, 0.0]
result = fun(y_pred, y_true)
expected = ndcg_wrap(y_pred, y_true)
assert math.isfinite(result)
assert (-1 * result == approx(expected))
def test_neuralNDCG_longer():
for tc in test_cases:
neuralNDCG_longer(partial(neuralNDCG_wrap, **tc))
def neuralNDCG_longer(fun):
y_pred = [0.5, 0.2, 0.1, 0.4, 1.0, -1.0, 0.63]
y_true = [1.0, 2.0, 2.0, 4.0, 1.0, 4.0, 3.0]
result = fun(y_pred, y_true)
expected = ndcg_wrap(y_pred, y_true)
assert math.isfinite(result)
assert (-1 * result == approx(expected))
def test_neuralNDCG_stable_for_very_small_prediction():
for tc in test_cases:
neuralNDCG_stable_for_very_small_prediction(partial(neuralNDCG_wrap, **tc))
def neuralNDCG_stable_for_very_small_prediction(fun):
y_pred = [0.5, -1e30]
y_true = [1.0, 0.0]
result = fun(y_pred, y_true)
expected = ndcg_wrap(y_pred, y_true)
assert math.isfinite(result)
assert (-1 * result == approx(expected))
def test_neuralNDCG_ignores_padded_value():
for tc in test_cases:
neuralNDCG_ignores_padded_value(partial(neuralNDCG_wrap, **tc))
def neuralNDCG_ignores_padded_value(fun):
y_pred = [0.5, 0.2, 0.1, 0.4, 1.0, -1.0, 0.63, 1., 0.5, 0.3]
y_true = [1.0, 2.0, 2.0, 4.0, 1.0, 4.0, 3.0, PADDED_Y_VALUE, PADDED_Y_VALUE, PADDED_Y_VALUE]
result = fun(y_pred, y_true, temperature=0.001)
expected = ndcg_wrap(y_pred, y_true)
assert math.isfinite(result)
assert (-1 * result == approx(expected))
def test_neuralNDCG_at_3():
for tc in test_cases:
neuralNDCG_at_3(partial(neuralNDCG_wrap, **tc))
def neuralNDCG_at_3(fun):
y_pred = [0.5, 0.2, 0.1, 0.4, 1.0, -1.0, 0.63]
y_true = [1.0, 2.0, 2.0, 4.0, 1.0, 4.0, 3.0]
ats = 3
result = fun(y_pred, y_true, k=ats)
expected = ndcg_wrap(y_pred, y_true, ats=[ats])
assert math.isfinite(result)
assert (-1 * result == approx(expected))
| 2.078125 | 2 |
src/repair/localization.py | jyi/fangelix | 0 | 12789250 | <reponame>jyi/fangelix
from math import sqrt, ceil
from runtime import TraceItem
from utils import LocationInfo as LI
import logging
logger = logging.getLogger(__name__)
class NoNegativeTestException(Exception):
pass
def ochiai(executed_passing, executed_failing, total_passing, total_failing):
if not total_failing > 0:
raise NoNegativeTestException()
if executed_failing + executed_passing == 0:
return 0
return executed_failing / sqrt(total_failing * (executed_passing + executed_failing))
def jaccard(executed_passing, executed_failing, total_passing, total_failing):
if not total_failing > 0:
raise NoNegativeTestException()
return executed_failing / (total_failing + executed_passing)
def tarantula(executed_passing, executed_failing, total_passing, total_failing):
if not total_failing > 0:
raise NoNegativeTestException()
if executed_failing + executed_passing == 0:
return 0
return ((executed_failing / total_failing) /
((executed_failing / total_failing) + (executed_passing / total_passing)))
class Localizer:
def __init__(self, config, lines):
self.lines = lines
self.config = config
def __call__(self, test_suite, all_positive, all_negative):
'''
test_suite: tests under consideration
all_positive, all_negative: (test * trace) list
trace: expression list
computes config['suspicious']/config['group_size'] groups
each consisting of config['group_size'] suspicious expressions
'''
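        # Illustrative shape of the inputs (hypothetical values; each trace item
        # is a (dc, loc, val) triple, as unpacked by _del_val below, with loc a
        # (start_line, start_col, end_line, end_col) tuple):
        #   all_positive = [('test1', [('A', (12, 3, 12, 9), 1), ...]), ...]
        #   all_negative = [('test7', [('A', (29, 5, 29, 11), 0), ...]), ...]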
if self.config['localization'] == 'ochiai':
formula = ochiai
elif self.config['localization'] == 'jaccard':
formula = jaccard
elif self.config['localization'] == 'tarantula':
formula = tarantula
# first, remove irrelevant information:
positive = []
negative = []
dc_idx = 0
loc_idx = 1
def del_val(pair):
def _del_val(t):
dc, loc, val = t
return dc, loc
test, trace = pair
trace = list(map(_del_val, trace))
return test, trace
all_positive = list(map(del_val, all_positive))
all_negative = list(map(del_val, all_negative))
if not self.config['invalid_localization']:
for test, trace in all_positive:
if test in test_suite:
positive.append((test, trace))
for test, trace in all_negative:
if test in test_suite:
locs = [tuple([t[dc_idx], t[loc_idx]]) for t in trace]
negative.append((test, locs))
else:
positive = all_positive
negative = all_negative
# logger.debug('positive: {}'.format(positive))
# logger.debug('negative: {}'.format(negative))
all = set()
for _, trace in positive:
all |= set(trace)
for _, trace in negative:
all |= set(trace)
executed_positive = dict()
executed_negative = dict()
for e in all:
executed_positive[e] = 0
executed_negative[e] = 0
for _, trace in positive:
executed = set(trace)
for e in executed:
executed_positive[e] += 1
for _, trace in negative:
executed = set(trace)
for e in executed:
executed_negative[e] += 1
with_score = []
logger.debug('all: {}'.format(all))
logger.debug('lines: {}'.format(self.lines))
if self.lines is not None:
filtered = filter(lambda item: TraceItem.get_location(item)[0] in self.lines, all)
all = list(filtered)
logger.debug('filtered all: {}'.format(all))
logger.debug('executed_positive: {}'.format(executed_positive))
logger.debug('executed_negative: {}'.format(executed_negative))
logger.debug('total_passing: {}'.format(len(positive)))
logger.debug('total_failing: {}'.format(len(negative)))
for e in all:
try:
if e in executed_negative:
score = formula(executed_positive[e], executed_negative[e],
len(positive), len(negative))
logger.debug('(loc, score) = ({}, {})'.format(e, score))
with_score.append((e, score))
except NoNegativeTestException:
logger.info("No negative test exists")
exit(0)
ranking = sorted(with_score, key=lambda r: r[1], reverse=True)
logger.debug('ranking: {}'.format(ranking))
if self.config['additional_susp_locs'] is not None:
logger.debug('add additional suspicious locations')
default_score = self.config['default_susp_score']
for info in self.config['additional_susp_locs']:
# e.g., info: A-293-7-293-7
dc, loc = info.split('-', 1)
loc_tuple = tuple(map(int, loc.split('-')))
ranking.append(((dc, loc_tuple), default_score))
if len(ranking) == 0:
logger.warning('no location is assigned a score')
logger.debug('executed_positive: {}'.format(executed_positive))
logger.debug('executed_negative: {}'.format(executed_negative))
if self.config['show_suspicious_locations']:
for (loc, score) in ranking:
logger.info('(loc, score) = ({}, {})'.format(loc, score))
logger.debug('all_suspicious: {}'.format(self.config['all_suspicious']))
if self.config['all_suspicious']:
suspicious = len(ranking)
else:
suspicious = self.config['suspicious']
logger.debug('suspicious: {}'.format(suspicious))
if self.config['group_by_score']:
top = ranking[:suspicious]
else:
if self.config['localize_from_bottom']:
# sort by location backward
top = sorted(ranking[:suspicious], key=lambda r: LI.line(r[0]), reverse=True)
else:
# sort by location
top = sorted(ranking[:suspicious], key=lambda r: LI.line(r[0]))
logger.debug('top: {}'.format(top))
group_size = self.config['group_size'] if not self.config['single_group'] \
else suspicious
groups_with_score = []
for i in range(0, ceil(suspicious / group_size)):
if len(top) == 0:
break
group = []
total_score = 0
for j in range(0, group_size):
if len(top) == 0:
break
expr, score = top.pop(0)
total_score += score
group.append(expr)
groups_with_score.append((group, total_score))
sorted_groups = sorted(groups_with_score, key=lambda r: r[1], reverse=True)
if self.config['show_suspicious_locations']:
for idx, (group, score) in enumerate(sorted_groups):
logger.info('group {}: {} ({})'.format(idx + 1, group, score))
groups = []
for (group, score) in sorted_groups:
groups.append(group)
logger.info("selected expressions {} with group score {:.5} ".format(group, score))
logger.debug('groups: {}'.format(groups))
return groups, dict(ranking)
| 2.375 | 2 |