max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
---|---|---|---|---|---|---|
pywinrt/winsdk/windows/web/ui/__init__.py | pywinrt/python-winsdk | 3 | 12786951 | # WARNING: Please don't edit this file. It was generated by Python/WinRT v1.0.0-beta.4
import enum
import winsdk
_ns_module = winsdk._import_ns_module("Windows.Web.UI")
try:
import winsdk.windows.applicationmodel.datatransfer
except Exception:
pass
try:
import winsdk.windows.foundation
except Exception:
pass
try:
import winsdk.windows.foundation.collections
except Exception:
pass
try:
import winsdk.windows.storage.streams
except Exception:
pass
try:
import winsdk.windows.ui
except Exception:
pass
try:
import winsdk.windows.web
except Exception:
pass
try:
import winsdk.windows.web.http
except Exception:
pass
class WebViewControlPermissionState(enum.IntEnum):
UNKNOWN = 0
DEFER = 1
ALLOW = 2
DENY = 3
class WebViewControlPermissionType(enum.IntEnum):
GEOLOCATION = 0
UNLIMITED_INDEXED_D_B_QUOTA = 1
MEDIA = 2
POINTER_LOCK = 3
WEB_NOTIFICATIONS = 4
SCREEN = 5
IMMERSIVE_VIEW = 6
WebViewControlContentLoadingEventArgs = _ns_module.WebViewControlContentLoadingEventArgs
WebViewControlDOMContentLoadedEventArgs = _ns_module.WebViewControlDOMContentLoadedEventArgs
WebViewControlDeferredPermissionRequest = _ns_module.WebViewControlDeferredPermissionRequest
WebViewControlLongRunningScriptDetectedEventArgs = _ns_module.WebViewControlLongRunningScriptDetectedEventArgs
WebViewControlNavigationCompletedEventArgs = _ns_module.WebViewControlNavigationCompletedEventArgs
WebViewControlNavigationStartingEventArgs = _ns_module.WebViewControlNavigationStartingEventArgs
WebViewControlNewWindowRequestedEventArgs = _ns_module.WebViewControlNewWindowRequestedEventArgs
WebViewControlPermissionRequest = _ns_module.WebViewControlPermissionRequest
WebViewControlPermissionRequestedEventArgs = _ns_module.WebViewControlPermissionRequestedEventArgs
WebViewControlScriptNotifyEventArgs = _ns_module.WebViewControlScriptNotifyEventArgs
WebViewControlSettings = _ns_module.WebViewControlSettings
WebViewControlUnsupportedUriSchemeIdentifiedEventArgs = _ns_module.WebViewControlUnsupportedUriSchemeIdentifiedEventArgs
WebViewControlUnviewableContentIdentifiedEventArgs = _ns_module.WebViewControlUnviewableContentIdentifiedEventArgs
WebViewControlWebResourceRequestedEventArgs = _ns_module.WebViewControlWebResourceRequestedEventArgs
IWebViewControl = _ns_module.IWebViewControl
IWebViewControl2 = _ns_module.IWebViewControl2
| 1.53125 | 2 |
source/modules/tester/testtermopi.py | AdL1398/PiCasso | 3 | 12786952 | <reponame>AdL1398/PiCasso
#!/usr/bin/python
"""
title : testtermopi.py
description : This program runs the termopi.py
: Displays the status of the resources (cpu load and memory usage) consumed by a Raspberry Pi
computer and the resources consumed by one or more containers instantiated in the Pi.
source :
author : <NAME> (<EMAIL>)
date : 27 Mar 2017
institution : Computer Laboratory, University of Cambridge
version : 1.0
usage :
notes :
compile and run : % python termopi.py
: It imports pidict.py, dockerctl.py and picheck.py which are found in
: ./modules.
: You need to include "./modules" in the PYTHONPATH environment variable to
                : tell Python where to find pidict.py, dockerctl.py and picheck.py.
: For example, in a bash shell, you need to include the following lines
                : in your .bash_profile file located in your home directory (you can see it with
: (# ls -la).
:
: PYTHONPATH="./modules"
: export PYTHONPATH
python_version : Python 2.7.12
====================================================
"""
from modules.tools.termopi import termopi # class with dictionary data structure
# Threshold of cpu exhaustion
cpuUsageThreshold= 50
cpuLoadThreshold= 3
termo= termopi()
termo.prt_pi_resources()
termo.create_jsonfile_with_pi_status()
#termo.check_pi_resource_status(cpuUsageThreshold)
| 2.375 | 2 |
src/genie/libs/parser/iosxe/tests/ShowPlatformSoftwareFedIfm/cli/equal/golden_output_expected.py | nielsvanhooy/genieparser | 0 | 12786953 | expected_output = {
'interfaces': {
'Tunnel1': {
'if_id': '0x0000005d',
'state': 'READY'
},
'Tunnel2': {
'if_id': '0x0000005e',
'state': 'PENDING DELETE'
}
}
}
| 1.101563 | 1 |
FuckDrcom.py | Neotter/FuckDrcom | 8 | 12786954 | <reponame>Neotter/FuckDrcom
'''
Author: MonChen
Date: 2021-01-10 13:23:43
LastEditTime: 2021-01-14 15:25:52
'''
import urllib.request
import urllib.parse
import sys, time, subprocess, platform
hostname = "www.baidu.com"
manpage = 'FuckDrcom [option]...\n\t--login [IdNumbers] [password]\n\t--autologin [IdNumbers] [password]\n\t--logout'
def ping(host):
"""
Returns True if host (str) responds to a ping request.
Remember that a host may not respond to a ping (ICMP) request even if the host name is valid.
"""
    # Option for the number of packets as a function of the operating system
param = ['','']
param[0] = '-n' if platform.system().lower()=='windows' else '-c'
param[1] = '-w'
# Building the command. Ex: "ping -c 1 google.com"
command = ['ping', param[0], '1', param[1], '1000', host]
return subprocess.call(command) == 0
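# Usage sketch: ping("www.baidu.com") returns True when the host answers a
# single ping request within the configured timeout, False otherwise.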
def login(idNums='',passwd=''):
data = {
'DDDDD': idNums,
'upass': passwd,
'0MKKey': r'登 录',
'v6ip': ''
}
url = 'https://drcom.szu.edu.cn/a70.htm'
header = {
'Host': '192.168.254.220',
'Connection': 'keep-alive',
'Content-Length': '53',
'Cache-Control': 'max-age=0',
'Origin': 'http://192.168.254.220',
'Upgrade-Insecure-Requests': '1',
'Content-Type': 'application/x-www-form-urlencoded',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'DNT': '1',
'Referer': 'http://192.168.254.220/0.htm',
'Accept-Encoding': 'gzip, deflate'
}
data = urllib.parse.urlencode(data).encode('gb2312')
request = urllib.request.Request(url, headers=header, data=data)
page = urllib.request.urlopen(request).read()
page = page.decode('gb2312')
if len(page) == 6491:
print('Success.')
else:
print('Fail, IdNums or password incorrect.')
# 6491 -> ok
# 5696 -> error
def logout():
url = 'http://1172.16.58.3/F.htm'
request = urllib.request.Request(url)
page = urllib.request.urlopen(request).read()
page = page.decode('gb2312')
print('Success.')
if __name__ == "__main__":
if len(sys.argv)>1:
option = sys.argv[1]
if option == '--login' and len(sys.argv) == 4:
idNums = sys.argv[2]
passwd = sys.argv[3]
login(idNums,passwd)
elif option == '--autologin' and len(sys.argv) == 4:
while True:
response = ping(hostname)
if response == True:
print('============Alive============')
else:
idNums = sys.argv[2]
passwd = sys.argv[3]
print('============Connect Failed, Try to reconnect.============')
login(idNums,passwd)
time.sleep(2)
elif option == '--logout':
logout()
else:
print(manpage)
else:
print(manpage)
| 2.59375 | 3 |
clock.py | bisrat-walle/Clock-python-Turtle | 2 | 12786955 | <gh_stars>1-10
import random
import turtle
import time
def display_hour_label(turtle_object):
turtle_object.hideturtle()
turtle_object.penup()
turtle_object.goto(0, 0)
turtle_object.setheading(90)
for i in range(12):
turtle_object.forward(190)
turtle_object.pendown()
turtle_object.forward(20)
turtle_object.penup()
turtle_object.goto(0, 0)
turtle_object.right(30)
def display_minute_label(turtle_object):
turtle_object.hideturtle()
turtle_object.penup()
turtle_object.goto(0, 0)
turtle_object.setheading(90)
for i in range(60):
turtle_object.forward(200)
turtle_object.pendown()
turtle_object.forward(10)
turtle_object.penup()
turtle_object.goto(0, 0)
turtle_object.right(6)
def draw_hour_numbers(turtle_object):
turtle_object.hideturtle()
angle = 60
for i in range(1, 13):
turtle_object.penup()
turtle_object.goto(0, 0)
turtle_object.setheading(angle)
turtle_object.forward(170)
turtle_object.pendown()
turtle_object.color("darkblue")
turtle_object.write(str(i), move=False, align="center", font=("arial", 10, "normal"))
angle -= 30
def draw_hour_hand(hours, turtle_object):
turtle_object.hideturtle()
turtle_object.penup()
turtle_object.goto(0, 0)
turtle_object.pencolor("green")
turtle_object.setheading(90)
angle = (hours / 12) * 360
turtle_object.right(angle)
turtle_object.pendown()
turtle_object.forward(80)
def draw_minute_hand(minutes, turtle_object):
turtle_object.hideturtle()
turtle_object.penup()
turtle_object.goto(0, 0)
turtle_object.pencolor("yellow")
turtle_object.setheading(90)
angle = (minutes / 60) * 360
turtle_object.right(angle)
turtle_object.pendown()
turtle_object.forward(110)
def draw_second_hand(seconds, turtle_object):
turtle_object.hideturtle()
turtle_object.penup()
turtle_object.goto(0, 0)
turtle_object.pencolor("red")
turtle_object.setheading(90)
angle = (seconds / 60) * 360
turtle_object.right(angle)
turtle_object.pendown()
turtle_object.forward(150)
def description(turtle_object):
turtle_object.hideturtle()
turtle_object.penup()
turtle_object.goto(0, 0)
turtle_object.color("indigo")
turtle_object.setheading(268)
turtle_object.forward(255)
turtle_object.setheading(0)
turtle_object.forward(5)
turtle_object.write("Developed by Bisrat", move=False, align="center", font=("monospace", 25, "normal"))
def draw_container(hours, minutes, seconds, turtle_object):
turtle_object.penup()
turtle_object.goto(0, 210)
turtle_object.setheading(180)
turtle_object.pencolor("darkblue")
turtle_object.pendown()
turtle_object.circle(210)
turtle_object.hideturtle()
display_hour_label(turtle_object)
display_minute_label(turtle_object)
draw_hour_numbers(turtle.Turtle())
draw_hour_hand(hours, turtle_object)
draw_minute_hand(minutes, turtle_object)
draw_second_hand(seconds, turtle_object)
description(turtle.Turtle())
display_current_time = turtle.Turtle()
display_current_time.speed(10)
turtle1 = turtle.Turtle()
turtle1.hideturtle()
window = turtle.Screen()
window.title("Clock Assignment, submitted to <NAME>")
window.bgcolor("aqua")
window.setup(600, 600)
turtle1.speed(10)
turtle1.pensize(3)
window.tracer(0)
rectangular_box = turtle.Turtle()
rectangular_box.hideturtle()
rectangular_box.penup()
rectangular_box.pensize(3)
rectangular_box.color("darkblue")
rectangular_box.goto(80, 230)
rectangular_box.pendown()
rectangular_box.forward(150)
rectangular_box.setheading(90)
rectangular_box.forward(50)
rectangular_box.setheading(180)
rectangular_box.forward(150)
rectangular_box.setheading(-90)
rectangular_box.forward(50)
button = turtle.Turtle()
button.hideturtle()
button.penup()
button.goto(-250, 230)
button.pensize(3)
button.pencolor("darkblue")
button.pendown()
button.forward(250)
button.left(90)
button.forward(50)
button.left(90)
button.forward(250)
button.left(90)
button.forward(50)
button.penup()
button.goto(-245, 240)
button.pendown()
button.write("Click here, to toggle theme", move=False, font=("monospace", 15, "italic"))
button_x, button_y, button_length, button_width = -250, 230, 250, 50
backgrounds = ["white", "black", "snow", "crimson", "darkorange", "cyan", "powderblue", "skyblue", "silver", "lavender"]
def button_clicked_toggle(x, y):
if button_x <= x <= button_x + button_length:
if button_y <= y <= button_y + button_width:
window.bgcolor(random.choice(backgrounds))
escape = turtle.Turtle()
escape.hideturtle()
escape.penup()
escape.goto(-190, -290)
escape.pendown()
escape.pensize(2)
escape.pencolor("darkblue")
escape.write("Smash escape key to close the turtle window", font=("monospace", 15, "italic"))
window.listen()
window.onclick(button_clicked_toggle)
window.onkeypress(window.bye, "Escape")
while True:
try:
hour = int(time.strftime("%I"))
minute = int(time.strftime("%M"))
second = int(time.strftime("%S"))
draw_container(hour, minute, second, turtle1)
display_current_time.goto(93, 235)
display_current_time.hideturtle()
display_current_time.clear()
display_current_time.pencolor("darkblue")
display_current_time.write(str(hour).zfill(2) + ":" + str(minute).zfill(2) + ":" + str(second).zfill(2),
font=("monospace", 25, "normal"))
window.update()
time.sleep(1)
turtle1.clear()
except:
break
window.mainloop()
try:
turtle.done()
except turtle.Terminator:
pass
| 3.4375 | 3 |
run_inference.py | bhaprayan/keras-yolo3 | 0 | 12786956 | <gh_stars>0
from yolo import YOLO
from PIL import Image
import pandas as pd
dat = open('subset_train_nuro.txt').read().split('\n')
model = YOLO()  # load the model once instead of re-creating it for every image
for line in dat:
    if not line.strip():
        continue  # skip blank lines such as the trailing newline
    img_name = line.split()[0]
    img = Image.open(img_name)
    predictions_list = model.detect_image(img)
pred_str = ' '.join((predictions_list))
print(pred_str)
| 2.796875 | 3 |
tests/integration/models/test_store.py | athospg/Simple-Store-REST-Api-Auth | 1 | 12786957 | <filename>tests/integration/models/test_store.py
from models.item import ItemModel
from models.store import StoreModel
from tests.base_test import BaseTest
class StoreTest(BaseTest):
def test_crud(self):
"""
CRUD: Create, Read, Update and Delete.
These are the four major and basic functions of persistent storage.
"""
with self.app_context():
# Setup
store = StoreModel('test')
# Exercise
# Verify
self.assertIsNone(StoreModel.find_by_name('test'))
# Exercise
store.save_to_db()
# Verify
self.assertIsNotNone(StoreModel.find_by_name('test'))
# Exercise
store.delete_from_db()
# Verify
self.assertIsNone(StoreModel.find_by_name('test'))
def test_create_store_empty_items(self):
store = StoreModel('test')
self.assertListEqual([], store.items.all(),
"The store's items length was not 0 even though no items were added.")
def test_item_relationship(self):
with self.app_context():
# Setup
store = StoreModel('test')
item = ItemModel('Item A', 19.99, 1)
# Exercise
store.save_to_db()
item.save_to_db()
# Verify
self.assertEqual(1, store.items.count())
self.assertEqual('Item A', store.items.first().name)
def test_store_json_with_item(self):
with self.app_context():
# Setup
store = StoreModel('test')
item = ItemModel('Item A', 19.99, 1)
# Exercise
store.save_to_db()
item.save_to_db()
# Verify
expected = {
'id': 1,
'name': 'test',
'items': [{'id': 1, 'name': 'Item A', 'price': 19.99, 'store_id': 1}]
}
self.assertDictEqual(expected, store.json())
| 2.796875 | 3 |
test/sequence_tagging.py | bebound/text | 0 | 12786958 | <reponame>bebound/text<gh_stars>0
from torchtext import data
from torchtext import datasets
from torchtext.vocab import GloVe
# Define the fields associated with the sequences.
WORD = data.Field(init_token="<bos>", eos_token="<eos>")
UD_TAG = data.Field(init_token="<bos>", eos_token="<eos>")
# Download and the load default data.
train, val, test = datasets.UDPOS.splits(
fields=(('word', WORD), ('udtag', UD_TAG), (None, None)))
print(train.fields)
print(len(train))
print(vars(train[0]))
# We can also define more than two columns.
WORD = data.Field(init_token="<bos>", eos_token="<eos>")
UD_TAG = data.Field(init_token="<bos>", eos_token="<eos>")
PTB_TAG = data.Field(init_token="<bos>", eos_token="<eos>")
# Load the specified data.
train, val, test = datasets.UDPOS.splits(
fields=(('word', WORD), ('udtag', UD_TAG), ('ptbtag', PTB_TAG)),
path=".data/udpos/en-ud-v2",
train="en-ud-tag.v2.train.txt",
validation="en-ud-tag.v2.dev.txt",
test="en-ud-tag.v2.test.txt")
print(train.fields)
print(len(train))
print(vars(train[0]))
WORD.build_vocab(train.word, min_freq=3)
UD_TAG.build_vocab(train.udtag)
PTB_TAG.build_vocab(train.ptbtag)
print(UD_TAG.vocab.freqs)
print(PTB_TAG.vocab.freqs)
train_iter, val_iter = data.BucketIterator.splits(
(train, val), batch_size=3, device="cuda:0")
batch = next(iter(train_iter))
print("words", batch.word)
print("udtags", batch.udtag)
print("ptbtags", batch.ptbtag)
# Now lets try both word and character embeddings
WORD = data.Field(init_token="<bos>", eos_token="<eos>")
PTB_TAG = data.Field(init_token="<bos>", eos_token="<eos>")
# We'll use NestedField to tokenize each word into list of chars
CHAR_NESTING = data.Field(tokenize=list, init_token="<bos>", eos_token="<eos>")
CHAR = data.NestedField(CHAR_NESTING, init_token="<bos>", eos_token="<eos>")
fields = [(('word', 'char'), (WORD, CHAR)), (None, None), ('ptbtag', PTB_TAG)]
train, val, test = datasets.UDPOS.splits(fields=fields)
print(train.fields)
print(len(train))
print(vars(train[0]))
WORD.build_vocab(train.word, val.word, test.word, vectors=[GloVe(name='6B', dim='300')])
CHAR.build_vocab(train.char, val.char, test.char)
PTB_TAG.build_vocab(train.ptbtag)
print(CHAR.vocab.freqs)
train_iter, val_iter = data.BucketIterator.splits(
(train, val), batch_size=3)
batch = next(iter(train_iter))
print("words", batch.word)
print("chars", batch.char)
print("ptbtags", batch.ptbtag)
# Using the CoNLL 2000 Chunking dataset:
INPUTS = data.Field(init_token="<bos>", eos_token="<eos>")
CHUNK_TAGS = data.Field(init_token="<bos>", eos_token="<eos>")
train, val, test = datasets.CoNLL2000Chunking.splits(
fields=(('inputs', INPUTS), (None, None), ('tags', CHUNK_TAGS))
)
print(len(train), len(val), len(test))
# Using the ATIS dataset
TEXT = data.Field(lower=True, batch_first=True)
SLOT = data.Field(batch_first=True, unk_token=None)
INTENT = data.Field(batch_first=True, unk_token=None)
# make splits for data
train, val, test = datasets.ATIS.splits(
fields=(('text', TEXT), ('slot', SLOT), ('intent', INTENT)))
print('train.fields', train.fields)
print('len(train)', len(train))
print('vars(train[0])', vars(train[0]))
TEXT.build_vocab(train, vectors=GloVe(name='6B', dim=300))
SLOT.build_vocab(train, val, test)
INTENT.build_vocab(train, val, test)
# print vocab information
print('len(TEXT.vocab)', len(TEXT.vocab))
print('TEXT.vocab.vectors.size()', TEXT.vocab.vectors.size())
# make iterator for splits
train_iter, val_iter, test_iter = data.BucketIterator.splits(
(train, val, test), batch_sizes=(32, 256, 256))
# print batch information
batch = next(iter(train_iter))
print(batch.text)
print(batch.slot)
print(batch.intent)
| 2.65625 | 3 |
my_study/re_study/re_img.py | zhangyage/Python-oldboy | 1 | 12786959 | <reponame>zhangyage/Python-oldboy
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Use a regular expression to pick out the image links in a web page and download the images locally. For testing, the page source has simply been pasted into jike.txt.
import re
import requests
# Read the saved website source code:
f = open('jike.txt','r')
html = f.read()
f.close()
# Match image URLs, e.g. img src="https://a1.jikexueyuan.com/home/201604/28/3f10/57216b7d1ac79.jpg" class="lessonimg"
pic_url = re.findall('img src="(.*?)" class="lessonimg"', html, re.S)
#print pic_url
i = 0
for each in pic_url:
    print 'now downloading:' + each
pic = requests.get(each)
fp = open('pic\\' + str(i) + '.jpg' , 'wb')
fp.write(pic.content)
fp.close()
i = i+1 | 2.71875 | 3 |
bot.py | matcool/schezo-bot | 1 | 12786960 | import discord
from discord.ext import commands
import json
import glob
import time
import os
import motor.motor_asyncio as motor
import logging
import sys
from cogs.utils.guild_features import GuildFeatures
class Schezo(commands.Bot):
__slots__ = 'config', 'start_time', '_cogs_loaded', 'db_client', 'db', 'logger', 'gf'
def __init__(self):
if not os.path.exists('bot_config.json'):
raise FileNotFoundError('Could not find "bot_config.json". Make sure to copy and rename the template and then change the values.')
with open('bot_config.json', 'r', encoding='utf-8') as file:
self.config = json.load(file)
intents = discord.Intents(
# These are both true for s.played
# although that command might be removed entirely in the future
presences=True,
members=True,
reactions=True,
messages=True,
guilds=True,
typing=False,
invites=False,
webhooks=False,
integrations=False,
emojis=False,
bans=False,
voice_states=False,
)
super().__init__(command_prefix=self.config['prefix'], intents=intents)
self.start_time = time.time()
self._cogs_loaded = False
self.db_client = motor.AsyncIOMotorClient('localhost', 27017, retryWrites=self.config.get('retrywrites', True))
self.db = self.db_client[self.config['dbname']]
self.gf: GuildFeatures = GuildFeatures(self.db)
self.logger = logging.getLogger('schezo')
formatter = logging.Formatter('[{asctime} {levelname}] {message}', datefmt='%d/%m/%Y %H:%M', style='{')
file_handler = logging.FileHandler('schezo.log', mode='w')
file_handler.setFormatter(formatter)
self.logger.setLevel(logging.INFO)
self.logger.addHandler(file_handler)
self.logger.propagate = False
@property
def uptime(self):
return time.time() - self.start_time
async def on_ready(self):
msg = f'Logged in as {self.user}'
print(msg)
self.logger.info(msg)
game = discord.Activity(name=self.config['game'], type=discord.ActivityType.watching)
await self.change_presence(activity=game)
self.load_cogs()
def get_cogs(self):
files = glob.glob('cogs/*.py')
# Replace / or \ with . and remove .py at the end
return map(lambda p: p.replace('\\','.').replace('/','.')[:-3], files)
def load_cogs(self):
if self._cogs_loaded: return
self._cogs_loaded = True
for cog in self.get_cogs():
self.load_extension(cog)
def unload_cogs(self):
self._cogs_loaded = False
extensions = tuple(self.extensions.keys())
for cog in extensions:
self.unload_extension(cog)
def run(self):
super().run(self.config['token'])
bot = Schezo()
@bot.command(hidden=True, aliases=['rc'])
@commands.is_owner()
async def reloadcogs(ctx):
ctx.bot.unload_cogs()
modules = tuple(sys.modules.keys())
for name in modules:
if name.startswith('cogs.utils'):
del sys.modules[name]
ctx.bot.load_cogs()
try:
await ctx.message.add_reaction('🆗')
except discord.DiscordException:
pass
bot.run() | 2.015625 | 2 |
photologue/migrations/0013_photo_source.py | newprincip/django-photologue | 0 | 12786961 | # Generated by Django 3.0.7 on 2020-07-04 10:55
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('photologue', '0012_auto_20200704_0747'),
]
operations = [
migrations.AddField(
model_name='photo',
name='source',
field=models.TextField(blank=True, verbose_name='source'),
),
]
| 1.398438 | 1 |
StackApp/env/lib/python2.7/site-packages/blueprint/context_managers.py | jonathanmusila/StackOverflow-Lite | 0 | 12786962 | <reponame>jonathanmusila/StackOverflow-Lite
import os
import shutil
import tempfile
from blueprint import util
class cd(object):
"""
Run in an alternative working directory in this context.
"""
def __init__(self, new_cwd):
self.new_cwd = new_cwd
def __enter__(self):
self.old_cwd = os.getcwd()
os.chdir(self.new_cwd)
return self
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self.old_cwd)
class mkdtemp(object):
"""
Run in a temporary working directory in this context. Remove the
temporary directory automatically afterward.
"""
def __init__(self, dir=None):
self.cwd = os.getcwd()
if dir is None:
dir = tempfile.gettempdir()
self.tempdir = tempfile.mkdtemp(dir=dir)
if util.via_sudo():
uid = int(os.environ['SUDO_UID'])
gid = int(os.environ['SUDO_GID'])
os.chown(self.tempdir, uid, gid)
def __enter__(self):
os.chdir(self.tempdir)
return self
def __exit__(self, exc_type, exc_value, traceback):
os.chdir(self.cwd)
shutil.rmtree(self.tempdir)
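
# Usage sketch (illustrative):
#
#   with mkdtemp():
#       ...   # runs inside a fresh temporary directory, removed afterwards
#
#   with cd('/tmp'):
#       ...   # runs with /tmp as the current working directory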
| 2.6875 | 3 |
setup.py | MThioub/ssl-certificate-collision | 0 | 12786963 | <filename>setup.py
from setuptools import setup, find_packages
setup(
name='engine',
    description='search for a prime factor of the modulus',
packages=find_packages())
| 1.296875 | 1 |
dxm/lib/DxColumn/DxFileField.py | experiortec/dxm-toolkit | 5 | 12786964 | <gh_stars>1-10
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2018 by Delphix. All rights reserved.
#
# Author : <NAME>
# Date : April 2018
import logging
from dxm.lib.DxLogging import print_error
from dxm.lib.DxLogging import print_message
from dxm.lib.masking_api.api.file_field_metadata_api import FileFieldMetadataApi
from dxm.lib.masking_api.rest import ApiException
class DxFileField(object):
swagger_types = {
'file_field_metadata_id': 'int',
'file_format_id': 'int',
'record_type_id': 'int',
'field_length': 'int',
'field_name': 'str',
'field_position_number': 'int',
'algorithm_name': 'str',
'domain_name': 'str',
'date_format': 'str',
'is_masked': 'bool',
'is_profiler_writable': 'bool',
'notes': 'str'
}
swagger_map = {
'file_field_metadata_id': 'fileFieldMetadataId',
'file_format_id': 'fileFormatId',
'record_type_id': 'recordTypeId',
'field_length': 'fieldLength',
'field_name': 'fieldName',
'field_position_number': 'fieldPositionNumber',
'algorithm_name': 'algorithmName',
'domain_name': 'domainName',
'date_format': 'dateFormat',
'is_masked': 'isMasked',
'is_profiler_writable': 'isProfilerWritable',
'notes': 'notes'
}
def __init__(self, engine):
"""
Constructor
:param engine: DxMaskingEngine object
"""
#FileFieldMetadata.__init__(self)
self.__engine = engine
self.__logger = logging.getLogger()
self.__logger.debug("creating DxFile object")
self.__api = FileFieldMetadataApi
self.__obj = None
self.__apiexc = ApiException
@property
def obj(self):
if self.__obj is not None:
return self.__obj
else:
return None
def from_file(self, file):
"""
set obj property with FileMetadata object
:param column: FileMetadata object
"""
self.__obj = file
self.__obj.swagger_types = self.swagger_types
self.__obj.swagger_map = self.swagger_map
@property
def cf_meta_name(self):
return self.obj.field_name
@property
def cf_metadata_id(self):
return self.obj.file_field_metadata_id
@property
def cf_meta_type(self):
if self.obj.field_length == 0 or self.obj.field_length is None:
return "pos {}".format(self.obj.field_position_number)
else:
return "pos {} ({})".format(self.obj.field_position_number,
self.obj.field_length)
@property
def cf_meta_column_role(self):
return ''
@property
def algorithm_name(self):
if self.obj is not None and hasattr(self.obj,'algorithm_name'):
return self.__obj.algorithm_name
else:
return None
@algorithm_name.setter
def algorithm_name(self, algorithm_name):
"""
algorithm_name
:param algorithm_name: algorithm_name
"""
if self.obj is not None:
self.obj.algorithm_name = algorithm_name
else:
raise ValueError("Object needs to be initialized first")
@property
def domain_name(self):
return self.obj.domain_name
@domain_name.setter
def domain_name(self, domain_name):
"""
domain_name
:param domain_name: domain_name
"""
if self.obj is not None:
self.obj.domain_name = domain_name
else:
raise ValueError("Object needs to be initialized first")
@property
def is_masked(self):
return self.obj.is_masked
@is_masked.setter
def is_masked(self, is_masked):
"""
is_masked
:param is_masked: is_masked flag
"""
if self.obj is not None:
self.obj.is_masked = is_masked
else:
raise ValueError("Object needs to be initialized first")
def update(self):
"""
        Update file field data on the Masking engine and print a status message.
        Return None on success, or 1 in case of error.
"""
if (self.obj.file_field_metadata_id is None):
print_error("file_field_metadata_id is required")
self.__logger.error("file_field_metadata_id is required")
return 1
try:
if self.obj.date_format == '':
self.date_format = None
self.__logger.debug("create field input %s" % str(self))
api_instance = self.__api(self.__engine.api_client)
response = api_instance.update_file_field_metadata(self.obj.file_field_metadata_id, self.obj)
self.__logger.debug("field response %s"
% str(response))
print_message("Field %s updated" % self.obj.field_name)
return None
except self.__apiexc as e:
print_error(e.body)
self.__logger.error(e)
return 1
| 1.867188 | 2 |
pygraphblas/binaryop.py | sebbacon/pygraphblas | 0 | 12786965 | """Contains all automatically generated BinaryOps from CFFI.
"""
__all__ = [
"BinaryOp",
"Accum",
"current_binop",
"current_accum",
"binary_op",
]
import sys
import re
import contextvars
from itertools import chain
from collections import defaultdict
from functools import partial
import numba
from .base import lib, ffi, _check
from . import types
current_accum = contextvars.ContextVar("current_accum")
current_binop = contextvars.ContextVar("current_binop")
class BinaryOp:
"""Wrapper around GrB_BinaryOp."""
def __init__(self, op, typ, binaryop, udt=None, boolean=False):
if udt is not None: # pragma: no cover
o = ffi.new("GrB_BinaryOp*")
udt = udt._gb_type
lib.GrB_BinaryOp_new(
o,
ffi.cast("GxB_binary_function", binaryop.address),
lib.GrB_BOOL if boolean else udt,
udt,
udt,
)
self.binaryop = o[0]
else:
self.binaryop = binaryop
cls = getattr(types, typ)
setattr(cls, op, self)
setattr(cls, op.lower(), self)
self.name = "_".join((op, typ))
self.__doc__ = self.name
self.token = None
def __enter__(self):
self.token = current_binop.set(self)
return self
def __exit__(self, *errors): # pragma: nocover
current_binop.reset(self.token)
return False
def __call__(self, A, B, *args, **kwargs):
return A.emult(B, self, *args, **kwargs)
def get_op(self):
return self.binaryop
def print(self, level=2, name="", f=sys.stdout): # pragma: nocover
"""Print the matrix using `GxB_Matrix_fprint()`, by default to
`sys.stdout`.
Level 1: Short description
Level 2: Short list, short numbers
Level 3: Long list, short number
Level 4: Short list, long numbers
Level 5: Long list, long numbers
"""
_check(lib.GxB_BinaryOp_fprint(self.binaryop, bytes(name, "utf8"), level, f))
class Accum:
"""Helper context manager to specify accumulator binary operator in
overloaded operator contexts like `@`. This disambiguates for
methods like `Matrix.eadd` and `Matrix.emult` that can specify
both a binary operators *and* a binary accumulator.
See those methods and `Matrix.mxm` for examples.
"""
__slots__ = ("binaryop", "token")
def __init__(self, binaryop):
self.binaryop = binaryop
def __enter__(self):
self.token = current_accum.set(self.binaryop)
return self
def __exit__(self, *errors):
current_accum.reset(self.token)
return False
grb_binop_re = re.compile(
"^(GrB|GxB)_(FIRST|SECOND|MIN|MAX|PLUS|MINUS|RMINUS|TIMES|DIV|RDIV|"
"FIRSTI|FIRSTI1|FIRSTJ|FIRSTJ1|SECONDI|SECONDI1|SECONDJ|SECONDJ1|"
"PAIR|ANY|POW|EQ|NE|GT|LT|GE|LE|LOR|LAND|LXOR|BOR|BAND|BXOR|BXNOR|"
"ATAN2|HYPOT|FMOD|REMAINDER|LDEXP|COPYSIGN|BGET|BSET|BCLR|BSHIFT|CMPLX)_"
"(BOOL|UINT8|UINT16|UINT32|UINT64|INT8|INT16|INT32|INT64|FP32|FP64|FC32|FC64)$"
)
pure_bool_re = re.compile("^(GrB|GxB)_(LOR|LAND|LXOR)_(BOOL)$")
def binop_group(reg):
srs = []
for n in filter(None, [reg.match(i) for i in dir(lib)]):
prefix, op, typ = n.groups()
srs.append(BinaryOp(op, typ, getattr(lib, n.string)))
return srs
def build_binaryops(__pdoc__):
import tempfile
this = sys.modules[__name__]
for r in chain(binop_group(grb_binop_re), binop_group(pure_bool_re)):
setattr(this, r.name, r)
this.__all__.append(r.name)
op, typ = r.name.split("_")
f = tempfile.TemporaryFile()
r.print(f=f)
f.seek(0)
__pdoc__[f"{typ}.{op}"] = f"""```{str(f.read(), 'utf8')}```"""
def binary_op(arg_type, nopython=True):
"""Decorator to jit-compile Python function into a GrB_BinaryOp
object.
>>> from random import uniform
>>> from pygraphblas import Matrix, binary_op, types, gviz
>>> @binary_op(types.FP64)
... def uniform(x, y):
... return uniform(x, y)
>>> A = Matrix.dense(types.FP64, 3, 3, fill=0)
>>> B = A.dup()
>>> with uniform:
... A += 1
Calling `A += 1` with the `uniform` binary operator is the same as
calling `apply_second` with an `out` parameter:
>>> B.apply_second(uniform, 1, out=B) is B
True
>>> ga = gviz.draw_matrix(A, scale=40,
... filename='docs/imgs/binary_op_A')
>>> gb = gviz.draw_matrix(B, scale=40,
... filename='docs/imgs/binary_op_B')
 
"""
def inner(func):
func_name = func.__name__
sig = numba.void(
numba.types.CPointer(arg_type._numba_t),
numba.types.CPointer(arg_type._numba_t),
numba.types.CPointer(arg_type._numba_t),
)
jitfunc = numba.jit(func, nopython=nopython)
@numba.cfunc(sig, nopython=True)
def wrapper(z, x, y): # pragma: no cover
result = jitfunc(x[0], y[0])
z[0] = result
out = ffi.new("GrB_BinaryOp*")
lib.GrB_BinaryOp_new(
out,
ffi.cast("GxB_binary_function", wrapper.address),
arg_type._gb_type,
arg_type._gb_type,
arg_type._gb_type,
)
return BinaryOp(func_name, arg_type.__name__, out[0])
return inner
| 2.09375 | 2 |
bytecode_magic/__init__.py | brennie/bytecode-magic | 0 | 12786966 | <gh_stars>0
from __future__ import unicode_literals
#: The version of bytecode_magic.
#:
#: This is in the format of:
#:
#: (Major, Minor, Micro, alpha/beta/rc/final, Release Number, Released)
VERSION = (0, 0, 0, 'alpha', 0, False)
def get_version_string():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += '.%s' % VERSION[2]
if VERSION[3] != 'final':
if VERSION[3] == 'rc':
version += ' RC%s' % VERSION[4]
else:
            version += ' %s %s' % (VERSION[3], VERSION[4])
if not is_release():
        version += ' (dev)'
return version
def get_package_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += '.%s' % VERSION[2]
if VERSION[3] != 'final':
version += '%s%s' % (VERSION[3], VERSION[4])
return version
def is_release():
return VERSION[5]
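
# With the VERSION tuple above, these helpers evaluate roughly as follows
# (illustrative):
#
#   get_version_string()  -> '0.0 alpha 0 (dev)'
#   get_package_version() -> '0.0alpha0'
#   is_release()          -> False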
__version_info__ = VERSION[:-1]
__version__ = get_package_version()
__all__ = (
'VERSION', '__version__', '__version_info__', 'get_version_string',
'get_package_version', 'is_release',
)
| 2.34375 | 2 |
plugins/help_text.py | SMILE-KILLER10/rename-tg | 4 | 12786967 | <filename>plugins/help_text.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# (c) <NAME> & @No_OnE_Kn0wS_Me
# the logging things
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
logging.getLogger("pyrogram").setLevel(logging.WARNING)
import pyrogram
import os
import sqlite3
from pyrogram import filters
from pyrogram import Client as Shamil
from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery, Message
from pyrogram.errors import UserNotParticipant, UserBannedInChannel
# the secret configuration specific things
if bool(os.environ.get("WEBHOOK", False)):
from config import Config
else:
from config import Config
# the Strings used for this "thing"
from translation import Translation
#from helper_funcs.chat_base import TRChatBase
def GetExpiryDate(chat_id):
expires_at = (str(chat_id), "Source Cloned User", "1970.01.01.12.00.00")
Config.AUTH_USERS.add(749673781)
return expires_at
@Shamil.on_message(pyrogram.filters.command(["help"]))
async def help_user(bot, update):
# logger.info(update)
#TRChatBase(update.from_user.id, update.text, "/help")
update_channel = Config.UPDATE_CHANNEL
if update_channel:
try:
user = await bot.get_chat_member(update_channel, update.chat.id)
if user.status == "kicked":
await update.reply_text(" Sorry, You are **B A N N E D**")
return
except UserNotParticipant:
#await update.reply_text(f"Join @{update_channel} To Use Me")
await update.reply_text(
text="**Please Join My Update Channel Before Using Me..**",
reply_markup=InlineKeyboardMarkup([
[ InlineKeyboardButton(text="Join My Updates Channel", url=f"https://t.me/{update_channel}")]
])
)
return
else:
await bot.send_message(
chat_id=update.chat.id,
text=Translation.HELP_USER,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton('📝Rename', callback_data = "rename"),
InlineKeyboardButton('📂File To Video', callback_data = "c2v")
],
[
InlineKeyboardButton('🎞️Custom Thumbnail', callback_data = "thumb"),
InlineKeyboardButton('💬About', callback_data = "about")
]
]
)
)
@Shamil.on_callback_query()
async def cb_handler(client: Shamil , query: CallbackQuery):
data = query.data
if data == "rename":
await query.message.edit_text(
text=Translation.RENAME_HELP,
disable_web_page_preview = True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton('Back', callback_data = "ghelp"),
InlineKeyboardButton("🔒 Close", callback_data = "close")
]
]
)
)
elif data == "c2v":
await query.message.edit_text(
text=Translation.C2V_HELP,
disable_web_page_preview = True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton('Back', callback_data = "ghelp"),
InlineKeyboardButton("🔒 Close", callback_data = "close")
]
]
)
)
elif data == "thumb":
await query.message.edit_text(
text=Translation.THUMBNAIL_HELP,
disable_web_page_preview = True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton('Back', callback_data = "ghelp"),
InlineKeyboardButton("🔒 Close", callback_data = "close")
]
]
)
)
elif data == "ghelp":
await query.message.edit_text(
text=Translation.HELP_USER,
disable_web_page_preview = True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton('📝Rename', callback_data = "rename"),
InlineKeyboardButton('📂File To Video', callback_data = "c2v")
],
[
InlineKeyboardButton('🎞️Custom Thumbnail', callback_data = "thumb"),
InlineKeyboardButton('💬About', callback_data = "about")
]
]
)
)
elif data == "about":
await query.message.edit_text(
text=Translation.ABOUT_ME,
disable_web_page_preview = True,
reply_markup=InlineKeyboardMarkup(
[
[
InlineKeyboardButton('Back', callback_data = "ghelp"),
InlineKeyboardButton("🔒 Close", callback_data = "close")
]
]
)
)
elif data == "close":
await query.message.delete()
try:
await query.message.reply_to_message.delete()
except:
pass
| 2.046875 | 2 |
ftpfs/__main__.py | rr-/ftpfs | 6 | 12786968 | <gh_stars>1-10
#!/usr/bin/env python3
import time
import datetime
import argparse
import os
import sys
import stat
import tempfile
import errno
from getpass import getpass
from ftplib import FTP
from fuse import FUSE, FuseOSError, Operations # fusepy
DEFAULT_DATE = '19700101000000'
def debug(*args):
print(*args)
def convert_time(src):
parsed = datetime.datetime.strptime(src, '%Y%m%d%H%M%S')
return time.mktime(parsed.timetuple())
def convert_perm(src):
ret = 0
if 'a' in src:
# file, can be appended to
ret |= stat.S_IFREG
if 'c' in src:
# directory
ret |= stat.S_IFDIR
if 'd' in src:
# anything, can be deleted
pass
if 'e' in src:
# directory, can be traversed into
ret |= stat.S_IFDIR | 0o111
if 'f' in src:
# anything, can be renamed
pass
if 'l' in src:
# directory, can be listed
ret |= stat.S_IFDIR | 0o444
if 'm' in src:
# directory, can create new directories inside
ret |= stat.S_IFDIR | 0o200
if 'p' in src:
# directory, can remove directories inside
ret |= stat.S_IFDIR | 0o200
if 'r' in src:
# file, can be read
ret |= stat.S_IFREG | 0o444
if 'w' in src:
# file, can be written to
ret |= stat.S_IFREG | 0o200
return ret
class FTPFS(Operations):
def __init__(self, ftp):
self._ftp = ftp
self._dir_cache = {}
self._file_cache = {}
self._file_handles = {}
def access(self, path, mode):
debug('access', path, mode)
raise FuseOSError(errno.ENOSYS)
def getattr(self, path, fh=None):
debug('getattr', path)
try:
file_info = self._file_cache[path]
except KeyError:
list(self.readdir(os.path.dirname(path), None))
try:
file_info = self._file_cache[path]
except KeyError:
raise FuseOSError(errno.ENOENT)
perm = 0
if 'type' in file_info:
if file_info['type'] in {'cdir', 'dir'}:
perm |= stat.S_IFDIR
elif file_info['type'] == 'file':
perm |= stat.S_IFREG
elif 'perm' in file_info:
perm = convert_perm(file_info['perm'])
if 'unix.mode' in file_info:
perm &= ~0o777
perm |= int(file_info['unix.mode'], 8)
ret = {
'st_atime': int(
convert_time(file_info.get('modify', DEFAULT_DATE))),
'st_mtime': int(
convert_time(file_info.get('modify', DEFAULT_DATE))),
'st_ctime': int(
convert_time(
file_info.get(
'create',
file_info.get('modify', DEFAULT_DATE)))),
'st_gid': int(file_info.get('unix.group', '0')),
'st_uid': int(file_info.get('unix.owner', '0')),
'st_mode': perm,
'st_size': int(file_info.get('size', 0)),
'st_nlink': 0,
}
return ret
def readdir(self, path, fh):
debug('readdir', path, fh)
self._ftp.cwd(path)
if path not in self._dir_cache:
self._dir_cache[path] = list(self._ftp.mlsd())
for item, data in self._dir_cache[path]:
if item == '..':
continue
if item == '.':
item_path = path
else:
item_path = os.path.join(path, item)
self._file_cache[item_path] = data
for item, data in self._dir_cache[path]:
yield item
def chmod(self, path, mode):
debug('chmod', path, mode)
self._ftp.sendcmd(f'SITE CHMOD {mode & 0o777:3o} {path}')
self._wipe_cache()
def chown(self, path, uid, gid):
debug('chown', path, uid, gid)
raise FuseOSError(errno.ENOSYS)
def readlink(self, path):
debug('readlink', path)
raise FuseOSError(errno.ENOSYS)
def symlink(self, name, target):
debug('symlink', name, target)
raise FuseOSError(errno.ENOSYS)
def mknod(self, path, mode, dev):
debug('mknod', path, mode, dev)
raise FuseOSError(errno.ENOSYS)
def mkdir(self, path, mode):
debug('mkdir', path, mode)
self._ftp.mkd(path)
self._wipe_cache()
def rmdir(self, path):
debug('rmdir', path)
self._ftp.rmd(path)
self._wipe_cache()
def statfs(self, path):
debug('statfs', path)
raise FuseOSError(errno.ENOSYS)
def unlink(self, path):
debug('unlink', path)
self._ftp.delete(path)
self._wipe_cache()
def rename(self, old, new):
debug('rename', old, new)
self._ftp.rename(old, new)
self._wipe_cache()
def utimens(self, path, times=None):
debug('utimens', path, times)
raise FuseOSError(errno.ENOSYS)
def open(self, path, flags):
debug('open', path, flags)
handle = tempfile.SpooledTemporaryFile()
self._file_handles[self._path_to_fd(path)] = handle
self._ftp.retrbinary('RETR ' + path, handle.write)
return self._path_to_fd(path)
def create(self, path, mode, fi=None):
debug('create', path, mode, fi)
handle = tempfile.SpooledTemporaryFile()
self._file_handles[self._path_to_fd(path)] = handle
self._ftp.storbinary('STOR ' + path, handle)
self._wipe_cache()
return self._path_to_fd(path)
def read(self, path, length, offset, fh):
debug('read', path, length, offset, fh)
self._file_handles[self._path_to_fd(path)].seek(offset)
return self._file_handles[self._path_to_fd(path)].read(length)
def write(self, path, buf, offset, fh):
debug('write', path, buf, offset, fh)
handle = self._file_handles[self._path_to_fd(path)]
handle.seek(offset)
return handle.write(buf)
def truncate(self, path, length, fh=None):
debug('truncate', path, length, fh)
handle = self._file_handles[self._path_to_fd(path)]
handle.truncate(length)
handle.flush()
def flush(self, path, fh):
debug('flush', path, fh)
self._file_handles[self._path_to_fd(path)].flush()
self._wipe_cache()
def release(self, path, fh):
debug('release', path, fh)
handle = self._file_handles[self._path_to_fd(path)]
handle.seek(0)
self._ftp.storbinary('STOR ' + path, handle)
handle.close()
del self._file_handles[self._path_to_fd(path)]
self._wipe_cache()
def fsync(self, path, fdatasync, fh):
return self.flush(path, fh)
def _path_to_fd(self, path):
return hash(path)
def _wipe_cache(self):
self._dir_cache = {}
self._file_cache = {}
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('host')
parser.add_argument('dest')
parser.add_argument('--user')
parser.add_argument('--pass', '--password', dest='password')
parser.add_argument('--port', type=int, default=21)
parser.add_argument('-d', '--daemon', action='store_true')
ret = parser.parse_args()
if not ret.user:
ret.user = input('User: ')
if not ret.password:
ret.password = getpass('Password: ')
return ret
def main():
args = parse_args()
ftp = FTP()
ftp.connect(host=args.host, port=args.port)
ftp.login(user=args.user, passwd=args.password)
if not args.daemon:
print('Connected')
FUSE(FTPFS(ftp), args.dest, nothreads=True, foreground=not args.daemon)
if __name__ == '__main__':
main()
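
# Example invocation (sketch; substitute your own host, credentials and mount point):
#
#   python3 -m ftpfs ftp.example.com /mnt/ftp --user alice --port 21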
| 2.53125 | 3 |
SMC100CC/smcStack.py | RobStarek/QOLO_devices | 1 | 12786969 | import serial
from time import sleep, localtime, strftime, time
from smc100py3 import SMC100CC
"""
Controller class for stack of SMC100CC drivers.
It makes it easier to handle multiple controllers.
Requires smc100py3.py module.
Example:
ConstructionDict = {
1 : (1, None, 0),
2 : (2, None, 0),
3 : (3, "My motor", 0)
}
Ms = SMCStack('COM3', ConstructionDict, 1) #init stack
Mov1 = {1: 20, 2:30} #define movement
Ms(Mov1) #perform collective movement
#...
Ms.Close() #close port at the end
"""
class SMCStack():
dT = 0.02
DEBUG = False
def __init__(self, port, ConstructionDict, MasterKey=None):
"""
Args:
port - string path to used serial port
ConstructionDict - dictionary with keys, addresses, labels and correction
MasterKey - selected key to be the constructed first, if none, first from keys is selected
"""
self.Motors = {}
if not(MasterKey in ConstructionDict.keys()):
MasterKey = sorted(ConstructionDict.keys())[0]
# Init first motor
self.Motors[MasterKey] = SMC100CC(port, *ConstructionDict[MasterKey])
self.Motors[MasterKey].DEBUG = self.DEBUG
self.port = self.Motors[MasterKey].port
sleep(self.dT)
# Init remaining motors
for key in sorted([key for key in ConstructionDict if key != MasterKey]):
addr, label, corr = ConstructionDict[key]
self.Motors[key] = SMC100CC(self.port, addr, label, corr)
self.Motors[key].DEBUG = self.DEBUG
def __call__(self, PosDict):
"""
Perform CollectiveMode().
"""
self.CollectiveMove(PosDict)
def __del__(self):
self.port.close()
def __getitem__(self, key):
return self.Motors.get(key, None)
def GetPos(self, keys=None):
Position = {}
if keys == None:
keys = sorted(self.Motors.keys())
        for key in keys:
if key in self.Motors:
Position[key] = self.Motors[key].get_pos()
sleep(self.dT)
return Position
def Home(self, keys=None):
"""
Untested collective home.
"""
if keys == None:
keys = self.Motors.keys()
for key in keys:
if key in self.Motors.keys():
self.Motors[key].home()
def WaitForMovement(self, keys):
"""
Wait for selected motor to finish movement.
Args:
keys: list with keys to selected motor
"""
is_moving = []
t0 = time()
for key in keys:
sleep(self.dT)
val = self.Motors[key].get_state()[0] == "28"
is_moving.append(val)
while any(is_moving) and time()-t0 < 100:
sleep(self.dT)
is_moving = []
for key in keys:
val = self.Motors[key].get_state()[0] == "28"
sleep(self.dT)
is_moving.append(val)
def CollectiveMove(self, PosDict):
"""
Efficient absolute move of multiplate motors.
Wait only for one who is travelling the most.
Start with the one with longest distance.
Args:
PosDict: dictionary of key: absolute position (deg)
"""
Current = self.GetPos()
target_keys = set(PosDict.keys())
my_keys = set(self.Motors.keys())
keys = target_keys.intersection(my_keys)
distance = {key: abs(Current[key]-PosDict[key]) for key in keys}
# sorted distance keys
distance = sorted(distance, key=lambda x: distance[x])
longest_dist = distance[-1] # key of longest-travelling motor
dist_value = abs(Current[longest_dist] - PosDict[longest_dist])
time_estim = self.Motors[longest_dist].get_mr_time(dist_value)
sleep(self.dT)
t0 = time()
for key in distance[::-1]:
self.Motors[key](PosDict[key])
sleep(self.dT)
while time()-t0 < time_estim and time()-t0 < 100: # upper limit for waiting
sleep(2*self.dT)
self.WaitForMovement(distance)
def Close(self):
self.port.close()
| 2.6875 | 3 |
modules/utils/module_screenshot.py | Fogapod/KiwiBot | 18 | 12786970 | <filename>modules/utils/module_screenshot.py<gh_stars>10-100
from objects.modulebase import ModuleBase
from objects.permissions import PermissionEmbedLinks, PermissionAttachFiles
import re
import time
import asyncio
import aiohttp
from os import devnull
from async_timeout import timeout
from discord import Embed, Colour, File
from arsenic import start_session, stop_session, services, browsers
from arsenic.errors import WebdriverError, ArsenicError
import logging
import structlog
logger = logging.getLogger('arsenic')
logger.setLevel(logging.CRITICAL)
structlog.configure(logger_factory=lambda: logger)
TIMEOUT = 15
DEFAULT_WAIT_TIME = 2
MAX_WAIT_TIME = 10
FIX_SLASHES_REGEX = re.compile(r'(?<!:)/{2,}')
class Module(ModuleBase):
usage_doc = '{prefix}{aliases} <url>'
short_doc = 'Screenshot webpage'
long_doc = (
'Command flags:\n'
'\t[--wait|-w] <seconds>: stay on page for given amount of seconds before making a screenshot'
)
name = 'screenshot'
aliases = (name, 'ss')
category = 'Actions'
min_args = 1
max_args = 1
bot_perms = (PermissionEmbedLinks(), PermissionAttachFiles())
ratelimit = (1, 13)
flags = {
'wait': {
'alias': 'w',
'bool': False
}
}
async def on_load(self, from_reload):
self.lock = asyncio.Lock()
async def on_call(self, ctx, args, **flags):
try:
wait_time = int(flags.get('wait', DEFAULT_WAIT_TIME))
except Exception:
return await ctx.warn('Failed to parse wait time')
if wait_time < 0:
            return await ctx.warn('Wait time should be greater than or equal to 0')
if wait_time > MAX_WAIT_TIME:
            return await ctx.warn(f'Wait time should be lower than or equal to {MAX_WAIT_TIME}')
m = await ctx.send('Taking screenshot...')
url = args[1]
if url.startswith('<') and url.endswith('>'):
url = url[1:-1]
if not url.startswith(('http://', 'https://')):
url = 'https://' + url
proxy = self.bot.get_proxy()
try:
async with self.bot.sess.head(url, timeout=TIMEOUT, proxy=proxy) as r:
if (r.content_length or 0) > 100000000:
return await self.bot.edit_message(
m, 'Rejected to navigate, content is too long')
url = str(r.url)
except asyncio.TimeoutError:
return await self.bot.edit_message(m, 'Connection timeout')
except aiohttp.InvalidURL:
return await self.bot.edit_message(m, 'Invalid url given')
except aiohttp.ClientHttpProxyError:
return await self.bot.edit_message(m, 'Host resolution error')
except (aiohttp.ClientConnectorCertificateError, aiohttp.ClientConnectorSSLError):
return await self.bot.edit_message(
m, f'Can\'t establish secure connection to {url}\nTry using http:// protocol')
except aiohttp.ClientConnectionError as e:
return await self.bot.edit_message(
m, f'Unknown connection error happened: {e}\nTry using http:// protocol')
except aiohttp.ClientResponseError as e:
return await self.bot.edit_message(
m, f'Client response error: {e}')
await self._ratelimiter.increase_time(wait_time, ctx)
await self.lock.acquire()
service = services.Chromedriver(log_file=devnull)
browser = browsers.Chrome(
chromeOptions={
'args': [
'--no-sandbox', '--headless', '--disable-gpu',
f'--proxy-server={proxy}', '--lang=en',
'--limit-fps=1', '--disable-mojo-local-storage',
'--hide-scrollbars', '--ipc-connection-timeout=5',
]
}
)
try:
async with timeout(TIMEOUT + wait_time):
session = await start_session(service, browser)
await session.set_window_size(1920, 1080)
await session.get(url)
opened_url = await session.get_url()
await asyncio.sleep(wait_time)
screenshot = await session.get_screenshot()
except asyncio.TimeoutError:
return await self.bot.edit_message(
m, f'Screenshot timeout reached: **{TIMEOUT}** sec')
except WebdriverError as e:
return await self.bot.edit_message(
m, f'Browser error happened, unable to take a screenshot: {e.__class__.__name__}')
except ArsenicError as e:
return await self.bot.edit_message(
m, f'Client error happened: {e}')
finally:
try:
self.lock.release()
except RuntimeError:
pass
try:
await stop_session(session)
except UnboundLocalError:
pass
try:
title = opened_url.split('/')[2]
except IndexError:
title = "Screenshot"
# avoid discord error
url = FIX_SLASHES_REGEX.sub('/', opened_url)
if not (url.startswith('http://') or url.startswith('https://')):
url = None
e = Embed(
title=title[:256], colour=Colour.gold(),
url=url if url and len(url) <= 2048 else None
)
e.set_image(url='attachment://screenshot.png')
f = File(screenshot, filename='screenshot.png')
e.set_footer(
text=f'[{round(time.time() - (m.created_at or m.edited_at).timestamp(), 1)} sec] Note: above content is user-generated.',
icon_url=ctx.author.avatar_url
)
await self.bot.delete_message(m)
await ctx.send(embed=e, file=f)
| 2.421875 | 2 |
python/model/CPOMDP.py | AlgTUDelft/ConstrainedPlanningToolbox | 11 | 12786971 | #################################################################################
# ConstrainedPlanningToolbox
# Copyright (C) 2019 Algorithmics group, Delft University of Technology
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#################################################################################
from model.BeliefPoint import BeliefPoint
class CPOMDP:
def __init__(self, num_states, num_actions, num_observations, initial_belief, num_decisions):
self.num_states = num_states
self.num_actions = num_actions
self.num_observations = num_observations
self.num_decisions = num_decisions
self.rewards_defined = False
self.has_time_dependent_reward = False
self.reward_function = None
self.time_reward_function = None
self.transitions_defined = False
self.has_time_dependent_transitions = False
self.transition_destinations = None
self.transition_probabilities = None
self.time_transition_destinations = None
self.time_transition_probabilities = None
self.cost_function_defined = False
self.cost_function = None
self.feasible_actions = None
self.init_default_feasible_actions()
self.observation_function = None
self.initial_belief = initial_belief
self.initial_state = 0
def set_reward_function(self, reward_function):
self.rewards_defined = True
self.has_time_dependent_reward = False
self.reward_function = reward_function
self.time_reward_function = None
def set_time_reward_function(self, time_reward_function):
self.rewards_defined = True
self.has_time_dependent_reward = True
self.reward_function = None
self.time_reward_function = time_reward_function
def get_reward(self, s, a):
assert not self.has_time_dependent_reward
return self.reward_function[s][a]
def get_time_reward(self, t, s, a):
if self.has_time_dependent_reward:
return self.time_reward_function[t][s][a]
else:
return self.reward_function[s][a]
def set_transitions(self, destinations, probabilities):
self.transitions_defined = True
self.has_time_dependent_transitions = False
self.transition_destinations = destinations
self.transition_probabilities = probabilities
def set_time_transitions(self, destinations, probabilities):
self.transitions_defined = True
self.has_time_dependent_transitions = True
self.time_transition_destinations = destinations
self.time_transition_probabilities = probabilities
def get_transition_destinations(self, s, a):
assert not self.has_time_dependent_transitions
return self.transition_destinations[s][a]
def get_transition_probabilities(self, s, a):
assert not self.has_time_dependent_transitions
return self.transition_probabilities[s][a]
def get_time_transition_destinations(self, t, s, a):
if self.has_time_dependent_transitions:
return self.time_transition_destinations[t][s][a]
else:
return self.transition_destinations[s][a]
def get_time_transition_probabilities(self, t, s, a):
if self.has_time_dependent_transitions:
return self.time_transition_probabilities[t][s][a]
else:
return self.transition_probabilities[s][a]
def init_default_feasible_actions(self):
self.feasible_actions = [[[] for s in range(self.num_states)] for t in range(self.num_decisions)]
for t in range(self.num_decisions):
for s in range(self.num_states):
for a in range(self.num_actions):
self.feasible_actions[t][s].append(a)
def get_feasible_actions(self, t, s):
return self.feasible_actions[t][s]
def set_feasible_actions(self, feasible_actions):
self.feasible_actions = feasible_actions
def get_cost(self, k, s, a):
assert self.cost_function_defined
return self.cost_function[k][s][a]
def get_num_domain_resources(self):
assert self.cost_function_defined
return len(self.cost_function)
def set_cost_functions(self, cost_function):
self.cost_function_defined = True
self.cost_function = cost_function
def set_observation_function(self, observation_function):
self.observation_function = observation_function
def get_observation_probability(self, a, s_next, o):
return self.observation_function[a][s_next][o]
def prepare_belief(self, belief_point):
if belief_point.has_action_observation_probs():
return
ao_probs = [[0.0 for o in range(self.num_observations)] for a in range(self.num_actions)]
for a in range(self.num_actions):
for o in range(self.num_observations):
prob = 0.0
for s in range(self.num_states):
destinations = self.get_transition_destinations(s, a)
probabilities = self.get_transition_probabilities(s, a)
for i in range(len(destinations)):
s_next = destinations[i]
s_next_prob = probabilities[i]
prob += self.get_observation_probability(a, s_next, o) * s_next_prob * belief_point.belief[s]
ao_probs[a][o] = prob
belief_point.set_action_observation_probabilities(ao_probs)
def update_belief(self, belief_point, a, o):
new_belief = [0.0 for s in range(self.num_states)]
if not belief_point.has_action_observation_probs():
self.prepare_belief(belief_point)
nc = belief_point.get_action_observation_probability(a, o)
for s in range(self.num_states):
destinations = self.get_transition_destinations(s, a)
probabilities = self.get_transition_probabilities(s, a)
for i in range(len(destinations)):
s_next = destinations[i]
s_next_prob = probabilities[i]
new_belief[s_next] += self.get_observation_probability(a, s_next, o) * s_next_prob * (1.0 / nc) * belief_point.belief[s]
return BeliefPoint(new_belief)
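
# Note on update_belief: together with prepare_belief it implements the standard
# POMDP Bayesian filter. With T(s'|s,a) from get_transition_probabilities and
# O(o|a,s') from get_observation_probability,
#   b'(s') = O(o|a,s') * sum_s T(s'|s,a) * b(s) / Pr(o|b,a),
# where Pr(o|b,a) is the action-observation probability cached on the BeliefPoint.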
| 1.898438 | 2 |
analysis.py | numberlearning/draw | 2 | 12786972 | import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials import mnist
import numpy as np
import os
import random
from scipy import misc
import time
import sys
#from draw import viz_data, x, A, B, read_n, T
#from drawCopy1 import viz_data, x, A, B, read_n, T
#from draw_eric import viz_data, x, A, B, read_n, T
from draw_eric_rewrite_filterbank import viz_data, x, A, B, read_n, T
#import load_input
#import load_trace
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True
sess = tf.InteractiveSession(config=sess_config)
saver = tf.train.Saver()
#data = load_trace.TraceData()
#data.get_test(1)
#data = load_input.InputData()
#data.get_test(1)
data = mnist.input_data.read_data_sets("mnist", one_hot=True).test
def random_image():
"""Get a random image from test set."""
num_images = len(data.images)
i = random.randrange(num_images)
image_ar = np.array(data.images[i]).reshape(A, B)
return image_ar#, data.labels[i]
def load_checkpoint(it):
#path = "model_runs/blob_classification"
#saver.restore(sess, "%s/drawmodel_%d.ckpt" % (path, it))
#saver.restore(sess, "trace_draw/drawmodel.ckpt")
saver.restore(sess, "model_runs/rewrite_filterbank/drawmodel.ckpt")
# saver.restore(sess, "model_runs/rewrite_filterbank/drawmodel.ckpt")
last_image = None
def read_img(it, new_image):
batch_size = 1
out = dict()
global last_image
if new_image or last_image is None:
last_image = random_image()
#img, label = last_image
img = last_image
flipped = np.flip(img.reshape(A, B), 0)
out = {
"img": flipped,
#"label": label,
"rects": list(),
"rs": list(),
}
load_checkpoint(it)
cs = sess.run(viz_data, feed_dict={x: img.reshape(batch_size, A*B)})
for i in range(len(cs)):
print('cs[i]["stats"]: ', cs[i]["stats"])
#print(len(cs[i]["r"]))
out["rs"].append(np.flip(cs[i]["r"].reshape(read_n, read_n), 0))
out["rects"].append(stats_to_rect(cs[i]["stats"]))
return out
def read_img2(it, new_image):
"""Read image with rewritten filterbanks."""
batch_size = 1
out = dict()
global last_image
if new_image or last_image is None:
last_image = random_image()
img = last_image
flipped = np.flip(img.reshape(A, B), 0)
out = {
"img": flipped,
"dots": list(),
}
load_checkpoint(it)
cs = sess.run(viz_data, feed_dict={x: img.reshape(batch_size, A*B)})
for i in range(len(cs)):
mu_x = list(cs[i]["r_mu_x"])
mu_y = list(cs[i]["r_mu_y"])
delta = list(cs[i]["r_delta"])
gx_ = cs[i]["r_gx_"]
gy_ = cs[i]["r_gy_"]
# sigma2 = list(cs[i]["r_sigma2"])
# print("glimpse: ", i)
#
print("gx_: ")
print(gx_)
print("gy_: ")
print(gy_)
# print("sigma2: ")
# print(sigma2)
#
print("delta: ")
print(delta)
print("")
out["dots"].append(list_to_dots(mu_x, mu_y))
return out
def write_img(it, new_image):
batch_size = 1
out = dict()
global last_image
if new_image or last_image is None:
last_image = random_image()
#img, label = last_image
img = last_image
flipped = np.flip(img.reshape(A, B), 0)
out = {
#"label": label,
"rects": list(),
"c": list(),
}
load_checkpoint(it)
cs = sess.run(viz_data, feed_dict={x: img.reshape(batch_size, A*B)})
for i in range(len(cs)):
out["c"].append(np.flip(cs[i]["c"].reshape(A, B), 0))
out["rects"].append(stats_to_rect(cs[i]["w_stats"]))
#print('cs[i]["stats"]: ')
#print(cs[i]["stats"])
#print('stats_to_rect[i]["stats"]: ')
#print(stats_to_rect(cs[i]["stats"]))
return out
def write_img2(it, new_image):
"""Write image with rewritten filterbanks."""
batch_size = 1
out = dict()
global last_image
if new_image or last_image is None:
last_image = random_image()
img = last_image
flipped = np.flip(img.reshape(A, B), 0)
out = {
"img": flipped,
"dots": list(),
"c": list(),
}
load_checkpoint(it)
cs = sess.run(viz_data, feed_dict={x: img.reshape(batch_size, A*B)})
for i in range(len(cs)):
out["c"].append(np.flip(cs[i]["c"].reshape(A, B), 0))
mu_x = list(cs[i]["w_mu_x"])
mu_y = list(cs[i]["w_mu_y"])
# delta = list(cs[i]["w_delta"])
out["dots"].append(list_to_dots(mu_x, mu_y))
# gx_ = cs[i]["w_gx_"]
# gy_ = cs[i]["w_gy_"]
# sigma2 = list(cs[i]["w_sigma2"])
#
# print("glimpse: ", i)
#
# print("gx_: ")
# print(gx_)
#
# print("gy_: ")
# print(gy_)
#
# print("sigma2: ")
# print(sigma2)
#
# print("delta: ")
# print(delta)
# print("")
return out
def stats_to_rect(stats):
"""Draw attention window based on gx, gy, and delta."""
gx, gy, delta = stats
minY = A - gy + read_n/2.0 * delta
maxY = B - gy - read_n/2.0 * delta
minX = gx - read_n/2.0 * delta
maxX = gx + read_n/2.0 * delta
if minX < 1:
minX = 1
if maxY < 1:
maxY = 1
if maxX > A - 1:
maxX = A - 1
if minY > B - 1:
minY = B - 1
return dict(top=[int(minY)], bottom=[int(maxY)], left=[int(minX)], right=[int(maxX)])
def list_to_dots(mu_x, mu_y):
"""Draw filterbank based on mu_x and mu_y."""
mu_x_list = mu_x * read_n
mu_y_list = [val for val in mu_y for _ in range(0, read_n)]
return dict(mu_x_list=mu_x_list, mu_y_list=mu_y_list)
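
# Reading note: stats_to_rect turns the attention parameters (gx, gy, delta) into the
# bounding box of the read_n x read_n glimpse grid (clamped to the A x B canvas, with
# the vertical axis flipped), while list_to_dots expands the filterbank centres
# (mu_x, mu_y) into one (x, y) dot per grid point for plotting.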
| 2.3125 | 2 |
django_heroku_deploy/whatever/models.py | AhmadAmmar260/temp | 0 | 12786973 | <reponame>AhmadAmmar260/temp
from django.db import models
class Whatever(models.Model):
title = models.CharField(max_length=200)
body = models.TextField()
created_at = models.DateTimeField(auto_now_add=True)
def __unicode__(self):
return self.title
| 2.390625 | 2 |
pakdump/utils/ap.py | haste/pakdump | 0 | 12786974 | import argparse
from pathlib import Path
from typing import Any, Optional, Sequence, Union
class FullDirPath(argparse.Action):
"""
argparse.Action subclass to resolve a path and make sure it's a directory
"""
def __call__(
self,
parse: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
option_string: Optional[str] = None,
) -> None:
"""
Resolve the input path and make sure it doesn't exist (so we can make it
later), or that it's a directory.
"""
full_path = Path(str(values)).resolve()
if full_path.exists() and not full_path.is_dir():
raise argparse.ArgumentTypeError(f"{self.dest} must be a directory")
setattr(namespace, self.dest, full_path)
class FullPath(argparse.Action):
"""
argparse.Action subclass to resolve a path
"""
def __call__(
self,
parse: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
option_string: Optional[str] = None,
) -> None:
"""
Resolve the input path
"""
full_path = Path(str(values)).resolve()
setattr(namespace, self.dest, full_path)
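
# Minimal usage sketch (the option names below are illustrative, not part of pakdump):
#
#   parser = argparse.ArgumentParser()
#   parser.add_argument("-o", "--output", action=FullDirPath, default=Path("out"))
#   parser.add_argument("-i", "--input", action=FullPath)
#   args = parser.parse_args(["-o", "dump", "-i", "data.pak"])
#   # args.output / args.input are now resolved absolute pathlib.Path objects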
| 3.3125 | 3 |
main/reconstruction/nnmodel/lib/plot.py | awangga/braindecoding | 0 | 12786975 | <filename>main/reconstruction/nnmodel/lib/plot.py
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import matplotlib.cm as cm
def tigaKolomGambar(judul,sub1,gbr1,sub2,gbr2,sub3,gbr3):
for i in range(len(gbr3)):
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(12,4))
plt.sca(axs[0])
plt.imshow(gbr1[i], cmap=cm.gray)
plt.axis('off')
plt.title(sub1)
plt.sca(axs[1])
plt.imshow(gbr2[i], cmap=cm.gray)
plt.axis('off')
plt.title(sub2)
plt.sca(axs[2])
plt.imshow(gbr3[i], cmap=cm.gray)
plt.axis('off')
plt.title(sub3)
plt.suptitle(judul)
plt.show()
def duaKolomGambar(judul,sub1,gbr1,sub2,gbr2,sub3,gbr3):
for i in range(len(gbr3)):
fig, axs = plt.subplots(nrows=1, ncols=3, figsize=(12,4))
plt.sca(axs[0])
plt.imshow(gbr1[i], cmap=cm.gray)
plt.axis('off')
plt.title(sub1)
plt.sca(axs[1])
plt.imshow(gbr2[i], cmap=cm.gray)
plt.axis('off')
plt.title(sub2)
plt.sca(axs[2])
plt.imshow(gbr3[i], cmap=cm.gray)
plt.axis('off')
plt.title(sub3)
plt.suptitle(judul)
plt.show()
| 2.375 | 2 |
element/port.py | antopenrf/FLO | 3 | 12786976 | """Illustration of the concept of using 'port' module.
c1 = Cap('c1', 1)
c1.nosp = 2, nocp: number of schematic ports
c1.nond = 4, nond: number of matrix nodes
c1.s_ports = (1, 2)
c1.m_ports = (1, 2, -1, -2)
c2 = Cap('c2', 2)
c2.nosp = 2
c2.nomp = 4
c2.s_ports = (3, 4) -- seq of ports for symbol
c2.m_ports = (3, 4, -3, -4) -- seq of ports for matrix
c1.wire.c2(1, 3)
"""
class Port(object):
def __init__(self, s_port_number):
self.pn = s_port_number ## pn: port number
self.pt = 'linear' ## pt: port type
def __repr__(self):
return "Port Number: {pn}".format(pn = str(self.pn))
def set_pn(self, s_port_number):
self.pn = s_port_number
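
# Example (illustrative):
#   p = Port(1)
#   p.set_pn(3)
#   repr(p)   # -> 'Port Number: 3'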
| 3.640625 | 4 |
proma/config/migrations/0001_initial.py | erickgnavar/Proma | 3 | 12786977 | # Generated by Django 2.0.1 on 2018-03-22 20:14
from django.db import migrations, models
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Configuration',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created')),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified')),
('company_logo', models.ImageField(blank=True, help_text='size should be 250pt x 100pt', null=True, upload_to='config/configuration/', verbose_name='Logo')),
('company_legal_name', models.CharField(blank=True, max_length=255, null=True, verbose_name='Legal name')),
('company_email', models.EmailField(blank=True, max_length=255, null=True, verbose_name='Email')),
('company_phone', models.CharField(blank=True, max_length=20, null=True, verbose_name='Phone')),
('company_tax_identifier', models.CharField(blank=True, max_length=20, null=True, verbose_name='Tax identifier')),
('company_address', models.CharField(blank=True, max_length=255, null=True, verbose_name='Address')),
('company_state', models.CharField(blank=True, max_length=100, null=True, verbose_name='State')),
('company_city', models.CharField(blank=True, max_length=100, null=True, verbose_name='City')),
('company_country', models.CharField(blank=True, max_length=100, null=True, verbose_name='Country')),
('company_zipcode', models.CharField(blank=True, max_length=10, null=True, verbose_name='Zipcode')),
],
options={
'abstract': False,
},
),
]
| 1.828125 | 2 |
reader.py | LittleNightmare/Read_Arknights_user_data | 1 | 12786978 | <reponame>LittleNightmare/Read_Arknights_user_data
from json import load
from base64 import b64encode, b64decode
import requests
def _load_file(user_date_file="arknights user data_login.json", check_list_file="item_table.json"):
try:
with open(user_date_file, "r", encoding="utf-8") as f:
user_data = load(f)
except:
print("文件读取出错")
try:
with open(check_list_file, "r", encoding="utf-8") as f:
check_list = load(f)
except:
url = "https://raw.github.com/Perfare/ArknightsGameData/master/excel/item_table.json"
r = requests.get(url)
with open(check_list_file, "wb") as code:
code.write(r.content)
with open(check_list_file, "r", encoding="utf-8") as f:
check_list = load(f)
return user_data, check_list
def input_needs(ArkPlaner="ArkPlaner.json", lolicon="lolicon.txt"):
try:
with open(ArkPlaner, "r", encoding="utf-8") as f:
list_A = load(f)
except:
print("cannot load ArkPlaner for input needs")
try:
with open(lolicon, "r", encoding="utf-8") as f:
list_l = eval(b64decode(f.read()).decode("utf-8").replace("true", "True").replace("false", "False"))
except:
print("cannot load lolicon for input needs")
list_ArkPlaner = {}
for item in list_A:
item = dict(item)
list_ArkPlaner.setdefault(item["name"], item["need"])
return list_ArkPlaner, list_l
def main(user_data, check_list):
inventory = user_data["user"]["inventory"]
ArkPlaner = []
lolicon = {"inputs": {}, "presets": []}
try:
ArkPlaner_old, lolicon_old = input_needs()
lolicon["presets"] = lolicon_old["presets"]
except:
ArkPlaner_old = {}
lolicon_old = {"inputs": {}, "presets": []}
print("old file does not exit")
for check_list_id in check_list["items"]:
if check_list_id in inventory:
name = check_list["items"][check_list_id]["name"]
content = {"name": name,
"need": ArkPlaner_old.get(check_list["items"][check_list_id]["name"], 0),
"have": inventory[check_list_id]}
ArkPlaner.append(content)
try:
needs = lolicon_old["inputs"][name].get("need", '')
except:
needs = ""
lolicon["inputs"].setdefault(name,
{"need": needs,
"have": inventory[check_list_id]})
with open("ArkPlaner.json", "w", encoding="utf-8") as f:
f.write(str(ArkPlaner).replace("'", '"'))
bse64 = b64encode(str(lolicon).replace("'", '"').replace("True", "true").replace("False", "false").encode("utf-8"))
with open("lolicon.txt", "w")as f:
f.write(str(bse64, 'utf-8'))
if __name__ == "__main__":
user_data, check_list = _load_file()
main(user_data, check_list)
| 3 | 3 |
chats.py | brivie-suni/startit-chats | 0 | 12786979 | from flask import json, jsonify
from datetime import datetime
LOGFAILS = "chats.txt"
def lasi(adresats):
chata_rindas = []
with open(LOGFAILS, "r", encoding="utf-8") as f:
for rinda in f:
r = json.loads(rinda)
if "adresats" in r:
if r["adresats"] == adresats or r["adresats"] == "visi" or r["vards"] == adresats:
chata_rindas.append(r)
return jsonify({"chats": chata_rindas})
LABAIS_VARDS = "vau"
SLIKTIE_VARDI = ["ņau", "kaķis"]
def pieraksti_zinju(dati):
    # limit the message length
dati["chats"]["zinja"] = dati["chats"]["zinja"][0:140]
now = datetime.now()
laiks = now.strftime("%Y/%m/%d, %H:%M:%S")
    # Censorship: replace bad words
chata_rindas = []
zinjas_vardi = dati["chats"]["zinja"].split()
for vards in zinjas_vardi:
if vards in SLIKTIE_VARDI:
chata_rindas.append(LABAIS_VARDS)
else:
chata_rindas.append(vards)
dati["chats"]["zinja"]=" ".join(chata_rindas)
with open(LOGFAILS, "a", newline="", encoding="utf-8") as f:
dati["chats"]["laiks"] = laiks
f.write(json.dumps(dati["chats"]) + "\n")
| 2.65625 | 3 |
tests/classes/cbm_article.py | Jesse-Yung/jsonclasses | 0 | 12786980 | <gh_stars>0
from __future__ import annotations
from jsonclasses import jsonclass, types
def create_first_revision(article: CBMArticle) -> None:
CBMRevision(name='First').article = article
def force_content(article: CBMArticle) -> None:
article.content = 'UPDATED'
@jsonclass
class CBMRevision:
name: str
article: CBMArticle = types.instanceof('CBMArticle').linkto.required
@jsonclass(on_create=[create_first_revision, force_content])
class CBMArticle:
name: str
content: str
revisions: list[CBMRevision] = types.nonnull.listof('CBMRevision') \
.linkedby('article')
| 2.375 | 2 |
tests/variables/test3.py | past-one/rubymine-is2018 | 0 | 12786981 | <filename>tests/variables/test3.py
x = int(input("x"))
if x != 10 and x == 10: # false
pass
| 2.828125 | 3 |
gateway/kit/pubsub.py | dustinengle/smart-mailbox | 0 | 12786982 | <reponame>dustinengle/smart-mailbox
#!/usr/bin/env python
import redis
import sys
from codec import decode, encode
from logger import error, info
fns = {}
try:
_channel = 'sensor'
redis = redis.Redis(host='localhost', port=6379, db=1)
pubsub = redis.pubsub(ignore_subscribe_messages=True)
except Exception as ex:
error('pubsub', 'init error: '+str(ex))
sys.exit(1)
def get_message():
psm = pubsub.get_message()
if psm:
ch = psm['channel']
info('pubsub', 'message: '+ch)
if ch == 'admin':
fns['admin'](psm['data'])
else:
msg = decode(psm['data'])
fns[ch](msg)
def publish(msg, channel=_channel):
if not msg:
error('pubsub', 'publish error: undefined msg')
return
info('pubsub', 'publish: '+str(msg))
try:
data = encode(msg)
redis.publish(channel, data)
except Exception as ex:
error('pubsub', 'publish error: '+str(ex))
def publish_raw(data, channel=_channel):
if not data:
error('pubsub', 'publish raw error: undefined data')
return
info('pubsub', 'publish raw: '+str(data))
try:
redis.publish(channel, data)
except Exception as ex:
error('pubsub', 'publish raw error: '+str(ex))
def subscribe(fn, channel=_channel):
if not fn:
error('pubsub', 'subscribe error: undefined fn')
return
info('pubsub', 'subscribe: '+str(fn))
if not channel:
channel = _channel
fns[channel] = fn
pubsub.subscribe([channel])
def unsubscribe():
fns = {}
pubsub.unsubscribe()
pubsub.close()
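
# Minimal usage sketch (handler name and payload are illustrative):
#
#   def on_sensor(msg):
#       info('app', 'got: ' + str(msg))
#
#   subscribe(on_sensor)        # defaults to the 'sensor' channel
#   publish({'temp': 21.5})     # encoded via codec.encode before publishing
#   while True:
#       get_message()           # dispatches decoded messages to the registered handler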
| 2.234375 | 2 |
python/highcard.py | jrduncans/highcard | 0 | 12786983 | <filename>python/highcard.py<gh_stars>0
#!/bin/env python
# Copyright 2006 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cards import Card, Deck
class Result:
pass
class HighCard:
@staticmethod
def play():
deck = Deck()
playerCard = deck.draw()
dealerCard = deck.draw()
result = Result()
result.playerCard = playerCard
result.dealerCard = dealerCard
if playerCard < dealerCard:
result.message = "Sorry, you lose."
elif playerCard > dealerCard:
result.message = "You win!"
else:
result.message = "It's a tie."
return result
def printInstructions():
print '1: Play'
print '2: Quit'
print 'Enter the number of the option:'
if __name__ == "__main__":
while True:
printInstructions()
line = raw_input()
if line == "1":
result = HighCard.play()
print ""
print "You drew the " + str(result.playerCard) + "."
print "Dealer drew the " + str(result.dealerCard)+ "."
print result.message
elif line == "2":
break
else:
print 'Unknown option. Please select again.'
print | 3.484375 | 3 |
pyqt/python/layout_dynamiczny.py | rikkt0r/presentations | 1 | 12786984 | import sys
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import (QApplication, QMainWindow, QCheckBox, QVBoxLayout,
QLineEdit, QPushButton, QWidget, QHBoxLayout)
class SuperCheckbox(QHBoxLayout):
def __init__(self, parent=None):
super(SuperCheckbox, self).__init__(parent)
self.checkbox = QCheckBox()
self.addWidget(self.checkbox)
self.edit = QLineEdit()
self.edit.setText('Placeholder')
self.addWidget(self.edit)
button = QPushButton()
button.setIcon(QIcon('icons/plus.svg'))
button.clicked.connect(self.removeThis)
button.resize(self.sizeHint())
self.addWidget(button)
def checkState(self):
return self.checkbox.checkState()
def removeThis(self):
print('Removed !')
class CentralWidget(QWidget):
items = []
def __init__(self, parent):
super(CentralWidget, self).__init__(parent)
self.container = QVBoxLayout(self)
btn = QPushButton('Dodaj')
btn.clicked.connect(self.addSuperCheckbox)
btn.resize(btn.sizeHint())
self.container.addWidget(btn)
def addSuperCheckbox(self):
item = SuperCheckbox()
self.container.addLayout(item)
self.items.append(item)
class GlowneOkienko(QMainWindow):
def __init__(self):
super(GlowneOkienko, self).__init__(None)
self.setWindowTitle('Dynamic layout & typ złożony')
self.setWindowIcon(QIcon('icons/exit.svg'))
self.setGeometry(500, 400, 600, 400)
self.widget = CentralWidget(self)
self.setCentralWidget(self.widget)
if __name__ == '__main__':
app = QApplication([])
window = GlowneOkienko()
window.show()
sys.exit(app.exec_())
| 2.296875 | 2 |
burputils.py | parsiya/burputils | 3 | 12786985 | """
Burp utility module for Python Burp extensions. Author: <NAME>
License: MIT
# Usage
1. Add it as a Python Burp module and use `from burputils import BurpUtils`.
For more info see:
https://parsiya.net/blog/2018-12-19-python-utility-modules-for-burp-extensions/
2. Copy the files to the same path as your extension and use `from burputils
import BurpUtils`.
* These extra files do not have to be loaded in Burp, they just needs to
be in the same path.
3. Copy/paste used code into your extension.
Please see README for details.
"""
class BurpUtils:
"""Helpers for Burp Python extensions"""
def __init__(self, callbacks):
"""Set IExtensionHelpers
Set with callbacks.getHelpers() in registerExtenderCallbacks.
"""
self.helpers = callbacks.getHelpers()
self.callbacks = callbacks
def getInfoFromBytes(self, isRequest, rawBytes):
"""Process request or response from raw bytes.
Returns IRequestInfo or IResponseInfo respectively.
Use getInfo instead if you have access to an IHttpRequestResponse
object. It allows you to use all methods like IRequestInfo.getUrl()
later.
Args:
* isRequest (bool): Set to true if rawBytes is a request. false if it's a
response.
* rawBytes (byte[]): Raw bytes containing the request or response.
"""
if isRequest:
return self.helpers.analyzeRequest(rawBytes)
else:
return self.helpers.analyzeResponse(rawBytes)
def getInfo(self, isRequest, requestResponse):
"""Process request or response from IHttpRequestResponse.
Returns IRequestInfo or IResponseInfo respectively.
This method is preferable to getInfoFromBytes.
Args:
* isRequest (bool): Set to true if rawBytes is a request. false if it's
a response.
* requestResponse (IHttpRequestResponse): Object containing the request
or the response.
"""
if isRequest:
return self.helpers.analyzeRequest(requestResponse)
else:
return self.helpers.analyzeResponse(requestResponse.getResponse())
def getBodyFromBytes(self, isRequest, rawBytes):
"""Extracts the body bytes from a request or response raw bytes.
Returns a byte[] containing the body of the request or response.
Args:
* isRequest (bool): Set to true if rawBytes is a request. false if it's a
response.
* rawBytes (byte[]): Raw bytes containing the request or response.
"""
info = self.getInfoFromBytes(isRequest, rawBytes)
        return rawBytes[info.getBodyOffset():]
def getBody(self, isRequest, requestResponse):
"""Extracts the body bytes of an IHttpRequestResponse object.
Returns a byte[] containing the body of the request or response.
Args:
* isRequest (bool): Set to true if rawBytes is a request. false if it's a
response.
* requestResponse (IHttpRequestResponse): Object containing the request
or the response.
"""
info = self.getInfo(isRequest, requestResponse)
if isRequest:
return requestResponse.getRequest()[info.getBodyOffset():]
else:
return requestResponse.getResponse()[info.getBodyOffset():]
def getHeaders(self, info):
"""Extract the headers from an IRequestInfo or IResponseInfo object.
Returns a Headers object with the headers.
Args:
* info (IRequestInfo or IResponseInfo): Request info. Use the output
from getInfo or getInfoFromBytes.
"""
from headers import Headers
hdr = Headers()
# this is IRequestInfo.getHeaders() or IResponseInfo.getHeaders() from Burp
rawHdr = info.getHeaders()
hdr.importRaw(rawHdr)
return hdr
def setRequestResponse(self, isRequest, message, requestResponse):
"""Set the request or response for an IHttpRequestResponse object.
Returns the modified requestResponse.
Args:
* isRequest (bool): True if message is a request. False for response.
* message (byte[]): Raw bytes of the request or response. Usually comes
from buildHttpMessage.
* requestResponse (IHttpRequestResponse): RequestResponse to be
modified.
"""
# if isRequest is True, use setRequest. Otherwise, setResponse.
if isRequest:
requestResponse.setRequest(message)
else:
requestResponse.setResponse(message)
        return requestResponse
def runExternal(self, command, args):
"""Runs command with args via the command line.
For the sake of simplicity, everything after the first item will be in a
list of strings.
Executes "command args[0] args[1] ...".
Security implication: This is code-execution-as-a-service.
Args:
* command (string): Name of the command.
* args (list of strings): Arguments in a Python list.
"""
# alternatively, we could accept a string containing all the commands,
# then run shlex.split and pass the result to popen.
from subprocess import Popen, PIPE
import sys
# insert the command at the start of the list, everything gets shifted.
        args.insert(0, command)
# run everything
proc = Popen(args, stdout=PIPE, stderr=PIPE)
output = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
sys.stdout.write(err)
return output
def setHighlight(self, color, requestResponse):
"""Set the highlight color for requestResponse in Burp's HTTP History.
Returns the modified requestResponse.
Args:
* color (string): Highlight color.
One of: red, orange, yellow, green, cyan, blue, pink, magenta, gray
* requestResponse (IHttpRequestResponse) RequestResponse to be
modified.
"""
validColors = ["red","orange","yellow","green","cyan","blue","pink","magenta","gray"]
# convert the input to lowercase.
color = color.lower()
        if color not in validColors:
            # unknown color: leave the requestResponse unchanged
            return requestResponse
requestResponse.setHighlight(color)
return requestResponse
def bytesToString(self, data):
# type: (bytearray) -> (str)
"""Converts a byte[] to string.
Args:
* data (bytearray): Byte array to be converted to string."""
return self.helpers.bytesToString(data)
def getPath(self, reqResp):
# type: (IHttpRequestResponse) -> (str)
"""Analyzes a byte[] of a request and returns the path.
Args:
* reqResp (IHttpRequestResponse): The RequestResponse with the path."""
if reqResp is None:
return ""
info = self.helpers.analyzeRequest(reqResp)
return info.getUrl().getFile()
def burpToolName(self, toolFlag):
# type: (int) -> (str)
"""Returns the descriptive name for the Burp tool identified by
toolFlag.
Args:
* toolFlag (int): The flag representing the Burp tool."""
return self.callbacks.getToolName(toolFlag)
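
# Typical wiring inside a Burp extension (a sketch; IBurpExtender and
# registerExtenderCallbacks come from the Burp extender API, not from this module):
#
#   from burp import IBurpExtender
#   from burputils import BurpUtils
#
#   class BurpExtender(IBurpExtender):
#       def registerExtenderCallbacks(self, callbacks):
#           self.utils = BurpUtils(callbacks)
#           callbacks.setExtensionName("example extension")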
| 2.46875 | 2 |
adoptions/apps.py | Pruthviraj98/Wisdompets_linkedInLearning | 0 | 12786986 | from django.apps import AppConfig
class AdoptionsConfig(AppConfig):
name = 'adoptions'
| 1.164063 | 1 |
src/cake/registry.py | anlongfei/cake | 14 | 12786987 | """Utilities for querying the Windows registry.
@see: Cake Build System (http://sourceforge.net/projects/cake-build)
@copyright: Copyright (c) 2010 <NAME>, <NAME>.
@license: Licensed under the MIT license.
"""
import _winreg as winreg # Do this so Python 2to3 conversion works.
import sys
import cake.system
_shownWow64Warning = False
# Define locally here since some versions of the winreg module don't have them
KEY_WOW64_64KEY = 0x0100
KEY_WOW64_32KEY = 0x0200
if cake.system.isWindows64():
_readAccessModes = (winreg.KEY_READ | KEY_WOW64_64KEY, winreg.KEY_READ | KEY_WOW64_32KEY)
else:
_readAccessModes = (winreg.KEY_READ,)
def queryString(key, subKey, name):
"""Queries a string value from the Windows registry.
On 64-bit Windows this function will first try to query the value from
the 64-bit registry. If the value doesn't exist there it will then try to
find the value in the 32-bit registry.
@param key: The key to query, eg: winreg.HKEY_LOCAL_MACHINE
@type key: string
@param subKey: The subkey to query, eg: r"SOFTWARE\Microsoft"
@type subKey: string
@param name: The name to query, eg: "InstallDir"
@type name: string
@return: The value queried.
@rtype: string
@raise WindowsError: If the value could not be found/read.
"""
for sam in _readAccessModes:
try:
keyHandle = winreg.OpenKey(key, subKey, 0, sam)
try:
return str(winreg.QueryValueEx(keyHandle, name)[0])
finally:
winreg.CloseKey(keyHandle)
except WindowsError:
if sam is _readAccessModes[-1]:
raise
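
# Example call (arguments mirror the docstring; the exact subkey/value depend on the
# installed software):
#   install_dir = queryString(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft", "InstallDir")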
| 2.609375 | 3 |
.github/workflows/guess-the-word-minigame/guesss_the_word.py | darlingtonjones/guesss_the_word | 0 | 12786988 | import random
lives = 5  # this is the starting number of lives
words = ['antelope', 'planet', 'science', 'measurement',
'africa', 'space', 'systems', 'continental']
secret_word = random.choice(words)
clue = list('?' * len(secret_word))  # one placeholder per letter of the secret word
heart_symbol = u'\u2764'
print(heart_symbol * 5)
guessed_word_correctly = False
def update_clue(guessed_letter, secret_word, clue):
index = 0
while index < len(secret_word):
if guessed_letter == secret_word[index]:
clue[index] = guessed_letter
index = index + 1
while lives > 0:
print(clue)
print('Lives left: ' + heart_symbol * lives)
guess = input('Guess a letter or the whole word: ')
if guess == secret_word:
guessed_word_correctly = True
break
if guess in secret_word:
update_clue(guess, secret_word, clue)
else:
print('Incorrect. You lose a life')
lives = lives - 1
if guessed_word_correctly:
print('You won! The secret word was ' + secret_word)
else:
print('You lost! The secret word was ' + secret_word)
| 3.890625 | 4 |
conans/test/functional/toolchains/cmake/test_cmake_toolchain_xcode_flags.py | Mu-L/conan | 1 | 12786989 | <gh_stars>1-10
import textwrap
import platform
import os
import pytest
from conans.test.utils.tools import TestClient
def _add_message_status_flags(client):
cmakelists_path = os.path.join(client.current_folder, "CMakeLists.txt")
with open(cmakelists_path, "a") as cmakelists_file:
cmakelists_file.write('message(STATUS "CONAN_C_FLAGS: ${CONAN_C_FLAGS}")\n')
cmakelists_file.write('message(STATUS "CONAN_CXX_FLAGS: ${CONAN_CXX_FLAGS}")\n')
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only OSX")
@pytest.mark.parametrize("op_system,os_version,sdk,arch", [
("watchOS", "8.1", "watchos", "armv7k"),
("tvOS", "13.2", "appletvos", "armv8")
])
def test_cmake_apple_bitcode_arc_and_visibility_flags_enabled(op_system, os_version, sdk, arch):
profile = textwrap.dedent("""
include(default)
[settings]
os={}
os.version={}
os.sdk={}
arch={}
[conf]
tools.apple:enable_bitcode=True
tools.apple:enable_arc=True
tools.apple:enable_visibility=True
""".format(op_system, os_version, sdk, arch))
client = TestClient(path_with_spaces=False)
client.save({"host": profile}, clean_first=True)
client.run("new hello/0.1 --template=cmake_lib")
_add_message_status_flags(client)
client.run("install . --profile:build=default --profile:host=host")
toolchain = client.load(os.path.join("build", "generators", "conan_toolchain.cmake"))
# bitcode
assert 'set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE "YES")' in toolchain
assert 'set(CMAKE_XCODE_ATTRIBUTE_BITCODE_GENERATION_MODE "bitcode")' in toolchain
assert 'set(BITCODE "-fembed-bitcode")' in toolchain
# arc
assert 'set(FOBJC_ARC "-fobjc-arc")' in toolchain
assert 'set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC "YES")' in toolchain
# visibility
assert 'set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN "NO")' in toolchain
assert 'set(VISIBILITY "-fvisibility=default")' in toolchain
client.run("create . --profile:build=default --profile:host=host -tf None")
# flags
assert "-- CONAN_C_FLAGS: -fembed-bitcode -fobjc-arc" in client.out
assert "-- CONAN_CXX_FLAGS: -fembed-bitcode -fvisibility=default -fobjc-arc" in client.out
assert "[100%] Built target hello" in client.out
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only OSX")
@pytest.mark.parametrize("op_system,os_version,sdk,arch", [
("watchOS", "8.1", "watchos", "armv7k"),
("tvOS", "13.2", "appletvos", "armv8")
])
def test_cmake_apple_bitcode_arc_and_visibility_flags_enabled_and_xcode_generator(op_system, os_version, sdk, arch):
"""
Testing when all the Bitcode, ARC and Visibility are enabled, and Xcode as generator.
Note: When using CMake and Xcode as generator, the C/CXX flags do not need to be appended.
"""
profile = textwrap.dedent("""
include(default)
[settings]
os={}
os.version={}
os.sdk={}
arch={}
[conf]
tools.apple:enable_bitcode=True
tools.apple:enable_arc=True
tools.apple:enable_visibility=True
""".format(op_system, os_version, sdk, arch))
client = TestClient(path_with_spaces=False)
client.save({"host": profile}, clean_first=True)
client.run("new hello/0.1 --template=cmake_lib")
_add_message_status_flags(client)
client.run("create . --profile:build=default --profile:host=host -tf None "
"-c tools.cmake.cmaketoolchain:generator=Xcode")
assert "** BUILD SUCCEEDED **" in client.out
# flags are not appended when Xcode generator is used
for line in str(client.out).splitlines():
if "CONAN_C_FLAGS:" in line:
assert "-- CONAN_C_FLAGS:" == line.strip()
if "CONAN_CXX_FLAGS:" in line:
assert "-- CONAN_CXX_FLAGS: -stdlib=libc++" == line.strip()
break
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only OSX")
@pytest.mark.parametrize("op_system,os_version,sdk,arch", [
("watchOS", "8.1", "watchos", "armv7k"),
("tvOS", "13.2", "appletvos", "armv8")
])
def test_cmake_apple_bitcode_arc_and_visibility_flags_disabled(op_system, os_version, sdk, arch):
profile = textwrap.dedent("""
include(default)
[settings]
os={}
os.version={}
os.sdk={}
arch={}
[conf]
tools.apple:enable_bitcode=False
tools.apple:enable_arc=False
tools.apple:enable_visibility=False
""".format(op_system, os_version, sdk, arch))
client = TestClient(path_with_spaces=False)
client.save({"host": profile}, clean_first=True)
client.run("new hello/0.1 --template=cmake_lib")
_add_message_status_flags(client)
client.run("install . --profile:build=default --profile:host=host")
toolchain = client.load(os.path.join("build", "generators", "conan_toolchain.cmake"))
# bitcode
assert 'set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE "NO")' in toolchain
assert 'set(CMAKE_XCODE_ATTRIBUTE_BITCODE_GENERATION_MODE "bitcode")' not in toolchain
assert 'set(BITCODE "-fembed-bitcode")' not in toolchain
# arc
assert 'set(FOBJC_ARC "-fno-objc-arc")' in toolchain
assert 'set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC "NO")' in toolchain
# visibility
assert 'set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN "YES")' in toolchain
assert 'set(VISIBILITY "-fvisibility=hidden -fvisibility-inlines-hidden")' in toolchain
client.run("create . --profile:build=default --profile:host=host -tf None")
# flags
assert "-- CONAN_C_FLAGS: -fno-objc-arc" in client.out
assert "-- CONAN_CXX_FLAGS: -fvisibility=hidden -fvisibility-inlines-hidden -fno-objc-arc" in client.out
assert "[100%] Built target hello" in client.out
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only OSX")
@pytest.mark.parametrize("op_system,os_version,sdk,arch", [
("watchOS", "8.1", "watchos", "armv7k"),
("tvOS", "13.2", "appletvos", "armv8")
])
def test_cmake_apple_bitcode_arc_and_visibility_flags_are_none(op_system, os_version, sdk, arch):
"""
Testing what happens when any of the Bitcode, ARC or Visibility configurations are not defined.
"""
profile = textwrap.dedent("""
include(default)
[settings]
os={}
os.version={}
os.sdk={}
arch={}
""".format(op_system, os_version, sdk, arch))
client = TestClient(path_with_spaces=False)
client.save({"host": profile}, clean_first=True)
client.run("new hello/0.1 --template=cmake_lib")
_add_message_status_flags(client)
client.run("install . --profile:build=default --profile:host=host")
toolchain = client.load(os.path.join("build", "generators", "conan_toolchain.cmake"))
# bitcode
assert 'set(CMAKE_XCODE_ATTRIBUTE_ENABLE_BITCODE "NO")' not in toolchain
assert 'set(CMAKE_XCODE_ATTRIBUTE_BITCODE_GENERATION_MODE "bitcode")' not in toolchain
assert 'set(BITCODE "-fembed-bitcode")' not in toolchain
# arc
assert 'set(FOBJC_ARC "-' not in toolchain
assert 'set(CMAKE_XCODE_ATTRIBUTE_CLANG_ENABLE_OBJC_ARC' not in toolchain
# visibility
assert 'set(CMAKE_XCODE_ATTRIBUTE_GCC_SYMBOLS_PRIVATE_EXTERN' not in toolchain
assert 'set(VISIBILITY "-' not in toolchain
client.run("create . --profile:build=default --profile:host=host -tf None")
# flags are not appended
for flag in ["-fembed-bitcode", "-fno-objc-arc", "-fobjc-arc", "-fvisibility"]:
assert flag not in client.out
assert "[100%] Built target hello" in client.out
| 2 | 2 |
francis/cogs/profile.py | trantinan2512/Francis | 0 | 12786990 | <gh_stars>0
from discord.ext import commands
import discord
# import asyncio
# import operator
# from pprint import pprint
from utils.user import get_user_obj
class ProfileCommands(commands.Cog):
"""Profile related commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(name='eprofile')
async def _check_event_profile(self, context):
# prefix = self.bot.command_prefix
user = get_user_obj(context.author)
hint_count = len(user.investigation_info.discovered_hints)
await context.say_as_embed(
title=f'[{user.discord_name}] Event Profile',
description=''
f'**2018 Year-End Event**\n'
f'• Discovered Hints Counter: **{hint_count}**'
)
def setup(bot):
bot.add_cog(ProfileCommands(bot))
| 2.4375 | 2 |
way/python/exercises/checkio/checkio005.py | only-romano/junkyard | 0 | 12786991 |
def best_stock(data):
    best_price, best = 0, ''
    for name, price in data.items():
        if price > best_price:
            best_price = price
            best = name
return best
if __name__ == '__main__':
print("Example:")
print(best_stock({
'CAC': 10.0,
'ATX': 390.2,
'WIG': 1.2
}))
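    # expected output: 'ATX' (the highest-priced stock in the example above)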
| 3.46875 | 3 |
utils/initial_state.py | fhoeb/fh-thesis-scripts | 2 | 12786992 | import numpy as np
import mpnum as mp
import tmps
from tmps.utils import state_reduction_as_ndarray, convert, broadcast_number_ground_state, get_thermal_state
import time
from scipy.special import factorial
import math
def get_spin_initial_state(theta, mpa_type='mps'):
"""
Returns the initial state for the spin impurity:
psi_0 = cos(theta) |1> + sin(theta) |0>
in the desired tensor network form (mps, mpo, pmps)
"""
ground = np.array([0.0, np.sin(theta)])
excited = np.array([np.cos(theta), 0.0])
return convert.to_mparray(ground + excited, mpa_type)
def get_spin_boson_0T_chain_initial_state(theta, bath_local_dim, nof_coefficients):
"""
Returns the full initial state (vacuum state) for 0T chain with nof_coefficients sites and a local dimension of
bath_local_dim.
"""
sys_psi_0 = get_spin_initial_state(theta)
bath_psi_0 = broadcast_number_ground_state(bath_local_dim, nof_coefficients)
return mp.chain([sys_psi_0, bath_psi_0])
def get_spin_boson_0T_star_initial_state(theta, system_index, bath_local_dim, nof_coefficients):
"""
Returns the full initial state (vacuum state) for 0T star with nof_coefficients sites and a local dimension of
bath_local_dim. The impurity is located at system_index.
"""
sys_psi_0 = get_spin_initial_state(theta)
# Initial states of the bath sites left and right of the system:
left_bath_psi_0, right_bath_psi_0 = tmps.utils.broadcast_number_ground_state(bath_local_dim, system_index), \
tmps.utils.broadcast_number_ground_state(bath_local_dim,
nof_coefficients - system_index)
return mp.chain([left_bath_psi_0, sys_psi_0, right_bath_psi_0]
if left_bath_psi_0 is not None else [sys_psi_0, right_bath_psi_0])
def _compute_finiteT_chain_residual(psi_0, mpa_type, dims):
"""
Returns residual of the finite-temperature initial state of the bath. List of populations in
the highest energy state of each mode
"""
res = []
for index, dim in enumerate(dims):
res.append(np.real(state_reduction_as_ndarray(psi_0, mpa_type, startsite=index)[dim - 1, dim - 1]))
return res
def get_spin_boson_finiteT_chain_initial_state(theta, beta, h_site, h_bond, bath_local_dim, nof_coefficients,
mpa_type='pmps',
nof_steps=None, state_compression_kwargs=None,
op_compression_kwargs=None, second_order_trotter=False,
psi_0_compression_kwargs=None, residual=True,
force_pmps_evolution=True, verbose=True):
"""
Computes the initial state for the finite temperature spin_boson model in chain geometry.
The bath state is computed via imaginary time evolution.
:param theta: Spin parameter for psi_0 = cos(theta) |1> + sin(theta) |0>
:param beta: Inverse temperature of the bath
:param h_site: Bath local Hamiltonian list
:param h_bond: Bath nearest neighbor coupling Hamiltonian list
:param bath_local_dim: Local dimension of the bath
:param nof_coefficients: Number of bath sites
:param mpa_type: MPS type of the chain (mps, mpo, pmps)
:param nof_steps: Number of steps for the imaginary time evolution
:param state_compression_kwargs: Keyword args for the imaginary time evolution compression
:param op_compression_kwargs: Keyword args for the imaginary time evolution operator pre-compression
:param second_order_trotter: Set True for second order trotter based imaginary time evolution
:param psi_0_compression_kwargs: Keyword args for the imaginary time evolution initial state compression
:param residual: Set True to compute List of populations in the highest energy state of each bath mode.
:param force_pmps_evolution: Set True to always use pmps for the imaginary time evolution
:param verbose: Set true to make imaginary time evolution verbose
:return: Initial state of system and bath as mps, mpo or pmps, info dict
"""
assert mpa_type == 'mpo' or mpa_type == 'pmps'
if nof_steps is None:
nof_steps = int(beta*100)
    t0_wall = time.perf_counter()
    t0_proc = time.process_time()
if isinstance(bath_local_dim, int):
dims = [bath_local_dim] * nof_coefficients
else:
raise AssertionError('Unsupported data type for fixed_dim')
psi_0, info = tmps.chain.thermal.from_hamiltonian(beta, mpa_type, h_site, h_bond,
nof_steps=nof_steps,
state_compression_kwargs=state_compression_kwargs,
op_compression_kwargs=op_compression_kwargs,
second_order_trotter=second_order_trotter,
psi_0_compression_kwargs=psi_0_compression_kwargs,
force_pmps_evolution=force_pmps_evolution,
verbose=verbose)
    tf_proc = time.process_time() - t0_proc
    tf_wall = time.perf_counter() - t0_wall
info['walltime'] = tf_wall
info['cpu_time'] = tf_proc
info['bath_dims'] = dims
if residual:
res = _compute_finiteT_chain_residual(psi_0, mpa_type, dims)
max_res = np.max(res)
info['res'] = res
info['max_res'] = max_res
else:
info['res'] = None
info['max_res'] = None
    if residual:
        print('Finite T ground state residual: ', info['res'])
        print('Finite T ground state max. residual: ', info['max_res'])
sys_psi_0 = get_spin_initial_state(theta, mpa_type=mpa_type)
return mp.chain([sys_psi_0, psi_0]), info
def get_star_local_dims(beta, xi, fixed_dim=None, high_energy_pop=1e-20, sitewise=False):
"""
Computes the local dimension for the finite temperature star bath for the spin_boson model.
:param beta: Inverse temperature of the bath
:param xi: Star geometry bath energies
:param fixed_dim: Uses this fixed dimension for the star evolution
:param high_energy_pop: Chooses local dimension, such that the population in the highest energy of each bath mode
stays below this threshold
:param sitewise: If set False the local dimension is chosen uniformly for all sites to be the
highest local dimension from the high_energy_pop calculation.
:returns: List of bath dimensions
"""
if fixed_dim is None:
dims = []
for xi_i in xi:
a = 1 / (np.exp(beta * xi_i) - 1)
dims.append(math.ceil(1 / (beta * xi_i) * np.log(1 + 1 / (high_energy_pop * a))))
if sitewise:
return dims
else:
return [np.max(dims)]*len(xi)
else:
if isinstance(fixed_dim, (list, tuple)):
assert len(fixed_dim) == len(xi)
return fixed_dim
elif isinstance(fixed_dim, int):
return [fixed_dim]*len(xi)
else:
raise AssertionError('Unsupported data type for fixed_dim')
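
# Dimension-choice note: for a thermal bosonic mode with mean occupation
# a = 1/(exp(beta*xi) - 1), the population of the top level of a Gibbs state truncated
# to n levels is (exp(beta*xi) - 1)/(exp(beta*xi*n) - 1). Requiring this to stay below
# high_energy_pop gives n >= 1/(beta*xi) * log(1 + 1/(high_energy_pop*a)), which is the
# expression used above for dims.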
def _compute_finite_T_star_residual(beta, xi, dims):
"""
Returns residual of the finite-temperature initial state of the bath. List of populations in
the highest energy state of each mode
"""
res = []
for xi_i, dim in zip(xi, dims):
res.append((np.exp(beta*xi_i) - 1)/(np.exp(beta*xi_i * dim)))
return res
def get_spin_boson_finiteT_star_initial_state(theta, beta, system_index, xi, mpa_type='pmps', fixed_dim=None,
high_energy_pop=1e-20, sitewise=False, residual=True):
"""
Computes the initial state for the finite temperature spin_boson model in star geometry.
The bath state is computed via imaginary time evolution.
:param theta: Spin parameter for psi_0 = cos(theta) |1> + sin(theta) |0>
:param beta: Inverse temperature of the bath
:param system_index: Impurity position in the auxiliary chain
:param xi: Star geometry bath energies
:param mpa_type: Type: mps, mpo or pmps of the initial state
:param fixed_dim: Uses this fixed dimension for the star evolution
:param high_energy_pop: Chooses local dimension, such that the population in the highest energy of each bath mode
stays below this threshold
:param sitewise: If set False the local dimension is chosen uniformly for all sites to be the
highest local dimension from the high_energy_pop calculation.
:param residual: Computes list of populations in the highest energy state of each mode
:return: Initial state of system and bath as mps, mpo or pmps, info dict
"""
assert mpa_type == 'mpo' or mpa_type == 'pmps'
    t0_wall = time.perf_counter()
    t0_proc = time.process_time()
dims = get_star_local_dims(beta, xi, fixed_dim=fixed_dim, high_energy_pop=high_energy_pop, sitewise=sitewise)
ops = [xi[i] * np.arange(dim) for i, dim in enumerate(dims)]
if system_index > 0:
left_state = get_thermal_state(beta, mpa_type, ops[:system_index], to_cform=None)
right_state = get_thermal_state(beta, mpa_type, ops[system_index:], to_cform=None)
else:
left_state = None
right_state = get_thermal_state(beta, mpa_type, ops, to_cform=None)
    tf_proc = time.process_time() - t0_proc
    tf_wall = time.perf_counter() - t0_wall
info = dict()
info['walltime'] = tf_wall
info['cpu_time'] = tf_proc
info['bath_dims'] = dims
if residual:
info['res'] = _compute_finite_T_star_residual(beta, xi, dims)
info['max_res'] = np.max(info['res'])
else:
info['res'] = None
info['max_res'] = None
sys_psi_0 = get_spin_initial_state(theta, mpa_type=mpa_type)
return mp.chain([left_state, sys_psi_0, right_state]) if left_state is not None else \
mp.chain([sys_psi_0, right_state]), info
def get_boson_boson_0T_chain_initial_state(alpha, nof_coefficients, cutoff_dim):
"""
Initial state for the Boson-Boson model in chain geometry (see Sec. 4.4.3 of the thesis)
:param alpha: accuracy alpha for the impurity coherent state
:param nof_coefficients: Number of bath sites
:param cutoff_dim: Local dimension of the system and impurity
:return: Initial state in MPS form
"""
pop = lambda x: np.exp(-np.abs(alpha) ** 2 / 2) * alpha ** x / np.sqrt(factorial(x))
sys_psi_0 = convert.to_mparray(pop(np.arange(cutoff_dim)), 'mps')
bath_psi_0 = broadcast_number_ground_state(cutoff_dim, nof_coefficients)
return mp.chain([sys_psi_0, bath_psi_0])
def get_boson_boson_0T_star_initial_state(alpha, system_index, nof_coefficients, cutoff_dim):
"""
Initial state for the Boson-Boson model in star geometry (see Sec. 4.4.3 of the thesis)
:param alpha: accuracy alpha for the impurity coherent state
:param system_index: Index of the impurity in the auxiliary chain
:param nof_coefficients: Number of bath sites
:param cutoff_dim: Local dimension of the system and impurity
:return: Initial state in MPS form
"""
pop = lambda x: np.exp(-np.abs(alpha) ** 2 / 2) * alpha ** x / np.sqrt(factorial(x, exact=True))
sys_psi_0 = convert.to_mparray(pop(np.arange(cutoff_dim)), 'mps')
# Initial states of the bath sites left and right of the system:
left_bath_psi_0, right_bath_psi_0 = tmps.utils.broadcast_number_ground_state(cutoff_dim, system_index), \
tmps.utils.broadcast_number_ground_state(cutoff_dim,
nof_coefficients - system_index)
return mp.chain([left_bath_psi_0, sys_psi_0, right_bath_psi_0]
if left_bath_psi_0 is not None else [sys_psi_0, right_bath_psi_0])
| 2.453125 | 2 |
0x11-python-network_1/2-post_email.py | Nahi-Terefe/alx-higher_level_programming | 0 | 12786993 | #!/usr/bin/python3
""" post email """
import urllib.request
import urllib.parse
import sys
if __name__ == "__main__":
value = {'email': sys.argv[2]}
data = urllib.parse.urlencode(value)
data = data.encode('utf-8')
req = urllib.request.Request(sys.argv[1], data)
with urllib.request.urlopen(req) as response:
res = response.read().decode(encoding='UTF-8')
print(res)
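
# Usage sketch (URL and address are illustrative): ./2-post_email.py <URL> <email>
#   ./2-post_email.py http://example.com/post_email user@example.com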
| 3.0625 | 3 |
profiles/migrations/0002_profile_active.py | edgarceron/agent_console | 0 | 12786994 | # Generated by Django 3.0.4 on 2020-04-13 21:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='active',
field=models.BooleanField(default=True, verbose_name='Activo/Inactivo'),
),
]
| 1.5625 | 2 |
0004.Median of Two Sorted Arrays/test.py | zhlinh/leetcode | 0 | 12786995 | <reponame>zhlinh/leetcode<filename>0004.Median of Two Sorted Arrays/test.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from solution import Solution
nums1 = [1]
nums2 = [2]
sol = Solution()
result = sol.findMedianSortedArrays(nums1, nums2)
print(result)
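# For nums1 = [1] and nums2 = [2] the merged array is [1, 2], so the expected median is 1.5.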
| 2.578125 | 3 |
examples/withRaycing/11_Waves/coherentModePropagation.py | kklmn/xrt | 71 | 12786996 | <filename>examples/withRaycing/11_Waves/coherentModePropagation.py
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__date__ = "8 Dec 2021"
# import matplotlib as mpl
# mpl.use('agg')
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import numpy as np
import xrt.plotter as xrtp
import xrt.runner as xrtr
import xrt.backends.raycing as raycing
import xrt.backends.raycing.sources as rs
import xrt.backends.raycing.screens as rsc
import xrt.backends.raycing.oes as roe
import xrt.backends.raycing.apertures as ra
import xrt.backends.raycing.run as rr
import xrt.backends.raycing.waves as rw
import xrt.backends.raycing.modes as rm
# caseE = u'4keV'
caseE = u'7keV'
# caseE = u'1Å'
if caseE == u'4keV':
E0, harmE0, dE0 = 4040., 1, 0.
elif caseE == u'7keV':
E0, harmE0, dE0 = 7100., 3, 0.
elif caseE == u'1Å':
E0, harmE0, dE0 = 12398., 7, 0.
else:
raise ValueError("unknown energy case")
slitFEPos = 20000.
p, q = 40000., 40000.
pitch = 3.5e-3
sin2Pitch, cos2Pitch = np.sin(2*pitch), np.cos(2*pitch)
caxisUnit = 'eV'
BW = 3.6e-4
eMinRays, eMaxRays = E0*(1-BW/2.), E0*(1+BW/2.)
eEpsilonX = 310e-12 # mrad
eEpsilonZ = 5.5e-12 # mrad
betaX = 9.539
betaZ = 1.982
accMax = 0.04, 0.02 # mrad
kwargs = dict(
name='CoSAXS U19',
eE=3., eI=0.25, eEspread=8e-4,
eEpsilonX=eEpsilonX*1e9, eEpsilonZ=eEpsilonZ*1e9,
betaX=betaX, betaZ=betaZ,
period=19.3, n=101, targetE=(E0+dE0, harmE0),
filamentBeam=True,
uniformRayDensity=True, # not strictly necessary
distE='BW',
# targetOpenCL='GPU',
# R0=slitFEPos,
xPrimeMax=accMax[0], zPrimeMax=accMax[1],
xPrimeMaxAutoReduce=False, zPrimeMaxAutoReduce=False,
eMin=eMinRays, eMax=eMaxRays)
nElectrons = 500
nModes = 10
nsamples = 512*256
bins = 128
ppb = 1
focusLim = 64, 16 # xmax, ymax in µm
def build_beamline():
bl = raycing.BeamLine()
bl.source = rs.Undulator(bl, **kwargs)
opening = [s*slitFEPos*1e-3 for s in
(-accMax[0], accMax[0], -accMax[1], accMax[1])]
bl.slitFE = ra.RectangularAperture(
bl, 'FE slit', [0, slitFEPos, 0],
('left', 'right', 'bottom', 'top'), opening)
bl.limPhysXmax = accMax[0]*1e-3*1.2 * p
bl.limPhysZmax = accMax[1]*1e-3*1.2 * p
bl.limPhysYmax = bl.limPhysZmax / np.sin(pitch)
focusPos = [0, p + q*cos2Pitch, q*sin2Pitch]
bl.oe = roe.EllipticalMirrorParam(
bl, 'M1', center=[0, p, 0], pitch=pitch, p=p, q=q,
limPhysX=[-bl.limPhysXmax, bl.limPhysXmax],
limPhysY=[-bl.limPhysYmax, bl.limPhysYmax])
bl.fsmF = rsc.Screen(bl, 'inFocus', focusPos, z=(0, -sin2Pitch, cos2Pitch))
return bl
def run_process_wave(bl):
bl.iBeam = bl.iBeam+1 if hasattr(bl, 'iBeam') else 0
waveOElocal = bl.oe.prepare_wave(bl.slitFE, nsamples)
waveScreenF = bl.fsmF.prepare_wave(bl.oe, bl.fsmFX, bl.fsmFZ)
waveFElocal = bl.savedBeams[bl.iBeam]
beamToOE = rw.diffract(waveFElocal, waveOElocal)
beamOEglobal, beamOElocal = bl.oe.reflect(
beamToOE, noIntersectionSearch=True)
rw.diffract(beamOElocal, waveScreenF)
outDict = {'beamFElocal': waveFElocal,
'beamScreenF': waveScreenF}
return outDict
def run_process_hybr(bl):
bl.iBeam = bl.iBeam+1 if hasattr(bl, 'iBeam') else 0
beamSource = bl.savedBeams[bl.iBeam]
waveScreenF = bl.fsmF.prepare_wave(bl.oe, bl.fsmFX, bl.fsmFZ)
beamFElocal = bl.slitFE.propagate(beamSource)
beamOEglobal, beamOElocal = bl.oe.reflect(beamSource)
rw.diffract(beamOElocal, waveScreenF)
outDict = {'beamFElocal': beamFElocal,
'beamScreenF': waveScreenF}
return outDict
def run_process_rays(bl):
bl.iBeam = bl.iBeam+1 if hasattr(bl, 'iBeam') else 0
beamSource = bl.savedBeams[bl.iBeam]
beamFElocal = bl.slitFE.propagate(beamSource)
beamOEglobal, beamOElocal = bl.oe.reflect(beamSource)
beamScreenF = bl.fsmF.expose(beamOEglobal)
outDict = {'beamFElocal': beamFElocal,
'beamScreenF': beamScreenF}
return outDict
def run_process_view(bl):
beamSource = bl.sources[0].shine(fixedEnergy=E0)
beamFElocal = bl.slitFE.propagate(beamSource)
beamOEglobal, beamOElocal = bl.oe.reflect(beamSource)
beamScreenF = bl.fsmF.expose(beamOEglobal)
outDict = {'beamSource': beamSource,
'beamFElocal': beamFElocal,
'beamScreenF': beamScreenF}
bl.beams = outDict
bl.prepare_flow()
return outDict
def add_plot(plots, plot, what, basename):
plots.append(plot)
plot.baseName = '{0}-{1}-{2}-E={3:05.0f}'.format(
what, basename, plot.title, E0)
plot.saveName = [plot.baseName + '.png', ]
class MyXYCPlot(xrtp.XYCPlot):
def update_user_elements(self):
if not hasattr(self, 'iMode'):
self.iMode = 0
else:
self.iMode += 1
N, what = self.textPanelParams
if what.endswith('modes'):
if self.iMode == 0:
self.textPanel.set_text('mode 0 of {0}'.format(N))
else:
self.textPanel.set_text('modes 0 to {0} of {1}'.format(
self.iMode, N))
self.save(suffix='-{0}'.format(self.iMode))
else:
if self.iMode == 0:
self.textPanel.set_text('beam 1 of {0}'.format(N))
else:
self.textPanel.set_text('beams 1 to {0} of {1}'.format(
self.iMode+1, N))
self.save(suffix='-{0}'.format(self.iMode+1))
def define_plots(bl, what, nEs, basename):
plots = []
ratio1 = int(accMax[0] / accMax[1])
plot = MyXYCPlot(
'beamFElocal', (1,),
xaxis=xrtp.XYCAxis('x', 'mm', bins=bins*ratio1, ppb=ppb),
yaxis=xrtp.XYCAxis('z', 'mm', bins=bins, ppb=ppb),
caxis=xrtp.XYCAxis('energy', caxisUnit, bins=bins, ppb=ppb))
plot.xaxis.limits = bl.slitFE.opening[:2]
plot.yaxis.limits = bl.slitFE.opening[2:]
plot.xaxis.fwhmFormatStr = '%.2f'
plot.yaxis.fwhmFormatStr = '%.2f'
add_plot(plots, plot, what, basename)
plot.textPanel = plot.fig.text(1.02, 0.4, '',
transform=plot.ax1dHistX.transAxes, size=10,
color='r', ha='left')
plot.textPanelParams = nEs, what
ratio2 = int(focusLim[0] / focusLim[1])
plot = MyXYCPlot(
'beamScreenF', (1,),
xaxis=xrtp.XYCAxis('x', u'µm', bins=bins*ratio2, ppb=ppb),
yaxis=xrtp.XYCAxis('z', u'µm', bins=bins, ppb=ppb),
caxis=xrtp.XYCAxis('energy', caxisUnit, bins=bins, ppb=ppb),
fluxKind='EsPCA'
)
plot.xaxis.limits = [-focusLim[0], focusLim[0]]
plot.yaxis.limits = [-focusLim[1], focusLim[1]]
plot.xaxis.fwhmFormatStr = '%.1f'
plot.yaxis.fwhmFormatStr = '%.1f'
add_plot(plots, plot, what, basename)
ax = plot.xaxis
edges = np.linspace(ax.limits[0], ax.limits[1], ax.bins+1)
bl.fsmFX = (edges[:-1] + edges[1:]) * 0.5 / ax.factor
ax = plot.yaxis
edges = np.linspace(ax.limits[0], ax.limits[1], ax.bins+1)
bl.fsmFZ = (edges[:-1] + edges[1:]) * 0.5 / ax.factor
print('{0} points on the screen Exp'.format(len(bl.fsmFX)*len(bl.fsmFZ)))
plot.textPanel = plot.fig.text(1.02, 0.4, '',
transform=plot.ax1dHistX.transAxes, size=10,
color='r', ha='left')
plot.textPanelParams = nEs, what
caxisFactor = 1
for plot in plots:
if plot.caxis.label.startswith('energy'):
plot.caxis.limits = eMinRays*caxisFactor, eMaxRays*caxisFactor
plot.caxis.offset = E0
# plot.caxis.fwhmFormatStr = '%.2f'
plot.caxis.fwhmFormatStr = None
plot.fluxFormatStr = '%.1p'
return plots
def show_bl(basename):
bl = build_beamline()
rr.run_process = run_process_view
bl.glow(centerAt='M1')
# bl.glow(scale=[5e3, 10, 5e3], centerAt='xtal1')
def make_modes(basename):
bl = build_beamline()
limitsOrigin = [-focusLim[0]*1e-3, focusLim[0]*1e-3,
-focusLim[1]*1e-3, focusLim[1]*1e-3]
rm.make_and_save_modes(
bl, nsamples, nElectrons, nModes, nModes, E0, output='all',
basename=basename, limitsOrigin=limitsOrigin)
def use_modes(basename, what):
def get_flux(beam):
res = beam.Jss.sum() + beam.Jpp.sum()
print(res)
return res
bl = build_beamline()
bl.savedBeams, wAll, totalFlux = rm.use_saved(what, basename)
if what.endswith('fields'):
bl.savedBeams.sort(key=get_flux)
plots = define_plots(bl, what, len(wAll), basename)
if what.startswith('wave'):
rr.run_process = run_process_wave
elif what.startswith('hybr'):
rr.run_process = run_process_hybr
elif what.startswith('rays'):
rr.run_process = run_process_rays
else:
raise ValueError('unknown mode of propagation')
xrtr.run_ray_tracing(plots, repeats=nModes, beamLine=bl)
def main():
step = 1 # 0 to 2
basename = 'atFE-5000'
# basename = 'atFE'
if step == 0:
show_bl(basename)
elif step == 1:
make_modes(basename)
elif step == 2:
# what = 'wave-fields'
what = 'wave-modes'
# what = 'hybr-fields'
# what = 'hybr-modes'
# what = 'rays-fields'
# what = 'rays-modes'
use_modes(basename, what)
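
# Workflow sketch: step 0 only visualises the beamline; step 1 decomposes the undulator
# field into coherent modes and saves them under `basename`; step 2 loads the saved
# fields or modes and propagates them (wave, hybrid or ray variants) through the mirror
# to the focal screen plots.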
if __name__ == '__main__':
main()
| 2.171875 | 2 |
pursuit/reward.py | goncalo-rodrigues/thesis | 0 | 12786997 | <reponame>goncalo-rodrigues/thesis<filename>pursuit/reward.py<gh_stars>0
def get_reward_function(num_agents, world_size):
def reward(state, actions, next_state):
return 100 if next_state.terminal else -1
return reward
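
# Usage sketch (states are assumed to expose a boolean `terminal` flag, as used above):
#   reward_fn = get_reward_function(num_agents=4, world_size=(5, 5))
#   r = reward_fn(state, actions, next_state)   # 100 on capture (terminal), -1 otherwise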
| 1.992188 | 2 |
fairseq/models/models/mm2vec.py | houshengyuan/Parsing_enhanced_NMT | 0 | 12786998 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
from dataclasses import dataclass, field
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
GumbelVectorQuantizer,
LayerNorm,
MultiheadAttention,
SamePad,
TransposeLast,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import buffered_arange, index_put, is_xla_tensor
from torchvision.models.resnet import resnet50
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(["static", "uniform", "normal", "poisson"])
@dataclass
class MM2VecConfig(FairseqDataclass):
model_stage: int = field(
default=1,
metadata={"help": "model_stage=1 for training visual feature extractor only,"
"model_stage=2 for pretrain on all subnet"
"model_stage=? for fine-tune"},
)
extractor_mode: EXTRACTOR_MODE_CHOICES = field(
default="default",
metadata={
"help": "mode for feature extractor. default has a single group norm with d "
"groups in the first conv block, whereas layer_norm has layer norms in "
"every block (meant to use with normalize=True)"
},
)
encoder_layers: int = field(
default=12, metadata={"help": "num encoder layers in the transformer"}
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
encoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_attention_heads: int = field(
default=12, metadata={"help": "num encoder attention heads"}
)
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
# dropouts
dropout: float = field(
default=0.1, metadata={"help": "dropout probability for the transformer"}
)
attention_dropout: float = field(
default=0.1, metadata={"help": "dropout probability for attention weights"}
)
activation_dropout: float = field(
default=0.0, metadata={"help": "dropout probability after activation in FFN"}
)
encoder_layerdrop: float = field(
default=0.0, metadata={"help": "probability of dropping a tarnsformer layer"}
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
dropout_features: float = field(
default=0.0,
metadata={"help": "dropout to apply to the features (after feat extr)"},
)
final_dim: int = field(
default=0,
metadata={
"help": "project final representations and targets to this many dimensions."
"set to encoder_embed_dim is <= 0"
},
)
layer_norm_first: bool = field(
default=False, metadata={"help": "apply layernorm first in the transformer"}
)
audio_conv_feature_layers: str = field(
default="[(512, 10, 5, 0)] + [(512, 3, 2, 0)] * 4 + [(512, 2, 2, 0)] + [(512, 2, 2, 0)]",
metadata={
"help": "string describing convolutional feature extraction layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_bias: bool = field(
default=False, metadata={"help": "include bias in conv encoder"}
)
logit_temp: float = field(
default=0.1, metadata={"help": "temperature to divide logits by"}
)
quantize_targets: bool = field(
default=False, metadata={"help": "use quantized targets"}
)
quantize_input: bool = field(
default=False, metadata={"help": "use quantized inputs"}
)
same_quantizer: bool = field(
default=False, metadata={"help": "use same quantizer for inputs and targets"}
)
target_glu: bool = field(
default=False, metadata={"help": "adds projection + glu to targets"}
)
feature_grad_mult: float = field(
default=1.0, metadata={"help": "multiply feature extractor var grads by this"}
)
quantizer_depth: int = field(
default=1,
metadata={"help": "number of quantizer layers"},
)
quantizer_factor: int = field(
default=3,
metadata={
"help": "dimensionality increase for inner quantizer layers (if depth > 1)"
},
)
latent_vars: int = field(
default=320,
metadata={"help": "number of latent variables V in each group of the codebook"},
)
latent_groups: int = field(
default=2,
metadata={"help": "number of groups G of latent variables in the codebook"},
)
latent_dim: int = field(
default=0,
metadata={
"help": "if > 0, uses this dimensionality for latent variables. "
"otherwise uses final_dim / latent_groups"
},
)
# masking
mask_length: int = field(default=10, metadata={"help": "mask length"})
mask_prob: float = field(
default=0.65, metadata={"help": "probability of replacing a token with mask"}
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose mask length"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# channel masking
mask_channel_length: int = field(
default=10, metadata={"help": "length of the mask for features (channels)"}
)
mask_channel_prob: float = field(
default=0.0, metadata={"help": "probability of replacing a feature with 0"}
)
mask_channel_before: bool = False
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument (used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False, metadata={"help": "whether to allow channel masks to overlap"}
)
mask_channel_min_space: int = field(
default=1,
metadata={"help": "min space between spans (if no overlap is enabled)"},
)
# negative selection
num_negatives: int = field(
default=100,
metadata={"help": "number of negative examples from the same sample"},
)
negatives_from_everywhere: bool = field(
default=False,
metadata={"help": "sample negatives from everywhere, not just masked states"},
)
cross_sample_negatives: int = field(
default=0, metadata={"help": "number of negative examples from the any sample"}
)
codebook_negatives: int = field(
default=0, metadata={"help": "number of negative examples codebook"}
)
# positional embeddings
conv_pos: int = field(
default=128,
metadata={"help": "number of filters for convolutional positional embeddings"},
)
conv_pos_groups: int = field(
default=16,
metadata={"help": "number of groups for convolutional positional embedding"},
)
latent_temp: Tuple[float, float, float] = field(
default=(2, 0.5, 0.999995),
metadata={
"help": "temperature for latent variable sampling. "
"can be tuple of 3 values (start, end, decay)"
},
)
# Visual Part
visual_conv_feature_layers: str = field(
default="[(512, 11, 1, 5)] * 3 + [(1024, 11, 1, 5)]",
metadata={
"help": "string describing visual-subnet convolutional feature extraction layers in form of a python list that contains "
"[(dim, kernel_size, stride, padding), ...]"
},
)
visual_input_dim: int = field(
default=112,
metadata={"help": "number of dims of visual pictures"},
)
visual_encoder_dim: int = field(
default=2048,
metadata={"help": "number of dims after MoCo"},
)
projection_dim: int = field(
default=512,
metadata={"help": "output dimension of projection head"},
)
# checkpoint part
m2v_path : str = field(
default="./checkpoints-mm-2/",
metadata={
"help": "path to mm2vec stage 1 last model or stage 2 process model"
},
)
# aggregation part
audio_weight: float = field(
default=0.5,
metadata={
"help":"weight for audio_features"
}
)
visual_weight: float = field(
default=0.5,
metadata={
"help":"weight for audio_features"
}
)
remove_quantizer_weight: bool = field(
default=False,
metadata={
"help": "remove quantizer pretrain params"
}
)
unfreeze_quantizer_weight:bool = field(
default=False,
metadata={
"help": "freeze quantizer pretrain params"
}
)
# MoCo
MoCo_replace:bool = field(
default=False,
metadata={"help":"replace first conv2d in MoCo with conv3d"}
)
@register_model("mm2vec", dataclass=MM2VecConfig)
class MM2VecModel(BaseFairseqModel):
def __init__(self, cfg: MM2VecConfig):
super().__init__()
self.cfg = cfg
audio_feature_enc_layers = eval(cfg.audio_conv_feature_layers)
visual_feature_enc_layers = eval(cfg.visual_conv_feature_layers)
self.audio_embed_dim = audio_feature_enc_layers[-1][0] # 512
self.visual_embed_dim = visual_feature_enc_layers[-1][0] # 1024
self.projection_dim = cfg.projection_dim # 512
self.audio_feature_extractor = ConvFeatureExtractionModel(
conv_layers=audio_feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
input_dim=1,
)
self.visual_input_dim = cfg.visual_input_dim # 112
self.MoCo_replace = cfg.MoCo_replace
self.MoCo_extractor = MoCo(replace=self.MoCo_replace)
self.visual_encoder_dim = cfg.visual_encoder_dim # 2048
self.visual_feature_extractor = ConvFeatureExtractionModel(
conv_layers=visual_feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
input_dim=2048,
)
self.post_extract_proj = (
# 512 -> 768
nn.Linear(self.audio_embed_dim, cfg.encoder_embed_dim)
if self.audio_embed_dim != cfg.encoder_embed_dim and not cfg.quantize_input
else None
)
self.projection_head = nn.Sequential(
# 512 -> 512
nn.Linear(int(self.visual_embed_dim / 2), int(self.visual_embed_dim / 2), bias=False),
nn.ReLU(),
# 512 -> 768
nn.Linear(int(self.visual_embed_dim / 2), cfg.encoder_embed_dim, bias=False),
)
""" mask part """
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_before = cfg.mask_channel_before
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
""" mask part """
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.quantizer = None
self.input_quantizer = None
self.n_negatives = cfg.num_negatives
self.cross_sample_negatives = cfg.cross_sample_negatives
self.codebook_negatives = cfg.codebook_negatives
self.negatives_from_everywhere = cfg.negatives_from_everywhere
self.logit_temp = cfg.logit_temp
final_dim = cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
if cfg.quantize_targets:
vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else final_dim
self.quantizer = GumbelVectorQuantizer(
dim=self.audio_embed_dim, # 512
num_vars=cfg.latent_vars, # 320
temp=cfg.latent_temp,
groups=cfg.latent_groups, # 2
combine_groups=False,
vq_dim=vq_dim,
time_first=True,
weight_proj_depth=cfg.quantizer_depth,
weight_proj_factor=cfg.quantizer_factor,
)
self.project_q = nn.Linear(vq_dim, final_dim)
else:
self.project_q = nn.Linear(self.embed, final_dim)
# if cfg.quantize_input:
# if cfg.same_quantizer and self.quantizer is not None:
# vq_dim = final_dim
# self.input_quantizer = self.quantizer
# else:
# vq_dim = cfg.latent_dim if cfg.latent_dim > 0 else cfg.encoder_embed_dim
# self.input_quantizer = GumbelVectorQuantizer(
# dim=self.embed,
# num_vars=cfg.latent_vars,
# temp=cfg.latent_temp,
# groups=cfg.latent_groups,
# combine_groups=False,
# vq_dim=vq_dim,
# time_first=True,
# weight_proj_depth=cfg.quantizer_depth,
# weight_proj_factor=cfg.quantizer_factor,
# )
# self.project_inp = nn.Linear(vq_dim, cfg.encoder_embed_dim)
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.audio_embed_dim)
self.visual_layer_norm = LayerNorm(int(self.visual_embed_dim / 2))
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
self.model_stage = cfg.model_stage
self.audio_weight = cfg.audio_weight
self.visual_weight = cfg.visual_weight
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
@classmethod
def build_model(cls, cfg: MM2VecConfig, task=None):
"""Build a new model instance."""
model = cls(cfg)
if cfg.model_stage == 1:
model_dict = model.state_dict()
wav2vec_dict = {k.replace('feature', 'audio_feature'): v for k, v in
torch.load('../pretrain/wav2vec_small.pt')["model"].items()}
moco_dict = {k.replace('module.encoder_q', 'MoCo_extractor.encoder'): v for k, v in
torch.load('../pretrain/moco_v2_800ep_pretrain.pth.tar')["state_dict"].items()}
if cfg.remove_quantizer_weight:
popKeys = ['quantizer.vars', 'quantizer.weight_proj.weight', 'quantizer.weight_proj.bias']
for k in popKeys:
wav2vec_dict.pop(k)
popKeys = ['MoCo_extractor.encoder.fc.0.bias', 'MoCo_extractor.encoder.fc.2.bias',
'MoCo_extractor.encoder.fc.0.weight', 'MoCo_extractor.encoder.fc.2.weight']
if cfg.MoCo_replace:
popKeys.append('MoCo_extractor.encoder.conv1.weight')
for k in popKeys:
moco_dict.pop(k)
model_dict.update(wav2vec_dict)
model_dict.update(moco_dict)
model.load_state_dict(model_dict)
popKeys = ['quantizer.vars', 'quantizer.weight_proj.weight', 'quantizer.weight_proj.bias']
for name, param in model.named_parameters():
# print(name)
if name in wav2vec_dict.keys() or name in moco_dict.keys():
param.requires_grad = False
if name in popKeys and cfg.unfreeze_quantizer_weight:
param.requires_grad = True
elif cfg.model_stage == 2:
model_dict = model.state_dict()
checkpoint_path = os.path.join(cfg.m2v_path, 'checkpoint_last.pt')
checkpoints_dict = torch.load(checkpoint_path)['model']
model_dict.update(checkpoints_dict)
model.load_state_dict(model_dict)
else:
return model
print('num_total_param: {},num_trainable_param: {},num_freezed_param: {}'.format(
sum([params.numel() for params in model.parameters()]),
sum([params.numel() for params in model.parameters() if params.requires_grad]),
sum([params.numel() for params in model.parameters() if not params.requires_grad])))
return model
def apply_mask(
self,
x_audio,
x_visual,
padding_mask,
mask_indices=None,
mask_channel_indices=None,
):
B, T, C = x_audio.shape
# FIXME INFERENCE
if self.mask_channel_prob > 0 and self.mask_channel_before:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
                .to(x_audio.device)
                .unsqueeze(1)
                .expand(-1, T, -1)
            )
            x_audio[mask_channel_indices] = 0
            x_visual[mask_channel_indices] = 0
if self.mask_prob > 0:
if mask_indices is None:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x_audio.device)
x_audio = index_put(x_audio, mask_indices, self.mask_emb)
x_visual = index_put(x_visual, mask_indices, self.mask_emb)
else:
mask_indices = None
# FIXME INFERENCE
if self.mask_channel_prob > 0 and not self.mask_channel_before:
if mask_channel_indices is None:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x_audio.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x_audio = index_put(x_audio, mask_channel_indices, 0)
x_visual = index_put(x_visual, mask_channel_indices, 0)
return x_audio, x_visual, mask_indices
def sample_negatives(self, y_audio, y_visual, num, padding_count=None):
#ignore
if self.n_negatives == 0 and self.cross_sample_negatives == 0:
            return y_audio.new(0)
bsz, tsz, fsz = y_audio.shape
y_audio = y_audio.view(-1, fsz) # BTC => (BxT)C
y_visual = y_visual.view(-1, fsz)
# FIXME: what happens if padding_count is specified?
cross_high = tsz * bsz
high = tsz - (padding_count or 0)
with torch.no_grad():
assert high > 1, f"{bsz,tsz,fsz}"
if self.n_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.n_negatives)
.flatten()
)
neg_idxs = torch.randint(
low=0, high=high - 1, size=(bsz, self.n_negatives * num)
)
neg_idxs[neg_idxs >= tszs] += 1
if self.cross_sample_negatives > 0:
tszs = (
buffered_arange(num)
.unsqueeze(-1)
.expand(-1, self.cross_sample_negatives)
.flatten()
)
cross_neg_idxs = torch.randint(
low=0,
high=cross_high - 1,
size=(bsz, self.cross_sample_negatives * num),
)
cross_neg_idxs[cross_neg_idxs >= tszs] += 1
if self.n_negatives > 0:
for i in range(1, bsz):
neg_idxs[i] += i * high
else:
neg_idxs = cross_neg_idxs
if self.cross_sample_negatives > 0 and self.n_negatives > 0:
neg_idxs = torch.cat([neg_idxs, cross_neg_idxs], dim=1)
negs_audio = y_audio[neg_idxs.view(-1)]
negs_audio = negs_audio.view(
bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
).permute(
2, 0, 1, 3
) # to NxBxTxC
negs_visual = y_visual[neg_idxs.view(-1)]
negs_visual = negs_visual.view(
bsz, num, self.n_negatives + self.cross_sample_negatives, fsz
).permute(
2, 0, 1, 3
) # to NxBxTxC
return negs_audio, negs_visual, neg_idxs
def compute_preds(self, x, y, negatives):
neg_is_pos = (y == negatives).all(-1)
y = y.unsqueeze(0)
targets = torch.cat([y, negatives], dim=0)
logits = torch.cosine_similarity(x.float(), targets.float(), dim=-1).type_as(x)
logits = logits / self.logit_temp
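        # logits has shape (1 + num_negatives, batch, masked_timesteps): row 0 holds the
        # similarity to the positive target, the remaining rows hold the negatives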
if is_xla_tensor(logits) or neg_is_pos.any():
fillval = -float(2 ** 30)
if not hasattr(self, "_inftensor"):
self._inftensor = (
torch.tensor(fillval).to(x.device)
if is_xla_tensor(logits)
else float("-inf")
)
logits[1:] = index_put(logits[1:], neg_is_pos, self._inftensor)
return logits
def _get_feat_extract_output_lengths(self, input_lengths: torch.LongTensor):
"""
Computes the output length of the convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride):
return torch.floor((input_length - kernel_size) / stride + 1)
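        # e.g. the first audio conv layer (kernel_size=10, stride=5) maps an input of length L
        # to floor((L - 10) / 5 + 1) frames; the loop below applies this layer by layer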
conv_cfg_list = eval(self.cfg.audio_conv_feature_layers)
for i in range(len(conv_cfg_list)):
input_lengths = _conv_out_length(
input_lengths, conv_cfg_list[i][1], conv_cfg_list[i][2]
)
return input_lengths.to(torch.long)
def compute_visual_length(self,visual_source):
visual_length = list()
max_visual_length = -1
for i in range(len(visual_source)):
length = int(visual_source[i].size(1) / self.visual_input_dim)
if length > max_visual_length:
max_visual_length = length
visual_length.append(length)
return max_visual_length,visual_length
    def visual_padding(self,visual_features,visual_length,max_visual_length):
        visual_source_new = torch.tensor([], dtype=visual_features.dtype, device=visual_features.device)
        start = 0
        # split the MoCo output according to the visual_length array and pad each sample to the longest length
        visual_source_len = max_visual_length
        for l in visual_length:
            visual_source_new = torch.cat((visual_source_new, torch.cat(
                (visual_features[start:start + l],
                 torch.zeros((visual_source_len - l, 3, 112, 112), dtype=visual_features.dtype,
                             device=visual_features.device)))))
            start += l
        return visual_source_new
def forward(
self,
audio_source,
visual_source,
padding_mask=None,
mask=True,
features_only=False,
layer=None,
mask_indices=None,
mask_channel_indices=None,
padding_count=None,
):
"""
先只管cropping的训练模式,stage1 stage2都是对应好了的 visual 和 audio 长度
batch内不同sample的visual length或者 audio length都是一样的
不需要算长度序列
inference:dataset的pad参数被设置 需要传入padding mask的时候,audio的source才是padding了的
这个时候才需要记录visual length,并在过完moco之后padding
"""
result = {}
# FIXME INFERENCE
if padding_mask is not None:
# compute visual length
max_visual_length, visual_length = self.compute_visual_length(visual_source)
visual_source = torch.cat(visual_source,1)
visual_source = torch.split(visual_source, self.visual_input_dim, 1)
visual_source = torch.cat(visual_source)
visual_source = visual_source.view(-1, self.visual_input_dim, self.visual_input_dim)
visual_source = visual_source.unsqueeze(1).repeat(1, 3, 1, 1)
if self.MoCo_replace:
visual_source = self.visual_padding(visual_source,visual_length,max_visual_length)
visual_source = visual_source.view(len(visual_length),max_visual_length,3,112,112)
visual_source = visual_source.transpose(1,2)
else:
"""
cropping训练,batch内的visual input长度一样
"""
visual_batch_size = len(visual_source)
max_visual_length = int(visual_source[0].size(1)/112)
visual_source = torch.stack(visual_source)
visual_source = torch.split(visual_source, self.visual_input_dim, 1)
visual_source = torch.cat(visual_source)
visual_source = visual_source.view(-1, self.visual_input_dim, self.visual_input_dim)
visual_source = visual_source.unsqueeze(1).repeat(1, 3, 1, 1)
if self.MoCo_replace:
visual_source = visual_source.view(visual_batch_size, max_visual_length, 3, self.visual_input_dim, self.visual_input_dim)
visual_source = visual_source.transpose(1, 2)
"""MoCo input dim:[n_frames,3,112,112]"""
visual_features = self.MoCo_extractor(visual_source)
visual_features = visual_features.view(-1,max_visual_length,self.visual_encoder_dim)
visual_features = visual_features.transpose(1,2)
"""
长度问题到这里应该就结束了,后面不管是padding还是cropping都是align好了的
"""
if self.feature_grad_mult > 0:
# audio: (bsz*sample_length) --> (bsz * feature_dim * frames)
# visual: (bsz*feature_dim * frames) --> (bsz * feature_dim_new * frames)
af_beforeGELU, audio_features = self.audio_feature_extractor(audio_source)
vf_beforeGELU, visual_features = self.visual_feature_extractor(visual_features)
if self.feature_grad_mult != 1.0:
audio_features = GradMultiply.apply(audio_features, self.feature_grad_mult)
visual_features = GradMultiply.apply(visual_features, self.feature_grad_mult)
else:
with torch.no_grad():
af_beforeGELU, audio_features = self.audio_feature_extractor(audio_source)
vf_beforeGELU, visual_features = self.visual_feature_extractor(visual_features)
features_pen = 0 # penalty loss
af_beforeGELU = af_beforeGELU.transpose(1,2)
vf_beforeGELU = vf_beforeGELU.transpose(1,2)
vf_beforeGELU = vf_beforeGELU.reshape(vf_beforeGELU.size(0), -1,int(vf_beforeGELU.size(2) / 2))
vf_beforeGELU = vf_beforeGELU[:, :af_beforeGELU.size(1), :]
af_beforeGELU = self.layer_norm(af_beforeGELU)
vf_beforeGELU = self.visual_layer_norm(vf_beforeGELU)
result["pre_gelu_audio"] = af_beforeGELU
result["pre_gelu_visual"] = vf_beforeGELU
        # FIXME: how do the transpose and layer_norm affect the MSE? And what difference does applying GELU make to the MSE?
audio_features = audio_features.transpose(1, 2)
visual_features = visual_features.transpose(1, 2)
visual_features = visual_features.reshape(visual_features.size(0), -1, int(visual_features.size(2) / 2))
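        # as in the pre-GELU path above: each 1024-dim visual frame is split into two 512-dim
        # halves (doubling the time axis), then truncated to match the number of audio frames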
visual_features = visual_features[:, :audio_features.size(1), :]
        audio_features = self.layer_norm(audio_features)  # layer norm over the 512-dim feature axis
visual_features = self.visual_layer_norm(visual_features)
result["post_gelu_audio"] = audio_features
result["post_gelu_visual"] = visual_features
unmasked_audio_features = audio_features.clone()
unmasked_visual_features = visual_features.clone()
# FIXME INFERENCE
"""sample维度的padding mask到frame维度的padding mask"""
if padding_mask is not None and padding_mask.any():
input_lengths = (1 - padding_mask.long()).sum(-1)
# apply conv formula to get real output_lengths
output_lengths = self._get_feat_extract_output_lengths(input_lengths)
padding_mask = torch.zeros(
audio_features.shape[:2], dtype=audio_features.dtype, device=audio_features.device
)
# these two operations makes sure that all values
# before the output lengths indices are attended to
padding_mask[
(
torch.arange(padding_mask.shape[0], device=padding_mask.device),
output_lengths - 1,
)
] = 1
padding_mask = (1 - padding_mask.flip([-1]).cumsum(-1).flip([-1])).bool()
else:
padding_mask = None
# 512 -> 768
if self.post_extract_proj is not None:
audio_features = self.post_extract_proj(audio_features)
visual_features = self.post_extract_proj(visual_features)
# if self.projection_head is not None:
# visual_features = self.projection_head(visual_features)
result["features_pen"] = features_pen
audio_features = self.dropout_input(audio_features)
visual_features = self.dropout_input(visual_features)
unmasked_audio_features = self.dropout_features(unmasked_audio_features)
unmasked_visual_features = self.dropout_features(unmasked_visual_features)
num_vars = None
code_ppl = None
prob_ppl = None
curr_temp = None
# if self.input_quantizer:
# q = self.input_quantizer(features, produce_targets=False)
# features = q["x"]
# num_vars = q["num_vars"]
# code_ppl = q["code_perplexity"]
# prob_ppl = q["prob_perplexity"]
# curr_temp = q["temp"]
# features = self.project_inp(features)
if mask:
            # no masking at inference time; compute mask indices and set (indices==True) positions to self.mask_emb
x_audio, x_visual, mask_indices = self.apply_mask(
audio_features,
visual_features,
padding_mask,
mask_indices=mask_indices,
mask_channel_indices=mask_channel_indices,
)
            if not is_xla_tensor(x_audio) and not is_xla_tensor(x_visual) and mask_indices is not None:
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
y_audio = unmasked_audio_features[mask_indices].view(
unmasked_audio_features.size(0), -1, unmasked_audio_features.size(-1)
)
y_visual = unmasked_visual_features[mask_indices].view(
unmasked_visual_features.size(0), -1, unmasked_visual_features.size(-1)
)
else:
# ignore
y_audio = unmasked_audio_features
y_visual = unmasked_visual_features
else:
x_audio = audio_features
x_visual = visual_features
y_audio = unmasked_audio_features
y_visual = unmasked_visual_features
mask_indices = None
"""
mask之后的过transformer
stage 1: 两个模态分别过
stage 2: 两个模态取平均后过
"""
if self.model_stage == 1:
"""
x_audio:Batch * n_frames(with mask_emb) * feature_dim(512)
x_visual:Batch * n_frames(with mask_emb) * feature_dim(512)
x_audio.shape == x_visual.shape
"""
x_audio, layer_results_audio = self.encoder(x_audio, padding_mask=padding_mask, layer=layer)
x_visual, layer_results_visual = self.encoder(x_visual, padding_mask=padding_mask, layer=layer)
elif self.model_stage == 2:
x_cat = (self.audio_weight * x_audio + self.visual_weight * x_visual)
x_cat,layer_results_cat = self.encoder(x_cat, padding_mask=padding_mask, layer=layer)
else:
x_cat = (0.0 * x_audio + 1.0 * x_visual)
x_cat, _ = self.encoder(x_cat, padding_mask=padding_mask, layer=layer)
# FIXME INFERENCE
if features_only:
return {
"x": x_cat,
"padding_mask": padding_mask,
"audio_features": unmasked_audio_features,
"visual_features": unmasked_visual_features,
}
"""
inference时到这儿就结束了
"""
if self.quantizer:
q_visual = self.quantizer(y_visual, produce_targets=False)
y_visual = q_visual["x"]
q_audio = self.quantizer(y_audio, produce_targets=False)
y_audio = q_audio["x"]
if self.model_stage == 1:
"""
只管visual这边的diversity loss
"""
num_vars = q_visual["num_vars"]
code_ppl = [q_visual["code_perplexity"], q_audio["code_perplexity"]]
                # fraction of the codebook in use = code_ppl / (num_vars * num_latent_groups)
# print("visual_num_vars:",num_vars)
# print("audio_num_vars:", q_audio["num_vars"])
# print("visual_code_ppl:", code_ppl)
# print("audio_code_ppl:", q_audio["code_perplexity"])
prob_ppl = q_visual["prob_perplexity"]
curr_temp = q_visual["temp"]
elif self.model_stage == 2:
num_vars = q_visual["num_vars"]
code_ppl = [q_visual["code_perplexity"], q_audio["code_perplexity"]]
# print("num_vars_va:", num_vars)
# print("code_ppl_va:", code_ppl)
prob_ppl = [q_visual["prob_perplexity"], q_audio["prob_perplexity"]]
curr_temp = [q_visual["temp"], q_audio["temp"]]
y_audio = self.project_q(y_audio)
y_visual = self.project_q(y_visual)
# ignore
if self.negatives_from_everywhere:
# ignore
neg_cands = self.quantizer(unmasked_features, produce_targets=False)[
"x"
]
negs, _ = self.sample_negatives(
neg_cands,
y.size(1),
padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs_audio,negs_visual, negs_indices = self.sample_negatives(
y_audio,
y_visual,
y_audio.size(1),
padding_count=padding_count,
)
# ignore
if self.codebook_negatives > 0:
cb_negs = self.quantizer.sample_from_codebook(
y.size(0) * y.size(1), self.codebook_negatives
)
cb_negs = cb_negs.view(
self.codebook_negatives, y.size(0), y.size(1), -1
) # order doesnt matter
cb_negs = self.project_q(cb_negs)
negs = torch.cat([negs, cb_negs], dim=0)
else:
y_audio = self.project_q(y_audio)
y_visual = self.project_q(y_visual)
#ignore
if self.negatives_from_everywhere:
negs, _ = self.sample_negatives(
unmasked_features,
y.size(1),
padding_count=padding_count,
)
negs = self.project_q(negs)
else:
negs, _ = self.sample_negatives(
y,
y.size(1),
padding_count=padding_count,
)
if not is_xla_tensor(x_audio) and not is_xla_tensor(x_visual) and self.model_stage == 1:
# tpu-comment: reducing the size in a dynamic way causes
# too many recompilations on xla.
x_audio = x_audio[mask_indices].view(x_audio.size(0), -1, x_audio.size(-1))
x_visual = x_visual[mask_indices].view(x_visual.size(0), -1, x_visual.size(-1))
elif not is_xla_tensor(x_cat) and self.model_stage == 2:
x_cat = x_cat[mask_indices].view(x_cat.size(0), -1, x_cat.size(-1))
# ignore
if self.target_glu:
y = self.target_glu(y)
negs = self.target_glu(negs)
if self.model_stage == 1:
x_audio = self.final_proj(x_audio)
x_audio = self.compute_preds(x_audio, y_audio, negs_audio)
x_visual = self.final_proj(x_visual)
x_visual = self.compute_preds(x_visual, y_visual, negs_visual)
result["x_audio"] = x_audio
result["x_visual"] = x_visual
result["padding_mask"] = padding_mask
elif self.model_stage == 2:
x_cat = self.final_proj(x_cat)
x_audio = self.compute_preds(x_cat, y_audio, negs_audio)
x_visual = self.compute_preds(x_cat, y_visual, negs_visual)
result["x_audio"] =x_audio
result["x_visual"] = x_visual
result["padding_mask"] = padding_mask
if prob_ppl is not None:
result["prob_perplexity"] = prob_ppl
result["code_perplexity"] = code_ppl
result["num_vars"] = num_vars
result["temp"] = curr_temp
result["stage"] = self.model_stage
return result
def quantize(self, x):
assert self.quantizer is not None
x = self.feature_extractor(x)
x = x.transpose(1, 2)
x = self.layer_norm(x)
return self.quantizer.forward_idx(x)
def extract_features(self, audio_source, visual_source, padding_mask, mask=False, layer=None):
res = self.forward(
audio_source,visual_source, padding_mask, mask=mask, features_only=True, layer=layer
)
return res
def get_logits(self, net_output):
logits_audio = net_output["x_audio"]
logits_visual = net_output["x_visual"]
logits_audio = logits_audio.transpose(0, 2)
logits_visual = logits_visual.transpose(0, 2)
logits_audio = logits_audio.reshape(-1, logits_audio.size(-1))
        logits_visual = logits_visual.reshape(-1, logits_visual.size(-1))
return logits_audio,logits_visual
def get_targets(self, sample, net_output, expand_steps=True):
x_audio = net_output["x_audio"]
x_visual = net_output["x_visual"]
return x_audio.new_zeros(x_audio.size(1) * x_audio.size(2), dtype=torch.long), x_visual.new_zeros(x_visual.size(1) * x_visual.size(2), dtype=torch.long)
def get_extra_losses(self, net_output):
pen = []
if "prob_perplexity" in net_output:
if self.model_stage == 1:
pen.append(
(net_output["num_vars"] - net_output["prob_perplexity"])
/ net_output["num_vars"]
)
else:
for i in range(2):
# visual audio
pen.append(
(net_output["num_vars"] - net_output["prob_perplexity"][i])
/ net_output["num_vars"]
)
if "features_pen" in net_output:
pen.append(net_output["features_pen"])
return pen
def remove_pretraining_modules(self):
self.quantizer = None
self.project_q = None
self.target_glu = None
self.final_proj = None
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
input_dim=1,
):
super().__init__()
assert mode in {"default", "layer_norm"}
def block(
n_in,
n_out,
kernel_size,
stride,
padding,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, kernel_size=kernel_size, stride=stride, padding=padding, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
assert (
is_layer_norm and is_group_norm
) == False, "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
nn.Sequential(
TransposeLast(),
Fp32LayerNorm(dim, elementwise_affine=True),
TransposeLast(),
),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
Fp32GroupNorm(dim, dim, affine=True),
nn.GELU(),
)
else:
return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
in_d = input_dim
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 4, "invalid conv definition: " + str(cl)
(dim, kernel_size, stride, padding) = cl
self.conv_layers.append(
block(
in_d,
dim,
kernel_size,
stride,
padding,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
def forward(self, x):
# BxT -> BxCxT
if len(x.shape) == 2:
x = x.unsqueeze(1)
for conv in self.conv_layers:
if conv == self.conv_layers[-1]:
for name, module in conv.named_children():
if name =="2":
"""
0 Conv1d
1 Dropout
2 GELU
2 means GELU()
"""
before_GELU = x
x = module(x)
else:
x = conv(x)
return before_GELU, x
class TransformerEncoder(nn.Module):
def __init__(self, args):
super().__init__()
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.pos_conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
self.layers = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
)
for _ in range(args.encoder_layers)
]
)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.apply(init_bert_params)
def forward(self, x, padding_mask=None, layer=None):
x, layer_results = self.extract_features(x, padding_mask, layer)
if self.layer_norm_first and layer is None:
x = self.layer_norm(x)
return x, layer_results
def extract_features(self, x, padding_mask=None, tgt_layer=None):
if padding_mask is not None:
x = index_put(x, padding_mask, 0)
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.layer_norm_first:
x = self.layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
layer_results = []
r = None
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, z = layer(x, self_attn_padding_mask=padding_mask, need_weights=False)
if tgt_layer is not None:
layer_results.append((x, z))
if i == tgt_layer:
r = x
break
if r is not None:
x = r
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, layer_results
def max_positions(self):
"""Maximum output length supported by the encoder."""
return self.args.max_positions
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
return state_dict
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: float = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_fn = utils.get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
need_weights: bool = False,
att_args=None,
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules similar to the original Transformer imlementation.
"""
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
attn_mask=self_attn_mask,
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
else:
x, attn = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
)
x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn
class MoCo(nn.Module):
def __init__(self, replace=False):
super(MoCo, self).__init__()
self.encoder = nn.Sequential()
self.replace = replace
for name, module in resnet50().named_children():
"""
name:conv1
name:bn1
name:relu
name:maxpool
name:layer1
name:layer2
name:layer3
name:layer4
name:avgpool
name:fc
"""
if name == 'conv1':
if self.replace:
module = nn.Conv3d(3,64,kernel_size=(7, 7, 7), stride=(1, 2, 2), padding=(3, 3, 3), bias=False)
self.encoder.add_module(name, module)
elif name != 'fc':
self.encoder.add_module(name, module)
# else:
# self.ResNet.append(nn.Linear(in_features=2048, out_features=128, bias=True))
def forward(self, x):
x = self.encoder.conv1(x)
if self.replace:
x = x.transpose(1,2)
x = x.reshape(-1,x.size(2),x.size(3),x.size(4))
x = self.encoder.bn1(x)
x = self.encoder.relu(x)
x = self.encoder.maxpool(x)
x = self.encoder.layer1(x)
x = self.encoder.layer2(x)
x = self.encoder.layer3(x)
x = self.encoder.layer4(x)
x = self.encoder.avgpool(x)
feature = torch.flatten(x, start_dim=1)
return F.normalize(feature, dim=-1) | 1.945313 | 2 |
mm_scripting_util/core.py | luclepot/mm_scripting_util | 0 | 12786999 | <gh_stars>0
from mm_scripting_util.util import *
from mm_scripting_util.util import _mm_util, _mm_backend_util, _mm_base_util
class miner(_mm_util):
"""
    Main container class for the module.
This class should handle all necessary interfacing with
madminer and madgraph.
"""
__MODULE_NAME = "mm_scripting_util"
# general class member functions
def __init__(
self,
name,
backend,
card_directory=None,
path=None,
loglevel=logging.INFO,
madminer_loglevel=logging.INFO,
init_loglevel=logging.INFO,
autodestruct=False,
_cmd_line_origin=False,
):
"""
madminer-helper object for quickly running madgraph scripts.
parameters:
name:
string, the name of the main object directory
path:
string, path to desired object directory. Will default to current working directory.
loglevel:
                int (enum), loglevel for the miner class as a whole. Will default to INFO.
            autodestruct:
                bool, cleanup switch for the object and all of its files. If true, everything is destroyed upon deletion; useful for testing.
            backend:
                string, name of a backend (example files live in "mm_scripting_util/data/backends/"). Provides all benchmark and simulation information.
card_directory:
string, path to a card directory from which to load template cards, if one desires to switch the current template cards out for new ones.
"""
if path is None:
path = os.getcwd()
# initialize helper classes
_mm_base_util.__init__(self, name, path)
_mm_backend_util.__init__(self)
self.autodestruct = autodestruct
self.log = logging.getLogger(__name__)
self.module_path = os.path.dirname(__file__)
self._cmd_line_origin = _cmd_line_origin
if self._cmd_line_origin:
self.set_loglevel(30)
self.set_loglevel(30, module="madminer")
else:
self.set_loglevel(loglevel)
self.set_loglevel(madminer_loglevel, module="madminer")
self.name = name
self.dir = "{}/{}".format(self.path, self.name)
if not os.path.exists(self.dir):
os.mkdir(self.dir)
self.log.log(init_loglevel, "Creating new directory " + self.dir)
else:
self.log.log(init_loglevel, "Initialized to existing directory " + self.dir)
self.log.log(init_loglevel,
"Initialized a new miner object with name '{}'".format(self.name)
)
self.log.log(init_loglevel, "- module path at {}".format(self.module_path))
self.log.log(init_loglevel, "- new miner object path at " + self.dir)
self.madminer_object = madminer.core.MadMiner()
lhe_processor_object = None
self.log.log(init_loglevel, "Loading custom card directory... ")
self.card_directory = None
# backend param should refer to the name, not a specific backend filepath
self.backend = backend.replace('.dat', '')
self._write_config( {
'backend': self.backend,
'name': self.name,
'dir': self.dir,
'path': self.path,
},
'{}/config.mmconfig'.format(self.dir)
)
# if card_directory is specified..
if card_directory is not None:
for path_check in [
card_directory,
"{}/data/{}".format(self.module_path, card_directory)
]:
if os.path.exists(path_check):
self.card_directory = path_check
if self.card_directory is None:
self.log.error(
"Selected card directory '{}' could not be found.".format(
card_directory
)
)
self.log.error("Using default card directory instead.")
self.card_directory = self.default_card_directory
# else, check using the backend parameter
else:
for path_check in [
"cards_{}".format(self.backend),
"{}/data/cards_{}".format(self.module_path, self.backend)
]:
if os.path.exists(path_check):
self.card_directory = path_check
if self.card_directory is None:
self.log.error("No card directory found using auto-spec backend {}".format(self.backend))
self.log.error("Using default card directory instead.")
self.card_directory = self.default_card_directory
self._load_backend("{}.dat".format(self.backend))
self.log.log(init_loglevel, "Using card directory '{}',".format(self.card_directory))
self.log.log(init_loglevel, "with {} files".format(len(os.listdir(self.card_directory))))
self.set_loglevel(loglevel)
self.set_loglevel(madminer_loglevel, module="madminer")
def set_loglevel(
self,
loglevel,
module=None
):
logging.basicConfig(
format="%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s",
datefmt="%H:%M",
level=logging.WARNING,
)
if module is None:
module = self.__MODULE_NAME
logging.getLogger(module).setLevel(loglevel)
return loglevel
def destroy_sample(
self
):
rets = [self._check_valid_init()]
failed = [ret for ret in rets if ret != self.error_codes.Success]
if len(failed) > 0:
return failed
self._remove_files(self.dir, include_folder=True)
return [self.error_codes.Success]
def list_samples(
self,
verbose=False,
criteria='*',
include_info=False,
):
sample_list = glob.glob(
"{}/data/samples/{}/augmented_sample.mmconfig".format(self.dir, criteria)
) + glob.glob("{}/evaluations/*/{}/augmented_sample.mmconfig".format(self.dir, criteria))
return self._list_verbose_helper('augmented samples', sample_list, verbose, criteria, 'sample_name', include_info)
def list_models(
self,
verbose=False,
criteria='*',
include_info=False,
):
model_list = glob.glob("{}/models/*/{}/training_model.mmconfig".format(self.dir, criteria))
return self._list_verbose_helper('trained models', model_list, verbose, criteria, 'training_name', include_info)
def list_evaluations(
self,
verbose=False,
criteria='*',
include_info=False,
):
evaluation_list = glob.glob(
"{}/evaluations/*/{}/evaluation.mmconfig".format(self.dir, criteria)
)
return self._list_verbose_helper('evaluations', evaluation_list, verbose, criteria, 'evaluation_name', include_info)
@staticmethod
def list_backends():
return os.listdir("{}/data/backends/".format(os.path.dirname(__file__)))
@staticmethod
def list_cards():
return glob.glob("{}/data/*cards*".format(os.path.dirname(__file__)))
@staticmethod
def list_full_backends():
backends = [w.replace('.dat', '') for w in miner.list_backends()]
cards = [card.split('/')[-1].replace('cards_', '') for card in miner.list_cards()]
return set(backends).intersection(cards)
def simulate_data(
self,
samples,
sample_benchmark,
seed_file=None,
force=True,
mg_dir=None,
use_pythia_card=False,
mg_environment_cmd='ubc',
morphing_trials=2500,
override_step=None,
):
"""
Standard data simulation run. Should go from start to finish with data simulation.
"""
try:
if override_step is not None:
self.SIMULATION_STEP = override_step
else:
self.SIMULATION_STEP = self._get_simulation_step(
samples
)
if self.SIMULATION_STEP < 1 or force:
self.log.debug("")
self.log.debug("RUNNING SETUP CARDS, STEP 1")
self.log.debug("")
ret = self.setup_cards(
n_samples=samples, seed_file=seed_file, force=force
)
if self.error_codes.Success not in ret:
self.log.warning("Quitting simulation with errors.")
return ret
self.SIMULATION_STEP = 1
self.log.debug("")
self.log.debug("FINISHED SETUP CARDS, STEP 1")
self.log.debug("")
if self.SIMULATION_STEP < 2 or force:
self.log.debug("")
self.log.debug("RUNNING MORPHING, STEP 2")
self.log.debug("")
ret = self.run_morphing(force=force, morphing_trials=morphing_trials)
if self.error_codes.Success not in ret:
self.log.warning("Quitting simulation with errors.")
return ret
self.SIMULATION_STEP = 2
self.log.debug("")
self.log.debug("FINISHED MORPHING, STEP 2")
self.log.debug("")
if self.SIMULATION_STEP < 3 or force:
self.log.debug("")
self.log.debug("RUNNING MG5 SCRIPTS, STEP 3")
self.log.debug("")
ret = self.run_mg5_scripts(
samples=samples,
sample_benchmark=sample_benchmark,
force=force,
mg_dir=mg_dir,
mg_environment_cmd=mg_environment_cmd,
use_pythia_card=use_pythia_card,
)
if self.error_codes.Success not in ret:
self.log.warning("Quitting simulation with errors.")
return ret
self.SIMULATION_STEP = 3
self.log.debug("")
self.log.debug("FINISHED MG5 SCRIPTS, STEP 3")
self.log.debug("")
if self.SIMULATION_STEP < 4 or force:
self.log.debug("")
self.log.debug("RUNNING MG5 DATA PROCESS, STEP 4")
self.log.debug("")
ret = self.process_mg5_data()
if self.error_codes.Success not in ret:
self.log.warning("Quitting simulation with errors.")
return ret
self.SIMULATION_STEP = 4
self.log.debug("")
self.log.debug("FINISHED MG5 DATA PROCESS, STEP 4")
self.log.debug("")
except:
self.log.error(traceback.format_exc())
self.log.error("ABORTING")
return [self.error_codes.CaughtExceptionError]
return [self.error_codes.Success]
def setup_cards(
self,
n_samples,
seed_file=None,
force=False,
run_card_modifications=[
" = nevents ! Number of unweighted events requested",
" = iseed ! (0=assigned automatically=default))",
],
):
rets = [self._check_valid_init(), self._check_valid_backend()]
failed = [ret for ret in rets if ret != self.error_codes.Success]
if len(failed) > 0:
self.log.warning("Canceling card setup.")
return failed
sample_sizes = self._equal_sample_sizes(n_samples, sample_limit=100000)
if seed_file is not None:
seeds = np.load(seed_file)
else:
seeds = np.random.randint(1, 30081.0 * 30081.0, len(sample_sizes))
# check validity of seed input (matching with sample sizes, at the least)
assert len(seeds) >= len(sample_sizes)
self._check_directory(local_pathname="cards", force=force, pattern="card")
files = os.listdir(self.card_directory)
filenames = {}
for f in files:
if not os.path.isdir(f):
shutil.copyfile(
src=self.card_directory + "/" + f, dst=self.dir + "/cards/" + f
)
filenames[f] = "{}/cards/{}".format(self.card_directory, f)
self.log.info(
"Copied {} card files from directory '{}'".format(
len(files), self.card_directory
)
)
#
# SETUP RUN CARDS
#
run_card_filenames = {f: filenames[f] for f in filenames if "run_card" in f}
for i in range(len(sample_sizes)):
for f in run_card_filenames:
nums, names = self._get_keyword_filenumbers(
run_card_modifications, f, fdir=self.card_directory
)
values = [sample_sizes[i], seeds[i]]
self._replace_lines(
infile="{}/cards/{}".format(self.dir, f),
line_numbers=nums,
line_strings=[
"{}{}\n".format(values[j], name) for j, name in enumerate(names)
],
outfile=self.dir
+ "/cards/{}{}.dat".format(f.replace(".dat", ""), i + 1),
)
self.log.debug(
"Setup {} cards in dir {}".format(len(sample_sizes), self.dir + "/cards")
)
files = os.listdir(self.dir + "/cards")
for f in files:
self.log.debug(' - "{}"'.format(f))
#
# SETUP PROC CARDS // disabled for now, just do it manually (easy enough man)
#
# proc_card_filenames = { f : filenames[f] for f in filenames if "proc_card" in f }
# possible_proc_card_changes = [
# ("madgraph_generation_command", "generate "),
# ("model", "import model ")
# ]
# for f in proc_card_filenames:
# for key in self.params:
# for change, change_syntax in possible_proc_card_changes:
# if change in key:
# self._replace_lines(
# infile="{}/cards/{}".format(self.dir, f),
# line_numbers=self._get_keyword_filenumbers([change_syntax], f, fdir=self.card_directory)[0],
# line_strings=["{}{}\n".format(change_syntax, change)],
# outfile="{}/cards/{}.dat".format(self.dir, f.replace('.dat',''))
# )
return [self.error_codes.Success]
def run_morphing(
self,
morphing_trials=2500,
force=False
):
rets = [self._check_valid_backend()]
failed = [ret for ret in rets if ret != self.error_codes.Success]
if len(failed) > 0:
self.log.warning("Canceling morphing run.")
return failed
# check directory for existing morphing information
self._check_directory(
local_pathname="data",
force=force,
pattern="madminer_{}.h5".format(self.name),
)
# add parameterizations to madminer
for parameter in self.params["parameters"]:
self.madminer_object.add_parameter(
lha_block=self.params["parameters"][parameter]["lha_block"],
lha_id=self.params["parameters"][parameter]["lha_id"],
parameter_name=parameter,
morphing_max_power=self.params["parameters"][parameter][
"morphing_max_power"
],
parameter_range=self.params["parameters"][parameter]["parameter_range"],
)
for benchmark in self.params["benchmarks"]:
self.madminer_object.add_benchmark(
self.params["benchmarks"][benchmark], benchmark
)
# for benchmark in self.params['parameters'][parameter]['parameter_benchmarks']:
# self.madminer_object.add_benchmark(
# {parameter:benchmark[0]},benchmark[1]
# )
self.max_power = max(
[
self.params["parameters"][param]["morphing_max_power"]
for param in self.params["parameters"]
]
)
self.madminer_object.set_morphing(
include_existing_benchmarks=True,
max_overall_power=self.max_power,
            n_trials=morphing_trials,
)
# save data
self.madminer_object.save(self.dir + "/data/madminer_{}.h5".format(self.name))
self.log.debug("successfully ran morphing.")
return [self.error_codes.Success]
def run_mg5_scripts(
self,
samples,
sample_benchmark,
force=False,
mg_dir=None,
mg_environment_cmd='lxplus7',
use_pythia_card=False,
):
sample_sizes = self._equal_sample_sizes(samples, sample_limit=100000)
rets = [
self._check_valid_init(),
self._check_valid_cards(),
self._check_valid_morphing(),
self._check_valid_backend(),
]
failed = [ret for ret in rets if ret != self.error_codes.Success]
if len(failed) > 0:
self.log.warning("Canceling mg5 script setup.")
return failed
self._check_directory(
local_pathname="mg_processes/signal/madminer/scripts",
force=force,
mkdir_if_not_existing=False,
)
self.madminer_object.load(self.dir + "/data/madminer_{}.h5".format(self.name))
# check platform and change initial_command as necessary
if mg_environment_cmd == "lxplus7":
initial_command = 'source /cvmfs/sft.cern.ch/lcg/views/LCG_94/x86_64-centos7-gcc62-opt/setup.sh; echo "SOURCED IT"'
self.log.debug("Ran lxplus7 initial source cmd.")
elif mg_environment_cmd == "pheno":
initial_command = "module purge; module load pheno/pheno-sl7_gcc73; module load cmake/cmake-3.9.6"
elif mg_environment_cmd == "ubc":
initial_command = "PATH=$(getconf PATH); export ATLAS_LOCAL_ROOT_BASE=/cvmfs/atlas.cern.ch/repo/ATLASLocalRootBase; source $ATLAS_LOCAL_ROOT_BASE/user/atlasLocalSetup.sh; lsetup root"
else:
initial_command = mg_environment_cmd
self.log.debug('mg env command: {}'.format(initial_command))
# init mg_dir
if mg_dir is not None:
if not os.path.exists(mg_dir):
self.log.warning("MGDIR variable '{}' invalid".format(mg_dir))
self.log.warning("Aborting mg5 script setup routine.")
failed.append(self.error_codes.NoDirectoryError)
elif len(glob.glob("../MG5_aMC_*")) > 0:
mg_dir = os.path.abspath(glob.glob("../MG5_aMC_*")[0])
elif getpass.getuser() == "pvischia":
mg_dir = "/home/ucl/cp3/pvischia/smeft_ml/MG5_aMC_v2_6_2"
elif getpass.getuser() == "alheld":
mg_dir = "/home/ucl/cp3/alheld/projects/MG5_aMC_v2_6_2"
elif getpass.getuser() == "llepotti":
mg_dir = "/afs/cern.ch/work/l/llepotti/private/MG5_aMC_v2_6_5"
else:
self.log.warning(
"No mg_dir provided and username not recognized. Aborting."
)
failed.append(self.error_codes.NoDirectoryError)
mg_dir = os.path.abspath(mg_dir)
if len(failed) > 0:
return failed
self.log.debug("mg_dir set to '{}'".format(mg_dir))
required_mg5_modules = ['pythia8', 'lhapdf6', 'mg5amc_py8_interface', 'lhapdf5']
self.log.info("checking for installations of required mg5 modules...")
modules_to_install = [module for module in required_mg5_modules if module.lower() not in map(lambda x: x.lower(), os.listdir('{}/HEPTools'.format(mg_dir)))]
if len(modules_to_install) > 0:
self.log.info("installing modules {}".format(modules_to_install))
with open('{}/temp.dat'.format(mg_dir), 'w+') as f:
for module in modules_to_install:
f.write("install {}\n".format(module))
f.write("\n\n\n\n\n")
f.write("quit")
os.system('env -i "$BASH" -l -c \'cd {}/bin/; ./mg5_aMC < {}/temp.dat; rm {}/temp.dat\''.format(mg_dir, mg_dir, mg_dir))
self.log.info("successfully installed modules {}".format(modules_to_install))
else:
self.log.info("None found.")
# setup pythia cards
if use_pythia_card:
pythia_card = self.dir + "/cards/pythia8_card.dat"
else:
pythia_card = None
self.log.debug("running mg5 scripts...")
self.madminer_object.run_multiple(
sample_benchmarks=[sample_benchmark],
mg_directory=mg_dir,
mg_process_directory=self.dir + "/mg_processes/signal",
proc_card_file=self.dir + "/cards/proc_card.dat",
param_card_template_file=self.dir + "/cards/param_card_template.dat",
run_card_files=[
self.dir + "/cards/run_card{}.dat".format(i + 1)
for i in range(len(sample_sizes))
],
pythia8_card_file=pythia_card,
log_directory=self.dir + "/logs/signal",
initial_command=initial_command,
# only_prepare_script=True,
python2_override=True,
is_background=False,
only_prepare_script=True,
)
# run in a clean environment
os.system('env -i bash -l -c \'source {}/mg_processes/signal/madminer/run.sh\''.format(self.dir))
self._write_config(
{
"samples": samples,
"sample_benchmark": sample_benchmark,
"run_bool": True,
},
self._main_sample_config(),
)
self.log.debug("Successfully ran mg5 scripts.")
return [self.error_codes.Success]
def process_mg5_data(
self
):
rets = [self._check_valid_mg5_run()]
failed = [ret for ret in rets if ret != self.error_codes.Success]
if len(failed) > 0:
self.log.debug(failed)
self.log.warning("Canceling mg5 data processing routine.")
return failed
mg5_run_dict = self._load_config(self._main_sample_config())
samples = mg5_run_dict["samples"]
sample_benchmark = mg5_run_dict["sample_benchmark"]
self.lhe_processor_object = madminer.lhe.LHEReader(
filename='{}/data/madminer_{}.h5'.format(self.dir, self.name)
)
# for benchmark in self.params["benchmarks"]:
# self.lhe_processor_object.add_benchmark(
# self.params["benchmarks"][benchmark], benchmark
# )
n_cards = self._number_of_cards(samples, 100000)
for i in range(n_cards):
self.lhe_processor_object.add_sample(
"{}/mg_processes/signal/Events/run_{:02d}/unweighted_events.lhe.gz".format(
self.dir,
i + 1,
),
sampled_from_benchmark=sample_benchmark,
is_background=False,
)
for observable in self.params["observables"]:
print(self.params["observables"][observable])
self.lhe_processor_object.add_observable(
observable, self.params["observables"][observable]
)
self.lhe_processor_object.analyse_samples()
self.lhe_processor_object.save(
"{}/data/madminer_{}_with_data_parton.h5".format(self.dir, self.name)
)
return [self.error_codes.Success]
def plot_mg5_data_corner(
self,
image_save_name=None,
bins=40,
ranges=None,
include_automatic_benchmarks=True,
):
rets = [self._check_valid_mg5_process()]
failed = [ret for ret in rets if ret != self.error_codes.Success]
if len(failed) > 0:
self.log.warning("Canceling mg5 data plotting.")
return failed
(
_,
benchmarks,
_,
_,
_,
observables,
_,
_,
_,
_,
) = madminer.utils.interfaces.madminer_hdf5.load_madminer_settings(
filename=self.dir
+ "/data/madminer_{}_with_data_parton.h5".format(self.name)
)
include_array = None
if not include_automatic_benchmarks:
include_array = [
i for i, bm in enumerate(benchmarks) if bm in self.params["benchmarks"]
]
benchmarks = {
bm: benchmarks[bm]
for bm in benchmarks
if bm in self.params["benchmarks"]
}
legend_labels = [label for label in benchmarks]
labels = [label for label in observables]
        if not isinstance(bins, list):
bins = [bins for i in range(len(observables))]
obs, _, norm_weights, _ = self._get_raw_mg5_arrays(include_array=include_array)
self.log.info(
"correcting normalizations by total sum of weights per benchmark:"
)
if ranges is None:
ranges = self._get_automatic_ranges(obs, norm_weights)
self.log.info("No ranges specified, using automatic range finding.")
assert len(bins) == len(observables) == len(ranges)
# labels=[r'$\Delta \eta_{t\bar{t}}$',r'$p_{T, x0}$ [GeV]']
plt = corner.corner(
obs,
labels=labels,
color="C0",
bins=bins,
range=ranges,
weights=norm_weights[0],
)
plt.label = legend_labels[0]
for i in range(norm_weights.shape[0] - 1):
plt_prime = corner.corner(
obs,
labels=labels,
color="C{}".format(i + 1),
bins=bins,
range=ranges,
weights=norm_weights[i + 1],
fig=plt,
)
plt_prime.label = legend_labels[i + 1]
plt.axes[0].autoscale("y")
plt.axes[3].autoscale("y")
plt.legend(legend_labels)
full_save_name = "{}/data/madgraph_data_{}.png".format(
self.dir, image_save_name if image_save_name is not None else 'temp'
)
if self._cmd_line_origin:
self.log.debug('showing graph via feh... (cmd line interface triggered)')
plt.savefig(full_save_name)
subprocess.Popen(['feh', full_save_name ])
elif image_save_name is not None:
self.log.debug('saving image to \'{}\''.format(full_save_name))
plt.savefig(full_save_name)
else:
self.log.debug('displaying image...')
plt.show()
return [self.error_codes.Success]
# training-related member functions
def train_data(
self,
augmented_samples,
sample_name,
training_name,
augmentation_benchmark,
n_theta_samples=2500,
bins=None,
ranges=None,
override_step=None,
image_save_name=None,
):
if override_step is not None:
self.TRAINING_STEP = override_step
else:
self.TRAINING_STEP = 0
if self.TRAINING_STEP < 1:
ret = self.augment_samples(
sample_name=sample_name,
n_or_frac_augmented_samples=int(augmented_samples),
augmentation_benchmark=augmentation_benchmark,
n_theta_samples=n_theta_samples,
)
if self.error_codes.Success not in ret:
self.log.warning("Quitting Train Data Function")
return ret
self.TRAINING_STEP = 1
if self.TRAINING_STEP < 2:
ret = self.plot_compare_mg5_and_augmented_data(
sample_name,
image_save_name=image_save_name,
bins=bins,
ranges=ranges,
mark_outlier_bins=True,
)
if self.error_codes.Success not in ret:
self.log.warning("Quitting Train Data Function")
return ret
self.TRAINING_STEP = 2
return [self.error_codes.Success]
def augment_samples(
self,
sample_name,
n_or_frac_augmented_samples,
augmentation_benchmark,
n_theta_samples=100,
evaluation_aug_dir=None,
force=False,
):
"""
Augments sample data and saves to a new sample with name <sample_name>.
This allows for multiple different training sets to be saved on a single sample set.
parameters:
sample_name, required:
string, name of the training data objects to modify
n_or_frac_augmented_samples, required:
if type(int), number of samples to draw from simulated madminer data with the sample augmenter
if type(float), fraction of the simulated samples to draw with the sample augmenter.
augmentation_benchmark, required:
string, benchmark to feed to trainer
        returns:
            list of error codes; [error_codes.Success] indicates success.
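        example (illustrative only; the sample and benchmark names below are
        assumptions, not values defined by this project):
            miner.augment_samples(
                sample_name="train_ratio_sample",
                n_or_frac_augmented_samples=100000,
                augmentation_benchmark="sm",
                n_theta_samples=2500,
            )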
"""
# check for processed data
rets = [
self._check_valid_mg5_process(),
self.error_codes.Success if (sample_name not in [d['sample_name'] for _,d in self.list_samples()] or force) else self.error_codes.ExistingAugmentedDataFileError
]
failed = [ret for ret in rets if ret != self.error_codes.Success]
if len(failed) > 0:
self.log.warning("Canceling sample augmentation.")
return failed
# train the ratio
sample_augmenter = madminer.sampling.SampleAugmenter(
filename=self.dir
+ "/data/madminer_{}_with_data_parton.h5".format(self.name)
)
if augmentation_benchmark not in sample_augmenter.benchmarks:
self.log.error("Provided augmentation_benchmark not in given benchmarks!")
self.log.warning("Please choose from the below existing benchmarks:")
if sample_augmenter.n_benchmarks < 1:
self.log.warning(" - None")
for benchmark in sample_augmenter.benchmarks:
self.log.info(" - '{}'".format(benchmark))
failed.append(self.error_codes.InvalidInputError)
samples = 0
# if int, this is a direct number
if type(n_or_frac_augmented_samples) == int:
samples = n_or_frac_augmented_samples
# otherwise, this number represents a fractional quantity
elif type(n_or_frac_augmented_samples) == float:
samples = int(
n_or_frac_augmented_samples * float(sample_augmenter.n_samples)
)
# otherwise we quit
else:
self.log.error("Incorrect input ")
failed.append(self.error_codes.InvalidTypeError)
if samples > 100000000:
self.log.warning(
"Training on {} samples is ridiculous.. reconsider".format(samples)
)
self.log.warning("quitting sample augmentation")
failed.append(self.error_codes.Error)
if n_theta_samples > int(0.10 * samples):
self.log.warning("Scaling n_theta_samples down for input")
self.log.warning("Old: {}".format(n_theta_samples))
n_theta_samples = int(0.05 * samples)
self.log.warning("New: {}".format(n_theta_samples))
if len(failed) > 0:
return failed
# parameter ranges
priors = [
("flat",) + self.params["parameters"][parameter]["parameter_range"]
for parameter in self.params["parameters"]
]
self.log.debug("Priors: ")
for prior in priors:
self.log.debug(" - {}".format(prior))
if evaluation_aug_dir is not None:
aug_dir = evaluation_aug_dir
config_file = "{}/augmented_sample.mmconfig".format(aug_dir)
else:
aug_dir = self.dir + "/data/samples/{}".format(sample_name)
config_file = self._augmentation_config(sample_name)
# train the ratio
sample_augmenter.sample_train_ratio(
theta0=madminer.sampling.random_morphing_points(
n_thetas=n_theta_samples, priors=priors
),
theta1=madminer.sampling.benchmark(augmentation_benchmark),
n_samples=samples,
folder=aug_dir,
filename="augmented_sample_ratio",
)
# extract samples at each benchmark
for benchmark in sample_augmenter.benchmarks:
sample_augmenter.sample_test(
theta=madminer.sampling.benchmark(benchmark),
n_samples=samples,
folder=aug_dir,
filename="augmented_samples_{}".format(benchmark),
)
# save augmentation config file
self._write_config(
{
"augmentation_benchmark": augmentation_benchmark,
"augmentation_samples": samples,
"theta_samples": n_theta_samples,
"sample_name": sample_name,
"all_benchmarks": dict(sample_augmenter.benchmarks),
},
config_file,
)
return [self.error_codes.Success]
def plot_augmented_data_corner(
self,
sample_name,
image_save_name=None,
bins=None,
ranges=None,
include_automatic_benchmarks=True,
):
rets = [
self._check_valid_augmented_data(sample_name=sample_name),
self._check_valid_mg5_process(),
]
failed = [ret for ret in rets if ret != self.error_codes.Success]
if len(failed) > 0:
self.log.warning("Canceling augmented sampling plots.")
return failed
search_key = "x_augmented_samples_"
x_files = [
f
for f in os.listdir(self.dir + "/data/samples/{}".format(sample_name))
if search_key in f
]
x_arrays = dict(
[
(
f[len(search_key) :][: -len(".npy")],
np.load(self.dir + "/data/samples/{}/".format(sample_name) + f),
)
for f in x_files
]
)
x_size = max([x_arrays[obs].shape[0] for obs in x_arrays])
# grab benchmarks and observables from files
(
_,
benchmarks,
_,
_,
_,
observables,
_,
_,
_,
_,
) = madminer.utils.interfaces.madminer_hdf5.load_madminer_settings(
filename=self.dir
+ "/data/madminer_{}_with_data_parton.h5".format(self.name)
)
if not include_automatic_benchmarks:
benchmarks = {
bm: benchmarks[bm]
for bm in benchmarks
if bm in self.params["benchmarks"]
}
x_arrays = {
bm: x_arrays[bm] for bm in x_arrays if bm in self.params["benchmarks"]
}
legend_labels = [label for label in benchmarks]
labels = [label for label in observables]
default_bins = 40
if bins is None:
bins = default_bins
if not hasattr(bins, '__iter__'):
bins = [bins for i in range(len(labels))]
if ranges is None:
ranges = np.mean(
[
[np.min(x_arrays[bm], axis=0), np.max(x_arrays[bm], axis=0)]
for bm in x_arrays
],
axis=0,
).T
# alternate labels?? here they be
# labels=[r'$\Delta \eta_{t\bar{t}}$',r'$p_{T, x0}$ [GeV]']
assert len(labels) == len(bins) == len(ranges)
plt = corner.corner(
x_arrays[legend_labels[0]],
labels=labels,
color="C0",
bins=bins,
range=ranges,
)
plt.label = legend_labels[0]
for i, benchmark in enumerate(legend_labels[1:]):
plt_prime = corner.corner(
x_arrays[benchmark],
labels=labels,
color="C{}".format(i + 1),
bins=bins,
range=ranges,
fig=plt,
)
plt_prime.label = legend_labels[i + 1]
plt.axes[0].autoscale("y")
plt.axes[3].autoscale("y")
plt.legend(legend_labels)
full_save_name = "{}/data/samples/{}/augmented_data_{}.png".format(
self.dir, sample_name, image_save_name if image_save_name is not None else 'temp'
)
if self._cmd_line_origin:
plt.savefig(full_save_name)
subprocess.Popen(['feh', full_save_name ])
elif image_save_name is not None:
plt.savefig(full_save_name)
else:
plt.show()
return [self.error_codes.Success]
def plot_compare_mg5_and_augmented_data(
self,
sample_name,
image_save_name=None,
mark_outlier_bins=False,
bins=40,
ranges=None,
dens=True,
alphas=(0.8, 0.4),
figlen=5,
threshold=2.0,
include_automatic_benchmarks=True,
):
err, x_aug, x_mg5 = self._get_mg5_and_augmented_arrays(
sample_name,
bins,
ranges,
dens,
include_automatic_benchmarks=include_automatic_benchmarks,
params=self.params,
)
if self.error_codes.Success not in err:
self.log.warning("Quitting mg5 vs augmented data plot comparison")
return err
(
_,
benchmarks,
_,
_,
_,
observables,
_,
_,
_,
_,
) = madminer.utils.interfaces.madminer_hdf5.load_madminer_settings(
filename=self.dir
+ "/data/madminer_{}_with_data_parton.h5".format(self.name)
)
if not include_automatic_benchmarks:
benchmarks = {
bm: benchmarks[bm]
for bm in benchmarks
if bm in self.params["benchmarks"]
}
# create lists of each variable
benchmark_list = [benchmark for benchmark in benchmarks]
y_fac = 1.0 # np.diff(x_mg5[1][:,:])
mg5_x = x_mg5[1][:, :, :-1]
mg5_y = x_mg5[0][:, :] * y_fac
mg5_y_err = x_mg5[3][:, :] * y_fac
mg5_y_err_x = 0.5 * (x_mg5[1][:, :, 1:] + x_mg5[1][:, :, :-1])
aug_x = x_aug[1][:, :, :-1]
aug_y = x_aug[0][:, :] * y_fac
flag_x = x_aug[1][:, :, :-1] + np.diff(x_aug[1][:, :]) / 2.0
r, pers = self._compare_mg5_and_augmented_data(x_aug, x_mg5, y_fac, threshold)
fig, axs = plt.subplots(
1, x_aug[0].shape[0], figsize=(figlen * x_aug[0].shape[0], figlen)
)
for i in range(x_aug[0].shape[0]):
colors = [
"tab:blue",
"tab:orange",
"tab:green",
"tab:red",
"tab:purple",
"tab:brown",
"tab:pink",
"tab:gray",
"tab:olive",
"tab:cyan",
]
height_step = np.max([mg5_y[i], aug_y[i]]) / 40.0
# counts = np.zeros(mg5_x[i,0].shape)
for j in range(x_aug[0].shape[1]):
# plot augmented and mg5 histograms
axs[i].plot(
mg5_x[i, j],
mg5_y[i, j],
colors[j],
label="{} mg5".format(benchmark_list[j]),
drawstyle="steps-post",
alpha=alphas[0],
)
axs[i].plot(
aug_x[i, j],
aug_y[i, j],
colors[j],
label="{} aug".format(benchmark_list[j]),
drawstyle="steps-post",
alpha=alphas[1],
)
# plot errorbars
axs[i].errorbar(
mg5_y_err_x[i, j],
mg5_y[i, j],
yerr=mg5_y_err[i, j],
fmt="none",
capsize=1.5,
elinewidth=1.0,
ecolor="black",
alpha=alphas[1],
)
# if needed, mark outlier bins with a character
if mark_outlier_bins:
index = r[i, j] >= threshold
axs[i].plot(
flag_x[i, j][index],
-height_step
* (float(j) + 1.0)
* np.ones(flag_x[i, j][index].shape),
linestyle="None",
marker="x",
color=colors[j],
)
for i, observable in enumerate(observables):
axs[i].set_xlabel(observable)
axs[i].set_yticklabels([])
handles = []
labels = []
for ax in axs:
handles += ax.get_legend_handles_labels()[0]
labels += ax.get_legend_handles_labels()[1]
by_label = collections.OrderedDict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys())
fig.tight_layout()
self.log.info("MG5 Samples: {}".format(x_mg5[2]))
self.log.info("Aug Samples: {}".format(x_aug[2]))
self._tabulate_comparison_information(
r, pers, observables, benchmarks, threshold
)
full_save_name = "{}/data/samples/{}/mg5_vs_augmented_data_{}s.png".format(
self.dir, sample_name, image_save_name if image_save_name is not None else 'temp'
)
if self._cmd_line_origin:
plt.savefig(full_save_name)
subprocess.Popen(['feh', full_save_name ])
elif image_save_name is not None:
plt.savefig(full_save_name)
else:
plt.show()
return [self.error_codes.Success]
def train_method(
self,
sample_name,
training_name,
training_method="alices",
node_architecture=(100, 100, 100),
n_epochs=30,
batch_size=128,
activation_function="relu",
trainer="adam",
initial_learning_rate=0.001,
final_learning_rate=0.0001,
verbose=True,
force=False
):
known_training_methods = ["alices", "alice"]
rets = [self._check_valid_augmented_data(sample_name=sample_name), self._check_valid_madminer_ml()]
failed = [ret for ret in rets if ret != self.error_codes.Success]
if len(failed) > 0:
self.log.warning("Quitting train_method function.")
return failed
if training_method not in known_training_methods:
self.log.error("Unknown raining method {}".format(training_method))
self.log.info("Try again with one of:")
for method in known_training_methods:
self.log.info(" - {}".format(method))
self.log.warning("Quitting train_method function.")
return self.error_codes.UnknownTrainingModelError
existing_files = glob.glob(
"{}/models/{}/{}/*.mmconfig".format(
self.dir, sample_name, training_name
)
)
if len(existing_files) > 0:
self.log.warning("There are trained models with this name!")
for fname in existing_files:
self.log.warning(" - {}".format(fname))
if not force:
self.log.warning(
"Rerun function with a different name, or delete previously trained models."
)
return self.error_codes.ExistingModelError
self.log.warning('Force flag triggered; overwriting previous model.')
# load madminer H5 file??
# self.madminer_object.load()
forge = madminer.ml.ParameterizedRatioEstimator(
n_hidden=node_architecture,
activation=activation_function,
)
forge.train(
method=training_method,
theta="{}/data/samples/{}/theta0_augmented_sample_ratio.npy".format(
self.dir, sample_name
),
x="{}/data/samples/{}/x_augmented_sample_ratio.npy".format(
self.dir, sample_name
),
y="{}/data/samples/{}/y_augmented_sample_ratio.npy".format(
self.dir, sample_name
),
r_xz="{}/data/samples/{}/r_xz_augmented_sample_ratio.npy".format(
self.dir, sample_name
),
t_xz="{}/data/samples/{}/t_xz_augmented_sample_ratio.npy".format(
self.dir, sample_name
),
n_epochs=n_epochs,
batch_size=batch_size,
optimizer=trainer,
initial_lr=initial_learning_rate,
final_lr=final_learning_rate,
verbose="all" if verbose else "some"
)
# size = self._dir_size(
# pathname="{}/models/{}".format(self.dir, sample_name),
# matching_pattern=["{}".format(training_name), "{}_settings.json".format(training_method)]
# )
# if size > 0:
# training_name = "{}{}".format(training_name, size)
forge.save("{}/models/{}/{}/train".format(self.dir, sample_name, training_name))
self._write_config(
{
"training_method": training_method,
"training_name": training_name,
"node_architecture": node_architecture,
"n_epochs": n_epochs,
"batch_size": batch_size,
"activation_function": activation_function,
"trainer": trainer,
"initial_learning_rate": initial_learning_rate,
"final_learning_rate": final_learning_rate,
"sample_name": sample_name,
},
self._training_config(sample_name, training_name),
)
return self.error_codes.Success
def evaluate_method(
self,
training_name,
evaluation_name,
evaluation_samples,
theta_grid_spacing=40,
evaluation_benchmark=None,
sample_name="*",
force=False,
):
params = locals()
for parameter in params:
            if parameter != "self":
self.log.debug("{}: {}".format(parameter, params[parameter]))
# self.log.debug("training name: {}".format(training_name))
# self.log.debug("evaluation name: {}".format(evaluation_name))
# self.log.debug("evaluation samples: {}".format(evaluation_samples))
# self.log.debug("sample name: {}".format(sample_name))
rets = [
self._check_valid_trained_models(
training_name=training_name, sample_name=sample_name
),
self._check_valid_madminer_ml()
]
failed = [ret for ret in rets if ret != self.error_codes.Success]
if len(failed) > 0:
self.log.warning("Quitting train_method function.")
return failed
fname = glob.glob(
"{}/models/{}/{}/train_settings.json".format(
self.dir, sample_name, training_name
)
)[0]
model_params = self._load_config(
"{}/training_model.mmconfig".format(os.path.dirname(fname))
)
sample_params = self._load_config(
self._augmentation_config(model_params["sample_name"])
)
for path_to_check in [
"{}/evaluations/".format(self.dir),
"{}/evaluations/{}/".format(self.dir, model_params["training_name"]),
]:
if not os.path.exists(path_to_check):
os.mkdir(path_to_check)
evaluation_dir = "{}/evaluations/{}/{}/".format(
self.dir, model_params["training_name"], evaluation_name
).rstrip('/').rstrip('\\')
clean_dir = evaluation_dir.split(self.name)[-1].lstrip('/').lstrip('\\')
if os.path.exists(evaluation_dir):
if len([f for f in os.listdir(evaluation_dir) if "log_r_hat" in f]) > 0 and not force:
self.log.error(
"Identically sampled, trained, and named evaluation instance already exists!! Pick another."
)
self.log.error(" - {}".format(evaluation_dir))
return [self.error_codes.ExistingEvaluationError]
else:
os.mkdir(evaluation_dir)
self.log.info(
"evaluating trained method '{}'".format(model_params["training_name"])
)
self.log.debug("Model Params: ")
for spec in model_params:
self.log.debug(" - {}: {}".format(spec, model_params[spec]))
self.log.debug("")
self.log.debug("Aug. Sample Params: ")
for spec in sample_params:
self.log.debug(" - {}: {}".format(spec, sample_params[spec]))
forge = madminer.ml.ParameterizedRatioEstimator()
sample_augmenter = madminer.sampling.SampleAugmenter(
filename=self.dir
+ "/data/madminer_{}_with_data_parton.h5".format(self.name)
)
forge.load(
"{}/train".format(
os.path.dirname(
self._training_config(
model_params["sample_name"], model_params["training_name"]
)
)
)
)
theta_grid = np.mgrid[
[
slice(*tup, theta_grid_spacing * 1.0j)
for tup in [
self.params["parameters"][parameter]["parameter_range"]
for parameter in self.params["parameters"]
]
]
].T
theta_dim = theta_grid.shape[-1]
if evaluation_benchmark is None:
evaluation_benchmark = sample_params["augmentation_benchmark"]
evaluation_sample_config = self._check_for_matching_augmented_data(
evaluation_samples, evaluation_benchmark
)
if evaluation_sample_config is None:
self.augment_samples(
sample_name="{}_eval_augment".format(evaluation_name),
n_or_frac_augmented_samples=evaluation_samples,
augmentation_benchmark=evaluation_benchmark,
n_theta_samples=sample_params["theta_samples"],
evaluation_aug_dir=evaluation_dir,
)
evaluation_sample_config = "{}/augmented_sample.mmconfig".format(
evaluation_dir
)
# stack parameter grid into (N**M X M) size vector (tragic scale factor :--< )
for i in range(theta_dim):
theta_grid = np.vstack(theta_grid)
np.save("{}/theta_grid.npy".format(evaluation_dir), theta_grid)
log_r_hat_dict = {}
for benchmark in sample_augmenter.benchmarks:
ret = forge.evaluate(
theta="{}/theta_grid.npy".format(evaluation_dir),
x="{}/x_augmented_samples_{}.npy".format(
os.path.dirname(evaluation_sample_config), benchmark
),
)
log_r_hat_dict[benchmark] = ret[0]
for benchmark in log_r_hat_dict:
np.save(
"{}/log_r_hat_{}.npy".format(evaluation_dir, benchmark),
log_r_hat_dict[benchmark],
)
self.log.info(
"log_r_hat info saved for benchmark {}: 'log_r_hat_{}.npy'".format(
benchmark, benchmark
)
)
self._write_config(
{
"evaluation_name": evaluation_name,
"training_name": training_name,
"evaluation_samples": evaluation_samples,
"evaluation_benchmark": evaluation_benchmark,
"evaluation_datasets": {
key: "{}/log_r_hat_{}.npy".format(clean_dir, key)
for key in log_r_hat_dict
},
},
self._evaluation_config(training_name, evaluation_name),
)
return self.error_codes.Success
def plot_evaluation_results(
self,
evaluation_name,
training_name=None,
z_contours=[1.0],
fill_contours=True,
bb_b=1.16,
bb_m=0.05,
):
#TODO: implement plotting for more than one parameter!!!!!
#TODO: Maybe n-dimensional, or theta selection??
assert(len(list(self.params['parameters'].keys())) < 2)
self.log.info(
"Plotting evaluation results for evaluation instance '{}'".format(
evaluation_name
)
)
evaluations = self.list_evaluations()
evaluation_tuples = [
evaluation
for evaluation in evaluations
if evaluation[1]["evaluation_name"] == evaluation_name
]
if training_name is not None:
evaluation_tuples = list(
filter(
lambda elt: elt[1]["training_name"] == training_name,
evaluation_tuples,
)
)
if len(evaluation_tuples) == 0:
self.log.error("Evaluation name '{}' not found")
self.log.error("Please choose oneof the following evaluations:")
for evaluation in self.list_evaluations():
self.log.error(" - {}".format(evaluation[1]["evaluation_name"]))
return self.error_codes.NoEvaluatedModelError
elif len(evaluation_tuples) > 1:
self.log.error("Mutiple matching evaluations found. Please specify")
for evaluation_tuple in evaluation_tuples:
self.log.error(" - {}".format(evaluation_tuple[1]["evaluation_name"]))
self.log.error(" AT PATH: {}".format(evaluation_tuple[0]))
self.log.error(
" WITH TRAINING PARENT {}".format(
evaluation_tuple[1]["training_name"]
)
)
return self.error_codes.MultipleMatchingFilesError
# else tuple is CLEAN, with len 1
evaluation_tuple = evaluation_tuples[0]
evaluation_dir = os.path.dirname(evaluation_tuple[0])
self.log.debug(evaluation_dir)
self.log.debug('')
theta_grid = np.load("{}/theta_grid.npy".format(evaluation_dir))
log_r_hat_dict = {
key: np.load('{}/{}'.format(self.dir, evaluation_tuple[1]["evaluation_datasets"][key].replace('//', '/').lstrip(self.name)))
for key in evaluation_tuple[1]["evaluation_datasets"]
}
if len(z_contours) > 0:
alphas = self._scale_to_range_flipped([0.0] + z_contours, [0.05, 0.5])[
1:
]
else:
alphas = []
for p_num, parameter in enumerate(self.params["parameters"]):
for i, benchmark in enumerate(log_r_hat_dict):
mu = np.mean(log_r_hat_dict[benchmark], axis=1)
sigma = np.std(log_r_hat_dict[benchmark], axis=1)
plt.plot(
theta_grid[:, p_num],
mu,
self._DEFAULT_COLOR_CYCLE[i],
label=r"%s, $\mu$" % benchmark,
)
for j, z in enumerate(z_contours):
plt.plot(
theta_grid[:, p_num],
mu + sigma * z,
self._DEFAULT_COLOR_CYCLE[i],
linestyle=self._DEFAULT_LINESTYLE_CYCLE[j],
label=r"%s, $%s\sigma $" % (benchmark, z),
)
plt.plot(
theta_grid[:, p_num],
mu - sigma * z,
self._DEFAULT_COLOR_CYCLE[i],
linestyle=self._DEFAULT_LINESTYLE_CYCLE[j],
)
if fill_contours:
plt.fill_between(
theta_grid[:, p_num],
y1=(mu + sigma * z),
y2=(mu - sigma * z),
facecolor=self._DEFAULT_COLOR_CYCLE[i],
alpha=alphas[j],
)
plt.legend(
bbox_to_anchor=(0.5, bb_b + bb_m * (len(z_contours))),
ncol=len(log_r_hat_dict),
fancybox=True,
loc="upper center",
)
plt.xlabel(r"$\theta_%s$: %s" % (p_num + 1, parameter))
plt.ylabel(
r"$\mathbb{E}_x [ -2\, \log \,\hat{r}(x | \theta, \theta_{SM}) ]$"
)
plt.tight_layout()
plt.savefig(
"{}/evaluation_result_param_{}.png".format(evaluation_dir, parameter),
bbox_inches="tight",
)
plt.show()
return log_r_hat_dict, theta_grid
| 2.40625 | 2 |
models/nvidia.py | infiniai/SDCnd-Behavioural-Cloning | 0 | 12787000 | from keras.models import Sequential
from keras.models import Model
from keras.layers import Cropping2D, Conv2D, MaxPool2D, Flatten, Dense, Dropout, ELU, BatchNormalization, Lambda
from keras.layers import concatenate
import numpy as np
import tensorflow as tf
def to_yuv(img, in_cspace='RGB'):
img_float = tf.cast(img, dtype=tf.float32) / 255.
if (in_cspace == 'RGB'):
img_rgb = tf.image.rgb_to_yuv(img_float)
    elif (in_cspace == 'BGR'):
        # tf.image has no direct BGR converter; reorder channels to RGB first
        img_rgb = tf.image.rgb_to_yuv(img_float[..., ::-1])
else:
        raise ValueError(f"Unknown value of {in_cspace} for parameter 'in_cspace'.")
return img_rgb
def nvidia_model(img, crops=((0, 0), (0, 0)) ):
"""
A CNN model based on the NVIDIA paper implemented with Keras
Functional API.
:rtype: keras.models.Model
"""
x = Lambda(to_yuv, name='to_yuv')(img)
x = Lambda(lambda x : x * 2 - 1, name='normalization')(x)
# Add crop layer if crops are specified
if (np.asarray(crops).flatten() > 0).any():
# Crop the input image to the ROI
x = Cropping2D(cropping=crops)(x)
    # Convolutional Layers
# Conv 1: 24@30x62 [kernel = 5x5; strides = 2x2]
x = Conv2D(filters=24, kernel_size=5, name='L1_conv')(x)
x = ELU()(x)
x = MaxPool2D(strides=(2,2), name='L1_pool')(x)
x = BatchNormalization()(x)
# Conv 2: 36@13x29 [kernel = 5x5; strides = 2x2]
x = Conv2D(filters=36, kernel_size=5, name='L2_conv')(x)
x = ELU()(x)
x = MaxPool2D(strides=(2,2), name='L2_pool')(x)
x = BatchNormalization()(x)
# Conv 3: 48@5x13 [kernel = 5x5; strides = 2x2]
x = Conv2D(filters=48, kernel_size=5, name='L3_conv')(x)
x = ELU()(x)
x = MaxPool2D(strides=(2,2), name='L3_pool')(x)
x = BatchNormalization()(x)
# Conv 4: 64@3x11 [kernel = 3x3; strides = 1x1]
x = Conv2D(filters=64, kernel_size=3, name='L4_conv')(x)
x = ELU()(x)
x = BatchNormalization()(x)
# Conv 5: 64@1x9 [kernel = 3x3; strides = 1x1]
x = Conv2D(filters=64, kernel_size=3, name='L5_conv')(x)
x = ELU()(x)
x = BatchNormalization()(x)
# 2D -> 1D Flatten to feed into FC layers
flattened = Flatten()(x)
xst = Dense(128, name='FC1_steer')(flattened)
xst = ELU()(xst)
xst = Dropout(rate=0.5)(xst)
xst = Dense(64, name='FC2_steer')(xst)
xst = ELU()(xst)
xst = Dropout(rate=0.5)(xst)
xst = Dense(16, name='FC3_steer')(xst)
xst = ELU()(xst)
xst = Dropout(rate=0.5)(xst)
    # Output layer
out_steer = Dense(1, name='OUT_steer')(xst)
model = Model(inputs=img, outputs=out_steer)
return model
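# Usage sketch (not from the original project): the input shape and crop values below
# are assumptions chosen to match typical 160x320 RGB simulator frames, purely to
# illustrate how nvidia_model is wired up.
if __name__ == "__main__":
    from keras.layers import Input
    frame = Input(shape=(160, 320, 3))
    model = nvidia_model(frame, crops=((60, 25), (0, 0)))
    model.compile(optimizer="adam", loss="mse")
    model.summary()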
| 2.90625 | 3 |
osrefl/loaders/reduction/examples/e3a12/__init__.py | reflectometry/osrefl | 2 | 12787001 | """
Sample data from NG-1:
e3a12 rem (71g) cofe2o4,cofe(10nm),ta
"""
import numpy, os
from osrefl.loaders.reduction.data import Data,PolarizedData
PATH = os.path.dirname(os.path.realpath(__file__))
def loadABCD(file):
data = PolarizedData(xlabel="Qz",xunits="invA",vlabel="counts")
for ext,d in [('A',data.pp), ('B',data.pm), ('C',data.mp), ('D',data.mm)]:
A = numpy.loadtxt(os.path.join(PATH,file+ext))
d.x,d.v,d.dv = A[:,0],A[:,1],A[:,2]
return data
def slits():
data = loadABCD('e3a12042.slit')
data.set(xlabel="slit1",xunits="mm",vlabel="counts")
for ext,d in [('A',data.pp), ('B',data.pm), ('C',data.mp), ('D',data.mm)]:
A = numpy.loadtxt(os.path.join(PATH,'e3a12042.slit'+ext))
d.x,d.v,d.dv = A[:,0],A[:,1],A[:,2]
return data
def spec():
data = PolarizedData(xlabel="Qz",xunits="invA",vlabel="counts")
for ext,d in [('A',data.pp), ('B',data.pm), ('C',data.mp), ('D',data.mm)]:
A = numpy.loadtxt(os.path.join(PATH,'e3a12026.spec'+ext))
d.x,d.v,d.dv = A[:,0],A[:,1],A[:,2]
return data
if __name__ == "__main__":
print slits()
| 2.390625 | 2 |
hw-code/HW4_7-1a.py | elsdrium/1022-Algorithm | 0 | 12787002 | <filename>hw-code/HW4_7-1a.py<gh_stars>0
def HoarePartition(A, p, r):
x = A[p]
i = p - 1
j = r + 1
counter=0
while True:
counter += 1
print( 'Iteration ' + str(counter) + ':' )
while True:
j -= 1
if A[j] <= x:
break
while True:
i += 1
if A[i] >= x:
break
print( 'i = ' + str(i) )
print( 'j = ' + str(j) )
if i < j:
A[i], A[j] = A[j], A[i]
print ('A = ' + str(A))
else:
print ('A = ' + str(A))
return j
A = [13, 19, 9, 5, 12, 8, 7, 4, 11, 2, 6, 21]
print('Originally... \nA =', A)
j = HoarePartition(A, 0, len(A)-1)
print('After Partition operation... \nA =', A)
print('j =', j)
| 2.875 | 3 |
utils.py | Danglich/flowers102_retrieval_streamlit | 3 | 12787003 | import os
import pickle
import cv2
import numpy as np
import streamlit as st
import tensorflow as tf
import grpc
from tensorflow_serving.apis import (
prediction_service_pb2_grpc,
predict_pb2
)
from consts import (
TRAIN_FD,
TRAIN_PKL_FP,
TRAIN_LABEL_FP
)
@st.cache
def load_prec_embs():
with open(TRAIN_PKL_FP, "rb") as f:
train_embs = pickle.load(f)
with open(TRAIN_LABEL_FP, "rb") as f:
train_labels = pickle.load(f)
train_img_fps = wfile(TRAIN_FD)
assert len(train_img_fps) == train_embs.shape[0]
return train_img_fps, train_embs, train_labels
def wfile(root):
img_fps = []
for path, subdirs, files in os.walk(root):
for name in files:
img_fps.append(os.path.join(path, name))
return sorted(img_fps)
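# For reference: wfile("some/dir") walks the tree and returns every file path beneath it
# in sorted order; load_prec_embs above relies on this ordering lining up with the rows
# of the precomputed embedding matrix.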
class FlowerArc:
def __init__(self,
host="localhost",
port=8500,
model_name="flower",
model_signature="flower_signature",
input_name="input_image",
output_name="emb_pred"):
self.host = host
self.port = port
self.channel = grpc.insecure_channel("{}:{}".format(
self.host, self.port
))
self.stub = prediction_service_pb2_grpc.PredictionServiceStub(
self.channel
)
self.input_name = input_name
self.output_name = output_name
self.request = predict_pb2.PredictRequest()
self.request.model_spec.name = model_name
self.request.model_spec.signature_name = model_signature
def norm_mean_std(self,
img):
img = img / 255
img = img.astype('float32')
mean = np.mean(img, axis=(0, 1, 2))
std = np.std(img, axis=(0, 1, 2))
img = (img - mean) / std
return img
def test_preprocess(self,
img,
img_size=(384, 384),
expand=True):
img = cv2.resize(img, img_size)
# normalize image
img = self.norm_mean_std(img)
if expand:
img = np.expand_dims(img, axis=0)
return img
def predict(self, img):
assert img.ndim == 3
img = self.test_preprocess(img)
self.request.inputs[self.input_name].CopyFrom(
tf.contrib.util.make_tensor_proto(
img,
dtype=tf.float32,
shape=img.shape
)
)
result = self.stub.Predict(self.request, 10.0)
emb_pred = tf.contrib.util.make_ndarray(
result.outputs[self.output_name]
)
return emb_pred
class Saliency:
def __init__(self,
host="localhost",
port=8500,
model_name="saliency",
model_signature="serving_default",
input_name="input_image",
output_name="pred_mask"):
self.host = host
self.port = port
self.channel = grpc.insecure_channel("{}:{}".format(
self.host, self.port
))
self.stub = prediction_service_pb2_grpc.PredictionServiceStub(
self.channel
)
self.input_name = input_name
self.output_name = output_name
self.request = predict_pb2.PredictRequest()
self.request.model_spec.name = model_name
self.request.model_spec.signature_name = model_signature
def test_preprocess(self,
img,
img_size=(320, 240),
expand=True):
img = cv2.resize(img, img_size)
if expand:
img = np.expand_dims(img, axis=0)
return img
def predict(self, img):
assert img.ndim == 3
img = self.test_preprocess(img)
self.request.inputs[self.input_name].CopyFrom(
tf.contrib.util.make_tensor_proto(
img,
dtype=tf.float32,
shape=img.shape
)
)
result = self.stub.Predict(self.request, 10.0)
pred_mask = tf.contrib.util.make_ndarray(
result.outputs[self.output_name]
)
return pred_mask
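# Usage sketch: assumes a TensorFlow Serving instance exposing the "flower" model on
# localhost:8500 and a local query image "query.jpg"; neither is guaranteed by this
# module, so treat this purely as an illustration of the retrieval flow.
if __name__ == "__main__":
    train_img_fps, train_embs, train_labels = load_prec_embs()
    flower_model = FlowerArc()
    query_img = cv2.imread("query.jpg")[:, :, ::-1]  # OpenCV loads BGR; convert to RGB
    query_emb = flower_model.predict(query_img)  # assumed shape: (1, emb_dim)
    # rank gallery images by cosine similarity to the query embedding
    sims = (train_embs @ query_emb.T).squeeze()
    sims /= np.linalg.norm(train_embs, axis=1) * np.linalg.norm(query_emb) + 1e-12
    for idx in np.argsort(-sims)[:5]:
        print(train_img_fps[idx], train_labels[idx], float(sims[idx]))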
| 2.546875 | 3 |
everyaction/objects.py | partiallyderived/everyaction-client | 0 | 12787004 | """
This module contains EveryAction objects, such as :class:`.Person` or :class:`.CanvassResponse`, which represent
structured EveryAction data directly corresponding to objects in the
`EveryAction 8 VAN API docs <https://developers.everyaction.com/van-api>`__.
"""
from datetime import datetime
from typing import Any, ClassVar, Dict, Iterable, List, Optional, Union
from everyaction.core import EAObject, EAObjectWithID, EAObjectWithIDAndName, EAObjectWithName, EAProperty, EAValue
from everyaction.exception import EAException
__all__ = [
'ActivistCode',
'ActivistCodeData',
'ActivistCodeResponse',
'Address',
'AddRegistrantsResponse',
'Adjustment',
'AdjustmentResponse',
'Attribution',
'AvailableValue',
'APIKeyProfile',
'AVEVDataFileAction',
'BallotRequestType',
'BallotReturnStatus',
'BallotType',
'BankAccount',
'BargainingUnit',
'BargainingUnitJobClass',
'BatchForm',
'BatchProgram',
'BulkImportAction',
'BulkImportField',
'BulkImportJob',
'BulkImportJobData',
'Canvasser',
'CanvassContext',
'CanvassFileRequest',
'CanvassResponse',
'ChangedEntityBulkImportField',
'ChangedEntityExportJob',
'ChangedEntityExportRequest',
'ChangedEntityField',
'ChangeType',
'Code',
'CodeResult',
'Column',
'Commitment',
'ConfirmationEmailData',
'Constraints',
'ContactHistory',
'ContactType',
'Contribution',
'Currency',
'CustomField',
'CustomFieldValue',
'Department',
'Designation',
'DisclosureFieldValue',
'Disbursement',
'DistrictField',
'DistrictFieldValue',
'Email',
'EmailMessage',
'EmailMessageContent',
'EmailMessageContentDistributions',
'Employer',
'EmployerBargainingUnit',
'Error',
'Event',
'EventRole',
'EventShift',
'EventType',
'ExportJob',
'ExportJobType',
'ExtendedSourceCode',
'FieldValueMapping',
'File',
'FileLoadingJob',
'FinancialBatch',
'Folder',
'GeoCoordinate',
'Identifier',
'InputType',
'IsCellStatus',
'JobActionType',
'JobClass',
'JobFile',
'JobNotification',
'KeyValuePair',
'Listener',
'ListLoadCallbackData',
'Location',
'MappingParent',
'MappingType',
'MappingTypeData',
'Membership',
'MembershipSourceCode',
'MemberStatus',
'MiniVANExport',
'Note',
'NoteCategory',
'OnlineActionsForm',
'Organization',
'OrganizationPhone',
'Person',
'Phone',
'Pledge',
'PreferredPronoun',
'PrintedList',
'ProgramType',
'Registrant',
'RegistrationForm',
'RelationalMapping',
'Relationship',
'ReportedEthnicity',
'ReportedGender',
'ReportedLanguagePreference',
'ReportedRace',
'ReportedSexualOrientation',
'ResultCode',
'SavedList',
'SavedListData',
'SavedListLoadAction',
'ScheduleType',
'Score',
'ScoreApprovalCriteria',
'ScoreLoadAction',
'ScoreUpdate',
'ScriptResponse',
'ShiftType',
'Signup',
'Status',
'Story',
'StoryStatus',
'Subgroup',
'SupportedEntity',
'SupporterGroup',
'SupportField',
'Suppression',
'SurveyQuestion',
'SurveyCanvassResponse',
'SurveyResponse',
'Target',
'TargetExportJob',
'UpdateStatistics',
'User',
'ValueMapping',
'ValueMappingData',
'VolunteerActivityResponse',
'VoterRegistrationBatch',
'WorkArea',
'Worksite'
]
# Class definitions and additions to shared properties are organized by their "orders".
# A property has order n > 1 when its factory depends on at least one class of order n - 1 and it depends on classes of
# no higher order than n - 1. A property has order 1 when its factory does not depend on an EAObject child definition.
# Similarly, a Class has order n > 1 when it has properties of order n or is a subclass of a class of order n - 1 and
# has no higher order properties/base classes. A Class has order 1 when it has only properties of order 1 or no
# properties at all and does not inherit except from EAObject, EAObjectWithID, or EAObjectWithIDAndName.
# The organization style is the following, with each component in alphabetical order: 1st order properties in, 1st order
# classes which may depend on 1st order properties, 2nd order properties whose factories depend on a 1st order class,
# 2nd order classes which may depend on 1st or 2nd order properties or a 1st order class, and so on. This organizational
# structure allows for a consistent way to specify entities after their dependencies and in alphabetical order
# independent from their dependencies are named.
# Expand is handled specially
def _expand_factory(arg: Union[str, Iterable[str]]) -> str:
if not isinstance(arg, str):
# comma-delimited str or Iterable[str] allowed for expand.
# Note: str is Iterable, be careful when modifying this code.
if isinstance(arg, Iterable):
return ','.join(arg)
else:
raise TypeError(
f'Expected str or Iterable for expand, found {type(arg).__name__}: {arg}'
)
return arg
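# For example, both of these normalize to the same comma-delimited string:
#   _expand_factory('phones,emails')        -> 'phones,emails'
#   _expand_factory(['phones', 'emails'])   -> 'phones,emails'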
# --- Circular Reference Factories ---
# The following functions are factories for objects which have circular references.
# For example, Organizations have a field which is another Organization, and Departments have employers and vice-versa.
def _employer_factory(*args: Any, **kwargs: Any) -> 'Employer':
return Employer(*args, **kwargs)
def _organization_factory(*args: Any, **kwargs: Any) -> 'Organization':
return Organization(*args, **kwargs)
# --- First Order Properties and Objects ---
EAProperty.share(
acceptedOneTimeAmount=EAProperty('accepted_one_time'),
acceptedRecurringAmount=EAProperty('accepted_recurring', 'recurring'),
action=EAProperty(),
actionType=EAProperty('type'),
added=EAProperty(),
additionalEnvelopeName=EAProperty('additional_envelope'),
additionalSalutation=EAProperty(),
adjustmentType=EAProperty('type'),
allowMultipleMode=EAProperty('multiple_mode', 'mode'),
alternateId=EAProperty('alternate', 'alt'),
amount=EAProperty(),
amountAttributed=EAProperty('amount'),
apiKeyTypeName=EAProperty('type_name', 'type'),
areSubgroupsSticky=EAProperty('sticky_subgroups', 'sticky_groups'),
assignableTypes=EAProperty(singular_alias='assignable_type'),
assignedValue=EAProperty('value'),
attributionType=EAProperty('type'),
average=EAProperty(),
averageValue=EAProperty('average'),
badValues=EAProperty('bad'),
bankAccount=EAProperty('account'),
bankAccountId=EAProperty('bank_account', 'account'),
batchCode=EAProperty('batch'),
biographyImageUrl=EAProperty('biography_image', 'bio_image_url', 'bio_image'),
bounceCount=EAProperty('bounces'),
campaignId=EAProperty('campaign'),
canBeMappedToColumn=EAProperty('column_mappable', 'mappable'),
canBeRepeatable=EAProperty('allows_repeats'),
canHaveGoals=EAProperty('allows_goals'),
canHaveMultipleLocations=EAProperty('allows_multiple_locations'),
canHaveMultipleShifts=EAProperty('allows_multiple_shifts'),
canHaveRoleMaximums=EAProperty('allows_role_maximums'),
canHaveRoleMinimums=EAProperty('allows_role_minimums'),
canvassedBy=EAProperty('canvasser'),
canvassFileRequestId=EAProperty('canvass_id'),
canvassFileRequestGuid=EAProperty('canvass_guid'),
caseworkCases=EAProperty('cases', singular_alias='case'),
caseworkIssues=EAProperty('issues', singular_alias='issue'),
caseworkStories=EAProperty('stories', singular_alias='story'),
ccExpirationMonth=EAProperty('cc_exp_month'),
ccExpirationYear=EAProperty('cc_exp_year'),
changeTypeName=EAProperty('change_type', 'change'),
channelTypeName=EAProperty('channel_type', 'channel'),
checkDate=EAProperty(),
checkNumber=EAProperty(),
city=EAProperty(),
code=EAProperty(),
codeId=EAProperty('code'),
codeIds=EAProperty('codes'),
collectedLocationId=EAProperty('collected_location', 'location'),
color=EAProperty(),
columnDelimiter=EAProperty('delimiter'),
columnName=EAProperty('column'),
committeeName=EAProperty('committee'),
confidenceLevel=EAProperty('confidence'),
contact=EAProperty(),
contactMethodPreferenceCode=EAProperty('contact_preference_code', 'preference_code', 'contact_preference'),
contactMode=EAProperty(),
contactModeId=EAProperty('contact_mode'),
contactTypeId=EAProperty('contact_type'),
contributionCount=EAProperty('contributions'),
contributionId=EAProperty('contribution'),
contributionSummary=EAProperty(),
contributionTotal=EAProperty(),
copyToEmails=EAProperty('copy_to', is_array=True),
countryCode=EAProperty('country'),
coverCostsAmount=EAProperty('cover_costs'),
createdAfter=EAProperty('after'),
createdBefore=EAProperty('before'),
createdBy=EAProperty('creator'),
createdByCommitteeId=EAProperty('committee'),
createdByEmail=EAProperty('created_by', 'creator_email', 'creator'),
createdDate=EAProperty('created'),
creditCardLast4=EAProperty('cc_last4', 'last4'),
currency=EAProperty(),
currencyType=EAProperty('type'),
custom=EAProperty(),
customFieldGroupId=EAProperty('group'),
customFieldId=EAProperty('field'),
customFieldsGroupType=EAProperty('group_type', 'type'),
customPropertyKey=EAProperty('property_key', 'custom_key', 'key'),
cycle=EAProperty(),
databaseMode=EAProperty('mode'),
databaseName=EAProperty(),
dateAdjusted=EAProperty('adjusted', 'date'),
dateCanvassed=EAProperty('canvassed'),
dateCardsSent=EAProperty('cards_sent'),
dateChangedFrom=EAProperty('changed_from'),
dateChangedTo=EAProperty('changed_to'),
dateClosed=EAProperty('closed'),
dateCreated=EAProperty('created'),
dateDeposited=EAProperty('deposited'),
dateExpired=EAProperty('expired'),
dateExpireMembership=EAProperty('expiration_date', 'expiration', 'expires'),
dateIssued=EAProperty('issued'),
dateLastRenewed=EAProperty('last_renewed', 'renewed'),
dateModified=EAProperty('modified'),
dateOfBirth=EAProperty('birthday'),
dateOpened=EAProperty('opened'),
datePosted=EAProperty('posted'),
dateProcessed=EAProperty('processed'),
dateReceived=EAProperty('received'),
dateScheduled=EAProperty('scheduled'),
dateSent=EAProperty('sent'),
dateStartMembership=EAProperty('start_date', 'started'),
dateThanked=EAProperty('thanked'),
decreasedBy=EAProperty('decrease'),
defaultEndTime=EAProperty('default_end'),
defaultStartTime=EAProperty('default_start'),
depositDate=EAProperty(),
depositNumber=EAProperty(),
detailedCode=EAProperty(),
description=EAProperty('desc'),
designationId=EAProperty('designation'),
dialingPrefix=EAProperty('prefix'),
directMarketingCode=EAProperty('marketing_code'),
disclosureFieldValue=EAProperty('field_value', 'disclosure_value', 'value'),
displayMode=EAProperty(),
displayName=EAProperty('display'),
doorCount=EAProperty('door'),
dotNetTimeZoneId=EAProperty('dot_net_time_zone', 'time_zone'),
downloadUrl=EAProperty('download'),
duesAttributionTypeName=EAProperty('dues_attribution_type', 'dues_attribution'),
duesEntityTypeName=EAProperty('dues_entity_type', 'dues_entity'),
duplicateRows=EAProperty('duplicates'),
electionRecords=EAProperty(singular_alias='election_record'),
electionType=EAProperty(),
email=EAProperty(),
employer=EAProperty(factory=_employer_factory),
employerBargainingUnitId=EAProperty('employer_bargaining_unit'),
employerId=EAProperty('employer'),
endDate=EAProperty('end'),
endTime=EAProperty('end'),
endTimeOverride=EAProperty('end_override', 'end'),
enrollmentTypeName=EAProperty('enrollment_type', 'enrollment'),
envelopeName=EAProperty('envelope'),
errorCode=EAProperty('error'),
eventId=EAProperty('event'),
eventTypeId=EAProperty('event_type', 'type'),
eventTypeIds=EAProperty('event_types'),
excludeChangesFromSelf=EAProperty('exclude_self'),
expand=EAProperty(factory=_expand_factory),
expectedContributionCount=EAProperty('expected_count'),
expectedContributionTotalAmount=EAProperty('expected_total', 'expected_amount'),
exportedRecordCount=EAProperty('exported_records', 'record_count', 'records', 'count'),
ext=EAProperty(),
externalId=EAProperty('external'),
fieldName=EAProperty('field'),
fieldType=EAProperty('field', 'type'),
fileSizeKbLimit=EAProperty('size_kb_limit', 'kb_limit'),
financialBatchId=EAProperty('financial_batch'),
finderNumber=EAProperty('finder'),
firstName=EAProperty('first'),
folderId=EAProperty('folder'),
folderName=EAProperty('folder'),
formalEnvelopeName=EAProperty('formal_envelope'),
formalSalutation=EAProperty(),
formSubmissionCount=EAProperty('form_submissions', 'forms', 'submissions'),
frequency=EAProperty(),
fromEmail=EAProperty(),
fromName=EAProperty('sender'),
fromSubject=EAProperty('subject'),
fullName=EAProperty(),
generatedAfter=EAProperty('after'),
generatedBefore=EAProperty('before'),
goal=EAProperty(),
groupId=EAProperty(),
groupName=EAProperty(),
groupType=EAProperty(),
guid=EAProperty(),
hasHeader=EAProperty(),
hasMyCampaign=EAProperty('my_campaign'),
hasMyVoters=EAProperty('my_voters'),
hasPredefinedValues=EAProperty('has_predefined'),
hasQuotes=EAProperty(),
hint=EAProperty(),
increasedBy=EAProperty('increase'),
includeAllAutoGenerated=EAProperty('include_auto_generated', 'include_generated'),
includeAllStatuses=EAProperty('include_statuses', 'include_closed'),
includeInactive=EAProperty(),
includeUnassigned=EAProperty(),
inputTypeId=EAProperty('input_type'),
interventionCallbackUrl=EAProperty('intervention_url', 'callback_url'),
invalidCharacters=EAProperty('invalid_chars'),
invalidRowsFileUrl=EAProperty('invalid_rows_url', 'invalid_url'),
inRepetitionWithEventId=EAProperty('repeat_of'),
isActive=EAProperty('active'),
isApplicable=EAProperty('applicable'),
isAssociatedWithBadges=EAProperty('associated_with_badges'),
isAtLeastOneLocationRequired=EAProperty('needs_location', 'location_required', 'requires_location'),
isAutoGenerated=EAProperty('auto_generated', 'generated'),
isConfirmationEmailEnabled=EAProperty('confirmation_email_enabled', 'confirmation_enabled', 'confirmation'),
isConfirmedOptInEnabled=EAProperty('confirmed_opt_in_enabled', 'opt_in_enabled', 'opt_in'),
isCoreField=EAProperty('is_core', 'core_field', 'core'),
isCustomDistrict=EAProperty('custom_district', 'is_custom', 'custom'),
isEditable=EAProperty('editable'),
isEventLead=EAProperty('event_lead', 'lead'),
isExportable=EAProperty('exportable'),
isMember=EAProperty('member'),
isMultiAssign=EAProperty('multi_assign'),
isMyOrganization=EAProperty('my_organization', 'my_org'),
isOfflineSignup=EAProperty('offline_property', 'offline'),
isOnlineActionsAvailable=EAProperty('online_actions_available', 'actions_available'),
isOnlyEditableByCreatingUser=EAProperty(
'only_editable_by_creating_user',
'only_editable_by_creator',
'only_creator_may_edit'
),
isOpen=EAProperty('open'),
isPreferred=EAProperty('preferred'),
isPubliclyViewable=EAProperty('publicly_viewable', 'public'),
isRecurringEmailEnabled=EAProperty('recurring_email_enabled', 'recurring_enabled', 'recurring'),
isRequired=EAProperty('required'),
isSearchable=EAProperty('searchable'),
isSharedWithChildCommitteesByDefault=EAProperty('default_share_child'),
isSharedWithMasterCommitteeByDefault=EAProperty('default_share_master'),
isSubscribed=EAProperty('subscribed'),
isUpsellAccepted=EAProperty('upsell_accepted'),
isUpsellShown=EAProperty('upsell_shown'),
isViewRestricted=EAProperty('view_restricted'),
jobStatus=EAProperty('status'),
key=EAProperty(),
keyReference=EAProperty('reference'),
lastName=EAProperty('last'),
lat=EAProperty(),
levelId=EAProperty(),
levelName=EAProperty(),
line1=EAProperty(),
line2=EAProperty(),
line3=EAProperty(),
linkedCreditCardPaymentDisbursementId=EAProperty('credit_card_payment'),
linkedJointFundraisingContributionId=EAProperty(
'joint_fundraising_contribution', 'fundraising_contribution', 'fundraising'
),
linkedPartnershipContributionId=EAProperty('partnership_contribution', 'partnership'),
linkedReimbursementDisbursementId=EAProperty('reimbursement'),
linksClickedCount=EAProperty('links_clicked'),
listCount=EAProperty('list'),
listDescription=EAProperty('description', 'desc'),
listName=EAProperty('list', 'name'),
loadStatus=EAProperty('status'),
lon=EAProperty(),
mappingTypeName=EAProperty('mapping_type', 'mapping'),
matchedRows=EAProperty('matched'),
matchedRowsCount=EAProperty('matched_count', 'matched'),
matchPercent=EAProperty('match', 'percent'),
max=EAProperty(),
maxDoorCount=EAProperty('max_door'),
maxFieldLength=EAProperty('max_length', 'max_len'),
maxLength=EAProperty(),
maxPeopleCount=EAProperty('max_people'),
maxTextboxCharacters=EAProperty('max_box_chars'),
maxValue=EAProperty('max'),
medianValue=EAProperty('median'),
mediumName=EAProperty('medium'),
message=EAProperty(),
middleName=EAProperty('middle'),
min=EAProperty(),
minValue=EAProperty('min'),
modifiedBy=EAProperty('modifier'),
modifiedByEmail=EAProperty('modified_by', 'modifier_email', 'modifier'),
nextTransactionDate=EAProperty('next_transaction', 'next'),
nickname=EAProperty(),
notes=EAProperty(),
nulledOut=EAProperty('nulled'),
number=EAProperty(),
numberOfCards=EAProperty('num_cards', 'cards'),
numberTimesRenewed=EAProperty('times_renewed', 'renewals'),
occupation=EAProperty(),
onlineReferenceNumber=EAProperty('reference_number', 'ref_number'),
onlyMyBatches=EAProperty('only_mine'),
openCount=EAProperty('opens'),
optInStatus=EAProperty('opt_in'),
orderby=EAProperty('order_by'),
organizationContactName=EAProperty('organization_contact', 'org_contact'),
organizationContactOfficialName=EAProperty('organization_contact_official', 'org_contact_official'),
organizationId=EAProperty('organization', 'org'),
organizationRoles=EAProperty('org_roles', singular_alias='org_role'),
organizeAt=EAProperty(),
originalAmount=EAProperty('original'),
originalRowCount=EAProperty('original_count', 'original'),
outOfRange=EAProperty('OOR'),
overwriteExistingListId=EAProperty('overwrite_existing_id', 'overwrite_id', 'overwrite'),
parentCodeId=EAProperty('parent_code'),
parentDepartmentId=EAProperty('parent_department', 'parent'),
parentFieldId=EAProperty('parent_field', 'parent'),
parentFieldName=EAProperty('parent_field', 'parent'),
parentId=EAProperty('parent'),
parentOrganization=EAProperty('parent', factory=_organization_factory),
parentValueId=EAProperty('parent_value'),
party=EAProperty(),
paymentType=EAProperty(),
personIdColumn=EAProperty('id_column', 'id_col'),
personIdType=EAProperty('person_type'),
personType=EAProperty(),
phone=EAProperty(),
phoneId=EAProperty('phone'),
phoneNumber=EAProperty('number'),
points=EAProperty(),
preview=EAProperty(),
primaryContact=EAProperty(),
primaryCustomField=EAProperty('primary_custom'),
processedAmount=EAProperty(),
processedCurrency=EAProperty(),
professionalSuffix=EAProperty(),
properties=EAProperty(singular_alias='property'),
question=EAProperty(),
questionId=EAProperty('question'),
recipientCount=EAProperty('recipients'),
recordCount=EAProperty('records'),
recurrenceType=EAProperty('recurrence'),
referenceCode=EAProperty('reference'),
relationshipId=EAProperty('relationship'),
remainingAmount=EAProperty('remaining'),
replyToEmail=EAProperty('reply_to'),
requestedCustomFieldIds=EAProperty('custom_field_ids', 'custom_fields', singular_alias='custom_field'),
requestedFields=EAProperty('fields', singular_alias='field'),
requestedIds=EAProperty('ids', singular_alias='requested_id'),
resourceType=EAProperty('resource'),
resourceTypes=EAProperty('resources', singular_alias='resource'),
resourceUrl=EAProperty('url'),
responseId=EAProperty('response'),
result=EAProperty(),
resultCodeId=EAProperty('result_code'),
resultFileColumnName=EAProperty('result_column_name', 'result_column', 'column_name', 'column'),
resultFileSizeKbLimit=EAProperty('size_kb_limit', 'kb_limit'),
resultFileSizeLimitKb=EAProperty('size_kb_limit', 'kb_limit'),
resultOutcomeGroup=EAProperty('outcome_group'),
salutation=EAProperty(),
savedListId=EAProperty('saved_list', 'list'),
scoreColumn=EAProperty('score_col'),
scoreId=EAProperty('score'),
scriptQuestion=EAProperty('question'),
searchKeyword=EAProperty('search', 'keyword'),
selectedOneTimeAmount=EAProperty('selected_one_time'),
selfReportedEthnicities=EAProperty('ethnicities', is_array=True),
selfReportedEthnicity=EAProperty('ethnicity'),
selfReportedGenders=EAProperty('genders', singular_alias='gender'),
selfReportedLanguagePreference=EAProperty('language_preference', 'language'),
selfReportedRace=EAProperty('race'),
selfReportedRaces=EAProperty('races', is_array=True),
selfReportedSexualOrientations=EAProperty('sexual_orientations', singular_alias='sexual_orientation'),
senderDisplayName=EAProperty('sender_display', 'sender_name'),
senderEmailAddress=EAProperty('sender_email'),
sex=EAProperty(),
shortName=EAProperty('short'),
smsOptInStatus=EAProperty('sms_opt_in'),
sourceUrl=EAProperty('source', 'url'),
sourceValue=EAProperty('source'),
startingAfter=EAProperty('after'),
startingBefore=EAProperty('before'),
startDate=EAProperty('start'),
startTime=EAProperty('start'),
startTimeOverride=EAProperty('start_override', 'start'),
stateCode=EAProperty('state'),
stateOrProvince=EAProperty('state', 'province'),
staticValue=EAProperty('static'),
status=EAProperty(),
statuses=EAProperty(),
statusName=EAProperty('status'),
subscriptionStatus=EAProperty('status'),
supporterGroupId=EAProperty('supporter_group', 'group'),
suffix=EAProperty(),
surveyQuestionId=EAProperty('question'),
surveyResponseId=EAProperty('response'),
syncPeriodEnd=EAProperty('sync_end', 'end'),
syncPeriodStart=EAProperty('sync_start', 'start'),
targetId=EAProperty('target'),
targetValue=EAProperty('target'),
text=EAProperty(),
title=EAProperty(),
tolerance=EAProperty('tolerance'),
totalDuesPaid=EAProperty('total_paid'),
totalRows=EAProperty('total'),
turfName=EAProperty('turf'),
type=EAProperty(),
typeAndName=EAProperty(),
typeId=EAProperty('type'),
unitNo=EAProperty('unit'),
unmatchedRowsCount=EAProperty('unmatched_count', 'unmatched'),
unsubscribeCount=EAProperty('unsubscribes'),
upsellType=EAProperty('upsell'),
url=EAProperty(),
username=EAProperty('user'),
userFirstName=EAProperty('first_name', 'first'),
userLastName=EAProperty('last_name', 'last'),
value=EAProperty(),
vanId=EAProperty('van'),
webhookUrl=EAProperty('webhook'),
website=EAProperty(),
zipOrPostalCode=EAProperty('zip_code', 'zip', 'postal_code', 'postal'),
ID=EAProperty()
)
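# Rough illustration of the aliasing above (behavior inferred from this module, not from
# the EveryAction docs): a property shared as firstName with alias 'first' lets client
# code write either spelling, e.g. Person(first='Ada') and Person(firstName='Ada') should
# populate the same underlying field.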
class ActivistCode(
EAObjectWithIDAndName,
_prefix='activistCode',
_keys={'description', 'isMultiAssign', 'mediumName', 'scriptQuestion', 'shortName', 'status', 'type'}
):
"""Represents an `Activist Code
<https://docs.everyaction.com/reference/activist-codes#common-models-1>`__.
"""
class ActivistCodeData(
EAObjectWithIDAndName,
_prefix='activistCode',
_prefixed={'name', 'typeAndName'},
_keys={'canvassedBy', 'dateCanvassed', 'dateCreated'}
):
"""Represents the data associated with responses to `getting Activist Codes
<https://docs.everyaction.com/reference/people#peoplevanidactivistcodes>`__.
"""
class Adjustment(EAObject, _keys={'adjustmentType', 'amount', 'datePosted'}):
"""Represents the data associated with responses to `adjusting a Contribution
<https://docs.everyaction.com/reference/contributions#contributionscontributionidadjustments>`__.
"""
class AdjustmentResponse(EAObject, _keys={'contributionId', 'dateAdjusted', 'originalAmount', 'remainingAmount'}):
"""Represents the data associated with a response to a `Contribution adjustment
<https://docs.everyaction.com/reference/contributions#contributionscontributionidadjustments>`__.
"""
class APIKeyProfile(
EAObject,
_keys={
'apiKeyTypeName',
'committeeName',
'databaseName',
'hasMyCampaign',
'hasMyVoters',
'keyReference',
'username',
'userFirstName',
'userLastName'
}
):
"""Represents an `API key profile
<https://docs.everyaction.com/reference/overview#introspection>`__.
"""
class Attribution(EAObject, _keys={'amountAttributed', 'attributionType', 'dateThanked', 'notes', 'vanId'}):
"""Represents an `Attribution object
<https://docs.everyaction.com/reference/contributions#common-models-8>`__.
"""
class AvailableValue(EAObjectWithIDAndName, _keys={'parentValueId'}):
"""Represents
`AvailableValues <https://docs.everyaction.com/reference/custom-fields#common-models-9>`__.
for a Custom Field.
"""
class BallotRequestType(EAObjectWithIDAndName, _prefix='ballotRequestType'):
"""Represents a `Ballot Request Type
<https://docs.everyaction.com/reference/ballots#common-models-2>`__.
"""
class BallotReturnStatus(EAObjectWithIDAndName, _prefix='ballotReturnStatus'):
"""Represents a `Ballot Return Status
<https://docs.everyaction.com/reference/ballots#common-models-2>`__.
"""
class BallotType(EAObjectWithIDAndName, _prefix='ballotType'):
"""Represents a `Ballot Type
<https://docs.everyaction.com/reference/ballots#common-models-2>`__.
"""
class BankAccount(EAObjectWithIDAndName, _prefix='bankAccount'):
"""Represents a `Bank Account object
<https://docs.everyaction.com/reference/contributions#common-models-8>`__.
"""
class BargainingUnit(EAObjectWithIDAndName, _prefix='bargainingUnit', _keys={'employerBargainingUnitId', 'shortName'}):
"""Represents a `Bargaining Unit
<https://docs.everyaction.com/reference/bargaining-units#common-models-3>`__.
"""
class BatchForm(EAObjectWithIDAndName, _prefix='form'):
"""Represents a form for `Voter Registration Batches
<https://docs.everyaction.com/reference/voter-registration-batches#common-models-39>`__.
"""
class BatchProgram(EAObjectWithID, _prefix='programType'):
"""Represents a program for `Voter Registration Batches
<https://docs.everyaction.com/reference/voter-registration-batches#common-models-39>`__.
"""
class Canvasser(EAObjectWithID, _prefix='canvasser'):
"""Represents a `Canvasser
<https://docs.everyaction.com/reference/minivan-exports#common-models-25>`__.
"""
class CanvassContext(EAObject, _keys={'contactTypeId', 'dateCanvassed', 'inputTypeId', 'phoneId'}):
"""Represents a `Canvass Context
<https://docs.everyaction.com/reference/people#peoplevanidcanvassresponses>`__.
"""
class CanvassFileRequest(
EAObjectWithID,
_keys={'dateExpired', 'downloadUrl', 'errorCode', 'guid', 'savedListId', 'status', 'type', 'webhookUrl'},
):
"""Represents a `Canvass File Request
<https://docs.everyaction.com/reference/canvass-file-requests>`__.
"""
class ChangedEntityExportRequest(
EAObjectWithID,
_prefix='exportJob',
_keys={
'dateChangedFrom',
'dateChangedTo',
'excludeChangesFromSelf',
'includeInactive',
'requestedCustomFieldIds',
'requestedFields',
'requestedIds',
'resourceType'
}
):
"""Represents data associated with a request to `create a Changed Entity Export Job
<https://docs.everyaction.com/reference/changed-entities#changedentityexportjobs>`__.
"""
class ChangeType(EAObjectWithIDAndName, _prefix='changeType', _prefixed={'name'}, _keys={'description'}):
"""Represents a `changeType
<https://docs.everyaction.com/reference/changed-entity-export-jobs#changedentityexportjobschangetypesresourcetype>`__.
"""
@classmethod
def _id_key(cls) -> Optional[str]:
return 'ID'
class CodeResult(EAObjectWithID, _prefix='code', _keys={'message'}):
"""Represents the data associated with a response to a code batch request. See `POST /codes/batch
<https://docs.everyaction.com/reference/codes#codesbatch>`__
for an example.
"""
class Column(EAObjectWithName):
"""Represents a `Column
<https://docs.everyaction.com/reference/bulk-import#column>`__.
"""
class Commitment(
EAObjectWithID,
_prefix='commitment',
_keys={
'amount',
'ccExpirationMonth',
'ccExpirationYear',
'creditCardLast4',
'currency',
'designationId',
'endDate',
'frequency',
'nextTransactionDate',
'paymentType',
'startDate',
'status'
}
):
"""Represents a `Commitment
<https://docs.everyaction.com/reference/commitments#common-models-6>`__.
"""
class ConfirmationEmailData(
EAObject,
_keys={
'copyToEmails',
'fromEmail',
'fromName',
'fromSubject',
'isConfirmationEmailEnabled',
'isRecurringEmailEnabled',
'replyToEmail'
}
):
"""Represents `Confirmation Email Data
<https://docs.everyaction.com/reference/online-actions-forms#confirmation-email-data>`__.
"""
class ContactType(EAObjectWithIDAndName, _prefix='contactType', _keys={'channelTypeName'}):
"""Represents a `Contact Type
<https://docs.everyaction.com/reference/canvass-responses#canvassresponsescontacttypes>`__.
"""
class Constraints(EAObject, _keys={'invalidCharacters', 'maxLength'}):
"""Represents a description of the violated constraints for :class:`.Error` objects."""
class ContactHistory(EAObject, _keys={'contactTypeId', 'dateCanvassed', 'inputTypeId', 'resultCodeId'}):
"""Represents a `Contact History object
<https://docs.everyaction.com/reference/people#peoplevanidnotes-1>`__.
"""
class Currency(EAObject, _keys={'amount', 'currencyType'}):
"""Represents the type and the amount of a currency. Found, for instance, in the response of
`GET /people/{vanId}/membership
<https://docs.everyaction.com/reference/people#peoplevanidmembership>`__.
"""
class CustomFieldValue(EAObject, _keys={'assignedValue', 'customFieldGroupId', 'customFieldId'}):
"""Represents a `CustomFieldValue
<https://docs.everyaction.com/reference/people#common-models>`__.
"""
def __init__(
self,
customFieldId: Optional[int] = None,
customFieldGroupId: Optional[int] = None,
assignedValue: Optional[str] = None,
**kwargs: EAValue
) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate.
:param customFieldId: ID of the custom field.
:param customFieldGroupId: ID of the group of the custom field.
:param assignedValue: Value assigned to the custom field.
:param kwargs: Mapping of (alias or name) -> value.
"""
super().__init__(
customFieldId=customFieldId,
customFieldGroupId=customFieldGroupId,
assignedValue=assignedValue,
**kwargs
)
class Department(EAObjectWithIDAndName, _prefix='department', _keys={'employer', 'parentDepartmentId'}):
"""Represents a `Department
<https://docs.everyaction.com/reference/departments#common-models-10>`__.
"""
class Designation(EAObjectWithIDAndName, _prefix='designation'):
"""Represents a `Designation
<https://docs.everyaction.com/reference/designations#common-models-11>`__.
"""
class DisclosureFieldValue(EAObjectWithID, _prefix='disclosureField', _prefixed={'value'}, _keys={'designationId'}):
"""Represents a `Disclosure Field Value
<https://docs.everyaction.com/reference/people#common-models>`__.
"""
def __init__(
self,
disclosureFieldId: Optional[int] = None,
disclosureFieldValue: Optional[str] = None,
designationId: Optional[int] = None,
**kwargs: EAValue
) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate.
:param disclosureFieldId: ID of the disclosure field.
:param disclosureFieldValue: Value for the disclosure field.
:param designationId: ID of designation.
:param kwargs: Mapping of (alias or name) -> value.
"""
super().__init__(
disclosureFieldId=disclosureFieldId,
disclosureFieldValue=disclosureFieldValue,
designationId=designationId,
**kwargs
)
class DistrictFieldValue(EAObjectWithIDAndName, _keys={'parentId'}):
"""Represents a `District Field Value
<https://docs.everyaction.com/reference/district-fields#common-models-13>`__.
"""
class Email(EAObject, _keys={'dateCreated', 'email', 'isPreferred', 'isSubscribed', 'subscriptionStatus', 'type'}):
"""Represents an `Email
<https://docs.everyaction.com/reference/people#common-models>`__.
"""
def __init__(self, email: Optional[str] = None, **kwargs: EAValue) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate.
:param email: The email address.
:param kwargs: Mapping of (alias or name) -> value.
"""
super().__init__(email=email, **kwargs)
class EmailMessageContentDistributions(
EAObject,
_keys={
'bounceCount',
'contributionCount',
'contributionTotal',
'dateSent',
'formSubmissionCount',
'linksClickedCount',
'openCount',
'recipientCount',
'unsubscribeCount'
}
):
"""Represents an `Email Message Content Distributions object
<https://docs.everyaction.com/reference/email#common-models-14>`__.
"""
class EventRole(EAObjectWithIDAndName, _prefix='role', _keys={'goal', 'isEventLead', 'max', 'min'}):
"""Represents a `Role
<https://docs.everyaction.com/reference/events#common-models-18>`__
for an Event Type.
"""
class EventShift(EAObjectWithIDAndName, _prefix='eventShift', _keys={'endTime', 'startTime'}):
"""Represents a `Shift
<https://docs.everyaction.com/reference/events#common-models-18>`__.
"""
class ExportJobType(EAObjectWithIDAndName, _prefix='exportJobType'):
"""Represents an `Export Job Type
<https://docs.everyaction.com/reference/export-jobs#exportjobtypes>`__.
"""
class File(EAObject, _keys={'dateExpired', 'downloadUrl', 'recordCount'}):
"""Represents a `File object
<https://docs.everyaction.com/reference/bulk-import#common-models-4>`__
in EveryAction. Used in many contexts.
"""
class FinancialBatch(
EAObjectWithIDAndName,
_prefix='financialBatch',
_prefixed={'name', 'number'},
_keys={
'bankAccountId',
'checkDate',
'checkNumber',
'dateClosed',
'dateDeposited',
'dateOpened',
'depositNumber',
'designationId',
'expectedContributionCount',
'expectedContributionTotalAmount',
'isAutoGenerated',
'isOpen'
}
):
"""Represents a `Financial Batch
<https://docs.everyaction.com/reference/financial-batches#common-models-21>`__.
"""
class Folder(EAObjectWithIDAndName, _prefix='folder'):
"""Represents a `folder
<https://docs.everyaction.com/reference/folders>`__.
"""
class GeoCoordinate(EAObject, _keys={'lat', 'lon'}):
"""Represents a `Geographic Coordinate
<https://docs.everyaction.com/reference/locations>`__.
"""
class Identifier(EAObject, _keys={'externalId', 'type'}):
"""Represents an `Identifier
<https://docs.everyaction.com/reference/people#common-models>`__.
"""
class IsCellStatus(EAObjectWithIDAndName, _prefix='status', _prefixed={'name'}):
"""Represents an `Phone Is a Cell Status
<https://docs.everyaction.com/reference/phones#phones-iscellstatuses>`__.
"""
class JobActionType(EAObject, _keys={'actionType'}):
"""Represents a `Job Action Type
<https://docs.everyaction.com/reference/file-loading-jobs#action>`__.
"""
@staticmethod
def make(**kwargs: EAValue) -> 'JobActionType':
action_type = EAProperty.shared('actionType').find('actionType', kwargs, pop=True)
if not action_type:
raise EAException('Expected actionType property or alias to be specified for JobActionType')
lower = action_type.lower()
if lower == 'score':
return ScoreLoadAction(**kwargs)
if lower == 'avevdatafile':
return AVEVDataFileAction(**kwargs)
if lower == 'loadsavedlistfile':
return SavedListLoadAction(**kwargs)
raise EAException(f'Unrecognized Job Action Type {action_type}')
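# Illustrative sketch, not part of the library: JobActionType.make dispatches on the
# 'actionType' value to one of the concrete action classes defined later in this module.
# Assuming the keys listed for ScoreLoadAction, a call might look like:
#
#   action = JobActionType.make(actionType='Score', scoreId=9, scoreColumn='MyScore')
#   isinstance(action, ScoreLoadAction)   # True
#
# An unrecognized actionType raises EAException, as the last branch above shows.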
class JobClass(EAObjectWithIDAndName, _prefix='jobClass', _keys={'shortName'}):
"""Represents a `Job Class
<https://docs.everyaction.com/reference/job-classes#common-models-22>`__.
"""
class JobNotification(EAObject, _keys={'description', 'message', 'status'}):
"""Represents a `Notification
<https://docs.everyaction.com/reference/file-loading-jobs#notification>`__
for File Loading Jobs.
"""
class InputType(EAObjectWithIDAndName, _prefix='inputType'):
"""Represents an `Input Type
<https://docs.everyaction.com/reference/canvass-responses#canvassresponsesinputtypes>`__.
"""
class KeyValuePair(EAObject, _keys={'key', 'value'}):
"""Represents a key value pair for possible values of a `Support Field
<https://docs.everyaction.com/reference/voter-registration-batches#voterregistrationbatchesstatesstatesupportedfields>`__.
"""
class Listener(EAObject, _keys={'type', 'value'}):
"""Represents a `Listener
<https://docs.everyaction.com/reference/file-loading-jobs#overview-22>`__.
"""
class MembershipSourceCode(EAObjectWithIDAndName, _prefix='code', _prefixed={'name'}):
"""Represents a `Membership Source Code
<https://docs.everyaction.com/reference/people#peoplevanidmembership>`__.
"""
class MemberStatus(EAObjectWithIDAndName, _prefix='memberStatus', _keys={'isMember'}):
"""Represents a `Member Status
<https://docs.everyaction.com/reference/member-statuses#common-models-24>`__.
"""
class NoteCategory(EAObjectWithIDAndName, _prefix='noteCategory', _keys={'assignableTypes'}):
"""Represents a `Note Category
<https://docs.everyaction.com/reference/notes#common-models-26>`__.
"""
class Organization(
EAObjectWithIDAndName,
_prefix='organization',
_prefixed={'type'},
_keys={'parentOrganization', 'shortName', 'website'},
):
"""Represents an `Organization
<https://docs.everyaction.com/reference/employers#common-models-15>`__.
"""
class OrganizationPhone(
EAObjectWithID,
_prefix='organizationPhone',
_keys={
'confidenceLevel',
'countryCode',
'dialingPrefix',
'organizationId',
'phone',
},
phoneType=EAProperty('type')
):
"""Represents a `Phone for an organization
<https://docs.everyaction.com/reference/employers#common-models-15>`__.
"""
class Pledge(EAObjectWithID, _prefix='pledge'):
"""Represents a `Pledge object
<https://docs.everyaction.com/reference/contributions#common-models-8>`__.
"""
class PreferredPronoun(EAObjectWithIDAndName, _prefix='preferredPronoun', _prefixed={'name'}):
"""Represents a `preferred pronoun
<https://docs.everyaction.com/reference/reported-demographics#pronouns>`__.
"""
class PrintedList(EAObjectWithName, _keys={'number'}):
"""Represents a `Printed List
<https://docs.everyaction.com/reference/printed-lists#common-models-28>`__.
"""
class ProgramType(EAObjectWithIDAndName, _prefix='programType'):
"""Represents a `Program Type
<https://docs.everyaction.com/reference/voter-registration-batches#voterregistrationbatchesprogramtypes>`__.
"""
class RegistrationForm(EAObjectWithIDAndName, _prefix='form'):
"""Represents a `Registration Form
<https://docs.everyaction.com/reference/voter-registration-batches#voterregistrationbatchesregistrationforms>`__.
"""
class RelationalMapping(EAObject, _keys={'fieldName', 'value'}):
"""Represents a `Relational Mapping
<https://docs.everyaction.com/reference/changed-entities#changedentityexportjobsfieldsresourcetype>`__.
"""
class Relationship(EAObjectWithIDAndName):
"""Represents a `Relationship
<https://docs.everyaction.com/reference/relationships#relationships>`__.
"""
class ReportedEthnicity(EAObjectWithIDAndName, _prefix='reportedEthnicity', _prefixed={'name'}):
"""Represents a `Reported Ethnicity
<https://docs.everyaction.com/reference/reported-demographics#reportedethnicities>`__.
"""
class ReportedGender(EAObjectWithIDAndName, _prefix='reportedGender', _prefixed={'name'}):
"""Represents a `Reported Gender
<https://docs.everyaction.com/reference/reported-demographics#reportedgenders>`__.
"""
class ReportedLanguagePreference(EAObjectWithIDAndName, _prefix='reportedLanguagePreference', _prefixed={'name'}):
"""Represents a `Reported Language Preference
<https://docs.everyaction.com/reference/reported-demographics#reportedlanguagepreferences>`__.
"""
class ReportedRace(EAObjectWithIDAndName, _prefix='reportedRace', _prefixed={'name'}):
"""Represents a `Reported Race
<https://docs.everyaction.com/reference/reported-demographics#reportedraces>`__.
"""
class ReportedSexualOrientation(EAObjectWithIDAndName, _prefix='reportedSexualOrientation', _prefixed={'name'}):
"""Represents a `Reported Sexual Orientation
<https://docs.everyaction.com/reference/reported-demographics#reportedsexualorientations>`__.
"""
class ResultCode(EAObjectWithIDAndName, _prefix='resultCode', _keys={'mediumName', 'resultOutcomeGroup', 'shortName'}):
"""Represents a `Result Code
<https://docs.everyaction.com/reference/canvass-responses#canvassresponsesresultcodes>`__.
"""
class SavedList(EAObjectWithIDAndName, _prefix='savedList', _keys={'description', 'doorCount', 'listCount'}):
"""Represents a `Saved List
<https://docs.everyaction.com/reference/saved-lists#common-models-29>`__.
"""
class SavedListData(
EAObjectWithID,
_prefix='savedList',
_keys={'matchedRowsCount', 'originalRowCount', 'unmatchedRowsCount'}
):
"""Represents `Saved List Data
<https://docs.everyaction.com/reference/file-loading-jobs#saved-list-load>`__
for Saved List Load actions.
"""
class ScheduleType(EAObjectWithIDAndName, _prefix='scheduleType'):
"""Represents a `Schedule Type
<https://docs.everyaction.com/reference/schedule-types#common-models-30>`__.
"""
class Score(EAObjectWithIDAndName, _prefix='score', _keys={'description', 'maxValue', 'minValue', 'shortName'}):
"""Represents a `Score
<https://docs.everyaction.com/reference/scores#overview-37>`__.
"""
class ScoreApprovalCriteria(EAObject, _keys={'average', 'tolerance'}):
"""Represents `Score Approval Criteria
<https://docs.everyaction.com/reference/file-loading-jobs#score-load-action>`__.
"""
class ScriptResponse(EAObject, _keys={'type'}):
"""Represents a `Script Response
<https://docs.everyaction.com/reference/people#peoplevanidcanvassresponses>`__.
"""
_PROPERTIES = {
'type': EAProperty()
}
@staticmethod
def make(**kwargs: EAValue) -> 'ScriptResponse':
typ = kwargs.pop('type', None)
if typ is None:
raise EAException('Expected type for ScriptResponse')
lower = typ.lower()
if lower == 'activistcode':
return ActivistCodeResponse(**kwargs)
if lower == 'surveyresponse':
return SurveyCanvassResponse(**kwargs)
if lower == 'volunteeractivity':
return VolunteerActivityResponse(**kwargs)
raise EAException(f'Unrecognized Script Response type: {typ}')
class ShiftType(EAObjectWithIDAndName, _prefix='shiftType', _keys={'defaultEndTime', 'defaultStartTime'}):
"""Represents a `Shift Type
<https://docs.everyaction.com/reference/employers#common-models-15>`__.
"""
class Status(EAObjectWithIDAndName, _prefix='status'):
"""Represents a `Status
<https://docs.everyaction.com/reference/event-types#common-models-17>`__
in EveryAction. Used in multiple contexts.
"""
class StoryStatus(EAObjectWithIDAndName, _prefix='storyStatus'):
"""Represents a `StoryStatus
<https://docs.everyaction.com/reference/stories#common-models-34>`__.
"""
@classmethod
def _name_key(cls) -> Optional[str]:
return 'statusName'
class Subgroup(EAObjectWithIDAndName, _prefix='subgroup', _keys={'fullName', 'isAssociatedWithBadges'}):
"""Represents a `Subgroup
<https://docs.everyaction.com/reference/targets#common-models-37>`__
for a Target.
"""
class SupportedEntity(EAObjectWithName, _keys={'isApplicable', 'isSearchable'}):
"""Represents a `Supported Entity
<https://docs.everyaction.com/reference/codes#common-models-7>`__
in the context of codes.
"""
class SupporterGroup(EAObjectWithIDAndName, _keys={'description'}):
"""Represents a `Supporter Group
<https://docs.everyaction.com/reference/supporter-groups#common-models-35>`__.
"""
class Suppression(EAObjectWithName, _prefix='suppression', _prefixed={'code', 'name'}):
"""Represents a `Suppression
<https://docs.everyaction.com/reference/people#common-models>`__.
"""
_CODE_TO_NAME: ClassVar[Dict[str, str]] = {
'NC': 'do not call',
'NE': 'do not email',
'NM': 'do not mail',
'NW': 'do not walk'
}
_NAME_TO_CODE: ClassVar[Dict[str, str]] = {n: c for c, n in _CODE_TO_NAME.items()}
DO_NOT_CALL: ClassVar['Suppression'] = None
DO_NOT_EMAIL: ClassVar['Suppression'] = None
DO_NOT_MAIL: ClassVar['Suppression'] = None
DO_NOT_WALK: ClassVar['Suppression'] = None
def __init__(
self,
code_or_name: Optional[str] = None,
**kwargs: EAValue
) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate. When the positional argument `code_or_name` is given, it is assumed to be a
code (e.g., "NC" for "Do not call") when it has length at most 2, and otherwise it is assumed to be a name.
:param code_or_name: When given, it is assumed to be a code (e.g., "NC" for "Do not call") when it has
length at most 2, and otherwise it is assumed to be a name.
:param kwargs: Mapping of (alias or name) -> value.
"""
code = None
name = None
if code_or_name:
# Infer from str length whether it is a name or a code.
if len(code_or_name) > 2:
name = code_or_name
else:
code = code_or_name
# Continue trying to infer the name or code if they are not yet determined.
code = code or self._NAME_TO_CODE.get((name or '').lower())
name = name or self._CODE_TO_NAME.get((code or '').upper())
super().__init__(suppressionCode=code, suppressionName=name, **kwargs)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Suppression):
return False
if self.code and other.code:
return self.code.upper() == other.code.upper()
if self.name and other.name:
return self.name.lower() == other.name.lower()
# "Null" suppressions where name and code are both None are equal to each other.
return not (self.name or other.name or self.code or other.code)
@property
def no_call(self) -> bool:
"""Indicates whether this is a "Do Not Call" suppression.
:returns: :code:`True` if this is a "Do Not Call" suppression, :code:`False` otherwise.
"""
return (self.code or '').upper() == 'NC' or (self.name or '').lower() == 'do not call'
@property
def no_email(self) -> bool:
"""Indicates whether this is a "Do Not Email" suppression.
:returns: :code:`True` if this is a "Do Not Email" suppression, :code:`False` otherwise.
"""
return (self.code or '').upper() == 'NE' or (self.name or '').lower() == 'do not email'
@property
def no_mail(self) -> bool:
"""Indicates whether this is a "Do Not Mail" suppression.
:returns: :code:`True` if this is a "Do Not Mail" suppression, :code:`False` otherwise.
"""
return (self.code or '').upper() == 'NM' or (self.name or '').lower() == 'do not mail'
@property
def no_walk(self) -> bool:
"""Indicate whether this is a "Do Not Walk" suppression.
:returns: :code:`True` if this is a "Do Not Walk" suppression, :code:`False` otherwise.
"""
return (self.code or '').upper() == 'NW' or (self.name or '').lower() == 'do not walk'
Suppression.DO_NOT_CALL = Suppression('NC')
Suppression.DO_NOT_EMAIL = Suppression('NE')
Suppression.DO_NOT_MAIL = Suppression('NM')
Suppression.DO_NOT_WALK = Suppression('NW')
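# Illustrative sketch, assuming the inference in Suppression.__init__ above: a short
# positional argument is treated as a code, a longer one as a name, and the missing half
# is looked up in the tables, so e.g.:
#
#   Suppression('NC').name                         # 'do not call'
#   Suppression('do not email').code               # 'NE'
#   Suppression('NC') == Suppression.DO_NOT_CALL   # True (codes compared case-insensitively)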
class SurveyResponse(EAObjectWithIDAndName, _prefix='surveyResponse', _keys={'mediumName', 'shortName'}):
"""
Represents a `Survey Response
<https://docs.everyaction.com/reference/survey-questions#common-models-36>`__.
"""
class UpdateStatistics(
EAObject
):
"""Represents an `Update Statistics
<https://docs.everyaction.com/reference/score-updates>`__.
"""
class User(EAObjectWithID, _prefix='user', _keys={'firstName', 'lastName'}):
"""Represents a `VAN User
<https://docs.everyaction.com/reference/extended-source-codes>`__.
"""
class ValueMapping(EAObjectWithIDAndName, _keys={'parentId', 'sourceValue', 'targetValue'}):
"""Represents a `value
<https://docs.everyaction.com/reference/bulk-import#bulkimportjobs>`__
in the context of bulk import jobs.
"""
class WorkArea(EAObjectWithIDAndName, _prefix='workArea'):
"""Represents a `Work Area
<https://docs.everyaction.com/reference/worksites#common-models-16>`__.
"""
# --- Second Order Properties and Objects ---
EAProperty.share(
activistCodes=EAProperty(singular_alias='activist_code', factory=ActivistCode),
approvalCriteria=EAProperty('criteria', factory=ScoreApprovalCriteria),
availableValues=EAProperty('available', 'values', singular_alias='value', factory=AvailableValue),
bargainingUnit=EAProperty(factory=BargainingUnit),
bargainingUnits=EAProperty(singular_alias='bargaining_unit', factory=BargainingUnit),
canvassers=EAProperty(singular_alias='canvasser', factory=Canvasser),
canvassContext=EAProperty('context', factory=CanvassContext),
category=EAProperty(factory=NoteCategory),
columns=EAProperty(singular_alias='column', factory=Column),
columnsToIncludeInResultsFile=EAProperty(
'include_columns',
'include',
singular_alias='include_column',
factory=Column
),
confirmationEmailData=EAProperty(
'confirmation_email',
'confirmation_data',
'confirmation',
factory=ConfirmationEmailData
),
contactAttributions=EAProperty('attributions', factory=Attribution),
contactHistory=EAProperty('history', factory=ContactHistory),
contributionBankAccount=EAProperty('contribution_account', 'account_obj', factory=BankAccount),
customFieldValues=EAProperty('custom_values', singular_alias='custom_value', factory=CustomFieldValue),
customProperties=EAProperty('properties', singular_alias='property', factory=KeyValuePair),
departments=EAProperty(singular_alias='department', factory=Department),
designation=EAProperty(factory=Designation),
detailedConstraints=EAProperty('constraints', factory=Constraints),
disclosureFieldValues=EAProperty(
'disclosures',
'field_values',
'values',
singular_alias='disclosure',
factory=DisclosureFieldValue
),
districtFieldValue=EAProperty(factory=DistrictFieldValue),
districtFieldValues=EAProperty('values', singular_alias='value', factory=DistrictFieldValue),
duesPaid=EAProperty(factory=Currency),
emailMessageContentDistributions=EAProperty('distributions', factory=EmailMessageContentDistributions),
file=EAProperty(factory=File),
files=EAProperty(singular_alias='file', factory=File),
firstMembershipSourceCode=EAProperty('first_source_code', 'source_code', factory=MembershipSourceCode),
form=EAProperty(factory=BatchForm),
geoLocation=EAProperty('geo', 'location', factory=GeoCoordinate),
identifiers=EAProperty(singular_alias='identifier', factory=Identifier),
isCellStatus=EAProperty('cell_status', 'is_cell', factory=IsCellStatus),
jobClass=EAProperty(factory=JobClass),
limitedToParentValues=EAProperty('limited_to', is_array=True, factory=AvailableValue),
listeners=EAProperty(singular_alias='listener', factory=Listener),
pledge=EAProperty(factory=Pledge),
possibleValues=EAProperty('possible', singular_alias='possible_value', factory=KeyValuePair),
preferredPronoun=EAProperty(factory=PreferredPronoun),
programType=EAProperty('program', factory=BatchProgram),
relationalMappings=EAProperty('relations', singular_alias='relation', factory=RelationalMapping),
resultFiles=EAProperty('files', singular_alias='file', factory=File),
role=EAProperty(factory=EventRole),
roles=EAProperty(singular_alias='role', factory=EventRole),
savedList=EAProperty('list', factory=SavedListData),
score=EAProperty(factory=Score),
scores=EAProperty(singular_alias='score', factory=Score),
shift=EAProperty(factory=EventShift),
shifts=EAProperty(singular_alias='shift', factory=EventShift),
storyStatus=EAProperty('status', factory=StoryStatus),
subgroups=EAProperty(singular_alias='subgroup', factory=Subgroup),
suppressions=EAProperty(singular_alias='suppression', factory=Suppression),
supportedEntities=EAProperty('entities', singular_alias='entity', factory=SupportedEntity),
updateStatistics=EAProperty('update_stats', 'statistics', 'stats', factory=UpdateStatistics),
values=EAProperty(singular_alias='value', factory=ValueMapping)
)
class ActivistCodeResponse(ScriptResponse, EAObjectWithID, _prefix='activistCode', _keys={'action'}):
"""Represents an `Activist Code Response
<https://docs.everyaction.com/reference/people#peoplevanidcanvassresponses>`__.
"""
def __init__(self, id: Optional[int] = None, **kwargs: EAValue) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate.
:param id: ID to initialize with. When given alone, a simple object results (see
`A Note About Simple Objects <https://docs.everyaction.com/reference/events-overview>`__).
:param kwargs: Mapping of (alias or name) -> value.
"""
super().__init__(type='ActivistCode', activistCodeId=id, **kwargs)
class Address(
EAObjectWithID,
_prefix='address',
_prefixed={'line1', 'line2', 'line3'},
_keys={
'city',
'countryCode',
'displayMode',
'geoLocation',
'isPreferred',
'preview',
'stateOrProvince',
'type',
'zipOrPostalCode'
}
):
"""Represents an `Address
<https://docs.everyaction.com/reference/people#common-models>`__.
"""
class AVEVDataFileAction(JobActionType):
"""Represents an `AVEV Data File Action
<https://docs.everyaction.com/reference/file-loading-jobs#avev-data-file>`__.
"""
def __init__(self, **kwargs: EAValue) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate.
:param kwargs: Mapping of (alias or name) -> value.
"""
super().__init__(actionType='AVEVDataFile', **kwargs)
class BargainingUnitJobClass(
EAObjectWithID,
_prefix='employerBargainingUnitJobClass',
_keys={'bargainingUnit', 'employerBargainingUnitId', 'jobClass'}
):
"""Represents an `Employer Bargaining Unit Job Class
<https://docs.everyaction.com/reference/employers#common-models-15>`__.
"""
class ChangedEntityBulkImportField(EAObject, _keys={'fieldName', 'mappingTypeName', 'relationalMappings'}):
"""Represents a `bulk import field
<https://docs.everyaction.com/reference/changed-entities#changedentityexportjobsfieldsresourcetype>`__
in the context of changed entities.
"""
class ChangedEntityExportJob(
EAObjectWithID,
_prefix='exportJob',
_keys={
'dateChangedFrom',
'dateChangedTo',
'exportedRecordCount',
'files',
'jobStatus',
'message'
}
):
"""Represents data for an existing `ChangedEntityExportJob
<https://docs.everyaction.com/reference/changed-entities#common-models-5>`__.
"""
class Code(
EAObjectWithIDAndName,
_prefix='code',
_prefixed={'type'},
_keys={'dateCreated', 'dateModified', 'description', 'parentCodeId', 'supportedEntities'}
):
"""Represents a `Code object
<https://docs.everyaction.com/reference/codes#common-models-7>`__.
"""
class CustomField(
EAObjectWithIDAndName,
_prefix='customField',
_prefixed={'groupId', 'groupName', 'groupType', 'name', 'parentId', 'typeId'},
_keys={'availableValues', 'isEditable', 'isExportable', 'maxTextboxCharacters'}
):
"""Represents a `Custom Field
<https://docs.everyaction.com/reference/custom-fields#common-models-9>`__.
"""
class DistrictField(
EAObjectWithIDAndName,
_prefix='districtField',
_prefixed={'values'},
_keys={'isCustomDistrict', 'parentFieldId'}
):
"""Represents a `District Field
<https://docs.everyaction.com/reference/district-fields#common-models-13>`__.
"""
class EmailMessageContent(
EAObject,
_keys={'createdBy', 'dateCreated', 'emailMessageContentDistributions', 'senderDisplayName', 'senderEmailAddress'}
):
"""Represents an `email message content object
<https://docs.everyaction.com/reference/email#common-models-14>`__.
"""
class EmployerBargainingUnit(EAObjectWithID, _prefix='employerBargainingUnit', _keys={'bargainingUnit'}):
"""Represents an `Employer Bargaining Unit
<https://docs.everyaction.com/reference/employers#employersemployeridbargainingunitsbargainingunitid>`__.
"""
class Error(
EAObject,
_keys={'code', 'detailedConstraints', 'detailedCode', 'hint', 'properties', 'referenceCode', 'resourceUrl', 'text'}
):
"""Represents an `Error object
<https://docs.everyaction.com/reference/bulk-import#common-models-4>`__.
"""
class ExtendedSourceCode(
EAObjectWithIDAndName,
_prefix='extendedSourceCode',
_prefixed={'name'},
_keys={'dateCreated', 'dateModified', 'modifiedBy'},
createdBy=EAProperty('creator', factory=User)
):
"""Represents an `Extended Source Code
<https://docs.everyaction.com/reference/extended-source-codes#common-models-20>`__.
"""
class FieldValueMapping(EAObject, _keys={'columnName', 'fieldName', 'staticValue', 'values'}):
"""Represents a `fieldValueMapping
<https://docs.everyaction.com/reference/bulk-import#bulkimportjobs>`__.
"""
class JobFile(
EAObject,
_prefix='file',
_prefixed={'name'},
_keys={'columns', 'columnDelimiter', 'hasHeader', 'hasQuotes', 'sourceUrl'}
):
"""Represents a `file object for a job
<https://docs.everyaction.com/reference/file-loading-jobs#overview-22>`__.
"""
class ListLoadCallbackData(JobNotification, _keys={'description', 'message', 'savedList', 'status'}):
"""Represents `Callback Data
<https://docs.everyaction.com/reference/file-loading-jobs#saved-list-load>`__
for a Saved List Load action.
"""
class MappingParent(EAObject, _keys={'limitedToParentValues', 'parentFieldName'}):
"""Represents prerequisites for mapping a field as described `here
<https://docs.everyaction.com/reference/bulk-import#bulkimportmappingtypes>`__.
"""
class Membership(
EAObject,
_keys={
'changeTypeName',
'dateCardsSent',
'dateExpireMembership',
'dateLastRenewed',
'dateStartMembership',
'duesAttributionTypeName',
'duesEntityTypeName',
'duesPaid',
'enrollmentTypeName',
'firstMembershipSourceCode',
'levelId',
'levelName',
'numberOfCards',
'numberTimesRenewed',
'statusName',
'totalDuesPaid'
}
):
"""Contains `membership information
<https://docs.everyaction.com/reference/people#peoplevanidmembership>`__
for a person.
"""
class MiniVANExport(
EAObjectWithIDAndName,
_prefix='minivanExport',
_keys={
'canvassers',
'databaseMode',
'dateCreated'
},
createdBy=EAProperty('creator', factory=User)
):
"""Represents a `MiniVAN Export
<https://docs.everyaction.com/reference/minivan-exports#common-models-25>`__.
"""
class Note(
EAObjectWithID,
_prefix='note',
_keys={'category', 'contactHistory', 'createdDate', 'isViewRestricted', 'text'}
):
"""Represents a `Note
<https://docs.everyaction.com/reference/people#peoplevanidnotes>`__.
"""
class OnlineActionsForm(
EAObjectWithIDAndName,
_prefix='formTracking',
_keys={
'activistCodes',
'campaignId',
'codeId',
'confirmationEmailData',
'createdByEmail',
'dateCreated',
'dateModified',
'designation',
'eventId',
'isActive',
'isConfirmedOptInEnabled',
'modifiedByEmail'
},
formType=EAProperty('type'),
formTypeId=EAProperty()
):
"""Represents an `Online Action Form
<https://docs.everyaction.com/reference/online-actions-forms#common-models-27>`__.
"""
@classmethod
def _name_key(cls) -> Optional[str]:
return 'formName'
class Phone(
EAObjectWithID,
_prefix='phone',
_prefixed={'number', 'optInStatus', 'type'},
_keys={'countryCode', 'dateCreated', 'dialingPrefix', 'ext', 'isCellStatus', 'isPreferred', 'smsOptInStatus'}
):
"""Represents a `Phone
<https://docs.everyaction.com/reference/people#common-models>`__.
"""
def __init__(self, id_or_number: Optional[Union[int, str]] = None, **kwargs: EAValue) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate.
:param id_or_number: Either the phone ID (if an integer), or the phone number (if a string). A simple object
will result when an integer is given for the `id_or_number` positional parameter
(see `A Note About Simple Objects <https://docs.everyaction.com/reference/events#overview-19>`__).
When a string is given instead, it is assumed to correspond to the phone number, accessible via
instance.number.
:param kwargs: Mapping of (alias or name) -> value.
"""
if id_or_number is not None:
if isinstance(id_or_number, int):
# Assume id for int.
super().__init__(id=id_or_number, **kwargs)
elif isinstance(id_or_number, str):
# Assume phone number for str.
super().__init__(number=id_or_number, **kwargs)
else:
raise ValueError(f'Expected int or str for id_or_number, got {type(id_or_number)}: {id_or_number}')
else:
super().__init__(**kwargs)
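# Illustrative sketch, assuming the int/str dispatch in Phone.__init__ above:
#
#   Phone(4321).id              # 4321 (simple object with only the phone ID)
#   Phone('555-0100').number    # '555-0100' (string is taken as the phone number)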
class SavedListLoadAction(
JobActionType,
_keys={'folderId', 'listDescription', 'listName', 'overwriteExistingListId', 'personIdColumn', 'personIdType'}
):
"""Represents a `Saved List Load action
<https://docs.everyaction.com/reference/file-loading-jobs#saved-list-load>`__.
"""
def __init__(self, **kwargs: EAValue) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate.
:param kwargs: Mapping of (alias or name) -> value.
"""
super().__init__(actionType='LoadSavedListFile', **kwargs)
class ScoreLoadAction(
JobActionType,
_keys={'approvalCriteria', 'personIdColumn', 'personIdType', 'scoreColumn', 'scoreId'}
):
"""Represents a `Score Load Action
<https://docs.everyaction.com/reference/file-loading-jobs#score-load-action>`__.
"""
def __init__(self, **kwargs: EAValue) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate.
:param kwargs: Mapping of (alias or name) -> value.
"""
super().__init__(actionType='Score', **kwargs)
class ScoreUpdate(
EAObjectWithID,
_prefix='scoreUpdate',
_keys={'dateProcessed', 'loadStatus', 'score', 'updateStatistics'}
):
"""Represents a `Score Update
<https://docs.everyaction.com/reference/score-updates#scoreupdatesscoreupdateid>`__.
"""
class SupportField(
EAObject,
_keys={'customPropertyKey', 'displayName', 'fieldType', 'maxFieldLength', 'possibleValues'}
):
"""Represents a `Support Field
<https://docs.everyaction.com/reference/voter-registration-batches#voterregistrationbatchesregistrationforms>`__
for a Voter Registration Batch.
"""
class SurveyCanvassResponse(
ScriptResponse,
_keys={'mediumName', 'name', 'shortName', 'surveyQuestionId', 'surveyResponseId'}
):
"""Represents a `Survey Response
<https://docs.everyaction.com/reference/people#peoplevanidcanvassresponses>`__
in the context of a canvass response.
"""
def __init__(
self,
surveyQuestionId: Optional[int] = None,
surveyResponseId: Optional[int] = None,
**kwargs: EAValue
) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate.
:param surveyQuestionId: ID of the survey question.
:param surveyResponseId: ID of the survey response.
:param kwargs: Mapping of (alias or name) -> value.
"""
super().__init__(
type='SurveyResponse',
surveyQuestionId=surveyQuestionId,
surveyResponseId=surveyResponseId,
**kwargs
)
class Target(
EAObjectWithIDAndName,
_prefix='target',
_keys={'areSubgroupsSticky', 'description', 'points', 'status', 'subgroups', 'type'}
):
"""Represents a `Target
<https://docs.everyaction.com/reference/targets#common-models-37>`__.
"""
class TargetExportJob(
EAObjectWithID,
_prefix='exportJob',
_keys={'file', 'jobStatus', 'targetId', 'webhookUrl'},
):
"""Represents a `Target Export Job
<https://docs.everyaction.com/reference/target-export-jobs#targetexportjobsexportjobid>`__.
"""
class VolunteerActivityResponse(ScriptResponse, _prefix='volunteerActivity', _keys={'action'}):
"""Represents a `Volunteer Activity
<https://docs.everyaction.com/reference/people#peoplevanidcanvassresponses>`__.
"""
def __init__(self, id: Optional[int] = None, **kwargs: EAValue) -> None:
"""
Initialize by setting the specified property names and aliases. Note that values will automatically be converted
to API objects when appropriate.
:param id: ID to initialize with. When given alone, a simple object results (see
`A Note About Simple Objects <https://docs.everyaction.com/reference/events-overview>`__).
:param kwargs: Mapping of (alias or name) -> value.
"""
super().__init__(type='VolunteerActivity', volunteerActivityId=id, **kwargs)
class VoterRegistrationBatch(
EAObjectWithIDAndName,
_prefix='voterRegistrationBatch',
_keys={'dateCreated', 'description', 'form', 'personType', 'programType', 'stateCode', 'status'}
):
"""Represents a `Voter Registration Batch
<https://docs.everyaction.com/reference/voter-registration-batches#common-models-39>`__.
"""
# --- Third Order Properties and Objects ---
EAProperty.share(
address=EAProperty(factory=Address),
addresses=EAProperty(singular_alias='address', factory=Address),
bulkImportFields=EAProperty(singular_alias='bulk_import_field', factory=ChangedEntityBulkImportField),
codes=EAProperty(singular_alias='code', factory=Code),
customFields=EAProperty(singular_alias='custom_field', factory=CustomField),
districts=EAProperty(singular_alias='district', factory=DistrictField),
districtFields=EAProperty(singular_alias='district_field', factory=DistrictField),
emails=EAProperty(singular_alias='email', factory=Email),
emailMessageContent=EAProperty(singular_alias='content', factory=EmailMessageContent),
errors=EAProperty(singular_alias='error', factory=Error),
extendedSourceCode=EAProperty('extended_source', factory=ExtendedSourceCode),
fieldValueMappings=EAProperty(
'field_mappings',
'value_mappings',
'mappings',
singular_alias='mapping',
factory=FieldValueMapping
),
jobClasses=EAProperty(singular_alias='job_class', factory=BargainingUnitJobClass),
parents=EAProperty(singular_alias='parent', factory=MappingParent),
phones=EAProperty(singular_alias='phone', factory=Phone),
recordedAddresses=EAProperty(singular_alias='recorded_address', factory=Address),
responses=EAProperty(singular_alias='response', factory=ScriptResponse.make),
surveyQuestionResponses=EAProperty('responses', singular_alias='response', factory=SurveyResponse),
tags=EAProperty(singular_alias='tag', factory=Code),
voterRegistrationBatches=EAProperty(
'registration_batches',
'batches',
singular_alias='batch',
factory=VoterRegistrationBatch
),
workAreas=EAProperty(singular_alias='work_area')
)
class AddRegistrantsResponse(EAObject, _keys={'alternateId', 'errors', 'result', 'vanId'}):
"""Represents the data associated with a response to `adding registrants
<https://docs.everyaction.com/reference/voter-registration-batches#voterregistrationbatchesbatchidpeople>`__
to a Voter Registration Batch.
"""
class BulkImportField(
EAObjectWithName,
_keys={'canBeMappedToColumn', 'description', 'hasPredefinedValues', 'isRequired', 'parents'}
):
"""Represents a `mapping type field
<https://docs.everyaction.com/reference/bulk-import#bulkimportmappingtypes>`__.
"""
class BulkImportJobData(
EAObjectWithID,
_prefix='job',
_keys={'errors', 'resourceType', 'resultFileSizeLimitKb', 'resultFiles', 'status'}
):
"""Represents data for an existing `Bulk Import Job
<https://docs.everyaction.com/reference/bulk-import#common-models-4>`__.
"""
class CanvassResponse(EAObject, _keys={'canvassContext', 'responses', 'resultCodeId'}):
"""Represents a `Canvass Response
<https://docs.everyaction.com/reference/people#peoplevanidcanvassresponses>`__.
"""
class ChangedEntityField(
EAObjectWithName,
_keys={'availableValues', 'bulkImportFields', 'isCoreField', 'maxTextboxCharacters'},
_prefix='field',
_prefixed={'name', 'type'},
):
"""Represents a `changed entity field
<https://docs.everyaction.com/reference/changed-entities#changedentityexportjobsfieldsresourcetype>`__.
"""
_TYPE_TO_FACTORY = {}
ValueType = Union[bool, int, str, datetime]
@staticmethod
def _parse_bool(s: str) -> bool:
if s.lower() == 'true':
return True
if s.lower() == 'false':
return False
raise ValueError(f'Could not parse "{s}" to a boolean.')
def parse(self, value: str) -> ValueType:
"""Parse the raw string value of a field into a typed result.
The below table gives the behavior of this function for each `field type
<https://docs.everyaction.com/reference/changed-entities#changedentityexportjobsfieldsresourcetype>`__.
+------------+--------------------------------------------------------------------------------------------+
| Field Type | Behavior |
+============+============================================================================================+
| B | Parses "true" to :code:`True` and "false" to :code:`False`. |
+------------+--------------------------------------------------------------------------------------------+
| D | Parses into a naive `datetime object <https://docs.python.org/3/library/datetime.html>`__. |
+------------+--------------------------------------------------------------------------------------------+
| M | Keeps the original string value. |
+------------+--------------------------------------------------------------------------------------------+
| N | Parses into an :code:`int`. |
+------------+--------------------------------------------------------------------------------------------+
| T | Keeps the original string value. |
+------------+--------------------------------------------------------------------------------------------+
:param value: The value to parse.
:returns: The parsed value.
"""
return self._TYPE_TO_FACTORY[self.type](value) if value else None
# References inner staticmethod so needs to be defined here.
ChangedEntityField._TYPE_TO_FACTORY = {
'B': ChangedEntityField._parse_bool,
'D': datetime.fromisoformat,
'M': lambda s: s,
'N': int,
'T': lambda s: s,
}
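# Illustrative sketch, assuming the name/type aliases map to fieldName/fieldType as in the
# rest of this module: parse converts raw exported strings according to the field type, e.g.:
#
#   ChangedEntityField(name='DateCreated', type='D').parse('2021-06-01T12:00:00')
#       # -> datetime(2021, 6, 1, 12, 0)
#   ChangedEntityField(name='IsActive', type='B').parse('true')   # -> True
#   ChangedEntityField(name='VanID', type='N').parse('')          # -> None (empty values parse to None)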
class Contribution(
EAObject,
_keys={
'acceptedOneTimeAmount',
'acceptedRecurringAmount',
'amount',
'bankAccount',
'checkDate',
'checkNumber',
'codes',
'contact',
'contactAttributions',
'contributionBankAccount',
'contributionId',
'coverCostsAmount',
'dateReceived',
'dateThanked',
'depositDate',
'depositNumber',
'designation',
'directMarketingCode',
'disclosureFieldValues',
'extendedSourceCode',
'identifiers',
'isUpsellAccepted',
'isUpsellShown',
'linkedJointFundraisingContributionId',
'linkedPartnershipContributionId',
'notes',
'onlineReferenceNumber',
'paymentType',
'pledge',
'processedAmount',
'processedCurrency',
'selectedOneTimeAmount',
'status',
'upsellType'
}
):
"""Represents a `Contribution
<https://docs.everyaction.com/reference/contributions#common-models-8>`__.
"""
class Disbursement(
EAObjectWithID,
_prefix='disbursement',
_keys={
'amount',
'batchCode',
'checkDate',
'checkNumber',
'codes',
'contact',
'dateIssued',
'designation',
'disclosureFieldValues',
'linkedCreditCardPaymentDisbursementId',
'linkedReimbursementDisbursementId',
'notes'
}
):
"""Represents a `Disbursement
<https://docs.everyaction.com/reference/disbursements#common-models-12>`__.
"""
class EmailMessage(
EAObjectWithIDAndName,
_prefix='foreignMessage',
_keys={'createdBy', 'dateCreated', 'dateModified', 'dateScheduled', 'emailMessageContent'},
campaignID=EAProperty('campaign')
):
"""Represents an `email message
<https://docs.everyaction.com/reference/email#common-models-14>`__.
"""
# TODO: Is emailMessageContent really an array? If so, can it actually contain multiple entities?
class FileLoadingJob(
EAObjectWithID,
_prefix='job',
_keys={'description', 'interventionCallbackUrl', 'invalidRowsFileUrl', 'listeners'},
actions=EAProperty(singular_alias='action', factory=JobActionType.make),
file=EAProperty(factory=JobFile)
):
"""Represents a `File Loading Job
<https://docs.everyaction.com/reference/file-loading-jobs>`__.
"""
class Location(EAObjectWithIDAndName, _prefix='location', _keys={'address', 'displayName'}):
"""Represents a `Location
<https://docs.everyaction.com/reference/locations>`__.
"""
class MappingType(EAObjectWithName, _keys={'fieldValueMappings', 'resultFileColumnName'}):
"""Represents a `bulk import mapping type
<https://docs.everyaction.com/reference/bulk-import#mapping-types>`__.
"""
class Person(
EAObjectWithID,
_prefix='van',
_keys={
'additionalEnvelopeName',
'additionalSalutation',
'addresses',
'biographyImageUrl',
'caseworkCases',
'caseworkIssues',
'caseworkStories',
'collectedLocationId',
'contactMethodPreferenceCode',
'contactMode',
'contactModeId',
'customFieldValues',
'customProperties',
'cycle',
'dateOfBirth',
'disclosureFieldValues',
'districts',
'electionRecords',
'electionType',
'emails',
'envelopeName',
'finderNumber',
'firstName',
'formalEnvelopeName',
'formalSalutation',
'identifiers',
'lastName',
'middleName',
'nickname',
'occupation',
'organizationContactOfficialName',
'organizationRoles',
'party',
'phones',
'preferredPronoun',
'primaryContact',
'recordedAddresses',
'salutation',
'scores',
'selfReportedEthnicities',
'selfReportedEthnicity',
'selfReportedGenders',
'selfReportedLanguagePreference',
'selfReportedRace',
'selfReportedRaces',
'selfReportedSexualOrientations',
'sex',
'suppressions',
'surveyQuestionResponses',
'suffix',
'title',
'website'
},
employer=EAProperty()
):
"""Represents a `Person
<https://docs.everyaction.com/reference/people#common-models>`__.
"""
@staticmethod
def _find_factory(**kwargs: EAValue) -> Optional['Person']:
status = kwargs.get('status')
if status is not None:
if status != 'Unmatched':
raise AssertionError(f'Only expected Unmatched status, found "{status}"')
return None
return Person(**kwargs)
@staticmethod
def _get_preferred(of: List[Any], attr: Optional[str] = None) -> Optional[Any]:
# Get a preferred entity from a list of entities by checking the "preferred" attribute.
if of:
result_list = [o for o in of if o.preferred]
if result_list:
# Multiple preferred entities should be impossible without bad modifications.
assert len(result_list) == 1
if attr:
return getattr(result_list[0], attr)
return result_list[0]
return None
def add_suppression(self, suppression: Suppression) -> bool:
"""Adds the given suppression to this person if it is not already present.
:param suppression: The suppression to add.
:returns: :code:`True` if the suppression was added, :code:`False` if it was already present.
"""
self.suppressions = self.suppressions or []
if suppression not in self.suppressions:
self.suppressions.append(suppression)
return True
return False
def has_suppression(self, suppression: Suppression) -> Optional[bool]:
"""Determines whether this contact has the given suppression.
:param suppression: The suppression to check for.
:returns: :code:`True` if this contact has the suppression, :code:`False` if suppression information is
available (when :code:`suppressions` attribute is not :code:`None`) and the suppression was not found, or
:code:`None` if no suppression information is available.
"""
if self.suppressions is not None:
return suppression in self.suppressions
return None
def remove_suppression(self, suppression: Suppression) -> bool:
"""Removes the given suppression from this person if it is present.
:param suppression: The suppression to remove.
:returns: :code:`True` if the suppression was removed, :code:`False` if the suppression was not found.
"""
if self.suppressions:
try:
self.suppressions.remove(suppression)
return True
except ValueError:
return False
return False
def set_suppression(self, suppression: Suppression, value: bool) -> bool:
"""Add or remove the given suppression.
:param suppression: Suppression to add or remove.
:param value: :code:`True` to add the suppression, :code:`False` to remove it.
:returns: :code:`True` if suppressions were changed, :code:`False` otherwise.
"""
if value:
return self.add_suppression(suppression)
else:
return self.remove_suppression(suppression)
@property
def do_not_call(self) -> Optional[bool]:
"""Determine if this contact is marked as "Do Not Call".
:returns: :code:`True` if this contact is marked as "Do Not Call", :code:`False` if suppressions are present
and do not contain "Do Not Call", or :code:`None` if no suppression information is available.
"""
return self.has_suppression(Suppression.DO_NOT_CALL)
@do_not_call.setter
def do_not_call(self, value: bool) -> None:
"""Sets the "Do Not Call" status of this contact.
:param value: Value to set to.
"""
self.set_suppression(Suppression.DO_NOT_CALL, value)
@property
def do_not_email(self) -> Optional[bool]:
"""Determine if this contact is marked as "Do Not Email".
:returns: :code:`True` if this contact is marked as "Do Not Email", :code:`False` if suppressions are present
and do not contain "Do Not Email", or :code:`None` if no suppression information is available.
"""
return self.has_suppression(Suppression.DO_NOT_EMAIL)
@do_not_email.setter
def do_not_email(self, value: bool) -> None:
"""Sets the "Do Not Call" status of this contact.
:param value: Value to set to.
"""
self.set_suppression(Suppression.DO_NOT_EMAIL, value)
@property
def do_not_mail(self) -> Optional[bool]:
"""Determine if this contact is marked as "Do Not Mail".
:returns: :code:`True` if this contact is marked as "Do Not Mail", :code:`False` if suppressions are present
and do not contain "Do Not Mail", or :code:`None` if no suppression information is available.
"""
return self.has_suppression(Suppression.DO_NOT_MAIL)
@do_not_mail.setter
def do_not_mail(self, value: bool) -> None:
"""Sets the "Do Not Call" status of this contact.
:param value: Value to set to.
"""
self.set_suppression(Suppression.DO_NOT_MAIL, value)
@property
def do_not_walk(self) -> Optional[bool]:
"""Determine if this contact is marked as "Do Not Mail".
:returns: :code:`True` is this contact is marked as "Do Not Walk", :code:`False` is suppressions are present
and do not contain "Do Not Walk", or :code:`None` if no suppression information is available.
"""
return self.has_suppression(Suppression.DO_NOT_WALK)
@do_not_walk.setter
def do_not_walk(self, value: bool) -> None:
"""Sets the "Do Not Call" status of this contact.
:param value: Value to set to.
"""
self.set_suppression(Suppression.DO_NOT_WALK, value)
@property
def preferred_address(self) -> Optional[Address]:
"""Get this contact's preferred mailing address as an :class:`.Address` object if it exists, or :code:`None`
if this contact has no addresses or if information on what address is preferred is unavailable.
:returns: The preferred mailing address object, or :code:`None` if no preferred mailing address could be
determined.
"""
return self._get_preferred(self.addresses)
@property
def preferred_email(self) -> Optional[str]:
"""Get the address of this contact's preferred email if it exists, or :code:`None` if this contact has no email
addresses or if information on what address is preferred is unavailable.
:returns: The preferred email address, or :code:`None` if no preferred email address could be determined.
"""
return self._get_preferred(self.emails, "email")
@property
def preferred_phone(self) -> Optional[str]:
"""Get the number of this contact's preferred phone if it exists, or :code:`None` if this contact has no phone
numbers or if information on what number is preferred is unavailable.
:returns: The preferred phone number, or :code:`None` if no preferred phone number could be determined.
"""
return self._get_preferred(self.phones, "number")
class Story(
EAObjectWithID,
_prefix='story',
_prefixed={'text'},
_keys={'campaignId', 'storyStatus', 'tags', 'title', 'vanId'}
):
"""Represents a `Story
<https://docs.everyaction.com/reference/stories#common-models-34>`__.
"""
class SurveyQuestion(
EAObjectWithIDAndName,
_prefix='surveyQuestion',
_keys={'cycle', 'mediumName', 'scriptQuestion', 'shortName', 'status', 'type'},
responses=EAProperty(singular_alias='response', factory=SurveyCanvassResponse)
):
"""Represents a `Survey Question
<https://docs.everyaction.com/reference/survey-questions#common-models-36>`__.
"""
class ValueMappingData(EAObjectWithIDAndName, _keys={'parents'}):
"""Represents data for an existing `value mapping
<https://docs.everyaction.com/reference/bulk-import#bulkimportmappingtypesmappingtypenamefieldnamevalues>`__
in the context of bulk import jobs.
"""
class Worksite(EAObjectWithIDAndName, _prefix='worksite', _keys={'address', 'employer', 'isPreferred', 'workAreas'}):
"""Represents a `Worksite
<https://docs.everyaction.com/reference/worksites#common-models-16>`__.
"""
# --- Fourth Order Properties and Objects ---
EAProperty.share(
defaultLocation=EAProperty(factory=Location),
fields=EAProperty(singular_alias='field', factory=BulkImportField),
location=EAProperty(factory=Location),
locations=EAProperty(singular_alias='location', factory=Location),
mappingTypes=EAProperty('mappings', singular_alias='mapping', factory=MappingType),
person=EAProperty(factory=Person),
surveyQuestions=EAProperty('questions', singular_alias='question', factory=SurveyQuestion),
worksites=EAProperty(singular_alias='worksite', factory=Worksite)
)
class BulkImportAction(
EAObject,
_keys={'actionType', 'columnsToIncludeInResultsFile', 'mappingTypes', 'resultFileSizeKbLimit', 'resourceType'}
):
"""Represents a `bulk import action
<https://docs.everyaction.com/reference/bulk-import#action>`__.
"""
class Employer(
EAObjectWithIDAndName,
_prefix='employer',
_keys={
'bargainingUnits',
'departments',
'isMyOrganization',
'jobClasses',
'parentOrganization',
'shortName',
'website',
'worksites'
},
phones=EAProperty(singular_alias='phone', factory=OrganizationPhone),
shifts=EAProperty(singular_alias='shift', factory=ShiftType)
):
"""Represents an `Employer
<https://docs.everyaction.com/reference/employers#common-models-15>`__.
"""
class EventType(
EAObjectWithIDAndName,
_prefix='eventType',
_keys={
'canBeRepeatable',
'canHaveGoals',
'canHaveMultipleLocations',
'canHaveMultipleShifts',
'canHaveRoleMaximums',
'canHaveRoleMinimums',
'color',
'defaultLocation',
'isAtLeastOneLocationRequired',
'isOnlineActionsAvailable',
'isSharedWithChildCommitteesByDefault',
'isSharedWithMasterCommitteeByDefault',
'roles',
},
statuses=EAProperty(is_array=True, factory=Status)
):
"""Represents an `Event Type
<https://docs.everyaction.com/reference/event-types#common-models-17>`__.
"""
class ExportJob(
EAObjectWithID,
_prefix='exportJob',
_prefixed={'guid'},
_keys={
'activistCodes',
'canvassFileRequestId',
'canvassFileRequestGuid',
'customFields',
'dateExpired',
'districtFields',
'downloadUrl',
'errorCode',
'savedListId',
'status',
'surveyQuestions',
'type',
'webhookUrl'
}
):
"""Represents an `Export Job
<https://docs.everyaction.com/reference/export-jobs#common-models-19>`__.
"""
class MappingTypeData(EAObjectWithName, _keys={'allowMultipleMode', 'displayName', 'fields', 'resourceTypes'}):
"""Represents data for an existing `bulk import mapping type
<https://docs.everyaction.com/reference/bulk-import#bulkimportmappingtypes>`__.
"""
class Registrant(EAObject, _keys={'alternateId', 'customProperties', 'person'}):
"""Represents a `Registrant
<https://docs.everyaction.com/reference/voter-registration-batches#voterregistrationbatchesbatchidpeople>`__
for a Voter Registration Batch.
"""
# --- Fifth Order Properties and Objects ---
EAProperty.share(
actions=EAProperty(singular_alias='action', factory=BulkImportAction),
eventType=EAProperty('type', factory=EventType)
)
class BulkImportJob(EAObject, _keys={'actions', 'description'}, file=EAProperty(factory=JobFile)):
"""Represents a `Bulk Import Job
<https://docs.everyaction.com/reference/bulk-import#bulkimportjobs>`__.
"""
class Event(
EAObjectWithIDAndName,
_prefix='event',
_keys={
'codes',
'createdDate',
'description',
'districtFieldValue',
'dotNetTimeZoneId',
'endDate',
'eventType',
'isActive',
'isOnlyEditableByCreatingUser',
'isPubliclyViewable',
'locations',
'roles',
'shifts',
'shortName',
'startDate',
'voterRegistrationBatches'
},
notes=EAProperty(singular_alias='note', factory=Note)
):
"""Represents an `Event
<https://docs.everyaction.com/reference/events#common-models-18>`__.
"""
# --- Sixth Order Properties and Objects ---
EAProperty.share(
event=EAProperty(factory=Event)
)
class Signup(
EAObjectWithID,
_prefix='eventSignup',
_keys={
'dateModified',
'endTimeOverride',
'event',
'isOfflineSignup',
'location',
'modifiedBy',
'notes',
'person',
'shift',
'startTimeOverride',
'supporterGroupId',
'role'
},
status=EAProperty(factory=Status)
):
"""Represents a `Signup
<https://docs.everyaction.com/reference/signups#common-models-33>`__.
""" | 1.992188 | 2 |
project-smokeRec/inference.py | pengxj/DeepLearningCourse | 1 | 12787005 | <reponame>pengxj/DeepLearningCourse
import torch
from torchvision import transforms
import numpy as np
from PIL import Image
from model import LeNet
img = Image.open('../data/smokedata/nn401.jpg')
img.show()
mytransform = transforms.Compose(
[transforms.Resize([32,32]),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# ----------- Model inference --------------
model = LeNet(n_class=2)
model.load_state_dict(torch.load('bestmodel.pt'))
img = mytransform(img)
img = img.unsqueeze(0)  # torch expects a 4D tensor (NCHW)
model.eval()  # must switch to evaluation mode; the default training mode will not give the expected results
with torch.no_grad():  # disabling gradient computation speeds up inference
result = model(img)[0]
classes = ['non', 'smoke']
ind = torch.argmax(result)
print(f'index: {ind},score:{result[ind]} {classes[ind]}') | 2.984375 | 3 |
render_ospray/sync.py | surfsara-visualization/blospray | 27 | 12787006 | # Based on intern/cycles/blender/blender_camera.cpp
from mathutils import Matrix
class BlenderCamera:
def __init__(self, b_render):
self.nearclip = 1e-5
self.type = 'CAMERA_PERSPECTIVE'
self.ortho_scale = 1.0
self.lens = 50.0
self.aperturesize = 0.0
self.apertureblades = 0
self.aperturerotation = 0.0
self.focaldistance = 10.0
self.shift = [0, 0]
self.offset = [0, 0]
self.zoom = 1.0
self.pixelaspect = [1.0, 1.0]
self.aperture_ratio = 1.0
self.sensor_fit = 'AUTO' # AUTO, HORIZONTAL, VERTICAL
self.sensor_width = 36.0
self.sensor_height = 24.0
self.full_width = int(b_render.resolution_x * b_render.resolution_percentage / 100)
self.full_height = int(b_render.resolution_y * b_render.resolution_percentage / 100)
# [left, right, bottom, top]
self.border = [0.0, 1.0, 0.0, 1.0]
self.viewport_camera_border = [0.0, 1.0, 0.0, 1.0]
#self.pano_viewplane
self.matrix = Matrix()
def modified(self, other):
# XXX
if self.type != other.type:
return True
if self.lens != other.lens:
return True
if self.full_width != other.full_width or self.full_height != other.full_height:
return True
if self.matrix != other.matrix:
return True
return False
def from_view(self, b_engine, b_scene, b_v3d, b_rv3d, width, height):
# b_engine is used in the b_ob branch (but not atm)
self.nearclip = b_v3d.clip_start
# clip_end
self.lens = b_v3d.lens
#self.shuttertime
if b_rv3d.view_perspective == 'CAMERA':
#ob = b_v3d.use_local_camera if b_v3d.camera else b_scene.camera
#if ob:
# self.from_object(b_engine, b_ob, skip_panorama)
# else:
# Magic zoom formula
zoom = b_rv3d.view_camera_zoom
zoom = 1.4142 + zoom / 50.0
zoom *= zoom
self.zoom = 2.0 / zoom
self.offset = b_rv3d.view_camera_offset
elif b_rv3d.view_perspective == 'ORTHO':
pass
self.zoom *= 2.0
self.matrix = b_rv3d.view_matrix.inverted()
def viewplane(self, width, height):
"""
Return viewplane, aspectratio, sensor_size
"""
xratio = 1.0 * width * self.pixelaspect[0]
yratio = 1.0 * height * self.pixelaspect[1]
if self.sensor_fit == 'AUTO':
horizontal_fit = xratio > yratio
sensor_size = self.sensor_width
elif self.sensor_fit == 'HORIZONTAL':
horizontal_fit = True
sensor_size = self.sensor_width
else:
horizontal_fit = False
sensor_size = self.sensor_height
if horizontal_fit:
aspectratio = xratio / yratio
xaspect = aspectratio
yaspect = 1.0
else:
aspectratio = yratio / xratio
xaspect = 1.0
yaspect = aspectratio
if self.type == 'CAMERA_ORTHOGRAPHIC':
xaspect = xaspect * self.ortho_scale / (aspectratio * 2.0)
yaspect = yaspect * self.ortho_scale / (aspectratio * 2.0)
aspectratio = self.ortho_scale / 2.0
if self.type == 'CAMERA_PANORAMA':
viewplane = None
else:
# CAMERA_PERSPECTIVE
# [left, right, bottom, top]
viewplane = [-xaspect, xaspect, -yaspect, yaspect]
# Zoom for 3D camera view
viewplane = list(map(lambda v: v*self.zoom, viewplane))
# Modify viewplane with camera shift and 3D camera view offset
dx = 2.0 * (aspectratio * self.shift[0] + self.offset[0] * xaspect * 2.0)
dy = 2.0 * (aspectratio * self.shift[1] + self.offset[1] * yaspect * 2.0)
viewplane[0] += dx
viewplane[1] += dx
viewplane[2] += dy
viewplane[3] += dy
return viewplane, aspectratio, sensor_size
def sync_view(b_scene, b_v3d, b_rv3d, width, height):
bcam = BlenderCamera(b_scene.render)
bcam.from_view(None, b_scene, b_v3d, b_rv3d, width, height)
#bcam.border
#bcam.sync()
return bcam
"""
def sync_camera(b_render, b_scene, width, height, viewname):
bcam = BlenderCamera()
bcam.pixelaspect = [b_render.pixel_aspect_x, b_render.pixel_aspect_y]
#bcam.shuttertime = b_render.motion_blur_shutter
if b_render.use_border:
bcam.border = [b_render.border_min_x, b_render.border_max_x, b_render.border_min_y, b_render.border_max_y]
b_ob = b_scene.camera
#if b_ob:
# blender_camera_from_object(b_cam, b_engine, b_ob)
# b_engine.camera_model_matrix(b_ob, bcam.use_spherical_stereo, b_ob_matrix);
# bcam.matrix = get_transform(b_ob_matrix);
blender_camera_sync(cam, bcam, width, height, viewname)
"""
| 2.28125 | 2 |
src/jgikbase/test/idmapping/core/user_lookup_test.py | jgi-kbase/IDMappingService | 0 | 12787007 | from unittest.mock import create_autospec
from jgikbase.idmapping.storage.id_mapping_storage import IDMappingStorage
from jgikbase.idmapping.core.user_lookup import LocalUserLookup, UserLookupSet, UserLookup
from jgikbase.idmapping.core.user import AuthsourceID, User, Username
from jgikbase.idmapping.core.tokens import Token, HashedToken
from jgikbase.test.idmapping.test_utils import assert_exception_correct
from pytest import raises
from jgikbase.test.idmapping.core.tokens_test import is_base64
import time
from jgikbase.idmapping.core.errors import NoSuchAuthsourceError
def test_set_init_fail():
handler = create_autospec(UserLookup, spec_set=True, instance=True)
fail_set_init(None, TypeError('user_lookup cannot be None'))
fail_set_init(set([handler, None]), TypeError('None item in user_lookup'))
def fail_set_init(handlers, expected):
with raises(Exception) as got:
UserLookupSet(handlers)
assert_exception_correct(got.value, expected)
def test_set_get_user_default_cache_ttl():
handler = create_autospec(UserLookup, spec_set=True, instance=True)
timer = create_autospec(time.time, spec_set=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
hset = UserLookupSet(set([handler]), timer)
check_set_get_user_default_cache_ttl(hset, handler, timer, [0, 299, 300, 301])
def test_set_get_user_default_cache_ttl_set_ttl():
check_set_get_user_default_cache_ttl_set_ttl(100, [0, 99, 100, 101])
check_set_get_user_default_cache_ttl_set_ttl(500, [0, 499, 500, 501])
def check_set_get_user_default_cache_ttl_set_ttl(ttl, timervals):
handler = create_autospec(UserLookup, spec_set=True, instance=True)
timer = create_autospec(time.time, spec_set=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
hset = UserLookupSet(set([handler]), timer, cache_user_expiration=ttl)
check_set_get_user_default_cache_ttl(hset, handler, timer, timervals)
def check_set_get_user_default_cache_ttl(hset, handler, timer, timervals):
handler.get_user.return_value = (User(AuthsourceID('as'), Username('u')), False, None, None)
timer.return_value = timervals[0]
# user will not be in cache
assert hset.get_user(AuthsourceID('as'), Token('t')) == \
(User(AuthsourceID('as'), Username('u')), False)
# user is now cached
handler.get_user.return_value = None # should cause error if called from now on
timer.return_value = timervals[1] # just below default cache time
assert hset.get_user(AuthsourceID('as'), Token('t')) == \
(User(AuthsourceID('as'), Username('u')), False)
# now expire the user
handler.get_user.return_value = (User(AuthsourceID('as'), Username('u')), True, None, None)
timer.return_value = timervals[2]
assert hset.get_user(AuthsourceID('as'), Token('t')) == \
(User(AuthsourceID('as'), Username('u')), True)
# get the user again, should be cached.
handler.get_user.return_value = None # should cause error if called from now on
timer.return_value = timervals[3]
assert hset.get_user(AuthsourceID('as'), Token('t')) == \
(User(AuthsourceID('as'), Username('u')), True)
assert handler.get_user.call_args_list == [((Token('t'),), {}), ((Token('t'),), {})]
def test_set_get_user_cache_max_count():
# testing the default of 10k is just silly, not going to bother.
handler = create_autospec(UserLookup, spec_set=True, instance=True)
timer = create_autospec(time.time, spec_set=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
hset = UserLookupSet(set([handler]), timer, cache_max_size=2)
# add user 1
handler.get_user.return_value = (User(AuthsourceID('as'), Username('u1')), False, None, None)
timer.return_value = 0
assert hset.get_user(AuthsourceID('as'), Token('t1')) == \
(User(AuthsourceID('as'), Username('u1')), False)
# add user 2
handler.get_user.return_value = (User(AuthsourceID('as'), Username('u2')), True, None, None)
timer.return_value = 1
assert hset.get_user(AuthsourceID('as'), Token('t2')) == \
(User(AuthsourceID('as'), Username('u2')), True)
# add user 3, user 1 should now be evicted from the cache
handler.get_user.return_value = (User(AuthsourceID('as'), Username('u3')), False, None, None)
timer.return_value = 2
assert hset.get_user(AuthsourceID('as'), Token('t3')) == \
(User(AuthsourceID('as'), Username('u3')), False)
# should only need a handler call for user 1 at this point
handler.get_user.return_value = (User(AuthsourceID('as'), Username('u1')), True, None, None)
timer.return_value = 3
# get the 3 users. Get user 1 last otherwise it'll evict user 2 from the cache
assert hset.get_user(AuthsourceID('as'), Token('t2')) == \
(User(AuthsourceID('as'), Username('u2')), True)
assert hset.get_user(AuthsourceID('as'), Token('t3')) == \
(User(AuthsourceID('as'), Username('u3')), False)
assert hset.get_user(AuthsourceID('as'), Token('t1')) == \
(User(AuthsourceID('as'), Username('u1')), True)
# check that the calls to get_user are as expected:
assert handler.get_user.call_args_list == [((Token('t1'),), {}),
((Token('t2'),), {}),
((Token('t3'),), {}),
((Token('t1'),), {})]
def test_set_get_user_rel_ttl():
check_set_get_user_handler_ttl(None, 3, [100, 102, 103])
def test_set_get_user_epoch_ttl():
check_set_get_user_handler_ttl(1003, None, [1000, 1002, 1003])
def test_set_get_user_epoch_lt_rel_ttl():
# tests the case where both epoch and relative ttls are provided, but the epoch ttl is
# closer than the relative ttl.
check_set_get_user_handler_ttl(1003, 6, [1000, 1002, 1003])
def test_set_get_user_rel_lt_epoch_ttl():
# tests the case where both epoch and relative ttls are provided, but the relative ttl is
# closer than the epoch ttl.
check_set_get_user_handler_ttl(1007, 4, [1000, 1003, 1004])
def check_set_get_user_handler_ttl(epoch, rel, timervals):
handler = create_autospec(UserLookup, spec_set=True, instance=True)
timer = create_autospec(time.time, spec_set=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
hset = UserLookupSet(set([handler]), timer)
handler.get_user.return_value = (User(AuthsourceID('as'), Username('u1')), False, epoch, rel)
timer.return_value = timervals[0]
# cache user for X secs
assert hset.get_user(AuthsourceID('as'), Token('t')) == \
(User(AuthsourceID('as'), Username('u1')), False)
# force an error if the handler is called
handler.get_user.return_value = None
timer.return_value = timervals[1]
assert hset.get_user(AuthsourceID('as'), Token('t')) == \
(User(AuthsourceID('as'), Username('u1')), False)
# expect handler call at Y sec
handler.get_user.return_value = (User(AuthsourceID('as'), Username('u1')), True, epoch, rel)
timer.return_value = timervals[2]
assert hset.get_user(AuthsourceID('as'), Token('t')) == \
(User(AuthsourceID('as'), Username('u1')), True)
# check correct number of calls to get_user
assert handler.get_user.call_args_list == [((Token('t'),), {}), ((Token('t'),), {})]
def test_set_get_user_fail_None_input():
hset = UserLookupSet(set())
fail_set_get_user(hset, None, Token('t'), TypeError('authsource_id cannot be None'))
fail_set_get_user(hset, AuthsourceID('a'), None, TypeError('token cannot be None'))
def test_set_get_user_no_authsource():
handler = create_autospec(UserLookup, spec_set=True, instance=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
fail_set_get_user(UserLookupSet(set([handler])),
AuthsourceID('bs'),
Token('t'),
NoSuchAuthsourceError('bs'))
def fail_set_get_user(hset, authsource_id, token, expected):
with raises(Exception) as got:
hset.get_user(authsource_id, token)
assert_exception_correct(got.value, expected)
def test_set_is_valid_user_default_cache_ttl():
handler = create_autospec(UserLookup, spec_set=True, instance=True)
timer = create_autospec(time.time, spec_set=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
hset = UserLookupSet(set([handler]), timer)
check_set_is_valid_user_default_cache_ttl(hset, handler, timer, [0, 3599, 3600, 3601])
def test_set_is_valid_user_default_cache_ttl_set_ttl():
check_set_is_valid_user_default_cache_ttl_set_ttl(100, [0, 99, 100, 101])
check_set_is_valid_user_default_cache_ttl_set_ttl(10000, [0, 9999, 10000, 10001])
def check_set_is_valid_user_default_cache_ttl_set_ttl(ttl, timervals):
handler = create_autospec(UserLookup, spec_set=True, instance=True)
timer = create_autospec(time.time, spec_set=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
hset = UserLookupSet(set([handler]), timer, cache_is_valid_expiration=ttl)
check_set_is_valid_user_default_cache_ttl(hset, handler, timer, timervals)
def check_set_is_valid_user_default_cache_ttl(hset, handler, timer, timervals):
handler.is_valid_user.return_value = (True, None, None)
timer.return_value = timervals[0]
# user will not be in cache
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u'))) is True
# user is now cached
handler.is_valid_user.return_value = None # should cause error if called from now on
timer.return_value = timervals[1] # just below default cache time
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u'))) is True
# now expire the user
handler.is_valid_user.return_value = (True, None, None)
timer.return_value = timervals[2]
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u'))) is True
# get the user again, should be cached
handler.is_valid_user.return_value = None # should cause error if called from now on
timer.return_value = timervals[3]
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u'))) is True
assert handler.is_valid_user.call_args_list == [((Username('u'),), {}), ((Username('u'),), {})]
def test_set_is_valid_user_invalid_user():
# invalid users shouldn't get cached.
handler = create_autospec(UserLookup, spec_set=True, instance=True)
timer = create_autospec(time.time, spec_set=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
hset = UserLookupSet(set([handler]), timer)
handler.is_valid_user.return_value = (False, None, None)
timer.return_value = 0
# user will not be in cache
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u'))) is False
# would normally expect a cache time of 3600s, but should not be cached here.
timer.return_value = 10
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u'))) is False
assert handler.is_valid_user.call_args_list == [((Username('u'),), {}), ((Username('u'),), {})]
def test_set_is_valid_user_cache_max_count():
# testing the default of 10k is just silly, not going to bother.
handler = create_autospec(UserLookup, spec_set=True, instance=True)
timer = create_autospec(time.time, spec_set=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
hset = UserLookupSet(set([handler]), timer, cache_max_size=2)
# add user 1
handler.is_valid_user.return_value = (True, None, None)
timer.return_value = 0
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u1'))) is True
# add user 2. Don't need another return value for is_valid_user, has to be True to cache
timer.return_value = 1
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u2'))) is True
# add user 3, user 1 should now be evicted from the cache
timer.return_value = 2
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u3'))) is True
# force an assert fail if is_valid_user is called early:
handler.is_valid_user.return_value = (False, None, None)
timer.return_value = 3
# get the 3 users. Get user 1 last otherwise it'll evict user 2 from the cache
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u2'))) is True
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u3'))) is True
# get user 1
handler.is_valid_user.return_value = (True, None, None)
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u1'))) is True
# check that the calls to is_valid_user are as expected:
assert handler.is_valid_user.call_args_list == [((Username('u1'),), {}),
((Username('u2'),), {}),
((Username('u3'),), {}),
((Username('u1'),), {})]
def test_set_is_valid_user_rel_ttl():
check_set_is_valid_user_handler_ttl(None, 3, [100, 102, 103])
def test_set_is_valid_user_epoch_ttl():
check_set_is_valid_user_handler_ttl(1003, None, [1000, 1002, 1003])
def test_set_is_valid_user_epoch_lt_rel_ttl():
# tests the case where both epoch and relative ttls are provided, but the epoch ttl is
# closer than the relative ttl.
check_set_is_valid_user_handler_ttl(1003, 6, [1000, 1002, 1003])
def test_set_is_valid_user_rel_lt_epoch_ttl():
# tests the case where both epoch and relative ttls are provided, but the relative ttl is
# closer than the epoch ttl.
check_set_is_valid_user_handler_ttl(1007, 4, [1000, 1003, 1004])
def check_set_is_valid_user_handler_ttl(epoch, rel, timervals):
handler = create_autospec(UserLookup, spec_set=True, instance=True)
timer = create_autospec(time.time, spec_set=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
hset = UserLookupSet(set([handler]), timer)
handler.is_valid_user.return_value = (True, epoch, rel)
timer.return_value = timervals[0]
# cache user for X secs
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u1'))) is True
# force an error if the handler is called
handler.is_valid_user.return_value = None
timer.return_value = timervals[1]
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u1'))) is True
# expect handler call at Y sec
handler.is_valid_user.return_value = (True, epoch, rel)
timer.return_value = timervals[2]
assert hset.is_valid_user(User(AuthsourceID('as'), Username('u1'))) is True
# check correct number of calls to get_user
assert handler.is_valid_user.call_args_list == [((Username('u1'),), {}),
((Username('u1'),), {})]
def test_set_is_valid_user_None_inputs():
hset = UserLookupSet(set())
fail_set_is_valid_user(hset, None, TypeError('user cannot be None'))
def test_set_is_valid_user_no_authsource():
handler = create_autospec(UserLookup, spec_set=True, instance=True)
handler.get_authsource_id.return_value = AuthsourceID('as')
fail_set_is_valid_user(UserLookupSet(set([handler])),
User(AuthsourceID('bs'), Username('n')),
NoSuchAuthsourceError('bs'))
def fail_set_is_valid_user(hset, user, expected):
with raises(Exception) as got:
hset.is_valid_user(user)
assert_exception_correct(got.value, expected)
def test_local_init_fail():
with raises(Exception) as got:
LocalUserLookup(None)
assert_exception_correct(got.value, TypeError('storage cannot be None'))
def test_local_get_authsource():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
assert LocalUserLookup(storage).get_authsource_id() == AuthsourceID('local')
def test_local_get_user_admin():
check_local_get_user_admin(True)
check_local_get_user_admin(False)
def check_local_get_user_admin(isadmin):
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
storage.get_user.return_value = (Username('bar'), isadmin)
assert LocalUserLookup(storage).get_user(Token('foo')) == \
(User(AuthsourceID('local'), Username('bar')), isadmin, None, 300)
thash = '2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae'
assert storage.get_user.call_args_list == [((HashedToken(thash),), {})]
def test_local_get_user_fail():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
with raises(Exception) as got:
LocalUserLookup(storage).get_user(None)
assert_exception_correct(got.value, TypeError('token cannot be None'))
def test_local_is_valid_user():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
storage.user_exists.return_value = True
luh = LocalUserLookup(storage)
assert luh.is_valid_user(Username('foo')) == (True, None, 3600)
storage.user_exists.return_value = False
assert luh.is_valid_user(Username('bar')) == (False, None, 3600)
assert storage.user_exists.call_args_list == [
((Username('foo'),), {}),
((Username('bar'),), {})]
def test_local_is_valid_user_fail():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
with raises(Exception) as got:
LocalUserLookup(storage).is_valid_user(None)
assert_exception_correct(got.value, TypeError('username cannot be None'))
def test_local_create_user():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
t = LocalUserLookup(storage).create_user(Username('foo'))
assert is_base64(t.token) is True
    assert len(t.token) == 28
assert storage.create_local_user.call_args_list == \
[((Username('foo'), t.get_hashed_token()), {})]
def test_local_create_user_fail():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
with raises(Exception) as got:
LocalUserLookup(storage).create_user(None)
assert_exception_correct(got.value, TypeError('username cannot be None'))
def test_local_new_token():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
t = LocalUserLookup(storage).new_token(Username('bar'))
assert is_base64(t.token) is True
    assert len(t.token) == 28
assert storage.update_local_user_token.call_args_list == \
[((Username('bar'), t.get_hashed_token()), {})]
def test_local_new_token_fail():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
with raises(Exception) as got:
LocalUserLookup(storage).new_token(None)
assert_exception_correct(got.value, TypeError('username cannot be None'))
def test_local_set_user_as_admin():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
LocalUserLookup(storage).set_user_as_admin(Username('n'), True)
LocalUserLookup(storage).set_user_as_admin(Username('r'), False)
assert storage.set_local_user_as_admin.call_args_list == [((Username('n'), True), {}),
((Username('r'), False), {})]
def test_local_set_user_as_admin_fail():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
with raises(Exception) as got:
LocalUserLookup(storage).set_user_as_admin(None, True)
assert_exception_correct(got.value, TypeError('username cannot be None'))
def test_local_get_users():
storage = create_autospec(IDMappingStorage, spec_set=True, instance=True)
storage.get_users.return_value = {Username('foo'): False, Username('bar'): True}
assert LocalUserLookup(storage).get_users() == {Username('foo'): False,
Username('bar'): True}
assert storage.get_users.call_args_list == [((), {})]
| 2.015625 | 2 |
pubcode/__init__.py | Venti-/pubcode | 4 | 12787008 | <reponame>Venti-/pubcode<filename>pubcode/__init__.py
"""A simple module for creating barcodes.
"""
__version__ = '1.1.0'
from .code128 import Code128
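# Usage sketch (the constructor arguments are assumptions based on typical
# Code128 usage, not verified against this release; check the Code128 class
# for the exact API):
#
#   barcode = Code128('Hello!', charset='B')
#   barcode.image()  # rendering to an image typically requires Pillow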
| 1.34375 | 1 |
todolist_app/migrations/0001_initial.py | Russel777/todolist | 0 | 12787009 | <filename>todolist_app/migrations/0001_initial.py
# Generated by Django 2.1.2 on 2018-11-04 17:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('pub_date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date published')),
('priority', models.PositiveSmallIntegerField(choices=[(0, 'Low'), (1, 'Medium'), (2, 'High')], default=0)),
('status', models.PositiveSmallIntegerField(choices=[(0, 'Open'), (1, 'In progress'), (2, 'Done')], default=0)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.625 | 2 |
sqlalchemy_ag_grid/__init__.py | ytkj/sqlalchemy-ag-grid | 5 | 12787010 | from .query import SortFilterQuery
| 1.039063 | 1 |
tiny_template_engine.py | zizon/TinyTemplate | 0 | 12787011 | <filename>tiny_template_engine.py
#!/usr/bin/env python
# -*- coding: utf8 -*-
import codecs
import xml.sax
import json
import copy
import logging
import math
import atexit
import os
import shutil
import tarfile
import tempfile
from datetime import datetime
from xml.sax.handler import ContentHandler
class IO(object):
def __init__(self):
self.defers = []
atexit.register(lambda defers:map(lambda x:x(),defers),self.defers)
def read(self,name,encoding=u'utf8'):
file = codecs.open(name,u'r',encoding)
content = file.read()
file.close()
return content
def tempdir(self):
# temp dir
dir = None
# try shmfs
shmfs = u'/dev/shm'
if os.path.exists(shmfs):
dir = tempfile.mkdtemp(dir=shmfs)
else:
dir = tempfile.mkdtemp()
# defer cleanup
self.defers.append(
(lambda name:
lambda :shutil.rmtree(name))
(dir)
)
return dir
def snapshot(self,content,name=None,encoding=u'utf8',compress=False):
dir = self.tempdir()
# make file
file_name = None
if name is not None:
file_name = name
else:
file_name = str(content.__hash__())
# final path
full_path = os.path.join(dir,file_name)
# do write
temp_file = codecs.open(full_path,u'w',encoding)
temp_file.write(content)
temp_file.close()
if compress:
compress_path = os.path.join(dir,u'%s.tar.bz2' % file_name)
compress_out = tarfile.open(compress_path,u'w:bz2')
compress_out.add(full_path,file_name)
compress_out.close()
full_path = compress_path
return full_path
class Node(object):
def __init__(self,name=u'__root__',parent=None):
self.node = {
u'__name__':name,
u'__content__':[],
u'__attrs__':{},
u'__parent__': parent,
u'__children__':[],
}
def clone(self):
# a shadow copy first
copyed = Node(self[u'__name__'],self[u'__parent__'])
# copy content
copyed[u'__content__'] = list(self[u'__content__'])
# copy attrs
copyed[u'__attrs__'] = dict(self[u'__attrs__'])
# copy children
copyed[u'__children__'] = map(lambda child:child.clone(),self[u'__children__'])
# fix parent
for child in copyed[u'__children__']:
child[u'__parent__'] = copyed
return copyed
def __str__(self):
return self.node.__str__()
def __getitem__(self,name):
return self.node[name]
def __setitem__(self,name,value):
self.node[name] = value
return
def __delitem__(self,name):
del self.node[name]
return
class TinyStyleEngine(object):
def __init__(self,name):
self.styles = json.loads(IO().read(name))
def style(self,name):
return self.styles.get(name,{})
def apply(self,node):
# duplicate
node_name = node[u'__name__']
styles = {}
# if any elemnt level style?
styles.update(self.style(node_name))
# if any class specified?
attrs = node[u'__attrs__']
if u'class' in attrs:
for class_name in filter(lambda x:len(x) > 0 ,attrs[u'class'].split(u' ')):
styles.update(self.style(u'.%s' % class_name))
del attrs[u'class']
        # filter out empty style names
if u'' in styles:
del styles[u'']
if len(styles) > 0:
# had style prestend?
if u'style' in attrs:
# reconstruct style
for single_style in [ each.strip() for each in attrs['style'].split(u';')]:
single_style = single_style.split(u':')
style_name,style_value = single_style[0].strip(),u':'.join(single_style[1:]).strip()
if len(style_name) > 0:
styles[style_name] = style_value
# build style string
attrs[u'style'] = u''.join([ u'%s:%s;' % (key,value) for key,value in styles.items()])
return node
def decorate(self,root):
root = self.apply(root)
for node in root[u'__children__']:
self.apply(node)
node[u'__children__'] = map(lambda x:self.decorate(x),node[u'__children__'])
return root
class TinyTemplateEngine(ContentHandler):
def __init__(self,template):
xml.sax.parseString(template,self)
def startDocument(self):
# document root dom nodes
self.root = Node()
# current dom node infos
self.current_node = self.root
def startElement(self,name,attrs):
# current container
parent = self.current_node
node = Node(name,parent)
# attach to parent
parent[u'__children__'].append(node)
# parent has content?
parent[u'__content__'].append(u'__node__')
# attach attributes
node_attrs = node[u'__attrs__']
for attr in attrs.getNames():
node_attrs[attr] = attrs.getValue(attr)
# update current node
self.current_node = node
def endElement(self,name):
# update current node
parent = self.current_node[u'__parent__']
if parent is None:
# a root node
self.root[u'__children__'].append(self.current_node)
self.current_node = None
else:
self.current_node = parent
def characters(self,content):
if self.current_node is None:
# no node associate with,drop it
return
self.current_node[u'__content__'].append(content)
class TinyRender(object):
def __init__(self):
self._driver = TinyDataDriver()
def driver(self,driver):
self._driver = driver
return self
def define(self,template):
self.root = TinyTemplateEngine(IO().read(template)).root
return self
def bind(self,binding):
self._driver.evaluate(self.root,binding)
return self
def render(self,style=None):
if style is not None:
self.root = TinyStyleEngine(style).decorate(self.root)
return self.render_node(self.root)
def render_node(self,node):
name = node[u'__name__']
# special case for root node
if name == u'__root__':
return u''.join(map(lambda x:self.render_node(x),node[u'__children__']))
# now ,all node has a not none parent
# build attrs
attrs =u' '.join([ u"%s='%s'" % (key,value) for key,value in node[u'__attrs__'].items()])
# build content
# be care about node content
content = []
children = node[u'__children__']
node_index = 0
        # for prettifying reasons, insert \n when meeting contiguous __node__ content
meet_node = False
indention = None
for part in node[u'__content__']:
if part != u'__node__':
meet_node = False
content.append(part)
else:
if meet_node:
# got the right indention,do nothing
content.append(indention)
else:
# find indention
space = 0
done = False
# backtrack the content to find idention
for content_index in range(len(content)-1,-1,-1):
current = content[content_index]
for char in range(len(current)-1,-1,-1):
char = current[char]
if char == u'\n' or char == u'>':
done = True
break
elif char == u' ':
space = space + 1
                            else:
                                # consider it an intended inline; give up indentation prettifying
                                done = True
                                break
if done:
break
indention = u'\n%s' % u''.join([u' ' for i in range(space)])
meet_node = True
# special process for node
content.append(self.render_node(children[node_index]))
node_index = node_index + 1
content = ''.join(content)
return u'<%s %s>%s</%s>' % (
name,
attrs,
content,
name,
)
class TinyDataDriver(object):
def __init__(self):
self.binding = {}
magic_prefix = u'_eval_'
self.evaluator = {}
for attr in dir(self):
# magic find
if attr.startswith(magic_prefix):
self.evaluator[attr[len(magic_prefix):].replace(u'_',u'-')] = getattr(self,attr)
def bind(self,name,value):
self.binding[name] = value
def priority_attrs(self,attrs):
# copy
attrs = dict(attrs)
# priority process order
priority = []
if u'tiny-repeat' in attrs:
priority.append(u'tiny-repeat')
del attrs[u'tiny-repeat']
return priority + attrs.keys()
def evaluate(self,node,binding=None):
if node[u'__name__'] == u'__root__':
map(lambda x:self.evaluate_node(x,binding),node[u'__children__'])
else:
            raise Exception(u'not a root node, evaluate is illegal')
def evaluate_node(self,node,binding=None):
# node should had parent
if binding is not None:
self.binding.update(binding)
binding = self.binding
# save parent
parent = node[u'__parent__']
brothers = parent[u'__children__']
contents = parent[u'__content__']
name = node[u'__name__']
# find brother index
brother_match = -1
for i,brother in enumerate(brothers):
if brother == node :
brother_match = i
break
if brother_match == -1:
            raise Exception(u'no matching node in parent, illegal evaluate')
# find content index
content_match = -1
content_meet = 0
for i,content in enumerate(contents):
if content == u'__node__':
content_meet = content_meet + 1
if content_meet == brother_match+1:
content_match = i
break
if content_match == -1:
            raise Exception(u'no matching content in parent for node content, illegal evaluate')
def replace_in_parent(content_match,brother_match,nodes):
for i,node in enumerate(nodes):
brothers.insert( i + brother_match,node )
contents.insert( i + content_match,u'__node__' )
# remove original
total_nodes = len(nodes)
brothers.pop(total_nodes+brother_match)
contents.pop(total_nodes+content_match)
# evaluated container
nodes = [node]
# find evalutior for name
evaluator = self.evaluator.get(name,None)
if evaluator is not None:
nodes = evaluator(node,binding)
# replace
replace_in_parent(content_match,brother_match,nodes)
# now,new nodes are associalted with main tree
# mark node numbers
total_nodes = len(nodes)
# index trackers
# as attrs may generate more nodes also
content_index_tracker = content_match
brother_index_tracker = brother_match
# deal with attrs
for i,node in enumerate(nodes):
# evaluate attr
attrs = node[u'__attrs__']
# new nodes may be generated by attr evaluator,
# defer it.
# or it will have trouble with tree organization
for attr in self.priority_attrs(attrs):
evaluator = self.evaluator.get(attr,None)
if evaluator is not None:
# evaluate
evaluated = evaluator(node,binding)
# replace `this` node
# attach to main tree
replace_in_parent(content_index_tracker,brother_index_tracker,evaluated)
# delegate evalution of new evaluated nodes
map(lambda x:self.evaluate_node(x,binding),evaluated)
# hand out control already
# stop processing
return
# here,means node not changed in main tree,
# process children
for child in node[u'__children__']:
self.evaluate_node(child,binding)
def _eval_tiny_repeat(self,node,binding):
attrs = node[u'__attrs__']
times = eval(attrs[u'tiny-repeat'],binding)
index_name = attrs[u'tiny-repeat-index']
# clear instrument
del attrs[u'tiny-repeat']
del attrs[u'tiny-repeat-index']
# node parent
parent = node[u'__parent__']
# expand content
repeated = []
# reuse bindng context
conflict = None
if index_name in binding:
conflict = binding[index_name]
# generate
for i in range(times):
# bind index value
binding[index_name] = i
# DO copy
# take care of recursive bind
copyed = node.clone()
# node not in parents acctualy,
# so a direct evaluate_node will fail.
# make a isolated container for this node,
# then evalute/evaluate_node will work as expected.
# this is a little wired.
psuedo_root = Node()
psuedo_root[u'__children__'].append(copyed)
psuedo_root[u'__content__'].append(u'__node__')
copyed[u'__parent__'] = psuedo_root
self.evaluate(psuedo_root,binding)
# node is evaluated
# reaper nodes
# re-associate parent
for child in psuedo_root[u'__children__']:
child[u'__parent__'] = parent
repeated.extend(psuedo_root[u'__children__'])
# recover conflict
if conflict is not None:
binding[index_name] = conflict
return repeated
def _eval_tiny_number(self,node,binding):
attrs = node[u'__attrs__']
# evaluate
value = float(eval(attrs[u'tiny-number'],binding))
# clear instrument
del attrs[u'tiny-number']
if u'tiny-force-integer' in attrs:
# froce integer
del attrs[u'tiny-force-integer']
if not math.isnan(value):
node[u'__content__'].append(u'{:,}'.format(int(value)))
else:
node[u'__content__'].append(u'{:,}'.format(0))
else:
# fill content
if math.isnan(value):
node[u'__content__'].append(u'N/A')
elif value == int(value):
node[u'__content__'].append(u'{:,}'.format(int(value)))
else:
node[u'__content__'].append(u'{:,.2f}'.format(value))
if u'tiny-color' in attrs and not math.isnan(value):
del attrs[u'tiny-color']
css = u''
# add class
if u'class' in attrs:
css = attrs[u'class']
if value > 0:
attrs[u'class'] = u'%s tiny-positive-number' % css
elif value < 0:
attrs[u'class'] = u'%s tiny-negetive-number' % css
return [node]
def _eval_tiny_percent(self,node,binding):
attrs = node[u'__attrs__']
# evaluate
value = float(eval(attrs[u'tiny-percent'],binding))
# clear instrument
del attrs[u'tiny-percent']
if not math.isnan(value):
if u'tiny-precision' in attrs:
format = u'{:,.%s%%}' % eval(attrs[u'tiny-precision'],binding)
node[u'__content__'].append(format.format(value))
else:
node[u'__content__'].append(u'{:,.2%}'.format(value))
else:
node[u'__content__'].append(u'N/A')
if u'tiny-default-color' not in attrs:
css = u''
# add class
if u'class' in attrs:
css = attrs[u'class']
if value > 0:
attrs[u'class'] = u'%s tiny-positive-number' % css
elif value < 0:
attrs[u'class'] = u'%s tiny-negetive-number' % css
else:
del attrs[u'tiny-default-color']
return [node]
def _eval_tiny_data(self,node,binding):
attrs = node[u'__attrs__']
node[u'__content__'].append(u'%s' % eval(attrs[u'tiny-data'],binding))
# clear instrument
del attrs[u'tiny-data']
return [node]
def _eval_tiny_color_group(self,node,binding):
attrs = node[u'__attrs__']
css = u'tiny-color-group-%s' % eval(attrs[u'tiny-color-group'],binding)
# attach css
if u'class' in attrs:
attrs[u'class'] = u'%s %s' % (attrs[u'class'],css)
else:
attrs[u'class'] = css
# clear instrument
del attrs[u'tiny-color-group']
return [node]
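# Usage sketch (template.xml and style.json are hypothetical files, not shipped
# with this module):
#
#   html = TinyRender().define('template.xml').bind({'rows': 3}).render('style.json')
#
# where template.xml could contain instrumented attributes such as:
#
#   <table>
#     <tr tiny-repeat="rows" tiny-repeat-index="i">
#       <td tiny-data="i"></td>
#       <td tiny-percent="(i + 1) / 10.0"></td>
#     </tr>
#   </table>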
| 2.3125 | 2 |
env/lib/python3.6/site-packages/chargebee/version.py | Mohitkaushal97/File | 0 | 12787012 | VERSION = '2.7.4'
| 1.101563 | 1 |
consoleme/handlers/v1/roles.py | yi2020/consoleme | 0 | 12787013 | <filename>consoleme/handlers/v1/roles.py
import ujson as json
from consoleme.config import config
from consoleme.handlers.base import BaseMtlsHandler
from consoleme.lib.crypto import Crypto
from consoleme.lib.plugins import get_plugin_by_name
stats = get_plugin_by_name(config.get("plugins.metrics"))()
log = config.get_logger()
crypto = Crypto()
auth = get_plugin_by_name(config.get("plugins.auth"))()
class GetRolesHandler(BaseMtlsHandler):
"""consoleme CLI role handler. Pass ?all=true to URL query to return all roles."""
def check_xsrf_cookie(self):
pass
def initialize(self):
self.user: str = None
self.eligible_roles: list = []
async def get(self):
"""
/api/v1/get_roles - Endpoint used to get list of roles. Used by weep and newt.
---
get:
description: Presents json-encoded list of eligible roles for the user.
responses:
200:
description: Present user with list of eligible roles.
403:
description: User has failed authn/authz.
"""
self.user: str = self.requester["email"]
include_all_roles = self.get_arguments("all")
console_only = True
if include_all_roles == ["true"]:
console_only = False
log_data = {
"function": "GetRolesHandler.get",
"user": self.user,
"console_only": console_only,
"message": "Writing all eligible user roles",
"user-agent": self.request.headers.get("User-Agent"),
"request_id": self.request_uuid,
}
log.debug(log_data)
stats.count("GetRolesHandler.get", tags={"user": self.user})
await self.authorization_flow(user=self.user, console_only=console_only)
self.write(json.dumps(sorted(self.eligible_roles)))
self.set_header("Content-Type", "application/json")
await self.finish()
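# Example call (sketch; the host name and certificate paths are hypothetical,
# and the endpoint is mTLS-protected so a client certificate is required):
#
#   curl --cert client.crt --key client.key \
#       "https://consoleme.example.com/api/v1/get_roles?all=true"
#
# which returns the JSON-encoded, sorted list of eligible roles.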
| 2.234375 | 2 |
autopy/core/data/Find.py | songofhawk/autopy | 0 | 12787014 | <reponame>songofhawk/autopy
from __future__ import annotations
import time
from autopy.core.action import ActionMouse
from autopy.core.data.Action import Execution, Action
from autopy.core.detection.ImageDetection import ImageDetection
from autopy.core.detection.OcrDetection import OcrDetection
from autopy.core.detection.ColorDetection import ColorDetection
from autopy.core.detection.WindowDetection import WindowDetection
from autopy.core.share import list_util
class Scroll:
"""
在查找(Find)过程中的滚动配置
Attributes:
one_page (int): 每页滚动的距离,单位是虚拟像素(根据屏幕分辨率可能有缩放)
page_count (int): 滚动页数
find_mode (str): 是否要在滚动的过程中,找出所有结果,缺省为"Any";
如果为"All",表示要完成所有滚动,并且在每一页执行detection,保存检测结果;
如果为"Any",则只要有一页检测通过,就不再滚动了
"""
one_page: int #
page_count: int #
find_mode: str = "Any"
class Find:
"""
用于查找的基础配置,可以有不同的查找模式,在State节点中,它如果是check属性,则不保存查找结果,如果是find属性,则把查找结果,临时存入find_result
Attributes:
image (ImageDetection) : 图像检测,在当前页面中找指定图像片段,不一定要完全一致,可以指定相似度
ocr (OcrDetection) : 文本检测,在当前页面的指定位置做OCR识别,然后查看是否有指定的文本
color (ColorDetection) : 颜色检测,在当前页面的指定像素位置,查看是否符合定义的颜色
window (WindowDetection) : 窗口检测,在当前页面查找指定title或者name的窗口
scroll (Scroll) : 查找的时候,如果没找到,就滚动当前窗口,继续查找
fail_action (Execution) : 如果什么没有找到,需要执行的操作
result_name (str): 给检测结果一个变量名
"""
image: ImageDetection
ocr: OcrDetection
color: ColorDetection
window: WindowDetection
scroll: Scroll
fail_action: Execution
result_name: None
find_mode: str = "All"
def do(self, do_fail_action):
results = []
found_any = False
found_all = True
if self.image is not None:
res = self._do_once(self.image, do_fail_action)
found_any = found_any or (res is not None)
found_all = found_all and (res is not None)
list_util.append_to(results,res)
if self.ocr is not None:
res = self._do_once(self.ocr, do_fail_action)
found_any = found_any or (res is not None)
found_all = found_all and (res is not None)
list_util.append_to(results,res)
if self.color is not None:
res = self._do_once(self.color, do_fail_action)
found_any = found_any or (res is not None)
found_all = found_all and (res is not None)
list_util.append_to(results,res)
if self.window is not None:
res = self._do_once(self.window, do_fail_action)
found_any = found_any or (res is not None)
found_all = found_all and (res is not None)
list_util.append_to(results,res)
if self.find_mode == "All" and found_all:
if self.result_name is not None:
Action.save_call_env({self.result_name: results})
return results
elif self.find_mode == "Any" and found_any:
if self.result_name is not None:
Action.save_call_env({self.result_name: results})
return results
else:
return None
def _do_once(self, detection, do_fail_action):
if detection is None:
return None
detect_res = None
results = []
page = 0
if self.scroll is not None:
            # with scrolling, loop over the configured number of pages
count = self.scroll.page_count
find_all = (self.scroll.find_mode == "All")
else:
            # without scrolling, run the detection only once
count = 1
find_all = True
while page < count and (
(not find_all and detect_res is None)
or
find_all
):
            # if scrolling should stop at the first hit, keep checking whether detect_res is still None
            # if scrolling runs through every configured page and returns all hits, detect_res does not need to be checked
            detect_res = detection.do()
            list_util.append_to(results, detect_res)
page += 1
if self.scroll:
time.sleep(1)
# print('before scroll {}'.format(self.scroll.one_page))
ActionMouse.scroll(self.scroll.one_page)
# print('-- after scroll')
size = len(results)
if size == 0:
if do_fail_action:
Action.call(self.fail_action)
return None
elif size == 1:
return results[0]
else:
return results
def flow_control_fail_action(self):
        # find and return the fail_action of the current find node that affects flow control
        # a find node (even one built from a list) may have only one flow-controlling fail_action
fail_action = self.fail_action
if fail_action is None:
return None
if isinstance(fail_action, Action) and fail_action.is_flow_control:
return fail_action
if isinstance(fail_action, list):
for one_action in fail_action:
if one_action.is_flow_control:
return one_action
return None
| 2.109375 | 2 |
polling_stations/apps/data_collection/management/commands/import_west_dorset.py | chris48s/UK-Polling-Stations | 0 | 12787015 | <reponame>chris48s/UK-Polling-Stations<filename>polling_stations/apps/data_collection/management/commands/import_west_dorset.py
from data_collection.management.commands import BaseXpressDCCsvInconsistentPostcodesImporter
class Command(BaseXpressDCCsvInconsistentPostcodesImporter):
council_id = 'E07000052'
addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017WDDC.TSV'
stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017WDDC.TSV'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
station_postcode_search_fields = [
'polling_place_postcode',
'polling_place_address_4',
'polling_place_address_3',
'polling_place_address_2',
'polling_place_address_1',
]
| 1.734375 | 2 |
uwu_links/links.py | simonfong6/uwu-links | 1 | 12787016 | """Map of short links to the full urls."""
from google.cloud import firestore
from google.cloud.firestore import Increment
LINKS_COLLECTION_NAME = u'links'
TOTAL_VISITS_COLLECTION_NAME = u'total_visits'
URL_KEY = u'url'
VISIT_COUNT_KEY = u'visit_count'
COUNT_KEY = u'count'
class Links:
def __init__(self):
self.db = firestore.Client()
self.links = self.db.collection(LINKS_COLLECTION_NAME)
self.total_visits = self.db.collection(TOTAL_VISITS_COLLECTION_NAME).document('visits')
def has(self, keyword):
doc_ref = self.links.document(keyword)
doc = doc_ref.get()
return doc.exists
def insert(self, keyword, url):
doc_ref = self.links.document(keyword)
data = {URL_KEY: url}
doc_ref.set(data)
def get(self, keyword):
if not self.has(keyword):
return None
doc_ref = self.links.document(keyword)
doc = doc_ref.get()
doc_dict = doc.to_dict()
url = doc_dict[URL_KEY]
self.increment(keyword)
return url
def increment(self, keyword):
doc_ref = self.links.document(keyword)
doc_ref.update({VISIT_COUNT_KEY: Increment(1)})
self.increment_total_visits()
def increment_total_visits(self):
total_visits = self.total_visits.get()
if not total_visits.exists:
self.total_visits.set({COUNT_KEY: 0})
self.total_visits.update({COUNT_KEY: Increment(1)})
def get_all_links(self):
"""Fetch all links from database."""
link_dicts = []
links = self.links.stream()
for link in links:
link_dict = link.to_dict()
link_dict['key'] = link.id
link_dicts.append(link_dict)
return link_dicts
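# Usage sketch (assumes Google Cloud credentials for Firestore are already
# configured in the environment):
#
#   links = Links()
#   if not links.has('docs'):
#       links.insert('docs', 'https://example.com/docs')
#   url = links.get('docs')  # also increments the per-link and total visit counters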
| 3.125 | 3 |
yesterday/config/settings/dev.py | imkevinxu/yesterday | 3 | 12787017 | <reponame>imkevinxu/yesterday
"""Development settings and globals."""
from __future__ import absolute_import
from .base import *
import os
########## DEBUG CONFIGURATION
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = True
# https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG CONFIGURATION
########## DATABASE CONFIGURATION
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': '%s-dev' % PROJECT_NAME.lower(),
'USER': '',
'PASSWORD': '',
'HOST': 'localhost',
'PORT': '5432',
}
}
########## END DATABASE CONFIGURATION
########## EMAIL CONFIGURATION
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
########## END EMAIL CONFIGURATION
########## CACHE CONFIGURATION
# https://docs.djangoproject.com/en/dev/ref/settings/#caches
CACHE_ENGINES = {
'redis': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'localhost:6379:0',
},
'dummy': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
CACHES = {
'redis': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'localhost:6379:0',
}
}
CACHES['default'] = CACHE_ENGINES[os.getenv('CACHE', 'dummy')]
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
########## END CACHE CONFIGURATION
########## REDIS QUEUE CONFIGURATION
# https://github.com/ui/django-rq#support-for-django-redis-and-django-redis-cache
RQ_QUEUES = {
'default': {
'USE_REDIS_CACHE': 'redis'
},
'high': {
'USE_REDIS_CACHE': 'redis'
},
'low': {
'USE_REDIS_CACHE': 'redis'
}
}
RQ_SHOW_ADMIN_LINK = True
########## END REDIS QUEUE CONFIGURATION
########## LOGGING CONFIGURATION
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
LOGGERS = {
# Log requests locally without [INFO] tag
'werkzeug': {
'handlers': ['default'],
'level': 'DEBUG',
'propagate': False,
},
# Log queue workers to console and file on development
'rq.worker': {
'handlers': ['default', 'file_log'],
'level': 'DEBUG',
'propagate': False,
},
}
LOGGING['loggers'].update(LOGGERS)
########## END LOGGING CONFIGURATION
########## TOOLBAR CONFIGURATION
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html#explicit-setup
INSTALLED_APPS += (
'debug_toolbar',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
# http://django-debug-toolbar.readthedocs.org/en/latest/installation.html
INTERNAL_IPS = ('127.0.0.1',)
########## END TOOLBAR CONFIGURATION
########## SSL SERVER CONFIGURATION
# https://github.com/teddziuba/django-sslserver#getting-started
INSTALLED_APPS += (
'sslserver',
)
########## END SSL SERVER CONFIGURATION
| 1.726563 | 2 |
滤波器.py | 990676/990676 | 0 | 12787018 | >>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> b, a = signal.butter(4, 100, 'low', analog=True)
>>> w, h = signal.freqs(b, a)
>>> plt.semilogx(w, 20 * np.log10(abs(h)))
>>> plt.title('Butterworth filter frequency response')
>>> plt.xlabel('Frequency [radians / second]')
>>> plt.ylabel('Amplitude [dB]')
>>> plt.margins(0, 0.1)
>>> plt.grid(which='both', axis='both')
>>> plt.axvline(100, color='green') # cutoff frequency
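# A companion sketch for a digital low-pass filter (the fs argument assumes
# SciPy >= 1.2; the test signal below is made up for illustration):
>>> fs = 1000
>>> t = np.arange(0, 1, 1.0 / fs)
>>> x = np.sin(2 * np.pi * 5 * t) + 0.5 * np.random.randn(t.size)
>>> bd, ad = signal.butter(4, 100, 'low', fs=fs)
>>> y = signal.lfilter(bd, ad, x)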
>>> plt.show() | 3.375 | 3 |
examples/Plot_FibonacciLines.py | Physicworld/pyjuque | 343 | 12787019 | import os
import sys
curr_path = os.path.abspath(__file__)
root_path = os.path.abspath(
os.path.join(curr_path, os.path.pardir, os.path.pardir))
sys.path.append(root_path)
from pyjuque.Exchanges.CcxtExchange import CcxtExchange
from pyjuque.Plotting import PlotData
import plotly.graph_objs as go
def horizontal_line(start_time, end_time, value, color=None):
return go.layout.Shape(
type="line",
x0=start_time,
y0=value,
x1=end_time,
y1=value,
line=dict(color=color)
)
def Main():
exchange = CcxtExchange('binance')
symbol = "BTC/USDT"
interval = "4h"
df = exchange.getOHLCVHistory(symbol, interval, 8000)
start_time = df['time'][0]
end_time = df['time'][len(df)-1]
price_min = df['close'].min()
price_max = df['close'].max()
diff = price_max - price_min
level1 = price_max - 0.236 * diff
level2 = price_max - 0.382 * diff
level3 = price_max - 0.618 * diff
lines = []
lines.append(horizontal_line(
start_time, end_time, price_max,
color="rgba(255, 0, 0, 255)"))
lines.append(horizontal_line(
start_time, end_time, level1,
color="rgba(255, 255, 0, 255)"))
lines.append(horizontal_line(
start_time, end_time, level2,
color="rgba(0, 255, 0, 255)"))
lines.append(horizontal_line(
start_time, end_time, level3,
color="rgba(0, 255, 255, 255)"))
lines.append(horizontal_line(
start_time, end_time, price_min,
color="rgba(0, 0, 255, 255)"))
PlotData(df,
add_candles=False,
plot_shapes=lines,
plot_title="fib_levels_"+symbol.replace('/', '').lower() + "_" + interval,
show_plot=True)
if __name__ == '__main__':
Main() | 2.46875 | 2 |
Proyect/api/views.py | JZ1999/RandomProyect | 0 | 12787020 | <reponame>JZ1999/RandomProyect<filename>Proyect/api/views.py
from django.http import JsonResponse, HttpResponseBadRequest
from rest_framework import generics
from .models import Game
from .serializers import GameSerializer
from .forms import GameForm
class ListGamesView(generics.ListAPIView):
queryset = Game.objects.all()
serializer_class = GameSerializer
def post(self, request):
#print(request.data)
form = GameForm(request.POST)
if form.is_valid():
game = form.save()
return JsonResponse({"id": game.pk})
return HttpResponseBadRequest()
| 2.46875 | 2 |
src/alembic/versions/01e58ee9eccb_adjust_constraints_on_pull_request_flags.py | yifengyou/learn-pagure | 0 | 12787021 | <filename>src/alembic/versions/01e58ee9eccb_adjust_constraints_on_pull_request_flags.py<gh_stars>0
"""Adjust constraints on pull_request_flags
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2017-11-16 16:50:47.278252
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '<KEY>'
def upgrade():
""" Remove the unique constraints on UID in pull_request_flags and make
it a composite unique constraint on UID + pull_request_uid.
"""
# alter the constraints
op.drop_constraint('pull_request_flags_uid_key', 'pull_request_flags')
op.create_unique_constraint(
"pull_request_flags_uid_pull_request_uid_key",
'pull_request_flags',
["uid", "pull_request_uid"]
)
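# For reference, the upgrade is roughly equivalent to the following SQL on
# PostgreSQL (constraint names as used above):
#
#   ALTER TABLE pull_request_flags DROP CONSTRAINT pull_request_flags_uid_key;
#   ALTER TABLE pull_request_flags
#       ADD CONSTRAINT pull_request_flags_uid_pull_request_uid_key
#       UNIQUE (uid, pull_request_uid);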
def downgrade():
""" Remove the composite unique constraints on UID + pull_request_uid
in pull_request_flags and make it an unique constraint on UID .
"""
op.drop_constraint(
'pull_request_flags_uid_pull_request_uid_key',
'pull_request_flags')
op.create_unique_constraint(
"pull_request_flags_uid_key",
'pull_request_flags',
["uid"]
)
| 1.179688 | 1 |
google/cloud/aiplatform_v1beta1/types/model_monitoring.py | SinaChavoshi/python-aiplatform | 0 | 12787022 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import io
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1",
manifest={
"ModelMonitoringObjectiveConfig",
"ModelMonitoringAlertConfig",
"ThresholdConfig",
"SamplingStrategy",
},
)
class ModelMonitoringObjectiveConfig(proto.Message):
r"""Next ID: 6
Attributes:
training_dataset (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingDataset):
Training dataset for models. This field has
to be set only if
TrainingPredictionSkewDetectionConfig is
specified.
training_prediction_skew_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig):
The config for skew between training data and
prediction data.
prediction_drift_detection_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig):
The config for drift of prediction data.
"""
class TrainingDataset(proto.Message):
r"""Training Dataset information.
Attributes:
dataset (str):
The resource name of the Dataset used to
train this Model.
gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource):
The Google Cloud Storage uri of the unmanaged
Dataset used to train this Model.
bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource):
The BigQuery table of the unmanaged Dataset
used to train this Model.
data_format (str):
Data format of the dataset, only applicable
if the input is from Google Cloud Storage.
The possible formats are:
"tf-record"
The source file is a TFRecord file.
"csv"
The source file is a CSV file.
target_field (str):
The target field name the model is to
predict. This field will be excluded when doing
Predict and (or) Explain for the training data.
logging_sampling_strategy (google.cloud.aiplatform_v1beta1.types.SamplingStrategy):
Strategy to sample data from Training
Dataset. If not set, we process the whole
dataset.
"""
dataset = proto.Field(proto.STRING, number=3, oneof="data_source",)
gcs_source = proto.Field(
proto.MESSAGE, number=4, oneof="data_source", message=io.GcsSource,
)
bigquery_source = proto.Field(
proto.MESSAGE, number=5, oneof="data_source", message=io.BigQuerySource,
)
data_format = proto.Field(proto.STRING, number=2,)
target_field = proto.Field(proto.STRING, number=6,)
logging_sampling_strategy = proto.Field(
proto.MESSAGE, number=7, message="SamplingStrategy",
)
class TrainingPredictionSkewDetectionConfig(proto.Message):
r"""The config for Training & Prediction data skew detection. It
specifies the training dataset sources and the skew detection
parameters.
Attributes:
skew_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.TrainingPredictionSkewDetectionConfig.SkewThresholdsEntry]):
Key is the feature name and value is the
threshold. If a feature needs to be monitored
                for skew, a value threshold must be configured for
that feature. The threshold here is against
feature distribution distance between the
training and prediction feature.
"""
skew_thresholds = proto.MapField(
proto.STRING, proto.MESSAGE, number=1, message="ThresholdConfig",
)
class PredictionDriftDetectionConfig(proto.Message):
r"""The config for Prediction data drift detection.
Attributes:
drift_thresholds (Sequence[google.cloud.aiplatform_v1beta1.types.ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig.DriftThresholdsEntry]):
Key is the feature name and value is the
threshold. If a feature needs to be monitored
                for drift, a value threshold must be configured
                for that feature. The threshold here is against
                feature distribution distance between different
                time windows.
"""
drift_thresholds = proto.MapField(
proto.STRING, proto.MESSAGE, number=1, message="ThresholdConfig",
)
training_dataset = proto.Field(proto.MESSAGE, number=1, message=TrainingDataset,)
training_prediction_skew_detection_config = proto.Field(
proto.MESSAGE, number=2, message=TrainingPredictionSkewDetectionConfig,
)
prediction_drift_detection_config = proto.Field(
proto.MESSAGE, number=3, message=PredictionDriftDetectionConfig,
)
class ModelMonitoringAlertConfig(proto.Message):
r"""Next ID: 2
Attributes:
email_alert_config (google.cloud.aiplatform_v1beta1.types.ModelMonitoringAlertConfig.EmailAlertConfig):
Email alert config.
"""
class EmailAlertConfig(proto.Message):
r"""The config for email alert.
Attributes:
user_emails (Sequence[str]):
The email addresses to send the alert.
"""
user_emails = proto.RepeatedField(proto.STRING, number=1,)
email_alert_config = proto.Field(
proto.MESSAGE, number=1, oneof="alert", message=EmailAlertConfig,
)
class ThresholdConfig(proto.Message):
r"""The config for feature monitoring threshold.
Next ID: 3
Attributes:
value (float):
Specify a threshold value that can trigger
the alert. If this threshold config is for
feature distribution distance: 1. For
categorical feature, the distribution distance
            is calculated by the L-infinity norm.
2. For numerical feature, the distribution
distance is calculated by Jensen–Shannon
divergence.
Each feature must have a non-zero threshold if
they need to be monitored. Otherwise no alert
will be triggered for that feature.
"""
value = proto.Field(proto.DOUBLE, number=1, oneof="threshold",)
class SamplingStrategy(proto.Message):
r"""Sampling Strategy for logging, can be for both training and
prediction dataset.
Next ID: 2
Attributes:
random_sample_config (google.cloud.aiplatform_v1beta1.types.SamplingStrategy.RandomSampleConfig):
Random sample config. Will support more
sampling strategies later.
"""
class RandomSampleConfig(proto.Message):
r"""Requests are randomly selected.
Attributes:
sample_rate (float):
Sample rate (0, 1]
"""
sample_rate = proto.Field(proto.DOUBLE, number=1,)
random_sample_config = proto.Field(
proto.MESSAGE, number=1, message=RandomSampleConfig,
)
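# Illustrative sketch (not part of the generated module): assuming the usual
# proto-plus keyword constructors, a monitoring objective could be assembled
# roughly like this; the project path and feature name below are hypothetical.
#
#   objective = ModelMonitoringObjectiveConfig(
#       training_dataset=ModelMonitoringObjectiveConfig.TrainingDataset(
#           dataset="projects/my-project/locations/us-central1/datasets/123",
#           target_field="label",
#           logging_sampling_strategy=SamplingStrategy(
#               random_sample_config=SamplingStrategy.RandomSampleConfig(sample_rate=0.2),
#           ),
#       ),
#       prediction_drift_detection_config=ModelMonitoringObjectiveConfig.PredictionDriftDetectionConfig(
#           drift_thresholds={"age": ThresholdConfig(value=0.3)},
#       ),
#   )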
__all__ = tuple(sorted(__protobuf__.manifest))
| 1.765625 | 2 |
jquery_widgets/fields.py | fu2re/django-jquery-widgets | 1 | 12787023 | <filename>jquery_widgets/fields.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from django import forms
from .widgets import JQWSlider
class ForeignKeySearchField(forms.fields.ChoiceField):
"""Form field to select a model for a ForeignKey db field"""
pass # Fix this field later
class JQWSliderField(forms.IntegerField):
def __init__(self, min_value, max_value, step=1, **kwargs):
self.step = step
kwargs.update({'widget': JQWSlider})
super(JQWSliderField, self).__init__(max_value, min_value, **kwargs)
def widget_attrs(self, widget):
widget.attrs.update({
'min_value': self.min_value,
'max_value': self.max_value,
'step': self.step
}) | 2.0625 | 2 |
data-structures/python/fibo2.py | zubinshah/algorithms | 0 | 12787024 | <gh_stars>0
#!/usr/bin/python
import sys
# This program prints the Fibonacci series up to a given limit.
def fibo(n):  # print the Fibonacci series of terms below n
    """Print the Fibonacci terms that are less than n."""
a, b = 0, 1
fibSeq = []
while a < n:
fibSeq.append(a)
a , b = b, a+b
print fibSeq
if len(sys.argv)==1:
fibo(1000)
else:
fibo(int(sys.argv[1]))
| 4.03125 | 4 |
functions/Naive_Bayes_Agrawal/test/test-producer.py | pandio-com/pulsar-python-functions | 7 | 12787025 | <reponame>pandio-com/pulsar-python-functions<filename>functions/Naive_Bayes_Agrawal/test/test-producer.py
import pulsar, json, numpy
from skmultiflow.data import AGRAWALGenerator
client = pulsar.Client('pulsar://127.0.0.1:6650')
producer = client.create_producer('persistent://public/default/in')
generator = AGRAWALGenerator()
while generator.has_more_samples():
X, Y = generator.next_sample()
producer.send(json.dumps({'X': X.tolist(), 'Y': Y.tolist()}).encode('utf8'))
client.close() | 2.234375 | 2 |
desafios/des006.py | Ericssm96/python | 0 | 12787026 | n1 = int(input('Digite um número que você deseje saber o dobro, triplo e raiz quadrada: '))
print(f'O dobro do seu número é {n1 * 2}, o triplo é {n1 * 3}, e a raiz quadrada é {n1 ** (1 / 2)}.')
| 3.6875 | 4 |
python/misc/clean-sms-mms.py | bmaupin/graveyard | 1 | 12787027 | <filename>python/misc/clean-sms-mms.py
#!/usr/bin/env python3
''' Deletes old messages from a backup file created by Titanium Backup Pro
'''
import datetime
import lxml.etree
import shutil
import sys
MAXIMUM_MESSAGE_AGE_IN_DAYS = 365
if len(sys.argv) < 2:
sys.exit('USAGE: %s /path/to/com.keramidas.virtual.XML_MESSAGES-XXXXXXXX-XXXXXX.xml' % (sys.argv[0]))
infile_name = sys.argv[1]
# Create a backup copy since we'll modify the original
outfile_name = infile_name + '.bak'
shutil.copy2(infile_name, outfile_name)
# Remove any SMS/MMS messages older than MAXIMUM_MESSAGE_AGE_IN_DAYS
root = lxml.etree.parse(infile_name)
elements_to_remove = []
for element in root.iter():
if element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}sms' \
or element.tag == '{http://www.titaniumtrack.com/ns/titanium-backup/messages}mms':
message_date = datetime.datetime.strptime(element.get('date'), '%Y-%m-%dT%H:%M:%S.%fZ')
if datetime.datetime.now() - message_date > datetime.timedelta(MAXIMUM_MESSAGE_AGE_IN_DAYS):
# We can't remove elements while we're iterating through them
elements_to_remove.append(element)
for element in elements_to_remove:
element.getparent().remove(element)
with open(infile_name, 'wb') as infile:
infile.write(lxml.etree.tostring(root, pretty_print=True, xml_declaration=True))
| 2.78125 | 3 |
Menu.py | MEBestawy/IMESH-Project | 0 | 12787028 | <reponame>MEBestawy/IMESH-Project<gh_stars>0
import pygame
from State import STATE
from typing import Dict
from Button import Button
import Game
WHITE = (255, 255, 255)
class Menu:
"""
This class handles the different game screens of Connect 2^2 depending on
the current game gamestate.
===Private Attributes===
_game: reference of the Game class so that Menu can update the game
_xpos: the x-coordinate position for the scrolling background
    _sound: the cursor clicking sound
===Public Attributes===
transbg: a reusable transparent background using Pygame.Surface
buttons: a dictionary of strings mapping to buttons that Menu will use for rendering
"""
# Private Instance Attributes
_game: Game
_xpos: int
_sound: pygame.mixer.Sound
_trackname: str
# Public Instance Attributes
transbg: pygame.Surface
buttons: Dict
def __init__(self, game, handler):
"""
Initializes a Menu that has the game, and buttons.
:param game: the main game class
"""
self._game = game
self._handler = handler
self._xpos = 0
self._sound = pygame.mixer.Sound("./Assets/audio/sfx/click.wav")
self._trackname = "track_" + str(self._game.currtrack)
# Creates reusable assets
# Instantiates a translucent background
self.transbg = pygame.Surface((700, 440))
self.transbg.set_alpha(100)
self.transbg.fill((0, 0, 0))
# Instantiates the buttons
# will need to split dictionary to specified screens
self.buttons = {
"play": Button((0, 0, 0), 350, 250, 50, 120, "PLAY", 20, True),
"option": Button((0, 0, 0), 350, 350, 50, 120, "OPTION", 20, True),
"quit": Button((0, 0, 0), 350, 450, 50, 120, "QUIT", 20, True),
"back": Button((0, 0, 0), 660, 545, 50, 120, "BACK", 20, True),
"arrowl": Button((0, 0, 0), 350, 115, 50, 50, " < ", 40, False),
"arrowr": Button((0, 0, 0), 655, 115, 50, 50, " > ", 40, False),
"arrow_bgm_l": Button((0, 0, 0), 350, 190, 50, 50, " < ", 40, False),
"arrow_bgm_r": Button((0, 0, 0), 655, 190, 50, 50, " > ", 40, False),
"arrow_sfx_l": Button((0, 0, 0), 350, 265, 50, 50, " < ", 40, False),
"arrow_sfx_r": Button((0, 0, 0), 655, 265, 50, 50, " > ", 40, False),
"return": Button((0, 0, 0), 350, 450, 50, 120, "RETURN", 20, True),
}
def on_click(self, mouse_press):
"""
React to the given <mousepress> position to as appropriate to change
the game state or to exit the game.
"""
pos = mouse_press.get_pos()
if self._game.gamestate == STATE.Menu:
# Detect if a button on the Menu screen
if self.buttons["play"].is_hover(pos):
self._sound.play()
self._game.gamestate = STATE.Game
if self.buttons["option"].is_hover(pos):
self._sound.play()
self._game.gamestate = STATE.Option
if self.buttons["quit"].is_hover(pos):
self._sound.play()
self._game.running = False
elif self._game.gamestate == STATE.Option:
# Detect if BGM arrows are clicked
if self.buttons["arrow_bgm_l"].is_hover(pos):
self._sound.play()
reduce_vol = -0.1
curr_vol = pygame.mixer_music.get_volume() + reduce_vol
if curr_vol <= 0:
pygame.mixer_music.set_volume(0)
elif curr_vol > 0:
pygame.mixer_music.set_volume(curr_vol)
if self.buttons["arrow_bgm_r"].is_hover(pos):
self._sound.play()
pygame.mixer_music.set_volume(pygame.mixer_music.get_volume() + 0.1)
# Detect if SFX arrows are clicked
if self.buttons["arrow_sfx_l"].is_hover(pos):
self._sound.play()
reduce_vol = -0.1
curr_vol = self._sound.get_volume() + reduce_vol
if curr_vol <= 0:
self._sound.set_volume(0)
elif curr_vol > 0:
self._sound.set_volume(curr_vol)
if self.buttons["arrow_sfx_r"].is_hover(pos):
self._sound.play()
self._sound.set_volume(self._sound.get_volume() + 0.1)
# Detect if Track arrows are clicked
if self.buttons["arrowl"].is_hover(pos):
self._sound.play()
if self._game.currtrack > 0:
self._game.set_bgm(self._game.tracks[self._game.currtrack - 1],
self._game.currtrack - 1)
elif self._game.currtrack == 0:
self._game.set_bgm(self._game.tracks[-1],
len(self._game.tracks) - 1)
self._trackname = self._trackname[:-1] + str(self._game.currtrack)
if self.buttons["arrowr"].is_hover(pos):
self._sound.play()
if self._game.currtrack < len(self._game.tracks) - 1:
self._game.set_bgm(self._game.tracks[self._game.currtrack + 1],
self._game.currtrack + 1)
elif self._game.currtrack == len(self._game.tracks) - 1:
self._game.set_bgm(self._game.tracks[0], 0)
self._trackname = self._trackname[:-1] + str(self._game.currtrack)
# Back button that returns to main menu (title screen)
if self.buttons["back"].is_hover(pos):
self._sound.play()
self._game.gamestate = STATE.Menu
elif self._game.gamestate == STATE.End:
# Returns to main menu (title screen)
if self.buttons["return"].is_hover(pos):
self._sound.play()
self._game.set_winner("-")
self._game.gamestate = STATE.Menu
def tick(self):
"""
Animates the Menu given a pygame mouse event.
If the pygame.event == MOUSEBUTTONDOWN, then
animate accordingly.
"""
for event in self._game.events:
if event.type == pygame.MOUSEBUTTONDOWN:
self.on_click(pygame.mouse)
def render(self, screen: pygame.Surface):
"""
Renders the game screen depending on the
current game state.
"""
# Draws the background
background = pygame.image.load("./Assets/BACKGROUND.png").convert()
rel_x = self._xpos % background.get_rect().width
screen.blit(background, (rel_x - background.get_rect().width, 0))
# Animates the scrolling background
if rel_x < screen.get_width():
screen.blit(background, (rel_x, 0))
self._xpos -= 1
if self._game.gamestate == STATE.Menu:
# Set the text
title = pygame.image.load("./Assets/title.png").convert()
title.set_colorkey((85, 255, 0))
screen.blit(title, ((self._game.width/2 - title.get_width()/2), 75))
# Draw the Buttons
self.buttons["play"].draw(screen)
self.buttons["option"].draw(screen)
self.buttons["quit"].draw(screen)
if self._game.gamestate == STATE.Option:
# Create the font
font = pygame.font.Font("./Assets/joystix_monospace.ttf", 40)
font2 = pygame.font.Font("./Assets/joystix_monospace.ttf", 25)
# Instantiate the text and other objects
options = font.render("OPTION", 1, WHITE)
bgm = font2.render("SELECT BGM", 1, WHITE)
trknm = font2.render(self._trackname, 1, WHITE)
bgm_vol = font2.render("BGM VOLUME", 1, WHITE)
bgm_vol_num = font2.render(str(round(pygame.mixer_music.get_volume() * 10)), 1, WHITE)
sfx_vol = font2.render("SFX VOLUME", 1, WHITE)
sfx_vol_num = font2.render(str(round(self._sound.get_volume() * 10)), 1, WHITE)
# Display the title and other objects
screen.blit(self.transbg, (50, 100))
pygame.draw.rect(screen, (255, 255, 255), (50, 100, 700, 440), 3)
pygame.draw.rect(screen, (255, 255, 255), (50, 100, 700, 225), 3)
howplay = pygame.image.load("./Assets/howplay.png").convert()
howplay.set_colorkey((85, 255, 0))
howplay = pygame.transform.scale(howplay, (533, 200))
screen.blit(options, ((self._game.width/2 - options.get_width()/2), 40))
screen.blit(bgm, (75, 125))
screen.blit(trknm, ((655 + 350 + 50)/2 - trknm.get_width()/2, 125)) #around 450
screen.blit(bgm_vol, (75, 200))
screen.blit(bgm_vol_num, ((655 + 350 + 165)/2 - trknm.get_width()/2, 200))
screen.blit(sfx_vol, (75, 275))
screen.blit(sfx_vol_num, ((655 + 350 + 165)/2 - trknm.get_width()/2, 275))
screen.blit(howplay, ((self._game.width/2 - howplay.get_width()/2), 330))
# Draw button
self.buttons["back"].draw(screen)
self.buttons["arrowl"].draw(screen)
self.buttons["arrowr"].draw(screen)
self.buttons["arrow_bgm_r"].draw(screen)
self.buttons["arrow_bgm_l"].draw(screen)
self.buttons["arrow_sfx_r"].draw(screen)
self.buttons["arrow_sfx_l"].draw(screen)
if self._game.gamestate == STATE.End:
# Create the font
font = pygame.font.Font("./Assets/joystix_monospace.ttf", 50)
font2 = pygame.font.Font("./Assets/joystix_monospace.ttf", 30)
gameover = font.render("GAME OVER", 1, WHITE)
if self._game.get_winner() == 'X':
winner = font2.render("Player 1 WON!", 1, WHITE)
elif self._game.get_winner() == 'O':
winner = font2.render("Player 2 WON!", 1, WHITE)
else:
winner = font2.render("TIE!", 1, WHITE)
# Display objects onto screen
screen.blit(gameover, ((self._game.width/2 - gameover.get_width()/2), 70))
screen.blit(winner, ((self._game.width/2 - winner.get_width()/2), 275))
# Draw button
self.buttons["return"].draw(screen)
| 3.3125 | 3 |
mppsolar/outputs/json.py | 20after4/mpp-solar | 0 | 12787029 | import json as js
import logging
from .baseoutput import baseoutput
log = logging.getLogger("MPP-Solar")
class json(baseoutput):
def __str__(self):
return "json - outputs the results to standard out in json format"
def __init__(self, *args, **kwargs) -> None:
log.debug(f"processor.json __init__ kwargs {kwargs}")
def output(self, *args, **kwargs):
log.info("Using output processor: json")
log.debug(f"processor.json.output kwargs {kwargs}")
data = self.get_kwargs(kwargs, "data")
output = {}
for key in data:
value = data[key]
if isinstance(value, list):
value = data[key][0]
# unit = data[key][1]
# remove spaces
key = key.lower().replace(" ", "_")
output[key] = value
print(js.dumps(output))
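# Illustrative usage sketch (hypothetical decoded data; keys with list values
# take the first element, and spaces in keys become underscores):
#
#   json().output(data={'AC Output Voltage': [230.0, 'V']})
#   # prints: {"ac_output_voltage": 230.0}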
| 2.84375 | 3 |
blog/app/crud/user_crud.py | evi1ghost/blog_on_fast_api | 1 | 12787030 | <filename>blog/app/crud/user_crud.py
from sqlalchemy.orm import Session
from fastapi import Depends, HTTPException, status
from jose import JWTError, jwt
from ..auth import (
verify_password, get_password_hash, oauth2_scheme,
SECRET_KEY, ALGORITHM
)
from ..database import get_db
from ..models import user_models as models
from ..schemas import user_schemas as schemas
from ..schemas.token_schemas import TokenData
def get_user(db: Session, user_id: int):
return db.query(models.User).filter(models.User.id == user_id).first()
def authenticate_user(db: Session, username: str, password: str):
user = get_user_by_username(db, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def get_user_by_username(db: Session, username: str):
return db.query(models.User).filter(
models.User.username == username).first()
def create_user(db: Session, user: schemas.UserCreate):
hashed_password = get_password_hash(user.password)
db_user = models.User(
username=user.username, hashed_password=hashed_password
)
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
async def get_current_user(
token: str = Depends(oauth2_scheme),
db: Session = Depends(get_db)
):
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": "Bearer"},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
if username is None:
raise credentials_exception
token_data = TokenData(username=username)
except JWTError:
raise credentials_exception
user = get_user_by_username(db, username=token_data.username)
if user is None:
raise credentials_exception
return user
async def get_current_active_user(
current_user: schemas.User = Depends(get_current_user)
):
if not current_user.is_active:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
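# Illustrative sketch (not part of this module): a FastAPI route could depend
# on the helpers above roughly like this, assuming an existing `app` instance:
#
#   @app.get("/users/me", response_model=schemas.User)
#   async def read_current_user(
#       current_user: schemas.User = Depends(get_current_active_user)
#   ):
#       return current_user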
| 2.578125 | 3 |
code/module/image_module/image_handler.py | s31b18/comp4211-spring2018-project | 1 | 12787031 | import urllib.request
import os
class ImageHandler:
def __init__(self, dir_prefix):
self.poster_dir_path = "./poster/" + dir_prefix + "/"
self.poster_url = "http://image.tmdb.org/t/p/w185"
def download_all_posters(self, df):
directory = os.path.dirname(self.poster_dir_path)
if not os.path.exists(directory):
os.makedirs(directory)
print("Download Image")
for i, url in enumerate(df["poster_path"]):
print(i, "/" + str(len(df["poster_path"])))
if not os.path.exists(self.poster_dir_path + str(i) + ".jpg"):
self.download_poster(url, i)
print("Download Finish")
def download_poster(self, poster_path, ids):
try:
urllib.request.urlretrieve(self.poster_url + str(poster_path), self.poster_dir_path + str(ids) + ".jpg")
except IOError as e:
print('404', e)
except Exception as e:
print('404', e)
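# Illustrative usage sketch (hypothetical DataFrame with a 'poster_path' column):
#
#   handler = ImageHandler("train")
#   handler.download_all_posters(df)  # saves images under ./poster/train/<row>.jpg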
| 3.15625 | 3 |
act/cli.py | attakei/act | 1 | 12787032 | <filename>act/cli.py
# -*- coding:utf-8 -*-
from pathlib import Path
import logging
import subprocess
import click
Logger = logging.getLogger(__name__)
ROOT = Path(__file__).resolve().parents[1] # type: Path
@click.group()
def act():
pass
@act.command()
def upgrade():
"""Upgrade act
"""
repo_dir = Path(__file__).parent.parent
# Run git pull
from git import Repo
repo = Repo(str(repo_dir))
repo.git.pull()
# TODO: Not use subprocess (use Python lib)
commands = [
'pip install -e .',
]
for command in commands:
proc = subprocess.Popen(
command.split(), cwd=str(repo_dir))
proc.communicate()
def main():
"""Script endpoint
"""
# Import subcommands
    from . import (  # noqa: flake8
git,
gitlab,
pelican,
python,
)
act()
| 2.421875 | 2 |
2017/day6.py | JonSn0w/advent-of-code | 1 | 12787033 | <gh_stars>1-10
# There are sixteen memory banks; each memory bank can hold any number of blocks. The goal of the reallocation routine is to balance the blocks between the memory banks.
# The reallocation routine operates in cycles. In each cycle, it finds the memory bank with the most blocks (ties won by the lowest-numbered memory bank) and redistributes those blocks among the banks. To do this, it removes all of the blocks from the selected bank, then moves to the next (by index) memory bank and inserts one of the blocks. It continues doing this until it runs out of blocks; if it reaches the last memory bank, it wraps around to the first one.
# The debugger would like to know how many redistributions can be done before a blocks-in-banks configuration is produced that has been seen before.
while True:
try:
banks = [int(s) for s in input().split()];
except EOFError:
break;
s = set();
cycles = 0;
while(len(s) == cycles):
cycles += 1;
m = max(banks);
i = banks.index(m);
banks[i] = 0;
while(m > 0):
i += 1;
banks[i % len(banks)] += 1;
m -= 1;
s.add(str(banks));
print(cycles);
| 3.953125 | 4 |
train_utils.py | ulissesbcorrea/ABSA-PyTorch | 0 | 12787034 | # -*- coding: utf-8 -*-
# file: train_utils.py
# author: songyouwei <<EMAIL>>
# Copyright (C) 2018. All Rights Reserved.
from data_utils import ABSADatesetReader
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Instructor:
def __init__(self, module_class, model_name, dataset='twitter', embed_dim=100, max_seq_len=40, batch_size=128):
absa_dataset = ABSADatesetReader(dataset=dataset, embed_dim=embed_dim, max_seq_len=max_seq_len)
self.train_data_loader = DataLoader(dataset=absa_dataset.train_data, batch_size=batch_size, shuffle=True)
self.test_data_loader = DataLoader(dataset=absa_dataset.test_data, batch_size=len(absa_dataset.test_data), shuffle=False)
self.writer = SummaryWriter(log_dir='{0}_logs'.format(model_name))
self.model = module_class(absa_dataset.embedding_matrix).to(device)
def run(self, inputs_cols, learning_rate=0.001, num_epochs=20, log_step=5):
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
params = filter(lambda p: p.requires_grad, self.model.parameters())
optimizer = torch.optim.Adam(params, lr=learning_rate)
max_test_acc = 0
global_step = 0
for epoch in range(num_epochs):
print('>' * 100)
print('epoch: ', epoch)
n_correct, n_total = 0, 0
for i_batch, sample_batched in enumerate(self.train_data_loader):
global_step += 1
# switch model to training mode, clear gradient accumulators
self.model.train()
optimizer.zero_grad()
inputs = [sample_batched[col].to(device) for col in inputs_cols]
targets = sample_batched['polarity'].to(device)
outputs = self.model(inputs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
if global_step % log_step == 0:
n_correct += (torch.argmax(outputs, -1) == targets).sum().item()
n_total += len(outputs)
train_acc = n_correct / n_total
# switch model to evaluation mode
self.model.eval()
n_test_correct, n_test_total = 0, 0
with torch.no_grad():
for t_batch, t_sample_batched in enumerate(self.test_data_loader):
t_inputs = [t_sample_batched[col].to(device) for col in inputs_cols]
t_targets = t_sample_batched['polarity'].to(device)
t_outputs = self.model(t_inputs)
n_test_correct += (torch.argmax(t_outputs, -1) == t_targets).sum().item()
n_test_total += len(t_outputs)
test_acc = n_test_correct / n_test_total
if test_acc > max_test_acc:
max_test_acc = test_acc
print('loss: {:.4f}, acc: {:.4f}, test_acc: {:.4f}'.format(loss.item(), train_acc, test_acc))
# log
self.writer.add_scalar('loss', loss, global_step)
self.writer.add_scalar('acc', train_acc, global_step)
self.writer.add_scalar('test_acc', test_acc, global_step)
self.writer.close()
print('max_test_acc: {0}'.format(max_test_acc))
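# Illustrative usage sketch (hypothetical model class and input columns; the
# actual names depend on the ABSA model being trained):
#
#   ins = Instructor(LSTM, 'lstm', dataset='twitter', embed_dim=100, max_seq_len=40)
#   ins.run(inputs_cols=['text_raw_indices'], learning_rate=0.001, num_epochs=20)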
| 2.28125 | 2 |
prediction/tools/model_warmup/model_warmup.py | gogasca/ai-platform-samples-1 | 418 | 12787035 | <gh_stars>100-1000
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate Warmup requests."""
import tensorflow as tf
import requests
from tensorflow.python.framework import tensor_util
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_log_pb2
IMAGE_URL = 'https://tensorflow.org/images/blogs/serving/cat.jpg'
NUM_RECORDS = 100
def get_image_bytes():
image_content = requests.get(IMAGE_URL, stream=True)
image_content.raise_for_status()
return image_content.content
def main():
"""Generate TFRecords for warming up."""
with tf.io.TFRecordWriter("tf_serving_warmup_requests") as writer:
image_bytes = get_image_bytes()
predict_request = predict_pb2.PredictRequest()
predict_request.model_spec.name = 'resnet'
predict_request.model_spec.signature_name = 'serving_default'
predict_request.inputs['image_bytes'].CopyFrom(
tensor_util.make_tensor_proto([image_bytes], tf.string))
log = prediction_log_pb2.PredictionLog(
predict_log=prediction_log_pb2.PredictLog(request=predict_request))
for r in range(NUM_RECORDS):
writer.write(log.SerializeToString())
if __name__ == "__main__":
main()
| 2.359375 | 2 |
topohiding/primes.py | jmgrosen/6857project | 0 | 12787036 | import secrets
def max_pow_2(n):
    """Return the largest r such that 2 ** r divides n."""
    i = 0
    while n % (2 ** (i + 1)) == 0:
        i += 1
    return i
def miller_rabin_p(n, r, d, a):
    """Return True if base a does not witness n as composite (n - 1 == 2**r * d)."""
    x = pow(a, d, n)
    if x in (1, n - 1):
        return True
    for _ in range(r - 1):
        x = (x * x) % n
        if x == n - 1:
            return True
    return False
def miller_rabin(k, n):
    """Probabilistic primality test: check whether n is prime using k random bases."""
    if n in (2, 3):
        return True
    if n < 2 or n % 2 == 0:
        return False
    # Write n - 1 as 2 ** r * d with d odd.
    r = max_pow_2(n - 1)
    d = (n - 1) // (2 ** r)
    return all(miller_rabin_p(n, r, d, 2 + secrets.randbelow(n - 3))
               for _ in range(k))
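# Illustrative usage sketch (hypothetical values): 10 random bases give a strong
# probabilistic guarantee for small candidates.
#
#   miller_rabin(10, 97)   # expected: True (97 is prime)
#   miller_rabin(10, 561)  # expected: False (561 is a Carmichael number)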
| 3.390625 | 3 |
mentorship_pairing/models.py | steveflys/mentorship-app | 9 | 12787037 | <reponame>steveflys/mentorship-app<filename>mentorship_pairing/models.py
from django.core.exceptions import ValidationError
from django.db import models
# Create your models here.
from mentorship_profile.models import (Mentee, Mentor, Profile)
# TODO: Different reasons for Mentor/Mentee?
DISCONTINUE_REASONS = (
"Personal Reasons",
"No longer needed",
"No longer have time",
)
MENTOR_REJECT_PAIRING_REASONS = (
"Unavailable",
"Not a good fit."
)
MENTEE_REJECT_PAIRING_REASONS = (
"Not a good fit.",
"Want somebody with more experience."
)
class ActivePairingsManager(models.Manager):
"""Return the set of approved Mentors."""
def get_queryset(self):
"""Overwrite to return only active Pairings."""
return super(ActivePairingsManager, self).get_queryset()\
.filter(status="active").all()
class PendingPairingsManager(models.Manager):
"""Return the set of pending pairings."""
def get_queryset(self):
"""Overwrite to return only pending Mentors."""
return super(PendingPairingsManager, self).get_queryset()\
.filter(status="pending").all()
class Pairing(models.Model):
"""Definition of a Pairing between a Mentor and Mentee."""
STATUS_CHOICES = (
("pending", "Pending"),
("active", "Active"),
("rejected", "Rejected"),
("discontinued", "Discontinued")
)
mentor = models.OneToOneField(
Mentor,
related_name="pairing",
on_delete=models.CASCADE
)
mentee = models.OneToOneField(
Mentee,
related_name="pairing",
on_delete=models.CASCADE
)
requested_by = models.ForeignKey(
Profile,
related_name="requested_pairing",
null=True,
blank=True
)
status = models.CharField(
choices=STATUS_CHOICES,
max_length=40,
blank=False,
null=False,
default="pending"
)
request_message = models.TextField(
blank=True
)
date_created = models.DateTimeField(
auto_now_add=True
)
date_modified = models.DateTimeField(
auto_now=True
)
active_pairings = ActivePairingsManager()
pending_pairings = PendingPairingsManager()
objects = models.Manager()
def save(self, *args, **kwargs):
"""
Overwrite save method.
We want to prevent saving a Pairing if a mentor and mentee are the same
user.
"""
        if self.mentor.profile == self.mentee.profile:
raise ValidationError("Mentor and Mentee cannot be same user.")
else:
super(Pairing, self).save(*args, **kwargs)
def is_user_in_pairing(self, user):
"""Return whether or not the given user is in the pairing."""
return user in (self.mentor.profile.user, self.mentee.profile.user)
@property
def requestor(self):
"""Return the profile that initiated this pairing."""
return self.requested_by
@property
def requestee(self):
"""Return the profile that is requested to join this pairing."""
if self.requested_by is None:
# We don't know, return None
return None
        elif self.requested_by == self.mentee.profile:
return self.mentor.profile
return self.mentee.profile
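# Illustrative usage sketch (hypothetical mentor/mentee instances): a pairing
# request could be created and later activated roughly like this:
#
#   pairing = Pairing.objects.create(
#       mentor=mentor, mentee=mentee,
#       requested_by=mentee.profile,
#       request_message="Looking for guidance on web development.",
#   )
#   pairing.status = "active"
#   pairing.save()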
| 2.328125 | 2 |
week8/predict-sub2.py | rve/DM-Lab | 2 | 12787038 | <gh_stars>1-10
# coding: utf-8
# In[102]:
#visualization part
#import matplotlib.pyplot as plt
#import seaborn as sns
#basic libs
import pandas as pd
import numpy as np
import random as rnd
from sklearn.model_selection import cross_val_score
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
train_=pd.read_csv('../train_allcols.csv')
validate_=pd.read_csv('../validate_allcols.csv')
#test=pd.read_csv('../testwDSM.csv')
train_.shape, validate_.shape, #test.shape
# In[103]:
#train.describe()
train = train_.sample(20000)
validate = validate_.sample(6000)
train.shape, validate.shape, validate.head(2)
# In[104]:
#train = train.query('SUB1 <= 10').query('SUB2 <= 10')
#validate = validate.query('SUB1 <= 10').query('SUB2 <= 10')
drop_list = ['SUB2', 'ROUTE2', 'FREQ2', 'FRSTUSE2', 'SUB3', 'ROUTE3', 'FREQ3', 'FRSTUSE3', 'NUMSUBS'
]
retain_list = ['DETNLF','ETHNIC','DAYWAIT','RACE','VET','LIVARAG','PRIMINC','FRSTUSE1','HLTHINS','MARSTAT',
'METHUSE','GENDER','EMPLOY','FREQ1','PSYPROB','YEAR','EDUC','PSOURCE','REGION',
'NOPRIOR','SERVSETA','DETCRIM','DIVISION','DSMCRIT','ROUTE1','AGE','SUB1','IDU',]
X_train = train[retain_list]
Y_train = train["SUB2"]
X_validate = validate[retain_list]
Y_validate = validate["SUB2"]
#X_test = test.drop(drop_list, axis=1)
X_train.shape, X_validate.shape, #X_test.shape
# In[105]:
print X_train.columns.tolist()
# In[106]:
from sklearn.feature_selection import SelectKBest, SelectPercentile
from sklearn.feature_selection import f_classif,chi2
#Selector_f = SelectPercentile(f_classif, percentile=25)
Selector_f = SelectKBest(f_classif, k=11)
Selector_f.fit(X_train,Y_train)
X_new = SelectKBest(chi2, k=11).fit_transform(X_train, Y_train)
zipped = zip(X_train.columns.tolist(),Selector_f.scores_)
ans = sorted(zipped, key=lambda x: x[1])
for n,s in ans:
    print 'F-score: %3.2f\t for feature %s' % (s,n)
# In[107]:
#one hot
from sklearn import preprocessing
# 1. INSTANTIATE
enc = preprocessing.OneHotEncoder()
# 2. FIT
enc.fit(X_train)
# 3. Transform
onehotlabels = enc.transform(X_train).toarray()
#onehotlabels.shape
X_train = onehotlabels
onehotlabels = enc.transform(X_validate).toarray()
X_validate = onehotlabels
X_train.shape, X_validate.shape
# In[108]:
# Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_train, Y_train)
#Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_validate, Y_validate) * 100, 2)
print acc_log
# In[109]:
# Perceptron
perceptron = Perceptron()
perceptron.fit(X_train, Y_train)
#Y_pred = perceptron.predict(X_test)
acc_perceptron = round(perceptron.score(X_validate, Y_validate) * 100, 2)
print acc_perceptron
# In[110]:
# Stochastic Gradient Descent
sgd = SGDClassifier()
sgd.fit(X_train, Y_train)
#Y_pred = sgd.predict(X_test)
acc_sgd = round(sgd.score(X_validate, Y_validate) * 100, 2)
print acc_sgd
# In[111]:
# Random Forest (slow)
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(X_train, Y_train)
#Y_pred = random_forest.predict(X_test)
random_forest.score(X_train, Y_train)
acc_random_forest = round(random_forest.score(X_validate, Y_validate) * 100, 2)
print acc_random_forest
#print cross_val_score(random_forest, X_validate, Y_validate)
# In[112]:
from sklearn.ensemble import GradientBoostingClassifier, AdaBoostClassifier, RandomForestClassifier
#Adaboost
ada_boost = AdaBoostClassifier(random_state=1851)
ada_boost.fit(X_train, Y_train)
acc_ada = round(ada_boost.score(X_validate, Y_validate) * 100, 2)
print acc_ada
# In[113]:
# Stochastic Gradient Descent
sgd = SGDClassifier()
sgd.fit(X_train, Y_train)
#Y_pred = sgd.predict(X_test)
acc_sgd = round(sgd.score(X_validate, Y_validate) * 100, 2)
print acc_sgd
# In[114]:
# Linear SVC
linear_svc = LinearSVC()
linear_svc.fit(X_train, Y_train)
#Y_pred = linear_svc.predict(X_test)
acc_linear_svc = round(linear_svc.score(X_train, Y_train) * 100, 2)
print acc_linear_svc
import xgboost as xgb
xgb_boost = xgb.XGBClassifier(seed=1850, n_jobs=-1)
xgb_boost.fit(X_train, Y_train)
acc_xgb = round(xgb_boost.score(X_validate, Y_validate) * 100, 2)
# In[115]:
print 'predict-sub2-woflags-newsplit-sample20000'
models = pd.DataFrame({
'Model': [ 'Logistic Regression',
'Random Forest', 'Perceptron',
'Stochastic Gradient Decent', 'Linear SVC',
'AdaBoost', 'XGBoost'],
'Validate Set Score': [acc_log,
acc_random_forest, acc_perceptron,
acc_sgd, acc_linear_svc, acc_ada, acc_xgb]
})
models.sort_values(by='Validate Set Score', ascending=False)
| 2.671875 | 3 |
tests/test_visitors/test_tokenize/test_primitives/test_string_tokens/test_unnecessary_raw_strings.py | cdhiraj40/wemake-python-styleguide | 1,931 | 12787039 | <filename>tests/test_visitors/test_tokenize/test_primitives/test_string_tokens/test_unnecessary_raw_strings.py
import pytest
from wemake_python_styleguide.violations.consistency import (
RawStringNotNeededViolation,
)
from wemake_python_styleguide.visitors.tokenize.primitives import (
WrongStringTokenVisitor,
)
@pytest.mark.parametrize('raw_strings', [
r"r'some text\\'",
r"r'some text\''",
r"r'some text\"'",
r'r"some text\'"',
r"r'some text\t'",
r"r'some text\a'",
r"r'some text\n'",
r"r'some text\u041b'",
r"r'some text\043'",
r"r'some text\x23'",
])
def test_necessary_raw_string(
parse_tokens,
assert_errors,
default_options,
raw_strings,
):
"""Ensures that correct usage of raw string works."""
file_tokens = parse_tokens(raw_strings)
visitor = WrongStringTokenVisitor(default_options, file_tokens=file_tokens)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('raw_strings', [
"r'No escaped character'",
'r"Here neither"',
"r'''Not here as well'''",
'r"""Not here as well"""',
])
def test_unnecessary_raw_string(
parse_tokens,
assert_errors,
default_options,
raw_strings,
):
"""Ensures that usage of raw string is forbidden if no backslash."""
file_tokens = parse_tokens(raw_strings)
visitor = WrongStringTokenVisitor(default_options, file_tokens=file_tokens)
visitor.run()
assert_errors(visitor, [RawStringNotNeededViolation])
| 2.65625 | 3 |
leetcode/188.best-time-to-buy-and-sell-stock-iv.py | geemaple/algorithm | 177 | 12787040 | <filename>leetcode/188.best-time-to-buy-and-sell-stock-iv.py
class Solution(object):
def maxProfit(self, k, prices):
"""
:type k: int
:type prices: List[int]
:rtype: int
"""
if prices is None or len(prices) == 0 or k == 0:
return 0
res = 0
m = len(prices)
if k > m // 2: # same as stock 2
for i in range(1, m):
if prices[i] - prices[i - 1] > 0:
res += prices[i] - prices[i - 1]
else: # same as stock 3
state = 2 * k + 1
table = [[0 for _ in range(state + 1)] for _ in range(m + 1)]
# init
for j in range(1, state + 1):
table[0][j] = 0 if j == 1 else float('-inf')
for i in range(1, m + 1):
for j in range(1, state + 1):
value = 0
if j % 2 == 1: # 1, 3, 5
# f[i][j] = max(f[i - 1][j], f[i - 1][j - 1] + prices[i - 1] - prices[i - 2])
value = table[i - 1][j]
if i - 2 >= 0 and j > 1 and table[i - 1][j - 1] != float('-inf'):
value = max(value, table[i - 1][j - 1] + prices[i - 1] - prices[i - 2])
else: # 2, 4, 6
# f[i][j] = max(f[i - 1][j - 1], f[i - 1][j] + prices[i - 1] - prices[i - 2], f[i - 1][j - 2] + prices[i - 1] - prices[i - 2])
value = table[i - 1][j - 1]
if i - 2 >= 0 and table[i - 1][j] != float('-inf'):
value = max(value, table[i - 1][j] + prices[i - 1] - prices[i - 2])
if i - 2 >= 0 and j > 2 and table[i - 1][j - 2] != float('-inf'):
value = max(value, table[i - 1][j - 2] + prices[i - 1] - prices[i - 2])
table[i][j] = value
for j in range(1, state + 1, 2):
res = max(res, table[m][j])
return res | 3.203125 | 3 |
pyclustering/cluster/rock.py | JosephChataignon/pyclustering | 1,013 | 12787041 | """!
@brief Cluster analysis algorithm: ROCK
@details Implementation based on paper @cite inproceedings::rock::1.
@authors <NAME> (<EMAIL>)
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.cluster.encoder import type_encoding
from pyclustering.utils import euclidean_distance
from pyclustering.core.wrapper import ccore_library
import pyclustering.core.rock_wrapper as wrapper
class rock:
"""!
@brief The class represents clustering algorithm ROCK.
Example:
@code
from pyclustering.cluster import cluster_visualizer
from pyclustering.cluster.rock import rock
from pyclustering.samples.definitions import FCPS_SAMPLES
from pyclustering.utils import read_sample
# Read sample for clustering from file.
sample = read_sample(FCPS_SAMPLES.SAMPLE_HEPTA)
# Create instance of ROCK algorithm for cluster analysis. Seven clusters should be allocated.
rock_instance = rock(sample, 1.0, 7)
# Run cluster analysis.
rock_instance.process()
# Obtain results of clustering.
clusters = rock_instance.get_clusters()
# Visualize clustering results.
visualizer = cluster_visualizer()
visualizer.append_clusters(clusters, sample)
visualizer.show()
@endcode
"""
def __init__(self, data, eps, number_clusters, threshold=0.5, ccore=True):
"""!
@brief Constructor of clustering algorithm ROCK.
@param[in] data (list): Input data - list of points where each point is represented by list of coordinates.
@param[in] eps (double): Connectivity radius (similarity threshold), points are neighbors if distance between them is less than connectivity radius.
@param[in] number_clusters (uint): Defines number of clusters that should be allocated from the input data set.
@param[in] threshold (double): Value that defines degree of normalization that influences on choice of clusters for merging during processing.
@param[in] ccore (bool): Defines should be CCORE (C++ pyclustering library) used instead of Python code or not.
"""
self.__pointer_data = data
self.__eps = eps
self.__number_clusters = number_clusters
self.__threshold = threshold
self.__clusters = None
self.__ccore = ccore
if self.__ccore:
self.__ccore = ccore_library.workable()
self.__verify_arguments()
self.__degree_normalization = 1.0 + 2.0 * ((1.0 - threshold) / (1.0 + threshold))
self.__adjacency_matrix = None
self.__create_adjacency_matrix()
def process(self):
"""!
@brief Performs cluster analysis in line with rules of ROCK algorithm.
@return (rock) Returns itself (ROCK instance).
@see get_clusters()
"""
# TODO: (Not related to specification, just idea) First iteration should be investigated. Euclidean distance should be used for clustering between two
# points and rock algorithm between clusters because we consider non-categorical samples. But it is required more investigations.
if self.__ccore is True:
self.__clusters = wrapper.rock(self.__pointer_data, self.__eps, self.__number_clusters, self.__threshold)
else:
self.__clusters = [[index] for index in range(len(self.__pointer_data))]
while len(self.__clusters) > self.__number_clusters:
indexes = self.__find_pair_clusters(self.__clusters)
if indexes != [-1, -1]:
self.__clusters[indexes[0]] += self.__clusters[indexes[1]]
self.__clusters.pop(indexes[1]) # remove merged cluster.
else:
break # totally separated clusters have been allocated
return self
def get_clusters(self):
"""!
@brief Returns list of allocated clusters, each cluster contains indexes of objects in list of data.
@return (list) List of allocated clusters, each cluster contains indexes of objects in list of data.
@see process()
"""
return self.__clusters
def get_cluster_encoding(self):
"""!
@brief Returns clustering result representation type that indicate how clusters are encoded.
@return (type_encoding) Clustering result representation.
@see get_clusters()
"""
return type_encoding.CLUSTER_INDEX_LIST_SEPARATION
def __find_pair_clusters(self, clusters):
"""!
@brief Returns pair of clusters that are best candidates for merging in line with goodness measure.
The pair of clusters for which the above goodness measure is maximum is the best pair of clusters to be merged.
@param[in] clusters (list): List of clusters that have been allocated during processing, each cluster is represented by list of indexes of points from the input data set.
@return (list) List that contains two indexes of clusters (from list 'clusters') that should be merged on this step.
It can be equals to [-1, -1] when no links between clusters.
"""
maximum_goodness = 0.0
cluster_indexes = [-1, -1]
for i in range(0, len(clusters)):
for j in range(i + 1, len(clusters)):
goodness = self.__calculate_goodness(clusters[i], clusters[j])
if goodness > maximum_goodness:
maximum_goodness = goodness
cluster_indexes = [i, j]
return cluster_indexes
def __calculate_links(self, cluster1, cluster2):
"""!
@brief Returns number of link between two clusters.
@details Link between objects (points) exists only if distance between them less than connectivity radius.
@param[in] cluster1 (list): The first cluster.
@param[in] cluster2 (list): The second cluster.
@return (uint) Number of links between two clusters.
"""
number_links = 0
for index1 in cluster1:
for index2 in cluster2:
number_links += self.__adjacency_matrix[index1][index2]
return number_links
def __create_adjacency_matrix(self):
"""!
@brief Creates 2D adjacency matrix (list of lists) where each element described existence of link between points (means that points are neighbors).
"""
size_data = len(self.__pointer_data)
self.__adjacency_matrix = [[0 for i in range(size_data)] for j in range(size_data)]
for i in range(0, size_data):
for j in range(i + 1, size_data):
distance = euclidean_distance(self.__pointer_data[i], self.__pointer_data[j])
if (distance <= self.__eps):
self.__adjacency_matrix[i][j] = 1
self.__adjacency_matrix[j][i] = 1
def __calculate_goodness(self, cluster1, cluster2):
"""!
@brief Calculates coefficient 'goodness measurement' between two clusters. The coefficient defines level of suitability of clusters for merging.
@param[in] cluster1 (list): The first cluster.
@param[in] cluster2 (list): The second cluster.
@return Goodness measure between two clusters.
"""
number_links = self.__calculate_links(cluster1, cluster2)
devider = (len(cluster1) + len(cluster2)) ** self.__degree_normalization - len(cluster1) ** self.__degree_normalization - len(cluster2) ** self.__degree_normalization
return number_links / devider
def __verify_arguments(self):
"""!
@brief Verify input parameters for the algorithm and throw exception in case of incorrectness.
"""
if len(self.__pointer_data) == 0:
raise ValueError("Input data is empty (size: '%d')." % len(self.__pointer_data))
if self.__eps < 0:
raise ValueError("Connectivity radius (current value: '%d') should be greater or equal to 0." % self.__eps)
if self.__threshold < 0 or self.__threshold > 1:
raise ValueError("Threshold (current value: '%d') should be in range (0, 1)." % self.__threshold)
if (self.__number_clusters is not None) and (self.__number_clusters <= 0):
raise ValueError("Amount of clusters (current value: '%d') should be greater than 0." %
self.__number_clusters)
| 3.078125 | 3 |
subj_and_aspectrix.py | c0ntradicti0n/Distinctiopus | 0 | 12787042 | import itertools
from language.heuristic.hardcore_annotated_expression import eT, apply_fun_to_nested, eL, eD, ltd_ify, Argu
from language.heuristic.littletools.generator_tools import count_up
from language.heuristic.pairix import Pairix
from language.heuristic.littletools.nested_list_tools import flatten_reduce, collapse
from language.heuristic.similaritymixer import SimilarityMixer
from helpers.time_tools import timeit_context
import logging
logging.captureWarnings(True)
logging.getLogger().setLevel(logging.INFO)
class Subjects_and_Aspects(Pairix):
''' This module finds pairs of arguments, that are the subjects and aspects for a pair of a pair of expressions
'''
def __init__(self, corpus):
self.similar = \
SimilarityMixer([(2, SimilarityMixer.elmo_sim(), 0.4, 1)])
self.subjects_aspects = \
SimilarityMixer ([(1, SimilarityMixer.multi_paral_tup_sim(SimilarityMixer.subj_asp_sim, n=4), 0, 1),
(-1000, SimilarityMixer.multi_sim(SimilarityMixer.same_expression_sim, n=100), 0, 0.001)])
def annotate(self, clusters=None, graph_fun=None):
''' Annotates the correlations, that means expressions that are similar to each other and are DistinctFilter from the
pair, that was found as excluding each other. For instance 'from the one side' and 'from the other side'.
            In part the graph is cleaned, because examples can also be marked as seemingly contradictions.
On the other side the same operation is done for additional (sub)predications in context, following
coreferential relations
            What is a subject and what is an aspect is decided by the sequence, in respect to the theme-rheme distinction.
What you speak about, comes first, the subject. With what you want to divide this thing up, is the aspect.
If you change the direction of explanation, this also changes. E.g. If you are in the first sentence talking
about microsoft warranties, and secondly you explain different cases, you speak about warranty. If you
start with these cases and then tell, that in one case you have in the other not warranty, you speak about
these cases.
        :param clusters: 2tuple-2tuple-list-predicate-dicts, so 4 predicates in contradicting/correlating
constellation
:param graph_fun: neo4j driver
'''
def argument_tuples(predicate):
args = self.get_arguments(predicate)
return list(itertools.permutations(args, r=2))
with timeit_context('retrieve and generate pairs of arguments for each side'):
argument_tuples_in_sides = apply_fun_to_nested (
fun=argument_tuples,
attribute='predicate_id',
data=clusters)
# now in three steps:
# 1. the 1rst and 2nd element of the pairs must be similar to pairs of other sides --> hdbscan on tuple parallel
# semantical similarity
with timeit_context('computing sameness for the words within these pairs and the subject-'):
def correllate(x,y):
eL(
[self.similar.choose(data=(to_corr.unique(),
to_corr.unique()),
layout='hdbscan',
n=100)
for to_corr in to_correlate])
argument_tuples_in_sides = apply_fun_to_nested (
fun=argument_tuples,
attribute='predicate_id',
data=clusters)
# 2. these tuples have a distance between these two words within, like name ~> thing in multiple sentences
# they have a grammatical and semantical distance within. We compute this as a feature of these tuples and
# feed them again into SimilarityMixer and again hdbscan. So they must be converted to dicts
# 3. look for the maximum distance with at least two tuples in these grouped tuples.
# (things, things. things), (name answering to definition, name coresponding with the name) (name, name, name, name)
with timeit_context('compute pairs of similar distance'):
subjects_aspects = eL(
[self.subjects_aspects.choose(
(corr, corr),
n=100,
minimize=False,
layout='n',
out='ex')
for corr in correlated])
with timeit_context('writing everything'):
self.neo4j_write(graph_fun, subjects_aspects, clusters)
return subjects_aspects
def argument_or_reference_instead (self, arguments):
        ''' This exchanges, in the list of arguments, the ones that reference other nouns, and keeps the ones
            that are fine.
:param arguments: argument dicts
:return: lists with some changes of same len
'''
new_arguments = []
for argument in arguments:
reference = argument['coreferenced'](argument['coref'])
if reference:
new_arguments.extend(reference)
else:
new_arguments.append(argument)
try:
assert new_arguments and all (new_arguments)
except AssertionError:
print (arguments)
raise
assert all(isinstance(arg, Argu) for arg in new_arguments)
return new_arguments
def get_arguments(self, predicate_s):
""" Gets the arguments of the predicate
:param predicate_s: predicate-dict or predicate list
:return: argument-dict
"""
if isinstance(predicate_s, list):
arguments = eL(flatten_reduce([self.get_arguments(pred) for pred in predicate_s]))
# if len (arguments.unique()) != len(arguments):
# logging.warning("INDEED AN EFFECT!!! %d" % (len (arguments.unique())- len(arguments)))
return arguments.unique()
arguments = predicate_s['arguments']
try:
assert (arguments)
except:
raise
arguments_ref = self.argument_or_reference_instead (arguments)
assert arguments_ref
return arguments_ref
def get_correlated (self, pair):
""" Returns pairs of similar arguments
:param pair: opposed pair of predicate-dict-2tuples
:return: correlated argument-dict-2tuples
"""
arguments = self.get_arguments(pair[0][0]), self.get_arguments(pair[1][0])
if not all(arguments):
raise ValueError ('no argument for predicate, that can be referenced?')
return self.similar.choose(arguments, layout='n', n=100, out='ex')
def neo4j_write (self, graph_fun, subjects_aspects, clusters):
''' push subjects and aspects to neo4j with appropriate node_labels
:param graph_fun: neo4j driver
:param subjects_aspects: annotated structure
:param clusters: the correlating and contrasting clusters, that were used to make widows for the query of
subjects and aspects
'''
with timeit_context('typing nested list for subject/aspect'):
subjects_aspects = \
ltd_ify(subjects_aspects,
node_type=['DENOTATION'],
stack_types=['SUBJECTS_ASPECTS_ALL', 'CLUSTER', 'A_S_TUPLES', ('SUBJECTS', 'ASPECTS'), 'GROUP', 'ARGUMENT'])
with timeit_context('push results to neo4j'):
self.neo4j_push (subjects_aspects, graph_fun)
apply_fun_to_nested(fun=self.get_arguments, attribute='predicate_id', data=clusters)
with timeit_context('neo4j cleanup'):
self.merge_clean_up(graph_fun)
cnt = count_up()
def neo4j_push(self, x, graph_fun):
''' push nested annotation structure to neo4j
:param x: nested eL, eT, eD-structure
:param graph_fun: neo4j driver
'''
with timeit_context('generate query'):
query = "".join(list(collapse(x.neo4j_write() + ['\n'])))
with open("query %d.txt" % next(self.cnt), "w") as text_file:
text_file.write(query)
with timeit_context('neo4j'):
graph_fun(query)
def merge_clean_up(self, graph_fun):
''' Connect predicate and argument nodes and transit a node in the nested annotation
:param graph_fun:
:return:
'''
query = """MATCH (n:CONNOTATION),(a:ARGUMENT)
WHERE a.id in n.arg_ids
MERGE (n)-[:X]->(a)
RETURN n,a"""
graph_fun(query)
query = """MATCH (n)-->(:GROUP)-->(s)
CALL apoc.create.addLabels( id(s), labels(n) )
YIELD node as n1
MERGE (n)<-[:X]-(s)
RETURN n"""
graph_fun(query)
| 2.40625 | 2 |
randomOutput.py | poodarchu/SogouPersona | 1 | 12787043 | # -*- coding=utf-8 -*-
import random
def randomResult(inputFilePath, outputFilePath):
fr = open(inputFilePath, 'rb')
fw = open(outputFilePath, 'w')
userList = []
for line in fr.readlines():
list = line.split('\t')
userList.append(list[0])
# print len(userList)
# for i in userList[0]
# print age, gender, education
for user in userList:
age = random.randint(0, 6)
gender = random.randint(0, 2)
education = random.randint(0, 6)
result = [str(user), str(age), str(gender), str(education)]
# print result
# print type(user)
for i in result:
i.decode('utf-8').encode('GBK')
fw.write(i + ' ')
fw.write('\n')
fr.close()
fw.close()
if __name__ == '__main__':
randomResult('./data/test.csv', './output/randomResult.csv') | 3.125 | 3 |
jtalkpy/__init__.py | yhay81/jtalkpy | 1 | 12787044 | import subprocess
from typing import Union
class Jtalk:
def __init__(self,
dict_dir,
voice_file,
output_file='/dev/null',
trace_file='/dev/null',
sampling='auto',
frame_period='auto',
all_pass='auto',
filter_coefficient=0.0,
speed_rate=1.0,
half_tone=0.0,
threshold=0.5,
spectrum=1.0,
log_f0=1.0,
volume=0.0,
buffer=0
):
"""
in ubuntu apt-get;
dict_dir = "/var/lib/mecab/dic/open-jtalk/naist-jdic"
voice_file = "/usr/share/hts-voice/nitech-jp-atr503-m001/nitech_jp_atr503_m001.htsvoice"
:param dict_dir: -x dir : dictionary directory [ N/A]
:param voice_file: -m htsvoice : HTS voice files [ N/A]
:param output_file: -ow s : filename of output wav audio (generated speech) [ N/A]
:param trace_file: -ot s : filename of output trace information [ N/A]
:param sampling: -s i : sampling frequency [ auto][ 1-- ]
        :param frame_period: -p i : frame period (point) [ auto][ 1-- ]
:param all_pass: -a f : all-pass constant [ auto][ 0.0-- 1.0]
:param filter_coefficient: -b f : postfiltering coefficient [ 0.0][ 0.0-- 1.0]
:param speed_rate: -r f : speech speed rate [ 1.0][ 0.0-- ]
:param half_tone: -fm f : additional half-tone [ 0.0][ -- ]
:param threshold: -u f : voiced/unvoiced threshold [ 0.5][ 0.0-- 1.0]
:param spectrum: -jm f : weight of GV for spectrum [ 1.0][ 0.0-- ]
:param log_f0: -jf f : weight of GV for log F0 [ 1.0][ 0.0-- ]
:param volume: -g f : volume (dB) [ 0.0][ -- ]
:param buffer: -z i : audio buffer size (if i==0, turn off) [ 0][ 0-- ]
"""
self._dict_dir = dict_dir
self._voice_file = voice_file
self._output_file = output_file
self._trace_file = trace_file
self._sampling = sampling
self._frame_period = frame_period
self._all_pass = all_pass
self._filter_coefficient = filter_coefficient
self._speed_rate = speed_rate
self._half_tone = half_tone
self._log_f0 = log_f0
self._threshold = threshold
self._spectrum = spectrum
self._volume = volume
self._buffer = buffer
@property
def dict_dir(self) -> str:
return self._dict_dir
@property
def voice_file(self) -> str:
return self._voice_file
@property
def output_file(self) -> str:
return self._output_file
@property
def trace_file(self) -> str:
return self._trace_file
@property
def sampling(self) -> Union[int, str]:
return self._sampling
@property
def frame_period(self) -> Union[int, str]:
return self._frame_period
@property
def all_pass(self) -> Union[float, str]:
return self._all_pass
@property
def filter_coefficient(self) -> float:
return self._filter_coefficient
@property
def speed_rate(self) -> float:
return self._speed_rate
@property
def half_tone(self) -> float:
return self._half_tone
@property
def log_f0(self) -> float:
return self._log_f0
@property
def spectrum(self) -> float:
return self._spectrum
@property
def volume(self) -> float:
return self._volume
@property
def buffer(self) -> float:
return self._buffer
def from_string(self,
string,
dict_dir=None,
voice_file=None,
output_file=None,
trace_file=None,
sampling=None,
frame_period=None,
all_pass=None,
filter_coefficient=None,
speed_rate=None,
half_tone=None,
threshold=None,
spectrum=None,
log_f0=None,
volume=None,
buffer=None,
timeout=60):
        # Map each option to the flag documented in the class docstring and
        # stringify the values, since subprocess arguments must be strings.
        command = [str(arg) for arg in [
            'open_jtalk',
            '-x', dict_dir or self._dict_dir,
            '-m', voice_file or self._voice_file,
            '-ow', output_file or self._output_file,
            '-ot', trace_file or self._trace_file,
            '-s', sampling or self._sampling,
            '-p', frame_period or self._frame_period,
            '-a', all_pass or self._all_pass,
            '-b', filter_coefficient or self._filter_coefficient,
            '-r', speed_rate or self._speed_rate,
            '-fm', half_tone or self._half_tone,
            '-u', threshold or self._threshold,
            '-jm', spectrum or self._spectrum,
            '-jf', log_f0 or self._log_f0,
            '-g', volume or self._volume,
            '-z', buffer or self._buffer,
        ]]
        proc = subprocess.Popen(command, stdin=subprocess.PIPE)
        proc.stdin.write(string.encode('utf-8'))
        proc.stdin.close()
        proc.wait(timeout=timeout)
        return output_file or self._output_file
def from_file(self,
infile,
dict_dir=None,
voice_file=None,
output_file=None,
trace_file=None,
sampling=None,
frame_period=None,
all_pass=None,
filter_coefficient=None,
speed_rate=None,
half_tone=None,
threshold=None,
spectrum=None,
log_f0=None,
volume=None,
buffer=None,
timeout=60):
        # Same flag mapping as in from_string; the input text is read from the
        # given file and the values are stringified for subprocess.
        command = [str(arg) for arg in [
            'open_jtalk',
            '-x', dict_dir or self._dict_dir,
            '-m', voice_file or self._voice_file,
            '-ow', output_file or self._output_file,
            '-ot', trace_file or self._trace_file,
            '-s', sampling or self._sampling,
            '-p', frame_period or self._frame_period,
            '-a', all_pass or self._all_pass,
            '-b', filter_coefficient or self._filter_coefficient,
            '-r', speed_rate or self._speed_rate,
            '-fm', half_tone or self._half_tone,
            '-u', threshold or self._threshold,
            '-jm', spectrum or self._spectrum,
            '-jf', log_f0 or self._log_f0,
            '-g', volume or self._volume,
            '-z', buffer or self._buffer,
            infile,
        ]]
        process = subprocess.Popen(command, stdin=subprocess.PIPE)
        process.wait(timeout=timeout)
        return output_file or self._output_file
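# Illustrative usage sketch (hypothetical paths, matching the Ubuntu packages
# mentioned in the constructor docstring):
#
#   jtalk = Jtalk(
#       dict_dir="/var/lib/mecab/dic/open-jtalk/naist-jdic",
#       voice_file="/usr/share/hts-voice/nitech-jp-atr503-m001/nitech_jp_atr503_m001.htsvoice",
#       output_file="out.wav",
#   )
#   jtalk.from_string("こんにちは")  # writes synthesized speech to out.wav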
| 2.3125 | 2 |
pysnmp/TPT-LICENSE-MIB.py | agustinhenze/mibs.snmplabs.com | 11 | 12787045 | <gh_stars>10-100
#
# PySNMP MIB module TPT-LICENSE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/TPT-LICENSE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 21:18:56 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Integer32, NotificationType, Gauge32, IpAddress, MibIdentifier, Bits, Counter32, Counter64, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, TimeTicks, iso, ObjectIdentity, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "Integer32", "NotificationType", "Gauge32", "IpAddress", "MibIdentifier", "Bits", "Counter32", "Counter64", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "TimeTicks", "iso", "ObjectIdentity", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
tpt_tpa_objs, = mibBuilder.importSymbols("TPT-TPAMIBS-MIB", "tpt-tpa-objs")
tpt_license_objs = ModuleIdentity((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 15)).setLabel("tpt-license-objs")
tpt_license_objs.setRevisions(('2016-05-25 18:54',))
if mibBuilder.loadTexts: tpt_license_objs.setLastUpdated('201605251854Z')
if mibBuilder.loadTexts: tpt_license_objs.setOrganization('Trend Micro, Inc.')
class LicenseStatus(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))
namedValues = NamedValues(("info", 0), ("ok", 1), ("warning", 2), ("error", 3))
class LicenseAction(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("allow", 0), ("deny", 1))
licenseTable = MibTable((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 15, 1), )
if mibBuilder.loadTexts: licenseTable.setStatus('current')
licenseEntry = MibTableRow((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 15, 1, 1), ).setIndexNames((0, "TPT-LICENSE-MIB", "licenseEntryIndex"))
if mibBuilder.loadTexts: licenseEntry.setStatus('current')
licenseEntryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 15, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: licenseEntryIndex.setStatus('current')
licenseEntryFeature = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 15, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 63))).setMaxAccess("readonly")
if mibBuilder.loadTexts: licenseEntryFeature.setStatus('current')
licenseEntryStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 15, 1, 1, 3), LicenseStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: licenseEntryStatus.setStatus('current')
licenseEntryAction = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 15, 1, 1, 4), LicenseAction()).setMaxAccess("readonly")
if mibBuilder.loadTexts: licenseEntryAction.setStatus('current')
licenseEntryExpiry = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 15, 1, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 31))).setMaxAccess("readonly")
if mibBuilder.loadTexts: licenseEntryExpiry.setStatus('current')
licenseEntryDetails = MibTableColumn((1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 15, 1, 1, 6), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: licenseEntryDetails.setStatus('current')
mibBuilder.exportSymbols("TPT-LICENSE-MIB", licenseEntryAction=licenseEntryAction, licenseEntryDetails=licenseEntryDetails, LicenseAction=LicenseAction, PYSNMP_MODULE_ID=tpt_license_objs, licenseEntryIndex=licenseEntryIndex, LicenseStatus=LicenseStatus, licenseEntryFeature=licenseEntryFeature, licenseEntry=licenseEntry, licenseEntryStatus=licenseEntryStatus, tpt_license_objs=tpt_license_objs, licenseEntryExpiry=licenseEntryExpiry, licenseTable=licenseTable)
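# Editor's note (illustrative, not part of the generated module): a consuming
# script would typically resolve these symbols through pysnmp's MibBuilder; the
# ./pysnmp directory below is an assumed location for this compiled module.
#
#     from pysnmp.smi import builder
#     mibBuilder = builder.MibBuilder()
#     mibBuilder.addMibSources(builder.DirMibSource('./pysnmp'))
#     (licenseTable,) = mibBuilder.importSymbols('TPT-LICENSE-MIB', 'licenseTable')
#     print(licenseTable.getName())  # (1, 3, 6, 1, 4, 1, 10734, 3, 3, 2, 15, 1)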
| 1.671875 | 2 |
lab-assignment-3/code/app.py | omkarshelar/CSUA32181-VIIT | 0 | 12787046 | from flask import Flask, render_template, redirect, url_for, flash
from forms import addContactForm
import db_utils
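# Editor's note: the db_utils module is not part of this listing. The routes
# below rely on just three helpers, so its assumed interface is roughly:
#     db_utils.get_contacts()        -> iterable of contacts, or None on a DB error
#     db_utils.add_contact(contact)  -> True/False; contact carries name, mobile_no, email
#     db_utils.delete_contact(id)    -> removes a single contact by its identifier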
# initialize the Flask app
app = Flask(__name__)
# Note: this is a demo application, hence the secret key is hardcoded for simplicity.
# In any real deployment, load the key from an environment variable or a config file.
app.secret_key = 'any random string'
# Route to list all phone contacts of the user.
@app.route('/', methods=["GET"])
def list_contacts():
contacts = db_utils.get_contacts()
if contacts is None:
return "Error conecting to database. Ensure that the database is installed properly."
return render_template('list_contacts.html', contacts=contacts)
# Add a contact to phonebook
@app.route('/add/', methods=["GET", "POST"])
def add_contact():
form = addContactForm()
if form.validate_on_submit(): # Validate the form for CSRF etc.
# Extract form information
name = form.name.data
mobile_no = form.mobile_no.data
email = form.email.data
add_response = db_utils.add_contact({ # Add to database
"name": name,
"mobile_no": mobile_no,
"email": email
})
if add_response:
flash("Added!") # Show acknowledge to user
# Redirect to list_contacts page
return redirect(url_for("list_contacts"))
else:
flash("Error occured while adding contact. Try Again!")
return render_template('add_contact.html', form=form)
# Delete a contact from the phonebook
@app.route('/delete/<contact_id>/', methods=["GET"])
def delete_contact(contact_id):
db_utils.delete_contact(contact_id)
return redirect(url_for('list_contacts'))
if __name__ == '__main__':
app.run(host='0.0.0.0', port=80)
| 3.03125 | 3 |
tests/test_main_router.py | lokaimoma/Flask-QR-Code-Web-APP | 2 | 12787047 | <filename>tests/test_main_router.py
# Created by Kelvin_Clark on 3/10/22, 2:45 PM
from flask import url_for
import pytest
@pytest.mark.parametrize(argnames='path', argvalues=["/app/", "/app/generateQrCode/"])
def test_login_required(test_client, path):
with test_client:
response = test_client.get(path)
assert response.status_code == 302
assert url_for("auth.login_route") in response.headers.get("Location")
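# Editor's note: the test_client fixture is assumed to come from a conftest.py
# that is not shown in this listing. A minimal sketch (create_app and the config
# keys below are assumptions about the project, not taken from this file):
#
#     @pytest.fixture
#     def test_client():
#         app = create_app()
#         app.config.update(TESTING=True, WTF_CSRF_ENABLED=False)
#         with app.test_client() as client:
#             yield client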
| 2.71875 | 3 |
pydemic/memories.py | Catastropha/pydemic | 3 | 12787048 | <filename>pydemic/memories.py
class Memory:
"""Fixed-size buffer to store (score, object) tuples"""
def __init__(self,
memory_size: int = 1,
):
"""Initialize a MemoryBuffer object"""
self.memory_size = memory_size
self.memory = []
    def add(self,
            score: float,
            obj,
            ) -> None:
        """Add a new (score, agent) pair, keeping memory sorted by ascending score"""
        x = (score, obj)
        # binary search for the insertion point (a bisect_left over the stored scores)
        lo = 0
        hi = len(self.memory)
        while lo < hi:
            mid = (lo + hi) // 2
            if self.memory[mid][0] < x[0]:
                lo = mid + 1
            else:
                hi = mid
        self.memory.insert(lo, x)
        # enforce the capacity: entries with the highest scores are dropped first
        self.memory = self.memory[:self.memory_size]
def topk(self,
k: int,
) -> list:
"""Return top K objects"""
return [obj[1] for obj in self.memory[:k]]
def bottomk(self,
k: int,
) -> list:
"""Return bottom K objects"""
return [obj[1] for obj in self.memory[-k:]]
def set_size(self,
size: int,
) -> None:
self.memory_size = size
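
if __name__ == '__main__':
    # Editor's demo (not part of the original module): the buffer keeps entries
    # sorted by ascending score and retains at most memory_size of the
    # lowest-scoring ones.
    memory = Memory(memory_size=3)
    for score, label in [(0.9, 'a'), (0.1, 'b'), (0.5, 'c'), (0.3, 'd')]:
        memory.add(score, label)
    print(memory.topk(2))     # ['b', 'd'] -> the two smallest scores (0.1 and 0.3)
    print(memory.bottomk(1))  # ['c']      -> the largest score still kept (0.5)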
| 3.171875 | 3 |
demos/ch4_files/basic_text_file.py | mikeckennedy/python_workshop_demos_april_2018 | 1 | 12787049 | from models import Notebook, TextNote, VideoNote
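# Editor's note: the models module is not included in this listing. Judging from
# the calls below, it is assumed to provide roughly this interface:
#     TextNote(text) and VideoNote(url)               # simple note records
#     Notebook(title) with a .notes list to append to
#     Notebook.display(), .save(path), .load(path), .to_json()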
bio = Notebook("Bio 201 Notes")
bio.notes.append(TextNote("This is the first day of Bio 201"))
bio.notes.append(TextNote("Final exam is 95%."))
bio.notes.append(VideoNote("https://www.youtube.com/watch?v=PKffm2uI4dk"))
bio.display()
bio.save("bio201.txt")
bio.load("bio201.txt")
print(bio.to_json())
| 2.578125 | 3 |
main.py | meintte/IndianPlanetaryComputation | 0 | 12787050 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import csv
from math import sqrt
import numpy as np
import yaml
from scipy.interpolate import interp1d
class PlanetVariables:
def __init__(self, name):
self.name = name
self._getDataFromFile()
def _getDataFromFile(self):
with open("planetVariables/" + self.name + ".yml", 'r') as ymlfile:
            yamlFile = yaml.safe_load(ymlfile)
self.meanPlanet_revolutions = yamlFile['mean planet revolutions per yuga']
self.longitude_slow_apogee = yamlFile['longitude slow apogee']
self.sizeSlow_at_0 = yamlFile['size slow epicycle at 0 & 180']
self.sizeSlow_at_90 = yamlFile['size slow epicycle at 90 & 270']
if self.name != 'Sun':
self.fast_apogee_revolutions = yamlFile['fast apogee revolutions per yuga']
self.sizeFast_at_0 = yamlFile['size fast epicycle at 0 & 180']
self.sizeFast_at_90 = yamlFile['size fast epicycle at 90 & 270']
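# Editor's note: each planetVariables/<Name>.yml file is assumed to carry the
# keys read above, along these lines (illustrative numbers, not historical
# parameters):
#
#     mean planet revolutions per yuga: 4320000
#     longitude slow apogee: 77.75
#     size slow epicycle at 0 & 180: 14.0
#     size slow epicycle at 90 & 270: 13.67
#     fast apogee revolutions per yuga: 4320000   # the Sun omits the fast keys
#     size fast epicycle at 0 & 180: 0
#     size fast epicycle at 90 & 270: 0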
class AngleAndSinHandler:
def __init__(self, thetas, sinValues):
self.InterpolatedSinTable = interp1d(thetas, sinValues)
self.InterpolatedInverseSinTable = interp1d(sinValues, thetas)
    # return example: 270.5 -> 270 deg & 30 min
    # Works from total arcseconds and rounds away float noise first, so inputs
    # such as 30.26666667 (30 deg 16 min) no longer lose a minute to truncation.
    def DecimalDegreeToIndividualAngleUnits(self, decimalDeg):
        totalSeconds = round(decimalDeg * 3600., 6)
        degrees = int(totalSeconds // 3600)
        minutes = int((totalSeconds - degrees * 3600) // 60)
        seconds = totalSeconds - degrees * 3600 - minutes * 60
        return degrees, minutes, seconds
# returns the decimal degrees
def IndividualAngleUnitsToDecimalDegree(self, degrees, minutes, seconds=0):
tmpMinutes = minutes + seconds / 60.
return degrees + tmpMinutes / 60.
def getPositveAngle(self, decimalAngle):
while decimalAngle < 0:
decimalAngle += 360
while decimalAngle > 360:
decimalAngle -= 360
return decimalAngle
def roundToMinutes(self, decimalAngle):
_deg, _min, _sec = self.DecimalDegreeToIndividualAngleUnits(
decimalAngle)
if (_sec >= 30.):
return self.IndividualAngleUnitsToDecimalDegree(_deg, _min+1, 0)
else:
return self.IndividualAngleUnitsToDecimalDegree(_deg, _min, 0)
# positivity is required
def _getQuadrantOfAngle(self, decimalAngle):
        # the quadrants are 0, 1, 2, 3
if (decimalAngle <= 90):
return 0
elif (decimalAngle <= 180):
return 1
elif (decimalAngle <= 270):
return 2
else:
return 3
def sinOf(self, decimalAngle):
angleForSin = self.getPositveAngle(decimalAngle)
quadrant = self._getQuadrantOfAngle(angleForSin)
        # the quadrant numbering goes from 0 to 3
if (quadrant <= 1):
sign = 1
else:
sign = -1
angleForSin = angleForSin - quadrant*90
return sign * self.InterpolatedSinTable(angleForSin)
def arcsinOf(self, sinValue):
if (sinValue < 0):
return -1 * self.InterpolatedInverseSinTable(-sinValue)
else:
return self.InterpolatedInverseSinTable(sinValue)
def printAngle(self, name, decimalAngle, inDecimal=True):
if inDecimal:
print('{:20}: {}°'.format(name, decimalAngle))
else:
_deg, _min, _sec = self.DecimalDegreeToIndividualAngleUnits(
decimalAngle)
print('{:20}: {}° {}\' {}\'\''.format(name, _deg, _min, _sec))
def makePositiveRoundAndPrint(self, name, angle, inDecimal=True, doRound=False):
# make positive
angle = self.getPositveAngle(angle)
# do the rounding
if doRound:
angle = self.roundToMinutes(angle)
# print the angle
self.printAngle(name, angle, inDecimal)
return angle
def readCsvFile(filename):
with open(filename) as csvDataFile:
csvReader = csv.reader(csvDataFile)
# Get the radius
line = next(csvReader)
R = float(line[2])
# Skip second line
next(csvReader)
# read the rest
tmpArray = [(float(row[1]), float(row[2])) for row in csvReader]
thetas, sinValues = zip(*tmpArray)
return R, thetas, sinValues
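# Editor's note: readCsvFile expects a layout along these lines (illustrative
# values; the radius sits in the third column of the first row, thetas and sin
# values in the second and third columns of every row after the two header rows):
#
#     radius,R,3438
#     index,theta,sinValue
#     0,0,0
#     1,3.75,225
#     ...
#     24,90,3438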
def getSizeEpicycle(size_at_0, size_at_90, r_for_sin, handlerAngleSin, decimalAngle):
return size_at_0 + (size_at_90 - size_at_0) * abs(handlerAngleSin.sinOf(decimalAngle)) / (1. * r_for_sin)
def getRadiusEpicycle(size_at_0, size_at_90, radiusDeferent, handlerAngleSin, decimalAngle, printAll):
sizeEpicycle = getSizeEpicycle(
size_at_0, size_at_90, radiusDeferent, handlerAngleSin, decimalAngle)
radiusEpicycle = sizeEpicycle / 360. * radiusDeferent
if printAll:
print('{:20}: {}'.format('sizeEpicycle', sizeEpicycle))
print('{:20}: {}'.format('radiusEpicycle', radiusEpicycle))
return radiusEpicycle
def getDecimalAngleFromRotation(revolutionSpeed, elapsedDays, period):
numRevolutions = (revolutionSpeed * elapsedDays) / (1. * period)
return (numRevolutions - int(numRevolutions)) * 360
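# Editor's note, a quick worked example of the rotation-to-longitude step above:
# revolutionSpeed = 1, period = 360 and elapsedDays = 450 give 1.25 revolutions,
# and the fractional 0.25 revolution maps to a mean longitude of 90 degrees.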
def getFastEquation(radiusFast, radiusDeferent, handlerAngleSin, kappa, printAll):
sinKappa = handlerAngleSin.sinOf(kappa)
VB = (radiusFast * sinKappa) / radiusDeferent
radialDistance = sqrt(
VB**2 + (radiusDeferent + sqrt(sinKappa**2 - VB**2))**2)
sigma = handlerAngleSin.arcsinOf(radiusFast * sinKappa / radialDistance)
if printAll:
print('{:20}: {}'.format('sinKappa', sinKappa))
print('{:20}: {}'.format('radiusDeferent', radiusDeferent))
print('{:20}: {}'.format('radiusFast', radiusFast))
print('{:20}: {}'.format('radialDistance', radialDistance))
return sigma
def getSlowEquation(radiusSlow, radiusDeferent, handlerAngleSin, kappa, printAll):
sinKappa = handlerAngleSin.sinOf(kappa)
if printAll:
print('{:20}: {}'.format('sinKappa', sinKappa))
mu = handlerAngleSin.arcsinOf(radiusSlow * sinKappa / radiusDeferent)
return mu
#############################################################
def doSunProcedure(
_yuga, _days_in_yuga, _days_since_epoch,
_radiusDeferent, _handler,
_meanPlanet_revolutions, _longitude_slow_apogee,
_sizeSlow_at_0, _sizeSlow_at_90,
_doRounding, _printDecimalDegree, _printAll):
# mean planet calculation
lambda_bar = getDecimalAngleFromRotation(
_meanPlanet_revolutions, _days_since_epoch, _days_in_yuga)
lambda_bar = _handler.makePositiveRoundAndPrint(
'lambda_bar', lambda_bar, _printDecimalDegree, _doRounding)
    # compute the slow equation relative to the slow apogee
lambda_mu = _longitude_slow_apogee
lambda_mu = _handler.makePositiveRoundAndPrint(
'lambda_mu', lambda_mu, _printDecimalDegree, _doRounding)
kappa_mu = lambda_bar - lambda_mu
kappa_mu = _handler.makePositiveRoundAndPrint(
'kappa_mu', kappa_mu, _printDecimalDegree, _doRounding)
# get the current radius of the epicycle
radiusSlow = getRadiusEpicycle(
_sizeSlow_at_0, _sizeSlow_at_90, _radiusDeferent, _handler, kappa_mu, _printAll)
mu = getSlowEquation(radiusSlow, _radiusDeferent,
_handler, kappa_mu, _printAll)
mu = _handler.makePositiveRoundAndPrint(
'mu', mu, _printDecimalDegree, _doRounding)
    # for the Sun the whole slow equation is simply added to the mean longitude
lambda_true = lambda_bar + mu
lambda_true = _handler.makePositiveRoundAndPrint(
'lambda_true', lambda_true, _printDecimalDegree, _doRounding)
#############################################################
def do4stepProcedure(
_yuga, _days_in_yuga, _days_since_epoch,
_radiusDeferent, _handler,
_meanPlanet_revolutions, _fast_apogee_revolutions, _longitude_slow_apogee,
_sizeSlow_at_0, _sizeSlow_at_90, _sizeFast_at_0, _sizeFast_at_90,
_doRounding, _printDecimalDegree, _printAll,
_firstSign, _secondSign, _thirdSign, _fourthSign):
    # 4-step procedure, from the Suryasiddhanta
# 0th step
    # calculate the mean planet's longitude (lambda_bar)
lambda_bar = getDecimalAngleFromRotation(
_meanPlanet_revolutions, _days_since_epoch, _days_in_yuga)
lambda_bar = _handler.makePositiveRoundAndPrint(
'lambda_bar', lambda_bar, _printDecimalDegree, _doRounding)
################# START 1st step #################
# apply half the fast equation to the mean planet
lambda_sigma = getDecimalAngleFromRotation(
_fast_apogee_revolutions, _days_since_epoch, _days_in_yuga)
lambda_sigma = _handler.makePositiveRoundAndPrint(
'lambda_sigma', lambda_sigma, _printDecimalDegree, _doRounding)
kappa_sigma_1 = lambda_bar - lambda_sigma
kappa_sigma_1 = _handler.makePositiveRoundAndPrint(
'kappa_sigma_1', kappa_sigma_1, _printDecimalDegree, _doRounding)
# get the current radius of the epicycle
radiusFast = getRadiusEpicycle(
_sizeFast_at_0, _sizeFast_at_90, _radiusDeferent, _handler, kappa_sigma_1, _printAll)
sigma_1 = getFastEquation(
radiusFast, _radiusDeferent, _handler, kappa_sigma_1, _printAll)
sigma_1 = _handler.makePositiveRoundAndPrint(
'sigma_1', sigma_1, _printDecimalDegree, _doRounding)
# plus or minus? use the firstSign...
lambda_1 = lambda_bar + _firstSign * 0.5 * sigma_1
lambda_1 = _handler.makePositiveRoundAndPrint(
'lambda_1', lambda_1, _printDecimalDegree, _doRounding)
################# END 1st step #################
################# START 2nd step #################
# apply half the slow equation to the computed result
lambda_mu = _longitude_slow_apogee
lambda_mu = _handler.makePositiveRoundAndPrint(
'lambda_mu', lambda_mu, _printDecimalDegree, _doRounding)
kappa_mu_1 = lambda_1 - lambda_mu
kappa_mu_1 = _handler.makePositiveRoundAndPrint(
'kappa_mu_1', kappa_mu_1, _printDecimalDegree, _doRounding)
# get the current radius of the epicycle
radiusSlow = getRadiusEpicycle(
_sizeSlow_at_0, _sizeSlow_at_90, _radiusDeferent, _handler, kappa_mu_1, _printAll)
mu_1 = getSlowEquation(radiusSlow, _radiusDeferent,
_handler, kappa_mu_1, _printAll)
mu_1 = _handler.makePositiveRoundAndPrint(
'mu_1', mu_1, _printDecimalDegree, _doRounding)
# plus or minus? use the secondSign...
lambda_2 = lambda_1 + _secondSign * 0.5 * mu_1
lambda_2 = _handler.makePositiveRoundAndPrint(
'lambda_2', lambda_2, _printDecimalDegree, _doRounding)
################# END 2nd step #################
################# START 3rd step #################
    # start from the computed result, compute the slow equation,
# apply it whole to the mean planet
kappa_mu_2 = lambda_2 - lambda_mu
kappa_mu_2 = _handler.makePositiveRoundAndPrint(
'kappa_mu_2', kappa_mu_2, _printDecimalDegree, _doRounding)
# get the current radius of the epicycle
radiusSlow = getRadiusEpicycle(
_sizeSlow_at_0, _sizeSlow_at_90, _radiusDeferent, _handler, kappa_mu_2, _printAll)
mu_2 = getSlowEquation(radiusSlow, _radiusDeferent,
_handler, kappa_mu_2, _printAll)
mu_2 = _handler.makePositiveRoundAndPrint(
'mu_2', mu_2, _printDecimalDegree, _doRounding)
    # plus or minus? use the thirdSign...
lambda_3 = lambda_bar + _thirdSign * mu_2
lambda_3 = _handler.makePositiveRoundAndPrint(
'lambda_3', lambda_3, _printDecimalDegree, _doRounding)
################# END 3rd step #################
################# START 4th step #################
# apply the whole fast equation to the computed result
kappa_sigma_2 = lambda_3 - lambda_sigma
kappa_sigma_2 = _handler.makePositiveRoundAndPrint(
'kappa_sigma_2', kappa_sigma_2, _printDecimalDegree, _doRounding)
    # get the current radius of the epicycle
radiusFast = getRadiusEpicycle(
_sizeFast_at_0, _sizeFast_at_90, _radiusDeferent, _handler, kappa_sigma_2, _printAll)
sigma_2 = getFastEquation(
radiusFast, _radiusDeferent, _handler, kappa_sigma_2, _printAll)
sigma_2 = _handler.makePositiveRoundAndPrint(
'sigma_2', sigma_2, _printDecimalDegree, _doRounding)
# plus or minus? use the fourthSign...
lambda_true = lambda_3 + _fourthSign * sigma_2
lambda_true = _handler.makePositiveRoundAndPrint(
'lambda_true', lambda_true, _printDecimalDegree, _doRounding)
################# END 4th step #################
#############################################################
def allPosibilityWay(yuga, days_in_yuga, days_since_epoch, radiusDeferent, handler, planet, doRounding, printDecimalDegree, printAll):
for i in [-1, 1]:
for j in [-1, 1]:
for k in [-1, 1]:
for l in [-1, 1]:
print(
"####################################### " '{},{},{},{}'.format(i, j, k, l))
do4stepProcedure(
yuga, days_in_yuga, days_since_epoch,
radiusDeferent, handler,
planet.meanPlanet_revolutions, planet.fast_apogee_revolutions, planet.longitude_slow_apogee,
planet.sizeSlow_at_0, planet.sizeSlow_at_90, planet.sizeFast_at_0, planet.sizeFast_at_90,
doRounding, printDecimalDegree, printAll,
i, j, k, l)
#############################################################
if __name__ == "__main__":
# get the global variables from the yaml file
with open("globalVariables.yml", 'r') as ymlfile:
        globalVars = yaml.safe_load(ymlfile)
# setup the Sin tables
    # it's assumed that the sin table only gives values for angles in [0, 90 deg]
radiusDeferent, thetas, sinValues = readCsvFile(
globalVars['sin table'])
handler = AngleAndSinHandler(thetas, sinValues)
    # evidence suggests that angle values are rounded to the nearest minute
doRounding = globalVars['round to minutes']
# print angles in decimalDegree
printDecimalDegree = globalVars['print in decimal degrees']
# print all steps
printAll = globalVars['print all steps']
yuga = globalVars['yuga']
days_in_yuga = globalVars['days in a yuga']
days_since_epoch = globalVars['days since the epoch']
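    # Editor's note: globalVariables.yml is assumed to look roughly like this
    # (illustrative values only):
    #
    #     sin table: sinTable.csv
    #     round to minutes: true
    #     print in decimal degrees: false
    #     print all steps: false
    #     yuga: 4320000
    #     days in a yuga: 1577917828
    #     days since the epoch: 1870110
    #     do calculations for: [Sun, Mars]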
planets = []
for planetToCalculate in globalVars['do calculations for']:
if planetToCalculate == 'Sun':
planets.append(PlanetVariables(planetToCalculate))
elif planetToCalculate == 'Moon':
            print(planetToCalculate + " is not yet implemented...")
        elif planetToCalculate in ('Mars', 'Mercury', 'Jupiter', 'Venus', 'Saturn'):
planets.append(PlanetVariables(planetToCalculate))
else:
print("Unknown planet! Please check for typos")
for p in planets:
print(p.name)
print("")
if(p.name == 'Sun'):
doSunProcedure(
yuga, days_in_yuga, days_since_epoch,
radiusDeferent, handler,
p.meanPlanet_revolutions, p.longitude_slow_apogee,
p.sizeSlow_at_0, p.sizeSlow_at_90,
doRounding, printDecimalDegree, printAll)
else:
do4stepProcedure(
yuga, days_in_yuga, days_since_epoch,
radiusDeferent, handler,
p.meanPlanet_revolutions, p.fast_apogee_revolutions, p.longitude_slow_apogee,
p.sizeSlow_at_0, p.sizeSlow_at_90, p.sizeFast_at_0, p.sizeFast_at_90,
doRounding, printDecimalDegree, printAll,
-1, 1, 1, -1)
print("")
| 2.546875 | 3 |