max_stars_repo_path
stringlengths 3
269
| max_stars_repo_name
stringlengths 4
119
| max_stars_count
int64 0
191k
| id
stringlengths 1
7
| content
stringlengths 6
1.05M
| score
float64 0.23
5.13
| int_score
int64 0
5
|
---|---|---|---|---|---|---|
src/CLSystemReferenceImport.py | shmouses/SpectrumImageAnalysisPy | 3 | 12798151 | from __future__ import print_function
import csv
import numpy as np
import re
import Spectrum
#import matplotlib.pyplot as plt
def ReadCSVRef(filename):
    """Parse a reference CSV into a dict keyed by the integers in each header.

    The first row supplies the headers (empty cells from trailing delimiters
    are dropped); every data row is read minus its trailing column, empty
    cells become NaN and all values are cast to float.  The data columns are
    split evenly between the headers, so each key — the tuple of integers
    extracted from the header text — maps to its own block of columns.

    Parameters
    ----------
    filename : str
        Path of the CSV reference file to read.

    Returns
    -------
    dict
        Maps tuple-of-ints header labels to 2-D float numpy arrays.
    """
    with open(filename) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        # Drop empty header cells left by trailing delimiters.
        headers = list(filter(None, next(reader)))
        data = []
        for row in reader:
            data.append(row[:-1])  # last column is a trailing empty cell
    data = np.array(data)
    data[data == ''] = np.nan
    data = data.astype(float)
    dataDict = {}
    i = 0
    # Each header owns an equal share of the data columns.
    # (Removed: leftover debug print and a no-op slicing statement.)
    columns_per_data = int(np.shape(data[0])[0] / np.shape(headers)[0])
    for hh in headers:
        # Key is the tuple of integers embedded in the header text,
        # e.g. "Grating 1250 Center 1000" -> (1250, 1000).
        label = tuple(map(int, re.findall(r'\d+', hh)))
        dataDict[label] = data[:, i:i + columns_per_data]
        i += columns_per_data
    return dataDict
# Add error-checking for entering a non-existent grating/wavelength pair
class SystemCorrectionFactor(object):
    """Loads the system-response correction spectrum for a grating setup.

    Gratings of 1000 lines/mm and above use the IR camera reference file;
    lower gratings use the interpolated visible-range reference and require
    an explicit wavelength axis.
    """

    def __init__(self, grating, center_wavelength, wavelengths=None):
        self.grating = grating
        self.center_wavelength = center_wavelength
        if grating >= 1000:
            self.correction_spectrum = self.ImportIR()
        elif wavelengths is not None:
            self.correction_spectrum = self.ImportVis(wavelengths)
        else:
            print('No valid reference for system correction!')

    def ImportIR(self):
        """Return the IR correction spectrum for this grating/centre pair."""
        filename = '/home/isobel/Documents/McMaster/CL/SystemResponseFcns/CorrectionFactorSCAlIRCamera_2015_02_26.csv'
        reference = ReadCSVRef(filename)
        block = reference[self.grating, self.center_wavelength]
        # Column 1 holds intensities, column 0 the wavelength axis.
        return Spectrum.CLSpectrum(block[:, 1], block[:, 0])

    def ImportVis(self, wavelengths):
        """Return the visible correction spectrum interpolated onto *wavelengths*."""
        filename = '/home/isobel/Documents/McMaster/CL/SystemResponseFcns/SystemResponseVISInterpolated_20150717.csv'
        reference = ReadCSVRef(filename)
        block = reference[(self.grating,)]
        interpolated = np.interp(wavelengths, block[:, 0], block[:, 1])
        return Spectrum.CLSpectrum(interpolated, wavelengths)
class WavelengthCorrectionFactor(object):
    """Looks up the corrected wavelength axis for a grating/centre pair."""

    def __init__(self, grating, center_wavelength):
        self.grating = grating
        self.center_wavelength = center_wavelength
        if self.grating in (1250, 1600, 2000):
            # IR gratings use the Winspec IR wavelength table.
            self.wavelength = self.importIRwavelengths()
        elif self.grating in (500, 800):
            # Visible gratings use the Winspec VIS wavelength table.
            self.wavelength = self.importVISwavelengths()
        else:
            print('No valid reference for wavelength correction!')

    def importIRwavelengths(self):
        """Return the corrected IR wavelengths for this grating/centre pair."""
        path = '/home/isobel/Documents/McMaster/CL/SystemResponseFcns/WinspecCorrWavelengthsIR20150428.csv'
        return ReadCSVRef(path)[self.grating, self.center_wavelength]

    def importVISwavelengths(self):
        """Return the corrected visible wavelengths for this grating/centre pair."""
        path = '/home/isobel/Documents/McMaster/CL/SystemResponseFcns/WinspecCorrWavelengthsVis20150309.csv'
        return ReadCSVRef(path)[self.grating, self.center_wavelength]
#wvls = np.linspace(400, 980)
#p = SystemCorrectionFactor(800, 750, wvls)
#print(np.shape(p.correction_spectrum.SpectrumRange))
#plt.plot(p.correction_spectrum.SpectrumRange, p.correction_spectrum.intensity)
#plt.show()
| 2.71875 | 3 |
email_utils/email_verification.py | Aayush-hub/Bulk-Mailer | 0 | 12798152 | <reponame>Aayush-hub/Bulk-Mailer<gh_stars>0
from itsdangerous import URLSafeTimedSerializer, SignatureExpired
from json import load
config = None
# Load the mailer configuration once at import time.
with open("import.json", "r") as f:
    config = load(f)["jsondata"]
# Token is valid for 1 day
if len(config["email_verification_timeout"]) != 0:
    MAX_TIME = int(config["email_verification_timeout"])
else:
    raise Exception("Property 'email_verification_timeout' not set in 'import.json' file")
# Salt
# BUG FIX: this check previously tested 'email_verification_timeout' again,
# so a missing salt slipped through and raised a confusing KeyError below.
if len(config["email_verification_salt"]) != 0:
    VERIFICATION_SALT = config["email_verification_salt"]
else:
    raise Exception("Property 'email_verification_salt' not set in 'import.json' file")
# Secret Key
# BUG FIX: same copy-paste error — test the secret property itself.
if len(config["email_verification_secret"]) != 0:
    SECRET = config["email_verification_secret"]
else:
    raise Exception("Property 'email_verification_secret' not set in 'import.json' file")
def validate_token(token=None):
    """Helps in confirming the Email Address with the help of the token, sent on the registered email address.

    Keyword Arguments:
    token -- Token passed in the user's email

    Returns True when the token is valid and unexpired, False otherwise.
    """
    # Local import keeps the module-level import list untouched.
    from itsdangerous import BadSignature
    try:
        URLSafeTimedSerializer(SECRET).loads(token, salt=VERIFICATION_SALT, max_age=MAX_TIME)
    # BUG FIX: a tampered or malformed token used to raise BadSignature out of
    # this function; both expiry and bad signatures now report failure.
    except (SignatureExpired, BadSignature):
        return False
    # Token was successfully validated
    return True
def generate_token(email=None):
    """Return an email-verification token for *email*.

    Keyword Arguments
    email -- Email address for which the token is to be generated

    Returns None (after printing an error) for a missing or empty address.
    """
    is_valid = isinstance(email, str) and len(email) > 0
    if not is_valid:
        print("Error: Invalid Email address passed")
        return None
    serializer = URLSafeTimedSerializer(SECRET)
    # Return token for the email
    return serializer.dumps(email, salt=VERIFICATION_SALT)
evennia_wiki/markdown_engine.py | vlegoff/evennia-wiki | 2 | 12798153 | """Class containing the generic markdown engine used by evenniq_wiki."""
from bs4 import BeautifulSoup
from markdown import Markdown
class MarkdownEngine(Markdown):
    """A special markdown engine for the evennia_wiki.

    This pre-loads some common extensions and allows some inner processing.
    """

    def __init__(self):
        extensions = [
            'markdown.extensions.fenced_code',
            'markdown.extensions.footnotes',
            'markdown.extensions.tables',
            'markdown.extensions.toc',
        ]
        super(MarkdownEngine, self).__init__(extensions=extensions)

    def convert(self, text):
        """Convert the text to HTML, changing some classes.

        1. Table elements will have classes table table-responsive table-striped
        2. Table headers will have the class thead-inverse
        3. Links elements will be re-mapped if absolute (beginning by /)
        """
        rendered = super(MarkdownEngine, self).convert(text)
        soup = BeautifulSoup(rendered, 'html.parser')
        for table in soup.find_all("table"):
            table["class"] = "table table-responsive table-striped"
        for header in soup.find_all("thead"):
            header["class"] = "thead-inverse"
        # An absolute URL (starting with "/") is assumed to be a wiki page.
        for anchor in soup.find_all("a"):
            target = anchor.get("href")
            if target and target.startswith("/"):
                anchor["href"] = "/wiki" + target
        return str(soup)
# Module-level singleton engine, shared by all importers of this module.
ENGINE = MarkdownEngine()
| 3.421875 | 3 |
services/web/canonizer.py | vpodpecan/canonical_forms | 0 | 12798154 | <filename>services/web/canonizer.py
import os
import classla
import csv
import argparse
from lemmagen3 import Lemmatizer
# Fetch the Slovene classla models up front so later Pipeline calls can run.
classla.download('sl', logging_level='WARNING')
# Directory of this file; lemmagen model paths are resolved relative to it.
BASEDIR = os.path.dirname(__file__)
# Maps a grammatical gender code to its adjective lemmatisation model file.
_ADJ_MODELS = {
    'm': 'lemmagen_models/kanon-adj-male.bin',
    'f': 'lemmagen_models/kanon-adj-female.bin',
    'n': 'lemmagen_models/kanon-adj-neutral.bin',
}


def lem_adj(gender, wrd):
    """Lemmatise adjective *wrd* with the model for *gender* ('m', 'f' or 'n').

    Raises ValueError for an unknown gender code.  (Previously an unknown
    code fell through and the word was lemmatised with an unloaded model.)
    """
    try:
        model = _ADJ_MODELS[gender]
    except KeyError:
        raise ValueError("unknown gender code: {!r}".format(gender))
    lem = Lemmatizer()
    lem.load_model(os.path.join(BASEDIR, model))
    return lem.lemmatize(wrd)
def process_nlp_pipeline(lang, text):
    """Run the classla tokenize/pos/lemma pipeline over pre-tokenised *text*."""
    pipeline = classla.Pipeline(
        lang=lang,
        processors='tokenize,pos,lemma',
        tokenize_pretokenized=True,
        logging_level='WARNING',
    )
    return pipeline(text)
def get_adj_msd(head, word):
feats = head.feats
feats_dict = {}
feats = feats.strip().split('|')
for f in feats:
f = f.strip().split('=')
feats_dict[f[0]] = f[1]
gender = feats_dict['Gender']
#print(gender)
#gender = gender.strip().split('=')[1]
if gender == 'Masc' and len(word.xpos) == 6:
msd = word.xpos[:-1]+'ny'
elif gender == 'Masc' and len(word.xpos) == 7:
msd = word.xpos[:-1]+'y'
elif gender == 'Fem':
msd = word.xpos[:-1]+'n'
elif gender == 'Neut':
msd = word.xpos[:-1]+'n'
else:
msd = None
return msd
def subfinder(mylist, pattern):
    """Return every window of tokens in *mylist* whose texts match *pattern*.

    *pattern* is a list of lowercase strings; matching is case-insensitive
    against each token's .text attribute.
    """
    matches = []
    window = len(pattern)
    for start in range(len(mylist)):
        candidate = mylist[start:start + window]
        # Cheap first-token check before comparing the whole window.
        if mylist[start].text.lower() == pattern[0] and [tok.text.lower() for tok in candidate] == pattern:
            matches.append(candidate)
    return matches
def find_canon(term):
    """Build the canonical (dictionary) form of a parsed multi-word *term*.

    The head is the first NOUN/PROPN token.  Adjectives before the head are
    re-inflected to agree with its gender; words after the head are copied
    verbatim; the head itself is lemmatised with the general canonical model.
    Returns the canonical form as a string, or the sentinel string
    'HEAD not found' when a multi-word term has no noun.
    """
    head = None
    pre = []
    post = []
    # Locate the head: the first noun or proper noun in the term.
    for word in term.words:
        if word.upos == 'NOUN' or word.upos == 'PROPN':
            head = word
            break
    if head is None:
        if len(term.words) == 1:
            # Single-word term without a noun: lemmatise the word directly.
            head2 = term.words[0]
            lem = Lemmatizer()
            lem.load_model(os.path.join(BASEDIR, 'lemmagen_models/kanon.bin'))
            head_form = lem.lemmatize(head2.text.lower())
            return head_form
        else:
            return 'HEAD not found'
    else:
        # Partition the remaining words by position relative to the head.
        for word in term.words:
            if word.id < head.id:
                pre.append(word)
            elif word.id > head.id:
                post.append(word)
        canon = []
        for el in pre:
            msd = get_adj_msd(head, el)
            if msd is None:
                # Not an adjective we can re-inflect: fall back to its lemma.
                canon.append(el.lemma.lower())
            else:
                # Re-inflect adjectives to match the head's gender
                # (msd[3] carries the gender of the derived tag).
                if msd[0] == 'A' and msd[3] == 'm':
                    form = lem_adj('m', el.text.lower())
                    canon.append(form)
                elif msd[0] == 'A' and msd[3] == 'f':
                    form = lem_adj('f', el.text.lower())
                    canon.append(form)
                elif msd[0] == 'A' and msd[3] == 'n':
                    form = lem_adj('n', el.text.lower())
                    canon.append(form)
        # Lemmatise the head itself with the general canonical model.
        lem = Lemmatizer()
        lem.load_model(os.path.join(BASEDIR, 'lemmagen_models/kanon.bin'))
        head_form = lem.lemmatize(head.text.lower())
        canon.append(head_form)
        for el in post:
            canon.append(el.text)
        return ' '.join(canon)
# def process(data):
# '''data is a list of pairs (lemma, form)
# '''
# terms = [x[1] for x in data]
# lemmas = [x[0] for x in data]
# text = '\n'.join(terms)
# doc = process_nlp_pipeline('sl', text)
#
# result = []
# for term, sent, lemma in zip(terms, doc.sentences, lemmas):
# result.append((lemma, find_canon(sent)))
# return result
def process(forms):
    """Return the canonical form of every surface form in *forms*.

    Each form is analysed as its own sentence by the Slovene pipeline.
    """
    document = process_nlp_pipeline('sl', '\n'.join(forms))
    return [find_canon(sentence) for sentence in document.sentences]
def read_csv(fname, columnID=0):
    """Read column *columnID* of CSV file *fname* as a list of strings.

    The delimiter is sniffed from the first 2 KiB of the file; rows too short
    to contain the requested column are reported and skipped.
    """
    data = []
    # newline='' is the documented way to open files for the csv module.
    with open(fname, newline='') as csvfile:
        try:
            dialect = csv.Sniffer().sniff(csvfile.read(2048))
        except csv.Error:
            print('Warning: cannot determine delimiter, assuming Excel CSV dialect.')
            dialect = 'excel'
        csvfile.seek(0)
        reader = csv.reader(csvfile, dialect)
        for i, row in enumerate(reader):
            try:
                data.append(row[columnID])
            # BUG FIX: catch only the expected "row too short" error; the
            # former bare except also swallowed KeyboardInterrupt and friends.
            except IndexError:
                print('Error, line {}'.format(i))
    return data
if __name__ == '__main__':
    # Command-line entry point: canonise one column of a CSV file.
    parser = argparse.ArgumentParser(description='Converter to canonical form in Slovene language')
    parser.add_argument('csv_file', type=argparse.FileType('r'), help='Input csv file')
    parser.add_argument('column_id', type=int, help='CSV column number (zero indexed)')
    arguments = parser.parse_args()
    rows = read_csv(arguments.csv_file.name, columnID=arguments.column_id)
    for canon in process(rows):
        print('{}'.format(canon))
# if __name__ == '__main__':
# if len(sys.argv) != 2:
# print('Usage: python generate_cannonical.py <csv file>')
# else:
# data = read_csv(sys.argv[1], columnID=0, sep='\t')
# results = process(data)
# for canon in results:
# print('{}'.format(canon))
| 2.5 | 2 |
roster2ical/roster.py | SimonCW/roster2cal | 0 | 12798155 | # AUTOGENERATED! DO NOT EDIT! File to edit: 00_roster.ipynb (unless otherwise specified).
# Public API of this autogenerated module (see the header note above).
__all__ = ['ShiftProperties', 'Shift', 'Roster']
# Cell
from dataclasses import dataclass
from datetime import datetime, timedelta, date, time
from ics import Calendar, Event
import re
from typing import Optional
from zoneinfo import ZoneInfo
@dataclass
class ShiftProperties:
    """Static description of one shift type."""

    name: str  # human-readable shift name, used as the calendar event title
    starting_hour: timedelta  # offset from midnight at which the shift begins
    duration: timedelta  # how long the shift lasts
@dataclass
class Shift:
    """One concrete shift: a shift type scheduled on a particular day."""

    properties: ShiftProperties  # the shift type being worked
    date: datetime  # midnight (tz-aware) of the day the shift takes place

    def __post_init__(self):
        # Absolute start time = day + the shift type's starting-hour offset.
        self.beginning: datetime = self.date + self.properties.starting_hour
# Cell
@dataclass
class Roster:
    """A month's worth of shifts for one person, exportable to ICS."""

    shifts: list[Shift]
    name: str = "<NAME>"
    _year: int = 2022
    _month: int = 3  # TODO: Read from Excel
    _dayp = re.compile(r"MO|DI|MI|DO|FR|SA|SO")  # weekday prefix (currently unused)
    _datep = re.compile(r"\d{2}")  # day-of-month digits inside a date string

    @classmethod
    def from_dict(
        cls, input: dict[str, str], mapper: Optional[dict] = None
    ) -> "Roster":
        """Build a Roster from a {date string: shift abbreviation} mapping.

        *mapper* translates a shift abbreviation to its ShiftProperties.
        Unknown or unmapped abbreviations are reported and skipped; the
        placeholder "(/)" (free day) is skipped silently.
        """
        # BUG FIX: mapper=None previously crashed with an uncaught TypeError
        # on the first lookup; treat it as an empty mapping instead.
        mapper = mapper if mapper is not None else {}
        shifts = []
        for date_str, abbr in input.items():
            if abbr == "(/)":
                continue
            if abbr not in mapper:
                print(f"Shift abbrevation not found in mapper: {abbr}")
                continue
            props = mapper[abbr]
            if not props:
                print(f"No properties for shift abbrevation: {abbr}")
                continue
            date = datetime(
                year=cls._year,
                month=cls._month,
                day=int(cls._datep.search(date_str).group()),
                tzinfo=ZoneInfo("Europe/Berlin"),
            )
            shifts.append(Shift(props, date=date))
        return cls(shifts=shifts)

    def to_ics(self):
        """Export all shifts as an ics.Calendar of events."""
        calendar = Calendar()
        for shift in self.shifts:
            event = Event()
            event.name = shift.properties.name
            event.begin = shift.beginning
            event.duration = shift.properties.duration
            calendar.events.add(event)
        return calendar
| 2.65625 | 3 |
ArticleTranslation.py | soumendrak/Odia-Translation | 0 | 12798156 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
import re
import goslate
def trans(word):
    """Translate *word* to Odia via goslate; fall back to the input when the
    translation comes back empty."""
    translator = goslate.Goslate()
    translated = translator.translate(word, 'or')
    if translated == "":
        translated = word
    return translated
# Interactive loop: fetch an English Wikipedia article, translate its body to
# Odia with trans(), then re-attach the original <ref> markup by number.
# NOTE(review): Python 2 code (print statements, urllib2, raw_input).
# NOTE(review): several literals below look HTML-unescaped by extraction
# (e.g. replace('<', '<') was presumably replace('&lt;', '<'), and the
# single-space replaces may originally have been '&#160;') — verify against
# the original file before editing behavior.
i = 3
while i != 100000:
    ArtName = raw_input('\n\ntype the name of article from english wikipedia: ')
    urls = "\n\nhttps://en.wikipedia.org/wiki/" + ArtName
    urls2 = "https://en.wikipedia.org/w/index.php?title=" + ArtName + "&action=edit"
    print urls
    request = urllib2.Request(urls)
    handle = urllib2.urlopen(request)
    content = handle.read()
    if '<table class="infobox' in content:
        print '\n\ndata about ' + ArtName + ' found... working...\n\n'
        # Keep only the article body between the first paragraph and the
        # "References" section heading.
        splitted_page = content.split('<p>', 1);
        splitted_page = splitted_page[1].split('<span class="mw-headline" id="References">References</span>', 1)
        art = splitted_page[0]
        art = art.replace('</p>', '\n')
        art = art.replace(' ', '')
        art = art.replace('<span class="mw-headline"', '\n==<')
        # Strip remaining HTML tags before translation.
        art = re.sub('\<.*?\>','', art)
        art = re.sub('\ox .*?\em">','', art)
        to_print = trans(art.decode('utf-8')) #translate the text in to Odia
        # to write reference --BEGIN
        request2 = urllib2.Request(urls2)
        handle2 = urllib2.urlopen(request2)
        content2 = handle2.read()
        # Pull the raw wikitext out of the edit-page textarea.
        content2 = content2.split('name="wpTextbox1">', 1)
        content2 = content2[1].split('</textarea>', 1)
        content2 = content2[0]
        content2 = content2.replace('<', '<')
        content3 = content2
        ref = input('entre the no. of references: ')
        i = 0
        while i != ref:
            if '<ref' in content2:
                # Extract the next <ref>...</ref> (or self-closing <ref .../>)
                # block and splice it back in place of its [n] marker.
                ref_dataa = content2.split('<ref', 1)
                if '</ref>' in ref_dataa[1]:
                    ref_data = ref_dataa[1].split('</ref>', 1)
                    ref_data = ref_data[0]
                else:
                    ref_data = ''
                ref_data = '<ref' + ref_data + '</ref>'
                if '/>' in ref_data:
                    ref_data = ref_dataa[1].split('/>', 1)
                    ref_data = '<ref' + ref_data[0] + '/>'
                content2 = content2.replace(ref_data, '')
                ref_no = '[' + str(i+1) + ']'
                to_print = to_print.replace(ref_no.decode('utf-8'), ref_data.decode('utf-8'))
            i+=1
        # to write reference -- END
        print "'''" + ArtName + "'''\n"
        # Convert the translated "[edit]" markers into section delimiters.
        to_print = to_print.replace('[ସମ୍ପାଦନା]'.decode('utf-8'), '==\n')
        print to_print
        #to print text after reference as it is -- BEGIN
        as_it_is = content3.split('==References==', 1)
        as_it_is = as_it_is[1]
        as_it_is = as_it_is.replace('</p>', '\n')
        as_it_is = as_it_is.replace(' ', '')
        print '\n==References==\n', as_it_is
        #to print text after reference as it is -- END
    else:
        print 'escape', i+1
        i+=1
| 3.328125 | 3 |
EloRater.py | Wally869/RankingELO-Python | 2 | 12798157 | from __future__ import annotations
def GetWinningProbability(rating1: float, rating2: float):
    """Elo logistic expectation for the (rating1, rating2) pair.

    NOTE(review): with this orientation the value *decreases* as rating1
    grows, i.e. it is the expectation for the second player — confirm the
    intended convention with callers before relying on the name.
    """
    exponent = (rating1 - rating2) / 400
    return 1.0 / (1.0 + 10.0 ** exponent)
def ComputeDeltaRating(ratingPlayer1: float, ratingPlayer2: float, isWinPlayer1: bool) -> float:
    """Raw Elo rating change for player 1: actual score minus expected score.

    The expected score of player 1 is 1 / (1 + 10 ** ((r2 - r1) / 400)).
    The result is not yet scaled by a K-factor; multiply by K before
    applying it to a rating.  (Removed the unused symmetric P1 computation.)
    """
    # Expected score of player 1 — higher when player 1 out-rates player 2.
    expected_player1 = 1.0 / (1.0 + pow(10, ((ratingPlayer2 - ratingPlayer1) / 400)))
    # bool arithmetic: a win counts as 1, a loss as 0.
    return (isWinPlayer1 - expected_player1)
| 2.8125 | 3 |
src/command/voice_log/main.py | link1345/Vol-GameClanTools-DiscordBot | 0 | 12798158 |
import sys
import discord
from discord.ext import tasks
import base.command_base as base
import base.DiscordSend as Sendtool
import base.ColorPrint as CPrint
import base.time_check as CTime
import os
import collections as cl
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import json
import command.voice_log.Config_Main as CSetting
import command.voice_log.chart as Chart
import pandas as pd
class command(base.command_base):
    """Voice-channel activity logger.

    Records join/leave/move events to a rolling JSON file and posts last
    month's raw and processed logs to Discord once a month.
    """

    def __init__(self):
        super().__init__()
        self.test_task: tasks.Loop = None
        # Path of the JSON file collecting the current month's raw events.
        self.now_filepath = CSetting.baseLogFolder + CSetting.JSONPATH_row + CSetting.JSONPATH_now

    # JSON append — adapted from https://qiita.com/KEINOS/items/ea4bda15506bbd3e6913
    def append_json_to_file(self, _dict, path_file):
        """Append *_dict* to the JSON array stored in *path_file*, in place."""
        try:
            with open(path_file, 'ab+') as f:  # open for binary append/update
                f.seek(0, 2)  # jump to the end of the file
                if f.tell() == 0:  # file is empty?
                    # Write a fresh one-element JSON array.
                    f.write(json.dumps([_dict], indent=4, ensure_ascii=False).encode())
                else:
                    f.seek(-1, 2)  # step back over the closing ']'
                    f.truncate()  # drop it so the array can be extended
                    f.write(' , '.encode())  # array separator
                    f.write(json.dumps(_dict, indent=4, ensure_ascii=False).encode())
                    f.write(']'.encode())  # close the JSON array again
        except OSError as e:
            # The target file (or its directory) could not be opened.
            CPrint.error_print(path_file + "が、存在しませんでした")
            print(os.getcwd())
            print(e)
            return None
        # The 'with' block already closed the file; nothing useful to return.
        return None

    # Monthly JSON export (rotates the raw log file).
    async def MonthOutput(self, client: discord.Client):
        """Rotate last month's raw log, build the processed CSV and return
        its path, or None when there was no raw log to process."""
        today = datetime.today()
        filetime = today - relativedelta(months=1)
        # Name the rotated file after the previous month, e.g. "202203.json".
        m_month = datetime.strftime(filetime, '%m')
        m_year = datetime.strftime(filetime, '%Y')
        month_filename = '{0}{1}'.format(m_year, m_month)
        mv_filename = CSetting.baseLogFolder + CSetting.JSONPATH_row + month_filename + ".json"
        if os.path.exists(self.now_filepath) == False:
            # TODO: report this error properly instead of failing silently.
            return None
        # Rotate the current raw log out of the way...
        os.rename(self.now_filepath, mv_filename)
        # ...and recreate an empty "current" raw log.
        with open(self.now_filepath, "w"):
            pass
        # Build the processed per-member time table from the rotated log.
        timeData = await Chart.makeTimeList(client, mv_filename, CSetting.OneMonthOutput_RoleID, mode="NAME")
        # Save the processed data as CSV.
        if timeData is not None:
            send_fileName = CSetting.baseLogFolder + CSetting.JSONPATH_analysis + month_filename + ".csv"
            timeData.to_csv(send_fileName)
            return send_fileName
        else:
            return None

    @staticmethod
    def _event_record(flag, before_channel, after_channel, member):
        """Build one ordered event record; a missing channel becomes "NULL"."""
        data = cl.OrderedDict()
        data["Flag"] = flag
        data["before.channel.name"] = before_channel.name if before_channel else "NULL"
        data["before.channel.id"] = before_channel.id if before_channel else "NULL"
        data["after.channel.name"] = after_channel.name if after_channel else "NULL"
        data["after.channel.id"] = after_channel.id if after_channel else "NULL"
        data["member.name"] = member.name
        data["member.discriminator"] = member.discriminator
        data["member.id"] = member.id
        data["time"] = datetime.now().strftime("%Y/%m/%d %H:%M:%S")
        return data

    # Join/leave monitoring
    async def on_voice_state_update(self, config, client: discord.Client, member: discord.Member, before: discord.VoiceState, after: discord.VoiceState):
        """Record one voice event (entry / exit / move) to the current log."""
        if before.channel is None:
            # Member joined a voice channel.
            print(datetime.now().strftime("%Y/%m/%d %H:%M:%S"), ":", after.channel.name, "から", member.name, "#", member.discriminator, "さんが入りました")
            data = self._event_record("entry", None, after.channel, member)
        elif after.channel is None:
            # Member left a voice channel.
            print(datetime.now().strftime("%Y/%m/%d %H:%M:%S"), ":", before.channel.name, "から", member.name, "#", member.discriminator, "さんが抜けました")
            data = self._event_record("exit", before.channel, None, member)
        elif after.channel.id != before.channel.id:
            # Member moved between voice channels.
            print(datetime.now().strftime("%Y/%m/%d %H:%M:%S"), ":", before.channel.name, "から", member.name, "#", member.discriminator, "さんが移動しました")
            data = self._event_record("move", before.channel, after.channel, member)
        else:
            # BUG FIX: mute/deafen updates previously appended an *empty*
            # record to the log file; ignore them instead.
            return
        self.append_json_to_file(data, self.now_filepath)

    # Scheduled monthly report
    async def voice_outputlog(self, config, client: discord.Client):
        """Once a month, post last month's raw and processed voice logs."""
        channellist = []
        if config.get("on_task") is not None:
            if config["on_task"].get(sys._getframe().f_code.co_name) is not None:
                channellist = config["on_task"][sys._getframe().f_code.co_name].get("message-channelID")
        if channellist is None:
            return
        # --------
        flag = False
        # Decide whether to run now.
        # NOTE: the scheduler cannot fire at an exact date, so poll instead.
        TestFlag = False  # --- keep False except when testing this scheduled job ---
        if TestFlag == False:  # run on the 1st of the month at 00h
            flag = CTime.check('%d %H', '01 00')
        else:  # test mode: run on every full hour
            flag = CTime.check('%M', '00')
        # -- output --
        if flag:
            sendfile = await self.MonthOutput(client=client)
            # BUG FIX: 'today' was undefined in this scope (it only existed
            # inside MonthOutput), raising NameError as soon as the job fired.
            today = datetime.today()
            filetime = today - relativedelta(months=1)
            m_month = datetime.strftime(filetime, '%m')
            m_year = datetime.strftime(filetime, '%Y')
            month_filename = '{0}{1}'.format(m_year, m_month)
            mv_filename = CSetting.baseLogFolder + CSetting.JSONPATH_row + month_filename + ".json"
            if sendfile is None:
                text = "【一か月定期連絡】" + m_year + "年" + m_month + "月の音声チャンネルログインはありませんでした"
                await Sendtool.Send_ChannelID(client=client, channelID=CSetting.OneMonthOutput_ChannelID, message=text, filename=None)
            else:
                text = "【一か月定期連絡】" + m_year + "年" + m_month + "月の音声チャンネルログイン生データ"
                await Sendtool.Send_ChannelID(client=client, channelID=CSetting.OneMonthOutput_ChannelID, message=text, filename=mv_filename)
                text = "【一か月定期連絡】" + m_year + "年" + m_month + "月の音声チャンネルログイン加工データ"
                await Sendtool.Send_ChannelID(client=client, channelID=CSetting.OneMonthOutput_ChannelID, message=text, filename=sendfile)
backend/app/routers/documents.py | nokia-wroclaw/innovativeproject-wiki | 9 | 12798159 | <filename>backend/app/routers/documents.py
"""
TODO module docstring
"""
import json
from fastapi import APIRouter
from app.routers.files import get_document_path
from app.utils.message import Message, MsgStatus
# All endpoints in this module are served under /api/document.
router = APIRouter(prefix="/api/document", tags=["Document Management"])
# Name of the JSON file holding a document's content inside its folder.
DOCUMENT_FILE = "document.json"
@router.get("/{document_name}")
async def load_document_content(workspace_name: str, document_name: str) -> json:
    """Return the stored JSON content of *document_name* in *workspace_name*."""
    path = get_document_path(workspace_name, document_name) / DOCUMENT_FILE
    # JSON documents are UTF-8; be explicit so the platform default
    # encoding cannot corrupt non-ASCII content.
    with open(path, "r", encoding="utf-8") as document_file:
        document_data = json.load(document_file)
    return document_data
@router.post("/{document_name}")
async def save_document_content(
    workspace_name: str, document_name: str, document_data: list
) -> Message:
    """Overwrite the stored JSON content of *document_name* and report success."""
    path = get_document_path(workspace_name, document_name) / DOCUMENT_FILE
    # Match the reader: write UTF-8 explicitly rather than the platform default.
    with open(path, "w", encoding="utf-8") as document_file:
        json.dump(document_data, document_file, indent=4)
    return Message(
        status=MsgStatus.INFO, detail="Document content updated successfully"
    )
| 2.640625 | 3 |
leetcode/add_binary.py | zhangao0086/Python-Algorithm | 3 | 12798160 | <gh_stars>1-10
#!/usr/bin/python3
# -*-coding:utf-8-*-
__author__ = "Bannings"
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Add two binary strings digit-by-digit and return the binary sum."""
        digits = []
        carry = 0
        reversed_a, reversed_b = a[::-1], b[::-1]
        for i in range(max(len(reversed_a), len(reversed_b))):
            total = carry
            if i < len(reversed_a):
                total += ord(reversed_a[i]) - ord('0')
            if i < len(reversed_b):
                total += ord(reversed_b[i]) - ord('0')
            digits.append(chr(total % 2 + ord('0')))
            carry = total // 2
        if carry:
            digits.append('1')
        return ''.join(reversed(digits))
if __name__ == '__main__':
    # Quick self-check of the two LeetCode sample cases.
    solver = Solution()
    assert solver.addBinary("11", "1") == "100"
    assert solver.addBinary("1010", "1011") == "10101"
cvpy25.py | L3ndry/guanabara-python | 0 | 12798161 | nome_completo = input("Digite o seu nome completo: ").lower()
print("silva" in nome_completo)
| 3.234375 | 3 |
blackjack/hand.py | Simon-Lee-UK/blackjack-game | 1 | 12798162 | """
This module exports the 'Hand' class, 'PlayerHand' and 'DealerHand' subclasses, and related methods.
"""
import time
# NOTE(review): draw_delay is unused in the visible portion of the file —
# presumably consumed by code below this chunk; verify before removing.
draw_delay = 1  # The pause in seconds between drawn card actions
twenty_one = 21  # Ideal score value for both players
class Hand:
    """
    A class defining the properties and methods of a hand object.

    A hand object is a collection of cards associated with either the dealer or a player (each having their own
    respective subclasses with specialised methods and attributes). Within a round of blackjack, cards are added to a
    hand when the associated player chooses to 'hit'. The outcome of each round is determined by the relative values
    of the player's and dealer's hands.
    """

    def __init__(self, holder_name="Player"):
        """
        Initialises an empty hand object for a given participant.

        Parameters
        ----------
        holder_name : str
            Defines the owner, or 'holder', of the hand object being created: either 'Player' or 'Dealer'.
            Defaults to 'Player' for this base hand class.
        """
        self._live_hand = (
            []
        )  # A list of card objects making up the hand; initialised as an empty list
        self._active = True  # The active status communicates whether the hand is still active in the current round
        self._bust = False  # The bust status communicates whether the hand is bust (value > 21) in the current round
        self._natural = False  # The natural status communicates whether the hand is a natural (value = 21 with 2 cards)
        self._holder_name = holder_name

    def __iter__(self):
        """
        Allows hand objects to be iterated over, yielding constituent card objects in the order they were added.

        Yields
        ------
        card : blackjack.card.Card
            The next card in the hand (within the hand object's '_live_hand' attribute).
        """
        for card in self._live_hand:
            yield card

    def __repr__(self):
        """
        Entering the reference for a hand object in the terminal triggers this method, printing all hand details.

        Returns
        -------
        Output of 'print_hand' method : str
            Prints the hand's owner followed by shorthand details of all cards currently within the hand.
        """
        return self.print_hand()

    def __len__(self):
        """Allows len() to be used on hand objects, returning the number of cards in the hand as the object 'length'."""
        return len(self._live_hand)

    def hand_value(self, bypass_face_down=False):
        """
        Returns the total value(s) of the target hand by summing the values of all constituent card objects.

        Parameters
        ----------
        bypass_face_down : bool
            Tells method whether to include face-down cards in calculating the value(s) of the hand. Defaults to False.

        Returns
        -------
        hand_value_list : list of int / str
            A list containing all possible values the hand's combination of cards can take with no duplicates. For a
            hand with all cards face-up: returns a list of integers. For hands with any cards face-down: returns a
            list of strings.
        """
        ace_count = 0
        ace_values = None
        face_down_count = 0
        non_ace_sum = 0
        # Loop: counts number of face-down cards in the hand; counts face-up aces; sums face-up cards that aren't an ace
        for card in self:
            # Try statement catches AssertionErrors thrown when 'is_ace' method encounters a face-down card
            try:
                if card.is_ace(bypass_face_down):
                    ace_count += 1
                    ace_values = card.card_value(bypass_face_down)
                else:
                    non_ace_sum += card.card_value(bypass_face_down)
            except AssertionError:
                face_down_count += 1
        # This if-else block defines a list of possible values associated with all face-up cards in the hand
        if ace_count > 0:
            ace_sum_possibilities = self._calculate_ace_values(ace_count, ace_values)
            ace_sum = [
                possibility + non_ace_sum for possibility in ace_sum_possibilities
            ]
            hand_value_list = ace_sum
        else:
            hand_value_list = [non_ace_sum]
        # Where the hand contains face-down cards, this block adds the consistent face-down string to the face-up values
        if face_down_count > 0:
            hand_value_list = [
                str(value) + " + *-*" * face_down_count for value in hand_value_list
            ]
        return hand_value_list

    def best_hand_value(self):
        """
        Returns the best possible value of the hand as an integer. If hand value is bust (> 21), returns None.

        Returns
        -------
        best_value : int or None
            The best possible total value of the hand's constituent cards. If no hand value <= 21, 'best_value' = None.
        """
        max_best_value = 21
        # Face-down cards are included here so the dealer's hidden card counts
        # towards the true hand value.
        all_hand_values = self.hand_value(bypass_face_down=True)
        try:
            best_value = max([val for val in all_hand_values if val <= max_best_value])
        except ValueError:
            # max() on an empty list: every possible value exceeds 21.
            best_value = None
        return best_value

    def is_active(self):
        """
        As a boolean, returns the active status of the hand in the current round (bust/stand = False; otherwise = True).

        A hand is regarded as active in a round while cards can still be added to the hand. Once a player decides to
        'stand' at their hand's current value, or if they go bust (> 21), the hands '_active' attribute is set to False
        signalling that no further actions are required by the player holding the hand in the current round.

        Returns
        -------
        bool
            True when hand can still receive cards in the current round; otherwise False.
        """
        return self._active

    def is_bust(self):
        """
        As a boolean, returns 'bust' status of hand in the current round (value > 21: returns True; otherwise False).

        Returns
        -------
        bool
            True when lowest possible hand value exceeds 21; otherwise False.
        """
        return self._bust

    def is_natural(self):
        """
        As a boolean, returns 'natural' status of hand (2 cards in hand and value = 21: returns True; otherwise False).

        Returns
        -------
        bool
            True when card contains two cards with combined value of 21; otherwise False.
        """
        return self._natural

    def stand(self):
        """Updates hand status to inactive: triggered when player chooses to draw no more cards in the current round."""
        self._active = False

    def draw_card(self, deck_obj, face_dir="up"):
        """
        Removes one card from the input deck and adds this card to the hand with orientation defined by 'face_dir'.

        Calls the 'deal_card' method of an input deck object, the deck returns a single card object and deletes this
        card from the deck. If the 'face_dir' input argument requires the hand to be dealt face-down, the freshly
        drawn card (face-up by default) calls its 'flip_card' method to ensure the card is correctly face-down before it
        it is appended to the hand array. Finally, the method calls '_validate_hand_status' that checks whether the hand
        is now bust and updates all hand statuses accordingly.

        Parameters
        ----------
        deck_obj : blackjack.deck.Deck
            The game's 'live' deck object - a card will be removed from this deck and added to the current hand object.
        face_dir : str
            Defines whether card is added to the hand face-up or face-down. By default, the card will be added
            face-up with face_dir = 'up'. Any value of face_dir not spelling 'up' (case-insensitive) will add the card
            face-down.

        Raises
        ------
        AssertionError
            Raised when the hand is inactive (can't accept further cards).
        """
        # NOTE(review): assert-based validation is stripped under `python -O`;
        # callers relying on this check should not run optimised.
        assert (
            self.is_active()
        ), "Cannot draw a card to this hand: it is marked as inactive in the current round."
        drawn_card = deck_obj.deal_card()
        if face_dir.lower() != "up":
            drawn_card.flip_card()
        self._live_hand.append(drawn_card)
        self._verify_hand_status()

    def print_hand(self, alt_text=None):
        """
        Prints the hand's owner followed by shorthand details of all cards currently within the hand.

        Parameters
        ----------
        alt_text : str
            This optional argument will be printed instead of the hand owner's name if provided.

        Returns
        -------
        empty_string : str
            An empty string, returned so that the 'print_hand' method can be called by the Hand class' __repr__
            method which must return a string-like object.
        """
        empty_string = ""
        # "Charles' hand" vs "Player's hand" — apostrophe placement.
        ends_with_s = self._holder_name[-1].lower() == "s"
        if alt_text is not None:
            print(alt_text)
        elif ends_with_s:
            print(f"\n{self._holder_name}' hand")
        else:
            print(f"\n{self._holder_name}'s hand")
        for idx, single_card in enumerate(self):
            print(f"Card {idx}: {single_card.short_card_details()}")
        if (
            self.is_active()
            or self.is_bust()
            or (self.best_hand_value() == twenty_one and alt_text is not None)
        ):
            print(f"Value: {self.hand_value()}")
        return empty_string

    def _verify_hand_status(self):
        """Checks whether the hand is bust, has value equal to 21 or is a natural. Updates hand status accordingly."""
        natural_length = 2  # a 'natural' blackjack is exactly two cards totalling 21
        if self.best_hand_value() is None:
            self._bust = True
            self.stand()
        elif self.best_hand_value() == twenty_one:
            # At 21 there is no reason to draw again, so the hand stands itself.
            self.stand()
            if len(self) == natural_length:
                self._natural = True

    @staticmethod
    def _calculate_ace_values(ace_count, ace_values):
        """
        Returns the possible values of a collection of ace cards as a sorted list.

        Parameters
        ----------
        ace_count : int
            The number of ace cards to calculate possible summed values for.
        ace_values : tuple
            A two-element tuple containing the possible card values an ace can take e.g. (1, 11).

        Returns
        -------
        ace_sum_possibilities : list of int
            A list containing each value 'ace_count' number of aces can combine to make.

        TODO: Refactor to allow any number of possible ace values (additional loop over keys of dict?)
        """
        ace_sum_possibilities = [0]
        # Each ace doubles the candidate set (low/high value); set() dedupes.
        for ace_idx in range(ace_count):
            first_set = [
                ace_values[0] + ace_sum_element
                for ace_sum_element in ace_sum_possibilities
            ]
            second_set = [
                ace_values[1] + ace_sum_element
                for ace_sum_element in ace_sum_possibilities
            ]
            ace_sum_possibilities = list(set(first_set + second_set))
        ace_sum_possibilities.sort()
        return ace_sum_possibilities
class DealerHand(Hand):
"""
A subclass defining the properties and methods specific to a hand object held by the dealer.
The dealer's hand is unique because: the first card dealt to the dealer will always be dealt face-down;
the dealer's turn in a single round must be resolved automatically.
"""
def __init__(self):
"""Calls the __init__ method of the base Hand class, initialising an empty hand object for the dealer."""
super().__init__("Dealer")
def draw_card(self, deck_obj, face_dir=None):
"""
Removes one card from the input deck and adds this card to the hand with orientation defined by 'face_dir'.
Parameters
----------
deck_obj : blackjack.deck.Deck
The game's 'live' deck object - a card will be removed from this deck and added to the dealer's hand object.
face_dir : None / str
Defines whether card is added to the hand face-up or face-down. By default, 'face_dir' is None when
method is called against a dealer's hand object. Where None, the orientation of the card is determined
by the number of cards currently in the dealer's hand. If the dealer currently has a single card in their
hand, the card is dealt face-down; otherwise face-up. If the method is called with face_dir specified, it
behaves identically to the equivalent method on the base Hand class.
"""
if face_dir:
super().draw_card(deck_obj, face_dir)
elif len(self) == 1:
face_dir = "down"
super().draw_card(deck_obj, face_dir)
else:
face_dir = "up"
super().draw_card(deck_obj, face_dir)
def resolve_hand(self, deck_obj, player_hand, player_score_message):
"""
This method automatically resolves the dealer's hand: drawing cards until the hand value exceeds seventeen.
Method initially checks the dealer's hand value: if its best value > 17, the dealer stands. If < 17, the hand
draws cards until its value exceeds 17 or goes bust. The dealer's final hand score is printed to the screen
or the player is informed that the dealer has gone bust.
Parameters
----------
deck_obj : blackjack.deck.Deck
The game's 'live' deck object - cards may be removed from this deck and added to the dealer's hand object.
player_hand : blackjack.hand.PlayerHand
A player's 'live' hand object. Allows the player's hand to be printed for comparison as the dealer's hand is
resolved.
player_score_message : str
A string that communicates the players score. As the dealer's hand is resolved, the players score is
printed each time the dealer's hand is printed so the user can easily compare the relative scores.
"""
dealer_target = 17
print(player_score_message)
if player_hand.best_hand_value() == twenty_one:
print("You've got 21!")
time.sleep(draw_delay)
self._reveal_hand()
while self.is_active():
if self.best_hand_value() < dealer_target:
self.draw_card(deck_obj)
self.print_hand(alt_text="\nDealer hits:")
player_hand.print_hand()
print(player_score_message)
print("\n---")
time.sleep(draw_delay)
else:
self.stand()
self.print_hand(alt_text="\nDealer stands:")
print(f"Dealer's score = {self.best_hand_value()}")
player_hand.print_hand()
print(player_score_message)
break
if self.is_bust():
self.print_hand(alt_text="\nDealer has gone bust!")
player_hand.print_hand()
print(player_score_message)
print("\n---")
def _reveal_hand(self):
"""Turns all cards in the hand face-up and prints hand details to the screen."""
print("\n---------------")
for card in self:
if not card.is_face_up():
card.flip_card()
self.print_hand(alt_text="Dealer reveals hand:")
print("---------------")
time.sleep(draw_delay)
def settle_naturals(self, player_hand, player_obj):
"""
Method detects naturals and settles any bets as necessary; returns True if round is concluded, otherwise False.
A hand is a 'natural' if it contains two cards with a total value of 21. Players and dealers can get naturals
upon drawing their first two cards at the start of a round. If the dealer gets a natural, the round is over and
they collect the bet of any player who did not also get a natural. If a player gets a natural and the dealer did
not, they are immediately paid 1.5x the value of their bet.
Parameters
----------
player_hand : blackjack.hand.PlayerHand
A player's 'live' hand object. The 'natural' status of this hand is read and compared to the status of the
dealer's hand. Where a payout is required, the amount bet against the hand is also read into 'bet_amount'.
player_obj : blackjack.player.Player
The player object that owns the input 'player_hand'. Where a payout is required, this player's balance
will be updated accordingly.
Returns
-------
round_complete : bool
Returns True if no further actions are possible in the current round, following the settling of naturals;
otherwise False (and the round continues).
"""
if not any((self.is_natural(), player_hand.is_natural())):
round_complete = False
return round_complete
else:
round_complete = True
bet_amount = player_hand.get_bet()
if self.is_natural() and not player_hand.is_natural():
# No action, round ends and bet is collected (discarded) automatically with player's hand
self._reveal_hand()
print("Dealer has a natural!")
elif not self.is_natural() and player_hand.is_natural():
# Player wins 1.5x their original bet; multiplier is 2.5x so bet amount is also deposited back into balance
print(f"\n{player_obj.get_name()} has a natural (dealer does not)!")
payout_multiplier = 2.5
player_obj.update_balance(bet_amount * payout_multiplier)
elif all((self.is_natural(), player_hand.is_natural())):
# Stand-off between player and dealer: player's bet is deposited back into balance
print(f"\n{player_obj.get_name()} has a natural!")
self._reveal_hand()
print("\nSo does the dealer! It's a stand-off!")
payout_multiplier = 1
player_obj.update_balance(bet_amount * payout_multiplier)
return round_complete
def settle_bet(self, player_hand, player_obj):
"""
Method settles any bets at the end of the round; where the player loses, the method exits and their bet is lost.
The value of the dealer's and player's hands are compared. If the player wins, their player object is payed the
value of their bet plus the original bet amount is returned. If it's a draw, the bet is returned to the player's
balance but they receive no winnings. If the player loses, the method exits and their balance is uneffected.
The bet placed against their hand is lost when a new round starts and new hands are initialised.
Parameters
----------
player_hand : blackjack.hand.PlayerHand
A player's 'live' hand object. The value of this hand is read and compared to the value of the
dealer's hand. Where a payout is required, the amount bet against the hand is also read into 'bet_amount'.
player_obj : blackjack.player.Player
The player object that owns the input 'player_hand'. Where a payout is required, this player's balance
will be updated accordingly.
"""
assert not any(
(self.is_active(), player_hand.is_active())
), "Bets cannot be settled between the dealer and a player unless both participants have 'stood' or gone bust."
if player_hand.is_bust():
return
if self.is_bust():
dealer_score = 0
else:
dealer_score = self.best_hand_value()
if dealer_score > player_hand.best_hand_value():
return
else:
bet_amount = player_hand.get_bet()
if player_hand.best_hand_value() > dealer_score:
payout_multiplier = 2
player_obj.update_balance(bet_amount * payout_multiplier)
elif player_hand.best_hand_value() == dealer_score:
payout_multiplier = 1
player_obj.update_balance(bet_amount * payout_multiplier)
class PlayerHand(Hand):
"""
A subclass defining the properties and methods specific to a hand object held by a player.
Players' hands are special because bets can be made against these hands.
"""
def __init__(self, player_obj):
"""
Calls the __init__ method of the base Hand class, initialising an empty hand object for the player.
Parameters
----------
player_obj : blackjack.player.Player
The player object that owns the hand being initialised. The name of this player is queried and set
used to define the '_holder_name' attribute on the base class. This name is then displayed when printing
hand details to screen.
"""
self._bet = float(
0
) # An attribute holding the amount bet by a player against this hand: initially zero
player_name = player_obj.get_name()
super().__init__(player_name)
def add_bet(self, amount):
"""
Adds a bet made by a player to the current hand object: at the end of a round, the dealer resolves this bet.
Parameters
----------
amount : float
The amount bet against the hand object. In typical game flow, this bet amount has already been verified
as positive and has already been removed from the player's balance.
"""
self._bet += amount
def get_bet(self):
"""Returns the amount bet against this player's hand as a float."""
return self._bet
| 3.796875 | 4 |
create_input_files_coco.py | sulabhkatiyar/show_tell | 0 | 12798163 | from utils import create_input_files
"""
To create files that contain all images stored in h5py format and captions stored in json files.
Minimum word frequencies to be used as cut-off for removing rare words to be specifiied here.
"""
if __name__ == '__main__':
create_input_files(dataset='coco',
karpathy_json_path='path_to___dataset_coco.json',
image_folder='path_to__mscoco_folder',
captions_per_image=5,
min_word_freq=5,
output_folder='folder_for_processed_data',
max_len=50)
| 3.03125 | 3 |
website/TableManager.py | uclchem/HITS | 0 | 12798164 | import pandas as pd
class TableManager:
def __init__(self,table_file):
self.master_table=pd.read_csv(table_file).sort_values("Info",ascending=False)
def get_filtered_table(self,low_freq,high_freq,delta_freq,target):
low_freq=float(low_freq)
high_freq=float(high_freq)
delta_freq=float(delta_freq)
table=self.master_table[self.master_table["Target"]==target]
print(table.columns)
table=table[table["Freq 1"]>low_freq]
table=table[table["Freq 1"]<high_freq]
table=table[table["Freq 2"]>low_freq]
table=table[table["Freq 2"]<high_freq]
table=table[(table["Freq 1"]-table["Freq 2"]).abs()<delta_freq]
return table[["Feature","Info"]] | 3.203125 | 3 |
Python 3/Crash course/list comprehension.py | DarkShadow4/python | 0 | 12798165 | # Without list comprehension
squares1 = []
for value in range(1, 11):
squares1.append(value**2)
print(squares1)
# With list comprehension
squares2 = [value**2 for value in range(1, 11)]
print(squares2)
| 3.90625 | 4 |
pytorch2paddle.py | JiaoPaner/craft-det | 0 | 12798166 | # -*- coding: utf-8 -*-
# @Time : 2022/3/8 14:38
# @Author : jiaopaner
import sys
sys.path.insert(0, './')
import torch
from collections import OrderedDict
from craft import CRAFT
def copyStateDict(state_dict):
if list(state_dict.keys())[0].startswith("module"):
start_idx = 1
else:
start_idx = 0
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = ".".join(k.split(".")[start_idx:])
new_state_dict[name] = v
return new_state_dict
if __name__ == '__main__':
net = CRAFT()
net.load_state_dict(copyStateDict(torch.load("/Volumes/storage/resources/models/paddle-ocr-models/craft_mlt_25k.pth", map_location="cpu")))
net.eval()
#dynamic shape
x = torch.randn((1, 3, 960, 960))
torch.onnx.export(net, x, './pd_model/model.onnx', opset_version=11, input_names=["input"],
output_names=["output"], dynamic_axes={'input': [2,3]})
# x2paddle --framework=onnx --model=./pd_model/model.onnx --save_dir=pd_model_dynamic | 2.203125 | 2 |
Data/EGFR.py | cdhavala26/railroad-diagrams | 0 | 12798167 |
import sys
from railroad import *
print('<h1>Molecules</h1>')
add("EGF",
Diagram(
"EGF(",
Choice(0, Comment(" "),
"Site",),
")"
))
add("EGFR",
Diagram(
"EGFR(",
Choice(0, Comment(" "),
"ecd",),
Choice(0, Comment(" "),
"tmd",),
Choice(0, Comment(" "),
Sequence("Y1",
Choice(0, Comment(" "), "~u", "~p"),)),
Choice(0, Comment(" "),
Sequence("Y2",
Choice(0, Comment(" "), "~u", "~p"),)),
")"
))
add("Grb2",
Diagram(
"Grb2(",
Choice(0, Comment(" "),
"sh2",),
")"
))
add("Shc",
Diagram(
"Shc(",
Choice(0, Comment(" "),
"sh3",),
Choice(0, Comment(" "),
Sequence("Y",
Choice(0, Comment(" "), "~u", "~p"),)),
")"
))
print('<h1>Species</h1>')
add("EGFR",
Diagram(
"EGFR(",
Choice(0, Comment(" "),
"ecd",),
Choice(0, Comment(" "),
"tmd",),
Choice(0, Comment(" "),
Sequence("Y1",
Choice(0, Comment(" "), "~u"),)),
Choice(0, Comment(" "),
Sequence("Y2",
Choice(0, Comment(" "), "~u"),)),
")"
))
add("EGF",
Diagram(
"EGF(",
Choice(0, Comment(" "),
"Site",),
")"
))
add("Grb2",
Diagram(
"Grb2(",
Choice(0, Comment(" "),
"sh2",),
")"
))
add("Shc",
Diagram(
"Shc(",
Choice(0, Comment(" "),
"sh3",),
Choice(0, Comment(" "),
Sequence("Y",
Choice(0, Comment(" "), "~p"),)),
")"
))
add("Shc",
Diagram(
"Shc(",
Choice(0, Comment(" "),
"sh3",),
Choice(0, Comment(" "),
Sequence("Y",
Choice(0, Comment(" "), "~u"),)),
")"
))
add("EGFR.EGFR",
Diagram("EGFR(",
Choice(0, Comment(" "),
"ecd",),
Choice(0, Comment(" "),
Sequence("Y1",
Choice(0, Comment(" "), "~A"),)),
Choice(0, Comment(" "),
Sequence("Y2",
Choice(0, Comment(" "), "~B"),)),
")","EGFR(",
Choice(0, Comment(" "),
"ecd",),
Choice(0, Comment(" "),
Sequence("Y1",
Choice(0, Comment(" "), "~C"),)),
Choice(0, Comment(" "),
Sequence("Y2",
Choice(0, Comment(" "), "~D"),)),
")"
))
print('<h1>Observables</h1>')
add("EGF",
Diagram(
"EGF(",
Choice(0, Comment(" "),
"Site",),
")"
))
add("EGFR",
Diagram(
"EGFR(",
Choice(0, Comment(" "),
"ecd",),
Choice(0, Comment(" "),
"tmd",),
Choice(0, Comment(" "),
Sequence("Y1",
Choice(0, Comment(" "), "~u", "~p"),)),
Choice(0, Comment(" "),
Sequence("Y2",
Choice(0, Comment(" "), "~p"),)),
")"
))
| 2.390625 | 2 |
examples/spatially-varying-anisotropy/run.py | davidcortesortuno/finmag | 10 | 12798168 | <filename>examples/spatially-varying-anisotropy/run.py<gh_stars>1-10
"""
Demonstrating spatially varying anisotropy. Example with anisotropy vectors as follows:
-----------------------------------
--> --> --> --> --> --> --> --> -->
--> --> --> --> --> --> --> --> -->
--> --> --> --> --> --> --> --> -->
-----------------------------------
^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^ ^
| | | | | | | | | | | |
| | | | | | | | | | | |
-----------------------------------
"""
import os
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import pylab
import dolfin as df
import matplotlib.pyplot as plt
from finmag import Simulation
from finmag.field import Field
from finmag.energies import UniaxialAnisotropy, Exchange
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
def run_simulation(plot=False):
mu0 = 4.0 * np.pi * 10**-7 # vacuum permeability N/A^2
Ms = 1.0e6 # saturation magnetisation A/m
A = 13.0e-12 # exchange coupling strength J/m
Km = 0.5 * mu0 * Ms**2 # magnetostatic energy density scale kg/ms^2
lexch = (A/Km)**0.5 # exchange length m
unit_length = 1e-9
K1 = Km
L = lexch / unit_length
nx = 10
Lx = nx * L
ny = 1
Ly = ny * L
nz = 30
Lz = nz * L
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(Lx, Ly, Lz), nx, ny, nz)
# Anisotropy easy axis is (0, 0, 1) in the lower half of the film and
# (1, 0, 0) in the upper half. This is a toy model of the exchange spring
# systems that <NAME> is working on.
boundary = Lz / 2.0
expr_a = df.Expression(("x[2] <= b ? 0 : 1", "0", "x[2] <= b ? 1 : 0"), b=boundary, degree=1)
V = df.VectorFunctionSpace(mesh, "DG", 0, dim=3)
a = Field(V, expr_a)
sim = Simulation(mesh, Ms, unit_length)
sim.set_m((1, 0, 1))
sim.add(UniaxialAnisotropy(K1, a))
sim.add(Exchange(A))
sim.relax()
if plot:
points = 200
zs = np.linspace(0, Lz, points)
axis_zs = np.zeros((points, 3)) # easy axis probed along z-axis
m_zs = np.zeros((points, 3)) # magnetisation probed along z-axis
for i, z in enumerate(zs):
axis_zs[i] = a((Lx/2.0, Ly/2.0, z))
m_zs[i] = sim.m_field((Lx/2.0, Ly/2.0, z))
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(zs, axis_zs[:, 0], "-o", label="a_x")
ax.plot(zs, axis_zs[:, 2], "-x", label="a_z")
ax.plot(zs, m_zs[:, 0], "-", label="m_x")
ax.plot(zs, m_zs[:, 2], "-", label="m_z")
ax.set_xlabel("z (nm)")
ax.legend(loc="upper left")
plt.savefig(os.path.join(MODULE_DIR, "profile.png"))
sim.m_field.save_pvd(os.path.join(MODULE_DIR, 'exchangespring.pvd'))
if __name__ == "__main__":
run_simulation(plot=True)
| 2.71875 | 3 |
MetioTube/profiles/models.py | Sheko1/MetioTube | 0 | 12798169 | from cloudinary.models import CloudinaryField
from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
from MetioTube.core.validators import validate_image
UserModel = get_user_model()
class Profile(models.Model):
username = models.CharField(
max_length=30
)
profile_picture = CloudinaryField(
resource_type='image',
blank=True,
validators=(validate_image,)
)
about = models.TextField(
blank=True
)
user = models.OneToOneField(
UserModel,
on_delete=models.CASCADE,
primary_key=True
)
subscribers = models.ManyToManyField(
UserModel,
related_name='subscribers',
blank=True,
)
def __str__(self):
return self.username
| 2.34375 | 2 |
getseq.py | l0o0/bio-analysis-kit | 3 | 12798170 | <reponame>l0o0/bio-analysis-kit
#!/bin/python
# 2014-11-4 Linxzh
# retrive gene seq for genome seq by gene id
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser(description='Retrive gene sequence by gene id', prog='SeqGeter', usage='PROG [options]')
parser.add_argument('-i', help='file contains gene ids')
parser.add_argument('-o', help='output file in fasta format', type=argparse.FileType('w'))
parser.add_argument('-c', help='column of gene id, default is 1', type=int, default=1)
parser.add_argument('-g', help="format gene id as 'Csa1G000111', default is FALSE'", default='F', choices=['T', 'F'])
args = parser.parse_args()
def pos_dict(infile):
D={}
with open(infile) as f:
for fl in f:
if 'gene' in fl:
fl_list = fl.split()
chrom = fl_list[0]
geneid = fl_list[8].split(';')[0][3:]
start = int(fl_list[3]) - 1
end = int(fl_list[4])
D[geneid] = [chrom, start, end]
return D
def read_id(infile, c=1, g='F'):
gene_list = []
with open(infile) as f:
for fl in f:
if '#' in fl:
continue
elif 'Csa' not in fl:
continue
elif fl == '\n':
continue
fl_list = fl.split()
gene = fl_list[c-1]
if g == 'T':
gene = gene.split('.')[0]
gene = gene.replace('P','G')
gene = gene.replace('M','G')
gene_list.append(gene)
return gene_list
def get_seq(gene_list, outfile):
D = pos_dict('/share/fg3/Linxzh/Data/Cucumber_ref/Cucumber_20101104.gff3')
fa_dict = SeqIO.to_dict(SeqIO.parse('/share/fg3/Linxzh/Data/Cucumber_ref/whole_genome/origin/domestic_Chr_20101102.fa','fasta'))
for gene in gene_list:
seq = str(fa_dict[D[gene][0]][D[gene][1]:D[gene][2]].seq)
wl = '>%s\n%s\n' % (gene, seq)
outfile.write(wl)
outfile.close()
if __name__ == '__main__':
gene_list = read_id(args.i, c = args.c, g = args.g)
get_seq(gene_list, args.o)
| 2.984375 | 3 |
FRCScouting/TheBlueAlliance/team.py | xNovax/FRCScouting.ca | 1 | 12798171 | <gh_stars>1-10
from django.conf import settings
import tbaapiv3client
from tbaapiv3client.rest import ApiException
def get_team(teamkey):
configuration = tbaapiv3client.Configuration()
configuration.api_key['X-TBA-Auth-Key'] = settings.THE_BLUE_ALLIANCE_KEY
api_instance = tbaapiv3client.TeamApi(tbaapiv3client.ApiClient(configuration))
try:
api_response = api_instance.get_team("frc" + str(teamkey))
info = api_response
return info
except ApiException as e:
return None
def get_team_events(teamkey):
configuration = tbaapiv3client.Configuration()
configuration.api_key['X-TBA-Auth-Key'] = settings.THE_BLUE_ALLIANCE_KEY
api_instance = tbaapiv3client.TeamApi(tbaapiv3client.ApiClient(configuration))
try:
api_response = api_instance.get_team_events("frc" + str(teamkey))
info = api_response
return info
except ApiException as e:
return None
| 2.015625 | 2 |
ecco_v4_py/test/test_ecco_utils.py | owang01/ECCOv4-py | 24 | 12798172 |
import warnings
from datetime import datetime
import numpy as np
import xarray as xr
import pytest
import ecco_v4_py
from .test_common import all_mds_datadirs, get_test_ds
@pytest.mark.parametrize("mytype",['xda','nparr','list','single'])
def test_extract_dates(mytype):
dints = [[1991,8,9,13,10,15],[1992,10,20,8,30,5]]
dates = [datetime(year=x[0],month=x[1],day=x[2],
hour=x[3],minute=x[4],second=x[5]) for x in dints]
dates = np.array(dates,dtype='datetime64[s]')
dates = [np.datetime64(x) for x in dates]
if mytype=='xda':
dates = xr.DataArray(np.array(dates))
elif mytype=='nparr':
dates = np.array(dates)
elif mytype=='single':
dints=dints[0]
dates = dates[0]
test_out = ecco_v4_py.extract_yyyy_mm_dd_hh_mm_ss_from_datetime64(dates)
for test,expected in zip(test_out,np.array(dints).T):
print('test: ',test)
print('exp: ',expected)
test = test.values if mytype=='xda' else test
assert np.all(test==expected)
def test_get_grid(get_test_ds):
"""make sure we can make a grid ... that's it"""
grid = ecco_v4_py.get_llc_grid(get_test_ds)
| 2.34375 | 2 |
prompt_toolkit/contrib/ssh/__init__.py | anthonyrota/school-yr10-russian-mafia-game | 0 | 12798173 | # from .server import PromptToolkitSession, PromptToolkitSSHServer
# __all__ = [
# "PromptToolkitSession",
# "PromptToolkitSSHServer",
# ]
| 1.039063 | 1 |
src/genie/libs/parser/iosxe/tests/ShowLicenseSummary/cli/equal/golden_output2_expected.py | oianson/genieparser | 1 | 12798174 | expected_output = {
'license_usage':{
'C9300 48P DNA Advantage':{
'entitlement':'C9300-48 DNA Advantage',
'count':'2',
'status':'AUTHORIZED'
},
'C9300 48P Network Adv...':{
'entitlement':'C9300-48 Network Advan...',
'count':'2',
'status':'AUTHORIZED'
},
'C9300 24P Network Adv...':{
'entitlement':'C9300-24 Network Advan...',
'count':'1',
'status':'AUTHORIZED'
},
'C9300 24P DNA Advantage':{
'entitlement':'C9300-24 DNA Advantage',
'count':'1',
'status':'AUTHORIZED'
}
}
} | 1.53125 | 2 |
src/multi_SIR.py | suryadheeshjith/episimmer | 0 | 12798175 | <reponame>suryadheeshjith/episimmer
import sys
import ReadFile
import pickle
import World
import importlib.util
import os.path as osp
import policy_generator as pg
import matplotlib
import matplotlib.pyplot as plt
matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})
import numpy as np
def module_from_file(module_name, file_path):
spec = importlib.util.spec_from_file_location(module_name, file_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
def get_example_path():
return sys.argv[1]
def get_config_path(path):
config_filepath=osp.join(path,'config.txt')
return config_filepath
def get_file_paths(example_path,config_obj):
# File Names
locations_filename=None
agents_filename=osp.join(example_path,config_obj.agents_filename)
interactions_FilesList_filename=osp.join(example_path,config_obj.interactions_files_list)
events_FilesList_filename=osp.join(example_path,config_obj.events_files_list)
if config_obj.locations_filename=="":
locations_filename=None
else:
locations_filename=osp.join(example_path,config_obj.locations_filename)
return agents_filename, interactions_FilesList_filename, events_FilesList_filename, locations_filename
def get_file_names_list(example_path,interactions_FilesList_filename,events_FilesList_filename,config_obj):
# Reading through a file (for interactions/events) that contain file names which contain interactions and event details for a time step
interactions_files_list=None
events_files_list=None
if config_obj.interactions_files_list=='':
print('No Interaction files uploaded!')
else:
interactionFiles_obj=ReadFile.ReadFilesList(interactions_FilesList_filename)
interactions_files_list=list(map(lambda x : osp.join(example_path,x) ,interactionFiles_obj.file_list))
if interactions_files_list==[]:
print('No Interactions inputted')
if config_obj.events_files_list=='':
print('No Event files uploaded!')
else:
eventFiles_obj=ReadFile.ReadFilesList(events_FilesList_filename)
events_files_list=list(map(lambda x : osp.join(example_path,x) ,eventFiles_obj.file_list))
if events_files_list==[]:
print('No Events inputted')
return interactions_files_list, events_files_list
def get_model(example_path):
UserModel = module_from_file("Generate_model", osp.join(example_path,'UserModel.py'))
model = UserModel.UserModel()
return model
def get_policy(example_path):
Generate_policy = module_from_file("Generate_policy", osp.join(example_path,'Generate_policy.py'))
policy_list, event_restriction_fn=Generate_policy.generate_policy()
return policy_list, event_restriction_fn
if __name__=="__main__":
example_path = get_example_path()
config_filename = get_config_path(example_path)
# Read Config file using ReadFile.ReadConfiguration
config_obj=ReadFile.ReadConfiguration(config_filename)
agents_filename, interactions_FilesList_filename,\
events_FilesList_filename, locations_filename = get_file_paths(example_path,config_obj)
interactions_files_list, events_files_list = get_file_names_list(example_path,interactions_FilesList_filename,events_FilesList_filename,config_obj)
# User Model
model = get_model(example_path)
##########################################################################################
fp = open("multi_SIR.txt","w")
num_tests = 90
color_dict = {(1,1):'cyan',(2,1):'blue',(3,2):'grey',(4,2):'pink',(5,2):'orange',(5,3):'red',(6,2):'purple',(6,3):'green'}
pattern = ['dashed','solid','dotted']
tdicts = []
f = plt.figure()
f.set_figwidth(15)
f.set_figheight(8)
for key in color_dict.keys():
i,j = key
policy_list, event_restriction_fn = pg.generate_group_testing_tests_policy(num_tests, i, j)
world_obj=World.World(config_obj,model,policy_list,event_restriction_fn,agents_filename,interactions_files_list,locations_filename,events_files_list)
tdict, total_infection, total_quarantined_days, wrongly_quarantined_days, total_test_cost = world_obj.simulate_worlds(plot=False)
fp.write("("+str(i)+","+str(j)+") : ")
fp.write(str(tdict)+"\n")
tdicts.append(tdict)
for i,state in enumerate(tdict.keys()):
for j in range(len(tdict[state])):
tdict[state][j] /= 1000
plt.plot(tdict[state], color = color_dict[key], linestyle = pattern[i], label =state+"_"+str(key))
fp.close()
plt.legend(loc='upper right', shadow=True, bbox_to_anchor=(1.12, 1))
plt.xlabel("Timesteps")
plt.ylabel("Population proportion")
# plt.show()
plt.savefig('multi_SIR.pgf')
###############################################################################################
| 2.09375 | 2 |
pagi_api.py | RAIRLab/PAGIapi-python | 0 | 12798176 | <reponame>RAIRLab/PAGIapi-python<gh_stars>0
"""
Python PAGIworld API
"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, RAIR Lab"
__credits__ = ["<NAME>"]
__license__ = "MIT"
import math
import os
import socket
import time
# When True, outgoing messages are validated against the known command,
# sensor, and force names before being written to the socket.
ERROR_CHECK = True
VALID_COMMANDS = ["sensorRequest", "addForce", "loadTask", "print", "findObj", "setState",
                  "getActiveStates", "setReflex", "removeReflex", "getActiveReflexes"]
VALID_SENSORS = ["S", "BP", "LP", "RP", "A", "MDN", "MPN"]
# Sensor names L0..L4 and R0..R4, interleaved as L0, R0, L1, R1, ...
VALID_SENSORS += [side % num for num in range(5) for side in ("L%d", "R%d")]
# Grid of "V<i>.<j>" sensor names, i in 0..30, j in 0..20.
VALID_SENSORS += ["V%d.%d" % (col, row) for col in range(0, 31) for row in range(0, 21)]
# Grid of "P<i>.<j>" sensor names, i in 0..15, j in 0..10.
VALID_SENSORS += ["P%d.%d" % (col, row) for col in range(0, 16) for row in range(0, 11)]
VALID_FORCES = ["RHvec", "LHvec", "BMvec", "RHH", "LHH", "RHV", "LHV", "BMH", "BMV", "J", "BR",
                "RHG", "LHG", "RHR", "LHR"]
# pylint: disable=too-many-instance-attributes
class PAGIWorld(object):
"""
:type pagi_socket: socket.socket
:type __ip_address: str
:type __port: int
:type __timeout: float
:type __message_fragment: str
:type __task_file: str
:type message_stack: list
"""
def __init__(self, ip_address="", port=42209, timeout=3):
"""
:param ip:
:param port:
:return:
"""
self.pagi_socket = None
self.__ip_address = ip_address
self.__port = port
self.__timeout = timeout
self.__message_fragment = ""
self.__task_file = ""
self.message_stack = list()
self.connect(ip_address, port, timeout)
self.agent = PAGIAgent(self)
    def connect(self, ip_address="", port=42209, timeout=3):
        """
        Open a TCP socket to PAGIWorld and reset all per-connection state.

        :param ip_address: IP address of the machine running PAGIWorld; if
            blank, this machine's own address (looked up from its hostname)
            is used
        :type ip_address: str
        :param port: port PAGIWorld is listening on (42209 by default)
        :type port: int
        :param timeout: seconds socket reads wait before raising socket.timeout
        :type timeout: float
        :raises: ConnectionRefusedError if nothing is listening at the
            given address/port
        """
        if ip_address == "":
            ip_address = socket.gethostbyname(socket.gethostname())
        self.__ip_address = ip_address
        self.__port = port
        self.__timeout = timeout
        # Discard any partially received data and queued messages left over
        # from a previous connection, along with the remembered task file.
        self.__message_fragment = ""
        self.__task_file = ""
        self.message_stack = list()
        self.pagi_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.pagi_socket.connect((ip_address, port))
        # settimeout() leaves the socket in timeout mode, so reads raise
        # socket.timeout instead of blocking forever.
        self.pagi_socket.setblocking(False)
        self.pagi_socket.settimeout(timeout)
def disconnect(self):
"""
Close the socket to PAGIWorld and then reset internal variables (in case we just use
connect directly without creating new PAGIWorld instance)
:return:
"""
self.pagi_socket.close()
def __assert_open_socket(self):
"""
Make sure that we have an existing socket connection. If we don't, exception will be raised.
:return:
:raises: RuntimeError
"""
if self.pagi_socket is None:
raise RuntimeError("No open socket. Use connect() to open a new socket connection")
def send_message(self, message):
"""
Send a message to the socket. We make sure that the message is a valid action type, as well
verify that if the message is for a sensor or action, that it's a valid sensor or action
to prevent bad calls.
:param message:
:type message: str
:return:
:raises: RuntimeError
"""
self.__assert_open_socket()
if ERROR_CHECK:
command = message[:message.find(",")]
if command == "" or command not in VALID_COMMANDS:
raise RuntimeError("Invalid command found in the message '%s'" % message)
end = message[len(command)+1:].find(",")
if end == -1:
secondary = message[len(command)+1:]
else:
secondary = message[len(command)+1:end + len(command) + 1]
if command == "sensorRequest" and secondary not in VALID_SENSORS:
raise RuntimeError("Invalid sensor '%s' in message '%s'" % (secondary, message))
elif command == "addForce" and secondary not in VALID_FORCES:
raise RuntimeError("Invalid force '%s' in message '%s'" % (secondary, message))
# all messages must end with \n
if message[-1] != "\n":
message += "\n"
self.pagi_socket.send(message.encode())
def get_message(self, code="", block=False):
"""
Gets messages from the socket. If code is blank, then we just return the first message
from the socket, otherwise return the first matching message with that code, saving all
other messages to a stack. If block is set to False, and there's no response from the
socket, after self.__timeout seconds, function will raise socket.timeout exception. If
block is set to true, no exception will be thrown, but program will stop in this function
if socket doesn't return anything
:param code:
:type code: str
:param block:
:type block: bool
:return:
:raises: socket.timeout
"""
if block:
self.pagi_socket.setblocking(True)
response = self.__get_message_from_stack(code)
while True and response != "":
while "\n" not in self.__message_fragment:
self.__message_fragment += self.pagi_socket.recv(4096).decode()
message_index = self.__message_fragment.find("\n")
if message_index == -1:
break
else:
response = self.__message_fragment[:message_index]
self.__message_fragment = self.__message_fragment[message_index+1:]
if code == "" or (response[:len(code)] == code and response[len(code)] == ","):
break
else:
self.message_stack.append(response)
if block:
self.pagi_socket.setblocking(False)
self.pagi_socket.settimeout(self.__timeout)
return response
def __get_message_from_stack(self, code):
"""
Attempts to return a message from the stack if (1) the stack isn't empty and (2) either
code is blank or it matches something on the message stack
:param code:
:return: str
"""
if len(self.message_stack) > 0:
if code != "":
for index in range(len(self.message_stack)):
if self.message_stack[index][:len(code)] == code and \
self.message_stack[index][len(code)] == ",":
return self.message_stack.pop(0)
return None
else:
return self.message_stack.pop(0)
def load_task(self, task_file):
"""
Loads a task in PAGIworld. We additionally save the task file name so we can reset things
if necessary
:param task_file:
:type task_file: str
:raises: FileNotFoundError
"""
if not os.path.isfile(task_file):
raise RuntimeError("Task file at '%s' was not found" % task_file)
self.__task_file = task_file
self.send_message("loadTask,%s" % task_file)
def reset_task(self):
"""
Resets the task to the one that was loaded in self.load_task. If one wasn't loaded, then
a RuntimeError will be raised.
:raises: RuntimeError
"""
if self.__task_file == "" or self.__task_file is None:
raise RuntimeError("Cannot reset task, no previous task file found")
self.load_task(self.__task_file)
def print_text(self, text):
"""
Print text to the PAGIworld console window.
:param text:
:type text: str
:return:
"""
text = str(text)
self.send_message("print,%s" % text)
self.get_message(code="print")
def set_state(self, name, length):
"""
Set a state within PAGIworld.
:param name:
:type name: str
:param length:
:type length: int
:return:
"""
self.send_message("setState,%s,%d" % (name, length))
self.get_message(code="setState")
def remove_state(self, name):
"""
"Removes" states from PAGIworld by just setting it's duration to zero (so that can't ever
really be in a state)
:param name:
:return:
"""
self.send_message("setState,%s,0" % name)
self.get_message(code="setState")
def get_all_states(self):
"""
Returns a list of all states that are currently in PAGIworld.
:return: list
"""
self.send_message("getActiveStates")
states = self.get_message(code="activeStates").split(",")
return states[1:]
def set_reflex(self, name, conditions, actions=None):
"""
Sets a reflex in PAGIworld to be carried out on conditions.
:param name:
:param conditions:
:param actions:
:return:
"""
if actions is not None:
self.send_message("setReflex,%s,%s,%s" % (name, conditions, actions))
else:
self.send_message("setReflex,%s,%s" % (name, conditions))
self.get_message(code="setReflex")
def remove_reflex(self, name):
"""
Removes a reflex completely from PAGIworld
:param name:
:return:
"""
self.send_message("removeReflex,%s" % name)
self.get_message(code="removeReflex")
def get_all_reflexes(self):
"""
Returns a list of all the active reflexes in PAGIworld
:return: list
"""
self.send_message("getActiveReflexes")
reflexes = self.get_message(code="activeReflexes").split(",")
return reflexes[1:]
def drop_item(self, name, x_coord, y_coord, description=None):
"""
Creates an item and drops into into PAGIworld. These items are the ones pre-built into
PAGIworld.
:param name:
:param x:
:param y:
:param n:
:return:
"""
if description is None or description == "":
self.send_message("dropItem,%s,%f,%f" % (name, x_coord, y_coord))
else:
self.send_message("dropItem,%s,%f,%f,%s" % (name, x_coord, y_coord, description))
self.get_message(code="dropItem")
# pylint: disable=too-many-arguments
def create_item(self, name, image_file, x, y, m, ph, r, e, k, degrees=True):
"""
Creates a new item in PAGIworld with the specified properties
:param name:
:param image_file:
:param x:
:param y:
:param m:
:param ph:
:param r:
:param e:
:param k:
:param degrees:
:return:
"""
if degrees:
r = r * math.pi / 180.
self.send_message("createItem,%s,%s,%f,%f,%f,%d,%f,%f,%d" % (name, image_file,
x, y, m, ph, r, e, k))
self.get_message(code="createItem")
class PAGIAgent(object):
    """
    High-level wrapper around the PAGIworld agent body: jumping, rotation,
    movement, vision sensors and the two hands.

    :type pagi_world: PAGIWorld
    :type left_hand: PAGIAgentHand
    :type right_hand: PAGIAgentHand
    """
    def __init__(self, pagi_world):
        # The agent is a thin facade over an already-connected PAGIWorld.
        if not isinstance(pagi_world, PAGIWorld):
            raise ValueError("You must pass in a valid PagiWorld variable to PagiAgent")
        self.pagi_world = pagi_world
        self.left_hand = PAGIAgentHand('l', pagi_world)
        self.right_hand = PAGIAgentHand('r', pagi_world)
    def jump(self):
        """
        Make the agent try to jump. He can only jump while his bottom edge is
        touching something solid; otherwise nothing happens.

        :return: bool True if the agent actually jumped, otherwise False
        """
        self.pagi_world.send_message("addForce,J,1000")
        # PAGIworld answers "J,1" on success and "J,0" otherwise.
        response = self.pagi_world.get_message(code="J").split(",")
        return int(response[1]) == 1
    def reset_agent(self):
        """
        Reset the agent back to a starting pose. Currently only the rotation
        is reset; hand repositioning is not implemented.
        """
        self.reset_rotation()
    def reset_rotation(self):
        """
        Rotate the agent back to 0 degrees (looking upward).
        """
        self.rotate(0, absolute=True)
    def rotate(self, val, degrees=True, absolute=False):
        """
        Rotate the agent. With ``absolute=True`` the target is an absolute
        heading measured from 0 (looking up); otherwise ``val`` is meant to be
        a relative rotation. Headings increase as sketched::

                  0
            90  agent  270
                 180

        :param val: angle to rotate to/by
        :type val: float
        :param degrees: when False, ``val`` is given in radians
        :type degrees: bool
        :param absolute: rotate to an absolute heading instead of relatively
        :type absolute: bool
        """
        if not degrees:
            val = val * 180. / math.pi
        if absolute:
            val %= 360.
        # NOTE(review): the current heading is subtracted even when
        # absolute=False, which contradicts the documented "relative"
        # behaviour — confirm whether this line belongs inside the if above.
        val -= self.get_rotation()
        self.pagi_world.send_message("addForce,BR,%f" % val)
        self.pagi_world.get_message(code="BR")
    def get_rotation(self, degrees=True):
        """
        Return the agent's rotation (0 is looking upward).

        :param degrees: when True, convert the sensor value with 180/pi
        :type degrees: bool
        :return: float rotation value
        """
        self.pagi_world.send_message("sensorRequest,A")
        response = self.pagi_world.get_message(code="A").split(",")
        rotation = float(response[-1])
        # NOTE(review): the sensor value appears to be radians (it is
        # multiplied by 180/pi when degrees=True), yet it is wrapped with
        # % 360 rather than % (2*pi) — confirm the sensor's units.
        rotation %= 360
        if degrees:
            rotation = rotation * 180 / math.pi
        return rotation
    def move_paces(self, paces, direction='L'):
        """
        Try to move the agent a number of paces (one pace = one body width)
        to the left or right, sleeping 2 seconds between pushes.

        :param paces: number of pushes to perform
        :type paces: int
        :param direction: 'L'/'LEFT' or 'R'/'RIGHT' (case-insensitive)
        :type direction: str
        """
        assert_left_or_right(direction)
        # Positive x pushes toward the world's right.
        val = 1 if direction[0].upper() == "R" else -1
        cnt = 0
        while cnt < paces:
            self.send_force(x=(val * 1000), absolute=True)
            time.sleep(2)
            cnt += 1
    def send_force(self, x=0, y=0, absolute=False):
        """
        Push the agent's body with the vector force (x, y). When ``absolute``
        is False the vector is relative to the direction the agent faces
        (+y toward his head, +x toward his right side); when True it is in
        world coordinates and is first re-expressed relative to the agent's
        current rotation.

        :param x: horizontal force component
        :type x: float
        :param y: vertical force component
        :type y: float
        :param absolute: interpret (x, y) in world coordinates
        :type absolute: bool
        """
        x = float(x)
        y = float(y)
        if not absolute or (x == 0 and y == 0):
            self.pagi_world.send_message("addForce,BMvec,%f,%f" % (x, y))
        else:
            rotation = self.get_rotation()
            if x != 0 and y != 0:
                # NOTE(review): this magnitude computation looks dubious —
                # z is not hypot(x, y); confirm the intended force magnitude.
                ax = math.fabs(x)
                ay = math.fabs(y)
                hyp = math.sqrt(ax ** 2 + ay ** 2)
                angle = math.acos(ay / hyp)
                z = math.sin(angle) * ay
            else:
                if x != 0:
                    z = math.fabs(x)
                else:
                    z = math.fabs(y)
            nx, ny = PAGIAgent.__get_relative_vector(x, y, z, rotation)
            # NOTE(review): leftover debug output — consider removing.
            print(nx, ny)
            self.pagi_world.send_message("addForce,BMvec,%f,%f" % (nx, ny))
        self.pagi_world.get_message(code="BMvec")
    @staticmethod
    def __get_relative_vector(x, y, z, rotation):
        """
        Re-express a force of magnitude ``z`` pointing in the direction of
        (x, y) as a vector relative to an agent rotated by ``rotation``
        degrees. TODO: Finish and simplify (kept from the original author).

        :param x: world-frame x component (sign/zero used to pick a quadrant)
        :param y: world-frame y component (sign/zero used to pick a quadrant)
        :param z: force magnitude
        :param rotation: agent heading in degrees
        :return: (nx, ny) agent-relative force components
        """
        # Derive the world-frame angle of the requested force.
        if x == 0:
            if y < 0:
                angle = 180
            else:
                angle = 0
        elif y == 0:
            if x > 0:
                angle = 270
            else:
                angle = 90
        elif x < 0:
            # NOTE(review): acos(z / y) / acos(z / x) mixes a magnitude with
            # a signed component — verify this trigonometry.
            if y > 0:
                angle = math.acos(z / y) * 180 / math.pi
            else:
                angle = math.acos(z / x) * 180 / math.pi + 90
        else:
            if y < 0:
                angle = math.acos(z / y) * 180 / math.pi + 180
            else:
                angle = math.acos(z / x) * 180 / math.pi + 270
        adjusted = rotation - angle
        radjusted = adjusted * math.pi / 180
        # Axis-aligned cases map directly onto one component.
        if adjusted == 0:
            return 0, z
        elif adjusted == 180 or adjusted == -180:
            return 0, (-1 * z)
        elif adjusted == 90 or adjusted == -270:
            return z, 0
        elif adjusted == 270 or adjusted == -90:
            return (-1 * z), 0
        else:
            if adjusted > 0:
                # NOTE(review): "radjusted - 90" subtracts degrees from a
                # radian value — confirm; probably should be math.radians(90).
                if adjusted < 90:
                    ny = math.cos(radjusted) * z
                    nx = math.sqrt(math.pow(z, 2) - math.pow(ny, 2))
                elif adjusted < 180:
                    nx = math.cos(radjusted - 90) * z
                    ny = math.sqrt(math.pow(z, 2) - math.pow(nx, 2)) * -1
                elif adjusted < 270:
                    ny = math.cos(radjusted - 180) * z * -1
                    nx = math.sqrt(math.pow(z, 2) - math.pow(ny, 2)) * -1
                else:
                    nx = math.cos(radjusted - 270) * z * -1
                    ny = math.sqrt(math.pow(z, 2) - math.pow(nx, 2))
            else:
                # NOTE(review): the "< -180" and "< -270" branches are
                # unreachable because "< -90" already matches them first.
                if adjusted < -90:
                    ny = math.cos(radjusted * -1) * z
                    nx = math.sqrt(math.pow(z, 2) - math.pow(ny, 2)) * -1
                elif adjusted < -180:
                    nx = math.cos(radjusted * -1 - 90) * z * -1
                    ny = math.sqrt(math.pow(z, 2) - math.pow(nx, 2)) * -1
                elif adjusted < -270:
                    ny = math.cos(radjusted * -1 - 180) * z * -1
                    nx = math.sqrt(math.pow(z, 2) - math.pow(ny, 2))
                else:
                    nx = math.cos(radjusted * -1 - 270) * z
                    ny = math.sqrt(math.pow(z, 2) - math.pow(nx, 2))
            return nx, ny
    def get_position(self):
        """
        Return the agent's (x, y) coordinates in the world.

        :return: tuple(float, float) of coordinates of agent
        """
        self.pagi_world.send_message("sensorRequest,BP")
        response = self.pagi_world.get_message(code="BP").split(",")
        return float(response[1]), float(response[2])
    def get_periphal_vision(self):
        """
        Return the agent's peripheral vision as 11 rows x 16 columns.
        vision[0][0] is the lower-left of the field, vision[10][15] the
        upper-right. (Method name keeps the historical "periphal" spelling
        for API compatibility.)

        :return: list of size 11 x 16
        """
        self.pagi_world.send_message("sensorRequest,MPN")
        response = self.pagi_world.get_message(code="MPN").split(",")
        return self.__process_vision(response, 16)
    def get_detailed_vision(self):
        """
        Return the agent's detailed vision grid, 21 values per row.

        :return: list of rows of vision values
        """
        self.pagi_world.send_message("sensorRequest,MDN")
        response = self.pagi_world.get_message(code="MDN").split(",")
        return self.__process_vision(response, 21)
    @staticmethod
    def __process_vision(response, column_length):
        """
        Split a raw vision response into rows of ``column_length`` values.
        response[0] is the message code and is skipped.

        :param response: comma-split sensor reply, code first
        :param column_length: number of values per row
        :return: list of rows
        """
        vision = list()
        current = list()
        for j in range(1, len(response)):
            # Start a new row every column_length values.
            if (j - 1) % column_length == 0:
                if len(current) > 0:
                    vision.append(current)
                current = list()
            current.append(response[j])
        # Flush the final (possibly partial) row.
        vision.append(current)
        return vision
    def center_hands(self):
        """
        Move both hands to the center of the agent's body. Not implemented.

        :raises: NotImplementedError
        """
        raise NotImplementedError
class PAGIAgentHand(object):
    """
    Convenience wrapper around one of the agent's hands ('L' or 'R').

    :type pagi_world: PAGIWorld
    """
    def __init__(self, hand, pagi_world):
        assert_left_or_right(hand)
        self.hand = hand[0].upper()
        self.pagi_world = pagi_world

    def get_position(self):
        """
        Return the (x, y) position of this hand relative to the agent.

        :return: tuple(float, float)
        """
        sensor = "%sP" % self.hand
        self.pagi_world.send_message("sensorRequest," + sensor)
        reply = self.pagi_world.get_message(code=sensor).split(",")
        return float(reply[1]), float(reply[2])

    def release(self):
        """Open the hand, dropping anything it was holding."""
        command = "%sHR" % self.hand
        self.pagi_world.send_message(command)
        self.pagi_world.get_message(code=command)

    def grab(self):
        """Close the hand, grabbing anything it currently touches."""
        command = "%sHG" % self.hand
        self.pagi_world.send_message(command)
        self.pagi_world.get_message(code=command)

    def send_force(self, x, y, absolute=False):
        """
        Push the hand with the vector force (x, y).

        :param x: horizontal force component
        :type x: float
        :param y: vertical force component
        :type y: float
        :param absolute: world-relative forces were never implemented; when
            True, no force message is sent at all
        :type absolute: bool
        """
        if not absolute:
            self.pagi_world.send_message("%sHvec,%f,%f" % (self.hand, x, y))
        else:
            # Absolute hand forces are unimplemented upstream; keep the
            # original no-op behaviour.
            pass
        self.pagi_world.get_message(code="%sHvec" % self.hand)
def assert_left_or_right(direction):
    """
    Validate that ``direction`` names a side: case-insensitive "L", "R",
    "LEFT" or "RIGHT". Raise ValueError otherwise.

    :param direction: side designator to validate
    :raises: ValueError
    """
    if direction.upper() not in ("R", "L", "RIGHT", "LEFT"):
        raise ValueError("You can only use a L or R value for hands")
| 2.28125 | 2 |
code_python/Easy_Run.py | pinxau1000/Computer-Vision | 0 | 12798177 | <filename>code_python/Easy_Run.py
"""Interactive launcher for the computer-vision demo scripts.

Shows a main menu of topics; each topic opens a submenu whose entries run one
demo script with the same Python interpreter that started this launcher.
Entering "0" in any menu exits the whole program (this mirrors the original
behaviour, where leaving a submenu with "0" also left the main loop).
"""
import subprocess
import sys

_PYTHON_INTERPRETER = sys.executable
_CURRENT_DIRECTORY = sys.path[0]  # kept for parity with the original script

# Menu wiring: key -> (header line, [(option letter, label, script path)]).
MENUS = {
    "1": ("---------------- Noise Remove ----------------", [
        ("a", "Plot Original Pictures", "1.1/original-pictures.py"),
        ("b", "Use Mean Filter", "1.1/mean-filter.py"),
        ("c", "Visualize Mean Filter Anchor Effect", "1.1/mean-filter-anchor.py"),
        ("d", "Use Median Filter", "1.1/median-filter.py"),
        ("e", "Use Gaussian Filter", "1.1/gaussian-filter.py"),
        ("f", "Visualize Gaussian Filter Sigma Effect", "1.1/gaussian-filter-sigma.py"),
        ("g", "Use Bilateral Filter", "1.1/bilateral-filter.py"),
        ("h", "Visualize Bilateral Filter Sigma Effect", "1.1/bilateral-filter-sigma.py"),
    ]),
    "2": ("---------------- Edge Extraction ----------------", [
        ("a", "Visualize Data Type Problems on Edge Detection", "1.2/sobel-filter-ddepth.py"),
        ("b", "Use Sobel Operator", "1.2/sobel-filter.py"),
        ("c", "Use Scharr 3x3 Kernel", "1.2/scharr-filter.py"),
        ("d", "Use Scharr 3x3 Kernel and Apply Threshold", "1.2/scharr-filter-threshold.py"),
        ("e", "Use Prewitt Filter", "1.2/prewitt-filter.py"),
        ("f", "Use Roberts Filter", "1.2/roberts-filter.py"),
        ("g", "Use Canny Edge Detector", "1.2/canny-filter.py"),
        ("h", "Generate an Animation of Canny", "1.2/canny-filter-animate.py"),
        ("i", "Use Laplacian Filter", "1.2/laplacian-filter.py"),
    ]),
    "3": ("---------------- Corner Detection ----------------", [
        ("a", "Apply the Harris to an image", "1.3/harris-detector.py"),
        ("b", "Visualize the Effect of Block Size on Harris", "1.3/harris-detector-bsize.py"),
        ("c", "Visualize the Effect of Sobel Kernel Aperture on Harris", "1.3/harris-detector-ksize.py"),
        ("d", "Visualize the Effect of Harris Free Parameter on Harris", "1.3/harris-detector-k.py"),
        ("e", "Generate an Animation Sweeping the Harris Parameters", "1.3/harris-detector-animate.py"),
    ]),
    "4": ("-------------------- Custom Harris --------------------", [
        ("a", "Apply the Custom Harris Corner Detector to an image", "my_Harris/my-harris.py"),
        ("b", "Compare the OpenCV and Custom Harris results", "my_Harris/my-harris-compare.py"),
    ]),
}

MAIN_HEADER = "---------------- MAIN MENU ----------------"
MAIN_LABELS = [("1", "Noise Removal"), ("2", "Edge Extraction"),
               ("3", "Corner Detection"), ("4", "Custom Harris")]


def _run_script(script):
    """Run one demo script with the current interpreter.

    The command is passed as an argument list: the original passed a single
    string without shell=True, which only works on Windows and raises
    FileNotFoundError on POSIX.
    """
    subprocess.run([_PYTHON_INTERPRETER, script])


def _show_menu(header, rows):
    """Print one menu: header, option rows, the exit row and a footer."""
    print(header)
    for key, label in rows:
        print("%s - %s" % (key, label))
    print("0 - Exit")
    print("-" * len(header))


def _submenu(header, entries):
    """Loop over one submenu until "0" is entered; return the final choice."""
    scripts = {key: path for key, _, path in entries}
    choice = ""
    while choice != "0":
        _show_menu(header, [(key, label) for key, label, _ in entries])
        choice = str(input("Option:\t"))
        if choice in scripts:
            _run_script(scripts[choice])
        elif choice != "0":
            print("Invalid Option!")
    return choice


def main():
    """Top-level menu loop."""
    choice = ""
    while choice != "0":
        _show_menu(MAIN_HEADER, MAIN_LABELS)
        choice = str(input("Option:\t"))
        if choice in MENUS:
            # A "0" inside the submenu exits the whole program, matching the
            # original script's behaviour.
            choice = _submenu(*MENUS[choice])
        elif choice != "0":
            print("Invalid Option!")


if __name__ == "__main__":
    main()
| 3.140625 | 3 |
ped.py | rchui/Stat530 | 0 | 12798178 | import csv
import sys

# Raise the CSV field limit: genotype columns can exceed the default cap.
csv.field_size_limit(sys.maxsize)
"""Reads in data passed by the user from a CSV file."""
# Build a dict keyed on column 2 of each data row (header row is skipped,
# and the trailing column is dropped with row.pop()).
count = 0
fileName = sys.argv[1]
csvArray = {}
with open(fileName) as csvFile:
    reader = csv.reader(csvFile)
    for row in reader:
        row.pop()
        if count != 0:
            readRow = [i for i in row]
            csvArray[readRow[2]] = readRow
        count += 1
# Redundant: the with-block above already closed the file.
csvFile.close()
"""Find rows to remove."""
# NOTE(review): rmArray is never used; the loop below actually fills empty
# phenotype columns (9 and 12) with the missing-value code -9.
rmArray = []
for i in csvArray.keys():
    if csvArray[i][12] == '':
        csvArray[i][12] = -9
    if csvArray[i][9] == '':
        csvArray[i][9] = -9
"""Read in ped file"""
# Each ped row: split on whitespace; column 1 is the sample id that links
# back into csvArray.
pedArray = []
for line in open(sys.argv[2]):
    pedArray.append(line.split())
for row in pedArray:
    # NOTE(review): csv.reader yields strings, so these comparisons against
    # the ints 1/0 are always False in Python 3 — confirm whether this
    # script was written for a different data encoding (or Python 2).
    if csvArray[row[1]][5] == 1:
        row[4] = 2
    else:
        row[4] = 1
    # NOTE(review): after the -9 substitution above, max() may compare a str
    # with an int, which raises TypeError on Python 3 — verify inputs.
    if max(csvArray[row[1]][9], csvArray[row[1]][12]) == 0:
        row[5] = 1
    elif max(csvArray[row[1]][9], csvArray[row[1]][12]) == 1:
        row[5] = 2
    else:
        row[5] = -9
# Append the updated rows, space-separated, to the output file.
file = open(sys.argv[3], 'a')
output = ""
for row in pedArray:
    output = str(row[0])
    for i in range(len(row)):
        if i != 0:
            output += " " + str(row[i])
    output += "\n"
    file.write(output)
| 3.296875 | 3 |
rss-feeds.py | jimit105/rss-feeds-articles | 1 | 12798179 | <reponame>jimit105/rss-feeds-articles<gh_stars>1-10
# -*- coding: utf-8 -*-
"""
@author: Jimit.Dholakia
"""
from datetime import datetime, timedelta
import time
import os
import itertools
import feedparser
import urllib.parse
import dateutil.parser
import signal
import sys
# Force IST so strftime('%Z') renders timestamps in Asia/Kolkata time.
os.environ['TZ'] = 'Asia/Kolkata'
# time.tzset() only exists on Unix; skip it on Windows ('nt').
if os.name != 'nt':
    time.tzset()
# Timestamp format used in the generated article list.
TIME_FMT = '%b %d, %Y %H:%M:%S %Z'
# Feeds to aggregate: personal blogs plus ML/AI vendor and community blogs.
RSS_FEEDS = ['https://jimit105.medium.com/feed',
             'https://jimit105.github.io/pytricks/rss.xml',
             'https://scitech105.blogspot.com/rss.xml',
             'https://jimit105.github.io/medium-articles/rss.xml',
             'https://www.pyimagesearch.com/feed/',
             'https://machinelearningmastery.com/feed/',
             'https://www.fast.ai/atom.xml',
             'https://openai.com/blog/rss/',
             'https://research.fb.com/feed/',
             'http://googleaiblog.blogspot.com/atom.xml',
             'https://blogs.microsoft.com/ai/feed/',
             'https://www.analyticsvidhya.com/feed/',
             'https://www.hackerearth.com/blog/machine-learning/feed',
             'https://mlfromscratch.com/rss/',
             'https://www.mygreatlearning.com/blog/category/artificial-intelligence/feed/',
             'https://blog.tensorflow.org/atom.xml',
             'http://feeds.feedburner.com/kdnuggets-data-mining-analytics',
             'https://lazyprogrammer.me/feed/'
             ]
# Number of newest articles kept in the generated README.
TOP_N = 250
def handler(signum, frame):
    """SIGALRM watchdog: abort the whole run when feed fetching stalls."""
    farewell = 'Took too long.. Goodbye!'
    print('Signal handler called with signal', signum)
    sys.exit(farewell)
def parse_date(input_date):
    """Parse a date string and shift it by the local UTC offset.

    NOTE: subtracting time.timezone assumes the parsed value is in local
    time — presumably converting to UTC; verify against the feed data.
    """
    parsed = dateutil.parser.parse(input_date)
    return parsed - timedelta(seconds=time.timezone)
def convert_timezone(datetime_struct_time):
    """Convert a local struct_time into a datetime shifted by the UTC offset."""
    local_dt = datetime.fromtimestamp(time.mktime(datetime_struct_time))
    return local_dt - timedelta(seconds=time.timezone)
def fetch_feeds(feed_url):
    """
    Download and parse one RSS/Atom feed.

    Each entry becomes a tuple ``(html_snippet, updated_datetime)`` where the
    snippet is a <p> block linking to the article. On any failure an empty
    list is returned; previously ``output`` was only created inside the try
    block, so a failure in ``feedparser.parse`` made the final ``return
    output`` raise NameError.

    :param feed_url: URL of the feed to fetch
    :return: list of (str, datetime) tuples
    """
    output = []
    try:
        feed = feedparser.parse(feed_url)
        for entry in feed.entries:
            # Prefer the pre-parsed timestamp; fall back to parsing the raw
            # string when feedparser could not parse it. (The two original
            # branches differed only in this choice.)
            if entry.updated_parsed is None:
                updated = parse_date(entry.updated)
            else:
                updated = convert_timezone(entry.updated_parsed)
            article = ('<p><a href="' + entry.link + '" target="_blank">' +
                       entry.title + '</a><br/>' + feed.feed.title +
                       ' | ' + updated.strftime(TIME_FMT) + '</p>')
            output.append((article, updated))
        print('Fetching complete for ' + feed_url)
    except Exception as e:
        print('Fetching failed for ' + feed_url + '\tException: ' + str(e))
        print('Exception:', str(e))
    return output
# Watchdog: give the whole fetch at most 300 seconds.
# NOTE(review): signal.SIGALRM/alarm only exist on Unix — this fails on
# Windows; confirm the script only runs on Unix CI.
signal.signal(signal.SIGALRM, handler)
signal.alarm(300)
result = list(map(fetch_feeds, RSS_FEEDS))
signal.alarm(0)
# Flatten per-feed lists, drop duplicates, newest first.
merged = list(itertools.chain(*result))
merged = list(set(merged))
output = sorted(merged, key=lambda x: x[-1], reverse=True)
all_articles = ''
for article in output[:TOP_N]:
    all_articles += article[0]
current_time = time.strftime(TIME_FMT, time.localtime())
action_badge = ''
maintainer_badge = '[](https://github.com/jimit105)'
linkedin_badge = '[](https://www.linkedin.com/in/jimit105/)'
medium_badge = '[](https://jimit105.medium.com/)'
# NOTE(review): the line below appears truncated/corrupted (unbalanced
# quotes around '-yellowgreen)'); it likely assembled a "Last Updated"
# badge from current_time — restore from version control.
header = action_badge + '\n' + maintainer_badge + '\n' + linkedin_badge + '\n' + medium_badge + '\n + '-yellowgreen)' + '\n\n'
complete_text = header + all_articles
with open('README.md', 'w') as f:
    f.write(complete_text)
print('RSS Feeds Update Complete')
| 1.726563 | 2 |
test_project_2/demo/models.py | Munduruca/django | 0 | 12798180 | <reponame>Munduruca/django
from django.db import models
class Book(models.Model):
    """A book with a unique title and a free-text description."""
    # Short title; uniqueness is enforced at the database level.
    title = models.CharField(max_length=36, unique=True)
    # NOTE(review): default=None without null=True will fail at save time
    # once the database enforces NOT NULL — confirm intent (max_length on a
    # TextField is also only enforced in forms, not in the database).
    description = models.TextField(max_length=256,
                                   default=None)
homeassistant/components/laundrify/__init__.py | liangleslie/core | 30,023 | 12798181 | """The laundrify integration."""
from __future__ import annotations
from laundrify_aio import LaundrifyAPI
from laundrify_aio.exceptions import ApiConnectionException, UnauthorizedException
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ACCESS_TOKEN, Platform
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DEFAULT_POLL_INTERVAL, DOMAIN
from .coordinator import LaundrifyUpdateCoordinator
PLATFORMS = [Platform.BINARY_SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up laundrify from a config entry."""
    session = async_get_clientsession(hass)
    client = LaundrifyAPI(entry.data[CONF_ACCESS_TOKEN], session)

    # Validate credentials up front; translate API errors into the config
    # entry states Home Assistant understands.
    try:
        await client.validate_token()
    except UnauthorizedException as err:
        raise ConfigEntryAuthFailed("Invalid authentication") from err
    except ApiConnectionException as err:
        raise ConfigEntryNotReady("Cannot reach laundrify API") from err

    coordinator = LaundrifyUpdateCoordinator(hass, client, DEFAULT_POLL_INTERVAL)
    # Fetch initial data before the platforms are set up.
    await coordinator.async_config_entry_first_refresh()

    domain_data = hass.data.setdefault(DOMAIN, {})
    domain_data[entry.entry_id] = {"api": client, "coordinator": coordinator}

    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a config entry."""
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unload_ok:
        # Only drop our stored state once every platform unloaded cleanly.
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok
| 2.15625 | 2 |
swexpert/d3/sw_3499.py | ruslanlvivsky/python-algorithm | 3 | 12798182 | <gh_stars>1-10
from itertools import zip_longest


def riffle(cards):
    """Return the deck riffle-shuffled: the first (larger) half interleaved
    with the second half, e.g. [1, 2, 3, 4, 5] -> [1, 4, 2, 5, 3].

    Splitting at ceil(n / 2) and round-robin merging handles both parities,
    replacing the original duplicated even/odd branches.
    """
    half = (len(cards) + 1) // 2
    return [card
            for pair in zip_longest(cards[:half], cards[half:])
            for card in pair
            if card is not None]


def main():
    """Read the SWEA-formatted input and print one shuffled deck per case."""
    test_cases = int(input())
    for t in range(1, test_cases + 1):
        # The declared card count is read but, as before, the actual split
        # uses len(cards).
        n = int(input())
        cards = list(input().split())
        deck = riffle(cards)
        print('#{} {}'.format(t, ' '.join(deck)))


if __name__ == '__main__':
    main()
helloworld-tls/src/app/__init__.py | JeNeSuisPasDave/Selenium-and-TLS | 2 | 12798183 | # Copyright 2017 <NAME>
#
# Licensed under the MIT License. If the LICENSE file is missing, you
# can find the MIT license terms here: https://opensource.org/licenses/MIT
from flask import Flask, render_template
from config import config
def create_app(config_name):
    """Application factory: build and configure a Flask app for the named
    configuration profile, then register its blueprints."""
    app = Flask(__name__)
    cfg = config[config_name]
    app.config.from_object(cfg)
    cfg.init_app(app)

    # attach routes and custom error pages here
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint)

    return app
| 1.992188 | 2 |
lux_ai/lux_gym/reward_spaces.py | mrzhuzhe/Kaggle_Lux_AI_2021 | 44 | 12798184 | from abc import ABC, abstractmethod
import copy
import logging
import numpy as np
from scipy.stats import rankdata
from typing import Dict, NamedTuple, NoReturn, Tuple
from ..lux.game import Game
from ..lux.game_constants import GAME_CONSTANTS
from ..lux.game_objects import Player
def count_city_tiles(game_state: "Game") -> np.ndarray:
    """Per-player city-tile counts, as a length-2 array."""
    tile_counts = [player.city_tile_count for player in game_state.players]
    return np.array(tile_counts)
def count_units(game_state: "Game") -> np.ndarray:
    """Per-player unit counts, as a length-2 array."""
    unit_counts = [len(player.units) for player in game_state.players]
    return np.array(unit_counts)
def count_total_fuel(game_state: "Game") -> np.ndarray:
    """Per-player total fuel summed over all of that player's cities."""
    totals = []
    for player in game_state.players:
        totals.append(sum(city.fuel for city in player.cities.values()))
    return np.array(totals)
def count_research_points(game_state: "Game") -> np.ndarray:
    """Per-player research points, as a length-2 array."""
    points = [player.research_points for player in game_state.players]
    return np.array(points)
def should_early_stop(game_state: "Game") -> bool:
    """
    True when the episode is effectively decided: either side has no city
    tiles or no units left, or either side controls at least 75% of all city
    tiles or of all units.
    """
    city_tiles = count_city_tiles(game_state)
    units = count_units(game_state)
    city_share = city_tiles / max(city_tiles.sum(), 1)
    unit_share = units / max(units.sum(), 1)
    return ((city_tiles == 0).any() or
            (units == 0).any() or
            (city_share >= 0.75).any() or
            (unit_share >= 0.75).any())
class RewardSpec(NamedTuple):
    """
    Static description of a reward space: per-step reward bounds, whether the
    two players' rewards always sum to zero, and whether a non-zero reward is
    emitted only once per episode (e.g. only the final game result).
    """
    reward_min: float
    reward_max: float
    zero_sum: bool
    only_once: bool
# All reward spaces defined below
class BaseRewardSpace(ABC):
    """
    A class used for defining a reward space and/or done state for either the
    full game or a sub-task.
    """
    def __init__(self, **kwargs):
        # Leftover kwargs are tolerated (subclasses consume what they know)
        # but logged so config typos remain visible.
        if kwargs:
            logging.warning(f"RewardSpace received unexpected kwargs: {kwargs}")
    @staticmethod
    @abstractmethod
    def get_reward_spec() -> RewardSpec:
        # Static description (bounds, zero-sum, one-shot) of this space.
        pass
    @abstractmethod
    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        # Returns ((reward_p0, reward_p1), done) for the current step.
        pass
    def get_info(self) -> Dict[str, np.ndarray]:
        # Optional extra diagnostics; empty by default.
        return {}
# Full game reward spaces defined below
class FullGameRewardSpace(BaseRewardSpace):
    """
    A reward space for the full game: subclasses implement compute_rewards()
    and the done flag is passed through unchanged by default.
    """
    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        return self.compute_rewards(game_state, done), done
    @abstractmethod
    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        # Returns (reward_p0, reward_p1) for the current step.
        pass
class GameResultReward(FullGameRewardSpace):
    """
    Sparse win/loss reward: +1 to the winner and -1 to the loser, emitted
    only on the final step (0., 0. before then; 0., 0. to both on a tie).
    """
    @staticmethod
    def get_reward_spec() -> RewardSpec:
        return RewardSpec(
            reward_min=-1.,
            reward_max=1.,
            zero_sum=True,
            only_once=True
        )
    def __init__(self, early_stop: bool = False, **kwargs):
        super(GameResultReward, self).__init__(**kwargs)
        # When set, episodes terminate as soon as the outcome is effectively
        # decided (see should_early_stop).
        self.early_stop = early_stop
    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        if self.early_stop:
            done = done or should_early_stop(game_state)
        return self.compute_rewards(game_state, done), done
    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        if not done:
            return 0., 0.
        # reward here is defined as the sum of number of city tiles with unit count as a tie-breaking mechanism
        rewards = [int(GameResultReward.compute_player_reward(p)) for p in game_state.players]
        # rankdata maps the two scores to ranks (1, 2) — ties get 1.5 each —
        # which the affine rescale turns into -1/+1 (0 each on a tie).
        rewards = (rankdata(rewards) - 1.) * 2. - 1.
        return tuple(rewards)
    @staticmethod
    def compute_player_reward(player: Player):
        ct_count = player.city_tile_count
        unit_count = len(player.units)
        # max board size is 32 x 32 => 1024 max city tiles and units,
        # so this should keep it strictly so we break by city tiles then unit count
        return ct_count * 10000 + unit_count
class CityTileReward(FullGameRewardSpace):
    """Dense reward: each player's city-tile count scaled into [0, 1]."""

    @staticmethod
    def get_reward_spec() -> RewardSpec:
        return RewardSpec(
            reward_min=0.,
            reward_max=1.,
            zero_sum=False,
            only_once=False
        )

    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        # 1024 = 32 * 32 is the maximum possible number of city tiles.
        scaled = count_city_tiles(game_state) / 1024.
        return tuple(scaled)
class StatefulMultiReward(FullGameRewardSpace):
    """
    Dense shaped reward: a weighted sum of per-step *changes* in city tiles,
    units, research points and fuel, plus the final game result. Keeps the
    previous step's counts as internal state, so instances must not be shared
    across concurrently running environments.
    """
    @staticmethod
    def get_reward_spec() -> RewardSpec:
        return RewardSpec(
            reward_min=-1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
            reward_max=1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
            zero_sum=False,
            only_once=False
        )
    def __init__(
            self,
            positive_weight: float = 1.,
            negative_weight: float = 1.,
            early_stop: bool = False,
            **kwargs
    ):
        assert positive_weight > 0.
        assert negative_weight > 0.
        # Asymmetric scaling of positive vs negative reward components.
        self.positive_weight = positive_weight
        self.negative_weight = negative_weight
        self.early_stop = early_stop
        # Per-player state from the previous step (length-2 arrays).
        self.city_count = np.empty((2,), dtype=float)
        self.unit_count = np.empty_like(self.city_count)
        self.research_points = np.empty_like(self.city_count)
        self.total_fuel = np.empty_like(self.city_count)
        self.weights = {
            "game_result": 10.,
            "city": 1.,
            "unit": 0.5,
            "research": 0.1,
            "fuel": 0.005,
            # Penalize workers each step that their cargo remains full
            # "full_workers": -0.01,
            "full_workers": 0.,
            # A reward given each step
            "step": 0.,
        }
        # Known weight names may be overridden via kwargs; consumed kwargs
        # are then removed so the base class does not warn about them.
        self.weights.update({key: val for key, val in kwargs.items() if key in self.weights.keys()})
        for key in copy.copy(kwargs).keys():
            if key in self.weights.keys():
                del kwargs[key]
        super(StatefulMultiReward, self).__init__(**kwargs)
        self._reset()
    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        if self.early_stop:
            done = done or should_early_stop(game_state)
        return self.compute_rewards(game_state, done), done
    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        new_city_count = count_city_tiles(game_state)
        new_unit_count = count_units(game_state)
        new_research_points = count_research_points(game_state)
        new_total_fuel = count_total_fuel(game_state)
        # Each component is the delta since the previous step (or a direct
        # count for "full_workers" and the constant "step" bonus).
        reward_items_dict = {
            "city": new_city_count - self.city_count,
            "unit": new_unit_count - self.unit_count,
            "research": new_research_points - self.research_points,
            # Don't penalize losing fuel at night
            "fuel": np.maximum(new_total_fuel - self.total_fuel, 0),
            "full_workers": np.array([
                sum(unit.get_cargo_space_left() > 0 for unit in player.units if unit.is_worker())
                for player in game_state.players
            ]),
            "step": np.ones(2, dtype=float)
        }
        if done:
            # Final step: add the ranked win/loss signal and reset state for
            # the next episode.
            game_result_reward = [int(GameResultReward.compute_player_reward(p)) for p in game_state.players]
            game_result_reward = (rankdata(game_result_reward) - 1.) * 2. - 1.
            self._reset()
        else:
            game_result_reward = np.array([0., 0.])
            self.city_count = new_city_count
            self.unit_count = new_unit_count
            self.research_points = new_research_points
            self.total_fuel = new_total_fuel
        reward_items_dict["game_result"] = game_result_reward
        assert self.weights.keys() == reward_items_dict.keys()
        reward = np.stack(
            [self.weight_rewards(reward_items_dict[key] * w) for key, w in self.weights.items()],
            axis=0
        ).sum(axis=0)
        # Normalize so the per-step magnitude stays within the spec bounds
        # regardless of the positive/negative scaling chosen.
        return tuple(reward / 500. / max(self.positive_weight, self.negative_weight))
    def weight_rewards(self, reward: np.ndarray) -> np.ndarray:
        # Scale positive and negative parts independently.
        reward = np.where(
            reward > 0.,
            self.positive_weight * reward,
            reward
        )
        reward = np.where(
            reward < 0.,
            self.negative_weight * reward,
            reward
        )
        return reward
    def _reset(self) -> NoReturn:
        # Start-of-game baseline: each side begins with one city tile and
        # one unit, and zero research points / fuel.
        self.city_count = np.ones_like(self.city_count)
        self.unit_count = np.ones_like(self.unit_count)
        self.research_points = np.zeros_like(self.research_points)
        self.total_fuel = np.zeros_like(self.total_fuel)
class ZeroSumStatefulMultiReward(StatefulMultiReward):
    """StatefulMultiReward re-centered so the two rewards always sum to zero."""

    @staticmethod
    def get_reward_spec() -> RewardSpec:
        return RewardSpec(
            reward_min=-1.,
            reward_max=1.,
            zero_sum=True,
            only_once=False
        )

    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        raw = np.array(super(ZeroSumStatefulMultiReward, self).compute_rewards(game_state, done))
        # Subtracting the mean makes the pair zero-sum.
        return tuple(raw - raw.mean())
class PunishingExponentialReward(BaseRewardSpace):
    """Reward space that pays for absolute game statistics each step but
    punishes (and ends the episode) as soon as a player loses a city tile
    or a unit."""

    @staticmethod
    def get_reward_spec() -> RewardSpec:
        return RewardSpec(
            reward_min=-1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
            reward_max=1. / GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"],
            zero_sum=False,
            only_once=False
        )

    def __init__(
            self,
            **kwargs
    ):
        # Per-player running statistics; filled by _reset() below.
        self.city_count = np.empty((2,), dtype=float)
        self.unit_count = np.empty_like(self.city_count)
        self.research_points = np.empty_like(self.city_count)
        self.total_fuel = np.empty_like(self.city_count)
        # Default weights; any matching kwarg overrides its entry.
        self.weights = {
            "game_result": 0.,
            "city": 1.,
            "unit": 0.5,
            "research": 0.01,
            "fuel": 0.001,
        }
        self.weights.update({key: val for key, val in kwargs.items() if key in self.weights.keys()})
        # Strip consumed weight kwargs before forwarding the rest to the base
        # class (iterate over a copy since we delete while iterating).
        for key in copy.copy(kwargs).keys():
            if key in self.weights.keys():
                del kwargs[key]
        super(PunishingExponentialReward, self).__init__(**kwargs)
        self._reset()

    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        """Return ((reward_p0, reward_p1), done); done is forced True when a
        player lost a city tile or unit since the previous step."""
        new_city_count = count_city_tiles(game_state)
        new_unit_count = count_units(game_state)
        new_research_points = count_research_points(game_state)
        new_total_fuel = count_total_fuel(game_state)
        # Deltas are only used for the loss-detection penalty below.
        city_diff = new_city_count - self.city_count
        unit_diff = new_unit_count - self.unit_count
        # NOTE: unlike the delta-based spaces, these are absolute totals.
        reward_items_dict = {
            "city": new_city_count,
            "unit": new_unit_count,
            "research": new_research_points,
            "fuel": new_total_fuel,
        }
        if done:
            # Map final standings to {-1, +1} via rank, then reset state.
            game_result_reward = [int(GameResultReward.compute_player_reward(p)) for p in game_state.players]
            game_result_reward = (rankdata(game_result_reward) - 1.) * 2. - 1.
            self._reset()
        else:
            game_result_reward = np.array([0., 0.])
            self.city_count = new_city_count
            self.unit_count = new_unit_count
            self.research_points = new_research_points
            self.total_fuel = new_total_fuel
        reward_items_dict["game_result"] = game_result_reward
        assert self.weights.keys() == reward_items_dict.keys()
        reward = np.stack(
            [reward_items_dict[key] * w for key, w in self.weights.items()],
            axis=0
        ).sum(axis=0)
        # Flat -0.1 penalty for any player that lost a unit or city tile;
        # otherwise the weighted sum, normalized.
        lost_unit_or_city = (city_diff < 0) | (unit_diff < 0)
        reward = np.where(
            lost_unit_or_city,
            -0.1,
            reward / 1_000.
        )
        return tuple(reward), done or lost_unit_or_city.any()

    def compute_rewards(self, game_state: Game, done: bool) -> Tuple[float, float]:
        # This space needs to control `done` as well, so only the combined
        # entry point above is supported.
        raise NotImplementedError

    def _reset(self) -> NoReturn:
        # Start-of-game values: one city and one unit per player.
        self.city_count = np.ones_like(self.city_count)
        self.unit_count = np.ones_like(self.unit_count)
        self.research_points = np.zeros_like(self.research_points)
        self.total_fuel = np.zeros_like(self.total_fuel)
# Subtask reward spaces defined below
# NB: Subtasks that are "different enough" should be defined separately since each subtask gets its own embedding
# See obs_spaces.SUBTASK_ENCODING
# TODO: Somehow include target locations for subtasks?
class Subtask(BaseRewardSpace, ABC):
    """Base class for one-shot subtask reward spaces: pays 1.0 once, when
    completed_task() first reports the goal as reached."""

    @staticmethod
    def get_reward_spec() -> RewardSpec:
        """
        Don't override reward_spec or you risk breaking classes like multi_subtask.MultiSubtask
        """
        return RewardSpec(
            reward_min=0.,
            reward_max=1.,
            zero_sum=False,
            only_once=True
        )

    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        # Episode ends as soon as either player reaches the goal.
        goal_reached = self.completed_task(game_state)
        return tuple(goal_reached.astype(float)), goal_reached.any() or done

    @abstractmethod
    def completed_task(self, game_state: Game) -> np.ndarray:
        """Return a length-2 boolean array: per-player goal completion."""
        pass

    def get_subtask_encoding(self, subtask_encoding: dict) -> int:
        # Used by obs_spaces.SUBTASK_ENCODING to embed the subtask identity.
        return subtask_encoding[type(self)]
class CollectNWood(Subtask):
    """Subtask: hold at least n wood summed across all of a player's units
    (default: one worker's full cargo capacity)."""

    def __init__(self, n: int = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"]["WORKER"], **kwargs):
        super().__init__(**kwargs)
        self.n = n

    def completed_task(self, game_state: Game) -> np.ndarray:
        totals = np.array([
            sum(unit.cargo.wood for unit in player.units)
            for player in game_state.players
        ])
        return totals >= self.n
class CollectNCoal(Subtask):
    """Subtask: hold at least n coal summed across all of a player's units
    (default: half a worker's cargo capacity)."""

    def __init__(self, n: int = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"]["WORKER"] // 2, **kwargs):
        super().__init__(**kwargs)
        self.n = n

    def completed_task(self, game_state: Game) -> np.ndarray:
        totals = np.array([
            sum(unit.cargo.coal for unit in player.units)
            for player in game_state.players
        ])
        return totals >= self.n
class CollectNUranium(Subtask):
    """Subtask: hold at least n uranium summed across all of a player's units
    (default: one fifth of a worker's cargo capacity)."""

    def __init__(self, n: int = GAME_CONSTANTS["PARAMETERS"]["RESOURCE_CAPACITY"]["WORKER"] // 5, **kwargs):
        super().__init__(**kwargs)
        self.n = n

    def completed_task(self, game_state: Game) -> np.ndarray:
        totals = np.array([
            sum(unit.cargo.uranium for unit in player.units)
            for player in game_state.players
        ])
        return totals >= self.n
class MakeNCityTiles(Subtask):
    """Subtask: own at least n_city_tiles city tiles (counted anywhere on
    the map, not necessarily contiguous)."""

    def __init__(self, n_city_tiles: int = 2, **kwargs):
        super(MakeNCityTiles, self).__init__(**kwargs)
        assert n_city_tiles > 1, "Players start with 1 city tile already"
        self.n_city_tiles = n_city_tiles

    def completed_task(self, game_state: Game) -> np.ndarray:
        # Per-player boolean: total city-tile count meets the target.
        return count_city_tiles(game_state) >= self.n_city_tiles
class MakeNContiguousCityTiles(MakeNCityTiles):
    """Variant of MakeNCityTiles: the target count must be met by a single
    city (one contiguous group of tiles)."""

    def completed_task(self, game_state: Game) -> np.ndarray:
        return np.array([
            # The appended [0] avoids max() on an empty sequence for players
            # that currently have no cities.
            max([len(city.citytiles) for city in player.cities.values()] + [0])
            for player in game_state.players
        ]) >= self.n_city_tiles
class CollectNTotalFuel(Subtask):
    """Subtask: accumulate at least n_total_fuel fuel in total (default:
    one city's upkeep for a full night)."""

    def __init__(self, n_total_fuel: int = GAME_CONSTANTS["PARAMETERS"]["LIGHT_UPKEEP"]["CITY"] *
                                           GAME_CONSTANTS["PARAMETERS"]["NIGHT_LENGTH"], **kwargs):
        super().__init__(**kwargs)
        self.n_total_fuel = n_total_fuel

    def completed_task(self, game_state: Game) -> np.ndarray:
        # Per-player boolean: total stored fuel meets the target.
        fuel_totals = count_total_fuel(game_state)
        return fuel_totals >= self.n_total_fuel
class SurviveNNights(Subtask):
    """Subtask: survive n full day/night cycles without losing any city
    tiles or units."""

    def __init__(self, n_nights: int = 1, **kwargs):
        super(SurviveNNights, self).__init__(**kwargs)
        cycle_len = GAME_CONSTANTS["PARAMETERS"]["DAY_LENGTH"] + GAME_CONSTANTS["PARAMETERS"]["NIGHT_LENGTH"]
        self.target_step = n_nights * cycle_len
        assert self.target_step <= GAME_CONSTANTS["PARAMETERS"]["MAX_DAYS"]
        # Per-player running counts used to detect losses between steps.
        self.city_count = np.empty((2,), dtype=int)
        self.unit_count = np.empty_like(self.city_count)

    def compute_rewards_and_done(self, game_state: Game, done: bool) -> Tuple[Tuple[float, float], bool]:
        """Reward 1.0 for surviving to the target step; if any player failed
        (lost a tile/unit), failing players get 0 and surviving players get
        0.5 (plus 0.5 more if they also reached the target)."""
        failed_task = self.failed_task(game_state)
        completed_task = self.completed_task(game_state)
        if failed_task.any():
            rewards = np.where(
                failed_task,
                0.,
                0.5 + 0.5 * completed_task.astype(float)
            )
        else:
            rewards = completed_task.astype(float)
        # Episode ends on any failure or completion.
        done = failed_task.any() or completed_task.any() or done
        if done:
            self._reset()
        return tuple(rewards), done

    def completed_task(self, game_state: Game) -> np.ndarray:
        # Same value for both players: survival is measured by the turn count.
        return np.array([
            game_state.turn >= self.target_step
        ]).repeat(2)

    def failed_task(self, game_state: Game) -> np.ndarray:
        """Return per-player booleans; True when the player lost a city tile
        or a unit since the previous call. Also updates the stored counts."""
        new_city_count = count_city_tiles(game_state)
        new_unit_count = count_units(game_state)
        failed = np.logical_or(
            new_city_count < self.city_count,
            new_unit_count < self.unit_count
        )
        self.city_count = new_city_count
        self.unit_count = new_unit_count
        return failed

    def _reset(self) -> NoReturn:
        # Start-of-game values: one city and one unit per player.
        self.city_count = np.ones_like(self.city_count)
        self.unit_count = np.ones_like(self.unit_count)
class GetNResearchPoints(Subtask):
    """Subtask: accumulate at least n_research_points research points
    (default: enough to unlock coal)."""

    def __init__(
            self,
            n_research_points: int = GAME_CONSTANTS["PARAMETERS"]["RESEARCH_REQUIREMENTS"]["COAL"],
            **kwargs
    ):
        super().__init__(**kwargs)
        self.n_research_points = n_research_points

    def completed_task(self, game_state: Game) -> np.ndarray:
        points = np.array([p.research_points for p in game_state.players])
        return points >= self.n_research_points
| 2.46875 | 2 |
A2/Q3.py | Vasily-Piccone/ECSE543-NumericalMethods | 0 | 12798185 | import numpy as np
import matplotlib.pyplot as plt
# To use LaTeX in the plots
plt.rcParams.update({
    "text.usetex": True,
    "font.family": "sans-serif",
    "font.sans-serif": ["Helvetica"]})
# for Palatino and other serif fonts use:
plt.rcParams.update({
    "text.usetex": True,
    "font.family": "serif",
    "font.serif": ["Palatino"],
})
# NOTE(review): three successive rcParams updates — the last one wins for
# "font.family" (Helvetica); the two above only leave their font lists set.
plt.rcParams.update({
    "text.usetex": True,
    "font.family": "Helvetica"
})
# Constants used in the problem: a series resistor feeding two diodes
# modeled with the Shockley equation.
E = 0.220 # source voltage, in volts
R = 500 # series resistance, in Ohms
Vt = 0.025 # thermal voltage, in volts
Isa = 0.6e-6 # diode A saturation current, in Amps
Isb = 1.2e-6 # diode B saturation current, in Amps
# Calculates the vector F which solves the equation F = 0
def F(v):
    """Return the 2-element residual vector F(v) of the circuit's KCL
    equations; the solution satisfies F(v) = 0.

    v -- 2 x 1 vector of node voltages [v1, v2].
    """
    # Diode currents from the Shockley equation.
    i_diode_a = Isa * (np.exp((v[0] - v[1]) / Vt) - 1)
    i_diode_b = Isb * (np.exp((v[1] / Vt)) - 1)
    # Node 1: resistor current minus diode A; node 2: diode A minus diode B.
    return np.array([(E - v[0]) / R - i_diode_a, i_diode_a - i_diode_b])
# compute the Jacobian
def Jacobian(v):
    """Return the 2x2 Jacobian dF/dv of the circuit residual F at v."""
    J = np.zeros(shape=(2, 2))
    # Shared partial of diode A's current w.r.t. (v0 - v1).
    g = (Isa / Vt) * np.exp((v[0] - v[1]) / Vt)
    J[0][0] = -1/R - g
    J[0][1] = g
    J[1][0] = g
    J[1][1] = -(Isb / Vt) * np.exp(v[1] / Vt) - g
    return J
# uses the above two functions to calculate the voltage solution to the circuit
def newton_raphson(maxerr):
    """Solve F(v) = 0 with the Newton-Raphson method, starting from v = 0.

    Iterates until the 2-norm of the update step dV drops below maxerr.

    Args:
        maxerr: convergence threshold on ||dV||_2.

    Returns:
        (Vnew, dV_vec, i, val_vec) -- the solution vector, the per-iteration
        ||dV||_2 history, the iteration count, and the per-iteration voltage
        estimates.
    """
    i = 0
    Vnew = np.zeros(shape=(2, 1))
    dV_vec = []
    val_vec = []
    # The original code kept a `conv` flag that was never updated; the loop
    # only ever exits via the convergence break below, so make that explicit.
    while True:
        i += 1
        F_p = Jacobian(Vnew)  # Jacobian at the current voltage estimate
        eff = F(Vnew)  # residual vector at the current voltage estimate
        # Newton step: dV = -J^{-1} F(V)
        dV = np.multiply(np.dot(np.linalg.inv(F_p), eff), -1)
        crit_val = np.linalg.norm(dV, 2)  # 2-norm of dV for convergence test
        Vnew = np.add(Vnew, dV)  # next voltage estimate
        dV_vec.append(crit_val)
        val_vec.append(Vnew)
        print("------------------------------------")
        print("iteration = "+str(i))
        print("Jacobian = "+str(F_p))
        print("F-vector = "+str(eff))
        print("\u0394 V = "+str(dV))
        if crit_val < maxerr:
            break
    return Vnew, dV_vec, i, val_vec
if __name__ == "__main__":
    error = 10e-15 # the maximum allowable error (note: 10e-15 == 1e-14)
    val = newton_raphson(error)
    # Unpack the solver output: (solution, ||dV|| history, iterations, estimates).
    dV_norm_err = val[1]
    iter_no = val[2]
    ans = val[3]
    print("------------------------------------")
    # NOTE(review): hard-coded index — prints the 8th iterate and will
    # IndexError if the solver converges in fewer than 8 iterations.
    print(ans[7])
    # Plot 10*log10 of the per-iteration update norm to show convergence rate.
    x_val = np.linspace(1, iter_no, iter_no)
    dV = 10*np.log10(dV_norm_err)
    plt.plot(x_val, dV)
    plt.xlabel("Number of Iterations")
    plt.ylabel("log(2-norm dV)")
plt.show() | 3.015625 | 3 |
sdks/blue-krill/tests/storages/conftest.py | alex-smile/bkpaas-python-sdk | 17 | 12798186 | <gh_stars>10-100
# -*- coding: utf-8 -*-
"""
* TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-蓝鲸 PaaS 平台(BlueKing-PaaS) available.
* Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
"""
import tempfile as _tempfile_
from pathlib import Path
from typing import Callable, Generator, Optional
import pytest
@pytest.fixture
def mktemp() -> Generator[Callable[..., Path], None, None]:
    """Yield a factory that returns fresh temp-file paths (optionally
    pre-filled with content); all created paths are cleaned up on teardown."""
    files = []

    def core(content: Optional[str] = None):
        # NOTE: tempfile.mktemp() only reserves a name without creating the
        # file (deprecated/race-prone); kept for backward compatibility.
        filepath = Path(_tempfile_.mktemp())
        files.append(filepath)
        if content:
            with open(filepath, mode="w") as fh:
                fh.write(content)
        return filepath

    yield core

    for item in files:
        # BUG FIX: the original tested the bound method (`if item.exists:`),
        # which is always truthy, so unlink() raised FileNotFoundError for
        # paths whose file was never created (core() called without content).
        if item.exists():
            item.unlink()
| 1.984375 | 2 |
hackerhub/urls.py | jason17h/hackerhub | 0 | 12798187 | from django.urls import path
from hackerhub import views
# Namespace for URL reversing, e.g. {% url 'hackerhub:...' %}.
app_name = 'hackerhub'

# No routes are wired up yet; the hackathon list view is kept as a template.
urlpatterns = [
    # path('hackathons/', views.hackathonList, name='hackathonList'),
] | 1.476563 | 1 |
MPIderivHelperFuncs.py | Carlson-J/energy-transfer-analysis | 1 | 12798188 | <gh_stars>1-10
import numpy as np
import FFTHelperFuncs
def MPIderiv2(comm,var,dim):
    """Returns first derivative (2-point central finite difference)
    of a 3-dimensional, real space, uniform grid (with L = 1) variable.

    Assumes a pencil decomposition: the field is split between processes
    on axes 0 and 1, while axis 2 is fully local to each process. Boundary
    slabs are exchanged with neighboring ranks via sendrecv; axis 2 wraps
    around periodically within the local array.

    Args:
        comm -- MPI world communicator
        var -- input field (local portion of the global grid)
        dim -- axis along which the derivative is taken (0, 1 or 2)

    Raises:
        ValueError: if dim is not 0, 1 or 2.
    """
    if dim not in (0, 1, 2):
        # BUG FIX: the original printed a warning and then crashed with a
        # NameError on `del tmp`; fail fast with a clear error instead.
        raise ValueError("dim must be 0, 1 or 2, got {!r}".format(dim))
    rank = comm.Get_rank()
    sl_m1 = slice(None,-2,None)
    sl_p1 = slice(2,None,None)
    ds = 2.0/float(var.shape[2]) # assumes axis 2 covers entire grid with L = 1
    N = np.array(FFTHelperFuncs.FFT.global_shape(), dtype=int)
    loc_slc = FFTHelperFuncs.local_shape
    n_proc = N // loc_slc
    if (dim == 0):
        next_proc = (rank + n_proc[1]) % (n_proc[0] * n_proc[1])
        prev_proc = (rank - n_proc[1]) % (n_proc[0] * n_proc[1])
        # send right slice of local proc as left slab to follow proc
        leftSlice = comm.sendrecv(sendobj=var[-1:,:,:],dest=next_proc,source=prev_proc)
        # send left slice of local proc as right slab to follow proc
        rightSlice = comm.sendrecv(sendobj=var[:1,:,:],dest=prev_proc,source=next_proc)
        tmp = np.concatenate((leftSlice,var,rightSlice),axis=0)
        p1 = tmp[sl_p1,:,:]
        m1 = tmp[sl_m1,:,:]
    elif (dim == 1):
        next_proc = (rank + 1) % n_proc[1] + (rank // n_proc[1]) * n_proc[1]
        prev_proc = (rank - 1) % n_proc[1] + (rank // n_proc[1]) * n_proc[1]
        # send right slice of local proc as left slab to follow proc
        leftSlice = comm.sendrecv(sendobj=var[:,-1:,:],dest=next_proc,source=prev_proc)
        # send left slice of local proc as right slab to follow proc
        rightSlice = comm.sendrecv(sendobj=var[:,:1,:],dest=prev_proc,source=next_proc)
        tmp = np.concatenate((leftSlice,var,rightSlice),axis=1)
        p1 = tmp[:,sl_p1,:]
        m1 = tmp[:,sl_m1,:]
    else:
        # nothing special required here as we do pencil decomp in x-y;
        # axis 2 is local, so wrap periodically within the local array.
        tmp = np.concatenate((var[:,:,-1:],var,var[:,:,:1]),axis=2)
        p1 = tmp[:,:,sl_p1]
        m1 = tmp[:,:,sl_m1]
    del tmp
    return np.array((p1 - m1)/ds)
def MPIXdotGradYScalar(comm, X, Y):
    """Return the advection term (X . grad) Y for a scalar field Y."""
    total = X[0] * MPIderiv2(comm, Y, 0)
    for axis in (1, 2):
        total = total + X[axis] * MPIderiv2(comm, Y, axis)
    return total
def MPIXdotGradY(comm, X, Y):
    """Return the advection term (X . grad) Y for a vector field Y,
    computed component by component."""
    res = np.zeros_like(X)
    for comp in range(3):
        acc = X[0] * MPIderiv2(comm, Y[comp], 0)
        acc = acc + X[1] * MPIderiv2(comm, Y[comp], 1)
        res[comp] = acc + X[2] * MPIderiv2(comm, Y[comp], 2)
    return res
def MPIdivX(comm, X):
    """Return the divergence of X: dX0/dx + dX1/dy + dX2/dz."""
    div = MPIderiv2(comm, X[0], 0)
    div = div + MPIderiv2(comm, X[1], 1)
    return div + MPIderiv2(comm, X[2], 2)
def MPIdivXY(comm, X, Y):
    """Return pd_j (X_j Y_i), the divergence of the flux X Y, per component."""
    res = np.zeros_like(Y)
    for comp in range(3):
        flux = MPIderiv2(comm, X[0] * Y[comp], 0)
        flux = flux + MPIderiv2(comm, X[1] * Y[comp], 1)
        res[comp] = flux + MPIderiv2(comm, X[2] * Y[comp], 2)
    return res
def MPIgradX(comm, X):
    """Return the gradient of the scalar field X as [dX/dx, dX/dy, dX/dz]."""
    return np.array([MPIderiv2(comm, X, axis) for axis in range(3)])
def MPIrotX(comm, X):
    """Return the curl of X:
    [ z_dy - y_dz, x_dz - z_dx, y_dx - x_dy ]."""
    curl_x = MPIderiv2(comm, X[2], 1) - MPIderiv2(comm, X[1], 2)
    curl_y = MPIderiv2(comm, X[0], 2) - MPIderiv2(comm, X[2], 0)
    curl_z = MPIderiv2(comm, X[1], 0) - MPIderiv2(comm, X[0], 1)
    return np.array([curl_x, curl_y, curl_z])
| 2.5 | 2 |
numpy_practice.py | MiroGasparek/python_intro | 0 | 12798189 | <reponame>MiroGasparek/python_intro
# 21 February 2018 <NAME>
# Practice with NumPy
import numpy as np
# Practice 1
# Generate array of 0 to 10, two ways: arange (step-based) and
# linspace (count-based); both yield [0., 1., ..., 10.].
my_ar1 = np.arange(0,11,dtype='float')
print(my_ar1)
my_ar2 = np.linspace(0,10,11,dtype='float')
print(my_ar2)
# Practice 2
# Load in data: cross-sectional areas under high/low food conditions;
# lines starting with '#' in the CSVs are skipped.
xa_high = np.loadtxt('data/xa_high_food.csv',comments='#')
xa_low = np.loadtxt('data/xa_low_food.csv',comments='#')
def xa_to_diameter(xa):
    """Convert an array of cross-sectional areas to diameters with
    commensurate units, via d = sqrt(4 A / pi)."""
    return np.sqrt(4 * xa / np.pi)
# Practice 3
# Create matrix A (4x4 system matrix for the linear-algebra exercises)
A = np.array([[6.7, 1.3, 0.6, 0.7],
              [0.1, 5.5, 0.4, 2.4],
              [1.1, 0.8, 4.5, 1.7],
              [0.0, 1.5, 3.4, 7.5]])

# Create vector b (right-hand side)
b = np.array([1.1, 2.3, 3.3, 3.9])

# 1. Print row 1 (remember, indexing starts at zero) of A.
print(A[0,:])

# 2. Print columns 1 and 3 of A.
print(A[:,(0,2)])

# 3. Print the values of every entry in A that is greater than 2.
print(A[A > 2])

# 4. Print the diagonal of A. using the np.diag() function.
print(np.diag(A))

# 1. First, we'll solve the linear system A⋅x=b.
#    Try it out: use np.linalg.solve().
#    Store your answer in the Numpy array x.
x = np.linalg.solve(A,b)
print('Solution of A*x = b is x = ',x)

# 2. Now do np.dot(A, x) to verify that A⋅x=b (element-wise, within
#    floating-point tolerance).
b1 = np.dot(A,x)
print(np.isclose(b1,b))

# 3. Use np.transpose() to compute the transpose of A.
AT = np.transpose(A)
print('Transpose of A is AT = \n',AT)

# 4. Use np.linalg.inv() to compute the inverse of A.
AInv = np.linalg.inv(A)
print('Inverse of A is AInv = \n',AInv)

# 1. See what happens when you do B = np.ravel(A): flattens A to 1-D.
B = np.ravel(A)
print(B)

# 2. Look of the documentation for np.reshape(). Then, reshape B to make it look like A again.
C = B.reshape((4,4))
print(C)
| 4.46875 | 4 |
core/Sessions/sessions.py | ctg-group/scky | 0 | 12798190 | <filename>core/Sessions/sessions.py
import pickle
import os
from .Connection import *
class Session:
    """A named remote session wrapping a Connection.

    Sessions are persisted one per line in a text file using the format
    ``display_name:bind_ip:bind_port:id``.
    """

    def __init__(self, display_name, conn):
        self.display_name = display_name
        self.conn = conn
        self.id = conn.id

    @staticmethod
    def _parse(raw):
        """Parse the raw contents of a sessions file into Session objects.

        Blank lines are skipped (robustness fix: the original crashed on a
        trailing newline). Extra ':'-separated fields beyond the fourth are
        ignored, matching the original indexing behavior.
        """
        sessions = []
        if len(raw) < 2:
            return sessions
        for line in raw.split('\n'):
            if not line:
                continue
            opts = line.split(':')
            sessions.append(Session(opts[0], Connection(opts[1], int(opts[2]), int(opts[3]))))
        return sessions

    @staticmethod
    def load_sessions():
        """Load all sessions from the default '.sessions' file, creating an
        empty file (and returning []) when it does not exist."""
        if os.path.exists('.sessions') and os.path.isfile('.sessions'):
            with open('.sessions', 'r') as f:
                raw = f.read()
            return Session._parse(raw)
        open('.sessions', 'w+').close()
        return []

    @staticmethod
    def load_sessions_(fn):
        """Load all sessions from *fn*, creating an empty file (and
        returning []) when it does not exist.

        BUG FIX: the original read the file twice — the second read returned
        an empty string, so nothing was ever parsed — and passed the port/id
        fields as strings; it now parses once and converts to int, matching
        load_sessions().
        """
        if os.path.exists(fn) and os.path.isfile(fn):
            with open(fn, 'r') as f:
                raw = f.read()
            return Session._parse(raw)
        open(fn, 'w+').close()
        return []

    def _save_to(self, fn):
        """Prepend this session's record to *fn*, keeping existing entries.

        BUG FIX: the original re-read an already-consumed file handle (so
        existing sessions were silently dropped) and concatenated records
        with no separator (producing a line the loaders could not parse);
        records are now preserved and written one per line.
        """
        record = f'{self.display_name}:{self.conn.bind_ip}:{self.conn.bind_port}:{self.conn.id}'
        lines = [record]
        with open(fn, 'r') as f:
            existing = f.read()
        if not len(existing) < 2:
            lines.extend(l for l in existing.split('\n') if l)
        with open(fn, 'w') as f:
            f.write('\n'.join(lines))

    def save(self):
        """Persist this session to the default '.sessions' file."""
        self._save_to('.sessions')

    def save_(self, fn):
        """Persist this session to *fn*."""
        self._save_to(fn)
examples/dialogs1.py | akloster/blender-asyncio | 54 | 12798191 | import bpy
import asyncio
from asyncio import Task, coroutine, sleep
import blender_async
class TestDialog(blender_async.dialogs.AsyncDialog):
    # Example async dialog: three Blender properties rendered as input fields.
    my_float = bpy.props.FloatProperty(name="Some Floating Point")
    my_bool = bpy.props.BoolProperty(name="Toggle Option")
    my_string = bpy.props.StringProperty(name="String Value")
async def example():
    """Demo coroutine: open a file dialog, then the TestDialog property
    dialog, printing each result, with one-second pauses in between."""
    await sleep(1)
    file_name = await blender_async.open_file_dialog()
    print(file_name)
    await sleep(1)
    results = await blender_async.open_dialog(TestDialog)
    print(results)
    await sleep(1)
await sleep(1)
# Schedule the demo coroutine on the Blender-integrated asyncio event loop.
loop = blender_async.get_event_loop()
loop.create_task(example())
| 2.6875 | 3 |
Bank_loan_project/code.py | NehaBhojani/ga-learner-dsmp-repo | 0 | 12798192 | # --------------
# Import packages
import numpy as np
import pandas as pd
from scipy.stats import mode

# Load the dataset (`path` is supplied by the grading platform) and split
# columns by dtype: object columns are categorical, numeric are numerical.
bank = pd.read_csv(path)
categorical_var = bank.select_dtypes(include='object')
print(categorical_var)
numerical_var = bank.select_dtypes(include='number')
print(numerical_var)
# code starts here


# code ends here


# --------------
# code starts here
# Drop the identifier column and impute missing values with each column's
# mode (row 0 of DataFrame.mode()).
bank.columns
banks = bank.drop('Loan_ID',axis=1)
banks.columns
banks.isnull().sum()
bank_mode = banks.mode(axis=0)
#col = list(banks.columns)
bank_mode.loc[0,:]
banks.isnull().sum()
#for x in banks.columns.values:
#    banks[x]=banks[x].fillna(value=bank_mode[x].loc[0])
##banks = banks[col].apply(lambda x: x.fillna(x.mode,inplace=True))
banks.fillna(bank_mode.loc[0,:],inplace=True)
banks.isnull().sum()
#banks.isnull().sum()
#code ends here
# --------------
# Code starts here
# Mean loan amount grouped by gender, marital status and self-employment.
banks[['Gender','Married', 'Self_Employed','LoanAmount']]
avg_loan_amount = pd.pivot_table(banks, values='LoanAmount', index=['Gender','Married','Self_Employed'], aggfunc=np.mean)
# code ends here


# --------------
# code starts here
# Share of approved loans among self-employed vs. not self-employed
# applicants. value_counts()[1] counts True entries (True == 1).
self_emp_y = banks['Self_Employed'] == 'Yes'
loan_status = banks['Loan_Status'] == 'Y'
self_emp_n = banks['Self_Employed'] == 'No'
# Total number of applications in the dataset (hard-coded).
Loan_Status = 614
loan_approved_se = (self_emp_y & loan_status).value_counts()[1]
loan_approved_nse = (self_emp_n & loan_status).value_counts()[1]
print(loan_approved_se ,' ',loan_approved_nse, Loan_Status)
percentage_se = (loan_approved_se/Loan_Status) * 100
percentage_nse = (loan_approved_nse/Loan_Status) * 100
print("Percent of Loan approval for Self employed people is : ",percentage_se)
print("Percent of Loan approval for people who are not self-employed is: ",percentage_nse)
# code ends here


# --------------
# code starts here
# Count applicants whose loan term is at least 25 years (term is in months).
loan_term = banks['Loan_Amount_Term'].apply(lambda x : x/12)
loan_term>=25
big_loan_term = banks[loan_term>=25].shape[0]
# code ends here


# --------------
# code starts here
# Mean applicant income and credit history, grouped by loan status.
loan_groupby = banks.groupby('Loan_Status')
loan_groupby = loan_groupby[['ApplicantIncome','Credit_History']]
mean_values = loan_groupby.mean()
print(mean_values)
# code ends here
| 2.8125 | 3 |
03_01_dice.py | bolivaralejandro/prog_pi_ed2- | 0 | 12798193 | #03_01_dice
import random # import random module
for _ in range(1, 11):  # roll the die ten times
    roll = random.randint(1, 6)  # fair six-sided die: 1..6 inclusive
    print(roll)
| 3.71875 | 4 |
TPP/API/top_pro_pack.py | Buddyboy201/top_pro_pack-v3 | 0 | 12798194 | <reponame>Buddyboy201/top_pro_pack-v3
import os
import sys
import sqlalchemy
from pathlib import Path
from TPP.API.centroid_protein import CentroidProtein
import json
from shutil import copyfile
from time import perf_counter
def get_config(name, pdb_path, json_path, exclude_backbone, distance_cutoff, ignored_paths):
    """Build a JSON-serializable project-configuration dict.

    All paths are normalized through pathlib and stored as strings so the
    result can be dumped straight to JSON.
    """
    return {
        "name": name,
        "pdb_path": str(Path(pdb_path)),
        "json_path": str(Path(json_path)),
        "exclude_backbone": exclude_backbone,
        "distance_cutoff": distance_cutoff,
        "ignored_paths": [str(Path(p)) for p in ignored_paths],
    }
def create_project(config_path, name, pdb_path, json_path, exclude_backbone=False, distance_cutoff=6, ignored_paths=[]):
    """Write a new project configuration JSON file at config_path.

    NOTE: the mutable default for ignored_paths is kept for interface
    compatibility; it is never mutated here.
    """
    config = get_config(name=name,
                        pdb_path=pdb_path,
                        json_path=json_path,
                        exclude_backbone=exclude_backbone,
                        distance_cutoff=distance_cutoff,
                        ignored_paths=ignored_paths)
    with open(config_path, "w") as config_file:
        json.dump(config, config_file)
class Project:
    """Manage a collection of protein structures described by a JSON config.

    The config supplies a project name, a directory of PDB files, a directory
    of JSON files, analysis parameters, and a list of files to ignore.
    Proteins are loaded into self.proteins keyed by caller-supplied ids.
    """

    def __init__(self, config_path):
        self._init_project(config_path)
        self.config_path = Path(config_path)
        self.proteins = {}  # id -> CentroidProtein

    def generate_default_ids(self):
        """Return one id per PDB file (its stem), with "" for ignored files."""
        return [f.stem if f not in self.list_ignored() else "" for f in self.list_pdb_files()]

    def _get_function_perf_decorator(func):
        # Class-body decorator: wraps a (self, id, filename) method and prints
        # its wall-clock duration.
        def inner(self, id, filename):
            start = perf_counter()
            out = func(self, id, filename)
            end = perf_counter()
            print(end - start)
            return out
        return inner

    def _init_project(self, config_path):
        """Read the JSON config and create the pdb/json directories if needed.

        Raises:
            Exception: if config_path is not an existing file.
        """
        if not Path(config_path).is_file():
            raise Exception("invalid config path: {}".format(Path(config_path)))
        with open(config_path, "rt") as config_file:
            config = json.load(config_file)
        self.distance_cutoff = config["distance_cutoff"]
        self.exclude_backbone = config["exclude_backbone"]
        self.name = config["name"]
        self.pdb_path = Path(config["pdb_path"])
        self.json_path = Path(config["json_path"])
        self.ignored_paths = [ Path(file) for file in config["ignored_paths"] ]
        self.ignore_links = {}  # id -> True when the file was ignored at load
        if not self.pdb_path.is_dir():
            self.pdb_path.mkdir(parents=True)
        if not self.json_path.is_dir():
            self.json_path.mkdir(parents=True)

    def get_protein(self, id):
        """Return the loaded protein for id, or None if it was ignored.

        Raises:
            Exception: if id was never loaded.
        """
        try:
            if not self.ignore_links.get(id):
                return self.proteins[id]
            else:
                return None
        except:
            raise Exception("{} is invalid/ignored".format(id))

    @_get_function_perf_decorator
    def load_protein(self, id, file_name):
        """Load one protein file (PDB or JSON) under the given id.

        Returns the protein, an Exception instance when parsing failed, or
        None when the file is on the ignore list. Timing is printed by the
        decorator.
        """
        file_path = None
        # JSON files live in json_path; everything else is treated as a PDB.
        if Path(file_name).suffix == ".json":
            file_path = self.json_path / Path(file_name)
        else:
            file_path = self.pdb_path / Path(file_name)
        if file_path.is_file():
            if Path(file_path) not in self.ignored_paths:
                val = self._init_protein(id, file_path)
                # _init_protein returns an Exception object (not raised) on
                # failure; propagate it to the caller as a value.
                if isinstance(val, Exception):
                    return val
                self.proteins[id] = val
                self.ignore_links[id] = False
                return val
            else:
                # print("\n#################################################\n#########################################################\n",file_path, "\n#################################################\n#########################################################\n\n")
                self.ignore_links[id] = True
                return None
        else:
            raise Exception("Not a valid {} file".format(file_path.suffix))

    def add_protein(self, file_path):
        """Copy an external protein file into the project directories.

        NOTE(review): the JSON branch references `name` and `self.json_links`,
        neither of which is defined anywhere in this class — adding a .json
        file will raise NameError/AttributeError. This looks like leftover
        code from an earlier design (see the module's `test_code` draft);
        confirm the intended behavior before relying on it.
        """
        if Path(file_path).is_file():
            if Path(file_path).suffix == ".json":
                new_file_path = self.json_path / Path(file_path).name
                if self.json_links.get(name) is not None:
                    raise Exception("{} already taken by {}".format(name, self.json_links.get(name)))
                else:
                    self.json_links[name] = Path(file_path).suffix
            else:
                new_file_path = self.pdb_path / Path(file_path).name
            copyfile(Path(file_path), new_file_path)
        else:
            raise Exception("Not a valid {} file".format(file_path.suffix))

    def add_ignored_path(self, file_path):
        """Add an existing file to the in-memory ignore list."""
        if Path(file_path).is_file():
            self.ignored_paths.append(Path(file_path))
        else:
            raise Exception("{} does not exist".format(Path(file_path)))

    def remove_ignored_path(self, file_path):
        """Remove an existing file from the in-memory ignore list."""
        if Path(file_path).is_file():
            self.ignored_paths.remove(Path(file_path))
        else:
            raise Exception("{} does not exist".format(Path(file_path)))

    def load_all_pdbs(self, ids, pdb_filter=None):
        """Load every PDB file in pdb_path, pairing files with ids in order.

        NOTE(review): `pdb_filter` is accepted but never used — confirm
        whether filtering was intended here.
        """
        try:
            for pdb_file, id in zip(self.list_pdb_files(), ids):
                print("loading {} as {} ...".format(Path(pdb_file), id))
                val = self.load_protein(id, Path(pdb_file))
                if isinstance(val, Exception):
                    print(val)
                elif isinstance(val, type(None)):
                    print("{} is ignored".format(pdb_file))
                else:
                    print("{} loaded as {}".format(pdb_file, id))
        except:
            raise Exception("All pdbs could not be loaded or handled")

    def load_all_json(self, ids):
        """Load every file in json_path, pairing files with ids in order."""
        try:
            for json_file, id in zip(self.json_path.iterdir(), ids):
                self.load_protein(id, Path(json_file))
        except:
            raise Exception("All jsons could not be loaded")

    def get_config(self):
        """Return the current project settings as a dict.

        NOTE(review): unlike the module-level get_config(), ignored_paths is
        returned as Path objects here, so this dict is not directly
        JSON-serializable.
        """
        config = {
            "name": self.name,
            "pdb_path": Path(self.pdb_path).__str__(),
            "json_path": Path(self.json_path).__str__(),
            "exclude_backbone": self.exclude_backbone,
            "distance_cutoff": self.distance_cutoff,
            "ignored_paths": self.ignored_paths
        }
        return config

    def list_pdb_files(self):
        """Yield all *.pdb files in the project's PDB directory."""
        return self.pdb_path.glob("*.pdb")

    def list_json_files(self):
        """Yield all *.json files in the project's JSON directory."""
        return self.json_path.glob("*.json")

    def list_ignored(self):
        """Return the list of ignored Paths."""
        return self.ignored_paths

    def _init_protein(self, id, file_path):
        """Construct a CentroidProtein; returns (not raises) an Exception on
        parse failure or when the structure has no residues."""
        try:
            P = CentroidProtein(id, file_path, exclude_backbone=self.exclude_backbone)
        except:
            e = sys.exc_info()[0]
            return Exception(e)
        # Clique generation only applies to freshly parsed PDBs, not cached
        # JSON representations.
        if len(P.residues) > 0 and Path(file_path).suffix != ".json":
            P.generate_centroid_cliques(distance_cutoff=self.distance_cutoff)
        else:
            return Exception("{} is empty".format(P.name))
        return P
test_code = '''class Project:
def __init__(self, config_path):
self._init_project(config_path)
self.config_path = Path(config_path)
self.loaded_proteins = {}
def _update_links(self):
for pdb_name in self.pdb_links:
if self.pdb_links.get(pdb_name) not in list(self.pdb_path.iterdir()):
self.pdb_links.pop(pdb_name, None)
for json_name in self.json_links:
if self.json_links.get(json_name) not in list(self.json_path.iterdir()):
self.json_links.pop(json_name, None)
self.ignore_links = [Path(file) for file in self.ignore_links if file in list(self.pdb_path.iterdir())+list(self.json_path.iterdir())]
#if len([Path(file) for file in list(self.pdb_path.iterdir())+list(self.json_path.iterdir()) if Path(file) not in list(self.pdb_links.values())+list(self.json_links.values())+list(self.ignore_links)]) > 0:
#raise Exception("Not all files have valid identifier providided")
def _init_project(self, config_path):
if not Path(config_path).is_file():
raise Exception("invalid config path: {}".format(Path(config_path)))
with open(config_path, "rt") as config_file:
config = json.load(config_file)
self.distance_cutoff = config["distance_cutoff"]
self.exclude_backbone = config["exclude_backbone"]
self.name = config["name"]
self.pdb_path = Path(config["pdb_path"])
self.json_path = Path(config["json_path"])
self.pdb_links = config["pdb_links"]
self.json_links = config["json_links"]
self.ignore_links = config["ignore_links"]
if not self.pdb_path.is_dir():
self.pdb_path.mkdir(parents=True)
if not self.json_path.is_dir():
self.json_path.mkdir(parents=True)
self._update_links()
def _init_protein(self, name, file_path):
try:
P = CentroidProtein(name, file_path, exclude_backbone=self.exclude_backbone)
except:
e = sys.exc_info()[0]
return Exception(e)
if len(P.residues) > 0 and Path(file_path).suffix != ".json":
P.generate_centroid_cliques(distance_cutoff=self.distance_cutoff)
else:
return Exception("{} is empty".format(P.name))
return P
def get_config(self):
config = {
"name": self.name,
"pdb_path": self.pdb_path.__str__(),
"json_path": self.json_path.__str__(),
"exclude_backbone": self.exclude_backbone,
"distance_cutoff": self.distance_cutoff,
"pdb_links": self.pdb_links,
"json_links": self.json_links,
"ignore_links": self.ignore_links
}
return config
def add_protein(self, name, file_path):
if Path(file_path).is_file():
if Path(file_path).suffix == ".json":
new_file_path = self.json_path / Path(file_path).name
if self.json_links.get(name) is not None:
raise Exception("{} already taken by {}".format(name, self.json_links.get(name)))
else:
self.json_links[name] = Path(new_file_path).name
else:
new_file_path = self.pdb_path / Path(file_path).name
self.pdb_links[name] = new_file_path
copyfile(Path(file_path), new_file_path)
else:
raise Exception("Not a valid {} file".format(file_path.suffix))
def load_protein(self, name, priority="pdb"):
if priority == "pdb":
if self.pdb_links.get(name) is not None and self.pdb_links.get(name).is_file() and self.pdb_links.get(name) not in self.ignore_links:
self._init_protein(name, self.pdb_path / Path(self.pdb_links.get(name)))
else:
class Project:
def __init__(self, config_path):
self._init_project(config_path)
self.config_path = Path(config_path)
self.proteins = {}
def get_protein(self, name):
try:
if self.ignore_links.get(name) is None:
return self.proteins[name]
else:
return None
except:
return Exception("{} not loaded yet".format(name))
def get_filename_from_name(self, name, priority="pdb"):
if priority == "json":
return self.json_links.get(name)
elif priority == "pdb":
return self.pdb_links.get(name)
else:
return self.ignore_links.get(name)
def load_protein(self, name, file_name):
file_path = None
if Path(file_name).suffix == ".json":
file_path = self.json_path / Path(file_name)
else:
file_path = self.pdb_path / Path(file_name)
if file_path.is_file():
self.proteins[name] = self._init_protein(name, file_path)
else:
raise Exception("Not a valid {} file".format(file_path.suffix))
def add_protein(self, name, file_path):
if Path(file_path).is_file():
if Path(file_path).suffix == ".json":
new_file_path = self.json_path / Path(file_path).name
if self.json_links.get(name) is not None:
raise Exception("{} already taken by {}".format(name, self.json_links.get(name)))
else:
self.json_links[name] = Path(file_path).suffix
else:
new_file_path = self.pdb_path / Path(file_path).name
copyfile(Path(file_path), new_file_path)
else:
raise Exception("Not a valid {} file".format(file_path.suffix))
def ignore_protein(self, name):
def get_config(self):
config = {
"name": self.name,
"pdb_path": self.pdb_path.__str__(),
"json_path": self.json_path.__str__(),
"ignore_path": self.ignore_path.__str__(),
"exclude_backbone": self.exclude_backbone,
"distance_cutoff": self.distance_cutoff,
"pdb_links": self.pdb_links,
"json_links": self.json_links,
"ignore_links": self.ignore_links
}
return config
def _update_links(self):
for pdb_name in self.pdb_links:
if self.pdb_links.get(pdb_name) not in list(self.list_pdbs()):
self.pdb_links.pop(pdb_name, None)
for json_name in self.json_links:
if self.json_links.get(json_name) not in list(self.list_json()):
self.json_links.pop(json_name, None)
for ignore_name in self.ignore_links:
if self.ignore_links.get(ignore_name) not in list(self.list_ignored()):
self.ignore_links.pop(ignore_name, None)
def _init_project(self, config_path):
if not Path(config_path).is_file():
raise Exception("invalid config path: {}".format(Path(config_path)))
with open(config_path, "rt") as config_file:
config = json.load(config_file)
self.distance_cutoff = config["distance_cutoff"]
self.exclude_backbone = config["exclude_backbone"]
self.name = config["name"]
self.pdb_path = Path(config["pdb_path"])
self.json_path = Path(config["json_path"])
self.ignore_path = Path(config["ignore_path"])
self.pdb_links = config["pdb_links"]
self.json_links = config["json_links"]
self.ignore_links = config["ignore_links"]
if not self.pdb_path.is_dir():
self.pdb_path.mkdir(parents=True)
if not self.ignore_path.is_dir():
self.ignore_path.mkdir(parents=True)
if not self.json_path.is_dir():
self.json_path.mkdir(parents=True)
self._update_links()
def list_loaded_proteins(self):
return self.proteins.keys()
def list_pdbs(self):
return self.pdb_path.iterdir()
def list_json(self):
return self.json_path.iterdir()
def list_ignored(self):
return self.ignore_path.iterdir()
def get_name(self):
return self.name
def get_pdb_path(self):
return self.pdb_path
def get_json_path(self):
return self.json_path
def get_ignore_path(self):
return self.ignore_path
def is_mc(self):
return not self.exclude_backbone
def get_cutoff(self):
return self.distance_cutoff
def _init_protein(self, name, file_path):
try:
P = CentroidProtein(name, file_path, exclude_backbone=self.exclude_backbone)
except:
e = sys.exc_info()[0]
return Exception(e)
if len(P.residues) > 0 and Path(file_path).suffix != ".json":
P.generate_centroid_cliques(distance_cutoff=self.distance_cutoff)
else:
return Exception("{} is empty".format(P.name))
return P
class TPP_Engine:
def __init__(self):
self.projects = {}
def load_project(self, config_path):
proj = Project(Path(config_path))
self.projects[proj.get_name()] = proj'''
# NOTE(review): dead code -- this is a verbatim string snapshot of a previous
# TPP_Engine implementation, kept only for reference and never executed.
# Consider deleting it (or moving it to version-control history).
old = '''class TPP_Engine:
    def __init__(self):
        try:
            os.makedirs(Path.home() / Path("top_pro_pack/bin"))
            print("Initializing top_pro_pack data folder at {}".format(Path.home() / Path("top_pro_pack/bin")))
        except:
            print("top_pro_pack data files located at {}".format(Path.home() / Path("top_pro_pack/bin")))
        self.base_lib_path = Path.home() / Path("top_pro_pack")
        self.projects = {}
        self.exclude_backbone = False
        self.distance_cutoff = 6
    def add_protein(self, project_name, name, file_path, json_load=True, data_load=True, data_url="https://files.rcsb.org/download/{}", raw_data=None):
        out = self.init_protein(name, file_path, json_load=json_load, data_load=data_load, data_url=data_url.format(name), raw_data=raw_data)
        if type(out) is Exception: print(out)
        else: self.projects[project_name].append(out)
    def add_dataset(self, project_name, proteins, modifers={"json_load": True, "data_load": True, "data_url": "https://files.rcsb.org/download/{}", "raw_data": None}):
        prev_pdb = ""
        for pdb, file_path in proteins:
            if prev_pdb != pdb: self.add_protein(project_name, pdb, file_path, json_load=modifers["json_load"], data_load=modifers["data_load"], data_url=modifers["data_url"], raw_data=modifers["raw_data"])
            prev_pdb = pdb
    def create_new_project(self, name="project_{}", exclude_backbone=False, distance_cutoff=6, proteins=None):
        if name == "project_{}":
            name = name.format(len(self.projects)+1)
        print("Attempting to create new project: {}".format(name))
        try:
            project_path = self.base_lib_path / Path("bin/{}".format(name))
            os.makedirs(project_path)
            self.projects[name] = []
            if proteins is not None:
                self.add_dataset(name, proteins)
            print("Project {} created!".format(name))
        except:
            print("Project {} already exists, cancelling operation".format(name))
    def load_protein_json(self, project, name):
        file_path = self.base_lib_path / Path("bin/{}/{}/data.json".format(project, name))
        P = centroid_protein.CentroidProtein("", "", load_json=True, json_data_file_path=file_path)
        #self.proteins.append(P)
        self.E.update_static_total_pairs_table(P.get_heatmap_data_centroid())
        return P
    def init_protein(self, project, name, file_path, json_load=True, data_load=True, data_url="https://files.rcsb.org/download/{}", raw_data=None):
        if name in os.listdir(os.getcwd() + "\\top_pro_pack_logs") and json_load:
            print("Attempting to load {} from JSON".format(name))
            return self.load_protein_json(project, name)
        elif len(file_path) > 0:
            print("Atempting to process {} from directly from pdb file".format(name))
            try: P = centroid_protein.CentroidProtein(name, file_path, exclude_backbone=self.exclude_backbone)
            except:
                e = sys.exc_info()[0]
                return Exception(e)
            if len(P.residues) > 0:
                P.generate_centroid_cliques(distance_cutoff=self.distance_cutoff)
                # self.proteins.append(P)
                #self.E.update_static_total_pairs_table(P.get_heatmap_data_centroid())
                return P
            else:
                return Exception("{} is empty".format(P.name))
        elif data_load and data_url is not None:
            data_url = data_url.format(name[:4] + ".pdb")
            print("Attempting to download/process {} from RCSB".format(name))
            try:
                P = centroid_protein.CentroidProtein(name, "", exclude_backbone=self.exclude_backbone,
                                                     download_data=data_load, data_url=data_url)
            except sqlalchemy.orm.exc.NoResultFound:
                return Exception("{} does not exist in RCSB database".format(name))
            if len(P.residues) > 0:
                P.generate_centroid_cliques(distance_cutoff=self.distance_cutoff)
                # self.proteins.append(P)
                #self.E.update_static_total_pairs_table((P.get_heatmap_data_centroid()))
                return P
            else:
                return Exception("{} is empty".format(P.name))
        elif raw_data != None:
            print("Atempting to process {} from raw text".format(name))
            try: P = centroid_protein.CentroidProtein(name, "", exclude_backbone=self.exclude_backbone, download_data=data_load, data_url=data_url, raw_data=raw_data)
            except:
                e = sys.exc_info()[0]
                return Exception(e)
            if len(P.residues) > 0:
                P.generate_centroid_cliques(distance_cutoff=self.distance_cutoff)
                # self.proteins.append(P)
                #self.E.update_static_total_pairs_table((P.get_heatmap_data_centroid()))
                return P
            else:
                return Exception("{} is empty".format(P.name))
        else:
            print("All processing attempts failed for {}, check provided info and try again".format(name))'''
src/task_runner/tests/test_api.py | tessia-project/tessia-mesh | 0 | 12798195 | <gh_stars>0
# Copyright 2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=redefined-outer-name,no-self-use
"""
Task Runner API tests
"""
#
# IMPORTS
#
import pytest
from starlette.testclient import TestClient
from task_runner.api.star_app import create_app
#
# CONSTANTS AND DEFINITIONS
#
#
# CODE
#
@pytest.fixture
def client():
    """Yield a test client wrapping a freshly created task-runner app.

    The ``with`` block drives the app's startup/shutdown events.
    """
    app = create_app()
    with TestClient(app) as test_client:
        yield test_client
# client()
def test_api_info_responses_are_valid(client):
    """Fetch the API info document and the schema endpoint it points at."""
    api_info = client.get('/').json()
    first_api = api_info['apis'][0]
    schema_info = client.get(f"{first_api['root']}/schema").json()

    assert api_info['name'] == 'task_runner'
    assert 'version' in first_api
    assert 'min_version' in first_api
    assert '/' in schema_info
# test_api_info_responses_are_valid()
class TestApiV1:
    """Tests for API v1"""
    # Shared payload: an "echo" state machine that sleeps long enough (5s)
    # for the tests to observe it in the 'running' state before stopping it.
    ECHO_MACHINE = {
        'machine': 'echo', 'parameters': """
        echo Machine starting
        sleep 5
        echo Machine stopping
    """}
    def test_invalid_request_is_rejected(self, client):
        """Invalid (wrong schema) requests are rejected"""
        resp = client.post(
            '/v1/tasks',
            json={'without-machine-parameters': 'request-is-invalid'})
        assert resp.status_code == 400
    # test_invalid_request_is_rejected()
    def test_not_found_return_code_is_404(self, client):
        """API returns 404 for a task that is not existing"""
        resp = client.get('/v1/tasks/some-unspecified-id')
        assert resp.status_code == 404
    # test_not_found_return_code_is_404()
    def test_echo_machine_is_started_and_stopped(self, client):
        """Echo machine can be started and stopped"""
        resp_create = client.post('/v1/tasks', json=self.ECHO_MACHINE)
        task_created = resp_create.json()
        resp_status = client.get(f'/v1/tasks/{task_created["taskId"]}')
        task_status = resp_status.json()
        resp_stop = client.post(f'/v1/tasks/{task_created["taskId"]}/stop')
        stop_status = resp_stop.json()
        assert resp_create.status_code == 201
        assert task_created['taskId']
        assert resp_status.status_code == 200
        assert task_status['taskId'] == task_created['taskId']
        assert task_status['state']  # just anything
        assert resp_stop.status_code == 200
        assert stop_status['taskId'] == task_created['taskId']
    # test_echo_machine_is_started_and_stopped()
    def test_parallel_tasks_are_running(self, client):
        """Start two tasks and expect them to be running"""
        resp_create = client.post('/v1/tasks', json=self.ECHO_MACHINE)
        task_1 = resp_create.json()
        resp_create = client.post('/v1/tasks', json=self.ECHO_MACHINE)
        task_2 = resp_create.json()
        resp_list = client.get('/v1/tasks/')
        task_list_running = resp_list.json()
        # Stop both tasks so they do not outlive the test.
        client.post(f'/v1/tasks/{task_1["taskId"]}/stop')
        client.post(f'/v1/tasks/{task_2["taskId"]}/stop')
        assert task_list_running == [
            {'taskId': task_1["taskId"], 'state': 'running'},
            {'taskId': task_2["taskId"], 'state': 'running'}]
        assert task_1["taskId"] != task_2["taskId"]
    # test_parallel_tasks_are_running()
    def test_remove_running_task(self, client):
        """Start two tasks, stop one and check reported task lists"""
        task_1 = client.post('/v1/tasks', json=self.ECHO_MACHINE).json()
        task_2 = client.post('/v1/tasks', json=self.ECHO_MACHINE).json()
        task_list_running = client.get('/v1/tasks/').json()
        client.post(f'/v1/tasks/{task_1["taskId"]}/stop')
        client.delete(f'/v1/tasks/{task_1["taskId"]}')
        task_list_remaining = client.get('/v1/tasks/').json()
        client.post(f'/v1/tasks/{task_2["taskId"]}/stop')
        assert task_list_running == [
            {'taskId': task_1["taskId"], 'state': 'running'},
            {'taskId': task_2["taskId"], 'state': 'running'}]
        assert task_list_remaining == [
            {'taskId': task_2["taskId"], 'state': 'running'}]
    # test_remove_running_task()
# TestApiV1
| 1.984375 | 2 |
tap/parser.py | cans/tappy-pkg | 0 | 12798196 | <reponame>cans/tappy-pkg
# Copyright (c) 2015, <NAME>
import re
from tap.directive import Directive
from tap.line import Bail, Diagnostic, Plan, Result, Unknown, Version
class Parser(object):
    """A parser for TAP files and lines."""

    # ok and not ok share most of the same characteristics.
    result_base = r"""
        \s*                    # Optional whitespace.
        (?P<number>\d*)        # Optional test number.
        \s*                    # Optional whitespace.
        (?P<description>[^#]*) # Optional description before #.
        \#?                    # Optional directive marker.
        \s*                    # Optional whitespace.
        (?P<directive>.*)      # Optional directive text.
    """
    ok = re.compile(r'^ok' + result_base, re.VERBOSE)
    not_ok = re.compile(r'^not\ ok' + result_base, re.VERBOSE)
    plan = re.compile(r"""
        ^1..(?P<expected>\d+) # Match the plan details.
        [^#]*                 # Consume any non-hash character to confirm only
                              # directives appear with the plan details.
        \#?                   # Optional directive marker.
        \s*                   # Optional whitespace.
        (?P<directive>.*)     # Optional directive text.
    """, re.VERBOSE)
    diagnostic = re.compile(r'^#')
    bail = re.compile(r"""
        ^Bail\ out!
        \s*            # Optional whitespace.
        (?P<reason>.*) # Optional reason.
    """, re.VERBOSE)
    version = re.compile(r'^TAP version (?P<version>\d+)$')

    TAP_MINIMUM_DECLARED_VERSION = 13

    def parse_file(self, filename):
        """Parse a TAP file and determine what each line in the file is.

        This is a generator method that will yield each parsed line. The
        filename is assumed to exist.
        """
        with open(filename, 'r') as tap_file:
            for raw_line in tap_file:
                yield self.parse_line(raw_line.rstrip())

    def parse_line(self, text):
        """Parse a line into whatever TAP category it belongs."""
        # Try each category in grammar order; the first match wins.
        dispatch = (
            (self.ok, lambda match: self._parse_result(True, match)),
            (self.not_ok, lambda match: self._parse_result(False, match)),
            (self.diagnostic, lambda match: Diagnostic(text)),
            (self.plan, self._parse_plan),
            (self.bail, lambda match: Bail(match.group('reason'))),
            (self.version, self._parse_version),
        )
        for pattern, handler in dispatch:
            match = pattern.match(text)
            if match:
                return handler(match)
        return Unknown()

    def _parse_plan(self, match):
        """Parse a matching plan line."""
        directive = Directive(match.group('directive'))
        # Only SKIP directives are allowed in the plan.
        if directive.text and not directive.skip:
            return Unknown()
        return Plan(int(match.group('expected')), directive)

    def _parse_result(self, ok, match):
        """Parse a matching result line into a result instance."""
        number = match.group('number')
        description = match.group('description').strip()
        return Result(ok, number, description,
                      Directive(match.group('directive')))

    def _parse_version(self, match):
        """Parse a version line, rejecting any declared version below 13."""
        declared = int(match.group('version'))
        if declared < self.TAP_MINIMUM_DECLARED_VERSION:
            raise ValueError('It is an error to explicitly specify '
                             'any version lower than 13.')
        return Version(declared)
| 2.796875 | 3 |
solutions/python3/811.py | sm2774us/amazon_interview_prep_2021 | 42 | 12798197 | <filename>solutions/python3/811.py<gh_stars>10-100
class Solution:
    def subdomainVisits(self, cpdomains):
        """Return visit counts for every domain and parent domain.

        Each entry of *cpdomains* is "<count> <domain>"; every suffix of
        the domain (e.g. "leetcode.com" and "com" for
        "discuss.leetcode.com") accumulates that count. Output items are
        formatted "<total> <domain>" in arbitrary order.
        """
        # Local import makes the snippet self-contained: the original used
        # `collections.Counter` without importing `collections` anywhere,
        # which raises NameError outside the LeetCode sandbox.
        from collections import Counter
        counter = Counter()
        for cpdomain in cpdomains:
            # "900 a.b.com" -> ["900", "a", "b", "com"]
            count, *domains = cpdomain.replace(" ", ".").split(".")
            for i in range(len(domains)):
                counter[".".join(domains[i:])] += int(count)
        return [" ".join((str(v), k)) for k, v in counter.items()]
directorio de trabajo/Jorge/convertirGeodata/convertir_geodata.py | felinblackcat/Trabajo1TAE2020 | 0 | 12798198 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 17 23:03:32 2020
@author: quipo
"""
import pandas as pd
import numpy as np
import re
from unidecode import unidecode
def diccionario_quitar_tildes(col):
    """Build a per-column replacement map that strips Spanish accents.

    Returns ``{col: {...}}`` in the shape expected by
    ``DataFrame.replace(..., regex=True)``.
    """
    accented = 'áÁéÉíÍóÓúÚ'
    plain = 'aAeEiIoOuU'
    return {col: dict(zip(accented, plain))}
# Load the pre-processed incident dataset and keep only years up to 2018.
data_completo=pd.read_csv('../../datos_proscesados.csv',sep=',',encoding='utf8')
# NOTE(review): a pandas Series' dtype is never the string "str" (text
# columns report dtype "object"), so this strip() branch is a no-op here
# and in the two apply() calls below -- confirm whether stripping was
# actually intended.
data_completo = data_completo.apply(lambda x: x.str.strip() if(x.dtype == "str") else x)
data_analizar=data_completo
data_analizar=data_analizar[data_analizar['PERIODO']<=2018]
# Clustering results: one cluster label per neighbourhood (BARRIO).
data_clustering = pd.read_excel("../../Andres/cluster/clustering.xlsx")
data_clustering=data_clustering[['BARRIO','cluster']]
data_clustering = data_clustering.apply(lambda x: x.str.strip() if(x.dtype == "str") else x)
# Geographic dataframe of neighbourhoods; drop its stale CLUSTER column
# and re-attach the fresh labels by joining on the neighbourhood name.
data=pd.read_csv('geoDataframe_cluster.csv',sep=';',encoding='utf8')
data = data.apply(lambda x: x.str.strip() if(x.dtype == "str") else x)
print(data.columns)
data =data.drop('CLUSTER',axis=1)
joindata = pd.merge(data,data_clustering, how='left', left_on=['NOMBRE'], right_on = ['BARRIO'])
joindata=joindata.rename(columns={'cluster':'CLUSTER'})
# Neighbourhoods with no cluster assignment fall into cluster 3.
joindata['CLUSTER'] = joindata['CLUSTER'].fillna(3)
joindata=joindata.drop('BARRIO',axis=1)
#print(data['NOMBRE'])
# Cross-check: flag (ESTA=1) every BARRIO from the incident data that also
# appears in the geographic dataframe, then report the unmatched ones.
barrios=data_completo['BARRIO']
barrios_geo=data['NOMBRE']
barrios=barrios.drop_duplicates()
barrios_geo=barrios_geo.drop_duplicates()
barrios=pd.DataFrame(barrios)
barrios['ESTA']=0
print(barrios)
# Normalise geographic names: strip accents and lowercase for matching.
data['NOMBRE_MIN']=data['NOMBRE']
#data['NOMBRE_MIN']=data['NOMBRE_MIN'].apply(lambda x: str(unidecode(x)))
data=data.replace(diccionario_quitar_tildes('NOMBRE_MIN'), regex=True)
data['NOMBRE_MIN']=data['NOMBRE_MIN'].str.lower()
#barrios.to_csv('barrios.csv',sep=',',encoding='utf8',index=False)
#data.drop_duplicates(subset='...')
"""
for index, value in barrios.items():
    data.loc[data.NOMBRE_MIN == f'{value}', 'ESTA']=1
"""
for index, value in barrios_geo.items():
    barrios.loc[barrios.BARRIO== f'{value}', 'ESTA']=1
print(barrios[barrios['ESTA']==0])
barrios.to_csv('barrios.csv',sep=';',encoding='utf8',index=False)
# Replace the display name with its normalised form and keep only the
# columns needed downstream.
data['NOMBRE']=data['NOMBRE_MIN']
data=data.drop('NOMBRE_MIN',axis=1)
data=data[['OBJECTID', 'CODIGO','NOMBRE',
       'SUBTIPO_BA', 'NOMBRE_COM', 'SHAPEAREA', 'SHAPELEN']]
#print(data.columns)
#data.to_csv('geoDataframe_funciona.csv',sep=',',encoding='utf8',index=False)
# Clamp cluster labels to {0, 1, 2}; anything else becomes 3.
joindata['CLUSTER_CORREGIDO']=3
joindata.loc[joindata.CLUSTER == 0, 'CLUSTER_CORREGIDO']=0
joindata.loc[joindata.CLUSTER == 1, 'CLUSTER_CORREGIDO']=1
joindata.loc[joindata.CLUSTER == 2, 'CLUSTER_CORREGIDO']=2
joindata=joindata.drop('CLUSTER',axis=1)
joindata['CLUSTER']=joindata['CLUSTER_CORREGIDO']
joindata=joindata.drop('CLUSTER_CORREGIDO',axis=1)
#data_analizar
#dumies = pd.get_dummies(data_analizar.CLASE)
joindata.to_csv('geoDataframe_temporal.csv',sep=';',encoding='utf8',index=False)
#data_completo.to_csv('datos_proscesados.csv', encoding='utf-8')
tests/BaseTest.py | connor9/python-draytonwiser-api | 7 | 12798199 | <gh_stars>1-10
import os
import unittest
import logging
#logging.basicConfig(level=logging.INFO)
class BaseTest(unittest.TestCase):
    """Shared fixture configuration for the Drayton Wiser API tests."""

    def setUp(self):
        """Configure the hub address, API base URL, token and fixture file."""
        self.wiser_hub_ip = '192.168.1.171'
        self.base_url = "http://{}/data/domain/".format(self.wiser_hub_ip)
        self.token = "<PASSWORD>"
        self.source_data_file = "all-with-itrv.json"
        #self.source_data_file = "all-with-itrv-and-hotwater.json"

    def load_from_file(self, json_file):
        """Return the contents of ``data/<json_file>`` (relative to this
        test module) as a string."""
        test_dir = os.path.dirname(__file__)
        with open(os.path.join(test_dir, 'data/%s' % json_file), 'r') as f:
            return f.read()
getimage.py | junerye/test | 5 | 12798200 |
#!/usr/local/bin/python3
#encoding:utf8
'''
作用:爬取京东商城手机分类下的的所有手机商品的展示图片。
url:为需要爬取的网址
page:页数
'''
import re
import urllib.request
def getimage(url, page):
    """Download every product thumbnail from one JD.com listing page.

    Images are saved as ``Desktop/jd/<page>-<n>.jpg`` where *page* is the
    listing page number (used only to build file names) and *n* counts the
    images on that page starting from 1.
    """
    html = urllib.request.urlopen(url).read()
    # str() of the bytes gives the repr text; the regexes below match it.
    html = str(html)
    # Narrow the search to the product-listing container only.
    pattern1 = '<div id="plist".+? <div class="page clearfix">'
    rst1 = re.compile(pattern1).findall(html)[0]
    # One match per 220x220 product thumbnail inside the container.
    pattern2 = '<img width="220" height="220" .+?//.+?\.jpg'
    imagelist = re.compile(pattern2).findall(rst1)
    # Hoisted out of the loop: extracts the protocol-relative image URL.
    url_pattern = re.compile('//.+?\.jpg')
    for x, imageurl in enumerate(imagelist, start=1):
        imagename = "Desktop/jd/" + str(page) + "-" + str(x) + ".jpg"
        imageurl = "http:" + url_pattern.findall(imageurl)[0]
        try:
            urllib.request.urlretrieve(imageurl, filename=imagename)
        except urllib.error.URLError:
            # Skip images that fail to download. The counter now advances
            # exactly once per image; the original bumped it up to three
            # times on a failure, skipping file-name numbers.
            continue
# Scrape only the first listing page; raise the range() stop value to
# crawl more pages of the JD phone category (cat=9987,653,655).
for i in range(1, 2):
    url = "https://list.jd.com/list.html?cat=9987,653,655&page=" + str(i);
    getimage(url, i);
| 3.28125 | 3 |
DBSHA256/catalog/urls.py | darketmaster/DBSHA256 | 1 | 12798201 | <gh_stars>1-10
from django.urls import path
from . import views
# URL routes for the catalog app; the `name=` values are referenced by
# templates via {% url %} lookups.
urlpatterns = [
    path("", views.home, name="home"),
    #path("<name>", views.mainPage, name="main"),
    path("about/", views.about, name="about"),
    #path("contact/", views.contact, name="contact"),
    # NOTE(review): "home/" reuses views.home but under the name "main" --
    # confirm this aliasing is intentional.
    path("home/", views.home, name="main"),
    path("release/", views.release, name="release"),
    path("home2/", views.home2, name="home2"),
    path("test/", views.get_name, name="test"),
    path('compare/', views.compare, name='compare'),
    path('generate/', views.generate, name='generate'),
]
| 1.90625 | 2 |
app/server.py | LlamaComedian/personal-cw-proto | 0 | 12798202 | import os
import json
from flask import Flask, render_template
from flask.ext.assets import Environment
app = Flask(__name__)
app.debug = True  # NOTE(review): debug mode is for prototyping only; disable before any deployment
# govuk_template asset path
@app.context_processor
def asset_path_context_processor():
    """Expose the static asset roots to every rendered template."""
    return {
        'asset_path': '/static/govuk-template/',
        'prototypes_asset_path': '/static/'
    }
@app.route('/')
def home():
    """Prototype landing page."""
    return render_template('index.html')
@app.errorhandler(404)
def page_not_found(e):
    """Render the prototype's custom 404 page for unknown URLs."""
    return render_template('common/proto-404.html'), 404
@app.route('/404')
def edge_of_proto(e=None):
    """Preview the 'edge of prototype' 404 page directly.

    `e` defaults to None: Flask passes no argument to a plain route
    handler, so the original required parameter made GET /404 raise a
    TypeError instead of rendering the page.
    """
    return render_template('common/proto-404.html')
@app.route('/proto')
def proto():
    """Alternate prototype index."""
    return render_template('index2.html')
@app.route('/hack-day')
def hackday():
    """Hack-day prototype index."""
    return render_template('index-hack.html')
# ---------------------------------------------------------------------------
#casework prototype list
@app.route('/casework/cases')
def casework_case_list():
    """List all casework cases from the static JSON fixture."""
    # Context manager closes the fixture file; the original leaked the
    # handle by never calling close().
    with open('app/static/data/casework-list.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('casework/case-list.html', data=data)
#casework details page
@app.route('/casework/cases/<ABR>')
def casework_case_details(ABR):
    """Show one case; ABR names the JSON fixture to load.

    ABR comes straight from the URL, so reject anything that could escape
    the data directory (path traversal), and treat a missing fixture as a
    404 instead of an unhandled 500.
    """
    if '/' in ABR or '\\' in ABR or '..' in ABR:
        return render_template('common/proto-404.html'), 404
    try:
        with open('app/static/data/' + ABR + '.json', "r") as json_data:
            data = json.load(json_data)
    except IOError:
        return render_template('common/proto-404.html'), 404
    return render_template('casework/case-details.html', data=data, backpage='/casework/cases')
# ---------------------------------------------------------------------------
#hackday
# Each handler below renders one screen and passes `next_page` so the
# template can chain to the following step of the flow.
@app.route('/hackday/land-ownership-record')
def hackday_land_record():
    return render_template('hackday/land-record.html', next_page="404")
@app.route('/hackday/land-ownership-record-1')
def hackday_land_record_1():
    return render_template('hackday/land-record-1.html', next_page="404")
@app.route('/hackday/land-ownership-record-2')
def hackday_land_record_2():
    return render_template('hackday/land-record-2.html', next_page="404")
# ---------------------------------------------------------------------------
# LAST OF THE ALPHA PROTOTYPES!
# A "citizen facing" register concept
#
# If we're having to download a "legal copy" then this page can be much more straightforward
@app.route('/register-view/register-view-citizen-1')
def register_view_citizen_1():
    return render_template('register-view/register-view-citizen-1.html', next_page="404")
# ---------------------------------------------------------------------------
# -----------------
@app.route('/common/payment')
def common_payment():
    """Shared mock card-payment screen used by several flows."""
    return render_template('common/payment.html', next_page="/")
# ---------------------------------------------------------------------------
# Static GOV.UK-styled pages: search, results and two property-details
# page versions.
# GOV.UK pages, search / start v2.0 -----------------
@app.route('/govuk/search-2.0')
def govuk_search_2_0():
    return render_template('govuk-views/search-2.0.html')
# GOV.UK pages, results listing v2.0 -----------------
@app.route('/govuk/results-2.0')
def govuk_results_2_0():
    return render_template('govuk-views/results-2.0.html')
# GOV.UK pages, property details v2.0 -----------------
@app.route('/govuk/property-details-2.0')
def govuk_property_details_2_0():
    return render_template('govuk-views/property-details-2.0.html')
# GOV.UK pages, property details v2.1 -----------------
@app.route('/govuk/property-details-2.1')
def govuk_property_details_2_1():
    return render_template('govuk-views/property-details-2.1.html')
# ---------------------------------------------------------------------------
# scenario: user wants to find out who owns a property
# starts on GOV.UK and flows into register view
# Flow A: search -> results -> details -> GOV.UK Verify sub-flow ->
# register view -> changes view. `next_page` chains the screens.
@app.route('/find-owner/search')
def find_owner_search():
    return render_template('user-find-owner/search.html', next_page="/find-owner/results")
# GOV.UK pages, results listing -----------------
@app.route('/find-owner/results')
def find_owner_results():
    return render_template('user-find-owner/results.html', next_page="/find-owner/property-details-2.0")
# GOV.UK pages, property details v2.0 -----------------
@app.route('/find-owner/property-details-2.0')
def find_owner_details_2_0():
    return render_template('user-find-owner/property-details-2.0.html', next_page="/find-owner/verify")
# GOV.UK pages, IDA/Credit Card/login stuff -----------------
# Step 1 - login with GOV.UK Verify - use sub flow...
# Sub flow - GOV.UK Verification ---------------------
# GOV.UK verify - Sub flow Step 1 - for conveyancer create relationship flow
@app.route('/find-owner/verify')
def find_owner_verify():
    return render_template('user-find-owner/govuk-verify/verify-intro.html', next_page="/find-owner/who-verified-you")
# GOV.UK verify - Sub flow Step 2 - who verified you
@app.route('/find-owner/who-verified-you')
def find_owner_verify_who():
    return render_template('user-find-owner/govuk-verify/verify-who.html', next_page="/find-owner/experian-sign-in")
# GOV.UK verify - Sub flow Step 3 - experian sign in
@app.route('/find-owner/experian-sign-in')
def find_owner_verify_experian_sign_in_1():
    return render_template('user-find-owner/govuk-verify/verify-sign-in.html', next_page="/find-owner/experian-sign-in-part-2")
# GOV.UK verify - Sub flow Step 4 - experian 2nd phase sign in
@app.route('/find-owner/experian-sign-in-part-2')
def find_owner_verify_experian_sign_in_2nd_part_1():
    return render_template('user-find-owner/govuk-verify/verify-sign-in-2.html', next_page="/find-owner/register-view")
# end Sub flow - GOV.UK Verification ---------------------
# GOV.UK pages, property details v2.0 -----------------
@app.route('/find-owner/register-view')
def find_owner_register_view():
    return render_template('user-find-owner/register-3.0.html', next_page="/find-owner/changes-view")
# GOV.UK pages, property details v2.0 -----------------
@app.route('/find-owner/changes-view')
def find_owner_historian_view():
    return render_template('user-find-owner/changes-1.0.html', next_page="/")
# ---------------------------------------------------------------------------
# scenario: user wants to find out who owns a property (IDA + payment)
# starts on GOV.UK and flows into register view
# Flow B: same as flow A but adds a card-payment step after Verify.
@app.route('/find-owner/b/search')
def find_owner_b_search():
    return render_template('user-find-owner/search.html', next_page="/find-owner/b/results")
# GOV.UK pages, results listing -----------------
@app.route('/find-owner/b/results')
def find_owner_b_results():
    return render_template('user-find-owner/results.html', next_page="/find-owner/b/property-details-2.0")
# GOV.UK pages, property details v2.0 -----------------
@app.route('/find-owner/b/property-details-2.0')
def find_owner_b_details_2_0():
    return render_template('user-find-owner/property-details-2.1.html', next_page="/find-owner/b/verify")
# Sub flow - GOV.UK Verification ---------------------
# GOV.UK verify - Sub flow Step 1
@app.route('/find-owner/b/verify')
def find_owner_b_verify():
    return render_template('user-find-owner/govuk-verify/verify-intro.html', next_page="/find-owner/b/who-verified-you")
# GOV.UK verify - Sub flow Step 2
@app.route('/find-owner/b/who-verified-you')
def find_owner_b_verify_who():
    return render_template('user-find-owner/govuk-verify/verify-who.html', next_page="/find-owner/b/experian-sign-in")
# GOV.UK verify - Sub flow Step 3 - experian sign in
@app.route('/find-owner/b/experian-sign-in')
def find_owner_b_verify_experian_sign_in_1():
    return render_template('user-find-owner/govuk-verify/verify-sign-in.html', next_page="/find-owner/b/experian-sign-in-part-2")
# GOV.UK verify - Sub flow Step 4 - experian 2nd phase sign in
@app.route('/find-owner/b/experian-sign-in-part-2')
def find_owner_b_verify_experian_sign_in_2nd_part_1():
    return render_template('user-find-owner/govuk-verify/verify-sign-in-2.html', next_page="/find-owner/b/card-payment")
# end Sub flow - GOV.UK Verification ---------------------
# Sub flow - card payment ---------------------
# GOV.UK pages, accept cost to view register -----------------
# NOTE(review): nothing links to /find-owner/b/accept-cost (Verify goes
# straight to card-payment) -- confirm whether this step was dropped.
@app.route('/find-owner/b/accept-cost')
def find_owner_b_accept_cost():
    return render_template('user-find-owner/accept-cost.html', next_page="/find-owner/b/card-payment")
# GOV.UK pages, pay to view register -----------------
@app.route('/find-owner/b/card-payment')
def find_owner_b_card_payment():
    return render_template('common/payment.html', next_page="/find-owner/register-view")
# end sub flow - card payment ---------------------
# ---------------------------------------------------------------------------
# scenario: user wants to find out who owns a property rouute c - (IDA) (real fake title)
# starts on GOV.UK and flows into register view
# Flow C: like flow A but ends on the "real fake title" register page.
@app.route('/find-owner/c/search')
def find_owner_c_search():
    return render_template('user-find-owner/search.html', next_page="/find-owner/c/results")
# GOV.UK pages, results listing -----------------
@app.route('/find-owner/c/results')
def find_owner_c_results():
    return render_template('user-find-owner/results-c.html', next_page="/find-owner/c/property-details-2.0")
# GOV.UK pages, property details v2.0 -----------------
@app.route('/find-owner/c/property-details-2.0')
def find_owner_c_details_2_0():
    return render_template('user-find-owner/property-details-2.1-c.html', next_page="/find-owner/c/verify")
# Sub flow - GOV.UK Verification ---------------------
# GOV.UK verify - Sub flow Step 1
@app.route('/find-owner/c/verify')
def find_owner_c_verify():
    return render_template('user-find-owner/govuk-verify/verify-intro.html', next_page="/find-owner/c/who-verified-you")
# GOV.UK verify - Sub flow Step 2
@app.route('/find-owner/c/who-verified-you')
def find_owner_c_verify_who():
    return render_template('user-find-owner/govuk-verify/verify-who.html', next_page="/find-owner/c/experian-sign-in")
# GOV.UK verify - Sub flow Step 3 - experian sign in
@app.route('/find-owner/c/experian-sign-in')
def find_owner_c_verify_experian_sign_in_1():
    return render_template('user-find-owner/govuk-verify/verify-sign-in.html', next_page="/find-owner/c/experian-sign-in-part-2")
# GOV.UK verify - Sub flow Step 4 - experian 2nd phase sign in
@app.route('/find-owner/c/experian-sign-in-part-2')
def find_owner_c_verify_experian_sign_in_2nd_part_1():
    return render_template('user-find-owner/govuk-verify/verify-sign-in-2.html', next_page="/find-owner/c/register-view")
# end Sub flow - GOV.UK Verification ---------------------
# GOV.UK pages, property details v2.0 -----------------
@app.route('/find-owner/c/register-view')
def find_owner_c_register_view():
    return render_template('register-view/register-test-title.html')
# ---------------------------------------------------------------------------
# scenario: user wants to find out ... something about a property
# starts on GOV.UK and flows into register view
# Verify + Payment + real fake title
# Flow D combines flow B's payment step with flow C's register page.
@app.route('/find-owner/d/search')
def find_owner_d_search():
    return render_template('user-find-owner/search.html', next_page="/find-owner/d/results")
# GOV.UK pages, results listing -----------------
@app.route('/find-owner/d/results')
def find_owner_d_results():
    return render_template('user-find-owner/results-c.html', next_page="/find-owner/d/property-details-2.0")
# GOV.UK pages, property details v2.0 -----------------
@app.route('/find-owner/d/property-details-2.0')
def find_owner_d_details_2_0():
    return render_template('user-find-owner/property-details-2.1-c.html', next_page="/find-owner/d/verify")
# Verify ---------------------
# verify - Step 1
@app.route('/find-owner/d/verify')
def find_owner_d_verify():
    return render_template('user-find-owner/govuk-verify/verify-intro.html', next_page="/find-owner/d/who-verified-you")
# verify - Step 2
@app.route('/find-owner/d/who-verified-you')
def find_owner_d_verify_who():
    return render_template('user-find-owner/govuk-verify/verify-who.html', next_page="/find-owner/d/experian-sign-in")
# verify - Step 3 - experian sign in
@app.route('/find-owner/d/experian-sign-in')
def find_owner_d_verify_experian_sign_in_1():
    return render_template('user-find-owner/govuk-verify/verify-sign-in.html', next_page="/find-owner/d/experian-sign-in-part-2")
# verify - Step 4 - experian 2nd phase sign in
@app.route('/find-owner/d/experian-sign-in-part-2')
def find_owner_d_verify_experian_sign_in_2nd_part_1():
    return render_template('user-find-owner/govuk-verify/verify-sign-in-2.html', next_page="/find-owner/d/card-payment")
# end Verify ---------------------
# card payment ---------------------
# pay to view register -----------------
@app.route('/find-owner/d/card-payment')
def find_owner_d_card_payment():
    return render_template('common/payment.html', next_page="/find-owner/d/register-view")
# end card payment ---------------------
# GOV.UK pages, property details v2.0 -----------------
@app.route('/find-owner/d/register-view')
def find_owner_d_register_view():
    return render_template('register-view/register-test-title.html')
# ---------------------------------------------------------------------------
# Alternate Register view. V4 with sections fully open
@app.route('/register-view/register-view-4-expanded')
def register_view_4_0_expanded():
    """Register v4 variant with every section pre-expanded."""
    return render_template('register-view/register-test-title-expanded.html', next_page="404")
# ---------------------------------------------------------------------------
# ---------------------------------------------------------------------------
# Alternate Register view. V4 with help on show
@app.route('/register-view/register-view-4-help-text')
def register_view_4_0_help_text():
    """Register v4 variant with inline help text visible."""
    return render_template('register-view/register-test-title-help.html', next_page="404")
# ---------------------------------------------------------------------------
# Transfer prototypes, login page
@app.route('/transfer/login')
def transfer_login():
return render_template('common/login.html', next_page="/transfer/conveyancer-case-list")
# Transfer prototype routes. Handlers that show canned fixture data now load
# it with a context manager so the file handle is closed promptly (the
# originals called open() without ever closing it). Re-indented: this
# section had lost its indentation entirely.
# Transfer prototypes, conveyancer-case-list page
@app.route('/transfer/conveyancer-case-list')
def conveyancer_case_list():
    """List the conveyancer's cases from canned JSON fixture data."""
    with open('app/static/data/cases.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/conveyancer-case-list.html', data=data)

# Transfer prototypes, create transfer page
@app.route('/transfer/create-transfer')
def create_transfer():
    with open('app/static/data/complete-transfer.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/create-transfer.html', editable=True, data=data)

# Transfer prototypes, new provisions page
@app.route('/transfer/new-provisions')
def transfer_new_provisions():
    return render_template('transfer/new-provisions.html')

# Transfer prototypes, mortgage details page
@app.route('/transfer/mortgage-details')
def transfer_mortgage_details():
    return render_template('transfer/mortgage-details.html')

# Transfer prototypes, mortgage details entered page
@app.route('/transfer/mortgage-details-entered')
def transfer_mortgage_details_entered():
    return render_template('transfer/mortgage-details-entered.html')

# Transfer prototypes, summary page
@app.route('/transfer/summary')
def transfer_summary():
    with open('app/static/data/complete-transfer.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/summary.html', editable=True, conveyancer="buyer", data=data)

# Transfer prototypes, summary with no mortgage details page
@app.route('/transfer/summary-no-mortgage')
def transfer_summary_no_mortgage():
    with open('app/static/data/no-mortgage.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/summary-no-mortgage.html', editable=True, conveyancer="buyer", data=data)

# Transfer prototypes, transfer that has been withdrawn
@app.route('/transfer/transfer-withdrawn')
def transfer_withdrawn():
    with open('app/static/data/withdrawn-transfer.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/transfer-withdrawn.html', editable=True, data=data)

# Transfer prototypes, summary with option to withdraw
@app.route('/transfer/summary-withdraw-option')
def transfer_withdraw_option():
    with open('app/static/data/complete-transfer.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/summary-withdraw-option.html', editable=False, data=data)

# Transfer prototypes, summary with empty states
@app.route('/transfer/transfer-empty-states')
def transfer_empty_states():
    with open('app/static/data/incomplete-transfer.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/transfer-empty-states.html', editable=True, data=data)

# Transfer prototypes, done page
@app.route('/transfer/done')
def transfer_done():
    return render_template('transfer/done.html')

# Transfer prototypes, signing the transfer page
@app.route('/transfer/transfer-signing')
def transfer_signing():
    with open('app/static/data/ready-to-sign-transfer.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/transfer-signing.html', editable=False, data=data, role="buyer")

# Transfer prototypes, signing the transfer page
@app.route('/transfer/transfer-signing-seller')
def transfer_signing_seller():
    with open('app/static/data/ready-to-sign-transfer.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/transfer-signing-seller.html', editable=False, data=data, role="seller")
# ---------------------------------------------------------------------------
# Second-conveyancer transfer journey. JSON fixtures are loaded with a
# context manager (the originals leaked the file handle). Re-indented:
# this section had lost its indentation entirely.
# Transfer prototypes - 2nd conveyancer, Step 1 - login page
@app.route('/transfer-2nd-con/login')
def transfer_2nd_conveyancer_login():
    return render_template('common/login.html', next_page="/transfer-2nd-con/conveyancer-case-list")

# Transfer prototypes - 2nd conveyancer, Step 2 - conveyancer-case-list
@app.route('/transfer-2nd-con/conveyancer-case-list')
def transfer_2nd_conveyancer_case_list():
    with open('app/static/data/cases-seller.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer-2nd-conveyancer/conveyancer-case-list.html', data=data)

# Transfer prototypes - 2nd conveyancer, Step 3 - confirm page
@app.route('/transfer-2nd-con/review-transfer')
def transfer_2nd_conveyancer_review_transfer():
    with open('app/static/data/complete-transfer.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer-2nd-conveyancer/review-transfer.html', editable=False, data=data, role="seller")

# Transfer prototypes - 2nd conveyancer, Step 4 - transfer ready to sign
@app.route('/transfer-2nd-con/marked-ready')
def transfer_2nd_conveyancer_marked_ready():
    return render_template('transfer-2nd-conveyancer/marked-ready.html')

# Transfer prototypes, transfer that has been withdrawn
@app.route('/transfer-2nd-con/transfer-withdrawn')
def transfer_2nd_con_withdrawn():
    with open('app/static/data/withdrawn-transfer.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/transfer-withdrawn.html', editable=False, data=data)
# ---------------------------------------------------------------------------
# Transfer-and-charge signing journeys (v2.0 and v3). Render-only pages plus
# one fixture-backed signing page, which now closes its JSON file via a
# context manager. Re-indented: this section had lost its indentation.
# Transaction flows, citizens sign transfer and charge v2.0 -----------------
@app.route('/transfer-and-charge/citizen-1-start')
def transfer_and_charge_citizen_1_start_2_0():
    return render_template('transfer-and-charge/citizen-1-start-2.0.html', next_page="citizen-1-login")

# Step 1 - login with GOV.UK Verify
@app.route('/transfer-and-charge/citizen-1-login')
def transfer_and_charge_citizen_1_login_2_0():
    return render_template('transfer-and-charge/citizen-1-login-2.0.html', next_page="citizen-1-enter-token")

# Step 2 - Client 1 enters token
@app.route('/transfer-and-charge/citizen-1-enter-token')
def transfer_and_charge_citizen_1_enter_token_2_0():
    return render_template('transfer-and-charge/citizen-1-enter-token-2.0.html', next_page="citizen-1-sign-mortgage")

# Step 3 - Client 1 signs mortgage deed
@app.route('/transfer-and-charge/citizen-1-sign-mortgage')
def transfer_and_charge_citizen_1_sign_mortgage_2_0():
    return render_template('transfer-and-charge/citizen-1-sign-mortgage-2.0.html', next_page="citizen-1-sign-transfer")

# Step 4 - Client 1 signs transfer
@app.route('/transfer-and-charge/citizen-1-sign-transfer')
def transfer_and_charge_citizen_1_sign_transfer_2_0():
    return render_template('transfer-and-charge/citizen-1-sign-transfer-2.0.html', next_page="citizen-1-semi-confirmed")

# Step 5 - Client 1 - semi confirmation
@app.route('/transfer-and-charge/citizen-1-semi-confirmed')
def transfer_and_charge_citizen_1_semi_confirmed_2_0():
    return render_template('transfer-and-charge/citizen-1-semi-confirmed-2.0.html')

# ---------------------------------------------------------------------------
# Transaction flows, citizens sign transfer and charge v3 -----------------
# Step 1a - external process step - show user email
@app.route('/transfer-and-charge-v3/citizen-1-email')
def transfer_and_charge_citizen_1_email_3_0():
    return render_template('transfer-and-charge/citizen-1-email-2.0.html', next_page="citizen-1-start")

@app.route('/transfer-and-charge-v3/citizen-1-start')
def transfer_and_charge_citizen_1_start_3_0():
    return render_template('transfer-and-charge/citizen-1-start-2.0.html', next_page="citizen-1-login")

# Step 1 - login with GOV.UK Verify
@app.route('/transfer-and-charge-v3/citizen-1-login')
def transfer_and_charge_citizen_1_login_3_0():
    return render_template('transfer-and-charge/citizen-1-login-2.0.html', next_page="citizen-1-enter-token")

# Step 2 - Client 1 enters token
@app.route('/transfer-and-charge-v3/citizen-1-enter-token')
def transfer_and_charge_citizen_1_enter_token_3_0():
    return render_template('transfer-and-charge/citizen-1-enter-token-2.0.html', next_page="citizen-1-sign-mortgage")

# Step 3 - Client 1 signs mortgage deed
@app.route('/transfer-and-charge-v3/citizen-1-sign-mortgage')
def transfer_and_charge_citizen_1_sign_mortgage_3_0():
    return render_template('transfer-and-charge/citizen-1-sign-mortgage-2.0.html', next_page="/transfer-and-charge-v3/citizen-1-sign-transfer")

# Step 3 - Client 1 signs transfer deed
@app.route('/transfer-and-charge-v3/citizen-1-sign-transfer')
def transfer_and_charge_citizen_1_sign_transfer_3_0():
    with open('app/static/data/transfer-signing-data.json', "r") as json_data:
        data = json.load(json_data)
    return render_template('transfer/transfer-signing.html', next_page="/transfer-and-charge-v3/citizen-1-sms", data=data, role="citizen")

# Step 3a - external process step - show user sms message
@app.route('/transfer-and-charge-v3/citizen-1-sms')
def transfer_and_charge_citizen_1_sms_3_0():
    return render_template('transfer-and-charge/citizen-1-sms-2.0.html', next_page="citizen-1-2-factor-auth")

# Step 4 - Client 1 2 factor authentication
@app.route('/transfer-and-charge-v3/citizen-1-2-factor-auth')
def transfer_and_charge_citizen_1_2_factor_auth():
    return render_template('transfer-and-charge/citizen-1-2-factor.html', next_page="/transfer-and-charge-v3/citizen-1-semi-confirmed")

# Step 5 - Client 1 - semi confirmation
@app.route('/transfer-and-charge-v3/citizen-1-semi-confirmed')
def transfer_and_charge_citizen_1_semi_confirmed_3_0():
    return render_template('transfer-and-charge/citizen-1-semi-confirmed-2.0.html')
# ---------------------------------------------------------------------------
# Relationship-starts journeys: conveyancer initiates (v2.2), client 1 and
# client 2 confirm via GOV.UK Verify sub-flows, then citizen confirms v2.0.
# All handlers are render-only. Re-indented: this section had lost its
# indentation entirely.
# Transaction flows, relationship starts, conveyancer initiates v2.2 --------
@app.route('/relationship-starts/conveyancer-start')
def conveyancer_start_2_2():
    return render_template('relationship-starts/conveyancer-start-2.2.html')

# Step 1 - log in
@app.route('/relationship-starts/login')
def relationship_starts_login_2_2():
    return render_template('common/login.html', next_page="/relationship-starts/conveyancer-find-property")

# Step 2 - find correct property
@app.route('/relationship-starts/conveyancer-find-property')
def conveyancer_find_property_2_2():
    return render_template('relationship-starts/conveyancer-find-property-2.2.html')

# Step 3 - results and select correct property
@app.route('/relationship-starts/conveyancer-select-property')
def conveyancer_select_property_2_2():
    return render_template('relationship-starts/conveyancer-select-property-2.2.html')

# Step 4 - select associated task
@app.route('/relationship-starts/conveyancer-select-task')
def conveyancer_select_task_2_2():
    return render_template('relationship-starts/conveyancer-select-task-2.2.html')

# Step 5 - set the number of clients
@app.route('/relationship-starts/conveyancer-add-clients')
def conveyancer_add_clients_2_2():
    return render_template('relationship-starts/conveyancer-add-clients-2.2.html')

# Step 6 - add 1st client
@app.route('/relationship-starts/conveyancer-add-client-1')
def conveyancer_add_client_1_2_2():
    return render_template('relationship-starts/conveyancer-add-client-1-2.2.html')

# Step 7 - add 2nd client
@app.route('/relationship-starts/conveyancer-add-client-2')
def conveyancer_add_client_2_2_2():
    return render_template('relationship-starts/conveyancer-add-client-2-2.2.html')

# Step 8 - confirmation
@app.route('/relationship-starts/conveyancer-confirm')
def conveyancer_confirm_2_2():
    return render_template('relationship-starts/conveyancer-confirm-2.2.html')

# Step 9 - generated token
@app.route('/relationship-starts/conveyancer-token')
def conveyancer_token_2_2():
    return render_template('relationship-starts/conveyancer-token-2.2.html')

# ---------------------------------------------------------------------------
# Transaction flows, relationship starts, client(s) confirm v2.2 --------
@app.route('/relationship-starts/client-start')
def client_start_2_2():
    return render_template('relationship-starts/client-start-2.2.html')

# Step 1 - login with GOV.UK Verify - use sub flow...
# Sub flow - GOV.UK Verification ---------------------
# GOV.UK verify - Sub flow Step 1 - for conveyancer create relationship flow
@app.route('/relationship-starts/client-login')
def client_verify_2_2():
    return render_template('relationship-starts/verify-subflow-client-1/verify-intro.html')

# GOV.UK verify - Sub flow Step 2 - who verified you
@app.route('/relationship-starts/client-who-verified-you')
def relationship_starts_client_verify_who_1():
    return render_template('relationship-starts/verify-subflow-client-1/verify-who.html')

# GOV.UK verify - Sub flow Step 3 - experian sign in
@app.route('/relationship-starts/client-experian-sign-in')
def relationship_starts_client_verify_experian_sign_in_1():
    return render_template('relationship-starts/verify-subflow-client-1/verify-sign-in.html')

# GOV.UK verify - Sub flow Step 4 - experian 2nd phase sign in
@app.route('/relationship-starts/client-experian-sign-in-part-2')
def relationship_starts_client_verify_experian_sign_in_2nd_part_1():
    return render_template('relationship-starts/verify-subflow-client-1/verify-sign-in-2.html')

# end Sub flow - GOV.UK Verification ---------------------
# Step 2 - Client 1 enters token
@app.route('/relationship-starts/client-enter-token')
def client_enter_token_2_1():
    return render_template('relationship-starts/client-enter-token-2.1.html')

# Step 3 - Client 1 confirms
@app.route('/relationship-starts/client-confirm')
def client_confirm_2_2():
    return render_template('relationship-starts/client-confirm-2.2.html')

# Step 4 - Client 1 receives confirmation
@app.route('/relationship-starts/client-semi-confirmed')
def client_semi_confirmed_2_2():
    return render_template('relationship-starts/client-semi-confirmed-2.2.html')

# Step 5 - Client can now view the register if they want to.
@app.route('/relationship-starts/client-view-register')
def client_view_register_2_1():
    return render_template('relationship-starts/register-2.1-no-pending.html')

# Step 6 - Client 2 visits start page
@app.route('/relationship-starts/client-2-start')
def client_2_start_2_2():
    return render_template('relationship-starts/client-2-start-2.2.html')

# Step 7 - login with GOV.UK Verify - use sub flow...
# Sub flow - GOV.UK Verification ---------------------
# GOV.UK verify - Sub flow Step 1 - for conveyancer create relationship flow
@app.route('/relationship-starts/client-2-login')
def client_2_verify_2_0():
    return render_template('relationship-starts/verify-subflow-client-2/verify-intro.html')

# GOV.UK verify - Sub flow Step 2 - who verified you
@app.route('/relationship-starts/client-2-who-verified-you')
def relationship_starts_client_2_verify_who_1():
    return render_template('relationship-starts/verify-subflow-client-2/verify-who.html')

# GOV.UK verify - Sub flow Step 3 - experian sign in
@app.route('/relationship-starts/client-2-experian-sign-in')
def relationship_starts_client_2_verify_experian_sign_in_1():
    return render_template('relationship-starts/verify-subflow-client-2/verify-sign-in.html')

# GOV.UK verify - Sub flow Step 4 - experian 2nd phase sign in
@app.route('/relationship-starts/client-2-experian-sign-in-part-2')
def relationship_starts_client_2_verify_experian_sign_in_2nd_part_1():
    return render_template('relationship-starts/verify-subflow-client-2/verify-sign-in-2.html')

# end Sub flow - GOV.UK Verification ---------------------
# Step 8 - Client 2 enters token
@app.route('/relationship-starts/client-2-enter-token')
def client_2_enter_token_2_0():
    return render_template('relationship-starts/client-2-enter-token-2.0.html')

# Step 9 - Client 2 confirms
@app.route('/relationship-starts/client-2-confirm')
def client_2_confirm_2_2():
    return render_template('relationship-starts/client-2-confirm-2.2.html')

# Step 10 - Client 2 receives (all parties) confirmation
@app.route('/relationship-starts/clients-confirmed')
def clients_confirmed_2_2():
    return render_template('relationship-starts/clients-confirmed-2.2.html')

# ---------------------------------------------------------------------------
# Transaction flows, relationship starts, citizen confirms v2.0 --------
@app.route('/relationship-starts/citizen-confirms')
def citizen_confirms_2_0():
    return render_template('relationship-starts/citizen-confirms-2.0.html')
# ---------------------------------------------------------------------------
# Standalone page prototypes: register views, change history, example legal
# documents, reserve-priority flow, and the sprint-4 relationship verifier
# flow. All render-only. Re-indented: this section had lost its indentation.
# Page prototypes, Register View --------------------------
@app.route('/register-view/register-2.0')
def register_2_0():
    return render_template('register-view/register-2.0.html')

@app.route('/register-view/register-2.1')
def register_2_1():
    return render_template('register-view/register-2.1.html')

@app.route('/register-view/register-3.0')
def register_3_0():
    return render_template('register-view/register-3.0.html')

@app.route('/register-view/register-test-title')
def register_test_title():
    return render_template('register-view/register-test-title.html')

@app.route('/register-view/register-hybrid')
def register_hybrid():
    return render_template('register-view/register-hybrid.html')

# ---------------------------------------------------------------------------
# Page prototypes, Register Changes View --------------------------
# Change history - pending and historical
@app.route('/changes-view/changes-1.0')
def changes_1_0():
    return render_template('changes-view/changes-1.0.html')

# Change history - historical only - nothing pending
@app.route('/changes-view/changes-no-pending-1.0')
def changes_no_pending_1_0():
    return render_template('changes-view/changes-no-pending-1.0.html')

# ---------------------------------------------------------------------------
# Page prototypes, Example mortgage agreement --------------------------
@app.route('/legal-documents/mortgage-agreement-v1')
def mortgage_agreement_1():
    return render_template('legal-documents/mortgage-agreement-v1.html')

# Page prototypes, Example transfer agreement --------------------------
@app.route('/legal-documents/transfer-agreement-v1')
def transfer_agreement_1():
    return render_template('legal-documents/transfer-agreement-v1.html')

# ---------------------------------------------------------------------------
# Reserve Priority (Freeze register) ---------------------------------------
@app.route('/reserve-priority/select')
def reserve_priority_1_select():
    return render_template('reserve-priority/protect-what-2.0.html')

@app.route('/reserve-priority/confirm')
def reserve_priority_2_confirm():
    return render_template('reserve-priority/protect-confirm-2.0.html')

@app.route('/reserve-priority/confirmed')
def reserve_priority_3_confirmed():
    return render_template('reserve-priority/protect-confirmed-2.0.html')

# ---------------------------------------------------------------------------
# Sprint 4, Relationship verifier flow --------------------------
@app.route('/sprint-4/citizen-reference')
def sprint_4_reference():
    return render_template('sprint-4/relationship/citizen-reference.html')

@app.route('/sprint-4/citizen-login')
def sprint_4_citizen_login():
    return render_template('sprint-4/relationship/citizen-login.html')

@app.route('/sprint-4/citizen-confirm')
def sprint_4_citizen_confirm():
    return render_template('sprint-4/relationship/citizen-confirm.html')

@app.route('/sprint-4/citizen-complete')
def sprint_4_citizen_complete():
    return render_template('sprint-4/relationship/citizen-complete.html')

@app.route('/sprint-4/citizen-register')
def sprint_4_citizen_register():
    return render_template('sprint-4/relationship/citizen-register.html')
# ---------------------------------------------------------------------------
# Earlier sprint prototypes (sprint 3 and sprint 2) plus designer example
# pages. All render-only. Re-indented: this section had lost its indentation.
# Sprint 3, Register view --------------------------
@app.route('/sprint-3/register-v1')
def sprint_3_register_v1():
    return render_template('sprint-3/register-view/register-v1.html')

@app.route('/sprint-3/register-v1a-history')
def sprint_3_register_v1a_history():
    return render_template('sprint-3/register-view/register-v1a-history.html')

@app.route('/sprint-3/register-v1a-history-1')
def sprint_3_register_v1a_history_1():
    return render_template('sprint-3/register-view/register-v1a-history-1.html')

# Sprint 3, prototype 1, conveyancer - buyer relationship --------------------------
@app.route('/sprint-3/conveyancer-start')
def sprint_3_conveyancer_start():
    return render_template('sprint-3/buyer-conveyancer/conveyancer-0-start.html')

@app.route('/sprint-3/conveyancer-login')
def sprint_3_conveyancer_login():
    return render_template('sprint-3/buyer-conveyancer/conveyancer-1-login.html')

@app.route('/sprint-3/conveyancer-enter-title')
def sprint_3_conveyancer_enter_title():
    return render_template('sprint-3/buyer-conveyancer/conveyancer-2-enter-title.html')

@app.route('/sprint-3/conveyancer-add-buyers')
def sprint_3_conveyancer_add_buyers():
    return render_template('sprint-3/buyer-conveyancer/conveyancer-5-add-buyers.html')

@app.route('/sprint-3/relationship-reference')
def sprint_3_relationship_reference():
    return render_template('sprint-3/buyer-conveyancer/conveyancer-6-ref-for-buyers.html')

# Sprint 3, prototype 1, buyer -> conveyancer relationship --------------------------
@app.route('/sprint-3/buyer-login')
def sprint_3_buyer_login():
    return render_template('sprint-3/buyer-conveyancer/buyer-1-login.html')

@app.route('/sprint-3/buyer-ref-code')
def sprint_3_buyer_ref_code():
    return render_template('sprint-3/buyer-conveyancer/buyer-2-reference-code.html')

@app.route('/sprint-3/buyer-register')
def sprint_3_buyer_register():
    return render_template('sprint-3/buyer-conveyancer/buyer-3-register.html')

# Sprint 3, Execute Deed - reworked from sprint 2 -----------------------------------
@app.route('/sprint-3/buyer-signing-start')
def sprint_3_buyer_signing_start():
    return render_template('sprint-3/deed/buyer-0-start.html')

@app.route('/sprint-3/buyer-signing-login')
def sprint_3_buyer_signing_login():
    return render_template('sprint-3/deed/buyer-0a-login.html')

@app.route('/sprint-3/display-charge-for-signing')
def sprint_3_execute_deed():
    return render_template('sprint-3/deed/buyer-1-sign-charge.html')

@app.route('/sprint-3/display-transfer-for-signing')
def sprint_3_execute_transfer():
    return render_template('sprint-3/deed/buyer-1a-sign-transfer.html')

@app.route('/sprint-3/two-factor')
def sprint_3_two_factor():
    return render_template('sprint-3/deed/buyer-2-two-factor.html')

@app.route('/sprint-3/signing-complete')
def sprint_3_signing_complete():
    return render_template('sprint-3/deed/buyer-3-signing-complete.html')

# ---------------------------------------------------------------------------
# Sprint 2, prototype 1: Passing a "token" -----------------------------------------
@app.route('/sprint-2/token')
def sprint_2_token():
    return render_template('sprint-2/token/citizen-1-register.html')

@app.route('/sprint-2/select-action')
def sprint_2_select_action():
    return render_template('sprint-2/token/citizen-2-select-action.html')

@app.route('/sprint-2/choose-method')
def sprint_2_choose_method():
    return render_template('sprint-2/token/citizen-3-choose-method.html')

@app.route('/sprint-2/generate-token')
def sprint_2_generate_token():
    return render_template('sprint-2/token/citizen-4-generate-token.html')

@app.route('/sprint-2/show-change')
def sprint_2_show_change():
    return render_template('sprint-2/token/citizen-5-register-during-change.html')

@app.route('/sprint-2/input-token')
def sprint_2_input_token():
    return render_template('sprint-2/token/conveyancer-1-input-token.html')

@app.route('/sprint-2/retrieve-token')
def sprint_2_retrieve_token():
    return render_template('sprint-2/token/conveyancer-2-retrieve-details.html')

# Sprint 2, spike - Execute Deed -----------------------------------------
@app.route('/sprint-2/execute-deed')
def sprint_2_execute_deed():
    return render_template('sprint-2/deed/buyer-1-execute-deed.html')

@app.route('/sprint-2/execution-complete')
def sprint_2_execution_complete():
    return render_template('sprint-2/deed/buyer-2-execution-complete.html')

# Example pages - for designers -----------------------------------------
@app.route('/examples/example-1')
def example_1():
    return render_template('examples/example-page.html')
if __name__ == '__main__':
    # Bind to PORT if defined, otherwise default to 5000.
    # (Re-indented: the guard body had lost its indentation.)
    port = int(os.environ.get('PORT', 5000))
    app.run(host='0.0.0.0', port=port)
| 2.5 | 2 |
tools/read_test_results.py | VirgiAgl/updated_AutoGP | 0 | 12798203 | <gh_stars>0
"""Print the rejection-rate summary pickled by a test run.

Directly copied from MMD case. Expects the directory containing
``results.bin`` as the first command-line argument.
"""
from numpy import sqrt
import os
from pickle import load
import sys

os.chdir(sys.argv[1])
load_filename = "results.bin"
# Pickle data must be read in binary mode under Python 3 (the original
# opened the file in text mode and used Python 2 print statements).
with open(load_filename, "rb") as load_f:
    [counter, numTrials, param, average_time, pvalues] = load(load_f)
rate = counter / float(numTrials)
# 1.96 standard errors -> symmetric 95% confidence half-width for the rate.
stder = 1.96 * sqrt(rate * (1 - rate) / float(numTrials))
print("Parameters:")
for keys, values in param.items():
    print(keys)
    print(values)
print("Rejection rate: %.3f +- %.3f (%d / %d)" % (rate, stder, counter, numTrials))
print("Average test time: %.5f sec" % average_time)
os.chdir('..')
# Minor: need to change the above for Gaussian Kernel Median Heuristic
| 2.328125 | 2 |
help/migrations/0004_article_faq_article_order_articlecategory_order_and_more.py | AppointmentGuru/kb | 0 | 12798204 | <filename>help/migrations/0004_article_faq_article_order_articlecategory_order_and_more.py
# Generated by Django 4.0.1 on 2022-02-01 17:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add manual ordering and FAQ flagging to help articles.

    Auto-generated by Django; avoid hand-editing the operations once this
    migration has been applied to a shared database.
    """

    dependencies = [
        ('help', '0003_article_categories_articlecategory'),
    ]

    operations = [
        # Articles can be flagged for inclusion in an FAQ listing; indexed
        # so FAQ queries can filter cheaply.
        migrations.AddField(
            model_name='article',
            name='faq',
            field=models.BooleanField(db_index=True, default=False),
        ),
        # Explicit sort keys; the large default (1000) leaves room to slot
        # items in front of existing ones without renumbering.
        migrations.AddField(
            model_name='article',
            name='order',
            field=models.PositiveIntegerField(default=1000),
        ),
        migrations.AddField(
            model_name='articlecategory',
            name='order',
            field=models.PositiveIntegerField(default=1000),
        ),
        # Categories become optional on articles (blank=True).
        migrations.AlterField(
            model_name='article',
            name='categories',
            field=models.ManyToManyField(blank=True, through='help.ArticleCategory', to='help.Category'),
        ),
    ]
| 1.59375 | 2 |
Day_00/01_Basics/temperature.py | saudijack/unfpyboot | 0 | 12798205 | <reponame>saudijack/unfpyboot<gh_stars>0
def fahrenheit_to_celsius(faren):
    """Convert a temperature from degrees Fahrenheit to degrees Celsius."""
    return 5.0 / 9.0 * (faren - 32.0)


if __name__ == "__main__":
    # `raw_input` was Python 2 only; `input` returns a string in Python 3.
    # The prompt's stray ')' and the "Celcius" misspelling are also fixed.
    faren = float(input("Enter the temperature in Fahrenheit: "))
    cel = fahrenheit_to_celsius(faren)
    print("The temperature in Celsius is " + str(cel) + " degrees.")
app/submission/urls.py | PICT-ACM-Student-Chapter/OJ_API | 2 | 12798206 | <reponame>PICT-ACM-Student-Chapter/OJ_API<filename>app/submission/urls.py
from django.urls import path
from .views import Run, CheckRunStatus, CallbackRunNow, \
CallbackSubmission
# URL table for the submission API: submit a run, poll its status, and
# receive the two judge callback notifications.
urlpatterns = [
    path('run', Run.as_view()),
    path('run/<int:id>', CheckRunStatus.as_view()),
    # Callback endpoints (see CallbackRunNow / CallbackSubmission views).
    path('callback/run/<int:sub_id>', CallbackRunNow.as_view()),
    path('callback/submit/<int:verdict_id>', CallbackSubmission.as_view()),
]
| 1.820313 | 2 |
poolink_backend/apps/board/migrations/0001_initial.py | jaethewiederholen/Poolink_backend | 0 | 12798207 | <reponame>jaethewiederholen/Poolink_backend<gh_stars>0
# Generated by Django 3.1.12 on 2021-06-24 15:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the board app: creates the ``Board`` model.

    Auto-generated by Django; help_text/verbose_name strings are the
    project's user-facing (Korean) copy and must not be edited here.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Board',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(default='이름없는 보드', help_text='보드의 이름입니다', max_length=255, verbose_name='보드 이름')),
                ('image', models.ImageField(help_text='유저가 설정한 보드 이미지입니다.', null=True, upload_to='', verbose_name='보드 이미지')),
                ('bio', models.TextField(help_text='보드에 대한 설명입니다', null=True, verbose_name='보드 설명')),
                ('charge', models.BooleanField(default=False, help_text='유료 보드인지를 나타냅니다. MVP 단계에서 사용되지 않습니다.', verbose_name='유료보드 설정 여부')),
                ('like_count', models.IntegerField(help_text='보드의 좋아요 수를 나타냅니다.', verbose_name='좋아요 수')),
                ('scrap_count', models.IntegerField(help_text='보드가 스크랩 된 수를 나타냅니다.', verbose_name='스크랩 수')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='boards', to=settings.AUTH_USER_MODEL, verbose_name='보드 소유자')),
            ],
            options={
                'verbose_name': '보드',
                'verbose_name_plural': '보드',
            },
        ),
    ]
| 1.78125 | 2 |
inferlo/base/graph_model.py | InferLO/inferlo | 1 | 12798208 | <filename>inferlo/base/graph_model.py<gh_stars>1-10
# Copyright (c) 2020, The InferLO authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 - see LICENSE file.
from __future__ import annotations
import abc
import itertools
from typing import TYPE_CHECKING, Iterable, Tuple, Dict, List
import networkx as nx
import numpy as np
from inferlo.base.factors import FunctionFactor
from inferlo.base.variable import Variable
if TYPE_CHECKING:
from inferlo.base import Domain, Factor
class GraphModel(abc.ABC):
"""Abstract class representing any graphical model."""
def __init__(self, num_variables: int, domain: Domain):
"""
:param num_variables: Number of variables in the model.
:param domain: Default domain of each variable.
"""
self.num_variables = num_variables
self._default_domain = domain
self._vars = dict()
def get_variable(self, idx: int) -> Variable:
"""Returns variable by its index."""
if not 0 <= idx < self.num_variables:
raise IndexError(
"index %d is out of bounds for random vector of size %d" % (
idx, self.num_variables))
if idx not in self._vars:
v = Variable(self, idx, self._default_domain)
self._vars[idx] = v
return self._vars[idx]
def get_variables(self) -> List[Variable]:
"""Returns all variables."""
return [self.get_variable(i) for i in range(self.num_variables)]
def __getitem__(self, idx: int) -> Variable:
return self.get_variable(idx)
@abc.abstractmethod
def add_factor(self, factor: Factor):
"""Adds a factor to the model."""
def __imul__(self, other: Factor):
self.add_factor(other)
return self
def __len__(self):
return self.num_variables
@abc.abstractmethod
def infer(self, algorithm='auto', **kwargs):
"""Performs inference."""
@abc.abstractmethod
def max_likelihood(self, algorithm='auto', **kwargs) -> np.ndarray:
"""Finds the most probable state."""
def sample(self, num_samples: int, algorithm='auto',
**kwargs) -> np.ndarray:
"""Generates samples."""
@abc.abstractmethod
def get_factors(self) -> Iterable[Factor]:
"""Returns all factors."""
def get_symbolic_variables(self) -> List[FunctionFactor]:
"""Prepares variables for usage in expressions.
Returns lists of trivial ``FunctionFactor`` s, each of them
representing a factor on one variable with identity function.
They can be used in mathematical expressions, which will result in
another ``FunctionFactor``.
"""
return [FunctionFactor(self, [i], lambda x: x[0]) for i in
range(self.num_variables)]
def get_factor_graph(self) -> Tuple[nx.Graph, Dict[int, str]]:
"""Builds factor graph for the model.
Factor graph is a bipartite graph with variables in one part and
factors in other graph. Edge denotes that factor depends on variable.
"""
factors = list(self.get_factors())
var_labels = [v.name for v in self.get_variables()]
fact_labels = [f.get_name() for f in factors]
labels = var_labels + fact_labels
labels = {i: labels[i] for i in range(len(labels))}
graph = nx.Graph()
graph.add_nodes_from(range(self.num_variables), bipartite=0)
graph.add_nodes_from(
range(self.num_variables, self.num_variables + len(factors)),
bipartite=1)
for factor_id in range(len(factors)):
for var_id in factors[factor_id].var_idx:
graph.add_edge(var_id, self.num_variables + factor_id)
return graph, labels
def draw_factor_graph(self, ax):
    """Draws the factor graph onto the given matplotlib axes."""
    graph, labels = self.get_factor_graph()
    # Variable nodes form one bipartite set, factor nodes the other.
    top = nx.bipartite.sets(graph)[0]
    vc = self.num_variables
    fc = len(nx.bipartite.sets(graph)[1])
    pos = nx.bipartite_layout(graph, top)
    # Variables: circles in light red.
    nx.draw_networkx(graph, pos, ax, labels=labels, node_shape='o',
                     nodelist=list(range(vc)),
                     node_color='#ffaaaa')
    # Draw factors in another color.
    nx.draw_networkx(graph, pos, ax, labels=labels,
                     nodelist=list(range(vc, vc + fc)),
                     node_shape='s',
                     edgelist=[],
                     node_color='lightgreen')
def evaluate(self, x: np.ndarray) -> float:
    """Returns value of non-normalized pdf in point.

    In other words, just substitutes values into factors and multiplies
    them.
    """
    point = np.array(x)
    assert point.shape == (self.num_variables,)
    product = 1.0
    for factor in self.get_factors():
        product *= factor.value(point[factor.var_idx])
    return product
def part_func_bruteforce(model):
    """Evaluates partition function in very inefficient way."""
    # Exhaustive sum over the cartesian product of all variable domains;
    # exponential in the number of variables.  Although defined in the
    # class body, the first parameter is named `model`, so it also works
    # when called as an instance method.
    part_func = 0
    for x in itertools.product(
            *(v.domain.values for v in model.get_variables())):
        part_func += model.evaluate(np.array(x))
    return part_func
def max_likelihood_bruteforce(model):
    """Evaluates most likely state in a very inefficient way."""
    # Exhaustive search over all variable assignments (exponential).
    best_state = None
    best_prob = 0.0
    for x in itertools.product(
            *(v.domain.values for v in model.get_variables())):
        prob = model.evaluate(np.array(x))
        # `>=` means ties (and an all-zero model) resolve to the state
        # enumerated last.
        if prob >= best_prob:
            best_state = x
            best_prob = prob
    return best_state
def get_max_domain_size(self):
    """Returns the biggest domain size over all variables."""
    # Generator expression instead of building an intermediate list
    # inside max() (flake8-comprehensions C419).
    return max(var.domain.size() for var in self.get_variables())
| 2.53125 | 3 |
game/version1/main.py | aniknagato/Balloon-Blaster-Mace-Ball | 0 | 12798209 | # need better design
import pyglet
import resources
import random
import math
from pyglet.window import key
# Global running score; incremented by PhysicalObject.handle_collision_with.
score = 0
game_window = pyglet.window.Window()
# Point pyglet's resource loader at the bundled asset directory.
pyglet.resource.path = ['./resources']
pyglet.resource.reindex()
# Sprite image used for the balloons ("asteroids").
ast_img = pyglet.resource.image("player.png")
def distance(point_1=(0, 0), point_2=(0, 0)):
    """Returns the Euclidean distance between two 2-D points."""
    dx = point_1[0] - point_2[0]
    dy = point_1[1] - point_2[1]
    return math.sqrt(dx ** 2 + dy ** 2)
class PhysicalObject(pyglet.sprite.Sprite):
    """Sprite with velocity, circle-based collision and a dead flag."""

    def __init__(self, *args, **kwargs):
        super(PhysicalObject, self).__init__(*args, **kwargs)
        # Marked True on collision; update()/on_draw() skip dead objects.
        self.dead = False
        self.velocity_x, self.velocity_y = 0.0, 0.0

    def update(self, dt):
        """Advance position by velocity * dt (Euler step)."""
        self.x += self.velocity_x * dt
        self.y += self.velocity_y * dt

    def collides_with(self, other_object):
        """Circle-vs-circle test using half the image widths as radii."""
        collision_distance = self.image.width/2 + other_object.image.width/2
        actual_distance = distance(self.position, other_object.position)
        return (actual_distance <= collision_distance)

    def handle_collision_with(self, other_object):
        """Mark self dead and award one point to the global score."""
        self.dead = True
        # NOTE(review): mutating the module-level score from here couples
        # the sprite to global game state.
        global score
        score += 1
class Asteroid(PhysicalObject):
    """A drifting balloon target; behavior comes from PhysicalObject."""

    def __init__(self, *args, **kwargs):
        super(Asteroid, self).__init__(*args, **kwargs)
def asteroids(num_asteroids, player_position):
    """Spawn *num_asteroids* Asteroid sprites away from the player.

    Each asteroid is re-rolled until it lies at least 10 px from
    *player_position*, then given a random rotation and a random
    velocity in [-50, 50) px/s on each axis.
    """
    spawned = []
    for i in range(num_asteroids):
        asteroid_x, asteroid_y = player_position
        # Initial position equals the player's, so this loop always runs
        # at least once and re-rolls until the spawn point is clear.
        while distance((asteroid_x, asteroid_y), player_position) < 10:
            asteroid_x = random.randint(0, 800)
            asteroid_y = random.randint(0, 600)
        new_asteroid = Asteroid(img=ast_img, x=asteroid_x, y=asteroid_y)
        # Fix: rotation was assigned twice with two random draws; assign once.
        new_asteroid.rotation = random.randint(0, 360)
        new_asteroid.velocity_x = random.random()*100 - 50
        new_asteroid.velocity_y = random.random()*100 - 50
        spawned.append(new_asteroid)
    return spawned
def center_image(image):
    """Sets an image's anchor point to its center"""
    image.anchor_x, image.anchor_y = image.width // 2, image.height // 2
player_image = pyglet.resource.image("ship.png")
# bullet_image / asteroid_image are loaded but never referenced again in
# this script.
bullet_image = pyglet.resource.image("player.png")
asteroid_image = pyglet.resource.image("player.png")
score_label = pyglet.text.Label(text="Score: {}".format(score), x=10, y=460)
level_label = pyglet.text.Label(text="Balloon Blaster Mace Ball",
                                x=game_window.width//2, y=460, anchor_x='center')
center_image(player_image)
# Static ship sprite used only as the reference position when spawning.
player_ship = pyglet.sprite.Sprite(img=player_image, x=400, y=300)
# NOTE(review): this rebinding shadows the asteroids() factory above, so
# the function cannot be called again afterwards.
asteroids = asteroids(20, player_ship.position)
class Player(PhysicalObject):
    """The controllable sprite; arrow keys move it 100 px/s per axis."""

    def __init__(self, *args, **kwargs):
        super(Player, self).__init__(*args, **kwargs)
        # Pressed-state of each arrow key, polled every update() tick.
        self.keys = dict(left=False, right=False, up=False, down=False)
        self.rotate_speed = 200.0
        self.velocity_x = 0
        self.velocity_y = self.velocity_x

    def _key_name(self, symbol):
        # Map a pyglet key symbol to the name used in self.keys.
        if symbol == key.UP:
            return 'up'
        if symbol == key.DOWN:
            return 'down'
        if symbol == key.LEFT:
            return 'left'
        if symbol == key.RIGHT:
            return 'right'
        return None

    def on_key_press(self, symbol, modifiers):
        pressed = self._key_name(symbol)
        if pressed is not None:
            self.keys[pressed] = True

    def on_key_release(self, symbol, modifiers):
        released = self._key_name(symbol)
        if released is not None:
            self.keys[released] = False

    def update(self, dt):
        super(Player, self).update(dt)
        step = 100 * dt
        if self.keys['left']:
            self.x -= step
        if self.keys['right']:
            self.x += step
        if self.keys['up']:
            self.y += step
        if self.keys['down']:
            self.y -= step
# The controllable player; appended last so update() can find it at
# game_objects[-1].
player = Player(img=player_image, x=400, y=300)
game_objects = asteroids + [player]
def update(dt):
    """Per-frame tick: move sprites, resolve hits, prune dead sprites."""
    for sprite in game_objects:
        sprite.update(dt)
    # By construction the player is the last entry in game_objects.
    the_player = game_objects[-1]
    score_label.text = "Score: {}".format(score)
    for balloon in game_objects[0:-1]:
        if not balloon.dead and balloon.collides_with(the_player):
            balloon.handle_collision_with(the_player)
    dead_sprites = [sprite for sprite in game_objects if sprite.dead]
    for corpse in dead_sprites:
        corpse.delete()
        game_objects.remove(corpse)
@game_window.event
def on_draw():
    """Redraw labels, the player and every live balloon each frame."""
    game_window.clear()
    level_label.draw()
    score_label.draw()
    player.draw()
    # NOTE(review): this iterates the module-level `asteroids` list, not
    # game_objects, so removed sprites are only skipped via their dead flag.
    for asteroid in asteroids:
        if not asteroid.dead:
            asteroid.draw()
# Route keyboard events to player's on_key_press / on_key_release.
game_window.push_handlers(player)
# Tick the simulation at 120 Hz.
pyglet.clock.schedule_interval(update, 1/120.0)
pyglet.app.run()
| 3.09375 | 3 |
Files/add_edit_delete_share.py | SlaveForGluten/MyWallet | 0 | 12798210 | <filename>Files/add_edit_delete_share.py
import tkinter as tk
from tkinter import messagebox
from Files import (shares_page, manage_db, calculate, scrap_web)
# Default widget font used by every dialog in this module.
FONT = "Calabria 12"
def add_shares(parent):
    """Open a Toplevel dialog for entering a newly bought share.

    On "Add", the input is validated, stored via manage_db, the current
    price is scraped, and the parent canvas is refreshed.
    """
    def save():
        # Validate name / price / date before touching the database.
        if(manage_db.check_if_valid_name(name.get()) and
                manage_db.check_for_real_numbers(entry_price.get()) and
                manage_db.check_date_format(date.get())):
            # Selling-related fields start empty: the share is still open.
            share = {"Name": name.get(),
                     "Quantity": quantity.get(),
                     "BuyingPrice": entry_price.get(),
                     "BuyingDate": date.get(),
                     "Cost": "",
                     "SellingPrice": "",
                     "SellingDate": "",
                     "Dividends": ""}
            manage_db.add_share("gpw_shares", share)
            manage_db.add_current_price(
                name.get(), scrap_web.pull_current_price(name.get()))
            shares_page.Shares.curent_canvas(parent)
            top_window.destroy()
    top_window = tk.Toplevel(parent, height=600, width=390)
    # LABELS:
    list_of_labels = ["Name:", "Quantity:", "Entry price (per share):",
                      "Entry date:", ]
    for txt in list_of_labels:
        label = tk.Label(top_window, text=txt, font=FONT)
        label.grid(sticky="nw")
    # ENTRIES:
    name = tk.Entry(top_window, width=9, font=FONT)
    name.grid(row=0, column=1, padx=10)
    quantity = tk.Entry(top_window, width=9, font=FONT)
    quantity.grid(row=1, column=1, padx=10)
    entry_price = tk.Entry(top_window, width=9, font=FONT)
    entry_price.grid(row=2, column=1, padx=10)
    date = tk.Entry(top_window, width=9, font=FONT)
    date.grid(row=3, column=1, padx=10)
    add_button = tk.Button(
        top_window, text="Add", font=FONT, command=save)
    add_button.grid(sticky="nw", padx=5, pady=5)
def menu_window(parent, share):
    """right clicking on a closed or active share opens a menu windows
    with buttons allowing you to edit/add alarm/delete share
    """
    # NOTE(review): the local variable shadows this function's own name;
    # harmless here, but renaming one of them would be clearer.
    menu_window = tk.Toplevel(master=None, width=400, height=200)
    edit_button = tk.Button(
        menu_window, text="Edit", font=FONT, bg="green",
        command=lambda: edit(parent, share, menu_window))
    edit_button.grid(row=0, column=0, pady=20, padx=20)
    alarm_button = tk.Button(
        menu_window, text="Alarm", font=FONT, bg="green",
        command=lambda: set_alarm(parent, share, menu_window))
    alarm_button.grid(row=0, column=1, pady=20)
    delete_button = tk.Button(
        menu_window, text="Delete", font=FONT, bg="red",
        command=lambda: delete(parent, share, menu_window))
    delete_button.grid(row=0, column=2, pady=20, padx=20)
    cancel_button = tk.Button(
        menu_window, text="Cancel", font=FONT,
        command=menu_window.destroy)
    cancel_button.grid(row=0, column=3, pady=20, )
def delete(parent, to_delete, choice_window):
    """deletes unwanted share

    `to_delete` is a DB row tuple: index 0 is the timestamp key, index 6
    the selling price — an empty selling price means the share is still
    active (see edit()), so it lives in the open-shares table.
    """
    if messagebox.askyesno(
            "Delete", "Are you sure you want to delete this?"):
        if to_delete[6] == "":
            manage_db.delete_row_from_table(
                "gpw_shares", "timestamp", to_delete[0])
            shares_page.Shares.curent_canvas(parent)
        else:
            manage_db.delete_row_from_table(
                "gpw_shares_closed", "timestamp", to_delete[0])
            shares_page.Shares.historical_canvas(parent)
    choice_window.destroy()
def edit(parent, old_share, choice_window):
    """display edit window for active or sold shares. After viewing or
    editing you can save changes

    `old_share` is a DB row tuple: 0=timestamp key, 1=name, 2=quantity,
    3=buying price, 4=buying date, 6=selling price, 7=selling date,
    8='|'-separated "dividend-date" entries.
    """
    def save():
        # Collect the (possibly edited) field values; Cost is recomputed
        # below only when the share has a selling price.
        share = {"Name": name.get(),
                 "Quantity": quantity.get(),
                 "BuyingPrice": entry_price.get(),
                 "BuyingDate": entry_date.get(),
                 "SellingPrice": exit_price.get(),
                 "SellingDate": exit_date.get(),
                 "Cost": ''
                 }
        # cost depends on changing current share price but if a
        # share was sold, you need to consider fixed selling price.
        # Therefore sold cost can be calculated and added to dictionary
        # without the need of calculating it all over again:
        if exit_price.get() != "":
            total_buying_price = int(
                share["Quantity"])*float(share["BuyingPrice"])
            total_selling_price = int(
                share["Quantity"])*float(share["SellingPrice"])
            share["Cost"] = str(calculate.total_costs(
                total_buying_price, total_selling_price))
        # Collect and save in order all user input from dividend
        # and dividend_date entries.
        # If no previous inputs:
        if old_share[8] == "":
            list_of_entries = (div_2, div_3, div_4, div_5)
            list_of_dates = (div_date_2, div_date_3,
                             div_date_4, div_date_5)
            share["Dividends"] = ""
            if div_1.get():
                share["Dividends"] = (
                    div_1.get()+"-"+div_date_1.get())
            for counter, entrie in enumerate(list_of_entries):
                if entrie.get():
                    share["Dividends"] = (share["Dividends"]+"|" +
                                          entrie.get()+"-" +
                                          list_of_dates[counter].get())
        # If previous entries exist, replace the ones displayed
        # (up to 4 latest) with the new ones (in case edits were made)
        else:
            list_of_entries = (div_1, div_2, div_3, div_4, div_5)
            list_of_dates = (div_date_1, div_date_2, div_date_3,
                             div_date_4, div_date_5)
            number_of_entries_to_clear = 4
            all_dividents = old_share[8].split("|")
            # Drop up to the last four stored entries; they were shown in
            # the editable fields and are re-appended from those below.
            for __ in range(0, number_of_entries_to_clear):
                if all_dividents:
                    # if len(all_dividents) > 0:
                    all_dividents.remove(
                        all_dividents[len(all_dividents)-1])
            for counter, entrie in enumerate(list_of_entries):
                if entrie.get():
                    all_dividents.append(entrie.get() + "-" +
                                         list_of_dates[counter].get())
            share["Dividends"] = '|'.join(all_dividents)
        # check if all input correct
        # NOTE(review): this condition mixes `and`/`or` without
        # parentheses; `and` binds tighter, so an empty exit price/date
        # can short-circuit the remaining checks — confirm intended.
        if(manage_db.check_if_valid_name(name.get()) and
           manage_db.check_for_real_numbers(entry_price.get()) and
           manage_db.check_for_real_numbers(exit_price.get()) or
           exit_price.get() == '' and
           manage_db.check_date_format(entry_date.get()) and
           manage_db.check_date_format(exit_date.get()) or
           exit_date.get() == ''):
            top_window.destroy()
            # The edit is stored as delete-old-row + insert-new-row.
            if old_share[6] == "":
                manage_db.delete_row_from_table(
                    "gpw_shares", "timestamp", old_share[0])
            else:
                manage_db.delete_row_from_table(
                    "gpw_shares_closed", "timestamp", old_share[0])
            if share["SellingDate"] == "":
                manage_db.add_share("gpw_shares", share)
                manage_db.add_current_price(
                    share["Name"], scrap_web.pull_current_price(share["Name"]))
                shares_page.Shares.curent_canvas(parent)
            else:
                manage_db.add_share("gpw_shares_closed", share)
                shares_page.Shares.historical_canvas(parent)
            choice_window.destroy()
    top_window = tk.Toplevel(parent, height=600, width=390)
    # LABELS
    list_of_labels = ["Name:", "Quantity:", "Entry price (per share):",
                      "Entry date:", "Divident:", "Divident date:",
                      "Exit price (per share):", "Exit dete:"]
    for txt in list_of_labels:
        label = tk.Label(top_window, text=txt, font=FONT)
        label.grid(sticky="nw")
    # ENTRIES
    name = tk.Entry(top_window, width=9, font=FONT)
    name.grid(row=0, column=1, padx=5)
    name.insert(0, old_share[1])
    quantity = tk.Entry(top_window, width=9, font=FONT)
    quantity.grid(row=1, column=1, padx=5)
    quantity.insert(0, old_share[2])
    entry_price = tk.Entry(top_window, width=9, font=FONT)
    entry_price.grid(row=2, column=1, padx=5)
    entry_price.insert(0, old_share[3])
    entry_date = tk.Entry(top_window, width=9, font=FONT)
    entry_date.grid(row=3, column=1, padx=5)
    entry_date.insert(0, old_share[4])
    div_1 = tk.Entry(top_window, width=9, font=FONT)
    div_1.grid(row=4, column=1, padx=5)
    div_date_1 = tk.Entry(top_window, width=9, font=FONT)
    div_date_1.grid(row=5, column=1, padx=5)
    div_2 = tk.Entry(top_window, width=9, font=FONT)
    div_2.grid(row=4, column=2, padx=5)
    div_date_2 = tk.Entry(top_window, width=9, font=FONT)
    div_date_2.grid(row=5, column=2, padx=5)
    div_3 = tk.Entry(top_window, width=9, font=FONT)
    div_3.grid(row=4, column=3, padx=5)
    div_date_3 = tk.Entry(top_window, width=9, font=FONT)
    div_date_3.grid(row=5, column=3, padx=5)
    div_4 = tk.Entry(top_window, width=9, font=FONT)
    div_4.grid(row=4, column=4, padx=5)
    div_date_4 = tk.Entry(top_window, width=9, font=FONT)
    div_date_4.grid(row=5, column=4, padx=5)
    # if share does have a dividend inputed, insert up to four last
    # dividend entries and leave last (5th) field empty for a new
    # entry, this allows you to view and edit previous entries
    if old_share[8]:
        # Tuples are ordered newest-first to match the reversed list.
        list_of_entries = (div_4, div_3, div_2, div_1)
        list_of_notes = (
            div_date_4, div_date_3, div_date_2, div_date_1)
        last_dividends = old_share[8].split("|")
        last_dividends.reverse()
        if len(last_dividends) >= 4:
            dividends_to_display = 4
            for counter in range(0, dividends_to_display):
                for div_or_date, value in enumerate(
                        last_dividends[counter].split("-")):
                    # first run of last for loop gives you dividend and
                    # last gives this dividend's date
                    if div_or_date == 0:
                        list_of_entries[counter].insert(0, value)
                    else:
                        list_of_notes[counter].insert(0, value)
        else:
            list_of_entries = (div_1, div_2, div_3)
            list_of_notes = (div_date_1, div_date_2, div_date_3)
            last_dividends = old_share[8].split("|")
            number_of_dividends = len(last_dividends)
            for counter in range(0, number_of_dividends):
                for div_or_date, value in enumerate(
                        last_dividends[counter].split("-")):
                    # first run of last for loop gives you dividend and
                    # last gives this dividend's date
                    if div_or_date == 0:
                        list_of_entries[counter].insert(0, value)
                    else:
                        list_of_notes[counter].insert(0, value)
    div_5 = tk.Entry(top_window, width=9, font=FONT)
    div_5.grid(row=4, column=5, padx=5)
    div_date_5 = tk.Entry(top_window, width=9, font=FONT)
    div_date_5.grid(row=5, column=5, padx=5)
    exit_price = tk.Entry(top_window, width=9, font=FONT)
    exit_price.grid(row=6, column=1, padx=5)
    exit_price.insert(0, old_share[6])
    exit_date = tk.Entry(top_window, width=9, font=FONT)
    exit_date.grid(row=7, column=1, padx=5)
    exit_date.insert(0, old_share[7])
    frame = tk.Frame(top_window, width=200, height=30)
    frame.grid(sticky="nw", columnspan=5)
    add_button = tk.Button(frame, text="Edit", font=FONT, command=save)
    add_button.grid(row=0, column=0, padx=10, pady=5)
    close_button = tk.Button(
        frame, text="Close", font=FONT, command=top_window.destroy)
    close_button.grid(row=0, column=1, pady=5)
    help_button = tk.Button(frame, text="?", font=FONT)
    help_button.grid(row=0, column=2, padx=10, pady=5)
def set_alarm(parent, share, choice_window):
    """adds alarm to a share

    Opens a dialog pre-filled with the current high/low alarm thresholds
    for the share's name (share[1]) and saves validated values back.
    """
    prof = share[1]
    alarm = manage_db.fetch_alarm(prof)
    choice_window.destroy()
    def save():
        high = high_price_entry.get()
        low = low_price_entry.get()
        # Only store the alarm when both thresholds parse as numbers.
        if(manage_db.check_for_real_numbers(high) and
           manage_db.check_for_real_numbers(low)):
            manage_db.add_alarm(prof, high, low)
            top_window.destroy()
            # share[6] (selling price) empty => still an active share.
            if share[6] == "":
                shares_page.Shares.curent_canvas(parent)
            else:
                shares_page.Shares.historical_canvas(parent)
    top_window = tk.Toplevel(parent, height=350, width=390)
    top_window.title("Edit")
    amount_label = tk.Label(
        top_window, text=prof, font=FONT)
    amount_label.grid(sticky="nw", padx=5, pady=5)
    amount_label = tk.Label(
        top_window, text="Let me know when:", font=FONT)
    amount_label.grid(sticky="nw", padx=5, pady=5)
    label = tk.Label(
        top_window, text="Price is higher or equal to:", font=FONT)
    label.grid(sticky="nw", padx=5, pady=5)
    label = tk.Label(
        top_window, text="Price is lower or equal to:", font=FONT)
    label.grid(sticky="nw", padx=5, pady=5)
    high_price_entry = tk.Entry(top_window, width=10, font=FONT)
    high_price_entry.grid(row=2, column=1, padx=5, pady=5)
    low_price_entry = tk.Entry(top_window, width=10, font=FONT)
    low_price_entry.grid(row=3, column=1, padx=5, pady=5)
    # Pre-fill with the thresholds already stored for this share.
    high_price_entry.insert(0, alarm[1])
    low_price_entry.insert(0, alarm[2])
    add_button = tk.Button(
        top_window, text="Add", font=FONT, command=save)
    add_button.grid(sticky="nw", padx=5)
| 3 | 3 |
Codewars/IpValidation.py | SelvorWhim/competitive | 0 | 12798211 | <reponame>SelvorWhim/competitive<gh_stars>0
def is_valid_octet(n_str):
    """Return True if *n_str* is a valid IPv4 octet.

    Valid means: ASCII digits only, no leading zero (except "0" itself),
    and a value between 0 and 255.
    """
    # str.isdigit() is True for non-ASCII digits (e.g. '²', for which
    # int() raises, and Eastern Arabic numerals, which int() parses), so
    # check for ASCII digits explicitly.
    if not n_str or any(c not in '0123456789' for c in n_str):
        return False
    if n_str != '0' and n_str.startswith('0'):
        return False
    return int(n_str) <= 255


def is_valid_IP(str):
    """Return True if *str* is a valid dotted-quad IPv4 address."""
    nums = str.split('.')
    return (len(nums) == 4) and all(is_valid_octet(n) for n in nums)
| 3.125 | 3 |
rial/util/util.py | L3tum/RIAL | 2 | 12798212 | import hashlib
import random
import string
from llvmlite.ir import Context
def generate_random_name(count: int):
    """Return a random alphanumeric identifier of *count* characters."""
    alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
    return ''.join(random.choices(alphabet, k=count))
def rreplace(s, old, new, occurrence=-1):
    """Replace the last *occurrence* occurrences of *old* in *s* with *new*.

    With the default occurrence=-1 every occurrence is replaced, but
    scanning from the right.
    """
    pieces = s.rsplit(old, occurrence)
    return new.join(pieces)
def pythonify(data: dict):
    """Recursively convert "true"/"false" strings (any case) in *data*
    to Python booleans, descending into nested dicts and lists.

    The dict is modified in place and also returned.
    """
    for key, value in data.items():
        data[key] = _pythonify_value(value)
    return data


def _pythonify_value(value):
    """Convert a single value; recurse into dicts and lists.

    Fix: the original called pythonify() on *every* list element, which
    crashed with AttributeError on lists containing non-dict items.
    """
    if isinstance(value, dict):
        return pythonify(value)
    if isinstance(value, list):
        return [_pythonify_value(item) for item in value]
    if isinstance(value, str):
        if value.lower() == "true":
            return True
        if value.lower() == "false":
            return False
    return value
def good_hash(w: str):
    """Return the hex MD5 digest of *w* (a stable, non-cryptographic id)."""
    digest = hashlib.md5(w.encode())
    return digest.hexdigest()
def _get_identified_type_if_exists(self: Context, name: str):
    """Return the identified type registered under *name*, or None."""
    return self.identified_types.get(name)
def monkey_patch():
    """Attach get_identified_type_if_exists() to llvmlite's Context.

    Call once at startup; afterwards every Context instance exposes a
    non-creating lookup for identified struct types.
    """
    Context.get_identified_type_if_exists = _get_identified_type_if_exists
| 2.5 | 2 |
swmmnetwork/convert.py | austinorr/swmmnetwork | 9 | 12798213 | <reponame>austinorr/swmmnetwork<filename>swmmnetwork/convert.py<gh_stars>1-10
import pandas
import networkx as nx
import hymo
from .util import _upper_case_column, _validate_hymo_inp
from .compat import from_pandas_edgelist, set_node_attributes
# Link (edge) section names recognized in a SWMM 5.1 .inp file.
SWMM_LINK_TYPES = [
    'weirs',
    'orifices',
    'conduits',
    'outlets',
    'pumps',
]
# Node section names recognized in a SWMM 5.1 .inp file.
SWMM_NODE_TYPES = [
    'subcatchments',
    'junctions',
    'outfalls',
    'dividers',
    'storage',
]
def nodes_to_df(G, index_col=None):
    """Tabulate the nodes of *G* (with attributes) as a DataFrame.

    Each row carries 'from' (the node), 'to' (stringified sorted
    successors), 'type' == 'node', plus the node's attribute dict.  When
    *index_col* is given, the node name is duplicated into that column.
    """
    rows = []
    for n, attrs in G.nodes(data=True):
        row = {}
        if index_col is not None:
            row[index_col] = str(n)
        row['from'] = str(n)
        row['to'] = str(sorted(G.successors(n)))
        row['type'] = 'node'
        row.update(attrs)
        rows.append(row)
    return pandas.DataFrame(rows)
def edges_to_df(G):
    """Tabulate the edges of *G* (with attributes) as a DataFrame.

    Each row carries 'from', 'to', 'type' == 'link', plus the edge's
    attribute dict.
    """
    rows = []
    for src, dst, attrs in G.edges(data=True):
        row = {'from': str(src), 'to': str(dst), 'type': 'link'}
        row.update(attrs)
        rows.append(row)
    return pandas.DataFrame(rows)
def network_to_df(G, index_col=None):
    """Combine node and edge tables of *G* into one sorted DataFrame."""
    combined = (
        pandas.concat([nodes_to_df(G, index_col), edges_to_df(G)])
        .reset_index(drop=True)
    )
    if index_col is not None:
        # Index by the requested column, coerce to str, and sort so node
        # and link rows interleave deterministically.
        combined = combined.set_index(index_col)
        combined.index = combined.index.map(str)
        combined = combined.sort_index()
    return combined
def pandas_edgelist_from_swmm_inp(inp):
    """Build an edge list DataFrame from a SWMM 5.1 .inp file.

    Columns are inlet_node / outlet_node / xtype / id (all str).
    Subcatchments become pseudo-links of xtype 'dt' whose id is the
    subcatchment name prefixed with '^'; every section in
    SWMM_LINK_TYPES contributes its links with the singular section
    name as xtype.
    """
    inp = _validate_hymo_inp(inp)
    catchment_links = (
        inp.subcatchments
        .pipe(_upper_case_column, cols='Outlet', include_index=True)
        .assign(Inlet_Node=lambda df: df.index)
        .assign(id=lambda df: df.index.map(lambda s: '^' + s))
        .assign(xtype='dt')
        .rename(columns={'Outlet': 'Outlet_Node'})
        .loc[:, ['Inlet_Node', 'Outlet_Node', 'xtype', 'id']]
        .rename(columns=lambda s: s.lower())
    )
    edge_types = SWMM_LINK_TYPES
    edge_dfs = []
    for xtype in edge_types:
        # hymo exposes each .inp section as an attribute; missing
        # sections yield None and are skipped.
        df = getattr(inp, xtype, None)
        if df is not None:
            df = (
                df
                .rename(columns={'From_Node': 'Inlet_Node', 'To_Node': 'Outlet_Node'})
                .pipe(_upper_case_column, cols=['Inlet_Node', 'Outlet_Node'], include_index=True)
                .loc[:, ['Inlet_Node', 'Outlet_Node']]
                .assign(id=lambda df: df.index)
                .assign(xtype=xtype if xtype[-1] != 's' else xtype[:-1])
                .loc[:, ['Inlet_Node', 'Outlet_Node', 'xtype', 'id']]
                .rename(columns=lambda s: s.lower())
            )
            edge_dfs.append(df)
    edges = pandas.concat([catchment_links] + edge_dfs).astype(str)
    return edges
def pandas_edgelist_to_edgelist(df, source='source', target='target', cols=None):
    """Convert an edge-list DataFrame into [source, target, attrs] triples.

    *cols* optionally restricts which columns end up in each attribute
    dict; a single string is treated as a one-element list.
    """
    indexed = df.set_index([source, target])
    if cols is not None:
        keep = [cols] if isinstance(cols, str) else cols
        indexed = indexed.loc[:, keep]
    triples = []
    for (src, dst), row in indexed.iterrows():
        triples.append([src, dst, row.to_dict()])
    return triples
def pandas_nodelist_to_nodelist(df):
    """Convert a node-attribute DataFrame into [(node, attr_dict), ...]."""
    return [(node, attrs) for node, attrs in df.to_dict('index').items()]
def pandas_node_attrs_from_swmm_inp(inp):
    """Build a node-attribute DataFrame from a SWMM 5.1 .inp file.

    The result is indexed by upper-cased node name and has one column,
    'xtype', holding the singular section name (e.g. 'junction').
    """
    inp = _validate_hymo_inp(inp)
    node_types = SWMM_NODE_TYPES
    node_dfs = []
    for xtype in node_types:
        # Sections absent from the .inp yield None and are skipped.
        df = getattr(inp, xtype, None)
        if df is not None:
            df = (
                df
                .pipe(_upper_case_column, include_index=True)
                .assign(xtype=xtype if xtype[-1] != 's' else xtype[:-1])
                .loc[:, ['xtype']]
                .rename(columns=lambda s: s.lower())
            )
            node_dfs.append(df)
    return pandas.concat(node_dfs).astype(str)
def add_edges_from_swmm_inp(G, inp):
    """Add the edges and nodes from a SWMM 5.1 input file.

    Parameters
    ----------
    G : nx.Graph-like object
    inp : file_path or hymo.SWMMInpFile
    """
    inp = _validate_hymo_inp(inp)
    df_edge_list = pandas_edgelist_from_swmm_inp(inp=inp)
    edge_list = pandas_edgelist_to_edgelist(df_edge_list,
                                            source='inlet_node',
                                            target='outlet_node')
    G.add_edges_from(edge_list)
    # Attach xtype (and any other) node attributes after the edges exist.
    df_node_attrs = pandas_node_attrs_from_swmm_inp(inp=inp).to_dict('index')
    set_node_attributes(G, values=df_node_attrs)
def from_swmm_inp(inp, create_using=None):
    """Create new nx.Graph-like object from a SWMM5.1 inp file

    Parameters
    ----------
    inp : file_path or hymo.SWMMInpFile
    create_using : nx.Graph-like object, optional (default=None)
        the type of graph to make. If None is specified, then this
        function defaults to an nx.MultiDiGraph() instance

    Returns
    -------
    Graph

    Reference
    ---------
    This function is meant to be similar to the nx.from_pandas_edgelist()
    """
    inp = _validate_hymo_inp(inp)
    if create_using is None:
        create_using = nx.MultiDiGraph()
    df_edge_list = pandas_edgelist_from_swmm_inp(inp=inp)
    G = from_pandas_edgelist(df_edge_list,
                             source='inlet_node',
                             target='outlet_node',
                             edge_attr=True,
                             create_using=create_using,
                             )
    # Node attributes (xtype per node) are applied after graph creation.
    df_node_attrs = pandas_node_attrs_from_swmm_inp(inp=inp).to_dict('index')
    set_node_attributes(G, values=df_node_attrs)
    return G
def swmm_inp_layout_to_pos(inp):
    """Reads and converts swmm node coordinates and subcatchment from inp
    file to networkx drawing `pos` format, i.e., a dict of node names with
    x, y coordinates as values.

    Parameters
    ----------
    inp : string or hymo.SwmmInputFile
        this file will be read to pull the node coordinates and subcatchment
        positions. Polygons are converted to coordinate pairs through their
        centroid.

    Returns
    -------
    dict suitable for use as the `pos` kwarg of networkx drawing methods.
    """
    inp = _validate_hymo_inp(inp)
    coords = inp.coordinates.pipe(_upper_case_column, include_index=True)
    polys = inp.polygons.pipe(_upper_case_column, include_index=True)
    # Each polygon is reduced to the mean of its vertices (centroid) so
    # every element maps to a single (x, y) point.
    centroids = polys.astype(float).groupby(polys.index).mean()
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pandas.concat is the supported replacement.
    pos = (
        pandas.concat([coords.astype(float), centroids])
        .T
        .to_dict('list')
    )
    return {str(k): list(map(float, v)) for k, v in pos.items()}
| 2.234375 | 2 |
tlh/data/rom.py | notyourav/the-little-hat | 0 | 12798214 | from typing import Optional
from tlh.const import RomVariant
from tlh import settings
class Rom:
    """Read-only, in-memory copy of a ROM file."""

    def __init__(self, filename: str):
        with open(filename, 'rb') as handle:
            self.bytes = bytearray(handle.read())

    def get_bytes(self, from_index: int, to_index: int) -> bytearray:
        # TODO apply constraints here? Or one level above in the HexEditorInstance?
        return self.bytes[from_index:to_index]

    def get_byte(self, index: int) -> int:
        return self.bytes[index]

    def length(self) -> int:
        return len(self.bytes)

    def get_pointer(self, index: int) -> int:
        # Pointers are stored as 4-byte little-endian values.
        raw = self.bytes[index:index + 4]
        return int.from_bytes(raw, 'little')
# Cache of loaded Rom objects keyed by RomVariant.
# Rom data is read only, so we only need to read it once per process.
roms: dict[RomVariant, Rom] = {}
# TODO invalidate roms when settings change?
# necessary? Once we have a valid rom, there will be no changes
def get_rom(variant: RomVariant) -> Optional[Rom]:
    """Return the cached Rom for *variant*, loading it on first access.

    Returns None when the configured path is missing or unreadable.
    Call invalidate_rom() to force a reload after settings change.
    """
    global roms
    if variant not in roms:
        try:
            roms[variant] = Rom(settings.get_rom(variant))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt /
            # SystemExit are no longer swallowed; any load failure still
            # yields None.
            return None
    return roms[variant]
def invalidate_rom(variant: RomVariant) -> None:
    """Drop the cached Rom for *variant* so it is re-read on next access."""
    global roms
    # dict.pop with a default is a no-op when the variant is not cached.
    roms.pop(variant, None)
accounts/forms.py | xNovax/RoomScout | 24 | 12798215 | <filename>accounts/forms.py<gh_stars>10-100
from captcha.fields import ReCaptchaField
from captcha.widgets import ReCaptchaV3
from django import forms
class AllauthSignupForm(forms.Form):
    """Signup form that adds an invisible reCAPTCHA v3 check for allauth."""

    # Score-based, invisible captcha widget; empty label hides it visually.
    captcha = ReCaptchaField(widget=ReCaptchaV3, label='')

    field_order = ['email', 'password1', 'password2', 'captcha']

    def signup(self, request, user):
        """ Required, or else it throws deprecation warnings """
        pass
class PreferencesForm(forms.Form):
    """Opt-in contact preferences shown on the account settings page."""

    # House-activity notifications opt-in.
    general_contact = forms.BooleanField(label='Yes, I would like RoomScout to contact me about activity in Houses that I am a member of.', required=False)
    # Marketing / promotional email opt-in.
    promo_contact = forms.BooleanField(label='Yes, I would like RoomScout to contact me about events, new features, and other promotional information.', required=False)
class VerificationForm(forms.Form):
    """Collects a phone number for account verification (optional)."""

    phone_number = forms.CharField(max_length=20, label='Phone Number', required=False)
| 2.53125 | 3 |
linuxOperation/app/security/views.py | zhouli121018/core | 0 | 12798216 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import copy
# import os
import json
# import ConfigParser
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.template.response import TemplateResponse
from django.core.paginator import Paginator, EmptyPage, InvalidPage
from django.db.models import Q
from django_redis import get_redis_connection
from django.utils.translation import ugettext_lazy as _
from app.core.models import Mailbox, DomainAttr, Domain
from app.utils.domain_session import get_domainid_bysession, get_session_domain
# from lib.tools import get_process_pid, restart_process, get_fail2ban_info, fail2ban_ip
from lib.licence import licence_required
from lib.tools import clear_redis_cache
from .forms import BanRuleForm, BanBlockListForm, Fail2BanTrustForm, SpamSetForm, \
SendFrequencyForm, PasswordWeakForm, PasswordWeakImportForm
from .models import Fail2Ban, Fail2BanTrust, Fail2BanBlock, PasswordWeakList
def clear_fail2ban_cache():
    """Delete every fail2ban_cache* key from Redis, then flush the
    shared Redis cache."""
    connection = get_redis_connection()
    for cache_key in connection.keys("fail2ban_cache*"):
        connection.delete(cache_key)
    clear_redis_cache()
###############################
# Fail2ban ban rules (list / delete)
@licence_required
def fail2ban_rulelist(request):
    """List fail2ban rules; a POST with status == "delete" removes one."""
    if request.method == "POST":
        rule_pk = request.POST.get('id', "")
        action = request.POST.get('status', "")
        if action == "delete":
            Fail2Ban.objects.filter(pk=rule_pk).delete()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
            return HttpResponseRedirect(reverse('fail2ban_rulelist'))
    return render(request, "security/fail2ban_rulelist.html", context={})
@licence_required
def fail2ban_rulelist_ajax(request):
    """DataTables endpoint: server-side search/sort/paging of ban rules.

    Reads the DataTables query-string protocol, renders one template per
    row and returns the DataTables JSON envelope.
    """
    data = request.GET
    # DataTables sends ordering as order[0][column] / order[0][dir].
    order_column = data.get('order[0][column]', '')
    order_dir = data.get('order[0][dir]', '')
    search = data.get('search[value]', '')
    # Column-index -> model-field map for ordering (sic: "colums").
    colums = ['id', 'name', 'proto', 'internal','block_fail', 'block_unexists', 'block_minute', 'update_time', 'disabled',]
    lists = Fail2Ban.objects.all()
    if search:
        lists = lists.filter( Q(name__icontains=search) | Q(proto__icontains=search) )
    if lists.exists() and order_column and int(order_column) < len(colums):
        if order_dir == 'desc':
            lists = lists.order_by('-%s' % colums[int(order_column)])
        else:
            lists = lists.order_by('%s' % colums[int(order_column)])
    # Page size ("length") and row offset ("start") from DataTables.
    try:
        length = int(data.get('length', 1))
    except ValueError:
        length = 1
    try:
        start_num = int(data.get('start', '0'))
        # NOTE(review): true division under Python 3 makes `page` a float;
        # Django's Paginator coerces it with int() — confirm intended.
        page = start_num / length + 1
    except ValueError:
        start_num = 0
        page = 1
    count = len(lists)
    if start_num >= count:
        page = 1
    paginator = Paginator(lists, length)
    try:
        lists = paginator.page(page)
    except (EmptyPage, InvalidPage):
        lists = paginator.page(paginator.num_pages)
    rs = {"sEcho": 0, "iTotalRecords": count, "iTotalDisplayRecords": count, "aaData": []}
    # Each row is rendered through a template; the <td> cell bodies are
    # then scraped out with a regex to form the DataTables row arrays.
    re_str = '<td.*?>(.*?)</td>'
    number = length * (page-1) + 1
    for d in lists.object_list:
        t = TemplateResponse(request, 'security/fail2ban_rulelist_ajax.html', {'d': d, 'number': number})
        t.render()
        rs["aaData"].append(re.findall(re_str, t.content, re.DOTALL))
        number += 1
    return HttpResponse(json.dumps(rs), content_type="application/json")
@licence_required
def fail2ban_rule_add(request):
    """Render and process the "add fail2ban rule" form."""
    if request.method == "POST":
        form = BanRuleForm(request.POST)
        if form.is_valid():
            form.save()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'添加规则成功'))
            return HttpResponseRedirect(reverse('fail2ban_rulelist'))
    else:
        form = BanRuleForm()
    return render(request, "security/fail2ban_rule_add.html", context={"form": form})
@licence_required
def fail2ban_rule_modify(request, rule_id):
    """Render and process the "edit fail2ban rule" form for *rule_id*."""
    obj = Fail2Ban.objects.get(id=rule_id)
    if request.method == "POST":
        form = BanRuleForm(request.POST, instance=obj)
        if form.is_valid():
            form.save()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'修改规则成功'))
            return HttpResponseRedirect(reverse('fail2ban_rulelist'))
    else:
        form = BanRuleForm(instance=obj)
    return render(request, "security/fail2ban_rule_add.html", context={"form": form})
###############################
# Blocked IP addresses (list / delete)
@licence_required
def fail2ban_blocklist(request):
    """List blocked IPs; a POST with status == "delete" removes one."""
    if request.method == "POST":
        block_pk = request.POST.get('id', "")
        action = request.POST.get('status', "")
        if action == "delete":
            Fail2BanBlock.objects.filter(pk=block_pk).delete()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
            return HttpResponseRedirect(reverse('fail2ban_blocklist'))
    return render(request, "security/fail2ban_blocklist.html", context={})
@licence_required
def fail2ban_blocklist_ajax(request):
    """DataTables endpoint: server-side search/sort/paging of blocked IPs.

    Same protocol as fail2ban_rulelist_ajax, but over Fail2BanBlock and
    searching by name or IP.
    """
    data = request.GET
    # DataTables sends ordering as order[0][column] / order[0][dir].
    order_column = data.get('order[0][column]', '')
    order_dir = data.get('order[0][dir]', '')
    search = data.get('search[value]', '')
    # Column-index -> model-field map for ordering (sic: "colums").
    colums = ['id', 'name', 'ip', 'expire_time', 'update_time', 'disabled',]
    lists = Fail2BanBlock.objects.all()
    if search:
        lists = lists.filter( Q(name__icontains=search) | Q(ip__icontains=search) )
    if lists.exists() and order_column and int(order_column) < len(colums):
        if order_dir == 'desc':
            lists = lists.order_by('-%s' % colums[int(order_column)])
        else:
            lists = lists.order_by('%s' % colums[int(order_column)])
    # Page size ("length") and row offset ("start") from DataTables.
    try:
        length = int(data.get('length', 1))
    except ValueError:
        length = 1
    try:
        start_num = int(data.get('start', '0'))
        # NOTE(review): true division under Python 3 makes `page` a float;
        # Django's Paginator coerces it with int() — confirm intended.
        page = start_num / length + 1
    except ValueError:
        start_num = 0
        page = 1
    count = len(lists)
    if start_num >= count:
        page = 1
    paginator = Paginator(lists, length)
    try:
        lists = paginator.page(page)
    except (EmptyPage, InvalidPage):
        lists = paginator.page(paginator.num_pages)
    rs = {"sEcho": 0, "iTotalRecords": count, "iTotalDisplayRecords": count, "aaData": []}
    # Rows are rendered via template, then <td> bodies regex-scraped
    # back out to form the DataTables row arrays.
    re_str = '<td.*?>(.*?)</td>'
    number = length * (page-1) + 1
    for d in lists.object_list:
        t = TemplateResponse(request, 'security/fail2ban_blocklist_ajax.html', {'d': d, 'number': number})
        t.render()
        rs["aaData"].append(re.findall(re_str, t.content, re.DOTALL))
        number += 1
    return HttpResponse(json.dumps(rs), content_type="application/json")
@licence_required
def fail2ban_block_add(request):
    """Render and process the "block an IP" form."""
    if request.method == "POST":
        form = BanBlockListForm(request.POST)
        if form.is_valid():
            form.save()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'添加成功'))
            return HttpResponseRedirect(reverse('fail2ban_blocklist'))
    else:
        form = BanBlockListForm()
    return render(request, "security/fail2ban_block_add.html", context={"form": form})
@licence_required
def fail2ban_block_modify(request, block_id):
    """Render and process the "edit blocked IP" form for *block_id*."""
    obj = Fail2BanBlock.objects.get(id=block_id)
    if request.method == "POST":
        form = BanBlockListForm(request.POST, instance=obj)
        if form.is_valid():
            form.save()
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'修改成功'))
            return HttpResponseRedirect(reverse('fail2ban_blocklist'))
    else:
        form = BanBlockListForm(instance=obj)
    return render(request, "security/fail2ban_block_add.html", context={"form": form})
###############################
# Block whitelist (屏蔽白名单): trusted entries exempt from fail2ban blocking
@licence_required
def fail2ban_whitelist(request):
    """List fail2ban whitelist entries; POST with status=delete removes one."""
    if request.method != "POST":
        return render(request, "security/fail2ban_whitelist.html", context={})
    entry_id = request.POST.get('id', "")
    action = request.POST.get('status', "")
    if action == "delete":
        Fail2BanTrust.objects.filter(pk=entry_id).delete()
        # Cached whitelist entries are stale after a deletion.
        clear_fail2ban_cache()
        messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
    return HttpResponseRedirect(reverse('fail2ban_whitelist'))
@licence_required
def fail2ban_whitelist_add(request):
    """Create a new fail2ban whitelist (trusted) entry.

    GET renders an empty form; a valid POST saves the entry, clears the
    fail2ban cache and redirects back to the whitelist.
    """
    if request.method == "POST":
        form = Fail2BanTrustForm(request.POST)
        if form.is_valid():
            form.save()
            # Cached whitelist is stale once a new entry exists.
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'添加成功'))
            return HttpResponseRedirect(reverse('fail2ban_whitelist'))
    else:
        form = Fail2BanTrustForm()
    return render(request, "security/fail2ban_whitelist_add.html", context={"form": form})
@licence_required
def fail2ban_whitelist_modify(request, white_id):
    """Edit an existing fail2ban whitelist entry identified by *white_id*.

    NOTE(review): Fail2BanTrust.objects.get() raises DoesNotExist for an
    unknown id; callers appear to pass valid ids only.
    """
    obj = Fail2BanTrust.objects.get(id=white_id)
    if request.method == "POST":
        form = Fail2BanTrustForm(request.POST, instance=obj)
        if form.is_valid():
            form.save()
            # Cached whitelist is stale after a modification.
            clear_fail2ban_cache()
            messages.add_message(request, messages.SUCCESS, _(u'修改成功'))
            return HttpResponseRedirect(reverse('fail2ban_whitelist'))
    else:
        form = Fail2BanTrustForm(instance=obj)
    return render(request, "security/fail2ban_whitelist_add.html", context={"form": form})
@licence_required
def fail2ban_whitelist_ajax(request):
    """DataTables ajax endpoint for the fail2ban whitelist.

    Reads DataTables query parameters (ordering, search, paging) from
    request.GET, pages the queryset, renders each row through a template
    and returns the extracted <td> cells as JSON.
    """
    data = request.GET
    order_column = data.get('order[0][column]', '')
    order_dir = data.get('order[0][dir]', '')
    search = data.get('search[value]', '')
    colums = ['id', 'ip', 'name', 'disabled',]
    lists = Fail2BanTrust.objects.all()
    if search:
        lists = lists.filter( Q(name__icontains=search) | Q(ip__icontains=search) )
    if lists.exists() and order_column and int(order_column) < len(colums):
        if order_dir == 'desc':
            lists = lists.order_by('-%s' % colums[int(order_column)])
        else:
            lists = lists.order_by('%s' % colums[int(order_column)])
    try:
        length = int(data.get('length', 1))
    except ValueError:
        length = 1
    # BUG FIX: guard against a zero/negative page size from the client,
    # which would otherwise raise ZeroDivisionError below.
    if length < 1:
        length = 1
    try:
        start_num = int(data.get('start', '0'))
        # BUG FIX: use integer floor division -- Paginator.page() needs an
        # int page number (plain '/' yields a float on Python 3).
        page = start_num // length + 1
    except ValueError:
        start_num = 0
        page = 1
    count = len(lists)
    if start_num >= count:
        page = 1
    paginator = Paginator(lists, length)
    try:
        lists = paginator.page(page)
    except (EmptyPage, InvalidPage):
        lists = paginator.page(paginator.num_pages)
    rs = {"sEcho": 0, "iTotalRecords": count, "iTotalDisplayRecords": count, "aaData": []}
    # Row cells are produced by rendering the row template and scraping the
    # <td> contents out of the rendered HTML.
    re_str = '<td.*?>(.*?)</td>'
    number = length * (page-1) + 1
    for d in lists.object_list:
        t = TemplateResponse(request, 'security/fail2ban_whitelist_ajax.html', {'d': d, 'number': number})
        t.render()
        rs["aaData"].append(re.findall(re_str, t.content, re.DOTALL))
        number += 1
    return HttpResponse(json.dumps(rs), content_type="application/json")
@licence_required
def security_antispam(request):
    """Edit the anti-spam settings (the 'cf_antispam' DomainAttr) for the
    domain currently selected in the session."""
    domain_id = get_domainid_bysession(request)
    obj = Domain.objects.filter(id=domain_id).first()
    if not obj:
        # Unknown/missing domain: bounce back to this same page.
        return HttpResponseRedirect(reverse('security_antispam'))
    spam_set = DomainAttr.objects.filter(domain_id=obj.id,type="system",item="cf_antispam").first()
    form = SpamSetForm(instance=spam_set, request=request, domain_id=obj.id)
    if request.method == "POST":
        # Re-bind the form with the posted data for validation/saving.
        form = SpamSetForm(instance=spam_set, post=request.POST, request=request, domain_id=obj.id)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, _(u'修改设置成功'))
            return HttpResponseRedirect(reverse('security_antispam'))
        else:
            messages.add_message(request, messages.ERROR, _(u'修改设置失败,请检查输入参数'))
    return render(request, "security/antispam.html", context={
        "form": form,
        "domain": obj,
        # Current spam/virus check settings as exposed by SpamSetForm.
        "spam_check_local_spam" : form.spam_check_local_spam.value,
        "spam_check_local_virus" : form.spam_check_local_virus.value,
        "spam_check_outside_spam" : form.spam_check_outside_spam.value,
        "spam_check_outside_virus" : form.spam_check_outside_virus.value,
    })
@licence_required
def security_frequency(request):
    """Edit the send-frequency limit (the 'cf_sendlimit' DomainAttr) for the
    domain currently selected in the session."""
    domain_id = get_domainid_bysession(request)
    domain = Domain.objects.filter(id=domain_id).first()
    if not domain:
        # Unknown/missing domain: bounce back to this same page.
        return HttpResponseRedirect(reverse('security_frequency'))
    frequency_set = DomainAttr.objects.filter(domain_id=domain.id,type="system",item="cf_sendlimit").first()
    form = SendFrequencyForm(instance=frequency_set)
    if request.method == "POST":
        form = SendFrequencyForm(instance=frequency_set, post=request.POST)
        if form.is_valid():
            form.save()
            messages.add_message(request, messages.SUCCESS, _(u'修改设置成功'))
            # NOTE(review): unlike security_antispam, this re-renders instead
            # of redirecting after a successful POST -- confirm intended.
    return render(request, "security/frequency_setting.html", context={
        "form" : form,
        "domain" : domain,
    })
@licence_required
def password_weaklist(request):
    """List weak passwords; POST with status=delete removes one entry."""
    if request.method != "POST":
        return render(request, "security/password_weak_list.html", context={})
    entry_id = request.POST.get('id', "")
    action = request.POST.get('status', "")
    if action == "delete":
        PasswordWeakList.objects.filter(pk=entry_id).delete()
        # Cached weak-password data is stale after a deletion.
        clear_redis_cache()
        messages.add_message(request, messages.SUCCESS, _(u'删除成功'))
    return HttpResponseRedirect(reverse('password_weaklist'))
@licence_required
def password_weaklist_ajax(request):
    """DataTables ajax endpoint for the weak-password list.

    Reads DataTables query parameters (ordering, search, paging) from
    request.GET, caps results at 10000 rows, renders each row through a
    template and returns the extracted <td> cells as JSON.
    """
    data = request.GET
    order_column = data.get('order[0][column]', '')
    order_dir = data.get('order[0][dir]', '')
    search = data.get('search[value]', '')
    colums = ['id', 'password']
    if search:
        lists = PasswordWeakList.objects.filter( Q(password__contains=search) )
    else:
        lists = PasswordWeakList.objects.all()
    if lists.exists() and order_column and int(order_column) < len(colums):
        if order_dir == 'desc':
            lists = lists.order_by('-%s' % colums[int(order_column)])
        else:
            lists = lists.order_by('%s' % colums[int(order_column)])
    # Hard cap to keep pagination over a huge table cheap.
    lists = lists[:10000]
    try:
        length = int(data.get('length', 1))
    except ValueError:
        length = 1
    # BUG FIX: guard against a zero/negative page size from the client,
    # which would otherwise raise ZeroDivisionError below.
    if length < 1:
        length = 1
    try:
        start_num = int(data.get('start', '0'))
        # BUG FIX: use integer floor division -- Paginator.page() needs an
        # int page number (plain '/' yields a float on Python 3).
        page = start_num // length + 1
    except ValueError:
        start_num = 0
        page = 1
    count = lists.count()
    if start_num >= count:
        page = 1
    paginator = Paginator(lists, length)
    try:
        lists = paginator.page(page)
    except (EmptyPage, InvalidPage):
        lists = paginator.page(paginator.num_pages)
    rs = {"sEcho": 0, "iTotalRecords": count, "iTotalDisplayRecords": count, "aaData": []}
    # Row cells are produced by rendering the row template and scraping the
    # <td> contents out of the rendered HTML.
    re_str = '<td.*?>(.*?)</td>'
    number = length * (page-1) + 1
    for d in lists.object_list:
        t = TemplateResponse(request, 'security/password_weak_ajax.html', {'d': d, 'number': number})
        t.render()
        rs["aaData"].append(re.findall(re_str, t.content, re.DOTALL))
        number += 1
    return HttpResponse(json.dumps(rs), content_type="application/json")
def _clean_weak_password(raw):
    """Normalize one imported password value.

    Strips surrounding whitespace and removes newlines, NULs, spaces and
    tabs; returns '' when nothing remains (callers skip empty results).
    """
    return raw.strip().replace('\n', '').replace('\r', '').replace('\000', '').replace(' ', '').replace('\t', '')

@licence_required
def password_weaklist_import(request):
    """Bulk-import weak passwords from an uploaded txt/csv/xls(x) file.

    Each non-empty cleaned value is collected and handed to the form's
    save_password_list(); per-entry failures are reported via messages.
    """
    form = PasswordWeakImportForm()
    # NOTE(review): domain is looked up but never used below; kept because
    # get_session_domain() may have session side effects -- confirm.
    domain_id = get_domainid_bysession(request)
    domain = get_session_domain(domain_id)
    if request.method == "POST":
        form = PasswordWeakImportForm(data=request.POST, files=request.FILES)
        if form.is_valid():
            success, fail = 0, 0
            fail_list = []
            password_list = []
            if form.file_ext == 'txt':
                # One password per line.
                for line in form.file_obj.readlines():
                    password = _clean_weak_password(line)
                    if not password:
                        continue
                    password_list.append(password)
            if form.file_ext == 'csv':
                import csv
                lines = list(csv.reader(form.file_obj))
                for elem in lines:
                    # BUG FIX: the original cleaned the stale variable `line`
                    # (left over from the txt branch) instead of the csv row.
                    # csv.reader yields lists of fields; use the first column.
                    if not elem:
                        continue
                    password = _clean_weak_password(elem[0])
                    if not password:
                        continue
                    password_list.append(password)
            if form.file_ext in ('xls', 'xlsx'):
                import xlrd
                content = form.file_obj.read()
                workbook = xlrd.open_workbook(filename=None, file_contents=content)
                table = workbook.sheets()[0]
                for line in xrange(table.nrows):
                    # Skip the first two (header) rows.
                    if line in (0, 1):
                        continue
                    # BUG FIX: row_values() returns a list of cell values; the
                    # original called .strip() on that list (AttributeError).
                    # Use the first cell and coerce non-text cells.
                    cells = table.row_values(line)
                    if not cells:
                        continue
                    cell = cells[0]
                    if isinstance(cell, float):
                        # xlrd reads numeric cells as float; 123456 arrives as
                        # 123456.0 -- render integral values without '.0'.
                        cell = str(int(cell)) if cell == int(cell) else str(cell)
                    elif not hasattr(cell, 'strip'):
                        cell = str(cell)
                    password = _clean_weak_password(cell)
                    if not password:
                        continue
                    password_list.append(password)
            fail_list = form.save_password_list(password_list)
            fail = len(fail_list)
            success = len(password_list) - fail
            for line in fail_list:
                messages.add_message(request, messages.ERROR, _(u'批量添加失败 : %(fail)s') % {"fail": line})
            messages.add_message(request, messages.SUCCESS,
                _(u'批量添加成功%(success)s个, 失败%(fail)s个') % {"success": success, "fail": fail})
            return HttpResponseRedirect(reverse('password_weaklist'))
    return render(request, "security/password_weak_import.html", {'form': form,})
clai/server/plugins/helpme/helpme.py | cohmoti/clai | 391 | 12798217 | #
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
import os
from pathlib import Path
from clai.tools.colorize_console import Colorize
from clai.server.searchlib.data import Datastore
from clai.server.agent import Agent
from clai.server.command_message import State, Action, NOOP_COMMAND
from clai.server.logger import current_logger as logger
class HelpMeAgent(Agent):
    """CLAI agent that reacts to a failed command: it searches the configured
    knowledge-base providers for the captured stderr text and, when a
    relevant post is found, suggests the closest matching manpage."""
    def __init__(self):
        super(HelpMeAgent, self).__init__()
        inifile_path = os.path.join(str(Path(__file__).parent.absolute()), 'config.ini')
        self.store = Datastore(inifile_path)
    def compute_simple_token_similarity(self, src_sequence, tgt_sequence):
        """Return the fraction of unique source tokens present in the target.

        Tokens are whitespace-separated, lowercased and stripped.
        Returns 0.0 when the source contains no tokens.
        """
        src_tokens = set([x.lower().strip() for x in src_sequence.split()])
        tgt_tokens = set([x.lower().strip() for x in tgt_sequence.split()])
        if not src_tokens:
            # BUG FIX: an empty or whitespace-only source sequence previously
            # raised ZeroDivisionError; report zero similarity instead.
            return 0.0
        return len(src_tokens & tgt_tokens) / len(src_tokens)
    def compute_confidence(self, query, forum, manpage):
        """
        Computes the confidence based on query, stack-exchange post answer and manpage
        Algorithm:
            1. Compute token-wise similarity b/w query and forum text
            2. Compute token-wise similarity b/w forum text and manpage description
            3. Return product of two similarities
        Args:
            query (str): standard error captured in state variable
            forum (str): answer text from most relevant stack exchange post w.r.t query
            manpage (str): manpage description for most relevant manpage w.r.t. forum
        Returns:
            confidence (float): confidence on the returned manpage w.r.t. query
        """
        query_forum_similarity = self.compute_simple_token_similarity(query, forum[0]['Content'])
        forum_manpage_similarity = self.compute_simple_token_similarity(forum[0]['Answer'], manpage)
        confidence = query_forum_similarity * forum_manpage_similarity
        return confidence
    def get_next_action(self, state: State) -> Action:
        # Pre-execution hook: pass the user's command through unchanged.
        return Action(suggested_command=state.command)
    def post_execute(self, state: State) -> Action:
        """Search providers for help after a failed command and suggest a manpage."""
        logger.info("==================== In Helpme Bot:post_execute ============================")
        logger.info("State:\n\tCommand: {}\n\tError Code: {}\n\tStderr: {}".format(state.command,
                                                                                   state.result_code,
                                                                                   state.stderr))
        logger.info("============================================================================")
        # Nothing to do for commands that succeeded.
        if state.result_code == '0':
            return Action(suggested_command=state.command)
        # NOTE: local type annotations referencing un-imported names
        # (OrderedDict, Provider, List) were removed -- they broke tooling.
        apis = self.store.get_apis()
        helpWasFound = False
        for provider in apis:
            # We don't want to process the manpages provider... thats the provider
            # that we use to clarify results from other providers
            if provider == "manpages":
                logger.info(f"Skipping search provider 'manpages'")
                continue
            thisAPI = apis[provider]
            # Skip this provider if it isn't supported on the target OS
            if not thisAPI.can_run_on_this_os():
                logger.info(f"Skipping search provider '{provider}'")
                logger.info(f"==> Excluded on platforms: {str(thisAPI.get_excludes())}")
                continue  # Move to next provider in list
            logger.info(f"Processing search provider '{provider}'")
            if thisAPI.has_variants():
                logger.info(f"==> Has search variants: {str(thisAPI.get_variants())}")
                variants = thisAPI.get_variants()
            else:
                logger.info(f"==> Has no search variants")
                variants = [None]
            # For each search variant supported by the current API, query
            # the data store to find the closest matching data. If there are
            # no search variants (ie: the singleton variant case), the variants
            # list will only contain a single, Nonetype value.
            for variant in variants:
                if variant is not None:
                    logger.info(f"==> Searching variant '{variant}'")
                    data = self.store.search(state.stderr, service=provider, size=1, searchType=variant)
                else:
                    data = self.store.search(state.stderr, service=provider, size=1)
                if data:
                    apiString = str(thisAPI)
                    if variant is not None:
                        apiString = f"{apiString} '{variant}' variant"
                    logger.info(f"==> Success!!! Found a result in the {apiString}")
                    # Find closest match b/w relevant data and manpages for unix
                    searchResult = thisAPI.extract_search_result(data)
                    manpages = self.store.search(searchResult, service='manpages', size=5)
                    if manpages:
                        logger.info("==> Success!!! found relevant manpages.")
                        command = manpages['commands'][-1]
                        confidence = manpages['dists'][-1]
                        # FIXME: Artificially boosted confidence
                        confidence = 1.0
                        logger.info("==> Command: {} \t Confidence:{}".format(command, confidence))
                        # Set return data
                        suggested_command="man {}".format(command)
                        description=Colorize() \
                            .emoji(Colorize.EMOJI_ROBOT).append(f"I did little bit of Internet searching for you, ") \
                            .append(f"and found this in the {thisAPI}:\n") \
                            .info() \
                            .append(thisAPI.get_printable_output(data)) \
                            .warning() \
                            .append("Do you want to try: man {}".format(command)) \
                            .to_console()
                        # Mark that help was indeed found
                        helpWasFound = True
                        # We've found help; no need to keep searching
                        break
            # If we found help, then break out of the outer loop as well
            if helpWasFound:
                break
        if not helpWasFound:
            logger.info("Failure: Unable to be helpful")
            logger.info("============================================================================")
            suggested_command=NOOP_COMMAND
            description=Colorize().emoji(Colorize.EMOJI_ROBOT) \
                .append(
                f"Sorry. It looks like you have stumbled across a problem that even the Internet doesn't have answer to.\n") \
                .info() \
                .append(f"Have you tried turning it OFF and ON again. ;)") \
                .to_console()
            confidence=0.0
        return Action(suggested_command=suggested_command,
                      description=description,
                      confidence=confidence)
| 2.03125 | 2 |
cli/src/orchestrate/main.py | muskanmahajan37/solutions-cloud-orchestrate | 17 | 12798218 | <reponame>muskanmahajan37/solutions-cloud-orchestrate
# python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Executes main command-line entry-point.
"""
import inspect
import logging
import optparse
import os
import pkgutil
import sys
from orchestrate import base
from orchestrate import utils
# We need to import this module in order to configure loggers.
# pylint: disable=unused-import
import orchestrate.logger
log = logging.getLogger(__name__)
class ModuleLoaderError(Exception):
  """Provide details on why a given module could not be loaded.

  Raised by create_command when a module cannot be loaded or does not
  define a Command subclass of OrchestrateCommand.
  """
def create_command(name, loader):
  """Returns a command instance from the given module loader.
  Args:
    name: Module name.
    loader: Module loader.
  Returns:
    An instance of the module's Command class.
  Raises:
    ModuleLoaderError: If module could not be loaded or does not contain a
      subclass of OrchestateCommand.
  """
  log.debug('loading module %s from %s', name, loader)
  # NOTE(review): loader.find_module/load_module are deprecated in newer
  # Python versions in favor of importlib APIs -- confirm the minimum
  # supported Python version before migrating.
  module = loader.find_module(name).load_module(name)
  try:
    # The module must expose a class literally named Command that subclasses
    # base.OrchestrateCommand; anything else is treated as a load failure.
    command_type = getattr(module, 'Command')
    if not inspect.isclass(command_type) \
        or not issubclass(command_type, base.OrchestrateCommand):
      raise TypeError()
  except (AttributeError, TypeError):
    message = (
        'Could not find implementation of OrchestrateCommand {name} in module'
        ' {module}'
    ).format(
        name=name,
        module=module.__file__,
    )
    raise ModuleLoaderError(message)
  command = command_type()
  return command
def parse_arguments(command, name, parents, arguments):
  """Split the raw command line into parsed options and positional arguments.
  Args:
    command: OrchestrateCommand instance.
    name: Module name.
    parents: Command hierarchy.
    arguments: Entire command-line arguments.
  Returns:
    A tuple of options and arguments.
  """
  log.debug('Parsing arguments')
  usage = """Usage: {parents} {command} [OPTIONS] [ARGUMENTS]
{description}""".format(
      parents=' '.join(parents),
      command=name,
      description=command.description,
  )
  parser = optparse.OptionParser(usage=usage)
  # Defaults: global defaults first, then command-specific overrides.
  defaults = {}
  for source in (utils.get_common_option_defaults(), command.defaults):
    defaults.update(source)
  parser.set_defaults(**defaults)
  # Global options live in their own option group; command options follow.
  global_group = optparse.OptionGroup(parser, 'Global options')
  global_group.add_options(utils.get_common_options())
  parser.add_option_group(global_group)
  parser.add_options(command.options)
  return parser.parse_args(arguments)
def execute_command(name, parents, loader, arguments):
  """Instantiate the named command, parse its arguments and run it.
  Args:
    name: Command name, e.g. create.
    parents: Names of parent commands, e.g. ['orchestrate', 'images']
    loader: Object that can load the module containing the command.
    arguments: Arguments relevant to the command.
  """
  log.debug('execute %(parents)s %(command)s %(arguments)s', dict(
      parents=' '.join(parents),
      command=name,
      arguments=arguments,
  ))
  command = create_command(name, loader)
  options, positional = parse_arguments(command, name, parents, arguments)
  # Honor --verbose globally before handing control to the command.
  if options.verbose:
    logging.getLogger().setLevel(logging.DEBUG)
  command.run(options, positional)
def find_valid_commands(path):
  """Returns list of valid commands in the given path.
  A valid command is either a module, or a package that contains at least one
  package or a module. This effectively trims empty command packages.
  Args:
    path: Path to package to introspect.
  """
  commands = []
  for info in pkgutil.walk_packages([path]):
    if info.ispkg:
      # Keep a package only when it contains at least one module/subpackage.
      child_path = os.path.sep.join([path, info.name])
      if any(True for _ in pkgutil.walk_packages([child_path])):
        commands.append(info.name)
    else:
      commands.append(info.name)
  return commands
def suggest_recovery_options(command, parents, path, children_names):
  """Suggest sensible recovery options when no command is found.
  There is likely a syntax error, or a non-existent command, e.g.
    orchestrate images crate (instead of create)
    orchestrate foobar create (foobar in not a command)
  User could have typed a non-leaf command, e.g.:
    orchestrate images (instead of orchestrate images create)
  Let's provide user with information about recovery options and possible
  subcommands at the deepest level we managed to get.
  Args:
    command: Attempted command.
    parents: Upper command levels.
    path: Path to last valid command.
    children_names: Names of valid commands found at immediate parent above.
  """
  parent_path = os.path.dirname(path)
  valid_commands = find_valid_commands(path)
  # Was it a syntax error?
  if command not in children_names[parent_path]:
    log.error('Invalid choice: %s', command)
    log.info('Maybe you meant:')
  else:
    # It was an incomplete command
    if valid_commands:
      log.info('Command name argument expected.')
      full_command = ' '.join(parents)
    else:
      log.error('Invalid choice: %s', command)
      full_command = ' '.join(parents[:-1])
    log.info('Available commands for %s:', full_command)
  # If no commands at the current level, provide suggestions at the level above.
  if not valid_commands:
    valid_commands = find_valid_commands(parent_path)
  for valid_command in valid_commands:
    # Display names use hyphens even though module names use underscores.
    log.info('  %s', valid_command.replace('_', '-'))
def main(arguments=None):
  """Runs command-line.
  Walks the orchestrate.commands package level by level, matching each
  command-line token against a submodule name, and executes the first
  matching leaf command; otherwise suggests recovery options.
  Args:
    arguments: Command arguments. If none specified, it uses the default
      provided from the command-line, i.e. sys.argv.
  """
  if arguments is None:
    arguments = sys.argv[1:]
  loaders = dict()
  parents = ['orchestrate']
  directory = os.path.dirname(__file__)
  path = os.path.abspath(os.path.sep.join([directory, 'commands']))
  children_names = {
      directory: parents[:],
  }
  command = ''
  # Iterate arguments trying to find a matching command by name.
  # If we find a submodule with a matching name, we try to load a command
  # instance from the submodule and execute it with the remaining arguments.
  # For example:
  #   orchestrate images create test-image-1 --packages=maya,nuke,houdini
  # Would walk looking for the following commands in this order:
  #   1. images
  #   2. create
  # When it reaches "create", it would load the orchestrate.commands.image.create
  # module and will attempt to run Command.run() with the remaining arguments:
  #   test-image-1 --packages=maya,nuke,houdini
  for index, command in enumerate(arguments):
    # CLI tokens use hyphens; module names use underscores.
    command = command.replace('-', '_')
    children_names[path] = []
    can_continue = False
    for loader, name, is_package in pkgutil.walk_packages([path]):
      # Save reference to modules in every level so that we can provide more
      # information to user in case we fail to find a matching command.
      module_path = os.path.sep.join([path, name])
      loaders[module_path] = loader
      children_names[path].append(name)
      # Execute command if we reach a submodule with a matching name
      if command == name:
        if is_package:
          # Matching command that expects a subcommand, let's advance to
          # next level searching for a leaf command
          parents.append(command)
          path = os.path.sep.join([path, command])
          can_continue = True
          break
        else:
          execute_command(command, parents, loader, arguments[index+1:])
          # nothing further to do
          return
    if not can_continue:
      # No matching command at current level. Don't look further.
      break
  # Reaching here means no leaf command was executed.
  suggest_recovery_options(command, parents, path, children_names)
# Script entry point when run directly (python -m orchestrate.main).
if __name__ == '__main__':
  main()
| 2.109375 | 2 |
Terry_toolkit/SearchBaidu.py | napoler/Terry-toolkit | 0 | 12798219 | <reponame>napoler/Terry-toolkit<filename>Terry_toolkit/SearchBaidu.py
from MagicBaidu import MagicBaidu
import pprint
class SearchBaidu:
    """Scrape Baidu search results using MagicBaidu
    (https://github.com/napoler/MagicBaidu)."""
    def __init__(self):
        # Debug marker printed on construction (kept for compatibility).
        print('kaishi')
    def get(self, keyword, start=0):
        """Fetch Baidu search results for *keyword* starting at offset *start*.

        Each result's redirect URL is resolved to the real target URL.
        Returns the list of result dicts (mutated in place).
        """
        engine = MagicBaidu()
        results = []
        for entry in engine.search(query=keyword, start=start):
            entry['url'] = engine.get_real_url(entry['url'])
            results.append(entry)
        return results
dentcam/mainwin.py | molkoback/dentcam | 0 | 12798220 | from .app import data_path
from .optionswin import OptionsWin
from .camera import Camera, CameraControl, getCameraDevices
from .camlabel import CamLabel
from PyQt5.QtCore import pyqtSlot
from PyQt5.QtGui import QImage, QIcon
from PyQt5.QtWidgets import QApplication, QWidget, QMainWindow, QAction, QVBoxLayout, QHBoxLayout, QMessageBox, QFileDialog, QLabel, QPushButton, QComboBox, QLineEdit
import functools
import logging
import os
import glob
import sys
import time
_data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data")
class Action(QAction):
    """QAction convenience wrapper: binds text, a triggered handler, an
    optional keyboard shortcut and the initial enabled state in one call."""
    def __init__(self, parent, text, action, shortcut=None, enabled=True):
        super().__init__(text, parent)
        if shortcut:
            self.setShortcut(shortcut)
        self.triggered.connect(action)
        self.setEnabled(enabled)
class MainWin(QMainWindow):
    """Main application window: live camera view, snapshot preview, and
    menus for capturing and saving images."""
    def __init__(self):
        super().__init__()
        # Options dialog; reload camera definitions whenever options are saved.
        self.options = OptionsWin(self)
        self.options.saveSignal.connect(self.saveOptionsSlot)
        self.cameras = []
        self.cameraComboBox = QComboBox()
        self.camActions = []  # Ctrl+<n> shortcuts, one per combo entry
        self.loadCameras()
        self.folderLineEdit = QLineEdit()
        self.switching = False  # guards against re-entrant camera switches
        self.camControl = None  # active CameraControl, None when no camera
        self.camLabel = CamLabel(self)  # live view
        self.imgLabel = CamLabel(self)  # last snapshot preview
        self.createWindow()
        self.createMenu()
        self.createLayout()
    def createWindow(self):
        """Set the window icon and show the window."""
        self.setWindowIcon(QIcon(os.path.join(data_path, "img", "icon.png")))
        self.show()
    def createMenu(self):
        """Build the File/Tools/Help menus and their actions."""
        self.mainMenu = self.menuBar()
        fileMenu = self.mainMenu.addMenu("File")
        # Snap/Save start disabled until a camera is active / image captured.
        self.snapAction = Action(self, "Snap", self.on_snapAction, "Space", False)
        fileMenu.addAction(self.snapAction)
        self.saveAction = Action(self, "Save", self.on_saveAction, "Ctrl+S", False)
        fileMenu.addAction(self.saveAction)
        self.saveAsAction = Action(self, "Save As...", self.on_saveAsAction, "Ctrl+Shift+S", False)
        fileMenu.addAction(self.saveAsAction)
        fileMenu.addAction(Action(self, "Quit", lambda: self.close(), "Ctrl+Q"))
        toolsMenu = self.mainMenu.addMenu("Tools")
        toolsMenu.addAction(Action(self, "Options", self.options.show))
        helpMenu = self.mainMenu.addMenu("Help")
        helpMenu.addAction(Action(self, "About", self.on_aboutAction))
        helpMenu.addAction(Action(self, "About Qt", lambda: QMessageBox.aboutQt(self)))
    def createLayout(self):
        """Assemble the central widget: camera/folder row plus the two views."""
        w = QWidget()
        vbox = QVBoxLayout()
        w.setLayout(vbox)
        hbox = QHBoxLayout()
        hbox.addWidget(QLabel("Camera:"))
        self.cameraComboBox.currentIndexChanged.connect(self.cameraChangedSlot)
        hbox.addWidget(self.cameraComboBox)
        hbox.addStretch()
        hbox.addWidget(QLabel("Folder:"))
        hbox.addWidget(self.folderLineEdit)
        self.folderLineEdit.returnPressed.connect(lambda: self.folderLineEdit.clearFocus())
        vbox.addLayout(hbox)
        self.setCamera(None)
        vbox.addWidget(self.camLabel)
        self.imgLabel.setImage(QImage(os.path.join(data_path, "img", "images.png")))
        vbox.addWidget(self.imgLabel)
        self.setCentralWidget(w)
        self.restoreGeometry(self.options.geometry)
    def loadCameras(self):
        """(Re)load camera definitions from the config dir and rebuild the
        camera combo box and its Ctrl+<n> shortcuts."""
        # Clear old shortcuts
        for action in self.camActions:
            self.removeAction(action)
        # Read camera files
        path = os.path.join(self.options.cfgPath, "*.json")
        cameras = [Camera.fromJSON(fn) for fn in glob.glob(path)]
        cameras.sort(key=lambda cam: cam.name)
        self.cameras = [None] + cameras  # index 0 = "no camera"
        # Update our camera combo box
        items = ["None"] + [cam.name for cam in cameras]
        self.cameraComboBox.clear()
        self.cameraComboBox.addItems(items)
        # Add shortcuts for cameras
        for i in range(len(self.cameras)):
            f = functools.partial(self.cameraComboBox.setCurrentIndex, i)
            action = Action(self, "", f, "Ctrl+%d" % i)
            self.addAction(action)
            self.camActions.append(action)
    def close(self):
        """Stop any active grab before closing the window."""
        if self.camControl:
            self.camControl.stopGrab()
        super().close()
    def on_snapAction(self):
        """Take a snapshot from the live camera and show it in the preview."""
        self.snapAction.setEnabled(False)  # debounce while capturing
        image = self.camControl.snapshot()
        self.imgLabel.setImage(image)
        self.snapAction.setEnabled(True)
        # Enable image saving
        self.saveAction.setEnabled(True)
        self.saveAsAction.setEnabled(True)
        if self.options.autoSave:
            self.on_saveAction()
    def savePath(self):
        """Return a millisecond-timestamped .jpg path inside the configured
        output folder, creating the typed subfolder if needed."""
        path = os.path.join(
            self.options.outputPath,
            self.folderLineEdit.text().strip()
        )
        if not os.path.exists(path):
            os.mkdir(path)
        fn = os.path.join(path, "%d.jpg" % int(time.time()*1000))
        return fn
    def saveImage(self, path):
        """Write the preview image to *path*, applying the configured flips."""
        image = self.imgLabel.image()
        if not image:
            return
        logging.debug("saving '%s'" % path)
        image = image.mirrored(
            horizontal=self.options.flipHoriz,
            vertical=self.options.flipVert
        )
        if not image.save(path, quality=100):
            QMessageBox.critical(self, "Couldn't Save Image", "Couldn't Save Image '%s'." % path)
    def on_saveAction(self):
        """Save the preview image to an auto-generated path."""
        path = self.savePath()
        self.saveImage(path)
    def on_saveAsAction(self):
        """Save the preview image to a user-chosen path."""
        path = QFileDialog.getSaveFileName(
            self,
            "Save Image",
            self.options.outputPath, "Image File (*.png *.jpg *.bmp)"
        )
        if path[0]:
            self.saveImage(path[0])
    def on_aboutAction(self):
        """Show the About dialog."""
        msg = (
            "{} v{}<br>"
            "<br>"
            "Copyright (C) 2018 <a href=\"mailto:<EMAIL>\"><NAME></a><br>"
            "<br>"
            "This software is licensed under WTFPL. See COPYING file for details.<br>"
        )
        QMessageBox.about(
            self,
            "About %s" % QApplication.applicationName(),
            msg.format(QApplication.applicationName(), QApplication.applicationVersion())
        )
    def setCamera(self, cam):
        """Switch the live view to *cam* (a Camera, or None for no camera).

        Returns True on success; on open failure shows an error dialog and
        falls back to the blank "no camera" view.
        """
        if self.switching:
            return False  # a switch is already in progress
        self.switching = True
        self.snapAction.setEnabled(False)
        # Stop the current camera
        if self.camControl:
            self.camControl.stopGrab()
            self.camControl = None
        # Show blank image if we don't have camera
        if cam is None:
            self.camLabel.setImage(QImage(os.path.join(data_path, "img", "camera.png")))
            self.switching = False
            return True
        # Try to open camera
        try:
            if not cam.open():
                raise Exception()
            self.camControl = CameraControl(self, self.camLabel, cam)
            self.camControl.startGrab()
        except Exception:  # narrowed from bare except: don't swallow KeyboardInterrupt
            QMessageBox.critical(self, "Couldn't Open Camera", "Couldn't Open Camera '%s'." % cam.name)
            self.camControl = None
            # BUG FIX: reset the re-entrancy guard before recursing; otherwise
            # setCamera(None) bails out immediately (switching still True) and
            # every later camera switch is rejected forever.
            self.switching = False
            return self.setCamera(None)
        # Camera opened successfully
        self.snapAction.setEnabled(True)
        self.switching = False
        return True
    @pyqtSlot(int)
    def cameraChangedSlot(self, index):
        """Combo-box handler: activate the camera selected at *index*."""
        if index >= 0:
            self.setCamera(self.cameras[index])
    @pyqtSlot()
    def saveOptionsSlot(self):
        """Options were saved: re-read the camera definitions."""
        self.loadCameras()
    def closeEvent(self, e):
        """Persist window geometry and options on close."""
        self.options.geometry = self.saveGeometry()
        self.options.save()
src/cython/test.py | chandnii7/ImageProcessing | 3 | 12798221 | <filename>src/cython/test.py
import smoothing_convolution
import numpy as np
# Smoke test: convolve a 3x3 all-ones image with a 3x3 all-ones kernel.
print(smoothing_convolution.apply_convolution(np.array([[1,1,1],[1,1,1],[1,1,1]]), np.array([[1,1,1],[1,1,1],[1,1,1]])))
| 2.328125 | 2 |
tensorflow1/basic-graph.py | Alwaysproblem/explore-ipu | 0 | 12798222 | <filename>tensorflow1/basic-graph.py
import numpy as np
from tensorflow.python.ipu.scopes import ipu_scope
import tensorflow.compat.v1 as tf
from tensorflow.python.ipu.config import IPUConfig
tf.disable_v2_behavior()
# Configure arguments for targeting the IPU
cfg = IPUConfig()
cfg.auto_select_ipus = 1
cfg.configure_ipu_system()
# Host-side placeholders feeding the graph: three 2-element float32 vectors.
with tf.device("cpu"):
    pa = tf.placeholder(np.float32, [2], name="a")
    pb = tf.placeholder(np.float32, [2], name="b")
    pc = tf.placeholder(np.float32, [2], name="c")
def basic_graph(pa, pb, pc):
    """Build a tiny addition graph: returns (pa + pb) + (pa + pc)."""
    left = pa + pb
    right = pa + pc
    return left + right
# Place the graph on the IPU device, then execute it with concrete inputs.
with ipu_scope("/device:IPU:0"):
    result = basic_graph(pa, pb, pc)
with tf.Session() as sess:
    # Run the graph through the session feeding it an arbitrary dictionary
    result = sess.run(result,
                      feed_dict={
                          pa: [1., 1.],
                          pb: [0., 1.],
                          pc: [1., 5.]
                      })
    print(result)
surro/util.py | MatthewScholefield/surro | 0 | 12798223 | import sys
from os.path import join
from threading import Timer
import numpy as np
from openal.audio import SoundSource
assets_root = getattr(sys, '_MEIPASS', '.')
def get_sfx(name):
return join(assets_root, 'assets', name)
def new_pt(*values):
return np.array(values or (0, 0, 0), dtype=float)
def vec_mag(vec: np.array):
return np.sqrt(vec.dot(vec))
def vec_dist(a: np.array, b: np.array):
return vec_mag(a - b)
class ContinuousSoundSource(SoundSource):
def __init__(self, sound_generator):
super().__init__()
def play_sound():
sound = sound_generator()
self.queue(sound)
self.__dict__['timer'] = timer = Timer(self.calc_length(sound), play_sound)
timer.daemon = True
timer.start()
play_sound()
@staticmethod
def calc_length(sound):
return sound.size / (sound.frequency * sound.bitrate / 8)
| 2.28125 | 2 |
datatrans/fooddata/search/request.py | KooCook/datatrans | 1 | 12798224 | <reponame>KooCook/datatrans
"""
References:
https://fdc.nal.usda.gov/api-guide.html#food-search-endpoint
"""
from typing import Dict, Union
from datatrans import utils
from datatrans.utils.classes import JSONEnum as Enum
__all__ = ['FoodDataType', 'SortField', 'SortDirection', 'FoodSearchCriteria']
class FoodDataType(Enum):
    """FoodData Central data-type values usable in the search data-type filter."""
    FOUNDATION = 'Foundation'
    SURVEY = 'Survey (FNDDS)'
    BRANDED = 'Branded'
    LEGACY = 'SR Legacy'
class SortField(Enum):
    """Result fields the FoodData Central search endpoint can sort by."""
    DESCRIPTION = 'lowercaseDescription.keyword'
    DATATYPE = 'dataType.keyword'
    PUBDATE = 'publishedDate'
    ID = 'fdcId'
class SortDirection(Enum):
    """Sort order for search results: ascending or descending."""
    ASC = 'asc'
    DESC = 'desc'
def verify_included_data_types(d: Dict[Union[FoodDataType, str], bool]):
    """Normalize a data-type -> bool mapping to string keys covering all four
    known data types, defaulting any missing entry to False."""
    normalized = {FoodDataType(key): value for key, value in d.items()}
    result = {}
    for member in (FoodDataType.FOUNDATION, FoodDataType.SURVEY,
                   FoodDataType.BRANDED, FoodDataType.LEGACY):
        result[member.value] = normalized.pop(member, False)
    return result
class FoodSearchCriteria(utils.DataClass):
    """Represents a FoodData Central search criteria.

    Attributes:
        general_search_input (str): Search query (general text)
        included_data_types (Dict[str, bool]): Specific data types to include in search
        ingredients: The list of ingredients (as it appears on the product label)
        brand_owner (str): Brand owner for the food
        require_all_words (bool): When True, the search will only return foods
            that contain all of the words that were entered in the search field
        page_number (int): The page of results to return
        sort_field (SortField): The name of the field by which to sort
        sort_direction (SortDirection): The direction of the sorting
    """

    __slots__ = (
        'general_search_input', 'included_data_types', 'ingredients', 'brand_owner', 'require_all_words', 'page_number',
        'sort_field', 'sort_direction')

    __attr__ = (
        ('general_search_input', str),
        ('included_data_types', dict,
         verify_included_data_types),
        ('ingredients', str),
        ('brand_owner', str),
        ('require_all_words', bool),
        ('page_number', int),
        ('sort_field', SortField),
        ('sort_direction', SortDirection),
    )

    def __init__(self, _dict_: dict = None, **kwargs):
        if _dict_ is not None:
            super().__init__(_dict_=_dict_)
            return
        # Translate snake_case keyword arguments into the camelCase keys the
        # API expects.  Iterate over a snapshot of the keys: the original
        # popped and inserted while iterating ``kwargs.items()``, which
        # mutates the dict during iteration and is unreliable in CPython.
        for key in list(kwargs):
            if key in self.__slots__:
                kwargs[utils.snake_to_camel(key)] = kwargs.pop(key)
        super().__init__(_dict_=kwargs)
| 2.703125 | 3 |
tensorflowQuantum/hackernoon/play.py | hpssjellis/tfQuantumJs | 1 | 12798225 | import cirq
import numpy as np
from cirq import Circuit
from cirq.devices import GridQubit
# Build a 5x5 grid of qubits (25 total); the demo circuit below only
# operates on the first five of them.
length = 5
qubits = [cirq.GridQubit(i, j) for i in range(length) for j in range(length)]
print(qubits)
circuit = cirq.Circuit()
# One moment per gate, in the same order as the original hand-written
# sequence: H on qubits 0-4, a CNOT chain 0->1->2->3, a SWAP of the first
# and last of the five, then an X on each of the five qubits.
# NOTE(review): the original also constructed CNOT(qubits[3], qubits[4])
# but never added it to the circuit; that behavior is preserved here.
moments = []
for q in qubits[:5]:
    moments.append(cirq.Moment([cirq.H(q)]))
for ctrl, tgt in zip(qubits[:3], qubits[1:4]):
    moments.append(cirq.Moment([cirq.CNOT(ctrl, tgt)]))
moments.append(cirq.Moment([cirq.SWAP(qubits[0], qubits[4])]))
for q in qubits[:5]:
    moments.append(cirq.Moment([cirq.X(q)]))
circuit = cirq.Circuit(moments)
print(circuit)
| 2.609375 | 3 |
flybirds/utils/dsl_helper.py | LinuxSuRen/flybirds | 1 | 12798226 | <reponame>LinuxSuRen/flybirds<filename>flybirds/utils/dsl_helper.py
# -*- coding: utf-8 -*-
"""
dsl helper
"""
import re
import flybirds.utils.flybirds_log as log
# generate result_dic
def add_res_dic(dsl_params, functin_pattern, def_key):
    """Parse trailing ``key=value`` pairs out of a DSL parameter string.

    Repeatedly strips ``, key=value`` suffixes off *dsl_params* (matching
    from the right, so ``text=freshmode, timeout=15, swipeCount=40`` yields
    swipeCount, then timeout); whatever remains unmatched is stored under
    *def_key*.  Zero-width spaces are removed from every retained head.

    The original implementation ran the same regex match twice per
    iteration inside a convoluted ``while``/``else``; this version does one
    match per iteration with identical results.

    Args:
        dsl_params: raw parameter string from the DSL statement.
        functin_pattern: compiled pattern with groups (head, key, value).
        def_key: dict key under which the final unmatched head is stored.

    Returns:
        dict mapping each parsed key (and *def_key*) to its string value.
    """
    result_dic = {}
    match_obj = re.match(functin_pattern, dsl_params)
    if match_obj is None:
        # No "key=value" tail at all: the whole string is the default value.
        result_dic[def_key] = dsl_params.strip().replace(u"\u200b", "")
        return result_dic
    result_dic[match_obj.group(2)] = match_obj.group(3)
    group_1 = match_obj.group(1).strip().replace(u"\u200b", "")
    while True:
        match_obj = re.match(functin_pattern, group_1)
        if match_obj is None:
            result_dic[def_key] = group_1
            break
        group_1 = match_obj.group(1).strip().replace(u"\u200b", "")
        result_dic[match_obj.group(2)] = match_obj.group(3)
    return result_dic
# generate result_dic
def params_to_dic(dsl_params, def_key="selector"):
    """Convert the parameters of a DSL statement into a dict.

    Non-string input yields an empty dict; string input is split into
    ``key=value`` pairs with the unmatched remainder stored under *def_key*.
    """
    pair_pattern = re.compile(r"([\S\s]+),\s*([a-zA-Z0-9_]+)\s*=\s*(\S+)")
    if isinstance(dsl_params, str):
        parsed = add_res_dic(dsl_params, pair_pattern, def_key)
    else:
        parsed = {}
    log.info("result_dic: {}".format(parsed))
    return parsed
def split_must_param(dsl_params):
    """Split *dsl_params* at the first comma into [must, optional] parts.

    Both parts are stripped of surrounding whitespace and zero-width
    spaces.  Raises IndexError when no comma is present (same as before).
    """
    def _clean(text):
        return text.strip().replace(u"\u200b", "")
    pieces = dsl_params.split(",", 1)
    return [_clean(pieces[0]), _clean(pieces[1])]
def get_params(context, *args):
    """Collect parameter values, falling back to attributes on *context*.

    Args:
        context: step context object (attributes looked up by name).
        args: (value, attribute_name) pairs; a non-None value wins,
            otherwise the context attribute is used when it exists.

    Returns:
        list of resolved values (pairs with neither source are skipped).
    """
    collected = []
    for supplied, attr_name in args:
        if supplied is not None:
            collected.append(supplied)
        elif hasattr(context, attr_name):
            collected.append(getattr(context, attr_name))
    return collected
def return_value(value, def_value=None):
    """Return *value* unless it is None, in which case return *def_value*."""
    return def_value if value is None else value
| 2.578125 | 3 |
solver.py | hahachicken/EA_Proj | 0 | 12798227 | import networkx as nx
from parse import read_input_file, write_output_file
from Utility import is_valid_network, average_pairwise_distance, average_pairwise_distance_fast
import sys
import time
import multiprocessing
def solve(G, depth):
    """Find a low-cost dominating tree of G.

    Generates up to *depth* spanning trees in increasing cost order
    (``genST``), prunes each one leaf-by-leaf while it remains a valid
    dominating network (``starter``), and returns the pruned tree with the
    smallest average pairwise distance.

    The original version also computed each tree's total edge weight in a
    separate loop and discarded the result; that dead code is removed.

    Args:
        G: networkx.Graph
        depth: maximum number of spanning trees to generate and prune.

    Returns:
        T: networkx.Graph
    """
    STs = genST(G, depth)
    print("STs gen!")
    # genST already caps the number of trees at ``depth``; the slice keeps
    # the original bound check as a safeguard.
    result = [starter(ST, G) for ST in STs[:max(depth, 0)]]
    print("MDT gen!")
    result.sort(key=average_pairwise_distance_fast)
    return result[0]
def deletenode(T,O):
    """Greedily prune leaves from tree T while it still dominates graph O.

    Tries leaves in order of heaviest attached edge; on the first removal
    that both keeps T a valid dominating network of O and lowers the
    average pairwise distance, recurses on the smaller tree.  Returns T
    unchanged when no leaf removal improves it.
    """
    oldcost = average_pairwise_distance_fast(T)
    leaves = []
    P = T.copy()
    for node in T.nodes:
        if T.degree[node] == 1:
            leaves += [node]
    # Try the leaves attached by the heaviest edges first.
    leaves = sorted( leaves, key=lambda node: T.edges[ (list(T[node])[0], node) ]['weight'],reverse=True)
    for i in range(len(leaves)):
        G = T.copy()
        G.remove_node(leaves[i])
        if is_valid_network(O,G):
            newcost = average_pairwise_distance_fast(G)
            if newcost < oldcost:
                # Improvement found: keep pruning the smaller tree.
                P = deletenode(G,O)
                return P
    return P
def starter(T,O):
    """Seed the beam search: try removing each leaf of T once.

    Collects the trees (one leaf removed each, heaviest edges first) that
    remain valid dominating networks of O and improve the average pairwise
    distance, then dispatches by how many were found: fewer than two falls
    back to greedy pruning, exactly two starts the narrow beam, three or
    more starts the full 3-wide beam.
    """
    GraphArray = []
    oldcost = average_pairwise_distance_fast(T)
    leaves = []
    for node in T.nodes:
        if T.degree[node] == 1:
            leaves += [node]
    leaves = sorted( leaves, key=lambda node: T.edges[ (list(T[node])[0], node) ]['weight'],reverse=True)
    for i in range(len(leaves)):
        G = T.copy()
        G.remove_node(leaves[i])
        if is_valid_network(O,G):
            newcost = average_pairwise_distance_fast(G)
            if newcost < oldcost:
                GraphArray += [G]
    if len(GraphArray) < 2:
        # Not enough candidates for a beam: prune greedily instead.
        return deletenode(T,O)
    elif len(GraphArray) == 2:
        return delete3node_S(GraphArray,O)
    else:
        # Keep only the first three candidates as the beam.
        return delete3node(GraphArray[:3], O)
def delete3node_S(GraphArray,O):
    """Beam-search step for a starting beam of two trees.

    Expands each tree by removing up to three leaves (heaviest attached
    edges first), keeps every expansion that stays a valid dominating
    network of O and improves the average pairwise distance, then either
    returns the best tree (no expansion found) or continues with the best
    three via ``delete3node``.
    """
    newGraphArray = GraphArray.copy()
    for T in GraphArray:
        oldcost = average_pairwise_distance_fast(T)
        leaves = []
        for node in T.nodes:
            if T.degree[node] == 1:
                leaves += [node]
        leaves = sorted( leaves, key=lambda node: T.edges[ (list(T[node])[0], node) ]['weight'],reverse=True)
        cnt = 0
        for i in range(len(leaves)):
            # Only the first three valid removals per tree are considered.
            if cnt < 3:
                G = T.copy()
                G.remove_node(leaves[i])
                if is_valid_network(O,G):
                    cnt += 1
                    newcost = average_pairwise_distance_fast(G)
                    if newcost < oldcost:
                        newGraphArray += [G]
        newGraphArray = sorted( newGraphArray, key=lambda tree: average_pairwise_distance_fast(tree))
    if len(newGraphArray) == 2:
        # No expansion improved either tree: return the better of the two.
        return newGraphArray[0]
    else:
        newGraphArray = newGraphArray[:3]
        return delete3node(newGraphArray,O)
def delete3node(GraphArray,O):
    """Recursive 3-wide beam search over leaf removals.

    Expands each of the (up to three) trees by deleting up to three leaves,
    keeps improving expansions, and recurses on the best three; terminates
    and returns the best tree when a pass adds no new candidates (the
    beam's length stays at three).
    """
    newGraphArray = GraphArray.copy()
    for T in GraphArray:
        oldcost = average_pairwise_distance_fast(T)
        leaves = []
        for node in T.nodes:
            if T.degree[node] == 1:
                leaves += [node]
        leaves = sorted( leaves, key=lambda node: T.edges[ (list(T[node])[0], node) ]['weight'],reverse=True)
        cnt = 0
        for i in range(len(leaves)):
            # Only the first three valid removals per tree are considered.
            if cnt < 3:
                G = T.copy()
                G.remove_node(leaves[i])
                if is_valid_network(O,G):
                    cnt += 1
                    newcost = average_pairwise_distance_fast(G)
                    if newcost < oldcost:
                        newGraphArray += [G]
        newGraphArray = sorted( newGraphArray, key=lambda tree: average_pairwise_distance_fast(tree))
    if len(newGraphArray) == 3:
        # Fixed point: no expansion improved any tree in the beam.
        return newGraphArray[0]
    else:
        newGraphArray = newGraphArray[:3]
        return delete3node(newGraphArray,O)
def genST(G, depth):
    """Generate up to *depth* spanning trees of G in increasing cost order.

    Partition-based k-best spanning tree enumeration: each candidate graph
    carries per-edge 'property' marks ('normal'/'included'/'excluded') and
    its own MST; ``depth == -1`` enumerates all spanning trees.  Returns
    the trees as standalone networkx graphs with edge weights copied
    from G.
    """
    output = []
    outgraphs = []
    for u, v in G.edges:
        G.edges[u, v]['property'] = 'normal'
    List = {G}
    G.graph['MST'] = KruskalMST(G)
    MST = {tuple(G.graph['MST'])}
    if depth == -1:
        while len(MST) != 0:
            # Pop the cheapest candidate, record its MST, then partition it.
            temp = min(List, key = lambda g: g.graph['cost'])
            output.append(temp.graph['MST'])
            List.remove(temp)
            MST.remove(tuple(temp.graph['MST']))
            Partition(temp, List, MST)
    else:
        while len(MST) != 0 and len(output) < depth:
            temp = min(List, key = lambda g: g.graph['cost'])
            output.append(temp.graph['MST'])
            List.remove(temp)
            MST.remove(tuple(temp.graph['MST']))
            Partition(temp, List, MST)
    # Materialize each recorded edge list as its own weighted graph.
    for edges in output:
        P = nx.Graph()
        for edge in edges:
            P.add_edge(edge[0], edge[1])
            P.edges[edge[0], edge[1]]['weight'] = G.edges[edge[0], edge[1]]['weight']
        outgraphs += [P]
    return outgraphs
def Partition(P, List, MST):
    """Partition step of the k-best spanning tree enumeration.

    For each 'normal' edge of P's MST, branch into P1 (edge excluded) and
    continue from P2 (edge included).  A branch is added to *List*/*MST*
    only if it is still connected once its excluded edges are removed.
    """
    P1 = nx.Graph()
    P2 = nx.Graph()
    P1 = P.copy()
    P2 = P.copy()
    for u, v in P.graph['MST']:
        if P.edges[u, v]['property'] == 'normal':
            P1.edges[u, v]['property'] = 'excluded'
            P2.edges[u, v]['property'] = 'included'
            MSTP1 = KruskalMST(P1)
            P1.graph['MST'] = MSTP1
            #check if P1 is connected: DFS from node 1 reaches |V|-1 edges
            #exactly when the graph without excluded edges is connected.
            # NOTE(review): assumes every input graph contains a node 1.
            P3 = P1.copy()
            for u, v in P1.edges:
                if P1.edges[u, v]['property'] == 'excluded':
                    P3.remove_edge(u, v)
            if len(list(nx.dfs_edges(P3, source=1))) == P3.number_of_nodes() - 1:
                List.add(P1)
                MST.add(tuple(MSTP1))
            # Next iteration branches from the graph with this edge included.
            P1 = P2.copy()
def KruskalMST(P):
    """Kruskal's MST on P, honoring per-edge 'property' marks.

    'included' edges are forced into the tree up front, 'excluded' edges
    are penalized with weight 1000 so they are chosen only as a last
    resort, and 'normal' edges are picked greedily by weight.  Stores the
    resulting edge list and total cost (using P's original weights) on
    ``P.graph`` and returns the edge list, or [] when no spanning tree can
    be completed.

    NOTE(review): if a real edge weight can exceed 1000 the exclusion
    penalty is insufficient -- confirm against the input data range.
    """
    G = P.copy()
    cost = 0
    normal_edges = [] #store all the normal edges
    result =[] #This will store the resultant MST
    i = 0 # An index variable, used for sorted edges
    e = 0 # An index variable, used for result[]
    # Initialize union-find state on the copy.
    for node in G.nodes:
        G.nodes[node]['parent'] = node
        G.nodes[node]['rank'] = 0
    for u, v in G.edges:
        if G.edges[u, v]['property'] == 'included':
            x = find(G, u)
            y = find(G ,v)
            union(G, x, y)
            result.append((u, v))
            e += 1
        elif G.edges[u, v]['property'] == 'excluded':
            G.edges[u, v]['weight'] = 1000
            normal_edges.append((u, v))
        else: normal_edges.append((u, v))
    # Step 1: Sort all the candidate edges in non-decreasing
    # order of their weight (we work on a copy, so the
    # penalty weights above never touch the caller's graph).
    if len(normal_edges) == 0:
        return result
    sortedges = sorted( normal_edges, key=lambda edge: G.edges[edge]['weight'])
    # Number of edges to be taken is equal to V-1
    while e < G.number_of_nodes() - 1:
        # Step 2: Pick the smallest edge and increment
        # the index for next iteration
        if i > len(sortedges) - 1:
            return []
        u,v = sortedges[i]
        i = i + 1
        x = find(G, u)
        y = find(G ,v)
        # If including this edge doesn't cause a cycle,
        # include it in result and increment the count
        # of accepted edges
        if x != y:
            e = e + 1
            result.append((u,v))
            union(G, x, y)
        # Else discard the edge
    # Total the tree cost using the ORIGINAL weights from P.
    for i,j in result:
        cost += P.edges[i, j]['weight']
    P.graph['cost'] = cost
    P.graph['MST'] = result
    return result
def union(G, x, y):
    """Union-by-rank merge of the union-find sets containing *x* and *y*.

    Parent/rank state lives in the node attributes of *G*.
    """
    root_x = find(G, x)
    root_y = find(G, y)
    rank_x = G.nodes[root_x]['rank']
    rank_y = G.nodes[root_y]['rank']
    if rank_x < rank_y:
        # Attach the shallower tree under the deeper one.
        G.nodes[root_x]['parent'] = root_y
    elif rank_x > rank_y:
        G.nodes[root_y]['parent'] = root_x
    else:
        # Equal ranks: pick root_x as root and bump its rank.
        G.nodes[root_y]['parent'] = root_x
        G.nodes[root_x]['rank'] += 1
def find(G, i):
    """Return the union-find root of node *i* (no path compression)."""
    parent = G.nodes[i]['parent']
    return i if parent == i else find(G, parent)
# Here's an example of how to run your solver.
# Usage: python3 solver.py
def solver_multi_threading(i, depth = 1000):
    """Worker: solve one input file and write the corresponding output.

    Args:
        i: (name, index) pair identifying ``inputs/{name}-{index}.in``.
        depth: spanning-tree budget forwarded to ``solve``.
    """
    path = "inputs/{}-{}.in".format(i[0], i[1])
    G = read_input_file(path)
    print("Input {} success!".format(path))
    T = solve(G, depth)
    #print("Average pairwise distance: {}".format(average_pairwise_distance_fast(T)))
    print("Output {} success!".format(path))
    write_output_file("outputs/{}-{}.out".format(i[0], i[1]), T)
def main():
    """Dispatch solving over a size class given as argv[1].

    NOTE(review): the task lists below contain bare ints, but
    ``solver_multi_threading`` indexes its argument as ``i[0]``/``i[1]``
    (expecting a (name, index) pair as ``p_main`` provides) -- this path
    looks broken at runtime; confirm the intended input file naming.
    """
    tt = sys.argv[1]
    cores = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=cores)
    task = []
    # Index ranges for the three input size classes.
    small_index = list(range(1, 304))
    med_index = list(range(304, 607))
    large_index = list(range(607, 1007))
    if tt == "all":
        task = large_index + med_index + small_index
    elif tt == 'small':
        task = small_index
    elif tt == 'medium':
        task = med_index
    elif tt == 'large':
        task = large_index
    pool.map(solver_multi_threading, task)
def p_main():
    """Re-run solver jobs listed in a report file (argv[1]).

    Each line has the form ``<name>-<index> <score>``; only entries with a
    score above 10 are re-solved, in parallel across all CPU cores.
    """
    path = sys.argv[1]
    # Context manager so the report file is closed promptly
    # (the original opened it and never closed the handle).
    with open(path, 'r') as f:
        lines = f.readlines()
    task = []
    for line in lines:
        (l, r) = line.split()
        (n, i) = l.split('-')
        i = int(i)
        print(n, i, r)
        if int(r) > 10:
            task.append((n, i))
    cores = multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=cores)
    pool.map(solver_multi_threading, task)
if __name__ == "__main__":
    # Entry point: re-run only the inputs flagged in the report file.
    p_main()
| 2.9375 | 3 |
xastropy/cgm/core.py | bpholden/xastropy | 3 | 12798228 | <filename>xastropy/cgm/core.py
"""
#;+
#; NAME:
#; cgm.core
#; Version 1.0
#;
#; PURPOSE:
#; Module for core routines of CGM analysis
#; 29-Nov-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
from astropy import units as u
from xastropy.igm.abs_sys.abssys_utils import AbslineSystem
from xastropy.galaxy.core import Galaxy
from xastropy.obs import radec as xra
from xastropy.xutils import xdebug as xdb
from xastropy.xutils import arrays as xu_array
# Path for xastropy
#xa_path = imp.find_module('xastropy')[1]
########################## ##########################
########################## ##########################
class CGMSys(object):
    """ Class for a CGM system
    Combines absorption lines with a Galaxy

    Inputs:
    ----------
    gal_ra: str, float, Quantity
      RA for galaxy
    gal_dec: str, float, Quantity
      DEC for galaxy
    gal_z: float
      Galaxy redshift
    bg_ra: str, float, Quantity
      RA for background source
    bg_dec: str, float, Quantity
      DEC for background source
    bg_z: float
      Redshift of background source

    Attributes
    ----------
    rho: float
      Impact parameter (u.kpc)

    JXP on 29 Nov 2014
    """
    # Initialize
    def __init__(self, gal_ra, gal_dec, gal_z, bg_ra, bg_dec, bg_z,
                 cosmo=None, verbose=False):
        # Galaxy
        self.galaxy = Galaxy(gal_ra, gal_dec, z=gal_z)
        # Name: "CGM" + packed sexagesimal RA/DEC of the galaxy.
        self.name = ('CGM'+
                     self.galaxy.coord.ra.to_string(unit=u.hour,sep='',pad=True)+
                     self.galaxy.coord.dec.to_string(sep='',pad=True,alwayssign=True))
        # Absorption system along the background sightline
        self.abs_sys = CGMAbs()
        self.abs_sys.coord = xra.to_coord( (bg_ra,bg_dec) ) # Background source
        self.abs_sys.zem = bg_z
        # Calculate rho (impact parameter at the galaxy redshift)
        if cosmo is None:
            from astropy.cosmology import WMAP9 as cosmo
            if verbose is True:
                print('cgm.core: Using WMAP9 cosmology')
        ang_sep = self.abs_sys.coord.separation(self.galaxy.coord).to('arcmin')
        kpc_amin = cosmo.kpc_comoving_per_arcmin( self.galaxy.z ) # kpc per arcmin
        # Comoving separation divided by (1+z) gives the physical separation.
        self.rho = ang_sep * kpc_amin / (1+self.galaxy.z) # Physical
        #xdb.set_trace()
    def print_abs_type(self):
        """Return a string representing the type of absorption system this is."""
        return 'CGM'
    # Output
    def __repr__(self):
        return ('[{:s}: Galaxy RA/DEC={:s}{:s}, zgal={:g}, rho={:g}]'.format(
                self.__class__.__name__,
                 self.abs_sys.coord.ra.to_string(unit=u.hour,sep=':',pad=True),
                 self.abs_sys.coord.dec.to_string(sep=':',pad=True,alwayssign=True),
                 self.galaxy.z, self.rho))
# Class for CGM Absorption
class CGMAbs(AbslineSystem):
    """A CGM absorption system

    Attributes:
        ions: ionic column densities (None until populated).
    """
    def __init__(self):
        # Generate with type
        AbslineSystem.__init__(self,'CGM')
        # Init
        self.ions = None
    # Output
    def __repr__(self):
        return ('[{:s}: {:s} {:s}, {:g}]'.format(
                self.__class__.__name__,
                 self.coord.ra.to_string(unit=u.hour,sep=':',pad=True),
                 self.coord.dec.to_string(sep=':',pad=True),
                 self.zabs))
    def print_abs_type(self):
        """Return a string representing the type of absorption system this is."""
        return 'CGM'
# ###################### #######################
# Testing
if __name__ == '__main__':
    # Simple smoke test: instantiate and print a CGM absorption system.
    tmp = CGMAbs()
    print(tmp)
    # NOTE(review): the original also instantiated ``CGMAbsSurvey``, which
    # is neither defined nor imported in this module and raised NameError;
    # the broken call has been removed.
| 2.15625 | 2 |
courseevaluations/lib/async.py | rectory-school/rectory-apps | 0 | 12798229 | <reponame>rectory-school/rectory-apps<filename>courseevaluations/lib/async.py
from courseevaluations.models import StudentEmailTemplate
from academics.models import Student
from django.core.mail import send_mail
import time
def send_student_email_from_template(template_id, student_id, override_email=None):
    """Render a message for one student from a template and send it.

    When *override_email* is given, the message is redirected to that
    address instead of the student's own.
    """
    time.sleep(1)  # throttle: keep at most ~1 message/second for SES
    template = StudentEmailTemplate.objects.get(pk=template_id)
    student = Student.objects.get(pk=student_id)
    message = template.get_message(student)
    if override_email:
        message.to = [override_email]
    message.send()
def send_confirmation_email(addresses, to_addresses):
    """Send a confirmation listing every address the original e-mail reached."""
    time.sleep(1)  # throttle: keep at most ~1 message/second for SES
    recipient_lines = "\n".join(addresses)
    body = "Your e-mail was sent to the following {count:} people: \n\n{addresses:}".format(
        count=len(addresses), addresses=recipient_lines)
    send_mail("Message confirmation", body, "<EMAIL>", to_addresses)
def send_msg(message):
    """Send a pre-built message, throttled to ~1 message/second for SES."""
    time.sleep(1)
    message.send()
tests/test_dev_mode.py | adrienbrunet/mixt | 27 | 12798230 | # coding: mixt
"""Ensure that dev-mode can be toggled and does not validate data if off."""
from typing import Union
import pytest
from mixt.internal.base import Base
from mixt.exceptions import InvalidPropChoiceError, InvalidPropBoolError, InvalidPropValueError
from mixt.internal.proptypes import BasePropTypes as PropTypes
from mixt.internal.dev_mode import set_dev_mode, unset_dev_mode, override_dev_mode, in_dev_mode
from mixt.proptypes import Choices
class DummyBase(Base):
    """Minimal concrete Base: rendering is a no-op so tests focus on props."""
    def _to_list(self, acc):
        pass
def test_proptypes_default_dev_mode_is_true():
    # Dev mode must default to on so prop validation happens out of the box.
    assert PropTypes.__in_dev_mode__()
def test_proptypes_set_dev_mode_toggle():
    """Setting/unsetting dev mode is idempotent and toggles both directions."""
    assert PropTypes.__in_dev_mode__()
    try:
        PropTypes.__unset_dev_mode__()
        assert not PropTypes.__in_dev_mode__()
        PropTypes.__unset_dev_mode__()
        assert not PropTypes.__in_dev_mode__()
        PropTypes.__set_dev_mode__()
        assert PropTypes.__in_dev_mode__()
        PropTypes.__set_dev_mode__()
        assert PropTypes.__in_dev_mode__()
        PropTypes.__set_dev_mode__(False)
        assert not PropTypes.__in_dev_mode__()
        PropTypes.__set_dev_mode__(False)
        assert not PropTypes.__in_dev_mode__()
        PropTypes.__set_dev_mode__(True)
        assert PropTypes.__in_dev_mode__()
        PropTypes.__set_dev_mode__(True)
        assert PropTypes.__in_dev_mode__()
    finally:
        PropTypes.__dev_mode__ = True  # force restore the normal state
def test_proptypes_context_manager():
    """__override_dev_mode__ restores the previous state on exit, both ways."""
    assert PropTypes.__in_dev_mode__()
    try:
        with PropTypes.__override_dev_mode__(False):
            assert not PropTypes.__in_dev_mode__()
        assert PropTypes.__in_dev_mode__()
        PropTypes.__set_dev_mode__(False)
        with PropTypes.__override_dev_mode__(False):
            assert not PropTypes.__in_dev_mode__()
        assert not PropTypes.__in_dev_mode__()
        with PropTypes.__override_dev_mode__(True):
            assert PropTypes.__in_dev_mode__()
        assert not PropTypes.__in_dev_mode__()
        PropTypes.__set_dev_mode__(True)
        with PropTypes.__override_dev_mode__(True):
            assert PropTypes.__in_dev_mode__()
        assert PropTypes.__in_dev_mode__()
        # nested overrides via the module-level helpers
        with override_dev_mode(False):
            assert not in_dev_mode()
            with override_dev_mode(True):
                assert in_dev_mode()
            assert not in_dev_mode()
        assert in_dev_mode()
    finally:
        PropTypes.__dev_mode__ = True  # force restore the normal state
def test_global_default_dev_mode_is_true():
    # The module-level helper mirrors PropTypes' default: dev mode on.
    assert in_dev_mode()
def test_global_set_dev_mode_toggle():
    """The module-level set/unset helpers are idempotent toggles."""
    assert in_dev_mode()
    try:
        unset_dev_mode()
        assert not in_dev_mode()
        unset_dev_mode()
        assert not in_dev_mode()
        set_dev_mode()
        assert in_dev_mode()
        set_dev_mode()
        assert in_dev_mode()
        set_dev_mode(False)
        assert not in_dev_mode()
        set_dev_mode(False)
        assert not in_dev_mode()
        set_dev_mode(True)
        assert in_dev_mode()
        set_dev_mode(True)
        assert in_dev_mode()
    finally:
        PropTypes.__dev_mode__ = True  # force restore the normal state
def test_global_context_manager():
    """override_dev_mode restores the previous global state on exit."""
    assert in_dev_mode()
    try:
        with override_dev_mode(False):
            assert not in_dev_mode()
        assert in_dev_mode()
        set_dev_mode(False)
        with override_dev_mode(False):
            assert not in_dev_mode()
        assert not in_dev_mode()
        with override_dev_mode(True):
            assert in_dev_mode()
        assert not in_dev_mode()
        set_dev_mode(True)
        with override_dev_mode(True):
            assert in_dev_mode()
        assert in_dev_mode()
    finally:
        PropTypes.__dev_mode__ = True  # force restore the normal state
def test_choices_are_not_checked_in_non_dev_mode():
    """Invalid Choices values pass through silently when dev mode is off."""
    class Foo(DummyBase):
        class PropTypes:
            value: Choices = ['a', 'b']
    with override_dev_mode(dev_mode=False):
        # 'c' is not in the declared choices but is accepted as-is.
        assert (<Foo value='c' />.value) == 'c'
    with pytest.raises(InvalidPropChoiceError):
        <Foo value='c' />
def test_boolean_is_not_validated_in_non_dev_mode():
    """Bool props skip strict value parsing when dev mode is off."""
    class Foo(DummyBase):
        class PropTypes:
            value: bool
    with override_dev_mode(dev_mode=False):
        # normal behavior still works
        assert (<Foo value='value' />.value) is True
        assert (<Foo value={False} />.value) is False
        assert (<Foo value='false' />.value) is False
        # but these values are only accepted (via truthiness) in non-dev mode
        assert (<Foo value='fake' />.value) is True
        assert (<Foo value={0} />.value) is False
    with pytest.raises(InvalidPropBoolError):
        <Foo value='fake' />
    with pytest.raises(InvalidPropBoolError):
        <Foo value={0} />
def test_normal_value_is_not_validated_in_non_dev_mode():
    """Typed props (including unions) accept any value when dev mode is off."""
    class Foo(DummyBase):
        class PropTypes:
            value: int
            complex: Union[int, float]
    with override_dev_mode(dev_mode=False):
        assert (<Foo value='foo' />.value) == 'foo'
        assert (<Foo complex='bar' />.complex) == 'bar'
    with pytest.raises(InvalidPropValueError):
        <Foo value='foo' />
    with pytest.raises(InvalidPropValueError):
        <Foo complex='bar' />
| 1.835938 | 2 |
class Solution:
    # @param matrix, a list of lists of integers
    # RETURN NOTHING, MODIFY matrix IN PLACE.
    def setZeroes(self, matrix):
        """Zero out every row and column that contains a zero, in place."""
        num_rows = len(matrix)
        num_cols = len(matrix[0]) if num_rows else 0
        zero_rows = set()
        zero_cols = set()
        # First pass: record which rows/columns contain a zero.
        for r in range(num_rows):
            for c in range(num_cols):
                if matrix[r][c] == 0:
                    zero_rows.add(r)
                    zero_cols.add(c)
        # Second pass: clear every cell in a marked row or column.
        for r in range(num_rows):
            for c in range(num_cols):
                if r in zero_rows or c in zero_cols:
                    matrix[r][c] = 0
# Quick manual check: the zero at (1, 0) should clear row 1 and column 0.
m = [[1,1,1],[0,1,2]]
s = Solution()
s.setZeroes(m)
print(m)
| 3.59375 | 4 |
botforces/utils/discord_common.py | coniferousdyer/Botforces | 0 | 12798232 | """
Contains functions related to Discord-specific features, such as embeds.
"""
import discord
import datetime
import time
from botforces.utils.constants import (
NUMBER_OF_ACS,
USER_WEBSITE_URL,
PROBLEM_WEBSITE_URL,
)
from botforces.utils.services import enclose_tags_in_spoilers
"""
User embeds.
"""
async def create_user_embed(user, author, color):
    """
    Creates an embed with user information.

    Args:
        user: mapping of Codeforces user fields ('handle', 'avatar', and
            optional 'firstName'/'lastName', 'city'/'country', 'rank',
            'rating').
        author: Discord user whose name/avatar stamp the footer.
        color: embed accent color.

    Returns the discord.Embed (caller sends it).
    """
    Embed = discord.Embed(
        title=user["handle"],
        url=f"{USER_WEBSITE_URL}{user['handle']}",
        color=color,
    )
    Embed.set_thumbnail(url=user["avatar"])
    # Optional profile fields: only added when present in the API payload.
    if "firstName" in user and "lastName" in user:
        Embed.add_field(
            name="Name",
            value=f"{user['firstName']} {user['lastName']}",
            inline=False,
        )
    if "city" in user and "country" in user:
        Embed.add_field(
            name="City",
            value=f"{user['city']}, {user['country']}",
            inline=False,
        )
    if "rank" in user:
        Embed.add_field(
            name="Rank",
            value=user["rank"].title(),
            inline=False,
        )
    else:
        Embed.add_field(name="Rank", value="Unranked", inline=False)
    if "rating" in user:
        Embed.add_field(
            name="Rating",
            value=user["rating"],
            inline=False,
        )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
"""
Problem embeds.
"""
async def create_problem_embed(problem, author):
    """
    Creates an embed with problem information.

    NOTE(review): *problem* is accessed both by key ('contestId', 'tags')
    and by position (problem[4] for the rating) -- presumably a
    sqlite3.Row; confirm that index 4 is the rating column.
    """
    Embed = discord.Embed(
        title=f"{problem['contestId']}{problem['contestIndex']}. {problem['name']}",
        url=f"{PROBLEM_WEBSITE_URL}{problem['contestId']}/{problem['contestIndex']}",
        color=0xFF0000,
    )
    Embed.add_field(name="Rating", value=problem[4], inline=False)
    # Printing the tags in spoilers (skipped for an empty "[]" tag list)
    if problem["tags"] != "[]":
        tags = await enclose_tags_in_spoilers(problem["tags"])
        Embed.add_field(name="Tags", value=tags)
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
"""
Upcoming contests embeds.
"""
async def create_contest_embed(contestList, author):
    """
    Creates an embed listing upcoming contests (name, id, local start
    time, duration).
    """
    Embed = discord.Embed(title="List of upcoming contests", color=0xFF0000)
    # Adding each contest as a field to the embed
    for contest in contestList:
        # Obtaining the start time of the contest (local timezone)
        date = datetime.datetime.fromtimestamp(contest["startTimeSeconds"])
        dateString = date.strftime("%b %d, %Y, %H:%M")
        # Obtaining contest duration as hours + minutes.
        # NOTE(review): timedelta.seconds ignores whole days, so a contest
        # longer than 24h would display a truncated duration -- confirm.
        duration = datetime.timedelta(seconds=contest["durationSeconds"])
        hours = duration.seconds // 3600
        minutes = (duration.seconds // 60) % 60
        Embed.add_field(
            name=contest["name"],
            value=f"{contest['id']} - {dateString} {time.tzname[0]} - {hours} hrs, {minutes} mins",
            inline=False,
        )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
"""
Stalk embeds.
"""
async def create_submissions_embed(submissions, count, handle, author):
    """
    Creates an embed with information about a user's last n solved problems.

    *submissions* is the pre-formatted text used as the embed description.
    """
    Embed = discord.Embed(
        title=f"Last {count} solved by {handle}",
        description=submissions,
        color=0xFF0000,
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
"""
Graph embeds.
"""
async def create_rating_plot_embed(handle, author):
    """
    Creates an embed with the rating plot of a user; the figure itself is
    attached separately by the caller as "figure.png".
    """
    Embed = discord.Embed(
        title=f"{handle}'s solved problems",
        description="Note: ? refers to problems that do not have a rating on Codeforces.",
        color=0xFF0000,
    )
    Embed.set_image(url="attachment://figure.png")
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_index_plot_embed(handle, author):
    """
    Creates an embed with the index plot of a user (figure attached by the
    caller as "figure.png").
    """
    Embed = discord.Embed(title=f"{handle}'s solved problems", color=0xFF0000)
    Embed.set_image(url="attachment://figure.png")
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_tags_plot_embed(handle, author):
    """
    Creates an embed with the tags plot of a user (figure attached by the
    caller as "figure.png").
    """
    Embed = discord.Embed(title=f"{handle}'s solved problems", color=0xFF0000)
    Embed.set_image(url="attachment://figure.png")
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
"""
Help embeds.
"""
async def create_general_help_embed(author):
    """
    Displays an embed with instructions on how to use all commands.

    Returns the discord.Embed; the footer identifies who requested help.
    """
    Embed = discord.Embed(
        title="Help Menu",
        description="Type `-help command` to learn about a specific command.",
        color=0xFF0000,
    )
    # One field per bot command, in the order they should be listed.
    Embed.add_field(
        name="user", value="Displays information about a user.", inline=False
    )
    Embed.add_field(
        name="stalk",
        value="Displays the last n problems solved by a user.",
        inline=False,
    )
    Embed.add_field(name="problem", value="Displays a random problem.", inline=False)
    Embed.add_field(
        name="upcoming",
        value="Displays the list of upcoming Codeforces contests.",
        inline=False,
    )
    Embed.add_field(
        name="duel",
        value="Challenges another user to a duel over a problem.",
        inline=False,
    )
    Embed.add_field(
        name="plotrating",
        value="Plots the problems done by a user, grouped by rating.",
        inline=False,
    )
    Embed.add_field(
        name="plotindex",
        value="Plots the problems done by a user, grouped by contest index.",
        inline=False,
    )
    Embed.add_field(
        name="plottags",
        value="Plots the problems done by a user, grouped by tags.",
        inline=False,
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_user_help_embed(author):
    """
    Displays an embed with instructions on how to use the user command.

    Returns the discord.Embed.
    """
    Embed = discord.Embed(
        title="user", description="Displays information about a user.", color=0xFF0000
    )
    Embed.add_field(name="Syntax", value="`-user <codeforces_handle>`", inline=False)
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_stalk_help_embed(author):
    """
    Displays an embed with instructions on how to use the stalk command.

    Returns the discord.Embed.
    """
    Embed = discord.Embed(
        title="stalk",
        description=f"Displays the last n problems solved by a user ({NUMBER_OF_ACS} by default).",
        color=0xFF0000,
    )
    Embed.add_field(
        name="Syntax",
        value=f"`-stalk <codeforces_handle>` - Displays last {NUMBER_OF_ACS} submissions of the user\n`-stalk <codeforces_handle> <n>` - Displays last n submissions of the user",
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_problem_help_embed(author):
    """
    Displays an embed with instructions on how to use the problem command.

    Returns the discord.Embed.
    """
    Embed = discord.Embed(
        title="problem",
        description="Displays a random problem of optional rating and/or tags.",
        color=0xFF0000,
    )
    Embed.add_field(
        name="Syntax",
        value='`-problem` - Displays a random problem.\n`-problem <rating>` - Displays a random problem of that rating.\n`-problem <list_of_tags>` - Displays a random problem of those tags (multiple tags are allowed).\n`-problem <rating> <list_of_tags>` - Displays a random problem of those tags and rating (order does not matter).\n\nNote: For tags like "binary search", enclose the tag in double quotes.',
        inline=False,
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_upcoming_help_embed(author):
    """
    Displays an embed with instructions on how to use the upcoming command.

    Returns the discord.Embed.
    """
    Embed = discord.Embed(
        title="upcoming",
        description="Displays information about upcoming contests.",
        color=0xFF0000,
    )
    Embed.add_field(name="Syntax", value="`-upcoming`", inline=False)
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_duel_help_embed(author):
    """
    Displays an embed with instructions on how to use the duel command.

    Returns the discord.Embed.
    """
    Embed = discord.Embed(
        title="duel",
        description="Challenges another user to a duel over a problem.",
        color=0xFF0000,
    )
    Embed.add_field(
        name="Syntax",
        value="`-duel @<discord_user> <optional_rating> <optional_tags>` - To challenge a user\n`-endduel` - To end a duel and decide the result (only if a duel is in progress).",
        inline=False,
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_plotrating_help_embed(author):
    """
    Displays an embed with instructions on how to use the plotrating command.

    Returns the discord.Embed.
    """
    Embed = discord.Embed(
        title="plotrating",
        description="Plots the problems done by a user, grouped by rating.",
        color=0xFF0000,
    )
    Embed.add_field(
        name="Syntax", value="`-plotrating <codeforces_handle>`", inline=False
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_plotindex_help_embed(author):
    """
    Displays an embed with instructions on how to use the plotindex command.

    Returns the discord.Embed.
    """
    Embed = discord.Embed(
        title="plotindex",
        description="Plots the problems done by a user, grouped by contest index.",
        color=0xFF0000,
    )
    Embed.add_field(
        name="Syntax", value="`-plotindex <codeforces_handle>`", inline=False
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
async def create_plottags_help_embed(author):
    """
    Displays an embed with instructions on how to use the plottags command.

    Returns the discord.Embed.
    """
    Embed = discord.Embed(
        title="plottags",
        description="Plots the problems done by a user, grouped by tags.",
        color=0xFF0000,
    )
    Embed.add_field(
        name="Syntax", value="`-plottags <codeforces_handle>`", inline=False
    )
    Embed.set_footer(icon_url=author.avatar_url, text=str(author))
    return Embed
"""
Duel embeds.
"""
async def create_duel_begin_embed(problem, author, opponent):
    """
    Displays an embed announcing a duel: the chosen problem plus the two
    participants (challenger *author* vs *opponent*).
    """
    Embed = discord.Embed(
        title=f"{problem['contestId']}{problem['contestIndex']}. {problem['name']}",
        url=f"{PROBLEM_WEBSITE_URL}{problem['contestId']}/{problem['contestIndex']}",
        description="The duel starts now!",
        color=0xFF0000,
    )
    Embed.add_field(name="Rating", value=problem["rating"], inline=False)
    # Printing the tags in spoilers (skipped for an empty "[]" tag list)
    if problem["tags"] != "[]":
        tags = await enclose_tags_in_spoilers(problem["tags"])
        Embed.add_field(name="Tags", value=tags)
    Embed.add_field(
        name="Duel",
        value=f"{author.display_name} vs {opponent.display_name}",
        inline=False,
    )
    return Embed
async def create_duels_embed(duels):
    """Build an embed listing every ongoing duel: contestants, problem link, start time."""
    embed = discord.Embed(
        title="Ongoing duels",
        color=0xFF0000,
    )
    # One field per duel; start times are reformatted into a friendly local form.
    for duel in duels:
        started = datetime.datetime.strptime(
            duel["startTime"], "%Y-%m-%d %H:%M:%S.%f"
        ).strftime("%b %d, %Y %H:%M:%S")
        embed.add_field(
            name=f"{duel['handle_1']} vs {duel['handle_2']}",
            value=f"Problem: {PROBLEM_WEBSITE_URL}{duel['contestId']}/{duel['contestIndex']}\nStart Time: {started} {time.tzname[0]}",
            inline=False,
        )
    return embed
| 2.953125 | 3 |
tests/test_circuit/test_undriven_unused.py | leonardt/magma | 167 | 12798233 | """
Test the ability to ignore undriven inputs (useful for formal verification
tools that use undriven inputs to mark wires that can take on any value)
"""
import pytest
import magma as m
from magma.testing import check_files_equal
def test_ignore_unused_undriven_basic():
    """Compile a circuit with an undriven output and an unused value; with
    drive_undriven/terminate_unused the emitted Verilog must match the gold file."""
    class Main(m.Circuit):
        _ignore_undriven_ = True  # opt this circuit into the undriven-inputs feature
        io = m.IO(I=m.In(m.Bit), O=m.Out(m.Bit))
        temp = ~io.I  # intentionally unused; io.O is intentionally left undriven
    m.compile("build/test_ignore_unused_undriven_basic", Main, inline=True,
              drive_undriven=True, terminate_unused=True)
    # Compare generated Verilog against the checked-in golden output.
    assert check_files_equal(__file__,
                             "build/test_ignore_unused_undriven_basic.v",
                             "gold/test_ignore_unused_undriven_basic.v")
def test_ignore_unused_undriven_hierarchy():
    """Check undriven/unused handling across a circuit hierarchy, including
    partially driven tuple and array outputs."""
    # For backwards compatability test
    # DeclareCircuit is deprecated; the call must still work but warn.
    with pytest.warns(DeprecationWarning):
        Bar = m.DeclareCircuit("Bar", "I", m.In(m.Bit))
    class Foo(m.Circuit):
        io = m.IO(I0=m.In(m.Bit), I1=m.In(m.Bit),
                  O0=m.Out(m.Bit), O1=m.Out(m.Bit))
        io.O1 @= io.I0
        Bar()(io.I1)  # I1 is consumed by the child; O0 stays undriven
    class Main(m.Circuit):
        _ignore_undriven_ = True  # only Main opts in; Foo keeps default behavior
        io = m.IO(I0=m.In(m.Bit), I1=m.In(m.Bit),
                  O0=m.Out(m.Bit), O1=m.Out(m.Bit),
                  O2=m.Out(m.Tuple[m.Bit, m.Bit]),
                  O3=m.Out(m.Array[2, m.Bit]))
        foo = Foo()
        foo.I0 @= io.I0
        io.O0 @= foo.O0
        # partially undriven
        io.O2[0] @= 1
        io.O3[0] @= 1
    m.compile("build/test_ignore_unused_undriven_hierarchy", Main, inline=True,
              drive_undriven=True, terminate_unused=True)
    # Compare generated Verilog against the checked-in golden output.
    assert check_files_equal(__file__,
                             "build/test_ignore_unused_undriven_hierarchy.v",
                             "gold/test_ignore_unused_undriven_hierarchy.v")
def test_ignore_undriven_coreir():
    """Same undriven/unused handling, but targeting the coreir JSON backend."""
    class Foo(m.Circuit):
        _ignore_undriven_ = True
        io = m.IO(I0=m.In(m.Bit), O0=m.Out(m.Bit), O1=m.Out(m.Bit))
        io += m.ClockIO()  # add clock ports; O0 stays undriven on purpose
        io.O1 @= io.I0
    class Main(m.Circuit):
        _ignore_undriven_ = True
        io = m.IO(I0=m.In(m.Bits[2]), I1=m.In(m.Bits[2]), O0=m.Out(m.Bit),
                  O1=m.Out(m.Bit)) + m.ClockIO()
        foo = Foo()
        foo.I0 @= io.I0 == io.I1
        io.O0 @= foo.O0  # O1 of Main stays undriven on purpose
    m.compile("build/test_ignore_undriven_coreir", Main, output="coreir",
              drive_undriven=True, terminate_unused=True)
    # Compare generated coreir JSON against the checked-in golden output.
    assert check_files_equal(__file__,
                             "build/test_ignore_undriven_coreir.json",
                             "gold/test_ignore_undriven_coreir.json")
| 2.6875 | 3 |
ehlers_danlos_syndrome/snpedia.py | thehappydinoa/ehlers-danlos-syndrome | 0 | 12798234 | import re
from typing import Optional, List, NamedTuple, Dict, Any
import requests
import bs4
# Matches a genotype written as "(X;Y)", e.g. "(A;G)".
GENO_REGEX = re.compile(r"\(.;.\)")
# Inline CSS of the yellow description box on SNPedia pages; used to locate it.
DESC_STYLE = (
    "border: 1px; background-color: #FFFFC0;"
    + "border-style: solid; margin:1em; width:90%;"
)
class SNP:
    """Container for a single SNP record scraped from SNPedia.

    Attributes:
        rsid: reference SNP identifier, e.g. "rs28937869".
        table: genotype table parsed from the page, or None if absent.
        description: free-text description from the page, or None if absent.
    """

    def __init__(
        self, rsid: str, table: Optional[list] = None, description: Optional[str] = None
    ):
        # Plain data holder: just remember what the scraper found.
        self.rsid = rsid
        self.table = table
        self.description = description
def table_to_dict(table: bs4.element.Tag) -> Dict[str, Any]:
    """Parse an HTML genotype table into {genotype: row namedtuple}.

    Header cells become lowercased field names of a dynamically built
    NamedTuple; one tuple is created per data row and keyed by its `geno`
    field (so the table is assumed to have a "geno" column — confirm).
    """
    html_headers = table.find_all("th")
    headers: List[str] = []
    for header in html_headers:
        h_str = header.string
        if not h_str:
            # Header text may be wrapped in a link; fall back to the anchor text.
            link = header.find("a")
            h_str = link.string
        headers.append(h_str.strip().lower())
    # Row type is built at runtime from the discovered column names.
    DataTuple = NamedTuple("Row", ((header, str) for header in headers)) # type: ignore
    html_rows = table.find_all("tr")
    data: Dict[str, DataTuple] = {}
    for row in html_rows:
        cols = row.find_all("td")
        if not cols:
            continue  # header row (th-only) has no td cells
        row_data = []
        for col in cols:
            data_str = col.string
            if not data_str:
                # Cell text may be wrapped in a link; fall back to the anchor text.
                link = col.find("a")
                data_str = link.string
            data_str = data_str.strip()
            if re.match(GENO_REGEX, data_str):
                # Normalize "(A;G)" genotype notation to a bare "AG".
                data_str = "".join(c for c in data_str if c not in ["(", ";", ")"])
            row_data.append(data_str)
        tup = DataTuple(*row_data)
        data[tup.geno] = tup # type: ignore
    return data
def get_snp_details(rsid: str) -> SNP:
    """Fetch the SNPedia page for `rsid` and parse its genotype table and description.

    Raises a generic Exception when the HTTP request does not succeed.
    """
    extras: Dict[str, Any] = {}
    snp_url = f"https://bots.snpedia.com/index.php/{rsid}"
    response = requests.get(snp_url)
    if not response.ok:
        raise Exception(f"Received code: {response.status_code} from {snp_url}")
    soup = bs4.BeautifulSoup(response.text, "html.parser")
    # The genotype table is identified by SNPedia's sortable-table CSS classes.
    genotype_table = soup.find("table", {"class": "sortable smwtable"})
    if genotype_table:
        extras["table"] = table_to_dict(genotype_table)
    # The description lives in a box identified only by its inline style.
    description_table = soup.find("table", {"style": DESC_STYLE})
    if description_table:
        description_cell = description_table.find("td")
        if description_cell:
            extras["description"] = description_cell.string
    return SNP(rsid, **extras)
if __name__ == "__main__":
    # Manual smoke test: fetch a known SNP and dump its genotype table.
    snp = get_snp_details("rs28937869")
    print(snp.table)
| 2.625 | 3 |
src/optimizers.py | nunocclourenco/BAIcO | 0 | 12798235 | <filename>src/optimizers.py
'''
This is the implementation of Simulated Annealing and NSGA-II used in the paper.
Created on Nov, 2020
@author: <NAME> <<EMAIL>>
NSGA - Adapted from https://github.com/ChengHust/NSGA-II
Updated to handle constraint optimization and fast non-dominated sorting
SA - Adapted from <NAME>'s code from https://github.com/perrygeo/simanneal/blob/master/simanneal/anneal.py
'''
import math
# `collections.Sequence` was removed in Python 3.10; the abstract base classes
# have lived in collections.abc since Python 3.3.
from collections.abc import Sequence
from itertools import repeat

import numpy as np
class Problem(object):
    """
    The problem related parameters and variation operators of
    cross over and mutation for GA, and move for SA.
    Parameters are handled in variation operators as real values.
    Extending classes should round integers in the cost function if needed.
    cost_fun() is defined for multi-objective multi-constraint optimization.
    Its up to the single objective optimizers to implement objective weigthing.
    objectives = ["name", ... ]
    parameters = ["name", ... ]
    min = [min, ... ]
    max = [max, ... ]
    """
    def __init__(self, d, min, max):
        # d: number of decision variables; min/max: per-variable bounds (arrays).
        self.d = d
        self.upper = max
        self.lower = min
    def __str__(self):
        # NOTE(review): self.target is not set in this base class — subclasses
        # are expected to define it; calling str() on a bare Problem will fail.
        return "Target: {}".format(self.target)
    def cost_fun(self, x):
        """
        calculate the objective and constraints vectors
        :param x: the decision vectors (one row per individual)
        :return: the objective, constraints, and additional data vectors
        """
        n = x.shape[0]
        obj = np.zeros((n, 1))
        cstr = np.zeros(n)
        # data associated with the solutions but not used in the optimization
        # it can be usefull to debug the cost function.
        # In analog IC optimization we will use this
        # data to store the simulation outputs
        data = np.zeros((n, 1))
        return obj, cstr, data
    def individual(self, pop_vars):
        """
        turn decision vectors into individuals
        :param pop_vars: decision vectors
        :return: (pop_vars, pop_obj, pop_cstr, pop_data)
        """
        pop_obj, pop_cstr, pop_data = self.cost_fun(pop_vars)
        return (pop_vars, pop_obj, pop_cstr, pop_data)
    def initialize(self, N):
        """
        initialize the population uniformly at random within the bounds
        :param N: number of elements in the population
        :return: the initial population (decision vectors only, not evaluated)
        """
        pop_dec = (np.random.random((N, self.d)) * (self.upper - self.lower)) + self.lower
        return pop_dec
    def variation(self, pop_dec, mutation=0.1, crossover=0.6):
        """
        Generate offspring individuals
        :param boundary: lower and upper boundary of pop_dec once d != self.d
        :param pop_dec: decision vectors
        :return: offspring decision vectors, clipped to the bounds
        """
        # Distribution indices for the crossover and mutation operators.
        dis_c = 10
        dis_m = 20
        # Use an even number of parents, split into two mating halves.
        pop_dec = pop_dec[:(len(pop_dec) // 2) * 2][:]
        (n, d) = np.shape(pop_dec)
        parent_1_dec = pop_dec[:n // 2, :]
        parent_2_dec = pop_dec[n // 2:, :]
        # SBX-style crossover: draw a per-gene spread factor beta.
        beta = np.zeros((n // 2, d))
        mu = np.random.random((n // 2, d))
        beta[mu <= 0.5] = np.power(2 * mu[mu <= 0.5], 1 / (dis_c + 1))
        beta[mu > 0.5] = np.power(2 * mu[mu > 0.5], -1 / (dis_c + 1))
        beta = beta * ((-1)** np.random.randint(2, size=(n // 2, d)))
        # Half of the genes (and non-crossed pairs) keep beta=1, i.e. copy the parent mean.
        beta[np.random.random((n // 2, d)) < 0.5] = 1
        beta[np.tile(np.random.random((n // 2, 1)) > crossover, (1, d))] = 1
        offspring_dec = np.vstack(((parent_1_dec + parent_2_dec) / 2 + beta * (parent_1_dec - parent_2_dec) / 2,
                                   (parent_1_dec + parent_2_dec) / 2 - beta * (parent_1_dec - parent_2_dec) / 2))
        # Polynomial mutation on a random subset of genes ('site'), split into
        # the two halves of the distribution by mu <= 0.5 / mu > 0.5.
        site = np.random.random((n, d)) < mutation
        mu = np.random.random((n, d))
        temp = site & (mu <= 0.5)
        lower, upper = np.tile(self.lower, (n, 1)), np.tile(self.upper, (n, 1))
        norm = (offspring_dec[temp] - lower[temp]) / (upper[temp] - lower[temp])
        offspring_dec[temp] += (upper[temp] - lower[temp]) * \
                               (np.power(2. * mu[temp] + (1. - 2. * mu[temp]) * np.power(1. - norm, dis_m + 1.),
                                         1. / (dis_m + 1)) - 1.)
        temp = site & (mu > 0.5)
        norm = (upper[temp] - offspring_dec[temp]) / (upper[temp] - lower[temp])
        offspring_dec[temp] += (upper[temp] - lower[temp]) * \
                               (1. - np.power(
                                   2. * (1. - mu[temp]) + 2. * (mu[temp] - 0.5) * np.power(1. - norm, dis_m + 1.),
                                   1. / (dis_m + 1.)))
        # Clip offspring back into the feasible box.
        offspring_dec = np.maximum(np.minimum(offspring_dec, upper), lower)
        return offspring_dec
    def move(self, parameter_values):
        """
        SA neighbourhood move: perturb `parameter_values` with Gaussian noise
        scaled by the variable ranges, snap to the step grid, and clip to bounds.

        NOTE(review): relies on self.ranges (columns appear to be min, max, step),
        which is not defined in this base class — subclasses must provide it.
        The docstring of the original mentioned RL-style observations/rewards,
        which does not match this implementation.
        """
        action = np.random.normal(scale=0.1, size=len(parameter_values))
        parameter_values = parameter_values + action*(self.ranges[:,1] - self.ranges[:, 0])
        parameter_values = np.round(parameter_values / self.ranges[:,2])*self.ranges[:,2]
        parameter_values = np.fmin(np.fmax(parameter_values,self.ranges[:, 0]),self.ranges[:,1])
        return parameter_values
def half_tournemant(rank, cdist):
    """One half-size binary tournament.

    Shuffle all indices, pair the first half against the second, and keep from
    each pair the index with the lower rank; rank ties are broken in favour of
    the larger crowding distance.
    """
    n = len(rank)
    shuffled = np.arange(n)
    np.random.shuffle(shuffled)
    first, second = shuffled[:n//2], shuffled[n//2:]
    tied = rank[first] == rank[second]
    roomier = cdist[first] > cdist[second]
    # The first contender wins on strictly better rank, or on a tie with more room.
    first_wins = (tied & roomier) | (rank[first] < rank[second])
    return shuffled[np.r_[first_wins, ~first_wins]]
def tournament(rank, cdist):
    """Binary tournament selection.

    :param rank: non-domination front number per individual
    :param cdist: crowding distance per individual
    :return: int16 array of selected indices (the mating pool), same length as rank
    """
    n = len(rank)
    winners = np.zeros(n, dtype=np.int16)
    # Run two independent half-size tournaments and interleave their winners,
    # so every call returns a full-size mating pool.
    winners[0::2] = half_tournemant(rank, cdist)
    winners[1::2] = half_tournemant(rank, cdist)
    return winners
def objective_dominance(pop_obj, i, j):
    """
    Objective-wise Pareto comparison of individuals i and j (minimization).
    :param pop_obj: objective values, one row per individual (NxM)
    :param i, j: indices of the two individuals being compared
    :return: (dominator, dominated) index pair, or (None, None) when neither
             strictly dominates the other
    """
    better_somewhere = bool(np.any(pop_obj[i] < pop_obj[j]))
    worse_somewhere = bool(np.any(pop_obj[i] > pop_obj[j]))
    if better_somewhere and not worse_somewhere:
        return i, j
    if worse_somewhere and not better_somewhere:
        return j, i
    # Equal everywhere, or better on some objectives and worse on others.
    return None, None
def fnd_sort(pop_obj, pop_cstr):
    """
    Fast non-dominated sorting (constrained Pareto dominance).
    Individuals with equal constraint values are compared objective-wise;
    otherwise the one with the larger pop_cstr dominates (pop_cstr is
    presumably a satisfaction measure where larger is better — confirm
    against the cost_fun convention).
    :param pop_obj: population objectives (NxM)
    :param pop_cstr: population constraint values (Nx1)
    :return: (ranks array, max assigned rank); only about half the
             population gets a finite rank (see assign_rank)
    """
    n = pop_obj.shape[0]
    # Domination bookkeeping needed by the fast non-dominated sort.
    dominate = [[] for _ in range(n)]
    dominated_by_counter = np.zeros(n, dtype=int)
    for a in range(n):
        for b in range(a + 1, n):
            if pop_cstr[a] < pop_cstr[b]:
                winner, loser = b, a
            elif pop_cstr[a] > pop_cstr[b]:
                winner, loser = a, b
            else:
                winner, loser = objective_dominance(pop_obj, a, b)
                if winner is None:
                    continue  # mutually non-dominated
            dominate[winner].append(loser)
            dominated_by_counter[loser] += 1
    return assign_rank(dominate, dominated_by_counter)
def assign_rank(dominate, dominated_by_counter):
    """
    Peel off non-dominated fronts and assign ranks (fast non-dominated sorting).
    Stops once at least half the population has a finite rank; the rest keep inf.
    :param dominate: per-individual lists of the individuals it dominates
    :param dominated_by_counter: per-individual count of dominators
    :return: (ranks array, highest rank assigned)
    """
    n = len(dominate)
    ranks = np.full(n, np.inf)
    # Front 1 consists of everything nobody dominates.
    front = [i for i, c in enumerate(dominated_by_counter) if c == 0]
    rank = 1
    while np.count_nonzero(ranks < np.inf) < n / 2:
        ranks[front] = rank
        next_front = []
        for winner in front:
            # Release the individuals this winner dominates; any whose
            # counter drops to zero joins the next front.
            for loser in dominate[winner]:
                dominated_by_counter[loser] -= 1
                if dominated_by_counter[loser] == 0:
                    next_front.append(loser)
        front = next_front
        rank += 1
    return ranks, rank - 1
def crowding_distance(pop_obj, rank):
    """
    Crowding distance of every ranked individual (NSGA-II diversity measure).
    Boundary solutions of each front get inf; interior ones accumulate, per
    objective, the normalized gap between their two sorted neighbours.
    Unranked individuals (rank == inf) keep distance 0.
    :param pop_obj: objective vectors (NxM)
    :param rank: front number per individual (inf for unranked)
    :return: crowding distance array (N)
    """
    n, n_obj = np.shape(pop_obj)
    crowd_dis = np.zeros(n)
    for front_no in np.unique(rank):
        if front_no == np.inf:
            continue  # skip unranked individuals
        members = np.array([i for i, r in enumerate(rank) if r == front_no])
        f_hi = pop_obj[members, :].max(0)
        f_lo = pop_obj[members, :].min(0)
        for m in range(n_obj):
            order = np.argsort(pop_obj[members, m])
            # Extremes of the front are always kept.
            crowd_dis[members[order[0]]] = np.inf
            crowd_dis[members[order[-1]]] = np.inf
            span = (f_hi[m] - f_lo[m]) if f_hi[m] != f_lo[m] else 1.0
            for k in range(1, len(members) - 1):
                gap = pop_obj[(members[order[k + 1]], m)] - pop_obj[(members[order[k - 1]], m)]
                crowd_dis[members[order[k]]] += gap / span
    return crowd_dis
def environment_selection(pop_dec, pop_obj, pop_cstr, pop_data, n):
    '''
    Environmental selection in NSGA-II: keep every individual from fronts
    better than the last one, then fill the remaining slots from the last
    front by descending crowding distance.
    :param pop_dec, pop_obj, pop_cstr, pop_data: current (merged) population
    :param n: number of individuals to keep
    :return: (decision vars, objectives, constraints, data, ranks,
              crowding distances, selected indices)
    '''
    # Rank and crowding are recomputed here; note fnd_sort only ranks about
    # half the merged population, so the overflow sits in the last front.
    fronts, max_front = fnd_sort(pop_obj, pop_cstr)
    crowd_dis = crowding_distance(pop_obj, fronts)
    selected = [i for i, f in enumerate(fronts) if f < max_front]
    boundary = [i for i, f in enumerate(fronts) if f == max_front]
    n_missing = n - len(selected)
    by_distance = np.argsort(-crowd_dis[boundary])
    selected.extend(boundary[k] for k in by_distance[:n_missing])
    return (pop_dec[selected, :], pop_obj[selected, :], pop_cstr[selected],
            [pop_data[i] for i in selected], fronts[selected], crowd_dis[selected], selected)
class NSGA2:
    """Elitist non-dominated sorting genetic algorithm (NSGA-II) driver."""
    def minimize(self, problem, pop_size=100, evaluations=100 * 500, mutation=0.2, crossover=0.8, initial_pop=None):
        """
        Run NSGA-II on `problem`, yielding the population after every generation.

        :param problem: Problem instance (provides initialize/individual/variation)
        :param pop_size: population size
        :param evaluations: total evaluation budget
        :param mutation: mutation probability forwarded to problem.variation
        :param crossover: crossover probability forwarded to problem.variation
        :param initial_pop: optional initial decision vectors; random when None
        :yields: (pop, pop_obj, pop_cstr, pop_data, remaining_evals, front_no)
        :return: final (pop, pop_obj, pop_cstr, pop_data)
        """
        if initial_pop is None:
            self.pop, self.pop_obj, self.pop_cstr, self.pop_data = problem.individual(problem.initialize(pop_size))
        else:
            self.pop, self.pop_obj, self.pop_cstr, self.pop_data = problem.individual(initial_pop)
        front_no, max_front = fnd_sort(self.pop_obj, self.pop_cstr)
        crowd_dis = crowding_distance(self.pop_obj, front_no)
        evals = evaluations
        yield self.pop, self.pop_obj, self.pop_cstr, self.pop_data, evals, front_no
        while evals > 0:
            # Select parents, create offspring, and merge them with the parents.
            mating_pool = tournament(front_no, crowd_dis)
            self.offspring_dec, self.offspring_obj, self.offspring_cstr, self.offspring_data = problem.individual(
                problem.variation(self.pop[mating_pool, :], mutation=mutation, crossover=crossover))
            self.pop = np.vstack((self.pop, self.offspring_dec))
            self.pop_obj = np.vstack((self.pop_obj, self.offspring_obj))
            self.pop_cstr = np.concatenate((self.pop_cstr, self.offspring_cstr))
            self.pop_data = self.pop_data + self.offspring_data
            self.pop, self.pop_obj, self.pop_cstr, self.pop_data, front_no, crowd_dis, _ = environment_selection(
                self.pop, self.pop_obj, self.pop_cstr, self.pop_data, pop_size)
            evals = evals - pop_size
            yield self.pop, self.pop_obj, self.pop_cstr, self.pop_data, evals, front_no
            # Replace duplicated individuals (after rounding) with fresh random
            # ones to preserve diversity.
            _, index = np.unique(self.pop.round(decimals=9), axis=0, return_index=True)
            if len(index) < self.pop.shape[0]:
                select = np.in1d(range(self.pop.shape[0]), index)
                self.pop[~select, :], self.pop_obj[~select, :], self.pop_cstr[~select], data = problem.individual(
                    problem.initialize(self.pop.shape[0] - len(index)))
                # BUGFIX: iterate the *positions* of the replaced individuals.
                # The original zipped over the boolean mask itself, so only
                # pop_data[0]/pop_data[1] were ever overwritten.
                for i, v in zip(np.where(~select)[0], data):
                    self.pop_data[i] = v
        return self.pop, self.pop_obj, self.pop_cstr, self.pop_data
def default_mo_2_so(objs, cstr):
    """Default multi-objective to single-objective conversion for SA:
    mean of the objectives plus the constraint term."""
    average_objective = sum(objs) / len(objs)
    return average_objective + cstr
def simulated_annealing(problem, steps = 10000, t_max = 1500.0, t_min = 2.5, initial_state=None, convert_multi_obj = default_mo_2_so):
    '''
    Minimizes the cost of a system by simulated annealing with exponential
    cooling from t_max down to t_min.

    :param problem: Problem instance providing initialize/individual/move
    :param steps: number of annealing steps
    :param t_max: initial temperature
    :param t_min: final temperature (must be > 0 for exponential cooling)
    :param initial_state: optional starting decision vector(s); random if None
    :param convert_multi_obj: maps (objectives, constraints) to a scalar cost
    :return: (best_state, best_value) — the best state found and its scalar cost
    '''
    if t_min <= 0.0: raise ValueError('Exponential cooling requires a minimum temperature greater than zero.')
    # Note initial state
    if initial_state is None :
        # BUGFIX: initialize() returns only decision vectors; they must be
        # evaluated through individual() to obtain (state, obj, cstr, data),
        # mirroring the else-branch below. The original unpacked the raw
        # decision matrix into four names, which fails unless d == 4.
        best_state, best_obj, best_cstr, best_data = problem.individual(problem.initialize(1))
    else:
        best_state, best_obj, best_cstr, best_data = problem.individual(initial_state)
    state = best_state
    prev_state, prev_obj, prev_cstr, prev_data = best_state, best_obj, best_cstr, best_data
    best_value = prev_value = convert_multi_obj(best_obj, best_cstr)
    step = 0
    # Precompute factor for exponential cooling from Tmax to Tmin
    cooling_factor = -math.log(t_max / t_min)
    # Attempt moves to new states
    while step < steps:
        step += 1
        T = t_max * math.exp(cooling_factor * step / steps)
        state, obj, cstr, data = problem.individual(problem.move(state))
        value = convert_multi_obj(obj, cstr)
        dV = 100*(value - prev_value)  # scaled cost delta used in the Metropolis test
        if dV > 0.0 and math.exp(-dV / T) < np.random.random():
            # Reject the move: restore previous state
            state, obj, cstr, data, value = prev_state, prev_obj, prev_cstr, prev_data, prev_value
        else:
            # Accept new state and compare to best state
            prev_state, prev_obj, prev_cstr, prev_data, prev_value = state, obj, cstr, data, value
            if value < best_value:
                best_state, best_obj, best_cstr, best_data, best_value = state, obj, cstr, data, value
    # Return best state and its scalar cost
    return best_state, best_value
if __name__ == '__main__':
    # Demo: size a voltage-combiner amplifier with simulated annealing.
    # NOTE(review): VCAmplifierCircuitOptProblem, ng and SA are not defined in
    # this module — this demo presumably relied on project imports that are
    # missing here; running it as-is will raise NameError. Confirm.
    seed = 17
    np.random.seed(seed)
    # Saturation constraints: overdrive voltages and margins for every device.
    sat_conditions = {}
    sat_conditions["vov_mpm0"] = 0.05
    sat_conditions["vov_mpm1"] = 0.05
    sat_conditions["vov_mpm2"] = 0.05
    sat_conditions["vov_mpm3"] = 0.05
    sat_conditions["vov_mnm4"] = 0.05
    sat_conditions["vov_mnm5"] = 0.05
    sat_conditions["vov_mnm6"] = 0.05
    sat_conditions["vov_mnm7"] = 0.05
    sat_conditions["vov_mnm8"] = 0.05
    sat_conditions["vov_mnm9"] = 0.05
    sat_conditions["vov_mnm10"] = 0.05
    sat_conditions["vov_mnm11"] = 0.05
    sat_conditions["delta_mpm0"] = 0.1
    sat_conditions["delta_mpm1"] = 0.1
    sat_conditions["delta_mpm2"] = 0.1
    sat_conditions["delta_mpm3"] = 0.1
    sat_conditions["delta_mnm4"] = 0.1
    sat_conditions["delta_mnm5"] = 0.1
    sat_conditions["delta_mnm6"] = 0.1
    sat_conditions["delta_mnm7"] = 0.1
    sat_conditions["delta_mnm8"] = 0.1
    sat_conditions["delta_mnm9"] = 0.1
    sat_conditions["delta_mnm10"] = 0.1
    sat_conditions["delta_mnm11"] = 0.1
    # Greater-than specs: DC gain, bandwidth, phase margin, figure of merit.
    gt={'gdc': 50,'gbw': 35e6,'pm' : 45.0, 'fom': 900}
    gt.update(sat_conditions)
    circuit = VCAmplifierCircuitOptProblem(
        ng.Specifications(objective=[('idd', 1)], lt={'idd': 35e-5,'pm' : 90.0},gt=gt), discrete_actions = False)
    sa = SA()
    print(circuit)
    for iter, stats in sa.minimize(circuit):
        print("\r iter {}: {}".format(iter, stats))
    print(sa.best_state)
    print(circuit.simulate(sa.best_state))
    print(circuit.target.verify(circuit.simulate(sa.best_state)))
print(circuit.target.verify(circuit.simulate(sa.best_state))) | 2.984375 | 3 |
Programs/Chapter8-programs/python/unit_test_example/src/PairingBasisGenerated.py | cpmoca/LectureNotesPhysics | 24 | 12798236 | class PairingBasisGen:
def generateFermi(self):
for i in range(0,self.nParticles):
self.below_fermi.append(i)
for j in range(self.nParticles, self.nSpStates):
self.above_fermi.append(j)
def generateStates(self):
for sp in range(0,self.nSpStates/2):
self.states.append((sp+1,1))
self.states.append((sp+1,-1))
def __init__(self, statesIn, particlesIn):
self.nSpStates = statesIn
self.nParticles = particlesIn
self.below_fermi = []
self.above_fermi = []
self.states = []
self.generateFermi()
self.generateStates()
| 2.78125 | 3 |
driving.py | julianx4/skippycar | 5 | 12798237 | import time
import math as m
import redis
import struct
import numpy as np
from adafruit_servokit import ServoKit
r = redis.Redis(host='localhost', port=6379, db=0)
kit = ServoKit(channels=16)
#controllable variables
def rget_and_float(name, default = None):
    """Read `name` from the module-level Redis client and return it as a float.

    :param name: Redis key to read
    :param default: value returned when the key is absent
    """
    output = r.get(name)
    if output is None:  # identity check is the Python idiom, not `== None`
        return default
    return float(output)
# Initial controllable state, read from Redis (defaults: both diffs locked, 1st gear).
rear_diff_locked = int(rget_and_float('rear_diff_locked', 1))
front_diff_locked = int(rget_and_float('front_diff_locked', 1))
gear = int(rget_and_float('gear', 1))
low_battery_voltage = rget_and_float('low_battery_voltage', 3.5)  # cutoff [V], per reported cell
#----
speed_cap = 45 #percentage of max speed
#steering angle 30 - 150
# ESC throttle servo-signal endpoints (72 is the neutral/stop point).
throttle_stop = 72
throttle_full_forward = 180
throttle_full_reverse = 0
# Servo controller channel assignments.
steering_pin = 15
esc_pin = 14
frontdiff_pin = 11
reardiff_pin = 13
gearbox_pin = 12
# Gearbox servo position per gear index.
gear_servo_pos = [0, 60, 110]
rear_diff_servo_pos = [78, 15] #0 locked, 1 open
front_diff_servo_pos = [120, 55] #0 locked, 1 open
def steering_angle(angle):
    """Point the steering servo; `angle` (degrees) is clamped to [-55, 55]."""
    clamped = min(55, max(-55, angle))
    # Servo midpoint is 88; requested angles are mirrored onto the servo axis.
    kit.servo[steering_pin].angle = 88 - clamped
def driving_speed_signal(speed):
    """Send a throttle signal to the ESC; `speed` (percent) is clamped to [-72, 100]."""
    clamped = min(100, max(-72, speed))
    # Scale by the global speed cap and shift by the ESC neutral point (72).
    kit.servo[esc_pin].angle = clamped * speed_cap / 100 + 72
# Main control loop: refresh actuator state from Redis every ~30 ms, watch the
# battery, and translate target speed / steering angle into servo signals.
driving = True
in_motion_start = time.time()
while driving:
    # Re-read operator-controllable state and drive the auxiliary servos.
    rear_diff_locked = int(rget_and_float('rear_diff_locked', 1))
    front_diff_locked = int(rget_and_float('front_diff_locked', 1))
    gear = int(rget_and_float('gear', 1))
    kit.servo[gearbox_pin].angle = gear_servo_pos[gear]
    kit.servo[reardiff_pin].angle = rear_diff_servo_pos[rear_diff_locked]
    kit.servo[frontdiff_pin].angle = front_diff_servo_pos[front_diff_locked]
    low_battery_voltage = rget_and_float('low_battery_voltage', 3.5)
    # Battery guard: stop when telemetry is missing or any cell is too low.
    voltages_received = r.get('voltages')
    if voltages_received is None:
        print("no battery info")
        break
    else:
        # Two packed little-endian floats, one per reported cell.
        voltages = np.array(struct.unpack('%sf' %2, voltages_received))
        if voltages.min() < low_battery_voltage:
            print(voltages.min())
            break
    target_speed = r.get('target_speed')
    current_speed_received = r.get('current_speed')
    if current_speed_received is not None:
        current_speed = float(current_speed_received)
        #print(current_speed)
    # NOTE(review): if target_speed arrives before any current_speed was ever
    # published, `current_speed` below is unbound (NameError) — confirm the
    # publisher always sends current_speed first.
    if target_speed is None:
        #print("no driving input received")
        driving_speed_signal(0)
        in_motion_start = time.time()
    else:
        target_speed = float(target_speed)
        if target_speed > 0:
            # Stuck for >2 s while commanded forward: boost throttle by 50%.
            if current_speed < 0.05 and time.time() - in_motion_start > 2:
                driving_speed_signal(target_speed * 1.5)
                #print("driving faster")
            else:
                driving_speed_signal(target_speed * 1)
                #print("driving normal speed")
        else:
            driving_speed_signal(0)
            #print("stopped")
            in_motion_start = time.time()
    # Steering: center the wheels when no command is present.
    angle_received = r.get('angle')
    if angle_received is None:
        #print("no steering input received")
        steering_angle(0)
    else:
        steering_angle(float(angle_received))
    # Heartbeat key with a 1 s expiry so watchers can tell the loop is alive.
    r.psetex('log_driving_running', 1000, "on")
    time.sleep(0.03) # ???
# Shutdown sequence: stop the motor and wiggle the steering as a visual signal.
print("stopping")
driving_speed_signal(0)
steering_angle(-20)
time.sleep(1)
steering_angle(20)
time.sleep(1)
steering_angle(-20)
time.sleep(1)
steering_angle(0)
| 2.734375 | 3 |
02-make_figures.py | kochanczyk/covid19-pareto | 1 | 12798238 | #!/usr/bin/env python3
#pylint: disable = C, R
#pylint: disable = E1101 # no-member (generated-members)
#pylint: disable = C0302 # too-many-lines
"""
This code features the article
"Pareto-based evaluation of national responses to COVID-19 pandemic shows
that saving lives and protecting economy are non-trade-off objectives"
by Kochanczyk & Lipniacki (Scientific Reports, 2021).
License: MIT
Last changes: November 09, 2020
"""
# --------------------------------------------------------------------------------------------------
import re
from operator import itemgetter
from multiprocessing import Pool
import pandas as pd
import seaborn as sns
import numpy as np
import scipy.stats
import statsmodels.stats.weightstats as wstats
import matplotlib.pyplot as plt
import matplotlib.dates as dts
import matplotlib.ticker as tckr
import matplotlib.patheffects as pthff
from colorsys import rgb_to_hls
from pandas.plotting import register_matplotlib_converters
import locale
import dill
import gzip
from shared import *
register_matplotlib_converters()
locate_set = False
# Try progressively more portable spellings of the US-English locale; fall
# back to POSIX when neither is installed. `setlocale` only raises
# locale.Error, so catch exactly that instead of a bare `except:` that would
# swallow unrelated failures (e.g. KeyboardInterrupt).
for _locale_name in ('en_US', 'en_US.utf8'):
    try:
        locale.setlocale(locale.LC_TIME, _locale_name)
        locale.setlocale(locale.LC_ALL, _locale_name)
        locate_set = True
        break
    except locale.Error:
        continue
if not locate_set:
    locale.setlocale(locale.LC_TIME, 'POSIX')
    locale.setlocale(locale.LC_ALL, 'POSIX')
    print('Warning: US English locale could not be set. Check tick labels in generated figures.')
# -- Shared plot settings --------------------------------------------------------------------------
# Thin axes/tick lines, tight label padding, small LaTeX-rendered sans-serif
# text — shared by all figures produced by this script.
plt.rcParams['axes.linewidth'] = 0.5
plt.rcParams['xtick.major.width'] = 0.5
plt.rcParams['ytick.major.width'] = 0.5
plt.rcParams['xtick.minor.width'] = 0.5
plt.rcParams['ytick.minor.width'] = 0.5
plt.rcParams['xtick.major.pad'] = 1.67
plt.rcParams['ytick.major.pad'] = 1.33
plt.rc('font', size=8, family='sans-serif')
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'''\usepackage{cmbright}''')
# -- Plotting auxiliary functions ------------------------------------------------------------------
# manual tweaks:
# Locations excluded by hand from Pareto-front construction.
OUT_OF_FRONT = ['Greece', 'Hungary', 'Canada', 'Netherlands', 'Czechia']
# colors:
SNAIL_GREEN, SNAIL_NONGREEN, SNAIL_ORANGE = '#77ffaa', '#aabbdd', '#885500'
ANNOT_COLOR = '#777777'
def color_of(country, dull_color=(0.15, 0.15, 0.15)):
    """Return the fixed plotting color for a location, or `dull_color` for
    any location not in the hand-picked table below."""
    colors = {
        'Austria': plt.cm.tab10(6),
        'Belgium': plt.cm.tab10(5),
        'Bulgaria': plt.cm.tab10(2),
        'Croatia': (0.50, 0.55, 0.00),
        'Czechia': plt.cm.tab10(4),
        'Denmark': (0.85, 0.20, 0.00),
        'Finland': plt.cm.tab10(9),
        'France': (0.95, 0.25, 0.75),
        'Germany': (0.55, 0.25, 0.70),
        'Hungary': (0.35, 0.35, 0.35),
        'Greece': (0.45, 0.75, 1.00),
        'Italy': plt.cm.tab10(2),
        'Netherlands': (0.88, 0.50, 0.00),
        'Norway': plt.cm.tab10(0),
        'Poland': (0.15, 0.65, 1.00),
        'Portugal': (0.95, 0.65, 0.00),
        'Romania': plt.cm.tab10(8),
        'Russia': (0.80, 0.45, 0.15),
        'Slovakia': (0.25, 0.90, 0.50),
        'Slovenia': plt.cm.tab10(1),
        'Spain': plt.cm.tab10(3),
        'Sweden': (0.10, 0.20, 0.90),
        'Switzerland': (1.00, 0.05, 0.05),
        'United Kingdom': (0.20, 0.00, 0.99),
        'Japan': (0.9, 0.00, 0.00),
        'South Korea': (0.70, 0.60, 0.65),
        'Taiwan': (0.10, 0.80, 0.00),
        'California': (0.90, 0.70, 0.00),
        'Canada': (0.00, 0.45, 0.80),
        'Florida': (0.95, 0.40, 0.00),
        'Georgia': (0.80, 0.10, 0.60),
        'Illinois': (0.75, 0.50, 0.00),
        'Michigan': (0.05, 0.50, 0.15),
        'North Carolina': (0.10, 0.00, 0.95),
        'New York': (0.60, 0.30, 0.00),
        'Ohio': (0.65, 0.00, 0.00),
        'Pennsylvania': (0.20, 0.25, 1.00),
        'Texas': (0.35, 0.40, 0.40),
        'Argentina': (0.30, 0.75, 1.00),
        'Bolivia': (0.20, 0.65, 0.00),
        'Brazil': (0.00, 0.70, 0.20),
        'Chile': (0.65, 0.15, 0.00),
        'Colombia': (0.00, 0.10, 0.65),
        'Ecuador': (0.65, 0.65, 0.00),
        'Mexico': (0.00, 0.50, 0.60),
        'Peru': (0.75, 0.50, 0.25),
    }
    if country in colors.keys():
        return colors[country]
    else:
        return dull_color
def correlations(values, weights):
    """Return (plain Pearson rho, weighted Pearson rho) between the two
    columns of `values`, with `weights` used only for the weighted variant."""
    plain_rho = scipy.stats.pearsonr(values[:, 0], values[:, 1])[0]
    weighted_rho = wstats.DescrStatsW(values, weights=weights).corrcoef[0][1]
    return (plain_rho, weighted_rho)
def adjust_spines(ax, spines, left_shift=15, bottom_shift=0):
    """Show only the spines listed in `spines`, pushing 'left'/'bottom'
    outward by the given amounts, and hide the remaining spines and ticks."""
    outward_shift = {'left': left_shift, 'bottom': bottom_shift}
    for name, spine in ax.spines.items():
        if name not in spines:
            spine.set_color('none')  # hide spines that were not requested
        elif name in outward_shift:
            spine.set_position(('outward', outward_shift[name]))
    # Ticks follow the spines: keep them on a visible spine, drop them otherwise.
    if 'left' in spines:
        ax.yaxis.set_ticks_position('left')
    else:
        ax.yaxis.set_ticks([])
    if 'bottom' in spines:
        ax.xaxis.set_ticks_position('bottom')
    else:
        ax.xaxis.set_ticks([])
def set_ticks_lengths(ax):
    """Apply the shared tick geometry: short major ticks with slightly smaller
    labels, and even shorter minor ticks."""
    ax.tick_params(which='major', length=2., labelsize=7)
    ax.tick_params(which='minor', length=1.)
def darken(color, scale=0.5):
    """Return `color` with its HLS lightness multiplied by `scale` (capped at 1)."""
    current_lightness = rgb_to_hls(*color[0:3])[1]
    new_lightness = min(1, current_lightness * scale)
    return sns.set_hls_values(color=color, h=None, l=new_lightness, s=None)
def pareto_front(data, optima=True):
    """Extract the Pareto front from (x, y, label) triples.

    Entries are scanned in x-ascending order for minima (`optima=True`) or
    x-descending order for maxima; a label joins the front whenever its y
    strictly improves on the best y seen so far.

    :return: list of labels on the front, in scan order
    """
    ordered = sorted(data, key=itemgetter(0, 1), reverse=not optima)
    front_labels = [ordered[0][2]]
    best_y = ordered[0][1]
    for entry in ordered[1:]:
        y = entry[1]
        improves = y < best_y if optima else y > best_y
        if improves:
            front_labels.append(entry[2])
            best_y = y
    return front_labels
def put_final_dot(ax, location, x, y, is_extra_country=False, is_tail_shown=False,
                  show_population_halo=False, label_shifting='A', italic=False):
    """Plot a location's final (x, y) marker and its text label on `ax`.

    Extra countries get an octagon marker; with `show_population_halo` the
    marker area scales with the location's population. `label_shifting='A'`
    applies the hand-tuned per-location label offsets below.
    NOTE(review): `is_tail_shown` is accepted but unused here; `marker_size`
    set in the halo branch is also unused (the halo uses `diameter`).
    """
    # Hand-tuned (x-offset, y-scale) label tweaks, only for scheme 'A'.
    label_shifts = {
        'Denmark': (940, 1.0 ),
        'Norway': ( 20, 0.88 ),
        'South Korea': ( 52, 0.59 ),
        'Portugal': ( 0, 0.97 ),
        'Bulgaria': (830, 0.994),
        'Switzerland': ( 80, 0.92 ),
        'Ohio': ( 40, 1.014),
        'Michigan': (800, 1.018),
        'Florida': ( 0, 0.987),
        'Illinois': ( 90, 1.016),
        'North Carolina': (-10, 0.97 ),
        'Pennsylvania': ( 0, 0.999),
        'Georgia': (825, 0.991)
    } if label_shifting == 'A' else {}
    if show_population_halo:
        marker_size = 3.5
        # Marker diameter grows with sqrt(population) so area ~ population.
        diameter = np.sqrt(population(location)) * 3
        light_color = color_of(location)
        ax.plot([x], [y], '-.', marker='8' if is_extra_country else 'o',
                linewidth=1, markersize=diameter, markeredgewidth=0, alpha=0.2, clip_on=False,
                color=light_color, markerfacecolor=light_color)
    else:
        marker_size = 6
        ax.plot([x], [y], '-.', marker='8' if is_extra_country else 'o',
                linewidth=1, markersize=marker_size, markeredgewidth=0, alpha=0.8, clip_on=False,
                color=color_of(location), markerfacecolor=color_of(location))
    loc = location.replace('United Kingdom', 'UK')
    if italic:
        loc = r'\textit{' + loc + r'}'
    # Labels are drawn in a darkened version of the location's color.
    if label_shifting == 'A':
        ax.annotate(loc, xycoords='data',
                    xy=(x + 65 - (0 if location not in label_shifts else label_shifts[location][0]),
                        y**0.9999 * (1 if location not in label_shifts else label_shifts[location][1])),
                    color=sns.set_hls_values(color_of(location), l=0.3), clip_on=False)
    else:
        ax.annotate(loc, xycoords='data',
                    xy=(x + 0.13,
                        y + 0.04),
                    color=sns.set_hls_values(color_of(location), l=0.3), clip_on=False)
def jointly_trimmed_trajs(trajs, locations, cols, force_end=None, skipped=None, cleanup=True,
                          verbose=False):
    """Trim each location's trajectory to the last day on which both columns
    are available across all (non-skipped) locations.

    :param trajs: dict of location -> DataFrame with a date index
    :param cols: exactly two column names that must both be non-null
    :param force_end: override the computed shared last day
    :param skipped: locations ignored when computing the shared last day
    :param cleanup: drop rows where either column is null in the result
    :return: (shared last day, dict of trimmed DataFrames)
    """
    assert len(cols) == 2
    col1, col2 = cols
    last_days = set()
    for location in locations:
        if skipped and location in skipped:
            continue
        traj = trajs[location]
        complete_rows = traj[~traj[col1].isnull() & ~traj[col2].isnull()]
        final_day = complete_rows.iloc[-1].name
        last_days.add(final_day)
        if verbose:
            print(location, final_day.strftime('%b%d'))
    # The shared last day is the earliest of the per-location last days.
    shared_last_day = min(last_days)
    if force_end is not None:
        if verbose:
            print(f"Last shared available day ({' & '.join(cols)}):",
                  shared_last_day.strftime('%b%d'), '==FORCED=>',
                  force_end.strftime('%b%d'))
        shared_last_day = force_end
    elif verbose:
        print(f"Last shared available day ({' & '.join(cols)}):",
              shared_last_day.strftime('%b%d'))
    trimmed = {}
    for location in locations:
        traj = trajs[location].loc[:shared_last_day]
        if cleanup:
            traj = traj[~traj[col1].isnull() & ~traj[col2].isnull()]
        trimmed[location] = traj
    return shared_last_day, trimmed
def extract_cumulative_immobilization_and_deaths(trajectories, country, interval):
    """Build a two-column DataFrame of cumulative lockdown (negated mobility
    reduction) and cumulative deaths per capita for `country`, sampled at the
    requested interval ('monthly', 'weekly', or 'daily').

    NOTE(review): an unknown `interval` silently returns None — confirm
    callers only pass the three supported values.
    """
    trajectory = trajectories[country]
    # Mobility reduction is stored negative; negate to get "immobilization".
    immobi = -trajectory[['mobility_reduction']]
    deaths = trajectory[['new_deaths']].astype('Float64')
    ppl = population(country)
    if interval == 'monthly':
        # Cumulative sums sampled at the first available day of each month.
        immobi = immobi.cumsum().groupby(pd.Grouper(freq='M')).nth(0)
        deaths = deaths.cumsum().groupby(pd.Grouper(freq='M')).nth(0) / ppl
        df = immobi.join(deaths).rename(columns={
            'mobility_reduction': f"immobilization_cumul_{country}",
            'new_deaths': f"new_deaths_cumul_per_1M_{country}"})
        # Snap each sampled index entry to the 1st of its month.
        ii = df.index
        df.index = [i.replace(day=1) for i in ii]
        return df
    elif interval == 'weekly':
        # Weekly totals, then cumulated.
        immobi = immobi.resample('W').sum().cumsum()
        deaths = deaths.resample('W').sum().cumsum() / ppl
        df = immobi.join(deaths).rename(columns={
            'mobility_reduction': f"immobilization_cumul_{country}",
            'new_deaths': f"new_deaths_cumul_per_1M_{country}"})
        return df
    elif interval == 'daily':
        immobi = immobi.cumsum()
        deaths = deaths.cumsum() / ppl
        df = immobi.join(deaths).rename(columns={
            'mobility_reduction': f"immobilization_cumul_{country}",
            'new_deaths': f"new_deaths_cumul_per_1M_{country}"})
        return df
def make_sqrt_deaths_yaxis(ax, ymax=40, sep=5):
    """Configure a y-axis for sqrt-scaled death counts.

    Ticks are placed every ``sep`` units up to ``ymax``; each non-zero tick t
    is labelled as the LaTeX radical of t**2, so the axis reads as raw death
    counts while the plotted data is square-rooted.
    """
    ax.set_ylim((0, ymax))
    tick_positions = list(range(0, ymax + sep, sep))
    ax.set_yticks(tick_positions)
    labels = ['0']
    for t in tick_positions[1:]:
        labels.append(r'$\sqrt{' + str(t**2) + '}$')
    ax.set_yticklabels(labels)
def plot_cumulative_immobilization_and_deaths(trajectories, locations, final_day, show_fronts,
                                              show_tail, show_corr_history, show_population_halo,
                                              fig_name='X', scale_deaths=np.sqrt):
    """Scatter cumulative lockdown vs scaled cumulative deaths/M per location.

    When ``show_corr_history`` is True, ``final_day`` must be a list of two
    dates (two panels A/B plus an inset C with the correlation history);
    otherwise a single-panel figure is drawn for one ``final_day``.
    Saves Figure<fig_name>.pdf (plus per-panel CSVs) and returns the figure.
    """
    def draw_pareto_fronts_(ax, finals, n_fronts, optimal):
        # Draw up to n_fronts successive Pareto fronts over the final points.
        # `optimal` selects the direction (best vs worst trade-offs) and the
        # line style (dotted vs dashed).
        fronts = []
        for i in range(n_fronts):
            # Locations already consumed by previously extracted fronts.
            fronts_locations = [__ for _ in fronts for __ in _]
            finals_remaining = [(*im_de, loc) for loc, im_de in finals.items()
                                if loc not in fronts_locations and loc not in OUT_OF_FRONT]
            front = pareto_front(finals_remaining, optimal)
            fronts.append(front)
        for front_i, front in enumerate(fronts):
            # Successive fronts are drawn progressively lighter.
            color = sns.set_hls_values('gray', l=0.1 + 0.04*(max(0, front_i - 1*optimal)))  # TMP: was 0.15+0.1*
            front_coords = np.array([finals[loc] for loc in front]).T
            if len(front_coords.T) > 1:
                ax.plot(*front_coords, ':' if optimal else '--', c=color, alpha=0.8,
                        linewidth=1.1 if optimal else 0.8)
            else:
                # Single-point front: draw a small quarter-arc around the point
                # instead of a line (direction depends on `optimal`).
                if optimal:
                    front_coords = [[front_coords[0][0] + 0.707*180 + 180*np.cos((180 + i)/360*2*3.14159),
                                     front_coords[1][0] + 0.8 + 1.2*np.sin((180 + i)/360*2*3.14159)]
                                    for i in range(0, 91, 10)]
                else:
                    front_coords = [[front_coords[0][0] - 0.707*180 + 180*np.cos((180 + i)/360*2*3.14159),
                                     front_coords[1][0] - 0.8 + 1.2*np.sin((180 + i)/360*2*3.14159)]
                                    for i in range(180+0, 180+91, 10)]
                ax.plot(*np.array(front_coords).T, ':' if optimal else '--', c=color, alpha=0.8,
                        linewidth=1.1 if optimal else 0.8, clip_on=False)

    def make_subplot_(ax, trajs, locations, final_day, show_fronts, panel_letter=None):
        # Draw one scatter panel; optionally also the monthly history "tails",
        # Pareto fronts, and correlation annotations.
        adjust_spines(ax, ['left', 'bottom'], left_shift=10)
        ax.set_xlim((0, 8e3))
        ax.set_xlabel(r'Cumulative lockdown')
        ax.set_ylabel(r'$\sqrt{\textrm{\sf Cumulative deaths/M}}$')
        make_sqrt_deaths_yaxis(ax)
        # plot "flares" (tails are optional)
        finals = {}
        for loc in locations:
            # Monthly cumulative (immobilization, deaths/M) history; the last
            # entry is the "final" point drawn as a dot.
            im, de = extract_cumulative_immobilization_and_deaths(trajs, loc, 'monthly').values.T
            de = scale_deaths(de)
            put_final_dot(ax, loc, im[-1], de[-1], show_population_halo=show_population_halo)
            if show_tail:
                color = color_of(loc)
                darker_color = darken(color_of(loc))
                alpha = 0.7
                ax.plot(im, de, '-', linewidth=0.8, alpha=alpha, color=color)
                for i in range(1, len(im)):
                    # Cycle through three marker shapes going back in time.
                    m, ms = [('s', 1.7), ('D', 1.55), ('p', 2.2)][i % 3]
                    ax.plot(im[-1 - i], de[-1 - i], '.', marker=m, markersize=ms,
                            fillstyle=None, markeredgewidth=0.33, markerfacecolor=darken(color, 0.9),
                            markeredgecolor=darker_color, alpha=alpha)
                ax.plot(im[-1], de[-1], '.', marker='o', markersize=1., markeredgewidth=0,
                        markerfacecolor=darker_color, alpha=alpha)
            finals[loc] = (im[-1], de[-1])
        if show_fronts:
            draw_pareto_fronts_(ax, finals, n_fronts=3+2, optimal=True)
            draw_pareto_fronts_(ax, finals, n_fronts=2, optimal=False)
        # annotation: last day
        ax.annotate(str('Date:' if show_corr_history else 'Last day:') + \
                    f" {final_day.strftime('%B %d, %Y')}", xy=(0.0, 1.01), xycoords='axes fraction',
                    color=ANNOT_COLOR)
        # annotation: correlation coefficients
        values = np.array(list(finals.values()))
        weights = np.array([population(loc) for loc in finals.keys()])
        rho, wrho = correlations(values, weights)
        ax.annotate(r'Correlation:',
                    xy=(0.0, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
        ax.annotate(r"(non-weighted) Pearson's $\rho$ = " + f"{rho:.2f}",
                    xy=(0.16 - 0.03*show_tail, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
        ax.annotate(r"population-weighted Pearson's $\rho$ = " + f"{wrho:.2f}",
                    xy=(0.16 - 0.03*show_tail, 0.94), xycoords='axes fraction', color=ANNOT_COLOR)
        # export coordinates
        if panel_letter is not None:
            csv_fn = f"Figure{fig_name}{panel_letter}.csv"
            np.savetxt(csv_fn, values, header='lockdown,sqrt_deaths', delimiter=',')

    cols = ['mobility', 'new_deaths']
    # set up the figure
    if show_corr_history:
        # Two panels (A, B) for the two dates in `final_day`, plus inset C
        # with the correlation history over the last 9 months.
        fig, axes = plt.subplots(ncols=2, figsize=(11, 5))
        for i, fday in enumerate(final_day):
            last_avail_day, trajs = jointly_trimmed_trajs(trajectories, locations, cols, force_end=fday)
            assert fday <= last_avail_day
            panel_letter = chr(ord('A') + i)
            make_subplot_(axes[i], trajs, locations, fday, show_fronts=show_fronts and i>0,
                          panel_letter=panel_letter)
            axes[i].annotate(r'\large\textbf{' + panel_letter + r'}',
                             xy=(-0.175, 1.04), xycoords='axes fraction', clip_on=False)
        # Inset C: correlation coefficients at the first day of each of the
        # last 9 months.
        ax = axes[1].inset_axes([0.92, 0.09, 0.45, 0.2])
        adjust_spines(ax, ['left', 'bottom'], left_shift=7)
        ax.annotate(r'\large\textbf{C}', xy=(-0.275, 1.06), xycoords='axes fraction', clip_on=False)
        x, y1, y2 = [], [], []
        for i in range(9):
            points, weights = [], []
            for loc in locations:
                # i-th monthly sample counted from the end of the history.
                im_de = extract_cumulative_immobilization_and_deaths(trajs, loc, 'monthly').iloc[-1 - i]
                points.append([im_de[0], scale_deaths(im_de[1])])
                weights.append(population(loc))
            points = np.array(points)
            rho, wrho = correlations(points, weights)
            x.append(im_de.name)
            y1.append(rho)
            y2.append(wrho)
        ax.xaxis.set_major_formatter(dts.DateFormatter('%b'))  # %d
        ax.yaxis.set_major_locator(tckr.MultipleLocator(0.1))
        ax.plot(x, y2, '.-', linestyle='dotted', linewidth=0.5, color='#333333', markersize=7,
                markerfacecolor='#00000000', markeredgecolor='black', markeredgewidth=0.5,
                label=r'population-weighted $\rho$')
        ax.plot(x, y1, '.-', linestyle='dashed', linewidth=0.5, color='#333333', markersize=5.5,
                label=r'non-weighted $\rho$')
        ax.set_ylim((0.5, 0.9))
        ax.set_xlabel(r'First days of months of 2020')
        ax.set_ylabel(r"Pearson's $\rho$")
        ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.48), fancybox=False, fontsize=6.75)
        for item in (ax.xaxis.label, ax.yaxis.label): item.set_fontsize(7.00)
        for label in (ax.get_xticklabels() + ax.get_yticklabels()): label.set_fontsize(6.25)
    else:
        # Single-panel variant for one final day.
        last_avail_day, trajs = jointly_trimmed_trajs(trajectories, locations, cols, force_end=final_day)
        assert final_day <= last_avail_day
        fig, axes = plt.subplots(ncols=1, figsize=(6, 5))
        make_subplot_(axes, trajs, locations, final_day, show_fronts=False, panel_letter='_')
    # export
    fig.tight_layout()
    fn = f"Figure{fig_name}.pdf"  # _{last_day.strftime('%b%d')}
    fig.savefig(fn)
    print(f"Saved figure file {fn}.")
    return fig
def put_legend_cases(ax_leg, thr_weekly_cases_per_1M):
    """Draw the legend for the 'cases' snail plots into a dedicated axes.

    Shows three sample lines (high-incidence, low-incidence, no-testing-data)
    whose thickness encodes tests per case, plus captions explaining the
    ``thr_weekly_cases_per_1M`` split.
    """
    # Sample tests-per-case values encoded by line width, left to right.
    z = [3, 10, 30, 100, 300, 1000, 3000, 10000]
    x = np.array(list(range(len(z))))
    # Three rows of the legend (top to bottom).
    y1 = np.ones(len(x))*0.62
    y2 = np.ones(len(x))*0.31
    y3 = np.ones(len(x))*0.0
    ax_leg.set_xlim((0 +0, len(z)-1 -0))
    ax_leg.set_ylim((0, 1))
    # tracer line (same pink underlay used in the data panels)
    for y in [y1, y2, y3]:
        # Slightly inset the endpoints so round caps stay within bounds.
        xx = [float(x[0]) + 0.125] + list(x[1:-1]) + [float(x[-1]) - 0.125]
        ax_leg.plot(xx, y, linestyle='-', linewidth=0.75, alpha=1, solid_capstyle='round',
                    color='#ffaaee', clip_on=False, zorder=10)
    # variable thickness line (BEGIN) -- non-green (high incidence) sample row
    lwidths = [0.7 * (0 + np.log(z))]
    points = np.array([x, y1]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    for segi, seg in enumerate(segments):
        seg = seg.T
        # Lightness tracks the width so thicker segments are also darker.
        color = sns.set_hls_values(SNAIL_NONGREEN, l=0.15 + (lwidths[0][segi] - 0.)/8)
        ax_leg.plot(seg[0]+0.05, seg[1], '-', color=color, linewidth=lwidths[0][segi],
                    alpha=1, solid_capstyle='butt', zorder=20, clip_on=False)
    # variable thickness line (END) -- green (low incidence) sample row
    points = np.array([x, y2]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    for segi, seg in enumerate(segments):
        seg = seg.T
        el = min(1, 0.075 + ((lwidths[0][segi] - 0.)/7)**1.3)
        co = sns.set_hls_values(SNAIL_GREEN, l=el)
        ax_leg.plot(seg[0]+0.05, seg[1], '-', color=co, linewidth=lwidths[0][segi],
                    alpha=1, solid_capstyle='butt', zorder=20, clip_on=False)
    # dots + thin black guide marking segment boundaries
    for y in [y1, y2, y3]:
        xx, yy = x[:-1], y[:-1]
        ax_leg.scatter(xx + 0.5, yy, s=0.025, marker='o', facecolor='#000000', alpha=0.5,
                       clip_on=False, zorder=30)
        ax_leg.plot(xx + 0.5, yy, linestyle='--', linewidth=0.1, color='#000000', alpha=0.33,
                    clip_on=False, zorder=40)
    # Captions for the three rows.
    ax_leg.annotate(text=r'Tests per case:', xy=(0.5, 0.84), xycoords='axes fraction', fontsize=8,
                    ha="center", va="center")
    ax_leg.annotate(text=r'when \textbf{$>$ ' + str(thr_weekly_cases_per_1M) + r'} '
                         r'new cases /week /M', xy=(0.5, 0.62-0.09),
                    xycoords='axes fraction', fontsize=6.5, ha="center", va="center")
    ax_leg.annotate(text=r'when \textbf{$<$ ' + str(thr_weekly_cases_per_1M) + '} '
                         r'new cases /week /M', xy=(0.5, 0.31-0.09),
                    xycoords='axes fraction', fontsize=6.5, ha="center", va="center")
    ax_leg.annotate(text=r'no data on testing', xy=(0.5, 0.055), xycoords='axes fraction',
                    fontsize=6.5, ha="center", va="center")
    # Numeric labels above each segment (e.g. "10k" via the reversed
    # '000'->'k' trick, which only abbreviates a trailing thousand group).
    for vi, v in enumerate(z):
        for y in [y1, y2]:
            extra_shift = -0.08 if v in [100, 300, 1000] else 0
            ax_leg.annotate(text=f"{v}"[::-1].replace('000', 'k')[::-1], color='black',
                            xy=(x[vi]+extra_shift + 0.5, y[vi]+0.05+0.005*vi), xycoords='data',
                            fontsize=5.75, ha="center", va="center", zorder=30, clip_on=False)
def put_legend_deaths(ax_leg):
    """Draw the legend for the 'deaths' snail plots into a dedicated axes.

    Shows a single sample line whose thickness encodes cases per death,
    plus explanatory captions.
    """
    # Sample cases-per-death values encoded by line width, left to right.
    z = [1, 3, 10, 30, 100, 300]
    x = np.array(list(range(len(z))))
    y2 = np.ones(len(x))*0.37
    ax_leg.set_xlim((0-0.1, len(z)-1+0.1))
    ax_leg.set_ylim((0, 1))
    # variable thickness line (BEGIN)
    lwidths = [1*np.log(1 + np.array(z))]
    points = np.array([x, y2]).T.reshape(-1, 1, 2)
    segments = np.concatenate([points[:-1], points[1:]], axis=1)
    for segi, seg in enumerate(segments):
        seg = seg.T
        # Lightness tracks the width so thicker segments are also darker.
        el = 0.1 + (lwidths[0][segi] - 0.)/14
        color = sns.set_hls_values(SNAIL_ORANGE, l=el)
        ax_leg.plot(seg[0]-0.025, seg[1], '-', color=color, linewidth=lwidths[0][segi],
                    alpha=1, solid_capstyle='butt',
                    zorder=20, clip_on=False)
    # dots + thin black guide marking segment boundaries
    for y in [y2]:
        xx, yy = x[:-1], y[:-1]
        ax_leg.scatter(xx + 0.5, yy, s=0.025, marker='o', facecolor='black', alpha=0.5,
                       clip_on=False, zorder=30)
        ax_leg.plot(xx + 0.5, yy, linestyle='--', linewidth=0.1, color='black', alpha=0.33,
                    clip_on=False, zorder=40)
    # BUGFIX: annotate(s=...) was deprecated in matplotlib 3.3 and removed in
    # 3.4; use the text= keyword, as put_legend_cases() already does.
    ax_leg.annotate(text=r'Cases per death:', xy=(0.5, 0.63), xycoords='axes fraction', fontsize=8,
                    ha="center", va="center")
    ax_leg.annotate(text=r'when \textbf{at least 1} new death /week /M', xy=(0.5, 0.22),
                    xycoords='axes fraction', fontsize=6.5, ha="center", va="center")
    # Numeric labels above each segment.
    for vi, v in enumerate(z):
        for y in [y2]:
            ax_leg.annotate(text=f"{v}", xy=(x[vi] + 0.5, y[vi]+0.05 + 0.005*vi), xycoords='data',
                            fontsize=6, ha="center", va="center", zorder=30, clip_on=False,
                            color='black')
def plot_R_vs_mobility_reduction(trajs, locations, final_day, missing_days, fig_name, kind='cases',
                                 thr_weekly_cases_per_1M=20):
    """Small-multiples "snail" plot: reproduction number R vs mobility.

    One panel per location (6 per row), each tracing the (mobility, R)
    trajectory since 100 cumulative cases.  Line thickness encodes tests per
    case (kind='cases') or cases per death (kind='deaths'); for 'cases',
    green marks low-incidence stretches (below ``thr_weekly_cases_per_1M``).
    The last grid cell hosts the legend.  Saves the figure as
    Figure<fig_name>_<last_day>.pdf and returns it.
    """
    assert kind in ('cases', 'deaths')
    trajs_orig = trajs.copy()
    # Locations whose death plots are suppressed (too few deaths for a
    # meaningful Rt-from-deaths estimate).
    low_mortality_locations = ['Taiwan', 'Slovakia', 'New Zealand']
    mob_col, Rt_col = f"mobility_historical_{kind}", f"Rt_{kind}"
    last_day, trajs_trimmed = jointly_trimmed_trajs(trajs, locations, [mob_col, Rt_col],
                                                    force_end=final_day,
                                                    skipped=low_mortality_locations)
    def by_per_capita(cc):
        # Sort key: per-capita totals; the +1e6*is_USA_state() term pushes all
        # USA states ahead of (i.e. groups them apart from) the countries.
        if kind == 'cases':
            assert last_day in trajs[cc].index, \
                print(f"Day {last_day} not available for {cc} that ends on",
                      trajs[cc].tail(1).index)
            return trajs[cc].loc[last_day, f"total_{kind}"] / population(cc) + 1e6*is_USA_state(cc)
        elif kind == 'deaths':
            if cc in low_mortality_locations:
                # Effectively zero, so these panels sort last within the group.
                return trajs[cc].loc[last_day, f"total_{kind}"] / 1e9 + 1e6*is_USA_state(cc)
            else:
                return trajs[cc].loc[last_day, f"total_{kind}"] / population(cc) + 1e6*is_USA_state(cc)
    locations = sorted(locations, key=by_per_capita, reverse=True)
    facecolor = '#f8f6f4'
    ncols = 6
    nrows = (len(locations))//ncols + 1
    fig, _ = plt.subplots(nrows=nrows, ncols=ncols, figsize=(8/5*ncols, 8/6*nrows))
    for ci, country in enumerate(locations):
        ax = fig.axes[ci]
        ax.set_facecolor(facecolor)
        # PLOT: deaths in low-mortality locations -- text-only placeholder panel.
        # BUGFIX: annotate(s=...) was removed in matplotlib 3.4; use text=,
        # consistent with the annotate() calls later in this function.
        if kind == 'deaths' and country in low_mortality_locations:
            ax.annotate(text=country, xy=(0.5, 0.88), xycoords='axes fraction', fontsize=9,
                        color='#666666', ha="center", va="center", clip_on=False, zorder=100)
            total = trajs_orig[country].loc[last_day, f"total_{kind}"]
            ax.annotate(text="{:d} {:s} in total".format(int(round(total)), kind),
                        xy=(0.5, 0.77), xycoords='axes fraction', fontsize=6.5, color='#666666',
                        ha="center", va="center", clip_on=False, zorder=100)
            ax.annotate(text="(plot not shown)",
                        xy=(0.5, 0.67), xycoords='axes fraction', fontsize=6.5, color='#666666',
                        ha="center", va="center", clip_on=False, zorder=100)
            adjust_spines(ax, ['left', 'bottom'] if ax.is_first_col() else ['bottom'])
            ax.set_xticks(())
            continue
        # PLOT: X-axis (label only on the bottom row)
        row_i = ci//ncols
        if row_i == nrows-1:
            ax.set_xlabel('Mobility', labelpad=-1)
        ax.set_xlim((-100, 0))
        ax.set_xticks((-100, 0))
        #ax.xaxis.set_major_formatter(tckr.PercentFormatter(decimals=0))
        ax.set_xticklabels((r'$-100\%$', r'$0\%$'))
        # PLOT: Y-axis (label only in the first column)
        if ax.is_first_col():
            ax.set_ylabel(r'$R$')
        ax.set_ylim((0, 4))
        ax.yaxis.set_major_locator(tckr.MultipleLocator(1))
        # Reference line at the epidemic threshold R = 1.
        ax.axhline(1, linestyle='--', linewidth=0.5, color='#666666')
        # DATA
        df = trajs_trimmed[country].copy()
        # DATA: begin each trajectory since 100 cumulative cases
        min_cumul = 100
        above_min_cumul_indices = df['total_cases'] >= min_cumul  # cases even if kind == 'deaths'
        df = df[above_min_cumul_indices]
        # DATA: nullify missing days to obtain visual discontinuities
        for missing_day in missing_days[country]:
            if df.index[0] <= missing_day and missing_day <= FINAL_DAY:
                df.at[missing_day, mob_col] = np.nan  # cannot be pd.NA because used in mpl.plot
                df.at[missing_day, Rt_col] = np.nan   # cannot be pd.NA because used in mpl.plot
        df.sort_index(inplace=True)
        if kind == 'cases':  # ==---
            # PLOT: pink tracer line under the variable-thickness segments
            ax.plot(*df[[mob_col, Rt_col]].values.T, linestyle='-', linewidth=0.75, alpha=1,
                    solid_capstyle='round', color='#ffaaee', clip_on=True, zorder=10)
            # DATA: partition trajectory into temporally-ordered stretches of
            # low ("green") vs high incidence, split at the weekly threshold.
            df_freq = df[f"new_{kind}"].ffill().rolling(window=7, min_periods=7, **ROLL_OPTS).sum()\
                      / population(country)
            assert len(df_freq) == len(df)
            green_indices = df[df_freq < thr_weekly_cases_per_1M].index
            nongreen_indices = df[df_freq >= thr_weekly_cases_per_1M].index
            green_stretches, nongreen_stretches = [], []
            last_index_is_green = None
            for index, value in df.iterrows():
                if index in green_indices:
                    if last_index_is_green is None or last_index_is_green == False:
                        green_stretches += [ [index] ]
                    elif last_index_is_green == True:
                        green_stretches[-1] += [index]
                    last_index_is_green = True
                elif index in nongreen_indices:
                    if last_index_is_green is None or last_index_is_green == True:
                        if green_stretches:
                            green_stretches[-1] += [index]  # extra point for smooth joins
                        nongreen_stretches += [ [index] ]
                    elif last_index_is_green == False:
                        nongreen_stretches[-1] += [index]
                    last_index_is_green = False
            stretches = [( g, SNAIL_GREEN ) for g in green_stretches] \
                        + [(ng, SNAIL_NONGREEN) for ng in nongreen_stretches]
            def by_first_day(cs): return cs[0][0]
            stretches = sorted(stretches, key=by_first_day)
            # PLOT: variable thickness line (width ~ log of tests per case)
            for stretch, color in stretches:
                x, y = df.loc[stretch, [mob_col, Rt_col]].values.T
                points = np.array([x, y]).T.reshape(-1, 1, 2)
                segments = np.concatenate([points[:-1], points[1:]], axis=1)
                tests_per_hit = df.loc[stretch, 'tests_per_hit'].values
                # Cap extreme/infinite testing ratios before taking the log.
                np.place(tests_per_hit, np.isinf(tests_per_hit) | (tests_per_hit > 10000), 10000)
                z = 0.7*np.log(0 + tests_per_hit)
                np.place(z, np.isnan(z), 0)
                np.place(z, np.isinf(z), 1000)
                np.place(z, z < 0, 0)
                lwidths = [z]
                for segi, seg in enumerate(segments):
                    seg = seg.T
                    # Lightness tracks the width so thicker segments are darker.
                    if kind == 'cases': el = 0.15 + lwidths[0][segi] / 8
                    else: el = 0.10 + lwidths[0][segi] / 14
                    co = sns.set_hls_values(color, l=el)
                    ax.plot(seg[0], seg[1], '-', color=co, linewidth=lwidths[0][segi],
                            alpha=1, solid_capstyle='round', zorder=20)
        elif kind == 'deaths':  # ==---
            days_back = 14
            x, y = df[[mob_col, Rt_col]].values.T
            points = np.array([x, y]).T.reshape(-1, 1, 2)
            segments = np.concatenate([points[:-1], points[1:]], axis=1)
            de = df[['new_deaths14']]
            ca = df[['new_cases14' ]]
            # Shift cases forward so each death count is compared with the
            # cases observed ~2 weeks earlier.
            ca = ca.set_index( ca.index.shift(+days_back, freq ='D') )  # <-- THIS
            #de = de.set_index( de.index.shift(-days_back, freq ='D') )  # <-- not this
            z = de.join(ca)
            z['cases14_per_death14'] = z['new_cases14'] / z['new_deaths14']
            z = z['cases14_per_death14'].values
            np.place(z, np.isnan(z), 0)
            np.place(z, np.isinf(z), 1000)
            np.place(z, z < 0, 0)
            lwidths = [1*np.log(1 + z)]
            for segi, seg in enumerate(segments):
                seg = seg.T
                if kind == 'cases': el = 0.15 + lwidths[0][segi] / 8
                else: el = 0.10 + lwidths[0][segi] / 14
                co = sns.set_hls_values(SNAIL_ORANGE, l=el)
                ax.plot(seg[0], seg[1], '-', color=co, linewidth=lwidths[0][segi],
                        alpha=1, solid_capstyle='round', zorder=20)
        # PLOT: dots + thin black
        x, y = df[[mob_col, Rt_col]].values.T
        ax.scatter(x, y, s=0.025, marker='o', facecolor='#000000', alpha=0.5, clip_on=True, zorder=30)
        ax.plot(x, y, linestyle='--', linewidth=0.1, color='#000000', alpha=0.33, zorder=40)
        # PLOT: panel title (with a halo so it stays readable over the data)
        ax.annotate(text=country, xy=(0.5, 0.88), xycoords='axes fraction', fontsize=9, ha="center",
                    va="center", clip_on=False, zorder=100,
                    path_effects=[pthff.Stroke(linewidth=2, foreground=facecolor), pthff.Normal()])
        pop = population(country)
        total_per_1M = trajs_orig[country].loc[last_day, f"total_{kind}"] / pop
        heading = "{:d} {:s}/M".format(int(round(total_per_1M)), kind)
        ax.annotate(text=heading, xy=(0.5, 0.77), xycoords='axes fraction', fontsize=6.5,
                    ha="center", va="center", clip_on=False, zorder=100,
                    path_effects=[pthff.Stroke(linewidth=1.33, foreground=facecolor),
                                  pthff.Normal()])
        adjust_spines(ax, ['left', 'bottom'] if ax.is_first_col() else ['bottom'])
        set_ticks_lengths(ax)
    # PLOT: legend in the last (bottom-right) grid cell
    for ax in fig.axes:
        if ax.is_last_row() and ax.is_last_col():
            ax.set_axis_off()
    if kind == 'cases':
        put_legend_cases(fig.axes[-1], thr_weekly_cases_per_1M)
    elif kind == 'deaths':
        put_legend_deaths(fig.axes[-1])
    # PLOT: export and return
    fig.tight_layout(w_pad=0.4, h_pad=0.15)
    l, b, w, h = fig.axes[-1].get_position().bounds
    fig.axes[-1].set_position([l, b - 0.0185, w, h])
    fig.axes[-1].annotate('Last day:' + f" {final_day.strftime('%B %d, %Y')}",
                          xy=(0.0, 1.01), xycoords='axes fraction', color=ANNOT_COLOR)
    fn = f"Figure{fig_name}_{last_day.strftime('%b%d')}.pdf"
    fig.savefig(fn)
    print(f"Saved figure file {fn}.")
    return fig
def plot_cumulative_immobilization_and_gdp_drop(trajectories, locations, final_day, gdp_2020h1,
                                                fig_name):
    """Scatter cumulated H1-2020 mobility reduction vs year-on-year GDP loss.

    Locations absent from ``gdp_2020h1`` are reported and skipped.  Draws a
    least-squares trend line and correlation annotations, exports the point
    coordinates as Figure<fig_name>.csv, saves Figure<fig_name>.pdf and
    returns the figure.
    """
    df = pd.DataFrame(columns='location cumul_2020H1_mobility_reduction gdp_2020H1_drop'.split())
    df = df.set_index('location')
    for loc in locations:
        if loc not in gdp_2020h1:  # idiomatic form of "not loc in"
            print(f"{loc}: missing GDP data in figure {fig_name}")
            continue
        # gdp_2020h1 stores growth; negate to get the loss.
        gdp_drop = -gdp_2020h1[loc]
        immob, _ = extract_cumulative_immobilization_and_deaths(trajectories, loc, 'daily').loc[final_day]
        df.loc[loc] = [immob, gdp_drop]
    fig, ax = plt.subplots(figsize=(5, 5))
    adjust_spines(ax, ['left', 'bottom'], left_shift=10)
    set_ticks_lengths(ax)
    ax.set_xlabel(r'Cumulated mobility reduction in the 1\textsuperscript{st} half of 2020')
    ax.set_ylabel(r'GDP loss in the 1\textsuperscript{st} half of 2020 (year-on-year \%)')
    ax.set_xlim((0, 5000))
    ax.set_ylim((-2, 14))
    # Least-squares trend line spanning the full x-range.
    slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(*df.values.T)
    ax.plot([0, 5000], [intercept, intercept + slope*5000],
            linewidth=0.75, linestyle='--', color='#aaaaaa', zorder=5)
    weights = []
    for _, row in df.iterrows():
        location = row.name
        color = color_of(location)
        mob_red, gdp_drop = row[['cumul_2020H1_mobility_reduction', 'gdp_2020H1_drop']]
        ax.scatter([mob_red], [gdp_drop], color=color, zorder=10)
        ax.annotate(text=location.replace('United Kingdom', 'UK'),
                    xy=(mob_red + 49, gdp_drop + 0.028), xycoords='data',
                    color=sns.set_hls_values(color, l=0.3), fontsize=7, zorder=10)
        weights.append(population(location))
    rho, wrho = correlations(df.values, weights)
    ax.annotate(r'Correlation:',
                xy=(0.0, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
    ax.annotate(r"(non-weighted) Pearson's $\rho$ = " + f"{rho:.2f}",
                xy=(0.15, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
    ax.annotate(r"population-weighted Pearson's $\rho$ = " + f"{wrho:.2f}",
                xy=(0.15, 0.94), xycoords='axes fraction', color=ANNOT_COLOR)
    # export coordinates
    csv_fn = f"Figure{fig_name}.csv"
    np.savetxt(csv_fn, df.values, header='lockdown,gdp_loss', delimiter=',')
    # export image as PDF
    fig.tight_layout()
    fn = f"Figure{fig_name}.pdf"
    fig.savefig(fn)
    print(f"Saved figure file {fn}.")
    return fig
def plot_gdp_drop_and_excess_deaths(trajectories, locations, final_day, excess_deaths, gdp_2020h1,
                                    fig_name, scale_deaths=np.sqrt):
    """Scatter H1-2020 GDP loss vs scaled COVID-19-related deaths per 1M.

    For each location the death count is max(excess deaths, reported deaths);
    locations without excess-death data fall back to reported deaths only.
    Correlations are reported for all locations and for Europe alone.
    Exports the coordinates as CSVs, saves Figure<fig_name>.pdf and returns
    the figure.

    :param scale_deaths: transform applied to deaths/M before plotting
        (default sqrt, matching the axis from make_sqrt_deaths_yaxis)
    """
    fig, ax = plt.subplots(figsize=(5, 5))
    adjust_spines(ax, ['left', 'bottom'], left_shift=10)
    ax.set_xlabel(r'GDP loss in the 1\textsuperscript{st} half of 2020 (year-on-year \%)')
    ax.set_ylabel(r'$\sqrt{\textrm{\sf COVID-19-related deaths in the 1\textsuperscript{st} half of 2020 / M}}$')
    ax.set_xlim((-2, 14))
    make_sqrt_deaths_yaxis(ax)
    ed_locations = excess_deaths.keys()
    points, weights = [], []
    points_eur, weights_eur = [], []
    for loc in locations:
        if population(loc) < MIN_POPULATION_M or loc == 'Serbia':
            print(f"{loc} skipped in figure {fig_name}")
            continue
        if loc not in ed_locations:
            print(f"{loc} in figure {fig_name}: deaths will be used in place of excess deaths")
        if loc not in gdp_2020h1:
            print(f"{loc} skipped in figure {fig_name} because of missing GDP data")
            continue
        # Non-European entries: USA states plus the listed non-EU countries.
        is_in_Europe = loc not in STATE_TO_ABBREV and loc not in ['Canada', 'Taiwan', 'Japan', 'South Korea']
        deaths = max(excess_deaths[loc] if loc in excess_deaths else 0,
                     trajectories[loc].loc[final_day]['total_deaths'])
        # BUGFIX: honor the scale_deaths parameter (np.sqrt was hard-coded here,
        # silently ignoring the argument).
        x, y = -gdp_2020h1[loc], scale_deaths(deaths / population(loc))
        put_final_dot(ax, loc, x, y, show_population_halo=True, label_shifting=False,
                      italic=not is_in_Europe)
        points.append([x, y])
        weights.append(population(loc))
        if is_in_Europe:
            points_eur.append([x, y])
            weights_eur.append(population(loc))
    values, values_eur = np.array(points), np.array(points_eur)
    rho, wrho = correlations(values, weights)
    rho_eur, wrho_eur = correlations(values_eur, weights_eur)
    ax.annotate(r'Correlation:',
                xy=(-0.01, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
    ax.annotate(r"(non-weighted) Pearson's $\rho$ = " + f"{rho:.2f} (Europe-only: {rho_eur:.2f})",
                xy=(0.155, 0.97), xycoords='axes fraction', color=ANNOT_COLOR)
    ax.annotate(r"population-weighted Pearson's $\rho$ = " + f"{wrho:.2f} (Europe-only: {wrho_eur:.2f})",
                xy=(0.155, 0.94), xycoords='axes fraction', color=ANNOT_COLOR)
    # export coordinates
    csv_fn = f"Figure{fig_name}_all.csv"
    np.savetxt(csv_fn, values, header='gdp_loss,sqrt_deaths', delimiter=',')
    csv_fn = f"Figure{fig_name}_eur.csv"
    np.savetxt(csv_fn, values_eur, header='gdp_loss,sqrt_deaths', delimiter=',')
    # export image as PDF
    fig.tight_layout()
    fn = f"Figure{fig_name}.pdf"
    fig.savefig(fn)
    print(f"Saved figure file {fn}.")
    return fig
if __name__ == '__main__':
    # Load the preprocessed inputs produced by the data-preparation stage.
    with gzip.open('processed_data.dill.gz', 'rb') as f:
        trajectories, locations, final_day, missing_days, excess_deaths, gdp_2020h1 = dill.load(f)
    print('Locations count:', len(locations))
    # Mid-year cut-off used by most figures.
    jul01 = pd.to_datetime('2020-07-01')
    # Figure 1: lockdown vs deaths at two dates, with Pareto fronts and
    # the correlation-history inset.
    fig1 = plot_cumulative_immobilization_and_deaths(trajectories, locations, [jul01, final_day],
        show_fronts=True, show_tail=False, show_corr_history=True, show_population_halo=True,
        fig_name='1')
    # Figure S1: single-date variant with monthly history tails.
    figS1 = plot_cumulative_immobilization_and_deaths(trajectories, locations, final_day,
        show_fronts=False, show_tail=True, show_corr_history=False, show_population_halo=False,
        fig_name='S1')
    # Figure 2: R vs mobility snail plots (cases variant).
    fig2 = plot_R_vs_mobility_reduction(trajectories, locations, jul01, missing_days, fig_name='2')
    # Figure 4: lockdown vs GDP loss.
    fig4 = plot_cumulative_immobilization_and_gdp_drop(trajectories, locations, jul01, gdp_2020h1,
        fig_name='4')
    # Figure 5: GDP loss vs (excess) deaths.
    fig5 = plot_gdp_drop_and_excess_deaths(trajectories, locations, jul01, excess_deaths,
        gdp_2020h1, fig_name='5')
| 1.726563 | 2 |
website/src/contact_form/models.py | iamcholo/videoplatform | 0 | 12798239 | from django.db import models
from django.utils.translation import ugettext_lazy as _
from utilities.models import BaseDateTime
class Contact(BaseDateTime):
    """A message submitted through the public contact form.

    Timestamps (created/updated) come from the ``BaseDateTime`` base class;
    ``get_latest_by = 'created'`` below relies on that field.
    """
    title = models.CharField(
        _('TITLE_LABEL'),
        max_length=255
    )
    name = models.CharField(
        _('NAME_LABEL'),
        max_length=100
    )
    email = models.EmailField(
        _('EMAIL_LABEL'),
        max_length=255
    )
    body = models.TextField(_('MESSAGE_LABEL'))

    def __unicode__(self):
        # Python 2 string representation (kept for backward compatibility).
        return self.name

    def __str__(self):
        # BUGFIX: __unicode__ is ignored on Python 3, so without __str__ the
        # admin and shell would show the generic "Contact object" repr.
        return self.name

    class Meta:
        verbose_name = _('CONTACTS_TITLE')
        verbose_name_plural = _('CONTACTS_TITLE_PLURAL')
        get_latest_by = 'created'
        ordering = ('-id',)
        db_table = 'contact_form_contacts'
        app_label = 'contact_form'
| 2.234375 | 2 |
danlp/datasets/ddisco.py | alexandrainst/DaNLP | 1 | 12798240 | <gh_stars>1-10
import os
import pandas as pd
from danlp.download import DEFAULT_CACHE_DIR, download_dataset, _unzip_process_func, DATASETS
class DDisco:
    """
    Loader for the DDisco dataset.

    DDisco contains user-generated texts from Reddit and Wikipedia, each
    annotated for discourse coherence with one of three labels:

    * 1: low coherence
    * 2: medium coherence
    * 3: high coherence

    :param str cache_dir: the directory for storing cached models
    :param bool verbose: `True` to increase verbosity
    """

    def __init__(self, cache_dir: str = DEFAULT_CACHE_DIR):
        self.dataset_name = 'ddisco'
        self.file_extension = DATASETS[self.dataset_name]['file_extension']
        self.dataset_dir = download_dataset(self.dataset_name,
                                            process_func=_unzip_process_func,
                                            cache_dir=cache_dir)

    def load_with_pandas(self):
        """
        Loads the DDisco dataset in dataframes with pandas.

        :return: 2 dataframes -- train, test
        """
        def read_split(split):
            # Files are named "<dataset>.<split><ext>" inside the cache dir.
            filename = f"{self.dataset_name}.{split}{self.file_extension}"
            path = os.path.join(self.dataset_dir, filename)
            return pd.read_csv(path, sep='\t', index_col=0, encoding='utf-8').dropna()

        return read_split('train'), read_split('test')
| 2.890625 | 3 |
test2/stalking/stalking.py | gr0mph/OceanOfCode | 0 | 12798241 | <filename>test2/stalking/stalking.py
import sys
sys.path.append('../../')
# Global variables
from test2.test_main import TREASURE_MAP
# From OceanOfCode
# Class
from OceanOfCode import StalkAndLegal
from OceanOfCode import StalkAndTorpedo
from OceanOfCode import Submarine
from OceanOfCode import Board
# Global
from OceanOfCode import EMPTY_SYMBOLS
from OceanOfCode import DIRS
# Method
from OceanOfCode import manhattan
from OceanOfCode import update_order
import unittest
class _stalking(unittest.TestCase):
    """Exploratory tests for StalkAndTorpedo opponent tracking.

    Only ``test_read_silence`` is picked up by unittest discovery: every
    other method is deliberately prefixed with a single underscore so it is
    skipped.  Each method feeds a sequence of observed opponent orders into a
    StalkAndTorpedo tracker and prints ``len(me.inp)`` -- the number of
    remaining candidate opponent states -- so the pruning can be inspected
    manually rather than asserted.
    """

    def _set_up(self):
        # Disabled: sanity-check that set_up populates the candidate set.
        me = StalkAndTorpedo(None)
        me.set_up((1,2,3,4,5,6,7,8,9),TREASURE_MAP)
        print(len(me.inp))

    def _read_move(self):
        # Disabled: feed a fixed N/E/S/S/W move sequence; each observed move
        # should shrink (or keep) the candidate set.
        me = StalkAndTorpedo(None)
        me.set_up((1,2,3,4,5,6,7,8,9),TREASURE_MAP)
        print(len(me.inp))
        read_move = StalkAndTorpedo.read_move
        me.update(read_move,['N'])
        me = StalkAndTorpedo(me)  # rewrap to advance to the next turn state
        print(len(me.inp))
        me.update(read_move,['E'])
        me = StalkAndTorpedo(me)
        print(len(me.inp))
        me.update(read_move,['S'])
        me = StalkAndTorpedo(me)
        print(len(me.inp))
        me.update(read_move,['S'])
        me = StalkAndTorpedo(me)
        print(len(me.inp))
        me.update(read_move,['W'])
        me = StalkAndTorpedo(me)
        print(len(me.inp))

    def _read_surface(self):
        # Disabled: repeated SURFACE observations; also inspects the life
        # counter of one surviving candidate board.
        me = StalkAndTorpedo(None)
        me.set_up((1,2,3,4,5,6,7,8,9),TREASURE_MAP)
        read_surface = StalkAndTorpedo.read_surface
        for i in range(0,5):
            me.update(read_surface,TREASURE_MAP)
            me = StalkAndTorpedo(me)
        board, stalk = next(iter(me.inp))
        print(len(me.inp))
        print(board.life)
        me.update(read_surface,TREASURE_MAP)
        me = StalkAndTorpedo(me)
        print()
        print(len(me.inp))

    def _read_torpedo(self):
        # Disabled: two observed torpedo shots narrow candidates to positions
        # within torpedo range of the targets.
        me = StalkAndTorpedo(None)
        me.set_up((1,2,3,4,5,6,7,8,9),TREASURE_MAP)
        print(len(me.inp))
        read_torpedo = StalkAndTorpedo.read_torpedo
        me.update(read_torpedo,(8,4))
        me = StalkAndTorpedo(me)
        print(len(me.inp))
        me.update(read_torpedo,(6,6))
        me = StalkAndTorpedo(me)
        print(len(me.inp))

    def test_read_silence(self):
        # Active test: torpedo + move observations followed by a SILENCE
        # order (read_silence2), which should expand the candidate set.
        me = StalkAndTorpedo(None)
        me.set_up((1,2,3,4,5,6,7,8,9),TREASURE_MAP)
        print(len(me.inp))
        read_torpedo = StalkAndTorpedo.read_torpedo
        read_silence = StalkAndTorpedo.read_silence2
        read_move = StalkAndTorpedo.read_move
        me.update(read_torpedo,(8,4))
        me = StalkAndTorpedo(me)
        print(len(me.inp))
        me.update(read_torpedo,(5,6))
        me = StalkAndTorpedo(me)
        print(len(me.inp))
        me.update(read_move,['S'])
        me = StalkAndTorpedo(me)
        print(len(me.inp))
        me.update(read_silence,None)
        me = StalkAndTorpedo(me)
        print(len(me.inp))

    def _small_read_and_update(self):
        # Disabled: drive the tracker through update_order() with simple
        # single-order strings.
        me = StalkAndTorpedo(None)
        me.set_up((1,2,3,4,5,6,7,8,9),TREASURE_MAP)
        print(len(me.inp))
        for c1, f1, d1 in update_order('MOVE N'):
            print("c1 {} f1 {} d1 {}".format(c1,f1,d1))
            me.update(f1,d1)
            me = StalkAndTorpedo(me)
            print(len(me.inp))
        for c1, f1, d1 in update_order('TORPEDO 0 0|MOVE E'):
            print("c1 {} f1 {} d1 {}".format(c1,f1,d1))
            me.update(f1,d1)
            me = StalkAndTorpedo(me)
            print(len(me.inp))
        for c1, f1, d1 in update_order('SURFACE 1'):
            print("c1 {} f1 {} d1 {}".format(c1,f1,d1))
            me.update(f1,d1)
            me = StalkAndTorpedo(me)
            print(len(me.inp))

    def _medium_read_and_update(self):
        # Disabled: a compound order string; f1 may be None for orders with
        # no update handler.  NOTE(review): set_up is called with a single
        # argument here, unlike the other tests -- confirm intended arity.
        me = StalkAndTorpedo(None)
        me.set_up(TREASURE_MAP)
        print(len(me.inp))
        for c1, f1, d1 in update_order('MOVE N|SURFACE 5|TORPEDO 11 1|SILENCE'):
            print("c1 {} f1 {} d1 {}".format(c1,f1,d1))
            if f1 is not None:
                me.update(f1,d1)
                me = StalkAndTorpedo(me)
                print(len(me.inp))

    def _na_read(self):
        # Disabled: 'NA' order string should parse without any update.
        me = StalkAndTorpedo(None)
        me.set_up((1,2,3,4,5,6,7,8,9),TREASURE_MAP)
        print(len(me.inp))
        for c1, f1, d1 in update_order('NA'):
            print("c1 {} f1 {} d1 {}".format(c1,f1,d1))

    def _silence(self):
        # Disabled: prints the map, then compares a MOVE|SILENCE sequence
        # with a MOVE|MOVE sequence.
        for t_r in TREASURE_MAP:
            print(t_r)
        me = StalkAndTorpedo(None)
        me.set_up((1,2,3,4,5,6,7,8,9),TREASURE_MAP)
        print(len(me.inp))
        for c1, f1, d1 in update_order('MOVE N|SILENCE'):
            print("c1 {} f1 {} d1 {}".format(c1,f1,d1))
            if f1 is not None:
                me.update(f1,d1)
                me = StalkAndTorpedo(me)
                print(len(me.inp))
        for c1, f1, d1 in update_order('MOVE N|MOVE N'):
            print("c1 {} f1 {} d1 {}".format(c1,f1,d1))
            if f1 is not None:
                me.update(f1,d1)
                me = StalkAndTorpedo(me)
                print(len(me.inp))
unittest.main()
| 2.890625 | 3 |
segme/backbone/port/big_transfer/tests/test_applications_predict.py | shkarupa-alex/segme | 2 | 12798242 | import numpy as np
from absl.testing import parameterized
from keras.preprocessing import image
from keras.utils import data_utils
from tensorflow.python.platform import test
from ..bit import BiT_S_R50x1, BiT_S_R50x3, BiT_S_R101x1, BiT_S_R101x3, BiT_S_R152x4
from ..bit import BiT_M_R50x1, BiT_M_R50x3, BiT_M_R101x1, BiT_M_R101x3, BiT_M_R152x4
from ..bit import preprocess_input
# BiT-S (ImageNet-1k) variants exercised by the prediction tests.
MODEL_LIST_S = [
    BiT_S_R50x1,
    # Bad weights
    # BiT_S_R50x3, BiT_S_R101x1,
    BiT_S_R101x3, BiT_S_R152x4
]
# BiT-M (ImageNet-21k) variants; their predict tests are commented out below.
MODEL_LIST_M = [BiT_M_R50x1, BiT_M_R50x3, BiT_M_R101x1, BiT_M_R101x3, BiT_M_R152x4]
# Remote sample image shared by all prediction tests.
TEST_IMAGE_PATH = ('https://storage.googleapis.com/tensorflow/'
                   'keras-applications/tests/elephant.jpg')
# Size of the ImageNet-1k label space.
_IMAGENET_CLASSES = 1000
class ApplicationsLoadWeightTest(test.TestCase, parameterized.TestCase):
    """Prediction smoke tests for the BiT-S application models.

    Each parameterized case builds one model, checks its output shape, runs
    the shared elephant image through it at an odd (224) and an even (299)
    input size, and verifies that the top-1 prediction is one of the
    accepted ImageNet labels (348 or 386 -- presumably elephant-related
    classes; TODO confirm class names).
    """

    @parameterized.parameters(*MODEL_LIST_S)
    def test_application_predict_odd_s(self, app):
        # 224x224 -- the "odd" input-size path.
        model = app()
        _assert_shape_equal(model.output_shape, (None, _IMAGENET_CLASSES))
        x = _get_elephant((224, 224))
        x = preprocess_input(x)
        preds = model.predict(x)
        label = np.argmax(preds[0], axis=-1)
        self.assertIn(label, [348, 386])

    @parameterized.parameters(*MODEL_LIST_S)
    def test_application_predict_even_s(self, app):
        # 299x299 -- the "even" input-size path.
        model = app()
        _assert_shape_equal(model.output_shape, (None, _IMAGENET_CLASSES))
        x = _get_elephant((299, 299))
        x = preprocess_input(x)
        preds = model.predict(x)
        label = np.argmax(preds[0], axis=-1)
        self.assertIn(label, [348, 386])

    # BiT-M (ImageNet-21k, 21843 classes) variants are currently disabled.
    # @parameterized.parameters(*MODEL_LIST_M)
    # def test_application_predict_odd_m(self, app):
    #     model = app()
    #     _assert_shape_equal(model.output_shape, (None, 21843))
    #     x = _get_elephant((224, 224))
    #     x = preprocess_input(x)
    #     preds = model.predict(x)
    #     label = np.argmax(preds[0], axis=-1)
    #     self.assertIn(label, [3671, 3673, 3674])
    #
    #
    # @parameterized.parameters(*MODEL_LIST_M)
    # def test_application_predict_even_m(self, app):
    #     model = app()
    #     _assert_shape_equal(model.output_shape, (None, 21843))
    #     x = _get_elephant((299, 299))
    #     x = preprocess_input(x)
    #     preds = model.predict(x)
    #     label = np.argmax(preds[0], axis=-1)
    #     self.assertIn(label, [3671, 3673, 3674])
def _get_elephant(target_size):
    """Download the shared elephant test image as a (1, H, W, C) float array.

    Models that don't include a Flatten step accept variable-size inputs even
    when loading ImageNet weights; they report None as the spatial dimension,
    in which case we default to 299x299.
    """
    if target_size[0] is None:
        target_size = (299, 299)
    local_path = data_utils.get_file('elephant.jpg', TEST_IMAGE_PATH)
    pil_img = image.load_img(local_path, target_size=tuple(target_size))
    array = image.img_to_array(pil_img)
    return np.expand_dims(array, axis=0)
def _assert_shape_equal(shape1, shape2):
if len(shape1) != len(shape2):
raise AssertionError(
'Shapes are different rank: %s vs %s' % (shape1, shape2))
if shape1 != shape2:
raise AssertionError('Shapes differ: %s vs %s' % (shape1, shape2))
# Dispatch to the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
    test.main()
| 2.421875 | 2 |
nnunet/inference/validate_nifti_folder.py | PawelPeczek/Abdomen-CT-Image-Segmentation | 15 | 12798243 | <reponame>PawelPeczek/Abdomen-CT-Image-Segmentation
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.evaluation.evaluator import aggregate_scores
def validate(folder, gt_folder):
    """Evaluate predicted segmentations against ground-truth nifti files.

    Pairs every ``.nii.gz`` file in *folder* with the identically named file
    in *gt_folder* and aggregates segmentation metrics into
    ``<folder>/summary.json`` via :func:`aggregate_scores`.

    Args:
        folder: directory containing predicted nifti files. Its path also
            encodes the task name (4th path component from the end).
        gt_folder: directory containing ground-truth nifti files with the
            same file names as the predictions.
    """
    patient_ids = subfiles(folder, suffix=".nii.gz", join=False)

    # Build (prediction, ground-truth) path pairs; files are matched by name.
    pred_gt_tuples = []
    for p in patient_ids:
        file = join(folder, p)
        gt_file = join(gt_folder, p)
        pred_gt_tuples.append([file, gt_file])

    # The task identifier is encoded in the folder layout:
    # .../<task>/<a>/<b>/<folder-name>
    task = folder.split("/")[-4]
    job_name = 'ensemble fullres and lowres'  # fixed typo: was 'esembly'
    # NOTE(review): label count is hard-coded -- confirm it matches the task.
    num_classes = 3

    _ = aggregate_scores(pred_gt_tuples, labels=list(range(num_classes)),
                         use_label=None,
                         json_output_file=join(folder, "summary.json"),
                         json_name=job_name + folder.split("/")[-2],
                         json_author="Bety",
                         json_task=task, num_threads=3)
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(
        description="Computes metrics scores for validation folder containing nifti files")
    parser.add_argument('-f', '--folder', type=str,
                        help="Folder with nifti files to evaluate", required=True)
    # validate() dereferences the ground-truth folder unconditionally, so a
    # missing -gtf previously crashed with a None path; make it required.
    parser.add_argument('-gtf', '--gtfolder', type=str,
                        help="Folder with GT nifti files.", required=True)
    args = parser.parse_args()
    folder = args.folder
    gt_folder = args.gtfolder
    validate(folder, gt_folder)
movement/test_steering.py | pwoosam/JaloPy | 0 | 12798244 | #!/usr/bin/env python3
# Sweep test for a steering servo driven through a PCA9685 PWM controller.
from Adafruit_PCA9685 import PCA9685
import time

pwm = PCA9685()

# Servo pulse limits in PCA9685 ticks (out of 4096 per PWM period).
# NOTE(review): assumes the controller's default PWM frequency -- confirm
# against the servo's expected pulse widths.
servo_min = 250
servo_max = 450

pulse = servo_min
increasing = True
step_size = 1

# Ping-pong the pulse width between servo_min and servo_max forever,
# stepping once every 10 ms and printing the current value.
while True:
    pwm.set_pwm(0, 0, pulse)
    if pulse < servo_max and increasing:
        pulse += step_size
        increasing = True
    elif pulse > servo_min:
        # Either the top of the range was reached or we are already
        # sweeping downward.
        pulse -= step_size
        increasing = False
    else:
        # Bottom of the range: reverse direction and sweep up again.
        pulse += step_size
        increasing = True
    time.sleep(0.01)
    print(pulse)

# Dead code: hard min/max toggle test, deliberately disabled via `while False`
# (unreachable anyway because the loop above never terminates).
while False:
    pwm.set_pwm(0, 0, servo_min)
    time.sleep(0.5)
    pwm.set_pwm(0, 0, servo_max)
    time.sleep(0.5)

# Unreachable: would stop the PWM output on channel 0.
pwm.set_pwm(0, 0, 0)
| 3.21875 | 3 |
drones/app/routers/__init__.py | codeshard/drones-api | 0 | 12798245 | from .deliveries import * # noqa
from .drones import * # noqa
from .medications import * # noqa
| 1.046875 | 1 |
tests/test_sockets.py | initbar/SIPd | 1 | 12798246 | # MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# https://github.com/initbar/sipd
import re
import unittest
from src.sockets import *
from src.parser import *
class TestSockets(unittest.TestCase):
    """Unit tests for src.sockets address discovery and UDP allocation."""

    #
    # ip address
    #

    def test_sockets_get_server_address(self):
        """get_server_address() returns a string matching the IPv4 regex."""
        ip_address = get_server_address()
        self.assertTrue(ip_address != "no IP found")
        self.assertTrue(REGX_IPV4.match(ip_address))

    #
    # udp sockets
    #

    def test_sockets_unsafe_allocate_udp_socket_empty_both(self):
        """Allocation fails when both host and port are empty."""
        self.assertFalse(unsafe_allocate_udp_socket("", ""))

    def test_sockets_unsafe_allocate_udp_socket_empty_host(self):
        """Allocation fails when the host is empty."""
        self.assertFalse(unsafe_allocate_udp_socket("", 8080))

    def test_sockets_unsafe_allocate_udp_socket_empty_port(self):
        """Allocation fails when the port is empty."""
        self.assertFalse(unsafe_allocate_udp_socket("127.0.0.1", ""))

    def test_sockets_unsafe_allocate_udp_socket_hostname_1(self):
        """Allocation fails for a malformed (3-octet) IPv4 address."""
        self.assertFalse(unsafe_allocate_udp_socket("127.0.1", 8080))

    def test_sockets_unsafe_allocate_udp_socket_hostname_2(self):
        """Allocation fails for a misspelled hostname."""
        self.assertFalse(unsafe_allocate_udp_socket("localhose", 8080))

    def test_sockets_unsafe_allocate_udp_socket_hostname_3(self):
        """Allocation fails for a malformed (5-octet) IPv4 address."""
        self.assertFalse(unsafe_allocate_udp_socket("0.0.0.0.0", 8080))

    def test_sockets_unsafe_allocate_udp_socket(self):
        """A client can connect to a successfully allocated random UDP socket."""
        with safe_allocate_random_udp_socket() as udp_socket:
            socket_port = udp_socket.getsockname()[1]
            with safe_allocate_udp_client() as udp_client:
                udp_client.connect(("127.0.0.1", socket_port))
| 2 | 2 |
Indian Sign Language Recognation For Static and Dynamic Gestures/ISL-CNN-Model.py | itsjaysuthar/Projetcts | 0 | 12798247 | <filename>Indian Sign Language Recognation For Static and Dynamic Gestures/ISL-CNN-Model.py
# CNN classifier for 36 static Indian Sign Language gestures, trained on
# 110x110 grayscale images read from the 'ISL Gestures DataSet' directory.

# Importing all the libraries
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout

# Initialising the CNN
classifier = Sequential()

# Step 1 - Convolution: 16 filters of size 3x3 over 110x110x1 input
classifier.add(Conv2D(16, kernel_size=3, activation='relu', input_shape=(110, 110, 1)))

# Step 2 - Max Pooling
classifier.add(MaxPooling2D(pool_size = (2,2)))

# Adding extra convolution layers
classifier.add(Conv2D(16, kernel_size=3, activation='relu'))
classifier.add(MaxPooling2D(pool_size = (2,2)))
# A larger third conv block was tried and disabled:
#classifier.add(Conv2D(256, kernel_size=2, activation='relu'))
#classifier.add(MaxPooling2D(pool_size = (2,2)))

# Step 3 - Flatten
classifier.add(Flatten())

# Step 4 - Fully Connected Layer: 128 hidden units, then 36 output classes
classifier.add(Dense(128, activation='relu'))
classifier.add(Dropout(0.3))
classifier.add(Dense(36, activation='softmax'))

# Compile the Model
classifier.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# Keras Image Preprocessing: rescale pixel values to [0, 1]
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(rescale=1./255)
#test_datagen = ImageDataGenerator(rescale=1./255)

# Stream grayscale training images from disk, one image per batch.
train_generator = train_datagen.flow_from_directory(
    'ISL Gestures DataSet',
    target_size=(110, 110),
    batch_size=1,
    color_mode='grayscale',
    class_mode='categorical')
#validation_generator = test_datagen.flow_from_directory(
#        'data/validation',
#        target_size=(150, 150),
#        batch_size=32,
#        class_mode='binary')

# Train: with batch_size=1, steps_per_epoch=7920 presumably equals the
# dataset size, i.e. one full pass per epoch -- TODO confirm.
# NOTE(review): fit_generator is deprecated in modern Keras in favour of
# Model.fit, which accepts generators directly.
classifier.fit_generator(
    train_generator,
    steps_per_epoch=7920,
    epochs=5)

# Save the Model
# NOTE(review): pickling a Keras model via joblib is fragile across Keras
# versions; classifier.save(...) is the supported persistence path -- confirm
# downstream loaders before changing.
import joblib
joblib.dump(classifier, 'ISL-CNN-Model')
Hackerearth Set/TheOldMonk.py | Siddharth2016/PYTHON3_prog | 2 | 12798248 | <gh_stars>1-10
# THE OLD MONK
# For each test case: given arrays A and B of length N, print the maximum
# j - i such that A[i] <= B[k] for every k in i..j; the scan from i stops at
# the first j with A[i] > B[j].
# NOTE(review): O(N^2). If the problem guarantees non-increasing arrays
# (as in the classic "old monk" task), a two-pointer O(N) solution exists --
# confirm constraints before optimizing.
for _ in range(int(input())):
    N = int(input())
    A = [int(a) for a in input().split()]
    B = [int(a) for a in input().split()]
    res = 0
    mx = 0  # best j - i found so far
    for i in range(N):
        for j in range(i,N,1):
            if A[i]>B[j]:
                break
            res = j-i
        # res carrying over from a previous i is harmless: mx already
        # accounts for it.
        if res>mx:
            mx = res
    print(mx)
| 2.828125 | 3 |
payment/tests/integ/test_api.py | Al-bambino/aws-serverless-ecommerce-platform | 758 | 12798249 | <filename>payment/tests/integ/test_api.py
import uuid
import pytest
import requests
from fixtures import iam_auth # pylint: disable=import-error
from helpers import get_parameter # pylint: disable=import-error,no-name-in-module
@pytest.fixture(scope="module")
def payment_3p_api_url():
    """Base URL of the third-party payment API, resolved once per module."""
    return get_parameter("/ecommerce/{Environment}/payment-3p/api/url")
@pytest.fixture(scope="module")
def payment_api_url():
    """Base URL of the payment service API, resolved once per module."""
    return get_parameter("/ecommerce/{Environment}/payment/api/url")
def test_backend_validate(payment_3p_api_url, payment_api_url, iam_auth):
    """
    Test /backend/validate

    Happy path: a token pre-authorized for the exact total validates
    with ok == True.
    """
    card_number = "1234567890123456"
    total = 3000

    # Create a payment token by pre-authorizing with the 3p service.
    res_3p = requests.post(payment_3p_api_url+"/preauth", json={
        "cardNumber": card_number,
        "amount": total
    })
    payment_token = res_3p.json()["paymentToken"]

    # Validate the token against the same amount.
    res = requests.post(
        payment_api_url+"/backend/validate",
        auth=iam_auth(payment_api_url),
        json={
            "paymentToken": payment_token,
            "total": total
        }
    )
    assert res.status_code == 200
    body = res.json()
    assert "ok" in body
    assert "message" not in body
    assert body["ok"] == True

    # Cleanup: release the pre-authorization on the 3p service.
    requests.post(payment_3p_api_url+"/cancelPayment", json={
        "paymentToken": payment_token
    })
def test_backend_validate_non_existent(payment_3p_api_url, payment_api_url, iam_auth):
    """
    Test /backend/validate with a non-existent token

    A random UUID that was never pre-authorized must fail validation
    (HTTP 200 with ok == False).
    """
    # Restored from a redacted/broken `str(<KEY>())` call: generate a token
    # that is guaranteed not to exist in the 3p service.
    payment_token = str(uuid.uuid4())
    total = 3000

    # Validate the unknown token.
    res = requests.post(
        payment_api_url+"/backend/validate",
        auth=iam_auth(payment_api_url),
        json={
            "paymentToken": payment_token,
            "total": total
        }
    )
    assert res.status_code == 200
    body = res.json()
    assert "ok" in body
    assert "message" not in body
    assert body["ok"] == False
def test_backend_validate_smaller_total(payment_3p_api_url, payment_api_url, iam_auth):
    """
    Test /backend/validate with a smaller total

    Charging less than the pre-authorized amount is accepted (ok == True),
    per the asserted behavior below.
    """
    card_number = "1234567890123456"
    total = 3000

    # Create a payment token pre-authorized for `total`.
    res_3p = requests.post(payment_3p_api_url+"/preauth", json={
        "cardNumber": card_number,
        "amount": total
    })
    payment_token = res_3p.json()["paymentToken"]

    # Validate the token against an amount 1000 below the pre-authorization.
    res = requests.post(
        payment_api_url+"/backend/validate",
        auth=iam_auth(payment_api_url),
        json={
            "paymentToken": payment_token,
            "total": total-1000
        }
    )
    assert res.status_code == 200
    body = res.json()
    assert "ok" in body
    assert "message" not in body
    assert body["ok"] == True

    # Cleanup: release the pre-authorization.
    requests.post(payment_3p_api_url+"/cancelPayment", json={
        "paymentToken": payment_token
    })
def test_backend_validate_higher_total(payment_3p_api_url, payment_api_url, iam_auth):
    """
    Test /backend/validate with a higher total

    Charging more than the pre-authorized amount must be rejected
    (HTTP 200 with ok == False).
    """
    card_number = "1234567890123456"
    total = 3000

    # Create a payment token pre-authorized for `total`.
    res_3p = requests.post(payment_3p_api_url+"/preauth", json={
        "cardNumber": card_number,
        "amount": total
    })
    payment_token = res_3p.json()["paymentToken"]

    # Validate the token against an amount 2000 above the pre-authorization.
    res = requests.post(
        payment_api_url+"/backend/validate",
        auth=iam_auth(payment_api_url),
        json={
            "paymentToken": payment_token,
            "total": total+2000
        }
    )
    assert res.status_code == 200
    body = res.json()
    assert "ok" in body
    assert "message" not in body
    assert body["ok"] == False

    # Cleanup: release the pre-authorization.
    requests.post(payment_3p_api_url+"/cancelPayment", json={
        "paymentToken": payment_token
    })
def test_backend_validate_no_iam(payment_3p_api_url, payment_api_url):
    """
    Test /backend/validate without IAM authorization

    Calling the endpoint without signing the request must be rejected
    with HTTP 403 and an error message.
    """
    amount = 3000

    # Pre-authorize a payment with the 3p service to obtain a token.
    preauth_payload = {"cardNumber": "1234567890123456", "amount": amount}
    preauth_response = requests.post(payment_3p_api_url+"/preauth", json=preauth_payload)
    token = preauth_response.json()["paymentToken"]

    # Validate without any IAM auth: expect a 403 with a message and no "ok".
    validate_response = requests.post(
        payment_api_url+"/backend/validate",
        json={"paymentToken": token, "total": amount},
    )
    assert validate_response.status_code == 403
    payload = validate_response.json()
    assert "ok" not in payload
    assert "message" in payload

    # Cleanup: release the pre-authorization.
    requests.post(payment_3p_api_url+"/cancelPayment", json={"paymentToken": token})
def test_backend_validate_no_total(payment_3p_api_url, payment_api_url, iam_auth):
    """
    Test /backend/validate without an total

    Omitting the "total" field must yield a 400 whose message names the
    missing field.
    """
    card_number = "1234567890123456"
    total = 3000

    # Create a payment token pre-authorized for `total`.
    res_3p = requests.post(payment_3p_api_url+"/preauth", json={
        "cardNumber": card_number,
        "amount": total
    })
    payment_token = res_3p.json()["paymentToken"]

    # Validate with the "total" field missing from the request body.
    res = requests.post(
        payment_api_url+"/backend/validate",
        auth=iam_auth(payment_api_url),
        json={
            "paymentToken": payment_token
        }
    )
    assert res.status_code == 400
    body = res.json()
    assert "ok" not in body
    assert "message" in body
    assert "total" in body["message"]

    # Cleanup: release the pre-authorization.
    requests.post(payment_3p_api_url+"/cancelPayment", json={
        "paymentToken": payment_token
    })
def test_backend_validate_no_payment_token(payment_3p_api_url, payment_api_url, iam_auth):
    """
    Test /backend/validate without a payment token

    Omitting "paymentToken" must yield a 400 whose message names the
    missing field.
    """
    card_number = "1234567890123456"
    total = 3000

    # Create a payment token pre-authorized for `total` (only so it can be
    # cancelled during cleanup; it is deliberately not sent below).
    res_3p = requests.post(payment_3p_api_url+"/preauth", json={
        "cardNumber": card_number,
        "amount": total
    })
    payment_token = res_3p.json()["paymentToken"]

    # Validate with the "paymentToken" field missing from the request body.
    res = requests.post(
        payment_api_url+"/backend/validate",
        auth=iam_auth(payment_api_url),
        json={
            "total": total
        }
    )
    assert res.status_code == 400
    body = res.json()
    assert "ok" not in body
    assert "message" in body
    assert "paymentToken" in body["message"]

    # Cleanup: release the pre-authorization.
    requests.post(payment_3p_api_url+"/cancelPayment", json={
        "paymentToken": payment_token
    })
e2xgrader/tests/utils/test_extra_cells.py | divindevaiah/e2xgrader | 2 | 12798250 | <filename>e2xgrader/tests/utils/test_extra_cells.py
import nbformat
import unittest
from e2xgrader.models import PresetModel
from e2xgrader.utils.extra_cells import (
is_extra_cell,
is_multiplechoice,
is_singlechoice,
get_choices,
get_num_of_choices,
clear_choices,
has_solution,
)
from ..test_utils.test_utils import create_temp_course
class TestExtraCells(unittest.TestCase):
    """Tests for extra-cell helpers, driven by preset-generated notebook cells."""

    def setUp(self):
        # Build a throwaway course so PresetModel can load question presets.
        tmp_dir, coursedir = create_temp_course()
        self.tmp_dir = tmp_dir
        self.model = PresetModel(coursedir)
        self.multiplechoice = "Multiple Choice"

    def test_extra_cell(self):
        """A plain code cell is not an extra cell; a preset cell is."""
        assert not is_extra_cell(nbformat.v4.new_code_cell())
        cells = self.model.get_question_preset(self.multiplechoice)
        assert is_extra_cell(cells[0])

    def test_multiplechoice_cell(self):
        """The Multiple Choice preset produces a multiple-choice cell."""
        cells = self.model.get_question_preset(self.multiplechoice)
        assert is_multiplechoice(cells[0])

    def test_singlechoice_cell(self):
        """The Single Choice preset produces a single-choice cell."""
        cells = self.model.get_question_preset("Single Choice")
        assert is_singlechoice(cells[0])

    def test_get_choices(self):
        """get_choices reflects the metadata's choice list."""
        cells = self.model.get_question_preset(self.multiplechoice)
        assert len(get_choices(cells[0])) == 0
        # Inject choices directly into the cell metadata.
        cells[0].metadata.extended_cell.choice = [1, 2, 3]
        assert len(get_choices(cells[0])) == 3
        assert all([i in get_choices(cells[0]) for i in [1, 2, 3]])

    def test_get_num_of_choices(self):
        """Single-choice cells report None; the MC preset reports 3."""
        cells = self.model.get_question_preset("Single Choice")
        assert get_num_of_choices(cells[0]) is None
        cells = self.model.get_question_preset(self.multiplechoice)
        assert get_num_of_choices(cells[0]) == 3

    def test_clear_choices(self):
        """clear_choices empties a previously populated choice list."""
        cells = self.model.get_question_preset(self.multiplechoice)
        cells[0].metadata.extended_cell.choice = [1, 2, 3]
        clear_choices(cells[0])
        assert len(get_choices(cells[0])) == 0

    def test_has_solution(self):
        """A freshly generated multiple-choice preset has no solution yet."""
        cells = self.model.get_question_preset(self.multiplechoice)
        assert not has_solution(cells[0])

    def tearDown(self):
        # Remove the temporary course directory created in setUp.
        self.tmp_dir.cleanup()
| 2.515625 | 3 |
Subsets and Splits