from nltk import pos_tag, word_tokenize, RegexpParser, ngrams, FreqDist
from nltk.collocations import BigramCollocationFinder
from nltk.corpus import wordnet
from nltk.stem import WordNetLemmatizer, PorterStemmer
from preprocessing.analyzer import ThesaurusExpansionAnalyzer, WikimediaAnalyzer
from preprocessing.utils import clean
from enum import Enum
from nltk.tree import Tree
from functools import reduce
import operator
from math import log
from whoosh.analysis import StemmingAnalyzer
from searching.fragmenter import Fragmenter, PhraseTokenizer
import re
import math
from pywsd import disambiguate, adapted_lesk
# from pke.unsupervised import TopicRank
def get_cosine(vec1, vec2):
intersection = set(vec1.keys()) & set(vec2.keys())
numerator = sum([vec1[x] * vec2[x] for x in intersection])
sum1 = sum([vec1[x] ** 2 for x in vec1.keys()])
sum2 = sum([vec2[x] ** 2 for x in vec2.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator
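# Illustrative worked example: for vec1 = {'data': 2, 'mining': 1} and vec2 = {'data': 1, 'science': 1}
# the numerator is 2*1 = 2 and the denominator is sqrt(5) * sqrt(2), so get_cosine(vec1, vec2) ≈ 0.63.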
class POSTag(Enum):
J = wordnet.ADJ
V = wordnet.VERB
N = wordnet.NOUN
A = wordnet.ADV
# ALL = 'ALL'
@classmethod
def to_wordnet(cls, nltk_pos):
for pos in cls:
if nltk_pos.startswith(pos.name):
return pos
return cls.N # TODO check. Tags with no matching POS are treated as nouns. Maybe None?
def lemmatizer(tokens):
w_lemmatizer = WordNetLemmatizer()
return [w_lemmatizer.lemmatize(token, POSTag.to_wordnet(pos).value) for (token, pos) in pos_tag(tokens)]
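# Illustrative (the exact output depends on the NLTK tagger/lemmatizer in use):
# lemmatizer(word_tokenize("the cats were running")) is expected to give roughly ['the', 'cat', 'be', 'run'].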
def extract(tokens, tags=None):
if tags is None:
tags = [POSTag.J, POSTag.N]
t = [token for token in pos_tag(tokens) if POSTag.to_wordnet(token[1][0]) in tags]
return list(filter(None, t))
def stemming(tokens):
stemmer = PorterStemmer()
return [stemmer.stem(t) for t in tokens]
def pke_key_phrase_extract(text, n=10):
# TopicRank comes from the optional `pke` package; the module-level import is commented out, so import it lazily here.
from pke.unsupervised import TopicRank
# create a TopicRank extractor
extractor = TopicRank()
# load the content of the document, here in CoreNLP XML format
# the input language is set to English (used for the stoplist)
# normalization is set to stemming (computed with Porter's stemming algorithm)
extractor.load_document(text,
language="en",
normalization='stemming')
# select the keyphrase candidates, for TopicRank the longest sequences of
# nouns and adjectives
extractor.candidate_selection(pos={'NOUN', 'PROPN', 'ADJ'})
# weight the candidates using a random walk. The threshold parameter sets the
# minimum similarity for clustering, and the method parameter defines the
# linkage method
extractor.candidate_weighting(threshold=0.74,
method='average')
# print the n-highest (10) scored candidates
return extractor.get_n_best(n=n, stemming=True)
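# Illustrative usage (requires the optional `pke` package):
# pke_key_phrase_extract("Query expansion improves retrieval effectiveness in web search.", n=3)
# is expected to return a list of (keyphrase, score) tuples ranked by TopicRank.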
def thesaurus_expand(query, wikimedia, size=3, threshold=4.23):
"""
Wordent hierarchy
- hyponyms concepts that are more specific (immediate), navigate down to the tree
- hypernyms general concept, navigate up the hierarchy
- meronyms components. For instance a tree have trunk, ...so on as meronym
- holonyms things that contain meronyms (i.e. tree)
Query expansion require good relevance feedback methods. Using a thesaurus based query expansion might decrease
performance and has query drift problems with polysemic words. This method picks up keyword from gloss of the synsets
and uses a lesk algorithm to disambiguate terms from each other
:param query:
:return:
"""
analyzer = ThesaurusExpansionAnalyzer()
wikimedia_analyzer = WikimediaAnalyzer()
original_tokens = [i.text for i in analyzer(query)]
# original_tokens = set([i.text for i in query.all_tokens()])
print(original_tokens)
synonyms = set()
rule = r"""
NBAR: {<NN>}
{<JJ>}
# {<JJS>}
{<NNS>}
# {<NNP>}
"""
synsets = []
# for i in original_tokens:
# for s in wordnet.synsets(i):
# for h in s.hypernyms():
# print(s, h , s.wup_similarity(h))
# for i in original_tokens:
# for s in wordnet.synsets(i):
# print(s.definition())
for w, s in disambiguate(" ".join(original_tokens), algorithm=adapted_lesk):
if s:
definition = s.definition()
pke_text = definition + ' ' + ' '.join(s.lemma_names())
# print(pke_key_phrase_extract(pke_text))
tokens = [i.text for i in wikimedia_analyzer(definition)]
synsets.append((w, wordnet.synset(s.name()), tokens))
for word, sense, definition in synsets:
if sense:
synonyms = synonyms.union(noun_groups(word_tokenize(sense.definition()), chunk_size=1, rule=rule))
text = " ".join([i.name() for i in sense.lemmas()])
for lemma in wikimedia_analyzer(text):
if lemma.text not in original_tokens:
synonyms.add(lemma.text)
# vfor tok in wikimedia_analyzer(lemma.text):
# print(tok.text)
# if tok.text not in original_tokens:
# synonyms.add(tok.text)
# for token in tokens: for _, original_sense, _ in synsets: for child_synset in wordnet.synsets(token):
# if child_synset: # definition = [i.text for i in analyzer(child_synset.definition())] # pywsd. score =
# wordnet.synset(original_sense.name()).path_similarity(child_synset, simulate_root=False) print(
# child_synset, child_synset.definition(), original_sense, score)
# print(tokens)
# print([j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.simple_lesk)], '\n',
# [j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.adapted_lesk)], '\n',
# [j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.cosine_lesk)], '\n',
# [j.definition() for i, j in pywsd.disambiguate(query, algorithm=pywsd.max_similarity)])
# if len(_concept) > 0:
# concept, similarity_strength = _concept[0]
# if similarity_strength > 0.7:
# __retrieve_definition_groupings(synsets)
# else:
# print(__retrieve_definition_groupings(synsets))
# disambiguated_senses = disambiguate(query, algorithm=adapted_lesk)
# print(disambiguated_senses, '\n\n', simple_lesk, '\n\n', resnik_wsd(word_tokenize(query)), '\n')
# for token in original_tokens:
# senses = wordnet.synsets(token, 'n')
# if len(senses) == 1:
# synonyms = synonyms.union(set(senses[0].lemma_names()))
# else:
#
# tokens += [i.text for i in analyzer(' '.join(list(synonyms)))]
# return original_tokens + [i for i in tokens if i not in original_tokens]
reader = wikimedia.reader
terms_vec = {}
for syn in synonyms:
score = calc_syn_score(syn, reader)
terms_vec[syn] = score
# else:
# terms_vec[syn] = 0
ranked_terms = sorted(terms_vec, key=lambda c: terms_vec[c], reverse=True)
print('***Ranked terms')
for i in list(map(lambda q: (q, terms_vec[q]), ranked_terms)):
print(i[0], ' ', i[1], '\n')
return list(map(lambda q: q[0], filter(lambda v: v[1] >= threshold, terms_vec.items())))
def calc_syn_score(syn, reader):
terms_vec = []
for i in word_tokenize(syn):
doc_frequency = reader.doc_frequency('text', i)
term_frequency = reader.frequency('text', i)
if doc_frequency != 0:
terms_vec.append(term_frequency / doc_frequency)
else:
terms_vec.append(0)
return max(terms_vec)
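# Illustrative (assumed index statistics): if a candidate token occurs 50 times in the collection
# and appears in 10 documents, its score is 50 / 10 = 5.0; for a multi-word synonym the
# highest-scoring token determines the synonym's score.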
def noun_groups(tokens, chunk_size=2, analyzer=StemmingAnalyzer(), rule=None):
grammar = r"""
NBAR: {<NN|JJ><JJ|NN>} # Nouns and Adjectives, terminated with Nouns
# {<NN>} # If pattern not found just a single NN is ok
"""
if rule is not None:
grammar = rule
cp = RegexpParser(grammar)
result = cp.parse(pos_tag(tokens))
nouns = set()
for chunk in result:
if type(chunk) == Tree:
if chunk.label() == 'NBAR':
words = list(map(lambda entry: entry[0], chunk.leaves()))
tokens = analyzer(" ".join(words))
nouns.add(" ".join([i.text for i in tokens]))
# nouns.add(tuple([i.text for i in tokens]))
else:
continue
# print('Leaf', '\n', chunk)
return nouns
class Passage:
""" Deprecated """
def __init__(self, doc, passage):
self._doc = doc
self._passage = passage
self.concept = []
def __repr__(self):
return f'{self._passage[0:3]}...[{len(self._passage)}] [{self._doc["title"]}]'
class DocStats:
"""
In-memory bigram index for text statistics
"""
def __init__(self, tokens):
self._bigram = BigramCollocationFinder.from_words(tokens)
@staticmethod
def _score_from_ngram(*args):
return args[0]
def _frequency(self, gram: tuple):
fd_score = self._bigram.score_ngram(self._score_from_ngram, *gram) or 0
bd_score = self._bigram.score_ngram(self._score_from_ngram, *gram[::-1]) or 0
return max(fd_score, bd_score)
def frequency(self, term: str):
grams = [i for i in ngrams(term.split(" "), 2)]
if len(grams) == 0: return self._bigram.word_fd[term]
return max([self._frequency(gram) for gram in grams])
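# Illustrative: DocStats(word_tokenize("the quick brown fox jumps over the lazy dog")).frequency("quick brown")
# returns 1 (the raw bigram count, checked in both word orders), while frequency("fox") falls back to the unigram count.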
def __count_docs_containing(c, docs):
docs_containing_c = list(filter(lambda f: f > 0, [d.frequency(c) for d in docs]))
return len(docs_containing_c)
def prod(products):
return reduce(operator.mul, products)
def _calculate_qterm_correlation(query_terms, concept, idf_c, docs):
for qterm, idf_i in query_terms:
N = len(docs)
# IDFc = max(1.0, log(N / npc, 10) / 5)
# IDFi = max(1.0, log(N / npi, 10) / 5)
y = 0.1
f = sum([doc_stat.frequency(qterm) * doc_stat.frequency(concept) for doc_stat in docs])
if f == 0:
yield y
else:
# print(f, N, y, idf_c, idf_i, concept, qterm)
yield (y + (log(f) * idf_c) / log(N)) ** idf_i
# yield d
def lca_expand(query, documents, size=15, passage_size=400, threshold=1.4):
"""
Implements the Local Context Analysis algorithm to expand query based on top ranked concept that
maximize the sim to the query
sim(q,c) = ∏ (y + (log(f(ci,ki) + IDFc) / log(n))^IDFi
where:
* f(ci, ki) = quantifies the correlation between the concept c and the query term ki:
and is given by: Σ pfi_j * pfc_j where pf(i,c)_j is the frequency of term ki or concept c in the j-th doc
* IDFc = inverse document frequency of concept c calculated as max(1, log_10(N/npc)/5)
IDFi = inverse document frequency of query term i calculated as max(1, log_10(N/npi)/5) to emphasizes infrequent query terms
where npc is number of documents containing the concept c nad npi number of docs containing the query term i
and N is number of documents
IDFi
* y is a smoothing constant set to 0.1 to avoid zeros values in the product calculation
A concept is a noun group of single, two, or three words.
"""
fragmenter = Fragmenter(max_size=passage_size)
query_terms = set([i.text for i in query.all_tokens()])
regex = re.compile(r"|".join(query_terms))
analyzer = StemmingAnalyzer()
concepts = set()
doc_stats = []
for doc in documents:
text = clean(doc['text']).lower()
fragment = fragmenter.merge_fragments(PhraseTokenizer().tokenize(text)[:3])
# fragment = fragmenter.merge_fragments(
# fragmenter.calculate_phrase_ranking(
# text,
# query_terms)[:3])
tokens = word_tokenize(fragment.text)
stemmed_tokens = [i.text for i in analyzer(text)]
key_terms = noun_groups(tokens)
concepts = concepts.union(key_terms)
doc_stats.append(DocStats(stemmed_tokens))
query_terms_with_idf = list()
for q in query_terms:
npi = __count_docs_containing(q, doc_stats)
if npi == 0:
query_terms_with_idf.append((q, 1))
else:
query_terms_with_idf.append((q, log(len(documents) / npi, 10) / 5))
concepts = set(filter(lambda c: len(c) > 2, concepts)) # Removing blank entries or spurious pos_tag entries
# tagged as NN
# breakpoint()
ranking = []
for concept in concepts:
if concept in query_terms: continue
N = len(documents)
npc = __count_docs_containing(concept, doc_stats) or 1
idf_c = max(1.0, log(N / npc, 10) / 5)
prods = _calculate_qterm_correlation(query_terms_with_idf, concept, idf_c, doc_stats)
sim = prod([i for i in prods])
ranking.append((concept, sim))
print(sorted(ranking, key=lambda c: c[1], reverse=True))
filtered = filter(lambda c: c[1] > threshold, ranking)
return list(map(lambda q: q[0], sorted(filtered, key=lambda c: c[1], reverse=True)))[:size]
# return [re.sub(regex, "", term).strip() for term in top_terms]
|
class Solution:
"""
@param n: An integer
@return: An integer
"""
def climbStairs(self, n):
# write your code here
if n < 1:
return 1
a = 1
b = 1
for i in xrange(n):
a, b = b, a + b
return a
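# Illustrative trace: climbStairs(4) steps (a, b) through (1, 2), (2, 3), (3, 5), (5, 8) and returns 5,
# the number of distinct ways to climb 4 stairs taking 1 or 2 steps at a time.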
|
import azaka
client = azaka.Client()
@client.register
async def main(ctx: azaka.Context) -> None:
interface = azaka.Interface(type=ctx.vn, flags=(azaka.Flags.BASIC,))
interface.set_condition(lambda VN: VN.SEARCH % "fate")
paginator: azaka.Paginator = azaka.Paginator(client, interface)
async for page in paginator:
if (
paginator.current_page_num < 3
): # Not necessary, i did it to avoid getting throttled.
print(page)
else:
break
client.start()
|
import sqlite3
from contextlib import closing
dados = [
("João","111-111"),
("Edinara","222-222"),
("Jonas","333-333"),
("Raquel","444-444")
]
with sqlite3.connect("agenda2.db") as conexao:
with closing(conexao.cursor()) as cursor:
cursor.execute("select * from agenda2 where nome = 'Jonas' ")
while True:
resultado = cursor.fetchone()
if resultado is None:
break
print(f"Nome: {resultado[0]}\nTelefone: {resultado[1]}") |
import discord
from discord.ext import commands
import os, sys, inspect
import asyncio
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
import botlib
import wowapi
DEVMODE = os.getenv("DEVMODE") == "TRUE" # Boolean flag for devmode
class Database(commands.Cog):
def __init__(self, client):
self.client = client
## On_Ready event for cog
@commands.Cog.listener()
async def on_ready(self):
print("Database is initialized.")
### db_members
@commands.command()
@commands.is_owner()
async def db_members(self, ctx):
membersList = wowapi.getAllTableRows("members")
msg = ""
for member in membersList:
print(member)
for x in range(len(member)):
msg += f"{member[x]} "
msg += "\n"
await ctx.send(msg)
### get_table_structure
@commands.command()
@commands.is_owner()
async def get_table_structure(self, ctx, table):
retList = wowapi.getTableStructure(table)
msg = ""
for item in retList:
for x in range(len(item)):
msg += f"{item[x]} "
msg += "\n"
await ctx.send(msg)
### get_table_contents
@commands.command()
@commands.is_owner()
async def get_table_contents(self, ctx, table):
retList = wowapi.getTableContents(table)
msgList = []
msg = "\n"
for item in retList:
curLen = len(msg)
newMsg = "\n"
for x in range(len(item)):
newMsg += f"{item[x]} "
if curLen + len(newMsg) < 2000:
msg += newMsg
else:
msgList.append(msg)
msg = newMsg
msgList.append(msg)
for message in msgList:
await ctx.send(message)
@commands.command(hidden=True)
@commands.is_owner()
async def recreate_members_table(self, ctx):
wowapi.initMembersTable()
# wowapi.updateAllMemberData()
await ctx.send("Members table is created and set to initial values.")
@commands.command()
@commands.is_owner()
async def recreate_full_database(self, ctx):
wowapi.initConfigTable()
wowapi.initMembersTable()
wowapi.initRaidmatsTable()
wowapi.initDTCacheTable()
# wowapi.updateAllMemberData()
await ctx.send("Fresh database initialized.")
@commands.command()
@commands.is_owner()
async def recreate_mythicplus_database(self, ctx):
wowapi.initMythicPlusTable()
await ctx.send("Mythic Plus database initialized.")
@commands.command()
@commands.is_owner()
async def recreate_raidmats_table(self, ctx):
wowapi.initRaidmatsTable()
# wowapi.updateAllMemberData()
await ctx.send("Raidmats table is created and set to initial values.")
@commands.command()
@commands.is_owner()
async def recreate_config_table(self, ctx):
wowapi.initConfigTable()
# wowapi.updateAllMemberData()
await ctx.send("Config table is created and set to initial values.")
## Initialize cog
def setup(client):
client.add_cog(Database(client))
|
#!/usr/bin/python3
class Person:
name = ''
age = 0
__weight = 0
def __init__(self, n, a, w):
self.name = n
self.age = a
self.__weight = w
def speak(self):
print('%s says: I am %d years old.' % (self.name, self.age))
class Student(Person):
grade = ''
def __init__(self, n, a, w, g):
Person.__init__(self, n,a,w)
self.grade = g
def speak(self):
print("%s 说: 我 %d 岁了,我在读 %d 年级" % (self.name, self.age, self.grade))
class Speaker():
topic = ''
name = ''
def __init__(self, n, t):
self.name = n
self.topic = t
def speak(self):
print("我叫 %s,我是一个演说家,我演讲的主题是 %s" % (self.name, self.topic))
class Sample(Speaker, Student):
a = ''
def __init__(self, n,a,w,g,t):
Student.__init__(self,n,a,w,g)
Speaker.__init__(self,n,t)
p = Person('runoob', 10, 30)
p.speak()
s = Student('ken',10,60,3)
s.speak()
sam = Sample('zw', 22, 120, 6, 'tw')
sam.speak()
|
# -*- coding: utf-8 -*-
from flask import Flask, request, abort, flash, redirect, render_template, url_for
from notes import factory as note_factory
## ######################################################################
## App configuration
##
# instantiate the application
app = Flask(__name__)
app.config.update(dict(
DATABASE='',
SECRET_KEY='dev-key-no-security-needed',
DEBUG=True,
))
app.config.from_envvar('WEBDROP_SETTINGS', silent=True)
db = None ## TODO - SQLAlchemy(app)
note_dom = note_factory(app.config, db)
## ######################################################################
## App Views
##
@app.route('/', methods=['GET'])
def index():
return render_template('home.j2')
@app.route('/', methods=['POST'])
def post():
print('TRACE - POST[ new-note ]')
k = request.form['key-text']
v = request.form['note-text']
note_dom.add(k, v)
flash("added: '{}'".format(k))
return redirect(url_for('index'))
@app.route('/list', methods=['GET'])
def list():
note_list = note_dom.list()
return render_template('list.j2', notes=note_list)
@app.route('/resetdb', methods=['GET'])
def resetdb():
note_dom.reset()
flash('DB Reset')
return redirect(url_for('index'))
@app.route('/<string:note_id>', methods=['GET'])
def get_note(note_id):
n = note_dom.get(note_id)
if n is None:
abort(404)
return render_template('list.j2', notes=[n])
## Local Variables:
## mode: python
## End:
|
from typing import List
class Solution:
def group_anagrams(self, strs: List[str]) -> List[List[str]]:
groupings = {}
for word in strs:
key = self.convert(word)
if key in groupings:
groupings[key].append(word)
else:
groupings[key] = [word]
return [*groupings.values()]
@staticmethod
def convert(word):
alphabet = [0] * 26
for letter in word:
alphabet[ord(letter) - ord('a')] += 1
return tuple(alphabet)
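# Illustrative: convert("eat") yields a 26-slot tuple with 1s at the positions of 'a', 'e' and 't',
# so "eat", "tea" and "ate" all map to the same key and land in one anagram group.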
values = ["eat", "tea", "tan", "ate", "nat", "bat"]
solution = Solution()
print(solution.group_anagrams(values))
|
import unittest
from util import *
ROUND_TRIP_CASES = [
# RFC 4648
('f', 'Zg=='),
('fo', 'Zm8='),
('foo', 'Zm9v'),
('foob', 'Zm9vYg=='),
('fooba', 'Zm9vYmE='),
('foobar', 'Zm9vYmFy'),
# Cases from https://commons.apache.org/proper/commons-codec/xref-test/org/apache/commons/codec/binary/Base64Test.html
('Hello World', 'SGVsbG8gV29ybGQ='),
('A', 'QQ=='),
('AA', 'QUE='),
('AAA', 'QUFB'),
('The quick brown fox jumped over the lazy dogs.',
'VGhlIHF1aWNrIGJyb3duIGZveCBqdW1wZWQgb3ZlciB0aGUgbGF6eSBkb2dzLg=='),
('It was the best of times, it was the worst of times.',
'SXQgd2FzIHRoZSBiZXN0IG9mIHRpbWVzLCBpdCB3YXMgdGhlIHdvcnN0IG9mIHRpbWVzLg=='),
('http://jakarta.apache.org/commmons', 'aHR0cDovL2pha2FydGEuYXBhY2hlLm9yZy9jb21tbW9ucw=='),
('AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz',
'QWFCYkNjRGRFZUZmR2dIaElpSmpLa0xsTW1Obk9vUHBRcVJyU3NUdFV1VnZXd1h4WXlaeg=='),
('xyzzy!', 'eHl6enkh'),
]
class Base64Tests(unittest.TestCase):
def test_vectors(self):
"""Tests for encoding and decoding a base 64 string"""
buf, buf_len = make_cbuffer('00' * 1024)
for str_in, b64_in in ROUND_TRIP_CASES:
ret, max_len = wally_base64_get_maximum_length(b64_in, 0)
self.assertEqual(ret, WALLY_OK)
self.assertTrue(max_len >= len(str_in))
ret, b64_out = wally_base64_from_bytes(utf8(str_in), len(str_in), 0)
self.assertEqual((ret, b64_out), (WALLY_OK, b64_in))
ret, written = wally_base64_to_bytes(utf8(b64_in), 0, buf, max_len)
self.assertEqual((ret, buf[:written]), (WALLY_OK, utf8(str_in)))
def test_get_maximum_length(self):
# Invalid args
valid_b64 = utf8(ROUND_TRIP_CASES[0][1])
for args in [(None, 0), # Null base64 string
(bytes(), 0), # Zero-length base64 string
(valid_b64, 1), # Invalid flags
]:
ret, max_len = wally_base64_get_maximum_length(*args)
self.assertEqual((ret, max_len), (WALLY_EINVAL, 0))
def test_base64_from_bytes(self):
# Invalid args
valid_str = utf8(ROUND_TRIP_CASES[0][0])
valid_str_len = len(valid_str)
for args in [
(None, valid_str_len, 0), # Null input bytes
(valid_str, 0, 0), # Zero-length input bytes
(valid_str, valid_str_len, 1), # Invalid flags
]:
ret, b64_out = wally_base64_from_bytes(*args)
self.assertEqual((ret, b64_out), (WALLY_EINVAL, None))
def test_base64_to_bytes(self):
# Invalid args
buf, buf_len = make_cbuffer('00' * 1024)
valid_b64 = utf8(ROUND_TRIP_CASES[0][1])
_, max_len = wally_base64_get_maximum_length(valid_b64, 0)
for args in [
(None, 0, buf, max_len), # Null base64 string
(valid_b64, 1, buf, max_len), # Invalid flags
(valid_b64, 0, None, max_len), # Null output buffer
(valid_b64, 0, buf, 0), # Zero output length
]:
ret, written = wally_base64_to_bytes(*args)
self.assertEqual((ret, written), (WALLY_EINVAL, 0))
# Too short output length returns the number of bytes needed
ret, written = wally_base64_to_bytes(valid_b64, 0, buf, max_len-1)
self.assertEqual((ret, written), (WALLY_OK, max_len))
if __name__ == '__main__':
unittest.main()
|
from WMCore.WebTools.RESTModel import RESTModel, restexpose
from cherrypy import HTTPError
DUMMY_ROLE = "dummy"
DUMMY_GROUP = "dummies"
DUMMY_SITE = "dummyHome"
class DummyDAO1:
"""
A DAO that has no arguments and does nothing but return 123
"""
def execute(self):
return 123
class DummyDAO2:
"""
A DAO that takes a single argument
"""
def execute(self, num):
return {'num': num}
class DummyDAO3:
"""
A DAO with keyword arguments
TODO: use this
"""
def execute(self, num, thing=None):
return {'num': num, 'thing': thing}
class DummyDAOFac:
"""
Something that replicates a Factory that loads our dummy DAO classes
"""
def __call__(self, classname='DummyDAO1'):
dao = None
if classname == 'DummyDAO1':
dao = DummyDAO1()
elif classname == 'DummyDAO2':
dao = DummyDAO2()
elif classname == 'DummyDAO3':
dao = DummyDAO3()
return dao
class DummyRESTModel(RESTModel):
def __init__(self, config):
'''
Initialise the RESTModel and add some methods to it.
'''
RESTModel.__init__(self, config)
self.defaultExpires = config.default_expires
self.methods = {'GET':{
'ping': {'default_data':1234,
'call':self.ping,
'version': 1,
'args': [],
'expires': 3600,
'validation': []},
#'list1': {'call':self.list1,
# 'version': 1}
},
'POST':{
'echo': {'call':self.echo,
'version': 1,
'args': ['message'],
'validation': []},
}
}
self._addMethod('GET', 'gen', self.gen)
self._addMethod('GET', 'list', self.list, args=['input_int', 'input_str'],
validation=[self.val_0,
self.val_1,
self.val_2,
self.val_3,
self.val_4 ],
version=2)
self._addMethod('GET', 'list1', self.list1)
self._addMethod('GET', 'list2', self.list2, args=['num0', 'num1', 'num2'])
self._addMethod('GET', 'list3', self.list3, args=['a', 'b'])
self._addMethod('POST', 'list3', self.list3, args=['a', 'b'])
self._addMethod('PUT', 'list1', self.list1, secured = True,
security_params = {'role':DUMMY_ROLE,
'group': DUMMY_GROUP,
'site':DUMMY_SITE})
# a will take list of numbers. i.e. a[1,2,3]
self._addMethod('GET', 'listTypeArgs', self.listTypeArgs, args=['aList'],
validation = [self.listTypeValidate])
self.daofactory = DummyDAOFac()
self._addDAO('GET', 'data1', 'DummyDAO1', [])
self._addDAO('GET', 'data2', 'DummyDAO2', ['num'])
self._addDAO('GET', 'data3', 'DummyDAO3', ['num', 'thing'])
@restexpose
def ping(self):
"""
Return a simple message
"""
return 'ping'
@restexpose
def echo(self, *args, **kwargs):
"""
Echo back the arguments sent to the call. If sanitise needs to be called
explicitly (e.g. the method was not added via _addMethod), the callee's signature
should be (*args, **kwargs).
"""
input_data = self._sanitise_input(args, kwargs, 'echo')
return input_data
def gen(self):
"""Generator method which produce generator dicts"""
data = ({'idx':i} for i in range(10))
return data
def list(self, input_int, input_str):
return {'input_int':input_int, 'input_str':input_str}
def list1(self):
""" test no argument case, return 'No argument' string """
return 'No argument'
def list2(self, num0, num1, num2):
""" test multiple argment string return the dictionary of key: value pair of
the arguments """
return {'num0': num0, 'num1': num1, 'num2': num2}
def list3(self, *args, **kwargs):
""" test sanitise without any validation specified """
return kwargs
def listTypeArgs(self, aList):
""" test whether it handles ?aList=1&aList=2 types of query """
return aList
def listTypeValidate(self, request_input):
if not isinstance(request_input["aList"], list):
request_input["aList"] = [int(request_input["aList"])]
else:
request_input["aList"] = map(int, request_input["aList"])
return request_input
def val_0(self, request_input):
# checks whether request_input has the right number of entries
if len(request_input) != 2:
raise HTTPError(400, 'val_0 failed: request_input length is not 2 -- (%s)' % len(request_input))
return request_input
def val_1(self, request_input):
# Convert the request_input data to an int (will be a string), ignore if it
# fails as the next validation will kill that, and it makes the unit test
# trickier...
try:
request_input['input_int'] = int(request_input['input_int'])
except:
pass
# Checks its first request_input contains a int
try:
assert isinstance(request_input['input_int'], type(123))
except AssertionError:
raise AssertionError('val_1 failed: %s not int' % type(request_input['input_int']))
return request_input
def val_2(self, request_input):
# Checks its second request_input is a string
try:
assert isinstance(request_input['input_str'], basestring)
except AssertionError:
raise HTTPError(400, 'val_2 failed: %s not string or unicode' % type(request_input['input_str']))
return request_input
def val_3(self, request_input):
# Checks the int is 123
try:
assert request_input['input_int'] == 123
except AssertionError:
raise HTTPError(400, 'val_3 failed: %s != 123' % request_input['input_int'])
return request_input
def val_4(self, request_input):
# Checks the str is 'abc'
try:
assert request_input['input_str'] == 'abc'
except AssertionError:
raise HTTPError(400, 'val_4 failed: %s != "abc"' % request_input['input_str'])
return request_input
|
import django
import os
# Django must be configured before ORM models can be imported; the settings module name below is a placeholder assumption.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'settings')
django.setup()
from parse_data.pars import parser
from movies.models import Movie
parser() |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
train_x=np.load('./twitter/idx_q.npy', mmap_mode='r')
train_y=np.load('./twitter/idx_a.npy', mmap_mode='r')
print(train_x.shape)
print(train_x[0])
batch_size=128
sequence_length=4
num_encoder_symbols=10
num_decoder_symbols=10
embedding_size=16
learning_rate=0.01
MAX_GRAD_NORM=5
hidden_size=16
encoder_inputs=tf.placeholder(dtype=tf.int32,shape=[batch_size,sequence_length])
decoder_inputs=tf.placeholder(dtype=tf.int32,shape=[batch_size,sequence_length])
logits=tf.placeholder(dtype=tf.float32,shape=[batch_size,sequence_length,num_decoder_symbols])
targets=tf.placeholder(dtype=tf.int32,shape=[batch_size,sequence_length])
weights=tf.placeholder(dtype=tf.float32,shape=[batch_size,sequence_length])
train_decoder_inputs=np.zeros(shape=[batch_size,sequence_length],dtype=np.int32)
train_weights=np.ones(shape=[batch_size,sequence_length],dtype=np.float32)
cell=tf.nn.rnn_cell.BasicLSTMCell(hidden_size)
def seq2seq(encoder_inputs,decoder_inputs,cell,num_encoder_symbols,num_decoder_symbols,embedding_size):
encoder_inputs = tf.unstack(encoder_inputs, axis=1)
decoder_inputs = tf.unstack(decoder_inputs, axis=1)
results,states=tf.contrib.legacy_seq2seq.embedding_rnn_seq2seq(
encoder_inputs,
decoder_inputs,
cell,
num_encoder_symbols,
num_decoder_symbols,
embedding_size,
output_projection=None,
feed_previous=False,
dtype=None,
scope=None
)
return results
def get_loss(logits,targets,weights):
loss=tf.contrib.seq2seq.sequence_loss(
logits,
targets=targets,
weights=weights
)
return loss
results=seq2seq(encoder_inputs,decoder_inputs,cell,num_encoder_symbols,num_decoder_symbols,embedding_size)
logits=tf.stack(results,axis=0)
print(logits)
loss=get_loss(logits,targets,weights)
trainable_variables=tf.trainable_variables()
grads,_=tf.clip_by_global_norm(tf.gradients(loss,trainable_variables),MAX_GRAD_NORM)
optimizer=tf.train.GradientDescentOptimizer(learning_rate)
train_op = optimizer.apply_gradients(zip(grads,trainable_variables))
with tf.Session() as sess:
# Initialize variables once before training; re-running the initializer inside the training loop would reset the learned weights every step.
sess.run(tf.global_variables_initializer())
count=0
while(count<100):
print("count:", count)
for step in range(0,100):
#print("step:",step)
train_encoder_inputs=train_x[step*batch_size:step*batch_size+batch_size,:]
train_targets=train_y[step*batch_size:step*batch_size+batch_size,:]
#results_value=sess.run(results,feed_dict={encoder_inputs:train_encoder_inputs,decoder_inputs:train_decoder_inputs})
op = sess.run(train_op, feed_dict={encoder_inputs: train_encoder_inputs, targets: train_targets,
weights: train_weights, decoder_inputs: train_decoder_inputs})
step=step+1
if(step%10==0):
cost = sess.run(loss, feed_dict={encoder_inputs: train_encoder_inputs, targets: train_targets,
weights: train_weights, decoder_inputs: train_decoder_inputs})
print(cost)
count=count+1 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
### RATING SCRIPT
###
### PURPOSE
### The purpose of the rating script is to rate the different stocks found on the Norwegian market based upon their
### historical value and dividends.
###
### PS! DO NOT use the results from the rating script as the sole decider for whether or not to buy a specific stock.
### Make sure to do enough research about the companies you are thinking of buying stocks in. This program is just a
### tool that gives a rating of their historical performance; it does not predict the future.
###
### @Author: Fredrik Bakken
### Email: fredrik.bakken(at)gmail.com
### Website: https://www.fredrikbakken.no/
### Github: https://github.com/FredrikBakken
###
### Last update: 17.10.2017
'''
import sys
import datetime
from prettytable import PrettyTable
from db import db_get_dividends, db_get_stock_value, db_get_stock_value_year, db_number_of_stocks, db_id_stocks
def calculate_profit(ticker, dividend, start, end):
if not start == 0:
profit = (end + dividend) / start
dividend_percent_start = dividend / start
else:
profit = 0
dividend_percent_start = 0
if not end == 0:
dividend_percent_end = dividend / end
else:
dividend_percent_end = 0
return [ticker, profit, start, end, dividend, dividend_percent_start, dividend_percent_end]
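# Illustrative: calculate_profit('TICK', 2.0, 100.0, 110.0) returns
# ['TICK', 1.12, 100.0, 110.0, 2.0, 0.02, 0.01818...], i.e. a 12% total return including dividends.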
def sort_on_date(data):
date_list = []
# Loop through database data and append date to list
for x in range(len(data)):
date_list.append(data[x]['d'])
# Sort the date_list from first date to last date
sorted_list = sorted(date_list, key=lambda x: datetime.datetime.strptime(x, '%Y%m%d'))
return sorted_list
def rating(arg):
number_of_arguments = 0
int_arg = 0
if not arg == 'all':
number_of_arguments = (len(arg) - 1)
profit_list = []
try:
int_arg = int(arg[1]) - 1
bool_year = True
except:
bool_year = False
# If rate select is 'all' historical stocks
if ((number_of_arguments == 1 and arg[1] == 'all') or (arg == 'all')):
print("\nRate stocks for all historical values...")
# Get the number of stocks in the database
number_of_stocks = db_number_of_stocks()
# Loop through the database and get the tickers
for x in range(number_of_stocks):
# Variables
total_dividend = 0
# Find ticker
stock_id = (x + 1)
ticker = db_id_stocks(stock_id)
# Find total dividend data
dividend_data = db_get_dividends(ticker)
dividend_date_list = sort_on_date(dividend_data)
if len(dividend_date_list) > 0:
from_date_dividend = dividend_date_list[0]
to_date_dividend = dividend_date_list[(len(dividend_date_list) - 1)]
if ((float(from_date_dividend) > 0) and (float(to_date_dividend) > 0)):
for x in range(len(dividend_data)):
current_date = dividend_data[x]['d']
if from_date_dividend <= current_date <= to_date_dividend:
total_dividend = total_dividend + float(dividend_data[x]['di'])
# Find start and end stock value
stock_data = db_get_stock_value(ticker)
stock_date_list = sort_on_date(stock_data)
# Resetting stock values
start_stock_value = 0
end_stock_value = 0
if len(stock_date_list) > 0:
from_date_stock = stock_date_list[0]
to_date_stock = stock_date_list[-1]
if ((float(from_date_stock) > 0) and (float(to_date_stock) > 0)):
val_f = False
val_t = False
for x in range(len(stock_data)):
if stock_data[x]['d'] == from_date_stock:
start_stock_value = float(stock_data[x]['c'])
val_f = True
elif stock_data[x]['d'] == to_date_stock:
end_stock_value = float(stock_data[x]['c'])
val_t = True
if from_date_stock == to_date_stock:
end_stock_value = start_stock_value
if val_f and val_t:
break
else:
start_stock_value = 0
end_stock_value = 0
profit = calculate_profit(ticker, total_dividend, start_stock_value, end_stock_value)
profit_list.append(profit)
# If rate select is specific year
elif number_of_arguments == 1 and bool_year:
print("\nRate stocks for year " + arg[1] + '...')
# Get the number of stocks in the database
number_of_stocks = db_number_of_stocks()
# Loop through the database and get the tickers
for x in range(number_of_stocks):
# Variables
total_dividend = 0
# Find ticker
stock_id = (x + 1)
ticker = db_id_stocks(stock_id)
# Find total dividend data
dividend_data = db_get_dividends(ticker)
dividend_date_list = sort_on_date(dividend_data)
for x in range(len(dividend_date_list)):
if dividend_data[x]['d'].startswith(arg[1]):
total_dividend = total_dividend + float(dividend_data[x]['di'])
# Find start and end stock value
stock_data = db_get_stock_value_year(ticker, arg[1])
stock_date_list = sort_on_date(stock_data)
# Resetting stock values
start_stock_value = 0
end_stock_value = 0
if len(stock_date_list) > 0:
from_date_stock = stock_date_list[0]
to_date_stock = stock_date_list[-1]
if ((float(from_date_stock) > 0) and (float(to_date_stock) > 0)):
val_f = False
val_t = False
for x in range(len(stock_data)):
if stock_data[x]['d'] == from_date_stock:
start_stock_value = float(stock_data[x]['c'])
val_f = True
elif stock_data[x]['d'] == to_date_stock:
end_stock_value = float(stock_data[x]['c'])
val_t = True
if from_date_stock == to_date_stock:
end_stock_value = start_stock_value
if val_f and val_t:
break
else:
start_stock_value = 0
end_stock_value = 0
profit = calculate_profit(ticker, total_dividend, start_stock_value, end_stock_value)
profit_list.append(profit)
# If rate selected is from/to specific years
elif number_of_arguments == 2:
print("\nRating stocks from " + arg[1] + " to " + arg[2] + '...')
# Add relevant years to a list
year_from = int(arg[1])
year_to = int(arg[2])
difference = (year_to - year_from)
years = []
for x in range(difference + 1):
years.append(year_from + x)
# Get the number of stocks in the database
number_of_stocks = db_number_of_stocks()
# Loop through the database and get the tickers
for x in range(number_of_stocks):
# Variables
total_dividend = 0
# Find ticker
stock_id = (x + 1)
ticker = db_id_stocks(stock_id)
# Find total dividend data
dividend_data = db_get_dividends(ticker)
for x in range(len(years)):
for y in range(len(dividend_data)):
if dividend_data[y]['d'].startswith(str(years[x])):
total_dividend = total_dividend + float(dividend_data[y]['di'])
# Find start and end stock value
start_stock_data = db_get_stock_value_year(ticker, arg[1])
start_stock_date_list = sort_on_date(start_stock_data)
end_stock_data = db_get_stock_value_year(ticker, arg[2])
end_stock_date_list = sort_on_date(end_stock_data)
# Resetting stock values
start_stock_value = 0
end_stock_value = 0
if len(start_stock_data) > 0 and len(end_stock_data) > 0:
from_date_stock = start_stock_date_list[0]
to_date_stock = end_stock_date_list[-1]
if ((float(from_date_stock) > 0) and (float(to_date_stock) > 0)):
for x in range(len(start_stock_data)):
if start_stock_data[x]['d'] == from_date_stock:
start_stock_value = float(start_stock_data[x]['c'])
break
for x in range(len(end_stock_data)):
if end_stock_data[x]['d'] == to_date_stock:
end_stock_value = float(end_stock_data[x]['c'])
break
if from_date_stock == to_date_stock:
end_stock_value = start_stock_value
else:
start_stock_value = 0
end_stock_value = 0
profit = calculate_profit(ticker, total_dividend, start_stock_value, end_stock_value)
profit_list.append(profit)
# Else: Invalid arguments
else:
print("That is an invalid argument, please use one of the following arguments:\n"
"'all', 'year', 'from_year to_year'")
# Rating parameters
# TOTAL = x[1]
# DIVIDEND NOW = x[6]
sorted_list = sorted(profit_list, key=lambda x: x[1], reverse=True)
t = PrettyTable(['Loss / Profit', 'Ticker', 'Total (%)', 'From stock value', 'To stock value', 'Total dividend (After split)', 'Dividend to start', 'Dividend to end'])
for x in range(len(sorted_list)):
if sorted_list[x][1] > 1:
loss_profit = 'PROFIT'
elif sorted_list[x][1] < 1:
loss_profit = 'LOSS'
else:
loss_profit = 'NO CHANGE'
if sorted_list[x][4] == 0:
dividend = '-'
else:
dividend = '{0:.2f}'.format(sorted_list[x][4]) + ' kr'
if not (sorted_list[x][1] == 0 and sorted_list[x][2] == 0 and sorted_list[x][4] == 0):
t.add_row([loss_profit, sorted_list[x][0], '{:.3%}'.format(sorted_list[x][1]),
'{0:.2f}'.format(sorted_list[x][2]), '{0:.2f}'.format(sorted_list[x][3]), dividend,
'{:.3%}'.format(sorted_list[x][5]), '{:.3%}'.format(sorted_list[x][6])])
print(t)
developer = '\nSoftware has been developed by Fredrik Bakken.\n ' \
'\nEmail: fredrik.bakken(at)gmail.com' \
'\nWebsite: https://www.fredrikbakken.no/' \
'\nGithub: https://www.github.com/FredrikBakken\n' \
'\nThank you for trying out this free stock rating software.'
print(developer)
if ((number_of_arguments == 1 and arg[1] == 'all') or (arg == 'all')):
today = datetime.datetime.today().strftime('%d.%m.%Y')
table_txt = t.get_string()
with open('results/profit_results.txt', 'w') as file:
file.write('DATA IS FROM ' + today + '.\n')
file.write(developer + '\n')
file.write(table_txt)
if __name__ == "__main__":
rating(sys.argv)
|
'''
Created on Aug 12, 2016
@author: Andy Zhang
'''
import turtle
def draw_square():
window = turtle.Screen()
window.bgcolor("red")
brad = turtle.Turtle()
brad.forward(100)
draw_square() |
from schematics.models import Model
from schematics.types import URLType, BooleanType, StringType
from .fields import XmsTextField
class XML(Model):
"""
A metadata object that allows for more fine-tuned XML model definitions.
When using arrays, XML element names are not inferred (for singular/plural forms) and the name property should be used to add that information. See examples for expected behavior.
"""
name = StringType() # Replaces the name of the element/attribute used for the described schema property. When defined within the Items Object (items), it will affect the name of the individual XML elements within the list. When defined alongside type being array (outside the items), it will affect the wrapping element and only if wrapped is true. If wrapped is false, it will be ignored.
namespace = URLType() # The URL of the namespace definition.
prefix = StringType() # The prefix to be used for the name.
attribute = BooleanType(default=False) # Declares whether the property definition translates to an attribute instead of an element. Default value is false.
wrapped = BooleanType(default=False) # MAY be used only for an array definition. Signifies whether the array is wrapped (for example, <books><book/><book/></books>) or unwrapped (<book/><book/>). Default value is false. The definition takes effect only when defined alongside type being array (outside the items).
x_ms_text = XmsTextField()
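# Minimal usage sketch (illustrative, field values assumed):
# XML({'name': 'book', 'prefix': 'bk', 'wrapped': True}).validate()
# builds and validates a fine-tuned XML definition for an array property.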
|
from django.db import models
from django.contrib.auth.models import User, Group
from onadata.apps.api.models.project import Project
class Team(Group):
"""
TODO: documentation
TODO: Whenever a member is removed from members team,
we should remove them from all teams and projects
within the organization.
"""
class Meta:
app_label = 'api'
OWNER_TEAM_NAME = "Owners"
organization = models.ForeignKey(User)
projects = models.ManyToManyField(Project)
def __unicode__(self):
# return a clean group name, without the organization username, for display
return self.name.split('#')[1]
@property
def team_name(self):
return self.__unicode__()
def save(self, *args, **kwargs):
# allow use of same name in different organizations/users
# concat with #
if not self.name.startswith('#'.join([self.organization.username])):
self.name = u'%s#%s' % (self.organization.username, self.name)
super(Team, self).save(*args, **kwargs)
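# Illustrative: a Team named "Editors" saved for an organization whose username is "acme" is stored
# as "acme#Editors", while team_name/__unicode__ strip the prefix and return just "Editors".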
|
import os
import subprocess
import sys
from colorama import init, Fore, Back, Style
import time
init()
def console_picture():
print(Style.BRIGHT + Fore.YELLOW)
print("Картинки не будет(")
time.sleep(0.1)
console_picture()
while True:
choose = input("Нажми 'BOT', 'BALANCE', 'CLIENT', 'DB', 'VIVOD' и Enter чтобы запустить...\n")
choose = choose.lower()
if choose == 'bot':
bot = subprocess.Popen([sys.executable, "bot.py"])
bot.wait()
continue
elif choose == 'balance':
balance = subprocess.Popen([sys.executable, "balance.py"])
balance.wait()
continue
elif choose == 'client':
client = subprocess.Popen([sys.executable, "create_client.py"])
client.wait()
continue
elif choose == 'db':
db = subprocess.Popen([sys.executable, "create_db.py"])
db.wait()
continue
elif choose == 'vivod':
Vivod = subprocess.Popen([sys.executable, "vivod.py"])
Vivod.wait()
continue
else:
print("'BOT', 'BALANCE', 'CLIENT', 'DB', 'VIVOD'")
|
#! /usr/bin/env python
import sys
import numpy as np
from matplotlib import pyplot as plt
def get_xy(fname):
poss_str=[]
x=[]
y=[]
f= open(fname, "r")
text= f.readlines()
for i in range(len(text)):
if i>=18:
line =text[i][:-1].split()
if len(line)==2:
x.append(float(line[0]))
y.append(float(line[1]))
return x, y
def ave(poss):
print(np.array(poss).shape)
poss_ave=[]
poss_ave = np.average(np.array(poss), axis=0).tolist()
return poss_ave
if __name__ == "__main__":
argvs = sys.argv
dir_list=["0_31410", "0_31414", "0_31418"]
for dir in dir_list:
x, y = get_xy(dir+"/protein_gpu/equil_n1/rmsd.xvg")
oname=dir+"_rmsd.png"
plt.figure()
plt.xlabel(r'Time [ns]')
plt.ylabel(r'RMSD [nm]')
plt.plot(x, y)
plt.savefig(oname)
|
import os, sys
import pandas as pd
import numpy as np
from params import get_params
if __name__ == "__main__":
params = get_params()
file = open(os.path.join(params['root'],params['database'],'val','annotation.txt'),'r')
file.readline()
x=file.readlines()
for lines in x:
os.remove(os.path.join(params['root'],params['database'],'train', 'images',lines[0:10]+'.jpg')) |
from bitresource import resource_registry
from bitutils.objects import Currency, Market, Ticker
from dictutils import AttrDict
from http_resource import HttpResource
@resource_registry.register()
class BinanceResource(HttpResource):
name = 'binance'
endpoint_url = 'https://www.binance.com/api'
def results_iter(self, response, **kwargs):
resp_json = response.json()
result_rows = []
if type(resp_json) is dict:
if 'symbols' in resp_json:
result_rows = resp_json.get('symbols')
else:
yield AttrDict(resp_json)
if result_rows:
for result_row in result_rows:
yield AttrDict(result_row)
@classmethod
def get_currencies(cls):
"""
{'icebergAllowed': True,
'baseAssetPrecision': 8,
'symbol': 'ETHBTC',
'baseAsset': 'ETH',
'quoteAsset': 'BTC',
'status': 'TRADING',
'quotePrecision': 8}
"""
results = []
for currency in CurrencyResource.data():
currency = Currency(code=currency.quoteAsset, decimals=currency.quotePrecision)
results.append(currency)
return results
@classmethod
def get_markets(cls):
"""
{'icebergAllowed': True,
'baseAssetPrecision': 8,
'symbol': 'ETHBTC',
'baseAsset': 'ETH',
'quoteAsset': 'BTC',
'status': 'TRADING',
'quotePrecision': 8}
"""
results = []
for market in MarketResource.data():
market_name = '%s-%s' % (market.quoteAsset, market.baseAsset)
market = Market(code=market_name.replace('-', ''), name=market_name, base=market.quoteAsset,
quote=market.baseAsset)
results.append(market)
return results
@classmethod
def ticker(cls, market, exchange):
symbol = ''.join([market.name.split('-')[1], market.name.split('-')[0]])
data = TickerResource.data(symbol=symbol).first()
return Ticker(last=data.price, market=market.code, exchange=exchange)
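# Illustrative: for a market named 'BTC-ETH' the Binance symbol becomes 'ETHBTC' (quote and base
# swapped and concatenated) before querying the v3 ticker/price endpoint.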
CurrencyResource = BinanceResource('v1', 'exchangeInfo')
MarketResource = BinanceResource('v1', 'exchangeInfo')
TickerResource = BinanceResource('v3', 'ticker/price')
|
from matplotlib.pyplot import ginput
from matplotlib import cm
import matplotlib.pyplot as plt
import matplotlib.nxutils as nxutils
import numpy as np
import os
import pdb
import scipy.interpolate
import Galaxies
from TrilegalUtils import get_stage_label
from scatter_contour import scatter_contour
import logging
# `logger` is used below but never defined in this module; a standard module-level logger is assumed here.
logger = logging.getLogger(__name__)
def poly_fmt(polygon_str):
polygon_str = polygon_str.replace(')', '').replace('(', '').strip()
polygon_list = map(float, polygon_str.split(','))
return np.column_stack((polygon_list[0::2], polygon_list[1::2]))
class CMDregion(object):
'''
class for messing with the region files that accompany the
tagged data. These files are made in define_color_mag_region
'''
def __init__(self, region_file):
self.__initialised = True
self.base, self.name = os.path.split(region_file)
CMDregion._load_data(self, region_file)
self.region_names = self.regions.keys()
def _load_data(self, region_file):
self.regions = {}
self.region_names = []
with open(region_file) as f:
for i, line in enumerate(f):
if i == 0:
col_keys = line.strip().replace('#', '').split()
else:
pid, reg, filt1, filt2 = line.split('polygon')[0].split()
reg = reg.lower()
polygon = line.split('polygon')[1].strip()
if not reg in self.regions.keys():
self.regions[reg] = {}
self.regions[reg] = poly_fmt(polygon)
self.filter1 = filt1
self.filter2 = filt2
lixo, self.survey, __, camera, pidtarget = pid.split('_')
self.target = '-'.join(pidtarget.split('-')[1:])
self.photsys = camera.replace('-', '_')
def shift_regions(self, shift_verts, regs, dcol=0., dmag=0.):
'''
shift part of a region (defined by N,2 array of vertices: shift_verts)
adds attribute regions['%s_dc%.2f_dm%.2f'%(reg,dcol,dmag)] N,2 array
'''
for reg in regs:
reg = reg.lower()
polygon = self.regions[reg]
new_verts = polygon.copy()
# region to shift
mask = nxutils.points_inside_poly(polygon, shift_verts)
inds_to_shift, = np.nonzero(mask)
# shift
color = polygon[:, 0][inds_to_shift] + dcol
mag = polygon[:, 1][inds_to_shift] + dmag
shifted = np.column_stack((color, mag))
# new region
new_verts[inds_to_shift] = shifted
self.regions['%s_dc%.2f_dm%.2f' % (reg, dcol, dmag)] = new_verts
def join_regions(self, *regs):
'''
join two polygons into one, call by self.regions[reg]
'''
comb_region = np.concatenate([self.regions[r.lower()] for r in regs])
new_verts = get_cmd_shape(comb_region[:, 0], comb_region[:, 1])
self.regions['_'.join(regs).lower()] = new_verts
def average_color(self, *regs):
'''
ave color of a region, is this stupid?
I fill a box of npts randomly and cut out those not in the region.
Then I take the average in bins of mag.min,mag.max dm=binsize...
probably.
new attribute(s):
self.regions['%s_mean'%reg]: (N,2) array of color, mag.
'''
for reg in regs:
reg = reg.lower()
polygon = self.regions[reg]
color = polygon[:, 0]
mag = polygon[:, 1]
# get a ton of random points inside the polygon
x = random_array(color)
y = random_array(mag)
xy = np.column_stack((x, y))
inds = nxutils.points_inside_poly(xy, polygon).nonzero()
rand_mag = y[inds]
rand_col = x[inds]
# take the average of the points in each magbin
magbins = np.linspace(mag.min(), mag.max(), 5)
dinds = np.digitize(rand_mag, magbins)
mean_colors = [np.mean(rand_col[dinds == i])
for i in range(magbins.size)]
# get rid of nans
shit, = np.isnan(mean_colors).nonzero()
mean_colors = np.delete(np.asarray(mean_colors), shit)
magbins = np.delete(magbins, shit)
self.regions['%s_mean' % reg] = np.column_stack((mean_colors,
magbins))
return
def split_regions(self, *regs, **kwargs):
'''
splits a joined region on the average color - or by a spliced arr.
adds attribues self.regions[name1] and self.regions[name2]: N,2 arrays.
'''
name1 = kwargs.get('name1', ' rheb_bym')
name2 = kwargs.get('name2', 'bheb_bym')
on_mean = kwargs.get('on_mean', True)
on_mag = kwargs.get('on_mag', True)
for reg in regs:
reg = reg.lower()
if on_mean:
# make sure we have average colors.
if not '%s_mean' % reg in self.regions.keys():
CMDregion.average_color(self, reg)
mean_split = self.regions['%s_mean' % reg]
else:
mean_split = kwargs.get('splice_arr')
polygon = self.regions[reg]
polygon = uniquify_reg(polygon)
# insert extreme points and sort them
i = 0
if on_mag:
i = 1
maxpoint = mean_split[np.argmax(mean_split[:, i])]
minpoint = mean_split[np.argmin(mean_split[:, i])]
polygoni = polar_sort(insert_points(polygon, maxpoint, minpoint))
# split the array by the inserted points
imaxs = np.where(polygoni == maxpoint)[0]
imax, = not_unique(imaxs)
imins = np.where(polygoni == minpoint)[0]
imin, = not_unique(imins)
if on_mag:
if imin < imax:
sideA = polygoni[imin:imax]
sideB = np.vstack((polygoni[imax:], polygoni[:imin]))
if imin > imax:
sideA = polygoni[:imax]
sideB = polygoni[imax:imin]
else:
sideA = np.vstack((polygoni[imin:], polygoni[:imax]))
sideB = polygoni[imax:imin]
# attach the mean values
stitchedA = np.vstack((sideA, mean_split[::-1]))
stitchedB = np.vstack((sideB, mean_split))
self.regions[name1] = stitchedA
self.regions[name2] = stitchedB
return
def add_points(self, *regs, **kwargs):
'''
add points to each line segment, creates new attribute reg_HD.
'''
npts = kwargs.get('npts', 10)
for reg in regs:
polygon = self.regions[reg]
color = np.array([])
mag = np.array([])
polygon = uniquify_reg(polygon)
for (c1, m1), (c2, m2) in zip(polygon,
np.roll(polygon, -1, axis=0)):
# 1d fit
z = np.polyfit((c1, c2), (m1, m2), 1)
p = np.poly1d(z)
# new array of npts
x = np.linspace(c1, c2, npts)
y = p(x)
# plt.plot(x,y,'o')
color = np.append(color, x)
mag = np.append(mag, y)
self.regions['%s_hd' % reg] = np.column_stack((color, mag))
def __setattr__(self, item, value):
if not self.__dict__.has_key('__initialised'): # this test allows attributes to be set in the __init__ method
return dict.__setattr__(self, item, value)
elif self.__dict__.has_key(item): # any normal attributes are handled normally
dict.__setattr__(self, item, value)
self.region_names = self.regions.keys()
else:
self.__setitem__(item, value)
self.region_names = self.regions.keys()
def uniquify_reg(reg):
lixo, inds = np.unique(reg[:, 0], return_index=True)
# unique will sort the array...
inds = np.sort(inds)
return reg[inds]
def random_array(arr, offset=0.1, npts=1e4):
'''
returns a random array of npts within the extremes of arr +/- offset
'''
return np.random.uniform(arr.min() - offset, arr.max() + offset, npts)
def insert_points(arr, *points):
'''
adds rows to array.
'''
arrcopy = np.vstack((arr, points))
return arrcopy
def closest_point(point, verts):
'''
fortran days... smallest radius from point.
'''
return np.argmin(np.sqrt((verts[:, 0] - point[0]) ** 2 +
(verts[:, 1] - point[1]) ** 2))
def not_unique(arr):
'''
returns array of values that are repeated.
'''
arrcopy = arr.copy()
uarr = np.unique(arrcopy)
for u in uarr:
arrcopy = np.delete(arrcopy, list(arrcopy).index(u))
return arrcopy
def get_bounds(color, mag):
'''
find cmd bounding box, don't need it...
'''
min_color = np.min(color)
max_color = np.max(color)
bright_mag = np.min(mag)
faint_mag = np.max(mag)
bl = (min_color, faint_mag)
br = (max_color, faint_mag)
ul = (min_color, bright_mag)
ur = (max_color, bright_mag)
verts = np.vstack((ul, ur, br, bl, ul))
return verts
def polar_sort(l):
'''
polar sorting, you know, in a circle like. I got it from some website.
'''
import math
mean_col = np.mean([l[i][0] for i in range(len(l))])
mean_mag = np.mean([l[i][1] for i in range(len(l))])
slist = sorted(l, key=lambda c: math.atan2(c[0] - mean_col,
c[1] - mean_mag), reverse=True)
return np.array(slist).squeeze()
def get_cmd_shape(color, mag):
'''
gets the outline of a cmd. Guesses at a large polygon, and then add points
that are outside of the polygon, ignores points within.
then polar sorts the result.
returns: N,2 array.
'''
# make a guess at the polygon.
left = (np.min(color), mag[np.argmin(color)])
right = (np.max(color), mag[np.argmax(color)])
up = (color[np.argmin(mag)], np.min(mag))
down = (color[np.argmax(mag)], np.max(mag))
verts = np.vstack((left, right, up, down))
points = np.column_stack((color, mag))
for point in points:
if nxutils.pnpoly(point[0], point[1], verts) == 0.:
# add point to verts
col = verts[:, 0]
m = verts[:, 1]
col = np.append(col, point[0])
m = np.append(m, point[1])
# order the new points in a circle
verts = polar_sort(zip(col, m))
verts = np.append(verts, [verts[0]], axis=0)
# plt.plot(verts[:, 0], verts[:, 1], lw = 2)
return verts
def define_color_mag_region(fitsfiles, region_names, **kwargs):
'''
Define a polygon on a cmd of a fitsfile or a list of fitsfiles.
the tagged fitsfile can then be used in match_light, or all other b/r
modules that make use of MS vs BHeB vs RHeB vs RGB (which isn't picked
out well)
writes to two text files:
1) for each fits file,
fitsfile.dat: with ra dec mag1 mag2 mag1err mag2err stage
stage is set in the code to match trilegal's parametri.h, will be -1
if not assigned
2) kwarg 'region file' [cmd_regions.dat] with
propid_target region filter1 filter2 polygon
also saves annotated cmd
locations:
all in base BRFOLDER/data/cmd_regions
tagged fits file: base/tagged_photometery/[fitsfile].dat
plot: base/plots/[fitsfile].png
region file: base/regions/cmd_regions_[fitsfile].dat
Draw points for each region with mouse click,
ctrl click to delete,
command+click to exit (edit: I think it's actually alt)
'''
cmd_regions_loc = kwargs.get('cmd_regions_loc')
if not cmd_regions_loc:
#from BRparams import *
#cmd_regions_loc = os.path.join(BRFOLDER, 'data', 'cmd_regions')
print 'need cmd_regions_loc!'
return -1
tagged_data_loc = os.path.join(cmd_regions_loc, 'tagged_photometry')
plot_dir = os.path.join(cmd_regions_loc, 'plots')
xlim = kwargs.get('xlim', (-1, 3))
ylim = kwargs.get('ylim')
if type(fitsfiles) == str:
fitsfiles = [fitsfiles]
for j, fitsfile in enumerate(fitsfiles):
fileIO.ensure_file(fitsfile)
datafile = os.path.join(tagged_data_loc,
fileIO.replace_ext(fitsfile, '.dat'))
if os.path.isfile(datafile):
logger.warning('%s exists, skipping' % datafile)
continue
logger.debug('%s of %s fitsfiles' % (j + 1, len(fitsfiles)))
logger.debug('now working on %s' % fitsfile)
# begin outfiles
outname = os.path.split(fitsfile)[1].replace('.fits', '.dat')
outfile = os.path.join(cmd_regions_loc, 'regions',
'cmd_regions_%s' % outname)
out = open(outfile, 'a')
header = '# propid_target region filter1 filter2 polygon\n'
out.write(header)
dout = open(datafile, 'w')
dheader = '# ra dec mag1 mag2 mag1err mag2err stage\n'
dout.write(dheader)
fits = Galaxies.galaxy(fitsfile, fitstable=1)
photsys = fits.photsys
extrastr = '_%s' % photsys.upper()[0]
ra = fits.data['ra']
dec = fits.data['dec']
mag1err = fits.data['mag1_err']
mag2err = fits.data['mag2_err']
mag1 = fits.mag1
mag2 = fits.mag2
filter1 = fits.filter1
filter2 = fits.filter2
errlimit, = np.nonzero(np.sqrt(mag1err ** 2 + mag2err ** 2) < 0.5)
stage = np.zeros(shape=(len(mag1), )) - 1
color = mag1 - mag2
datapts = np.column_stack((color, mag2))
#z,dx,dy = GenUtils.bin_up(color,mag2,dx=0.1,dy=0.1)
fig = plt.figure(figsize=(20, 12))
ax = plt.axes()
ax.set_xlim(-1, 4)
ax.set_ylim(mag2[errlimit].max(), mag2[errlimit].min())
ncolbin = int(np.diff((np.min(color), np.max(color))) / 0.05)
nmagbin = int(np.diff((np.min(mag2), np.max(mag2))) / 0.05)
ax.set_xlabel('$%s-%s%s$' % (filter1, filter2, extrastr), fontsize=20)
ax.set_ylabel('$%s%s$' % (filter2, extrastr), fontsize=20)
ax.annotate('%s' % fits.target, xy=(0.97, 0.1),
xycoords = 'axes fraction', fontsize = 20, ha = 'right')
contour_args = {'cmap': cm.gray_r, 'zorder': 100}
scatter_args = {'marker': '.', 'color': 'black', 'alpha': 0.2,
'edgecolors': None, 'zorder': 1}
plt_pts, cs = scatter_contour(color[errlimit], mag2[errlimit],
threshold=10, levels=20,
hist_bins=[ncolbin, nmagbin],
contour_args=contour_args,
scatter_args=scatter_args,
ax=ax)
ax.plot((np.min(color), np.max(color)), (fits.trgb, fits.trgb),
color='red', lw=2)
for region in region_names:
ax.set_title(region)
plt.draw()
stage_lab = get_stage_label(region)
pts = ginput(0, timeout=-1)
xs, ys = np.transpose(pts)
inds, = np.nonzero(nxutils.points_inside_poly(datapts, pts))
stage[inds] = stage_lab
ax.plot(color[inds], mag2[inds], '.', alpha=0.5, zorder=stage_lab)
out.write('%s %s %s %s polygon(' %
(fits.target, region, filter1, filter2))
outdata = ['%.4f, %.4f' % (x, y) for x, y in zip(xs, ys)]
out.write(', '.join(outdata) + ')\n')
ax.set_title('')
plt.draw()
figname = os.path.split(datafile)[1].replace('.dat', '.png')
plt.savefig(os.path.join(plot_dir, figname))
plt.close()
logger.info('%s wrote %s' % (define_color_mag_region.__name__,
datafile.replace('dat', 'png')))
fmt = '%.8f %.8f %.4f %.4f %.4f %.4f %.0f\n'
[dout.write(fmt % (ra[i], dec[i], mag1[i], mag2[i], mag1err[i],
mag2err[i], stage[i])) for i in range(len(ra))]
dout.close()
logger.info('%s wrote %s' % (define_color_mag_region.__name__,
datafile))
plt.cla()
out.close()
logger.info('%s wrote %s' % (define_color_mag_region.__name__, outfile))
return outfile
def test(regions):
Regions = [CMDregion(r) for r in regions]
for R in Regions:
logger.info(R.target, R.filter1, R.filter2)
try:
R.join_regions('BHeB', 'RHeB')
except:
logger.warning('nope!')
continue
R.split_regions('BHeB_RHeB')
plt.figure()
plt.title('%s' % R.target)
for key in ('bheb_bym', 'rheb_bym'):
plt.plot(R.regions[key][:, 0], R.regions[key][:, 1])
if __name__ == '__main__':
pdb.set_trace()
    import sys
    test(sys.argv[1:])  # regions are assumed to be passed as file paths on the command line
|
import base64
import hashlib
import os
import logging
import select
import socket
import struct
import sys
import threading
import time
from aiy.vision.streaming.presence import PresenceServer
import aiy.vision.streaming.proto.messages_pb2 as pb2
from http.server import BaseHTTPRequestHandler
from io import BytesIO
from itertools import cycle
from picamera import PiVideoFrameType
AVAHI_SERVICE = '_aiy_vision_video._tcp'
ENCODING_BIT_RATE = 1000000
TX_QUEUE_SIZE = 15
WS_PORT = 4664
TCP_PORT = 4665
ANNEXB_PORT = 4666
def _close_socket(sock):
try:
sock.shutdown(socket.SHUT_RDWR)
except OSError:
pass
class StreamingServer(object):
def __init__(self, camera):
self._lock = threading.Lock()
self._logger = logging.getLogger(__name__)
self._camera = camera
self._stream_count = 0
self._tcp_socket = None
self._web_socket = None
self._annexb_socket = None
self._thread = None
self._closed = False
self._waiting_for_key = False
self._start_time = time.monotonic()
self._seq = 0
self._clients = []
def run(self):
with self._lock:
if self._thread:
self._logger.error('Server already running')
return
self._closed = False
self._thread = threading.Thread(target=self._server_thread)
self._thread.start()
def close(self):
to_join = None
clients = None
with self._lock:
if self._closed:
return
self._closed = True
clients = self._clients
self._clients = []
if self._tcp_socket:
_close_socket(self._tcp_socket)
self._tcp_socket = None
if self._web_socket:
_close_socket(self._web_socket)
self._web_socket = None
if self._annexb_socket:
_close_socket(self._annexb_socket)
self._annexb_socket = None
if self._thread:
to_join = self._thread
self._thread = None
if clients:
self._logger.info('Closing %d clients', len(clients))
for client in clients:
client.close()
if to_join:
to_join.join()
self._logger.info('Server closed')
def _server_thread(self):
tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
tcp_socket.bind(('', TCP_PORT))
tcp_socket.listen()
tcp_port = tcp_socket.getsockname()[1]
web_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
web_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
web_socket.bind(('', WS_PORT))
web_socket.listen()
web_port = web_socket.getsockname()[1]
annexb_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
annexb_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
annexb_socket.bind(('', ANNEXB_PORT))
annexb_socket.listen()
annexb_port = annexb_socket.getsockname()[1]
with self._lock:
self._tcp_socket = tcp_socket
self._web_socket = web_socket
self._annexb_socket = annexb_socket
self._logger.info('Listening on ports tcp: %d web: %d annexb: %d',
tcp_port, web_port, annexb_port)
presence = PresenceServer(AVAHI_SERVICE, tcp_port)
presence.run()
while True:
with self._lock:
if self._closed:
break
socks = [tcp_socket, web_socket, annexb_socket]
try:
rlist, _, _ = select.select(socks, socks, socks)
for ready in rlist:
client_sock, client_addr = ready.accept()
if ready == tcp_socket:
kind = 'tcp'
client = _ProtoClient(self, client_sock, client_addr)
elif ready == web_socket:
kind = 'web'
client = _WsProtoClient(self, client_sock, client_addr)
elif ready == annexb_socket:
kind = 'annexb'
client = _AnnexbClient(self, client_sock, client_addr)
else:
# Shouldn't happen.
client_sock.close()
continue
self._logger.info('New %s connection from %s:%d', kind,
client_addr[0], client_addr[1])
with self._lock:
self._clients.append(client)
client.start()
except:
self._logger.info('Server sockets closed')
self._logger.info('Server shutting down')
presence.close()
_close_socket(tcp_socket)
_close_socket(web_socket)
_close_socket(annexb_socket)
with self._lock:
self._tcp_socket = None
self._web_socket = None
self._annexb_socket = None
def _stream_control(self, enable):
start_recording = False
stop_recording = False
with self._lock:
if enable:
self._stream_count += 1
start_recording = self._stream_count == 1
else:
self._stream_count -= 1
stop_recording = self._stream_count == 0
if start_recording:
self._logger.info('Start recording')
self._camera.start_recording(
_EncoderSink(self),
format='h264',
profile='baseline',
inline_headers=True,
bitrate=ENCODING_BIT_RATE,
intra_period=0)
if stop_recording:
self._logger.info('Stop recording')
self._camera.stop_recording()
def _client_closed(self, client):
with self._lock:
if client in self._clients:
self._clients.remove(client)
def _on_video_data(self, data):
frame_type = self._camera.frame.frame_type
is_key = frame_type == PiVideoFrameType.key_frame
is_delta = frame_type == PiVideoFrameType.frame
is_codec_data = frame_type == PiVideoFrameType.sps_header
if is_key:
self._waiting_for_key = False
needs_key = False
if is_codec_data:
with self._lock:
for client in self._clients:
needs_key |= client.send_codec_data(self._camera.resolution, data)
elif is_key or is_delta:
needs_key = False
pts = int((time.monotonic() - self._start_time) * 1e6)
with self._lock:
for client in self._clients:
needs_key |= client.send_frame_data(is_key, self._seq, pts, data)
self._seq += 1
else:
self._logger.info('Unknown frame %d bytes', len(data))
if needs_key:
self._request_key_frame()
def send_inference_data(self, data):
needs_key = False
with self._lock:
for client in self._clients:
needs_key |= client.send_inference_data(data)
if needs_key:
self._request_key_frame()
def _request_key_frame(self):
if not self._waiting_for_key:
self._logger.info('Requesting key frame')
self._camera.request_key_frame()
self._waiting_for_key = True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
class _EncoderSink(object):
def __init__(self, server):
self._server = server
def write(self, data):
self._server._on_video_data(data)
def flush(self):
pass
class _Client(object):
def __init__(self, server, socket, addr):
self._lock = threading.Lock()
self._cond = threading.Condition(self._lock)
self._logger = logging.getLogger(__name__)
self._streaming = False
self._closed = False
self._server = server
self._socket = socket
self._ip = addr[0]
self._port = addr[1]
self._tx_q = []
self._needs_codec_data = True
self._needs_key = True
self._rx_thread = threading.Thread(target=self._rx_thread)
self._tx_thread = threading.Thread(target=self._tx_thread)
def start(self):
self._rx_thread.start()
self._tx_thread.start()
def __del__(self):
self.close()
def close(self):
with self._lock:
if self._closed:
return
self._closed = True
self._cond.notifyAll()
streaming = self._streaming
self._streaming = False
_close_socket(self._socket)
self._log_info('Connection closed')
if streaming:
self._server._stream_control(False)
self._server._client_closed(self)
def send_codec_data(self, resolution, data):
with self._lock:
if not self._streaming:
return False
self._needs_codec_data = False
return self._queue_codec_data_locked(resolution, data)
def send_frame_data(self, is_key, seq, pts, data):
with self._lock:
if not self._streaming:
return False
if self._needs_codec_data:
return True
if self._needs_key and not is_key:
return True
self._needs_key = False
return self._queue_frame_data_locked(is_key, seq, pts, data)
def send_inference_data(self, data):
with self._lock:
if not self._streaming:
return False
return self._queue_inference_data_locked(data)
def _log(self, func, fmt, *args):
args = (self._ip, self._port) + args
func('%s:%d: ' + fmt, *args)
def _log_info(self, fmt, *args):
self._log(self._logger.info, fmt, *args)
def _log_warning(self, fmt, *args):
self._log(self._logger.warning, fmt, *args)
def _log_error(self, fmt, *args):
self._log(self._logger.error, fmt, *args)
def _queue_message_locked(self, message):
dropped = False
self._tx_q.append(message)
while len(self._tx_q) > TX_QUEUE_SIZE:
dropped = True
self._tx_q.pop(0)
self._needs_codec_data = True
self._needs_key = True
self._log_warning('running behind, dropping messages')
self._cond.notifyAll()
return dropped
def _tx_thread(self):
while True:
with self._lock:
if self._closed:
break
if self._tx_q:
message = self._tx_q.pop(0)
else:
self._cond.wait()
continue
try:
self._send_message(message)
except Exception as e:
self._log_error('Failed to send data: %s', e)
self.close()
def _rx_thread(self):
while True:
with self._lock:
if self._closed:
break
message = self._receive_message()
if message:
self._handle_message(message)
else:
self.close()
class _ProtoClient(_Client):
def __init__(self, server, socket, addr):
_Client.__init__(self, server, socket, addr)
def _queue_codec_data_locked(self, resolution, data):
message = pb2.ClientBound()
message.stream_data.codec_data.width = resolution[0]
message.stream_data.codec_data.height = resolution[1]
message.stream_data.codec_data.data = data
return self._queue_message_locked(message)
def _queue_frame_data_locked(self, is_key, seq, pts, data):
message = pb2.ClientBound()
if is_key:
message.stream_data.frame_data.type = pb2.FrameData.KEY
else:
message.stream_data.frame_data.type = pb2.FrameData.DELTA
message.stream_data.frame_data.seq = seq
message.stream_data.frame_data.pts = pts
message.stream_data.frame_data.data = data
return self._queue_message_locked(message)
def _queue_inference_data_locked(self, data):
return self._queue_message_locked(data.GetMessage())
def _handle_message(self, message):
which = message.WhichOneof('message')
try:
if which == 'stream_control':
self._handle_stream_control(message.stream_control)
else:
self._log_warning('unhandled message %s', which)
except Exception as e:
self._log_error('Error handling message %s: %s', which, e)
self.close()
def _handle_stream_control(self, stream_control):
self._log_info('stream_control %s', stream_control.enabled)
enabled = stream_control.enabled
with self._lock:
if enabled == self._streaming:
self._log_info('ignoring NOP stream_control')
return
else:
self._streaming = enabled
self._server._stream_control(enabled)
def _send_message(self, message):
buf = message.SerializeToString()
self._socket.sendall(struct.pack('!I', len(buf)))
self._socket.sendall(buf)
def _receive_bytes(self, num_bytes):
received = bytearray(b'')
while num_bytes > len(received):
buf = self._socket.recv(num_bytes - len(received))
if not buf:
break
received.extend(buf)
return bytes(received)
def _receive_message(self):
try:
buf = self._receive_bytes(4)
num_bytes = struct.unpack('!I', buf)[0]
buf = self._receive_bytes(num_bytes)
message = pb2.AiyBound()
message.ParseFromString(buf)
return message
except:
return None
class _WsProtoClient(_ProtoClient):
class WsPacket(object):
def __init__(self):
self.fin = True
self.opcode = 2
self.masked = False
self.mask = None
self.length = 0
self.payload = bytearray()
def append(self, data):
if self.masked:
data = bytes([c ^ k for c, k in zip(data, cycle(self.mask))])
self.payload.extend(data)
def serialize(self):
self.length = len(self.payload)
buf = bytearray()
b0 = 0
b1 = 0
if self.fin:
b0 |= 0x80
b0 |= self.opcode
buf.append(b0)
if self.length <= 125:
b1 |= self.length
buf.append(b1)
elif self.length >= 126 and self.length <= 65535:
b1 |= 126
buf.append(b1)
buf.extend(struct.pack('!H', self.length))
else:
b1 |= 127
buf.append(b1)
buf.extend(struct.pack('!Q', self.length))
if self.payload:
buf.extend(self.payload)
return bytes(buf)
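        # Hedged worked example of the length encoding above (RFC 6455 framing):
        #   a 100-byte payload    -> the second header byte carries 100 directly
        #   a 1000-byte payload   -> the second byte is 126, followed by '!H' 0x03E8
        #   a payload > 65535 B   -> the second byte is 127, followed by a '!Q' 64-bit length
        # Frames built here are never masked, so the 0x80 mask bit stays clear.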
def __init__(self, server, socket, addr):
self._handshaked = False
_ProtoClient.__init__(self, server, socket, addr)
def _receive_message(self):
try:
while True:
if self._handshaked:
break
self._process_web_request()
packets = []
while True:
packet = self._receive_packet()
if packet.opcode == 0:
# Continuation
if not packets:
self._log_error('Invalid continuation received')
return None
packets.append(packet)
elif packet.opcode == 1:
# Text, not supported.
self._log_error('Received text packet')
return None
elif packet.opcode == 2:
# Binary.
packets.append(packet)
if packet.fin:
joined = bytearray()
for p in packets:
joined.extend(p.payload)
message = pb2.AiyBound()
message.ParseFromString(joined)
return message
elif packet.opcode == 8:
# Close.
self._log_info('WebSocket close requested')
return None
elif packet.opcode == 9:
# Ping, send pong.
self._log_info('Received ping')
response = self.WsPacket()
response.opcode = 10
response.append(packet.payload)
with self._lock:
self._queue_message_locked(response)
elif packet.opcode == 10:
                    # Pong. Ignore as we don't send pings.
self._log_info('Dropping pong')
else:
self._log_info('Dropping opcode %d', packet.opcode)
except:
return None
def _receive_packet(self):
packet = self.WsPacket()
buf = super()._receive_bytes(2)
packet.fin = buf[0] & 0x80 > 0
packet.opcode = buf[0] & 0x0F
packet.masked = buf[1] & 0x80 > 0
packet.length = buf[1] & 0x7F
if packet.length == 126:
packet.length = struct.unpack('!H', super()._receive_bytes(2))[0]
elif packet.length == 127:
packet.length = struct.unpack('!Q', super()._receive_bytes(8))[0]
if packet.masked:
packet.mask = super()._receive_bytes(4)
packet.append(super()._receive_bytes(packet.length))
return packet
def _send_message(self, message):
if isinstance(message, (bytes, bytearray)):
buf = message
else:
if isinstance(message, self.WsPacket):
packet = message
else:
packet = self.WsPacket()
packet.append(message.SerializeToString())
buf = packet.serialize()
self._socket.sendall(buf)
class HTTPRequest(BaseHTTPRequestHandler):
def __init__(self, request_buf):
self.rfile = BytesIO(request_buf)
self.raw_requestline = self.rfile.readline()
self.parse_request()
def _process_web_request(self):
response_template = (
'HTTP/1.1 200 OK\r\n'
'Content-Length: %(content_length)s\r\n'
'Connection: Keep-Alive\r\n\r\n'
)
try:
header_buf = bytearray()
while b'\r\n\r\n' not in header_buf:
buf = self._socket.recv(2048)
if not buf:
raise Exception('Socket closed while receiving header')
header_buf.extend(buf)
if len(header_buf) >= 10 * 1024:
raise Exception('HTTP header too large')
request = self.HTTPRequest(header_buf)
connection = request.headers['Connection']
upgrade = request.headers['Upgrade']
if 'Upgrade' in connection and upgrade == 'websocket':
self._handshake(request)
elif request.command == 'GET':
content = self._get_asset(request.path)
response_hdr = response_template % {'content_length': len(content)}
response = bytearray(response_hdr.encode('ascii'))
response.extend(content)
with self._lock:
self._queue_message_locked(response)
else:
raise Exception('Unsupported request')
except Exception as e:
self.close()
raise e
def _handshake(self, request):
magic = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11'
response_template = (
'HTTP/1.1 101 Switching Protocols\r\n'
'Upgrade: WebSocket\r\n'
'Connection: Upgrade\r\n'
'Sec-WebSocket-Accept: %(sec_key)s\r\n\r\n'
)
try:
sec_key = request.headers['Sec-WebSocket-Key']
sec_key = sec_key.encode('ascii') + magic.encode('ascii')
sec_key = base64.b64encode(hashlib.sha1(sec_key).digest()).decode('ascii')
response = response_template % {'sec_key': sec_key}
with self._lock:
self._queue_message_locked(response.encode('ascii'))
self._handshaked = True
self._log_info('Upgraded to WebSocket')
except Exception as e:
self._log_error('WebSocket handshake error: %s', e)
self.close()
def _get_asset(self, path):
if not path or '..' in path:
return 'Nice try'.encode('ascii')
if path == '/':
path = 'index.html'
elif path[0] == '/':
path = path[1:]
path = os.path.join(os.path.dirname(__file__), 'assets', path)
try:
with open(path, 'rb') as asset:
return asset.read()
except:
return b''
class _AnnexbClient(_Client):
def __init__(self, server, socket, addr):
_Client.__init__(self, server, socket, addr)
def start(self):
super().start()
with self._lock:
self._streaming = True
self._server._stream_control(True)
def _queue_codec_data_locked(self, resolution, data):
return self._queue_message_locked(data)
def _queue_frame_data_locked(self, is_key, seq, pts, data):
return self._queue_message_locked(data)
def _queue_inference_data_locked(self, data):
# Silently drop inference data.
return False
def _handle_message(self, message):
pass
def _send_message(self, message):
self._socket.sendall(message)
def _receive_message(self):
try:
buf = self._socket.recv(1024)
if not buf:
return None
else:
return buf
except:
return None
class InferenceData(object):
def __init__(self):
self._message = pb2.ClientBound()
self._message.stream_data.inference_data.SetInParent()
def _get_color(value):
if isinstance(value, int):
return value
if isinstance(value, tuple):
if len(value) == 3:
color = 0xFF000000
color |= (value[0] & 0xff) << 16
color |= (value[1] & 0xff) << 8
color |= (value[2] & 0xff) << 0
return color
if len(value) == 4:
color = 0
color |= (value[0] & 0xff) << 24
color |= (value[1] & 0xff) << 16
color |= (value[2] & 0xff) << 8
color |= (value[3] & 0xff) << 0
return color
return 0xFFFFFFFF
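    # Worked examples for the packing above (illustrative comments only):
    #   _get_color((255, 0, 0))       -> 0xFFFF0000  (RGB tuple, alpha forced to 0xFF)
    #   _get_color((0x80, 255, 0, 0)) -> 0x80FF0000  (ARGB tuple, half-transparent red)
    #   _get_color(0xFF00FF00)        -> 0xFF00FF00  (ints pass through unchanged)
    #   anything else                 -> 0xFFFFFFFF  (opaque white fallback)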
def add_rectangle(self, x, y, w, h, color, weight):
element = self._message.stream_data.inference_data.elements.add()
element.rectangle.x = x
element.rectangle.y = y
element.rectangle.w = w
element.rectangle.h = h
element.rectangle.color = InferenceData._get_color(color)
element.rectangle.weight = weight
def add_label(self, text, x, y, color, size):
element = self._message.stream_data.inference_data.elements.add()
element.label.text = text
element.label.x = x
element.label.y = y
element.label.color = InferenceData._get_color(color)
element.label.size = size
def GetMessage(self):
return self._message
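# Hedged usage sketch (the overlay values below are made up): build an overlay
# and push it to connected clients through a running StreamingServer.
#
#   data = InferenceData()
#   data.add_rectangle(0.25, 0.25, 0.5, 0.5, color=(255, 0, 0), weight=2)
#   data.add_label('face', 0.25, 0.2, color=(255, 255, 255), size=16)
#   server.send_inference_data(data)  # `server` is an already-running StreamingServer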
|
# -*- coding: utf-8 -*-
##### import from other modules
from checkdata import *
from configurationplusfiles_runner import input_data_inst, config, output_data_inst
##### running functions called from elsewhere #####
tops = TopsAvailable(input_data_inst, config)
print("finding unique tops:")
tops.find_unique_tops_list()
#### Take out wells with no tops. This assumes some data structures that might not exist in your data, so check the code!
tops.take_out_wells_with_no_tops()
tops_counts = tops.get_df_of_top_counts_in_picks_df()
print("tops_counts = ", tops_counts)
print("number of wells with any tops:")
tops.get_number_wells_with_any_top()
#### Will print: returning the list of well names that have the required tops and the length of that list. #### If this number is too small, consider changing the required tops in the configuration object.
test = tops.findWellsWithAllTopsGive()
##### Example use pattern if you just want to instantiate the class and run all the functions using variables defined in the configuration object
#### This just creates a class instance and then calls run_all()
new_tops2 = TopsAvailable(input_data_inst, config)
wells_with_required_tops = new_tops2.run_all()
print("first well that meets requirements:", wells_with_required_tops[0])
print("number of wells that meet requirements so far:", len(wells_with_required_tops))
print("configuration variables so far, gotten by printing vars(config):", vars(config))
#### Find & understand available curves
curvesInst2 = CurvesAvailable(input_data_inst, config)
curves_results = curvesInst2.run_all()
print("curves_results.keys():", curves_results.keys())
print(
"curves_results['wellsWithWantedCurves'][0:5]",
curves_results["wellsWithWantedCurves"][0:5],
)
print("len(curves_results['wellsWithWantedCurves'])")
len(curves_results["wellsWithWantedCurves"])
print("vars(curvesInst2).keys()", vars(curvesInst2).keys())
curvesInst2.config.threshold_returnCurvesThatArePresentInThisManyWells = 1916
onlyPlentifulCurvesArray = curvesInst2.getCurvesInMinNumberOfWells()
print("onlyPlentifulCurvesArray:", onlyPlentifulCurvesArray)
wells_with_tops_and_curves = list(
set(wells_with_required_tops).intersection(curves_results["wellsWithWantedCurves"])
)
print("len(wells_with_tops_and_curves)", len(wells_with_tops_and_curves))
objectOfCurves = curves_results["objectOfCurves"]
wellsWithNeededCurvesList = findWellsWithCertainCurves(
objectOfCurves, onlyPlentifulCurvesArray
)
print("number of wells with all the required curves is", len(wellsWithNeededCurvesList))
#### NOTE! when we import the wells for real, we should add in the wells that have DEPTH instead of DEPT and rename the curve to DEPT!
print(onlyPlentifulCurvesArray)
newCurveList = getCurvesListWithDifferentCurveName(
onlyPlentifulCurvesArray, "DEPT", "DEPTH"
)
print("newCurveList", newCurveList)
wellsWithNeededCurvesListButDEPTHinsteadDEPT = findWellsWithCertainCurves(
objectOfCurves, newCurveList
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesListButDEPTHinsteadDEPT),
)
#### Hmmm, zero? Let's see if we can get those 7 wells that we know have DEPTH instead of DEPT to appear if we reduce the other curve names?
wellsWithNeededCurvesListButDEPTHinsteadDEPT0 = findWellsWithCertainCurves(
objectOfCurves, ["GR", "DEPTH"]
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesListButDEPTHinsteadDEPT),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT1 = findWellsWithCertainCurves(
objectOfCurves, ["GR", "DEPT"]
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesListButDEPTHinsteadDEPT),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT2 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "NPHI", "GR", "DEPT"]
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesListButDEPTHinsteadDEPT),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT3 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "GR", "DPHI", "DEPT"]
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesListButDEPTHinsteadDEPT),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT4 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "GR", "DEPT"]
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesListButDEPTHinsteadDEPT),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT5 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "GR", "DEPTH"]
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesListButDEPTHinsteadDEPT),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT6 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "NPHI", "GR", "DPHI", "DEPT"]
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesListButDEPTHinsteadDEPT),
)
wellsWithNeededCurvesListButDEPTHinsteadDEPT7 = findWellsWithCertainCurves(
objectOfCurves, ["ILD", "NPHI", "GR", "DEPT"]
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesListButDEPTHinsteadDEPT),
)
#### final try
print("final version:")
wellsWithNeededCurvesList_real = findWellsWithCertainCurves(
objectOfCurves, config.must_have_curves_list
)
print(
"number of wells with all the required curves but DEPTH instead of DEPT is",
len(wellsWithNeededCurvesList_real),
)
print(
"wellsWithNeededCurvesList_real, first 3 wells:",
wellsWithNeededCurvesList_real[0:3],
)
#### Make list of wells that includes both the minimum required curves & minimum required tops
#### These two lists are different. One is SITEID, the other is the LAS file name. We'll convert them in the function below, find the ones in common, and return that as a new list of wells.
# WellsWithGivenTopsCurves = findWellsWithGivenTopsCurves(input_data_inst.wells_df,wells_with_required_tops,wellsWithNeededCurvesList_real)
# print("len(WellsWithGivenTopsCurves)",len(WellsWithGivenTopsCurves))
wells_with_required_tops_and_curves_list = list(
set(wells_with_required_tops).intersection(wellsWithNeededCurvesList_real)
)
print("length wells_test", len(wells_with_required_tops_and_curves_list))
print("wells_test = ", wells_with_required_tops_and_curves_list)
# print("wells with needed curves list real",wellsWithNeededCurvesList_real)
# print("wells wells_with_required_tops",wells_with_required_tops)
#### NOW LETS SAVE RESULTS
print(
"type of wells_with_required_tops_and_curves_list",
type(wells_with_required_tops_and_curves_list),
)
wells_with_required_tops_and_curves_list_df = pd.DataFrame(
np.array(wells_with_required_tops_and_curves_list), columns=["wells"]
)
print("type", type(wells_with_required_tops_and_curves_list_df))
checkdata_path_results = (
output_data_inst.base_path_for_all_results
+ "/"
+ output_data_inst.path_checkData
+ "/"
+ "wellnames_with_required_tops_and_curves_list.h5"
)
print("will now save results in hdf5 file in:", checkdata_path_results)
key_for_file_path_for_results = "wellsWTopsCurves"
print("key for hdf file is", key_for_file_path_for_results)
wells_with_required_tops_and_curves_list_df.to_hdf(
checkdata_path_results, key=key_for_file_path_for_results, mode="w"
)
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from factory import Sequence
from factory.django import DjangoModelFactory
RecommendationTag = swapper.load_model('accelerator', 'RecommendationTag')
class RecommendationTagFactory(DjangoModelFactory):
class Meta:
model = RecommendationTag
text = Sequence(lambda n: "tag_{0}".format(n))
|
# LOAD ENV VARIABLES USING DOTENV
import dotenv
dotenv.load_dotenv(".env")
# APP
from app.main import app
import os
APP_PORT = int(os.getenv("APP_PORT"))
def run():
print("it works")
|
import tensorflow as tf
import functools
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# import io
# import itertools
# import numpy as np
def lazy_scope(function):
"""Creates a decorator for methods that makes their return values load lazily.
A method with this decorator will only compute the return value once when called
for the first time. Afterwards, the value will be cached as an object attribute.
Inspired by: https://danijar.com/structuring-your-tensorflow-models
Args:
function (func): Function to be decorated.
Returns:
decorator: Decorator for the function.
"""
attribute = "_cache_" + function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(function.__name__):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
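# Minimal usage sketch of the decorator above (the class and property names are
# hypothetical, not part of this module):
#
#   class Model:
#       def __init__(self, x):
#           self.x = x
#           self.double  # first access creates the op inside scope "double" and caches it
#
#       @lazy_scope
#       def double(self):
#           return tf.multiply(self.x, 2)
#
# Later reads of `model.double` return the cached "_cache_double" attribute
# instead of adding new ops to the graph.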
class somTF:
def __init__(self, inputs=None, latent_dim=None , som_dim=None, learning_rate=None,
decay_factor=None, decay_steps=None, batch_size = None, alpha = None):
self.inputs = inputs
self.som_dim = som_dim
self.latent_dim = latent_dim
self.learning_rate = learning_rate
self.decay_factor = decay_factor
self.decay_steps = decay_steps
self.batch_size = batch_size
self.alpha = alpha
self.codebook
self.global_step
self.k_index
# self.loss_bmu
# self.loss_neigh
self.loss_bmu
self.loss_bmu_up
self.loss_bmu_bottom
self.loss_bmu_right
self.loss_bmu_left
self.loss
self.optimize
# self.plot_neurons
@lazy_scope
def codebook(self):
"""Creates variable for the SOM embeddings."""
embeddings = tf.get_variable("codebook", [self.som_dim[0]*self.som_dim[1] , self.latent_dim],
initializer=tf.truncated_normal_initializer(stddev=0.05))
tf.compat.v1.summary.tensor_summary("codebook", embeddings)
return embeddings
@lazy_scope
def global_step(self):
"""Creates global_step variable for the optimization."""
global_step = tf.Variable(0, trainable=False, name="global_step")
return global_step
@lazy_scope
def k_index(self):
squared_distances = tf.math.squared_difference(tf.expand_dims(self.inputs, 1),
tf.expand_dims(self.codebook, 0))
component_sum = tf.reduce_sum(input_tensor=squared_distances, axis=-1)
min_idx = tf.argmin(input=component_sum, axis=-1)
k_0 = min_idx // self.som_dim[0]
k_1 = min_idx % self.som_dim[1]
tf.compat.v1.summary.histogram("bmu_0", k_0)
tf.compat.v1.summary.histogram("bmu_1", k_1)
return min_idx
@lazy_scope
def BMU(self):
min_idx = self.k_index
nearest_neuron = tf.gather(params=self.codebook, indices=min_idx)
return nearest_neuron
# @lazy_scope
# def neighbors(self):
# min_idx = self.k_index
# nearest_neuron = tf.gather(params=self.codebook, indices=min_idx)
# up_nearest_neuron = tf.gather(params=self.codebook, indices=self.select_up)
# bottom_nearest_neuron = tf.gather(params=self.codebook, indices=self.select_bottom)
# left_nearest_neuron = tf.gather(params=self.codebook, indices=self.select_left)
# right_nearest_neuron = tf.gather(params=self.codebook, indices=self.select_right)
# # k_1 = min_idx // self.som_dim[0]
# # k_2 = min_idx % self.som_dim[1]
# # k_stacked = tf.stack([k_1, k_2], axis=1)
# # k1_not_top = tf.less(k_1, tf.constant(self.som_dim[0] - 1, dtype=tf.int64))
# # k1_not_bottom = tf.greater(k_1, tf.constant(0, dtype=tf.int64))
# # k2_not_right = tf.less(k_2, tf.constant(self.som_dim[1] - 1, dtype=tf.int64))
# # k2_not_left = tf.greater(k_2, tf.constant(0, dtype=tf.int64))
# # k1_up = tf.where(k1_not_top, tf.add(k_1, 1), k_1)
# # k1_down = tf.where(k1_not_bottom, tf.subtract(k_1, 1), k_1)
# # k2_right = tf.where(k2_not_right, tf.add(k_2, 1), k_2)
# # k2_left = tf.where(k2_not_left, tf.subtract(k_2, 1), k_2)
# # nearest_neuron = tf.gather_nd(params=self.codebook, indices=k_stacked, batch_dims=self.batch_size)
# # up_nearest_neuron = tf.where(k1_not_top, tf.gather_nd(self.codebook, tf.stack([k1_up, k_2], axis=1),
# # batch_dims=self.batch_size),
# # tf.zeros([self.batch_size, self.latent_dim]))
# # bottom_nearest_neuron = tf.where(k1_not_bottom, tf.gather_nd(self.codebook, tf.stack([k1_down, k_2], axis=1),
# # batch_dims=self.batch_size),
# # tf.zeros([self.batch_size, self.latent_dim]))
# # right_nearest_neuron = tf.where(k2_not_right, tf.gather_nd(self.codebook, tf.stack([k_1, k2_right], axis=1),
# # batch_dims=self.batch_size),
# # tf.zeros([self.batch_size, self.latent_dim]))
# # left_nearest_neuron = tf.where(k2_not_left, tf.gather_nd(self.codebook, tf.stack([k_1, k2_left], axis=1),
# # batch_dims=self.batch_size),
# # tf.zeros([self.batch_size, self.latent_dim]))
# som_neighbors = tf.stack([nearest_neuron, up_nearest_neuron, bottom_nearest_neuron, left_nearest_neuron,
# right_nearest_neuron], axis=1)
# return som_neighbors
@lazy_scope
def loss_bmu(self):
loss_som = tf.reduce_mean(
tf.squared_difference(self.inputs, self.BMU))
tf.compat.v1.summary.scalar("bmu_loss", loss_som)
return loss_som
@lazy_scope
def loss_bmu_up(self):
bmu = self.k_index
_bmu = tf.unravel_index(bmu,[self.som_dim[0], self.som_dim[1]])
zeros = tf.zeros_like(_bmu[0])
mask = tf.greater(_bmu[0], zeros) # boolean tensor, mask[i] = True iff x[i] > 0
_bmu_movable_sx = tf.boolean_mask(_bmu[0], mask)
_bmu_movable_dx = tf.boolean_mask(_bmu[1], mask)
_bmu_raveled = tf.multiply(tf.add(_bmu_movable_sx, -1),self.som_dim[0]) + (_bmu_movable_dx % self.som_dim[1])
_inputs_idx = tf.boolean_mask(self.inputs, mask)
current_loss = tf.reduce_mean(
tf.squared_difference(_inputs_idx, tf.gather(self.codebook, _bmu_raveled)))
return current_loss
# return _bmu_raveled
@lazy_scope
def loss_bmu_bottom(self):
bmu = self.k_index
_bmu = tf.unravel_index(bmu,[self.som_dim[0], self.som_dim[1]])
bottomers = tf.multiply(tf.ones_like(_bmu[0]), self.som_dim[1])
        mask = tf.less(_bmu[0], bottomers)  # boolean tensor, mask[i] = True iff the BMU row index is < som_dim[1]
_bmu_movable_sx = tf.boolean_mask(_bmu[0], mask)
_bmu_movable_dx = tf.boolean_mask(_bmu[1], mask)
_bmu_raveled = tf.multiply(tf.add(_bmu_movable_sx, 1),self.som_dim[0]) + (_bmu_movable_dx % self.som_dim[1])
_inputs_idx = tf.boolean_mask(self.inputs, mask)
current_loss = tf.reduce_mean(
tf.squared_difference(_inputs_idx, tf.gather(self.codebook, _bmu_raveled)))
return current_loss
@lazy_scope
def loss_bmu_left(self):
bmu = self.k_index
_bmu = tf.unravel_index(bmu,[self.som_dim[0], self.som_dim[1]])
zeros = tf.zeros_like(_bmu[1])
mask = tf.greater(_bmu[1], zeros) # boolean tensor, mask[i] = True iff x[i] > 0
_bmu_movable_sx = tf.boolean_mask(_bmu[0], mask)
_bmu_movable_dx = tf.boolean_mask(_bmu[1], mask)
_bmu_raveled = tf.multiply(_bmu_movable_sx,self.som_dim[0]) + (tf.add(_bmu_movable_dx, -1) % self.som_dim[1])
_inputs_idx = tf.boolean_mask(self.inputs, mask)
current_loss = tf.reduce_mean(
tf.squared_difference(_inputs_idx, tf.gather(self.codebook, _bmu_raveled)))
return current_loss
@lazy_scope
def loss_bmu_right(self):
bmu = self.k_index
_bmu = tf.unravel_index(bmu,[self.som_dim[0], self.som_dim[1]])
righters = tf.multiply(tf.ones_like(_bmu[1]), self.som_dim[1])
        mask = tf.less(_bmu[1], righters)  # boolean tensor, mask[i] = True iff the BMU column index is < som_dim[1]
_bmu_movable_sx = tf.boolean_mask(_bmu[0], mask)
_bmu_movable_dx = tf.boolean_mask(_bmu[1], mask)
_bmu_raveled = tf.multiply(_bmu_movable_sx,self.som_dim[0]) + (tf.add(_bmu_movable_dx, 1) % self.som_dim[1])
_inputs_idx = tf.boolean_mask(self.inputs, mask)
current_loss = tf.reduce_mean(
tf.squared_difference(_inputs_idx, tf.gather(self.codebook, _bmu_raveled)))
return current_loss
@lazy_scope
def loss(self):
loss = (self.loss_bmu + self.alpha*self.loss_bmu_up + self.alpha*self.loss_bmu_bottom
+ self.alpha*self.loss_bmu_left + self.alpha*self.loss_bmu_right )
tf.compat.v1.summary.scalar("loss", loss)
return loss
@lazy_scope
def optimize(self):
"""Optimizes the model's loss using Adam with exponential learning rate decay."""
lr_decay = tf.compat.v1.train.exponential_decay(self.learning_rate, self.global_step, self.decay_steps, self.decay_factor, staircase=True)
optimizer = tf.compat.v1.train.AdamOptimizer(lr_decay)
train_step = optimizer.minimize(self.loss, global_step=self.global_step)
return train_step
# @lazy_scope
# def plot_neurons(self):
# codebook = np.reshape(self.codebook.eval(), [self.som_dim[0], self.som_dim[1],28,28])
# fig = plt.figure(figsize=(20,20))
# fig.subplots_adjust(hspace=0.2, wspace=0.05)
# k = 0
# for i, j in itertools.product(range(self.som_dim[0]), range(self.som_dim[1])):
# ax = fig.add_subplot(self.som_dim[0], self.som_dim[1], k + 1)
# ax.matshow(codebook[i,j])
# ax.get_xaxis().set_visible(False)
# ax.get_yaxis().set_visible(False)
# k += 1
# plt.show()
# buf = io.BytesIO()
# plt.savefig(buf, format='png')
# plt.savefig('neurons.png')
# # Closing the figure prevents it from being displayed directly inside
# # the notebook.
# plt.close(fig)
# buf.seek(0)
# # Convert PNG buffer to TF image
# image = tf.image.decode_png(buf.getvalue(), channels=1)
# # Add the batch dimension
# image = tf.expand_dims(image, 0)
# tf.compat.v1.summary.image("SOM Neurons", image) |
import logging
import threading
import requests
logger = logging.getLogger(__name__)
_threadlocal = threading.local()
def get_http_session():
""" A thread-safe wrapper for requests.session"""
try:
return _threadlocal.session
except AttributeError:
logger.debug("Starting new HTTP Session")
_threadlocal.session = requests.Session()
return get_http_session()
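# Hedged usage sketch: each thread lazily creates and then reuses its own Session,
# keeping connection pooling per-thread. The URL is a placeholder.
#
#   def worker():
#       session = get_http_session()
#       session.get("https://example.com/api")  # hypothetical endpoint
#
#   threads = [threading.Thread(target=worker) for _ in range(4)]
#   for t in threads:
#       t.start()
#   for t in threads:
#       t.join()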
|
# -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class UpdateNetworkDeviceWirelessRadioSettingsModel(object):
"""Implementation of the 'updateNetworkDeviceWirelessRadioSettings' model.
TODO: type model description here.
Attributes:
rf_profile_id (int): The ID of an RF profile to assign to the device.
If the value of this parameter is null, the appropriate basic RF
profile (indoor or outdoor) will be assigned to the device.
Assigning an RF profile will clear ALL manually configured
overrides on the device (channel width, channel, power).
"""
# Create a mapping from Model property names to API property names
_names = {
"rf_profile_id":'rfProfileId'
}
def __init__(self,
rf_profile_id=None):
"""Constructor for the UpdateNetworkDeviceWirelessRadioSettingsModel class"""
# Initialize members of the class
self.rf_profile_id = rf_profile_id
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
rf_profile_id = dictionary.get('rfProfileId')
# Return an object of this model
return cls(rf_profile_id)
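# Illustrative round-trip (not part of the generated SDK): build the model from a
# deserialized API response and read back the mapped attribute.
#
#   payload = {'rfProfileId': 1234}  # hypothetical response body
#   settings = UpdateNetworkDeviceWirelessRadioSettingsModel.from_dictionary(payload)
#   assert settings.rf_profile_id == 1234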
|
import pandas as pd
import sys
dataset = pd.read_csv(sys.argv[1])
df = pd.DataFrame(dataset)
from sklearn.preprocessing import LabelEncoder
label = LabelEncoder()
df['college'] = label.fit_transform(df['College name'])
df['Areas'] = label.fit_transform(df['Areas of interest'])
df['Standard'] = label.fit_transform(df['Which-year are you studying in?'])
df['gender'] = label.fit_transform(df['Gender'])
df['major'] = label.fit_transform(df['Major/Area of Study'])
df['Creteria'] = label.fit_transform(df['Label'])
# Finding the best binary classifier to classify data into "eligible/1" and "not eligible/0"
import sklearn
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
data=df[["CGPA/ percentage","Rate your written communication skills [1-10]","Rate your verbal communication skills [1-10]","Standard","gender","major"]]
target=df['Creteria']
X_train, X_test,y_train,y_test = train_test_split(data,target,random_state =42)
model=RandomForestClassifier(n_estimators=10)
model.fit(X_train, y_train)
expected = y_test
predicted = model.predict(X_test)
print(f1_score(expected,predicted))
|
from .pgf import main |
class BoaException(Exception):
"""Base exception for Boa application"""
class InvalidSourceException(BoaException):
"""Invalid source provided"""
class InvalidDestinationException(BoaException):
"""Invalid destination provided"""
|
from csrv.model import actions
from csrv.model import cost
from csrv.model import timing_phases
from csrv.model.cards import agenda
from csrv.model.cards import card_info
class DoMeatDamage(actions.Action):
"""Do meat damage to the runner."""
DESCRIPTION = '[Click] Do 1 meat damage'
def __init__(self, game, player, card=None):
cost_obj = cost.Cost(game, player, clicks=1)
actions.Action.__init__(self, game, player, card=card, cost=cost_obj)
def is_usable(self):
return actions.Action.is_usable(self) and self.game.runner.tags
def resolve(self, response=None, ignore_clicks=False, ignore_all_costs=False):
actions.Action.resolve(
self, response,
ignore_clicks=ignore_clicks,
ignore_all_costs=ignore_all_costs)
self.game.insert_next_phase(
timing_phases.TakeMeatDamage(self.game, self.game.runner, 1))
class Card01107(agenda.Agenda):
NAME = u'Card01107'
SET = card_info.CORE
NUMBER = 107
SIDE = card_info.CORP
FACTION = card_info.NEUTRAL
INFLUENCE = 0
UNIQUE = False
KEYWORDS = set([
card_info.SECURITY,
])
ADVANCEMENT_REQUIREMENT = 4
AGENDA_POINTS = 2
IMAGE_SRC = '01107.png'
WHEN_IN_CORP_SCORE_AREA_PROVIDES_CHOICES_FOR = {
timing_phases.CorpTurnActions: 'meat_damage_actions',
}
def build_actions(self):
agenda.Agenda.build_actions(self)
self._do_meat_damage = DoMeatDamage(
self.game, self.player, card=self)
def meat_damage_actions(self):
return [self._do_meat_damage]
|
# https://quantsoftware.gatech.edu/CartPole_DQN
import numpy as np
from keras.layers import Input, Dense
from keras.optimizers import RMSprop
from keras.models import Model
from collections import deque
class DQNAgent:
def __init__(self, input_dim, output_dim, learning_rate=.005,
mem_size=5000, batch_size=64, gamma=.99, decay_rate=.0002):
# Save instance variables.
self.input_dim = input_dim
self.output_dim = output_dim
self.batch_size = batch_size
self.gamma = gamma
self.decay_rate = decay_rate
# Define other instance variables.
self.explore_p = 1 # The current probability of taking a random action.
self.memory = deque(maxlen=mem_size) # Define our experience replay bucket as a deque with size mem_size.
# Define and compile our DQN. This network has 3 layers of 24 nodes. This is sufficient to solve
# CartPole, but you should definitely tweak the architecture for other implementations.
input_layer = Input(shape=(input_dim,))
hl = Dense(24, activation="relu")(input_layer)
hl = Dense(24, activation="relu")(hl)
hl = Dense(24, activation="relu")(hl)
output_layer = Dense(output_dim, activation="linear")(hl)
self.model = Model(input_layer, output_layer)
self.model.compile(loss="mse", optimizer=RMSprop(lr=learning_rate))
def act(self, state):
# First, decay our explore probability
self.explore_p *= 1 - self.decay_rate
# With probability explore_p, randomly pick an action
if self.explore_p > np.random.rand():
return np.random.randint(self.output_dim)
# Otherwise, find the action that should maximize future rewards according to our current Q-function policy.
else:
return np.argmax(self.model.predict(np.array([state]))[0])
def remember(self, state, action, next_state, reward):
# Create a blank state. Serves as next_state if this was the last experience tuple before the epoch ended.
terminal_state = np.array([None]*self.input_dim)
# Add experience tuple to bucket. Bucket is a deque, so older tuple falls out on overflow.
self.memory.append((state, action, terminal_state if next_state is None else next_state, reward))
def replay(self):
# Only conduct a replay if we have enough experience to sample from.
if len(self.memory) < self.batch_size:
return
# Pick random indices from the bucket without replacement. batch_size determines number of samples.
idx = np.random.choice(len(self.memory), size=self.batch_size, replace=False)
minibatch = np.array(self.memory)[idx]
# Extract the columns from our sample
states = np.array(list(minibatch[:,0]))
actions = minibatch[:,1]
next_states = np.array(list(minibatch[:,2]))
rewards = np.array(minibatch[:,3])
# Compute a new estimate for each Q-value. This uses the second half of Bellman's equation.
estimate = rewards + self.gamma * np.amax(self.model.predict(next_states), axis=1)
# Get the network's current Q-value predictions for the states in this sample.
predictions = self.model.predict(states)
# Update the network's predictions with the new predictions we have.
for i in range(len(predictions)):
            # Flag states as terminal (the last state before an episode ended).
terminal_state = (next_states[i] == np.array([None]*self.input_dim)).all()
# Update each state's Q-value prediction with our new estimate.
# Terminal states have no future, so set their Q-value to their immediate reward.
predictions[i][actions[i]] = rewards[i] if terminal_state else estimate[i]
# Propagate the new predictions through our network.
self.model.fit(states, predictions, epochs=1, verbose=0)
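# Hedged training-loop sketch for the agent above; `gym` and the CartPole
# environment are assumptions and are not imported by this module.
#
#   import gym
#   env = gym.make('CartPole-v1')
#   agent = DQNAgent(input_dim=4, output_dim=2)
#   for episode in range(500):
#       state = env.reset()
#       done = False
#       while not done:
#           action = agent.act(state)
#           next_state, reward, done, _ = env.step(action)
#           agent.remember(state, action, None if done else next_state, reward)
#           agent.replay()
#           state = next_state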
|
from __future__ import division
import chiplotle as ch
plotter = ch.instantiate_plotters()[0]
# c = ch.shapes.circle(300, 100)
# ch.transforms.offset(c, (1500, 1000))
# plotter.write(c)
plotter.write(ch.hpgl.VS(vel=1))
plotter.write(ch.hpgl.FS(force=8))
plotter.pen_up([(6000, 1000)])
plotter.write(ch.hpgl.PD())
plotter.write(ch.hpgl.AR((700, 0), -180))
# plotter.write(ch.hpgl.LB("Hello, World"))
# plotter.write(ch.hpgl.CI(600))
plotter.pen_up([(0,0)])
|
#!/usr/bin/env python
#
# Copyright 2015, 2016 Thomas Timm Andersen (original version)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import math
import roslib; roslib.load_manifest('ur_driver')
import rospy
import actionlib
from openpyxl import Workbook
from control_msgs.msg import *
from trajectory_msgs.msg import *
from sensor_msgs.msg import JointState
from math import pi
import numpy as np
import tensorflow as tf
import numpy as np
from scipy.integrate import odeint
from math import exp
# From Files
from object_detection import IntelRealsense
from universal_robot_kinematics import invKine
from kinematics import fwd_kin
from last_kalman_filter import *
IntelRealsense = IntelRealsense()
JOINT_NAMES = ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint',
'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
home = [0, -pi/2, pi/2, 0, pi/2, pi]
straight = [0, -pi/2, 0, -pi/2, 0, 0]
client = None
# Kanut Thummaraksa
#### Input Tensors ####
## Common Input ##
s = tf.placeholder(tf.float64,name='s')
tau = tf.placeholder(tf.float64,name='tau')
xg = tf.placeholder(tf.float64,name='xg')
yg = tf.placeholder(tf.float64,name='yg')
zg = tf.placeholder(tf.float64,name='zg')
## joints ##
g = (tf.placeholder(tf.float64,name='g1'),
tf.placeholder(tf.float64,name='g2'),
tf.placeholder(tf.float64,name='g3'),
tf.placeholder(tf.float64,name='g4'),
tf.placeholder(tf.float64,name='g5'),
tf.placeholder(tf.float64,name='g6'))
q = (tf.placeholder(tf.float64,name='q1'),
tf.placeholder(tf.float64,name='q2'),
tf.placeholder(tf.float64,name='q3'),
tf.placeholder(tf.float64,name='q4'),
tf.placeholder(tf.float64,name='q5'),
tf.placeholder(tf.float64,name='q6'))
qd = (tf.placeholder(tf.float64,name='qd1'),
tf.placeholder(tf.float64,name='qd2'),
tf.placeholder(tf.float64,name='qd3'),
tf.placeholder(tf.float64,name='qd4'),
tf.placeholder(tf.float64,name='qd5'),
      tf.placeholder(tf.float64,name='qd6'))
q0 = (tf.placeholder(tf.float64,name='q01'),
tf.placeholder(tf.float64,name='q02'),
tf.placeholder(tf.float64,name='q03'),
tf.placeholder(tf.float64,name='q04'),
tf.placeholder(tf.float64,name='q05'),
tf.placeholder(tf.float64,name='q06'))
def canoSystem(tau,t):
alpha_s = 4
s = exp(-tau*alpha_s*t)
return s
def dmp(g,q,qd,tau,s,q0,W,Name = "DMP"):
alpha = tf.constant(25,dtype=tf.float64)
beta = alpha/4
w,c,h = W
n_gaussian = w.shape[0]
with tf.name_scope(Name):
w_tensor = tf.constant(w,dtype=tf.float64,name='w')
c_tensor = tf.constant(c,dtype=tf.float64,name='c')
h_tensor = tf.constant(h,dtype=tf.float64,name='h')
with tf.name_scope('s'):
s_tensor = s*tf.ones(n_gaussian,dtype=tf.float64)
smc_pow = tf.pow(s_tensor-c_tensor,2)
h_smc_pow = tf.math.multiply(smc_pow,(-h_tensor))
with tf.name_scope('psi'):
psi = tf.math.exp(h_smc_pow)
sum_psi = tf.math.reduce_sum(psi,0)
wpsi = tf.math.multiply(w_tensor,psi)
wpsis = tf.math.reduce_sum(wpsi*s,0)
with tf.name_scope('fs'):
fs =wpsis/sum_psi
qdd = alpha*(beta*(g-q)-tau*qd)+fs*(g-q0)
return qdd
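# Hedged reading of the transformation system above (standard DMP form):
#   qdd = alpha * (beta * (g - q) - tau * qd) + f(s) * (g - q0)
# with forcing term f(s) = s * sum_i(w_i * psi_i(s)) / sum_i(psi_i(s)) and basis
# functions psi_i(s) = exp(-h_i * (s - c_i)^2); the weights (w, c, h) are loaded
# per joint from the demonstration files below.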
#### Movement Library #####
dmps = [{},{},{},{},{},{}]
for i in range(15):
path = 'Demonstration/Demo{}/Weights/'.format(i+1)
for j in range(6): ### j = joint number
path_j = path+'Joint{}/'.format(j+1)
w = np.load(path_j+'w.npy')
w = np.reshape(w,(len(w),))
c = np.load(path_j+'c.npy')
c = np.reshape(c,(len(c),))
h = np.load(path_j+'h.npy')
h = np.reshape(h,(len(h),))
W = (w,c,h)
# def dmp(g,q,qd,tau,s,q0,W,Name = "DMP"):
dmps[j]['{}_{}'.format(j+1,i+1)] =tf.reshape(dmp(g[j], q[j], qd[j], tau, s, q0[j], W, Name="DMP{}_{}".format(j+1,i+1)),(1,))
##### Final Catesian Position of Demonstration) #####
demo_x = np.array([-8.15926729e-01, -0.75961731, -0.3964087, -0.29553788, -0.04094927, -0.14693912, -0.41827111, -8.16843140e-01, -0.09284764, -0.57153495, -0.67251442, -0.36517125, -7.62308039e-01, -0.78029185, -6.57512038e-01])
demo_y = np.array([-2.96043917e-01, -0.18374539, 0.6690932, 0.21733157, 0.78624892, 0.7281835, -0.66857267, -2.92201916e-01, -0.77947085, -0.28442803, 0.36890422, -0.41997883, -1.20031233e-01, -0.19321253, -1.05877890e-01])
demo_z = np.array([-3.97988321e-03, 0.35300285, 0.13734106, 0.1860831, 0.06178831, 0.06178831, 0.10958549, -5.64177448e-03, 0.0383235, 0.33788756, 0.30410704, 0.47738503, 8.29937352e-03, 0.17253172, 3.62063583e-01])
#### Contributin Functions ####
with tf.name_scope("Con"):
xg_ref = tf.constant(demo_x, dtype=tf.float64,name="x_con")
yg_ref = tf.constant(demo_y, dtype=tf.float64,name="y_con")
zg_ref = tf.constant(demo_z, dtype=tf.float64,name="z_con")
xg2 = tf.pow(xg_ref-xg, 2)
yg2 = tf.pow(yg_ref-yg, 2)
zg2 = tf.pow(zg_ref-zg, 2)
sum = xg2+yg2+zg2
con = 1.9947114020071635 * tf.math.exp(-0.5*sum/0.4472135954999579) # Normal Distribution
#### Gating Network #####
dmp_joint = []
dmpNet = []
for i in range(len(dmps)):
values = list(dmps[i].values())
joint = tf.concat(values, axis=0)
with tf.name_scope('DMPNet{}'.format(i+1)):
dmpNet_i = tf.reduce_sum(tf.math.multiply(joint,con),axis=0)/tf.reduce_sum(con, axis=0)
dmpNet.append(dmpNet_i)
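# Hedged reading of the gating above: each joint's command is a convex mixture of
# the 15 demonstration DMPs, weighted by how close the requested goal (xg, yg, zg)
# lies to each demonstration's final Cartesian position:
#   dmpNet_i = sum_k(dmp_i_k * con_k) / sum_k(con_k)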
# Supitcha Klanpradit
def move_dmp_path(path_from_ode,time_from_ode):
g = FollowJointTrajectoryGoal()
g.trajectory = JointTrajectory()
g.trajectory.joint_names = JOINT_NAMES
try:
for via in range(0,len(path_from_ode)):
joint_update = path_from_ode[via][0:6]
joint_update[0:5] = joint_update[0:5] - (joint_update[0:5]>math.pi)*2*math.pi + (joint_update[0:5]<-math.pi)*2*math.pi
# print('Step %d %s' % (via,joint_update))
g.trajectory.points.append(JointTrajectoryPoint(positions=joint_update, velocities=[0]*6, time_from_start=rospy.Duration(time_from_ode[via])))
client.send_goal(g)
client.wait_for_result()
except KeyboardInterrupt:
client.cancel_goal()
raise
except:
raise
def set_home(set_position=home, set_duration=10):
g = FollowJointTrajectoryGoal()
g.trajectory = JointTrajectory()
g.trajectory.joint_names = JOINT_NAMES
try:
g.trajectory.points = [JointTrajectoryPoint(positions=set_position, velocities=[0]*6, time_from_start=rospy.Duration(set_duration))]
client.send_goal(g)
client.wait_for_result()
except KeyboardInterrupt:
client.cancel_goal()
raise
except:
raise
# def cost_func(out_invKine):
# out_invKine[0:5,:] = out_invKine[0:5,:] - (out_invKine[0:5,:]>math.pi)*2*math.pi + (out_invKine[0:5,:]<-math.pi)*2*math.pi
# # print('inverse pingpong %s' %out_invKine)
# weight = [1, 1.2, 1.2, 1, 1, 1]
# weight = np.resize(weight,(6,8))
# cost = np.multiply(np.square(out_invKine), weight)
# cost = np.sum(cost, axis=0)
# # print('cost %s' %cost)
# index_minimum = np.argmin(cost)
# print('index minimum %s' %index_minimum)
# return [joint[0,index_minimum] for joint in out_invKine]
def cost_func(out_invKine):
print('Pre-Inverse Kinematics : %s' %out_invKine)
mean = [-0.12973529, -1.17866925, 1.6847758, -0.60829703, 1.53953145, 3.1315828]
out_invKine[0:5,:] = out_invKine[0:5,:] - (out_invKine[0:5,:]>math.pi)*2*math.pi + (out_invKine[0:5,:]<-math.pi)*2*math.pi
print('Inverse Kinematics : %s' %out_invKine)
mean = np.resize(mean,(6,8))
cost = np.square( np.add(out_invKine, mean) )
print('Pre-Cost : %s' %cost)
weight = [1.5, 1.5, 1.5, 1, 1, 1]
weight = np.resize(weight,(6,8))
cost = np.multiply(cost, weight)
    print('Weighted Cost : %s' %cost)
cost = np.sum(cost[0:5,:], axis=0)
print('cost %s' %cost)
index_minimum = np.argmin(cost)
print('Index Minimum %s' %index_minimum)
return [joint[0,index_minimum] for joint in out_invKine]
def choose_q(out_invKine, index):
out_invKine[0:5,:] = out_invKine[0:5,:] - (out_invKine[0:5,:]>math.pi)*2*math.pi + (out_invKine[0:5,:]<-math.pi)*2*math.pi
    # print(out_invKine)
for i in range(8):
q = [joint[0,i] for joint in out_invKine]
        print('q %s' % q)
print('fwd %s' %fwd_kin(q, o_unit = 'p'))
return [joint[0,index] for joint in out_invKine]
mu = 0
sigma_a_x = 0.8
sigma_a_y = 0.8
sigma_a_d = 0.8
rho_x = 0.1
rho_y = 0.1
rho_d = 0.1
rho_v_x = 0.5
rho_v_y = 0.5
rho_v_d = 0.5
def main():
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
def dynamics(x,t,tau_v,g_v,q0_v,grasping_point,sess):
position = grasping_point
s_v = canoSystem(tau_v,t)
feeddict = {g[0]:g_v[0],g[1]:g_v[1],g[2]:g_v[2],g[3]:g_v[3],g[4]:g_v[4],g[5]:g_v[5],
q[0]:x[0],q[1]:x[1],q[2]:x[2],q[3]:x[3],q[4]:x[4],q[5]:x[5],
qd[0]:x[6],qd[1]:x[7],qd[2]:x[8],qd[3]:x[9],qd[4]:x[10],qd[5]:x[11],
q0[0]:q0_v[0],q0[1]:q0_v[1],q0[2]:q0_v[2],q0[3]:q0_v[3],q0[4]:q0_v[4],q0[5]:q0_v[5],
tau:tau_v,s:s_v,xg:position[0],yg:position[1],zg:position[2]
}
qdd1_v,qdd2_v,qdd3_v,qdd4_v,qdd5_v,qdd6_v = sess.run(dmpNet,feed_dict = feeddict)
dx = [x[6],x[7],x[8],x[9],x[10],x[11],qdd1_v,qdd2_v,qdd3_v,qdd4_v,qdd5_v,qdd6_v]
return dx
period = 20
t = np.linspace(5, period, 20)
tau_v = float(1)/period
q0_v = [0, -pi/2, pi/2, 0, pi/2, pi]
v0 = [0,0,0,0,0,0]
x0 = []; x0.extend(q0_v); x0.extend(v0)
print("Initiate Object Detection")
STATE = 'NONE'
_,_,_, last_capture = IntelRealsense.pingpong_detection(display = False)
T_camera2base = IntelRealsense.transformation_camera2base()
T_end2base = IntelRealsense.transformation_end2base()
gripper = np.array([ [0],
[0],
[0.1],
[1] ])
Y = np.array([ [0],
[0],
[0],
[0],
[0],
[0] ])
X = np.array([ [np.random.normal(0, sigma_x)],
[np.random.normal(0, sigma_y)],
[np.random.normal(0, sigma_d)],
[np.random.normal(0, sigma_v_x)],
[np.random.normal(0, sigma_v_y)],
[np.random.normal(0, sigma_v_d)] ])
X, P,_ = init_state(X, Y, 0)
print('First State is %s' %X)
I = np.identity(6, dtype=float)
H = np.identity(6, dtype=float)
count = 0
global client
try:
rospy.init_node("dmp", anonymous=True, disable_signals=True)
client = actionlib.SimpleActionClient('follow_joint_trajectory', FollowJointTrajectoryAction)
client.wait_for_server()
print ("Connected to server")
parameters = rospy.get_param(None)
index = str(parameters).find('prefix')
if (index > 0):
prefix = str(parameters)[index+len("prefix': '"):(index+len("prefix': '")+str(parameters)[index+len("prefix': '"):-1].find("'"))]
for i, name in enumerate(JOINT_NAMES):
JOINT_NAMES[i] = prefix + name
book = Workbook()
sheet = book.active
excel_row = 1
while(True):
initial_time = time.time()
straight_inp = raw_input("Set Straight y/n?")
if straight_inp == 'y':
set_home(straight,5)
pp_x, pp_y, pp_depth, capture = IntelRealsense.pingpong_detection(display = True)
pingpong_camera = IntelRealsense.reverse_perspective_projection(pp_x, pp_y, pp_depth); pingpong_camera[3] = 1
processing = capture - last_capture; timestep = processing
pingpong_base = T_camera2base.dot(pingpong_camera)
v_x, v_depth, v_z, a_x, a_y, a_depth, STATE = IntelRealsense.pingpong_velocity(STATE, pingpong_base[0,0], pingpong_base[1,0], pingpong_base[2,0], capture, display = False)
last_capture = capture
# print('Real World Coordinates From KF \n %s \n' %pingpong_camera)
print('Ping-Pong Position (Base Frame) \n %s \n' %pingpong_base)
grasping_point = [pingpong_base[0], pingpong_base[1], pingpong_base[2]]
# print('grasping point %s' %grasping_point)
pingpong_base = pingpong_base - T_end2base.dot(gripper)
inp_invKine = IntelRealsense.transformation_end2base(pingpong_base[0,0],pingpong_base[1,0],pingpong_base[2,0])
inv_pingpong = invKine(inp_invKine)
print('IK \n %s \n' %inv_pingpong)
# grasping_q = cost_func(inv_pingpong)
grasping_q = choose_q(inv_pingpong,0)
print('Selected IK \n%s\n' %grasping_q)
g_v = grasping_q
fwd_grasping = fwd_kin(g_v, o_unit = 'p')
print('Selected FK \n%s\n' %fwd_grasping)
inp = raw_input("Set Home! (Enter to continue)")
set_home(set_duration = 10)
# inp = raw_input("Continue? y/n: ")[0]
# if (inp == 'y'):
# q_ode = odeint(dynamics,x0,t,args=(tau_v,g_v,q0_v,grasping_point,sess))
# q_ode = np.load('q_sample2.npy')
set_home(g_v, 3)
# print('q_ode %s' %q_ode)
# move_dmp_path(q_ode,t)
final_time = time.time()
print("Processing Time : %s" %(final_time-initial_time))
# else:
# print ("Halting program")
# break
# print('\n#############################################\n')
except KeyboardInterrupt:
rospy.signal_shutdown("KeyboardInterrupt")
raise
if __name__ == '__main__': main()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
# Load the data before fitting the scalers.
dataset=pd.read_csv("Position_Salaries.csv")
x=dataset.iloc[:,1:2].values
y=dataset.iloc[:,2].values.reshape(-1,1)
sc_x=StandardScaler()
sc_y=StandardScaler()
x=sc_x.fit_transform(x)
y=sc_y.fit_transform(y)
# The original script never defined `regressor`; an RBF-kernel SVR is assumed here.
regressor=SVR(kernel='rbf')
regressor.fit(x,y.ravel())
# Predict the salary for level 6.5 and map it back to the original scale.
y_pred=sc_y.inverse_transform(regressor.predict(sc_x.transform(np.array([[6.5]]))).reshape(-1,1))
plt.scatter(x,y,color='red')
plt.plot(x,regressor.predict(x),color='blue')
plt.title("truth or bluff")
plt.xlabel("position level")
plt.ylabel("salaries")
plt.show()
from unittest.mock import patch # allows us to sim whether db is ready or not
from django.core.management import call_command # call command in source
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""Test waiting for db when db is available"""
        # Patch will mock the behaviour of the assigned task.
        # We mock this function by returning True, which simulates the
        # return value of the assigned task. We can also use this to monitor
        # how often this command is called.
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""Test waiting for db"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
# patch has the ability to raise 'sideeffects' which are
# side effects to the function that we are mocking
gi.side_effect = [OperationalError] * 5 + [True]
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
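# Hedged sketch of the management command exercised by the tests above; the real
# implementation lives elsewhere in the project, so treat this only as an assumed shape.
#
#   # app/core/management/commands/wait_for_db.py (hypothetical path)
#   import time
#   from django.core.management.base import BaseCommand
#   from django.db import connections
#   from django.db.utils import OperationalError
#
#   class Command(BaseCommand):
#       """Pause execution until the database is available."""
#       def handle(self, *args, **options):
#           self.stdout.write('Waiting for database...')
#           db_conn = None
#           while not db_conn:
#               try:
#                   db_conn = connections['default']
#               except OperationalError:
#                   self.stdout.write('Database unavailable, waiting 1 second...')
#                   time.sleep(1)
#           self.stdout.write(self.style.SUCCESS('Database available!'))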
|
import logging
import operator
from dataclasses import dataclass
from functools import cached_property
from typing import List, Optional, Sequence, Tuple
from django.db.models import Q
from cache_memoize import cache_memoize
from cachetools import TTLCache, cachedmethod
from web3 import Web3
from gnosis.eth import EthereumClient, EthereumClientProvider
from gnosis.eth.ethereum_client import EthereumNetwork, InvalidERC20Info
from gnosis.eth.oracles import (BalancerOracle, CannotGetPriceFromOracle,
CurveOracle, KyberOracle, MooniswapOracle,
OracleException, SushiswapOracle,
UniswapOracle, UniswapV2Oracle)
from gnosis.eth.oracles.oracles import PriceOracle, PricePoolOracle
from safe_transaction_service.tokens.clients import (BinanceClient,
CannotGetPrice,
CoingeckoClient,
KrakenClient,
KucoinClient)
from safe_transaction_service.tokens.models import Token
from ..exceptions import NodeConnectionError
from ..models import EthereumEvent
logger = logging.getLogger(__name__)
class BalanceServiceException(Exception):
pass
@dataclass
class Erc20InfoWithLogo:
address: str
name: str
symbol: str
decimals: int
logo_uri: str
@classmethod
def from_token(cls, token: Token):
return cls(token.address,
token.name,
token.symbol,
token.decimals,
token.get_full_logo_uri())
@dataclass
class Balance:
token_address: Optional[str] # For ether, `token_address` is `None`
token: Optional[Erc20InfoWithLogo]
balance: int
@dataclass
class BalanceWithFiat(Balance):
fiat_balance: float
fiat_conversion: float
fiat_code: str = 'USD'
class BalanceServiceProvider:
def __new__(cls):
if not hasattr(cls, 'instance'):
from django.conf import settings
cls.instance = BalanceService(EthereumClientProvider(),
settings.ETH_UNISWAP_FACTORY_ADDRESS,
settings.ETH_KYBER_NETWORK_PROXY_ADDRESS)
return cls.instance
@classmethod
def del_singleton(cls):
if hasattr(cls, "instance"):
del cls.instance
class BalanceService:
def __init__(self, ethereum_client: EthereumClient,
uniswap_factory_address: str, kyber_network_proxy_address: str):
self.ethereum_client = ethereum_client
self.binance_client = BinanceClient()
self.coingecko_client = CoingeckoClient()
self.kraken_client = KrakenClient()
self.kucoin_client = KucoinClient()
self.curve_oracle = CurveOracle(self.ethereum_client) # Curve returns price in usd
self.kyber_oracle = KyberOracle(self.ethereum_client, kyber_network_proxy_address)
self.sushiswap_oracle = SushiswapOracle(self.ethereum_client)
self.uniswap_oracle = UniswapOracle(self.ethereum_client, uniswap_factory_address)
self.uniswap_v2_oracle = UniswapV2Oracle(self.ethereum_client)
self.balancer_oracle = BalancerOracle(self.ethereum_client, self.uniswap_v2_oracle)
self.mooniswap_oracle = MooniswapOracle(self.ethereum_client, self.uniswap_v2_oracle)
self.cache_eth_price = TTLCache(maxsize=2048, ttl=60 * 30) # 30 minutes of caching
self.cache_token_eth_value = TTLCache(maxsize=2048, ttl=60 * 30) # 30 minutes of caching
self.cache_token_usd_value = TTLCache(maxsize=2048, ttl=60 * 30) # 30 minutes of caching
self.cache_token_info = {}
@cached_property
def enabled_price_oracles(self) -> Tuple[PriceOracle]:
if self.ethereum_network == EthereumNetwork.MAINNET:
return self.kyber_oracle, self.uniswap_v2_oracle, self.sushiswap_oracle, self.uniswap_oracle
else:
            return self.kyber_oracle, self.uniswap_v2_oracle  # They provide versions on other networks
@cached_property
def enabled_pool_price_oracles(self) -> Tuple[PricePoolOracle]:
if self.ethereum_network == EthereumNetwork.MAINNET:
return self.uniswap_v2_oracle, self.balancer_oracle, self.mooniswap_oracle
else:
return tuple()
@cached_property
def ethereum_network(self):
return self.ethereum_client.get_network()
def _filter_addresses(self, erc20_addresses: Sequence[str], only_trusted: bool, exclude_spam: bool) -> List[str]:
"""
:param erc20_addresses:
:param only_trusted:
:param exclude_spam:
:return: ERC20 tokens filtered by spam or trusted
"""
base_queryset = Token.objects.filter(
Q(address__in=erc20_addresses) | Q(events_bugged=True)
).order_by('name')
if only_trusted:
addresses = list(base_queryset.erc20().filter(trusted=True).values_list('address', flat=True))
elif exclude_spam:
addresses = list(base_queryset.erc20().filter(spam=False).values_list('address', flat=True))
else:
# There could be some addresses that are not in the list
addresses_set = set(erc20_addresses)
addresses = []
for token in base_queryset:
if token.is_erc20():
addresses.append(token.address)
if token.address in addresses_set: # events_bugged tokens might not be on the `addresses_set`
addresses_set.remove(token.address)
# Add unknown addresses
addresses.extend(addresses_set)
return addresses
def get_balances(self, safe_address: str, only_trusted: bool = False, exclude_spam: bool = False) -> List[Balance]:
"""
:param safe_address:
:param only_trusted: If True, return balance only for trusted tokens
:param exclude_spam: If True, exclude spam tokens
:return: `{'token_address': str, 'balance': int}`. For ether, `token_address` is `None`
"""
assert Web3.isChecksumAddress(safe_address), f'Not valid address {safe_address} for getting balances'
all_erc20_addresses = list(EthereumEvent.objects.erc20_tokens_used_by_address(safe_address))
for address in all_erc20_addresses:
# Store tokens in database if not present
self.get_token_info(address) # This is cached
erc20_addresses = self._filter_addresses(all_erc20_addresses, only_trusted, exclude_spam)
try:
raw_balances = self.ethereum_client.erc20.get_balances(safe_address, erc20_addresses)
except IOError as exc:
raise NodeConnectionError from exc
balances = []
for balance in raw_balances:
if not balance['token_address']: # Ether
balance['token'] = None
elif balance['balance'] > 0:
balance['token'] = self.get_token_info(balance['token_address'])
if not balance['token']: # Ignore ERC20 tokens that cannot be queried
continue
else:
continue
balances.append(Balance(**balance))
return balances
def get_ewt_usd_price(self) -> float:
try:
return self.kucoin_client.get_ewt_usd_price()
except CannotGetPrice:
return self.coingecko_client.get_ewt_usd_price()
@cachedmethod(cache=operator.attrgetter('cache_eth_price'))
@cache_memoize(60 * 30, prefix='balances-get_eth_price') # 30 minutes
def get_eth_price(self) -> float:
"""
Get USD price for Ether. It depends on the ethereum network:
- On mainnet, use ETH/USD
- On xDAI, use DAI/USD.
- On EWT/VOLTA, use EWT/USD
:return: USD price for Ether
"""
if self.ethereum_network == EthereumNetwork.XDAI:
try:
return self.kraken_client.get_dai_usd_price()
except CannotGetPrice:
return 1 # DAI/USD should be close to 1
elif self.ethereum_network in (EthereumNetwork.ENERGY_WEB_CHAIN, EthereumNetwork.VOLTA):
return self.get_ewt_usd_price()
else:
try:
return self.kraken_client.get_eth_usd_price()
except CannotGetPrice:
return self.binance_client.get_eth_usd_price()
@cachedmethod(cache=operator.attrgetter('cache_token_eth_value'))
@cache_memoize(60 * 30, prefix='balances-get_token_eth_value') # 30 minutes
def get_token_eth_value(self, token_address: str) -> float:
"""
Return current ether value for a given `token_address`
"""
for oracle in self.enabled_price_oracles:
try:
return oracle.get_price(token_address)
except OracleException:
logger.info('Cannot get eth value for token-address=%s from %s', token_address,
oracle.__class__.__name__)
# Try pool tokens
for oracle in self.enabled_pool_price_oracles:
try:
return oracle.get_pool_token_price(token_address)
except OracleException:
logger.info('Cannot get eth value for token-address=%s from %s', token_address,
oracle.__class__.__name__)
logger.warning('Cannot find eth value for token-address=%s', token_address)
return 0.
@cachedmethod(cache=operator.attrgetter('cache_token_usd_value'))
@cache_memoize(60 * 30, prefix='balances-get_token_usd_price') # 30 minutes
def get_token_usd_price(self, token_address: str) -> float:
"""
        Return the current USD value for a given `token_address` using Curve; if that fails, use Coingecko as a last resort
"""
if self.ethereum_network == EthereumNetwork.MAINNET:
try:
return self.curve_oracle.get_pool_token_price(token_address)
except CannotGetPriceFromOracle:
try:
return self.coingecko_client.get_token_price(token_address)
except CannotGetPrice:
pass
return 0.
@cachedmethod(cache=operator.attrgetter('cache_token_info'))
@cache_memoize(60 * 60 * 24, prefix='balances-get_token_info') # 1 day
def get_token_info(self, token_address: str) -> Optional[Erc20InfoWithLogo]:
try:
token = Token.objects.get(address=token_address)
return Erc20InfoWithLogo.from_token(token)
except Token.DoesNotExist:
try:
erc20_info = self.ethereum_client.erc20.get_info(token_address)
token = Token.objects.create(address=token_address,
name=erc20_info.name,
symbol=erc20_info.symbol,
decimals=erc20_info.decimals)
return Erc20InfoWithLogo.from_token(token)
except InvalidERC20Info:
logger.warning('Cannot get erc20 token info for token-address=%s', token_address)
return None
def get_usd_balances(self, safe_address: str, only_trusted: bool = False,
exclude_spam: bool = False) -> List[BalanceWithFiat]:
"""
        All of this could be more efficient (e.g. by batching requests), but as everything is cached
        it should be alright
:param safe_address:
:param only_trusted: If True, return balance only for trusted tokens
:param exclude_spam: If True, exclude spam tokens
:return: List of BalanceWithFiat
"""
balances: List[Balance] = self.get_balances(safe_address, only_trusted, exclude_spam)
eth_value = self.get_eth_price()
balances_with_usd = []
for balance in balances:
token_address = balance.token_address
if not token_address: # Ether
fiat_conversion = eth_value
fiat_balance = fiat_conversion * (balance.balance / 10**18)
else:
token_to_eth_price = self.get_token_eth_value(token_address)
if token_to_eth_price:
fiat_conversion = eth_value * token_to_eth_price
                else:  # Use Curve/Coingecko as a last resort
fiat_conversion = self.get_token_usd_price(token_address)
balance_with_decimals = balance.balance / 10**balance.token.decimals
fiat_balance = fiat_conversion * balance_with_decimals
balances_with_usd.append(BalanceWithFiat(balance.token_address,
balance.token,
balance.balance,
round(fiat_balance, 4),
round(fiat_conversion, 4),
'USD'))
return balances_with_usd
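# Illustrative usage sketch (not part of the original module). It assumes a
# configured Django settings module (ETH_UNISWAP_FACTORY_ADDRESS and
# ETH_KYBER_NETWORK_PROXY_ADDRESS set) and a reachable Ethereum node; the Safe
# address below is a placeholder, not a real account.
def _example_usage():  # pragma: no cover
    balance_service = BalanceServiceProvider()  # singleton, reused across calls
    safe_address = '0x0000000000000000000000000000000000000000'  # placeholder
    eth_usd = balance_service.get_eth_price()  # cached for 30 minutes
    balances = balance_service.get_usd_balances(safe_address, exclude_spam=True)
    return eth_usd, balances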
|
from django.urls import path, include
from .views import GetUIDView
urlpatterns = [
path('all/', GetUIDView.as_view(), name='list_flights')
]
|
from nltk.tokenize import word_tokenize, sent_tokenize
from nltk.stem import WordNetLemmatizer
from part_of_speech import get_part_of_speech
lemmatizer = WordNetLemmatizer()
oprah_wiki = '<p>Working in local media, she was both the youngest news anchor and the first black female news anchor at Nashville\'s WLAC-TV. </p>' |
#!/usr/bin/env python3
"""
Abstract base class (ABC) for custom reusable Telegram inline keyboards.
This script serves as an interface that documents the functions subclasses must implement.
Usage:
This script should not be used directly, other than its ABC functionalities.
"""
from abc import ABC, abstractmethod
from telegram import InlineKeyboardMarkup
from typing import Any, Mapping, Optional, Tuple, Union
class AbstractMarkup(ABC):
"""AbstractMarkup class as ABC for custom reusable Telegram inline keyboards."""
@staticmethod
@abstractmethod
def get_pattern(*datas: str) -> str:
"""Gets the pattern regex for matching in ConversationHandler."""
pass
@staticmethod
@abstractmethod
def get_markup(*option_rows: Union[str, Tuple[str, ...]], option_datas: Optional[Mapping[str, str]] = None) \
-> InlineKeyboardMarkup:
"""Initialises the markup with parsed options."""
pass
class AbstractOptionMarkup(ABC):
"""AbstractOptionMarkup class as ABC for custom reusable option menus as Telegram inline keyboards."""
@abstractmethod
def _is_option(self, option: str) -> bool:
"""Verify if the option parsed is defined."""
pass
@abstractmethod
def perform_action(self, option: str) -> Any:
"""Perform action according to the callback data."""
pass
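# Illustrative sketch (not part of the original interface): a minimal concrete
# AbstractMarkup showing how the two abstract methods could be implemented with
# python-telegram-bot's InlineKeyboardButton/InlineKeyboardMarkup. The class
# name and the "^(...)$" pattern convention are assumptions for demonstration.
class ExampleMarkupSketch(AbstractMarkup):
    """Example markup where each option is echoed back as its callback data."""
    @staticmethod
    def get_pattern(*datas: str) -> str:
        # Match exactly one of the given callback data strings.
        return "^(" + "|".join(datas) + ")$"
    @staticmethod
    def get_markup(*option_rows: Union[str, Tuple[str, ...]],
                   option_datas: Optional[Mapping[str, str]] = None) -> InlineKeyboardMarkup:
        from telegram import InlineKeyboardButton
        keyboard = []
        for row in option_rows:
            options = (row,) if isinstance(row, str) else row
            keyboard.append([
                InlineKeyboardButton(option, callback_data=(option_datas or {}).get(option, option))
                for option in options
            ])
        return InlineKeyboardMarkup(keyboard)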
if __name__ == '__main__':
pass
|
import numpy as np
from typing import List, Union, Dict
from pathlib import Path
import os
import torch
import logging
import flair
import gensim
from flair.data import Sentence
from flair.embeddings.token import TokenEmbeddings
from flair.embeddings import StackedEmbeddings
from flair.embeddings import WordEmbeddings, CharacterEmbeddings
from flair.file_utils import cached_path
log = logging.getLogger("flair")
def get_embedding(embedding, finetune_bert=False):
embeddings = embedding.split('+')
result = [CaseEmbedding()]
# skip updating to new flair version
old_base_path = "https://flair.informatik.hu-berlin.de/resources/embeddings/token/"
cache_dir = Path("embeddings")
cached_path(f"{old_base_path}glove.gensim.vectors.npy", cache_dir=cache_dir)
cached_path(
f"{old_base_path}glove.gensim", cache_dir=cache_dir
)
cached_path(f"https://flair.informatik.hu-berlin.de/resources/characters/common_characters", cache_dir="datasets")
for embedding in embeddings:
if embedding == 'char':
result.append(CustomCharacterEmbeddings())
if embedding == 'glove':
result.append(LargeGloveEmbeddings('./data/glove'))
return StackedEmbeddings(embeddings=result)
class LargeGloveEmbeddings(WordEmbeddings):
"""Standard static word embeddings, such as GloVe or FastText."""
def __init__(self, glove_dir):
"""
        Initializes classic word embeddings, adapted for loading a large local GloVe embedding file
"""
super().__init__('glove')
embeddings = '840b-300d-glove'
self.field = ""
self.embeddings = embeddings
self.static_embeddings = True
# Large Glove embeddings
embeddings = os.path.join(glove_dir, 'glove.bin')
self.name: str = str(embeddings)
self.precomputed_word_embeddings = gensim.models.KeyedVectors.load(embeddings)
self.__embedding_length: int = self.precomputed_word_embeddings.vector_size
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
for i, sentence in enumerate(sentences):
for token, token_idx in zip(sentence.tokens, range(len(sentence.tokens))):
word_embedding = self.get_cached_vec(word=token.text)
token.set_embedding(self.name, word_embedding)
return sentences
@property
def embedding_length(self) -> int:
return 300
class CaseEmbedding(TokenEmbeddings):
"""Static Case Embedding"""
def __init__(self):
self.name: str = 'case-embedding-shun'
self.static_embeddings = False
self.__embedding_length: int = 3
super().__init__()
@property
def embedding_length(self) -> int:
return self.__embedding_length
def embed(self, sentences: Union[Sentence, List[Sentence]]) -> List[Sentence]:
for sentence in sentences:
for token in sentence:
text = token.text
is_lower = 1 if text == text.lower() else 0
is_upper = 1 if text == text.upper() else 0
is_mix = 1 if is_lower + is_upper == 0 else 0
word_embedding = torch.tensor(
np.array([is_lower, is_upper, is_mix]), device=flair.device, dtype=torch.float
)
token.set_embedding('case-embedding-shun', word_embedding)
return sentences
def _add_embeddings_internal(self, sentences: List[Sentence]) -> List[Sentence]:
return sentences
def __str__(self):
return self.name
class CustomCharacterEmbeddings(CharacterEmbeddings):
"""Batched-version of CharacterEmbeddings. """
def _add_embeddings_internal(self, sentences: List[Sentence]):
token_to_embeddings = {}
for sentence in sentences:
for token in sentence.tokens:
token_to_embeddings[token.text] = None
tokens_char_indices = []
for token in token_to_embeddings:
char_indices = [
self.char_dictionary.get_idx_for_item(char) for char in token
]
tokens_char_indices.append(char_indices)
# sort words by length, for batching and masking
tokens_sorted_by_length = sorted(
tokens_char_indices, key=lambda p: len(p), reverse=True
)
d = {}
for i, ci in enumerate(tokens_char_indices):
for j, cj in enumerate(tokens_sorted_by_length):
if ci == cj:
d[j] = i
continue
chars2_length = [len(c) for c in tokens_sorted_by_length]
longest_token_in_sentence = max(chars2_length)
tokens_mask = torch.zeros(
(len(tokens_sorted_by_length), longest_token_in_sentence),
dtype=torch.long,
device=flair.device,
)
for i, c in enumerate(tokens_sorted_by_length):
tokens_mask[i, : chars2_length[i]] = torch.tensor(
c, dtype=torch.long, device=flair.device
)
# chars for rnn processing
chars = tokens_mask
character_embeddings = self.char_embedding(chars).transpose(0, 1)
packed = torch.nn.utils.rnn.pack_padded_sequence(
character_embeddings, chars2_length
)
lstm_out, self.hidden = self.char_rnn(packed)
outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(lstm_out)
outputs = outputs.transpose(0, 1)
chars_embeds_temp = torch.zeros(
(outputs.size(0), outputs.size(2)),
dtype=torch.float,
device=flair.device,
)
for i, index in enumerate(output_lengths):
chars_embeds_temp[i] = outputs[i, index - 1]
character_embeddings = chars_embeds_temp.clone()
for i in range(character_embeddings.size(0)):
character_embeddings[d[i]] = chars_embeds_temp[i]
for token_number, token in enumerate(token_to_embeddings.keys()):
token_to_embeddings[token] = character_embeddings[token_number]
for sentence in sentences:
for token in sentence.tokens:
token.set_embedding(self.name, token_to_embeddings[token.text])
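# Illustrative usage sketch (not part of the original module). CaseEmbedding is
# self-contained, so it can be demonstrated without the GloVe binary that
# LargeGloveEmbeddings expects under ./data/glove/glove.bin; the sentence text
# is an arbitrary example.
def _example_case_embedding():  # pragma: no cover
    embedding = CaseEmbedding()
    sentence = Sentence("Flair embeds EVERY token")
    embedding.embed([sentence])
    # Each token now carries a 3-dim [is_lower, is_upper, is_mixed] vector.
    return [token.get_embedding() for token in sentence]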
|
import logging
log = logging.getLogger("BinaryDependencyGraph")
log.setLevel("DEBUG")
class Plugin:
def __init__(self, name, p, cfg, fw_path, memcmp_like_functions=None, *kargs, **kwargs):
global log
self._fw_path = fw_path
self._cfg = cfg
self._p = p
self._log = kwargs['log'] if 'log' in kwargs else log
self._name = name
self._memcmp_like = memcmp_like_functions if memcmp_like_functions is not None else []
self._blob = True if not hasattr(self._p.loader.main_bin, 'reverse_plt') else False
@property
def name(self):
return self._name
def run(self, *kargs, **kwargs):
        raise Exception("You have to implement at least the plugin's run() method")
def discover_new_binaries(self, *kargs, **kwargs):
return []
def discover_strings(self, *kargs, **kwargs):
return {}
@property
def role_strings_info(self):
return {}
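# Illustrative sketch (not part of the original module): the smallest possible
# Plugin subclass, showing that only run() has to be overridden. The plugin
# behaviour below is a no-op placeholder for demonstration.
class ExampleNoopPlugin(Plugin):
    def run(self, *kargs, **kwargs):
        self._log.debug("Running plugin %s against %s", self._name, self._fw_path)
        # A real plugin would analyse self._cfg / self._p here and report its findings.
        return None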
|
from .halo_transformer import HaloTransformer
from .nfnet import NFNet
from .pvt import PyramidVisionTransformer
from .swin_transformer import SwinTransformer
from .efficientnet import efficientnet, efficientnetv2
from .nfefficientnet import nfefficientnetv2
from .vit import dino
|
import base64 as b64
from binascii import unhexlify
from GMforms import GMform
from phpserialize import serialize, unserialize
import time
import sys
import os.path
def _obj_decode(s):
s=b64.b64decode(s)
s=unhexlify(s.decode())
return s
def main():
if (len(sys.argv))<=1:
print("Usage: formgui.exe <filename.prs>")
sys.exit(2)
filein=sys.argv[1]
namepy=filein.split(sep=".",maxsplit=2)[0]
dirname=os.path.dirname(filein)
fileout=namepy+".py"
print("Convertion: "+filein+" -> "+ fileout)
with open(filein) as f:
dane=f.read()
ldane=dane.split(sep="^", maxsplit=5);
#print(ldane[3])
lldane=ldane[3].split(sep="#frm#")
frm=GMform()
for a in lldane:
s=_obj_decode(a)
ds=s.decode().split(sep="#key#", maxsplit=3)
if (ds[0]=='main'):
frm.addForm(unserialize(ds[2].encode()))
elif(ds[0]=='lay'):
frm.addLayout(ds[1],unserialize(ds[2].encode()))
elif(ds[0]=='fld'):
frm.addField(ds[1],unserialize(ds[2].encode()))
PltObjList={}
PltParentList={}
PltWlsList={'0':{'TYP': 'Zero','PARENT_ID_LAY':0}}
V={}
for l in frm._arrLayounts:
VARS=frm.getLayount(l)
V={}
for k in VARS:
V[k.decode()]=VARS[k].decode()
id_lay=V['ID_LAY']
PltObjList[id_lay]=''
PltWlsList[id_lay]=V
PltParentList[id_lay]=V['PARENT_ID_LAY']
#print(PltWlsList)
#print(PltObjList)
#print(PltParentList)
mainLayount=''
for lay in PltObjList:
parent=PltParentList[lay]
if (parent=='0'):
mainLayount+='S_'+lay+'\nLAY_'+lay+'E_'+lay+'\n'
else:
if (len(PltObjList[parent])<=0):
PltObjList[parent]='S_'+lay+'\nLAY_'+lay+'E_'+lay+'\n'
else:
PltObjList[parent] +='S_'+lay+'\nLAY_' + lay + 'E_'+lay+'\n'
for l in PltObjList:
PltObjList[l]+='LAY_' + l + '\n'
mainLayount="S_0\n"+mainLayount+"LAY_0\nE_0"
#print(mainLayount)
#print(PltWlsList)
#exit(0)
while (len(PltObjList)>0):
PltDelList={}
for l in PltObjList:
layount=PltObjList[l]
if (mainLayount.find('LAY_'+l)>-1):
mainLayount=mainLayount.replace('LAY_'+l,layount)
#print(mainLayount)
PltDelList[l]='1'
#print(PltObjList)
for d in PltDelList:
del PltObjList[d]
#print(PltObjList)
#print(mainLayount)
PltFldList={}
for f in frm._arrFields:
FLD=frm.getField(f)
V = {}
for k in FLD:
V[k.decode()] = FLD[k].decode()
id_fld = V['ID_FLD']
PltFldList[id_fld] = V
#print(PltFldList)
#exit(0);
ReqFldList=[]
MLFldList=[]
PaternList={}
prev_parent='-1'
start_row=False
for lay in PltWlsList:
LAYER=PltWlsList[lay]
#print(LAYER)
parent=LAYER['PARENT_ID_LAY']
parent_typ=PltWlsList[str(parent)]['TYP']
if (prev_parent=='-1'):
prev_parent_typ='Zero'
else:
prev_parent_typ = PltWlsList[str(prev_parent)]['TYP']
typ=LAYER['TYP']
PaternList['LAY_' + lay]=''
pre=''
post=''
fpre="["
fpost="],"
if (typ=="Grid"):
typ="Row"
if (parent_typ in ('Row','Grid')):
fpre=""
fpost=","
if(typ=='Group'):
pre='['
post=']'
PaternList['S_' + lay] = fpre+"sg.Frame('" + LAYER['TITLE'] + "',\n["
PaternList['E_' + lay] = "],title_location=sg.TITLE_LOCATION_TOP_LEFT,title_color=self.descr_color, element_justification='"+LAYER['EL_ALIGN']+"',key='-GROUP_" + lay + "-', border_width=1)"+fpost
elif(typ=='Col'):
pre = '['
post = ']'
PaternList['S_' + lay] = fpre + "sg.Column(["
PaternList['E_' + lay] = "])" + fpost
elif(typ=='Zero'):
PaternList['S_' + lay] = fpre + "sg.Frame('',\n["
PaternList['E_' + lay] = "],title_location=sg.TITLE_LOCATION_TOP_LEFT, key='-ZERO_" + lay + "-', border_width=0)" + fpost
else:
EL_VALIGN = LAYER['EL_VALIGN']
if (EL_VALIGN not in ('top', 'bottom')):
EL_VALIGN = 'center'
PaternList['S_' + lay] = fpre+"sg.Frame('',\n[["
PaternList['E_' + lay] = "]],title_location=sg.TITLE_LOCATION_TOP_LEFT,title_color=self.descr_color, element_justification='"+LAYER['EL_ALIGN']+"',vertical_alignment='"+EL_VALIGN+"',key='-ROW_" + lay + "-', border_width=0)"+fpost
col=0
field_sep=""
PaternList['LAY_' + lay]=''
#print("LAYER:="+LAYER['TNAME']+" "+LAYER['ID_LAY'])
for f in PltFldList:
FIELD=PltFldList[f]
#print(FIELD)
parent=FIELD['ID_LAY']
id_lay=FIELD['ID_LAY']
if (id_lay==lay):
if (FIELD['TFLD'] in ('TF','PD','TA')):
if (FIELD['RO']=='1'):
disabled=",disabled=True"
else:
disabled=""
if (FIELD['TFLD']=='PD'):
pass_char=",password_char='*'"
else:
pass_char=""
try:
value=FIELD['VAL']
except:
value=''
star="'"
if (FIELD['REQ']=='1'):
star="'+self.req_char"
ReqFldList.append(FIELD['TNAME'])
if (int(FIELD['LEN'])<=100):
field_ptrn=pre+"sg.Column(" \
"[[sg.T('"+FIELD['DESCR']+star+",size=(None,1),text_color=self.descr_color,key='-"+FIELD['TNAME']+"-')]," \
"[sg.I(self._set_val(trb,'"+value+"',list_values,'"+FIELD['TNAME']+"'),size=("+FIELD['LEN']+",1)"+disabled+pass_char+",key='"+FIELD['TNAME']+"')]])"+post
else:
FIELD['LEN']='100'
MLFldList.append(FIELD['TNAME'])
rows=int(int(FIELD['LEN'])/100)+1
field_ptrn = pre + "sg.Column(" \
"[[sg.T('" + FIELD['DESCR']+star + ",size=(None,1),text_color=self.descr_color,key='-" + FIELD[
'TNAME'] + "-')]," \
"[sg.ML(self._set_val(trb,'" + value + "',list_values,'" + FIELD['TNAME'] + "'),size=(" + FIELD['LEN'] + ","+str(rows)+")" + disabled + pass_char + ",key='" + \
FIELD['TNAME'] + "')]])" + post
elif(FIELD['TFLD']=='TX'):
field_ptrn=pre+"sg.T('"+FIELD['DESCR']+"')"+post
elif(FIELD['TFLD'] in ('CB','CO')):
if (FIELD['UNI']=='1'):
selected="True"
else:
selected="False"
field_ptrn=pre+"sg.Checkbox('"+FIELD['VAL']+"', key='"+FIELD['TNAME']+"',default=self._set_val(trb,"+selected+",list_values,'"+FIELD['TNAME']+"',type='boolean'))"+post
elif(FIELD['TFLD']=='RB'):
field_ptrn = pre + "sg.Column("
field_ptrn+="["
field_ptrn+="\n[sg.Radio('"+FIELD['VAL1']+"','"+FIELD['TNAME']+"',default=self._set_val(trb,True,list_values,'"+FIELD['TNAME']+"1',type='boolean'),key='"+FIELD['TNAME']+"1')],"
for x in range(2,5):
try:
if (len(FIELD['VAL'+str(x)])>0):
field_ptrn += "\n[sg.Radio('" + FIELD['VAL'+str(x)] + "','" + FIELD['TNAME']+ "',key='"+FIELD['TNAME']+str(x)+"',default=self._set_val(trb,False,list_values,'"+FIELD['TNAME']+str(x)+"',type='boolean'))],"
except:
pass
field_ptrn+="\n])"+post
elif(FIELD['TFLD']=='BT'):
field_ptrn=pre+"sg.Button('"+FIELD['DESCR']+"',key='"+FIELD['TNAME']+"')"+post
elif(FIELD['TFLD']=='DF'):
field_ptrn = pre + "sg.Column(" \
"[[sg.T('" + FIELD['DESCR'] + "',text_color=self.descr_color,size=(18,1))]," \
"[sg.I(self._set_val(trb,str(time.strftime('%Y-%m-%d',time.localtime())),list_values,'"+FIELD['TNAME']+"'),enable_events=True,disabled=True,key='"+FIELD['TNAME']+"'" + ",size=(11,1))," \
"sg.CalendarButton('x', target=(1,0), format='%Y-%m-%d', default_date_m_d_y=self._set_val('edit',(None,None,None),list_values,'"+FIELD['TNAME']+"',type='date'))]])"+post
elif(FIELD['TFLD'] in ('LF','La','Lz','LA','LZ')):
star = "'"
FIELD['REQ']='1'
if (FIELD['REQ'] == '1'):
star="'+self.req_char"
ReqFldList.append(FIELD['TNAME'])
field_ptrn = pre + "sg.Column("
field_ptrn += "["
field_ptrn += "\n[sg.T('" + FIELD['DESCR'] +star+ ",text_color=self.descr_color,key='-"+FIELD['TNAME']+"-',size=(20,1))],\n"
if ((FIELD['SIZE']!='1') or (FIELD['MULTI']=='1')):
if (FIELD['MULTI']=='1'):
if (FIELD['SIZE']=='1'):
FIELD['SIZE']='3'
select_mode="select_mode=sg.LISTBOX_SELECT_MODE_MULTIPLE"
else:
select_mode = "select_mode=sg.LISTBOX_SELECT_MODE_SINGLE"
field_ptrn += "[sg.Listbox([['A','a'],['B','b'],['C','c']],default_values=self._set_val(trb,[['B','b']],list_values,'"+FIELD['TNAME']+"',type='list'),disabled=False, size=(20,"+FIELD['SIZE']+"),enable_events=True,"+select_mode+",key='"+FIELD['TNAME']+"')]"
else:
field_ptrn += "[sg.Combo([['A','a'],['B','b'],['C','c']],default_value=self._set_val(trb,['B','b'],list_values,'"+FIELD['TNAME']+"',type='list'),disabled=False, size=(20," + FIELD['SIZE'] + "),enable_events=True,key='" + FIELD['TNAME'] + "')]"
field_ptrn += "\n])" + post
else:
field_ptrn=pre+"sg.T('FLD_"+FIELD['TFLD']+"_"+FIELD['ID_FLD']+"')"+post
PaternList['LAY_'+lay] += field_sep+field_ptrn
field_sep=","
prev_parent=parent
prev_parent_typ = PltWlsList[str(prev_parent)]['TYP']
#print(field_ptrn)
#if (len(PaternList['LAY_' + lay])>0):
# if ((prev_parent_typ!='Row') and (prev_parent_typ!='Grid') ):
# PaternList['LAY_' + lay]= fpre+PaternList['LAY_'+lay]+fpost
for p in PaternList:
body=PaternList[p]
mainLayount=mainLayount.replace(p,body)
FormList={}
for f in frm._arrForm:
FormList[f.decode()] = frm._arrForm[f].decode()
func='''
\tdef prepareInsert(self,prefix,fld_list):
\t\tsqlsttm=prefix
\t\tfirst=True
\t\tfor key,val in fld_list.items():
\t\t\tif (first):
\t\t\t\tsqlsttm+=key
\t\t\t\tfirst=False
\t\t\telse:
\t\t\t\tsqlsttm+=","+key
\t\tsqlsttm+=") VALUES ("
\t\tfirst=True
\t\tfor key,val in fld_list.items():
\t\t\tif (first):
\t\t\t\tsqlsttm+="'"+str(val)+"'"
\t\t\t\tfirst=False
\t\t\telse:
\t\t\t\tsqlsttm+=",'"+str(val)+"'"
\t\tsqlsttm+=")"
\t\treturn sqlsttm
\n
\tdef prepareUpdate(self,prefix,fld_list):
\t\tsqlsttm=""
\t\tfirst=True
\t\tfor key,val in fld_list.items():
\t\t\tif (first):
\t\t\t\tsqlsttm+=prefix+key+"='"+str(val)+"'"
\t\t\t\tfirst=False
\t\t\telse:
\t\t\t\tsqlsttm+=","+prefix+key+"='"+str(val)+"'"
\t\treturn sqlsttm
\n
\tdef _set_val(self,trb,v1,lst,fld,**kwargs):
\t\tif trb!='default':
\t\t\tif fld in lst:
\t\t\t\tif ('type' in kwargs):
\t\t\t\t\tif (kwargs['type']=='date'):
\t\t\t\t\t\tif (fld not in lst):
\t\t\t\t\t\t\tlst[fld]=''
\t\t\t\t\t\tif (len(str(lst[fld]).strip())<=0):
\t\t\t\t\t\t\treturn (None,None,None)
\t\t\t\t\t\telse:
\t\t\t\t\t\t\treturn (int(lst[fld].split("-")[1]),int(lst[fld].split("-")[2]),int(lst[fld].split("-")[0]))
\t\t\t\t\telif(kwargs['type']=='boolean'):
\t\t\t\t\t\tif (lst[fld]=='1'):
\t\t\t\t\t\t\treturn True
\t\t\t\t\t\telse:
\t\t\t\t\t\t\treturn False
\t\t\t\t\telse:
\t\t\t\t\t\treturn lst[fld]
\t\t\t\telse:
\t\t\t\t\treturn lst[fld]
\t\t\telse:
\t\t\t\tif ('type' in kwargs):
\t\t\t\t\tif (kwargs['type']=='list'):
\t\t\t\t\t\treturn []
\t\t\t\t\telif(kwargs['type']=='boolean'):
\t\t\t\t\t\treturn False
\t\t\t\t\telif(kwargs['type']=='date'):
\t\t\t\t\t\tif (fld not in lst):
\t\t\t\t\t\t\tlst[fld]=''
\t\t\t\t\t\tif (len(str(lst[fld]).strip())<=0):
\t\t\t\t\t\t\treturn (None,None,None)
\t\t\t\t\t\telse:
\t\t\t\t\t\t\treturn (int(lst[fld].split("-")[1]),int(lst[fld].split("-")[2]),int(lst[fld].split("-")[0]))
\t\t\t\t\telse:
\t\t\t\t\t\treturn ''
\t\t\t\telse:
\t\t\t\t\treturn ''
\t\telse:
\t\t\treturn v1
'''
code1='''
\n
\tdef display(self):
\t\twindow = sg.Window('FORM',self.mainLayout, return_keyboard_events=True,modal=True)
\n
\t\twhile True:
\t\t\tevent, values = window.read()
\n
\t\t\tif event=='-OK-':
\n\t\t\t\tprint(event, values)
\n
'''
code2='''
\t\t\t\t\tvalues[f]=values[f].replace('\\n','')
'''
code3='''
\t\t\t\tfor f in lst_req:
\t\t\t\t\tif (type(values[f])==str):
\t\t\t\t\t\tvalues[f]=values[f].strip()
\t\t\t\t\tif (len(values[f])<=0):
\t\t\t\t\t\twindow['-'+f+'-'].update(text_color=self.req_color)
\t\t\t\t\t\tif zero_req:
\t\t\t\t\t\t\tpass
\t\t\t\t\t\telse:
\t\t\t\t\t\t\tzero_req=True
\t\t\t\t\t\t\twindow[f].SetFocus(force=True)
\t\t\t\t\telse:
\t\t\t\t\t\twindow['-'+f+'-'].update(text_color=self.descr_color)
\t\t\t\tif (zero_req):
\t\t\t\t\tsg.PopupOK('Please complete all required fields!',title='INFO',modal=True)
\t\t\t\tpass
\t\t\tif event=='-Cancel-':
\t\t\t\tbreak
\t\t\tif (event==None) or (event == sg.WIN_CLOSED):
\t\t\t\tbreak
\t\twindow.close()
\n
\n
\t\nif __name__ == '__main__':
\t\ttrb="default"
\t\tlist_values={}
\t\tfrm=WinForm(trb,list_values)
\t\tfrm.display()
'''
with open(fileout, 'w', encoding='utf8') as w:
w.write("import PySimpleGUI as sg")
w.write("\nimport time")
w.write("\n\nclass WinForm:")
w.write("\n\tdef __init__(self,trb,list_values):")
w.write("\n\n")
w.write("\n\t\tself.trb=trb")
w.write("\n\t\tself.list_values=list_values")
w.write("\n\t\tself.descr_color='white'")
w.write("\n\t\tself.req_color='yellow'")
w.write("\n\t\tself.req_char='*'\n")
w.write("\n\t\tself.mainLayout=[\n")
w.write(mainLayount)
w.write("\n[sg.Button('Cancel', key='-Cancel-'), sg.Button('OK', key='-OK-')]")
w.write("\n]")
w.write("\n")
w.write(func)
w.write(code1)
w.write("\t\t\t\tfor f in "+str(MLFldList)+":")
w.write(code2)
w.write("\t\t\t\tlst_req=" +str(ReqFldList)+'\n')
w.write("\t\t\t\tzero_req=False")
w.write(code3)
if __name__ == "__main__":
main() |
import sys
import math
import pickle
import copy
import pandas as pd
class DecisionTree:
def __init__(self, training_data=None, col_with_category=None, path_to_file=None):
self.leaf_counter = 0
self.current_node_id = 0
if path_to_file:
infile = open(path_to_file, 'rb')
self.__dict__.update(pickle.load(infile).__dict__)
infile.close()
else:
self.training_data = training_data
self.col_with_category = col_with_category
print('Building tree ...')
self.root = self.__build_tree(training_data)
def __str__(self):
map = self.__traverse(self.root)
leafes = 0
for row in map:
for element in row:
if type(element).__name__ == 'Leaf':
leafes += 1
        return f'Decision tree built from {len(self.training_data)} elements, leaves: {leafes}'
def trim_tree(self, data):
tree = self
processed_levels = 0
while True:
print('! Trimmed !')
temp, changed, processed_levels = tree.__trim_one_node(
data, processed_levels=processed_levels)
if not changed:
break
tree = temp
return tree
def test_accuracy(self, test_data):
pos = 0
for index, row in test_data.iterrows():
if self.find_category(row) == row[self.col_with_category]:
pos += 1
return pos/len(test_data)
def find_category(self, row):
current_node = self.root
while(type(current_node).__name__ != 'Leaf'):
current_node = current_node.get_child_node(
more_or_equal=row[current_node.attribute] >= current_node.value)
return current_node.category
def save_to_file(self, path):
outfile = open(path, 'wb')
pickle.dump(self, outfile)
outfile.close()
def __build_tree(self, data):
print(f'build tree from {len(data)} elements')
self.current_node_id += 1
if self.__stop_criteria(data):
tree = Leaf(self.current_node_id, category=self.__get_category(
data), n_of_elements=len(data))
self.leaf_counter += 1
else:
tests = self.__generate_test_pool(data)
results = ([], [])
while len(results[0]) == 0 or len(results[1]) == 0:
choosen_test = self.__choose_test(tests)
results = self.__split_by_attribute(data, choosen_test)
tree = Node(self.current_node_id, attribute=choosen_test[0], value=choosen_test[1])
tree.set_child_node(more_or_equal=True, node=self.__build_tree(results[0]))
tree.set_child_node(more_or_equal=False, node=self.__build_tree(results[1]))
return tree
def __test_quality(self, data, attr, split_point):
subsets = self.__split_by_attribute(data, (attr, split_point, 0))
sum0 = len(subsets[1])
sum1 = len(subsets[0])
suk0 = subsets[1][self.col_with_category].sum()
suk1 = subsets[0][self.col_with_category].sum()
if (sum0 == 0):
E_0 = 0
elif (suk0 == 0 or suk0 == sum0):
E_0 = 0
else:
E_0 = -(suk0/sum0)*math.log10(suk0/sum0) - \
((sum0-suk0)/sum0)*math.log10((sum0-suk0)/sum0)
if (sum1 == 0):
E_1 = 0
elif (suk1 == 0 or suk1 == sum1):
E_1 = 0
else:
E_1 = -(suk1/sum1)*math.log10(suk1/sum1) - \
((sum1-suk1)/sum1)*math.log10((sum1-suk1)/sum1)
E_w = sum0/len(data) * E_0 + sum1/len(data) * E_1
return(E_w)
def __stop_criteria(self, data):
if len(data) == 0 or data[self.col_with_category].nunique() == 1:
return True
return False
def __split_by_attribute(self, data, test):
subset1 = data[data[test[0]] >= test[1]]
subset2 = data[data[test[0]] < test[1]]
return (subset1, subset2)
def __get_category(self, data):
if len(data) == 0:
return -1
sum = data[self.col_with_category].sum()
if sum/len(data) >= 0.5:
return 1
else:
return 0
def __generate_test(self, data, attr, min, max):
mid = (min + max) / 2
entropy1 = self.__test_quality(data, attr, mid)
entropy2 = self.__test_quality(data, attr, (min+mid)/2)
entropy3 = self.__test_quality(data, attr, (max+mid)/2)
if entropy1 <= entropy2 and entropy1 <= entropy3:
split_point = mid
result_entropy = entropy1
elif entropy2 <= entropy3:
split_point, result_entropy = self.__generate_test(
data, attr, min, mid)
else:
split_point, result_entropy = self.__generate_test(
data, attr, mid, max)
return split_point, result_entropy
def __generate_test_pool(self, data):
result = []
for c in data.columns:
if c != self.col_with_category:
split_point, result_entropy = self.__generate_test(
data, c, data[c].min(), data[c].max())
result.append((c, split_point, result_entropy))
return result
def __choose_test(self, tests):
tests_df = pd.DataFrame(
tests, columns=['index', 'split_point', 'entropy'])
weights = tests_df.loc[:, 'entropy'].values.tolist()
for i in range(len(weights)):
if weights[i] == 0:
weights[i] = sys.maxsize * 2 + 1
else:
weights[i] = 1 / weights[i]
chosen_test_df = tests_df.sample(n=1, weights=weights)
chosen_test = chosen_test_df.iloc[0].to_numpy()
return chosen_test
def __traverse(self, rootnode, id=None):
tree_map = []
thislevel = [rootnode]
while thislevel:
nextlevel = list()
for n in thislevel:
if n.node_id == id:
return n
if type(n).__name__ != 'Leaf':
nextlevel.append(n.get_child_node(True))
nextlevel.append(n.get_child_node(False))
tree_map.append(thislevel)
thislevel = nextlevel
return tree_map
def __tree_to_leaf(self, rootnode):
categories = {}
thislevel = [rootnode]
while thislevel:
nextlevel = list()
for n in thislevel:
if type(n).__name__ != 'Leaf':
nextlevel.append(n.get_child_node(True))
nextlevel.append(n.get_child_node(False))
else:
if n.category not in categories:
categories[n.category] = 0
categories[n.category] += n.n_of_elements
thislevel = nextlevel
winning_cat = max(categories, key=categories.get)
leaf = Leaf(-1, category=winning_cat,
n_of_elements=sum(categories.values()))
return leaf
def __trim_one_node(self, data, processed_levels):
trimed_tree = copy.deepcopy(self)
trimed_tree_map = self.__traverse(trimed_tree.root)
row_number = 0
for row in trimed_tree_map:
row_number += 1
if row_number < processed_levels:
continue
for node in row:
if type(node).__name__ != 'Leaf':
print(f'Working on {type(node).__name__} {node.node_id}')
for side in [True, False]:
temp_node = node.get_child_node(side)
if type(temp_node).__name__ == 'Leaf':
continue
node.set_child_node(side, self.__tree_to_leaf(
node.get_child_node(side)))
old_acc = self.test_accuracy(data)
new_acc = trimed_tree.test_accuracy(data)
print(
f'\tFor {side} branch:\t old={old_acc} new={new_acc} better? {new_acc>=old_acc}')
if new_acc >= old_acc:
print(f'Trimming {side} side')
return trimed_tree, True, processed_levels
node.set_child_node(side, temp_node)
processed_levels += 1
return None, False, 0
class Node():
def __init__(self, node_id, attribute=None, value=None):
self.node_id = node_id
self.attribute = attribute
self.value = value
self.__more_eq_node = None
self.__less_node = None
def set_child_node(self, more_or_equal, node):
if more_or_equal:
self.__more_eq_node = node
else:
self.__less_node = node
def get_child_node(self, more_or_equal):
if more_or_equal:
return self.__more_eq_node
else:
return self.__less_node
class Leaf():
def __init__(self, node_id, category=None, n_of_elements=0):
self.node_id = node_id
self.category = category
self.n_of_elements = n_of_elements
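# Illustrative usage sketch (not part of the original module). It assumes a CSV
# with numeric feature columns and a binary (0/1) 'category' column; the file
# name and column name are placeholders.
def _example_usage():  # pragma: no cover
    data = pd.read_csv('dataset.csv')
    train = data.sample(frac=0.8, random_state=1)
    test = data.drop(train.index)
    tree = DecisionTree(training_data=train, col_with_category='category')
    print(tree)  # element count and number of leaves
    print('accuracy:', tree.test_accuracy(test))
    trimmed = tree.trim_tree(test)  # prune nodes that do not hurt accuracy
    tree.save_to_file('tree.pickle')
    return trimmed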
|
#!/usr/bin/env python
#
import cgi
import wsgiref.handlers
import json
import logging
from actingweb import actor
from actingweb import auth
from on_aw import on_aw_resources
import webapp2
class MainPage(webapp2.RequestHandler):
def get(self, id, name):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='resources', subpath=name)
if not myself or check.response["code"] != 200:
return
if not check.checkAuthorisation(path='resources', subpath=name, method='GET'):
self.response.set_status(403)
return
pair = on_aw_resources.on_get_resources(myself=myself,
req=self,
auth=check,
name=name)
if pair and any(pair):
out = json.dumps(pair)
self.response.write(out.encode('utf-8'))
self.response.headers["Content-Type"] = "application/json"
self.response.set_status(200)
else:
self.response.set_status(404)
def delete(self, id, name):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='resources', subpath=name)
if not myself or check.response["code"] != 200:
return
if not check.checkAuthorisation(path='resources', subpath=name, method='DELETE'):
self.response.set_status(403)
return
pair = on_aw_resources.on_delete_resources(myself=myself,
req=self,
auth=check,
name=name)
if pair:
if pair >= 100 and pair <= 999:
return
if any(pair):
out = json.dumps(pair)
self.response.write(out.encode('utf-8'))
self.response.headers["Content-Type"] = "application/json"
self.response.set_status(200)
else:
self.response.set_status(404)
def put(self, id, name):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='resources', subpath=name)
if not myself or check.response["code"] != 200:
return
if not check.checkAuthorisation(path='resources', subpath=name, method='PUT'):
self.response.set_status(403)
return
try:
params = json.loads(self.request.body.decode('utf-8', 'ignore'))
except:
self.response.set_status(405, "Error in json body")
return
pair = on_aw_resources.on_put_resources(myself=myself,
req=self,
auth=check,
name=name,
params=params)
if pair:
if pair >= 100 and pair <= 999:
return
if any(pair):
out = json.dumps(pair)
self.response.write(out.encode('utf-8'))
self.response.headers["Content-Type"] = "application/json"
self.response.set_status(200)
else:
self.response.set_status(404)
def post(self, id, name):
(Config, myself, check) = auth.init_actingweb(appreq=self,
id=id, path='resources', subpath=name)
if not myself or check.response["code"] != 200:
return
if not check.checkAuthorisation(path='resources', subpath=name, method='POST'):
self.response.set_status(403)
return
try:
params = json.loads(self.request.body.decode('utf-8', 'ignore'))
except:
self.response.set_status(405, "Error in json body")
return
pair = on_aw_resources.on_post_resources(myself=myself,
req=self,
auth=check,
name=name,
params=params)
if pair:
if pair >= 100 and pair <= 999:
return
if any(pair):
out = json.dumps(pair)
self.response.write(out.encode('utf-8'))
self.response.headers["Content-Type"] = "application/json"
self.response.set_status(201, 'Created')
else:
self.response.set_status(404)
application = webapp2.WSGIApplication([
webapp2.Route(r'/<id>/resources<:/?><name:(.*)>', MainPage, name='MainPage'),
], debug=True)
|
"""Add board moderator roles
Revision ID: da8b38b5bdd5
Revises: 90ac01a2df
Create Date: 2016-05-03 09:32:06.756899
"""
# revision identifiers, used by Alembic.
revision = 'da8b38b5bdd5'
down_revision = '90ac01a2df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.drop_index(op.f('ix_boardmoderator_board_id'), table_name='boardmoderator')
op.drop_index(op.f('ix_boardmoderator_moderator_id'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=False),
sa.Column('moderator_id', sa.Integer(), nullable=False),
sa.Column('roles', postgresql.ARRAY(sa.String()), nullable=False),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], ),
sa.PrimaryKeyConstraint('board_id', 'moderator_id')
)
op.create_index(op.f('ix_boardmoderator_roles'), 'boardmoderator', ['roles'], unique=False)
def downgrade():
op.drop_index(op.f('ix_boardmoderator_roles'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=True),
sa.Column('moderator_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], )
)
op.create_index(op.f('ix_boardmoderator_board_id'), 'boardmoderator', ['board_id'], unique=False)
op.create_index(op.f('ix_boardmoderator_moderator_id'), 'boardmoderator', ['moderator_id'], unique=False)
|
#! /bin/python3
import sys
import socket
import curses
import pickle
from random import choice
try:
from AI import AI
AI_AVAILABLE = True
except:
AI_AVAILABLE = False
__author__ = 'Felipe V. Calderan'
__copyright__ = 'Copyright (C) 2021 Felipe V. Calderan'
__license__ = 'BSD 3-Clause "New" or "Revised" License'
__version__ = '1.0'
# Arena config variables
SCR_H = 18
SCR_W = 78
# Message variables
MSG_SCR_SMALL = 'Terminal screen is too small (80x20 required)'
MSG_ARG_WRONG = 'Usage: python3 spong.py host/join ip port player_name'
MSG_CANT_HOST = 'Could not open the server on this IP/port'
MSG_CANT_JOIN = 'Could not join the game on this IP/port'
MSG_WAITING = 'Waiting for another player... (Ctrl+C to cancel)'
MSG_DISCONN = '----------Disconnected----------'
class Arena:
"""Used to draw and store informations about the arena"""
def __init__(
self,
x : int,
y : int,
size_x : int,
size_y : int
):
"""Initialize arena with the top-left corner located at (x,y) and with
size (size_x, size_y)"""
self.x, self.y = x, y
self.size_x, self.size_y = size_x, size_y
self.bound_x, self.bound_y = x+size_x, y+size_y
def draw(self, screen : curses.window):
"""Draws the arena on the screen"""
for i in range(self.y, self.bound_y):
screen.addstr(i, self.x, '|')
screen.addstr(i, self.bound_x, '|')
for i in range(self.x, self.bound_x):
screen.addstr(self.y, i, '-')
screen.addstr(self.bound_y, i, '-')
screen.addstr(self.y, self.x, '+')
screen.addstr(self.bound_y, self.x, '+')
screen.addstr(self.y, self.bound_x, '+')
screen.addstr(self.bound_y, self.bound_x, '+')
class Player:
"""Deals with players position, score and drawing"""
def __init__(self, side : str, arena : Arena):
"""Define player position based on if it's player 1 or player 2"""
self.x = arena.x+2 if side == 'left' else arena.bound_x-2
self.y = arena.bound_y//2+arena.y//2
self.score = 0
def goal(self):
"""Player scored a point"""
self.score += 1
def move(self, direction : str):
"""Move player up or down"""
if direction == 'up': self.y -= 1
else : self.y += 1
def draw(self, screen : curses.window, arena : Arena):
"""Draw player on the screen's defined y position"""
# clear player's row
for i in range(arena.y+1, arena.bound_y-1):
screen.addstr(i, self.x, ' ')
# draw the player
screen.addstr(self.y-1, self.x, '|')
screen.addstr(self.y, self.x, '|')
screen.addstr(self.y+1, self.x, '|')
class Ball:
"""Deals with balls's collisions, goals, position and velocity"""
def __init__(self, x : int, y : int, vx : int, vy : int):
"""Define ball position and initial velocity"""
self.old_x, self.old_y = x, y
self.x, self.y = x, y
self.vx, self.vy = vx, vy
def move(self, player1 : Player, player2 : Player, arena : Arena) -> int:
"""Move ball given its pos, velocity, collision and check for goals"""
goal = 0
# Check for map borders
if self.y + self.vy > arena.bound_y-1 or self.y + self.vy < arena.y+1:
self.vy *= -1
if self.x == arena.x+1 or self.x == arena.bound_x-1:
if self.x == arena.bound_x-1: player1.goal()
else : player2.goal()
self.old_x, self.old_y = self.x, self.y
self.x = arena.bound_x//2+arena.x//2
self.y = arena.bound_y//2+arena.y//2
self.vx, self.vy = choice((-1, 1)), choice((-1, 0, 1))
return
# Check for player hit
if self.x == player1.x+1:
if self.y == player1.y : self.vx, self.vy = (1, 0)
elif self.y == player1.y-1: self.vx, self.vy = (1,-1)
elif self.y == player1.y+1: self.vx, self.vy = (1, 1)
elif self.y == player1.y-2 and self.vy == 1: self.vx,self.vy=(1,-1)
elif self.y == player1.y+2 and self.vy ==-1: self.vx,self.vy=(1, 1)
if self.x == player2.x-1:
if self.y == player2.y : self.vx, self.vy = (-1, 0)
elif self.y == player2.y-1: self.vx, self.vy = (-1, -1)
elif self.y == player2.y+1: self.vx, self.vy = (-1, 1)
elif self.y == player2.y-2 and self.vy== 1: self.vx,self.vy=(-1,-1)
elif self.y == player2.y+2 and self.vy==-1: self.vx,self.vy=(-1, 1)
# Set the new ball position (and old position)
self.old_x, self.old_y = self.x, self.y
self.x, self.y = self.x + self.vx, self.y + self.vy
def draw(self, screen : curses.window):
"""(Re)draw the ball"""
# Erase the ball from the old position
screen.addstr(self.old_y, self.old_x, ' ')
# Draw the ball on the new position
screen.addstr(self.y, self.x, 'O')
def upload(self) -> (int, int, int, int):
"""Returns only the essential informations about the ball, so that the
whole object doesn't need to be passed through the network
Returns
-------
stats : tuple(int, int, int, int)
quadruple with ball's x, y, vx and vy
"""
return (self.x, self.y, self.vx, self.vy)
def download(self, info : tuple):
"""Update the essential informations about the ball"""
self.x, self.y, self.vx, self.vy = info
def show_msg(
screen : curses.window,
screen_height : int,
screen_width : int,
message : str
):
"""Generic routine to generate an error message"""
screen.addstr(screen_height//2, screen_width//2-len(message)//2, message)
screen.nodelay(0)
screen.getch()
sys.exit(0)
def get_args(
screen : curses.window,
screen_height : int,
screen_width : int
) -> (str, str, int, str):
"""Verify if the arguments are correctly formatted, if they are, return
them type-casted and further formatted for convenience
Returns
-------
tuple(mode : str, ip : str, port : int, name : str)
"""
# Wrong number of arguments
if len(sys.argv) != 5:
show_msg(screen, screen_height, screen_width, MSG_ARG_WRONG)
# Invalid mode (only host/join permitted)
if sys.argv[1].lower() not in ('host', 'join'):
show_msg(screen, screen_height, screen_width, MSG_ARG_WRONG)
# Invalid port type
if not sys.argv[3].isdigit():
show_msg(screen, screen_height, screen_width, MSG_ARG_WRONG)
return sys.argv[1].lower(), sys.argv[2], int(sys.argv[3]), sys.argv[4][:16]
def get_action(
screen : curses.window,
arena : Arena,
player : Player,
keys : dict,
is_AI : bool,
game_status : dict
) -> str or None:
"""Get player's action. The purpuse of this function is to be a wrapper,
for convenience if one day another kind of control is to be implemented
Parameters
----------
screen : curses.window
arena : Arena
player : Player
keys : dict
dictionary containing all the configured key to play the game
is_AI : bool
if True, the AI will control the character (if an AI is available)
game_status : dict
        dictionary containing the players' y positions and the ball's position & velocity.
        Useful for creating an AI, for example.
Returns
-------
action : str
string containing what the player wants to do
action : None
if no action (or invalid action) is taken
"""
if is_AI and AI_AVAILABLE:
return AI(screen, arena, player, keys, is_AI, game_status)
else:
key = screen.getch()
if key in keys['up_key'] : return 'up'
elif key in keys['down_key'] : return 'down'
elif key in keys['quit_key'] : return 'quit'
return None
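# Illustrative sketch (not part of the original file): what an AI callable
# matching the interface documented above might look like. The real AI module
# imported at the top of this file is optional and not distributed here; the
# name simple_tracking_ai and its follow-the-ball strategy are assumptions.
def simple_tracking_ai(screen, arena, player, keys, is_AI, game_status):
    ball_x, ball_y, ball_vx, ball_vy = game_status['ball']
    # Still drain pending key presses so a human can quit with 'q'.
    key = screen.getch()
    if key in keys['quit_key']:
        return 'quit'
    # Follow the ball vertically, respecting the same bounds main() enforces.
    if ball_y < player.y and player.y > arena.y + 3:
        return 'up'
    if ball_y > player.y and player.y < arena.bound_y - 3:
        return 'down'
    return None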
def main(scr : curses.window):
# Remove blinking cursor
curses.curs_set(0)
# Get screen's height and width & check if the screen is big enough
sh, sw = scr.getmaxyx()
if sh < SCR_H+2 or sw < SCR_W+2: show_msg(scr, sh, sw, MSG_SCR_SMALL)
# Get args
mode, ip, port, plname = get_args(scr, sh, sw)
# Start socket for host/join mode
skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if mode == 'host':
try:
skt.bind((ip, port))
skt.listen(1)
except:
show_msg(scr, sh, sw, MSG_CANT_HOST)
else:
try:
skt.connect((ip, port))
except:
show_msg(scr, sh, sw, MSG_CANT_JOIN)
# Setup keys
up_key = set((curses.KEY_UP, ord('k'), ord('K'), ord('w'), ord('W')))
down_key = set((curses.KEY_DOWN, ord('j'), ord('J'), ord('s'), ord('S')))
quit_key = set((ord('q'), ord('Q')))
keys = {'up_key' : up_key, 'down_key' : down_key, 'quit_key' : quit_key}
# Activate nodelay (so getch won't interrupt the execution)
scr.nodelay(1)
scr.timeout(33)
# Create arena
arena = Arena(0, 1, SCR_W, SCR_H)
# Create players
player1 = Player('left', arena)
player2 = Player('right', arena)
# Create the ball
ball = Ball(
arena.bound_x//2, arena.bound_y//2, choice((-1, 1)), choice((-1, 0, 1))
)
# Connection accepted
accepted = False
# Waiting connection message
scr.addstr(sh//2, sw//2-len(MSG_WAITING)//2, MSG_WAITING)
scr.refresh()
scr.addstr(sh//2, 0, " "*sw)
# Draw the arena
arena.draw(scr)
# Game loop
while True:
# Start networking
if mode == 'host':
if not accepted:
# Accept client
try:
clskt, claddr = skt.accept()
except:
sys.exit()
# Write host name on the screen and send it
scr.addstr(0, 0, plname)
clskt.send(plname.encode().ljust(16))
# Receive client name and add to screen
try:
clname = clskt.recv(16).strip().decode()[:16]
except:
show_msg(scr, 0, SCR_W, MSG_DISCONN)
scr.addstr(0, SCR_W+1-len(clname), clname)
                # Mark client as accepted
accepted = True
else:
if not accepted:
# Receive host name and add to screen
try:
scr.addstr(0, 0, skt.recv(16).strip().decode()[:16])
except:
show_msg(scr, 0, SCR_W, MSG_DISCONN)
# Write client name on the screen and send it
scr.addstr(0, SCR_W+1-len(plname), plname)
skt.send(plname.encode().ljust(16))
accepted = True
# Draw the game score
scr.addstr(0, SCR_W//2-6, str(player1.score))
scr.addstr(0, SCR_W//2+6, str(player2.score))
# Draw players
player1.draw(scr, arena)
player2.draw(scr, arena)
# Draw ball (host) and check goals
if mode == 'host':
ball.move(player1, player2, arena)
ball.draw(scr)
# Get button press, perform action and send over the network
if mode == 'host':
action = get_action(
scr, arena, player1, keys, plname=='AI',
{'p1' : player1.y, 'p2' : player2.y, 'ball' : ball.upload()}
)
if action == 'up' and player1.y > arena.y+3:
player1.move('up')
elif action == 'down' and player1.y < arena.bound_y-3:
player1.move('down')
elif action == 'quit' :
clskt.close()
sys.exit(0)
else: action = None
# Send ball and host's action
try:
clskt.send(pickle.dumps((str(action), ball.upload())))
player2_action = clskt.recv(16).strip().decode()
if player2_action == 'up' and player2.y > arena.y+3:
player2.move('up')
elif player2_action == 'down' and player2.y < arena.bound_y-3:
player2.move('down')
except:
show_msg(scr, 0, SCR_W, MSG_DISCONN)
else:
action = get_action(
scr, arena, player2, keys, plname=='AI',
{'p1' : player1.y, 'p2' : player2.y, 'ball': ball.upload()}
)
if action == 'up' and player2.y > arena.y+3 :
player2.move('up')
elif action == 'down' and player2.y < arena.bound_y-3:
player2.move('down')
elif action == 'quit':
skt.close()
sys.exit(0)
else: action = None
# Send client's action, then get ball and host's position
try:
skt.send(str(action).encode().ljust(16))
player1_action, ball_info = pickle.loads(
skt.recv(sys.getsizeof(('down', ball.upload()))*3)
)
if player1_action == 'up' and player1.y > arena.y+3:
player1.move('up')
elif player1_action == 'down' and player1.y < arena.bound_y-3:
player1.move('down')
ball.download(ball_info)
except:
show_msg(scr, 0, SCR_W, MSG_DISCONN)
# Draw ball (join) and check goals
ball.move(player1, player2, arena)
ball.draw(scr)
scr.refresh()
if __name__ == '__main__':
curses.wrapper(main)
|
#!/usr/bin/python
# Copyright 2006 Vladimir Prus.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
from time import sleep
t = BoostBuild.Tester()
t.write("jamroot.jam", """
import pch ;
project : requirements <warnings-as-errors>on ;
cpp-pch pch : pch.hpp : <toolset>msvc:<source>pch.cpp <include>. ;
cpp-pch pch-afx : pch.hpp : <define>HELLO <toolset>msvc:<source>pch.cpp <include>. ;
exe hello : hello.cpp pch : <include>. ;
exe hello-afx : hello-afx.cpp pch-afx : <define>HELLO <include>. ;
""")
t.write("pch.hpp.bad", """
THIS WILL NOT COMPILE
""")
# Note that pch.hpp is written after pch.hpp.bad, so its timestamp will not be
# less than timestamp of pch.hpp.bad.
sleep(1)
t.write("pch.hpp", """
#undef HELLO
class TestClass
{
public:
TestClass( int, int ) {}
};
""")
t.write("pch.cpp", """#include <pch.hpp>
""")
for name in ("hello.cpp", "hello-afx.cpp"):
t.write(name, """#include <pch.hpp>
int main() { TestClass c(1, 2); }
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug*/hello.exe")
t.expect_addition("bin/$toolset/debug*/hello-afx.exe")
# Now make the header unusable without changing its timestamp. If everything is OK,
# B2 will not recreate the PCH, and the compiler will happily use the pre-compiled
# header, not noticing that the real header is bad.
t.copy_preserving_timestamp("pch.hpp.bad", "pch.hpp")
t.rm("bin/$toolset/debug*/hello.obj")
t.rm("bin/$toolset/debug*/hello-afx.obj")
t.run_build_system()
t.expect_addition("bin/$toolset/debug*/hello.obj")
t.expect_addition("bin/$toolset/debug*/hello-afx.obj")
t.cleanup()
|
# #! /usr/bin/env python
import pytest
import sys
import numpy as np
import scipy as sp
# This filters out an innocuous warning when pandas is imported,
# but the version has not been compiled against the newest numpy.
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
import pandas as pd
from .. import _bootstrap_tools as bst
def create_dummy_dataset(seed=None, n=30, base_mean=0, expt_groups=6,
scale_means=2, scale_std=1.2):
"""
Creates a dummy dataset for plotting.
Returns the seed used to generate the random numbers,
the maximum possible difference between mean differences,
and the dataset itself.
"""
# Set a random seed.
if seed is None:
random_seed = np.random.randint(low=1, high=1000, size=1)[0]
else:
if isinstance(seed, int):
random_seed = seed
else:
raise TypeError('{} is not an integer.'.format(seed))
# Generate a set of random means
np.random.seed(random_seed)
MEANS = np.repeat(base_mean, expt_groups) + np.random.random(size=expt_groups) * scale_means
SCALES = np.random.random(size=expt_groups) * scale_std
max_mean_diff = np.ptp(MEANS)
dataset = list()
for i, m in enumerate(MEANS):
pop = sp.stats.norm.rvs(loc=m, scale=SCALES[i], size=10000)
sample = np.random.choice(pop, size=n, replace=False)
dataset.append(sample)
df = pd.DataFrame(dataset).T
df.columns = [str(c) for c in df.columns]
return random_seed, max_mean_diff, df
def is_difference(result):
assert result.is_difference == True
def is_paired(result):
assert result.is_paired == True
def check_pvalue_1samp(result):
assert result.pvalue_1samp_ttest != 'NIL'
def check_pvalue_2samp_unpaired(result):
assert result.pvalue_2samp_ind_ttest != 'NIL'
def check_pvalue_2samp_paired(result):
assert result.pvalue_2samp_related_ttest != 'NIL'
def check_mann_whitney(result):
"""Nonparametric unpaired"""
assert result.pvalue_mann_whitney != 'NIL'
assert result.pvalue_wilcoxon == 'NIL'
def check_wilcoxon(result):
"""Nonparametric Paired"""
assert result.pvalue_wilcoxon != 'NIL'
assert result.pvalue_mann_whitney == 'NIL'
# def test_mean_within_ci_bca(mean, result):
# assert mean >= result.bca_ci_low
# assert mean <= result.bca_ci_high
#
# def test_mean_within_ci_pct(mean, result):
# assert mean >= result.pct_ci_low
# assert mean <= result.pct_ci_high
def single_samp_stat_tests(sample, result):
assert result.is_difference == False
assert result.is_paired == False
ttest_result = sp.stats.ttest_1samp(sample, 0).pvalue
assert result.pvalue_1samp_ttest == pytest.approx(ttest_result)
def unpaired_stat_tests(control, expt, result):
is_difference(result)
check_pvalue_2samp_unpaired(result)
check_mann_whitney(result)
true_mean = expt.mean() - control.mean()
assert result.summary == pytest.approx(true_mean)
scipy_ttest_ind_result = sp.stats.ttest_ind(control, expt).pvalue
assert result.pvalue_2samp_ind_ttest == pytest.approx(scipy_ttest_ind_result)
mann_whitney_result = sp.stats.mannwhitneyu(control, expt,
alternative='two-sided').pvalue
assert result.pvalue_mann_whitney == pytest.approx(mann_whitney_result)
def paired_stat_tests(control, expt, result):
is_difference(result)
is_paired(result)
check_wilcoxon(result)
true_mean = np.mean(expt - control)
assert result.summary == pytest.approx(true_mean)
scipy_ttest_paired = sp.stats.ttest_rel(control, expt).pvalue
assert result.pvalue_2samp_paired_ttest == pytest.approx(scipy_ttest_paired)
wilcoxon_result = sp.stats.wilcoxon(control, expt).pvalue
assert result.pvalue_wilcoxon == pytest.approx(wilcoxon_result)
def does_ci_capture_mean_diff(control, expt, paired, nreps=100, alpha=0.05):
if expt is None:
mean_diff = control.mean()
else:
if paired is True:
mean_diff = np.mean(expt - control)
elif paired is False:
mean_diff = expt.mean() - control.mean()
ERROR_THRESHOLD = nreps * alpha
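# e.g. with nreps=100 and alpha=0.05, fewer than 5 CIs are allowed to miss the true mean difference.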
error_count_bca = 0
error_count_pct = 0
for i in range(1, nreps):
results = bst.bootstrap(control, expt, paired=paired, alpha_level=alpha)
print("\n95CI BCa = {}, {}".format(results.bca_ci_low, results.bca_ci_high))
try:
# test_mean_within_ci_bca(mean_diff, results)
assert mean_diff >= results.bca_ci_low
assert mean_diff <= results.bca_ci_high
except AssertionError:
error_count_bca += 1
print("\n95CI %tage = {}, {}".format(results.pct_ci_low, results.pct_ci_high))
try:
# test_mean_within_ci_pct(mean_diff, results)
assert mean_diff >= results.pct_ci_low
assert mean_diff <= results.pct_ci_high
except AssertionError:
error_count_pct += 1
print('\nNumber of BCa CIs not capturing the mean is {}'.format(error_count_bca))
assert error_count_bca < ERROR_THRESHOLD
print('\nNumber of Pct CIs not capturing the mean is {}'.format(error_count_pct))
assert error_count_pct < ERROR_THRESHOLD
# Start tests below.
def test_single_sample_bootstrap(mean=100, sd=10, n=25, nreps=100, alpha=0.05):
print("Testing single sample bootstrap.")
# Set the random seed.
random_seed = np.random.randint(low=1, high=1000, size=1)[0]
np.random.seed(random_seed)
print("\nRandom seed = {}".format(random_seed))
# single sample
pop = sp.stats.norm.rvs(loc=mean, scale=sd * np.random.random(1)[0], size=10000)
sample = np.random.choice(pop, size=n, replace=False)
print("\nMean = {}".format(mean))
results = bst.bootstrap(sample, alpha_level=alpha)
single_samp_stat_tests(sample, results)
does_ci_capture_mean_diff(sample, None, False, nreps, alpha)
def test_unpaired_difference(mean=100, sd=10, n=25, nreps=100, alpha=0.05):
print("Testing unpaired difference bootstrap.\n")
rand_delta = np.random.randint(-10, 10) # random int in [-10, 10), i.e. between -10 and 9
SCALES = sd * np.random.random(2)
pop1 = sp.stats.norm.rvs(loc=mean, scale=SCALES[0], size=10000)
sample1 = np.random.choice(pop1, size=n, replace=False)
pop2 = sp.stats.norm.rvs(loc=mean+rand_delta, scale=SCALES[1], size=10000)
sample2 = np.random.choice(pop2, size=n, replace=False)
results = bst.bootstrap(sample1, sample2, paired=False, alpha_level=alpha)
unpaired_stat_tests(sample1, sample2, results)
does_ci_capture_mean_diff(sample1, sample2, False, nreps, alpha)
def test_paired_difference(mean=100, sd=10, n=25, nreps=100, alpha=0.05):
print("Testing paired difference bootstrap.\n")
# Assume equal variances here, given that the samples
# are supposed to be paired.
rand_delta = np.random.randint(-10, 10) # random int in [-10, 10), i.e. between -10 and 9
print('difference={}'.format(rand_delta))
SCALE = sd * np.random.random(1)[0]
pop1 = sp.stats.norm.rvs(loc=mean, scale=SCALE, size=10000)
sample1 = np.random.choice(pop1, size=n, replace=False)
pop2 = sp.stats.norm.rvs(loc=mean+rand_delta, scale=SCALE, size=10000)
sample2 = np.random.choice(pop2, size=n, replace=False)
results = bst.bootstrap(sample1, sample2, alpha_level=alpha, paired=True)
paired_stat_tests(sample1, sample2, results)
does_ci_capture_mean_diff(sample1, sample2, True, nreps, alpha)
|
# Copyright (C) 2017 Beijing Didi Infinity Technology and Development Co.,Ltd.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base class for model."""
import re
import delta.compat as tf
from tensorflow.python.keras import backend as K # pylint: disable=no-name-in-module
class Model(tf.keras.Model):
"""Base class for model."""
def __init__(self, **kwargs): # pylint: disable=useless-super-delegation
super().__init__(**kwargs)
def __setattr__(self, key, value):
if key.startswith("temp_"):
# this is for temporary attributes avoiding keras check
self.__dict__[key] = value
else:
super().__setattr__(key, value)
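# For example (illustrative): ``self.temp_cache = {}`` is stored directly and skips
# the Keras attribute checking done by tf.keras.Model.__setattr__, whereas
# ``self.cache = {}`` goes through the normal Keras path and may be tracked.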
def call(self, inputs, training=None, mask=None):
raise NotImplementedError()
class RawModel:
"""Raw model."""
def __init__(self, **kwargs):
name = kwargs.get('name')
if not name:
prefix = self.__class__.__name__
name = self._to_snake_case(prefix) + '_' + str(K.get_uid(prefix))
self.name = name
@staticmethod
def _to_snake_case(name):
"""Transform name to snake case."""
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
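# For example (illustrative): a class named ``RawModel`` becomes ``raw_model``; the
# constructor above then appends a per-class counter from ``K.get_uid``, giving
# instance names like ``raw_model_1``.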
def __call__(self, inputs, **kwargs):
with tf.variable_scope(self.name):
return self.call(inputs, **kwargs)
def call(self, inputs, **kwargs):
"""call"""
raise NotImplementedError()
|
import cv2
import numpy as np
import pprint
w = h = 360
n = 6
np.random.seed(0)
pts = np.random.randint(0, w, (n, 2))
print(pts)
# [[172 47]
# [117 192]
# [323 251]
# [195 359]
# [ 9 211]
# [277 242]]
print(type(pts))
# <class 'numpy.ndarray'>
print(pts.shape)
# (6, 2)
img = np.zeros((w, h, 3), np.uint8)
for p in pts:
cv2.drawMarker(img, tuple(p), (255, 255, 255), thickness=2)
cv2.imwrite('data/dst/opencv_random_pts.png', img)
# True
# 
rect = (0, 0, w, h)
subdiv = cv2.Subdiv2D(rect)
for p in pts:
subdiv.insert((p[0], p[1]))
triangles = subdiv.getTriangleList()
print(triangles)
# [[ 1080. 0. 0. 1080. 323. 251.]
# [ 0. 1080. 1080. 0. -1080. -1080.]
# [ 0. 1080. -1080. -1080. 9. 211.]
# [-1080. -1080. 1080. 0. 172. 47.]
# [ 172. 47. 1080. 0. 323. 251.]
# [ 172. 47. 323. 251. 277. 242.]
# [-1080. -1080. 172. 47. 9. 211.]
# [ 172. 47. 117. 192. 9. 211.]
# [ 117. 192. 172. 47. 277. 242.]
# [ 9. 211. 195. 359. 0. 1080.]
# [ 195. 359. 9. 211. 117. 192.]
# [ 323. 251. 0. 1080. 195. 359.]
# [ 195. 359. 117. 192. 277. 242.]
# [ 323. 251. 195. 359. 277. 242.]]
pols = triangles.reshape(-1, 3, 2)
print(pols)
# [[[ 1080. 0.]
# [ 0. 1080.]
# [ 323. 251.]]
#
# [[ 0. 1080.]
# [ 1080. 0.]
# [-1080. -1080.]]
#
# [[ 0. 1080.]
# [-1080. -1080.]
# [ 9. 211.]]
#
# [[-1080. -1080.]
# [ 1080. 0.]
# [ 172. 47.]]
#
# [[ 172. 47.]
# [ 1080. 0.]
# [ 323. 251.]]
#
# [[ 172. 47.]
# [ 323. 251.]
# [ 277. 242.]]
#
# [[-1080. -1080.]
# [ 172. 47.]
# [ 9. 211.]]
#
# [[ 172. 47.]
# [ 117. 192.]
# [ 9. 211.]]
#
# [[ 117. 192.]
# [ 172. 47.]
# [ 277. 242.]]
#
# [[ 9. 211.]
# [ 195. 359.]
# [ 0. 1080.]]
#
# [[ 195. 359.]
# [ 9. 211.]
# [ 117. 192.]]
#
# [[ 323. 251.]
# [ 0. 1080.]
# [ 195. 359.]]
#
# [[ 195. 359.]
# [ 117. 192.]
# [ 277. 242.]]
#
# [[ 323. 251.]
# [ 195. 359.]
# [ 277. 242.]]]
img_draw = img.copy()
cv2.polylines(img_draw, pols.astype(int), True, (0, 0, 255), thickness=2)
cv2.imwrite('data/dst/opencv_delaunay.png', img_draw)
# True
# 
print(np.all(pols[:, :, 0] < w, axis=1))
# [False False True False False True True True True True True True
# True True]
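# The triangle list includes triangles built on the virtual outer vertices that
# Subdiv2D adds around the bounding rectangle (the +-1080 coordinates above), so
# the filtering below keeps only triangles whose vertices all lie inside the image.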
pols_inner = pols[np.all(pols[:, :, 0] < w, axis=1) &
np.all(pols[:, :, 0] > 0, axis=1) &
np.all(pols[:, :, 1] < h, axis=1) &
np.all(pols[:, :, 1] > 0, axis=1)]
print(pols_inner)
# [[[172. 47.]
# [323. 251.]
# [277. 242.]]
#
# [[172. 47.]
# [117. 192.]
# [ 9. 211.]]
#
# [[117. 192.]
# [172. 47.]
# [277. 242.]]
#
# [[195. 359.]
# [ 9. 211.]
# [117. 192.]]
#
# [[195. 359.]
# [117. 192.]
# [277. 242.]]
#
# [[323. 251.]
# [195. 359.]
# [277. 242.]]]
img_draw = img.copy()
cv2.polylines(img_draw, pols_inner.astype(int), True, (0, 0, 255), thickness=2)
cv2.imwrite('data/dst/opencv_delaunay_inner.png', img_draw)
# True
# 
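# With an empty index list, getVoronoiFacetList returns the Voronoi facet polygon
# for every inserted point, together with the facet centers, which match the
# original input points (see the printed output below).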
facets, centers = subdiv.getVoronoiFacetList([])
pprint.pprint(facets)
# [array([[ 331.1972 , 87.04766],
# [ 218.6763 , 147.63583],
# [ 41.71519, 80.51266],
# [ -503.5626 , -461.44025],
# [ 540.8418 , -1621.6836 ],
# [ 618.2897 , -125.45706]], dtype=float32),
# array([[218.6763 , 147.63583],
# [182.60144, 263.07538],
# [ 82.09151, 310.02014],
# [ 41.71519, 80.51266]], dtype=float32),
# array([[ 282.99118, 333.434 ],
# [ 331.1972 , 87.04766],
# [ 618.2897 , -125.45706],
# [ 987.2233 , 987.2233 ],
# [ 759.8912 , 898.6488 ]], dtype=float32),
# array([[ 182.60144, 263.07538],
# [ 282.99118, 333.434 ],
# [ 759.8912 , 898.6488 ],
# [-183.30182, 643.555 ],
# [ 82.09151, 310.02014]], dtype=float32),
# array([[ 82.09151, 310.02014],
# [ -183.30182, 643.555 ],
# [-1793.752 , 626.876 ],
# [ -503.5626 , -461.44025],
# [ 41.71519, 80.51266]], dtype=float32),
# array([[218.6763 , 147.63583],
# [331.1972 , 87.04766],
# [282.99118, 333.434 ],
# [182.60144, 263.07538]], dtype=float32)]
print(centers)
# [[172. 47.]
# [117. 192.]
# [323. 251.]
# [195. 359.]
# [ 9. 211.]
# [277. 242.]]
img_draw = img.copy()
cv2.polylines(img_draw, [f.astype(int) for f in facets], True, (255, 255, 255), thickness=2)
cv2.imwrite('data/dst/opencv_voronoi.png', img_draw)
# True
# 
img_draw = img.copy()
step = int(255 / len(facets))
for i, p in enumerate(f.astype(int) for f in facets):
cv2.fillPoly(img_draw, [p], (step * i, step * i, step * i))
cv2.imwrite('data/dst/opencv_voronoi_fill.png', img_draw)
# True
# 
img_draw = img.copy()
step = int(255 / len(facets))
for i, p in enumerate(f.astype(int) for f in facets):
cv2.fillPoly(img_draw, [p], (step * i, step * i, step * i))
cv2.polylines(img_draw, pols_inner.astype(int), True, (0, 0, 255), thickness=2)
for c in centers:
cv2.drawMarker(img_draw, tuple(c), (255, 255, 255), thickness=2)
cv2.imwrite('data/dst/opencv_delaunay_voronoi.png', img_draw)
# True
# 
|
from .train import train_fn
|
import argparse
import json
import sys
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ParseError
parser = argparse.ArgumentParser()
parser.add_argument("file", help="input XML file (ie: userPropertyTypes.xml)")
args = parser.parse_args()
tree = ''
checked = 0
try:
tree = ET.parse(args.file)
for elem in tree.iter():
checked += 1
if (elem.text and (elem.text[0] == '[')):
print ("**JSON** %s: '%s'" % (elem.tag, elem.text))
try:
json.loads(elem.text)
print ("**JSON OK** %s: '%s'" % (elem.tag, elem.text))
except:
print ("**JSON ERROR** %s: '%s'" % (elem.tag, elem.text))
else:
print ("%s: '%s'" % (elem.tag, elem.text))
except ParseError as p:
print("** XML ERROR** \"" + str(p.msg) + "\"")
sys.exit(1)
except TypeError as p:
print("** XML ERROR** \"" + str(p) + "\"")
sys.exit(1)
except:
print("Unexpected error:", sys.exc_info()[0])
sys.exit(1)
print(" === Checked " + str(checked) + " items: File is verified and valid === ")
|
"""
====================================
Bias and variance of polynomial fit
====================================
Demo overfitting, underfitting, and validation and learning curves with
polynomial regression.
Fit polynomials of different degrees to a dataset: for too small a degree,
the model *underfits*, while for too large a degree, it overfits.
"""
import numpy as np
import matplotlib.pyplot as plt
def generating_func(x, err=0.5):
return np.random.normal(10 - 1. / (x + 0.1), err)
############################################################
# A polynomial regression
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
############################################################
# A simple figure to illustrate the problem
n_samples = 8
np.random.seed(0)
x = 10 ** np.linspace(-2, 0, n_samples)
y = generating_func(x)
x_test = np.linspace(-0.2, 1.2, 1000)
titles = ['d = 1 (under-fit; high bias)',
'd = 2',
'd = 6 (over-fit; high variance)']
degrees = [1, 2, 6]
fig = plt.figure(figsize=(9, 3.5))
fig.subplots_adjust(left=0.06, right=0.98, bottom=0.15, top=0.85, wspace=0.05)
for i, d in enumerate(degrees):
ax = fig.add_subplot(131 + i, xticks=[], yticks=[])
ax.scatter(x, y, marker='x', c='k', s=50)
model = make_pipeline(PolynomialFeatures(d), LinearRegression())
model.fit(x[:, np.newaxis], y)
ax.plot(x_test, model.predict(x_test[:, np.newaxis]), '-b')
ax.set_xlim(-0.2, 1.2)
ax.set_ylim(0, 12)
ax.set_xlabel('house size')
if i == 0:
ax.set_ylabel('price')
ax.set_title(titles[i])
############################################################
# Generate a larger dataset
from sklearn.model_selection import train_test_split
n_samples = 200
test_size = 0.4
error = 1.0
# randomly sample the data
np.random.seed(1)
x = np.random.random(n_samples)
y = generating_func(x, error)
# split into training, validation, and testing sets.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=test_size)
# show the training and validation sets
plt.figure(figsize=(6, 4))
plt.scatter(x_train, y_train, color='red', label='Training set')
plt.scatter(x_test, y_test, color='blue', label='Test set')
plt.title('The data')
plt.legend(loc='best')
############################################################
# Plot a validation curve
from sklearn.model_selection import validation_curve
degrees = np.arange(1, 21)
model = make_pipeline(PolynomialFeatures(), LinearRegression())
# The parameter to vary is the "degree" of the pipeline step
# "polynomialfeatures"
train_scores, validation_scores = validation_curve(
model, x[:, np.newaxis], y,
param_name='polynomialfeatures__degree',
param_range=degrees)
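# (Illustrative note) "polynomialfeatures__degree" follows scikit-learn's
# "<step name>__<parameter>" convention for pipeline parameters; the valid
# parameter names can be listed with model.get_params().keys().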
# Plot the mean train error and validation error across folds
plt.figure(figsize=(6, 4))
plt.plot(degrees, validation_scores.mean(axis=1), lw=2,
label='cross-validation')
plt.plot(degrees, train_scores.mean(axis=1), lw=2, label='training')
plt.legend(loc='best')
plt.xlabel('degree of fit')
plt.ylabel('explained variance')
plt.title('Validation curve')
plt.tight_layout()
############################################################
# Learning curves
############################################################
#
# Plot train and test error with an increasing number of samples
# A learning curve for d=1, 5, 15
for d in [1, 5, 15]:
model = make_pipeline(PolynomialFeatures(degree=d), LinearRegression())
from sklearn.model_selection import learning_curve
train_sizes, train_scores, validation_scores = learning_curve(
model, x[:, np.newaxis], y,
train_sizes=np.logspace(-1, 0, 20))
# Plot the mean train error and validation error across folds
plt.figure(figsize=(6, 4))
plt.plot(train_sizes, validation_scores.mean(axis=1),
lw=2, label='cross-validation')
plt.plot(train_sizes, train_scores.mean(axis=1),
lw=2, label='training')
plt.ylim(-0.1, 1)  # the ymin/ymax keyword arguments were removed in newer Matplotlib versions
plt.legend(loc='best')
plt.xlabel('number of train samples')
plt.ylabel('explained variance')
plt.title('Learning curve (degree=%i)' % d)
plt.tight_layout()
plt.show()
|
from setuptools import find_packages, setup
setup(
name="traffic_ipywidgets",
packages=find_packages(),
entry_points={
"traffic.plugins": ["TrafficWidget = traffic_ipywidgets.ipywidgets"]
},
install_requires=[
"traffic",
"ipympl", # interactive matplotlib in notebooks
"tornado", # dependency for matplotlib with WebAgg
],
)
|
# MIT License
#
# Copyright (c) 2020 Arkadiusz Netczuk <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import logging
from datetime import datetime, date, time, timedelta
from typing import List, Tuple
from worklog import persist
_LOGGER = logging.getLogger(__name__)
class WorkLogEntry( persist.Versionable ):
## 1: added "description" field
## 2: added "work" field
## 3: reset seconds value to zero
## 4: add properties
_class_version = 4
def __init__(self):
self._startTime: datetime = None
self._endTime: datetime = None
self.work = True ## is work time?
self.description = ""
def _convertstate_(self, dict_, dictVersion_ ):
_LOGGER.info( "converting object from version %s to %s", dictVersion_, self._class_version )
if dictVersion_ < 1:
dict_["description"] = ""
if dictVersion_ < 2:
dict_["work"] = True
if dictVersion_ < 3:
pass
if dictVersion_ < 4:
dict_["_startTime"] = dict_["startTime"]
dict_["_endTime"] = dict_["endTime"]
del dict_["startTime"]
del dict_["endTime"]
## ensure no seconds
dict_["_startTime"] = dict_["_startTime"].replace( second=0, microsecond=0 )
dict_["_endTime"] = dict_["_endTime"].replace( second=0, microsecond=0 )
# pylint: disable=W0201
self.__dict__ = dict_
# def __str__(self):
# return self.printData()
#
# def __repr__(self):
# return self.printData()
@property
def startTime(self) -> datetime:
return self._startTime
@startTime.setter
def startTime(self, value: datetime):
if value is not None:
value = value.replace( second=0, microsecond=0 )
self._startTime = value
@property
def endTime(self) -> datetime:
return self._endTime
@endTime.setter
def endTime(self, value: datetime):
if value is not None:
value = value.replace( second=0, microsecond=0 )
self._endTime = value
def getDuration(self):
return self.endTime - self.startTime
def calculateTimeSpan(self, entryDate: date):
startDate: datetime = self.startTime
endDate: datetime = self.endTime
return calc_time_span( entryDate, startDate, endDate )
def printData( self ):
return str( self.startTime ) + " " + str( self.getDuration() ) + " " + str( self.work )
class WorkLogData( persist.Versionable ):
## 1 - rename field
_class_version = 1
def __init__(self):
self.entries: List[ WorkLogEntry ] = list()
def _convertstate_(self, dict_, dictVersion_ ):
_LOGGER.info( "converting object from version %s to %s", dictVersion_, self._class_version )
if dictVersion_ < 1:
## skip old version data
self.entries = list()
return
# pylint: disable=W0201
self.__dict__ = dict_
## [] (array) operator
def __getitem__(self, arg):
return self.getEntry( arg )
def size(self):
return len( self.entries )
def getEntry(self, row) -> WorkLogEntry:
return self.entries[ row ]
def recentEntry(self) -> WorkLogEntry:
if self.entries:
return self.entries[-1]
return None
def nextEntry(self, referenceEntry) -> WorkLogEntry:
entryIndex = self.entries.index( referenceEntry )
if entryIndex == len( self.entries ) - 1:
## last element
return None
return self.entries[ entryIndex + 1 ]
def prevEntry(self, referenceEntry) -> WorkLogEntry:
entryIndex = self.entries.index( referenceEntry )
if entryIndex < 1:
## first element
return None
return self.entries[ entryIndex - 1 ]
def getEntriesForDate(self, dateValue: date) -> List[ WorkLogEntry ]:
retList = []
for entry in self.entries:
startDate = entry.startTime.date()
endDate = entry.endTime.date()
if startDate <= dateValue and endDate >= dateValue:
retList.append( entry )
continue
return retList
def findEntriesInRange(self, fromDate: datetime, toDate: datetime) -> List[ WorkLogEntry ]:
retList = []
for entry in self.entries:
if entry.endTime < fromDate:
continue
if entry.startTime > toDate:
continue
retList.append( entry )
return retList
def addEntry(self, entry):
self.entries.append( entry )
self.sort()
def addEntryTime(self, entryDate: date, startTime: time, endTime: time, desc: str = "", work: bool = True):
dateTimeStart = datetime.combine( entryDate, startTime )
dateTimeEnd = datetime.combine( entryDate, endTime )
entry = WorkLogEntry()
entry.startTime = dateTimeStart
entry.endTime = dateTimeEnd
entry.work = work
entry.description = desc
self.addEntry( entry )
return entry
def addEntryTimeList(self, timeList: List[ Tuple[datetime, datetime] ]):
for span in timeList:
entry = WorkLogEntry()
entry.startTime = span[0]
entry.endTime = span[1]
self.addEntry( entry )
def replaceEntry( self, oldEntry: WorkLogEntry, newEntry: WorkLogEntry ):
_LOGGER.debug( "replacing entry %s with %s", oldEntry, newEntry )
for i, _ in enumerate( self.entries ):
currItem = self.entries[i]
if currItem == oldEntry:
self.entries[i] = newEntry
self.sort()
return True
_LOGGER.debug( "replacing failed" )
return False
def removeEntry(self, entry):
self.entries.remove( entry )
def joinEntryUp(self, entry):
try:
prevEntry = self.prevEntry( entry )
if prevEntry is None:
return
self.joinUp( entry, prevEntry )
except ValueError:
print("entry:", entry, self.entries)
raise
def joinUp(self, entry, prevEntry):
if prevEntry is None:
return
entry.startTime = prevEntry.endTime
def joinEntryDown(self, entry):
nextEntry = self.nextEntry( entry )
if nextEntry is None:
return
self.joinDown( entry, nextEntry )
def joinDown(self, entry, nextEntry):
if nextEntry is None:
return
entry.endTime = nextEntry.startTime
def mergeEntryUp(self, entry):
prevEntry = self.prevEntry( entry )
if prevEntry is None:
return
self.mergeUp( entry, prevEntry )
def mergeUp(self, sourceEntry, targetEntry):
targetEntry.endTime = sourceEntry.endTime
if sourceEntry.description:
targetEntry.description = sourceEntry.description + "\n" + targetEntry.description
targetEntry.description = targetEntry.description.strip()
self.entries.remove( sourceEntry )
def mergeEntryDown(self, entry):
nextEntry = self.nextEntry( entry )
if nextEntry is None:
return
self.mergeDown( entry, nextEntry )
def mergeDown(self, sourceEntry, targetEntry):
targetEntry.startTime = sourceEntry.startTime
if sourceEntry.description:
targetEntry.description = sourceEntry.description + "\n" + targetEntry.description
targetEntry.description = targetEntry.description.strip()
self.entries.remove( sourceEntry )
def sort(self):
self.entries.sort( key=self._sortKey, reverse=False )
@staticmethod
def _sortKey( entry: WorkLogEntry ):
retDate = entry.startTime
if retDate is None:
return datetime.min
return retDate
def printData( self ):
retStr = ""
for currItem in self.entries:
retStr += "item: " + currItem.printData() + "\n"
return retStr
## ==================================================================
class DataContainer( persist.Versionable ):
## 0 - first version
## 1 - add worklog history
_class_version = 1
def __init__(self):
self.history: WorkLogData = WorkLogData()
self.notes = { "notes": "" } ## default notes
## ==================================================================
def calc_time_span(entryDate: date, start: datetime, end: datetime):
midnight = datetime.combine( entryDate, datetime.min.time() )
daySecs = timedelta( days=1 ).total_seconds()
startFactor = 0.0
if start is not None:
startDate = start.date()
if entryDate < startDate:
return None
elif entryDate == startDate:
startDiff = start - midnight
startFactor = startDiff.total_seconds() / daySecs
dueFactor = 1.0
if end is not None:
endDate = end.date()
if entryDate > endDate:
return None
elif entryDate == endDate:
startDiff = end - midnight
dueFactor = startDiff.total_seconds() / daySecs
ret = [startFactor, dueFactor]
return ret
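# Example (illustrative): an entry running 06:00-18:00 on entryDate yields
# [0.25, 0.75]; an entry covering the whole day yields [0.0, 1.0]; a date
# outside the start/end range yields None.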
|
"""
Views for generating and serving policy files.
"""
import warnings
from typing import Iterable, Optional
from django.http import HttpRequest, HttpResponse
from . import policies
def serve(request: HttpRequest, policy: policies.Policy) -> HttpResponse:
"""
Given a ``flashpolicies.policies.Policy`` instance, serializes it
to XML and serves it.
Internally, this is used by all other views as the mechanism which
actually serves the policy file.
**Required arguments:**
``policy``
The ``flashpolicies.policies.Policy`` instance to serve.
**Optional arguments:**
None.
"""
return HttpResponse(
policy.serialize(), content_type="text/x-cross-domain-policy; charset=utf-8"
)
def allow_domains(request: HttpRequest, domains: Iterable[str]) -> HttpResponse:
"""
Serves a cross-domain access policy allowing a list of domains.
Note that if this is returned from the URL ``/crossdomain.xml`` on
a domain, it will act as a master policy and will not permit other
policies to exist on that domain. If you need to set meta-policy
information and allow other policies, use the view
:view:`flashpolicies.views.metapolicy` for the master policy instead.
**Required arguments:**
``domains``
A list of domains from which to allow access. Each value may
be either a domain name (e.g., ``example.com``) or a wildcard
(e.g., ``*.example.com``). Due to serious potential security
issues, it is strongly recommended that you not use wildcard
domain values.
**Optional arguments:**
None.
"""
return serve(request, policies.Policy(*domains))
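# A minimal usage sketch (illustrative, not part of this module): the view is
# normally wired up in a project's URLconf with the domain list passed as extra
# keyword arguments; the domains below are placeholders.
#
#     from django.urls import path
#     from flashpolicies import views
#
#     urlpatterns = [
#         path("crossdomain.xml", views.allow_domains,
#              {"domains": ["media.example.com", "api.example.com"]}),
#     ]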
def simple(request: HttpRequest, domains: Iterable[str]) -> HttpResponse:
"""
Deprecated name for the ``allow_domains`` view.
"""
warnings.warn(
"flashpolicies.views.simple has been renamed to "
"flashpolicies.views.allow_domains. Support for referring to it as "
"flashpolicies.views.simple is deprecated and will be removed in a "
"future release of django-flashpolicies.",
DeprecationWarning,
)
return allow_domains(request, domains)
def metapolicy(
request: HttpRequest, permitted: str, domains: Optional[Iterable[str]] = None
) -> HttpResponse:
"""
Serves a cross-domain policy which can allow other policies
to exist on the same domain.
Note that this view, if used, must be the master policy for the
domain, and so must be served from the URL ``/crossdomain.xml`` on
the domain: setting metapolicy information in other policy files
is forbidden by the cross-domain policy specification.
**Required arguments:**
``permitted``
A string indicating the extent to which other policies are
permitted. A set of constants is available in
``flashpolicies.policies``, defining acceptable values for
this argument.
**Optional arguments:**
``domains``
A list of domains from which to allow access. Each value may
be either a domain name (e.g., ``example.com``) or a wildcard
(e.g., ``*.example.com``). Due to serious potential security
issues, it is strongly recommended that you not use wildcard
domain values.
"""
if domains is None:
domains = []
policy = policies.Policy(*domains)
policy.metapolicy(permitted)
return serve(request, policy)
def no_access(request: HttpRequest) -> HttpResponse:
"""
Serves a cross-domain access policy which permits no access of any
kind, via a metapolicy declaration disallowing all policy files.
Note that this view, if used, must be the master policy for the
domain, and so must be served from the URL ``/crossdomain.xml`` on
the domain: setting metapolicy information in other policy files
is forbidden by the cross-domain policy specification.
**Required arguments:**
None.
**Optional arguments:**
None.
"""
return metapolicy(request, permitted=policies.SITE_CONTROL_NONE)
|
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class CommissionForm(models.Model):
form_id = models.CharField(max_length=8, default='')
pub_date = models.DateTimeField('date published', default=None)
def __str__(self):
return self.form_id
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Agency(models.Model):
commission_form = models.ForeignKey(
CommissionForm,
on_delete=models.CASCADE
)
agency_id = models.CharField(max_length=12, default='')
area = models.IntegerField(default=0)
def __str__(self):
return self.agency_id
|
#**********************************************************************
# Copyright 2020 Advanced Micro Devices, Inc
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#********************************************************************
import math
import bpy
from bpy.props import (
BoolProperty,
FloatProperty,
PointerProperty,
IntProperty,
EnumProperty,
)
import pyrpr
from . import RPR_Properties
from rprblender.utils import logging
log = logging.Log(tag='properties.object')
class RPR_ObjectProperites(RPR_Properties):
""" Properties for objects. Should be available only for meshes and area lights """
# Visibility
visibility_in_primary_rays: BoolProperty(
name="Camera",
description="This object will be visible in camera rays",
default=True,
)
reflection_visibility: BoolProperty(
name="Reflections",
description="This object will be visible in reflections",
default=True,
)
refraction_visibility: BoolProperty(
name="Refraction",
description="This object will be visible in refractions",
default=True,
)
diffuse_visibility: BoolProperty(
name="Diffuse",
description="This object will be visible in indirect diffuse reflections",
default=True,
)
shadows: BoolProperty(
name="Shadows",
description="This object will cast shadows",
default=True,
)
shadowcatcher: BoolProperty(
name="Shadow Catcher",
description="Use this object as a shadowcatcher",
default=False,
)
reflection_catcher: BoolProperty(
name="Reflection Catcher",
description="Use this object as a reflection catcher",
default=False,
)
portal_light: BoolProperty(
name="Portal Light",
description="Use this object as a portal light",
default=False,
)
visibility_contour: BoolProperty(
name="Contour",
description="This object will be visible in Contour render mode",
default=True,
)
# Motion and Deformation Blur
motion_blur: BoolProperty(
name="Motion Blur",
description="Enable Motion Blur",
default=True,
)
deformation_blur: BoolProperty(
name="Deformation Blur",
description="Enable Deformation Blur",
default=False,
)
# Subdivision
subdivision: BoolProperty(
name="Subdivision",
description="Enable subdivision",
default=False,
)
subdivision_factor: FloatProperty(
name="Subdiv Polygon Size",
description="Subdivision polygon size, in pixels that it should be subdivided to.\n"
"For finer subdivision set lower",
min=0.5, soft_max=512.0,
default=16.0
)
subdivision_level: IntProperty(
name="Level",
description="Subdivision level for mesh. For finer subdivision set upper",
min=0, max=12, soft_max=8,
default=3
)
subdivision_boundary_type: EnumProperty(
name="Boundary Type",
description="Subdivision boundary type",
items=(
('EDGE_CORNER', "Edge and Corner", "Edge and corner"),
('EDGE', "Edge only", "Edge only")
),
default='EDGE_CORNER',
)
subdivision_crease_weight: FloatProperty(
name="Crease Weight",
description="Subdivision crease weight",
min=0.0,
default=1.0,
)
def set_catchers(self, rpr_shape):
rpr_shape.set_shadow_catcher(self.shadowcatcher)
rpr_shape.set_reflection_catcher(self.reflection_catcher)
def export_subdivision(self, rpr_shape):
""" Exports subdivision settings """
if self.subdivision:
# convert the factor from subdivision size in pixels to what RPR expects
# RPR wants the subdivision factor as the "number of faces per pixel"
# the setting gives the user the face size in pixels.
# rpr internally does: subdivision size in pixel = 2^factor / 16.0
factor = int(math.log2(16.0 / self.subdivision_factor))
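# e.g. the line above maps subdivision_factor = 16.0 px -> factor = 0,
# 4.0 px -> factor = 2, and 1.0 px -> factor = 4.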
rpr_shape.subdivision = {
'factor': factor,
'level': self.subdivision_level,
'boundary': pyrpr.SUBDIV_BOUNDARY_INTERFOP_TYPE_EDGE_AND_CORNER if self.subdivision_boundary_type == 'EDGE_CORNER' else
pyrpr.SUBDIV_BOUNDARY_INTERFOP_TYPE_EDGE_ONLY,
'crease_weight': self.subdivision_crease_weight
}
else:
rpr_shape.subdivision = None
@classmethod
def register(cls):
log("Register")
bpy.types.Object.rpr = PointerProperty(
name="RPR Object Settings",
description="RPR Object settings",
type=cls,
)
@classmethod
def unregister(cls):
log("Unregister")
del bpy.types.Object.rpr
|
#!/usr/bin/python
from twitter import *
import os.path
## I'm using this library: https://github.com/sixohsix/twitter
# As soon as the app has credentials for an account, it creates this file
MY_TWITTER_CREDS = os.path.expanduser('~/.library_credentials')
#Personal API keys (You should put your key here)
CONSUMER_KEY=''
CONSUMER_SECRET=''
#The screen name of the account that the bot will use
BOT_NAME=''
#Ask for credentials
if not os.path.exists(MY_TWITTER_CREDS):
oauth_dance(BOT_NAME, CONSUMER_KEY, CONSUMER_SECRET,
MY_TWITTER_CREDS)
oauth_token, oauth_secret = read_token_file(MY_TWITTER_CREDS)
twitter = Twitter(auth=OAuth(oauth_token, oauth_secret, CONSUMER_KEY, CONSUMER_SECRET))
## Get followers and add as friends (Autofollow)
fo = twitter.followers.ids()
followers = fo['ids']
##Get friends
fr = twitter.friends.ids()
friends = fr['ids']
## Pending friend requests
og = twitter.friendships.outgoing()
pending = og['ids']
for i in followers:
#If I haven't added them yet
if (i not in pending and i not in friends):
twitter.friendships.create(user_id=i)
##Get my tweets
mt = twitter.statuses.user_timeline(screen_name = 'library_gossip',count = 50)
mytweets =[]
##Get text only
mt_len = len(mt)
for i in range(mt_len):
mytweets.append(mt[i]['text'])
## Read direct messages
dm = twitter.direct_messages()
dm_len = len(dm)
for i in range(dm_len):
text = dm[i]['text']
OP = dm[i]['sender_id']
ID = dm[i]['id']
## Check length of the message
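## (Presumably 118 = the 140-character tweet limit of the time minus the
## 22-character prefix "A birdie told me that " added below.)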
if len(text) > 118:
##Send a message back
twitter.direct_messages.new(user_id=OP, text='Message too long, it must not exceed 118 characters.')
else:
##Post gossip
msg = 'A birdie told me that ' + text
#Check for duplicates
if (msg not in mytweets):
print(msg)
twitter.statuses.update(status = msg)
else:
##Send a message back
twitter.direct_messages.new(user_id=OP, text='Message duplicated.')
print('Message duplicated')
##Delete direct msg
twitter.direct_messages.destroy( _id=ID )
|
from django.shortcuts import render
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from django.template import loader
from .models import Produto, Cliente
def index(request):
produtos = Produto.objects.all()
context = {
'curso': 'Programação Web com Django Framework',
'outro': 'Django é massa',
'produtos': produtos
}
return render(request, 'index.html', context)
def contato(request):
clientes = Cliente.objects.all()
context = {
'clientes': clientes
}
return render(request, 'contato.html', context)
def produto(request, pk):
# print(f'PK: {pk}')
# prod = Produto.objects.get(id=pk) # Version without error handling
prod = get_object_or_404(Produto, id=pk)
context = {
'produto': prod
}
return render(request, 'produto.html', context)
def error404(request, exception):
template = loader.get_template('404.html')
return HttpResponse(content=template.render(), content_type='text/html; charset=utf8', status=404)
def error500(request):
template = loader.get_template('500.html')
return HttpResponse(content=template.render(), content_type='text/html; charset=utf8', status=500)
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry import decorators
from telemetry.core import platform as platform_module
from telemetry.testing import browser_test_case
from telemetry.testing import tab_test_case
from telemetry.timeline import model as model_module
from telemetry.timeline import tracing_config
class TracingControllerTest(tab_test_case.TabTestCase):
def testModifiedConsoleTime(self):
tracing_controller = self._tab.browser.platform.tracing_controller
config = tracing_config.TracingConfig()
config.enable_chrome_trace = True
tracing_controller.StartTracing(config)
self.Navigate('blank.html')
self.assertEquals(
self._tab.EvaluateJavaScript('document.location.pathname;'),
'/blank.html')
self._tab.EvaluateJavaScript("""
window.__console_time = console.time;
console.time = function() { };
""")
with self.assertRaisesRegexp(Exception, 'Page stomped on console.time'):
tracing_controller.StopTracing()
# Restore console.time
self._tab.EvaluateJavaScript("""
console.time = window.__console_time;
delete window.__console_time;
""")
# Check that subsequent tests will be able to use tracing normally.
self.assertFalse(tracing_controller.is_tracing_running)
tracing_controller.StartTracing(config)
self.assertTrue(tracing_controller.is_tracing_running)
tracing_controller.StopTracing()
self.assertFalse(tracing_controller.is_tracing_running)
def testExceptionRaisedInStopTracing(self):
tracing_controller = self._tab.browser.platform.tracing_controller
config = tracing_config.TracingConfig()
config.enable_chrome_trace = True
tracing_controller.StartTracing(config)
self.Navigate('blank.html')
self._tab.EvaluateJavaScript("""
window.__console_time = console.time;
console.time = function() { };
""")
with self.assertRaisesRegexp(Exception, 'Page stomped on console.time'):
tracing_controller.StopTracing()
# Tracing is stopped even if there is exception.
self.assertFalse(tracing_controller.is_tracing_running)
def testGotTrace(self):
tracing_controller = self._browser.platform.tracing_controller
config = tracing_config.TracingConfig()
config.enable_chrome_trace = True
tracing_controller.StartTracing(config)
trace_data = tracing_controller.StopTracing()
# Test that trace data is parsable
model = model_module.TimelineModel(trace_data)
assert len(model.processes) > 0
def testStartAndStopTraceMultipleTimes(self):
tracing_controller = self._browser.platform.tracing_controller
config = tracing_config.TracingConfig()
config.enable_chrome_trace = True
tracing_controller.StartTracing(config)
self.assertFalse(tracing_controller.StartTracing(config))
trace_data = tracing_controller.StopTracing()
# Test that trace data is parsable
model_module.TimelineModel(trace_data)
self.assertFalse(tracing_controller.is_tracing_running)
# Calling stop again will raise exception
self.assertRaises(Exception, tracing_controller.StopTracing)
def _StartupTracing(self, platform):
# Stop browser
browser_test_case.teardown_browser()
# Start tracing
self.assertFalse(platform.tracing_controller.is_tracing_running)
config = tracing_config.TracingConfig()
config.enable_chrome_trace = True
platform.tracing_controller.StartTracing(config)
self.assertTrue(platform.tracing_controller.is_tracing_running)
try:
# Start browser
self.setUpClass()
self._browser.tabs[0].Navigate('about:blank')
self._browser.tabs[0].WaitForDocumentReadyStateToBeInteractiveOrBetter()
self.assertEquals(platform, self._browser.platform)
# Calling start tracing again will return False
self.assertFalse(
self._browser.platform.tracing_controller.StartTracing(config))
trace_data = self._browser.platform.tracing_controller.StopTracing()
# Test that trace data is parsable
model_module.TimelineModel(trace_data)
self.assertFalse(
self._browser.platform.tracing_controller.is_tracing_running)
# Calling stop tracing again will raise exception
self.assertRaises(Exception,
self._browser.platform.tracing_controller.StopTracing)
finally:
if self._browser:
self._browser.Close()
self._browser = None
@decorators.Enabled('android')
def testStartupTracingOnAndroid(self):
self._StartupTracing(self._browser.platform)
# Not enabled on win because of crbug.com/570955
@decorators.Enabled('linux', 'mac')
@decorators.Isolated
def testStartupTracingOnDesktop(self):
self._StartupTracing(platform_module.GetHostPlatform())
|
from typing import Dict, List, Tuple, Any, Union
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from schafkopf.backend.calculator import RufspielCalculator, SoloCalculator, HochzeitCalculator, RamschCalculator
from schafkopf.database.configs import RufspielConfig, SoloConfig, HochzeitConfig, RamschConfig
from schafkopf.database.data_model import Teilnehmer, Runde, Base, Punkteconfig, Farbgebung, Spielart
S1 = 'Spieler_1'
S2 = 'Spieler_2'
S3 = 'Spieler_3'
S4 = 'Spieler_4'
data_rufspiel_hochzeit = [
([], None, None, 0, 0, True, {S1: -40, S2: -40, S3: 40, S4: 40}),
([], None, None, 0, 0, False, {S1: -30, S2: -30, S3: 30, S4: 30}),
([], None, None, 0, 29, False, {S1: -30, S2: -30, S3: 30, S4: 30}),
([], None, None, 0, 30, False, {S1: -30, S2: -30, S3: 30, S4: 30}),
([], None, None, 0, 31, False, {S1: -20, S2: -20, S3: 20, S4: 20}),
([], None, None, 0, 59, False, {S1: -20, S2: -20, S3: 20, S4: 20}),
([], None, None, 0, 60, False, {S1: -20, S2: -20, S3: 20, S4: 20}),
([], None, None, 0, 61, False, {S1: 20, S2: 20, S3: -20, S4: -20}),
([], None, None, 0, 90, False, {S1: 20, S2: 20, S3: -20, S4: -20}),
([], None, None, 0, 91, False, {S1: 30, S2: 30, S3: -30, S4: -30}),
([], None, None, 0, 120, True, {S1: 40, S2: 40, S3: -40, S4: -40}),
([], None, None, 3, 0, True, {S1: -70, S2: -70, S3: 70, S4: 70}),
([], None, None, 3, 29, False, {S1: -60, S2: -60, S3: 60, S4: 60}),
([], None, None, 3, 30, False, {S1: -60, S2: -60, S3: 60, S4: 60}),
([], None, None, 3, 59, False, {S1: -50, S2: -50, S3: 50, S4: 50}),
([], None, None, 3, 60, False, {S1: -50, S2: -50, S3: 50, S4: 50}),
([], None, None, 3, 61, False, {S1: 50, S2: 50, S3: -50, S4: -50}),
([], None, None, 0, 89, False, {S1: 20, S2: 20, S3: -20, S4: -20}),
([], None, None, 0, 90, False, {S1: 20, S2: 20, S3: -20, S4: -20}),
([], None, None, 0, 91, False, {S1: 30, S2: 30, S3: -30, S4: -30}),
([], None, None, 6, 120, True, {S1: 100, S2: 100, S3: -100, S4: -100}),
([S1], None, None, 0, 0, True, {S1: -80, S2: -80, S3: 80, S4: 80}),
([S2], None, None, 0, 29, False, {S1: -60, S2: -60, S3: 60, S4: 60}),
([S3], None, None, 0, 30, False, {S1: -60, S2: -60, S3: 60, S4: 60}),
([S4], None, None, 0, 59, False, {S1: -40, S2: -40, S3: 40, S4: 40}),
([S1], S3, None, 0, 60, False, {S1: -80, S2: -80, S3: 80, S4: 80}),
([S1], S3, S2, 0, 61, False, {S1: 160, S2: 160, S3: -160, S4: -160}),
([S1, S2], None, None, 0, 0, True, {S1: -160, S2: -160, S3: 160, S4: 160}),
([S1, S2, S3, S4], None, None, 0, 0, True, {S1: -640, S2: -640, S3: 640, S4: 640}),
]
@pytest.mark.parametrize(
('gelegt_teilnehmer_names',
'kontriert_teilnehmer_name',
're_teilnehmer_name',
'laufende',
'spieler_augen',
'schwarz',
'expected'), data_rufspiel_hochzeit)
def test_rufspiele(gelegt_teilnehmer_names: List[str],
kontriert_teilnehmer_name: Union[None, str],
re_teilnehmer_name: Union[None, str],
laufende: int,
spieler_augen: int,
schwarz: bool,
expected: Dict[str, int]):
runde, ansager, partner, gegner1, gegner2 = init_in_memory_database()
teilnehmers = [ansager, partner, gegner1, gegner2]
kontriert_id = None if kontriert_teilnehmer_name is None else \
transform_teilnehmer_names_to_teilnehmer_ids([kontriert_teilnehmer_name], teilnehmers)[0]
re_id = None if re_teilnehmer_name is None else \
transform_teilnehmer_names_to_teilnehmer_ids([re_teilnehmer_name], teilnehmers)[0]
c = RufspielConfig(runde_id=runde.id,
punkteconfig=runde.punkteconfig,
geber_id=ansager.id,
teilnehmer_ids=[ansager.id, partner.id, gegner1.id, gegner2.id],
gelegt_ids=transform_teilnehmer_names_to_teilnehmer_ids(gelegt_teilnehmer_names, teilnehmers),
ansager_id=ansager.id,
rufsau=Farbgebung.BLATT,
kontriert_id=kontriert_id,
re_id=re_id,
partner_id=partner.id,
laufende=laufende,
spieler_augen=spieler_augen,
nicht_spieler_augen=120 - spieler_augen,
schwarz=schwarz)
rufspiel = RufspielCalculator(c)
result = rufspiel.get_teilnehmer_id_to_punkte()
assert transform_dc_teilnehmer_id_to_teilnehmer_name(result, teilnehmers) == expected
data_solo = [
([], None, None, 0, False, False, Spielart.WENZ, None, 0, True, {S1: -210, S2: 70, S3: 70, S4: 70}),
([], None, None, 0, False, False, Spielart.WENZ, None, 0, False, {S1: -180, S2: 60, S3: 60, S4: 60}),
([], None, None, 0, False, False, Spielart.GEIER, None, 29, False, {S1: -180, S2: 60, S3: 60, S4: 60}),
([], None, None, 0, False, False, Spielart.FARBSOLO, Farbgebung.EICHEL, 30, False,
{S1: -180, S2: 60, S3: 60, S4: 60}),
([], None, None, 0, False, False, Spielart.FARBSOLO, Farbgebung.BLATT, 31, False,
{S1: -150, S2: 50, S3: 50, S4: 50}),
(
[], None, None, 0, False, False, Spielart.FARBSOLO, Farbgebung.HERZ, 59, False,
{S1: -150, S2: 50, S3: 50, S4: 50}),
([], None, None, 0, False, False, Spielart.FARBSOLO, Farbgebung.SCHELLEN, 60, False,
{S1: -150, S2: 50, S3: 50, S4: 50}),
([], None, None, 0, False, False, Spielart.WENZ, None, 61, False, {S1: 150, S2: -50, S3: -50, S4: -50}),
([], None, None, 0, False, False, Spielart.GEIER, None, 89, False, {S1: 150, S2: -50, S3: -50, S4: -50}),
([], None, None, 0, False, False, Spielart.FARBSOLO, Farbgebung.EICHEL, 90, False,
{S1: 150, S2: -50, S3: -50, S4: -50}),
([], None, None, 0, False, False, Spielart.FARBSOLO, Farbgebung.BLATT, 91, False,
{S1: 180, S2: -60, S3: -60, S4: -60}),
([], None, None, 0, False, False, Spielart.FARBSOLO, Farbgebung.HERZ, 120, True,
{S1: 210, S2: -70, S3: -70, S4: -70}),
([], None, None, 2, False, False, Spielart.WENZ, None, 61, False, {S1: 210, S2: -70, S3: -70, S4: -70}),
([S1, S2], None, None, 4, False, False, Spielart.WENZ, None, 91, False, {S1: 1200, S2: -400, S3: -400, S4: -400}),
# Tout Spiele
([], None, None, 0, True, False, Spielart.WENZ, None, 120, False, {S1: 300, S2: -100, S3: -100, S4: -100}),
([], None, None, 0, False, True, Spielart.WENZ, None, 120, False, {S1: -300, S2: 100, S3: 100, S4: 100}),
([], None, None, 0, False, True, Spielart.WENZ, None, 0, False, {S1: -300, S2: 100, S3: 100, S4: 100}),
([S1, S2], None, None, 0, False, True, Spielart.WENZ, None, 0, False, {S1: -1200, S2: 400, S3: 400, S4: 400}),
([], None, None, 2, True, False, Spielart.WENZ, None, 120, False, {S1: 420, S2: -140, S3: -140, S4: -140}),
([], None, None, 3, False, True, Spielart.WENZ, None, 98, False, {S1: -480, S2: 160, S3: 160, S4: 160}),
]
@pytest.mark.parametrize(
('gelegt_teilnehmer_names',
'kontriert_teilnehmer_name',
're_teilnehmer_name',
'laufende',
'tout_gespielt_gewonnen',
'tout_gespielt_verloren',
'spielart',
'farbe',
'spieler_augen',
'schwarz',
'expected'), data_solo)
def test_solo(gelegt_teilnehmer_names: List[str],
kontriert_teilnehmer_name: Union[None, str],
re_teilnehmer_name: Union[None, str],
laufende: int,
tout_gespielt_gewonnen: bool,
tout_gespielt_verloren: bool,
spielart: Spielart,
farbe: Union[None, Farbgebung],
spieler_augen: int,
schwarz: bool,
expected: Dict[str, int]):
runde, spieler_1, gegner_1, gegner_2, gegner3 = init_in_memory_database()
teilnehmers = [spieler_1, gegner_1, gegner_2, gegner3]
kontriert_id = None if kontriert_teilnehmer_name is None else \
transform_teilnehmer_names_to_teilnehmer_ids([kontriert_teilnehmer_name], teilnehmers)[0]
re_id = None if re_teilnehmer_name is None else \
transform_teilnehmer_names_to_teilnehmer_ids([re_teilnehmer_name], teilnehmers)[0]
c = SoloConfig(runde_id=runde.id,
punkteconfig=runde.punkteconfig,
geber_id=spieler_1.id,
teilnehmer_ids=[spieler_1.id, gegner_1.id, gegner_2.id, gegner3.id],
gelegt_ids=transform_teilnehmer_names_to_teilnehmer_ids(gelegt_teilnehmer_names, teilnehmers),
ansager_id=spieler_1.id,
spielart=spielart,
kontriert_id=kontriert_id,
re_id=re_id,
farbe=farbe,
tout_gespielt_gewonnen=tout_gespielt_gewonnen,
tout_gespielt_verloren=tout_gespielt_verloren,
laufende=laufende,
spieler_augen=spieler_augen,
nicht_spieler_augen=120 - spieler_augen,
schwarz=schwarz)
solo = SoloCalculator(c)
result = solo.get_teilnehmer_id_to_punkte()
assert transform_dc_teilnehmer_id_to_teilnehmer_name(result, teilnehmers) == expected
@pytest.mark.parametrize(
('gelegt_teilnehmer_names',
'kontriert_teilnehmer_name',
're_teilnehmer_name',
'laufende',
'spieler_augen',
'schwarz',
'expected'), data_rufspiel_hochzeit)
def test_hochzeit(gelegt_teilnehmer_names: List[str],
kontriert_teilnehmer_name: Union[None, str],
re_teilnehmer_name: Union[None, str],
laufende: int,
spieler_augen: int,
schwarz: bool,
expected: Dict[str, int]):
runde, ansager, partner, gegner1, gegner2 = init_in_memory_database()
teilnehmers = [ansager, partner, gegner1, gegner2]
kontriert_id = None if kontriert_teilnehmer_name is None else \
transform_teilnehmer_names_to_teilnehmer_ids([kontriert_teilnehmer_name], teilnehmers)[0]
re_id = None if re_teilnehmer_name is None else \
transform_teilnehmer_names_to_teilnehmer_ids([re_teilnehmer_name], teilnehmers)[0]
c = HochzeitConfig(runde_id=runde.id,
punkteconfig=runde.punkteconfig,
geber_id=ansager.id,
teilnehmer_ids=[ansager.id, partner.id, gegner1.id, gegner2.id],
gelegt_ids=transform_teilnehmer_names_to_teilnehmer_ids(gelegt_teilnehmer_names, teilnehmers),
ansager_id=ansager.id,
kontriert_id=kontriert_id,
re_id=re_id,
partner_id=partner.id,
laufende=laufende,
spieler_augen=spieler_augen,
nicht_spieler_augen=120 - spieler_augen,
schwarz=schwarz)
hochzeit = HochzeitCalculator(c)
result = hochzeit.get_teilnehmer_id_to_punkte()
assert transform_dc_teilnehmer_id_to_teilnehmer_name(result, teilnehmers) == expected
data_ramsch = [
([], [], None, S1, {S1: -60, S2: 20, S3: 20, S4: 20}),
([], [], None, S2, {S1: 20, S2: -60, S3: 20, S4: 20}),
([], [], None, S3, {S1: 20, S2: 20, S3: -60, S4: 20}),
([], [], None, S4, {S1: 20, S2: 20, S3: 20, S4: -60}),
([S1], [], None, S1, {S1: -120, S2: 40, S3: 40, S4: 40}),
([S1, S2], [S3], None, S1, {S1: -480, S2: 160, S3: 160, S4: 160}),
([], [], S1, None, {S1: 150, S2: -50, S3: -50, S4: -50}),
([S2], [], S1, None, {S1: 300, S2: -100, S3: -100, S4: -100}),
([S1, S3], [], S1, None, {S1: 600, S2: -200, S3: -200, S4: -200}),
]
@pytest.mark.parametrize(
('gelegt_teilnehmer_names',
'jungfrau_teilnehmer_names',
'durchmarsch_name',
'verlierer_name',
'expected'), data_ramsch)
def test_ramsch(gelegt_teilnehmer_names: List[str],
jungfrau_teilnehmer_names: List[str],
durchmarsch_name: Union[None, str],
verlierer_name: Union[None, str],
expected: Dict[str, int]):
runde, ansager, partner, gegner1, gegner2 = init_in_memory_database()
teilnehmers = [ansager, partner, gegner1, gegner2]
verlierer_id = None if verlierer_name is None else \
transform_teilnehmer_names_to_teilnehmer_ids([verlierer_name], teilnehmers)[0]
durchmarsch_id = None if durchmarsch_name is None else \
transform_teilnehmer_names_to_teilnehmer_ids([durchmarsch_name], teilnehmers)[0]
c = RamschConfig(runde_id=runde.id,
punkteconfig=runde.punkteconfig,
geber_id=ansager.id,
teilnehmer_ids=[ansager.id, partner.id, gegner1.id, gegner2.id],
gelegt_ids=transform_teilnehmer_names_to_teilnehmer_ids(gelegt_teilnehmer_names, teilnehmers),
jungfrau_ids=transform_teilnehmer_names_to_teilnehmer_ids(jungfrau_teilnehmer_names, teilnehmers),
ausspieler_augen=30,
mittelhand_augen=30,
hinterhand_augen=30,
geberhand_augen=30,
verlierer_id=verlierer_id,
durchmarsch_id=durchmarsch_id,
durchmarsch=True if durchmarsch_id is not None else False)
ramsch = RamschCalculator(c)
result = ramsch.get_teilnehmer_id_to_punkte()
assert transform_dc_teilnehmer_id_to_teilnehmer_name(result, teilnehmers) == expected
def transform_teilnehmer_names_to_teilnehmer_ids(inputs: List[str], teilnehmer: List[Teilnehmer]) -> List[int]:
return [{s.name: s.id for s in teilnehmer}[i] for i in inputs]
def transform_dc_teilnehmer_id_to_teilnehmer_name(inputs: Dict[int, Any], teilnehmer: List[Teilnehmer]) -> Dict[
str, Any]:
return {{s.id: s.name for s in teilnehmer}[i]: inputs[i] for i in inputs}
def init_in_memory_database() -> Tuple[Runde, Teilnehmer, Teilnehmer, Teilnehmer, Teilnehmer]:
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
my_session = sessionmaker(bind=engine)
session = my_session()
# After this setting, Hochzeit should return the same results as rufspiel
punkteconfig = Punkteconfig(hochzeit=20.0)
session.add(punkteconfig)
session.commit()
runde = Runde(name='Sonntagsspiel', punkteconfig_id=punkteconfig.id, ort='Nürnberg')
spieler_1 = Teilnehmer(vorname='ansager_vorname', nachname='ansager_nachname', name=S1)
spieler_2 = Teilnehmer(vorname='partner_vorname', nachname='partner_nachname', name=S2)
spieler_3 = Teilnehmer(vorname='erster_geg_vorname', nachname='erster_geg_nachname', name=S3)
spieler_4 = Teilnehmer(vorname='zweiter_geg_vorname', nachname='zweiter_geg_nachname', name=S4)
session.add_all([spieler_1, spieler_2, spieler_3, spieler_4] + [runde])
session.commit()
return runde, spieler_1, spieler_2, spieler_3, spieler_4
|
import unittest
from redisor import get_client, setup
from redisor.structure import List, Set, Hash
class BaseTestMixin(unittest.TestCase):
def setUp(self):
setup(db=12)
self.db = get_client()
self.db.flushdb()
super().setUp()
def tearDown(self):
self.db.flushdb()
super().tearDown()
class ListTestCase(BaseTestMixin, unittest.TestCase):
def test_operation(self):
self.lst = List(db=self.db, key="test_list")
self.assertEqual(self.lst.unshift("a"), 1)
self.assertEqual(len(self.lst), 1)
self.assertEqual(self.lst.append("b"), 2)
self.assertEqual(self.lst.pop(), "b")
self.assertFalse("b" in self.lst)
self.assertEqual(list(self.lst), ["a"])
class SetTestCase(BaseTestMixin, unittest.TestCase):
def test_base_operation(self):
self.set_a = Set(db=self.db, key="test_set_a")
self.set_a.add('one')
self.assertEqual(len(self.set_a), 1)
self.set_a.remove("one")
self.set_a.add("two")
self.assertEqual(list(self.set_a), ["two"])
def test_two_set_operation(self):
self.set_b = Set(db=self.db, key="test_set_b")
self.set_c = Set(db=self.db, key="test_set_c")
self.set_b.add("one")
self.set_b.add("two")
self.set_c.add("two")
self.set_c.add("three")
self.assertEqual(self.set_b & self.set_c, {"two"})
self.assertEqual(self.set_b | self.set_c, {"one", "two", "three"})
self.assertEqual(self.set_b - self.set_c, {"one"})
self.assertEqual(self.set_c - self.set_b, {"three"})
class HashTestCase(BaseTestMixin, unittest.TestCase):
def test_base_operation(self):
self.hash_a = Hash(db=self.db, key="test_hash_a")
self.hash_a.update({"a": 3}, b=4)
self.assertEqual(self.hash_a.all(), {"a": '3', "b": '4'})
del self.hash_a["a"]
self.assertEqual(self.hash_a.all(), {"b": "4"})
self.hash_a.update({"1":1,"2":2,"3":3,"4":4,"5":5,"6":6})
print(self.hash_a)
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
""" Shelter (Camp) Registry, model
@copyright: 2009-2016 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3ShelterModel",
"S3ShelterRegistrationModel",
"cr_shelter_rheader",
"cr_update_shelter_population",
"cr_update_housing_unit_population",
"cr_update_capacity_from_housing_units",
"cr_check_population_availability",
"cr_notification_dispatcher",
)
import json
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3layouts import S3PopupLink
NIGHT = 1
DAY_AND_NIGHT = 2
# =============================================================================
class S3ShelterModel(S3Model):
names = ("cr_shelter_type",
"cr_shelter_service",
"cr_shelter",
"cr_shelter_id",
"cr_shelter_status",
"cr_shelter_person",
"cr_shelter_allocation",
"cr_shelter_unit",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
configure = self.configure
crud_strings = s3.crud_strings
define_table = self.define_table
messages = current.messages
super_link = self.super_link
set_method = self.set_method
NAME = T("Name")
location_id = self.gis_location_id
# ---------------------------------------------------------------------
# Shelter types
# e.g. NGO-operated, Government evacuation center, School, Hospital -- see Agasti opt_camp_type.)
tablename = "cr_shelter_type"
define_table(tablename,
Field("name", notnull=True,
label = NAME,
requires = [IS_NOT_EMPTY(),
IS_NOT_ONE_OF(db,
"%s.name" % tablename,
),
],
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
if settings.get_ui_label_camp():
ADD_SHELTER_TYPE = T("Add Camp Type")
SHELTER_TYPE_LABEL = T("Camp Type")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER_TYPE,
title_display = T("Camp Type Details"),
title_list = T("Camp Types"),
title_update = T("Edit Camp Type"),
label_list_button = T("List Camp Types"),
msg_record_created = T("Camp Type added"),
msg_record_modified = T("Camp Type updated"),
msg_record_deleted = T("Camp Type deleted"),
msg_list_empty = T("No Camp Types currently registered"))
else:
ADD_SHELTER_TYPE = T("Create Shelter Type")
SHELTER_TYPE_LABEL = T("Shelter Type")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER_TYPE,
title_display = T("Shelter Type Details"),
title_list = T("Shelter Types"),
title_update = T("Edit Shelter Type"),
label_list_button = T("List Shelter Types"),
msg_record_created = T("Shelter Type added"),
msg_record_modified = T("Shelter Type updated"),
msg_record_deleted = T("Shelter Type deleted"),
msg_list_empty = T("No Shelter Types currently registered"))
configure(tablename,
deduplicate = S3Duplicate(),
)
represent = S3Represent(lookup=tablename, translate=True)
shelter_type_id = S3ReusableField("shelter_type_id", "reference %s" % tablename,
label = SHELTER_TYPE_LABEL,
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cr_shelter_type.id",
represent)),
comment=S3PopupLink(c = "cr",
f = "shelter_type",
label = ADD_SHELTER_TYPE,
),
)
# -------------------------------------------------------------------------
# Shelter services
# e.g. medical, housing, food, ...
tablename = "cr_shelter_service"
define_table(tablename,
Field("name", notnull=True,
label = NAME,
requires = IS_NOT_EMPTY(),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
if settings.get_ui_label_camp():
ADD_SHELTER_SERVICE = T("Add Camp Service")
SHELTER_SERVICE_LABEL = T("Camp Service")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER_SERVICE,
title_display = T("Camp Service Details"),
title_list = T("Camp Services"),
title_update = T("Edit Camp Service"),
label_list_button = T("List Camp Services"),
msg_record_created = T("Camp Service added"),
msg_record_modified = T("Camp Service updated"),
msg_record_deleted = T("Camp Service deleted"),
msg_list_empty = T("No Camp Services currently registered"))
else:
ADD_SHELTER_SERVICE = T("Create Shelter Service")
SHELTER_SERVICE_LABEL = T("Shelter Service")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER_SERVICE,
title_display = T("Shelter Service Details"),
title_list = T("Shelter Services"),
title_update = T("Edit Shelter Service"),
label_list_button = T("List Shelter Services"),
msg_record_created = T("Shelter Service added"),
msg_record_modified = T("Shelter Service updated"),
msg_record_deleted = T("Shelter Service deleted"),
msg_list_empty = T("No Shelter Services currently registered"))
service_represent = S3Represent(lookup=tablename, translate=True)
service_multirepresent = S3Represent(lookup=tablename,
translate=True,
multiple=True
)
shelter_service_id = S3ReusableField("shelter_service_id",
"list:reference cr_shelter_service",
label = SHELTER_SERVICE_LABEL,
ondelete = "RESTRICT",
represent = service_multirepresent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"cr_shelter_service.id",
service_represent,
multiple=True)),
sortby = "name",
comment = S3PopupLink(c = "cr",
f = "shelter_service",
label = ADD_SHELTER_SERVICE,
),
widget = S3MultiSelectWidget(header=False,
)
)
# -------------------------------------------------------------------------
# Shelter Environmental Characteristics
# e.g. Lake, Mountain, ground type.
tablename = "cr_shelter_environment"
define_table(tablename,
Field("name", notnull=True,
label = NAME,
requires = IS_NOT_EMPTY(),
),
s3_comments(),
*s3_meta_fields())
environment_represent = S3Represent(lookup=tablename, translate=True)
environment_multirepresent = S3Represent(lookup=tablename,
translate=True,
multiple=True
)
shelter_environment_id = S3ReusableField("cr_shelter_environment_id",
"list:reference cr_shelter_environment",
label = T("Environmental Characteristics"),
ondelete = "RESTRICT",
represent = environment_multirepresent,
requires = IS_EMPTY_OR(IS_ONE_OF(db,
"cr_shelter_environment.id",
environment_represent,
multiple=True)),
sortby = "name",
widget = S3MultiSelectWidget()
)
# -------------------------------------------------------------------------
# Shelters
#
cr_shelter_opts = {1 : T("Closed"),
2 : T("Open"),
}
day_and_night = settings.get_cr_day_and_night()
dynamic = settings.get_cr_shelter_population_dynamic()
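# Two deployment settings drive the field layout below: day_and_night adds
# separate day/night capacity and population fields, while dynamic switches
# the population figures from manual entry to automatic updates.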
if settings.get_cr_shelter_housing_unit_management():
if day_and_night:
capacity_day_comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Capacity (Day)"),
T("Capacity of the shelter during the day"),
T("Capacity evaluated adding all defined housing unit capacities")))
capacity_night_comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Capacity (Night)"),
T("Capacity of the shelter during the night"),
T("Capacity evaluated adding all defined housing unit capacities")))
else:
capacity_day_comment = DIV(_class="tooltip",
_title="%s|%s|%s" % (T("Capacity"),
T("Capacity of the shelter as a number of people"),
T("Capacity evaluated adding all defined housing unit capacities")))
capacity_night_comment = None
else:
if day_and_night:
capacity_day_comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Capacity (Day)"),
T("Capacity of the shelter during the day")))
capacity_night_comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Capacity (Night)"),
T("Capacity of the shelter during the night")))
else:
capacity_day_comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Capacity"),
T("Capacity of the shelter as a number of people")))
capacity_night_comment = None
tablename = "cr_shelter"
define_table(tablename,
super_link("doc_id", "doc_entity"),
super_link("pe_id", "pr_pentity"),
super_link("site_id", "org_site"),
# @ToDo: code_requires
#Field("code", length=10, # Mayon compatibility
# label=T("Code")
# ),
Field("name", notnull=True,
length=64, # Mayon compatibility
label = T("Shelter Name"),
requires = IS_NOT_EMPTY(),
),
self.org_organisation_id(
requires = self.org_organisation_requires(updateable=True),
),
shelter_type_id(), # e.g. NGO-operated, Government evacuation center, School, Hospital -- see Agasti opt_camp_type.)
shelter_service_id(), # e.g. medical, housing, food, ...
shelter_environment_id(readable = False,
writable = False,),# Enable in template if-required
location_id(),
Field("phone",
label = T("Phone"),
requires = IS_EMPTY_OR(s3_phone_requires),
),
Field("email", "string",
label = T("Email"),
),
self.pr_person_id(label = T("Contact Person / Camp Owner")),
#Static field
Field("population", "integer",
label = T("Estimated Population"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
readable = not dynamic,
writable = not dynamic,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Current estimated population"),
T("Current estimated population in shelter. Staff, Volunteers and Evacuees."))),
),
Field("capacity_day", "integer",
default = 0,
label = T("Capacity (Day)") if day_and_night else T("Capacity"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
comment = capacity_day_comment,
),
Field("capacity_night", "integer",
default = 0,
label = T("Capacity (Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
readable = day_and_night,
writable = day_and_night,
comment = capacity_night_comment,
),
# Dynamic field
Field("available_capacity_day", "integer",
default = 0,
label = T("Available Capacity (Day)") if day_and_night else T("Available Capacity"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
readable = dynamic and day_and_night,
# Automatically updated
writable = False,
),
# Dynamic field
Field("available_capacity_night", "integer",
default = 0,
label = T("Available Capacity (Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
readable = dynamic and day_and_night,
# Automatically updated
writable = False,
),
# Dynamic field
Field("population_day", "integer",
default = 0,
label = T("Current Population (Day)") if day_and_night else T("Current Population"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Population (Day)"),
T("Number of people registered in the shelter for day and night"))),
readable = dynamic,
# Automatically updated
writable = False
),
# Dynamic field
Field("population_night", "integer",
default = 0,
label = T("Current Population (Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Population (Night)"),
T("Number of people registered in the shelter for the night"))),
readable = dynamic and day_and_night,
# Automatically updated
writable = False
),
Field("status", "integer",
label = T("Status"),
represent = lambda opt: \
cr_shelter_opts.get(opt, messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cr_shelter_opts)
),
),
s3_comments(),
Field("obsolete", "boolean",
default = False,
label = T("Obsolete"),
represent = lambda opt: \
(opt and [T("Obsolete")] or [messages["NONE"]])[0],
readable = False,
writable = False,
),
*s3_meta_fields())
# CRUD strings
if settings.get_ui_label_camp():
ADD_SHELTER = T("Add Camp")
SHELTER_LABEL = T("Camp")
SHELTER_HELP = T("The Camp this Request is from")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER,
title_display = T("Camp Details"),
title_list = T("Camps"),
title_update = T("Edit Camp"),
label_list_button = T("List Camps"),
msg_record_created = T("Camp added"),
msg_record_modified = T("Camp updated"),
msg_record_deleted = T("Camp deleted"),
msg_list_empty = T("No Camps currently registered"))
else:
ADD_SHELTER = T("Create Shelter")
SHELTER_LABEL = T("Shelter")
SHELTER_HELP = T("The Shelter this Request is from")
crud_strings[tablename] = Storage(
label_create = ADD_SHELTER,
title_display = T("Shelter Details"),
title_list = T("Shelters"),
title_update = T("Edit Shelter"),
label_list_button = T("List Shelters"),
msg_record_created = T("Shelter added"),
msg_record_modified = T("Shelter updated"),
msg_record_deleted = T("Shelter deleted"),
msg_list_empty = T("No Shelters currently registered"))
# Which levels of Hierarchy are we using?
levels = current.gis.get_relevant_hierarchy_levels()
report_fields = ["name",
"shelter_type_id",
#"organisation_id",
"status",
]
if dynamic:
report_fields.append("population_day")
if day_and_night:
report_fields.append("population_night")
else:
# Manual
report_fields.append("population")
text_fields = ["name",
"code",
"comments",
"organisation_id$name",
"organisation_id$acronym",
"location_id$name",
]
list_fields = ["name",
"status",
"shelter_type_id",
#"shelter_service_id",
]
if dynamic:
list_fields.append("capacity_day")
if day_and_night:
list_fields.append("capacity_night")
list_fields.append("population_day")
if day_and_night:
list_fields.append("population_night")
else:
# Manual
list_fields.append("population")
list_fields.append("location_id$addr_street")
#list_fields.append("person_id")
for level in levels:
lfield = "location_id$%s" % level
report_fields.append(lfield)
text_fields.append(lfield)
list_fields.append(lfield)
cr_shelter_status_filter_opts = dict(cr_shelter_opts)
cr_shelter_status_filter_opts[None] = T("Unspecified")
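# The None key gives the status filter an "Unspecified" option for shelters
# that have no status set.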
if settings.get_org_branches():
org_filter = S3HierarchyFilter("organisation_id",
leafonly = False,
)
else:
org_filter = S3OptionsFilter("organisation_id",
filter = True,
header = "",
#hidden = True,
)
filter_widgets = [
S3TextFilter(text_fields,
label = T("Search"),
#_class = "filter-search",
),
S3OptionsFilter("shelter_type_id",
label = T("Type"),
# Doesn't translate
#represent = "%(name)s",
),
org_filter,
S3LocationFilter("location_id",
label = T("Location"),
levels = levels,
),
S3OptionsFilter("status",
label = T("Status"),
options = cr_shelter_status_filter_opts,
none = True,
),
]
if dynamic:
if day_and_night:
filter_widgets.append(S3RangeFilter("available_capacity_night",
label = T("Available Capacity (Night)"),
))
else:
filter_widgets.append(S3RangeFilter("available_capacity_day",
label = T("Available Capacity"),
))
if day_and_night:
filter_widgets.append(S3RangeFilter("capacity_night",
label = T("Total Capacity (Night)"),
))
else:
filter_widgets.append(S3RangeFilter("capacity_day",
label = T("Total Capacity"),
))
if settings.get_cr_shelter_people_registration():
# Go to People check-in for this shelter after creation
create_next = URL(c="cr", f="shelter",
args=["[id]", "shelter_registration"])
else:
create_next = None
configure(tablename,
create_next = create_next,
deduplicate = S3Duplicate(),
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.cr_shelter_onaccept,
report_options = Storage(
rows=report_fields,
cols=report_fields,
fact=report_fields,
defaults=Storage(rows = lfield, # Lowest-level of hierarchy
cols="status",
fact="count(name)",
totals=True)
),
super_entity = ("org_site", "doc_entity", "pr_pentity"),
)
# Reusable field
represent = S3Represent(lookup=tablename)
shelter_id = S3ReusableField("shelter_id", "reference %s" % tablename,
label = SHELTER_LABEL,
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "cr_shelter.id",
represent,
sort=True)),
comment = S3PopupLink(c = "cr",
f = "shelter",
label = ADD_SHELTER,
title = SHELTER_LABEL,
tooltip = "%s (%s)." % (SHELTER_HELP,
T("optional"),
),
),
widget = S3AutocompleteWidget("cr", "shelter")
)
self.add_components(tablename,
cr_shelter_allocation = "shelter_id",
cr_shelter_registration = "shelter_id",
cr_shelter_unit = "shelter_id",
cr_shelter_status = {"name": "status",
"joinby": "shelter_id",
},
event_event_shelter = "shelter_id",
evr_case = "shelter_id",
)
# Custom Method to Assign HRs
set_method("cr", "shelter",
method = "assign",
action = self.hrm_AssignMethod(component="human_resource_site"))
set_method("cr", "shelter",
method="check-in",
action = self.org_SiteCheckInMethod())
set_method("cr", "shelter",
method = "dispatch",
action = cr_notification_dispatcher)
# -------------------------------------------------------------------------
# Shelter statuses
# - a historical record of shelter status: opening/closing dates & populations
#
tablename = "cr_shelter_status"
define_table(tablename,
shelter_id(ondelete = "CASCADE"),
s3_date(),
Field("status", "integer",
label = T("Status"),
represent = lambda opt: \
cr_shelter_opts.get(opt, messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cr_shelter_opts)
),
),
Field("population", "integer",
label = T("Population"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
),
s3_comments(),
*s3_meta_fields())
# CRUD strings
if settings.get_ui_label_camp():
crud_strings[tablename] = Storage(
label_create = T("Add Camp Status"),
title_display = T("Camp Status Details"),
title_list = T("Camp Statuses"),
title_update = T("Edit Camp Status"),
label_list_button = T("List Camp Statuses"),
msg_record_created = T("Camp Status added"),
msg_record_modified = T("Camp Status updated"),
msg_record_deleted = T("Camp Status deleted"),
msg_list_empty = T("No Camp Statuses currently registered"))
else:
crud_strings[tablename] = Storage(
label_create = T("Create Shelter Status"),
title_display = T("Shelter Status Details"),
title_list = T("Shelter Statuses"),
title_update = T("Edit Shelter Status"),
label_list_button = T("List Shelter Statuses"),
msg_record_created = T("Shelter Status added"),
msg_record_modified = T("Shelter Status updated"),
msg_record_deleted = T("Shelter Status deleted"),
msg_list_empty = T("No Shelter Statuses currently registered"))
# -------------------------------------------------------------------------
# Housing units
#
cr_housing_unit_opts = {1: T("Available"),
2: T("Not Available"),
}
cr_housing_unit_handicap_facilities = {1: T("Available"),
2: T("Suitable"),
3: T("Not Available"),
}
tablename = "cr_shelter_unit"
define_table(tablename,
Field("name", notnull=True, length = 64,
label = T("Housing Unit Name"),
requires = IS_NOT_EMPTY(),
),
# @ToDo: Using site_id would be more flexible & link
# better to default_site/auth.user.site_id
shelter_id(ondelete = "CASCADE"),
location_id(widget = S3LocationSelector(#catalog_layers=True,
points = False,
polygons = True,
),
),
Field("status", "integer",
default = 1,
label = T("Status"),
represent = lambda opt: \
cr_housing_unit_opts.get(opt, messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cr_housing_unit_opts))
),
Field("transitory", "boolean",
default = False,
label = T("Transitory Accommodation"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Transitory Accommodation"),
T("This unit is for transitory accommodation upon arrival."),
),
),
# Enable in template as required:
readable = False,
writable = False,
),
Field("bath", "boolean",
default = True,
label = T("Available Bath"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Bath Availability"),
T("Integrated bath within housing unit"),
),
),
),
Field("handicap_bath", "integer",
default = 1,
label = T("Bath with handicap facilities"),
represent = lambda opt: \
cr_housing_unit_handicap_facilities.get(opt, messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cr_housing_unit_handicap_facilities)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Bath Handicap Facilities"),
T("Availability of bath handicap facilities"),
),
),
),
Field("shower", "boolean",
default = True,
label = T("Available Shower"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Shower Availability"),
T("Integrated shower within housing unit"))),
),
Field("handicap_shower", "integer",
default = 1,
label = T("Shower with handicap facilities"),
represent = lambda opt: \
cr_housing_unit_handicap_facilities.get(opt, messages.UNKNOWN_OPT),
requires = IS_EMPTY_OR(
IS_IN_SET(cr_housing_unit_handicap_facilities)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Shower Handicap Facilities"),
T("Availability of shower handicap facilities"),
),
),
),
Field("capacity_day", "integer",
default = 0,
label = T("Housing Unit Capacity (Day)") if day_and_night else T("Housing Unit Capacity"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Housing Unit Capacity (Day)"),
T("Capacity of the housing unit for people during the day"),
),
),
),
Field("capacity_night", "integer",
default = 0,
label = T("Housing Unit Capacity (Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
readable = day_and_night,
writable = day_and_night,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Housing Unit Capacity (Night)"),
T("Capacity of the housing unit for people during the night"),
),
),
),
Field("available_capacity_day", "integer",
default = 0,
label = T("Available Capacity (Day)") if day_and_night else T("Available Capacity"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
comment = DIV(_class = "tooltip",
_title = T("Currently Available Capacity (Day)"),
),
# Automatically updated
readable = dynamic,
writable = False,
),
Field("available_capacity_night", "integer",
default = 0,
label = T("Population Availability (Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
comment = DIV(_class = "tooltip",
_title = T("Currently Available Capacity (Night)"),
),
# Automatically updated
readable = dynamic and day_and_night,
writable = False,
),
Field("population_day", "integer",
default = 0,
label = T("Current Population (Day)") if day_and_night else T("Current Population"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Housing Unit Current Population"),
T("Number of people registered in this housing unit for day and night"),
),
),
# Automatically updated
readable = False,
writable = False,
),
Field("population_night", "integer",
default = 0,
label = T("Current Population (Night)"),
represent = lambda v: IS_INT_AMOUNT.represent(v),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Housing Unit Current Population"),
T("Number of evacuees registered in this housing unit for the night"),
),
),
readable = day_and_night,
# Automatically updated
writable = False,
),
Field("domestic_animals", "boolean",
default = False,
label = T("Free for domestic animals"),
represent = s3_yes_no_represent,
),
Field.Method("cstatus", self.cr_shelter_unit_status),
s3_comments(),
*s3_meta_fields())
list_fields = ["id",
"name",
]
if day_and_night:
list_fields += ["status", # @ToDO: Move to EVASS template
"handicap_bath", # @ToDO: Move to EVASS template
"capacity_day",
"capacity_night",
"population_day",
"population_night",
]
else:
list_fields += ["available_capacity_day",
"capacity_day",
"population_day",
]
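# Bind the tablename into the handler so that both onaccept and ondelete of
# housing units trigger the shared population update.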
population_onaccept = lambda form: \
self.cr_shelter_population_onaccept(
form,
tablename="cr_shelter_unit",
)
configure(tablename,
# @ToDo: Allow multiple shelters to have the same
# name of unit (Requires that Shelter is in dvr/person.xsl/csv)
#deduplicate = S3Duplicate(primary=("shelter_id", "name")),
deduplicate = S3Duplicate(),
list_fields = list_fields,
# Extra fields for cr_shelter_unit_status:
extra_fields = ["capacity_day",
"available_capacity_day",
"status",
],
onaccept = population_onaccept,
ondelete = population_onaccept,
)
represent = S3Represent(lookup="cr_shelter_unit")
housing_unit_id = S3ReusableField("shelter_unit_id", db.cr_shelter_unit,
label = T("Housing Unit"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(IS_ONE_OF(db, "cr_shelter_unit.id",
represent,
orderby="shelter_id",
#sort=True
)),
#widget = S3AutocompleteWidget("cr", "shelter_unit")
)
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
return dict(ADD_SHELTER = ADD_SHELTER,
SHELTER_LABEL = SHELTER_LABEL,
cr_shelter_id = shelter_id,
cr_housing_unit_id = housing_unit_id,
)
# -------------------------------------------------------------------------
@staticmethod
def defaults():
"""
Return safe defaults in case the model has been deactivated.
"""
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False)
return dict(cr_shelter_id = lambda **attr: dummy("shelter_id"),
)
# -------------------------------------------------------------------------
@staticmethod
def cr_shelter_onaccept(form):
"""
After DB I/O
"""
form_vars = form.vars
# Update Affiliation, record ownership and component ownership
current.s3db.org_update_affiliations("cr_shelter", form_vars)
if current.deployment_settings.get_cr_shelter_population_dynamic():
# Update population and available capacity
cr_update_shelter_population(form_vars.id)
# @ToDo: Update/Create a cr_shelter_status record
return
# -----------------------------------------------------------------------------
@staticmethod
def cr_shelter_service_multirepresent(shelter_service_ids):
"""
"""
if not shelter_service_ids:
return current.messages["NONE"]
db = current.db
table = db.cr_shelter_service
if isinstance(shelter_service_ids, (list, tuple)):
query = (table.id.belongs(shelter_service_ids))
shelter_services = db(query).select(table.name)
return ", ".join([s.name for s in shelter_services])
else:
query = (table.id == shelter_service_ids)
shelter_service = db(query).select(table.name,
limitby=(0, 1)).first()
try:
return shelter_service.name
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def cr_shelter_unit_status(row):
"""
Virtual Field to show the status of the unit by available capacity
- used to colour features on the map
0: Full
1: Partial
2: Empty
3: Not Available
"""
if hasattr(row, "cr_shelter_unit"):
row = row.cr_shelter_unit
if hasattr(row, "status"):
status = row.status
else:
status = None
if status == 2:
# Not Available
return 3
if hasattr(row, "available_capacity_day"):
actual = row.available_capacity_day
else:
actual = None
if status is not None and actual is not None:
if actual <= 0:
# Full (or over-capacity)
return 0
if hasattr(row, "capacity_day"):
total = row.capacity_day
if total == 0:
# No capacity ever, so Full
return 0
else:
total = None
if status is not None and total is not None and actual is not None:
if actual == total:
# Empty
return 2
else:
# Partial
return 1
if hasattr(row, "id"):
# Reload the record
s3_debug("Reloading cr_shelter_unit record")
table = current.s3db.cr_shelter_unit
r = current.db(table.id == row.id).select(table.status,
table.capacity_day,
table.available_capacity_day,
limitby=(0, 1)
).first()
if r:
status = r.status
if status == 2:
# Not Available
return 3
actual = r.available_capacity_day
if actual <= 0:
# Full (or over-capacity)
return 0
total = r.capacity_day
if total == 0:
# No capacity ever, so Full
return 0
elif actual == total:
# Empty
return 2
else:
# Partial
return 1
return current.messages["NONE"]
# =============================================================================
class S3ShelterRegistrationModel(S3Model):
names = ("cr_shelter_allocation",
"cr_shelter_registration",
"cr_shelter_registration_history",
"cr_shelter_registration_status_opts",
"cr_shelter_population_onaccept",
)
def model(self):
T = current.T
configure = self.configure
define_table = self.define_table
settings = current.deployment_settings
shelter_id = self.cr_shelter_id
person_id = self.pr_person_id
day_and_night = settings.get_cr_day_and_night()
# ---------------------------------------------------------------------
# Shelter Allocation: table to allocate shelter capacity to a group
#
allocation_status_opts = {1: T("requested"),
2: T("available"),
3: T("allocated"),
4: T("occupied"),
5: T("departed"),
6: T("obsolete"),
7: T("unavailable"),
}
tablename = "cr_shelter_allocation"
define_table(tablename,
shelter_id(empty = False,
ondelete = "CASCADE",
),
self.pr_group_id(comment = None),
Field("status", "integer",
default = 3,
label = T("Status"),
requires = IS_IN_SET(allocation_status_opts),
represent = S3Represent(options = allocation_status_opts),
),
Field("group_size_day", "integer",
default = 0,
label = T("Group Size (Day)") if day_and_night else T("Group Size"),
),
Field("group_size_night", "integer",
default = 0,
label = T("Group Size (Night)"),
readable = day_and_night,
writable = day_and_night,
),
*s3_meta_fields())
population_onaccept = lambda form: \
self.cr_shelter_population_onaccept(
form,
tablename="cr_shelter_allocation",
)
configure(tablename,
onaccept = population_onaccept,
ondelete = population_onaccept,
)
# ---------------------------------------------------------------------
# Shelter Registration: table to register a person to a shelter
#
cr_day_or_night_opts = {NIGHT: T("Night only"),
DAY_AND_NIGHT: T("Day and Night")
}
# Registration status
reg_status_opts = {1: T("Planned"),
2: T("Checked-in"),
3: T("Checked-out"),
}
reg_status = S3ReusableField("registration_status", "integer",
label = T("Status"),
represent = S3Represent(
options=reg_status_opts,
),
requires = IS_IN_SET(reg_status_opts,
zero=None
),
)
housing_unit = settings.get_cr_shelter_housing_unit_management()
tablename = "cr_shelter_registration"
define_table(tablename,
# @ToDo: Convert to site_id? (More flexible & easier to default, etc)
shelter_id(empty = False,
ondelete = "CASCADE",
),
# The comment explains how to register a new person
# it should not be done in a popup
person_id(
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Person"),
# @ToDo: Generalise (this is EVASS-specific)
T("Type the name of a registered person \
or to add an unregistered person to this \
shelter click on Evacuees")
)
),
),
self.cr_housing_unit_id(readable = housing_unit,
writable = housing_unit,
),
Field("day_or_night", "integer",
default = DAY_AND_NIGHT,
label = T("Presence in the shelter"),
represent = S3Represent(options=cr_day_or_night_opts
),
requires = IS_IN_SET(cr_day_or_night_opts,
zero=None
),
readable = day_and_night,
writable = day_and_night,
),
reg_status(),
s3_datetime("check_in_date",
label = T("Check-in date"),
default = "now",
#empty = False,
future = 0,
),
s3_datetime("check_out_date",
label = T("Check-out date"),
),
s3_comments(),
*s3_meta_fields())
registration_onaccept = self.shelter_registration_onaccept
configure(tablename,
deduplicate = S3Duplicate(primary = ("person_id",
"shelter_unit_id",
),
),
onaccept = registration_onaccept,
ondelete = registration_onaccept,
)
if housing_unit:
configure(tablename,
onvalidation = self.cr_shelter_registration_onvalidation,
)
# Custom Methods
self.set_method("cr", "shelter_registration",
method = "assign",
action = cr_AssignUnit())
# ---------------------------------------------------------------------
# Shelter Registration History: history of status changes
#
tablename = "cr_shelter_registration_history"
define_table(tablename,
person_id(),
self.cr_shelter_id(),
s3_datetime(default = "now",
),
reg_status("previous_status",
label = T("Old Status"),
),
reg_status("status",
label = T("New Status"),
),
*s3_meta_fields())
configure(tablename,
list_fields = ["shelter_id",
"date",
(T("Status"), "status"),
(T("Modified by"), "modified_by"),
],
insertable = False,
editable = False,
deletable = False,
orderby = "%s.date desc" % tablename,
)
# ---------------------------------------------------------------------
# Pass variables back to global scope (response.s3.*)
return dict(cr_shelter_population_onaccept = self.shelter_population_onaccept,
cr_shelter_registration_status_opts = reg_status_opts,
)
# -------------------------------------------------------------------------
@staticmethod
def cr_shelter_registration_onvalidation(form):
"""
Check if the housing unit belongs to the requested shelter
"""
request = current.request
controller = request.controller
if controller == "dvr":
# Housing Unit is not mandatory during Case Registration
return
unit_id = None
if type(form) is Row:
form_vars = form
else:
form_vars = form.vars
if controller == "evr":
# Registration form includes the Shelter
shelter_id = form_vars.shelter_id
unit_id = form_vars.shelter_unit_id
elif controller == "cr":
# Registration form doesn't include the Shelter
# @ToDo: don't assume that we are running as component of the shelter
shelter_id = form_vars.shelter_id or (form.record and form.record.shelter_id) or request.args[0]
unit_id = form_vars.shelter_unit_id
if unit_id is None:
current.response.warning = current.T("Warning: No housing unit selected")
else:
db = current.db
htable = db.cr_shelter_unit
record = db(htable.id == unit_id).select(htable.shelter_id,
limitby=(0, 1)).first()
if str(record.shelter_id) != str(shelter_id):
error = current.T("You have to select a housing unit belonging to the shelter")
form.errors["branch_id"] = error
current.response.error = error
# -------------------------------------------------------------------------
@classmethod
def shelter_registration_onaccept(cls, form):
"""
Registration onaccept: track status changes, update
shelter population
@param form: the FORM (also accepts Row)
"""
try:
if type(form) is Row:
formvars = form
else:
formvars = form.vars
registration_id = formvars.id
except AttributeError:
unit_id = None
else:
unit_id = formvars.get("shelter_unit_id")
if registration_id:
s3db = current.s3db
db = current.db
# Get the current status
rtable = s3db.cr_shelter_registration
query = (rtable.id == registration_id) & \
(rtable.deleted != True)
reg = db(query).select(rtable.id,
rtable.shelter_id,
rtable.shelter_unit_id,
rtable.registration_status,
rtable.check_in_date,
rtable.check_out_date,
rtable.modified_on,
rtable.person_id,
limitby = (0, 1),
).first()
if reg:
person_id = reg.person_id
# Unit to check availability for
unit_id = reg.shelter_unit_id
# Get the previous status
htable = s3db.cr_shelter_registration_history
query = (htable.person_id == person_id) & \
(htable.shelter_id == reg.shelter_id) & \
(htable.deleted != True)
row = db(query).select(htable.status,
htable.date,
orderby = ~htable.created_on,
limitby = (0, 1),
).first()
if row:
previous_status = row.status
previous_date = row.date
else:
previous_status = None
previous_date = None
# Get the current status
current_status = reg.registration_status
# Get the effective date
if current_status == 2:
effective_date_field = "check_in_date"
elif current_status == 3:
effective_date_field = "check_out_date"
else:
effective_date_field = None
if effective_date_field:
# Read from registration
effective_date = reg[effective_date_field]
else:
# Use modified_on for history
effective_date = reg.modified_on
if current_status != previous_status or \
effective_date_field and not effective_date:
if effective_date_field:
# If the new status has an effective date,
# make sure it gets updated when the status
# has changed:
if effective_date_field not in formvars or \
not effective_date or \
previous_date and effective_date < previous_date:
effective_date = current.request.utcnow
reg.update_record(**{
effective_date_field: effective_date,
})
# Insert new history entry
htable.insert(previous_status = previous_status,
status = current_status,
date = effective_date,
person_id = person_id,
shelter_id = reg.shelter_id,
)
# Update last_seen_on
if current.deployment_settings.has_module("dvr"):
s3db.dvr_update_last_seen(person_id)
# Update population
cls.shelter_population_onaccept(form,
tablename = "cr_shelter_registration",
unit_id = unit_id,
)
# -------------------------------------------------------------------------
@staticmethod
def shelter_population_onaccept(form, tablename=None, unit_id = None):
"""
Update the shelter population, onaccept
@param form: the FORM
@param tablename: the table name
@param unit_id: the shelter unit ID (to warn if full)
"""
db = current.db
s3db = current.s3db
if not tablename:
return
table = s3db[tablename]
try:
if type(form) is Row:
record_id = form.id
else:
record_id = form.vars.id
except AttributeError:
# Nothing we can do
return
if tablename == "cr_shelter_unit":
unit_id = record_id
# Get the record
row = db(table._id == record_id).select(table._id,
table.shelter_id,
table.deleted,
table.deleted_fk,
limitby=(0, 1)).first()
if row:
if row.deleted:
if row.deleted_fk:
deleted_fk = json.loads(row.deleted_fk)
else:
return
shelter_id = deleted_fk.get("shelter_id")
else:
shelter_id = row.shelter_id
if shelter_id:
if current.deployment_settings.get_cr_shelter_housing_unit_management():
# First update housing units census
cr_update_capacity_from_housing_units(shelter_id)
# Shelter census
cr_update_shelter_population(shelter_id)
# Warn if unit is full
if unit_id:
cr_check_population_availability(unit_id,
table = s3db.cr_shelter_unit,
)
# Warn if shelter is full
cr_check_population_availability(shelter_id,
table = s3db.cr_shelter,
)
# =============================================================================
def cr_shelter_rheader(r, tabs=[]):
""" Resource Headers """
if r.representation != "html":
# RHeaders only used in interactive views
return None
rheader = None
tablename, record = s3_rheader_resource(r)
if tablename == "cr_shelter" and record:
T = current.T
s3db = current.s3db
if not tabs:
settings = current.deployment_settings
tabs = [(T("Basic Details"), None),
(T("Status Reports"), "status"),
]
if settings.get_L10n_translate_org_site():
tabs.append((T("Local Names"), "name"))
if settings.get_cr_tags():
tabs.append((T("Tags"), "tag"))
if settings.get_cr_shelter_people_registration():
tabs.extend([(T("Client Reservation"), "shelter_allocation"),
(T("Client Registration"), "shelter_registration"),
])
if settings.has_module("hrm"):
STAFF = settings.get_hrm_staff_label()
tabs.append((STAFF, "human_resource"))
permit = current.auth.s3_has_permission
if permit("update", tablename, r.id) and \
permit("create", "hrm_human_resource_site"):
tabs.append((T("Assign %(staff)s") % dict(staff=STAFF), "assign"))
if settings.get_cr_shelter_housing_unit_management():
tabs.append((T("Housing Units"), "shelter_unit"))
#tabs.append((T("Events"), "event_shelter"))
#if settings.has_module("assess"):
# tabs.append((T("Assessments"), "rat"))
try:
tabs = tabs + s3db.req_tabs(r, match=False)
except:
pass
try:
tabs = tabs + s3db.inv_tabs(r)
except:
pass
if settings.has_module("msg"):
tabs.append((T("Send Notification"), "dispatch"))
rheader_tabs = s3_rheader_tabs(r, tabs)
if r.name == "shelter":
location = r.table.location_id.represent(record.location_id)
rheader = DIV(TABLE(TR(TH("%s: " % T("Name")), record.name
),
TR(TH("%s: " % T("Location")), location
),
),
rheader_tabs)
else:
rheader = DIV(TABLE(TR(TH("%s: " % T("Name")), record.name
),
),
rheader_tabs)
return rheader
# =============================================================================
def cr_update_housing_unit_population(shelter_id):
"""
Update housing unit population number.
To be called onaccept/ondelete of cr_shelter_registration and
cr_shelter_allocation.
@param shelter_id: the Shelter ID
"""
db = current.db
settings = current.deployment_settings
htable = db.cr_shelter_unit
rtable = db.cr_shelter_registration
rjoin = (htable.id == rtable.shelter_unit_id) & \
(rtable.deleted != True)
check_out_is_final = settings.get_cr_check_out_is_final()
if check_out_is_final:
rtable &= (rtable.registration_status != 3)
query = (htable.shelter_id == shelter_id) & \
(htable.status == 1) & \
(htable.deleted != True)
rcount = rtable.id.count()
day_and_night = settings.get_cr_day_and_night()
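# With day/night mode enabled, the daytime census counts only registrations
# marked DAY_AND_NIGHT (via the extra join condition below), whereas the
# night census counts every registration.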
if day_and_night:
for daytime in (True, False):
if daytime:
fn_capacity = "capacity_day"
fn_population = "population_day"
fn_available_capacity = "available_capacity_day"
left = rtable.on(rjoin & (rtable.day_or_night == DAY_AND_NIGHT))
else:
fn_capacity = "capacity_night"
fn_population = "population_night"
fn_available_capacity = "available_capacity_night"
left = rtable.on(rjoin)
rows = db(query).select(htable.id,
htable[fn_capacity],
htable[fn_population],
htable[fn_available_capacity],
rtable.id.count(),
groupby = htable.id,
left = left,
)
for row in rows:
data = {}
unit = row[str(htable)]
population = row[rcount]
# Update population
current_population = unit[fn_population]
if current_population != population:
data[fn_population] = population
# Update daytime capacity
capacity = unit[fn_capacity]
if capacity > 0:
available_capacity = capacity - population
else:
available_capacity = 0
if unit[fn_available_capacity] != available_capacity:
data[fn_available_capacity] = available_capacity
# Write only if data have changed
if data:
db(htable.id == unit.id).update(**data)
else:
left = rtable.on(rjoin)
rows = db(query).select(htable.id,
htable.capacity_day,
htable.capacity_night,
htable.population_day,
htable.population_night,
htable.available_capacity_day,
htable.available_capacity_night,
rcount,
groupby = htable.id,
left = left,
)
for row in rows:
data = {}
unit = row[str(htable)]
population = row[rcount]
# Update daytime population/capacity
current_population = unit.population_day
if current_population != population:
data["population_day"] = population
capacity = unit.capacity_day
if capacity > 0:
available_capacity = capacity - population
else:
available_capacity = 0
if unit.available_capacity_day != available_capacity:
data["available_capacity_day"] = available_capacity
# Update night-time population/capacity
current_population = unit.population_night
if current_population != population:
data["population_night"] = population
capacity = unit.capacity_night
if capacity > 0:
available_capacity = capacity - population
else:
available_capacity = 0
if unit.available_capacity_night != available_capacity:
data["available_capacity_night"] = available_capacity
# Write only if data have changed
if data:
unit_id = unit.id
db(htable.id == unit_id).update(**data)
# =============================================================================
def cr_update_shelter_population(shelter_id):
"""
Update population and available capacity numbers, to be
called onaccept/ondelete of cr_shelter_registration and
cr_shelter_allocation.
@param shelter_id: the shelter record ID
"""
db = current.db
s3db = current.s3db
settings = current.deployment_settings
stable = s3db.cr_shelter
# Get the shelter record
record = db(stable._id == shelter_id).select(stable.id,
stable.capacity_day,
stable.capacity_night,
limitby=(0, 1)).first()
# Get population numbers
rtable = s3db.cr_shelter_registration
query = (rtable.shelter_id == shelter_id) & \
(rtable.deleted != True)
if settings.get_cr_check_out_is_final():
query &= (rtable.registration_status != 3)
cnt = rtable._id.count()
rows = db(query).select(rtable.day_or_night,
cnt,
groupby=rtable.day_or_night,
orderby=rtable.day_or_night)
population_day = population_night = 0
for row in rows:
reg_type = row[rtable.day_or_night]
number = row[cnt]
if reg_type == NIGHT and number:
population_night = number
elif reg_type == DAY_AND_NIGHT and number:
population_day = number
# population_day is both day /and/ night
population_night += population_day
# Get allocation numbers
# @ToDo: deployment_setting to disable Allocations
atable = s3db.cr_shelter_allocation
query = (atable.shelter_id == shelter_id) & \
(atable.status.belongs((1, 2, 3, 4))) & \
(atable.deleted != True)
dcnt = atable.group_size_day.sum()
ncnt = atable.group_size_night.sum()
row = db(query).select(dcnt, ncnt, limitby=(0, 1), orderby=dcnt).first()
if row:
if row[dcnt] is not None:
allocated_capacity_day = row[dcnt]
else:
allocated_capacity_day = 0
if row[ncnt] is not None:
allocated_capacity_night = row[ncnt]
else:
allocated_capacity_night = 0
else:
allocated_capacity_day = allocated_capacity_night = 0
# Compute available capacity
capacity_day = record.capacity_day
if capacity_day:
available_capacity_day = capacity_day - \
population_day - \
allocated_capacity_day
else:
available_capacity_day = 0
capacity_night = record.capacity_night
if capacity_night:
available_capacity_night = record.capacity_night - \
population_night - \
allocated_capacity_night
else:
available_capacity_night = 0
if settings.get_cr_shelter_housing_unit_management():
cr_update_housing_unit_population(shelter_id)
# Update record
record.update_record(population_day=population_day,
population_night=population_night,
available_capacity_day=available_capacity_day,
available_capacity_night=available_capacity_night)
# =============================================================================
def cr_check_population_availability(unit_id, table):
"""
Evaluate the population capacity availability.
Show a non-blocking warning when the number of people in the shelter/housing unit exceeds its capacity
@param unit_id: the shelter ID / housing unit ID
@param table: the related table (cr_shelter or cr_shelter_unit)
"""
T = current.T
db = current.db
response = current.response
record = db(table.id == unit_id).select(table.capacity_day,
table.population_day,
table.capacity_night,
table.population_night,
limitby=(0, 1)
).first()
day_and_night = current.deployment_settings.get_cr_day_and_night()
warning = None
full_day = full_night = False
capacity_day = record.capacity_day
population_day = record.population_day
if capacity_day is not None and \
population_day and \
population_day >= capacity_day:
full_day = True
if day_and_night:
capacity_night = record.capacity_night
population_night = record.population_night
if capacity_night is not None and \
population_night and \
population_night >= capacity_night:
full_night = True
tablename = table._tablename
if not day_and_night and full_day or full_day and full_night:
if tablename == "cr_shelter":
warning = T("Warning: this shelter is full")
elif tablename == "cr_shelter_unit":
warning = T("Warning: this housing unit is full")
elif full_day:
if tablename == "cr_shelter":
warning = T("Warning: this shelter is full for daytime")
elif tablename == "cr_shelter_unit":
warning = T("Warning: this housing unit is full for daytime")
elif full_night:
if tablename == "cr_shelter":
warning = T("Warning: this shelter is full for the night")
elif tablename == "cr_shelter_unit":
warning = T("Warning: this housing unit is full for the night")
if warning:
response = current.response
response_warning = response.warning
if response_warning:
response.warning = "%s - %s" % (response_warning, warning)
else:
response.warning = warning
# =============================================================================
def cr_update_capacity_from_housing_units(shelter_id):
"""
Update shelter capacity numbers; the new capacity is evaluated by
adding together all housing unit capacities.
To be called onaccept/ondelete of cr_shelter_registration and
cr_shelter_allocation.
@param shelter_id: the shelter record ID
"""
db = current.db
stable = db.cr_shelter
htable = db.cr_shelter_unit
query = (htable.shelter_id == shelter_id) & \
(htable.status == 1) & \
(htable.deleted != True)
total_capacity_day = htable.capacity_day.sum()
total_capacity_night = htable.capacity_night.sum()
row = db(query).select(total_capacity_day,
total_capacity_night,
).first()
if row:
total_capacity_day = row[total_capacity_day]
total_capacity_night = row[total_capacity_night]
else:
total_capacity_day = total_capacity_night = 0
db(stable._id==shelter_id).update(capacity_day = total_capacity_day,
capacity_night = total_capacity_night,
)
# =============================================================================
def cr_notification_dispatcher(r, **attr):
"""
Send a notification.
"""
if r.representation == "html" and \
r.name == "shelter" and r.id and not r.component:
T = current.T
msg = current.msg
s3db = current.s3db
record = r.record
ctable = s3db.pr_contact
stable = s3db.cr_shelter
message = ""
text = ""
s_id = record.id
s_name = record.name
s_phone = record.phone
s_email = record.email
s_status = record.status
if s_phone in ("", None):
s_phone = T("Not Defined")
if s_email in ("", None):
s_phone = T("Not Defined")
if s_status in ("", None):
s_status = T("Not Defined")
else:
if s_status == 1:
s_status = "Open"
elif s_status == 2:
s_status = "Close"
else:
s_status = "Unassigned Shelter Status"
text += "************************************************"
text += "\n%s " % T("Automatic Message")
text += "\n%s: %s " % (T("Shelter ID"), s_id)
text += " %s: %s" % (T("Shelter name"), s_name)
text += "\n%s: %s " % (T("Email"), s_email)
text += " %s: %s" % (T("Phone"), s_phone)
text += "\n%s: %s " % (T("Working Status"), s_status)
text += "\n************************************************\n"
# Encode the message as an OpenGeoSMS
#message = msg.prepare_opengeosms(record.location_id,
# code="ST",
# map="google",
# text=text)
# URL to redirect to after message sent
url = URL(c="cr", f="shelter", args=r.id)
# Create the form
opts = dict(type="SMS",
# @ToDo: deployment_setting
subject = T("Deployment Request"),
message = message + text,
url = url,
)
output = msg.compose(**opts)
# Maintain RHeader for consistency
if attr.get("rheader"):
rheader = attr["rheader"](r)
if rheader:
output["rheader"] = rheader
output["title"] = T("Send Notification")
current.response.view = "msg/compose.html"
return output
else:
raise HTTP(501, current.messages.BADMETHOD)
# =============================================================================
class cr_AssignUnit(S3CRUD):
"""
Assign a Person to a Housing Unit
"""
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST API
@param r: the S3Request
@param attr: controller arguments
"""
try:
person_id = int(r.get_vars["person_id"])
except:
raise HTTP(400, current.messages.BAD_REQUEST)
self.settings = current.response.s3.crud
sqlform = self._config("crud_form")
self.sqlform = sqlform if sqlform else S3SQLDefaultForm()
self.data = None
# Create or Update?
table = current.s3db.cr_shelter_registration
query = (table.deleted == False) & \
(table.person_id == person_id)
exists = current.db(query).select(table.id,
limitby=(0, 1)
).first()
if exists:
# Update form
r.method = "update" # Ensure correct View template is used
self.record_id = exists.id
output = self.update(r, **attr)
else:
# Create form
r.method = "create" # Ensure correct View template is used
self.data = {"person_id": person_id}
output = self.create(r, **attr)
return output
# END =========================================================================
|
import arcade
import random
SCREEN_WIDTH = 600
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Bouncing Balls"
class Ball:
def __init__(self):
self.x = 0
self.y = 0
self.change_x = 0
self.change_y = 0
self.size = 0
def make_ball():
ball = Ball()
ball.size = random.randrange(10, 30)
ball.x = random.randrange(ball.size, SCREEN_WIDTH - ball.size)
ball.y = random.randrange(ball.size, SCREEN_HEIGHT - ball.size)
ball.change_x = random.randrange(-2, 3)
ball.change_y = random.randrange(-2, 3)
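# Note: randrange(-2, 3) spans -2..2, so a ball can spawn with zero velocity
# on either axis and simply sit still.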
ball.color = (random.randrange(256), random.randrange(256), random.randrange(256))
return ball
class MyGame(arcade.Window):
def __init__(self):
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
self.ball_list = []
ball = make_ball()
self.ball_list.append(ball)
def on_draw(self):
arcade.start_render()
for ball in self.ball_list:
arcade.draw_circle_filled(ball.x, ball.y, ball.size, ball.color)
output = "Balls: {}".format(len(self.ball_list))
arcade.draw_text(output, 10, 20, arcade.color.WHITE, 14)
def update(self, delta_time):
for ball in self.ball_list:
ball.x += ball.change_x
ball.y += ball.change_y
if ball.x < ball.size:
ball.change_x *= -1
if ball.y < ball.size:
ball.change_y *= -1
if ball.x > SCREEN_WIDTH - ball.size:
ball.change_x *= -1
if ball.y > SCREEN_HEIGHT - ball.size:
ball.change_y *= -1
def on_mouse_press(self, x, y, button, modifiers):
"""
Called whenever the mouse button is clicked.
"""
ball = make_ball()
self.ball_list.append(ball)
def main():
MyGame()
arcade.run()
if __name__ == "__main__":
main()
|
# -*- coding: utf-8 -*-
from pyfr.solvers.base import get_opt_view_perm
from pyfr.solvers.baseadvec import (BaseAdvectionIntInters,
BaseAdvectionMPIInters,
BaseAdvectionBCInters)
class BaseAdvectionDiffusionIntInters(BaseAdvectionIntInters):
def __init__(self, be, lhs, rhs, elemap, cfg):
base = super(BaseAdvectionDiffusionIntInters, self)
base.__init__(be, lhs, rhs, elemap, cfg)
# Generate the additional view matrices
self._scal1_lhs = self._view_onto(lhs, 'get_scal_fpts1_for_inter')
self._scal1_rhs = self._view_onto(rhs, 'get_scal_fpts1_for_inter')
self._vect0_lhs = self._view_onto(lhs, 'get_vect_fpts0_for_inter')
self._vect0_rhs = self._view_onto(rhs, 'get_vect_fpts0_for_inter')
# Additional kernel constants
self._tpl_c.update(cfg.items_as('solver-interfaces', float))
def _gen_perm(self, lhs, rhs):
# In the special case of β = -0.5 it is better to sort by the
# RHS interface; otherwise we simply opt for the LHS
beta = self._cfg.getfloat('solver-interfaces', 'ldg-beta')
side = lhs if beta != -0.5 else rhs
# Compute the relevant permutation
self._perm = get_opt_view_perm(side, 'get_scal_fpts0_for_inter',
self._elemap)
class BaseAdvectionDiffusionMPIInters(BaseAdvectionMPIInters):
def __init__(self, be, lhs, rhsrank, rallocs, elemap, cfg):
base = super(BaseAdvectionDiffusionMPIInters, self)
base.__init__(be, lhs, rhsrank, rallocs, elemap, cfg)
lhsprank = rallocs.prank
rhsprank = rallocs.mprankmap[rhsrank]
# Generate second set of view matrices
self._scal1_lhs = self._view_onto(lhs, 'get_scal_fpts1_for_inter')
self._vect0_lhs = self._mpi_view_onto(lhs, 'get_vect_fpts0_for_inter')
self._vect0_rhs = be.mpi_matrix_for_view(self._vect0_lhs)
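# The right-hand-side data lives on a neighbouring rank, so an MPI exchange
# buffer shaped like the LHS view is allocated; the pack/send/recv/unpack
# kernels below move data through it.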
# Additional kernel constants
self._tpl_c.update(cfg.items_as('solver-interfaces', float))
# We require cflux(l,r,n_l) = -cflux(r,l,n_r) and
# conu(l,r) = conu(r,l), where l and r are left and right
# solutions at an interface and n_[l,r] are physical normals.
# The simplest way to enforce this at an MPI interface is for
# one side to take β = -β for the cflux and conu kernels. We
# pick this side (arbitrarily) by comparing the physical ranks
# of the two partitions.
self._tpl_c['ldg-beta'] *= 1.0 if lhsprank > rhsprank else -1.0
def get_vect_fpts0_pack_kern(self):
return self._be.kernel('pack', self._vect0_lhs)
def get_vect_fpts0_send_pack_kern(self):
return self._be.kernel('send_pack', self._vect0_lhs,
self._rhsrank, self.MPI_TAG)
def get_vect_fpts0_recv_pack_kern(self):
return self._be.kernel('recv_pack', self._vect0_rhs,
self._rhsrank, self.MPI_TAG)
def get_vect_fpts0_unpack_kern(self):
return self._be.kernel('unpack', self._vect0_rhs)
class BaseAdvectionDiffusionBCInters(BaseAdvectionBCInters):
def __init__(self, be, lhs, elemap, cfgsect, cfg):
super(BaseAdvectionDiffusionBCInters, self).__init__(be, lhs, elemap,
cfgsect, cfg)
# Additional view matrices
self._scal1_lhs = self._view_onto(lhs, 'get_scal_fpts1_for_inter')
self._vect0_lhs = self._view_onto(lhs, 'get_vect_fpts0_for_inter')
# Additional kernel constants
self._tpl_c.update(cfg.items_as('solver-interfaces', float))
|
from problems import threeQubitCircuit
import qutip as qt
import numpy as np
import pickle
from qutip.qip.circuit import QubitCircuit, Gate
from qutip.qip.operations import gate_sequence_product
from qutip.tensor import tensor
spins_0 = []
spins_1 = []
for i in range(1, 4):
spins_0.append(qt.basis(2,0))
spins_1.append(qt.basis(2,1))
state_0 = tensor(spins_0)
state_1 = tensor(spins_1)
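# state_0 and state_1 are the three-qubit product states |000> and |111>,
# built by tensoring the single-qubit basis states together.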
initial_state_list = [state_0]
transferResults = []
controlResults = []
for _ in range(1):
p = threeQubitCircuit(initialState_list=initial_state_list, configPath='./configs/three_q/delay/config_2.yaml')
p.default_opt()
p.plot_result()
tResult, tCost, cResult, cCost = p.get_result()
transferResults.append(tResult)
controlResults.append(cResult)
print("Transfer:", p.TransferOptimizer.max['params'])
print("Transfer: ", tResult[-1])
print("Control:", p.ControlOptimizer.max['params'])
print("Control: ", cResult[-1])
resultsToPickle = {
'transfer': np.array(transferResults),
'transfer_cost': tCost,
'control': np.array(controlResults),
'control_cost': cCost,
}
# pickle.dump( resultsToPickle, open( "test.pickle", "wb" ) )
|
from __future__ import absolute_import
import multiprocessing
import time
import atexit
import os
import imp
import logging
import traceback
import math
import uuid
from flask import Flask, current_app, render_template, g, request, flash, redirect, url_for
from flask_login import LoginManager, user_loaded_from_request
from flask_mail import Mail
from flask_migrate import Migrate
from flask_principal import Principal, identity_loaded, identity_changed, Identity, RoleNeed, UserNeed, AnonymousIdentity, PermissionDenied
from alembic import command
from alembic.migration import MigrationContext
from datetime import datetime
from werkzeug import url_encode
import knowledge_repo
from . import roles, routes
from .auth_provider import KnowledgeAuthProvider
from .proxies import db_session, current_repo, current_user
from .index import update_index, set_up_indexing_timers, time_since_index, time_since_index_check
from .models import db as sqlalchemy_db, Post, User, Tag
from .utils.auth import AnonymousKnowledgeUser, populate_identity_roles, prepare_user
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class KnowledgeFlask(Flask):
def __init__(self, repo, db_uri=None, debug=None, config=None, **kwargs):
Flask.__init__(self, __name__,
template_folder='templates',
static_folder='static')
        # Add a unique identifier for this application instance
self.uuid = str(uuid.uuid4())
if 'KNOWLEDGE_REPO_MASTER_UUID' not in os.environ:
os.environ['KNOWLEDGE_REPO_MASTER_UUID'] = self.uuid
# Preload default configuration
self.config.from_object('knowledge_repo.app.config_defaults')
# Load configuration from file or provided object
if config:
if isinstance(config, str):
config = imp.load_source('knowledge_server_config', os.path.abspath(config))
self.config.from_object(config)
# Add configuration passed in as keyword arguments
self.config.update(kwargs)
# Prepare repository, and add it to the app
if hasattr(config, 'prepare_repo'):
repo = config.prepare_repo(repo)
self.repository = repo
assert isinstance(self.repository, knowledge_repo.KnowledgeRepository), "Invalid repository object provided."
# Set debug mode from kwargs or else maintain current setting
if debug is not None:
self.config['DEBUG'] = debug
# Set the secret key for this instance (creating one if one does not exist already)
self.config['SECRET_KEY'] = self.config['SECRET_KEY'] or str(uuid.uuid4())
# Configure database
if db_uri:
self.config['SQLALCHEMY_DATABASE_URI'] = db_uri
logger.debug(u"Using database: {}".format(self.config['SQLALCHEMY_DATABASE_URI']))
# Register database schema with flask app
sqlalchemy_db.init_app(self)
# Set up database migration information
# Registers Migrate plugin in self.extensions['migrate']
Migrate(self, self.db)
# Try to create the database if it does not already exist
# Existence is determined by whether there is an existing alembic migration revision
db_auto_create = self.config.get('DB_AUTO_CREATE', True)
db_auto_upgrade = self.config.get('DB_AUTO_UPGRADE', True)
if db_auto_create and self.db_revision is None:
self.db_init()
elif db_auto_upgrade:
self.db_upgrade()
# Initialise login manager to keep track of user sessions
LoginManager().init_app(self)
self.login_manager.login_view = 'auth.login'
self.login_manager.anonymous_user = AnonymousKnowledgeUser
@self.login_manager.user_loader
def load_user(user_id):
return User(identifier=user_id)
# Attempt login via http headers if header is specified
if self.config.get('AUTH_USER_IDENTIFIER_REQUEST_HEADER'):
@self.login_manager.request_loader
def load_user_from_request(request):
identifier = request.headers.get(current_app.config['AUTH_USER_IDENTIFIER_REQUEST_HEADER'])
if identifier:
if current_app.config['AUTH_USER_IDENTIFIER_REQUEST_HEADER_MAPPING']:
identifier = current_app.config['AUTH_USER_IDENTIFIER_REQUEST_HEADER_MAPPING'](identifier)
user = User(identifier=identifier)
user.can_logout = False
user = prepare_user(user, session_start=False)
return user
        # Initialise access policies
self.principal = Principal(self)
# Add AnonymousIdentity fallback so that on_identity_loaded is called for
# anonymous users too.
self.principal.identity_loaders.append(lambda: AnonymousIdentity())
# Synchronise user permissions with data model
@user_loaded_from_request.connect
def on_user_loaded_from_request(sender, user):
self.principal.set_identity(Identity(user.id))
@identity_loaded.connect_via(self)
def on_identity_loaded(sender, identity):
populate_identity_roles(identity, user=current_user)
@self.errorhandler(PermissionDenied)
def handle_insufficient_permissions(error):
flash("You have insufficient permissions to access this resource.")
return render_template('base.html'), 403
# Add mail object if configuration is supplied
if self.config.get('MAIL_SERVER'):
self.config['mail'] = Mail(self)
# Register routes to be served
self.register_blueprint(routes.posts.blueprint)
self.register_blueprint(routes.health.blueprint)
self.register_blueprint(routes.index.blueprint)
self.register_blueprint(routes.tags.blueprint)
self.register_blueprint(routes.vote.blueprint)
self.register_blueprint(routes.comment.blueprint)
self.register_blueprint(routes.stats.blueprint)
self.register_blueprint(routes.editor.blueprint)
self.register_blueprint(routes.groups.blueprint)
self.register_blueprint(routes.auth.blueprint)
KnowledgeAuthProvider.register_auth_provider_blueprints(self)
if self.config['DEBUG']:
self.register_blueprint(routes.debug.blueprint)
# Register error handler
@self.errorhandler(500)
        def show_traceback(error):
""" If LOCAL MODE: show the stack trace on a server error
otherwise show a nice error template to the users
"""
if current_app.config.get("DEBUG"):
return render_template('traceback.html', info=traceback.format_exc()), 500
else:
return render_template('error.html')
@self.before_first_request
def ensure_excluded_tags_exist():
# For every tag in the excluded tags, create the tag object if it doesn't exist
excluded_tags = current_app.config['EXCLUDED_TAGS']
for tag in excluded_tags:
Tag(name=tag)
db_session.commit()
# Set up indexing timers
set_up_indexing_timers(self)
@self.before_request
def open_repository_session():
if not request.path.startswith('/static'):
current_repo.session_begin()
@self.after_request
def close_repository_session(response):
if not request.path.startswith('/static'):
current_repo.session_end()
return response
@self.context_processor
def webediting_enabled():
# TODO: Link this more to KnowledgeRepository capability and
# configuration rather than a specific name.
return {"webeditor_enabled": 'webposts' in current_repo.uris}
@self.context_processor
def inject_version():
version = knowledge_repo.__version__
version_revision = None
if '_' in knowledge_repo.__version__:
version, version_revision = version.split('_')
return dict(version=version,
version_revision=version_revision,
last_index=time_since_index(human_readable=True),
last_index_check=time_since_index_check(human_readable=True))
@self.template_global()
def modify_query(**new_values):
args = request.args.copy()
for key, value in new_values.items():
args[key] = value
return u'{}?{}'.format(request.path, url_encode(args))
@self.template_global()
def pagination_pages(current_page, page_count, max_pages=5, extremes=True):
page_min = int(max(1, current_page - math.floor(1.0 * max_pages // 2)))
page_max = int(min(page_count, current_page + math.floor(1.0 * max_pages / 2)))
to_acquire = max_pages - (page_max - page_min + 1)
while to_acquire > 0 and page_min > 1:
page_min -= 1
to_acquire -= 1
while to_acquire > 0 and page_max < page_count:
page_max += 1
to_acquire -= 1
pages = list(range(page_min, page_max + 1))
if extremes:
if 1 not in pages:
pages[0] = 1
if page_count not in pages:
pages[-1] = page_count
return pages
@self.template_filter('format_date')
def format_date(date):
"""
This will be a Jinja filter that string formats a datetime object.
If we can't correctly format, we just return the object.
:type date: Datetime
:return: A string of the format of YYYY-MM-DD
:rtype: String
"""
try:
return datetime.strftime(date, '%Y-%m-%d')
            except Exception:
return date
@property
def repository(self):
return getattr(self, '_repository')
@repository.setter
def repository(self, repo):
self._repository = repo
@property
def db(self):
return sqlalchemy_db
@property
def _alembic_config(self):
if not hasattr(self, 'extensions') or 'migrate' not in self.extensions:
raise RuntimeError("KnowledgeApp has not yet been configured. Please instantiate it via `get_app_for_repo`.")
migrations_path = os.path.join(os.path.dirname(__file__), "migrations")
return self.extensions['migrate'].migrate.get_config(migrations_path)
def db_init(self):
with self.app_context():
# Create all tables
sqlalchemy_db.create_all()
# Stamp table as being current
command.stamp(self._alembic_config, "head")
@property
def db_revision(self):
with self.app_context():
conn = self.db.engine.connect()
context = MigrationContext.configure(conn)
return context.get_current_revision()
def db_upgrade(self):
with self.app_context():
command.upgrade(self._alembic_config, "head")
def db_migrate(self, message, autogenerate=True):
with self.app_context():
command.revision(self._alembic_config, message=message, autogenerate=autogenerate)
def db_update_index(self, check_timeouts=True, force=False, reindex=False):
with self.app_context():
update_index(check_timeouts=check_timeouts, force=force, reindex=reindex)
def check_thread_support(self, check_index=True, check_repositories=True):
# If index database is an sqlite database, it will lock on any write action, and so breaks on multiple threads
        # Repository URIs will break as above (though less often, since they are rarely written to),
        # and will also end up as a separate repository per thread, breaking the consistency of the
        # presented content.
if check_index:
index_db = self.config['SQLALCHEMY_DATABASE_URI']
if index_db.startswith('sqlite://'):
return False
if check_repositories:
for uri in self.repository.uris.values():
if uri.startswith('sqlite://') or ':memory:' in uri:
return False
return True
|
from Dmail.mixin import MarkdownMixin, MimeMixin
from Dmail.api._gmail_api_base import GmailApiBase
class GmailApi(GmailApiBase, MarkdownMixin, MimeMixin):
default_subtype = 'md'
default_scope = 'compose'
def __init__(self, sender_email, token_file='token.pickle', credentials_file=None, scopes='compose', md_extensions=None):
super(GmailApi, self).__init__(sender_email=sender_email, token_file=token_file,
credentials_file=credentials_file, scopes=scopes, md_extensions=md_extensions)
def create_draft(self, email_text, to=None, subject=None, cc=None, bcc=None, subtype=None, attachments=None):
subtype = subtype or self.default_subtype
self._pre_send(email_text, to=to, subject=subject, cc=cc, bcc=bcc, subtype=subtype, attachments=attachments)
email_body = self.get_message(email_text, to=to, subject=subject, cc=cc, bcc=bcc,
subtype=subtype, attachments=attachments)
self.createdraft(email_body)
self._post_send(email_text, to=to, subject=subject, cc=cc, bcc=bcc, subtype=subtype, attachments=attachments)
def createdraft(self, message):
"""Create and insert a draft email. Print the returned draft's message and id.
Args:
message: The body of the email message, including headers.
Returns:
Draft object, including draft id and message meta data.
"""
body = {'message': self.get_request_body(message)}
return self.service.users().drafts().create(userId=self.sender_email, body=body).execute()
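# Minimal usage sketch (hypothetical addresses and file names; the token and
# credentials files must already exist for the Gmail API OAuth flow to work):
#
#   api = GmailApi('me@example.com', token_file='token.pickle',
#                  credentials_file='credentials.json')
#   api.create_draft('# Hello\n\nDraft body in *markdown*.',
#                    to='you@example.com', subject='Draft demo')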
|
# -*- coding: utf-8 -*-
import sys
import os
from workflow import Workflow3
reload(sys)
sys.setdefaultencoding('utf8')
def getargs(wf):
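    """Handle one Alfred action for a previously fetched Youdao result.

    sys.argv[1] is a '$%'-separated payload and sys.argv[2] selects the action:
    0 writes the queried word, 1 writes the translation, 2 speaks the English
    and Chinese pronunciations via the macOS `say` command. A non-empty fifth
    field opens the help page instead. (Field meanings inferred from the
    handling below.)
    """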
query = sys.argv[1]
query = query.split('$%')
part = int(sys.argv[2])
if query[4]:
import webbrowser
new = 2
url = "https://blog.naaln.com/2017/04/alfred-youdao-intro/"
webbrowser.open(url, new=new)
return 0
if part == 0:
        # The queried word
sys.stdout.write(query[0].strip())
elif part == 1:
        # The translation result
sys.stdout.write(query[1].strip())
elif part == 2:
        # Pronunciation
if query[2]:
bashCommand = "say --voice='Samantha' " + query[2]
os.system(bashCommand)
if query[3]:
bashCommand = "say --voice='Ting-Ting' " + query[3]
os.system(bashCommand)
if __name__ == '__main__':
wf = Workflow3()
sys.exit(wf.run(getargs))
|
import sys
import pickle
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
sys.path.append('/path/to/caffe/python')
import caffe
net = caffe.Net('test.prototxt', 'simple_mlp_iter_2000.caffemodel', caffe.TEST)
# load original data
with open('../data.pkl', 'rb') as f:
samples, labels = pickle.load(f)
samples = np.array(samples)
labels = np.array(labels)
# Visualize result
X = np.arange(0, 1.05, 0.05)
Y = np.arange(0, 1.05, 0.05)
X, Y = np.meshgrid(X, Y)
# Plot the surface of probability
grids = np.array([[X[i][j], Y[i][j]] for i in range(X.shape[0]) for j in range(X.shape[1])])
grid_probs = []
for grid in grids:
net.blobs['data'].data[...] = grid.reshape((1, 2))[...]
output = net.forward()
grid_probs.append(output['prob'][0][1])
grid_probs = np.array(grid_probs).reshape(X.shape)
fig = plt.figure('Sample Surface')
ax = fig.gca(projection='3d')
ax.plot_surface(X, Y, grid_probs, alpha=0.15, color='k', rstride=2, cstride=2, lw=0.5)
# Plot the predicted probability of samples
samples0 = samples[labels==0]
samples0_probs = []
for sample in samples0:
net.blobs['data'].data[...] = sample.reshape((1, 2))[...]
output = net.forward()
samples0_probs.append(output['prob'][0][1])
samples1 = samples[labels==1]
samples1_probs = []
for sample in samples1:
net.blobs['data'].data[...] = sample.reshape((1, 2))[...]
output = net.forward()
samples1_probs.append(output['prob'][0][1])
ax.scatter(samples0[:, 0], samples0[:, 1], samples0_probs, c='b', marker='^', s=50)
ax.scatter(samples1[:, 0], samples1[:, 1], samples1_probs, c='r', marker='o', s=50)
plt.show()
|
from run_pipeline import run_pipeline
from src.cool_visitor import FormatVisitor
def test():
text = """
class Point {
x : AUTO_TYPE ;
y : AUTO_TYPE ;
init ( n : Int , m : Int ) : SELF_TYPE {
{
x <- n ;
y <- m ;
} } ;
} ;
"""
ast = run_pipeline(text)
formatter = FormatVisitor()
tree = formatter.visit(ast)
tree = tree.replace("\t", "")
tree = tree.replace("\n", "")
tree = tree.replace("\\", "")
assert (
tree
== "__ProgramNode [<class> ... <class>]__ClassDeclarationNode: class Point { <feature> ... <feature> }__AttrDeclarationNode: x : AUTO_TYPE <- <exp>__NONE__AttrDeclarationNode: y : AUTO_TYPE <- <exp>__NONE__FuncDeclarationNode: init(n:Int, m:Int) : SELF_TYPE { <body> }__BlockNode: {<exp>; ... <exp>;}__AssignNode: x <- <expr>__ VariableNode: n__AssignNode: y <- <expr>__ VariableNode: m"
)
|
# -*- coding: utf-8 -*-
from django.db import models
from apps.postitulos.models import Solicitud
"""
Report for an application for national validity of a degree (Informe de Solicitud de validez de Título nacional)
"""
class InformeSolicitud(models.Model):
solicitud = models.ForeignKey(Solicitud, related_name='informe')
denominacion_titulo = models.BooleanField(default=True)
observaciones = models.CharField(max_length=999, null=True)
class Meta:
app_label = 'postitulos'
db_table = 'postitulos_informe_solicitud'
def __unicode__(self):
return 'Informe de Solicitud'
|
# coding: utf-8
from .lstm_vae import create_lstm_vae |
import math
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
from eight_mile.utils import Offsets, is_sequence, calc_nfeats
from eight_mile.pytorch.layers import *
class PyTorchEmbeddings(nn.Module):
def __init__(self, **kwargs):
super().__init__()
def get_vsz(self):
pass
def get_dsz(self):
pass
@property
def output_dim(self):
return self.get_dsz()
def encode(self, x):
return self(x)
class LookupTableEmbeddings(PyTorchEmbeddings):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.vsz = kwargs.get("vsz")
self.dsz = kwargs.get("dsz")
self.finetune = kwargs.get("finetune", True)
self.dropin = kwargs.get("dropin", 0.0)
weights = kwargs.get("weights")
if weights is None:
self.embeddings = nn.Embedding(self.vsz, self.dsz, padding_idx=kwargs.get('padding_idx', Offsets.PAD))
else:
self.embeddings = pytorch_embedding(weights, self.finetune)
# This makes sure that if you init with a weight and not vsz it will still be available
self.vsz, self.dsz = weights.shape
def get_vsz(self):
return self.vsz
def get_dsz(self):
return self.dsz
def forward(self, x):
if not self.dropin:
return self.embeddings(x)
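        # "Drop-in" regularisation: zero out whole embedding rows with
        # probability self.dropin and rescale the survivors by 1/(1 - dropin),
        # so entire word vectors are dropped rather than single dimensions.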
mask = self.embeddings.weight.data.new().resize_((self.embeddings.weight.size(0),
1)).bernoulli_(1 - self.dropin).expand_as(self.embeddings.weight) / (1 - self.dropin)
masked_embed_weight = mask * self.embeddings.weight
output = torch.nn.functional.embedding(x, masked_embed_weight,
self.embeddings.padding_idx, self.embeddings.max_norm, self.embeddings.norm_type,
self.embeddings.scale_grad_by_freq, self.embeddings.sparse)
return output
def extra_repr(self):
return f"finetune=False" if not self.finetune else ""
class CharConvEmbeddings(PyTorchEmbeddings):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.nfeat_factor = kwargs.get("nfeat_factor")
self.cfiltsz = kwargs.get("cfiltsz", kwargs.get("filtsz", [3]))
self.max_feat = kwargs.get("max_feat", 30)
self.gating = kwargs.get("gating", "skip")
self.num_gates = kwargs.get("num_gates", 1)
self.activation = kwargs.get("activation", "tanh")
self.wsz = kwargs.get("wsz", 30)
self.projsz = kwargs.get("projsz", 0)
self.pdrop = kwargs.get("pdrop", 0.5)
self.filtsz, self.nfeats = calc_nfeats(self.cfiltsz, self.nfeat_factor, self.max_feat, self.wsz)
self.conv_outsz = int(np.sum(self.nfeats))
self.outsz = self.conv_outsz
if self.projsz > 0:
self.outsz = self.projsz
self.proj = pytorch_linear(self.conv_outsz, self.outsz)
self.embeddings = LookupTableEmbeddings(**kwargs)
self.char_comp = WithDropout(
ParallelConv(self.embeddings.output_dim, self.nfeats, self.filtsz, self.activation), self.pdrop
)
GatingConnection = SkipConnection if self.gating == "skip" else Highway
self.gating_seq = nn.Sequential(
OrderedDict(
[("gate-{}".format(i), GatingConnection(self.char_comp.output_dim)) for i in range(self.num_gates)]
)
)
def get_dsz(self):
return self.outsz
def get_vsz(self):
return self.vsz
def forward(self, xch):
# For starters we need to perform embeddings for each character
# (TxB) x W -> (TxB) x W x D
_0, _1, W = xch.shape
char_vecs = self.embeddings(xch.view(-1, W))
# (TxB) x D x W
# char_vecs = char_embeds.transpose(1, 2).contiguous()
# pytorch_activation(self.activation_type)
mots = self.char_comp(char_vecs)
gated = self.gating_seq(mots)
if self.projsz:
gated = self.proj(gated)
return gated.view(_0, _1, self.get_dsz())
class CharLSTMEmbeddings(PyTorchEmbeddings):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.embed = LookupTableEmbeddings(**kwargs)
self.lstmsz = kwargs.get("lstmsz", 50)
layers = kwargs.get("layers", 1)
pdrop = kwargs.get("pdrop", 0.5)
unif = kwargs.get("unif", 0)
weight_init = kwargs.get("weight_init", "uniform")
self.char_comp = BiLSTMEncoderHidden(
self.embed.output_dim, self.lstmsz, layers, pdrop, unif=unif, initializer=weight_init
)
def forward(self, xch):
B, T, W = xch.shape
flat_chars = xch.view(-1, W)
char_embeds = self.embed(flat_chars)
# Calculate the lengths of each word
lengths = torch.sum(flat_chars != Offsets.PAD, dim=1)
# Sort the input to appease the cuDNN gods
sorted_word_lengths, perm_idx = lengths.sort(0, descending=True)
sorted_feats = char_embeds[perm_idx].transpose(0, 1).contiguous()
# cuDNN throws an error if there is an input with a length of 0, this happens when the "word"
# is actually a "<PAD>" so there are no characters to run the LSTM over. Here we just say
# that the lengths is 1. This will make cudnn happy and we will just get junk in that spot
patched_lengths = sorted_word_lengths.masked_fill(sorted_word_lengths == 0, 1)
# Run the LSTM
hidden = self.char_comp((sorted_feats, patched_lengths))
# Create a mask that is true when the sorted length is 0 (where the word was a pad) so that
# we can mask out the junk that the lstm created because we needed a length of 1
hidden = hidden.masked_fill((sorted_word_lengths == 0).unsqueeze(-1), 0)
# Undo the sort so that the representations of the words are in the correct part of the sentence.
results = unsort_batch(hidden, perm_idx)
return results.reshape((B, T, -1))
def get_dsz(self):
return self.lstmsz
def get_vsz(self):
return self.embed.get_vsz()
class CharTransformerEmbeddings(PyTorchEmbeddings):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.embed = LookupTableEmbeddings(**kwargs)
self.d_model = kwargs.get("wsz", 30)
self.num_heads = kwargs.get("num_heads", 3)
self.rpr_k = kwargs.get("rpr_k", 10)
layers = kwargs.get("layers", 1)
pdrop = kwargs.get("pdrop", 0.5)
self.char_comp = TransformerEncoderStackWithLengths(
self.num_heads, self.d_model, pdrop, False, layers, rpr_k=self.rpr_k, input_sz=self.embed.output_dim
)
def forward(self, xch):
B, T, W = xch.shape
flat_chars = xch.view(-1, W)
char_embeds = self.embed(flat_chars)
# Calculate the lengths of each word
lengths = torch.sum(flat_chars != Offsets.PAD, dim=1)
results = self.char_comp((char_embeds, lengths))
        # The encoder output is (B*T, W, H); max-pool over the character dimension (W) to get one vector per word
        pooled = torch.max(results, -2, keepdim=False)[0]
return pooled.reshape((B, T, -1))
def get_dsz(self):
return self.d_model
def get_vsz(self):
return self.embed.get_vsz()
class PositionalMixin(nn.Module):
"""A Mixin that provides functionality to generate positional embeddings to be added to the normal embeddings.
Note, mixins need to be before the base case when used, i.e.
`Embedding(Mixin, BaseEmbed)` NOT `Embedding(BaseEmbed, Mixin)`
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
def positional(self, length):
pass
def extra_repr(self):
return f"mxlen={self.mxlen}"
class SinusoidalPositionalMixin(PositionalMixin):
def __init__(self, **kwargs):
super().__init__(**kwargs)
# This could get us in trouble, if in doubt, pick something big
self.mxlen = kwargs.get("mxlen", 1000)
max_timescale = kwargs.get("max_timescale", 1.0e4)
word_dsz = self.get_dsz()
log_timescale_increment = math.log(max_timescale) / word_dsz
inv_timescales = torch.exp(torch.arange(0, word_dsz, 2).float() * -log_timescale_increment)
pe = torch.zeros(self.mxlen, word_dsz)
position = torch.arange(0, self.mxlen).float().unsqueeze(1)
pe[:, 0::2] = torch.sin(position * inv_timescales)
pe[:, 1::2] = torch.cos(position * inv_timescales)
pe = pe.unsqueeze(0)
self.register_buffer("pe", pe)
def positional(self, length):
return self.pe[:, :length]
class LearnedPositionalMixin(PositionalMixin):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.mxlen = int(kwargs.get("mxlen", 512))
self.pos_embeddings = nn.Embedding(self.mxlen, self.get_dsz())
def positional(self, length):
return self.pos_embeddings(
torch.arange(length, dtype=torch.long, device=self.pos_embeddings.weight.device)
).unsqueeze(0)
class BERTLookupTableEmbeddings(LookupTableEmbeddings):
"""
BERT style embeddings with a 0 token type
    TODO: Get rid of this, we don't need it anymore
If you want to use BERT with token types, make a `LearnedPositionalLookupTableEmbeddings` feature
and a `LookupTableEmbeddings` feature (for the token type)
and put them in an `EmbeddingsStack` with an embeddings_reduction='sum-layer-norm' on the model
Otherwise, if you do not plan on setting the token type, use the `LearnedPositionalLookupTableEmbeddingsWithBias`,
which will add the BERT token_type=0 weights into the pos + word_embed and is more efficient
    than this class, since it doesn't do any memory allocation on the fly
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dropout = nn.Dropout(kwargs.get('dropout', 0.1))
self.mxlen = int(kwargs.get('mxlen', 512))
self.tok_type_vsz = kwargs['tok_type_vsz']
self.pos_embeddings = nn.Embedding(self.mxlen, self.get_dsz())
self.tok_embeddings = nn.Embedding(self.tok_type_vsz, self.get_dsz())
self.ln = nn.LayerNorm(self.get_dsz(), eps=1e-12)
def forward(self, x):
zeros = torch.zeros_like(x)
x = super().forward(x)
x = x + self.positional(x.size(1)) + self.tok_embeddings(zeros)
x = self.ln(x)
return self.dropout(x)
def positional(self, length):
return self.pos_embeddings(
torch.arange(length, dtype=torch.long, device=self.pos_embeddings.weight.device)
).unsqueeze(0)
class LearnedPositionalLookupTableEmbeddingsWithBias(LookupTableEmbeddings):
"""Learned positional lookup table embeddings wih a bias and layer norm
This is just a typical learned positional embedding but with a learnable
bias and a layer norm. This is equivalent to BERT embeddings when the
token_type is not set.
If you are using BERT but you have no interest in using token type embeddings
(IOW if you are setting all the values of that feature zero anyhow), using this
object is faster and simpler than having a separate vectorizer for token type.
If you have a need for token type embeddings, you will want to create 2 sets of embeddings,
one that acts on the tokens, of type `LearnedPositionalLookupTableEmbeddings` and one of the type
`LookupTableEmbeddings` for the token type feature
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dropout = nn.Dropout(kwargs.get('dropout', 0.0))
self.mxlen = int(kwargs.get('mxlen', 512))
self.pos_embeddings = nn.Embedding(self.mxlen, self.get_dsz())
self.bias = nn.Parameter(torch.zeros(self.get_dsz()))
def forward(self, x):
x = super().forward(x)
x = x + self.positional(x.size(1)) + self.bias
return x
def positional(self, length):
return self.pos_embeddings(
torch.arange(length, dtype=torch.long, device=self.pos_embeddings.weight.device)
).unsqueeze(0)
class PositionalLookupTableEmbeddings(SinusoidalPositionalMixin, LookupTableEmbeddings):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dropout = nn.Dropout(kwargs.get("dropout", 0.0))
self.scale = math.sqrt(self.get_dsz())
def forward(self, x):
"""Add a positional encoding to the embedding, followed by dropout
:param x: The temporal signal in, to which the positional embeddings are applied
:return: Embedded output
"""
x = super().forward(x) * self.scale
x = x + self.positional(x.size(1))
return self.dropout(x)
class LearnedPositionalLookupTableEmbeddings(LearnedPositionalMixin, LookupTableEmbeddings):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dropout = nn.Dropout(kwargs.get("dropout", 0.0))
def forward(self, x):
T = x.size(1)
x = super().forward(x)
pos = self.positional(T)
return self.dropout(x + pos)
class PositionalCharConvEmbeddings(SinusoidalPositionalMixin, CharConvEmbeddings):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dropout = nn.Dropout(kwargs.get("dropout", 0.0))
self.scale = math.sqrt(self.get_dsz())
def forward(self, xch):
"""Add a positional encoding to the embedding, followed by dropout
:param xch: The temporal signal in, to which the positional embeddings are applied
:return: Embedded output
"""
xch = super().forward(xch) * self.scale
xch = xch + self.positional(xch.size(1))
return self.dropout(xch)
class LearnedPositionalCharConvEmbeddings(LearnedPositionalMixin, CharConvEmbeddings):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dropout = nn.Dropout(kwargs.get("dropout", 0.0))
def forward(self, xch):
"""Add a positional encoding to the embedding, followed by dropout
:param xch: The temporal signal in, to which the positional embeddings are applied
:return: Embedded output
"""
xch = super().forward(xch)
xch = xch + self.positional(xch.size(1))
return self.dropout(xch)
class PositionalCharLSTMEmbeddings(SinusoidalPositionalMixin, CharLSTMEmbeddings):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dropout = nn.Dropout(kwargs.get("dropout", 0.0))
self.scale = math.sqrt(self.get_dsz())
def forward(self, xch):
xch = super().forward(xch) * self.scale
xch = xch + self.positional(xch.size(1))
return self.dropout(xch)
class LearnedPositionalCharLSTMEmbeddings(LearnedPositionalMixin, CharLSTMEmbeddings):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.dropout = nn.Dropout(kwargs.get("dropout", 0.0))
def forward(self, xch):
xch = super().forward(xch)
xch = xch + self.positional(xch.size(1))
return self.dropout(xch)
|
# -*- coding: utf-8 -*-
DEFAULT_ASSET_BASIC_TYPE = 'basic'
DEFAULT_LEVEL_OF_ACCREDITATION = {'create': [1],
'edit': [2]}
|
import pytest, torch, fastai
from fastai.torch_core import *
from fastai.layers import *
a=[1,2,3]
exp=torch.tensor(a)
b=[3,6,6]
def test_tensor_with_list():
r = tensor(a)
assert torch.all(r==exp)
def test_tensor_with_ndarray():
b=np.array(a)
r = tensor(b)
assert np_address(r.numpy()) == np_address(b)
assert torch.all(r==exp)
def test_tensor_with_tensor():
c=torch.tensor(a)
r = tensor(c)
assert r.data_ptr()==c.data_ptr()
assert torch.all(r==exp)
def test_requires_grad():
m = simple_cnn(b)
assert requires_grad(m) == True
def test_requires_grad_set():
m = simple_cnn(b)
requires_grad(m,False)
allF = np.all([not p.requires_grad for p in m.parameters()])
assert allF, "requires_grad(m,False) did not set all parameters to False"
requires_grad(m,True)
allT = np.all([p.requires_grad for p in m.parameters()])
assert allT, "requires_grad(m,True) did not set all parameters to True"
def test_apply_init():
m = simple_cnn(b,bn=True)
all2 = lambda m: nn.init.constant_(m.weight,0.2) if hasattr(m, 'weight') else m
all7 = lambda m: nn.init.constant_(m,0.7)
apply_leaf(m,all2)
apply_init(m,all7)
conv1_w = torch.full([6,3,3,3],0.7)
bn1_w = torch.full([6],0.2)
    assert conv1_w.equal(m[0][0].weight), "Expected first convolution layer's weights to be %r" % conv1_w
    assert bn1_w.equal(m[0][2].weight), "Expected first batch norm layer's weights to be %r" % bn1_w
def test_in_channels():
m = simple_cnn(b)
assert in_channels(m) == 3
def test_in_channels_no_weights():
with pytest.raises(Exception) as e_info:
in_channels(nn.Sequential())
assert e_info.value.args[0] == 'No weight layer'
|
from dartcms import get_model
from dartcms.utils.config import DartCMSConfig
from .forms import OrderDatailForm
app_name = 'order_details'
Order = get_model('shop', 'OrderDetail')
config = DartCMSConfig({
'model': Order,
'parent_kwarg_name': 'order',
'parent_model_fk': 'order_id',
'grid': {
'grid_columns': [
{'field': 'name', 'width': '60%'},
{'field': 'price', 'width': '20%'},
{'field': 'quantity', 'width': '20%'},
],
'base_grid_actions': ['update', 'delete'],
},
'form': {
'form_class': OrderDatailForm
}
})
urlpatterns = config.get_urls(exclude=['insert'])
|
import sys,os
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
import PyEFVLib
import numpy as np
from scipy import sparse
import scipy.sparse.linalg
import time
class StressEquilibriumSolver(PyEFVLib.Solver):
def __init__(self, workspaceDirectory, gravity=False, **kwargs):
# kwargs -> outputFileName, extension, transient, verbosity
PyEFVLib.Solver.__init__(self, workspaceDirectory, **kwargs)
self.gravity = gravity
def init(self):
self.displacements = np.repeat(0.0, self.dimension*self.numberOfVertices)
self.coords,self.matrixVals = [], []
def mainloop(self):
self.assembleLinearSystem()
self.solveLinearSystem()
self.saveIterationResults()
def add(self, i, j, val):
self.coords.append((i,j))
self.matrixVals.append(val)
def getConstitutiveMatrix(self, region):
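        """Assemble the isotropic constitutive matrix in Voigt notation for a region.

        The Lame parameter is recovered from the shear modulus G and Poisson's
        ratio nu as lambda = 2*G*nu / (1 - 2*nu); the 2D branch corresponds to
        a plane-strain state.
        """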
shearModulus = self.propertyData.get(region.handle, "ShearModulus")
poissonsRatio = self.propertyData.get(region.handle, "PoissonsRatio")
lameParameter=2*shearModulus*poissonsRatio/(1-2*poissonsRatio)
if self.dimension == 2:
constitutiveMatrix = np.array([[2*shearModulus+lameParameter ,lameParameter ,0 ],
[lameParameter ,2*shearModulus+lameParameter ,0 ],
[0 ,0 ,shearModulus]])
elif self.dimension == 3:
constitutiveMatrix = np.array([[2*shearModulus+lameParameter ,lameParameter ,lameParameter ,0 ,0 ,0],
[lameParameter ,2*shearModulus+lameParameter,lameParameter ,0 ,0 ,0],
[lameParameter ,lameParameter ,2*shearModulus+lameParameter,0 ,0 ,0],
[0 ,0 ,0 ,shearModulus,0,0],
[0 ,0 ,0 ,0,shearModulus,0],
[0 ,0 ,0 ,0,0,shearModulus]])
return constitutiveMatrix
def getTransposedVoigtArea(self, face):
Sx, Sy, Sz = face.area.getCoordinates()
if self.dimension == 2:
return np.array([[Sx,0,Sy],[0,Sy,Sx]])
elif self.dimension == 3:
return np.array([[Sx,0,0,Sy,0,Sz],[0,Sy,0,Sx,Sz,0],[0,0,Sz,0,Sy,Sx]])
@staticmethod
def getVoigtGradientOperator(globalDerivatives):
if len(globalDerivatives) == 2:
Nx,Ny = globalDerivatives
zero=np.zeros(Nx.size)
return np.array([[Nx,zero],[zero,Ny],[Ny,Nx]])
if len(globalDerivatives) == 3:
Nx,Ny,Nz = globalDerivatives
zero=np.zeros(Nx.size)
return np.array([[Nx,zero,zero],[zero,Ny,zero],[zero,zero,Nz],[Ny,Nx,zero],[zero,Nz,Ny],[Nz,zero,Nx]])
@staticmethod
def getOuterFaceGlobalDerivatives(outerFace):
localDerivatives = outerFace.facet.element.shape.vertexShapeFunctionDerivatives[ outerFace.vertexLocalIndex ]
return outerFace.facet.element.getGlobalDerivatives(localDerivatives)
def assembleLinearSystem(self):
self.independent = np.zeros(self.dimension*self.numberOfVertices)
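        # Degrees of freedom are blocked by component: indices [0, N) hold u,
        # [N, 2N) hold v and, in 3D, [2N, 3N) hold w, where N is the number of vertices.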
U = lambda handle: handle + self.numberOfVertices * 0
V = lambda handle: handle + self.numberOfVertices * 1
W = lambda handle: handle + self.numberOfVertices * 2
def gravityTerm():
# Gravity Term
for region in self.grid.regions:
density = self.propertyData.get(region.handle, "Density")
gravity = self.propertyData.get(region.handle, "Gravity")
for element in region.elements:
local = 0
for vertex in element.vertices:
self.independent[V(vertex.handle)] += - density * gravity * element.subelementVolumes[local]
local += 1
def stressTerm():
# Stress Term
for region in self.grid.regions:
constitutiveMatrix = self.getConstitutiveMatrix(region)
for element in region.elements:
for innerFace in element.innerFaces:
transposedVoigtArea = self.getTransposedVoigtArea(innerFace)
voigtGradientOperator = self.getVoigtGradientOperator(innerFace.globalDerivatives)
matrixCoefficient = np.einsum("ij,jk,kmn->imn", transposedVoigtArea, constitutiveMatrix, voigtGradientOperator)
backwardVertexHandle = element.vertices[element.shape.innerFaceNeighborVertices[innerFace.local][0]].handle
forwardVertexHandle = element.vertices[element.shape.innerFaceNeighborVertices[innerFace.local][1]].handle
for local, vertex in enumerate(element.vertices):
for neighborVertex in [backwardVertexHandle, forwardVertexHandle]:
self.add( U(neighborVertex), U(vertex.handle), matrixCoefficient[0][0][local] )
self.add( U(neighborVertex), V(vertex.handle), matrixCoefficient[0][1][local] )
self.add( V(neighborVertex), U(vertex.handle), matrixCoefficient[1][0][local] )
self.add( V(neighborVertex), V(vertex.handle), matrixCoefficient[1][1][local] )
if self.dimension == 3:
self.add( W(neighborVertex), W(vertex.handle), matrixCoefficient[2][2][local] )
self.add( U(neighborVertex), W(vertex.handle), matrixCoefficient[0][2][local] )
self.add( V(neighborVertex), W(vertex.handle), matrixCoefficient[1][2][local] )
self.add( W(neighborVertex), U(vertex.handle), matrixCoefficient[2][0][local] )
self.add( W(neighborVertex), V(vertex.handle), matrixCoefficient[2][1][local] )
matrixCoefficient *= -1
def boundaryConditions():
# Boundary Conditions
for bc in self.problemData.boundaryConditions:
boundary=bc["u"].boundary
uBoundaryType = bc["u"].__type__
vBoundaryType = bc["v"].__type__
wBoundaryType = bc["w"].__type__ if "w" in bc.keys() else None
# Neumann Boundary Conditions
if uBoundaryType == "NEUMANN":
for facet in boundary.facets:
for outerFace in facet.outerFaces:
self.independent[U(outerFace.vertex.handle)] -= bc["u"].getValue(outerFace.handle) * np.linalg.norm(outerFace.area.getCoordinates())
if vBoundaryType == "NEUMANN":
for facet in boundary.facets:
for outerFace in facet.outerFaces:
self.independent[V(outerFace.vertex.handle)] -= bc["v"].getValue(outerFace.handle) * np.linalg.norm(outerFace.area.getCoordinates())
if wBoundaryType == "NEUMANN":
for facet in boundary.facets:
for outerFace in facet.outerFaces:
self.independent[W(outerFace.vertex.handle)] -= bc["w"].getValue(outerFace.handle) * np.linalg.norm(outerFace.area.getCoordinates())
# Dirichlet Boundary Conditions
if uBoundaryType == "DIRICHLET":
for vertex in boundary.vertices:
self.independent[U(vertex.handle)] = bc["u"].getValue(vertex.handle)
self.matrixVals = [val for coord, val in zip(self.coords, self.matrixVals) if coord[0] != U(vertex.handle)]
self.coords = [coord for coord in self.coords if coord[0] != U(vertex.handle)]
self.add(U(vertex.handle), U(vertex.handle), 1.0)
if vBoundaryType == "DIRICHLET":
for vertex in boundary.vertices:
self.independent[V(vertex.handle)] = bc["v"].getValue(vertex.handle)
self.matrixVals = [val for coord, val in zip(self.coords, self.matrixVals) if coord[0] != V(vertex.handle)]
self.coords = [coord for coord in self.coords if coord[0] != V(vertex.handle)]
self.add(V(vertex.handle), V(vertex.handle), 1.0)
if wBoundaryType == "DIRICHLET":
for vertex in boundary.vertices:
self.independent[W(vertex.handle)] = bc["w"].getValue(vertex.handle)
self.matrixVals = [val for coord, val in zip(self.coords, self.matrixVals) if coord[0] != W(vertex.handle)]
self.coords = [coord for coord in self.coords if coord[0] != W(vertex.handle)]
self.add(W(vertex.handle), W(vertex.handle), 1.0)
if self.gravity:
gravityTerm()
stressTerm()
boundaryConditions()
def solveLinearSystem(self):
self.matrix = sparse.csc_matrix( (self.matrixVals, zip(*self.coords)) )
self.inverseMatrix = sparse.linalg.inv( self.matrix )
self.displacements = self.inverseMatrix * self.independent
def saveIterationResults(self):
self.saver.save('u', self.displacements[0*self.numberOfVertices:1*self.numberOfVertices], self.currentTime)
self.saver.save('v', self.displacements[1*self.numberOfVertices:2*self.numberOfVertices], self.currentTime)
if self.dimension == 3:
self.saver.save('w', self.displacements[2*self.numberOfVertices:3*self.numberOfVertices], self.currentTime)
def stressEquilibrium(workspaceDirectory,solve=True,extension="csv",gravity=False,verbosity=False):
solver = StressEquilibriumSolver(workspaceDirectory,extension=extension,gravity=gravity,verbosity=verbosity)
if solve:
solver.solve()
return solver
if __name__ == "__main__":
    model = sys.argv[1] if len(sys.argv) > 1 and "-" not in sys.argv[1] else None
    extension_args = [arg.split('=')[1] for arg in sys.argv if "--extension" in arg]
    extension = extension_args[0] if extension_args else "xdmf"
problemData = PyEFVLib.ProblemData(
meshFilePath = "{MESHES}/msh/2D/Fine.msh",
outputFilePath = "{RESULTS}/stress_equilibrium",
propertyData = PyEFVLib.PropertyData({
"Body": {
"Density": 1800.0,
"PoissonsRatio": 0.4,
"ShearModulus": 6.0e+06,
"Gravity": 0.0,
},
}),
boundaryConditions = PyEFVLib.BoundaryConditions({
"u": {
"InitialValue": 0.0,
"West": { "condition": PyEFVLib.Dirichlet, "type": PyEFVLib.Constant, "value": 0.0 },
"East": { "condition": PyEFVLib.Dirichlet, "type": PyEFVLib.Constant, "value": 0.0 },
"South": { "condition": PyEFVLib.Neumann, "type": PyEFVLib.Constant, "value": 0.0 },
"North": { "condition": PyEFVLib.Neumann, "type": PyEFVLib.Constant, "value": 0.0 }
},
"v": {
"InitialValue": 0.0,
"West": { "condition": PyEFVLib.Neumann, "type": PyEFVLib.Constant, "value": 0.0 },
"East": { "condition": PyEFVLib.Neumann, "type": PyEFVLib.Constant, "value": 0.0 },
"South": { "condition": PyEFVLib.Dirichlet, "type": PyEFVLib.Constant, "value": 0.0 },
"North": { "condition": PyEFVLib.Neumann, "type": PyEFVLib.Constant, "value": -1e4 }
}
}),
)
solver=stressEquilibrium(problemData,extension=extension,gravity="-G" in sys.argv)
|
from django.utils.safestring import mark_safe
from markdown import markdown
from pygments import highlight
from pygments.formatters import get_formatter_by_name
from pygments.lexers import get_lexer_by_name
from wagtail.core.blocks import (
StreamBlock,
RichTextBlock,
TextBlock,
CharBlock
)
from wagtail.images.blocks import ImageChooserBlock
#from wagtail.documents.blocks import DocumentChooserBlock
from wagtail.embeds.blocks import EmbedBlock
from wagtailcodeblock.blocks import CodeBlock
from wagtailmarkdown.blocks import MarkdownBlock
class ContentStreamBlock(StreamBlock):
h2 = CharBlock(icon="title", classname="title")
h3 = CharBlock(icon="title", classname="title")
h4 = CharBlock(icon="title", classname="title")
paragraph = RichTextBlock(required=False)
image = ImageChooserBlock(required=False)
code = CodeBlock(label='Code', required=False)
embeds = EmbedBlock(required=False)
markdown = MarkdownBlock(required=False)
# class Meta:
# template = 'puput/code_block1.html'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InteligentGeneralMerchantPromo import InteligentGeneralMerchantPromo
class InteligentMerchantPromo(object):
def __init__(self):
self._general_promo = None
self._promo_type = None
@property
def general_promo(self):
return self._general_promo
@general_promo.setter
def general_promo(self, value):
if isinstance(value, InteligentGeneralMerchantPromo):
self._general_promo = value
else:
self._general_promo = InteligentGeneralMerchantPromo.from_alipay_dict(value)
@property
def promo_type(self):
return self._promo_type
@promo_type.setter
def promo_type(self, value):
self._promo_type = value
def to_alipay_dict(self):
params = dict()
if self.general_promo:
if hasattr(self.general_promo, 'to_alipay_dict'):
params['general_promo'] = self.general_promo.to_alipay_dict()
else:
params['general_promo'] = self.general_promo
if self.promo_type:
if hasattr(self.promo_type, 'to_alipay_dict'):
params['promo_type'] = self.promo_type.to_alipay_dict()
else:
params['promo_type'] = self.promo_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InteligentMerchantPromo()
if 'general_promo' in d:
o.general_promo = d['general_promo']
if 'promo_type' in d:
o.promo_type = d['promo_type']
return o
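# Round-trip sketch (illustrative value only):
#
#   promo = InteligentMerchantPromo.from_alipay_dict({'promo_type': 'GENERAL'})
#   assert promo.to_alipay_dict() == {'promo_type': 'GENERAL'}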
|
#!/usr/bin/python
# vim: set expandtab ts=4 sw=4:
#%% -----------------------------------------------------
#
# This script runs the main analysis on 5 minutes of data from a single run.
# The analysis is repeated a specified number of time with jittered mask
# frequencies to assess the robustness of the theta waveform shape to mask
# parameter selection.
#%% -----------------------------------------------------
# Imports and definitions
import os
import emd
import h5py
import logging
import numpy as np
import matplotlib.pyplot as plt
from emd_waveform_utils import config, load_dataset
# Assumption: the emd package exposes its sift convergence error from the
# support module; adjust this import if your emd version keeps it elsewhere.
from emd.support import EMDSiftCovergeError
def run_iter(raw, sample_rate, seconds, sift_config):
try:
# Run sift
imf, mf = emd.sift.mask_sift(raw[:sample_rate*seconds], **sift_config)
except EMDSiftCovergeError:
return None
# Frequency Transform
IP, IF, IA = emd.spectra.frequency_transform(imf, sample_rate, 'hilbert', smooth_phase=3)
# Compute cycle statistics - only those needed to find subset
C = emd.cycles.Cycles(IP[:, 5])
C.compute_cycle_metric('duration_samples', imf[:, 5], len)
C.compute_cycle_metric('max_amp', IA[:, 5], np.max)
C.compute_cycle_metric('speed', speed, np.mean)
# Extract included subset of cycles
amp_thresh = np.percentile(IA[:, 5], 25)
lo_freq_duration = 1250/4
hi_freq_duration = 1250/12
conditions = ['is_good==1',
f'duration_samples<{lo_freq_duration}',
f'duration_samples>{hi_freq_duration}',
f'max_amp>{amp_thresh}',
'speed>1']
C.pick_cycle_subset(conditions)
# phase-aligned waveforms
pa, phasex = emd.cycles.phase_align(IP[:, 5], IF[:, 5], C.iterate(through='subset'))
return pa.mean(axis=1)
#%% ----------------------------------------------------
# Main loop
# Load dataset
run = 2
run_name = config['recordings'][run]
logfile = os.path.join(config['analysisdir'], run_name+'_maskjitter.log')
emd.logger.set_up(prefix=run_name, log_file=logfile)
logger = logging.getLogger('emd')
logger.info('STARTING: {0}'.format(run_name))
raw, speed, time, sample_rate = load_dataset(run_name)
# Load sift specification
conf_file = os.path.join(config['basedir'], 'emd_masksift_CA1_config.yml')
sift_config = emd.sift.SiftConfig.from_yaml_file(conf_file)
orig_masks = sift_config['mask_freqs'].copy()
# Specify number of iterations and jitter ranges
niters = 25
mask_jitters = [0.1, 0.2, 0.3]
seconds = 300
# Start main analysis
logger.info('STARTING: sift with original parameters')
pa_orig = run_iter(raw, sample_rate, seconds, sift_config)
pas = np.zeros((48, niters, len(mask_jitters)))
for ii in range(niters):
for jj in range(len(mask_jitters)):
logger.info('STARTING: Iteration {0} of {1} with jitter {2}'.format(ii+1, niters, mask_jitters[jj]))
flag = True
while flag:
jitter = np.random.uniform(1-mask_jitters[jj], 1+mask_jitters[jj], len(orig_masks))
sift_config['mask_freqs'] = orig_masks * jitter
p = run_iter(raw, sample_rate, seconds, sift_config)
if p is None:
logger.info('Iteration failed - trying again with new masks')
continue
else:
flag = False
pas[:, ii, jj] = p
#%% ----------------------------------------------------
# Summary Figure
phasex = np.linspace(0, 2*np.pi, 48)
titles = ['Manuscript Masks', '10% Mask Jitter', '20% Mask Jitter', '30% Mask Jitter']
plt.figure(figsize=(12,6))
plt.subplot(141)
plt.plot(phasex, pa_orig, 'k', linewidth=2)
plt.ylim(7, 11)
plt.xticks(np.linspace(0, 2*np.pi, 5), ['0', 'pi/2', 'pi', '3pi/2', '2pi'])
for tag in ['top', 'right']:
plt.gca().spines[tag].set_visible(False)
plt.title(titles[0])
plt.ylabel('Instantaneous Frequency (Hz)')
for ii in range(3):
plt.subplot(1, 4, ii+2)
plt.plot(phasex, pas[:,:,ii], color=[0.6, 0.6, 0.6], linewidth=0.5)
plt.plot(phasex, pas[:,:,ii].mean(axis=1), 'k', linewidth=2)
plt.ylim(7, 11)
plt.gca().set_yticklabels([])
plt.xticks(np.linspace(0, 2*np.pi, 5), ['0', 'pi/2', 'pi', '3pi/2', '2pi'])
for tag in ['top', 'right']:
plt.gca().spines[tag].set_visible(False)
plt.title(titles[ii+1])
plt.xlabel('Theta Phase (rads)')
outname = os.path.join(config['figdir'], 'emd_supp1_maskjitter.png')
plt.savefig(outname, dpi=300, transparent=True)
|
from github import Github
from cloudmesh.configuration.Config import Config
from pprint import pprint
import requests
from textwrap import dedent
from pathlib import Path
import time
import csv
from cloudmesh.common.util import readfile
class Manager(object):
def __init__(self, organization="cloudmesh-community"):
config = Config()
g = Github(config["cloudmesh.github.user"],
config["cloudmesh.github.password"])
if organization != "cloudmesh-community":
raise ValueError(
"currently we support only organization cloudmesh-community")
self.org = g.get_organization(organization)
self.ta_team = self.org.get_team(2631498)
def list(self, match=None):
for r in self.org.get_repos():
if match is None:
print(r.name, r.description)
else:
name = r.name or ""
description = r.description or ""
if match in name or match in description:
print(r.name, r.description)
def create_repo(self,
firstname=None,
lastname=None,
name=None,
community=None,
semester="fa19",
githubid=None
):
description = f"{firstname} {lastname}"
repo = self.org.create_repo(name,
description=description,
license_template="apache-2.0")
readme = dedent(f"""
---
owner:
firstname: "{firstname}"
lastname: "{lastname}"
hid: "{name}"
community: "{community}"
semester: "{semester}"
""").strip()
print(readme)
print("Add README.yml")
repo.create_file("README.yml",
"Create the Readme.yml",
readme,
branch="main")
print("Add .gitignore")
        # BUG: the .gitignore template should be located inside the installed distribution, not the current working directory
with open(Path(".gitignore").resolve()) as file:
gitignore = file.read()
repo.create_file(".gitignore", "create the .gitignore", gitignore,
branch="main")
try:
repo.add_to_collaborators(githubid, permission="write")
except Exception as e:
pass
self.ta_team.add_to_repos(repo)
self.ta_team.set_repo_permission(repo, "write")
def create_repos(self, filename=None):
with open(filename, newline='') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
for row in reader:
firstname = row['firstname']
lastname = row['lastname']
githubid = row['githubid']
community = row['community']
semester = row['semester']
name = row['repo']
print(f"Create: {name} {firstname} {lastname} {githubid}")
self.create_repo(
firstname=firstname,
lastname=lastname,
name=name,
community=community,
semester=semester,
githubid=githubid
)
def issue(self, repos=None, title=None, file=None):
pprint(repos)
for repo in repos:
if file is not None:
content = readfile(file).strip()
if title is None:
title = content.splitlines()[0]
title = title.replace("#", "").strip()
repository_obj = self.org.get_repo(repo)
repository_obj.create_issue(title=title, body=content)
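# Minimal usage sketch (hypothetical CSV path; requires github credentials in
# the cloudmesh configuration read by Config(), and a CSV with the columns
# firstname, lastname, githubid, community, semester, repo):
#
#   manager = Manager()
#   manager.list(match="fa19")
#   manager.create_repos(filename="students-fa19.csv")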
|
class pair_stats_tcp:
src = 0
dst = 0
count = 0
# tcp.srcport
# tcp.dstport
# tcp.flags.res
# tcp.flags.ns
# tcp.flags.cwr
# tcp.flags.ecn
# tcp.flags.urg
# tcp.flags.ack
# tcp.flags.push
# tcp.flags.reset
# tcp.flags.syn
# tcp.flags.fin
class pair_stats_udp:
src = 0
dst = 0
count = 0
# udp.srcport
# udp.dstport
class pair_stats_arp:
src = 0
dst = 0
count = 0
# arp.src.hw_mac
# arp.dst.hw_mac
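# Intended usage sketch (assumption: one instance per observed (src, dst) pair,
# with count incremented for every matching packet in the capture):
#
#   stats = pair_stats_tcp()
#   stats.src, stats.dst = '10.0.0.1', '10.0.0.2'
#   stats.count += 1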
|
# Generated by Django 2.1.1 on 2019-08-21 00:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0013_loanpayment'),
]
operations = [
migrations.AlterField(
model_name='loan',
name='loan_type',
field=models.IntegerField(default=1),
),
migrations.AlterField(
model_name='loan',
name='payment_period',
field=models.IntegerField(default=1),
),
]
|
import json
import os
import time
import requests
from bs4 import BeautifulSoup
from datetime import datetime
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
def run_driver():
""" Run webdriver Chrome """
try:
current_path = os.getcwd()
options = Options()
options.add_experimental_option("prefs", {
"download.default_directory": current_path,
"download.prompt_for_download": False,
"download.directory_upgrade": True,
"safebrowsing.enabled": True})
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--enable-javascript')
options.add_argument('--no-sandbox')
options.add_argument('--ignore-certificate-errors')
driver = webdriver.Chrome(executable_path=r"D:\Program\Anaconda3\Scripts\chromedriver.exe", chrome_options=options)
return driver
except Exception as e:
print(f'Error: {e}')
def get_esia_session(login_url: str, personal_page: str, driver) -> bytes or str:
""" Get session in ESIA """
try:
driver.get(login_url)
if requests.get(login_url).status_code == 200:
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'login'))).send_keys(os.environ['ESIA_LOGIN'])
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID,'password'))).send_keys(os.environ['ESIA_PASSWORD'])
WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.ID, 'loginByPwdButton'))).click()
time.sleep(5)
driver.get(personal_page)
session = requests.Session()
r = session.get(personal_page, cookies={c['name']:c['value'] for c in driver.get_cookies()}).text.encode("UTF-8")
time.sleep(5)
print("ESIA session: OK!")
time.sleep(2)
# Unlock to get document ...
# print("Get document...")
# get_doc(driver)
# time.sleep(10)
return r
else:
print("Error! Status: ", requests.get(login_url).status_code)
except Exception as e:
print(f'Error in get session: {e}')
finally:
driver.delete_all_cookies()
driver.close()
driver.quit()
# Unlock to get document...
# def get_doc(driver):
# """ Get document: The employment history """
# try:
# driver.get("https://www.gosuslugi.ru/600302/1/form")
# WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "button.button.font-"))).click()
# time.sleep(10)
# WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "button.button.font-"))).click()
# time.sleep(30)
# WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "h4.normal.black.text-plain.mb-4.bold"))).click()
# time.sleep(5)
# WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, "a.text-plain.gray.small-text.file-name"))).click()
# time.sleep(10)
# print("Get document: OK")
# except Exception as e:
# print("Error: ", e)
def get_info_from_script(r: bytes):
""" Get information from JS Object """
try:
soup = BeautifulSoup(r, 'html.parser')
script = soup.find('script')
if script:
script_tag_contents = script.string
with open('source.txt', 'w') as f:
f.write(script_tag_contents)
f.close()
print("Get info: OK!")
return f
except Exception as e:
print(f'Error in get info: {e}')
def transform_txt_to_json(f):
""" Transorm JS Object to JSON with the necessary information """
try:
f = open('source.txt', 'rt')
lines = f.readlines()
f.close()
with open('source.json', 'w') as json_file:
line_with_data = lines[2][:-2]
json_file.write(line_with_data.replace(' data', '{"data"'))
json_file.write('}')
json_file.close()
print("Transform data: OK!")
return json_file
except Exception as e:
print(f'Error in transform: {e}')
def write_info_to_txt(json_file):
""" Write passport data to .txt file in folder passport_data """
try:
if os.path.exists('passport_data'):
print("Folder already exists!")
else:
os.mkdir('passport_data')
print("Folder created successfully!")
with open("source.json") as jsonFile:
json_object = json.load(jsonFile)
get_id = str(json_object["data"]["user"]["userId"])
if not os.path.exists(r'./passport_data/passport' + '[' + get_id + ']' + '.txt'):
type_docs = json_object["data"]["user"]["person"]["docs"][0]["type"]
if type_docs != "RF_PASSPORT":
with open('./passport_data/passport.txt', 'a') as passport_data:
passport_data.write(f'Время добавления: {datetime.strftime(datetime.now(), "%d/%m/%Y %H:%M")}\n')
passport_data.write(f'ФИО: {json_object["data"]["user"]["formattedName"]}\n')
passport_data.write(f'Паспорт РФ:\ncерия: {json_object["data"]["user"]["person"]["docs"][1]["series"]} номер: {json_object["data"]["user"]["person"]["docs"][1]["number"]}\n')
passport_data.write(f'Выдан: {json_object["data"]["user"]["person"]["docs"][1]["issuedBy"]}')
passport_data.close()
os.rename(r'./passport_data/passport.txt', r'./passport_data/passport' + '[' + get_id + ']' + '.txt')
else:
with open('./passport_data/passport.txt', 'a') as passport_data:
passport_data.write(f'Время добавления: {datetime.strftime(datetime.now(), "%d/%m/%Y %H:%M")}\n')
passport_data.write(f'ФИО: {json_object["data"]["user"]["formattedName"]}\n')
passport_data.write(f'Паспорт РФ:\ncерия: {json_object["data"]["user"]["person"]["docs"][0]["series"]} номер: {json_object["data"]["user"]["person"]["docs"][0]["number"]}\n')
passport_data.write(f'Выдан: {json_object["data"]["user"]["person"]["docs"][0]["issuedBy"]}')
passport_data.close()
os.rename(r'./passport_data/passport.txt', r'./passport_data/passport' + '[' + get_id + ']' + '.txt')
else:
print("This passport already exists!")
print("Write info: OK!")
except Exception as e:
print(f'Error in write to file: {e}')
def delete_file():
""" Delete temp file """
try:
os.remove('source.json')
os.remove('source.txt')
print("TMP files was delete!")
except OSError as e:
print(f'Error: {e.strerror}')
if __name__ == '__main__':
try:
runner = run_driver()
get_session = get_esia_session(login_url='https://esia.gosuslugi.ru/', personal_page='https://lk.gosuslugi.ru/profile/personal', driver=runner)
if get_session:
get_info = get_info_from_script(r=get_session)
transform_data = transform_txt_to_json(f=get_info)
write_info_to_txt(json_file=transform_data)
delete_file()
except Exception as e:
print(f'Error: {e}')
|
import unittest
import numpy as np
from kaggle_metrics.regression import *
class TestRegression(unittest.TestCase):
# TODO: Additional tests
    def test_mae(self):
y_pred = np.array([.1, 2., 3.4, 1., 5.3])
y_true = np.array([.3, 2.2, 3.6, 1., 4.3])
        assert np.isclose(mae(y_true, y_pred), 0.32)
def test_rmse(self):
y_pred = np.array([.1, 2., 3.4, 1., 5.3])
y_true = np.array([.3, 2.2, 3.6, 1., 4.3])
        assert np.isclose(rmse(y_true, y_pred), 0.473286382648)
def test_rmsle(self):
y_pred = np.array([.1, 2., 3.4, 1., 5.3])
y_true = np.array([.3, 2.2, 3.6, 1., 4.3])
        assert np.isclose(rmsle(y_true, y_pred), 0.113068903823)
if __name__ == '__main__':
unittest.main()
|
####################################################################################
#############################Language Views###########################################
@admin_blueprint.route('/admin/list_languages', methods=['GET', 'POST'] )
@roles_required('admin')
def admin_list_languages():
languages = Language.query.order_by(Language.name.asc())
for language in languages:
print(language.name)
return render_template('admin/admin_list_languages.html', languages=languages)
@admin_blueprint.route('/admin/create_language', methods=['GET', 'POST'])
@roles_required('admin')
def admin_create_language():
error_msg = ""
language = Language()
# users = User.query.join(UsersRoles).join(Role).filter(Role.name == 'student').all()
if request.method == 'POST':
        # language-name validation (names must be unique)
other_language = Language.query.filter(Language.name == request.form['name'] ).first()
if (other_language is not None) and (other_language.id != language.id):
# language.user_ids = request.form.getlist('users') # keeps appropriate users selected
error_msg = "This language name is already being used"
flash('This language name is already being used!!', 'error')
else:
language.name = request.form['name']
# language.users = []
# for user_id in request.form.getlist('users'):
# userObj = User.query.filter(User.id == user_id).first()
# language.users.append(userObj)
db.session.add(language)
db.session.commit()
flash('Language Created!!', 'success')
return redirect(url_for('admin.admin_list_languages'))
return render_template('admin/admin_create_edit_language.html', language=language, error_msg=error_msg, verb="Create")
@admin_blueprint.route('/admin/edit_language/<language_id>', methods=['GET', 'POST'] )
@roles_required('admin')
def admin_edit_language(language_id):
error_msg=""
language = Language.query.filter(Language.id == language_id).first()
# users = User.query.join(UsersRoles).join(Role).filter(Role.name == 'student').all()
# using user.roles creates complications. so we make a new attribute instead. this
# is used in the form to select what roles are associated with the user
# language.user_ids = []
# for user in language.users:
# language.user_ids.append(str(user.id))
if request.method == 'GET':
request.form.name = language.name
elif request.method == 'POST':
        # language-name validation (names must be unique)
other_language = Language.query.filter(Language.name == request.form['name'] ).first()
if (other_language is not None) and (other_language.id != language.id):
language.user_ids = request.form.getlist('users') # keeps appropriate users selected
error_msg = "This language name is already being used"
flash('This language name is already being used!!', 'error')
else:
language.name = request.form['name']
# language.users = []
# for user_id in request.form.getlist('users'):
# userObj = User.query.filter(User.id == user_id).first()
# language.users.append(userObj)
db.session.add(language)
db.session.commit()
flash('Language Updated!!', 'success')
return redirect(url_for('admin.admin_list_languages'))
return render_template('admin/admin_create_edit_language.html', language=language, error_msg=error_msg, verb="Edit")
@admin_blueprint.route('/admin/delete_language/<language_id>')
@roles_required('admin')
def admin_delete_language(language_id):
language = Language.query.filter(Language.id == language_id).first()
db.session.delete(language)
db.session.commit()
flash('Language Deleted!!', 'success')
return redirect(url_for('admin.admin_list_languages'))
#############################End Language Views#######################################
#################################################################################### |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#modified by Marquette REU team
import BaseHTTPServer, urllib, re, os,sys
from BaseHTTPServer import HTTPServer
from SocketServer import ThreadingMixIn
import random
import threading
import string
absPathMUBD ="/var/www/html/MUBlocklyDuino/"
lock = threading.Lock()
lock3 = threading.Lock()
lock2 = threading.Lock()
server_info=("134.48.6.40", 8080)
#make the HTTP Server multi threaded
class BrylowHTTPServer(ThreadingMixIn, HTTPServer):
pass
#Handle requests in a separate thread
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
#template_begin = u"""<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
#"http://www.w3.org/TR/html4/strict.dtd"><html><body>"""
#template_end=u"<h1>Arduino INO web server</h1>To upload to an Arduino board connected to this computer, POST to /.</body></html>"
template_begin="<html>"
template_end="</html>"
def escape_html(self, text):
"""Replace special HTML characters with HTML entities"""
return text.replace(
"&", "&amp;").replace(">", "&gt;").replace("<", "&lt;")
def do_HEAD(self):
"""Send response headers"""
self.send_response(200)
self.send_header("content-type", "text/html;charset=utf-8")
self.end_headers()
def do_GET(self):
"""Send a link of a hex file corresponding with your ip address."""
#don't send header html to the extension
#self.do_HEAD()
print self.path
self.wfile.write(self.template_begin)
#critical section where we try to find the hex_file corresponding to the client's ip address and when we find it send the link with html tags to the extension
#if there is a hex file, report success and send the user the link; if not, tell them the file was not found
if os.path.exists(absPathMUBD+"ardusers/hex_files/"+ self.path[1:]+".txt"):
print "success "+self.client_address[0]+" got the hex file "+self.path[1:]+".txt"
print "http://"+server_info[0]+"/MUBlocklyDuino/ardusers/hex_files/"+self.path[1:]+".txt"
self.wfile.write("http://"+server_info[0]+"/ardusers/hex_files/"+self.path[1:]+".txt")
else:
print "file not found"
print server_info[0]
self.wfile.write("file_not_found")
self.wfile.write(self.template_end)
print threading.currentThread().getName()+" handled GET Request from "+self.client_address[0]
def do_POST(self):
"""Save new page text and display it"""
length = int(self.headers.getheader('content-length'))
print threading.currentThread().getName()+" handling post request from "+self.client_address[0]
if length:
text = self.rfile.read(length)
arduinoCode = text[:-12] #arduinoCode: hashCode (11 characters)
hashCode = text[-11:]
print "hashCode is " + hashCode
print "sketch to upload: " + arduinoCode
# create ino project (if it doesn't exist already)
os.system("mkdir ino_project")
os.chdir("ino_project")
rc = os.system("ino init")
# 32512 probably means ino is not installed
if rc == 32512:
print "ino init returned " + `rc`
self.send_response(501)
else:
# write to file: critical section
lock2.acquire()
fo = open("src/sketch.ino", "wb")
fo.write(arduinoCode + "\n");
fo.close()
lock2.release()
print "created src/sketch.ino"
# invoke ino to build
print "ino building"
lock3.acquire()
rc = os.system("ino build")
lock3.release()
# 512 probably means invalid option (skip_lib_includes)
if not rc == 0:#didn't work
print "ino build returned " + `rc`
self.send_response(400)
else:
#no upload
#rc = os.system("ino upload")
if not rc == 0:#didn't work
print "ino build returned " + `rc`
self.send_response(500)
else:
#edit hosts. We are inside the ino_project: critical section
#read the hosts and if we haven't seen the client's ip address yet then write to the hosts file with a randomly generated name for the hex_file
# if we have seen the client's ip address before then get the hex file that corresponds to the client and overwrite it with the new hex file
os.system("cp "+absPathMUBD+"ino_project/.build/leonardo/firmware.hex "+absPathMUBD+"ardusers/hex_files/"+hashCode+".txt")
print "updated the users hex file with the compiled hex"
self.send_response(200)
self.send_header("Access-Control-Allow-Origin","*")
os.chdir("..")
#this occurs if the person doesn't click the arduino tab at least once: the length of content is 0
else:
print "post request failed because content was "+ str(length)+" bytes"
self.send_response(300)
self.send_header("Access-Control-Allow-Origin","*")
if __name__ == '__main__':
print "running local web server at 134.48.6.40:8080..."
fpid = os.fork()
if fpid!=0:
sys.exit(0)
server = BrylowHTTPServer(server_info, Handler)
print 'Starting server, use <Ctrl-C> to stop'
server.pages = {}
server.serve_forever()
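# --- Hedged usage note (added for illustration; not part of the original script) ---
# The server expects POST bodies of the form "<arduino sketch><separator><11-char hash>":
# the last 11 characters are the hash and one separator character precedes it
# (see do_POST: text[:-12] / text[-11:]). A rough curl example might look like:
#   curl -X POST --data 'void setup(){}void loop(){}:abcdefghijk' http://134.48.6.40:8080/
# A GET to /<hash> then returns the URL of the compiled hex file if the build succeeded.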
|
# -*- coding: utf-8 -*-
'''
Module to provide access to the power DNS http API
:configuration: This module uses the pdnsapi python library. It accepts
connection parameters as well as configuration settings::
pdns.url: "http://192.168.10.65:8081"
pdns.server_id: "localhost"
pdns.api_key: "f5d2abcd"
This data can also be passed into pillar. Options passed into opts will
overwrite options passed into pillar.
'''
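# Example CLI usage (illustrative sketch, assuming the module is loaded on a
# minion with the pdns.* options configured as shown above):
#   salt '*' powerdns.list_zones
#   salt '*' powerdns.get_zone example.com
#   salt '*' powerdns.add_record example.com www A ttl=300 records='["192.0.2.10"]'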
from __future__ import absolute_import
# Import python libs
import logging
from distutils.version import LooseVersion # pylint: disable=import-error,no-name-in-module
import json
import re
from pprint import pformat
# Import salt libs
from salt.ext.six import string_types
from salt.exceptions import get_error_message as _get_error_message
# Import third party libs
try:
import pdnsapi as api
from pdnsapi.exceptions import (
PDNSAccessDeniedException, PDNSNotFoundException,
PDNSProtocolViolationException, PDNSServerErrorException,
PDNSException)
HAS_PDNSAPI = True
except ImportError:
HAS_PDNSAPI = False
log = logging.getLogger(__name__)
def __virtual__():
'''
Only load this module if pdnsapi is installed
'''
if HAS_PDNSAPI:
return 'powerdns'
else:
return (False, 'The powerdns execution module cannot be loaded: the pdnsapi library is not available.')
def _canonicalize_name(name):
if not name.endswith('.'):
return name + '.'
else:
return name
def _connect():
url = __salt__['config.option']('pdns.url')
server_id = __salt__['config.option']('pdns.server_id')
api_key = __salt__['config.option']('pdns.api_key')
log.debug("Attempting to connect: '%s' '%s' '%s'" % (url, server_id, api_key))
try:
conn = api.init_api(url, server_id, api_key)
except PDNSException as e:
log.error("Exception while opening API connection: '%s'" % (e))
return False
log.debug("connected: '%s' '%s' '%s'" % (url, server_id, api_key))
return conn
def list_zones():
conn = _connect()
if not conn:
return "Failed to connect to powerDNS"
log.debug("Attempting to pull zonelist")
zonelist = conn.zones
log.debug("Zonelist: %s" % (zonelist))
return [zone.name for zone in zonelist]
def zone_exists(name):
conn = _connect()
if not conn:
return False
try:
zone = conn.get_zone(name)
except PDNSException as e:
return False
return True
def get_zone(name):
conn = _connect()
if not conn:
return "Failed to connect to powerDNS"
try:
zone = conn.get_zone(name)
except PDNSException as e:
return "Exception while getting zone: '%s'" % (e)
return [{'name': record.name, 'type': record.type, 'ttl': record.ttl, 'records': [record2 for record2 in record.records]} for record in zone.records]
def get_record(zone, name, rtype):
conn = _connect()
if not conn:
return "Failed to connect to powerDNS"
try:
record, _ = _get_record_zone(conn, zone, name, rtype)
except PDNSException as e:
return "Could not get record '%s'" % (e)
return { 'zone': zone, 'name': record.name, 'type': record.type, 'ttl': record.ttl, 'records': [rec for rec in record.records]}
def _get_record_zone(conn, zone, name, rtype):
canonical_zone = _canonicalize_name(zone)
zone_rec = conn.get_zone(canonical_zone)
if not name.endswith(zone):
name = name + '.' + zone
record = zone_rec.get_record(_canonicalize_name(name), rtype)
return record, zone_rec
def del_record(zone, name, rtype):
conn = _connect()
if not conn:
return "Failed to connect to powerDNS"
try:
record, zone_rec = _get_record_zone(conn, zone, name, rtype)
except PDNSException as e:
return "Could not get record '%s'" % (e)
try:
zone_rec.delete_record(record)
except PDNSException as e:
return "Could not delete record '%s'" % (e)
return True
def add_zone(zone, name_servers=None, records=None):
conn = _connect()
if not conn:
return "Failed to connect to powerDNS"
canonical_zone = _canonicalize_name(zone)
try:
zone = conn.create_zone(canonical_zone, name_servers, records)
except PDNSException as e:
return "Failed to create zone: '%s'" % (e)
return [{'name': record.name, 'type': record.type, 'ttl': record.ttl, 'records': [record2 for record2 in record.records]} for record in zone.records]
def del_zone(zone):
conn = _connect()
if not conn:
log.error("Failed to connect to powerDNS")
return False
canonical_zone = _canonicalize_name(zone)
try:
zone = conn.delete_zone(canonical_zone)
except PDNSException as e:
log.error("Failed to delete zone: '%s'" % (e))
return False
return True
def add_record(zone, name, rtype, ttl=300, **kwargs):
conn = _connect()
if not conn:
log.error("Failed to connect to powerDNS")
return False
if 'records' not in kwargs:
log.error("Must specify records. Ex: records='[ list, of, records ]'")
return False
canonical_zone = _canonicalize_name(zone)
try:
zone_rec = conn.get_zone(canonical_zone)
except PDNSException as e:
log.error("Could not get zone '%s': '%s'" % (canonical_zone, e))
return False
if not name.endswith(zone):
name = name + '.' + zone
record = api.Record(_canonicalize_name(name), rtype, kwargs['records'], ttl)
try:
foo = zone_rec.add_record(record)
except PDNSException as e:
log.error("add_record failed: '%s'" % (e))
return False
return True
# return { 'zone': canonical_zone, 'name': record.name, 'type': record.type, 'ttl': record.ttl, 'records': [rec for rec in record.records]}
def argtest(*args, **kwargs):
#log.error("'%s'" % (pformat(kwargs)))
if '__id__' in kwargs:
kwargs['YAY'] = 'Called from STATE'
kwargs['args'] = args
return kwargs
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import requests
import logging
from typing import Optional
logger = logging.getLogger('simuleval.online.client')
class Client(object):
def __init__(self, args):
self.hostname = args.hostname
self.port = args.port
self.timeout = getattr(args, 'timeout', 10)
self.args = args
self.base_url = f'http://{self.hostname}:{self.port}'
def reset_scorer(self):
# start eval session
url = f'{self.base_url}'
try:
_ = requests.post(url, timeout=self.timeout)
except Exception as e:
raise SystemExit(e)
def get_scores(self, instance_id=None):
# end eval session
url = f'{self.base_url}/result'
params = {"instance_id": instance_id}
try:
r = requests.get(url, params=params)
return r.json()
except Exception as e:
logger.error(f'Failed to retrieve scores: {e}')
return None
def get_source(self, instance_id: int,
extra_params: Optional[dict] = None) -> str:
url = f'{self.base_url}/src'
params = {"instance_id": instance_id}
if extra_params is not None:
for key in extra_params.keys():
params[key] = extra_params[key]
try:
r = requests.get(url, params=params)
except Exception as e:
logger.error(f'Failed to request a source segment: {e}')
return ""  # avoid referencing r when the request failed
return r.json()
def send_hypo(self, instance_id: int, hypo: str) -> None:
url = f'{self.base_url}/hypo'
params = {"instance_id": instance_id}
try:
requests.put(url, params=params, data=hypo.encode("utf-8"))
except Exception as e:
logger.error(f'Failed to send a translated segment: {e}')
def corpus_info(self):
url = f'{self.base_url}'
try:
r = requests.get(url)
except Exception as e:
logger.error(f'Failed to request corpus information: {e}')
return None  # avoid referencing r when the request failed
return r.json()
def start_client(args):
client = Client(args)
client.reset_scorer()
return client
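# --- Hedged usage sketch (illustrative only; assumes a SimulEval server is
# already listening on the hostname/port below, which are hypothetical) ---
if __name__ == '__main__':
    from argparse import Namespace
    demo_args = Namespace(hostname='localhost', port=12321, timeout=10)
    client = start_client(demo_args)          # resets the server-side scorer
    print(client.corpus_info())               # basic corpus metadata
    print(client.get_source(instance_id=0))   # first source segment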
|
import array_tools as at
from sklearn.decomposition import PCA
import numpy as np
from scipy import stats as st
import sys
def oe(mat):
n = len(mat)
tots = np.zeros(n-1)
counts = np.zeros(n-1)
for i in range(n):
for j in range(i):
observed = mat[i,j]
if observed != 0:
s = i - j
tots[s - 1] += observed
counts[s - 1] += 1
avgs = np.zeros(n-1)
for i, count in enumerate(counts):
if count != 0:
avgs[i] = tots[i]/count
oe_mat = np.zeros_like(mat)
for i in range(n):
for j in range(i):
observed = mat[i,j]
s = i-j
expected = avgs[s-1]
if expected != 0:
oe_mat[i,j] = observed/expected
return oe_mat
def cor(mat):
"""Correlation of rows with columns of mat"""
n = len(mat)
cor_mat = np.zeros_like(mat)
for i in range(n):
for j in range(i):
r, p = st.pearsonr(mat[i], mat[j])
cor_mat[i,j] = r
return cor_mat
def get_compartments(mat, enrichments=None, active=True):
"""From Lieberman-Aiden et al (2009)"""
oe_mat = oe(mat)
at.makeSymmetric(oe_mat)
cor_mat = cor(oe_mat)
at.makeSymmetric(cor_mat)
pca = PCA(n_components=1)
pca.fit(cor_mat)
scores = pca.fit_transform(cor_mat)[:,0]
#enforce positive score = active chromatin
if enrichments is not None:
r, p = st.pearsonr(scores, enrichments)
if active and r < 0:
scores = -scores
elif not active and r > 0:
scores = -scores
#normalize
max_val = max(scores)
min_val = -min(scores)
for i, score in enumerate(scores):
if score > 0:
scores[i] = score/max_val
else:
scores[i] = score/min_val
return scores
def load_enrichments(path, structure, column):
enrichments = np.array(np.loadtxt(path, dtype=object)[:,column], dtype=float)
bin_nums = structure.nonzero_abs_indices() + structure.chrom.minPos/structure.chrom.res
return enrichments[bin_nums]
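# --- Hedged usage sketch (illustrative only; not part of the original module) ---
# Runs the observed/expected step on a toy contact matrix. get_compartments()
# additionally requires the repo's array_tools helper and, optionally, an
# enrichment track to orient the sign of the PC1 scores.
if __name__ == '__main__':
    toy = np.array([[0., 5., 2., 1.],
                    [5., 0., 4., 2.],
                    [2., 4., 0., 6.],
                    [1., 2., 6., 0.]])
    print(oe(toy))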
|
### IMPORTS
### =================================
from player import Player
from random import randint
class Game:
def __init__(self, options, ui, testing=False):
self.player1 = Player(options.player_names[0], options.ship_counts)
self.player2 = Player(options.player_names[1], options.ship_counts)
self.winner = None
self.ui = ui
if not testing:
self.player_setup()
self.run_game()
if testing:
self.test_setup()
self.test_run_game()
def player_setup(self):
self.place_ships(self.player1)
self.place_ships(self.player2)
# Game flow
# - Prompts & setup
# - Initialize with prompt & setup results
def place_ships(self, player):
self.ui.display_screen_turn_start(player)
automatic_placement = self.ui.prompt_for_placement_option(player)
ships_to_place = []
for key in player.fleet.ships.keys():
current_ships = player.fleet.ships[key]
for ship in current_ships:
ships_to_place.append(ship)
for ship in ships_to_place:
if automatic_placement:
self.automatically_place_ships(ship, player)
else:
self.get_input_and_place_ship(ship, player)
def get_input_and_place_ship(self, ship, player):
self.ui.display_screen_player_board(player)
selected_coordinates = self.ui.prompt_for_ship_start_position()
possible_directions = player.player_board.get_possible_ship_placement_directions(ship, selected_coordinates)
# Check to make sure this is a valid cell and that there are possible directions to choose from
has_possible_directions = False
for direction in possible_directions:
if possible_directions[direction]:
has_possible_directions = True
break
if has_possible_directions:
selected_direction = self.ui.prompt_for_ship_direction(possible_directions)
player.place_ship(ship, selected_coordinates, selected_direction)
else:
print("Invalid selection, please try again")
self.get_input_and_place_ship(ship, player)
def automatically_place_ships(self, ship, player):
rows = "ABCDEFGHIJ"
columns = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
row_selection = rows[randint(0, 9)]
column_selection = columns[randint(0, 9)]
selected_coordinates = (row_selection, column_selection)
possible_directions = player.player_board.get_possible_ship_placement_directions(ship, selected_coordinates)
possible_directions_filtered = [key for key in possible_directions.keys() if possible_directions[key]]
if len(possible_directions_filtered) == 0:
self.automatically_place_ships(ship, player)
else:
selected_direction = possible_directions_filtered[randint(0, len(possible_directions_filtered)-1)]
player.place_ship(ship, selected_coordinates, selected_direction)
def run_game(self):
while not self.winner:
self.player_turn(self.player1, self.player2)
if self.winner:
break
else:
self.player_turn(self.player2, self.player1)
self.end_game()
def player_turn(self, player, opponent, repeated=False):
if not repeated:
self.ui.display_screen_turn_start(player)
self.ui.display_screen_game(player)
attack_coordinates = self.ui.prompt_for_attack_coordinates()
result = opponent.player_board.check_for_hit(attack_coordinates)
if result == -1:
print("Something went wrong, try again")
self.player_turn(player, opponent, True)
else:
player.update_attack_results(attack_coordinates, result)
self.ui.display_screen_outcome(result)
self.check_for_winner()
def check_for_winner(self):
player1_lost = self.player1.check_for_defeat()
player2_lost = self.player2.check_for_defeat()
if player1_lost:
self.winner = self.player2
elif player2_lost:
self.winner = self.player1
def end_game(self):
self.ui.display_screen_winner(self.winner)
# - enter while loop
# - - Player one turn
# - - - Check for winner
# - - Player two turn
# - - - Check for winner
# - exit while loop when winner
# - display exit
# - prompt restart
def test_setup(self):
self.test_place_ships(self.player1)
self.test_place_ships(self.player2)
def test_run_game(self):
self.test_player_turns()
self.end_game()
def test_player_turns(self):
for letter in "ABCDE":
for number in [0, 1, 2, 3, 4]:
attack_coordinates = (letter, number)
result = self.player2.player_board.check_for_hit(attack_coordinates)
self.player1.update_attack_results(attack_coordinates, result)
print(self.player1.tracker_board)
print(self.player2.player_board)
self.check_for_winner()
if self.winner:
return
def test_place_ships(self, player):
row_names = "ABCDE"
row_index = 0
i = 0
ships_to_place = []
for key in player.fleet.ships.keys():
current_ships = player.fleet.ships[key]
for ship in current_ships:
ships_to_place.append(ship)
for ship in ships_to_place:
player.place_ship(ship, (row_names[i], row_index), "right")
i += 1
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Const Class
# this is an auto-generated file created by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.text
class ParagraphVertAlign(object):
"""
Const Class
These enumeration values are used to specify the vertical alignment of paragraphs.
See Also:
`API ParagraphVertAlign <https://api.libreoffice.org/docs/idl/ref/namespacecom_1_1sun_1_1star_1_1text_1_1ParagraphVertAlign.html>`_
"""
__ooo_ns__: str = 'com.sun.star.text'
__ooo_full_ns__: str = 'com.sun.star.text.ParagraphVertAlign'
__ooo_type_name__: str = 'const'
AUTOMATIC = 0
"""
In automatic mode, horizontal text is aligned to the baseline.
The same applies to text that is rotated 90°. Text that is rotated 270° is aligned to the center.
"""
BASELINE = 1
"""
The text is aligned to the baseline.
"""
TOP = 2
"""
The text is aligned to the top.
"""
CENTER = 3
"""
The text is aligned to the center.
"""
BOTTOM = 4
"""
The text is aligned to the bottom.
"""
__all__ = ['ParagraphVertAlign']
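# Hedged usage sketch: the constants are plain integers, so they can be assigned
# directly to a paragraph's vertical-alignment property, e.g. (hypothetical cursor object):
#   cursor.ParaVertAlignment = ParagraphVertAlign.CENTER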
|
'''
Set of objects that can be imported as custom losses.
Losses can be both pytorch specific and numpy specific.
'''
import numpy as np
import matplotlib.pyplot as plt
import torch
from utils import tools
def squared_error(x, x_hat, show_plot: bool = False, return_map: bool = True):
if isinstance(x, np.ndarray):
x_err = (x - x_hat)**2
if show_plot:
x_stats = tools.BatchStatistics(x)
x_hat_stats = tools.BatchStatistics(x_hat)
x_err_stats = tools.BatchStatistics(x_err)
fig, ax = plt.subplots(1, 3, figsize=(20,13))
ax[0].imshow(tools.unstandardize_batch(x), interpolation='nearest')
ax[0].set_title(f'x: [{x_stats.min:.2f}, {x_stats.max:.2f}]')
ax[1].imshow(tools.unstandardize_batch(x_hat), interpolation='nearest')
ax[1].set_title(f'x_hat: [{x_hat_stats.min:.2f}, {x_hat_stats.max:.2f}]')
ax[2].imshow(tools.unstandardize_batch(x_err))
ax[2].set_title(f'x_err: [{x_err_stats.min:.2f}, {x_err_stats.max:.2f}], mse: {np.mean(x_err):.2f}')
for i in range(len(ax)): ax[i].grid(False)
plt.show()
del fig, ax
if return_map:
# Return the *mean* of the squared error map (e.g. MSE)
# AND the error map itself
return np.mean(x_err), x_err
else:
return np.mean(x_err)
else:
raise TypeError('Only numpy arrays are supported')
def mse_loss(x, x_hat):
"""
Returns the MSE between an image and its reconstruction
INPUT:
x: Tensor (B, C, H, W) -> source image
x_hat: Tensor (B, C, H, W) -> reconstruction
OUTPUT:
mse_loss_sum: Tensor (B, 1) -> sum of the MSE for each image pair
"""
mse_loss = torch.nn.MSELoss(reduction='none')
recons_error = mse_loss(x, x_hat)
mse_loss_sum = torch.sum(recons_error, dim=(1, 2, 3))
return mse_loss_sum
def recons_probability(x_hat):
"""
Returns the sum of the probabilities of each pixel in the image
INPUT:
x_hat: Tensor (B, C, H, W) -> reconstruction
OUTPUT:
recons_probability: Tensor (B, 1) -> sum of the reconstruction
probabilities in the image
"""
probs = -torch.log(x_hat)
sum_probs = torch.sum(probs, dim=(1, 2, 3))
return sum_probs
def kl_divergence(mu, logvar):
"""
Returns the KLD of the sampled latent vector w.r.t. to a Standard Normal Gaussian
INPUT:
mu: Tensor (B, 1) -> mean of a Gaussian distribution q(z|x_test)
logvar: Tensor (B, 1) -> logvar of a Gaussian distribution q(z|x_test)
OUTPUT:
kld_loss: Tensor(B, 1) -> Kullback-Leibler divergence
"""
kld_loss = torch.mean(-0.5 * torch.sum(1 + logvar - mu ** 2 - logvar.exp(), dim=1), dim=0)
return kld_loss
def mixed_loss(x_hat, mu, logvar):
"""
Implements the mixed loss M1 in the Sintini, Kuntze paper
INPUT:
x_hat: Tensor (B, C, H, W) -> reconstruction
mu: Tensor (B, 1) -> mean of a Gaussian distribution q(z|x_test)
logvar: Tensor (B, 1) -> logvar of a Gaussian distribution q(z|x_test)
OUTPUT:
mixed_loss: Tensor
"""
recons_prob_loss = recons_probability(x_hat)
kld_loss = kl_divergence(mu, logvar)
mixed_loss = recons_prob_loss + kld_loss
return mixed_loss
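# --- Hedged usage sketch (illustrative only; not part of the original module) ---
if __name__ == '__main__':
    x = torch.rand(2, 3, 8, 8)         # fake batch of images
    x_hat = torch.rand(2, 3, 8, 8)     # fake reconstructions
    mu = torch.zeros(2, 4)             # latent means of q(z|x)
    logvar = torch.zeros(2, 4)         # latent log-variances of q(z|x)
    print(mse_loss(x, x_hat))          # one summed MSE value per image
    print(kl_divergence(mu, logvar))   # 0 for an exactly standard-normal posterior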
|
# Python tkinter hello world program
from tkinter import *
root = Tk()
a = Label(root, text ="Hello World")
a.pack()
root.mainloop()
|
# ---------------------------------------------------------------
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# This work is licensed under the License
# located at the root directory.
# ---------------------------------------------------------------
import json
import os, sys
import pickle
from os.path import join
from copy import deepcopy
from pathlib import Path
from sys import argv
import random
import dataclasses
from munch import Munch
import useful_utils
import offline_early_stop
import COSMO_utils
from useful_utils import SummaryWriter_withCSV, wandb_myinit
import torch.nn
import torch
import numpy as np
import signal
from data import CompDataFromDict
from params import CommandlineArgs, TrainCfg, ExperimentCfg
from train import train, alternate_training
def set_random_seeds(base_seed):
random.seed(base_seed)
np.random.seed(base_seed+7205) # 7205 is an arbitrary, randomly chosen offset added to the base seed
torch.random.manual_seed(base_seed+1000)
torch.cuda.manual_seed(base_seed+1001)
torch.backends.cudnn.deterministic = True
def main(args: CommandlineArgs):
train_cfg: TrainCfg = args.train
exp_cfg: ExperimentCfg = args.exp
set_random_seeds(base_seed=train_cfg.seed)
# init logging
writer = init_logging(args)
# load data
test_dataset, train_dataset, valid_dataset = load_data(args)
train_cfg.set_n_iter(len(train_dataset.data))
# training
with useful_utils.profileblock('complete training'):
if train_cfg.alternate_ys == 0:
### train both heads jointly ##
train(args, train_dataset, valid_dataset, test_dataset, writer)
else:
### alternate between heads ###
alternate_training(args, train_dataset, valid_dataset, test_dataset, writer)
# ----- Finalizing ------
# dump log to csv
writer.dump_to_csv(verbose=1, float_format=f'%.{args.exp.csv_precision}f')
writer.close()
# Indicate run has complete
COSMO_utils.run_bash(f'touch {join(exp_cfg.output_dir, "completed_training.touch")}')
# Process offline early stopping according to results at output_dir
early_stop_results_dict = process_offline_early_stopping(exp_cfg)
# Print results
print_results(early_stop_results_dict, exp_cfg)
# Delete temporary artifacts from output dir
clear_output_dir(exp_cfg)
print('Done.\n')
def print_results(early_stop_results_dict, exp_cfg):
from munch import munchify
early_stop_results_dict = munchify(early_stop_results_dict)
print('\n\n####################################')
if exp_cfg.report_imbalanced_metrics:
# E.g. Zappos
U = 100 * early_stop_results_dict.open_H_IMB_valid.metrics.unseen_open_acc_test
S = 100 * early_stop_results_dict.open_H_IMB_valid.metrics.seen_open_acc_test
H = 100 * early_stop_results_dict.open_H_IMB_valid.metrics.open_H_IMB_test
closed = 100 * early_stop_results_dict.AUC_open_valid.metrics.closed_acc_test
AUC = 100 * early_stop_results_dict.AUC_open_valid.metrics.AUC_open_test
print('Reporting IMbalanced metrics')
print(f'Unseen={U:.1f}, Seen={S:.1f}, Harmonic={H:.1f}, Closed={closed:.1f}, AUC={AUC:.1f}')
else:
# e.g. AO-CLEVr
U = 100 * early_stop_results_dict.open_H_valid.metrics.open_balanced_unseen_acc_test
S = 100 * early_stop_results_dict.open_H_valid.metrics.open_balanced_seen_acc_test
H = 100 * early_stop_results_dict.open_H_valid.metrics.open_H_test
closed = 100 * early_stop_results_dict.closed_balanced_acc_valid.metrics.closed_balanced_acc_test
print('Reporting Balanced metrics')
print(f'Unseen={U:.1f}, Seen={S:.1f}, Harmonic={H:.1f}, Closed={closed:.1f}')
print('####################################\n\n')
def init_logging(args):
exp_cfg: ExperimentCfg = args.exp
output_dir = Path(exp_cfg.output_dir)
if not exp_cfg.ignore_existing_output_contents and len(list(output_dir.iterdir())) > 0:
raise ValueError(f'Output directory {output_dir} is not empty')
args_dict = dataclasses.asdict(args)
if exp_cfg.use_wandb:
import wandb
wandb_myinit(project_name=exp_cfg.project_name, experiment_name=exp_cfg.experiment_name,
instance_name=exp_cfg.instance_name, config=args_dict, workdir=exp_cfg.output_dir,
username=exp_cfg.wandb_user)
# printing starts here - after initializing w&b
print('commandline was:')
print(' '.join(argv))
print(vars(args))
writer = SummaryWriter_withCSV(log_dir=exp_cfg.output_dir, suppress_tensorboard=True, wandb=exp_cfg.use_wandb)
writer.set_print_options(pandas_max_columns=500, pandas_max_width=200)
to_json(args_dict, exp_cfg.output_dir, filename='args.json')
return writer
def clear_output_dir(exp_cfg):
# Always delete dumped (per-epoch) logits when done, because it takes a lot of space
delete_dumped_logits(exp_cfg.output_dir)
# Delete dumped (per epoch) decisions if required
if exp_cfg.delete_dumped_preds:
print('Delete logging of per-epoch dumped predictions')
cmd = f'rm -rf {join(exp_cfg.output_dir, "dump_preds")}'
print(cmd)
COSMO_utils.run_bash(cmd)
def process_offline_early_stopping(exp_cfg: ExperimentCfg):
cfg_offline_early_stop = Munch()
cfg_offline_early_stop.dir = exp_cfg.output_dir
cfg_offline_early_stop.early_stop_metrics = 'open_H_valid,closed_balanced_acc_valid,open_H_IMB_valid,AUC_open_valid'
early_stop_results_dict = offline_early_stop.main(cfg_offline_early_stop)
if exp_cfg.use_wandb:
# dump each early_stop result to the current project
offline_early_stop.early_stop_results_to_wandb_summary(early_stop_results_dict)
# and save the dumped predictions at its epoch
offline_early_stop.dump_preds_at_early_stop(early_stop_results_dict, exp_cfg.output_dir, use_wandb=exp_cfg.use_wandb)
return early_stop_results_dict
def load_data(args: CommandlineArgs):
if args.data.metadata_from_pkl:
train_dataset, valid_dataset, test_dataset = load_pickled_metadata(args)
print('load data from PKL')
else:
train_dataset, valid_dataset, test_dataset = load_TMN_data(args)
print('load data using TMN project')
return test_dataset, train_dataset, valid_dataset
def to_json(args_dict, log_dir, filename):
args_json = os.path.join(log_dir, filename)
with open(args_json, 'w') as f:
json.dump(args_dict, f)
print(f'\nDump configuration to JSON file: {args_json}\n\n')
def SIGINT_KeyboardInterrupt_handler(sig, frame):
raise KeyboardInterrupt()
def load_TMN_data(args: CommandlineArgs):
import sys
sys.path.append('taskmodularnets')
import taskmodularnets.data.dataset as tmn_data
dict_data = dict()
for subset in ['train', 'val', 'test']:
dTMN = tmn_data.CompositionDatasetActivations(root=args.data.data_dir,
phase=subset,
split='compositional-split-natural')
# Add class attributes according to the current project API
dTMN.all_open_pairs, dTMN.seen_pairs = \
dTMN.pairs, dTMN.train_pairs
# Get TMN unseen pairs, because val/test_pairs include both seen and unseen pairs
dTMN.unseen_closed_val_pairs = list(set(dTMN.val_pairs).difference(dTMN.seen_pairs))
dTMN.unseen_closed_test_pairs = list(set(dTMN.test_pairs).difference(dTMN.seen_pairs))
dTMN.closed_unseen_pairs = dict(
train=[],
val=dTMN.unseen_closed_val_pairs,
test=dTMN.unseen_closed_test_pairs)[subset]
dict_data[f'{subset}'] = deepcopy(vars(dTMN))
train_dataset = CompDataFromDict(dict_data['train'], data_subset='train_data', data_dir=args.data.data_dir)
valid_dataset = CompDataFromDict(dict_data['val'], data_subset='val_data', data_dir=args.data.data_dir)
test_dataset = CompDataFromDict(dict_data['test'], data_subset='test_data', data_dir=args.data.data_dir)
print('Seen (train) pairs: ', train_dataset.seen_pairs)
print('Unseen (val) pairs: ', train_dataset.unseen_closed_val_pairs)
print('Unseen (test) pairs: ', train_dataset.unseen_closed_test_pairs)
return train_dataset, valid_dataset, test_dataset
def load_pickled_metadata(args: CommandlineArgs):
data_cfg = args.data
dataset_name = deepcopy(data_cfg['dataset_name'])
dataset_variant = deepcopy(data_cfg['dataset_variant'])
meta_path = Path(f"{data_cfg['data_dir']}/metadata_pickles")
random_state_path = Path(f"{data_cfg['data_dir']}/np_random_state_pickles")
meta_path = meta_path.expanduser()
dict_data = dict()
seen_seed = args.train.seed
for subset in ['train', 'valid', 'test']:
metadata_full_filename = meta_path / f"metadata_{dataset_name}__{dataset_variant}__comp_seed_{data_cfg['num_split']}__seen_seed_{seen_seed}__{subset}.pkl"
dict_data[f'{subset}'] = deepcopy(pickle.load(open(metadata_full_filename, 'rb')))
np_rnd_state_fname = random_state_path / f"np_random_state_{dataset_name}__{dataset_variant}__comp_seed_{data_cfg['num_split']}__seen_seed_{seen_seed}.pkl"
np_seed_state = pickle.load(open(np_rnd_state_fname, 'rb'))
np.random.set_state(np_seed_state)
train_dataset = CompDataFromDict(dict_data['train'], data_subset='train_data', data_dir=data_cfg['data_dir'])
valid_dataset = CompDataFromDict(dict_data['valid'], data_subset='val_data', data_dir=data_cfg['data_dir'])
test_dataset = CompDataFromDict(dict_data['test'], data_subset='test_data', data_dir=data_cfg['data_dir'])
print('Seen (train) pairs: ', train_dataset.seen_pairs)
print('Unseen (val) pairs: ', train_dataset.unseen_closed_val_pairs)
print('Unseen (test) pairs: ', train_dataset.unseen_closed_test_pairs)
return train_dataset, valid_dataset, test_dataset
def delete_dumped_logits(logdir):
# Delete dumped logits (the find command must actually be executed, not just built as a string)
COSMO_utils.run_bash(f"find {join(logdir, 'dump_preds')} -name 'logits*' -delete")
if __name__ == '__main__':
args = CommandlineArgs.get_args()
main(args)
|