# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Ce fichier définit le contexte-éditeur EdtCycle."""
from primaires.interpreteur.editeur import Editeur
from primaires.interpreteur.editeur.env_objet import EnveloppeObjet
from .edt_cycle import EdtCycle
class EdtCycles(Editeur):
"""Contexte-éditeur d'édition des cycles du prototype végétal.
"""
def __init__(self, pere, objet=None, attribut=None):
"""Constructeur de l'éditeur"""
Editeur.__init__(self, pere, objet, attribut)
self.ajouter_option("n", self.opt_ajouter_cycle)
self.ajouter_option("d", self.opt_supprimer_cycle)
def opt_ajouter_cycle(self, arguments):
"""Ajout d'un cycle.
Syntaxe : /n <age> <nom du cycle>
"""
prototype = self.objet
arguments = arguments.strip()
if not arguments:
self.pere << "|err|Précisez l'âge minimum du cycle suivi " \
"d'un espace et de son nom.|ff|"
return
args = arguments.split(" ")
age = args[0]
nom = " ".join(args[1:])
if not nom:
self.pere << "|err|Précisez un nom.|ff|"
return
try:
age = int(age)
assert age >= 0
except (ValueError, AssertionError):
self.pere << "|err|Age invalide.|ff|"
return
if prototype.est_cycle(nom):
self.pere << "|err|Ce nom de cycle est déjà utilisé.|ff|"
return
prototype.ajouter_cycle(nom, age)
self.actualiser()
def opt_supprimer_cycle(self, arguments):
"""Suppression d'un cycle.
Syntaxe : /d <nom du cycle>
"""
prototype = self.objet
arguments = arguments.strip()
if not arguments:
self.pere << "|err|Précisez le nom du cycle.|ff|"
return
nom = arguments
if not prototype.est_cycle(nom):
self.pere << "|err|Ce nom de cycle n'existe pas.|ff|"
return
if len(prototype.cycles) == 1:
self.pere << "|err|Vous ne pouvez supprimer tous les " \
"cycles d'un prototype.|ff|"
return
prototype.supprimer_cycle(nom)
self.actualiser()
def accueil(self):
"""Message d'accueil du contexte"""
prototype = self.objet
msg = "| |tit|" + "Edition des cycles dans le " \
"prototype {}".format(prototype.cle).ljust(76)
msg += "|ff||\n" + self.opts.separateur + "\n"
msg += self.aide_courte + "\n"
msg += "Cycles définis :"
# Iterate over the cycles
cycles = prototype.cycles
lignes = []
for cycle in cycles:
s = "s" if len(cycle.periodes) > 1 else ""
lignes.append("{:<20} : {} période{s}".format(
cycle.nom, len(cycle.periodes), s=s))
if cycles:
msg += "\n\n " + "\n ".join(lignes)
else:
msg += "\n\n Aucun"
return msg
def interpreter(self, msg):
"""Interprétation de l'éditeur."""
prototype = self.objet
try:
cycle = prototype.get_cycle(msg.strip())
except ValueError:
self.pere << "|err|Cycle introuvable.|ff|"
return
enveloppe = EnveloppeObjet(EdtCycle, cycle, None)
enveloppe.parent = self
contexte = enveloppe.construire(self.pere)
self.migrer_contexte(contexte)
|
from cas.extensions import db
import sqlalchemy
class UserPermission(db.Model):
__tablename__ = 'user_permission'
permission_id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), nullable=False)
domain = db.Column(db.String(64), nullable=False)
tag = db.Column(db.String(32), nullable=False)
available = db.Column(db.Boolean, nullable=False)
def tags(username, domain):
"""Return the set of available permission tags for the given user and domain."""
permissions = UserPermission.query.filter(sqlalchemy.and_(UserPermission.username == username,
UserPermission.domain == domain)).all()
results = set()
for permission in permissions:
if permission.available is False:
continue
results.add(permission.tag)
return results
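# Hedged usage sketch (assumes a configured Flask app bound to `db` and a
# populated user_permission table; the names below are illustrative only):
#
#     with app.app_context():
#         allowed = tags('alice', 'example.com')   # e.g. {'read', 'write'}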
|
# crawls dbpedia and populates a neo4j database
from recgraph.settings import PROJECT_PATH, GRAPHDB
import os,datetime,subprocess,time,sys
from py2neo import neo4j, node, rel
from recgraph.transformer.neo4jToGexf import neo4jToGexf
# what file has the links
links_file = os.path.join(PROJECT_PATH, "data/page_links_en.nt")
# helpers
def stripLink(link):
link = link[1:-1]
return link
def getNameFromLink(link):
name = link.replace("http://dbpedia.org/resource/", "")
return name
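# Hedged illustration (not from the original repo): page_links_en.nt is an
# N-Triples dump with one "<subject> <predicate> <object> ." statement per
# line, which is why the crawler below takes split()[0] and split()[2]:
#
#   line = '<http://dbpedia.org/resource/Music> <http://dbpedia.org/ontology/wikiPageWikiLink> <http://dbpedia.org/resource/Art> .'
#   stripLink(line.split()[0])                    # -> 'http://dbpedia.org/resource/Music'
#   getNameFromLink(stripLink(line.split()[2]))   # -> 'Art'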
# iteratively expand the set of found pages
def crawlDbPedia(starting_pages, search_depth):
found_pages = set(starting_pages)
found_links = set([])
# after this loop has run, found_links and found_pages should both be fully populated to appropriate search depth
for i in range(search_depth):
is_last_step = (i == search_depth-1) # on the last step of crawling, we only look for links between pages already found
if is_last_step:
expand = False
else:
expand = True
# crawl one step deeper
found_pages, found_links = crawlOneStepDeeper(found_pages, expand)
return found_pages, found_links
def crawlOneStepDeeper(starting_pages, expand=True):
found_pages = set(starting_pages.copy()) # just making sure found_pages is distinct object from starting_pages
found_links = set([])
# iterate through file
with open(links_file, "r") as f:
total = 172308908 # total number of links in dbpedia set
i = 0
start_time = datetime.datetime.now()
for line in f:
splitted = line.split()
pageA = stripLink(splitted[0])
pageB = stripLink(splitted[2])
# if expand=True, then we include any link which starts in starting set even if it goes outside
if expand:
if (pageA in starting_pages):
found_links.add((pageA, pageB))
found_pages.add(pageA)
found_pages.add(pageB)
# elif expand=False, then we only include links which are between nodes already in starting set
else:
if (pageA in starting_pages) and (pageB in starting_pages):
found_links.add((pageA, pageB))
found_pages.add(pageA)
found_pages.add(pageB)
# profiling
if not (i % 1000000):
print pageA + " - " + pageB
now = datetime.datetime.now()
time_delta = (now - start_time).total_seconds()
percent_complete = float(i) / float(total)
if percent_complete:
percent_remaining = 1 - percent_complete
total_seconds_eta = time_delta * (1/percent_complete)
eta_seconds_remaining = total_seconds_eta * percent_remaining
print "---------"
print "percent complete: " + str(percent_complete)
print "elapsed: " + str(time_delta)
print "remaining: " + str(eta_seconds_remaining)
else:
print "..."
# increment
i+=1
print "=================="
print "FOUND PAGES: " + str(len(found_pages))
print "FOUND LINKS: " + str(len(found_links))
print "=================="
return found_pages, found_links
def saveToNeo4jBatch(found_pages, found_links):
url_index = GRAPHDB.get_or_create_index(neo4j.Node, "UrlIndex")
pageToNode = {}
for page in found_pages:
name = getNameFromLink(page)
node = GRAPHDB.create({"name":name, "url":page})[0]
# TODO: add labels based on infobox
pageToNode[page] = node
# save links
i = 0
batch = neo4j.WriteBatch(GRAPHDB)
for link in found_links:
pageA, pageB = link
nodeA = pageToNode.get(pageA)
nodeB = pageToNode.get(pageB)
batch.get_or_create_path(nodeA, "links_to", nodeB)
if not i % 100:
print "i: " + str(i)
batch.run()
batch = neo4j.WriteBatch(GRAPHDB)
i += 1
batch.run()
print "total num links created: " + str(i)
def saveToNeo4j(found_pages, found_links):
print "++: saving to neo4j"
title_index = GRAPHDB.get_or_create_index(neo4j.Node, "TitleIndex")
i=0
for link in found_links:
pageA, pageB = link
nodeA = title_index.get_or_create("title", pageA, {"title": pageA})
nodeB = title_index.get_or_create("title", pageB, {"title": pageB})
GRAPHDB.create((nodeA, "links_to", nodeB))
if not i % 100:
print i
i += 1
def crawlAndSave(starting_pages, search_depth):
# crawl
found_pages, found_links = crawlDbPedia(starting_pages, search_depth)
# save to neo4j
saveToNeo4jBatch(found_pages, found_links)
# TODO: this function doesn't work
def backupDatabaseAndClear(db_title):
# move old database to output_path and then clear it
old_db_path = "/usr/local/Cellar/neo4j/2.1.2/libexec/data"
output_dir = os.path.join(PROJECT_PATH, "data/output")
output_path = os.path.join(output_dir, db_title + ".neo4j")
script_path = os.path.join(PROJECT_PATH, "scripts/saveandclearneo.sh")
# execute in subprocess
my_env = os.environ.copy()
my_env["PATH"] = "/usr/local/bin:" + my_env["PATH"]
args = [script_path, output_path]
subprocess.Popen(args, env=my_env)
# just in case neo4j stopped for some reason
print "++: sleeping 10 and then restarting neo4j ..."
# TODO: neo4j won't start for some reason :(
time.sleep(20)
start_script_path = os.path.join(PROJECT_PATH, "scripts/startneo.sh")
subprocess.Popen([start_script_path], env=my_env)
##################################################################################
# to crawl
# TO_CRAWL = [
# "Music","Art","Book","Mathematics","Science","History","Sex","Philosophy",
# "United_States","Computer_Science","Sport","Drug","Coffee","Palantir","Refrigerator"
# ]
TO_CRAWL = [
"Music"
]
def multiPopulate():
search_depth = 2
errors = []
success = []
for crawl_set in TO_CRAWL:
try:
startings_pages = ["http://dbpedia.org/resource/" + crawl_set]
crawlAndSave(startings_pages, search_depth)
success.append(crawl_set)
backupDatabaseAndClear(crawl_set)
except Exception as e:
print "e: " + e.message
errors.append(crawl_set)
print "$$$$$$$$$$$$$$$$$$$$$$$"
for s in success:
print "success: " + s
for e in errors:
print "error: " + e
def populateNeo4j(name, search_depth):
startings_pages = ["http://dbpedia.org/resource/" + name]
crawlAndSave(startings_pages, search_depth)
if __name__ == "__main__":
crawl_name = sys.argv[1]
print "PROCESSING: " + crawl_name
search_depth = 3
populateNeo4j(crawl_name, search_depth)
# make .gefx file
filename = crawl_name + str(search_depth) + ".gexf"
gexf_output_path = os.path.join(os.path.join(PROJECT_PATH, "data"), filename)
neo4jToGexf(gexf_output_path)
|
from django.contrib import admin
from .models import Plant, Room, House, Symbol, WateredAtEntry
admin.site.register(House)
admin.site.register(Room)
admin.site.register(Plant)
admin.site.register(Symbol)
admin.site.register(WateredAtEntry)
# https://faust.readthedocs.io/en/latest/userguide/agents.html#concurrency
import faust
import ssl
import certifi
from dotenv import load_dotenv
import os
import aiohttp
import logging
from faust import Worker
load_dotenv(dotenv_path="../.env")
ssl_context = ssl.create_default_context()
ssl_context.load_verify_locations(cafile=certifi.where())
kafka_broker = os.getenv("kafka_broker")
kafka_user = os.getenv("kafka_user")
kafka_password = os.getenv("kafka_password")
app = faust.App(
id='concurrency',
broker=kafka_broker,
broker_credentials=faust.SASLCredentials(
username=kafka_user,
password=kafka_password,
ssl_context=ssl_context
),
store='rocksdb://',
version=1,
topic_replication_factor=3
)
class MyRecord(faust.Record):
value: int
topic = app.topic('concurrency', value_type=MyRecord)
# concurrency=10 starts ten instances of this agent that consume from the topic concurrently
@app.agent(topic, concurrency=10)
async def mytask(records):
session = aiohttp.ClientSession()
async for record in records:
await session.get(f'http://www.google.com/?#q={record.value}')
print("consumed")
@app.timer(interval=60)
async def producer():
for i in range(10):
await topic.send(value=MyRecord(value=i))
print("Message Send")
if __name__ == '__main__':
worker = Worker(app=app, loglevel=logging.INFO)
worker.execute_from_commandline()
|
import json
def read_json_file(json_file):
with open(json_file) as f:
data = json.load(f)
return data
import unittest
# Palindromic Substrings
input_value = "aba"
output_value = 4
class funcTest(unittest.TestCase):
def test(self):
solution = Solution()
self.assertEqual(solution.countSubstrings(input_value), output_value)
class Solution:
def countSubstrings(self, s):
"""
:type s: str
:rtype: int
"""
n = len(s)
num_pand = 0
# there are 2*n - 1 expansion centers: even i is a single character, odd i is the gap between two characters
for i in range(2 * n - 1):
left = i // 2
right = left + i % 2
# expand around the center while the substring remains a palindrome
while left >= 0 and right < n and s[left] == s[right]:
num_pand += 1
left -= 1
right += 1
print(left, right)
return num_pand
if __name__ == '__main__':
unittest.main(argv=['first-arg-is-ignored'], exit=False) # extra arguments so this also runs inside a Jupyter notebook
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
import tensorflow as tf
from lib.models import nn
from vocab import Vocab
from lib.models.parsers.base_parser import BaseParser
#***************************************************************
class Parser(BaseParser):
""""""
def print_once(self, *args, **kwargs):
if self.print_stuff:
print(*args, **kwargs)
#=============================================================
def __call__(self, dataset, moving_params=None):
""""""
self.print_stuff = dataset.name == "Trainset"
self.multi_penalties = {k: float(v) for k, v in map(lambda s: s.split(':'), self.multitask_penalties.split(';'))} if self.multitask_penalties else {}
self.multi_layers = {k: set(map(int, v.split(','))) for k, v in map(lambda s: s.split(':'), self.multitask_layers.split(';'))} if self.multitask_layers else {}
# todo use variables for vocabs this indexing is stupid
vocabs = dataset.vocabs
inputs = dataset.inputs
targets = dataset.targets
step = dataset.step
num_pos_classes = len(vocabs[1])
num_rel_classes = len(vocabs[2])
num_srl_classes = len(vocabs[3])
num_pred_classes = len(vocabs[4])
# need to add batch dim for batch size 1
# inputs = tf.Print(inputs, [tf.shape(inputs), tf.shape(targets)], summarize=10)
reuse = (moving_params is not None)
self.tokens_to_keep3D = tf.expand_dims(tf.to_float(tf.greater(inputs[:,:,0], vocabs[0].ROOT)), 2)
self.sequence_lengths = tf.reshape(tf.reduce_sum(self.tokens_to_keep3D, [1, 2]), [-1,1])
self.n_tokens = tf.reduce_sum(self.sequence_lengths)
self.moving_params = moving_params
if self.use_elmo:
print("using elmo w/ reuse = ", reuse)
with tf.variable_scope(tf.get_variable_scope(), reuse=reuse):
# # with tf.variable_scope('elmo', reuse=reuse):
# from lib.models.bilm import ElmoLSTMEncoder
# elmo_encoder = ElmoLSTMEncoder(dataset)
word_inputs = dataset.elmo_encoder.embed_text()
else:
if self.add_to_pretrained:
word_inputs, pret_inputs = vocabs[0].embedding_lookup(inputs[:, :, 0], inputs[:, :, 1],
moving_params=self.moving_params)
word_inputs += pret_inputs
else:
word_inputs = vocabs[0].embedding_lookup(inputs[:, :, 1], moving_params=self.moving_params)
if self.word_l2_reg > 0:
unk_mask = tf.expand_dims(tf.to_float(tf.greater(inputs[:,:,1], vocabs[0].UNK)), 2)
word_loss = self.word_l2_reg*tf.nn.l2_loss((word_inputs - pret_inputs) * unk_mask)
inputs_to_embed = [word_inputs]
if self.add_pos_to_input:
pos_inputs = vocabs[1].embedding_lookup(inputs[:, :, 2], moving_params=self.moving_params)
inputs_to_embed.append(pos_inputs)
embed_inputs = self.embed_concat(*inputs_to_embed)
if self.add_predicates_to_input:
predicate_embed_inputs = vocabs[4].embedding_lookup(inputs[:, :, 3], moving_params=self.moving_params)
embed_inputs = tf.concat([embed_inputs, predicate_embed_inputs], axis=2)
top_recur = tf.nn.dropout(embed_inputs, self.input_dropout if self.moving_params is None else 1.0)
attn_weights_by_layer = {}
hidden_size = self.num_heads * self.head_size
self.print_once("n_recur: ", self.n_recur)
self.print_once("num heads: ", self.num_heads)
self.print_once("cnn dim: ", self.cnn_dim)
self.print_once("relu hidden size: ", self.relu_hidden_size)
self.print_once("head size: ", self.head_size)
self.print_once("cnn2d_layers: ", self.cnn2d_layers)
self.print_once("cnn_dim_2d: ", self.cnn_dim_2d)
self.print_once("multitask penalties: ", self.multi_penalties)
self.print_once("multitask layers: ", self.multi_layers)
self.print_once("sampling schedule: ", self.sampling_schedule)
# maps joint predicate/pos indices to pos indices
preds_to_pos_map = np.zeros([num_pred_classes, 1], dtype=np.int32)
if self.joint_pos_predicates:
for pred_label, pred_idx in vocabs[4].iteritems():
if pred_label in vocabs[4].SPECIAL_TOKENS:
postag = pred_label
else:
_, postag = pred_label.split('/')
pos_idx = vocabs[1][postag]
preds_to_pos_map[pred_idx] = pos_idx
# todo these are actually wrong because of nesting
bilou_constraints = np.zeros((num_srl_classes, num_srl_classes))
if self.transition_statistics:
with open(self.transition_statistics, 'r') as f:
for line in f:
tag1, tag2, prob = line.split("\t")
bilou_constraints[vocabs[3][tag1], vocabs[3][tag2]] = float(prob)
###### stuff for multitask attention ######
multitask_targets = {}
mask2d = self.tokens_to_keep3D * tf.transpose(self.tokens_to_keep3D, [0, 2, 1])
# compute targets adj matrix
shape = tf.shape(targets[:, :, 1])
batch_size = shape[0]
bucket_size = shape[1]
i1, i2 = tf.meshgrid(tf.range(batch_size), tf.range(bucket_size), indexing="ij")
idx = tf.stack([i1, i2, targets[:, :, 1]], axis=-1)
adj = tf.scatter_nd(idx, tf.ones([batch_size, bucket_size]), [batch_size, bucket_size, bucket_size])
adj = adj * mask2d
# roots_mask = 1. - tf.expand_dims(tf.eye(bucket_size), 0)
# create parents targets
parents = targets[:, :, 1]
multitask_targets['parents'] = parents
# create children targets
multitask_targets['children'] = parents
# create grandparents targets
i1, i2 = tf.meshgrid(tf.range(batch_size), tf.range(bucket_size), indexing="ij")
idx = tf.reshape(tf.stack([i1, tf.nn.relu(parents)], axis=-1), [-1, 2])
grandparents = tf.reshape(tf.gather_nd(parents, idx), [batch_size, bucket_size])
multitask_targets['grandparents'] = grandparents
grand_idx = tf.stack([i1, i2, grandparents], axis=-1)
grand_adj = tf.scatter_nd(grand_idx, tf.ones([batch_size, bucket_size]), [batch_size, bucket_size, bucket_size])
grand_adj = grand_adj * mask2d
# whether to condition on gold or predicted parse
use_gold_parse = self.inject_manual_attn and not ((moving_params is not None) and self.gold_attn_at_train)
sample_prob = self.get_sample_prob(step)
if use_gold_parse and (moving_params is None):
use_gold_parse_tensor = tf.less(tf.random_uniform([]), sample_prob)
else:
use_gold_parse_tensor = tf.equal(int(use_gold_parse), 1)
print("use gold parse (%s): " % dataset.name, use_gold_parse)
##### Functions for predicting parse, Dozat-style #####
def get_parse_logits(parse_inputs):
if self.full_parse or (self.role_loss_penalty == 0. and self.predicate_loss_penalty == 0.):
######## do parse-specific stuff (arcs) ########
with tf.variable_scope('MLP', reuse=reuse):
dep_mlp, head_mlp = self.MLP(parse_inputs, self.class_mlp_size + self.attn_mlp_size, n_splits=2)
dep_arc_mlp, dep_rel_mlp = dep_mlp[:, :, :self.attn_mlp_size], dep_mlp[:, :, self.attn_mlp_size:]
head_arc_mlp, head_rel_mlp = head_mlp[:, :, :self.attn_mlp_size], head_mlp[:, :, self.attn_mlp_size:]
with tf.variable_scope('Arcs', reuse=reuse):
arc_logits = self.bilinear_classifier(dep_arc_mlp, head_arc_mlp)
arc_logits = tf.cond(tf.less_equal(tf.shape(tf.shape(arc_logits))[0], 2),
lambda: tf.reshape(arc_logits, [batch_size, 1, 1]), lambda: arc_logits)
# arc_logits = tf.Print(arc_logits, [tf.shape(arc_logits), tf.shape(tf.shape(arc_logits))])
return arc_logits, dep_rel_mlp, head_rel_mlp
else:
return dummy_parse_logits()
def dummy_parse_logits():
dummy_rel_mlp = tf.zeros([batch_size, bucket_size, self.class_mlp_size])
return tf.zeros([batch_size, bucket_size, bucket_size]), dummy_rel_mlp, dummy_rel_mlp
arc_logits, dep_rel_mlp, head_rel_mlp = dummy_parse_logits()
###########################################
with tf.variable_scope("crf", reuse=reuse): # to share parameters, change scope here
if self.viterbi_train:
transition_params = tf.get_variable("transitions", [num_srl_classes, num_srl_classes], initializer=tf.constant_initializer(bilou_constraints))
elif self.viterbi_decode:
transition_params = tf.get_variable("transitions", [num_srl_classes, num_srl_classes], initializer=tf.constant_initializer(bilou_constraints), trainable=False)
else:
transition_params = None
self.print_once("using transition params: ", transition_params)
assert (self.cnn_layers != 0 and self.n_recur != 0) or self.num_blocks == 1, "num_blocks should be 1 if cnn_layers or n_recur is 0"
assert self.dist_model == 'bilstm' or self.dist_model == 'transformer', 'Model must be either "transformer" or "bilstm"'
for b in range(self.num_blocks):
with tf.variable_scope("block%d" % b, reuse=reuse): # to share parameters, change scope here
# Project for CNN input
if self.cnn_layers > 0:
with tf.variable_scope('proj0', reuse=reuse):
top_recur = self.MLP(top_recur, self.cnn_dim, n_splits=1)
####### 1D CNN ########
with tf.variable_scope('CNN', reuse=reuse):
kernel = 3
for i in xrange(self.cnn_layers):
with tf.variable_scope('layer%d' % i, reuse=reuse):
if self.cnn_residual:
top_recur += self.CNN(top_recur, 1, kernel, self.cnn_dim, self.recur_keep_prob, self.info_func)
top_recur = nn.layer_norm(top_recur, reuse)
else:
top_recur = self.CNN(top_recur, 1, kernel, self.cnn_dim, self.recur_keep_prob, self.info_func)
if self.cnn_residual and self.n_recur > 0:
top_recur = nn.layer_norm(top_recur, reuse)
# if layer is set to -2, these are used
pos_pred_inputs = top_recur
self.print_once("Setting pos_pred_inputs to: %s" % top_recur.name)
predicate_inputs = top_recur
self.print_once("Setting predicate_inputs to: %s" % top_recur.name)
# Project for Tranformer / residual LSTM input
if self.n_recur > 0:
if self.dist_model == "transformer":
with tf.variable_scope('proj1', reuse=reuse):
top_recur = self.MLP(top_recur, hidden_size, n_splits=1)
if self.lstm_residual and self.dist_model == "bilstm":
with tf.variable_scope('proj1', reuse=reuse):
top_recur = self.MLP(top_recur, (2 if self.recur_bidir else 1) * self.recur_size, n_splits=1)
# if layer is set to -1, these are used
if self.pos_layer == -1:
pos_pred_inputs = top_recur
self.print_once("Setting pos_pred_inputs to: %s" % top_recur.name)
if self.predicate_layer == -1:
predicate_inputs = top_recur
self.print_once("Setting predicate_inputs to: %s" % top_recur.name)
##### Transformer #######
if self.dist_model == 'transformer':
with tf.variable_scope('Transformer', reuse=reuse):
top_recur = nn.add_timing_signal_1d(top_recur)
for i in range(self.n_recur):
with tf.variable_scope('layer%d' % i, reuse=reuse):
manual_attn = None
hard_attn = False
# todo make this into gold_at_train and gold_at_test flags... + scheduled sampling
if 'parents' in self.multi_layers.keys() and i in self.multi_layers['parents'] and (use_gold_parse or self.full_parse):
# if use_gold_parse:
# manual_attn = adj
# # manual_attn = tf.Print(manual_attn, [tf.shape(manual_attn), manual_attn], "gold attn", summarize=100)
# if self.full_parse:
arc_logits, dep_rel_mlp, head_rel_mlp = get_parse_logits(top_recur)
# # arc_logits = tf.Print(arc_logits, [tf.shape(arc_logits), arc_logits], "arc_logits", summarize=100)
# # if not use_gold_parse:
# # # compute full parse and set it here
manual_attn = tf.cond(use_gold_parse_tensor, lambda: adj, lambda: tf.nn.softmax(arc_logits))
this_layer_capsule_heads = self.num_capsule_heads if i > 0 else 0
if 'children' in self.multi_layers.keys() and i in self.multi_layers['children']:
this_layer_capsule_heads = 1
if use_gold_parse:
manual_attn = tf.transpose(adj, [0, 2, 1])
self.print_once("Layer %d capsule heads: %d" % (i, this_layer_capsule_heads))
# if use_gold_parse:
# if 'parents' in self.multi_layers.keys() and i in self.multi_layers['parents']:
# manual_attn = adj
# elif 'grandparents' in self.multi_layers.keys() and i in self.multi_layers['grandparents']:
# manual_attn = grand_adj
# elif 'children' in self.multi_layers.keys() and i in self.multi_layers['children']:
# manual_attn = tf.transpose(adj, [0, 2, 1])
# only at test time
if moving_params is not None and self.hard_attn:
hard_attn = True
# if 'children' in self.multi_layers.keys() and i in self.multi_layers['children'] and \
# self.multi_penalties['children'] != 0.:
# this_layer_capsule_heads = 1
# else:
top_recur, attn_weights = self.transformer(top_recur, hidden_size, self.num_heads,
self.attn_dropout, self.relu_dropout, self.prepost_dropout,
self.relu_hidden_size, self.info_func, self.ff_kernel, reuse,
this_layer_capsule_heads, manual_attn, hard_attn)
# head x batch x seq_len x seq_len
attn_weights_by_layer[i] = tf.transpose(attn_weights, [1, 0, 2, 3])
if i == self.pos_layer:
pos_pred_inputs = top_recur
self.print_once("Setting pos_pred_inputs to: %s" % top_recur.name)
if i == self.predicate_layer:
predicate_inputs = top_recur
self.print_once("Setting predicate_inputs to: %s" % top_recur.name)
if i == self.parse_layer:
parse_pred_inputs = top_recur
self.print_once("Setting parse_pred_inputs to: %s" % top_recur.name)
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
if self.n_recur > 0:
top_recur = nn.layer_norm(top_recur, reuse)
##### BiLSTM #######
if self.dist_model == 'bilstm':
with tf.variable_scope("BiLSTM", reuse=reuse):
for i in range(self.n_recur):
with tf.variable_scope('layer%d' % i, reuse=reuse):
if self.lstm_residual:
top_recur_curr, _ = self.RNN(top_recur)
top_recur += top_recur_curr
# top_recur = nn.layer_norm(top_recur, reuse)
else:
top_recur, _ = self.RNN(top_recur)
# if self.lstm_residual and self.n_recur > 0:
# top_recur = nn.layer_norm(top_recur, reuse)
if self.num_blocks > 1:
top_recur = nn.layer_norm(top_recur, reuse)
if self.pos_layer == self.n_recur - 1:
pos_pred_inputs = top_recur
self.print_once("Setting pos_pred_inputs to: %s" % top_recur.name)
if self.predicate_layer == self.n_recur - 1:
predicate_inputs = top_recur
self.print_once("Setting predicate_inputs to: %s" % top_recur.name)
if self.parse_layer == self.n_recur - 1:
parse_pred_inputs = top_recur
self.print_once("Setting parse_pred_inputs to: %s" % top_recur.name)
####### 2D CNN ########
# if self.cnn2d_layers > 0:
# with tf.variable_scope('proj2', reuse=reuse):
# top_recur_rows, top_recur_cols = self.MLP(top_recur, self.cnn_dim_2d//2, n_splits=2)
# # top_recur_rows, top_recur_cols = self.MLP(top_recur, self.cnn_dim // 4, n_splits=2)
#
# top_recur_rows = nn.add_timing_signal_1d(top_recur_rows)
# top_recur_cols = nn.add_timing_signal_1d(top_recur_cols)
#
# with tf.variable_scope('2d', reuse=reuse):
# # set up input (split -> 2d)
# input_shape = tf.shape(embed_inputs)
# bucket_size = input_shape[1]
# top_recur_rows = tf.tile(tf.expand_dims(top_recur_rows, 1), [1, bucket_size, 1, 1])
# top_recur_cols = tf.tile(tf.expand_dims(top_recur_cols, 2), [1, 1, bucket_size, 1])
# top_recur_2d = tf.concat([top_recur_cols, top_recur_rows], axis=-1)
#
# # apply num_convs 2d conv layers (residual)
# for i in xrange(self.cnn2d_layers): # todo pass this in
# with tf.variable_scope('CNN%d' % i, reuse=reuse):
# top_recur_2d += self.CNN(top_recur_2d, kernel, kernel, self.cnn_dim_2d, # todo pass this in
# self.recur_keep_prob if i < self.cnn2d_layers - 1 else 1.0,
# self.info_func if i < self.cnn2d_layers - 1 else tf.identity)
# top_recur_2d = nn.layer_norm(top_recur_2d, reuse)
#
# with tf.variable_scope('Arcs', reuse=reuse):
# arc_logits = self.MLP(top_recur_2d, 1, n_splits=1)
# arc_logits = tf.squeeze(arc_logits, axis=-1)
# arc_output = self.output_svd(arc_logits, targets[:, :, 1])
# if moving_params is None:
# predictions = targets[:, :, 1]
# else:
# predictions = arc_output['predictions']
#
# # Project each predicted (or gold) edge into head and dep rel representations
# with tf.variable_scope('MLP', reuse=reuse):
# # flat_labels = tf.reshape(predictions, [-1])
# original_shape = tf.shape(arc_logits)
# batch_size = original_shape[0]
# bucket_size = original_shape[1]
# # num_classes = len(vocabs[2])
# i1, i2 = tf.meshgrid(tf.range(batch_size), tf.range(bucket_size), indexing="ij")
# targ = i1 * bucket_size * bucket_size + i2 * bucket_size + predictions
# idx = tf.reshape(targ, [-1])
# conditioned = tf.gather(tf.reshape(top_recur_2d, [-1, self.cnn_dim_2d]), idx)
# conditioned = tf.reshape(conditioned, [batch_size, bucket_size, self.cnn_dim_2d])
# dep_rel_mlp, head_rel_mlp = self.MLP(conditioned, self.class_mlp_size + self.attn_mlp_size, n_splits=2)
# else:
# if arc_logits already computed, return them. else if arc_loss_penalty != 0, compute them, else dummy
# arc_logits, dep_rel_mlp, head_rel_mlp = tf.cond(tf.greater(self.arc_loss_penalty, 0.0),
# lambda: tf.cond(tf.equal(int(self.full_parse), 1),
# lambda: (arc_logits, dep_rel_mlp, head_rel_mlp),
# lambda: get_parse_logits(parse_pred_inputs)),
# lambda: dummy_parse_logits())
if not self.full_parse and self.role_loss_penalty == 0. and self.predicate_loss_penalty == 0.0:
arc_logits, dep_rel_mlp, head_rel_mlp = get_parse_logits(parse_pred_inputs)
arc_output = self.output_svd(arc_logits, targets[:, :, 1])
if moving_params is None:
predictions = targets[:, :, 1]
else:
predictions = arc_output['predictions']
parse_probs = arc_output['probabilities']
######## do parse-specific stuff (rels) ########
def get_parse_rel_logits():
with tf.variable_scope('Rels', reuse=reuse):
rel_logits, rel_logits_cond = self.conditional_bilinear_classifier(dep_rel_mlp, head_rel_mlp, num_rel_classes, predictions)
return rel_logits, rel_logits_cond
rel_logits, rel_logits_cond = tf.cond(tf.not_equal(self.rel_loss_penalty, 0.0),
lambda: get_parse_rel_logits(),
lambda: (tf.constant(0.), tf.constant(0.)))
rel_output = self.output(rel_logits, targets[:, :, 2], num_rel_classes)
rel_output['probabilities'] = tf.cond(tf.not_equal(self.rel_loss_penalty, 0.0),
lambda: self.conditional_probabilities(rel_logits_cond),
lambda: rel_output['probabilities'])
# def compute_rels_output():
# with tf.variable_scope('Rels', reuse=reuse):
# rel_logits, rel_logits_cond = self.conditional_bilinear_classifier(dep_rel_mlp, head_rel_mlp, len(vocabs[2]), predictions)
# rel_output = self.output(rel_logits, targets[:, :, 2])
# rel_output['probabilities'] = self.conditional_probabilities(rel_logits_cond)
# return rel_output
# def dummy_compute_rels_output():
multitask_losses = {}
multitask_correct = {}
multitask_loss_sum = 0
# multitask_parents_preds = arc_logits
##### MULTITASK ATTN LOSS ######
if not self.full_parse:
for l, attn_weights in attn_weights_by_layer.iteritems():
# attn_weights is: head x batch x seq_len x seq_len
# idx into attention heads
attn_idx = self.num_capsule_heads
cap_attn_idx = 0
if 'parents' in self.multi_layers.keys() and l in self.multi_layers['parents']:
outputs = self.output(attn_weights[attn_idx], multitask_targets['parents'])
parse_probs = tf.nn.softmax(attn_weights[attn_idx])
# todo this is a bit of a hack
attn_idx += 1
loss = self.multi_penalties['parents'] * outputs['loss']
multitask_losses['parents%s' % l] = loss
multitask_correct['parents%s' % l] = outputs['n_correct']
multitask_loss_sum += loss
if 'grandparents' in self.multi_layers.keys() and l in self.multi_layers['grandparents']:
outputs = self.output(attn_weights[attn_idx], multitask_targets['grandparents'])
attn_idx += 1
loss = self.multi_penalties['grandparents'] * outputs['loss']
multitask_losses['grandparents%s' % l] = loss
multitask_loss_sum += loss
if 'children' in self.multi_layers.keys() and l in self.multi_layers['children']:
outputs = self.output_transpose(attn_weights[cap_attn_idx], multitask_targets['children'])
cap_attn_idx += 1
loss = self.multi_penalties['children'] * outputs['loss']
multitask_losses['children%s' % l] = loss
multitask_loss_sum += loss
######## Predicate detection ########
# predicate_targets = tf.where(tf.greater(targets[:, :, 3], self.predicate_true_start_idx), tf.ones([batch_size, bucket_size], dtype=tf.int32),
# tf.zeros([batch_size, bucket_size], dtype=tf.int32))
predicate_targets = inputs[:, :, 3]
def compute_predicates(predicate_input, name):
with tf.variable_scope(name, reuse=reuse):
predicate_classifier_mlp = self.MLP(predicate_input, self.predicate_pred_mlp_size, n_splits=1)
with tf.variable_scope('SRL-Predicates-Classifier', reuse=reuse):
predicate_classifier = self.MLP(predicate_classifier_mlp, num_pred_classes, n_splits=1)
output = self.output_predicates(predicate_classifier, predicate_targets, vocabs[4].predicate_true_start_idx)
return output
# aux_trigger_loss = tf.constant(0.)
# if self.train_aux_trigger_layer:
# aux_trigger_output = compute_predicates(aux_trigger_inputs, 'SRL-Triggers-Aux', False)
# aux_trigger_loss = self.aux_trigger_penalty * aux_trigger_output['loss']
predicate_targets_binary = tf.where(tf.greater(predicate_targets, vocabs[4].predicate_true_start_idx),
tf.ones_like(predicate_targets), tf.zeros_like(predicate_targets))
# predicate_targets_binary = tf.Print(predicate_targets_binary, [predicate_targets], "predicate targets", summarize=200)
def dummy_predicate_output():
return {
'loss': 0.0,
'predicate_predictions': predicate_targets_binary,
'predictions': predicate_targets,
'logits': 0.0,
# 'gold_trigger_predictions': tf.transpose(predictions, [0, 2, 1]),
'count': 0.,
'correct': 0.,
'targets': 0,
}
predicate_output = tf.cond(tf.greater(self.predicate_loss_penalty, 0.0),
lambda: compute_predicates(predicate_inputs, 'SRL-Predicates'),
dummy_predicate_output)
if moving_params is None or self.add_predicates_to_input or self.predicate_loss_penalty == 0.0:
# gold
predicate_predictions = predicate_targets_binary
else:
# predicted
predicate_predictions = predicate_output['predicate_predictions']
# predicate_predictions = tf.Print(predicate_predictions, [predicate_targets], "predicate_targets", summarize=50)
# predicate_predictions = tf.Print(predicate_predictions, [predicate_predictions], "predicate_predictions", summarize=50)
######## POS tags ########
def compute_pos(pos_input, pos_target):
with tf.variable_scope('POS-Classifier', reuse=reuse):
pos_classifier = self.MLP(pos_input, num_pos_classes, n_splits=1)
output = self.output(pos_classifier, pos_target)
return output
pos_target = targets[:,:,0]
pos_loss = tf.constant(0.)
pos_correct = tf.constant(0.)
pos_preds = pos_target
if self.train_pos:
pos_output = compute_pos(pos_pred_inputs, pos_target)
pos_loss = self.pos_penalty * pos_output['loss']
pos_correct = pos_output['n_correct']
pos_preds = pos_output['predictions']
elif self.joint_pos_predicates:
pos_preds = tf.squeeze(tf.nn.embedding_lookup(preds_to_pos_map, predicate_output['predictions']), -1)
pos_correct = tf.reduce_sum(tf.cast(tf.equal(pos_preds, pos_target), tf.float32) * tf.squeeze(self.tokens_to_keep3D, -1))
elif self.add_pos_to_input:
pos_correct = tf.reduce_sum(tf.cast(tf.equal(inputs[:,:,2], pos_target), tf.float32) * tf.squeeze(self.tokens_to_keep3D, -1))
pos_preds = inputs[:,:,2]
######## do SRL-specific stuff (rels) ########
def compute_srl(srl_target):
with tf.variable_scope('SRL-MLP', reuse=reuse):
predicate_role_mlp = self.MLP(top_recur, self.predicate_mlp_size + self.role_mlp_size, n_splits=1)
predicate_mlp, role_mlp = predicate_role_mlp[:,:,:self.predicate_mlp_size], predicate_role_mlp[:, :, self.predicate_mlp_size:]
with tf.variable_scope('SRL-Arcs', reuse=reuse):
# gather just the triggers
# predicate_predictions: batch x seq_len
# gathered_predicates: num_triggers_in_batch x 1 x self.trigger_mlp_size
# role mlp: batch x seq_len x self.role_mlp_size
# gathered roles: need a (bucket_size x self.role_mlp_size) role representation for each trigger,
# i.e. a (num_triggers_in_batch x bucket_size x self.role_mlp_size) tensor
predicate_gather_indices = tf.where(tf.equal(predicate_predictions, 1))
# predicate_gather_indices = tf.Print(predicate_gather_indices, [predicate_predictions, tf.shape(predicate_gather_indices), tf.shape(predicate_predictions)], "predicate gather shape", summarize=200)
gathered_predicates = tf.expand_dims(tf.gather_nd(predicate_mlp, predicate_gather_indices), 1)
tiled_roles = tf.reshape(tf.tile(role_mlp, [1, bucket_size, 1]), [batch_size, bucket_size, bucket_size, self.role_mlp_size])
gathered_roles = tf.gather_nd(tiled_roles, predicate_gather_indices)
# now multiply them together to get (num_triggers_in_batch x bucket_size x num_srl_classes) tensor of scores
srl_logits = self.bilinear_classifier_nary(gathered_predicates, gathered_roles, num_srl_classes)
srl_logits_transpose = tf.transpose(srl_logits, [0, 2, 1])
srl_output = self.output_srl_gather(srl_logits_transpose, srl_target, predicate_predictions, transition_params if self.viterbi_train else None)
return srl_output
def compute_srl_simple(srl_target):
with tf.variable_scope('SRL-MLP', reuse=reuse):
# srl_logits are batch x seq_len x num_classes
srl_logits = self.MLP(top_recur, num_srl_classes, n_splits=1)
# srl_target is targets[:, :, 3:]: batch x seq_len x targets
output = self.output(srl_logits, srl_target)
srl_output = {f: output[f] for f in ['loss', 'probabilities', 'predictions', 'correct', 'count']}
srl_output['logits'] = srl_logits
srl_output['transition_params'] = tf.constant(0.)
srl_output['correct'] = output['n_correct']
return srl_output
srl_targets = targets[:, :, 3:]
if self.role_loss_penalty == 0:
# num_triggers = tf.reduce_sum(tf.cast(tf.where(tf.equal(predicate_targets_binary, 1)), tf.int32))
srl_output = {
'loss': tf.constant(0.),
'probabilities': tf.constant(0.), # tf.zeros([num_triggers, bucket_size, num_srl_classes]),
'predictions': tf.reshape(tf.transpose(srl_targets, [0, 2, 1]), [-1, bucket_size]), # tf.zeros([num_triggers, bucket_size]),
'logits': tf.constant(0.), # tf.zeros([num_triggers, bucket_size, num_srl_classes]),
'correct': tf.constant(0.),
'count': tf.constant(0.)
}
elif self.srl_simple_tagging:
srl_output = compute_srl_simple(srl_targets)
else:
srl_output = compute_srl(srl_targets)
predicate_loss = self.predicate_loss_penalty * predicate_output['loss']
srl_loss = self.role_loss_penalty * srl_output['loss']
arc_loss = self.arc_loss_penalty * arc_output['loss']
rel_loss = self.rel_loss_penalty * rel_output['loss']
# if this is a parse update, then actual parse loss equal to sum of rel loss and arc loss
# actual_parse_loss = tf.cond(tf.equal(int(self.full_parse), 1), lambda: tf.add(rel_loss, arc_loss), lambda: tf.constant(0.))
# actual_parse_loss = tf.cond(do_parse_update, lambda: tf.add(rel_loss, arc_loss), lambda: tf.constant(0.))
parse_combined_loss = rel_loss + arc_loss
# if this is a parse update and the parse proportion is not one, then no srl update. otherwise,
# srl update equal to sum of srl_loss, predicate_loss
srl_combined_loss = srl_loss + predicate_loss
# actual_srl_loss = tf.cond(tf.logical_and(do_parse_update, tf.not_equal(self.parse_update_proportion, 1.0)), lambda: tf.constant(0.), lambda: srl_combined_loss)
output = {}
output['multitask_losses'] = multitask_losses
output['probabilities'] = tf.tuple([parse_probs,
rel_output['probabilities']])
output['predictions'] = tf.stack([arc_output['predictions'],
rel_output['predictions']])
output['correct'] = arc_output['correct'] * rel_output['correct']
output['tokens'] = arc_output['tokens']
output['n_correct'] = tf.reduce_sum(output['correct'])
output['n_tokens'] = self.n_tokens
output['accuracy'] = output['n_correct'] / output['n_tokens']
output['loss'] = srl_combined_loss + parse_combined_loss + multitask_loss_sum + pos_loss
# output['loss'] = srl_loss + predicate_loss + actual_parse_loss
# output['loss'] = actual_srl_loss + arc_loss + rel_loss
if self.word_l2_reg > 0:
output['loss'] += word_loss
output['embed'] = embed_inputs
output['recur'] = top_recur
# output['dep_arc'] = dep_arc_mlp
# output['head_dep'] = head_arc_mlp
output['dep_rel'] = dep_rel_mlp
output['head_rel'] = head_rel_mlp
output['arc_logits'] = arc_logits
output['rel_logits'] = rel_logits
output['rel_loss'] = rel_loss # rel_output['loss']
output['log_loss'] = arc_loss # arc_output['log_loss']
output['2cycle_loss'] = arc_output['2cycle_loss']
output['roots_loss'] = arc_output['roots_loss']
output['svd_loss'] = arc_output['svd_loss']
output['n_cycles'] = arc_output['n_cycles']
output['len_2_cycles'] = arc_output['len_2_cycles']
output['srl_loss'] = srl_loss
output['srl_preds'] = srl_output['predictions']
output['srl_probs'] = srl_output['probabilities']
output['srl_logits'] = srl_output['logits']
output['srl_correct'] = srl_output['correct']
output['srl_count'] = srl_output['count']
output['transition_params'] = transition_params if transition_params is not None else tf.constant(bilou_constraints)
output['srl_predicates'] = predicate_predictions
output['srl_predicate_targets'] = predicate_targets_binary
output['predicate_loss'] = predicate_loss
output['predicate_count'] = predicate_output['count']
output['predicate_correct'] = predicate_output['correct']
output['predicate_preds'] = predicate_output['predictions']
output['sample_prob'] = sample_prob
output['pos_loss'] = pos_loss
output['pos_correct'] = pos_correct
output['pos_preds'] = pos_preds
# transpose and softmax attn weights
attn_weights_by_layer_softmaxed = {k: tf.transpose(tf.nn.softmax(v), [1, 0, 2, 3]) for k, v in
attn_weights_by_layer.iteritems()}
output['attn_weights'] = attn_weights_by_layer_softmaxed
output['attn_correct'] = multitask_correct
return output
#=============================================================
def prob_argmax(self, parse_probs, rel_probs, tokens_to_keep, n_cycles=-1, len_2_cycles=-1):
""""""
start_time = time.time()
parse_preds, roots_lt, roots_gt = self.parse_argmax(parse_probs, tokens_to_keep, n_cycles, len_2_cycles)
rel_probs = rel_probs[np.arange(len(parse_preds)), parse_preds]
rel_preds = self.rel_argmax(rel_probs, tokens_to_keep)
total_time = time.time() - start_time
return parse_preds, rel_preds, total_time, roots_lt, roots_gt
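# ---------------------------------------------------------------------------
# Illustrative helper (not part of the original model): a minimal numpy sketch
# of the gold-parent adjacency matrix that __call__ builds with tf.scatter_nd,
# assuming 0-based head indices and ignoring the padding mask.
def _dense_parent_adjacency_sketch(parents):
    """parents: int array of shape [batch, seq_len]; returns adj of shape
    [batch, seq_len, seq_len] with adj[b, i, p] = 1 where p = parents[b, i]."""
    batch_size, bucket_size = parents.shape
    adj = np.zeros((batch_size, bucket_size, bucket_size), dtype=np.float32)
    b_idx = np.arange(batch_size)[:, None]
    t_idx = np.arange(bucket_size)[None, :]
    adj[b_idx, t_idx, parents] = 1.0
    return adj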
|
from structure_factor.tapered_estimators_isotropic import (
allowed_k_norm_bartlett_isotropic,
)
k = allowed_k_norm_bartlett_isotropic(dimension=2, radius=20, nb_values=6)
print(k)
|
from ray.autoscaler._private import cli_logger
import pytest
def test_colorful_mock_with_style():
cm = cli_logger._ColorfulMock()
with cm.with_style("some_style") as c:
assert c.color_choice("text") == "text"
assert c.another_color_choice("more_text") == "more_text"
def test_colorful_mock_random_function():
cm = cli_logger._ColorfulMock()
assert cm.bold("abc") == "abc"
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
import os
import time
import numpy as np
import pandas as pd
from .data import check_data, set_defaults_for_data, add_intercept, FORMAT_NAME_RULES, FORMAT_NAME_DCP
from .cross_validation import validate_folds, validate_cvindices, validate_fold_id, to_fold_id, is_inner_fold_id
RAW_DATA_OUTCOME_COL_IDX = 0
RAW_DATA_FILE_VALID_EXT = {'csv', 'data'}
RAW_HELPER_FILE_VALID_EXT = {'csv', 'helper'}
RAW_WEIGHTS_FILE_VALID_EXT = {'csv', 'weights'}
PROCESSED_DATA_FILE_RDATA = {'rdata'}
PROCESSED_DATA_FILE_PICKLE = {'p', 'pk', 'pickle'}
PROCESSED_DATA_FILE_VALID_EXT = PROCESSED_DATA_FILE_RDATA.union(PROCESSED_DATA_FILE_PICKLE)
#### reading raw data from disk
def load_raw_data_from_disk(dataset_file, helper_file = None, weights_file = None, include_intercept = False):
"""
Parameters
----------
dataset_file comma separated file ending in ".data" or "_data.csv"
contains the training data, stored as a table with N+1 rows and d+1 columns
column 1 contains the outcome variable entries; must be (-1,1) or (0,1)
column 2 to d+1 contain the d input variables
row 1 contains unique names for the outcome variable, and the input variables
helper_file comma separated file
contains additional information on each of the columns in dataset file:
- header
- is_outcome
- is_partition #todo fill this out
- type (boolean, categorical, numeric, ordinal)
- ordering (for ordinal variables)
if no file is provided or file does not exist, then the function will look for
a file with the same name as dataset_file but that ends in either
".helper" (if dataset_file ends in ".data")
"helper.csv" (if dataset_file ends in "_data.csv")
weights_file comma separated file
weights stored as a table with N rows and 1 column
all sample weights must be non-negative
if no file is provided or file does not exist, then the function will look for
a file with the same name as dataset_file but that ends in either
".weights" (if dataset_file ends in ".data")
"_weights.csv" (if dataset_file ends in "_data.csv")
include_intercept if True then an intercept is added to the X matrix
Returns
-------
dictionary containing training data for a binary classification problem with fields
- 'X' N x P matrix of features (numpy.ndarray) with a column of 1s for the '(Intercept)'
- 'Y' N x 1 vector of labels (+1/-1) (numpy.ndarray)
- 'variable_names' list of strings containing the names of each feature (list)
- 'outcome_name' string containing the name of the output (optional)
- 'sample_weights' N x 1 vector of sample weights, must all be positive
"""
dataset_file, helper_file, weights_file = _validate_raw_data_files(dataset_file, helper_file, weights_file)
has_helper_file = helper_file is not None and os.path.isfile(helper_file)
has_weights_file = weights_file is not None and os.path.isfile(weights_file)
# load data
df = pd.read_csv(dataset_file, sep = ',')
raw_data = df.values
data_headers = list(df.columns.values)
N = raw_data.shape[0]
# load variable types and orderings
if has_helper_file:
helper_df = pd.read_csv(helper_file, sep=',')
# get variable types
outcome_column_idx = int(np.flatnonzero(helper_df['is_outcome'].values))
variable_dict = helper_df[~helper_df['is_outcome']].set_index('header').to_dict()
variable_types = variable_dict['type']
# get ordering for variables
ordering_dict = helper_df[helper_df['type'] == 'ordinal'].set_index('header').to_dict()
variable_orderings = ordering_dict['ordering']
for var_name in ordering_dict['ordering'].keys():
variable_orderings[var_name] = variable_orderings[var_name].split('|')
# get variable partitions
if 'is_partition' in helper_df.columns:
partitions = (helper_df['header'][helper_df['is_partition']]).values.tolist()
else:
raise NotImplementedError()
outcome_column_idx = RAW_DATA_OUTCOME_COL_IDX
partitions = []
variable_orderings = {}
variable_types = _infer_variable_types_from_data(raw_data)
# load weights from disk
if has_weights_file:
sample_weights = pd.read_csv(weights_file, sep=',', header=None)
sample_weights = sample_weights.values
if len(sample_weights) == N + 1:
sample_weights = sample_weights[1:]
sample_weights = np.array(sample_weights, dtype = float).flatten()
else:
sample_weights = np.ones(N, dtype = float)
# setup X and X_names
X_col_idx = [j for j in range(raw_data.shape[1]) if j != outcome_column_idx]
variable_names = [data_headers[j] for j in X_col_idx]
X = raw_data[:, X_col_idx]
# setup Y vector and Y_name
Y = raw_data[:, outcome_column_idx]
Y_name = data_headers[outcome_column_idx]
Y[Y == 0] = -1
Y = np.array(Y, dtype = int).flatten()
#todo add this in as a required field
meta = {
'read_date': time.strftime("%d/%m/%y", time.localtime()),
'dataset_file': dataset_file,
'helper_file': helper_file,
'weights_file': weights_file,
}
data = {
'X': X,
'Y': Y,
'partitions': partitions,
'sample_weights': sample_weights,
'variable_names': variable_names,
'variable_types': variable_types,
'variable_orderings': variable_orderings,
'outcome_name': Y_name,
}
# insert a column of ones to X for the intercept
if include_intercept:
data = add_intercept(data)
# assert check_data(data)
return data
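# Hedged usage sketch (file names are hypothetical): for 'adult_data.csv' the
# loader will also look for 'adult_helper.csv' and 'adult_weights.csv' in the
# same directory, per _validate_raw_data_files below.
#
#     data = load_raw_data_from_disk('adult_data.csv', include_intercept = True)
#     X, Y, names = data['X'], data['Y'], data['variable_names']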
def _validate_raw_data_files(dataset_file, helper_file = None, weights_file = None):
if not os.path.isfile(dataset_file):
raise IOError('could not find dataset_file: %s' % dataset_file)
dataset_dir = os.path.dirname(dataset_file) + '/'
file_header, _, file_extension = (os.path.basename(dataset_file)).rpartition('.')
file_extension = file_extension.lower()
assert file_extension in RAW_DATA_FILE_VALID_EXT
#check for helper file
if helper_file is not None:
if not os.path.isfile(helper_file):
raise IOError('could not find helper_file: %s' % helper_file)
else:
for check_ext in RAW_HELPER_FILE_VALID_EXT:
if check_ext == 'csv':
check_file = dataset_dir + file_header.rpartition('_data')[0] + '_helper.csv'
else:
check_file = dataset_dir + file_header + '.' + check_ext
if os.path.isfile(check_file):
helper_file = check_file
break
# check for weights file
if weights_file is not None:
if not os.path.isfile(weights_file):
raise IOError('could not find weights_file: %s' % weights_file)
assert os.path.isfile(weights_file), ("%s does not exist" % weights_file)
else:
for check_ext in RAW_WEIGHTS_FILE_VALID_EXT:
if check_ext == 'csv':
check_file = dataset_dir + file_header.rpartition('_data')[0] + '_weights.csv'
else:
check_file = dataset_dir + file_header + '.' + check_ext
if os.path.isfile(check_file):
weights_file = check_file
break
return dataset_file, helper_file, weights_file
def _infer_variable_types_from_data(raw_data):
"""
infers variable types from the raw data matrix
the first column is the outcome variable and can be (0,1) or (-1,1)
the other columns are the input variables:
- numeric by default
- boolean if values are 0,1 or true/false
- categorical if entries are text
"""
raise NotImplementedError()
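# Hedged sketch of the inference rules described above; this is not the
# project's implementation (the stub above raises NotImplementedError). Column 0
# is treated as the outcome, as elsewhere in this module, and the result is
# keyed by column index because raw_data carries no headers.
def _infer_variable_types_from_data_sketch(raw_data):
    variable_types = {}
    for j in range(1, raw_data.shape[1]):
        values = set(raw_data[:, j].tolist())
        if values <= {0, 1, True, False}:
            variable_types[j] = 'boolean'
        elif all(isinstance(v, str) for v in values):
            variable_types[j] = 'categorical'
        else:
            variable_types[j] = 'numeric'
    return variable_types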
#### saving data to disk
def save_data(file_name, data, cvindices = None, overwrite = False, stratified = True, check_save = True):
if overwrite is False:
if os.path.isfile(file_name):
raise IOError('file %s already exist on disk' % file_name)
try:
file_extension = file_name.rsplit('.')[-1]
except IndexError:
raise ValueError('could not find extension in file_name (%r)', file_name)
file_type = file_extension.lower()
assert file_type in PROCESSED_DATA_FILE_VALID_EXT, \
'unsupported extension %s\nsupported extensions: %s' % (file_type, ", ".join(PROCESSED_DATA_FILE_VALID_EXT))
data = set_defaults_for_data(data)
assert check_data(data)
if cvindices is not None:
cvindices = validate_cvindices(cvindices, stratified)
if file_type in PROCESSED_DATA_FILE_RDATA:
saved_file_flag = _save_data_as_rdata(file_name, data, cvindices)
elif file_type in PROCESSED_DATA_FILE_PICKLE:
saved_file_flag = _save_data_as_pickle(file_name, data, cvindices)
assert os.path.isfile(file_name), 'could not locate saved file on disk'
if check_save:
loaded_data, loaded_cvindices = load_processed_data(file_name)
assert np.all(loaded_data['X'] == data['X'])
assert loaded_cvindices.keys() == cvindices.keys()
return saved_file_flag
def _save_data_as_pickle(file_name, data, cvindices):
try:
import cPickle as pickle
except:
import pickle
data = set_defaults_for_data(data)
file_contents = {
'data': data,
'cvindices': cvindices
}
with open(file_name, 'wb') as outfile:
pickle.dump(file_contents, outfile, protocol = pickle.HIGHEST_PROTOCOL)
return True
def _save_data_as_rdata(file_name, data, cvindices):
import rpy2.robjects as rn
from .rpy2_helper import r_assign, r_save_to_disk
from rpy2.robjects import pandas2ri
data = set_defaults_for_data(data)
assert check_data(data)
fields_to_save = ["format", "Y", "sample_weights", "outcome_name", "variable_names", "variable_types", "variable_orderings"]
if data['format'] == FORMAT_NAME_RULES:
fields_to_save += ["feature_groups", "feature_names", "feature_types", "feature_orderings",
"feature_group_limits"]
elif data['format'] == FORMAT_NAME_DCP:
fields_to_save += ['partitions']
try:
for k in fields_to_save:
r_assign(data[k], k)
except:
from dcptree.debug import ipsh
ipsh()
r_assign(cvindices, "cvindices")
# feature matrix
var_type_to_col_type = {'boolean': 'bool',
'categorical': 'str',
'numeric': 'float',
'ordinal': 'str',
}
col_types = {n: var_type_to_col_type[data['variable_types'][n]] for n in data['variable_names']}
pandas2ri.activate()
X_df = pd.DataFrame(data = data['X'])
X_df.columns = data['variable_names']
X_df = X_df.astype(col_types)
rn.r.assign('X', X_df)
# test set
has_test_set = ('X_test' in data) and ('Y_test' in data) and ('sample_weights_test' in data)
if has_test_set:
X_test_df = pd.DataFrame(data = data['X_test'])
X_test_df.columns = data['variable_names']
X_test_df = X_test_df.astype(col_types)
rn.r.assign('X_test', pandas2ri.py2ri(X_test_df))
r_assign(data['Y_test'], 'Y_test')
r_assign(data['sample_weights_test'], 'sample_weights_test')
else:
rn.reval(
"""
X_test = matrix(data=NA, nrow = 0, ncol = ncol(X));
Y_test = matrix(data=NA, nrow = 0, ncol = 1);
sample_weights_test = matrix(data=1.0, nrow = 0, ncol = 1);
"""
)
pandas2ri.deactivate()
variables_to_save = fields_to_save + ["cvindices", "X", "X_test", "Y_test", "sample_weights_test"]
r_save_to_disk(file_name, variables_to_save)
return True
#### loading data from disk
def load_processed_data(file_name):
assert os.path.isfile(file_name), \
'file %s not found' % file_name
try:
file_extension = file_name.rsplit('.')[-1]
file_type = file_extension.lower()
except IndexError:
raise ValueError('could not find extension in file_name (%r)', file_name)
assert file_type in PROCESSED_DATA_FILE_VALID_EXT, \
'unsupported file type; supported file types: %s' % ", ".join(PROCESSED_DATA_FILE_VALID_EXT)
if file_type == 'rdata':
data, cvindices = _load_processed_data_rdata(file_name)
else:
data, cvindices = _load_processed_data_pickle(file_name)
assert check_data(data)
data = set_defaults_for_data(data)
cvindices = validate_cvindices(cvindices)
return data, cvindices
def _load_processed_data_pickle(file_name):
try:
import cPickle as pickle
except ImportError:
import pickle
with open(file_name, 'rb') as infile:
file_contents = pickle.load(infile)
assert 'data' in file_contents
assert 'cvindices' in file_contents
return file_contents['data'], file_contents['cvindices']
def _load_processed_data_rdata(file_name):
import rpy2.robjects as rn
rn.reval("data = new.env(); load('%s', data)" % file_name)
r_data = rn.r.data
data_fields = list(rn.r.data.keys())
loaded_data = dict()
for xf, yf, sw in [('X', 'Y', 'sample_weights'),
('X_test', 'Y_test', 'sample_weights_test'),
('X_validation', 'Y_validation', 'sample_weights_validation')]:
if xf in data_fields and yf in data_fields and len(np.array(r_data[yf])) > 0:
loaded_data[yf] = np.array(r_data[yf]).flatten()
loaded_data[yf][loaded_data[yf] == 0] = -1
loaded_data[xf] = np.array(r_data[xf])
if loaded_data[xf].shape[1] == len(loaded_data[yf]):
loaded_data[xf] = np.transpose(loaded_data[xf])
if sw in data_fields:
loaded_data[sw] = np.array(r_data[sw]).flatten()
if 'variable_names' in data_fields:
loaded_data['variable_names'] = np.array(rn.r.data['variable_names']).tolist()
elif 'X_headers' in data_fields:
loaded_data['variable_names'] = np.array(rn.r.data['X_headers']).tolist()
elif 'X_header' in data_fields:
loaded_data['variable_names'] = np.array(rn.r.data['X_header']).tolist()
if 'outcome_name' in data_fields:
loaded_data['outcome_name'] = np.array(r_data['outcome_name'])[0]
elif 'Y_headers' in data_fields:
loaded_data['outcome_name'] = np.array(r_data['Y_headers'])[0]
elif 'Y_header' in data_fields:
loaded_data['outcome_name'] = np.array(r_data['Y_header'])[0]
if 'format' in data_fields:
loaded_data['format'] = np.array(r_data['format'])[0]
if 'partitions' in data_fields:
loaded_data['partitions'] = np.array(rn.r.data['partitions']).tolist()
cvindices = _load_cvindices_from_rdata(file_name)
data = set_defaults_for_data(loaded_data)
return data, cvindices
#### loading cvindices from processed file on disk ####
def load_cvindices_from_disk(fold_file):
"""
Reads cross-validation indices from various file types including:
- CSV file containing exactly N data points
- RData file containing cvindices object
- mat file containing cvindices object
:param fold_file:
:return: dictionary containing folds
keys have the form
"""
# load fold indices from disk
if not os.path.isfile(fold_file):
raise IOError('could not find fold file on disk: %s' % fold_file)
if fold_file.lower().endswith('.csv'):
folds = pd.read_csv(fold_file, sep=',', header=None)
folds = validate_folds(folds=folds)
fold_id = to_fold_id(total_folds = max(folds), replicate_idx = 1)
cvindices = {fold_id: folds}
if fold_file.lower().endswith('.rdata'):
cvindices = _load_cvindices_from_rdata(data_file=fold_file)
if fold_file.lower().endswith('.mat'):
raise NotImplementedError()
# import scipy.io as sio
# cvindices = sio.loadmat(file_name = fold_file,
# matlab_compatible=False,
# chars_as_strings=True,
# squeeze_me=True,
# variable_names=['cvindices'],
# verify_compressed_data_integrity=False)
cvindices = validate_cvindices(cvindices)
return cvindices
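# Example usage (a minimal sketch; the file path below is hypothetical and the exact
# fold-id naming depends on to_fold_id()/validate_fold_id(), not shown here):
#
#     cvindices = load_cvindices_from_disk('data/example_processed.RData')
#     for fold_id, folds in cvindices.items():   # keys look like 'K05N01' per the code below
#         print(fold_id, len(folds), folds.max())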
def _load_folds_from_rdata(data_file, fold_id):
"""
(internal) returns folds from RData file in the pipeline
:param data_file:
:param fold_id:
:return:
"""
if os.path.isfile(data_file):
file_extension = data_file.rsplit('.')[-1]
assert file_extension.lower() == 'rdata', 'unsupported file extension: %r' % file_extension
else:
raise IOError('could not find data_file: %s' % data_file)
fold_id = validate_fold_id(fold_id)
r_variables = "data_file='%s'; fold_id='%s'" % (data_file, fold_id)
import rpy2.robjects as rn
from .rpy2_helper import r_clear
if is_inner_fold_id(fold_id):
r_cmd = """raw_data = new.env()
load(data_file, envir=raw_data)
folds = raw_data$cvindices[[fold_id]][,1]
"""
else:
r_cmd = """raw_data = new.env()
load(data_file, envir=raw_data)
folds = raw_data$cvindices[[substring(fold_id, 1, 3)]][, as.double(substr(fold_id, 5, 6))]
"""
rn.reval(r_variables)
rn.reval(r_cmd)
folds = np.array(rn.r['folds'])
folds = validate_folds(folds, fold_id)
r_clear()
return folds
def _load_cvindices_from_rdata(data_file):
"""
    (internal) loads the cvindices object stored in an RData file in the pipeline
:param data_file:
"""
if not os.path.isfile(data_file):
raise IOError('could not find data_file: %s' % data_file)
import rpy2.robjects as rn
from .rpy2_helper import r_clear
r_variables = "data_file = '%s'" % data_file
r_cmd = """
raw_data = new.env()
load(data_file, envir=raw_data)
all_fold_ids = names(raw_data$cvindices)
list2env(raw_data$cvindices, globalenv())
remove(raw_data, cvindices);
"""
rn.reval(r_variables)
rn.reval(r_cmd)
all_fold_ids = np.array(rn.r['all_fold_ids'])
cvindices = {}
max_fold_value = 0
for key in all_fold_ids:
try:
folds = np.array(rn.r[key])
if (folds.shape[0] == 1) or (folds.shape[1] == 1):
folds = folds.flatten()
max_fold_value = max(max_fold_value, np.max(folds))
cvindices[key] = folds
except Exception:
pass
#cast as unsigned integers to save storage space
if max_fold_value < 255:
storage_type = 'uint8'
elif max_fold_value < 65535:
storage_type = 'uint16'
elif max_fold_value < 4294967295:
storage_type = 'uint32'
else:
storage_type = 'uint64'
for key in cvindices.keys():
cvindices[key] = cvindices[key].astype(storage_type)
#break down matrices to just folds
all_keys = list(cvindices.keys())
for key in all_keys:
if key[0] == 'K' and len(key) == 3:
fold_matrix = cvindices.pop(key)
n_repeats = fold_matrix.shape[1]
for r in range(n_repeats):
folds = np.array(fold_matrix[:,r])
folds = folds.flatten()
folds = folds.astype(storage_type)
fold_id = '%sN%02d' % (key, r)
cvindices[fold_id] = folds
# cleanup in the R environment just in case
r_clear()
return cvindices
|
'''
Created on Aug 9, 2016
@author: David Zwicker <[email protected]>
'''
from __future__ import division, absolute_import
import itertools
import random
import numpy as np
from six.moves import range
try:
from scipy.special import comb
except ImportError:
from scipy.misc import comb
def log_uniform(v_min, v_max, size):
""" returns random variables that a distributed uniformly in log space """
log_min, log_max = np.log(v_min), np.log(v_max)
res = np.random.uniform(log_min, log_max, size)
return np.exp(res)
def _take_random_combinations_gen(data, r, num, repeat=False):
""" a generator yielding `num` random combinations of length `r` of the
items in `data`. If `repeat` is False, none of the combinations is yielded
    twice. Note that the generator will be caught in an infinite loop if there
    are fewer than `num` possible combinations. """
count, seen = 0, set()
while True:
# choose a combination
s = tuple(sorted(random.sample(data, r)))
# check whether it has been seen already
if s in seen:
continue
# return the combination
yield s
# keep track of what combinations we have already seen
if not repeat:
seen.add(s)
# check how many we have produced
count += 1
if count >= num:
break
def take_combinations(iterable, r, num='all'):
""" returns a generator yielding at most `num` random combinations of
length `r` of the items in `iterable`. """
if num == 'all':
# yield all combinations
return itertools.combinations(iterable, r)
else:
# check how many combinations there are
data = list(iterable)
num_combs = comb(len(data), r, exact=True)
if num_combs <= num:
# yield all combinations
return itertools.combinations(data, r)
elif num_combs <= 10 * num:
# yield a chosen sample of the combinations
            chosen = set(random.sample(range(num_combs), num))
            gen = itertools.combinations(data, r)
            return (v for k, v in enumerate(gen) if k in chosen)
else:
# yield combinations at random
return _take_random_combinations_gen(data, r, num)
def take_product(data, r, num='all'):
""" returns a generator yielding at most `num` random instances from the
product set of `r` times the `data` """
if num == 'all':
# yield all combinations
return itertools.product(data, repeat=r)
else:
# check how many combinations there are
num_items = len(data)**r
if num_items <= num:
# yield all combinations
return itertools.product(data, repeat=r)
else:
# yield a chosen sample of the combinations
            chosen = set(random.sample(range(num_items), num))
            gen = itertools.product(data, repeat=r)
            return (v for k, v in enumerate(gen) if k in chosen)
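# Example usage (a short sketch of the helpers above):
#
#     list(take_combinations(range(4), 2))            # all 6 pairs
#     list(take_combinations(range(100), 3, num=5))   # 5 random triples
#     list(take_product([0, 1], 3, num=4))            # 4 random 3-tuples from {0, 1}^3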
|
# The MIT License (MIT)
# Copyright (c) 2013, Groupon, Inc.
# All rights reserved.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Neither the name of GROUPON nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
import json
import csv
import os
import itertools as it
from datetime import datetime, timedelta
import time
from string import Template
import luigi
from luigi.contrib.simulate import RunAnywayTarget
from django.utils.encoding import smart_str
import pandas as pd
from . import sources
# helper function to grab the latest record timestamp from the tickets or ticket_events table
def get_last_import_time(table='zendesk_tickets'):
try:
timestamp_col = 'updated_at' if (table == 'zendesk_tickets') else 'created_at'
redshift = sources.Redshift()
redshift.cursor.execute('SELECT MAX({0}) FROM zendesk.{1};'.format(timestamp_col,table))
updated_at = redshift.cursor.fetchall()[0][0]
return convert_to_epoch(updated_at)
except:
updated_at = (datetime.now() - timedelta(days=7))
return convert_to_epoch(updated_at)
def convert_to_epoch(d):
return int((d - datetime(1970,1,1)).total_seconds())
class RemoveLastImport(luigi.ExternalTask):
host_resources = {'s3': 1}
resources = {'s3': 1}
table = luigi.Parameter()
s3_bucket = luigi.Parameter(default=luigi.configuration.get_config().get('redshift', 'bucket'))
def run(self):
#remove all files on local EC2 box, S3 before doing the import
if os.path.isfile('data/{0}.csv'.format(self.table)):
os.remove('data/{0}.csv'.format(self.table))
client = luigi.s3.S3Client()
if client.exists(self.s3_bucket + '/{0}.csv'.format(self.table)):
client.remove(self.s3_bucket + '/{0}.csv'.format(self.table))
self.done = ((not client.exists(self.s3_bucket + '/{0}.csv'.format(self.table))) & (not os.path.isfile('data/{0}.csv'.format(self.table))))
self.output().done()
def output(self):
return RunAnywayTarget(self)
class ExtractZendeskTicketsToCSV(luigi.ExternalTask):
host_resources = {'api': 1}
table = 'zendesktickets5'
start_time = get_last_import_time(table)
client = sources.Zendesk()
def requires(self):
return [RemoveLastImport(self.table)]
def columns(self):
return [ 'agent_wait_time_in_minutes'
,'agent_wait_time_in_minutes_within_business_hours'
,'assigned_at'
,'assigned_stations'
,'assignee_external_id'
,'assignee_id'
,'assignee_name'
,'assignee_stations'
,'brand_name'
,'created_at'
,'current_tags'
,'domain'
,'due_date'
,'first_reply_time_in_minutes'
,'first_reply_time_in_minutes_within_business_hours'
,'first_resolution_time_in_minutes'
,'first_resolution_time_in_minutes_within_business_hours'
,'full_resolution_time_in_minutes'
,'full_resolution_time_in_minutes_within_business_hours'
,'generated_timestamp'
,'group_id'
,'group_name'
,'group_stations'
,'id'
,'initially_assigned_at'
,'on_hold_time_in_minutes'
,'on_hold_time_in_minutes_within_business_hours'
,'organization_name'
,'priority'
,'reopens'
,'replies'
,'req_email'
,'req_external_id'
,'req_id'
,'req_name'
,'requester_wait_time_in_minutes'
,'requester_wait_time_in_minutes_within_business_hours'
,'resolution_time'
,'satisfaction_score'
,'solved_at'
,'status'
,'subject'
,'submitter_name'
,'ticket_type'
,'updated_at'
,'url'
,'via']
def transformed_keys(self):
return ['agent_wait_time_in_minutes',
'agent_wait_time_in_minutes_within_business_hours',
'assigned_at',
'assigned_stations',
'first_reply_time_in_minutes',
'first_reply_time_in_minutes_within_business_hours',
'first_resolution_time_in_minutes',
'first_resolution_time_in_minutes_within_business_hours',
'full_resolution_time_in_minutes',
'full_resolution_time_in_minutes_within_business_hours',
'group_stations',
'on_hold_time_in_minutes',
'on_hold_time_in_minutes_within_business_hours',
'reopens',
'replies',
'req_id',
'requester_wait_time_in_minutes',
'resolution_time',]
def dictionary_string_to_integer(self, dictionary, key):
        try:
            dictionary[key] = int(dictionary[key])
        except (KeyError, TypeError, ValueError):
            dictionary[key] = None
return dictionary
def run(self):
try:
response = self.client.incremental_tickets(self.start_time)
if response.status_code == 429:
self.client.status_handler(response)
response = self.client.incremental_tickets(self.start_time)
else:
self.client.status_handler(response)
clientresult = response.json()
tickets = clientresult['results']
header = clientresult['field_headers']
header['assigned_stations']='assigned_stations'
transformed_tickets = []
while bool(clientresult['end_time']): # != '':
for ticket in tickets:
for k in self.transformed_keys():
ticket_temp = self.dictionary_string_to_integer(ticket, k)
for key in sorted(ticket_temp):
if isinstance(ticket_temp[key],str):
ticket_temp[key] = ticket_temp[key].replace('"','').replace("'",'')
transformed_tickets = transformed_tickets + [ticket_temp]
response = self.client.incremental_tickets(clientresult['end_time'])
if response.status_code == 429:
                    try:
                        self.client.status_handler(response)
                    except Exception as e:
                        print('Zendesk ticket extraction error: rate limited & status_handler failed: {0}'.format(e))
response = self.client.incremental_tickets(clientresult['end_time'])
elif response.status_code == 422:
break
clientresult = response.json()
tickets = clientresult['results']
df = pd.DataFrame(transformed_tickets)
df[self.columns()].to_csv('data/{0}.csv'.format(self.table),sep=',',na_rep=None,header=False,index=False)
time.sleep(120)
self.output().done()
except Exception as e:
print('Zendesk ticket extraction error: {0}'.format(e))
raise
def output(self):
return RunAnywayTarget(self)
class ExtractZendeskUsersToCSV(luigi.ExternalTask):
host_resources = {'api': 1}
table = 'zendesk_users'
client = sources.Zendesk().user_client
def requires(self):
return [RemoveLastImport(self.table)]
def columns(self):
return ['active',
'alias',
'chat_only',
'created_at',
'custom_role_id',
'details',
'email',
'external_id',
'id',
'last_login_at',
'locale',
'locale_id',
'moderator',
'name',
'notes',
'only_private_comments',
'organization_id',
'phone',
'photo',
'restricted_agent',
'role',
'shared',
'shared_agent',
'signature',
'suspended',
'tags',
'ticket_restriction',
'time_zone',
'two_factor_auth_enabled',
'updated_at',
'url',
'user_fields',
'verified']
def run(self):
try:
users = self.client.users_list(get_all_pages=True)['users']
            pd.DataFrame(users)[self.columns()].to_csv('data/{0}.csv'.format(self.table), sep=',', na_rep=None, header=False, index=False)
self.output().done()
except Exception as e:
            print('Zendesk user extraction error: {0}'.format(e))
raise
def output(self):
return RunAnywayTarget(self)
class ExtractZendeskOrganizationsToCSV(luigi.ExternalTask):
host_resources = {'api': 1}
table = 'zendesk_organizations'
start_time = 0
now = int(convert_to_epoch(datetime.now()))
client = sources.Zendesk().user_client
def requires(self):
return RemoveLastImport('zendesk_organizations')
def run(self):
try:
result = self.client.incremental_organizations_list(start_time=self.start_time) #can't do get_all_pages=True because of API limit
except:
time.sleep(60)
result = self.client.incremental_organizations_list(start_time=self.start_time) #can't do get_all_pages=True because of API limit
orgs = result['organizations']
all_headers = [list(i.keys()) for i in orgs]
header = set([item for sublist in all_headers for item in sublist])
while bool(result['end_time'] < self.now):
try:
result = self.client.incremental_organizations_list(start_time = int(result['end_time']))
orgs += result['organizations']
except:
print('API LIMIT REACHED...')
time.sleep(60)
if result['count'] < 1000:
break
orgs = pd.DataFrame(orgs).fillna('')
for index in orgs.index:
#clean details
orgs.loc[index,'details'] = orgs.at[index,'details'].replace(',','').replace('//','')
#clean timestamps
orgs.loc[index,'created_at'] = orgs.at[index,'created_at'].replace('T',' ').replace('Z','')
orgs.loc[index,'deleted_at'] = orgs.at[index,'deleted_at'].replace('T',' ').replace('Z','')
orgs.loc[index,'updated_at'] = orgs.at[index,'updated_at'].replace('T',' ').replace('Z','')
orgs.to_csv('data/{0}.csv'.format(self.table), index=False, header=False)
self.output().done()
def output(self):
return RunAnywayTarget(self)
class ExtractZendeskTicketEventsToCSV(luigi.ExternalTask):
host_resources = {'api': 1}
table = 'zendesk_ticket_events'
start_time = get_last_import_time(table = 'zendesk_ticket_events')
now = int(convert_to_epoch(datetime.now()))
client = sources.Zendesk().user_client
def requires(self):
return RemoveLastImport('zendesk_ticket_events')
def columns(self):
return ['child_events'
,'created_at'
,'id'
,'merged_ticket_ids'
,'system'
,'ticket_id'
,'timestamp'
,'updater_id'
,'via'
,'tags'
,'removed_tags'
,'added_tags'
,'priority'
,'status'
,'comment_public']
def run(self):
print('{0}\nStart_time = {1}'.format(datetime.now(), self.start_time))
clientresult = self.client.incremental_ticket_events_list(start_time = self.start_time) #can't do get_all_pages=True because of API limit
ticket_events = clientresult['ticket_events']
all_headers = [list(i.keys()) for i in clientresult['ticket_events']]
header = set([item for sublist in all_headers for item in sublist])
while bool(clientresult['end_time'] < self.now - 2 * 60 * 60): #get results ending less than 2 hours before now
try:
clientresult = self.client.incremental_ticket_events_list(start_time = clientresult['end_time'])
ticket_events += clientresult['ticket_events']
except:
print('API LIMIT REACHED...')
time.sleep(60)
ticket_events = pd.DataFrame(ticket_events)
for index in ticket_events.index:
try:
ticket_events.loc[index,'tags'] = str([i for i in ticket_events.at[index,'child_events'] if 'tags' in i.keys()][0]['tags'])
except:
ticket_events.loc[index,'tags'] = None
try:
ticket_events.loc[index,'removed_tags'] = str([i for i in ticket_events.at[index,'child_events'] if 'tags' in i.keys()][0]['removed_tags'])
except:
ticket_events.loc[index,'removed_tags'] = None
try:
ticket_events.loc[index,'added_tags'] = str([i for i in ticket_events.at[index,'child_events'] if 'tags' in i.keys()][0]['added_tags'])
except:
ticket_events.loc[index,'added_tags'] = None
try:
ticket_events.loc[index,'priority'] = [i for i in ticket_events.at[index,'child_events'] if 'priority' in i.keys()][0]['priority']
except:
ticket_events.loc[index,'priority'] = None
try:
ticket_events.loc[index,'status'] = [i for i in ticket_events.at[index,'child_events'] if 'status' in i.keys()][0]['status']
except:
ticket_events.loc[index,'status'] = None
try:
children = [i for i in ticket_events.at[index,'child_events'] if 'comment_public' in i.keys()]
ticket_events.loc[index,'comment_public'] = ''.join([str(child['comment_public']) for child in children])
except:
ticket_events.loc[index,'comment_public'] = None
ticket_events.loc[index,'created_at'] = ticket_events.at[index,'created_at'].replace('T',' ').replace('Z','')#datetime.strftime(ticket_events.at[index,'created_at'], '%Y-%m-%d %H:%M:%H')
ticket_events = ticket_events[self.columns()]
ticket_events.to_csv('data/{0}.csv'.format(self.table), index=False, header=False)
self.output().done()
def output(self):
return RunAnywayTarget(self)
class UploadLocalCSVToS3(luigi.ExternalTask):
host_resources = {'s3': 1}
resources = {'s3': 1}
table = luigi.Parameter()
s3_bucket = luigi.Parameter(default=luigi.configuration.get_config().get('redshift', 'bucket'))
def requires(self):
if (self.table == 'zendesk_tickets'):
return [ExtractZendeskTicketsToCSV()]
elif (self.table == 'zendesk_users'):
return [ExtractZendeskUsersToCSV()]
elif (self.table == 'zendesk_ticket_events'):
return[ExtractZendeskTicketEventsToCSV()]
elif (self.table == 'zendesk_organizations'):
return [ExtractZendeskOrganizationsToCSV()]
def input(self):
return luigi.LocalTarget('data/{0}.csv'.format(self.table))
def output(self):
return luigi.s3.S3Target(
self.s3_bucket + '/{0}.csv'.format(self.table))
def run(self):
client = luigi.s3.S3Client()
client.put('data/{0}.csv'.format(self.table),
self.s3_bucket + '/{0}.csv'.format(self.table) )
class CopyTableFromS3ToRedshift(luigi.ExternalTask):
host_resources = {'redshift': 1}
resources = {'redshift': 1}
table = luigi.Parameter()
s3_bucket = luigi.Parameter(default=luigi.configuration.get_config().get('redshift', 'bucket'))
def requires(self):
return UploadLocalCSVToS3(self.table)
def run(self):
redshift = sources.Redshift()
import_schema_name = luigi.configuration.get_config().get('redshift', 'zendesk_schema_name')
s3_path = "'" + self.s3_bucket + "/{0}.csv".format(self.table) + "'"
aws_credentials = "'aws_access_key_id=" + redshift.aws_access_key_id + ";" + "aws_secret_access_key=" + redshift.aws_secret_access_key + "'"
options = """CSV DELIMITER ',' ACCEPTINVCHARS TRUNCATECOLUMNS
TRIMBLANKS BLANKSASNULL EMPTYASNULL
DATEFORMAT 'auto' ACCEPTANYDATE
REGION '{0}' COMPUPDATE ON MAXERROR 1;""".format(redshift.s3_region)
sql = Template("""
BEGIN;
COPY ${import_schema_name}.${current_table} FROM ${s3_path} CREDENTIALS ${aws_credentials} ${options}
CREATE TEMP TABLE ${current_table}_dupe_row_list AS
SELECT t.$primary_key FROM zendesk.$current_table t WHERE t.$primary_key IS NOT NULL GROUP BY t.$primary_key HAVING COUNT(t.$primary_key)>1;
CREATE TEMP TABLE ${current_table}_duped_rows AS
SELECT $current_table.*,
ROW_NUMBER() OVER (PARTITION BY $current_table.$primary_key ORDER BY $updated_at_col DESC) AS rn
FROM zendesk.$current_table
JOIN ${current_table}_dupe_row_list on $current_table.$primary_key = ${current_table}_dupe_row_list.$primary_key;
CREATE TEMP TABLE ${current_table}_rows_to_keep AS
SELECT * from ${current_table}_duped_rows where rn = 1;
ALTER TABLE ${current_table}_rows_to_keep
DROP COLUMN rn;
DELETE FROM zendesk.$current_table USING ${current_table}_dupe_row_list l WHERE l.$primary_key=$current_table.$primary_key;
INSERT INTO zendesk.$current_table SELECT * FROM ${current_table}_rows_to_keep;
DROP TABLE ${current_table}_dupe_row_list;
DROP TABLE ${current_table}_duped_rows;
DROP TABLE ${current_table}_rows_to_keep;
COMMIT;""")
primary_key = "ticket_id" if (self.table == "zendesktickets4") else "id"
updated_at_col = "timestamp_at" if (self.table == 'zendesk_ticket_events_luigi') else "updated_at"
sql = sql.substitute(current_table=self.table,
primary_key=primary_key,
updated_at_col=updated_at_col,
import_schema_name=import_schema_name,
s3_path=s3_path,
aws_credentials=aws_credentials,
options=options)
redshift.cursor.execute(sql)
self.output().done()
def output(self):
return RunAnywayTarget(self)
class ImportZendeskTablesToRedshift(luigi.WrapperTask):
def relevant_tables(self):
tables = """zendesk_tickets
zendesk_users
zendesk_organizations
zendesk_ticket_events"""
return [table.strip() for table in tables.splitlines()]
def requires(self):
for table in self.relevant_tables():
yield CopyTableFromS3ToRedshift(table)
class Run(luigi.WrapperTask):
def requires(self):
yield ImportZendeskTablesToRedshift()
if __name__ == '__main__':
luigi.run()
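# Example invocation (a sketch; the module/file name 'zendesk_pipeline' is hypothetical
# and depends on how this file is named and installed):
#
#     luigi --module zendesk_pipeline Run --workers 4
#
# or, since this file calls luigi.run() directly:
#
#     python zendesk_pipeline.py Run --local-scheduler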
########################################################
# SCHEMAS
########################################################
# CREATE TABLE zendesk.zendesk_users (
# active varchar,
# alias varchar,
# chat_only varchar,
# created_at VARCHAR,
# custom_role_id varchar,
# details varchar,
# email varchar,
# external_id varchar,
# id varchar,
# last_login_at VARCHAR,
# locale varchar,
# locale_id varchar,
# moderator varchar,
# name varchar,
# notes varchar,
# only_private_comments varchar,
# organization_id VARCHAR,
# phone varchar,
# photo varchar,
# restricted_agent varchar,
# role varchar,
# shared varchar,
# shared_agent varchar,
# signature varchar,
# suspended varchar,
# tags varchar,
# ticket_restriction varchar,
# time_zone varchar,
# two_factor_auth_enabled varchar,
# updated_at VARCHAR,
# url varchar,
# user_fields varchar,
# verified varchar)
# CREATE TABLE zendesk.zendesk_tickets (
# agent_wait_time_in_minutes varchar
# ,agent_wait_time_in_minutes_within_business_hours varchar
# ,assigned_at varchar
# ,assigned_stations varchar
# ,assignee_external_id varchar
# ,assignee_id varchar
# ,assignee_name varchar
# ,assignee_stations varchar
# ,brand_name varchar
# ,created_at varchar
# ,current_tags varchar
# ,domain varchar
# ,due_date varchar
# ,first_reply_time_in_minutes varchar
# ,first_reply_time_in_minutes_within_business_hours varchar
# ,first_resolution_time_in_minutes varchar
# ,first_resolution_time_in_minutes_within_business_hours varchar
# ,full_resolution_time_in_minutes varchar
# ,full_resolution_time_in_minutes_within_business_hours varchar
# ,generated_timestamp varchar
# ,group_id varchar
# ,group_name varchar
# ,group_stations varchar
# ,id varchar
# ,initially_assigned_at varchar
# ,on_hold_time_in_minutes varchar
# ,on_hold_time_in_minutes_within_business_hours varchar
# ,organization_name varchar
# ,priority varchar
# ,reopens varchar
# ,replies varchar
# ,req_email varchar
# ,req_external_id varchar
# ,req_id varchar
# ,req_name varchar
# ,requester_wait_time_in_minutes varchar
# ,requester_wait_time_in_minutes_within_business_hours varchar
# ,resolution_time varchar
# ,satisfaction_score varchar
# ,solved_at varchar
# ,status varchar
# ,subject varchar
# ,submitter_name varchar
# ,ticket_type varchar
# ,updated_at varchar
# ,url varchar
# ,via varchar
# )
# CREATE TABLE zendesk.zendesk_organizations(
# created_at TIMESTAMP,
# deleted_at TIMESTAMP,
# details VARCHAR(max),
# domain_names VARCHAR,
# external_id VARCHAR,
# group_id VARCHAR,
# id BIGINT distkey sortkey,
# name VARCHAR,
# notes VARCHAR(max),
# organization_fields VARCHAR(max),
# shared_comments BOOLEAN,
# shared_tickets BOOLEAN,
# tags VARCHAR(max),
# updated_at TIMESTAMP,
# url VARCHAR,
# primary key(id)) |
from collections import Counter
def anagram1(s1, s2):
if len(s1) == len(s2):
return sorted(s1) == sorted(s2)
return False
def anagram2(s1, s2):
if len(s1) == len(s2):
return Counter(s1) == Counter(s2)
return False
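# Quick sanity check (hypothetical inputs, separate from the benchmark below):
#
#     assert anagram1('listen', 'silent') and anagram2('listen', 'silent')
#     assert not anagram1('listen', 'silents')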
def test_strings(size=500):
import string
from random import choice, shuffle
letters = getattr(string, 'letters', getattr(string, 'ascii_letters', ''))
    chars = [choice(letters) for c in range(size)]
    s1 = ''.join(chars)
    shuffle(chars)
    s2 = ''.join(chars)
return s1, s2
if __name__ == '__main__':
import sys
import timeit
num = 100
if len(sys.argv) > 1:
num = int(sys.argv[1])
print(timeit.timeit(
'anagram1(s1, s2)',
'from __main__ import test_strings, anagram1;'
's1, s2 = test_strings(%s)' % num, number=1000)
)
print(timeit.timeit(
'anagram2(s1, s2)',
'from __main__ import test_strings, anagram2;'
's1, s2 = test_strings(%s)' % num, number=1000)
)
|
#===============================
# List of Rooms
#===============================
dimRoom = {'name':'Dim Room', 'description':'You enter a dimly lit room and start to look around.'}
darkRoom = {'name':'Dark Room', 'description':'You enter a pitch-black room and cannot see anything. You reach into your pocket and light a match.'}
watchRoom = {'name':'Watch Room', 'description':'You feel there is something watching you and enter the next room with caution.'}
eerieRoom = {'name':'Eerie Room', 'description':'An eerie presence seems to surround you as you move onward.'}
roomList = [dimRoom, darkRoom, watchRoom, eerieRoom]
|
'''
Potion Name Generator
'''
from data import WordPickFile, SentenceGenerator
Ailments = WordPickFile("data/ailment.txt")
Effect = WordPickFile("data/effect.txt")
Wizard = WordPickFile("data/wizard.txt")
# Potion of Effect
StandardPotion = SentenceGenerator(["Potion of", Effect])
# Wizard Name's Potion
WizardPotion = SentenceGenerator([Wizard, "Potion"])
# Wizard Name's Potion of Effect
WizardPotionEffect = SentenceGenerator([Wizard, "Potion of", Effect])
# Ailment Remover
AilmentRemover = SentenceGenerator([Ailments, "Remover"])
names = [StandardPotion, WizardPotion, WizardPotionEffect, AilmentRemover] |
# NOTE: only used to find the best hyperparameters of the CNN
# (the MLPRegressor import below is a leftover and is not used)
import csv
from itertools import islice
import random
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import KFold, train_test_split
import pandas as pd
from sklearn.utils import shuffle
from time import sleep
import tensorflow as tf
def bit2attr(bitstr) -> list:
attr_vec = list()
for i in range(len(bitstr)):
attr_vec.append(int(bitstr[i]))
return attr_vec
def mean_relative_error(y_pred, y_test):
assert len(y_pred) == len(y_test)
mre = 0.0
for i in range(len(y_pred)):
mre = mre + abs((y_pred[i] - y_test[i]) / y_test[i])
mre = mre * 100/ len(y_pred)
return mre
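# Example (a small hand-checked case): with y_pred = [2.0, 3.0] and y_test = [1.0, 2.0]
# the relative errors are 100% and 50%, so the mean relative error is 75.0:
#
#     mean_relative_error([2.0, 3.0], [1.0, 2.0])   # -> 75.0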
Large_MRE_points = pd.DataFrame()
Large_MRE_X = []
Large_MRE_y_test = []
Large_MRE_y_pred = []
Large_MRE = []
'''
1) Data preprocessing
'''
# filepath = 'data/fp/sjn/R+B+Cmorgan_fp1202.csv'
filepath = 'data/database/22-01-29-descriptor-train.csv'
data = pd.read_csv(filepath, encoding='gb18030')
print(data.shape)
data = data.dropna()
print(data.shape)
data = shuffle(data)
data_x_df = data.drop(['label'], axis=1)
data_y_df = data[['label']]
# Min-max normalization
min_max_scaler_X = MinMaxScaler()
min_max_scaler_X.fit(data_x_df)
x_trans1 = min_max_scaler_X.transform(data_x_df)
x_trans1 = np.reshape(x_trans1, (x_trans1.shape[0], x_trans1.shape[1], 1))
min_max_scaler_y = MinMaxScaler()
min_max_scaler_y.fit(data_y_df)
y_trans1 = min_max_scaler_y.transform(data_y_df)
y_trans1 = np.reshape(y_trans1, (y_trans1.shape[0], 1, 1))
test_filepath = "data/database/22-01-29-descriptor-test-level-1.csv"
test_data = pd.read_csv(test_filepath, encoding='gb18030')
print('test data: ', test_data.shape)
test_data_x_df = test_data.drop(['label'], axis=1)
test_data_y_df = test_data[['label']]
x_trans1_test = min_max_scaler_X.transform(test_data_x_df)
y_trans1_test = min_max_scaler_y.transform(test_data_y_df)
x_trans1_test = np.reshape(x_trans1_test, (x_trans1_test.shape[0], x_trans1_test.shape[1], 1))
y_trans1_test = np.reshape(y_trans1_test, (y_trans1_test.shape[0], 1, 1))
'''
3) Build the model
'''
from keras.layers import MaxPooling1D, Conv1D, Dense, Flatten, Dropout, BatchNormalization, LayerNormalization
from keras import models
from keras.optimizers import Adam, RMSprop, SGD
def buildModel():
model = models.Sequential()
l1 = Conv1D(6, 25, 1, activation='relu', use_bias=True, padding='same')
l2 = MaxPooling1D(2, 2)
l3 = BatchNormalization(axis=-1)
l4 = Conv1D(16, 25, 1, activation='relu', use_bias=True, padding='same')
l5 = MaxPooling1D(2, 2)
l6 = BatchNormalization(axis=-1)
l7 = Flatten()
l8 = Dense(120, activation='relu')
l9 = Dropout(rate=0.1)
l10 = BatchNormalization(axis=-1)
l11 = LayerNormalization(axis=-1)
l12 = Dense(84, activation='relu')
l13 = Dense(1, activation='linear')
layers = [l1, l2, l4, l5, l7, l8, l9, l11, l12, l13]
for i in range(len(layers)):
model.add(layers[i])
adam = Adam(lr=1e-3)
model.compile(optimizer=adam, loss='logcosh', metrics=['mae', 'mape'])
return model
def scheduler(epoch, lr):
if epoch > 0 and epoch % 500 == 0:
return lr * 0.1
else:
return lr
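# With the LearningRateScheduler callback defined below, the learning rate starts at
# 1e-3 (set in buildModel) and is multiplied by 0.1 every 500 epochs, e.g. 1e-4 after
# epoch 500 and 1e-5 after epoch 1000.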
'''
4) Train the model
'''
from sklearn import metrics
# n_split = 10
mlp_scores = []
MAEs = []
out_MAEs = []
in_y_test = []
in_y_pred = []
out_y_test = []
out_y_pred = []
X_train = x_trans1
y_train = y_trans1
# model_mlp = buildModel()
# model_mlp.fit(X_train, y_train, epochs=120, verbose=1)
# print(model_mlp.summary())
# sleep(5)
# External validation
X_test = x_trans1_test
y_trans1_test = np.reshape(y_trans1_test, (-1, 1))
y_test = y_trans1_test
callback = tf.keras.callbacks.LearningRateScheduler(scheduler, verbose=1)
model_mlp = buildModel()
history = model_mlp.fit(X_train, y_train, epochs=1, verbose=1, validation_data=(X_test, y_test))
print(model_mlp.summary())
exit(0)
losses = history.history['loss']
eval_mres = history.history['val_mape']
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot([x for x in range(len(losses))], losses, 'b', label='loss')
ax1.set_ylabel('loss', color='b')
ax2.plot([x for x in range(len(eval_mres))], eval_mres, 'r', label='eval_mre')
ax2.set_ylabel('eval_mre', color='r')
ax1.set_xlabel('epochs')
plt.title('Training of CNN')
plt.savefig('pics/Training_of_CNN.png')
import os
outdir = 'Out/losses_and_mres'
os.makedirs(outdir, exist_ok=True)
with open(os.path.join(outdir, '1dcnn_descriptor.txt'), 'w') as f:
f.write('loss\n')
f.write(' '.join([str(x) for x in losses]))
f.write('\n')
f.write('mres\n')
f.write(' '.join([str(x) for x in eval_mres]))
|
# Yolo v5 Detection training on CoCo2017 Dataset:
# Yolo v5s train on 320x320 mAP@0.5 (confidence 0.001, test on 320x320 images) ~28.4
# Yolo v5s train on 640x640 mAP@0.5 (confidence 0.001, test on 320x320 images) ~29.1
# Yolo v5 Detection training on CoCo2014 Dataset:
# Yolo v5s train on 320x320 mAP@0.5 (confidence 0.001, test on 320x320 images) ~28.77
# batch size may need to change depending on model size and GPU (2080Ti, V100)
# The code is optimized for running with a Mini-Batch of 64 examples... So depending on the amount of GPUs,
# you should change the "batch_accumulate" param in the training_params dict to be batch_size * gpu_num * batch_accumulate = 64.
import super_gradients
import argparse
import torch
from super_gradients.training import SgModel, MultiGPUMode
from super_gradients.training.datasets import CoCoDetectionDatasetInterface, CoCo2014DetectionDatasetInterface
from super_gradients.training.models.yolov5 import YoloV5PostPredictionCallback
from super_gradients.training.utils.detection_utils import base_detection_collate_fn
from super_gradients.training.datasets.datasets_utils import ComposedCollateFunction, MultiScaleCollateFunction
from super_gradients.common.aws_connection.aws_secrets_manager_connector import AWSSecretsManagerConnector
from super_gradients.training.metrics import DetectionMetrics
super_gradients.init_trainer()
parser = argparse.ArgumentParser()
#################################
# Model Options
################################
parser.add_argument("--model", type=str, required=True, choices=["s", "m", "l", "x", "c"],
                    help='one of s,m,l,x,c (small, medium, large, extra-large, custom)')
parser.add_argument("--depth", type=float, help='not applicable for default models(s/m/l/x)')
parser.add_argument("--width", type=float, help='not applicable for default models(s/m/l/x)')
parser.add_argument("--reload", action="store_true")
parser.add_argument("--max_epochs", type=int, default=300)
parser.add_argument("--batch", type=int, default=64)
parser.add_argument("--test-img-size", type=int, default=320)
parser.add_argument("--train-img-size", type=int, default=640)
parser.add_argument("--multi-scale", action="store_true")
parser.add_argument("--coco2014", action="store_true")
args, _ = parser.parse_known_args()
models_dict = {"s": "yolo_v5s", "m": "yolo_v5m", "l": "yolo_v5l", "x": "yolo_v5x", "c": "custom_yolov5"}
if args.model == "c":
assert args.depth is not None and args.width is not None, "when setting model type to c (custom), depth and width flags must be set"
assert 0 <= args.depth <= 1, "depth must be in the range [0,1]"
assert 0 <= args.width <= 1, "width must be in the range [0,1]"
else:
assert args.depth is None and args.width is None, "depth and width flags have no effect when the model is not c"
args.model = models_dict[args.model]
distributed = super_gradients.is_distributed()
if args.multi_scale:
train_collate_fn = ComposedCollateFunction([base_detection_collate_fn,
MultiScaleCollateFunction(target_size=args.train_img_size)])
else:
train_collate_fn = base_detection_collate_fn
dataset_params = {"batch_size": args.batch,
"test_batch_size": args.batch,
"train_image_size": args.train_img_size,
"test_image_size": args.test_img_size,
"test_collate_fn": base_detection_collate_fn,
"train_collate_fn": train_collate_fn,
"test_sample_loading_method": "default", # TODO: remove when fixing distributed_data_parallel
"dataset_hyper_param": {
"hsv_h": 0.015, # IMAGE HSV-Hue AUGMENTATION (fraction)
"hsv_s": 0.7, # IMAGE HSV-Saturation AUGMENTATION (fraction)
"hsv_v": 0.4, # IMAGE HSV-Value AUGMENTATION (fraction)
"degrees": 0.0, # IMAGE ROTATION (+/- deg)
"translate": 0.1, # IMAGE TRANSLATION (+/- fraction)
"scale": 0.5, # IMAGE SCALE (+/- gain)
"shear": 0.0} # IMAGE SHEAR (+/- deg)
}
arch_params = {"depth_mult_factor": args.depth,
"width_mult_factor": args.width
}
dataset_string = 'coco2017' if not args.coco2014 else 'coco2014'
model_repo_bucket_name = AWSSecretsManagerConnector.get_secret_value_for_secret_key(aws_env='research',
secret_name='training_secrets',
secret_key='S3.MODEL_REPOSITORY_BUCKET_NAME')
model = SgModel(args.model + '____' + dataset_string,
model_checkpoints_location="s3://" + model_repo_bucket_name,
multi_gpu=MultiGPUMode.DISTRIBUTED_DATA_PARALLEL if distributed else MultiGPUMode.DATA_PARALLEL,
post_prediction_callback=YoloV5PostPredictionCallback())
devices = torch.cuda.device_count() if not distributed else 1
dataset_interface_class = CoCoDetectionDatasetInterface if not args.coco2014 else CoCo2014DetectionDatasetInterface
dataset_interface = dataset_interface_class(dataset_params=dataset_params)
model.connect_dataset_interface(dataset_interface, data_loader_num_workers=20)
model.build_model(args.model, arch_params=arch_params, load_checkpoint=args.reload)
post_prediction_callback = YoloV5PostPredictionCallback()
training_params = {"max_epochs": args.max_epochs,
"lr_mode": "cosine",
"initial_lr": 0.01,
"cosine_final_lr_ratio": 0.2,
"lr_warmup_epochs": 3,
"batch_accumulate": 1,
"warmup_bias_lr": 0.1,
"loss": "yolo_v5_loss",
"criterion_params": {"model": model},
"optimizer": "SGD",
"warmup_momentum": 0.8,
"optimizer_params": {"momentum": 0.937,
"weight_decay": 0.0005 * (args.batch / 64.0),
"nesterov": True},
"mixed_precision": False,
"ema": True,
"train_metrics_list": [],
"valid_metrics_list": [DetectionMetrics(post_prediction_callback=post_prediction_callback,
num_cls=len(
dataset_interface.coco_classes))],
"loss_logging_items_names": ["GIoU", "obj", "cls", "Loss"],
"metric_to_watch": "[email protected]:0.95",
"greater_metric_to_watch_is_better": True}
print(f"Training Yolo v5 {args.model} on {dataset_string.upper()}:\n width-mult={args.width}, depth-mult={args.depth}, "
f"train-img-size={args.train_img_size}, test-img-size={args.test_img_size} ")
model.train(training_params=training_params)
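# Example invocation (a sketch; the script name 'train_yolov5.py' and the GPU setup
# are hypothetical):
#
#     python train_yolov5.py --model s --batch 64 --max_epochs 300 --train-img-size 640 --test-img-size 320
#
# For a custom model, the depth/width multipliers must be supplied:
#
#     python train_yolov5.py --model c --depth 0.33 --width 0.5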
|
suma = 2 + 3
Suma = 2. + 3
resta = 2 - 3
print (suma)
print (Suma)
print (resta)
|
import kinomodel
kinomodel.main(chain='A', coord='pdb', feature='interact', pdb='3PP0')
|
from numpy.lib.shape_base import expand_dims
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch.nn.modules.activation import ReLU
def get_angles(pos, i, d_model):
    angle_rates = 1 / np.power(10000, 2 * (i // 2) / float(d_model))
return pos * angle_rates
def positional_encoding(position, d_model):
"""Encodes positional information to """
# d_model是位置编码的长度,相当于position encoding的embedding_dim?
# d_model 是embedding_dim, meige word de vector
angle_rads = get_angles(np.arange(position)[:, np.newaxis], # [50, 1]
np.arange(d_model)[np.newaxis, :], # [1, d_model=512]
d_model)
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])  # even indices: 2i
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])  # odd indices: 2i+1
    pos_encoding = angle_rads[np.newaxis, ...]  # [position, d_model] => [1, position, d_model]
return torch.tensor(pos_encoding, dtype=torch.float32)
def create_padding_mask(seq, pad):
seq = torch.eq(seq, torch.tensor(pad)).float()
return seq[:, np.newaxis, np.newaxis, :]
def create_look_ahead_mask(size):
mask = torch.triu(torch.ones(size, size), diagonal=1)
return mask
def scaled_dot_product_attention(q, k, v, mask=None):
"""计算注意力权重。
q, k, v 必须具有匹配的前置维度。
k, v 必须有匹配的倒数第二个维度,例如:seq_len_k = seq_len_v。
虽然 mask 根据其类型(填充或前瞻)有不同的形状,
但是 mask 必须能进行广播转换以便求和。
参数:
q: 请求的形状 == (..., seq_len_q, depth)
k: 主键的形状 == (..., seq_len_k, depth)
v: 数值的形状 == (..., seq_len_v, depth_v)
mask: Float 张量,其形状能转换成
(..., seq_len_q, seq_len_k)。默认为None。
返回值:
输出,注意力权重
"""
matmul_qk = torch.matmul(q, k.transpose(-2, -1))
depth_k = torch.tensor(k.shape[-1], dtype=torch.float32)
    scaled_attention_logits = matmul_qk / torch.sqrt(depth_k)
    if mask is not None:
        scaled_attention_logits += (mask * -1e9)
    attention_weights = torch.nn.functional.softmax(scaled_attention_logits, dim=-1)
output = torch.matmul(attention_weights, v)
return output, attention_weights
def point_wise_feed_forward_network(d_model, d_feedforward):
feed_forward_net = torch.nn.Sequential(
torch.nn.Linear(d_model, d_feedforward),
torch.nn.ReLU(),
torch.nn.Linear(d_feedforward, d_model)
)
return feed_forward_net
class MultiHeadAttention(torch.nn.Module):
"""The definition MultiHeadAttention Layer.
input: [batch, padded_sentence_length, d_model]
Creates the q and k with matrix wq and wk as [d_model, dq] dq = dk must
be satisfied.
dq doesn't has to be the same as d_model, in the paper it is 64.
softmax(q * kt/ sqrt(dk)): [sentence_length, sentence_length]
Creates v with matrix wv [d_model, dv] get v with [sentence_length, dv]
single head z = softmax(q * kt/ sqrt(dk)) * v with [sentence_length, dv]
wq: [num_head * dv, d_model] the column of wo must be d_model to keep the
feature size of each word.
In the parpar "All you need is attention" dq = dk = dv = d_model / num head.
Actually, dq = dk, dz = dv
The num_head is not related to d_model.
"""
def __init__(self, d_model, num_heads, dqk, dv):
super(MultiHeadAttention, self).__init__()
self.d_model = d_model
self.num_heads = num_heads
self.dqk = dqk
self.dv = dv
#assert d_model % self.num_heads == 0, "d_model must be divisible by
# num_heads!"
#self.depth = d_model // self.num_heads
        # Project to num_heads independent heads: dqk per head for q/k and dv per head
        # for v, so the concatenated heads match woutput's input size (num_heads * dv).
        self.wq = torch.nn.Linear(d_model, num_heads * dqk)
        self.wk = torch.nn.Linear(d_model, num_heads * dqk)
        self.wv = torch.nn.Linear(d_model, num_heads * dv)
        self.woutput = torch.nn.Linear(num_heads * dv, d_model)
    def split_heads(self, x, batch_size, depth):
        # [batch, seq_len, num_heads * depth] -> [batch, num_heads, seq_len, depth]
        x = x.view(batch_size, -1, self.num_heads, depth)
        return x.transpose(1, 2)
    def forward(self, x, mask):
        batch_size = x.shape[0]
        q = self.split_heads(self.wq(x), batch_size, self.dqk)
        k = self.split_heads(self.wk(x), batch_size, self.dqk)
        v = self.split_heads(self.wv(x), batch_size, self.dv)
        scaled_attention, attention_weights = scaled_dot_product_attention(q, k, v, mask)
        # [batch, num_heads, seq_len, dv] -> [batch, seq_len, num_heads * dv]
        scaled_attention = scaled_attention.transpose(1, 2)
        concat_attention = scaled_attention.reshape(batch_size, -1, self.num_heads * self.dv)
        output = self.woutput(concat_attention)
        return output, attention_weights
# end class MultiheadAttention
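# Example usage (a minimal sketch with made-up sizes: d_model=512, 8 heads,
# per-head query/key dim 64 and value dim 64):
#
#     mha = MultiHeadAttention(d_model=512, num_heads=8, dqk=64, dv=64)
#     x = torch.rand(2, 50, 512)                        # [batch, seq_len, d_model]
#     mask = create_padding_mask(torch.ones(2, 50), 0)  # no padding tokens here
#     out, attn = mha(x, mask)                          # out: [2, 50, 512], attn: [2, 8, 50, 50]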
|
# Copyright 2014 Rackspace, Andrew Melton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
IDMapShift is a tool that properly sets the ownership of a filesystem for use
with linux user namespaces.
When using user namespaces with linux containers, the filesystem of the
container must be owned by the targeted user and group ids being applied
to that container. Otherwise, processes inside the container won't be able
to access the filesystem.
For example, when using the id map string '0:10000:2000', this means that
user ids inside the container between 0 and 1999 will map to user ids on
the host between 10000 and 11999. Root (0) becomes 10000, user 1 becomes
10001, user 50 becomes 10050 and user 1999 becomes 11999. This means that
files that are owned by root need to actually be owned by user 10000, and
files owned by 50 need to be owned by 10050, and so on.
IDMapShift will take the uid and gid strings used for user namespaces and
properly set up the filesystem for use by those users. Uids and gids outside
of provided ranges will be mapped to nobody (max uid/gid) so that they are
inaccessible inside the container.
"""
import os
from oslo_log import log as logging
import nova.privsep
LOG = logging.getLogger(__name__)
NOBODY_ID = 65534
def find_target_id(fsid, mappings, nobody, memo):
if fsid not in memo:
for start, target, count in mappings:
if start <= fsid < start + count:
memo[fsid] = (fsid - start) + target
break
else:
memo[fsid] = nobody
return memo[fsid]
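# Example (mirrors the module docstring): with mappings = [(0, 10000, 2000)],
# uid 0 maps to 10000 and uid 50 to 10050, while uid 5000 falls outside the
# mapped range and maps to `nobody`:
#
#     find_target_id(50, [(0, 10000, 2000)], NOBODY_ID, {})    # -> 10050
#     find_target_id(5000, [(0, 10000, 2000)], NOBODY_ID, {})  # -> 65534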
def print_chown(path, uid, gid, target_uid, target_gid):
LOG.debug('%s %s:%s -> %s:%s', path, uid, gid, target_uid, target_gid)
def shift_path(path, uid_mappings, gid_mappings, nobody, uid_memo, gid_memo):
stat = os.lstat(path)
uid = stat.st_uid
gid = stat.st_gid
target_uid = find_target_id(uid, uid_mappings, nobody, uid_memo)
target_gid = find_target_id(gid, gid_mappings, nobody, gid_memo)
print_chown(path, uid, gid, target_uid, target_gid)
os.lchown(path, target_uid, target_gid)
def shift_dir(fsdir, uid_mappings, gid_mappings, nobody):
uid_memo = dict()
gid_memo = dict()
def shift_path_short(p):
shift_path(p, uid_mappings, gid_mappings, nobody,
uid_memo=uid_memo, gid_memo=gid_memo)
shift_path_short(fsdir)
for root, dirs, files in os.walk(fsdir):
for d in dirs:
path = os.path.join(root, d)
shift_path_short(path)
for f in files:
path = os.path.join(root, f)
shift_path_short(path)
def confirm_path(path, uid_ranges, gid_ranges, nobody):
stat = os.lstat(path)
uid = stat.st_uid
gid = stat.st_gid
uid_in_range = True if uid == nobody else False
gid_in_range = True if gid == nobody else False
if not uid_in_range or not gid_in_range:
for (start, end) in uid_ranges:
if start <= uid <= end:
uid_in_range = True
break
for (start, end) in gid_ranges:
if start <= gid <= end:
gid_in_range = True
break
return uid_in_range and gid_in_range
def get_ranges(maps):
return [(target, target + count - 1) for (start, target, count) in maps]
def confirm_dir(fsdir, uid_mappings, gid_mappings, nobody):
uid_ranges = get_ranges(uid_mappings)
gid_ranges = get_ranges(gid_mappings)
if not confirm_path(fsdir, uid_ranges, gid_ranges, nobody):
return False
for root, dirs, files in os.walk(fsdir):
for d in dirs:
path = os.path.join(root, d)
if not confirm_path(path, uid_ranges, gid_ranges, nobody):
return False
for f in files:
path = os.path.join(root, f)
if not confirm_path(path, uid_ranges, gid_ranges, nobody):
return False
return True
@nova.privsep.sys_admin_pctxt.entrypoint
def shift(path, uid_map, gid_map):
    if confirm_dir(path, uid_map, gid_map, NOBODY_ID):
return
shift_dir(path, uid_map, gid_map, NOBODY_ID)
|
import os
import shutil
import yaml
import json
from app_logger import logger
from datetime import datetime
import uuid
def create_directory(path: str, is_recreate: bool = False)->None:
"""Utility to create the dirctory
Args:
path (str): Give the full path with directory name
is_recreate (bool, optional): If True then it will first delete and then ceate the directory . Defaults to False.
"""
if is_recreate:
try:
shutil.rmtree(path)
except Exception:
pass
    os.makedirs(path, exist_ok=True)  # It will not throw an error if the folder already exists
def read_params(config_path: str ='config/params.yaml')->dict:
"""Responsible for reading the yaml file
Args:
config_path (str): Path of the Yaml file . Defaults to 'config/params.yaml'
Returns:
dict: Return the details of the yaml file
"""
with open(config_path, 'r') as f:
return yaml.safe_load(f)
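# A minimal sketch of the expected config/params.yaml layout, inferred from the keys
# accessed in this module (all values are hypothetical placeholders):
#
#     base:
#       author: some_user
#       project_id: some_project
#     database_logs:
#       training_logs:
#         database_name: training_logs_db
#       prediction_logs:
#         database_name: prediction_logs_db
#     data_schemas:
#       training_schema: config/schema_training.json
#       prediction_schema: config/schema_prediction.json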
def get_log_object_for_training(collection_name: str, execution_id : str=None, executed_by: str=None, project_id :str=None, is_log_enabled : bool=True):
"""It will give the Log Object for training
Args:
collection_name (str): Name of the collection in which the log will be stored
execution_id (str, optional): Execution id. Defaults to None.
executed_by (str, optional): Executed by. Defaults to None.
project_id (str, optional): Id of the project. Defaults to None.
is_log_enabled (bool, optional): If it is set to True then only it will write the logs. Defaults to True.
Returns:
Logger: Logger Object
"""
params=read_params()
    if execution_id is None:
        execution_id = uuid.uuid4().hex
    if executed_by is None:
        executed_by = params['base']['author']
    if project_id is None:
        project_id = params['base']['project_id']
logger_obj = logger.Logger(execution_id=execution_id, executed_by=executed_by, project_id=project_id,
databasename=params['database_logs']['training_logs']['database_name'], collection_name=collection_name, is_log_enabled=is_log_enabled)
return logger_obj
def get_log_object_for_prediction(collection_name: str, execution_id : str=None, executed_by: str=None, project_id :str=None, is_log_enabled : bool=True):
"""It will give the Log Object for prediction
Args:
collection_name (str): Name of the collection in which the log will be stored
execution_id (str, optional): Execution id. Defaults to None.
executed_by (str, optional): Executed by. Defaults to None.
project_id (str, optional): Id of the project. Defaults to None.
is_log_enabled (bool, optional): If it is set to True then only it will write the logs. Defaults to True.
Returns:
Logger: Logger Object
"""
params=read_params()
    if execution_id is None:
        execution_id = uuid.uuid4().hex
    if executed_by is None:
        executed_by = params['base']['author']
    if project_id is None:
        project_id = params['base']['project_id']
logger_obj = logger.Logger(execution_id=execution_id, executed_by=executed_by, project_id=project_id,
databasename=params['database_logs']['prediction_logs']['database_name'], collection_name=collection_name, is_log_enabled=is_log_enabled)
return logger_obj
def read_prediction_schema():
"""Responsible for reading the schema from schema_prediction.json
"""
params=read_params()
path=params['data_schemas']['prediction_schema']
with open(path) as f:
schema=json.load(f)
LengthOfDateStampInFile = schema['LengthOfDateStampInFile']
LengthOfTimeStampInFile = schema['LengthOfTimeStampInFile']
NumberofColumns = schema['NumberofColumns']
ColName = schema['ColName']
return LengthOfDateStampInFile,LengthOfTimeStampInFile,NumberofColumns,ColName
def read_training_schema():
"""Responsible for reading the schema from schema_training.json
"""
params=read_params()
path = params['data_schemas']['training_schema']
with open(path) as f:
schema=json.load(f)
LengthOfDateStampInFile = schema['LengthOfDateStampInFile']
LengthOfTimeStampInFile = schema['LengthOfTimeStampInFile']
NumberofColumns = schema['NumberofColumns']
ColName = schema['ColName']
return LengthOfDateStampInFile,LengthOfTimeStampInFile,NumberofColumns,ColName
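# A minimal sketch of the schema JSON files read above, based on the keys accessed
# (values and column names are hypothetical placeholders):
#
#     {
#       "LengthOfDateStampInFile": 8,
#       "LengthOfTimeStampInFile": 6,
#       "NumberofColumns": 10,
#       "ColName": {"column_1": "float", "column_2": "float"}
#     }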
def get_date():
"""Returns the current date.
"""
return datetime.now().date().strftime('%d-%m-%y')
def get_time():
"""Returns the current time
"""
return datetime.now().time().strftime('%H-%M-%S')
|
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the DataLad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Python package for functionality needed at package 'build' time by DataLad and its extensions
__init__ here should be really minimalistic, not import submodules by default
and submodules should also not require heavy dependencies.
"""
__version__ = '0.1'
|
"""pyEMU: python modules for Environmental Model Uncertainty analyses. These
modules are designed to work directly and seamlessly with PEST and PEST++ model
independent interface. pyEMU can also be used to setup this interface.
Several forms of uncertainty analyses are supported, including FOSM-based
analyses (pyemu.Schur and pyemu.ErrVar), data worth analyses and
high-dimensional ensemble generation.
"""
from .la import LinearAnalysis
from .sc import Schur
from .ev import ErrVar
from .en import Ensemble, ParameterEnsemble, ObservationEnsemble
# from .mc import MonteCarlo
# from .inf import Influence
from .mat import Matrix, Jco, Cov
from .pst import Pst, pst_utils
from .utils import (
helpers,
gw_utils,
optimization,
geostats,
pp_utils,
os_utils,
smp_utils,
metrics,
)
from .plot import plot_utils
from .logger import Logger
from .prototypes import *
from ._version import get_versions
__version__ = get_versions()["version"]
__all__ = [
"LinearAnalysis",
"Schur",
"ErrVar",
"Ensemble",
"ParameterEnsemble",
"ObservationEnsemble",
"Matrix",
"Jco",
"Cov",
"Pst",
"pst_utils",
"helpers",
"gw_utils",
"geostats",
"pp_utils",
"os_utils",
"smp_utils",
"plot_utils",
"metrics",
]
# del get_versions
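# Example usage (a brief sketch; the jacobian file name is hypothetical):
#
#     import pyemu
#     sc = pyemu.Schur(jco="pest.jco")
#     print(sc.get_parameter_summary())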
|
"""
Utilities for importing data
See example usage at the bottom.
"""
import json
from Util import Util
class ApiForm:
"""
Handles interaction with the Catalog databaseq
"""
def __init__(self, catalog_client):
self.catalog_client = catalog_client
def delete_api_by_dbid(self, dbid):
database = self.catalog_client.get_database(dbid)
if database is None:
return Util.error_msg("database is None for dbid: " + str(dbid))
engine = self.catalog_client.get_engine(database[1])
if engine is None:
return Util.error_msg("engine is None for engine_id: " + str(database[1]))
objects = self.catalog_client.get_objects_by_phsyical_db(dbid)
if objects is None:
return Util.error_msg("objects is None for phsyical_db: " + str(dbid))
elif len(objects) == 0:
return Util.error_msg("len(objects) is 0 for phsyical_db: " + str(dbid))
island = self.catalog_client.get_island_by_scope_name('API')
if island is None:
return Util.error_msg("island is none for scope_name API")
deleteEngine = False
databases = self.catalog_client.get_databases_by_engine_id(engine[0])
if databases is None or len(databases) == 0:
return Util.error_msg("databases not found for engine: " + str(engine[0]))
if len(databases) == 1:
deleteEngine = True
object = objects[0]
response = self.catalog_client.delete_object(object[0])
if response != True:
return Util.error_msg(response)
response = self.catalog_client.delete_database(dbid)
if response != True:
return Util.error_msg(response)
if deleteEngine:
response = self.catalog_client.delete_shim(island[0], engine[0])
if response != True:
return Util.error_msg(response)
response = self.catalog_client.delete_engine(engine[0])
if response != True:
return Util.error_msg(response)
return Util.success_msg()
def process_api_form(self, data):
dataApi = json.loads(data.decode("utf-8"))
if not dataApi:
return Util.error_msg("could not parse json")
eid = None
if 'api' in dataApi:
if 'endpoint' in dataApi:
endpointData = dataApi['endpoint']
# Check for duplicate url first, otherwise we'll create the API first and then hit the error
url = endpointData['url']
oid = None
if 'oid' in endpointData:
oid = endpointData['oid']
testObject = self.catalog_client.get_object_by_name_island_name(url, "API")
if oid is not None:
if testObject is None:
testObject = self.catalog_client.get_object(oid)
if testObject is None:
return Util.error_msg("Unknown object id: " + oid)
if int(testObject[0]) != int(oid):
return Util.error_msg("Duplicate url for API Island: " + url)
elif testObject is not None:
return Util.error_msg("Duplicate url for API Island: " + url)
result = self.process_api(dataApi['api'])
if isinstance(result, int):
eid = result
else:
return Util.error_msg(result)
if 'endpoint' in dataApi:
result = self.process_endpoint(dataApi['endpoint'], eid)
if result != True:
return Util.error_msg(result)
return Util.success_msg()
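    # A sketch of the JSON payload process_api_form() expects, inferred from the keys
    # read in process_api()/process_endpoint() below (all values are hypothetical):
    #
    #     {
    #       "api": {"name": "weather_api", "host": "api.example.com", "port": 443,
    #               "connection_properties": "..."},
    #       "endpoint": {"name": "forecast", "url": "/v1/forecast",
    #                    "password_field": null, "result_key": "results"}
    #     }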
def process_api(self, apiData):
name = apiData['name']
eid = None
if 'eid' in apiData:
eid = apiData['eid']
testEngine = self.catalog_client.get_engine_by_name(name)
if eid is not None:
if testEngine is None:
testEngine = self.catalog_client.get_engine(eid)
if testEngine is None:
return "Unknown engine: " + eid
if int(eid) != int(testEngine[0]):
return "Duplicate engine: " + name
elif testEngine is not None:
return "Duplicate engine: " + name
island = self.catalog_client.get_island_by_scope_name('API')
if island is None:
return "Could not find API Island in catalog"
islandId = island[0]
host = None
if 'host' in apiData:
host = apiData['host']
port = None
if 'port' in apiData:
port = apiData['port']
connection_properties = None
if 'connection_properties' in apiData:
connection_properties = apiData['connection_properties']
# update case
if eid is not None:
result = self.catalog_client.update_engine(eid, name, host, port, connection_properties)
if result != True:
return result
return int(eid)
# insert case
result = self.catalog_client.insert_engine(name, host, port, connection_properties)
if not isinstance(result, int):
return result
shim_result = self.catalog_client.insert_shim(islandId, result)
if not isinstance(shim_result, int):
return shim_result
return result
def process_endpoint(self, endpointData, engineId):
name = endpointData['name']
if engineId is None:
engineId = endpointData['engine_id']
if self.catalog_client.get_engine(engineId) is None:
return "Can't find engine: " + engineId
dbid = None
if 'dbid' in endpointData:
dbid = endpointData['dbid']
testDatabase = self.catalog_client.get_database_by_engine_id_name(engineId, name)
if dbid is not None:
if testDatabase is None:
testDatabase = self.catalog_client.get_database(dbid)
if testDatabase is None:
return "Unknown database: " + dbid
if int(dbid) != int(testDatabase[0]):
return "Duplicate endpoint for engine: " + name
elif testDatabase is not None:
return "Duplicate endpoint for engine: " + name
url = endpointData['url']
oid = None
if 'oid' in endpointData:
oid = endpointData['oid']
testObject = self.catalog_client.get_object_by_name_island_name(url, "API")
if oid is not None:
if testObject is None:
testObject = self.catalog_client.get_object(oid)
if testObject is None:
return "Unknown object id: " + oid
if int(testObject[0]) != int(oid):
return "Duplicate url for API Island: " + url
elif testObject is not None:
return Util.error_msg("Duplicate url for API Island: " + url)
password_field = None
if 'password_field' in endpointData:
password_field = endpointData['password_field']
if dbid is not None:
result = self.catalog_client.update_database(dbid, engineId, name, None, password_field)
if result != True:
return result
else:
result = self.catalog_client.insert_database(engineId, name, None, password_field)
if isinstance(result, str):
return result
dbid = result
fields = None
if 'result_key' in endpointData:
fields = endpointData['result_key']
if oid is not None:
result = self.catalog_client.update_object(oid, url, fields, dbid, dbid)
if result != True:
return result
else:
result = self.catalog_client.insert_object(url, fields, dbid, dbid)
if isinstance(result, str):
return result
return True
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'GetAccountResult',
'AwaitableGetAccountResult',
'get_account',
]
@pulumi.output_type
class GetAccountResult:
"""
A collection of values returned by getAccount.
"""
def __init__(__self__, droplet_limit=None, email=None, email_verified=None, floating_ip_limit=None, id=None, status=None, status_message=None, uuid=None):
if droplet_limit and not isinstance(droplet_limit, int):
raise TypeError("Expected argument 'droplet_limit' to be a int")
pulumi.set(__self__, "droplet_limit", droplet_limit)
if email and not isinstance(email, str):
raise TypeError("Expected argument 'email' to be a str")
pulumi.set(__self__, "email", email)
if email_verified and not isinstance(email_verified, bool):
raise TypeError("Expected argument 'email_verified' to be a bool")
pulumi.set(__self__, "email_verified", email_verified)
if floating_ip_limit and not isinstance(floating_ip_limit, int):
raise TypeError("Expected argument 'floating_ip_limit' to be a int")
pulumi.set(__self__, "floating_ip_limit", floating_ip_limit)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if status_message and not isinstance(status_message, str):
raise TypeError("Expected argument 'status_message' to be a str")
pulumi.set(__self__, "status_message", status_message)
if uuid and not isinstance(uuid, str):
raise TypeError("Expected argument 'uuid' to be a str")
pulumi.set(__self__, "uuid", uuid)
@property
@pulumi.getter(name="dropletLimit")
def droplet_limit(self) -> int:
return pulumi.get(self, "droplet_limit")
@property
@pulumi.getter
def email(self) -> str:
return pulumi.get(self, "email")
@property
@pulumi.getter(name="emailVerified")
def email_verified(self) -> bool:
return pulumi.get(self, "email_verified")
@property
@pulumi.getter(name="floatingIpLimit")
def floating_ip_limit(self) -> int:
return pulumi.get(self, "floating_ip_limit")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def status(self) -> str:
return pulumi.get(self, "status")
@property
@pulumi.getter(name="statusMessage")
def status_message(self) -> str:
return pulumi.get(self, "status_message")
@property
@pulumi.getter
def uuid(self) -> str:
return pulumi.get(self, "uuid")
class AwaitableGetAccountResult(GetAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAccountResult(
droplet_limit=self.droplet_limit,
email=self.email,
email_verified=self.email_verified,
floating_ip_limit=self.floating_ip_limit,
id=self.id,
status=self.status,
status_message=self.status_message,
uuid=self.uuid)
def get_account(opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
"""
Get information on your DigitalOcean account.
## Example Usage
Get the account:
```python
import pulumi
import pulumi_digitalocean as digitalocean
example = digitalocean.get_account()
```
"""
__args__ = dict()
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('digitalocean:index/getAccount:getAccount', __args__, opts=opts, typ=GetAccountResult).value
return AwaitableGetAccountResult(
droplet_limit=__ret__.droplet_limit,
email=__ret__.email,
email_verified=__ret__.email_verified,
floating_ip_limit=__ret__.floating_ip_limit,
id=__ret__.id,
status=__ret__.status,
status_message=__ret__.status_message,
uuid=__ret__.uuid)
|
"""
This module contains white-box unit tests of CertDB package
"""
# pylint: disable=W0212, C0103, C0302
import sys
import os
import subprocess
import time
import shutil
import string
import random
import unittest
import unittest.mock
from collections import OrderedDict
import toml
from cevast.utils import make_PEM_filename
from cevast.certdb import (
CertDB,
CertFileDB,
CertFileDBReadOnly,
CertNotAvailableError,
CertInvalidError,
CompositeCertDB,
CompositeCertDBReadOnly,
)
# Helper functions
TEST_DATA_PATH = 'tests/data/'
TEST_CERTS_1 = TEST_DATA_PATH + 'test_certs_1.csv'
TEST_CERTS_2 = TEST_DATA_PATH + 'test_certs_2.csv'
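# Each test CSV is expected to hold one "<cert_id>,<cert_data>" pair per line;
# the helpers below parse it that way.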
def insert_test_certs(database: CertDB, certs_file: str) -> list:
"""
Insert certificates from certs_file to database
Return list of inserted certificates.
"""
certs = []
with open(certs_file) as r_file:
for line in r_file:
els = [e.strip() for e in line.split(',')]
database.insert(els[0], els[1])
certs.append(els[0])
return certs
def insert_random_certs(database: CertDB, certs_cnt: int) -> list:
"""
Insert number(certs_cnt) randomly generated certificates to database
Return list of inserted certificates.
"""
def random_string(length: int) -> str:
return ''.join(random.choice(string.ascii_letters) for i in range(length))
certs = []
for _ in range(certs_cnt):
cert_id = random_string(16)
database.insert(cert_id, random_string(8))
certs.append(cert_id)
return certs
def delete_test_certs(database: CertDB, certs_file: str) -> list:
"""
Delete certificates from certs_file from database
Return list of deleted certificates.
"""
certs = []
with open(certs_file) as r_file:
for line in r_file:
els = [e.strip() for e in line.split(',')]
database.delete(els[0])
certs.append(els[0])
return certs
def commit_test_certs(database: CertDB, certs_file: str) -> list:
"""
Insert and commit certificates from certs_file to database
Return list of committed certificates.
"""
certs = insert_test_certs(database, certs_file)
database.commit()
return certs
class TestCertFileDBReadOnly(unittest.TestCase):
"""Unit test class of CertFileDBReadOnly class"""
TEST_STORAGE = 'tests/test_storage'
def tearDown(self):
# Clear test storage
shutil.rmtree(self.TEST_STORAGE, ignore_errors=True)
def test_setup(self):
"""
Test implementation of CertFileDBReadOnly setup method
"""
        # Check wrong parameters
self.assertRaises(ValueError, CertFileDBReadOnly.setup, self.TEST_STORAGE, 'ass')
# Setup and check DB
CertFileDBReadOnly.setup(self.TEST_STORAGE, 5, 'DES', 'Testing DB', 'unittest')
assert os.path.exists(self.TEST_STORAGE)
cfg = toml.load(os.path.join(self.TEST_STORAGE, CertFileDBReadOnly.CONF_FILENAME))
meta = toml.load(os.path.join(self.TEST_STORAGE, CertFileDBReadOnly.META_FILENAME))
self.assertEqual(cfg['PARAMETERS']['storage'], os.path.abspath(self.TEST_STORAGE))
self.assertEqual(cfg['PARAMETERS']['structure_level'], 5)
self.assertEqual(cfg['PARAMETERS']['cert_format'], 'DES')
self.assertEqual(cfg['PARAMETERS']['maintain_info'], True)
self.assertEqual(meta['INFO']['description'], 'Testing DB')
self.assertEqual(meta['INFO']['owner'], 'unittest')
assert 'compression_method' in cfg['PARAMETERS']
        # Try to set up a different DB on the same storage
self.assertRaises(ValueError, CertFileDB.setup, self.TEST_STORAGE, 1, 'PEM', 'Testing DB 2', 'unittest')
        # Try to open a manually configured DB and commit something
new_loc = os.path.join(self.TEST_STORAGE, 'new')
os.makedirs(new_loc)
shutil.move(
os.path.join(self.TEST_STORAGE, CertFileDBReadOnly.CONF_FILENAME),
os.path.join(new_loc, CertFileDBReadOnly.CONF_FILENAME)
)
assert os.path.exists(os.path.join(new_loc, CertFileDBReadOnly.CONF_FILENAME))
db = CertFileDB(new_loc)
commit_test_certs(db, TEST_CERTS_1)
def test_init(self):
"""
Test of CertFileDBReadOnly initialization
"""
self.assertRaises(ValueError, CertFileDBReadOnly, self.TEST_STORAGE)
CertFileDBReadOnly.setup(self.TEST_STORAGE, structure_level=5)
# Storage should be now properly initialized
db = CertFileDBReadOnly(self.TEST_STORAGE)
self.assertEqual(db._params['structure_level'], 5)
self.assertEqual(db._params['storage'], os.path.abspath(self.TEST_STORAGE))
def test_get(self):
"""
Test implementation of CertDB method GET
"""
CertFileDBReadOnly.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
db_ronly = CertFileDBReadOnly(self.TEST_STORAGE)
fake_cert_id = 'fakecertid'
# Insert and commit some certificates and try to retrieve them back
commit_test_certs(db, TEST_CERTS_1)
with open(TEST_CERTS_1) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
                # Certificates should exist - the transaction was committed
self.assertEqual(db_ronly.get(cert_id), cert.strip())
# Only insert other certificates and try to retrieve them back
inserted = insert_test_certs(db, TEST_CERTS_2)
for cert_id in inserted:
            # Certificates should NOT exist - the transaction was NOT committed
self.assertRaises(CertNotAvailableError, db_ronly.get, cert_id)
# Test fake certificate that doesn't exist
self.assertRaises(CertNotAvailableError, db_ronly.get, fake_cert_id)
def test_export(self):
"""
Test implementation of CertDB method EXPORT
"""
def test_permission(db, valid_cert_id):
if not sys.platform.startswith('linux'):
return # works only on Linux like systems
fake_target_dir = 'tests/fake_export'
os.mkdir(fake_target_dir)
subprocess.call(['chmod', '-w', fake_target_dir])
self.assertRaises(PermissionError, db.export, valid_cert_id, fake_target_dir)
subprocess.call(['chmod', '+w', fake_target_dir])
os.rmdir(fake_target_dir)
CertFileDBReadOnly.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
db_ronly = CertFileDBReadOnly(self.TEST_STORAGE)
target_dir = self.TEST_STORAGE + '/export'
os.mkdir(target_dir)
fake_cert_id = 'fakecertid'
# Insert and commit some certificates and export them
commit_test_certs(db, TEST_CERTS_1)
with open(TEST_CERTS_1) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
expected = os.path.join(target_dir, make_PEM_filename(cert_id))
self.assertEqual(db_ronly.export(cert_id, target_dir), expected)
with open(expected) as target:
self.assertEqual(target.read(), cert.strip())
# Check export without unnecessary copying - should copy anyway because persisted
self.assertEqual(db_ronly.export(cert_id, target_dir, copy_if_exists=False), expected)
# Tests writing permissions for exporting from zipfile
test_permission(db_ronly, cert_id)
# Only insert other certificates and try to retrieve them back
inserted = insert_test_certs(db, TEST_CERTS_2)
for cert_id in inserted:
            # Certificates should NOT exist - the transaction was NOT committed
self.assertRaises(CertNotAvailableError, db_ronly.export, cert_id, target_dir)
self.assertRaises(CertNotAvailableError, db_ronly.export, cert_id, target_dir, False)
# Test fake certificate that doesn't exist
self.assertRaises(CertNotAvailableError, db_ronly.export, fake_cert_id, target_dir)
def test_exists(self):
"""
Test implementation of CertDB method EXISTS
"""
CertFileDBReadOnly.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
db_ronly = CertFileDBReadOnly(self.TEST_STORAGE)
fake_cert = 'fakecertid'
# Insert and commit some certificates and check if exists
committed = commit_test_certs(db, TEST_CERTS_1)
for cert in committed:
assert db_ronly.exists(cert)
assert db_ronly.exists_all(committed)
# Only insert other certificates and check if exists
inserted = insert_test_certs(db, TEST_CERTS_2)
for cert in inserted:
assert not db_ronly.exists(cert)
assert not db_ronly.exists_all(inserted)
# Test fake certificate that doesn't exist
committed.append(fake_cert)
assert not db_ronly.exists(fake_cert)
assert not db_ronly.exists_all(committed)
def test_cache(self):
"""
        Test implementation of the CertFileDB certificate existence cache
"""
CertFileDB.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
db_ronly = CertFileDBReadOnly(self.TEST_STORAGE)
# Insert and commit some certificates and check cache
committed = commit_test_certs(db, TEST_CERTS_1)
for cert in committed:
assert cert not in db_ronly._cache
db_ronly.exists(cert)
assert cert in db_ronly._cache
self.assertEqual(db_ronly._cache, set(committed))
# Insert and commit some certificates and check cache after exists_all call
committed = commit_test_certs(db, TEST_CERTS_2)
assert not set(committed).issubset(db_ronly._cache)
db_ronly.exists_all(committed)
assert set(committed).issubset(db_ronly._cache)
# Check DELETE effect on cache
db.exists_all(committed)
self.assertEqual(set(committed), db._cache)
db.delete(committed[0])
assert committed[0] not in db._cache
self.assertNotEqual(set(committed), db._cache)
db.rollback()
# Check speed improvement using cache - on large number of certs
inserted = insert_random_certs(db, 1000)
db.commit()
        t0 = time.perf_counter()
for cert in inserted:
db_ronly.exists(cert)
        t1 = time.perf_counter()
for cert in inserted:
db_ronly.exists(cert)
        t2 = time.perf_counter()
self.assertGreater(t1 - t0, t2 - t1)
class TestCertFileDB(unittest.TestCase):
"""Unit test class of CertFileDB class"""
TEST_STORAGE = 'tests/test_storage'
def tearDown(self):
# Clear test storage
shutil.rmtree(self.TEST_STORAGE, ignore_errors=True)
if os.path.exists(self.TEST_STORAGE + '.zip'):
os.remove(self.TEST_STORAGE + '.zip')
def test_init(self):
"""
Test of CertFileDB initialization
"""
self.assertRaises(ValueError, CertFileDB, self.TEST_STORAGE)
CertFileDB.setup(self.TEST_STORAGE, structure_level=5)
# Storage should be now properly initialized
db = CertFileDB(self.TEST_STORAGE)
self.assertEqual(db._params['structure_level'], 5)
self.assertEqual(db._params['storage'], os.path.abspath(self.TEST_STORAGE))
def test_get(self):
"""
Test implementation of CertDB method GET
"""
CertFileDB.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
fake_cert_id = 'fakecertid'
# Insert and commit some certificates and retrieve them back
committed = commit_test_certs(db, TEST_CERTS_1)
with open(TEST_CERTS_1) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
self.assertEqual(db.get(cert_id), cert.strip())
# Only insert other certificates and retrieve them back
inserted = insert_test_certs(db, TEST_CERTS_2)
with open(TEST_CERTS_2) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
self.assertEqual(db.get(cert_id), cert.strip())
# Rollback and try to retrieve them again
db.rollback()
for cert_id in inserted:
self.assertRaises(CertNotAvailableError, db.get, cert_id)
# Test DELETE method effect
db.delete(committed[0])
self.assertRaises(CertNotAvailableError, db.get, committed[0])
# Test fake certificate that doesn't exist
self.assertRaises(CertNotAvailableError, db.get, fake_cert_id)
def test_export(self):
"""
Test implementation of CertDB method EXPORT
"""
def test_permission(db, valid_cert_id):
if not sys.platform.startswith('linux'):
return # works only on Linux like systems
fake_target_dir = 'tests/fake_export'
os.mkdir(fake_target_dir)
subprocess.call(['chmod', '-w', fake_target_dir])
self.assertRaises(PermissionError, db.export, valid_cert_id, fake_target_dir)
subprocess.call(['chmod', '+w', fake_target_dir])
os.rmdir(fake_target_dir)
CertFileDB.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
target_dir = self.TEST_STORAGE + '/export'
os.mkdir(target_dir)
fake_cert_id = 'fakecertid'
# Insert and commit some certificates and export them
committed = commit_test_certs(db, TEST_CERTS_1)
with open(TEST_CERTS_1) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
expected = os.path.join(target_dir, make_PEM_filename(cert_id))
self.assertEqual(db.export(cert_id, target_dir), expected)
with open(expected) as target:
self.assertEqual(target.read(), cert.strip())
# Check export without unnecessary copying - should copy anyway because persisted
self.assertEqual(db.export(cert_id, target_dir, copy_if_exists=False), expected)
# Tests writing permissions for exporting from zipfile
test_permission(db, cert_id)
# Only insert other certificates and retrieve them back
insert_test_certs(db, TEST_CERTS_2)
with open(TEST_CERTS_2) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
expected = os.path.join(target_dir, make_PEM_filename(cert_id))
self.assertEqual(db.export(cert_id, target_dir), expected)
with open(expected) as target:
self.assertEqual(target.read(), cert.strip())
# Check export without unnecessary copying
file = db.export(cert_id, target_dir, copy_if_exists=False)
self.assertNotEqual(file, expected)
with open(file) as target:
self.assertEqual(target.read(), cert.strip())
# Tests writing permissions for exporting from transaction
test_permission(db, cert_id)
# Rollback and try to retrieve them again
db.rollback()
r_file.seek(0)
for line in r_file:
cert_id = line.split(',')[0]
self.assertRaises(CertNotAvailableError, db.export, cert_id, target_dir)
# Test DELETE method effect
db.delete(committed[0])
self.assertRaises(CertNotAvailableError, db.get, committed[0])
# Test fake certificate that doesn't exist
self.assertRaises(CertNotAvailableError, db.export, fake_cert_id, target_dir)
def test_exists(self):
"""
Test implementation of CertDB method EXISTS
"""
CertFileDB.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
fake_cert = 'fakecertid'
# Insert and commit some certificates and check if exists
committed = commit_test_certs(db, TEST_CERTS_1)
for cert in committed:
assert db.exists(cert)
assert db.exists_all(committed)
# Only insert other certificates and check if exists
inserted = insert_test_certs(db, TEST_CERTS_2)
for cert in inserted:
assert db.exists(cert)
assert db.exists_all(inserted)
# Test DELETE method effect
db.delete(committed[0])
assert not db.exists(committed[0])
# Test fake certificate that doesn't exist
committed.append(fake_cert)
assert not db.exists(fake_cert)
assert not db.exists_all(committed)
def test_insert(self):
"""
Test implementation of CertDB method INSERT
"""
CertFileDB.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
# Insert some invalid certificates
self.assertRaises(CertInvalidError, db.insert, None, None)
self.assertRaises(CertInvalidError, db.insert, '', '')
self.assertRaises(CertInvalidError, db.insert, '', 'valid')
self.assertRaises(CertInvalidError, db.insert, 'valid', None)
# Insert some valid certificates
inserted = insert_test_certs(db, TEST_CERTS_1)
blocks = {**db._to_insert}
        # the open transaction should contain the certificates and the cert files should exist
self.assertTrue(db._to_insert)
for cert in inserted:
block_path = db._get_block_path(cert)
assert os.path.exists(os.path.join(block_path, cert))
# Insert different certificates under the same IDs
certs = {}
with open(TEST_CERTS_1) as r_file:
for line in r_file:
els = [e.strip() for e in line.split(',')]
db.insert(els[0], els[1] + '_open')
certs[els[0]] = els[1]
# IDs should be same and certificates should not be changed
self.assertTrue(blocks == db._to_insert)
for k, v in certs.items():
self.assertTrue(db.get(k) == v)
# Commit transaction and commit different certificates under the same IDs
db.commit()
self.assertFalse(db._to_insert)
certs = {}
with open(TEST_CERTS_1) as r_file:
for line in r_file:
els = [el.strip() for el in line.split(',')]
db.insert(els[0], els[1] + '_commit')
certs[els[0]] = els[1]
# IDs should be same and persisted certificates should not be changed
self.assertTrue(blocks == db._to_insert)
db.commit()
self.assertFalse(db._to_insert)
for k, v in certs.items():
self.assertTrue(db.get(k) == v)
def test_delete(self):
"""
Test implementation of CertDB method DELETE
"""
CertFileDB.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
# Delete some invalid certificates
self.assertRaises(CertInvalidError, db.delete, None)
self.assertRaises(CertInvalidError, db.delete, '')
# Insert and delete the same certs before commit
inserted = insert_test_certs(db, TEST_CERTS_1)
deleted = delete_test_certs(db, TEST_CERTS_1)
# transaction should be clear and files should not exist
self.assertFalse(db._to_delete)
self.assertFalse(db._to_insert)
for cert in inserted:
block_path = db._get_block_path(cert)
assert not os.path.exists(os.path.join(block_path, cert))
# Delete and insert the same certs before commit
deleted = delete_test_certs(db, TEST_CERTS_1)
inserted = insert_test_certs(db, TEST_CERTS_1)
# transaction should contain deleted and inserted certificates
self.assertTrue(db._to_delete)
self.assertTrue(db._to_insert)
for certs in db._to_delete.values():
assert certs.issubset(set(deleted))
for certs in db._to_insert.values():
assert certs.issubset(set(inserted))
# and files should exist
for cert in inserted:
block_path = db._get_block_path(cert)
assert os.path.exists(os.path.join(block_path, cert))
# now commit and check that files were persisted
ins, dlt = db.commit()
# the certs should be only inserted
self.assertEqual(ins, len(inserted))
self.assertEqual(dlt, 0)
self.assertFalse(db._to_delete)
self.assertFalse(db._to_insert)
# Delete inserted certs, commit and check that they were deleted
assert db.exists_all(inserted)
del_cert = inserted.pop()
db.delete(del_cert)
assert not db.exists(del_cert)
db.commit()
assert not db.exists(del_cert)
for cert in inserted:
db.delete(cert)
ins, dlt = db.commit()
self.assertEqual(ins, 0)
self.assertEqual(dlt, len(inserted))
# storage should be empty
self.assertFalse(os.listdir(db.storage).remove(db.CONF_FILENAME))
        # Deleting the same cert multiple times should have no effect
self.assertFalse(db._to_delete)
db.delete('validcert')
blocks_to_delete = {**db._to_delete}
self.assertTrue(db._to_delete)
db.delete('validcert')
self.assertTrue(db._to_delete)
self.assertEqual(blocks_to_delete, db._to_delete)
def test_rollback(self):
"""
Test implementation of CertDB method ROLLBACK
"""
CertFileDB.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
# Test rollback without inserts
db.rollback()
self.assertFalse(db._to_insert)
self.assertFalse(db._to_delete)
# Insert some certificates, rollback and check that blocks are deleted
inserted = insert_test_certs(db, TEST_CERTS_1)
db.rollback()
for cert in inserted:
block_path = db._get_block_path(cert)
assert not os.path.exists(os.path.join(block_path, cert))
# Transaction should be empty
self.assertFalse(db._to_insert)
# Commit some certs, insert other certs and rollback
committed = commit_test_certs(db, TEST_CERTS_1)
inserted = insert_test_certs(db, TEST_CERTS_2)
db.rollback()
# Transaction should be empty
self.assertFalse(db._to_insert)
        # Committed certs should be compressed in zip files
for cert in committed:
assert not os.path.exists(db._get_block_path(cert) + cert)
assert os.path.exists(db._get_block_archive(cert))
        # Rolled-back cert files should not exist
for cert in inserted:
block_path = db._get_block_path(cert)
assert not os.path.exists(os.path.join(block_path, cert))
# Check rollback of delete method
deleted = delete_test_certs(db, TEST_CERTS_1)
self.assertTrue(db._to_delete)
for cert in deleted:
assert not db.exists(cert)
db.rollback()
self.assertFalse(db._to_delete)
# All deleted certs should still exist
assert db.exists_all(deleted)
def test_commit(self):
"""
Test implementation of CertDB method COMMIT
"""
CertFileDB.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE)
# Test commit without inserts
ins, dlt = db.commit()
self.assertEqual(ins, 0)
self.assertEqual(dlt, 0)
self.assertFalse(db._to_insert)
# Insert some certificates and check commit
inserted = insert_test_certs(db, TEST_CERTS_1)
# Certificates and blocks from open transaction should exist
self.assertTrue(db._to_insert)
for certs in db._to_insert.values():
assert certs.issubset(set(inserted))
for cert in inserted:
block_path = db._get_block_path(cert)
assert os.path.exists(os.path.join(block_path, cert))
# check correct number of committed certs
ins, dlt = db.commit()
self.assertEqual(ins, len(inserted))
self.assertEqual(dlt, 0)
# transaction should be empty and certs should be compressed in zip files
self.assertFalse(db._to_insert)
for cert in inserted:
assert not os.path.exists(db._get_block_path(cert) + cert)
assert os.path.exists(db._get_block_archive(cert))
# Insert already persisted certs and some others and commit
inserted_again = insert_test_certs(db, TEST_CERTS_1)
inserted_new = insert_test_certs(db, TEST_CERTS_2)
ins, dlt = db.commit()
# only the other certs should be committed
self.assertEqual(ins, len(inserted_new))
self.assertEqual(dlt, 0)
# and the same ones should be deleted from transaction
for cert in inserted_again:
block_path = db._get_block_path(cert)
assert not os.path.exists(os.path.join(block_path, cert))
# Delete and insert the same not yet persisted cert and commit
valid_cert = ['valid_cert', 'validvalidvalidvalidvalid']
db.delete(valid_cert[0])
db.insert(*valid_cert)
db.commit()
# check that cert is persisted
assert db.exists(valid_cert[0])
assert os.path.exists(db._get_block_archive(valid_cert[0]))
assert not os.path.exists(db._get_block_path(valid_cert[0]) + valid_cert[0])
# Delete and insert the same already persisted cert and commit
valid_cert = ['valid_cert', 'validvalidvalidvalidvalid_new']
db.delete(valid_cert[0])
db.insert(*valid_cert)
db.commit()
# check that the cert was replaced
assert db.exists(valid_cert[0])
self.assertEqual(db.get(valid_cert[0]), valid_cert[1])
def test_parallel_transactions(self):
"""
Test of using multiple instances of CertDB with the same storage.
"""
def test_config_info_maintain(self):
"""
Test maintaining commit HISTORY and INFO upon commit
"""
CertFileDB.setup(self.TEST_STORAGE, maintain_info=True)
db = CertFileDB(self.TEST_STORAGE)
meta_path = os.path.join(db.storage, db.META_FILENAME)
# Insert some certificates and check INFO after commit
committed = commit_test_certs(db, TEST_CERTS_1)
meta = toml.load(meta_path, OrderedDict)
last_commit_nr = str(len(meta['HISTORY']))
self.assertEqual(last_commit_nr, '1')
self.assertEqual(meta['INFO']['number_of_certificates'], len(committed))
self.assertEqual(meta['INFO']['last_commit'], meta['HISTORY'][last_commit_nr]['date'])
self.assertEqual(meta['HISTORY'][last_commit_nr]['inserted'], len(committed))
self.assertEqual(meta['HISTORY'][last_commit_nr]['deleted'], 0)
# Delete all the inserted certs and check INFO after commit
deleted = delete_test_certs(db, TEST_CERTS_1)
db.commit()
meta = toml.load(meta_path, OrderedDict)
last_commit_nr = str(len(meta['HISTORY']))
self.assertEqual(last_commit_nr, '2')
self.assertEqual(meta['INFO']['number_of_certificates'], 0)
self.assertEqual(meta['INFO']['last_commit'], meta['HISTORY'][last_commit_nr]['date'])
self.assertEqual(meta['HISTORY'][last_commit_nr]['inserted'], 0)
self.assertEqual(meta['HISTORY'][last_commit_nr]['deleted'], len(deleted))
# Insert and delete some certs and check INFO after commit
committed = commit_test_certs(db, TEST_CERTS_1)
inserted = insert_test_certs(db, TEST_CERTS_2)
deleted = delete_test_certs(db, TEST_CERTS_1)
db.commit()
meta = toml.load(meta_path, OrderedDict)
last_commit_nr = str(len(meta['HISTORY']))
self.assertEqual(last_commit_nr, '4')
self.assertEqual(meta['INFO']['number_of_certificates'], len(inserted))
self.assertEqual(meta['INFO']['last_commit'], meta['HISTORY'][last_commit_nr]['date'])
self.assertEqual(meta['HISTORY'][last_commit_nr]['inserted'], len(inserted))
self.assertEqual(meta['HISTORY'][last_commit_nr]['deleted'], len(deleted))
def test_zero_structure_level(self):
"""
Test CertFileDB with 0 structure_level
"""
CertFileDB.setup(self.TEST_STORAGE, structure_level=0)
db = CertFileDB(self.TEST_STORAGE)
storage_dir = os.path.join(self.TEST_STORAGE, os.path.basename(self.TEST_STORAGE))
# Commit some certificates and check zipfile
committed = commit_test_certs(db, TEST_CERTS_1)
assert db.exists_all(committed)
assert os.path.exists(storage_dir + '.zip')
        # Insert some certificates and check file existence in the root folder
inserted = insert_test_certs(db, TEST_CERTS_2)
for cert in inserted:
assert os.path.exists(os.path.join(self.TEST_STORAGE, cert))
assert db.exists(cert)
assert db.exists_all(inserted)
        # Rollback and check file cleanup
db.rollback()
for cert in inserted:
assert not os.path.exists(os.path.join(storage_dir, cert))
assert not db.exists(cert)
# Delete inserted certificates and check file cleanup
inserted = insert_test_certs(db, TEST_CERTS_2)
delete_test_certs(db, TEST_CERTS_2)
for cert in inserted:
assert not os.path.exists(os.path.join(storage_dir, cert))
assert not db.exists(cert)
self.assertFalse(db._to_insert)
self.assertFalse(db._to_delete)
# Retrieve and check persisted certs
with open(TEST_CERTS_1) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
self.assertEqual(db.get(cert_id), cert.strip())
# Delete all remaining certificates and check zip cleanup
deleted = delete_test_certs(db, TEST_CERTS_1)
db.commit()
for cert in deleted:
assert not os.path.exists(os.path.join(storage_dir, cert))
assert not db.exists(cert)
assert not os.path.exists(storage_dir + '.zip')
def test_async_commit(self):
"""
        Test implementation of the multiprocessing version of CertDB method COMMIT
"""
CertFileDB.setup(self.TEST_STORAGE, maintain_info=False)
db = CertFileDB(self.TEST_STORAGE, 100)
# Test commit without inserts
ins, dlt = db.commit()
self.assertEqual(ins, 0)
self.assertEqual(dlt, 0)
self.assertFalse(db._to_insert)
# Insert some certificates and check commit
inserted = insert_test_certs(db, TEST_CERTS_1)
# Certificates and blocks from open transaction should exist
for cert in inserted:
block_path = db._get_block_path(cert)
assert os.path.exists(os.path.join(block_path, cert))
# check correct number of committed certs
ins, dlt = db.commit()
self.assertEqual(ins, len(inserted))
self.assertEqual(dlt, 0)
# transaction should be empty and certs should be compressed in zip files
self.assertFalse(db._to_insert)
for cert in inserted:
assert not os.path.exists(db._get_block_path(cert) + cert)
assert os.path.exists(db._get_block_archive(cert))
# Insert already persisted certs and some others and commit
inserted_again = insert_test_certs(db, TEST_CERTS_1)
inserted_new = insert_test_certs(db, TEST_CERTS_2)
ins, dlt = db.commit()
# only the other certs should be committed
self.assertEqual(ins, len(inserted_new))
self.assertEqual(dlt, 0)
# and the same ones should be deleted from transaction
for cert in inserted_again:
block_path = db._get_block_path(cert)
assert not os.path.exists(os.path.join(block_path, cert))
# Delete and insert the same not yet persisted cert and commit
valid_cert = ['valid_cert', 'validvalidvalidvalidvalid']
db.delete(valid_cert[0])
db.insert(*valid_cert)
db.commit()
# check that cert is persisted
assert db.exists(valid_cert[0])
assert os.path.exists(db._get_block_archive(valid_cert[0]))
assert not os.path.exists(db._get_block_path(valid_cert[0]) + valid_cert[0])
# Delete and insert the same already persisted cert and commit
valid_cert = ['valid_cert', 'validvalidvalidvalidvalid_new']
db.delete(valid_cert[0])
db.insert(*valid_cert)
db.commit()
# check that the cert was replaced
assert db.exists(valid_cert[0])
self.assertEqual(db.get(valid_cert[0]), valid_cert[1])
class TestCompositeCertDB(unittest.TestCase):
"""Unit test class of CompositeCertDB class"""
TEST_STORAGE_1 = 'tests/test_storage1'
TEST_STORAGE_2 = 'tests/test_storage2'
TEST_STORAGE_3 = 'tests/test_storage3'
def tearDown(self):
# Clear test storage
shutil.rmtree(self.TEST_STORAGE_1, ignore_errors=True)
shutil.rmtree(self.TEST_STORAGE_2, ignore_errors=True)
shutil.rmtree(self.TEST_STORAGE_3, ignore_errors=True)
def setUp(self):
CertFileDB.setup(self.TEST_STORAGE_1)
CertFileDB.setup(self.TEST_STORAGE_2)
CertFileDB.setup(self.TEST_STORAGE_3)
def test_component_management(self):
"""
Test implementation of CompositeCertDB management methods and design
"""
valid_cert = 'validcertid'
real_db = CertFileDBReadOnly(self.TEST_STORAGE_1)
composite_db_read_only = CompositeCertDBReadOnly()
composite_db = CompositeCertDB()
# Mock method EXISTS
real_db.exists = unittest.mock.MagicMock()
real_db.exists.return_value = False
# Check register/unregister method
composite_db_read_only.register(real_db)
assert not composite_db_read_only.exists(valid_cert)
assert composite_db_read_only.is_registered(real_db)
# component's EXISTS method should be executed
real_db.exists.assert_called_once_with(valid_cert)
composite_db_read_only.unregister(real_db)
# component's EXISTS method should NOT be executed
assert not composite_db_read_only.exists(valid_cert)
self.assertEqual(real_db.exists.call_count, 1)
assert not composite_db_read_only.is_registered(real_db)
# Check registering the same object twice
composite_db_read_only.register(real_db)
composite_db_read_only.register(real_db)
assert not composite_db_read_only.exists(valid_cert)
self.assertEqual(real_db.exists.call_count, 2)
assert composite_db_read_only.is_registered(real_db)
# Check unregistering unknown object
composite_db.unregister(real_db)
assert not composite_db.is_registered(real_db)
assert not composite_db.exists(valid_cert)
# Check registering composite DB into another composite DB
self.assertEqual(real_db.exists.call_count, 2)
composite_db.register(real_db)
composite_db.register(composite_db_read_only)
assert not composite_db.exists(valid_cert)
self.assertEqual(real_db.exists.call_count, 4)
assert composite_db.is_registered(real_db)
assert composite_db.is_registered(composite_db_read_only)
assert composite_db_read_only.is_registered(real_db)
def test_combine_read_only(self):
"""
Test implementation of CompositeCertDB management with mixed component types
"""
valid_cert = ('validcertid', 'adadadadadadadadada')
real_db = CertFileDB(self.TEST_STORAGE_1)
real_db_read_only = CertFileDBReadOnly(self.TEST_STORAGE_2)
composite_db = CompositeCertDB()
# Mock method EXISTS and INSERT
real_db.insert = unittest.mock.MagicMock()
real_db_read_only.insert = unittest.mock.MagicMock()
real_db.exists = unittest.mock.MagicMock()
real_db.exists.return_value = False
real_db_read_only.exists = unittest.mock.MagicMock()
real_db_read_only.exists.return_value = False
# Register both DBs to composite DB and call EXISTS
composite_db.register(real_db)
composite_db.register(real_db_read_only)
assert not composite_db.exists(valid_cert[0])
# both component's EXISTS method should be executed
real_db.exists.assert_called_once_with(valid_cert[0])
real_db_read_only.exists.assert_called_once_with(valid_cert[0])
# Call INSERT and check that only CertFileDB was executed
composite_db.insert(*valid_cert)
real_db.insert.assert_called_once_with(*valid_cert)
assert not real_db_read_only.insert.called
def test_get(self):
"""
Test implementation of CompositeCertDB method GET
"""
real_db = CertFileDB(self.TEST_STORAGE_1)
real_db2 = CertFileDB(self.TEST_STORAGE_2)
real_db_read_only = CertFileDBReadOnly(self.TEST_STORAGE_1)
composite_db = CompositeCertDB()
composite_db.register(real_db)
composite_db.register(real_db2)
composite_db.register(real_db_read_only)
fake_cert_id = 'fakecertid'
# Insert and commit some certificates and retrieve them back
committed = commit_test_certs(composite_db, TEST_CERTS_1)
with open(TEST_CERTS_1) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
self.assertEqual(composite_db.get(cert_id), cert.strip())
# ReadOnly DB should also have it
self.assertEqual(real_db_read_only.get(cert_id), cert.strip())
# Only insert other certificates and retrieve them back
inserted = insert_test_certs(composite_db, TEST_CERTS_2)
with open(TEST_CERTS_2) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
self.assertEqual(composite_db.get(cert_id), cert.strip())
# ReadOnly DB should not have it
self.assertRaises(CertNotAvailableError, real_db_read_only.get, cert_id)
# Rollback and try to retrieve them again
composite_db.rollback()
for cert_id in inserted:
self.assertRaises(CertNotAvailableError, composite_db.get, cert_id)
# Test DELETE method effect
real_db.delete(committed[0])
# compositeDB should still have it in real_db2
assert composite_db.get(committed[0])
composite_db.delete(committed[0])
# compositeDB should still have it in real_db_read_only before commit
assert composite_db.get(committed[0])
composite_db.commit()
# compositeDB should no longer have the cert
self.assertRaises(CertNotAvailableError, composite_db.get, committed[0])
# Test fake certificate that doesn't exist
self.assertRaises(CertNotAvailableError, composite_db.get, fake_cert_id)
def test_export(self):
"""
Test implementation of CompositeCertDB method EXPORT
"""
real_db = CertFileDB(self.TEST_STORAGE_1)
real_db2 = CertFileDB(self.TEST_STORAGE_2)
real_db_read_only = CertFileDBReadOnly(self.TEST_STORAGE_1)
composite_db = CompositeCertDB()
composite_db.register(real_db)
composite_db.register(real_db2)
composite_db.register(real_db_read_only)
fake_cert_id = 'fakecertid'
target_dir = self.TEST_STORAGE_1 + '/export'
os.mkdir(target_dir)
# Insert and commit some certificates and export them
committed = commit_test_certs(composite_db, TEST_CERTS_1)
with open(TEST_CERTS_1) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
expected = os.path.join(target_dir, make_PEM_filename(cert_id))
self.assertEqual(composite_db.export(cert_id, target_dir), expected)
with open(expected) as target:
self.assertEqual(target.read(), cert.strip())
# Check export without unnecessary copying - should copy anyway because persisted
self.assertEqual(composite_db.export(cert_id, target_dir, copy_if_exists=False), expected)
# ReadOnly DB should also have it
self.assertEqual(real_db_read_only.export(cert_id, target_dir), expected)
# Only insert other certificates and retrieve them back
insert_test_certs(composite_db, TEST_CERTS_2)
with open(TEST_CERTS_2) as r_file:
for line in r_file:
cert_id, cert = line.split(',')
expected = os.path.join(target_dir, make_PEM_filename(cert_id))
self.assertEqual(composite_db.export(cert_id, target_dir), expected)
with open(expected) as target:
self.assertEqual(target.read(), cert.strip())
# Check export without unnecessary copying
file = composite_db.export(cert_id, target_dir, copy_if_exists=False)
self.assertNotEqual(file, expected)
with open(file) as target:
self.assertEqual(target.read(), cert.strip())
# ReadOnly DB should not have it
self.assertRaises(CertNotAvailableError, real_db_read_only.export, cert_id, target_dir)
# Rollback and try to retrieve them again
composite_db.rollback()
r_file.seek(0)
for line in r_file:
cert_id = line.split(',')[0]
self.assertRaises(CertNotAvailableError, composite_db.export, cert_id, target_dir)
# Test DELETE method effect
real_db.delete(committed[0])
# compositeDB should still have it in real_db2
assert composite_db.export(committed[0], target_dir)
composite_db.delete(committed[0])
# compositeDB should still have it in real_db_read_only before commit
assert composite_db.export(committed[0], target_dir)
composite_db.commit()
# compositeDB should no longer have the cert
self.assertRaises(CertNotAvailableError, composite_db.export, committed[0], target_dir)
# Test fake certificate that doesn't exist
self.assertRaises(CertNotAvailableError, composite_db.export, fake_cert_id, target_dir)
def test_exists(self):
"""
Test implementation of CompositeCertDB method EXISTS
"""
real_db = CertFileDB(self.TEST_STORAGE_1)
real_db2 = CertFileDB(self.TEST_STORAGE_2)
real_db_read_only = CertFileDBReadOnly(self.TEST_STORAGE_1)
composite_db = CompositeCertDB()
composite_db.register(real_db)
composite_db.register(real_db2)
composite_db.register(real_db_read_only)
fake_cert = 'fakecertid'
# Insert and commit some certificates and check if exists
committed = commit_test_certs(composite_db, TEST_CERTS_1)
for cert in committed:
assert composite_db.exists(cert)
# ReadOnly DB should also have it
assert real_db_read_only.exists(cert)
assert composite_db.exists_all(committed)
# Only insert other certificates and check if exists
inserted = insert_test_certs(composite_db, TEST_CERTS_2)
for cert in inserted:
assert composite_db.exists(cert)
# ReadOnly DB should NOT have it
assert not real_db_read_only.exists(cert)
assert composite_db.exists_all(inserted)
# Test DELETE method effect
real_db.delete(committed[0])
# compositeDB should still have it in real_db2
assert composite_db.exists(committed[0])
composite_db.delete(committed[0])
# compositeDB should still have it in real_db_read_only before commit
assert composite_db.exists(committed[0])
composite_db.commit()
        # compositeDB should no longer have the cert, but the read-only DB's cache still holds it
assert not real_db.exists(committed[0])
assert not real_db2.exists(committed[0])
        # a failed GET should clear the cached entry if it appears invalidated
self.assertRaises(CertNotAvailableError, real_db_read_only.get, committed[0])
assert not real_db_read_only.exists(committed[0])
        # Have one cert in one DB and another cert in the other DB, then check the EXISTS method
real_db.delete(committed[2])
assert not real_db.exists(committed[2])
real_db2.delete(committed[3])
assert not real_db2.exists(committed[3])
# composite_db should return True
assert composite_db.exists(committed[2])
assert composite_db.exists(committed[3])
assert composite_db.exists_all([committed[2], committed[3]])
# Test fake certificate that doesn't exist
committed.append(fake_cert)
assert not composite_db.exists(fake_cert)
assert not composite_db.exists_all(committed)
def test_insert(self):
"""
Test implementation of CompositeCertDB method INSERT
"""
real_db = CertFileDB(self.TEST_STORAGE_1)
real_db2 = CertFileDB(self.TEST_STORAGE_2)
composite_db = CompositeCertDB()
composite_db.register(real_db)
composite_db.register(real_db2)
# Insert some invalid certificates
self.assertRaises(CertInvalidError, composite_db.insert, None, None)
self.assertRaises(CertInvalidError, composite_db.insert, '', '')
self.assertRaises(CertInvalidError, composite_db.insert, '', 'valid')
self.assertRaises(CertInvalidError, composite_db.insert, 'valid', None)
# Insert some valid certificates
inserted = insert_test_certs(composite_db, TEST_CERTS_1)
blocks = {**real_db._to_insert}
blocks2 = {**real_db2._to_insert}
        # the open transaction should contain the certificates and the cert files should exist
self.assertTrue(real_db._to_insert)
self.assertTrue(real_db2._to_insert)
for cert in inserted:
block_path = real_db._get_block_path(cert)
block_path2 = real_db2._get_block_path(cert)
assert os.path.exists(os.path.join(block_path, cert))
assert os.path.exists(os.path.join(block_path2, cert))
# Insert different certificates under the same IDs
certs = {}
with open(TEST_CERTS_1) as r_file:
for line in r_file:
els = [e.strip() for e in line.split(',')]
composite_db.insert(els[0], els[1] + '_open')
certs[els[0]] = els[1]
# IDs should be same and certificates should not be changed
self.assertTrue(blocks == real_db._to_insert)
self.assertTrue(blocks2 == real_db2._to_insert)
for k, v in certs.items():
self.assertTrue(real_db.get(k) == v)
self.assertTrue(real_db2.get(k) == v)
# Commit transaction and commit different certificates under the same IDs
composite_db.commit()
self.assertFalse(real_db._to_insert)
self.assertFalse(real_db2._to_insert)
certs = {}
with open(TEST_CERTS_1) as r_file:
for line in r_file:
els = [el.strip() for el in line.split(',')]
composite_db.insert(els[0], els[1] + '_commit')
certs[els[0]] = els[1]
# IDs should be same and persisted certificates should not be changed
self.assertTrue(blocks == real_db._to_insert)
self.assertTrue(blocks2 == real_db2._to_insert)
composite_db.commit()
self.assertFalse(real_db._to_insert)
self.assertFalse(real_db2._to_insert)
for k, v in certs.items():
self.assertTrue(real_db.get(k) == v)
self.assertTrue(real_db2.get(k) == v)
def test_delete(self):
"""
Test implementation of CompositeCertDB method DELETE
"""
real_db = CertFileDB(self.TEST_STORAGE_1)
real_db2 = CertFileDB(self.TEST_STORAGE_2)
composite_db = CompositeCertDB()
composite_db.register(real_db)
composite_db.register(real_db2)
# Delete some invalid certificates
self.assertRaises(CertInvalidError, composite_db.delete, None)
self.assertRaises(CertInvalidError, composite_db.delete, '')
# Insert and delete the same certs before commit
inserted = insert_test_certs(composite_db, TEST_CERTS_1)
deleted = delete_test_certs(composite_db, TEST_CERTS_1)
# transaction should be clear and files should not exist
self.assertFalse(real_db._to_delete)
self.assertFalse(real_db2._to_delete)
self.assertFalse(real_db._to_insert)
self.assertFalse(real_db2._to_insert)
for cert in inserted:
block_path = real_db._get_block_path(cert)
block_path2 = real_db2._get_block_path(cert)
assert not os.path.exists(os.path.join(block_path, cert))
assert not os.path.exists(os.path.join(block_path2, cert))
# Delete and insert the same certs before commit
deleted = delete_test_certs(composite_db, TEST_CERTS_1)
inserted = insert_test_certs(composite_db, TEST_CERTS_1)
# transaction should contain deleted and inserted certificates
self.assertTrue(real_db._to_delete)
self.assertTrue(real_db2._to_delete)
self.assertTrue(real_db._to_insert)
self.assertTrue(real_db2._to_insert)
for certs in real_db._to_delete.values():
assert certs.issubset(set(deleted))
for certs in real_db2._to_delete.values():
assert certs.issubset(set(deleted))
for certs in real_db._to_insert.values():
assert certs.issubset(set(inserted))
for certs in real_db2._to_insert.values():
assert certs.issubset(set(inserted))
# and files should exist
for cert in inserted:
block_path = real_db._get_block_path(cert)
block_path2 = real_db2._get_block_path(cert)
assert os.path.exists(os.path.join(block_path, cert))
assert os.path.exists(os.path.join(block_path2, cert))
# now commit and check that files were persisted
ins, dlt = composite_db.commit()
# the certs should be only inserted
self.assertEqual(ins, len(inserted))
self.assertEqual(dlt, 0)
self.assertFalse(real_db._to_delete)
self.assertFalse(real_db2._to_delete)
self.assertFalse(real_db._to_insert)
self.assertFalse(real_db2._to_insert)
# Delete inserted certs, commit and check that they were deleted
assert composite_db.exists_all(inserted)
del_cert = inserted.pop()
composite_db.delete(del_cert)
assert not real_db.exists(del_cert)
assert not real_db2.exists(del_cert)
composite_db.commit()
assert not real_db.exists(del_cert)
assert not real_db2.exists(del_cert)
for cert in inserted:
composite_db.delete(cert)
ins, dlt = composite_db.commit()
self.assertEqual(ins, 0)
self.assertEqual(dlt, len(inserted))
# storage should be empty
self.assertFalse(os.listdir(real_db.storage).remove(real_db.CONF_FILENAME))
self.assertFalse(os.listdir(real_db2.storage).remove(real_db2.CONF_FILENAME))
        # Deleting the same cert multiple times should have no effect
self.assertFalse(real_db._to_delete)
self.assertFalse(real_db2._to_delete)
composite_db.delete('validcert')
blocks_to_delete = {**real_db._to_delete}
blocks_to_delete2 = {**real_db2._to_delete}
self.assertTrue(real_db._to_delete)
self.assertTrue(real_db2._to_delete)
composite_db.delete('validcert')
self.assertTrue(real_db._to_delete)
self.assertTrue(real_db2._to_delete)
self.assertEqual(blocks_to_delete, real_db._to_delete)
self.assertEqual(blocks_to_delete2, real_db2._to_delete)
def test_commit(self):
"""
Test implementation of CompositeCertDB method COMMIT
"""
real_db = CertFileDB(self.TEST_STORAGE_1)
real_db2 = CertFileDB(self.TEST_STORAGE_2)
composite_db = CompositeCertDB()
composite_db.register(real_db)
composite_db.register(real_db2)
# Test commit without inserts
ins, dlt = composite_db.commit()
self.assertEqual(ins, 0)
self.assertEqual(dlt, 0)
# Insert some certificates and check correct number of committed certs
inserted = insert_test_certs(composite_db, TEST_CERTS_1)
ins, dlt = composite_db.commit()
self.assertEqual(ins, len(inserted))
self.assertEqual(dlt, 0)
# transaction should be empty and certs should be compressed in zip files
self.assertFalse(real_db._to_insert)
self.assertFalse(real_db2._to_insert)
for cert in inserted:
block_path = real_db._get_block_path(cert)
block_path2 = real_db2._get_block_path(cert)
archive_path = real_db._get_block_archive(cert)
archive_path2 = real_db2._get_block_archive(cert)
assert not os.path.exists(os.path.join(block_path, cert))
assert not os.path.exists(os.path.join(block_path2, cert))
assert os.path.exists(archive_path)
assert os.path.exists(archive_path2)
# Insert already persisted certs and some others and commit
inserted_again = insert_test_certs(composite_db, TEST_CERTS_1)
inserted_new = insert_test_certs(composite_db, TEST_CERTS_2)
ins, dlt = composite_db.commit()
# only the other certs should be committed
self.assertEqual(ins, len(inserted_new))
self.assertEqual(dlt, 0)
        # and the already persisted ones should NOT be committed again
for cert in inserted_again:
block_path = real_db._get_block_path(cert)
block_path2 = real_db2._get_block_path(cert)
assert not os.path.exists(os.path.join(block_path, cert))
assert not os.path.exists(os.path.join(block_path2, cert))
# Delete and insert the same not yet persisted cert and commit
valid_cert = ['valid_cert', 'validvalidvalidvalidvalid']
composite_db.delete(valid_cert[0])
composite_db.insert(*valid_cert)
composite_db.commit()
# check that cert is persisted
block_path = real_db._get_block_path(valid_cert[0])
block_path2 = real_db2._get_block_path(valid_cert[0])
archive_path = real_db._get_block_archive(valid_cert[0])
archive_path2 = real_db2._get_block_archive(valid_cert[0])
assert composite_db.exists(valid_cert[0])
assert not os.path.exists(os.path.join(block_path, valid_cert[0]))
assert not os.path.exists(os.path.join(block_path2, valid_cert[0]))
assert os.path.exists(archive_path)
assert os.path.exists(archive_path2)
# Delete and insert the same already persisted cert and commit
valid_cert = ['valid_cert', 'validvalidvalidvalidvalid_new']
composite_db.delete(valid_cert[0])
composite_db.insert(*valid_cert)
composite_db.commit()
# check that the cert was replaced
assert composite_db.exists(valid_cert[0])
self.assertEqual(real_db.get(valid_cert[0]), valid_cert[1])
self.assertEqual(real_db2.get(valid_cert[0]), valid_cert[1])
def test_rollback(self):
"""Test implementation of CompositeCertDB method ROLLBACK"""
real_db = CertFileDB(self.TEST_STORAGE_1)
real_db2 = CertFileDB(self.TEST_STORAGE_2)
composite_db = CompositeCertDB()
composite_db.register(real_db)
composite_db.register(real_db2)
# Test rollback without inserts
composite_db.rollback()
# Insert some certificates, rollback and check that blocks are deleted
inserted = insert_test_certs(composite_db, TEST_CERTS_1)
composite_db.rollback()
self.assertFalse(real_db._to_insert)
self.assertFalse(real_db2._to_insert)
for cert in inserted:
block_path = real_db._get_block_path(cert)
block_path2 = real_db2._get_block_path(cert)
assert not os.path.exists(os.path.join(block_path, cert))
assert not os.path.exists(os.path.join(block_path2, cert))
# Commit some certs, insert other certs and rollback
committed = commit_test_certs(composite_db, TEST_CERTS_1)
inserted = insert_test_certs(composite_db, TEST_CERTS_2)
composite_db.rollback()
# Transaction should be empty
self.assertFalse(real_db._to_insert)
self.assertFalse(real_db2._to_insert)
        # Committed certs should be compressed in zip files
for cert in committed:
block_path = real_db._get_block_path(cert)
block_path2 = real_db2._get_block_path(cert)
archive_path = real_db._get_block_archive(cert)
archive_path2 = real_db2._get_block_archive(cert)
assert not os.path.exists(os.path.join(block_path, cert))
assert not os.path.exists(os.path.join(block_path2, cert))
assert os.path.exists(archive_path)
assert os.path.exists(archive_path2)
        # Rolled-back cert files should not exist
for cert in inserted:
block_path = real_db._get_block_path(cert)
block_path2 = real_db2._get_block_path(cert)
archive_path = real_db._get_block_archive(cert)
archive_path2 = real_db2._get_block_archive(cert)
assert not os.path.exists(os.path.join(block_path, cert))
assert not os.path.exists(os.path.join(block_path2, cert))
assert not os.path.exists(archive_path)
assert not os.path.exists(archive_path2)
# Check rollback of delete method
deleted = delete_test_certs(composite_db, TEST_CERTS_1)
self.assertTrue(real_db._to_delete)
self.assertTrue(real_db2._to_delete)
for cert in deleted:
assert not composite_db.exists(cert)
assert not real_db.exists(cert)
assert not real_db2.exists(cert)
composite_db.rollback()
self.assertFalse(real_db._to_delete)
self.assertFalse(real_db2._to_delete)
# All deleted certs should still exist
assert composite_db.exists_all(deleted)
assert real_db.exists_all(deleted)
assert real_db2.exists_all(deleted)
if __name__ == '__main__':
unittest.main()
|
import socket, sys
host = ''
port = 5000
backlog = 5
size = 1024
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
s.bind((host,port))
s.listen(backlog)
while 1:
client, address = s.accept()
data = client.recv(size)
client.send(data)
sys.stdout.write(data)
client.close()
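# Illustrative client sketch (not part of the original script), assuming the
# echo server above is running locally on port 5000:
#
#   import socket
#   c = socket.create_connection(('localhost', 5000))
#   c.sendall(b'hello')
#   print(c.recv(1024))
#   c.close()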
|
'''
Define a function reverse() that computes the reversal
of a string.
For example, reverse("I am testing")
should return the string "gnitset ma I".
'''
def reverse(x):
    new = []
    for i in reversed(range(len(x))):
        new.append(x[i])
    return ''.join(new)
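# For comparison, an equivalent one-liner using slice notation (an illustrative
# alternative, not part of the original exercise):
def reverse_by_slicing(x):
    """Return the reversal of x using extended slice notation."""
    return x[::-1]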
print(reverse('I am testing'))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
from mock import Mock
import snowflake.connector.telemetry
def test_telemetry_data_to_dict():
"""Tests that TelemetryData instances are properly converted to dicts."""
assert snowflake.connector.telemetry.TelemetryData({}, 2000).to_dict() == {
"message": {},
"timestamp": "2000",
}
d = {"type": "test", "query_id": "1", "value": 20}
assert snowflake.connector.telemetry.TelemetryData(d, 1234).to_dict() == {
"message": d,
"timestamp": "1234",
}
def get_client_and_mock():
rest_call = Mock()
rest_call.return_value = {"success": True}
rest = Mock()
rest.attach_mock(rest_call, "request")
client = snowflake.connector.telemetry.TelemetryClient(rest, 2)
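    # The second constructor argument (2) appears to be the client's flush
    # threshold; the flush tests below rely on a batch size of two (an
    # assumption based on those tests, not on the connector's documentation).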
return client, rest_call
def test_telemetry_simple_flush():
"""Tests that metrics are properly enqueued and sent to telemetry."""
client, rest_call = get_client_and_mock()
client.add_log_to_batch(snowflake.connector.telemetry.TelemetryData({}, 2000))
assert rest_call.call_count == 0
client.add_log_to_batch(snowflake.connector.telemetry.TelemetryData({}, 3000))
assert rest_call.call_count == 1
def test_telemetry_close():
"""Tests that remaining metrics are flushed on close."""
client, rest_call = get_client_and_mock()
client.add_log_to_batch(snowflake.connector.telemetry.TelemetryData({}, 2000))
assert rest_call.call_count == 0
client.close()
assert rest_call.call_count == 1
assert client.is_closed
def test_telemetry_close_empty():
"""Tests that no calls are made on close if there are no metrics to flush."""
client, rest_call = get_client_and_mock()
client.close()
assert rest_call.call_count == 0
assert client.is_closed
def test_telemetry_send_batch():
"""Tests that metrics are sent with the send_batch method."""
client, rest_call = get_client_and_mock()
client.add_log_to_batch(snowflake.connector.telemetry.TelemetryData({}, 2000))
assert rest_call.call_count == 0
client.send_batch()
assert rest_call.call_count == 1
def test_telemetry_send_batch_empty():
"""Tests that send_batch does nothing when there are no metrics to send."""
client, rest_call = get_client_and_mock()
client.send_batch()
assert rest_call.call_count == 0
def test_telemetry_send_batch_clear():
"""Tests that send_batch clears the first batch and will not send anything on a second call."""
client, rest_call = get_client_and_mock()
client.add_log_to_batch(snowflake.connector.telemetry.TelemetryData({}, 2000))
assert rest_call.call_count == 0
client.send_batch()
assert rest_call.call_count == 1
client.send_batch()
assert rest_call.call_count == 1
def test_telemetry_auto_disable():
"""Tests that the client will automatically disable itself if a request fails."""
client, rest_call = get_client_and_mock()
rest_call.return_value = {"success": False}
client.add_log_to_batch(snowflake.connector.telemetry.TelemetryData({}, 2000))
assert client.is_enabled()
client.send_batch()
assert not client.is_enabled()
def test_telemetry_add_batch_disabled():
"""Tests that the client will not add logs if disabled."""
client, _ = get_client_and_mock()
client.disable()
client.add_log_to_batch(snowflake.connector.telemetry.TelemetryData({}, 2000))
assert client.buffer_size() == 0
def test_telemetry_send_batch_disabled():
"""Tests that the client will not send logs if disabled."""
client, rest_call = get_client_and_mock()
client.add_log_to_batch(snowflake.connector.telemetry.TelemetryData({}, 2000))
assert client.buffer_size() == 1
client.disable()
client.send_batch()
assert client.buffer_size() == 1
assert rest_call.call_count == 0
|
import scipy as sc
import math as m
import scipy.linalg as linalg
import scipy.optimize as optimize
import scipy.stats as stats
from generate_data import read_data
import matplotlib
matplotlib.use('Agg')
from pylab import *
from matplotlib.pyplot import *
from matplotlib import rc
def ex6a(exclude=sc.array([1,2,3,4]),plotfilename='ex6a.png'):
"""ex6a: solve exercise 6 by optimization of the objective function
Input:
exclude - ID numbers to exclude from the analysis
plotfilename - filename for the output plot
Output:
plot
History:
2009-06-01 - Written - Bovy (NYU)
"""
#Read the data
data= read_data('data_yerr.dat')
ndata= len(data)
nsample= ndata- len(exclude)
#First find the chi-squared solution, which we will use as an
    #initial guess for the bi-exponential optimization
    #Put the data in the appropriate arrays and matrices
Y= sc.zeros(nsample)
X= sc.zeros(nsample)
A= sc.ones((nsample,2))
C= sc.zeros((nsample,nsample))
yerr= sc.zeros(nsample)
jj= 0
for ii in range(ndata):
if sc.any(exclude == data[ii][0]):
pass
else:
Y[jj]= data[ii][1][1]
X[jj]= data[ii][1][0]
A[jj,1]= data[ii][1][0]
C[jj,jj]= data[ii][2]**2.
yerr[jj]= data[ii][2]
jj= jj+1
#Now compute the best fit and the uncertainties
bestfit= sc.dot(linalg.inv(C),Y.T)
bestfit= sc.dot(A.T,bestfit)
bestfitvar= sc.dot(linalg.inv(C),A)
bestfitvar= sc.dot(A.T,bestfitvar)
bestfitvar= linalg.inv(bestfitvar)
bestfit= sc.dot(bestfitvar,bestfit)
#Now optimize the bi-exponential objective function
bestfitbiexp1= optimize.fmin(logbiexp,bestfit,(X,Y,yerr),disp=False)
#Restart the optimization once using a different method
bestfitbiexp= optimize.fmin_powell(logbiexp,bestfitbiexp1,(X,Y,yerr),disp=False)
if linalg.norm(bestfitbiexp-bestfitbiexp1) > 10**-12:
if linalg.norm(bestfitbiexp-bestfitbiexp1) < 10**-6:
print("Different optimizers give slightly different results...")
else:
print("Different optimizers give rather different results...")
print("The norm of the results differs by %g" % linalg.norm(bestfitbiexp-bestfitbiexp1))
#Calculate X
XX= 0.
for jj in range(nsample):
XX= XX+m.fabs(Y[jj]-bestfitbiexp[1]*X[jj]-bestfitbiexp[0])/yerr[jj]
#Now plot the solution
fig_width=5
fig_height=5
fig_size = [fig_width,fig_height]
params = {'axes.labelsize': 12,
#'text.fontsize': 11,
'legend.fontsize': 12,
'xtick.labelsize':10,
'ytick.labelsize':10,
'text.usetex': True,
'figure.figsize': fig_size}
rcParams.update(params)
#Plot data
errorbar(X,Y,yerr,color='k',marker='o',linestyle='None')
xlabel(r'$x$')
ylabel(r'$y$')
#Plot the best fit line
xlim(0,300)
ylim(0,700)
xmin, xmax= xlim()
nsamples= 1001
xs= sc.linspace(xmin,xmax,nsamples)
ys= sc.zeros(nsamples)
for ii in range(nsamples):
ys[ii]= bestfitbiexp[0]+bestfitbiexp[1]*xs[ii]
if bestfitbiexp[0] < 0:
sgn_str= '-'
else:
sgn_str= '+'
label= r'$y = %4.2f\, x'% (bestfitbiexp[1]) +sgn_str+ '%4.0f ' % m.fabs(bestfitbiexp[0])+r'; X = '+ '%3.1f' % XX+'$'
plot(xs,ys,color='k',ls='--',label=label)
l=legend(loc=(.3,.1),numpoints=8)
l.draw_frame(False)
plot(xs,ys,'k--')
xlim(0,300)
ylim(0,700)
print('Creating: ', plotfilename)
savefig(plotfilename,format='png')
return 0
def logbiexp(mb,X,Y,yerr):
"""logbiexp: evaluates the logarithm of the objective function
Input:
mb=(b,m) - as in y=mx+b
X - independent variable
Y - dependent variable
yerr - error on the Y
History:
2009-06-01 - Written - Bovy (NYU)
"""
out= 0.
for ii in range(len(X)):
out= out+ m.fabs(Y[ii]-mb[1]*X[ii]-mb[0])/yerr[ii]
return out
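# Quick sanity check of the objective (illustrative values, not part of the exercise):
# each point contributes |y - (m*x + b)| / yerr, so
# logbiexp((0., 1.), sc.array([1.]), sc.array([3.]), sc.array([0.5])) returns
# |3 - 1*1 - 0| / 0.5 = 4.0.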
def ex6b(exclude=sc.array([1,2,3,4]),plotfilename='ex6b.png'):
"""ex6b: solve exercise 6 using a simulated annealing optimization
Input:
exclude - ID numbers to exclude from the analysis
plotfilename - filename for the output plot
Output:
plot
History:
2009-06-02 - Written - Bovy (NYU)
"""
#Read the data
data= read_data('data_yerr.dat')
ndata= len(data)
nsample= ndata- len(exclude)
#First find the chi-squared solution, which we will use as an
    #initial guess for the bi-exponential optimization
    #Put the data in the appropriate arrays and matrices
Y= sc.zeros(nsample)
X= sc.zeros(nsample)
A= sc.ones((nsample,2))
C= sc.zeros((nsample,nsample))
yerr= sc.zeros(nsample)
jj= 0
for ii in range(ndata):
if sc.any(exclude == data[ii][0]):
pass
else:
Y[jj]= data[ii][1][1]
X[jj]= data[ii][1][0]
A[jj,1]= data[ii][1][0]
C[jj,jj]= data[ii][2]**2.
yerr[jj]= data[ii][2]
jj= jj+1
#Now compute the best fit and the uncertainties
bestfit= sc.dot(linalg.inv(C),Y.T)
bestfit= sc.dot(A.T,bestfit)
bestfitvar= sc.dot(linalg.inv(C),A)
bestfitvar= sc.dot(A.T,bestfitvar)
bestfitvar= linalg.inv(bestfitvar)
bestfit= sc.dot(bestfitvar,bestfit)
initialguess= sc.array([bestfit[0],bestfit[1]])
#With this initial guess start off the annealing procedure
initialchisq= nsample*10.
chisq= initialchisq
bestfit= initialguess
nonglobal= True
print("Performing 10 runs of the simulating basinhopping optimization algorithm")
for jj in range(10):#Do ten runs of the sa algorithm
sc.random.seed(jj+1) #In the interest of reproducibility (if that's a word)
minimizer_kwargs = {"args": (X,Y,yerr)}
bestfitbiexp= optimize.basinhopping(logbiexp,x0=initialguess,minimizer_kwargs=minimizer_kwargs,niter=100)
# print(bestfitbiexp.keys()) # dict_keys(['lowest_optimization_result',
# # 'message', 'minimization_failures', 'nit', 'x', 'nfev', 'njev', 'fun'])
# print(bestfitbiexp.x, bestfitbiexp.fun)
# print(chisq)
# print(bestfit)
print(bestfitbiexp)
        # NOTE: scipy.optimize.anneal returned a tuple where res[0] was the obtained
        # minimum and res[1] the function value at that minimum, whereas
        # basinhopping returns an OptimizeResult object that exposes the same
        # information through its .x and .fun attributes (plus .status, .message, etc.).
if bestfitbiexp.fun < chisq:
bestfit= bestfitbiexp.x
chisq= bestfitbiexp.fun
bestfitsbiexp= bestfit
#Now plot the solution
fig_width=5
fig_height=5
fig_size = [fig_width,fig_height]
params = {'axes.labelsize': 12,
#'text.fontsize': 11,
'legend.fontsize': 12,
'xtick.labelsize':10,
'ytick.labelsize':10,
'text.usetex': True,
'figure.figsize': fig_size}
rcParams.update(params)
#Plot data
errorbar(X,Y,yerr,color='k',marker='o',linestyle='None')
xlabel(r'$x$')
ylabel(r'$y$')
xlim(0,300)
ylim(0,700)
xmin, xmax= xlim()
nsamples= 1001
xs= sc.linspace(xmin,xmax,nsamples)
ys= sc.zeros(nsamples)
for ii in range(nsamples):
ys[ii]= bestfitsbiexp[0]+bestfitsbiexp[1]*xs[ii]
if bestfitsbiexp[0] < 0:
sgn_str= '-'
else:
sgn_str= '+'
label= r'$y = %4.2f\, x'% (bestfitsbiexp[1]) +sgn_str+ '%4.0f ' % m.fabs(bestfitsbiexp[0])+r'; X = '+ '%3.1f' % chisq+'$'
plot(xs,ys,color='k',ls='--',label=label)
l=legend(loc=(.3,.1),numpoints=8)
l.draw_frame(False)
xlim(0,300)
ylim(0,700)
print('Creating: ', plotfilename)
savefig(plotfilename,format='png')
return 0
def ex6c(exclude=sc.array([1,2,3,4]),plotfilename='ex6c.png',nburn=100,nsamples=10000,parsigma=[5,.075]):
"""ex6c: solve exercise 6 using MCMC sampling
Input:
exclude - ID numbers to exclude from the analysis
plotfilename - filename for the output plot
nburn - number of burn-in samples
nsamples - number of samples to take after burn-in
parsigma - proposal distribution width (Gaussian)
Output:
plot
History:
2009-06-02 - Written - Bovy (NYU)
"""
sc.random.seed(100) #In the interest of reproducibility (if that's a word)
#Read the data
data= read_data('data_yerr.dat')
ndata= len(data)
nsample= ndata- len(exclude)
#First find the chi-squared solution, which we will use as an
    #initial guess for the bi-exponential optimization
#Put the data in the appropriate arrays and matrices
Y= sc.zeros(nsample)
X= sc.zeros(nsample)
A= sc.ones((nsample,2))
C= sc.zeros((nsample,nsample))
yerr= sc.zeros(nsample)
jj= 0
for ii in range(ndata):
if sc.any(exclude == data[ii][0]):
pass
else:
Y[jj]= data[ii][1][1]
X[jj]= data[ii][1][0]
A[jj,1]= data[ii][1][0]
C[jj,jj]= data[ii][2]**2.
yerr[jj]= data[ii][2]
jj= jj+1
#Now compute the best fit and the uncertainties
bestfit= sc.dot(linalg.inv(C),Y.T)
bestfit= sc.dot(A.T,bestfit)
bestfitvar= sc.dot(linalg.inv(C),A)
bestfitvar= sc.dot(A.T,bestfitvar)
bestfitvar= linalg.inv(bestfitvar)
bestfit= sc.dot(bestfitvar,bestfit)
initialguess= sc.array([bestfit[0],bestfit[1]])
#With this initial guess start off the sampling procedure
initialX= 0.
for jj in range(nsample):
initialX= initialX+m.fabs(Y[jj]-bestfit[1]*X[jj]-bestfit[0])/yerr[jj]
currentX= initialX
bestX= initialX
bestfit= initialguess
currentguess= initialguess
naccept= 0
for jj in range(nburn+nsamples):
#Draw a sample from the proposal distribution
newsample= sc.zeros(2)
newsample[0]= currentguess[0]+stats.norm.rvs()*parsigma[0]
newsample[1]= currentguess[1]+stats.norm.rvs()*parsigma[1]
#Calculate the objective function for the newsample
newX= logbiexp(newsample,X,Y,yerr)
#Accept or reject
#Reject with the appropriate probability
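        # Metropolis rule: with exp(-X) as the unnormalized target density, the
        # proposal is always accepted when newX < currentX and otherwise accepted
        # with probability exp(currentX - newX).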
u= stats.uniform.rvs()
if u < m.exp(currentX-newX):
#Accept
currentX= newX
currentguess= newsample
naccept= naccept+1
if currentX < bestX:
bestfit= currentguess
bestX= currentX
bestfitsbiexp= bestfit
if double(naccept)/(nburn+nsamples) < .5 or double(naccept)/(nburn+nsamples) > .8:
print("Acceptance ratio was "+str(double(naccept)/(nburn+nsamples)))
#Now plot the solution
fig_width=5
fig_height=5
fig_size = [fig_width,fig_height]
params = {'axes.labelsize': 12,
#'text.fontsize': 11,
'legend.fontsize': 12,
'xtick.labelsize':10,
'ytick.labelsize':10,
'text.usetex': True,
'figure.figsize': fig_size}
rcParams.update(params)
#Plot data
errorbar(X,Y,yerr,color='k',marker='o',linestyle='None')
xlabel(r'$x$')
ylabel(r'$y$')
xlim(0,300)
ylim(0,700)
xmin, xmax= xlim()
nsamples= 1001
xs= sc.linspace(xmin,xmax,nsamples)
ys= sc.zeros(nsamples)
for ii in range(nsamples):
ys[ii]= bestfitsbiexp[0]+bestfitsbiexp[1]*xs[ii]
if bestfitsbiexp[0] < 0:
sgn_str= '-'
else:
sgn_str= '+'
label= r'$y = %4.2f\, x'% (bestfitsbiexp[1]) +sgn_str+ '%4.0f ' % m.fabs(bestfitsbiexp[0])+r'; X = '+ '%3.1f' % bestX+'$'
plot(xs,ys,color='k',ls='--',label=label)
l=legend(loc=(.3,.1),numpoints=8)
l.draw_frame(False)
xlim(0,300)
ylim(0,700)
print('Creating: ', plotfilename)
savefig(plotfilename,format='png')
return 0
if __name__ == '__main__':
# run the program
# ex6a()
# ex6b()
ex6c() |
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OOD utilities for CIFAR-10 and CIFAR-100."""
import tensorflow as tf
import uncertainty_baselines as ub
def DempsterShaferUncertainty(logits):
"""Defines the Dempster-Shafer Uncertainty for output logits.
Under the Dempster-Shafer (DS) formulation of a multi-class model, the
predictive uncertainty can be assessed as K/(K + sum(exp(logits))).
  This uncertainty metric directly measures the magnitude of the model logits,
  and is more appropriate for a model that directly trains the magnitude of
  logits and uses this magnitude to quantify uncertainty (e.g., [1]).
See Equation (1) of [1] for full detail.
Args:
logits: (tf.Tensor) logits of model prediction, shape (batch_size,
num_classes).
Returns:
(tf.Tensor) DS uncertainty estimate, shape (batch_size, )
"""
num_classes = tf.shape(logits)[-1]
num_classes = tf.cast(num_classes, dtype=logits.dtype)
belief_mass = tf.reduce_sum(tf.exp(logits), axis=-1)
return num_classes / (belief_mass + num_classes)
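# Example: for all-zero logits over 10 classes the belief mass is sum(exp(0)) = 10,
# so DempsterShaferUncertainty(tf.zeros([2, 10])) evaluates to [0.5, 0.5].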
def create_ood_metrics(ood_dataset_names, tpr_list=(0.95,)):
"""Create OOD metrics."""
ood_metrics = {}
for dataset_name in ood_dataset_names:
ood_dataset_name = f'ood/{dataset_name}'
ood_metrics.update({
f'{ood_dataset_name}_auroc':
tf.keras.metrics.AUC(curve='ROC', num_thresholds=100000),
f'{ood_dataset_name}_auprc':
tf.keras.metrics.AUC(curve='PR', num_thresholds=100000),
})
if tpr_list:
for tpr in tpr_list:
tpr = float(tpr)
tpr_percent = int(tpr * 100)
ood_metrics.update({
f'{ood_dataset_name}_(1-fpr)@{tpr_percent}tpr':
tf.keras.metrics.SpecificityAtSensitivity(
tpr, num_thresholds=100000)
})
return ood_metrics
def load_ood_datasets(ood_dataset_names,
in_dataset_builder,
in_dataset_validation_percent,
batch_size,
drop_remainder=False):
"""Load OOD datasets."""
steps = {}
datasets = {}
for ood_dataset_name in ood_dataset_names:
ood_dataset_class = ub.datasets.DATASETS[ood_dataset_name]
ood_dataset_class = ub.datasets.make_ood_dataset(ood_dataset_class)
# If the OOD datasets are not CIFAR10/CIFAR100, we normalize by CIFAR
# statistics, since all test datasets should be preprocessed the same.
if 'cifar' not in ood_dataset_name:
ood_dataset_builder = ood_dataset_class(
in_dataset_builder,
split='test',
validation_percent=in_dataset_validation_percent,
normalize_by_cifar=True,
drop_remainder=drop_remainder)
else:
ood_dataset_builder = ood_dataset_class(
in_dataset_builder,
split='test',
validation_percent=in_dataset_validation_percent,
drop_remainder=drop_remainder)
ood_dataset = ood_dataset_builder.load(batch_size=batch_size)
steps[f'ood/{ood_dataset_name}'] = ood_dataset_builder.num_examples(
'in_distribution') // batch_size + ood_dataset_builder.num_examples(
'ood') // batch_size
datasets[f'ood/{ood_dataset_name}'] = ood_dataset
return datasets, steps
|
"""
instance equality
"""
class C(object):
def __init__(self, x):
self.x = x
c1 = C(1)
c2 = C(1)
c3 = C(2)
print c1 == c2
print c2 == c3
print
class D(object):
def __init__(self, x):
self.x = x
def __eq__(self, rhs):
return self.x == rhs.x
d1 = D(1)
d2 = D(1)
d3 = D(2)
print d1 == d2
print d2 == d3
print
class E(object):
def __init__(self, eq):
self.__eq__ = eq
e1 = E(lambda rhs: True)
e2 = E(lambda rhs: False)
print e1 == e2
print e2 == e1
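# C uses the default object equality, so both comparisons print False (identity).
# D overrides __eq__ on the class, so the prints are True then False.
# For E, special methods on new-style classes are looked up on the type rather than
# the instance, so the per-instance __eq__ is ignored and both prints fall back to
# identity comparison and show False.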
|
from resources.create_evaluation import CreateEvaluation
from resources.result import Result
from resources.show_evaluation import ShowEvaluationID
from resources.delete_evaluation import DeleteEvaluation
from resources.edit_evaluation import EditEvaluation
from resources.psychologist_evaluation import PsychologistEvaluationPatient
from resources.edit_result import EditResult
from resources.result_test import ResultTest
from resources.wisc_list import WiscList
from resources.delete_result import DeleteResult |
from django.contrib.auth import get_user_model
from rest_framework import serializers
from posts.models import Comment, Follow, Group, Post
User = get_user_model()
class PostSerializer(serializers.ModelSerializer):
author = serializers.SlugRelatedField(slug_field='username',
read_only=True)
class Meta:
fields = '__all__'
model = Post
class GroupSerializer(serializers.ModelSerializer):
class Meta:
fields = '__all__'
model = Group
class CommentSerializer(serializers.ModelSerializer):
author = serializers.SlugRelatedField(read_only=True,
slug_field='username')
class Meta:
fields = '__all__'
model = Comment
class FollowSerializer(serializers.ModelSerializer):
user = serializers.SlugRelatedField(
slug_field='username',
read_only=True,
default=serializers.CurrentUserDefault()
)
author = serializers.SlugRelatedField(slug_field='username',
queryset=User.objects.all())
class Meta:
fields = '__all__'
model = Follow
validators = [
serializers.UniqueTogetherValidator(
queryset=Follow.objects.all(),
fields=['user', 'author']
)
]
def validate_author(self, value):
if self.context['request'].user == value:
raise serializers.ValidationError(
                'You cannot follow yourself'
)
return value
|
"""
In search.py, you will implement generic search algorithms which are called
by Pacman agents (in searchAgents.py).
"""
import util
class SearchProblem:
"""
This class outlines the structure of a search problem, but doesn't implement
any of the methods (in object-oriented terminology: an abstract class).
You do not need to change anything in this class, ever.
"""
def getStartState(self):
"""
Returns the start state for the search problem
"""
util.raiseNotDefined()
def isGoalState(self, state):
"""
state: Search state
Returns True if and only if the state is a valid goal state
"""
util.raiseNotDefined()
def getSuccessors(self, state):
"""
state: Search state
For a given state, this should return a list of triples,
(successor, action, stepCost), where 'successor' is a
successor to the current state, 'action' is the action
required to get there, and 'stepCost' is the incremental
cost of expanding to that successor
"""
util.raiseNotDefined()
def getCostOfActions(self, actions):
"""
actions: A list of actions to take
This method returns the total cost of a particular sequence of actions. The sequence must
be composed of legal moves
"""
util.raiseNotDefined()
def tinyMazeSearch(problem):
"""
Returns a sequence of moves that solves tinyMaze. For any other
maze, the sequence of moves will be incorrect, so only use this for tinyMaze
"""
from game import Directions
s = Directions.SOUTH
w = Directions.WEST
return [s,s,w,s,w,w,s,w]
def depthFirstSearch(problem):
"""
Search the deepest nodes in the search tree first [p 85].
Your search algorithm needs to return a list of actions that reaches
the goal. Make sure to implement a graph search algorithm [Fig. 3.7].
To get started, you might want to try some of these simple commands to
understand the search problem that is being passed in:
print "Start:", problem.getStartState()
print "Is the start a goal?", problem.isGoalState(problem.getStartState())
print "Start's successors:", problem.getSuccessors(problem.getStartState())
"""
def breadthFirstSearch(problem):
"Search the shallowest nodes in the search tree first. [p 81]"
def uniformCostSearch(problem):
"Search the node of least total cost first. "
def nullHeuristic(state, problem=None):
"""
A heuristic function estimates the cost from the current state to the nearest
goal in the provided SearchProblem. This heuristic is trivial.
"""
return 0
def aStarSearch(problem, heuristic=nullHeuristic):
"Search the node that has the lowest combined cost and heuristic first."
# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch |
# -*- encoding: utf-8 -*-
import tempfile
import os
import logging
import shelve
import json
import socket
import httplib
import boto3
# boto (2.x) is still used for the Glacier backend and the S3 inventory helpers below
import boto
from boto.s3.key import Key
from boto.exception import S3ResponseError
from boto.glacier.exceptions import UnexpectedHTTPResponseError
import math
from bakthat.conf import config, DEFAULT_LOCATION, CONFIG_FILE
from bakthat.models import Inventory, Jobs
log = logging.getLogger(__name__)
class glacier_shelve(object):
"""Context manager for shelve.
Deprecated, here for backward compatibility.
"""
def __enter__(self):
self.shelve = shelve.open(os.path.expanduser("~/.bakthat.db"))
return self.shelve
def __exit__(self, exc_type, exc_value, traceback):
self.shelve.close()
class BakthatBackend(object):
"""Handle Configuration for Backends.
    The profile is only used when conf is None.
:type conf: dict
:param conf: Custom configuration
:type profile: str
:param profile: Profile name
"""
def __init__(self, conf={}, profile="default"):
self.conf = conf
if not conf:
self.conf = config.get(profile)
if not self.conf:
log.error("No {0} profile defined in {1}.".format(profile, CONFIG_FILE))
if not "access_key" in self.conf or not "secret_key" in self.conf:
log.error("Missing access_key/secret_key in {0} profile ({1}).".format(profile, CONFIG_FILE))
class RotationConfig(BakthatBackend):
"""Hold backups rotation configuration."""
def __init__(self, conf={}, profile="default"):
BakthatBackend.__init__(self, conf, profile)
self.conf = self.conf.get("rotation", {})
class S3Backend(BakthatBackend):
"""Backend to handle S3 upload/download."""
def __init__(self, conf={}, profile="default"):
BakthatBackend.__init__(self, conf, profile)
region_name = self.conf["region_name"]
if region_name == DEFAULT_LOCATION:
region_name = ""
self.client = boto3.client(
's3',
region_name=region_name,
aws_access_key_id=self.conf["access_key"],
aws_secret_access_key=self.conf["secret_key"])
self.bucket = self.conf["s3_bucket"]
self.container = self.conf["s3_bucket"]
self.container_key = "s3_bucket"
def download(self, keyname):
with tempfile.TemporaryFile() as encrypted_out:
encrypted_out_filename = encrypted_out.name
self.client.download_file(
Bucket=self.bucket,
Key=keyname,
Filename=encrypted_out_filename)
return open(encrypted_out_filename, 'rb')
def cb(self, remaining):
"""Upload callback to log upload percentage."""
percent = int((self.total - remaining) * 100.0 / self.total)
log.info("Upload completion: {0}%".format(percent))
def upload(self, keyname, filename, **kwargs):
self.total = os.stat(filename).st_size
self.client.upload_file(
filename,
self.bucket,
keyname,
Callback=self.cb)
    def ls(self):
        response = self.client.list_objects_v2(Bucket=self.bucket)
        return [obj["Key"] for obj in response.get("Contents", [])]
    def delete(self, keyname):
        self.client.delete_object(Bucket=self.bucket, Key=keyname)
class GlacierBackend(BakthatBackend):
"""Backend to handle Glacier upload/download."""
def __init__(self, conf={}, profile="default"):
BakthatBackend.__init__(self, conf, profile)
con = boto.connect_glacier(aws_access_key_id=self.conf["access_key"], aws_secret_access_key=self.conf["secret_key"], region_name=self.conf["region_name"])
self.vault = con.create_vault(self.conf["glacier_vault"])
self.backup_key = "bakthat_glacier_inventory"
self.container = self.conf["glacier_vault"]
self.container_key = "glacier_vault"
def load_archives(self):
return []
def backup_inventory(self):
"""Backup the local inventory from shelve as a json string to S3."""
if config.get("aws", "s3_bucket"):
archives = self.load_archives()
s3_bucket = S3Backend(self.conf).bucket
k = Key(s3_bucket)
k.key = self.backup_key
k.set_contents_from_string(json.dumps(archives))
k.set_acl("private")
def load_archives_from_s3(self):
"""Fetch latest inventory backup from S3."""
s3_bucket = S3Backend(self.conf).bucket
try:
k = Key(s3_bucket)
k.key = self.backup_key
return json.loads(k.get_contents_as_string())
except S3ResponseError, exc:
log.error(exc)
return {}
# def restore_inventory(self):
# """Restore inventory from S3 to DumpTruck."""
# if config.get("aws", "s3_bucket"):
# loaded_archives = self.load_archives_from_s3()
# # TODO faire le restore
# else:
# raise Exception("You must set s3_bucket in order to backup/restore inventory to/from S3.")
def restore_inventory(self):
"""Restore inventory from S3 to local shelve."""
if config.get("aws", "s3_bucket"):
loaded_archives = self.load_archives_from_s3()
with glacier_shelve() as d:
archives = {}
for a in loaded_archives:
print a
archives[a["filename"]] = a["archive_id"]
d["archives"] = archives
else:
raise Exception("You must set s3_bucket in order to backup/restore inventory to/from S3.")
def upload(self, keyname, filename, **kwargs):
archive_id = self.vault.concurrent_create_archive_from_file(filename, keyname)
Inventory.create(filename=keyname, archive_id=archive_id)
#self.backup_inventory()
def get_job_id(self, filename):
"""Get the job_id corresponding to the filename.
:type filename: str
:param filename: Stored filename.
"""
return Jobs.get_job_id(filename)
def delete_job(self, filename):
"""Delete the job entry for the filename.
:type filename: str
:param filename: Stored filename.
"""
job = Jobs.get(Jobs.filename == filename)
job.delete_instance()
def download(self, keyname, job_check=False):
"""Initiate a Job, check its status, and download the archive if it's completed."""
archive_id = Inventory.get_archive_id(keyname)
if not archive_id:
log.error("{0} not found !")
# check if the file exist on S3 ?
return
job = None
job_id = Jobs.get_job_id(keyname)
log.debug("Job: {0}".format(job_id))
if job_id:
try:
job = self.vault.get_job(job_id)
except UnexpectedHTTPResponseError: # Return a 404 if the job is no more available
self.delete_job(keyname)
if not job:
job = self.vault.retrieve_archive(archive_id)
job_id = job.id
Jobs.update_job_id(keyname, job_id)
log.info("Job {action}: {status_code} ({creation_date}/{completion_date})".format(**job.__dict__))
if job.completed:
log.info("Downloading...")
encrypted_out = tempfile.TemporaryFile()
# Boto related, download the file in chunk
chunk_size = 4 * 1024 * 1024
num_chunks = int(math.ceil(job.archive_size / float(chunk_size)))
job._download_to_fileob(encrypted_out, num_chunks, chunk_size, True, (socket.error, httplib.IncompleteRead))
encrypted_out.seek(0)
return encrypted_out
else:
log.info("Not completed yet")
if job_check:
return job
return
def retrieve_inventory(self, jobid):
"""Initiate a job to retrieve Galcier inventory or output inventory."""
if jobid is None:
return self.vault.retrieve_inventory(sns_topic=None, description="Bakthat inventory job")
else:
return self.vault.get_job(jobid)
def retrieve_archive(self, archive_id, jobid):
"""Initiate a job to retrieve Galcier archive or download archive."""
if jobid is None:
return self.vault.retrieve_archive(archive_id, sns_topic=None, description='Retrieval job')
else:
return self.vault.get_job(jobid)
def ls(self):
return [ivt.filename for ivt in Inventory.select()]
def delete(self, keyname):
archive_id = Inventory.get_archive_id(keyname)
if archive_id:
self.vault.delete_archive(archive_id)
archive_data = Inventory.get(Inventory.filename == keyname)
archive_data.delete_instance()
#self.backup_inventory()
def upgrade_from_shelve(self):
try:
with glacier_shelve() as d:
archives = d["archives"]
if "archives" in d:
for key, archive_id in archives.items():
#print {"filename": key, "archive_id": archive_id}
Inventory.create(**{"filename": key, "archive_id": archive_id})
del archives[key]
d["archives"] = archives
except Exception, exc:
log.exception(exc)
class SwiftBackend(BakthatBackend):
"""Backend to handle OpenStack Swift upload/download."""
def __init__(self, conf={}, profile="default"):
BakthatBackend.__init__(self, conf, profile)
from swiftclient import Connection, ClientException
self.con = Connection(self.conf["auth_url"], self.conf["access_key"],
self.conf["secret_key"],
auth_version=self.conf["auth_version"],
insecure=True)
region_name = self.conf["region_name"]
if region_name == DEFAULT_LOCATION:
region_name = ""
try:
self.con.head_container(self.conf["s3_bucket"])
except ClientException, e:
self.con.put_container(self.conf["s3_bucket"])
self.container = self.conf["s3_bucket"]
self.container_key = "s3_bucket"
def download(self, keyname):
headers, data = self.con.get_object(self.container, keyname,
resp_chunk_size=65535)
encrypted_out = tempfile.TemporaryFile()
for chunk in data:
encrypted_out.write(chunk)
encrypted_out.seek(0)
return encrypted_out
def cb(self, complete, total):
"""Upload callback to log upload percentage."""
"""Swift client does not support callbak"""
percent = int(complete * 100.0 / total)
log.info("Upload completion: {0}%".format(percent))
def upload(self, keyname, filename, **kwargs):
fp = open(filename, "rb")
self.con.put_object(self.container, keyname, fp)
def ls(self):
headers, objects = self.con.get_container(self.conf["s3_bucket"])
return [key['name'] for key in objects]
def delete(self, keyname):
self.con.delete_object(self.container, keyname)
|
"""Programa que comparar dois números inteiros e retorna infos de igualdade"""
n1 = int(input('Digite o Primeiro Número Inteiro: '))
n2 = int(input('Digite o Segundo Número Inteiro: '))
if n1 > n2:
print('O nº {} é maior que o nº {}.'.format(n1, n2))
elif n1 < n2:
print('O nº {} é maior que o nº {}.'.format(n2, n1))
else:
print('O nº {} é igual ao nº {}.'.format(n1, n2)) |
# This is the simulation of our evolving RS model under the FIRST framework of our assumptions on edge weights.
import numpy as np
import matplotlib.pyplot as plt
import powerlaw
import pandas as pd
import random
import seaborn as sns
class assumption_1st:
def __init__(self, beta, iterations, rating_scale, Cu, Ci, Unum, Inum, K, L, C):
self.init_paramter(beta, iterations, rating_scale, Cu, Ci, Unum, Inum, K, L, C)
self.init_assumption()
k = self.stat()
self.iterate()
res = self.stat()
tdu = self.calcdegree_user()
twu = self.calcweight_user()
tdi = self.calcdegree_item()
twi = self.calcweight_item()
k = (res, tdu, twu, tdi, twi)
x = np.zeros(self.rating_scale)
self.degseq_user = np.zeros(self.iterations + 1)
self.weiseq_user = np.zeros(self.iterations + 1)
self.degseq_item = np.zeros(self.iterations + 1)
self.weiseq_item = np.zeros(self.iterations + 1)
x[:res.size] = x[:res.size] + res
self.degseq_user[:min(self.iterations+1,k[1].size)] = self.degseq_user[:min(self.iterations+1,k[1].size)] + k[1][:min(self.iterations+1,k[1].size)]
self.weiseq_user[:min(self.iterations+1,k[2].size)] = self.weiseq_user[:min(self.iterations+1,k[2].size)] + k[2][:min(self.iterations+1,k[2].size)]
self.degseq_item[:min(self.iterations+1,k[3].size)] = self.degseq_item[:min(self.iterations+1,k[3].size)] + k[3][:min(self.iterations+1,k[3].size)]
self.weiseq_item[:min(self.iterations+1,k[4].size)] = self.weiseq_item[:min(self.iterations+1,k[4].size)] + k[4][:min(self.iterations+1,k[4].size)]
np.set_printoptions(threshold=np.inf)
xind = np.zeros(self.iterations + 1)
for i in range(1,self.iterations + 1):
xind[i] = xind[i-1] + 1
self.xind_user = xind
self.xind_item = xind
print("finish all the staff")
def init_paramter(self, beta, iterations, rating_scale, Cu, Ci, Unum, Inum, K, L, C):
#Initial settings of parameters in our weighted bipartite graph model B(U,I).
self.beta = beta # the probability to add a new vertex in U
self.iterations = iterations # the number of iterations to run the simulation
self.rating_scale = rating_scale # the preassigned rating scale
self.Cu = Cu # the least number of edges connected to vertices in U
self.Ci = Ci # the least number of edges connected to vertices in I
self.Unum = Unum # the number of vertices in U in the initial graph at t=0
self.Inum = Inum # the number of vertices in I in the initial graph at t=1
self.K = K # the number of basic user type in our assumption
self.L = L # the number of basic item level in our assumption
self.C = C # the number of adding edge
self.Huser = np.zeros((K,rating_scale)) # the rating pmf for the K user types
self.Hitem = np.zeros((L,rating_scale)) # the rating pmf for the L item levels
self.Fmean = np.zeros((K,)) # the mean of the distribution of users' weight vector (assumed to be Gaussian)
self.Gmean = np.zeros((L,)) # the mean of the distribution of items' weight vector (assumed to be Gaussian)
self.edges = np.zeros((iterations+50,iterations+50), dtype=int) # the matrix storing edge information
def init_weightgenerator(self):
        # Initialization of the sampling of edge weights from the mixture distribution
# include K,L,Huser,Hitem,rating_scale,Fmean,Gmean
self.Huser = np.random.sample((self.K, self.rating_scale))
Husersubsum = np.sum(self.Huser, axis=1)
Husersubsum = np.array([Husersubsum] * self.rating_scale)
Husersubsum = np.transpose(Husersubsum)
self.Huser = self.Huser/Husersubsum
self.Hitem = np.random.sample((self.L, self.rating_scale))
Hitemsubsum = np.sum(self.Hitem, axis=1)
Hitemsubsum = np.array([Hitemsubsum] * self.rating_scale)
Hitemsubsum = np.transpose(Hitemsubsum)
self.Hitem = self.Hitem/Hitemsubsum
self.Fmean = np.random.sample(self.K,)
self.Fmean = self.Fmean/np.sum(self.Fmean)
self.Gmean = np.random.sample(self.L,)
self.Gmean = self.Gmean/np.sum(self.Gmean)
def init_assumption(self):
# Initialization for the inital simple graph at t=0
print("Initalizing...", end="")
# include global edges,Unum,Inum
self.init_weightgenerator()
self.edges = np.zeros((self.iterations+50, self.iterations+50), dtype=int)
# We can assume that axis=1 is user sequence and the axis=0 is the item sequence
for i in range(self.Unum):
self.edges[i,0:self.Inum] = self.userweightgenerator(self.Inum)
print("Done.")
def userweightgenerator(self, nb):
# Sample edge weight(s) for new users in U from the mixture distribution
# include K,L,Huser,Hitem,rating_scale,Fmean,Gmean
Uvec = np.random.normal(self.Fmean,0.1)
Uvec[Uvec<0]=0
Uvec = Uvec/np.sum(Uvec)
Uvec = np.array([Uvec]*self.rating_scale)
Uvec = np.transpose(Uvec)
Hu = self.Huser*Uvec
Hu = np.sum(Hu,axis=0)
R = np.random.choice(self.rating_scale,nb,p=Hu)+1
return R
def itemweightgenerator(self, nb):
# Sample edge weight(s) for new items in I from the mixture distribution
# include K,L,Huser,Hitem,rating_scale,Fmean,Gmean
Ivec = np.random.normal(self.Gmean,0.1)
Ivec[Ivec<0]=0
Ivec = Ivec/np.sum(Ivec)
Ivec = np.array([Ivec]*self.rating_scale)
Ivec = np.transpose(Ivec)
Hi = self.Hitem*Ivec
Hi = np.sum(Hi,axis=0)
R = np.random.choice(self.rating_scale,nb,p=Hi)+1
return R
# Select "prototype" from the existing vertex group
def prototype(self, arr, nb):
return np.count_nonzero(arr.cumsum() < nb)
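    # prototype() is inverse-CDF sampling: for a draw nb in [1, arr.sum()] it returns
    # the index whose cumulative-weight interval contains nb, so existing vertices are
    # selected with probability proportional to their weight (preferential attachment).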
# Conduct Edge-copy and assign new edge weights
def copyedge(self, template, desired,p_prime):
ls = []
new2old = template.nonzero()[0]
tmp = template[new2old].astype(float)
for i in range(desired):
tmp /= tmp.sum()
sampled = np.nonzero(np.random.multinomial(1, tmp))[0][0]
ls.append(sampled)
tmp[sampled] = 0
ls.sort()
return new2old[ls]
# Add new vertices to U (respectively. I)
def addnode(self, nb_axis):
# include edges,Unum,Inum
weightsum = np.sum(self.edges[:self.Unum,:self.Inum], axis=nb_axis)
totalsum = np.sum(weightsum)
randnum = np.random.randint(1, totalsum+1)
p_prime = self.prototype(weightsum, randnum)
weighted = np.zeros(1)
if nb_axis == 1:
template = self.edges[p_prime, :self.Inum]
desired = self.Cu
weighted = self.userweightgenerator(template.shape[0])
else:
template = self.edges[:self.Unum, p_prime]
desired = self.Ci
weighted = self.itemweightgenerator(template.shape[0])
idx = self.copyedge(template, desired, p_prime)
new = np.zeros(template.shape[0],dtype=int)
new[idx] = weighted[idx]
if nb_axis == 1:
self.edges[self.Unum,:self.Inum] = new
self.Unum = self.Unum + 1
else:
self.edges[:self.Unum,self.Inum] = new
self.Inum = self.Inum + 1
# Add new edges to Graph
def addedge(self):
# include edges,Unum,Inum
randnum_user = random.randint(0,self.Unum-1)
randnum_item = random.randint(0,self.Inum-1)
self.edges[randnum_user,randnum_item] = random.randint(1, self.rating_scale)
# Evolution of U (or I)
def evolution(self):
randnum = np.random.rand()
if randnum < self.beta:
# add user
self.addnode(1)
else:
# add item
self.addnode(0)
for i in range(self.C):
self.addedge()
# pass
# Iterate
def iterate(self):
print("Begin iteration...", end="")
for i in range(self.iterations):
self.evolution()
print("Done")
# Gather statistic information
def stat(self):
# include edges
tmps = self.edges.flatten().astype(int)
count = np.bincount(tmps)
count = count[1:]
count = 1.0*count/count.sum()
return count
# Calculate degree distributions
def calcdegree_user(self):
# include edges
sumdegree = self.edges.astype(bool).sum(axis=1)
return np.bincount(sumdegree)
# Calculate vertex weight distributions
def calcweight_user(self):
# include edges
sumdegree = self.edges.sum(axis=1)
return np.bincount(sumdegree)
# Calculate degree distributions
def calcdegree_item(self):
# include edges
sumdegree = self.edges.astype(bool).sum(axis=0)
return np.bincount(sumdegree)
# Calculate vertex weight distributions
def calcweight_item(self):
# include edges
sumdegree = self.edges.sum(axis=0)
return np.bincount(sumdegree)
def get_distribution(self, target="user"):
if target == "item":
return self.degseq_item, self.weiseq_item, self.xind_item
else:
return self.degseq_user, self.weiseq_user, self.xind_user
def get_graph(self):
return self.edges, self.Inum, self.Unum
def get_pvalue_alpha_xmin_1(seq):
results = powerlaw.Fit(seq)
alpha = results.power_law.alpha
xmin = results.power_law.xmin
R, p_value = results.distribution_compare('power_law', 'lognormal')
print("p_value:", p_value, "alpha:", alpha, "xmin:", xmin)
return p_value, alpha, xmin
def cal_big_c(simmodel):
    the_edges, Inum, Unum = simmodel.get_graph()
    a = the_edges[:Unum,:Inum]
    total = 0
    w_max = 0
    w_min = 9999
    count = 0
    for i in range(0,len(a)):
        for j in range(0, len(a[0])):
            if a[i][j] != 0:
                count += 1
                total += a[i][j]
                if a[i][j] > w_max:
                    w_max = a[i][j]
                if a[i][j] < w_min:
                    w_min = a[i][j]
    # print(total, w_max, w_min, count, total/count)
    er = total/count
    big_c = er * (1+(simmodel.Ci*(1-simmodel.beta))/(simmodel.Cu*simmodel.beta+simmodel.C))
    return big_c
import numpy as np
from numpy import ndarray
import matplotlib.pyplot as plt
from pathlib import Path
def B_pol_section(R:ndarray=None,Z:ndarray=None, BR:ndarray=None,BZ:ndarray=None, fig=None,ax=None, cbar_mode:str='linear', field_avg:float=None, field_max:float=None, nocbar:bool=False, titletext:str=None, cax=None):
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib import colors
if fig is None: fig, ax = plt.subplots(1, 1, figsize=(5.0, 5.0*4/3))
if nocbar: assert( field_max is not None and field_avg is not None)
Rg, Zg = np.meshgrid(R, Z)
B_pol = np.sqrt(BR**2 + BZ**2)
if field_avg is None: field_avg = np.average(B_pol)
if field_max is None:
field_max = np.max(B_pol)
cbar_up_extend = False
else:
cbar_up_extend = True
strm_kwarg = {'density': 1.3}
if cbar_mode=='linear':
linewidth = 2.0 * B_pol.T / field_max
strm_kwarg.update( {'linewidth':linewidth, 'color':B_pol.T, 'cmap':'gnuplot',
'norm': colors.Normalize(vmin=0.0,vmax=field_max)})
elif cbar_mode=='percent':
p = 0.8
B_pol_norm = (B_pol.T / field_max)**p # A big power greater than 1.0 will expand the [1.0-epsilon, 1.0], while a smaller value than 1.0 will shrink the high value part.
linewidth = 0.4 * B_pol_norm / (field_avg/field_max)**p
strm_kwarg.update( {'linewidth':linewidth,
'color':B_pol.T/field_max, 'cmap':'YlOrRd',
'norm': colors.LogNorm(vmin=1e-4,vmax=1.)} )
else:
raise ValueError("Wrong arg, linewidth_mode should be one of 'factor', 'percent'.")
strm = ax.streamplot(Rg,Zg,BR.T,BZ.T, **strm_kwarg)
if not nocbar:
if cax is not None:
axins = cax
axins.cla()
else:
axins = inset_axes(ax,
width="5%", # width = 5% of parent_bbox width
height="35%", # height : 50%
loc='lower right',
# bbox_to_anchor=(1.05, 0., 1, 1),
# bbox_transform=ax.transAxes,
borderpad=0.5,
)
# cbar.ax.set_title('$B_{pol} (T)$', fontsize=10)
if cbar_mode=='linear':
if cbar_up_extend: cbar = fig.colorbar(strm.lines, cax=axins, extend='max')
else: cbar = fig.colorbar(strm.lines, cax=axins)
cbar.set_label('$B_{pol} (T)$', y=1.10, labelpad=-5, rotation=0)
elif cbar_mode=='percent':
cbar = fig.colorbar(strm.lines, cax=axins, extend='min')
cbar.set_label('$B_{pol} (\%) $', y=0.005, labelpad=-3.5, rotation=0)
cbar.ax.tick_params(labelsize=8.0)
# fig.colorbar(strm.lines, ax=ax, shrink=0.8) # The most primitive style colorbar
ax.set_aspect('equal')
ax.set_xlabel('$R$(m)', fontsize=8.5)
ax.set_ylabel('$Z$(m)', fontsize=8.5)
if titletext is not None:
ax.set_title(titletext, fontsize=10)
elif cbar_mode=='linear':
ax.set_title(" Poloidal Field Components Streamline\nat Poiloidal Cut", fontsize=10)
elif cbar_mode=='percent':
ax.set_title(" Poloidal Field Components Streamline\nat Poiloidal Cut\n(Color indicating the $B_{pol} / B_{pol~max}$)", fontsize=10)
if not nocbar: return fig, ax, axins
elif nocbar: return fig, ax
def s_pol_section(R:ndarray, Z:ndarray, psi_norm:ndarray, fig=None, ax=None):
import matplotlib.pyplot as plt
if fig is None: fig, ax = plt.subplots(1, 1, figsize=(5.0, 5.0*4/3))
Rg, Zg = np.meshgrid(R, Z)
psi_norm_contour = ax.contour(Rg, Zg, psi_norm.T) # levels=np.arange(7)/5.0, NOTE: contour is used for cartesian "xy" index, so psi_norm index shall be like [Z_ind, R_ind].
ax.set_title("$\Psi_{norm}$ Distribution \n at Poloidal Cross-section", fontsize=10)
ax.clabel(psi_norm_contour, inline=1, fontsize=10)
ax.set_aspect('equal')
ax.set_xlabel('$R$(m)', fontsize=8.5)
ax.set_ylabel('$Z$(m)', fontsize=8.5)
return fig, ax
def flt_pol_section(fig,ax, flt_lines):
    # TODO: currently only supports a fixed number of traced field lines. Make the markers generic.
for i, color_marker in enumerate(['r*', 'g+', 'b^', 'g^', 'r+']):
ax.plot(flt_lines[i][:,0],flt_lines[i][:,1], color_marker)
def s_levels(fig,ax, psi_norm_isosurface_rzphi:ndarray):
ax.plot(psi_norm_isosurface_rzphi[:,0,0], psi_norm_isosurface_rzphi[:,0,1])
def divertor_edge(fig,ax, div_edge:ndarray):
ax.plot(div_edge[:,0], div_edge[:,1])
# [Possible to make labels appear when hovering over a point in matplotlib? ImportanceOfBeingErnest's answer](https://stackoverflow.com/questions/7908636/possible-to-make-labels-appear-when-hovering-over-a-point-in-matplotlib)
def s_isoline(fig, ax, r_mesh:ndarray,z_mesh:ndarray, TET:ndarray, pad:int=2, S:ndarray=None, s_chosen:float=None):
if s_chosen is not None:
ind = np.argmin(np.abs(S-s_chosen))
assert(pad==2)
pad = len(S) - ind
sep_bound_line, = ax.plot(r_mesh[-pad,:],z_mesh[-pad,:], '--', linewidth=0.5)
annot = ax.annotate(
"", xy=(0,0), xytext=(-20,20), textcoords="offset points",
bbox=dict(boxstyle="round", fc="w"),
arrowprops=dict(arrowstyle="->"))
annot.set_visible(False)
def update_annot(ind):
x,y = sep_bound_line.get_data()
annot.xy = (x[ind["ind"][0]], y[ind["ind"][0]])
text = "$\\theta^*=$" + " ".join(["{0:.1f}°".format(TET[n]/(2*np.pi)*360.0) for n in ind["ind"]])
if S is not None: text += f"\n$s=${S[-pad]:.3f}"
annot.set_text(text)
annot.get_bbox_patch().set_alpha(0.4)
def hover(event):
vis = annot.get_visible()
if event.inaxes == ax:
cont, ind = sep_bound_line.contains(event)
if cont:
update_annot(ind)
annot.set_visible(True)
fig.canvas.draw_idle()
else:
if vis:
annot.set_visible(False)
fig.canvas.draw_idle()
fig.canvas.mpl_connect("motion_notify_event", hover)
# Draw a mesh whose lines indicate the \theta^* invariant lines.
def theta_star_isolines(fig, ax, r_mesh:ndarray, z_mesh:ndarray, line_per_num=10):
ntheta = r_mesh.shape[1]
for i in range(ntheta):
if i % line_per_num==0: ax.plot(r_mesh[:-1,i],z_mesh[:-1,i], 'r', linewidth=0.3)
def coil_proj_RZ(fig, ax, path_coilsys_folder:str, coil_sel_dict:dict):
from .. import file, coordinate
ylim = ax.get_ylim()
coil_r_xyz, coil_info = file.read_coil(path_coilsys_folder, coil_sel_dict.keys())
for coilsys in coil_sel_dict.keys():
for coil in coil_sel_dict[coilsys]:
coil_r_rzphi_one = coordinate.coord_system_change(coord_from=coil_info[coilsys][coil]['cor'], coord_to='RZPhi', r=coil_r_xyz[coilsys][coil], merge_return=True)
ax.plot(coil_r_rzphi_one[...,0], coil_r_rzphi_one[...,1], '--', linewidth=0.5, label=coil)
ax.legend()
ax.set_ylim(ylim)
|
"""
Problem:
A regular number in mathematics is defined as one which evenly divides some power of
60. Equivalently, we can say that a regular number is one whose only prime divisors are
2, 3, and 5.
These numbers have had many applications, from helping ancient Babylonians keep time to
tuning instruments according to the diatonic scale.
Given an integer N, write a program that returns, in order, the first N regular
numbers.
"""
from typing import List, Set
def get_prime_factors(num: int) -> Set[int]:
factors = set()
curr = 2
while num > 1:
while num > 1 and num % curr == 0:
num = num // curr
factors.add(curr)
curr += 1
return factors
def get_regular_numbers(N: int) -> List[int]:
# using Sieve of Eratosthenes Method to optimally find the required numbers
total_range = 2 * N
SoE = [False for _ in range(total_range)]
result = []
count = 0
factors = set([2, 3, 5])
for factor in factors:
for i in range(factor, total_range, factor):
if not SoE[i] and not (get_prime_factors(i) - factors):
SoE[i] = True
for index, value in enumerate(SoE):
if value:
result.append(index)
count += 1
if count == N:
break
return result
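# An alternative sketch (not the solution above): the classic three-pointer
# dynamic-programming generator for regular/Hamming numbers, which also yields 1
# and does not rely on the 2 * N sieve range used by get_regular_numbers.
def get_regular_numbers_dp(N: int) -> List[int]:
    nums = [1]
    i2 = i3 = i5 = 0
    while len(nums) < N:
        nxt = min(2 * nums[i2], 3 * nums[i3], 5 * nums[i5])
        nums.append(nxt)
        # advance every pointer that produced the chosen value to avoid duplicates
        if nxt == 2 * nums[i2]:
            i2 += 1
        if nxt == 3 * nums[i3]:
            i3 += 1
        if nxt == 5 * nums[i5]:
            i5 += 1
    return nums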
if __name__ == "__main__":
print(get_regular_numbers(10))
"""
SPECS:
TIME COMPLEXITY: O(n log(n))
SPACE COMPLEXITY: O(n)
"""
|
# You are given a non-empty array of integers.
# One element appears exactly once, with every other element appearing at least twice, perhaps more.
# Write a function that can find and return the element that appears exactly once.
# Example 1:
# Input: [1,1,2,1]
# Output: 2
# Example 2:
# Input: [1,2,1,2,1,2,80]
# Output: 80
# Note: You should be able to develop a solution that has O(n) time complexity.
# [execution time limit] 4 seconds (py3)
# [input] array.integer nums
# [output] integer
from collections import Counter
def csFindTheSingleNumber(nums):
frequency = Counter(nums)
for i in frequency:
if frequency[i] == 1:
return i
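# Example usage (from the prompt above):
# csFindTheSingleNumber([1, 1, 2, 1])           # -> 2
# csFindTheSingleNumber([1, 2, 1, 2, 1, 2, 80]) # -> 80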
|
import pytest
from expiring_links.serializers import ExpiringLinkGeneratorSerializer
@pytest.mark.serializers
def test_fields(db, create_test_expiring_link_serializer_data):
assert list(create_test_expiring_link_serializer_data.keys()) == ['image_id', 'expiration_time']
@pytest.mark.serializers
def test_valid_serializer(db, create_test_expiring_link_serializer_data, create_test_image, create_test_user,
remove_test_data):
serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
context={'user': create_test_user})
assert serializer.is_valid()
@pytest.mark.serializers
def test_user_without_permission(db, create_test_expiring_link_serializer_data, create_test_image, create_test_user,
remove_test_data):
create_test_user.tier.expired_link_flag = False
serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
context={'user': create_test_user})
assert not serializer.is_valid()
assert set(serializer.errors) == {'non_field_errors'}
@pytest.mark.serializers
def test_wrong_image_id(db, create_test_expiring_link_serializer_data, create_test_image, create_test_user,
remove_test_data):
create_test_expiring_link_serializer_data['image_id'] = create_test_image.pk + 1
serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
context={'user': create_test_user})
assert not serializer.is_valid()
assert set(serializer.errors) == {'image_id'}
@pytest.mark.serializers
def test_too_short_expiration_time(db, create_test_expiring_link_serializer_data, create_test_image, create_test_user,
remove_test_data):
create_test_expiring_link_serializer_data['expiration_time'] = 200
serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
context={'user': create_test_user})
assert not serializer.is_valid()
assert set(serializer.errors) == {'expiration_time'}
@pytest.mark.serializers
def test_too_long_expiration_time(db, create_test_expiring_link_serializer_data, create_test_image, create_test_user,
remove_test_data):
create_test_expiring_link_serializer_data['expiration_time'] = 40000
serializer = ExpiringLinkGeneratorSerializer(data=create_test_expiring_link_serializer_data,
context={'user': create_test_user})
assert not serializer.is_valid()
assert set(serializer.errors) == {'expiration_time'}
|
from .shared import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
corporation = db.Column(db.String)
alliance = db.Column(db.String)
refresh_token = db.Column(db.String)
recruiter = db.Column(db.Boolean)
admin = db.Column(db.Boolean)
def __init__(self, name: str, corporation: str, alliance: str, refresh_token: str = None,
recruiter: bool = False, admin: bool = False) -> None:
self.name = name
self.corporation = corporation
self.alliance = alliance
self.refresh_token = refresh_token
self.recruiter = recruiter
self.admin = admin
@property
def is_authenticated(self) -> bool:
return True
@property
def is_active(self) -> bool:
return True
@property
def is_anonymous(self) -> bool:
return False
@property
def is_in_alliance(self) -> bool:
return self.alliance == 'The Society For Unethical Treatment Of Sleepers'
def get_id(self) -> str:
return str(self.id)
def __repr__(self) -> str:
return f'<User-{self.id}-{self.name}>'
class Application(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
user = db.relationship('User', backref=db.backref('applications', lazy=True))
note = db.Column(db.String)
status = db.Column(db.String)
complete = db.Column(db.Boolean)
def __init__(self, user_id: int, note: str, status: str = 'New', complete: bool = False) -> None:
self.user_id = user_id
self.note = note
self.status = status
self.complete = complete
def __repr__(self) -> str:
return f'<Application-{self.id}-{self.user_id}>'
|
#http://www.openstreetmap.org/way/107817218
# Arizona Canal Diversion Channel (ACDC)
assert_has_feature(
16, 12353, 26272, 'water',
{ 'kind': 'river', 'intermittent': True })
#http://www.openstreetmap.org/way/96528126
# 10th Street Wash
assert_has_feature(
16, 12368, 26272, 'water',
{ 'kind': 'drain', 'intermittent': True })
#http://www.openstreetmap.org/way/61954975
# Unnamed drain
assert_has_feature(
16, 12372, 26272, 'water',
{ 'kind': 'drain', 'intermittent': True })
#http://www.openstreetmap.org/way/321690441
# Unnamed stream
assert_has_feature(
16, 12492, 26279, 'water',
{ 'kind': 'stream', 'intermittent': True })
#http://www.openstreetmap.org/way/68709904
# Unnamed water (lake)
assert_has_feature(
16, 12349, 26257, 'water',
{ 'kind': 'water', 'intermittent': True })
|
# TensorFlow Model !
import os
import shutil
import numpy as np
import tensorflow as tf
tf.reset_default_graph()
from cell import ConvLSTMCell
import sys
module_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "..")
if module_path not in sys.path:
sys.path.append(module_path)
from datasets.batch_generator import datasets
class conv_lstm_model():
def __init__(self):
"""Parameter initialization"""
self.batch_size = 4
self.timesteps = 32
self.shape = [64, 64] # Image shape
self.kernel = [3, 3]
self.channels = 3
self.filters = [32, 128, 32, 3] # 4 stacked conv lstm filters
# Create a placeholder for videos.
self.inputs = tf.placeholder(tf.float32, [self.batch_size, self.timesteps] + self.shape + [self.channels], name="conv_lstm_inputs") # (batch_size, timestep, H, W, C)
self.outputs_exp = tf.placeholder(tf.float32, [self.batch_size, self.timesteps] + self.shape + [self.channels], name="conv_lstm_outputs_exp") # (batch_size, timestep, H, W, C)
# model output
self.model_output = None
# loss
self.l2_loss = None
# optimizer
self.optimizer = None
def create_model(self):
with tf.variable_scope('conv_lstm_model'):
cells = []
for i, each_filter in enumerate(self.filters):
cell = ConvLSTMCell(self.shape, each_filter, self.kernel)
cells.append(cell)
cell = tf.nn.rnn_cell.MultiRNNCell(cells, state_is_tuple=True)
states_series, current_state = tf.nn.dynamic_rnn(cell, self.inputs, dtype=self.inputs.dtype)
# current_state => Not used ...
self.model_output = states_series
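            # states_series has shape (batch_size, timesteps, H, W, filters[-1]);
            # since filters[-1] == 3 it is used directly as the predicted RGB frames.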
def loss(self):
frames_difference = tf.subtract(self.outputs_exp, self.model_output)
batch_l2_loss = tf.nn.l2_loss(frames_difference)
# divide by batch size ...
l2_loss = tf.divide(batch_l2_loss, float(self.batch_size))
self.l2_loss = l2_loss
def optimize(self):
train_step = tf.train.AdamOptimizer().minimize(self.l2_loss)
self.optimizer = train_step
def build_model(self):
self.create_model()
self.loss()
self.optimize()
file_path = os.path.abspath(os.path.dirname(__file__))
data_folder = os.path.join(file_path, "../../data/")
log_dir_file_path = os.path.join(file_path, "../../logs/")
model_save_file_path = os.path.join(file_path, "../../checkpoint/")
output_video_save_file_path = os.path.join(file_path, "../../output/")
iterations = "iterations/"
best = "best/"
checkpoint_iterations = 25
best_model_iterations = 25
best_l2_loss = float("inf")
heigth, width = 64, 64
channels = 3
def log_directory_creation(sess):
if tf.gfile.Exists(log_dir_file_path):
tf.gfile.DeleteRecursively(log_dir_file_path)
tf.gfile.MakeDirs(log_dir_file_path)
# model save directory
if os.path.exists(model_save_file_path):
restore_model_session(sess, iterations + "conv_lstm_model")
else:
os.makedirs(model_save_file_path + iterations)
os.makedirs(model_save_file_path + best)
# output dir creation
if not os.path.exists(output_video_save_file_path):
os.makedirs(output_video_save_file_path)
def save_model_session(sess, file_name):
saver = tf.train.Saver()
save_path = saver.save(sess, model_save_file_path + file_name )
def restore_model_session(sess, file_name):
saver = tf.train.Saver() # tf.train.import_meta_graph(model_save_file_path + file_name + ".meta")
saver.restore(sess, model_save_file_path + file_name )
print ("graph loaded!")
def is_correct_batch_shape(X_batch, y_batch, model, info="train"):
# info can be {"train", "val"}
if (X_batch is None or y_batch is None or
X_batch.shape!=(model.batch_size, model.timesteps,heigth,width,channels) or
y_batch.shape!=(model.batch_size, model.timesteps,heigth,width,channels)):
print ("Warning: skipping this " + info + " batch because of shape")
return False
return True
def test():
with tf.Session() as sess:
model = conv_lstm_model()
model.build_model()
init = tf.global_variables_initializer()
sess.run(init)
log_directory_creation(sess)
# data read iterator
data = datasets(batch_size=model.batch_size, heigth=heigth, width=width)
global_step = 0
for X_batch, y_batch, filenames in data.test_next_batch():
# print ("X_batch", X_batch.shape, "y_batch", y_batch.shape)
if not is_correct_batch_shape(X_batch, y_batch, model, "test"):
# global step not increased !
continue
input_data = np.zeros_like(X_batch)
input_data[:,0] = X_batch[:,0]
for i in range(model.timesteps):
output_predicted = sess.run(model.model_output , feed_dict={ model.inputs: input_data })
if (i < (model.timesteps-1)):
input_data[:,i+1] = output_predicted[:,i]
print ("global step ",global_step," time step ",i)
data.frame_ext.generate_output_video(output_predicted, filenames)
global_step += 1
print ("test step ",global_step)
def train():
global best_l2_loss
with tf.Session() as sess:
# conv lstm model
model = conv_lstm_model()
model.build_model()
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
sess.run(init)
# clear logs !
log_directory_creation(sess)
# Tensorflow Summary
tf.summary.scalar("train_l2_loss", model.l2_loss)
summary_merged = tf.summary.merge_all()
train_writer = tf.summary.FileWriter(log_dir_file_path + "/train", sess.graph)
test_writer = tf.summary.FileWriter(log_dir_file_path + "/test", sess.graph)
global_step = 0
while True:
try:
# data read iterator
data = datasets(batch_size=model.batch_size, heigth=heigth, width=width)
for X_batch, y_batch, _ in data.train_next_batch():
# print ("X_batch", X_batch.shape, "y_batch", y_batch.shape)
if not is_correct_batch_shape(X_batch, y_batch, model, "train"):
# global step not increased !
continue
_, summary = sess.run([model.optimizer, summary_merged], feed_dict={
model.inputs: X_batch, model.outputs_exp: y_batch})
train_writer.add_summary(summary, global_step)
if global_step % checkpoint_iterations == 0:
save_model_session(sess, iterations + "conv_lstm_model")
if global_step % best_model_iterations == 0:
val_l2_loss_history = list()
batch_counter = 0
# iterate on validation batch ...
for X_val, y_val, _ in data.val_next_batch():
batch_counter += 1
# print ("X_val", X_val.shape, "y_val", y_val.shape)
if not is_correct_batch_shape(X_val, y_val, model, "val_"+str(batch_counter)):
continue
test_summary, val_l2_loss = sess.run([summary_merged, model.l2_loss], feed_dict={model.inputs: X_val, model.outputs_exp: y_val})
test_writer.add_summary(test_summary, global_step)
val_l2_loss_history.append(val_l2_loss)
temp_loss = sum(val_l2_loss_history) * 1.0 / len(val_l2_loss_history)
# save if better !
if best_l2_loss > temp_loss:
best_l2_loss = temp_loss
save_model_session(sess, best + "conv_lstm_model")
print ("Iteration ", global_step, " best_l2_loss ", best_l2_loss)
global_step += 1
            except Exception:
                # Ignore problems with this pass and continue looping, but let
                # KeyboardInterrupt propagate so training can still be stopped.
                pass
train_writer.close()
test_writer.close()
def main():
train()
if __name__ == '__main__':
main()
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Perform a GreyNoise GNQL Query"
class Input:
QUERY = "query"
SIZE = "size"
class Output:
COMPLETE = "complete"
COUNT = "count"
DATA = "data"
MESSAGE = "message"
QUERY = "query"
class GnqlQueryInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"query": {
"type": "string",
"title": "GreyNoise Query",
"description": "Query in GreyNoise Query Language (GNQL) Syntax",
"order": 1
},
"size": {
"type": "string",
"title": "Max Size",
"description": "Max Number of IPs to Return Data For",
"default": "10",
"order": 2
}
},
"required": [
"query"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class GnqlQueryOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"complete": {
"type": "boolean",
"title": "GreyNoise Query",
"description": "GreyNoise Query Completed",
"order": 1
},
"count": {
"type": "integer",
"title": "GreyNoise Count",
"description": "Count of IPs In Query",
"order": 2
},
"data": {
"type": "array",
"title": "GreyNoise Query Data",
"description": "GreyNoise Query Data",
"items": {
"$ref": "#/definitions/data"
},
"order": 3
},
"message": {
"type": "string",
"title": "GreyNoise Query Message",
"description": "GreyNoise Query Message",
"order": 4
},
"query": {
"type": "string",
"title": "GreyNoise Query",
"description": "GreyNoise Query Sent to API",
"order": 5
}
},
"definitions": {
"data": {
"type": "object",
"title": "data",
"properties": {
"actor": {
"type": "string",
"title": "GreyNoise Actor",
"description": "GreyNoise Actor Associated with IP",
"order": 6
},
"bot": {
"type": "boolean",
"title": "GreyNoise Bot",
"description": "GreyNoise has identified this as a Bot",
"order": 10
},
"classification": {
"type": "string",
"title": "GreyNoise Classification",
"description": "GreyNoise Classification",
"order": 8
},
"cve": {
"type": "array",
"title": "GreyNoise CVEs",
"description": "CVEs associated with GreyNoise Tags",
"items": {
"type": "string"
},
"order": 9
},
"first_seen": {
"type": "string",
"title": "GreyNoise First Seen",
"displayType": "date",
"description": "First Seen By GreyNoise",
"format": "date-time",
"order": 2
},
"ip": {
"type": "string",
"title": "IP Address",
"description": "IP Address",
"order": 1
},
"last_seen": {
"type": "string",
"title": "GreyNoise Last Seen",
"description": "Last Seen By GreyNoise",
"order": 3
},
"metadata": {
"$ref": "#/definitions/metadata",
"title": "GreyNoise Metadata",
"description": "GreyNoise IP Metadata",
"order": 13
},
"raw_data": {
"$ref": "#/definitions/raw_data",
"title": "GreyNoise Raw Data",
"description": "GreyNoise IP Raw Data",
"order": 14
},
"seen": {
"type": "boolean",
"title": "GreyNoise Seen",
"description": "Has this IP been Seen by GreyNoise",
"order": 4
},
"spoofable": {
"type": "boolean",
"title": "GreyNoise Spoofable",
"description": "IP address may be spoofed",
"order": 7
},
"tags": {
"type": "array",
"title": "GreyNoise Tags",
"description": "GreyNoise Tags Associated with IP",
"items": {
"type": "string"
},
"order": 5
},
"vpn": {
"type": "boolean",
"title": "GreyNoise VPN",
"description": "GreyNoise has identified this as a VPN",
"order": 11
},
"vpn_service": {
"type": "string",
"title": "GreyNoise VPN Service",
"description": "Name of VPN Service",
"order": 12
}
},
"definitions": {
"metadata": {
"type": "object",
"title": "metadata",
"properties": {
"asn": {
"type": "string",
"title": "ASN",
"description": "ASN",
"order": 1
},
"category": {
"type": "string",
"title": "Category",
"description": "Category",
"order": 2
},
"city": {
"type": "string",
"title": "City",
"description": "City",
"order": 3
},
"country": {
"type": "string",
"title": "Country",
"description": "Country",
"order": 4
},
"country_code": {
"type": "string",
"title": "Country Code",
"description": "Country Code",
"order": 5
},
"organization": {
"type": "string",
"title": "Organization",
"description": "Organization",
"order": 6
},
"os": {
"type": "string",
"title": "OS",
"description": "OS",
"order": 7
},
"rdns": {
"type": "string",
"title": "rDNS",
"description": "rDNS",
"order": 8
},
"region": {
"type": "string",
"title": "Region",
"description": "Region",
"order": 9
},
"tor": {
"type": "boolean",
"title": "TOR",
"description": "TOR",
"order": 10
}
}
},
"raw_data": {
"type": "object",
"title": "raw_data",
"properties": {
"hassh": {
"type": "array",
"title": "HASSH",
"description": "HASSH",
"items": {
"type": "object"
},
"order": 1
},
"ja3": {
"type": "array",
"title": "JA3",
"description": "Ja3",
"items": {
"type": "object"
},
"order": 2
},
"scan": {
"type": "array",
"title": "Scan",
"description": "Scan",
"items": {
"$ref": "#/definitions/scan"
},
"order": 3
},
"web": {
"type": "object",
"title": "Web",
"description": "Web",
"order": 4
}
},
"definitions": {
"scan": {
"type": "object",
"title": "scan",
"properties": {
"port": {
"type": "integer",
"title": "Port",
"description": "Port",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Protocol",
"order": 2
}
}
}
}
},
"scan": {
"type": "object",
"title": "scan",
"properties": {
"port": {
"type": "integer",
"title": "Port",
"description": "Port",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Protocol",
"order": 2
}
}
}
}
},
"metadata": {
"type": "object",
"title": "metadata",
"properties": {
"asn": {
"type": "string",
"title": "ASN",
"description": "ASN",
"order": 1
},
"category": {
"type": "string",
"title": "Category",
"description": "Category",
"order": 2
},
"city": {
"type": "string",
"title": "City",
"description": "City",
"order": 3
},
"country": {
"type": "string",
"title": "Country",
"description": "Country",
"order": 4
},
"country_code": {
"type": "string",
"title": "Country Code",
"description": "Country Code",
"order": 5
},
"organization": {
"type": "string",
"title": "Organization",
"description": "Organization",
"order": 6
},
"os": {
"type": "string",
"title": "OS",
"description": "OS",
"order": 7
},
"rdns": {
"type": "string",
"title": "rDNS",
"description": "rDNS",
"order": 8
},
"region": {
"type": "string",
"title": "Region",
"description": "Region",
"order": 9
},
"tor": {
"type": "boolean",
"title": "TOR",
"description": "TOR",
"order": 10
}
}
},
"raw_data": {
"type": "object",
"title": "raw_data",
"properties": {
"hassh": {
"type": "array",
"title": "HASSH",
"description": "HASSH",
"items": {
"type": "object"
},
"order": 1
},
"ja3": {
"type": "array",
"title": "JA3",
"description": "Ja3",
"items": {
"type": "object"
},
"order": 2
},
"scan": {
"type": "array",
"title": "Scan",
"description": "Scan",
"items": {
"$ref": "#/definitions/scan"
},
"order": 3
},
"web": {
"type": "object",
"title": "Web",
"description": "Web",
"order": 4
}
},
"definitions": {
"scan": {
"type": "object",
"title": "scan",
"properties": {
"port": {
"type": "integer",
"title": "Port",
"description": "Port",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Protocol",
"order": 2
}
}
}
}
},
"scan": {
"type": "object",
"title": "scan",
"properties": {
"port": {
"type": "integer",
"title": "Port",
"description": "Port",
"order": 1
},
"protocol": {
"type": "string",
"title": "Protocol",
"description": "Protocol",
"order": 2
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
from flask import Flask
from flask_restful import Api
from sequence import FibSequenceResource
errors = {
"NegativeNumberError": {
"message": "Must pass a positive number.",
"status": 400,
}
}
app = Flask(__name__)
api = Api(
app,
errors = errors
)
api.add_resource(
FibSequenceResource,
"/fib/<string:number>"
)
if __name__ == "__main__":
app.run(
host="0.0.0.0",
debug=False
)
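# Usage sketch (not part of the original app; assumes Flask's default port
# 5000, since app.run() does not set one): the resource registered above can
# be queried with, for example,
#   curl http://localhost:5000/fib/10
# and a negative number is expected to map to the NegativeNumberError entry
# in `errors`, returning a 400 response.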
|
from setuptools import setup, find_packages
setup(
name='amber-state-machine',
version='0.0.1',
description='Amber State Machine',
long_description='Amber State Machine',
# The project's main homepage.
url='',
# Author details
author='Henri Korpela',
author_email='',
# Choose your license
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='dfa nfa state_machine automate',
packages=find_packages(exclude=['tests']),
) |
# -*- coding:utf-8 -*-
""" 数据库配置文件 """
config = {
'timeout': 3,
    'db_user': '',  # no password
'db_pass': '',
'db_host': 'localhost',
'db_port': 27017,
'db_name': 'taobao',
'use_tor_proxy': False,
'tor_proxy_port': 9050
} |
from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
urlpatterns = [
path('', views.HomeView.as_view(), name="home"),
]
|
################################# BeatDetector #################################
# Author: Dan Boehm
#
# Description: The BeatDetector Class is responsible for storing and analyzing
# data necessary for beat detection. It only detects beats for
# one set of data, so, for instance, if you performed an FFT on an
# audio signal, separated the signal into several frequency bands,
# and then wanted to perform beat detection on each band
# simultaneously, then you would need to create a separate
# BeatDetector for each frequency band.
#
from scipy import *
class BeatDetector:
##### Instance Variables #####
#beatDetected; # boolean: True if beat was detected
#triggerConstant; # float: Constant used for comparison of energyLevel to
# historyBuffer
#triggerCalc_A # float: Constant used for triggerConstant generation
# from equation: C = AV+B.
#triggerCalc_B # float: Constant used for triggerConstant generation
# from equation: C = AV+B.
#dynamicTrigger # boolean: True if triggerConstant should be calculated
# dynamically using variance and a linear
# regression.
#energyLevel; # float: Intensity of the sample last analyzed.
#historyBuffer; # float[]: bufferSize past energyLevels. Most Recent
# is at pHistoryBuffer.
#beatHistory; # boolean[]: Past beatDetecteds aligned
# with historyBuffer
#bufferSize; # int: Total size of the historyBuffer.
#pHistoryBuffer; # int: Starting location in historyBuffer Array
#pHistoryEnd; # int: Last value that should be included in history
# averaging.
#dynamicHistory; # boolean: True if number of samples for historyBuffer
# averaging should be calculated dynamically.
##### Constructors #####
# __init__
#
# Default constructor. For parameter descriptions, see above.
    # If dynamicTrigger = True, then triggerCalc_A & triggerCalc_B must be specified.
    # Otherwise, triggerConst must be specified.
#
# parameters: dynamicTrigger - boolean
# triggerConst - double
# triggerCalc_A - double
# triggerCalc_B - double
# dynamicHistory - boolean
# bufferSize - int
#
    def __init__(self, dynamicTrigger, triggerConst, triggerCalc_A, triggerCalc_B,
                 dynamicHistory, bufferSize):
self.beatDetected = False;
self.triggerConstant = triggerConst;
self.triggerCalc_A = triggerCalc_A;
self.triggerCalc_B = triggerCalc_B;
self.dynamicTrigger = dynamicTrigger;
self.energyLevel = 0;
self.bufferSize = bufferSize;
self.historyBuffer = zeros(bufferSize);
self.beatHistory = zeros(bufferSize);
self.pHistoryBuffer = 0;
self.pHistoryEnd = 0;
self.dynamicHistory = dynamicHistory;
##### Methods #####
# getHistoryBuffer(self)
#
# Author: Dan Boehm
#
# Description: returns the historyBuffer used to calculate last beatDetect.
#
# Parameters: na
#
# Modifies: none
#
# Returns: An array representing the History Buffer used for
# calculations. The most recent value is stored at location
# 0.
#
    def getHistoryBuffer(self):
        a = zeros(self.getBufferSize());
        p = self.pHistoryBuffer;
        for i in range(0, len(a)):
            a[i] = self.historyBuffer[p];
            p = p - 1;
            if(p < 0):
                p = self.bufferSize - 1;
        return a;
# getHistoryBuffer_Full(self)
#
# Author: Dan Boehm
#
# Description: returns the entire historyBuffer
#
# Parameters: na
#
# Modifies: none
#
# Returns: An array containing every stored sample in History. The
# most recent value is stored at location 0.
#
    def getHistoryBuffer_Full(self):
        a = zeros(self.bufferSize);
        p = self.pHistoryBuffer;
        for i in range(0, self.bufferSize):
            a[i] = self.historyBuffer[p];
            p = p - 1;
            if(p < 0):
                p = self.bufferSize - 1;
        return a;
# getBeatHistory(self)
#
# Author: Dan Boehm
#
# Description: returns the beatHistory corresponding to the array returned
# by getHistoryBuffer(self).
#
# Parameters: na
#
# Modifies: none
#
# Returns: An array containing booleans representing beats. One-to-one
# correspondance to the array returned by
# getHistoryBuffer(self).
#
    def getBeatHistory(self):
        a = zeros(self.getBufferSize());
        p = self.pHistoryBuffer;
        for i in range(0, len(a)):
            a[i] = self.beatHistory[p];
            p = p - 1;
            if(p < 0):
                p = self.bufferSize - 1;
        return a;
# getBeatHistory_Full(self)
#
# Author: Dan Boehm
#
# Description: returns the beatHistory corresponding to the array returned
# by getHistoryBuffer_Full(self).
#
# Parameters: na
#
# Modifies: none
#
# Returns: An array containing booleans representing beats. One-to-one
# correspondance to the array returned by
# getHistoryBuffer_Full(self).
#
    def getBeatHistory_Full(self):
        a = zeros(self.bufferSize);
        p = self.pHistoryBuffer;
        for i in range(0, self.bufferSize):
            a[i] = self.beatHistory[p];
            p = p - 1;
            if(p < 0):
                p = self.bufferSize - 1;
        return a;
# gettriggerConstant(self)
#
# Author: Dan Boehm
#
# Description: returns the last triggerConstant used. Be it dynamic or
# static.
#
# Parameters: na
#
# Modifies: none
#
# Returns: A number indicating the triggerConstant last used.
#
def getTriggerConstant(self):
return self.triggerConstant;
# getBufferSize(self)
#
# Author: Dan Boehm
#
# Description: Returns the size of the part of the historyBuffer last used
# for calculations.
#
# Parameters: na
#
# Modifies: none
#
# Returns: A number indicating the size of the historyBuffer last used.
#
def getBufferSize(self):
return abs(self.pHistoryEnd - self.pHistoryBuffer) + 1;
    # getBufferSize_Full(self)
#
# Author: Dan Boehm
#
# Description: Returns the size of the entire historyBuffer.
#
# Parameters: na
#
# Modifies: none
#
# Returns: A number indicating the size of the full historyBuffer.
#
def getBufferSize_Full(self):
return self.bufferSize;
# isDynamicTrigger(self)
#
# Author: Dan Boehm
#
# Description: Returns a boolean representing if the TriggerConstant is
# being calculated dynamically. This value is specified at
# object construction and should not be changed.
#
# Parameters: na
#
# Modifies: none
#
# Returns: boolean representing if the TriggerConstant is being
# calculated dynamically.
#
def isDynamicTrigger(self):
return self.dynamicTrigger;
# isDynamicTrigger(self)
#
# Author: Dan Boehm
#
# Description: Returns a boolean representing if the bufferSize is
# being calculated dynamically. This value is specified at
# object construction and should not be changed.
#
# Parameters: na
#
# Modifies: none
#
# Returns: boolean representing if the bufferSize is being
# calculated dynamically.
#
def isDynamicHistory(self):
return self.dynamicHistory;
# detectBeat(self, audioSample)
#
# Author: Dan Boehm
#
# Description: Returns a boolean representing if the audioSample given
# represents a beat.
#
# Parameters: audioSample - Array of values representing audio intensity.
#
# Modifies: energyLevel
# beatDetected
# historyBuffer
# beatHistory
# triggerConstant (if dynamicTrigger = True)
# pHistoryBuffer
#
# Returns: boolean representing if a beat was detected.
#
    def detectBeat(self, audioSample):
        # Calculate instant sound energy
        self.energyLevel = sum(abs(audioSample));
        # Compute triggerLevel
        if(self.dynamicTrigger):
            self.triggerConstant = self.triggerCalc(self.getHistoryBuffer());
        # Check for beat
        if self.energyLevel > self.triggerConstant * average(self.getHistoryBuffer()):
            self.beatDetected = True;
        else:
            self.beatDetected = False;
        # Update History Buffer
        self.historyBuffer[self.pHistoryBuffer] = self.energyLevel;
        self.beatHistory[self.pHistoryBuffer] = self.beatDetected;
        self.pHistoryBuffer = self.pHistoryBuffer + 1;
        self.pHistoryEnd = self.pHistoryEnd + 1;
        if(self.pHistoryBuffer == self.bufferSize):
            self.pHistoryBuffer = 0;
        if(self.pHistoryEnd == self.bufferSize):
            self.pHistoryEnd = 0;
        if(self.dynamicHistory):
            self.historySizeCalc();
        # Return and Exit
        return self.beatDetected;
# historySizeCalc(self) #####################UNFINISHED#####################
#
# Author: Dan Boehm
#
# Description: Analyzes the Beat History, and lengthens or shortens the
# historyBuffer accordingly.
#
# Parameters: none
#
# Modifies: pHistoryEnd
#
# Returns: none
#
def historySizeCalc(self):
pass
    # triggerCalc(self, history)
#
# Author: Dan Boehm
#
# Description: Calculates a triggerConstant from the history given. The
# calculation is done based on variance. The variance is
# calculated across the history and is then entered into a
# linear regression model given by the constants A & B.
# These values are specified during object creation and should
# not be modified.
#
# Parameters: history - Array of values for variance calculation
#
# Modifies: none
#
# Returns: Value of proper triggerConstant for the given history.
#
    def triggerCalc(self, history):
        # Compute Variance
        v = 0;
        for a in range(0, len(history)):
            v += (history[a] - average(history))**2;
        v = v / len(history);
        # Compute triggerLevel
        triggerLevel = self.triggerCalc_A * v + self.triggerCalc_B;
        return triggerLevel; |
#!/usr/bin/env python3
# Printing a string together with a number, like:
# $ print("text" + 3)
# raises an error such as 'cannot concatenate str and int',
# or, put simply, 'cannot add words and numbers',
# so we use the str() function to turn
# the number into a string:
print("text " + str(1))
print("text", str(1))
# You can separate values with commas or join them with plus signs
# on one line, just not with mismatched types.
|
import logging
from typing import Optional, Union
class ProcessContainer:
"""
    Automatically call the functions registered in `proc_close_methods` when the object is garbage collected or `close()` is called.
Attributes:
proc_close_methods (list[callable]): A list to collect all the methods that need to be run.
"""
def __init__(self, *args, **kwargs):
self.proc_close_methods = []
super().__init__(*args, **kwargs)
def __del__(self):
self.close()
def close(self) -> None:
"""
Call all the sessions/threads/processes terminate methods defined in `proc_close_methods`
"""
for func in getattr(self, 'proc_close_methods', []):
try:
func()
except Exception as e:
logging.error(e)
def to_str(bytes_str: Union[bytes, str]) -> str:
"""
Turn `bytes` or `str` to `str`
Args:
bytes_str: `bytes` or `str`
Returns:
utf8-decoded string
"""
if isinstance(bytes_str, bytes):
return bytes_str.decode('utf-8', errors='ignore')
return bytes_str
def to_bytes(bytes_str: Union[bytes, str], ending: Optional[Union[bytes, str]] = None) -> bytes:
"""
Turn `bytes` or `str` to `bytes`
Args:
bytes_str: `bytes` or `str`
ending: `bytes` or `str`, will add to the end of the result.
Only works when the `bytes_str` is `str`
Returns:
utf8-encoded bytes
"""
if isinstance(bytes_str, str):
bytes_str = bytes_str.encode()
if ending:
if isinstance(ending, str):
ending = ending.encode()
return bytes_str + ending
return bytes_str
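# Minimal usage sketch (not part of the original module; the callback below is
# illustrative only): register a cleanup callback on a ProcessContainer and
# round-trip data through to_bytes()/to_str().
if __name__ == '__main__':
    container = ProcessContainer()
    container.proc_close_methods.append(lambda: print('container closed'))
    container.close()  # runs every registered callback, printing 'container closed'

    assert to_bytes('abc', ending='\n') == b'abc\n'
    assert to_str(b'abc') == 'abc'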
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
###############################################
#
#
# Run AlphaFold2 step by step
# (https://github.com/deepmind/alphafold)
# Author: Pan Li ([email protected])
# @ Shuimu BioScience
# https://www.shuimubio.com/
#
#
################################################
#
#
# AlphaFold2 Step 1 -- Search homologous sequences and templates
# Usage: run_af2_step1.py /path/to/input.fasta /path/to/output_dir
#
#
import json
import gzip
import os
import pathlib
import pickle
import random
import shutil
import sys
import time
from typing import Dict, Union, Optional
import configparser
import argparse
import numpy as np
cur_path = pathlib.Path(__file__).parent.resolve()
ini_config = configparser.ConfigParser(allow_no_value=True)
assert len(ini_config.read(os.path.join(cur_path, 'config.ini'))) > 0, "Read config.ini failed"
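# For reference, a sketch of the config.ini layout this script expects; the
# section and key names come from the lookups below, while the values shown
# are placeholders rather than paths from the original repository. Empty
# executable paths mean the binary is resolved from PATH by check_executable().
#
#   [ALPHAFOLD2]
#   alphafold_path = /path/to/alphafold
#
#   [EXCUTABLE]
#   jackhmmer_binary_path =
#   hhblits_binary_path =
#   hhsearch_binary_path =
#   hmmsearch_binary_path =
#   hmmbuild_binary_path =
#   kalign_binary_path =
#
#   [DATABASE]
#   uniref90_database_path = /path/to/uniref90.fasta
#   mgnify_database_path = /path/to/mgy_clusters.fa
#   template_mmcif_dir = /path/to/pdb_mmcif/mmcif_files
#   obsolete_pdbs_path = /path/to/obsolete.dat
#   uniclust30_database_path = /path/to/uniclust30
#   bfd_database_path = /path/to/bfd
#   pdb70_database_path = /path/to/pdb70/pdb70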
sys.path.insert(0, ini_config['ALPHAFOLD2']['alphafold_path'])
from alphafold.common import protein
from alphafold.common import residue_constants
from alphafold.data import pipeline
from alphafold.data import templates
from alphafold.data.tools import hhsearch
from alphafold.data.tools import hmmsearch
from alphafold.model import config
from alphafold.model import model
from alphafold.model import data
parser = argparse.ArgumentParser(description='AlphaFold2 Step 1 -- Search homologous sequences and templates')
parser.add_argument('input_file', metavar='input_file', type=str, help='The FASTA file to process; it must contain exactly one sequence.')
parser.add_argument('output_dir', metavar='output_dir', type=str, help='Path to a directory that will store the results.')
parser.add_argument('--max_template_date', default='2021-11-03', type=str, help="Maximum date to search for templates.")
args = parser.parse_args()
features_output_path = os.path.join(args.output_dir, 'features.pkl.gz')
if os.path.exists(features_output_path):
print(f"Info: {features_output_path} exists, please delete it and try again.")
exit(0)
#################################
### Auxiliary functions
#################################
def check_executable(config_set_value, executable_exe):
    if config_set_value != '':
        assert os.path.exists(config_set_value), f"{config_set_value} does not exist"
        return config_set_value
    resolved_exe = shutil.which(executable_exe)
    assert resolved_exe is not None, f"{executable_exe} not found in PATH"
    return resolved_exe
def check_file(config_set_value):
    assert os.path.exists(config_set_value) or os.path.exists(config_set_value + "_a3m.ffdata"), f"{config_set_value} does not exist"
    return config_set_value
#################################
### Executable files
#################################
jackhmmer_binary_path = check_executable(ini_config['EXCUTABLE']['jackhmmer_binary_path'], "jackhmmer")
hhblits_binary_path = check_executable(ini_config['EXCUTABLE']['hhblits_binary_path'], "hhblits")
hhsearch_binary_path = check_executable(ini_config['EXCUTABLE']['hhsearch_binary_path'], "hhsearch")
hmmsearch_binary_path = check_executable(ini_config['EXCUTABLE']['hmmsearch_binary_path'], "hmmsearch")
hmmbuild_binary_path = check_executable(ini_config['EXCUTABLE']['hmmbuild_binary_path'], "hmmbuild")
kalign_binary_path = check_executable(ini_config['EXCUTABLE']['kalign_binary_path'], "kalign")
#################################
### Database files
#################################
uniref90_database_path = check_file(ini_config['DATABASE']['uniref90_database_path'])
mgnify_database_path = check_file(ini_config['DATABASE']['mgnify_database_path'])
template_mmcif_dir = check_file(ini_config['DATABASE']['template_mmcif_dir'])
obsolete_pdbs_path = check_file(ini_config['DATABASE']['obsolete_pdbs_path'])
uniclust30_database_path = check_file(ini_config['DATABASE']['uniclust30_database_path'])
bfd_database_path = check_file(ini_config['DATABASE']['bfd_database_path'])
pdb70_database_path = check_file(ini_config['DATABASE']['pdb70_database_path'])
#################################
### Options
#################################
max_template_date = args.max_template_date # Use the latest templates
use_precomputed_msas = False
use_small_bfd = False
MAX_TEMPLATE_HITS = 20
#################################
### Define searcher, featurizer and pipeline
#################################
template_searcher = hhsearch.HHSearch(
binary_path=hhsearch_binary_path,
databases=[pdb70_database_path])
template_featurizer = templates.HhsearchHitFeaturizer(
mmcif_dir=template_mmcif_dir,
max_template_date=max_template_date,
max_hits=MAX_TEMPLATE_HITS,
kalign_binary_path=kalign_binary_path,
release_dates_path=None,
obsolete_pdbs_path=obsolete_pdbs_path)
data_pipeline = pipeline.DataPipeline(
jackhmmer_binary_path = jackhmmer_binary_path,
hhblits_binary_path = hhblits_binary_path,
uniref90_database_path = uniref90_database_path,
mgnify_database_path = mgnify_database_path,
bfd_database_path = bfd_database_path,
uniclust30_database_path = uniclust30_database_path,
small_bfd_database_path = None,
template_searcher = template_searcher,
template_featurizer = template_featurizer,
use_small_bfd = use_small_bfd,
use_precomputed_msas = use_precomputed_msas)
#################################
### Run pipeline
#################################
msa_output_dir = os.path.join(args.output_dir, 'msas')
if not os.path.exists(msa_output_dir):
os.makedirs(msa_output_dir)
feature_dict = data_pipeline.process(
input_fasta_path = args.input_file,
msa_output_dir = msa_output_dir)
pickle.dump(feature_dict, gzip.open(features_output_path, 'wb'), protocol=4)
|
import os
import urllib.request
from osgeo import ogr
from mapswipe_workers.definitions import DATA_PATH, CustomError, logger
from mapswipe_workers.project_types.arbitrary_geometry import grouping_functions as g
from mapswipe_workers.project_types.arbitrary_geometry.group import Group
from mapswipe_workers.project_types.base.project import BaseProject
from mapswipe_workers.project_types.base.tile_server import BaseTileServer
class Project(BaseProject):
def __init__(self, project_draft: dict) -> None:
super().__init__(project_draft)
# set group size
self.groupSize = project_draft["groupSize"]
self.inputGeometries = project_draft["inputGeometries"]
self.tileServer = vars(BaseTileServer(project_draft["tileServer"]))
def validate_geometries(self):
raw_input_file = (
f"{DATA_PATH}/" f"input_geometries/raw_input_{self.projectId}.geojson"
)
valid_input_file = (
f"{DATA_PATH}/" f"input_geometries/valid_input_{self.projectId}.geojson"
)
if not os.path.isdir("{}/input_geometries".format(DATA_PATH)):
os.mkdir("{}/input_geometries".format(DATA_PATH))
# download file from given url
url = self.inputGeometries
urllib.request.urlretrieve(url, raw_input_file)
logger.info(
f"{self.projectId}"
f" - __init__ - "
f"downloaded input geometries from url and saved as file: "
f"{raw_input_file}"
)
self.inputGeometries = raw_input_file
# open the raw input file and get layer
driver = ogr.GetDriverByName("GeoJSON")
datasource = driver.Open(raw_input_file, 0)
try:
layer = datasource.GetLayer()
LayerDefn = layer.GetLayerDefn()
except AttributeError:
raise CustomError("Value error in input geometries file")
# create layer for valid_input_file to store all valid geometries
outDriver = ogr.GetDriverByName("GeoJSON")
# Remove output geojson if it already exists
if os.path.exists(valid_input_file):
outDriver.DeleteDataSource(valid_input_file)
outDataSource = outDriver.CreateDataSource(valid_input_file)
outLayer = outDataSource.CreateLayer(
"geometries", geom_type=ogr.wkbMultiPolygon
)
for i in range(0, LayerDefn.GetFieldCount()):
fieldDefn = LayerDefn.GetFieldDefn(i)
outLayer.CreateField(fieldDefn)
outLayerDefn = outLayer.GetLayerDefn()
# check if raw_input_file layer is empty
if layer.GetFeatureCount() < 1:
err = "empty file. No geometries provided"
            # TODO: How to use logger and exceptions?
logger.warning(f"{self.projectId} - check_input_geometry - {err}")
raise Exception(err)
# get geometry as wkt
# get the bounding box/ extent of the layer
extent = layer.GetExtent()
# Create a Polygon from the extent tuple
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(extent[0], extent[2])
ring.AddPoint(extent[1], extent[2])
ring.AddPoint(extent[1], extent[3])
ring.AddPoint(extent[0], extent[3])
ring.AddPoint(extent[0], extent[2])
poly = ogr.Geometry(ogr.wkbPolygon)
poly.AddGeometry(ring)
wkt_geometry = poly.ExportToWkt()
# check if the input geometry is a valid polygon
for feature in layer:
feat_geom = feature.GetGeometryRef()
geom_name = feat_geom.GetGeometryName()
            fid = feature.GetFID()
if not feat_geom.IsValid():
layer.DeleteFeature(fid)
logger.warning(
f"{self.projectId}"
f" - check_input_geometries - "
f"deleted invalid feature {fid}"
)
# we accept only POLYGON or MULTIPOLYGON geometries
elif geom_name != "POLYGON" and geom_name != "MULTIPOLYGON":
layer.DeleteFeature(fid)
logger.warning(
f"{self.projectId}"
f" - check_input_geometries - "
f"deleted non polygon feature {fid}"
)
else:
# Create output Feature
outFeature = ogr.Feature(outLayerDefn)
# Add field values from input Layer
for i in range(0, outLayerDefn.GetFieldCount()):
outFeature.SetField(
outLayerDefn.GetFieldDefn(i).GetNameRef(), feature.GetField(i)
)
outFeature.SetGeometry(feat_geom)
outLayer.CreateFeature(outFeature)
outFeature = None
# check if layer is empty
if layer.GetFeatureCount() < 1:
err = "no geometries left after checking validity and geometry type."
logger.warning(f"{self.projectId} - check_input_geometry - {err}")
raise Exception(err)
del datasource
del outDataSource
del layer
self.validInputGeometries = valid_input_file
logger.info(
f"{self.projectId}"
f" - check_input_geometry - "
f"filtered correct input geometries and created file: "
f"{valid_input_file}"
)
return wkt_geometry
def create_groups(self):
raw_groups = g.group_input_geometries(self.validInputGeometries, self.groupSize)
for group_id, item in raw_groups.items():
group = Group(self, group_id)
group.create_tasks(
item["feature_ids"],
item["feature_geometries"],
item["center_points"],
item["reference"],
item["screen"],
)
# only append valid groups
if group.is_valid():
self.groups.append(group)
logger.info(
f"{self.projectId} " f"- create_groups - " f"created groups dictionary"
)
|
n = int(input())
names = []
for cntN in range(n):
names.append(input().strip())
profits = {name:0 for name in names}
for cntN in range(n):
a = input().strip()
am, k = [int(x) for x in input().split()]
if k == 0:
continue
pr = am // k
profits[a] -= pr * k
# friends = []
for cntK in range(k):
profits[input().strip()] += pr
for name in names:
print(name, profits[name])
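# Worked example (illustrative input, not taken from the original problem
# statement): with the input
#   3
#   alice
#   bob
#   claire
#   alice
#   6 2
#   bob
#   claire
#   bob
#   0 0
#   claire
#   0 0
# each of alice's two friends receives 6 // 2 = 3, alice is charged 3 * 2 = 6,
# and the program prints:
#   alice -6
#   bob 3
#   claire 3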
|
import operator
class Paginator(object):
"""
Implements a class which provides paginated iteration over an endpoint.
"""
def __init__(self, func, sort_key, *args, **kwargs):
self.func = func
self.sort_key = sort_key
self.args = args
self.kwargs = kwargs
self._key = kwargs.pop('key', operator.attrgetter('id'))
self._bulk = kwargs.pop('bulk', False)
self._sort_key_value = kwargs.pop(self.sort_key, None)
self._buffer = []
def fill(self):
self.kwargs[self.sort_key] = self._sort_key_value
result = self.func(*self.args, **self.kwargs)
if not len(result):
return 0
self._buffer.extend(result)
self._sort_key_value = self._key(result[-1])
return len(result)
def next(self):
return self.__next__()
def __iter__(self):
return self
def __next__(self):
if not len(self._buffer):
if not self.fill():
raise StopIteration
if self._bulk:
res = self._buffer
self._buffer = []
return res
else:
return self._buffer.pop()
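# Hypothetical usage sketch (the endpoint below is made up and not part of the
# original module): paginate over a fake API that returns at most three items
# whose id is greater than the `id` keyword argument it receives.
if __name__ == '__main__':
    class Item(object):
        def __init__(self, id):
            self.id = id

    def fake_endpoint(id=None):
        start = 0 if id is None else id
        return [Item(i) for i in range(start + 1, min(start + 4, 11))]

    # Note: items within a page come out in reverse order because __next__
    # pops from the end of the internal buffer.
    print([item.id for item in Paginator(fake_endpoint, 'id')])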
|
from sklearn.svm import SVC
import numpy as np
from sklearn.base import TransformerMixin
from ot_distances import RJW_distance
import time
from scg_optimizer import NonConvergenceError
from sklearn.exceptions import NotFittedError
class InfiniteException(Exception):
pass
class NanErrorInDist(Exception):
pass
"""
The following classes are used to create a SVM classifier over the RJW distance using the indefinite kernel e^{-\gamma*RJW}
"""
class GenericSVCClassifier(TransformerMixin):
""" GenericSVCClassifier is a sklearn compatible class.
    It computes an SVM classifier over any type of data as long as a similarity measure is defined.
More precisely if f is a similarity measure it computes a SVM on a precomputed similarity matrix
K=exp{-gamma*f(x,y)} for all x,y
Attributes
----------
similarity_measure : a method
        The similarity measure between the points
gamma : float
The gamma parameter in the similarity matrix K=exp{-gamma*f(x,y)}
D : ndarray
The similarity matrix f(x,y)
svc : the SVM classifier from sklearn
C : float
The C parameter of the SVM
"""
def __init__(self,similarity_measure,C=1,gamma=1,verbose=False,always_raise=False):
self.similarity_measure = similarity_measure
self.gamma=gamma
self.C=C
self.verbose=verbose
self.D=None
self.similarity_measure_time=[]
self.infiniteExceptionOccuredInFit=False
self.always_raise=always_raise
self.svc=SVC(C=self.C,kernel="precomputed",verbose=self.verbose,max_iter=10000000) #rbf
def compute_similarity(self,x,y):
""" Compute the similarity between x and y using the similarity_measure
Parameters
----------
        x : an abstract object
        y : an abstract object
Returns
-------
A float representative of the similarity
"""
start=time.time()
try:
similarity=self.similarity_measure(x,y)
except NonConvergenceError:
print('NonConvergenceError for ',x.characterized(),y.characterized())
similarity=np.nan
if self.always_raise:
raise NanErrorInDist
if np.isnan(similarity) and self.always_raise:
raise NanErrorInDist
end=time.time()
self.similarity_measure_time.append(end-start)
return similarity
def gram_matrix(self,X,Y,matrix=None,method='classic'):
""" Compute the similarity matrix K=exp{-gamma*f(x,y)} with f the similarity measure
for all x,y in X and Y
Parameters
----------
X : array of abstract object
Y : array of abstract object
        matrix : ndarray, optional
            If specified, used as the precomputed similarity matrix instead of computing all the similarities
method : string
If equal to classic compute K=exp{-gamma*f(x,y)}, if equal to no_gaussian compute only f(x,y)
Returns
-------
D : ndarray
The gram matrix of all similarities K=exp{-gamma*f(x,y)} or f(x,y) if method='no_gaussian'
"""
self.compute_all_distance(X,Y,matrix)
if method=='classic':
Z=np.exp(-self.gamma*(self.D))
if not self.assert_all_finite(Z):
raise InfiniteException('There is Nan')
else:
return Z
if method=='no_gaussian':
return self.D
def fit(self,X,y=None,matrix=None):
""" Fit the SVM classifier on the similarity matrix
Parameters
----------
X : array of abstract object
y : classes of all objects
        matrix : ndarray, optional
            If specified, used as the precomputed similarity matrix instead of computing all the similarities
Returns
-------
self
"""
self.classes_ =np.array(y)
self._fit_X=np.array(X)
Gtrain = np.zeros((X.shape[0],X.shape[0]))
start=time.time()
try :
Gtrain = self.gram_matrix(X,X,matrix,method='classic')
self.svc.fit(Gtrain,self.classes_)
if self.verbose:
print('Time fit : ',time.time()-start)
except InfiniteException:
self.infiniteExceptionOccuredInFit=True
print('InfiniteException : value error in fit because nan')
return self
def predict(self,X,matrix=None):
""" Apply the SVM classifier on X
Parameters
----------
X : array of abstract object
        matrix : ndarray, optional
            If specified, used as the precomputed similarity matrix instead of computing all the similarities
Returns
-------
self
"""
try :
G=self.gram_matrix(X,self._fit_X,matrix,method='classic')
preds=self.svc.predict(G)
except InfiniteException:
print('InfiniteException : Preds error because nan')
preds=np.repeat(-10,len(X)) # Dirty trick so that preds are not None
except NotFittedError:
if self.infiniteExceptionOccuredInFit :
                print('NotFittedError : NaN in the Gram matrix of fit but not in the one of predict')
preds=np.repeat(-10,len(X)) # Dirty trick so that preds are not None
else:
raise NotFittedError
return preds
def assert_all_finite(self,X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
a=X.dtype.char in np.typecodes['AllFloat']
b=np.isfinite(X.sum())
c=np.isfinite(X).all()
if (a and not b and not c):
return False
else :
return True
def compute_all_distance(self,X,Y,matrix=None):
""" Compute all similarities f(x,y) for x,y in X and Y and f the similarity measure
Parameters
----------
X : array of abstract object
Y : array of abstract object
        matrix : ndarray, optional
            If specified, used as the precomputed similarity matrix instead of computing all the similarities
Returns
-------
None. Set the similarity matrix
"""
if matrix is not None :
self.D=matrix
else:
X=X.reshape(X.shape[0],)
Y=Y.reshape(Y.shape[0],)
if np.all(X==Y):
D= np.zeros((X.shape[0], Y.shape[0]))
H=np.zeros((X.shape[0], Y.shape[0]))
for i, x1 in enumerate(X):
for j,x2 in enumerate(Y):
if j>=i:
dist=self.compute_similarity(x1, x2)
D[i, j] = dist
np.fill_diagonal(H,np.diagonal(D))
D=D+D.T-H
else:
D = np.zeros((X.shape[0], Y.shape[0]))
for i, x1 in enumerate(X):
row=[self.compute_similarity(x1, x2) for j,x2 in enumerate(Y)]
D[i,:]=row
D[np.abs(D)<=1e-15]=0 #threshold due to numerical precision
self.D=D
def set_one_param(self,dicto,key):
if key in dicto:
setattr(self, key, dicto[key])
def get_params(self, deep=True):
return {"similarity_measure":self.similarity_measure,"gamma":self.gamma,"C":self.C}
def get_distances_params(self):
return {"similarity_measure":self.similarity_measure}
def set_params(self, **parameters):
self.set_one_param(parameters,"similarity_measure")
self.set_one_param(parameters,"C")
self.set_one_param(parameters,"gamma")
self.svc=SVC(C=self.C,kernel="precomputed",verbose=self.verbose,max_iter=10000000)
return self
class Graph_RJW_SVC_Classifier(GenericSVCClassifier):
""" Graph_RJW_SVC_Classifier is a generic class that inherit from GenericSVCClassifier.
Attributes
----------
gw : a RJW_distance instance
The RJW_distance class for computing RJW
alpha : float
The alpha parameter of RJW
method : string
        The name of the method used to compute the structure matrices of the graphs. See Graph class
    max_iter : integer
        Number of iterations of the FW algorithm for the computation of RJW.
features_metric : string
The name of the method used to compute the cost matrix between the features
transp : ndarray, shape (ns,nt)
The transport matrix between the source distribution and the target distribution
    amijo : bool, optional
        If True, the step size of the line search is found via an Armijo search. Otherwise a closed form is used.
        If there are convergence issues, use False.
    wl : integer
        Parameter for the Weisfeiler-Lehman attributes.
"""
def __init__(self,C=1,gamma=1,alpha=1,beta=0.5,method='random_walk',features_metric='sqeuclidean',
verbose=False,always_raise=False,amijo=True,wl=0):
self.rjw=RJW_distance(alpha=alpha,method=method,features_metric=features_metric,amijo=amijo)
similarity_measure=self.rjw.graph_d
GenericSVCClassifier.__init__(self,similarity_measure=similarity_measure,C=C,gamma=gamma,verbose=verbose)
self.alpha=alpha
self.beta=beta
self.features_metric=features_metric
self.method=method
self.wl=wl
self.amijo=amijo
GenericSVCClassifier.__init__(self,C=C,gamma=gamma,similarity_measure=similarity_measure,verbose=verbose,
always_raise=always_raise)
def fit(self,X,y=None,matrix=None):
self.classes_ = y
self._fit_X = list(X.reshape(X.shape[0],))
for x in self._fit_X :
if x.C is None or x.name_struct_dist!=self.method:
if self.verbose:
print('******************************************************')
                    print('Building the structure matrices')
if x.C is not None:
print('before ',x.name_struct_dist)
print('nw ',self.method)
else:
print('Because structure is None')
print('******************************************************')
_=x.distance_matrix(method=self.method,force_recompute=True)
super(Graph_RJW_SVC_Classifier,self).fit(X,y,matrix)
def get_params(self, deep=True):
return {"alpha":self.alpha
,"features_metric":self.features_metric
,"method":self.method
,"C":self.C
,"gamma":self.gamma
,"amijo":self.amijo
,"wl":self.wl
}
def set_params(self, **parameters):
self.set_one_param(parameters,"alpha")
self.set_one_param(parameters,"features_metric")
self.set_one_param(parameters,"method")
self.set_one_param(parameters,"C")
self.set_one_param(parameters,"gamma")
self.set_one_param(parameters,"amijo")
self.set_one_param(parameters,"wl")
self.svc=SVC(C=self.C,kernel="precomputed",verbose=self.verbose,max_iter=10000000) # rbf
rjw2=RJW_distance(alpha=self.alpha,beta=self.beta, method=self.method,
features_metric=self.features_metric,
amijo=self.amijo)
if self.rjw.get_tuning_params()!=rjw2.get_tuning_params():
self.rjw=RJW_distance(alpha=self.alpha,method=self.method,features_metric=self.features_metric,
amijo=self.amijo)
self.similarity_measure=self.rjw.graph_d
return self
def get_distances_params(self):
dall = {}
dall.update(self.rjw.get_tuning_params())
dall.update({'wl':self.wl})
return dall |
def dict_to_filter_params(d, prefix=''):
"""
Translate a dictionary of attributes to a nested set of parameters suitable for QuerySet filtering. For example:
{
"name": "Foo",
"rack": {
"facility_id": "R101"
}
}
Becomes:
{
"name": "Foo",
"rack__facility_id": "R101"
}
And can be employed as filter parameters:
Device.objects.filter(**dict_to_filter(attrs_dict))
"""
params = {}
for key, val in d.items():
k = prefix + key
if isinstance(val, dict):
params.update(dict_to_filter_params(val, k + '__'))
else:
params[k] = val
return params
class ClassPropertyMeta(type):
def __setattr__(cls, key, value):
obj = cls.__dict__.get(key, None)
if isinstance(obj, classproperty):
return obj.__set__(cls, value)
return super().__setattr__(key, value)
class classproperty:
"""
Similar to @property but used on classes instead of instances.
The only caveat being that your class must use the
classproperty.meta metaclass.
Class properties will still work on class instances unless the
    class instance has overridden the class default. This is no different
than how class instances normally work.
Derived from: https://stackoverflow.com/a/5191224/721519
class Z(object, metaclass=classproperty.meta):
@classproperty
def foo(cls):
return 123
_bar = None
@classproperty
def bar(cls):
return cls._bar
@bar.setter
def bar(cls, value):
            cls._bar = value
Z.foo # 123
Z.bar # None
Z.bar = 222
Z.bar # 222
"""
meta = ClassPropertyMeta
def __init__(self, fget, fset=None):
self.fget = self._fix_function(fget)
self.fset = None if fset is None else self._fix_function(fset)
def __get__(self, instance, owner=None):
if not issubclass(type(owner), ClassPropertyMeta):
raise TypeError(
f'Class {owner} does not extend from the required '
f'ClassPropertyMeta metaclass'
)
return self.fget.__get__(None, owner)()
def __set__(self, owner, value):
if not self.fset:
raise AttributeError('can\'t set attribute')
if isinstance(owner, ClassPropertyMeta):
owner = type(owner)
return self.fset.__get__(None, owner)(value)
def setter(self, fset):
self.fset = self._fix_function(fset)
return self
_fn_types = (type(__init__), classmethod, staticmethod)
@classmethod
def _fix_function(cls, fn):
if not isinstance(fn, cls._fn_types):
raise TypeError('Getter or setter must be a function')
# Always wrap in classmethod so we can call its __get__ and not
# have to deal with difference between raw functions.
if not isinstance(fn, (classmethod, staticmethod)):
return classmethod(fn)
return fn
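# Quick self-check sketch (not part of the original module), mirroring the
# example given in the dict_to_filter_params docstring above.
if __name__ == '__main__':
    attrs = {"name": "Foo", "rack": {"facility_id": "R101"}}
    assert dict_to_filter_params(attrs) == {
        "name": "Foo",
        "rack__facility_id": "R101",
    }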
|
import numpy as np
import random
import pickle
import os.path as osp
import pyflex
from copy import deepcopy
from softgym.envs.cloth_fold import ClothFoldEnv
class ClothFoldDropEnv(ClothFoldEnv):
def __init__(self, **kwargs):
self.start_height = 0.8
kwargs['cached_states_path'] = 'cloth_fold_drop_init_states.pkl'
super().__init__(**kwargs)
def _get_drop_point_idx(self):
return self._get_key_point_idx()[:2]
def get_default_config(self):
""" Set the default config of the environment and load it to self.config """
config = {
'ClothPos': [-1.6, 2.0, -0.8],
'ClothSize': [64, 32],
'ClothStiff': [0.9, 1.0, 0.9], # Stretch, Bend and Shear
'camera_name': 'default_camera',
'camera_params': {'default_camera':
{'pos': np.array([1.07199, 0.94942, 1.15691]),
'angle': np.array([0.633549, -0.397932, 0]),
'width': self.camera_width,
'height': self.camera_height}},
'flip_mesh': 0
}
return config
def generate_env_variation(self, num_variations=1, vary_cloth_size=True):
""" Generate initial states. Note: This will also change the current states! """
        max_wait_step = 500  # Maximum number of steps waiting for the cloth to stabilize
stable_vel_threshold = 0.1 # Cloth stable when all particles' vel are smaller than this
generated_configs, generated_states = [], []
default_config = self.get_default_config()
for i in range(num_variations):
config = deepcopy(default_config)
self.update_camera(config['camera_name'], config['camera_params'][config['camera_name']])
if vary_cloth_size:
cloth_dimx, cloth_dimy = self._sample_cloth_size()
config['ClothSize'] = [cloth_dimx, cloth_dimy]
else:
cloth_dimx, cloth_dimy = config['ClothSize']
self.set_scene(config)
self.action_tool.reset([0., -1., 0.])
            pickpoints = self._get_drop_point_idx()[:2]  # Pick two corners of the cloth and wait until they stabilize
config['target_pos'] = self._get_flat_pos()
self._set_to_vertical(x_low=np.random.random() * 0.2 - 0.1, height_low=np.random.random() * 0.1 + 0.1)
# Get height of the cloth without the gravity. With gravity, it will be longer
p1, _, p2, _ = self._get_key_point_idx()
# cloth_height = np.linalg.norm(curr_pos[p1] - curr_pos[p2])
curr_pos = pyflex.get_positions().reshape(-1, 4)
curr_pos[0] += np.random.random() * 0.001 # Add small jittering
original_inv_mass = curr_pos[pickpoints, 3]
curr_pos[pickpoints, 3] = 0 # Set mass of the pickup point to infinity so that it generates enough force to the rest of the cloth
pickpoint_pos = curr_pos[pickpoints, :3]
pyflex.set_positions(curr_pos.flatten())
picker_radius = self.action_tool.picker_radius
self.action_tool.update_picker_boundary([-0.3, 0.05, -0.5], [0.5, 2, 0.5])
self.action_tool.set_picker_pos(picker_pos=pickpoint_pos + np.array([0., picker_radius, 0.]))
            # Pick up the cloth and wait for it to stabilize
for j in range(0, max_wait_step):
pyflex.step()
curr_pos = pyflex.get_positions().reshape((-1, 4))
curr_vel = pyflex.get_velocities().reshape((-1, 3))
if np.alltrue(curr_vel < stable_vel_threshold) and j > 300:
break
curr_pos[pickpoints, :3] = pickpoint_pos
pyflex.set_positions(curr_pos)
curr_pos = pyflex.get_positions().reshape((-1, 4))
curr_pos[pickpoints, 3] = original_inv_mass
pyflex.set_positions(curr_pos.flatten())
generated_configs.append(deepcopy(config))
print('config {}: {}'.format(i, config['camera_params']))
generated_states.append(deepcopy(self.get_state()))
return generated_configs, generated_states
def _reset(self):
""" Right now only use one initial state"""
if hasattr(self, 'action_tool'):
particle_pos = pyflex.get_positions().reshape(-1, 4)
drop_point_pos = particle_pos[self._get_drop_point_idx(), :3]
middle_point = np.mean(drop_point_pos, axis=0)
self.action_tool.reset(middle_point) # middle point is not really useful
picker_radius = self.action_tool.picker_radius
self.action_tool.update_picker_boundary([-0.3, 0.5, -0.5], [0.5, 2, 0.5])
self.action_tool.set_picker_pos(picker_pos=drop_point_pos + np.array([0., picker_radius, 0.]))
config = self.get_current_config()
num_particles = np.prod(config['ClothSize'], dtype=int)
particle_grid_idx = np.array(list(range(num_particles))).reshape(config['ClothSize'][1], config['ClothSize'][0]) # Reversed index here
cloth_dimx = config['ClothSize'][0]
x_split = cloth_dimx // 2
self.fold_group_a = particle_grid_idx[:, :x_split].flatten()
self.fold_group_b = np.flip(particle_grid_idx, axis=1)[:, :x_split].flatten()
colors = np.zeros(num_particles)
colors[self.fold_group_a] = 1
pyflex.step()
self.init_pos = pyflex.get_positions().reshape((-1, 4))[:, :3]
pos_a = self.init_pos[self.fold_group_a, :]
pos_b = self.init_pos[self.fold_group_b, :]
self.prev_dist = np.mean(np.linalg.norm(pos_a - pos_b, axis=1))
self.performance_init = None
info = self._get_info()
self.performance_init = info['performance']
return self._get_obs()
def _set_to_vertical(self, x_low, height_low):
curr_pos = pyflex.get_positions().reshape((-1, 4))
vertical_pos = self._get_vertical_pos(x_low, height_low)
curr_pos[:, :3] = vertical_pos
max_height = np.max(curr_pos[:, 1])
if max_height < 0.5:
curr_pos[:, 1] += 0.5 - max_height
pyflex.set_positions(curr_pos)
pyflex.step()
def _get_vertical_pos(self, x_low, height_low):
config = self.get_current_config()
dimx, dimy = config['ClothSize']
x = np.array([i * self.cloth_particle_radius for i in range(dimx)])
x = np.array(list(reversed(x)))
y = np.array([i * self.cloth_particle_radius for i in range(dimy)])
# x = x - np.mean(x)
y = y - np.mean(y)
xx, yy = np.meshgrid(x, y)
curr_pos = np.zeros([dimx * dimy, 3], dtype=np.float32)
curr_pos[:, 0] = x_low
curr_pos[:, 2] = yy.flatten()
curr_pos[:, 1] = xx.flatten() - np.min(xx) + height_low
return curr_pos |
import unittest
from yoti_python_sdk.doc_scan.session.retrieve.document_fields_response import (
DocumentFieldsResponse,
)
from yoti_python_sdk.doc_scan.session.retrieve.file_response import (
FileResponse,
)
from yoti_python_sdk.doc_scan.session.retrieve.supplementary_document_resource_response import (
SupplementaryDocumentResourceResponse,
)
from yoti_python_sdk.doc_scan.session.retrieve.task_response import (
SupplementaryDocumentTextExtractionTaskResponse,
TaskResponse,
)
class SupplementaryDocumentResourceResponseTest(unittest.TestCase):
SOME_ID = "someId"
SOME_DOCUMENT_TYPE = "someDocumentType"
SOME_ISSUING_COUNTRY = "someIssuingCountry"
SOME_TASKS = [
{"first": "task", "type": "SUPPLEMENTARY_DOCUMENT_TEXT_DATA_EXTRACTION"},
{"second": "task"},
]
SOME_PAGES = [{"first": "page"}, {"second": "page"}]
SOME_DOCUMENT_FIELDS = {"media": {}}
SOME_DOCUMENT_FILE = {"media": {}}
def test_should_parse_correctly(self):
data = {
"id": self.SOME_ID,
"document_type": self.SOME_DOCUMENT_TYPE,
"issuing_country": self.SOME_ISSUING_COUNTRY,
"tasks": self.SOME_TASKS,
"pages": self.SOME_PAGES,
"document_fields": self.SOME_DOCUMENT_FIELDS,
"file": self.SOME_DOCUMENT_FILE,
}
result = SupplementaryDocumentResourceResponse(data)
assert result.id == self.SOME_ID
assert result.document_type == self.SOME_DOCUMENT_TYPE
assert result.issuing_country == self.SOME_ISSUING_COUNTRY
assert len(result.tasks) == 2
assert len(result.pages) == 2
assert isinstance(result.document_fields, DocumentFieldsResponse)
assert isinstance(result.document_file, FileResponse)
def test_should_parse_when_none(self):
result = SupplementaryDocumentResourceResponse(None)
assert result.id is None
assert result.document_type is None
assert result.issuing_country is None
assert len(result.tasks) == 0
assert len(result.pages) == 0
assert result.document_fields is None
assert result.document_file is None
def test_should_parse_tasks_with_type(self):
data = {
"id": self.SOME_ID,
"document_type": self.SOME_DOCUMENT_TYPE,
"issuing_country": self.SOME_ISSUING_COUNTRY,
"tasks": self.SOME_TASKS,
"pages": self.SOME_PAGES,
"document_fields": self.SOME_DOCUMENT_FIELDS,
}
result = SupplementaryDocumentResourceResponse(data)
assert len(result.tasks) == 2
assert isinstance(
result.tasks[0], SupplementaryDocumentTextExtractionTaskResponse
)
assert isinstance(result.tasks[1], TaskResponse)
def test_should_filter_text_extraction_tasks(self):
data = {"tasks": self.SOME_TASKS}
result = SupplementaryDocumentResourceResponse(data)
assert len(result.tasks) == 2
assert len(result.text_extraction_tasks) == 1
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib import admin
from thecut.authorship.admin import AuthorshipMixin
from thecut.pages.models import Page
class PageAdmin(AuthorshipMixin, admin.ModelAdmin):
fieldsets = [
(None, {'fields': ['title', 'headline', 'featured_content', 'content',
'meta_description', 'tags']}),
('Publishing', {'fields': ['site', 'url', ('publish_at', 'is_enabled'),
'expire_at', 'publish_by', 'template',
'is_featured', 'is_indexable',
('created_at', 'created_by'),
('updated_at', 'updated_by')],
'classes': ['collapse']}),
]
list_display = ['title', 'publish_at', 'is_enabled', 'is_featured',
'is_indexable']
list_filter = ['publish_at', 'is_enabled', 'is_featured', 'is_indexable']
prepopulated_fields = {'url': ['title']}
readonly_fields = ['created_at', 'created_by', 'updated_at', 'updated_by']
search_fields = ['title']
admin.site.register(Page, PageAdmin)
|
# coding=utf-8
import sys
sys.path.insert(0, "../pylib")
import os.path
import json
import datetime
import logging
from hive_service import ThriftHive
from hive_service.ttypes import HiveServerException
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
def test():
transport = TSocket.TSocket('localhost', 10000)
transport = TTransport.TBufferedTransport(transport)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = ThriftHive.Client(protocol)
transport.open()
client.execute("SELECT 1;")
print client.fetchOne()
transport.close()
test()
|
import tensorflow.keras.backend as K
import tensorflow as tf
def mae(y_true, y_pred):
"""
Mean absolute error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: mean absolute error
"""
return K.mean(K.abs(y_pred - y_true), axis=-1)
def mse(y_true, y_pred):
"""
Mean squared error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: mean squared error
"""
return K.mean(K.square(y_pred - y_true), axis=-1)
def mape(y_true, y_pred):
"""
Mean absolute percentage error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: mean absolute percentage error
"""
diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
return 100. * K.mean(diff, axis=-1)
def msle(y_true, y_pred):
"""
Mean squared logarithmic error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: mean squared logarithmic error
"""
first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
return K.mean(K.square(first_log - second_log), axis=-1)
def r2(y_true, y_pred):
"""
:math:`R^2` (coefficient of determination) regression score function.
Best possible score is 1.0, lower values are worse.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: R2
"""
SS_res = tf.reduce_sum(tf.square(y_true - y_pred), axis=-1)
SS_tot = tf.reduce_sum(tf.square(y_true - tf.reduce_mean(y_true, axis=-1)), axis=-1)
return (1 - SS_res/(SS_tot + tf.keras.backend.epsilon()))
def adj_r2(y_true, y_pred):
"""
Adjusted R2 regression score function with default inputs.
Best possible score is 1.0, lower values are worse.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: adjusted R2
"""
SS_res = tf.reduce_sum(tf.square(y_true - y_pred), axis=-1)
SS_tot = tf.reduce_sum(tf.square(y_true - tf.reduce_mean(y_true, axis=-1)), axis=-1)
return (1 - SS_res/(SS_tot + tf.keras.backend.epsilon())) * (1 - (1 - r2(y_true, y_pred)) * (tf.cast(tf.size(y_true), tf.float32) - 1) / (tf.cast(tf.size(y_true), tf.float32) - tf.cast(tf.rank(y_true), tf.float32) - 1))
# SS_res = tf.reduce_sum(tf.square(y_true - y_pred), axis=-1)
# SS_tot = tf.reduce_sum(tf.square(y_true - tf.reduce_mean(y_true, axis=-1)), axis=-1)
# adj_SS_res = tf.cast(SS_res / (K.shape(y_true)[0] - 1), tf.int32)
# adj_SS_tot = tf.cast(SS_tot / (K.shape(y_true)[0] - 1), tf.int32)
# return (1 - adj_SS_res/(adj_SS_tot + tf.keras.backend.epsilon()))
def rmsle(y_true, y_pred):
"""
    Root Mean Squared Logarithmic Error
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: root mean squared logarithm error
"""
first_log = K.log(K.clip(y_pred, K.epsilon(), None) + 1.)
second_log = K.log(K.clip(y_true, K.epsilon(), None) + 1.)
return K.sqrt(K.mean(K.square(first_log - second_log), axis=-1))
def rmse(y_true, y_pred):
"""
Root Mean Squared Error
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: root mean squared error
"""
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
def smape(y_true, y_pred):
"""
Symmetric mean absolute percentage error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: symmetric mean absolute percentage error
"""
diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
return 100. * K.mean(K.mean(diff, axis=-1))
def smape_log(y_true, y_pred):
"""
Symmetric mean absolute percentage error regression loss.
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: symmetric mean absolute percentage error
"""
diff = K.abs((y_true - y_pred) / K.clip(K.abs(y_true), K.epsilon(), None))
return K.log(K.mean(K.mean(diff, axis=-1)))
def nrmse(y_true, y_pred):
"""
Normalized Root Mean Squared Error
Args:
y_true ([np.array]): test samples
y_pred ([np.array]): predicted samples
Returns:
[float]: normalized root mean squared error
"""
return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1)) / K.mean(K.abs(y_true), axis=-1)
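# Minimal sanity-check sketch (not part of the original module; assumes TF2
# eager execution so .numpy() is available): evaluate a few of the metrics
# defined above on small constant tensors.
if __name__ == '__main__':
    y_true = tf.constant([[1.0, 2.0, 3.0]])
    y_pred = tf.constant([[1.5, 2.0, 2.5]])
    print('mae :', mae(y_true, y_pred).numpy())
    print('rmse:', rmse(y_true, y_pred).numpy())
    print('r2  :', r2(y_true, y_pred).numpy())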
|
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: otp.uberdog.OtpAvatarManager
from cPickle import loads, dumps
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
notify = DirectNotifyGlobal.directNotify.newCategory('AvatarManager')
class OtpAvatarManager(DistributedObject.DistributedObject):
__module__ = __name__
notify = notify
OnlineEvent = 'GlobalAvatarManagerOnline'
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.avatars = {}
def delete(self):
self.ignoreAll()
self.cr.avatarManager = None
DistributedObject.DistributedObject.delete(self)
return
def online(self):
messenger.send(OtpAvatarManager.OnlineEvent)
def sendRequestAvatarList(self):
self.sendUpdate('requestAvatarList', [0])
def rejectAvatarList(self, result):
messenger.send('avatarListFailed', [result])
def avatarListResponse(self, pickleData):
avatars = loads(pickleData)
messenger.send('avatarList', [avatars])
def rejectCreateAvatar(self, result):
messenger.send('createdNewAvatarFailed', [result])
def createAvatarResponse(self, avatarId, subId, access, founder):
self.notify.info('new avatarId: %s subId: %s access: %s founder: %s' % (avatarId, subId, access, founder))
messenger.send('createdNewAvatar', [avatarId, subId])
def sendRequestRemoveAvatar(self, avatarId, subId, confirmPassword):
self.sendUpdate('requestRemoveAvatar', [0, avatarId, subId, confirmPassword])
def rejectRemoveAvatar(self, reasonId):
messenger.send('rejectRemoveAvatar', [reasonId])
def removeAvatarResponse(self, avatarId, subId):
messenger.send('removeAvatarResponse', [avatarId, subId])
def sendRequestShareAvatar(self, avatarId, subId, shared):
self.sendUpdate('requestShareAvatar', [0, avatarId, subId, shared])
def rejectShareAvatar(self, reasonId):
messenger.send('rejectShareAvatar', [reasonId])
def shareAvatarResponse(self, avatarId, subId, shared):
messenger.send('shareAvatarResponse', [avatarId, subId, shared])
def sendRequestAvatarSlot(self, subId, slot):
self.sendUpdate('requestAvatarSlot', [0, subId, slot])
def rejectAvatarSlot(self, reasonId, subId, slot):
messenger.send('rejectAvatarSlot', [reasonId, subId, slot])
def avatarSlotResponse(self, subId, slot):
messenger.send('avatarSlotResponse', [subId, slot])
def sendRequestPlayAvatar(self, avatarId, subId):
self.sendUpdate('requestPlayAvatar', [0, avatarId, subId])
def rejectPlayAvatar(self, reasonId, avatarId):
messenger.send('rejectPlayAvatar', [reasonId, avatarId])
def playAvatarResponse(self, avatarId, subId, access, founder):
messenger.send('playAvatarResponse', [avatarId, subId, access, founder]) |
# The contents of this file are subject to the BitTorrent Open Source License
# Version 1.1 (the License). You may not copy or use this file, in either
# source code or executable form, except in compliance with the License. You
# may obtain a copy of the License at http://www.bittorrent.com/license/.
#
# Software distributed under the License is distributed on an AS IS basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
# written by Greg Hazel, based on code by Matt Chisholm
from __future__ import division
import os
import sys
import math
import random
from BTL.translation import _
from BTL.platform import app_name
from BitTorrent.platform import image_root
from BTL.sparse_set import SparseSet
from BTL.obsoletepythonsupport import set
from BitTorrent.GUI_wx import VSizer, HSizer, BTDialog, CheckButton
from BitTorrent.GUI_wx import ChooseDirectorySizer, SPACING, ElectroStaticText
from BitTorrent.GUI_wx import IPValidator, PortValidator, text_wrappable, gui_wrap
from BitTorrent.GUI_wx import list_themes
from BitTorrent.GUI_wx.CustomWidgets import FancyDownloadGauge, SimpleDownloadGauge, ModerateDownloadGauge
from BitTorrent.UI import Rate
from BitTorrent.GUI_wx.LanguageSettings import LanguageSettings
import wx
upload_speed_classes = {
( 4, 5):_("dialup" ),
( 6, 14):_("DSL/cable 128Kb up"),
( 15, 29):_("DSL/cable 256Kb up"),
( 30, 91):_("DSL 768Kb up" ),
( 92, 137):_("T1" ),
( 138, 182):_("T1/E1" ),
( 183, 249):_("E1" ),
( 250, 5446):_("T3" ),
( 5447,18871):_("OC3" ),
(18872,125e6):_("fast" ),
}
download_speed_classes = {
( 4, 5):_("dialup" ),
( 6, 46):_("DSL/cable 384Kb down"),
( 47, 93):_("DSL/cable 768Kb down"),
( 93, 182):_("DSL/T1" ),
( 182, 249):_("E1" ),
( 250, 729):_("DSL 6Mb down" ),
( 730, 5442):_("T3" ),
( 5443,18858):_("OC3" ),
(18859,125e6):_("fast" ),
}
class RateSlider(wx.Slider):
base = 10
multiplier = 4
max_exponent = 4.49
slider_scale = 1000 # slider goes from 0 to slider_scale * max_exponent
backend_conversion = 1024 # slider deals in KB, backend in B
def __init__(self, parent, value, speed_classes):
self.speed_classes = speed_classes
value = self.bytes_to_slider(value)
wx.Slider.__init__(self, parent, wx.ID_ANY,
value=value, minValue=0,
maxValue=self.max_exponent * self.slider_scale)
def bytes_to_slider(self, value):
value /= self.backend_conversion
try:
r = math.log(value/self.multiplier, self.base)
except OverflowError, e:
wx.the_app.logger.error(u'%s (%s, %s, %s)' % (unicode(e.args[0]),
value,
self.multiplier,
self.base),
exc_info=sys.exc_info())
            r = 0  # fall back to the slider minimum so the caller still gets a number
        return r * self.slider_scale
def slider_to_bytes(self, value):
r = self.slider_to_kbytes(value)
return r * self.backend_conversion
def slider_to_kbytes(self, value):
value /= self.slider_scale
r = int(round(self.base**value * self.multiplier))
return r
def slider_to_label(self, value):
value = self.slider_to_kbytes(value)
conn_type = ''
for key, conn in self.speed_classes.iteritems():
min_v, max_v = key
if min_v <= value <= max_v:
conn_type = ' (%s)' % conn
break
label = unicode(Rate(value*self.backend_conversion)) + conn_type
return label
class RateSliderBox(wx.StaticBox):
def __init__(self, parent, label, key, settings_window, speed_classes):
self.key = key
self.settings_window = settings_window
wx.StaticBox.__init__(self, parent, label=label)
self.sizer = wx.StaticBoxSizer(self, wx.VERTICAL)
self.text = ElectroStaticText(parent, wx.ID_ANY, 'text')
self.setfunc = lambda v : self.settings_window.setfunc(key, v)
self.slider = RateSlider(parent, self.settings_window.config[key], speed_classes)
self.slider.Bind(wx.EVT_SLIDER, self.OnSlider)
self.LoadValue()
self.sizer.Add(self.text, proportion=1, flag=wx.GROW|wx.TOP|wx.LEFT|wx.RIGHT, border=SPACING)
self.sizer.Add(self.slider, proportion=1, flag=wx.GROW|wx.BOTTOM|wx.LEFT|wx.RIGHT, border=SPACING)
def LoadValue(self):
bytes = self.settings_window.config[self.key]
if bytes <= 0:
wx.the_app.logger.warning(_("Impractically low rate (%s), fixing") % bytes)
self.settings_window.config[self.key] = 4 * 1024
bytes = self.settings_window.config[self.key]
self.slider.SetValue(self.slider.bytes_to_slider(bytes))
self.text.SetLabel(self.slider.slider_to_label(self.slider.GetValue()))
def OnSlider(self, event):
value = event.GetInt()
bytes = self.slider.slider_to_bytes(value)
self.setfunc(bytes)
label = self.slider.slider_to_label(value)
self.text.SetLabel(label)
def Enable(self, enable):
self.text.Enable(enable)
self.slider.Enable(enable)
class SettingsPanel(wx.Panel):
"""Base class for settings panels"""
label = ''
def __init__(self, parent, *a, **k):
style = k.get('style', 0)
k['style'] = style | wx.CLIP_CHILDREN | wx.TAB_TRAVERSAL
# aarrg
self.settings_window = parent.GetParent()
wx.Panel.__init__(self, parent, *a, **k)
parent.AddPage(self, self.label)
self.sizer = VSizer()
self.SetSizerAndFit(self.sizer)
class GeneralSettingsPanel(SettingsPanel):
label = _("General")
def __init__(self, parent, *a, **k):
SettingsPanel.__init__(self, parent, *a, **k)
# widgets
self.confirm_checkbutton = CheckButton(
self,
_("Confirm before quitting %s")%app_name,
self.settings_window,
'confirm_quit',
self.settings_window.config['confirm_quit'])
# sizers
self.sizer.AddFirst(self.confirm_checkbutton)
if os.name == 'nt':
# widgets
self.enforce_checkbutton = CheckButton(
self,
_("Enforce .torrent associations on startup"),
self.settings_window,
'enforce_association',
self.settings_window.config['enforce_association'])
self.startup_checkbutton = CheckButton(
self,
_("Launch BitTorrent when Windows starts"),
self.settings_window,
'launch_on_startup',
self.settings_window.config['launch_on_startup'])
self.start_minimized_checkbutton = CheckButton(
self,
_("Start minimized"),
self.settings_window,
'start_minimized',
self.settings_window.config['start_minimized'])
self.minimize_checkbutton = CheckButton(
self,
_("Minimize to the system tray"),
self.settings_window,
'minimize_to_tray',
self.settings_window.config['minimize_to_tray'])
self.quit_checkbutton = CheckButton(
self,
_("Close to the system tray"),
self.settings_window,
'close_to_tray',
self.settings_window.config['close_to_tray'])
# sizers
self.sizer.Add(wx.StaticLine(self, style=wx.LI_HORIZONTAL), flag=wx.GROW)
self.sizer.Add(self.enforce_checkbutton)
self.sizer.Add(wx.StaticLine(self, style=wx.LI_HORIZONTAL), flag=wx.GROW)
self.sizer.Add(self.startup_checkbutton)
self.sizer.Add(self.start_minimized_checkbutton)
self.sizer.Add(wx.StaticLine(self, style=wx.LI_HORIZONTAL), flag=wx.GROW)
self.sizer.Add(self.minimize_checkbutton)
self.sizer.Add(self.quit_checkbutton)
class SavingSettingsPanel(SettingsPanel):
label = _("Saving")
def __init__(self, parent, *a, **k):
SettingsPanel.__init__(self, parent, *a, **k)
# widgets
self.ask_checkbutton = CheckButton(self,
_("Ask where to save each new download"), self.settings_window,
'ask_for_save', self.settings_window.config['ask_for_save'])
self.save_static_box = wx.StaticBox(self, label=_("Move completed downloads to:"))
self.save_box = ChooseDirectorySizer(self,
self.settings_window.config['save_in'],
setfunc = lambda v: self.settings_window.setfunc('save_in', v),
editable = False,
button_label = _("&Browse"))
self.incoming_static_box = wx.StaticBox(self, label=_("Store unfinished downloads in:"))
self.incoming_box = ChooseDirectorySizer(self,
self.settings_window.config['save_incomplete_in'],
setfunc = lambda v: self.settings_window.setfunc('save_incomplete_in', v),
editable = False,
button_label = _("B&rowse"))
# sizers
self.save_static_box_sizer = wx.StaticBoxSizer(self.save_static_box, wx.VERTICAL)
self.save_static_box_sizer.Add(self.save_box,
flag=wx.ALL|wx.GROW,
border=SPACING)
self.incoming_static_box_sizer = wx.StaticBoxSizer(self.incoming_static_box, wx.VERTICAL)
self.incoming_static_box_sizer.Add(self.incoming_box,
flag=wx.ALL|wx.GROW,
border=SPACING)
self.sizer.AddFirst(self.ask_checkbutton)
self.sizer.Add(self.save_static_box_sizer, flag=wx.GROW)
self.sizer.Add(self.incoming_static_box_sizer, flag=wx.GROW)
class NetworkSettingsPanel(SettingsPanel):
label = _("Network")
def __init__(self, parent, *a, **k):
SettingsPanel.__init__(self, parent, *a, **k)
if os.name == 'nt':
self.autodetect = CheckButton(self,
_("Autodetect available bandwidth"),
self.settings_window,
'bandwidth_management',
self.settings_window.config['bandwidth_management'],
self.bandwidth_management_callback
)
self.sizer.AddFirst(self.autodetect)
self.up_rate_slider = RateSliderBox(self,
_("Maximum upload rate"),
'max_upload_rate',
self.settings_window,
upload_speed_classes)
self.sizer.Add(self.up_rate_slider.sizer, flag=wx.GROW)
self.down_rate_slider = RateSliderBox(self,
_("Average maximum download rate"),
'max_download_rate',
self.settings_window,
download_speed_classes)
self.sizer.Add(self.down_rate_slider.sizer, flag=wx.GROW)
if os.name == 'nt':
self.bandwidth_management_callback()
# Network widgets
self.port_box = wx.StaticBox(self, label=_("Look for available port:"))
port_text = ElectroStaticText(self, wx.ID_ANY, _("starting at port:") + ' ')
port_range = ElectroStaticText(self, wx.ID_ANY, " (1024-65535)")
self.port_field = PortValidator(self, 'minport',
self.settings_window.config,
self.settings_window.setfunc)
self.port_field.add_end('maxport')
self.upnp = CheckButton(self, _("Enable automatic port mapping")+" (&UPnP)",
self.settings_window,
'upnp',
self.settings_window.config['upnp'],
None)
# Network sizers
self.port_box_line1 = wx.BoxSizer(wx.HORIZONTAL)
self.port_box_line1.Add(port_text , flag=wx.ALIGN_CENTER_VERTICAL, border=SPACING)
self.port_box_line1.Add(self.port_field)
self.port_box_line1.Add(port_range, flag=wx.ALIGN_CENTER_VERTICAL, border=SPACING)
self.port_box_sizer = wx.StaticBoxSizer(self.port_box, wx.VERTICAL)
self.port_box_sizer.Add(self.port_box_line1, flag=wx.TOP|wx.LEFT|wx.RIGHT, border=SPACING)
self.port_box_sizer.Add(self.upnp, flag=wx.ALL, border=SPACING)
self.sizer.Add(self.port_box_sizer, flag=wx.GROW)
# debug only code
if wx.the_app.config['debug']:
# widgets
self.ip_box = wx.StaticBox(self, label=_("IP to report to the tracker:"))
self.ip_field = IPValidator(self, 'ip',
self.settings_window.config,
self.settings_window.setfunc)
ip_label = ElectroStaticText(self, wx.ID_ANY,
_("(Has no effect unless you are on the\nsame local network as the tracker)"))
# sizers
self.ip_box_sizer = wx.StaticBoxSizer(self.ip_box, wx.VERTICAL)
self.ip_box_sizer.Add(self.ip_field, flag=wx.TOP|wx.LEFT|wx.RIGHT|wx.GROW, border=SPACING)
self.ip_box_sizer.Add(ip_label, flag=wx.ALL, border=SPACING)
self.sizer.Add(self.ip_box_sizer, flag=wx.GROW)
def bandwidth_management_callback(self):
enable = not self.autodetect.GetValue()
if enable:
self.up_rate_slider.LoadValue()
self.down_rate_slider.LoadValue()
self.up_rate_slider.Enable(enable)
self.down_rate_slider.Enable(enable)
class AppearanceSettingsPanel(SettingsPanel):
label = _("Appearance")
pb_config_key = 'progressbar_style'
# sample data
sample_value = 0.4
sample_data = {'h': SparseSet(),
't': SparseSet(),
}
sample_data['h'].add(0, 80)
sample_data['t'].add(80, 100)
for i in range(20,0,-1):
s = SparseSet()
s.add(200-i*5, 200-(i-1)*5)
sample_data[i-1] = s
del i,s
def __init__(self, parent, *a, **k):
SettingsPanel.__init__(self, parent, *a, **k)
# widgets
self.gauge_box = wx.StaticBox(self, label=_("Progress bar style:"))
self.gauge_sizer = wx.StaticBoxSizer(self.gauge_box, wx.VERTICAL)
self.null_radio = wx.RadioButton(self,
label=_("&None (just show percent complete)"),
style=wx.RB_GROUP)
self.null_radio.value = 0
self.simple_radio = wx.RadioButton(self,
label=_("&Ordinary progress bar"))
self.simple_radio.value = 1
self.simple_sample = self.new_sample(SimpleDownloadGauge, 1)
self.moderate_radio = wx.RadioButton(self,
label=_("&Detailed progress bar"))
self.moderate_radio.value = 2
msg = _("(shows the percentage of complete, transferring, available and missing pieces in the torrent)")
if not text_wrappable:
half = len(msg)//2
for i in xrange(half):
if msg[half+i] == ' ':
msg = msg[:half+i+1] + '\n' + msg[half+i+1:]
break
elif msg[half-i] == ' ':
msg = msg[:half-i+1] + '\n' + msg[half-i+1:]
break
self.moderate_text = ElectroStaticText(self, wx.ID_ANY, msg)
if text_wrappable: self.moderate_text.Wrap(250)
self.moderate_sample = self.new_sample(ModerateDownloadGauge, 2)
self.fancy_radio = wx.RadioButton(self,
label=_("&Piece bar"))
self.fancy_radio.value = 3
self.fancy_text = ElectroStaticText(self, wx.ID_ANY,
_("(shows the status of each piece in the torrent)"))
if text_wrappable: self.fancy_text.Wrap(250)
# generate random sample data
r = set(xrange(200))
self.sample_data = {}
for key, count in (('h',80), ('t',20)) + tuple([(i,5) for i in range(19)]):
self.sample_data[key] = SparseSet()
for d in random.sample(r, count):
self.sample_data[key].add(d)
r.remove(d)
for d in r:
self.sample_data[0].add(d)
self.fancy_sample = self.new_sample(FancyDownloadGauge, 3)
# sizers
gauge = wx.TOP|wx.LEFT|wx.RIGHT
extra = wx.TOP|wx.LEFT|wx.RIGHT|wx.GROW
self.gauge_sizer.Add(self.null_radio , flag=gauge, border=SPACING)
self.gauge_sizer.AddSpacer((SPACING, SPACING))
self.gauge_sizer.Add(self.simple_radio , flag=gauge, border=SPACING)
self.gauge_sizer.Add(self.simple_sample , flag=extra, border=SPACING)
self.gauge_sizer.AddSpacer((SPACING, SPACING))
self.gauge_sizer.Add(self.moderate_radio , flag=gauge, border=SPACING)
self.gauge_sizer.Add(self.moderate_sample, flag=extra, border=SPACING)
self.gauge_sizer.Add(self.moderate_text , flag=extra, border=SPACING)
self.gauge_sizer.AddSpacer((SPACING, SPACING))
self.gauge_sizer.Add(self.fancy_radio , flag=gauge, border=SPACING)
self.gauge_sizer.Add(self.fancy_sample , flag=extra, border=SPACING)
self.gauge_sizer.Add(self.fancy_text , flag=extra, border=SPACING)
self.sizer.AddFirst(self.gauge_sizer, flag=wx.GROW)
# setup
self.pb_group = (self.null_radio, self.simple_radio, self.moderate_radio, self.fancy_radio)
for r in self.pb_group:
r.Bind(wx.EVT_RADIOBUTTON, self.radio)
if r.value == wx.the_app.config[self.pb_config_key]:
r.SetValue(True)
else:
r.SetValue(False)
# toolbar widgets
self.toolbar_box = wx.StaticBox(self, label=_("Toolbar style:"))
self.toolbar_text = CheckButton(self, _("Show text"),
self.settings_window,
'toolbar_text',
self.settings_window.config['toolbar_text'],
wx.the_app.reset_toolbar_style)
self.toolbar_size_text = ElectroStaticText(self, id=wx.ID_ANY, label=_("Icon size:"))
self.toolbar_size_choice = wx.Choice(self, choices=(_("Small"), _("Normal"), _("Large")))
self.toolbar_config_to_choice(wx.the_app.config['toolbar_size'])
self.toolbar_size_choice.Bind(wx.EVT_CHOICE, self.toolbar_choice_to_config)
# toolbar sizers
self.toolbar_sizer = HSizer()
self.toolbar_sizer.AddFirst(self.toolbar_text, flag=wx.ALIGN_CENTER_VERTICAL)
line = wx.StaticLine(self, id=wx.ID_ANY, style=wx.VERTICAL)
self.toolbar_sizer.Add(line,
flag=wx.ALIGN_CENTER_VERTICAL|wx.GROW)
self.toolbar_sizer.Add(self.toolbar_size_text, flag=wx.ALIGN_CENTER_VERTICAL)
self.toolbar_sizer.Add(self.toolbar_size_choice, flag=wx.GROW|wx.ALIGN_TOP, proportion=1)
self.toolbar_box_sizer = wx.StaticBoxSizer(self.toolbar_box, wx.VERTICAL)
self.toolbar_box_sizer.Add(self.toolbar_sizer, flag=wx.GROW)
self.sizer.Add(self.toolbar_box_sizer, flag=wx.GROW)
if wx.the_app.config['debug']:
# the T-Word widgets
self.themes = []
self.theme_choice = wx.Choice(self, choices=[])
self.theme_choice.Enable(False)
self.theme_choice.Bind(wx.EVT_CHOICE, self.set_theme)
self.restart_hint = ElectroStaticText(self, id=wx.ID_ANY, label=_("(Changing themes requires restart.)"))
self.theme_static_box = wx.StaticBox(self, label=_("Theme:"))
# the T-Word sizers
self.theme_sizer = VSizer()
self.theme_sizer.AddFirst(self.theme_choice, flag=wx.GROW|wx.ALIGN_RIGHT)
self.theme_sizer.Add(self.restart_hint, flag=wx.GROW|wx.ALIGN_RIGHT)
self.theme_static_box_sizer = wx.StaticBoxSizer(self.theme_static_box, wx.VERTICAL)
self.theme_static_box_sizer.Add(self.theme_sizer, flag=wx.GROW)
self.sizer.Add(self.theme_static_box_sizer, flag=wx.GROW)
self.get_themes()
def get_themes(self):
def _callback(themes):
self.themes.extend(themes)
self.theme_choice.AppendItems(strings=themes)
curr_theme = wx.the_app.config['theme']
if curr_theme not in self.themes:
self.settings_window.setfunc('theme', 'default')
curr_theme = wx.the_app.config['theme']
curr_idx = self.themes.index(curr_theme)
self.theme_choice.SetSelection(curr_idx)
self.theme_choice.Enable(True)
def callback(themes):
gui_wrap(_callback, themes)
df = list_themes()
df.addCallback(callback)
df.getResult()
def set_theme(self, e):
i = self.theme_choice.GetSelection()
t = self.themes[i]
self.settings_window.setfunc('theme', t)
def toolbar_choice_to_config(self, *a):
        i = self.toolbar_size_choice.GetSelection()
        size = 8*(i+2)
self.settings_window.setfunc('toolbar_size', size)
wx.the_app.reset_toolbar_style()
def toolbar_config_to_choice(self, value):
i = (value//8) - 2
self.toolbar_size_choice.SetSelection(i)
def new_sample(self, sample_class, value):
sample = sample_class(self, size=wx.Size(-1, 20), style=wx.SUNKEN_BORDER)
# I happen to know 200 is the right number because I looked.
sample.SetValue(self.sample_value, 'running', (200, 0, self.sample_data))
sample.Bind(wx.EVT_LEFT_DOWN, self.sample)
sample.Bind(wx.EVT_CONTEXT_MENU, None)
sample.value = value
return sample
def radio(self, event):
widget = event.GetEventObject()
value = widget.value
self.settings_window.setfunc(self.pb_config_key, value)
gui_wrap(wx.the_app.main_window.torrentlist.change_gauge_type, value)
def sample(self, event):
self.radio(event)
pb = event.GetEventObject()
value = pb.value
for p in self.pb_group:
if p.value == value:
p.SetValue(True)
break
class LanguageSettingsPanel(LanguageSettings):
label = _("Language")
def __init__(self, parent, *a, **k):
LanguageSettings.__init__(self, parent, *a, **k)
parent.AddPage(self, self.label)
self.settings_window = parent.GetParent()
class SettingsWindow(BTDialog):
def __init__(self, main_window, config, setfunc):
BTDialog.__init__(self, main_window, style=wx.DEFAULT_DIALOG_STYLE|wx.CLIP_CHILDREN|wx.WANTS_CHARS)
self.Bind(wx.EVT_CLOSE, self.close)
self.Bind(wx.EVT_CHAR, self.key)
self.SetTitle(_("%s Settings")%app_name)
self.setfunc = setfunc
self.config = config
self.notebook = wx.Notebook(self)
self.notebook.Bind(wx.EVT_CHAR, self.key)
self.general_panel = GeneralSettingsPanel(self.notebook)
self.saving_panel = SavingSettingsPanel(self.notebook)
self.network_panel = NetworkSettingsPanel(self.notebook)
self.appearance_panel = AppearanceSettingsPanel(self.notebook)
self.language_panel = LanguageSettingsPanel(self.notebook)
self.vbox = VSizer()
self.vbox.AddFirst(self.notebook, proportion=1, flag=wx.GROW)
self.vbox.Layout()
self.SetSizerAndFit(self.vbox)
self.SetFocus()
def key(self, event):
c = event.GetKeyCode()
if c == wx.WXK_ESCAPE:
self.close()
event.Skip()
def get_save_in(self, *e):
d = wx.DirDialog(self, "", style=wx.DD_DEFAULT_STYLE|wx.DD_NEW_DIR_BUTTON)
d.SetPath(self.config['save_in'])
if d.ShowModal() == wx.ID_OK:
path = d.GetPath()
self.saving_panel.save_in_button.SetLabel(path)
self.setfunc('save_in', path)
def start_torrent_behavior_changed(self, event):
widget = event.GetEventObject()
state_name = widget.state_name
self.setfunc('start_torrent_behavior', state_name)
def close(self, *e):
self.Hide()
|
from django.contrib import admin
from .models import Font
class FontAdmin(admin.ModelAdmin):
class Meta:
model = Font
admin.site.register(Font, FontAdmin) |
"""
Janome Japanese morphological analysis echo-bot
This example shows the echo-bot that returns the response analyzed by Janome,
pure Python Japanese morphological analysis engine.
Sample conversation
$ python janomeecho.py
user> 今日も暑くなりそうですね
minette> 今日(名詞), も(助詞), 暑く(形容詞), なり(動詞), そう(名詞), です(助動詞), ね(助詞)
user> もしハワイに行ったらパンケーキをたくさん食べます
minette> 固有名詞あり: ハワイ
Using user dictionary
To use user dictionary, pass the path to user dictionary as `user_dic` argument.
user> 新しい魔法少女リリカルなのはの映画を観ましたか?
minette without udic> 新しい(形容詞), 魔法(名詞), 少女(名詞), リリカル(名詞), な(助動詞), の(名詞), は(助詞), の(助詞), 映画(名詞), を(助詞), 観(動詞), まし(助動詞), た(助動詞), か(助詞), ?(記号)
minette with udic> 固有名詞あり: 魔法少女リリカルなのは
"""
from minette import Minette, DialogService
from minette.tagger.janometagger import JanomeTagger
# Custom dialog service
class DiceDialogService(DialogService):
def process_request(self, request, context, connection):
# Text processing using the result of Janome
context.data["proper_nouns"] = \
[w.surface for w in request.words if w.part_detail1 == "固有名詞"]
def compose_response(self, request, context, connection):
if context.data.get("proper_nouns"):
            # Echo extracted proper nouns when the request contains any
return "固有名詞あり: " + ", ".join(context.data.get("proper_nouns"))
else:
# Echo with analysis result
return ", ".join(["{}({})".format(w.surface, w.part) for w in request.words])
if __name__ == "__main__":
# Create bot with Janome Tagger
bot = Minette(
default_dialog_service=DiceDialogService,
tagger=JanomeTagger,
# user_dic="/path/to/userdict" # <= Uncomment when you use user dict
)
# Start conversation
while True:
req = input("user> ")
res = bot.chat(req)
for message in res.messages:
print("minette> " + message.text)
|
# Flyweight pattern - framework-based implementation
from abc import ABCMeta, abstractmethod
# Import ABCMeta and abstractmethod to define abstract classes and abstract methods
class Flyweight(metaclass=ABCMeta):
"""享元类"""
@abstractmethod
def operation(self, extrinsicState):
pass
class FlyweightImpl(Flyweight):
"""享元类的具体实现类"""
def __init__(self, color):
self.__color = color
def operation(self, extrinsicState):
print("%s 取得 %s色颜料" % (extrinsicState, self.__color))
class FlyweightFactory:
"""享元工厂"""
def __init__(self):
self.__flyweights = {}
def getFlyweight(self, key):
pigment = self.__flyweights.get(key)
if pigment is None:
            pigment = FlyweightImpl(key)
            # Cache the new flyweight so later requests for the same key share this instance
            self.__flyweights[key] = pigment
        return pigment
def testFlyweight():
factory = FlyweightFactory()
pigmentRed = factory.getFlyweight("红")
pigmentRed.operation("梦之队")
pigmentYellow = factory.getFlyweight("黄")
pigmentYellow.operation("梦之队")
pigmentBlue1 = factory.getFlyweight("蓝")
pigmentBlue1.operation("梦之队")
pigmentBlue2 = factory.getFlyweight("蓝")
pigmentBlue2.operation("和平队")
if __name__ == '__main__':
testFlyweight()
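    # Added check (illustrative): with the caching fix in FlyweightFactory,
    # repeated requests for the same key return the very same shared instance.
    factory = FlyweightFactory()
    assert factory.getFlyweight("蓝") is factory.getFlyweight("蓝")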
"""
梦之队 取得 红色颜料
梦之队 取得 黄色颜料
梦之队 取得 蓝色颜料
和平队 取得 蓝色颜料
""" |
from data.mnist import MNISTDataModule |
import tensorflow as tf
def inference(images, batch_size = 16):
'''build the model
Args:
        images: input data (images), 4D tensor, batch_size * height * width * depth
Notes:
In each conv layer, the kernel size is:
[kernel_size, kernel_size, number of input channels, number of output channels].
        The number of input channels comes from the previous layer; if the previous
        layer is the input layer, the number of input channels is the image's channels.
Return:
softmax_linear
'''
#conv1
with tf.variable_scope('conv1') as scope:
weights = tf.get_variable('weights',
shape = [3, 3, 3, 96],
dtype = tf.float32,
initializer = tf.truncated_normal_initializer(mean = 0.0, stddev = 0.01, dtype = tf.float32))
biases = tf.get_variable('biases',
shape = [96],
dtype = tf.float32,
initializer = tf.constant_initializer(0.0))
conv = tf.nn.conv2d(images, weights, strides = [1,1,1,1], padding = 'SAME')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name = scope.name)
# pool1 & norm1
with tf.variable_scope('pooling1_lrn') as scope:
pool1 = tf.nn.max_pool(conv1, ksize = [1, 3, 3, 1], strides = [1, 2, 2 ,1], padding = 'SAME', name = 'pooling1')
norm1 = tf.nn.lrn(pool1, depth_radius = 4, bias = 1.0, alpha = 0.001/9.0, beta = 0.75, name = 'norm1')
#conv2
with tf.variable_scope('conv2') as scope:
weights = tf.get_variable('weights',
shape=[3,3,96, 64],
dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=0.05,dtype=tf.float32))
biases = tf.get_variable('biases',
shape=[64],
dtype=tf.float32,
initializer=tf.constant_initializer(0.1))
conv = tf.nn.conv2d(norm1, weights, strides=[1,1,1,1],padding='SAME')
pre_activation = tf.nn.bias_add(conv, biases)
conv2 = tf.nn.relu(pre_activation, name='conv2')
#pool2 and norm2
with tf.variable_scope('pooling2_lrn') as scope:
norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001/9.0,
beta=0.75,name='norm2')
pool2 = tf.nn.max_pool(norm2, ksize=[1,3,3,1], strides=[1,1,1,1],
padding='SAME',name='pooling2')
#local3
with tf.variable_scope('local3') as scope:
reshape = tf.reshape(pool2, shape=[batch_size, -1])
dim = reshape.get_shape()[1].value
weights = tf.get_variable('weights',
shape=[dim,384],
dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=0.004,dtype=tf.float32))
biases = tf.get_variable('biases',
shape=[384],
dtype=tf.float32,
initializer=tf.constant_initializer(0.1))
local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)
#local4
with tf.variable_scope('local4') as scope:
weights = tf.get_variable('weights',
shape=[384,192],
dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=0.004,dtype=tf.float32))
biases = tf.get_variable('biases',
shape=[192],
dtype=tf.float32,
initializer=tf.constant_initializer(0.1))
local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')
# softmax
with tf.variable_scope('softmax_linear') as scope:
weights = tf.get_variable('softmax_linear',
shape=[192, 10],
dtype=tf.float32,
initializer=tf.truncated_normal_initializer(stddev=0.004,dtype=tf.float32))
biases = tf.get_variable('biases',
shape=[10],
dtype=tf.float32,
initializer=tf.constant_initializer(0.1))
softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')
return softmax_linear
def losses(logits, label):
'''compute loss
Args:
logits: predictions
        label: ground truth
Return:
loss
'''
with tf.variable_scope('loss') as scope:
labels = tf.cast(label, tf.int64)
        # to use this loss function, one-hot encoding is needed!
#cross_entropy = tf.nn.softmax_cross_entropy_with_logits\
# (logits=logits, labels=labels, name='xentropy_per_example')
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits\
(logits=logits, labels=labels, name='xentropy_per_example')
loss = tf.reduce_mean(cross_entropy, name='loss')
tf.summary.scalar(scope.name+'/loss', loss)
return loss
def trainning(loss, lr):
    '''Training op: the Op returned by this function is what must be passed to
        the 'sess.run()' call to cause the model to train.
    Args:
        loss: loss tensor, from losses()
        lr: learning rate
    Returns:
        train_op: The op for training
'''
with tf.name_scope('optimizer'):
optimizer = tf.train.GradientDescentOptimizer(learning_rate=lr)
global_step = tf.Variable(0, name = 'global_step', trainable = False)
train_op = optimizer.minimize(loss, global_step = global_step)
return train_op
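# --- Added usage sketch (illustrative only) ---
# Wires the functions above into a minimal TF1-style training step; the input
# size, label range, learning rate and random feed data are hypothetical
# placeholders standing in for a real input pipeline.
if __name__ == '__main__':
    import numpy as np
    BATCH_SIZE = 16
    images_ph = tf.placeholder(tf.float32, [BATCH_SIZE, 32, 32, 3])
    labels_ph = tf.placeholder(tf.int32, [BATCH_SIZE])
    logits = inference(images_ph, batch_size=BATCH_SIZE)
    loss = losses(logits, labels_ph)
    train_op = trainning(loss, lr=0.01)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        feed = {images_ph: np.random.rand(BATCH_SIZE, 32, 32, 3),
                labels_ph: np.random.randint(0, 10, BATCH_SIZE)}
        _, loss_value = sess.run([train_op, loss], feed_dict=feed)
        print('one training step, loss = %.4f' % loss_value)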
|
# HEAD
# DataType - Dictionaries Key error while accessing
# DESCRIPTION
# Describes the assigning, working, and method usages of dictionaries
# .keys() - Error
# RESOURCES
#
# Demonstrates assigning a dictionary value, which is a set of {key: value} pairs
spam = {'name': 'Zophie', 'age': 7}
# Accessing an unavailable key raises a KeyError
print(spam['color']) # accessing unavailable key
# Traceback (most recent call last):
# File "<pyshell#1>", line 1, in <module>
# spam['color']
# KeyError: 'color'
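# Added example (illustrative): common ways to avoid the KeyError above
print(spam.get('color'))             # .get() returns None instead of raising
print(spam.get('color', 'unknown'))  # or a caller-supplied default
if 'color' in spam:                  # membership test before indexing
    print(spam['color'])
|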
config = {
'node_size': 60,
'node_border_width': 0.6,
'label_size': 8,
'label_color': 'blue',
'solid_edge_color': '#bbbbbb'
} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import datetime
import logging
import xmltodict
import requests
logger = logging.getLogger(__name__)
class eBayAPI(object):
@staticmethod
def Trading(auth_token, app_id, cert_id, dev_id):
return Trading(
auth_token=auth_token, app_id=app_id,
cert_id=cert_id, dev_id=dev_id,
xmlns='urn:ebay:apis:eBLBaseComponents',
endpoint='https://api.ebay.com/ws/api.dll',
service=None
)
@staticmethod
def Finding(auth_token, app_id, cert_id, dev_id):
return Finding(
auth_token=auth_token, app_id=app_id,
cert_id=cert_id, dev_id=dev_id,
xmlns='http://www.ebay.com/marketplace/search/v1/services',
endpoint='http://svcs.ebay.com/services/search/FindingService/v1',
service='FindingService'
)
@staticmethod
def FileTransfer(auth_token, app_id, cert_id, dev_id):
return FileTransfer(
auth_token=auth_token, app_id=app_id,
cert_id=cert_id, dev_id=dev_id,
xmlns='http://www.ebay.com/marketplace/services',
endpoint='https://storage.ebay.com/FileTransferService',
service='FileTransferService'
)
@staticmethod
def BulkDataExchange(auth_token, app_id, cert_id, dev_id):
return BulkDataExchange(
auth_token=auth_token, app_id=app_id,
cert_id=cert_id, dev_id=dev_id,
xmlns='http://www.ebay.com/marketplace/services',
endpoint='https://webservices.ebay.com/BulkDataExchangeService',
service='BulkDataExchangeService'
)
class eBayRequest(object):
def __init__(self, auth_token, app_id, cert_id, dev_id,
method, service, xmlns, endpoint):
self.endpoint = endpoint
self.method = method
self.dev_id = dev_id
self.app_id = app_id
self.cert_id = cert_id
self.auth_token = auth_token
self.service = service
self.xmlns = xmlns
self.params = {}
self.headers = {
'X-EBAY-API-DETAIL-LEVEL': 0,
'X-EBAY-API-CALL-NAME': self.method,
'X-EBAY-API-DEV-NAME': self.dev_id,
'X-EBAY-API-APP-NAME': self.app_id,
'X-EBAY-API-CERT-NAME': self.cert_id,
'X-EBAY-API-COMPATIBILITY-LEVEL': 835,
'X-EBAY-API-SITEID': 0,
'X-EBAY-SOA-OPERATION-NAME': self.method,
'X-EBAY-SOA-SERVICE-NAME': self.service,
'X-EBAY-SOA-SECURITY-APPNAME': self.app_id,
'X-EBAY-SOA-SECURITY-TOKEN': self.auth_token,
'X-EBAY-SOA-SERVICE-VERSION': '1.1.0',
'X-EBAY-SOA-GLOBAL-ID': 'EBAY-US',
'X-EBAY-SOA-REQUEST-DATA-FORMAT': 'XML',
'CONTENT-TYPE': 'text/xml;charset="UTF-8"'
}
def __str__(self):
req_name = '%sRequest' % self.method
xml = {
req_name: {
'@xmlns': self.xmlns,
}
}
for k, v in self.params.iteritems():
v = str(v) if type(v) is unicode else v
k = str(k) if type(k) is unicode else k
xml[req_name][k] = v
return xmltodict.unparse(xml, pretty=True, indent=' ')
def _handle_errors(self, response):
if 'Errors' in response:
errors = response['Errors']
errors = errors if isinstance(errors, list) else [errors]
else:
errors = []
for err in errors:
level = str(err['SeverityCode'])
if level == 'Warning':
logger.warning('%s', err['LongMessage'])
elif level == 'Error':
logger.error('%s', err['LongMessage'])
return self
def execute(self, stream=False):
logger.debug('Executing %s Request:\n%s', self.method, self)
try:
response = requests.post(
url=self.endpoint, headers=self.headers,
stream=stream, data=str(self)
)
except requests.ConnectionError as e:
logger.error('Error executing %s Request: %s', self.method, e)
return None
if stream:
logger.debug('%s is streaming. This is NOT implemented well yet.',
self.method)
response.raw.decode_content = True
response = response.raw
return response
else:
response = xmltodict.parse(response.text)
logger.debug('%s Response received:\n%s', self.method,
xmltodict.unparse(response, pretty=True, indent=' '))
response = response[self.method + 'Response']
self._handle_errors(response)
return response
class eBayRequestFactory(object):
def __init__(self, auth_token, app_id, cert_id, dev_id,
xmlns, endpoint, service=None):
self._auth_token = auth_token
self._app_id = app_id
self._cert_id = cert_id
self._dev_id = dev_id
self._service = service
self._xmlns = xmlns
self._endpoint = endpoint
def build(self, name, params=None, auth=False):
request = eBayRequest(
auth_token=self._auth_token,
app_id=self._app_id,
cert_id=self._cert_id,
dev_id=self._dev_id,
xmlns=self._xmlns,
endpoint=self._endpoint,
service=name if self._service is None else self._service,
method=name
)
if params is not None and isinstance(params, dict):
request.params.update(params)
if auth:
token = {
'RequesterCredentials': {
'eBayAuthToken': self._auth_token
}
}
request.params.update(token)
return request
class Trading(eBayRequestFactory):
def __init__(self, auth_token, app_id, cert_id, dev_id,
xmlns, endpoint, service):
eBayRequestFactory.__init__(
self, auth_token=auth_token, app_id=app_id, cert_id=cert_id,
dev_id=dev_id, xmlns=xmlns, endpoint=endpoint, service=service
)
self.ADD_ITEMS_MAX = 5
self.END_ITEMS_MAX = 10
def LeaveFeedback(self, Feedback, ItemId, TargetUser):
name = 'LeaveFeedback'
params = {
'CommentText': Feedback,
'CommentType': 'Positive',
'ItemID': ItemId,
'TargetUser': TargetUser
}
return self.build(name, params=params, auth=True).execute()
def GetItemsAwaitingFeedback(self, PageNumber=1, EntriesPerPage=200):
name = 'GetItemsAwaitingFeedback'
params = {
'Pagination': {
'EntriesPerPage': EntriesPerPage,
'PageNumber': PageNumber
}
}
return self.build(name, params=params, auth=True).execute()
def GetMyeBaySelling(self, PageNumber=1, EntriesPerPage=200):
name = 'GetMyeBaySelling'
params = {
'ActiveList': {
'Include': 'true',
'Pagination': {
'EntriesPerPage': EntriesPerPage,
'PageNumber': PageNumber
}
}
}
return self.build(name, params=params, auth=True).execute()
def GetApiAccessRules(self):
name = 'GetApiAccessRules'
return self.build(name).execute()
def GetSuggestedCategories(self, Query):
name = 'GetSuggestedCategories'
Query = (' '.join(Query)) if isinstance(Query, list) else Query
params = {
'Query': Query
}
return self.build(name, params=params, auth=True).execute()
def GetItem(self, ItemId):
name = 'GetItem'
params = {
'ItemID': ItemId
}
return self.build(name, params=params).execute()
def VerifyAddItem(self, item):
name = 'VerifyAddItem'
params = item
return self.build(name, params=params, auth=True).execute()
def AddItem(self, item, allow_warnings=True):
name = 'AddItem'
params = item
request = self.build(name, params=params, auth=True)
verified = self.VerifyAddItem(item)
ack = verified['Ack']
if ack == 'Success' or (allow_warnings and ack == 'Warning'):
return request.execute()
else:
return verified
def AddItems(self, item_array, allow_warnings):
name = 'AddItems'
params = {'AddItemRequestContainer': []}
for item in item_array:
verified = self.VerifyAddItem(item)
ack = verified['Ack']
if ack == 'Success' or (allow_warnings and ack == 'Warning'):
item['MessageID'] = item_array.index(item)
params['AddItemRequestContainer'].append(item)
else:
logger.warning('%s was not able to be verified for adding.',
item['Item']['SKU'])
return self.build(name, params=params, auth=True).execute()
def ReviseItem(self, item):
name = 'ReviseItem'
param_item = {
'Item': {
'ItemID': item['ItemID'],
'SKU': item['SKU'],
'StartPrice': item['StartPrice'],
'Quantity': item['Quantity']
}
}
return self.build(name, params=param_item).execute()
def EndItem(self, itemId):
name = 'EndItem'
params = {
'EndingReason': 'NotAvailable',
'ItemID': itemId
}
return self.build(name, params=params, auth=True).execute()
def EndItems(self, ItemIdArray):
name = 'EndItems'
params = {'EndItemRequestContainer': []}
for ItemId in ItemIdArray:
container = {
'MessageID': ItemIdArray.index(ItemId),
'EndingReason': 'NotAvailable',
'ItemID': ItemId
}
params['EndItemRequestContainer'].append(container)
return self.build(name, params=params, auth=True).execute()
def GetOrders(self, PageNumber=1, EntriesPerPage=100):
name = 'GetOrders'
time_to = datetime.datetime.now()
time_from = time_to - datetime.timedelta(hours=30)
params = {
'CreateTimeFrom': time_from.strftime('%Y-%m-%dT%H:%M:%S.000Z'),
'CreateTimeTo': time_to.strftime('%Y-%m-%dT%H:%M:%S.000Z'),
'DetailLevel': 'ReturnAll',
'Pagination': {
'EntriesPerPage': EntriesPerPage,
'PageNumber': PageNumber
}
}
return self.build(name, params=params, auth=True).execute()
def GetSellerList(self, PageNumber=1, EntriesPerPage=100):
name = 'GetSellerList'
time_to = datetime.datetime.now()
time_from = time_to - datetime.timedelta(days=119)
params = {
'StartTimeFrom': time_from.strftime('%Y-%m-%dT%H:%M:%S.000Z'),
'StartTimeTo': time_to.strftime('%Y-%m-%dT%H:%M:%S.000Z'),
'DetailLevel': 'ReturnAll',
'Pagination': {
'EntriesPerPage': EntriesPerPage,
'PageNumber': PageNumber
}
}
return self.build(name, params=params, auth=True).execute()
def GeteBayDetails(self, DetailName):
name = 'GeteBayDetails'
params = {
'DetailName': DetailName
}
return self.build(name, params=params, auth=True).execute()
def CompleteSale(self, OrderID, TrackingNumber, CarrierUsed):
name = 'CompleteSale'
params = {
'OrderID': OrderID,
'Paid': 'true',
'Shipment': {
'ShippedTime': time.strftime("%Y-%m-%dT%H:%M:%SZ",
time.gmtime()),
'ShipmentTrackingDetails': {
'ShipmentTrackingNumber': TrackingNumber,
'ShippingCarrierUsed': CarrierUsed
}
}
}
return self.build(name, params=params, auth=True).execute()
def ReviseInventoryStatus(self, item_array):
name = 'ReviseInventoryStatus'
params = {'InventoryStatus': []}
for item in item_array:
param_item = {
'ItemID': item['ItemID'],
'SKU': item['SKU'],
'StartPrice': item['StartPrice'],
'Quantity': item['Quantity']
}
params['InventoryStatus'].append(param_item)
return self.build(name, params=params, auth=True).execute()
class Finding(eBayRequestFactory):
def __init__(self, auth_token, app_id, cert_id, dev_id,
xmlns, endpoint, service):
eBayRequestFactory.__init__(
self, auth_token=auth_token, app_id=app_id, cert_id=cert_id,
dev_id=dev_id, xmlns=xmlns, endpoint=endpoint, service=service
)
def getVersion(self):
name = 'getVersion'
return self.build(name).execute()
def findItemsByKeywords(self, keywords):
name = 'findItemsByKeywords'
if isinstance(keywords, list):
keywords = ' '.join(keywords)
params = {
'keywords': keywords
}
return self.build(name, params=params).execute()
def findItemsbyCategory(self, categoryId):
name = 'findItemsbyCategory'
params = {
'categoryId': categoryId
}
return self.build(name, params=params).execute()
def findCompletedItems(self):
name = 'findCompletedItems'
params = {
}
return self.build(name, params=params).execute()
class BulkDataExchange(eBayRequestFactory):
def __init__(self, auth_token, app_id, cert_id, dev_id,
xmlns, endpoint, service):
eBayRequestFactory.__init__(
self, auth_token=auth_token, app_id=app_id, cert_id=cert_id,
dev_id=dev_id, xmlns=xmlns, endpoint=endpoint, service=service
)
def createRecurringJob(self, UUID, frequencyInMinutes, downloadJobType):
name = 'createRecurringJob'
params = {
'UUID': UUID,
'frequencyInMinutes': frequencyInMinutes,
'downloadJobType': downloadJobType
}
request = self.build(name, params=params)
return request.execute()
def createUploadJob(self, UUID, uploadJobType):
name = 'createUploadJob'
params = {
'UUID': UUID,
'uploadJobType': uploadJobType
}
request = self.build(name, params=params)
return request.execute()
def deleteRecurringJob(self, recurringJobId):
name = 'deleteRecurringJob'
params = {
'recurringJobId': recurringJobId
}
request = self.build(name, params=params)
return request.execute()
def getJobs(self, jobType):
name = 'getJobs'
params = {
'jobType': jobType
}
request = self.build(name, params=params)
return request.execute()
def getJobStatus(self, jobId):
name = 'getJobStatus'
params = {
'jobId': jobId
}
request = self.build(name, params=params)
return request.execute()
def getRecurringJobs(self):
name = 'getRecurringJobs'
request = self.build(name)
return request.execute()
def startDownloadJob(self, UUID, jobType):
name = 'startDownloadJob'
params = {
'downloadJobType': jobType,
'UUID': UUID,
'downloadRequestFilter': {
'activeInventoryReportFilter': {
'auctionItemDetails': {
'includeBidCount': 1,
}
}
}
}
request = self.build(name, params=params)
return request.execute()
def startUploadJob(self, jobId):
name = 'startUploadJob'
params = {
'jobId': jobId,
}
request = self.build(name, params=params)
return request.execute()
class FileTransfer(eBayRequestFactory):
def __init__(self, auth_token, app_id, cert_id, dev_id,
xmlns, endpoint, service):
eBayRequestFactory.__init__(
self, auth_token=auth_token, app_id=app_id, cert_id=cert_id,
dev_id=dev_id, xmlns=xmlns, endpoint=endpoint, service=service
)
def downloadFile(self, fileReferenceId, taskReferenceId):
name = 'downloadFile'
params = {
'fileReferenceId': fileReferenceId,
'taskReferenceId': taskReferenceId
}
return self.build(name, params=params).execute(stream=True)
def uploadFile(self):
raise ImportError('Not implemented yet')
__all__ = ['eBayAPI']
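# --- Added usage sketch (illustrative only) ---
# Shows how the factories above are intended to be used; the credential
# strings are hypothetical placeholders, so running this as-is would only get
# an authentication error back from eBay.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    trading = eBayAPI.Trading(
        auth_token='YOUR-AUTH-TOKEN',
        app_id='YOUR-APP-ID',
        cert_id='YOUR-CERT-ID',
        dev_id='YOUR-DEV-ID'
    )
    # Each helper builds an eBayRequest, POSTs it to the Trading endpoint and
    # returns the parsed XML response as a dict (or None on connection errors).
    print(trading.GetApiAccessRules())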
|
import cv2 as cv
import numpy as np
blank = np.zeros((500, 500), dtype="uint8")
cv.imshow("Blank", blank)
cv.waitKey(0)
|
import re
INVALID_MESSAGE = "Invalid input, Please enter again"
AVAILABLE_BAUD_RATE = [2400, 4800, 9600, 19200, 38400, 57600, 115200, 230400, 460800, 921600]
NAME_VALIDATE = lambda: get_input(str, "Name: ", "Invalid name, please enter again", NAME_VALIDATOR, allow_empty=True)
BAUD_RATE_VALIDATE = lambda: get_input(int, "Baud rate: ", "Invalid baud rate, please enter again", AVAILABLE_BAUD_RATE)
STOP_BIT_VALIDATE = lambda: get_input(int, "Stop bit: ", "Invalid stop bit, please enter again", [0, 1])
PARITY_BIT_VALIDATE = lambda: get_input(int, "Parity bit: ", "Invalid parity bit, please enter again", [0, 1, 2])
PASSWORD_VALIDATE = lambda: get_input(str, "Password: ", "Invalid password, please enter again", PASSWORD_VALIDATOR, allow_empty=True)
ROLE_VALIDATE = lambda: get_input(int, "Role (Slave[0], Master[1], Slave-Loop[2]): ", "Invalid role, please enter again", [0, 1, 2])
CONNECTION_MODE_VALIDATE = lambda: get_input(int, "Connection Mode [0, 1, 2]: ", "Invalid connection mode, please enter again", [0, 1, 2])
ADDRESS_VALIDATE = lambda: get_input(str, "Bind address: ", "Invalid address, please enter again", ADDRESS_VALIDATOR)
def match_pattern(pattern, string):
match = re.match(pattern, string)
return match is not None and match.start() == 0 and match.end() == len(string)
NAME_VALIDATOR = lambda name: match_pattern("([ -~]{0,32})", name)
BAUD_RATE_VALIDATOR = lambda baud_rate: baud_rate in AVAILABLE_BAUD_RATE
STOP_BIT_VALIDATOR = lambda stop_bit: stop_bit in [0, 1]
PARITY_BIT_VALIDATOR = lambda parity_bit: parity_bit in [0, 1, 2]
PASSWORD_VALIDATOR = lambda password: match_pattern("([ -~]{0,16})", password)
ROLE_VALIDATOR = lambda role: role in [0, 1, 2]
CONNECTION_MODE_VALIDATOR = lambda cmode: cmode in [0, 1, 2]
ADDRESS_VALIDATOR = lambda addr: match_pattern("([0-9A-F]{1,4}):([0-9A-F]{1,2}):([0-9A-F]{1,6})", addr)
BLUETOOTH_CONFIG_VALIDATE = {
"Name": NAME_VALIDATE,
"Baud Rate": BAUD_RATE_VALIDATE,
"Stop Bit": STOP_BIT_VALIDATE,
"Parity Bit": PARITY_BIT_VALIDATE,
"Password": PASSWORD_VALIDATE,
"Role": ROLE_VALIDATE,
"Connection Mode": CONNECTION_MODE_VALIDATE,
"Bind Address": ADDRESS_VALIDATE
}
MASTER_AND_SLAVE_VALIDATE = {
""
"Baud Rate": BAUD_RATE_VALIDATE,
"Stop Bit": STOP_BIT_VALIDATE,
"Parity Bit": PARITY_BIT_VALIDATE
}
def isInt(integer):
try:
int(integer)
return True
except:
return False
def get_input(type, input_msg, wrong_msg="", correct=[], allow_empty=False):
while True:
ans = input(input_msg)
if not ans:
if allow_empty:
return ans
else:
print(wrong_msg)
continue
if type == int:
if not isInt(ans):
if wrong_msg:
print(wrong_msg)
continue
ans = int(ans)
if not correct or ((callable(correct) and correct(ans)) or (not callable(correct) and ans in correct)):
return ans
if wrong_msg:
print(wrong_msg)
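# --- Added usage sketch (illustrative only) ---
# Drives every prompt in BLUETOOTH_CONFIG_VALIDATE to collect a full
# configuration interactively; not part of the original module.
if __name__ == "__main__":
    config = {}
    for field, prompt in BLUETOOTH_CONFIG_VALIDATE.items():
        # Each value is a zero-argument lambda that keeps re-prompting until
        # the input passes its validator (or is empty where that is allowed).
        config[field] = prompt()
    print(config)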
|
from core.model.data_source import DataSource
from core.model.generator_setting import GeneratorSetting
from core.model.meta_column import MetaColumn
class SomeError(Exception):
def __init__(self, message: str):
self.message = message
class DatabaseConnectionError(SomeError):
def __init__(self):
super().__init__('Could not connect to the database')
class FatalDatabaseError(SomeError):
def __init__(self):
super().__init__('Fatal database error')
class FileNotAllowedError(SomeError):
def __init__(self):
super().__init__('File not allowed')
class GeneratorRegistrationError(SomeError):
def __init__(self):
super().__init__('Registered generator must be a column generator')
class InvalidPasswordError(SomeError):
def __init__(self):
super().__init__('Please choose a different password')
class GeneratorSettingError(SomeError):
def __init__(self, message: str, generator_setting: GeneratorSetting):
super().__init__('{} ({} generator {})'.format(message,
generator_setting.name,
generator_setting.id))
self.generator_setting = generator_setting
class DataSourceError(SomeError):
def __init__(self, message: str, data_source: DataSource):
super().__init__(message)
self.data_source = data_source
class DatabaseNotReadable(DataSourceError):
def __init__(self, data_source: DataSource):
super().__init__('Unable to reflect the database', data_source)
class DataSourceIdentifierError(DataSourceError):
def __init__(self, message: str, data_source: DataSource, identifier: str):
super().__init__(message, data_source)
self.identifier = identifier
class ColumnError(SomeError):
def __init__(self, message: str, meta_column: MetaColumn):
super().__init__('Column `{}`: {}'.format(meta_column.name, message))
self.meta_column = meta_column
class ColumnGeneratorError(ColumnError):
pass
class ColumnGeneratorNotAssignableError(ColumnGeneratorError):
def __init__(self, meta_column: MetaColumn):
super().__init__('the generator is not assignable to this column', meta_column)
class MalformedIdentifierError(SomeError):
def __init__(self, identifier: str):
super().__init__('Malformed identifier: `{}`'.format(identifier))
class RequisitionMissingReferenceError(SomeError):
def __init__(self, included_table: str, ref_table: str):
super().__init__('The table `{}` references the table `{}`, but it is not included in the export'
.format(included_table, ref_table))
|
import locale
import json
import os
from django.core.management.base import BaseCommand, CommandError
from datetime import datetime as dt
import pandas as pd
from collections import defaultdict
from tqdm import tqdm
import numpy as np
class Document:
def __init__(self, id, text):
self.text = text
self.id = id
self.sentence_delim = " . "
self.splitted_text = self.__split_text()
def __split_text(self):
return self.text.split(self.sentence_delim)
def get_sentence(self, id):
return self.splitted_text[id-1]
class Command(BaseCommand):
help = 'Create classification dataset for CWN only'
def load_documents(self):
texts_path = '../data/texts'
files = os.listdir(texts_path)
documents = list()
for filename in files:
text = open(os.path.join(texts_path, filename), 'r').read()
document = Document(filename, text)
documents.append(document)
return documents
def handle(self, *args, **options):
documents = self.load_documents()
annotations = pd.read_csv('../data/gsml.csv')
positive_lines = list()
negative_lines = list()
for document in documents:
document_annotations = annotations[annotations['TEXT_ID'] == document.id]
document_annotations = document_annotations[document_annotations['TYPE'].isin(['CONTEXT', 'NOT_CHECKABLE', 'WORD'])]
document_annotations_lines = set(document_annotations['SENTENCE_ID'].tolist())
for line_id in range(1, len(document.splitted_text)+1):
if line_id in document_annotations_lines:
positive_lines.append((document.get_sentence(line_id), 1))
else:
negative_lines.append((document.get_sentence(line_id), 0))
dataset = positive_lines + negative_lines
np.random.shuffle(dataset)
with open('../data/binary_classification_cwn.jsonl', 'w') as fhandle:
for line, label in dataset:
if label == 1.0:
sample = {"text": line, "label": "positive"}
else:
sample = {"text": line, "label": "negative"}
fhandle.write("{}\n".format(json.dumps(sample)))
self.stdout.write(self.style.SUCCESS(f'Successfully ran command "{self.help}"'))
|
l = []
p = []
i = []
while True:
    v = int(input('Enter a number: '))
l.append(v)
if v%2==0:
p.append(v)
else:
i.append(v)
while True:
        q = input('Do you want to continue? [Y/N] ').upper().strip()[0]
        if q in 'YN':
break
if q == 'N':
break
print(30*'-=')
print(f'The complete list is: {l}')
print(f'The list of even numbers is: {p}')
print(f'The list of odd numbers is: {i}') |
# -*- coding: utf-8 -*-
import facebook
from allauth.socialaccount.models import SocialToken
from django.core.exceptions import ObjectDoesNotExist
class FacebookAuth(object):
"""
    Interface between Django AllAuth and the Facebook SDK
"""
def __init__(self, user_id):
super(FacebookAuth, self).__init__()
# Only integers are allowed
if not isinstance(user_id, (int, long)):
raise TypeError("An Integer is expected")
self.user_id = user_id
def get_graph(self):
"""
Returns a Graph object to be used on the Facebook SDK.
"""
return facebook.GraphAPI(access_token=self.get_access_token())
def get_access_token(self):
"""
Get a valid token for the user from AllAuth
"""
try:
token = SocialToken.objects.get(
account__user_id=self.user_id).token
except ObjectDoesNotExist:
raise NotValidFacebookAccount("A token has not been found")
return token
class NotValidFacebookAccount(Exception):
"""
NotValidAccount Exception.
"""
pass
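# --- Added usage sketch (illustrative only) ---
# A minimal helper showing how FacebookAuth is meant to be called; `user_id`
# comes from the caller and `get_object('me')` is a standard facebook-sdk
# Graph API call.
def fetch_facebook_profile(user_id):
    """Return the user's Facebook profile dict, or None if no token is linked."""
    try:
        graph = FacebookAuth(user_id).get_graph()
        return graph.get_object('me')
    except NotValidFacebookAccount:
        return None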
|
from msldap import logger
from msldap.network.tcp import MSLDAPTCPNetwork
from msldap.network.socks import SocksProxyConnection
from msldap.network.multiplexor import MultiplexorProxyConnection
from msldap.commons.proxy import MSLDAPProxyType
MSLDAP_SOCKS_PROXY_TYPES = [
MSLDAPProxyType.SOCKS4 ,
MSLDAPProxyType.SOCKS4_SSL ,
MSLDAPProxyType.SOCKS5 ,
MSLDAPProxyType.SOCKS5_SSL]
class MSLDAPNetworkSelector:
def __init__(self):
pass
@staticmethod
async def select(target):
if target.proxy is not None:
if target.proxy.type in MSLDAP_SOCKS_PROXY_TYPES:
return SocksProxyConnection(target)
else:
mpc = MultiplexorProxyConnection(target)
socks_proxy = await mpc.connect()
return socks_proxy
return MSLDAPTCPNetwork(target) |
# Generated by Django 3.1.7 on 2021-03-30 07:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('substitute', '0004_auto_20210323_1633'),
]
operations = [
migrations.AlterField(
model_name='product',
name='labels',
field=models.CharField(blank=True, max_length=400, null=True),
),
]
|
# zipyshare-downloader
# fetcher.py
import asyncio
import aiohttp
import requests
import logging
import os
import zipfile
from typing import List, Dict
from pathlib import Path
from .utils import extract_archived_file, build_pretty_list_log, archive_zip
from .errors import FileExpired
from .parser import finalization_info, parse_info
from .file import File
__all__ = (
'download', 'extract_info',
'download_coro', 'extract_info_coro'
)
log = logging.getLogger(__name__)
def get_info(url) -> Dict[str, str]:
"""
    Get information from a Zippyshare URL.
"""
log.info('Grabbing required informations in %s' % url)
log.debug('Establishing connection to Zippyshare.')
r = requests.get(url)
try:
r.raise_for_status()
except requests.HTTPError as e:
log.exception('Zippyshare send %s code' % r.status_code)
raise e from None
log.debug('Successfully established connection to Zippyshare.')
log.debug('Checking if file is not expired')
if 'File has expired and does not exist anymore on this server' in r.text:
log.exception('File has expired and does not exist anymore')
raise FileExpired('File has expired and does not exist anymore')
log.debug('Checking if file is exist')
if 'File does not exist on this server' in r.text:
log.exception('File does not exist on this server')
raise FileNotFoundError('File does not exist on this server')
return finalization_info(parse_info(url, r.text))
async def get_info_coro(url) -> Dict[str, str]:
"""
    Get information from a Zippyshare URL.
    Unlike `get_info()`, where you would have to call `finalization_info()`
    manually to fix incorrect information, this function calls it
    automatically.
"""
log.info('Grabbing required informations in %s' % url)
log.debug('Establishing connection to Zippyshare.')
async with aiohttp.ClientSession() as session:
r = await session.get(url)
try:
r.raise_for_status()
except aiohttp.ClientResponseError as e:
log.exception('Zippyshare send %s code' % r.status)
raise e from None
body_html = await r.text()
log.debug('Successfully established connection to Zippyshare.')
log.debug('Checking if file is not expired')
if 'File has expired and does not exist anymore on this server' in body_html:
log.exception('File has expired and does not exist anymore')
raise FileExpired('File has expired and does not exist anymore')
log.debug('Checking if file is exist')
if 'File does not exist on this server' in body_html:
log.exception('File does not exist on this server')
raise FileNotFoundError('File does not exist on this server')
return await finalization_info(parse_info(url, body_html), True, session)
def download(*urls, zip: str=None, unzip: bool=False, **kwargs) -> List[File]:
"""
Download multiple zippyshare urls
Parameters
-----------
*urls
Zippyshare urls.
zip: :class:`str`
Zip all downloaded files once finished.
Zip filename will be taken from ``zip`` parameter,
default to ``None``.
NOTE: You can't mix ``zip`` and ``unzip`` options together
with value ``True``, it will raise error.
unzip: :class:`bool`
Unzip all downloaded files once finished
(if given file is zip format extract it, otherwise ignore it),
default to ``False``.
NOTE: You can't mix ``zip`` and ``unzip`` options together
with value ``True``, it will raise error.
**kwargs
These parameters will be passed to :meth:`File.download()`,
except for parameter ``filename``.
Returns
-------
List[:class:`File`]
a list of Zippyshare files
"""
if unzip and zip:
raise ValueError("unzip and zip paramaters cannot be set together")
downloaded_files = {}
files = []
for url in urls:
info = get_info(url)
file = File(info)
files.append(file)
if kwargs.get('filename') is not None:
kwargs.pop('filename')
file_path = file.download(**kwargs)
downloaded_files[file] = file_path
if unzip:
extract_archived_file(str(file_path))
if zip:
log.info('Zipping all downloaded files')
path = list(downloaded_files.values())[0]
zip_path = (path.parent / zip)
with zipfile.ZipFile(zip_path, 'w') as zip_writer:
for file, path in downloaded_files.items():
log.debug('Writing %s to %s' % (
path,
zip_path
))
zip_writer.write(path)
os.remove(path)
log.info('Successfully zipped all downloaded files')
return files
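# --- Added usage note (illustrative only) ---
# Typical synchronous call; the URLs below are placeholders:
#
#   files = download(
#       'https://www.zippyshare.com/v/abc123/file.html',
#       'https://www.zippyshare.com/v/def456/file.html',
#       unzip=True,
#   )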
def extract_info(url: str, download: bool=True, unzip: bool=False, **kwargs) -> File:
"""
    Extract all information from a Zippyshare URL.
Parameters
------------
url: :class:`str`
Zippyshare url.
download: :class:`bool`
Download given zippyshare url if ``True``,
default to ``True``.
unzip: :class:`bool`
Unzip downloaded file once finished
(if given file is zip or tar format extract it, otherwise ignore it),
default to ``False``.
**kwargs
These parameters will be passed to :meth:`File.download()`
Returns
-------
:class:`File`
Zippyshare file
"""
info = get_info(url)
file = File(info)
if download:
file_path = file.download(**kwargs)
if unzip:
extract_archived_file(str(file_path))
return file
async def extract_info_coro(url: str, download: bool=True, unzip: bool=False, **kwargs) -> File:
"""
    Extract all information from a Zippyshare URL.
Parameters
------------
url: :class:`str`
Zippyshare url.
download: :class:`bool`
Download given zippyshare url if ``True``,
default to ``True``.
unzip: :class:`bool`
Unzip downloaded file once finished
(if given file is zip or tar format extract it, otherwise ignore it),
default to ``False``.
**kwargs
These parameters will be passed to :meth:`File.download_coro()`
Returns
-------
:class:`File`
Zippyshare file
"""
info = await get_info_coro(url)
file = File(info)
loop = asyncio.get_event_loop()
if download:
file_path = await file.download_coro(**kwargs)
if unzip:
await loop.run_in_executor(None, lambda: extract_archived_file(str(file_path)))
return file
async def download_coro(*urls, zip: str=None, unzip: bool=False, **kwargs) -> List[File]:
"""
"Coroutine Function"
Download multiple zippyshare urls
Parameters
-----------
*urls: :class:`str`
Zippyshare urls.
zip: :class:`str`
Zip all downloaded files once finished.
Zip filename will be taken from ``zip``,
default to ``None``.
NOTE: You can't mix ``zip`` and ``unzip`` options together
with value ``True``, it will raise error.
unzip: :class:`bool`
Unzip all downloaded files once finished
(if given file is zip format extract it, otherwise ignore it),
default to ``False``.
NOTE: You can't mix ``zip`` and ``unzip`` options together
with value ``True``, it will raise error.
**kwargs
These parameters will be passed to :meth:`File.download_coro()`,
except for parameter ``filename``.
Returns
-------
List[:class:`File`]
a list of Zippyshare files
"""
if unzip and zip:
raise ValueError("unzip and zip paramaters cannot be set together")
loop = asyncio.get_event_loop()
downloaded_files = {}
files = []
for url in urls:
info = await get_info_coro(url)
file = File(info)
files.append(file)
if kwargs.get('filename') is not None:
kwargs.pop('filename')
file_path = await file.download_coro(**kwargs)
downloaded_files[file] = file_path
if unzip:
await loop.run_in_executor(None, lambda: extract_archived_file(str(file_path)))
if zip:
log.info(build_pretty_list_log(downloaded_files, 'Zipping all downloaded files to "%s"' % zip))
await loop.run_in_executor(None, lambda: archive_zip(downloaded_files, zip))
log.info(build_pretty_list_log(downloaded_files, 'Successfully zip all downloaded files to "%s"' % zip))
return files |
"""empty message
Revision ID: 22b250a4f200
Revises: 6b0fc0dd468
Create Date: 2019-06-25 01:29:17.782735
"""
# revision identifiers, used by Alembic.
revision = '22b250a4f200'
down_revision = '6b0fc0dd468'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('categories', sa.Column('level', sa.Integer(), nullable=False))
op.add_column('categories', sa.Column('lft', sa.Integer(), nullable=False))
op.add_column('categories', sa.Column('parent_id', sa.Integer(), nullable=True))
op.add_column('categories', sa.Column('rgt', sa.Integer(), nullable=False))
op.add_column('categories', sa.Column('tree_id', sa.Integer(), nullable=True))
op.create_index('categories_level_idx', 'categories', ['level'], unique=False)
op.create_index('categories_lft_idx', 'categories', ['lft'], unique=False)
op.create_index('categories_rgt_idx', 'categories', ['rgt'], unique=False)
op.create_unique_constraint(None, 'categories', ['id'])
op.create_foreign_key(None, 'categories', 'categories', ['parent_id'], ['id'], ondelete='CASCADE')
op.create_unique_constraint(None, 'layout_discuss', ['id'])
op.create_unique_constraint(None, 'layout_discuss_objects', ['id'])
op.drop_constraint(u'layout_discuss_objects_layout_discuss_id_fkey', 'layout_discuss_objects', type_='foreignkey')
op.create_foreign_key(None, 'layout_discuss_objects', 'layout_discuss', ['layout_discuss_id'], ['id'], ondelete='CASCADE')
op.create_unique_constraint(None, 'layout_protos', ['id'])
op.create_unique_constraint(None, 'layout_users', ['id'])
op.drop_constraint(u'layout_users_layout_id_fkey', 'layout_users', type_='foreignkey')
op.create_foreign_key(None, 'layout_users', 'layouts', ['layout_id'], ['id'], ondelete='CASCADE')
op.create_unique_constraint(None, 'layout_users_access', ['id'])
op.create_unique_constraint(None, 'layouts', ['id'])
op.create_unique_constraint(None, 'objects', ['id'])
op.create_unique_constraint(None, 'objects_categories', ['id'])
op.drop_constraint(u'objects_categories_object_id_fkey', 'objects_categories', type_='foreignkey')
op.create_foreign_key(None, 'objects_categories', 'objects', ['object_id'], ['id'], ondelete='CASCADE')
op.create_unique_constraint(None, 'proto_objects', ['id'])
op.create_unique_constraint(None, 'prototypes', ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'prototypes', type_='unique')
op.drop_constraint(None, 'proto_objects', type_='unique')
op.drop_constraint(None, 'objects_categories', type_='foreignkey')
op.create_foreign_key(u'objects_categories_object_id_fkey', 'objects_categories', 'categories', ['object_id'], ['id'], ondelete=u'CASCADE')
op.drop_constraint(None, 'objects_categories', type_='unique')
op.drop_constraint(None, 'objects', type_='unique')
op.drop_constraint(None, 'layouts', type_='unique')
op.drop_constraint(None, 'layout_users_access', type_='unique')
op.drop_constraint(None, 'layout_users', type_='foreignkey')
op.create_foreign_key(u'layout_users_layout_id_fkey', 'layout_users', 'proto_objects', ['layout_id'], ['id'], ondelete=u'CASCADE')
op.drop_constraint(None, 'layout_users', type_='unique')
op.drop_constraint(None, 'layout_protos', type_='unique')
op.drop_constraint(None, 'layout_discuss_objects', type_='foreignkey')
op.create_foreign_key(u'layout_discuss_objects_layout_discuss_id_fkey', 'layout_discuss_objects', 'layouts', ['layout_discuss_id'], ['id'], ondelete=u'CASCADE')
op.drop_constraint(None, 'layout_discuss_objects', type_='unique')
op.drop_constraint(None, 'layout_discuss', type_='unique')
op.drop_constraint(None, 'categories', type_='foreignkey')
op.drop_constraint(None, 'categories', type_='unique')
op.drop_index('categories_rgt_idx', table_name='categories')
op.drop_index('categories_lft_idx', table_name='categories')
op.drop_index('categories_level_idx', table_name='categories')
op.drop_column('categories', 'tree_id')
op.drop_column('categories', 'rgt')
op.drop_column('categories', 'parent_id')
op.drop_column('categories', 'lft')
op.drop_column('categories', 'level')
### end Alembic commands ###
|
'''
Created on Sep 12, 2016
@author: gotbergj
'''
from logging import getLogger
class InitiumAI(object):
def __init__(self, browser_type='chrome'):
self.browser_type = browser_type
self.main_url = 'https://www.playinitium.com/main.jsp'
self.running = False
self.location = 'unknown'
self.l = getLogger('aia_log')
def status(self):
try:
if not self.running:
self.l.debug('Please start AIA first.')
return
# location_keeper
location_ele = self.wd.browser.find_element_by_xpath('/html/body/div[2]/div/div[1]/a')
self.location = self.wd.get_text(location_ele)
self.l.debug("Location: {}".format(self.location))
# hp cur/max
hp_raw = self.wd.browser.find_element_by_xpath('//*[@id="hitpointsBar"]/p')
hp_raw = self.wd.get_text(hp_raw)
self.hp_cur, self.hp_max = hp_raw.split('/')
self.l.debug('HP cur: {} / HP max: {}'.format(self.hp_cur, self.hp_max))
# gold
self.gold = self.wd.get_text(self.wd.browser.find_element_by_xpath('//*[@id="mainGoldIndicator"]'))
self.l.debug('Gold: {}'.format(self.gold))
# stats
str_raw = self.wd.get_text(self.wd.browser.find_element_by_xpath('//*[@id="reload-div"]/div/div[3]/div/div[1]/div'))
self.str_base, self.str_cur = str_raw.split(' (')
self.str_cur = self.str_cur[:-1]
self.l.debug('Str base: {} Str cur: {}'.format(self.str_base, self.str_cur))
except Exception as doh:
self.l.debug("Something broke: {}".format(doh))
def explore(self):
pass
|
#Use local method (HOC) for sign prediction in signed networks
#Based on Chiang et. al, 2014
import numpy as np
import pickle, time
import scipy.sparse as sp
from scipy.linalg import norm
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectKBest, f_classif
import hoc_edge_features as hoc
import utils.ml_pipeline as pipeline
import random, os
import data.simulate_networks as sim
import analytics.stats as stats
#Perform cross validation, testing on one fold and training on the rest
#Input: adjacency matrix [csr matrix]
# indices of data points in each folds
# Maximum cycle order to consider [int]
# number of features to use [int]
# 0 to use random features, -1 to use all features
#Output: average test accuracy, false positive rate
def kfold_CV(adj_matrix, folds, max_cycle_order, num_features = -1):
num_folds = len(folds)
accuracy_fold_data = list()
false_positive_rate_fold_data = list()
time_fold_data = list()
for fold_index in range(num_folds):
print("Fold %d:" % (fold_index + 1))
#get data
train_points = pipeline.join_folds(folds, fold_index)
test_points = folds[fold_index]
train_test_overlap = False
train_row_indices, train_col_indices = zip(*train_points)
test_row_indices, test_col_indices = zip(*test_points)
train_labels = adj_matrix[train_row_indices, train_col_indices].A[0] #array of signs of training edges
test_labels = adj_matrix[test_row_indices, test_col_indices].A[0] #array of signs of test edges
#construct matrix using just training edges
train_matrix = sp.csr_matrix((train_labels, (train_row_indices, train_col_indices)), shape = adj_matrix.shape)
train_matrix = (train_matrix + train_matrix.transpose()).sign() #make symmetric
#Compute feature products
#This dominates the training time, so report time for only this part for experiments
before_train = time.time()
feature_products = hoc.extract_edge_features(train_matrix, max_cycle_order)
#get features and labels corresponding to each data point
train_data = np.asarray([hoc.extract_features_for_edge(feature_products, tr_point) for tr_point in train_points])
test_data = np.asarray([hoc.extract_features_for_edge(feature_products, te_point) for te_point in test_points])
after_train = time.time()
model_time = after_train - before_train
        #For experimental reasons we may not want to train on all the features,
        #as a diagnostic for what the model is actually learning and why
if num_features > 0: #perform feature selection
feat_sel = SelectKBest(f_classif, k=num_features)
feat_sel.fit(train_data, train_labels)
train_data = feat_sel.transform(train_data)
test_data = feat_sel.transform(test_data)
elif num_features == 0: #train on random features
print "train data: random matrix of shape ", train_data.shape
train_data = np.random.random(train_data.shape)
#train logistic regression classifier
clf = LogisticRegression()
clf.fit(train_data, train_labels)
#Evaluate
test_preds = clf.predict(test_data)
acc, fpr = pipeline.evaluate(test_preds, test_labels)
accuracy_fold_data.append(acc)
false_positive_rate_fold_data.append(fpr)
print "HOC feature extraction time for one fold: ", model_time
time_fold_data.append(model_time)
return accuracy_fold_data, false_positive_rate_fold_data, time_fold_data
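#Hedged usage sketch (not part of the original file): the inputs kfold_CV above
#expects -- a symmetric signed adjacency matrix in CSR format plus folds of
#(row, col) edge index pairs. The tiny 4-node network below is made up purely
#for illustration; real runs build these from the dataset/simulation modules
#imported at the top of this file.
#
# toy_rows = [0, 1, 0, 2, 1, 3]
# toy_cols = [1, 0, 2, 0, 3, 1]
# toy_signs = [1, 1, -1, -1, 1, 1]
# toy_adj = sp.csr_matrix((toy_signs, (toy_rows, toy_cols)), shape=(4, 4))
# toy_folds = [[(0, 1), (0, 2)], [(1, 3)]]
# acc, fpr, times = kfold_CV(toy_adj, toy_folds, max_cycle_order=3)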
#Machine learning pipeline for prediction using HOC features
#Feature extraction to model training and usage
#Input: adjacency matrix (data)
# Name of dataset to use
# Maximum cycle order to consider
# Number of folds for k-fold cross validation (default 10 like in the paper)
# Number of features to use (to test whether classifier is actually learning)
#Output: average accuracy, false positive rate across folds
def hoc_learning_pipeline(adj_matrix, max_cycle_order, num_folds=10, num_features=-1):
#Split into folds
unique_edge_list = pipeline.get_unique_edges(adj_matrix)
data_folds = pipeline.kfold_CV_split(unique_edge_list, num_folds)
#Perform k-fold cross validation
acc_fold_data, fpr_fold_data, time_fold_data = kfold_CV(adj_matrix, data_folds, max_cycle_order, num_features)
avg_acc = sum(acc_fold_data) / float(len(acc_fold_data))
avg_fpr = sum(fpr_fold_data) / float(len(fpr_fold_data))
avg_time = sum(time_fold_data) / float(len(time_fold_data))
acc_stderr = stats.error_width(stats.sample_std(acc_fold_data), num_folds)
fpr_stderr = stats.error_width(stats.sample_std(fpr_fold_data), num_folds)
time_stderr = stats.error_width(stats.sample_std(time_fold_data), num_folds)
return avg_acc, acc_stderr, avg_fpr, fpr_stderr, avg_time, time_stderr |
x = (1, 2, (3, 'John', 4), 'Hi')
eval = x[0]
print(('x[0]: {0} value: {1}').format(str(type(eval)), str(eval)))
eval = x[2]
print(('x[2]: {0} value: {1}').format(str(type(eval)), str(eval)))
eval = x[-1]
print(('x[-1]: {0} value: {1}').format(str(type(eval)), str(eval)))
eval = x[2][2]
print(('x[2][2]: {0} value: {1}').format(str(type(eval)), str(eval)))
eval = x[2][-1]
print(('x[2][-1]: {0} value: {1}').format(str(type(eval)), str(eval)))
eval = x[-1][-1]
print(('x[-1][-1]: {0} value: {1}').format(str(type(eval)), str(eval)))
try:
eval = x[-1][2]
print(('x[-1][2]: {0} value: {1}').format(str(type(eval)), str(eval)))
except IndexError:
print('x[-1][2]: <nonetype> value: error')
eval = x[0:1]
print(('x[0:1]: {0} value: {1}').format(str(type(eval)), str(eval)))
eval = x[0:-1]
print(('x[0:-1]: {0} value: {1}').format(str(type(eval)), str(eval)))
eval = len(x)
print(('len(x): {0} value: {1}').format(str(type(eval)), str(eval)))
eval = 2 in x
print(('2 in x: {0} value: {1}').format(str(type(eval)), str(eval)))
eval = 3 in x
print(('3 in x: {0} value: {1}').format(str(type(eval)), str(eval)))
try:
eval = x[0] = 8
print(('x[0] = 8: {0} value: {1}').format(str(type(eval)), str(eval)))
except TypeError:
print('x[0] = 8: <nonetype> value: error')
# a new comment |
from rubicon_ml.sklearn.filter_estimator_logger import FilterEstimatorLogger
from rubicon_ml.sklearn.pipeline import RubiconPipeline
__all__ = ["FilterEstimatorLogger", "RubiconPipeline"]
|
import time
from mock import MagicMock
from bxcommon.test_utils.abstract_test_case import AbstractTestCase
from bxcommon.utils.alarm_queue import AlarmQueue
class AlarmQueueTest(AbstractTestCase):
def setUp(self):
self.alarm_queue = AlarmQueue()
def function_to_pass(self, first, second):
return first + second
def test_register_alarm(self):
alarm_id = self.alarm_queue.register_alarm(1, self.function_to_pass, 1, 5)
self.assertEqual(1, len(self.alarm_queue.alarms))
self.assertEqual(1, self.alarm_queue.uniq_count)
self.assertEqual(0, self.alarm_queue.alarms[0].count)
self.assertEqual(0, alarm_id.count)
def test_register_approx_alarm(self):
self.alarm_queue.register_approx_alarm(1, 3, self.function_to_pass, 1, 5)
self.assertEqual(1, len(self.alarm_queue.approx_alarms_scheduled[self.function_to_pass]))
self.assertEqual(self.function_to_pass,
self.alarm_queue.approx_alarms_scheduled[self.function_to_pass][0].alarm.fn)
def test_unregister_alarm(self):
alarm_id1 = self.alarm_queue.register_alarm(1, self.function_to_pass, 1, 5)
self.assertEqual(1, len(self.alarm_queue.alarms))
alarm_id2 = self.alarm_queue.register_alarm(1, self.function_to_pass, 2, 9)
self.assertEqual(2, len(self.alarm_queue.alarms))
self.alarm_queue.unregister_alarm(alarm_id1)
self.assertEqual(1, len(self.alarm_queue.alarms))
self.alarm_queue.unregister_alarm(alarm_id2)
self.assertEqual(0, len(self.alarm_queue.alarms))
def test_fire_alarms(self):
self.alarm_queue.register_alarm(1, self.function_to_pass, 0, 0)
self.alarm_queue.register_alarm(5, self.function_to_pass, 0, 0)
time.time = MagicMock(return_value=time.time() + 2)
self.alarm_queue.fire_alarms()
self.assertEqual(1, len(self.alarm_queue.alarms))
def test_time_to_next_alarm(self):
self.assertIsNone(self.alarm_queue.time_to_next_alarm())
self.alarm_queue.register_alarm(1, self.function_to_pass, 1, 5)
self.assertEqual(1, len(self.alarm_queue.alarms))
self.assertLess(0, self.alarm_queue.time_to_next_alarm())
time.time = MagicMock(return_value=time.time() + 2)
self.assertGreater(0, self.alarm_queue.time_to_next_alarm())
def test_fire_ready_alarms(self):
self.alarm_queue.register_alarm(1, self.function_to_pass, 0, 0)
self.alarm_queue.register_alarm(5, self.function_to_pass, 0, 0)
time.time = MagicMock(return_value=time.time() + 2)
time_to_next_alarm = self.alarm_queue.fire_ready_alarms()
self.assertEqual(1, len(self.alarm_queue.alarms))
self.assertLess(0, time_to_next_alarm)
def test_approx_alarm_cleans_up_even_with_exceptions(self):
def raise_exception(should_raise: bool):
if should_raise:
raise Exception()
else:
pass
self.alarm_queue.register_approx_alarm(0, 1, raise_exception, True)
self.alarm_queue.register_approx_alarm(2, 1, raise_exception, False)
self.alarm_queue.fire_alarms()
time.time = MagicMock(return_value=time.time() + 2)
self.alarm_queue.fire_alarms()
|
print("Hello World!")
print("This is lesson 382 how to execute a python file. Now I had to do a little searching of the Python Docs to be able to run the script with a windows terminal, but I got it figured out!")
print("I also just discovered that if you just press the green play button at the top right of the screen, it will run your script in the terminal automatically!") |
#!/usr/bin/env python
# coding: utf-8
# # Challenge 3
#
# In this challenge we will practice our knowledge of probability distributions. To do so,
# we will split the challenge into two parts:
#
# 1. The first part has 3 questions about an artificial *data set* containing samples from a normal and
# a binomial distribution.
# 2. The second part is about analyzing the distribution of a variable from the [Pulsar Star](https://archive.ics.uci.edu/ml/datasets/HTRU2) _data set_, and has 2 questions.
#
# > Note: please do not rename the answer functions.
# ## General _setup_
# In[2]:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as sct
import seaborn as sns
from statsmodels.distributions.empirical_distribution import ECDF
# In[2]:
#%matplotlib inline
#from IPython.core.pylabtools import figsize
#figsize(12, 8)
#sns.set()
# ## Part 1
# ### _Setup_ for part 1
# In[3]:
np.random.seed(42)
dataframe = pd.DataFrame({"normal": sct.norm.rvs(20, 4, size=10000),
"binomial": sct.binom.rvs(100, 0.2, size=10000)})
# ## Start your analysis of part 1 here
# In[4]:
dataframe.head(5)
# ### Normal
# In[96]:
normal = dataframe.normal
normal
# In[97]:
normal.describe()
# In[98]:
sct.norm.ppf(0.25, loc=20, scale=4)
# In[99]:
normal_vinte_cinco = normal.describe()[4]
normal_vinte_cinco
# In[100]:
normal_cinquenta = normal.describe()[5]
normal_cinquenta
# In[101]:
normal_setenta_cinco = normal.describe()[6]
normal_setenta_cinco
# ### Binomial
# In[102]:
binomial = dataframe.binomial
binomial.head(5)
# In[103]:
binomial.describe()
# In[104]:
binomial_vinte_cinco = binomial.describe()[4]
binomial_vinte_cinco
# In[105]:
binomial_cinquenta = binomial.describe()[5]
binomial_cinquenta
# In[106]:
binomial_setenta_cinco = binomial.describe()[6]
binomial_setenta_cinco
# ### Difference between the quartiles
# In[107]:
dif_q1 = (normal_vinte_cinco - binomial_vinte_cinco).round(3)
dif_q2 = (normal_cinquenta - binomial_cinquenta).round(3)
dif_q3 = (normal_setenta_cinco - binomial_setenta_cinco).round(3)
# In[108]:
dif_quartis = (dif_q1, dif_q2, dif_q3)
dif_quartis
# ## Question 1
#
# What is the difference between the quartiles (Q1, Q2 and Q3) of the `normal` and `binomial` variables of `dataframe`? Answer as a tuple of three elements rounded to three decimal places.
#
# In other words, let `q1_norm`, `q2_norm` and `q3_norm` be the quartiles of the `normal` variable and `q1_binom`, `q2_binom` and `q3_binom` the quartiles of the `binomial` variable; what is the difference `(q1_norm - q1_binom, q2_norm - q2_binom, q3_norm - q3_binom)`?
# In[27]:
def q1():
return dif_quartis
# Food for thought:
#
# * Did you expect values of this magnitude?
#
# * Can you explain how such apparently different distributions (discrete and continuous, for instance) end up producing these values?
# ## Question 2
#
# Consider the interval $[\bar{x} - s, \bar{x} + s]$, where $\bar{x}$ is the sample mean and $s$ is the standard deviation. What is the probability of this interval, computed from the empirical cumulative distribution function (empirical CDF) of the `normal` variable? Answer as a single scalar rounded to three decimal places.
# In[86]:
media = normal.mean()
# In[87]:
desvio_padrao = normal.std()
# In[88]:
probabilidade = ECDF(normal)
dif_intervalo = (probabilidade(media + desvio_padrao) - probabilidade(media - desvio_padrao)).round(3)
dif_intervalo
# In[89]:
def q2():
return dif_intervalo
# Food for thought:
#
# * Is this value close to the theoretical expectation?
# * Also try the intervals $[\bar{x} - 2s, \bar{x} + 2s]$ and $[\bar{x} - 3s, \bar{x} + 3s]$ (see the sketch below).
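# In[ ]:
# Added sketch (not part of the original notebook): empirical probabilities for the
# wider intervals suggested above, reusing `probabilidade`, `media` and `desvio_padrao`
# already defined for question 2.
for k in (2, 3):
    p_k = probabilidade(media + k * desvio_padrao) - probabilidade(media - k * desvio_padrao)
    print(k, round(float(p_k), 3))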
# ## Question 3
#
# What is the difference between the means and the variances of the `binomial` and `normal` variables? Answer as a tuple of two elements rounded to three decimal places.
#
# In other words, let `m_binom` and `v_binom` be the mean and variance of the `binomial` variable, and `m_norm` and `v_norm` the mean and variance of the `normal` variable. What are the differences `(m_binom - m_norm, v_binom - v_norm)`?
# In[113]:
m_norm = normal.mean()
# In[114]:
v_norm = normal.var()
# In[115]:
m_binom = binomial.mean()
# In[116]:
v_binom = binomial.var()
# In[117]:
dif_m = (m_binom - m_norm).round(3)
# In[118]:
dif_v = (v_binom - v_norm).round(3)
# In[120]:
dif_norm_binom = (dif_m, dif_v)
dif_norm_binom
# In[121]:
def q3():
return dif_norm_binom
# Food for thought:
#
# * Did you expect values of this magnitude?
# * What is the effect of increasing or decreasing $n$ (currently 100) on the distribution of the `binomial` variable? (See the sketch below.)
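# In[ ]:
# Added sketch (not part of the original notebook): how the sample mean and variance of a
# binomial(n, 0.2) change with n; the values of n below are arbitrary examples.
for n in (20, 100, 500):
    amostra = sct.binom.rvs(n, 0.2, size=10000)
    print(n, round(float(amostra.mean()), 3), round(float(amostra.var()), 3))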
# ## Part 2
# ### _Setup_ for part 2
# In[168]:
stars = pd.read_csv("pulsar_stars.csv")
stars.rename({old_name: new_name
for (old_name, new_name)
in zip(stars.columns,
["mean_profile", "sd_profile", "kurt_profile", "skew_profile", "mean_curve", "sd_curve", "kurt_curve", "skew_curve", "target"])
},
axis=1, inplace=True)
stars.loc[:, "target"] = stars.target.astype(bool)
# ## Start your analysis of part 2 here
# In[169]:
stars.head(10)
# In[170]:
stars.shape
# ## Question 4
#
# Considering the `mean_profile` variable of `stars`:
#
# 1. Filter only the values of `mean_profile` where `target == 0` (that is, where the star is not a pulsar).
# 2. Standardize the `mean_profile` variable filtered above so that it has mean 0 and variance 1.
#
# We will call the resulting variable `false_pulsar_mean_profile_standardized`.
#
# Find the theoretical quantiles of a normal distribution with mean 0 and variance 1 at 0.80, 0.90 and 0.95 using the `norm.ppf()` function available in `scipy.stats`.
#
# What are the probabilities associated with these quantiles according to the empirical CDF of the `false_pulsar_mean_profile_standardized` variable? Answer as a tuple of three elements rounded to three decimal places.
# In[173]:
df = stars.query('target == False').mean_profile
df
# In[176]:
df_mean = df.mean()
df_mean
# In[178]:
df_std = df.std()
df_std
# In[181]:
false_pulsar_mean_profile_standardized = (df - df_mean) / df_std
false_pulsar_mean_profile_standardized
# In[188]:
cdf_emp = ECDF(false_pulsar_mean_profile_standardized)
quantis_dist_norm = sct.norm.ppf([0.80, 0.90, 0.95])
probabilidade_quantis = tuple((cdf_emp(quantis_dist_norm)).round(3))
probabilidade_quantis
# In[189]:
def q4():
return probabilidade_quantis
# Food for thought:
#
# * Do the values you found make sense?
# * What might this say about the distribution of the `false_pulsar_mean_profile_standardized` variable?
# ## Question 5
#
# What is the difference between the Q1, Q2 and Q3 quantiles of `false_pulsar_mean_profile_standardized` and the same theoretical quantiles of a normal distribution with mean 0 and variance 1? Answer as a tuple of three elements rounded to three decimal places.
# In[192]:
normal_q1 = sct.norm.ppf(0.25, loc=0, scale=1)
normal_q1
# In[193]:
normal_q2 = sct.norm.ppf(0.5, loc=0, scale=1)
normal_q2
# In[194]:
normal_q3 = sct.norm.ppf(0.75, loc=0, scale=1)
normal_q3
# In[195]:
false_pulsar_q1, false_pulsar_q2, false_pulsar_q3 = false_pulsar_mean_profile_standardized.quantile([0.25, 0.5, 0.75])
# In[198]:
dif_quantile = ((false_pulsar_q1 - normal_q1).round(3), (false_pulsar_q2 - normal_q2).round(3), (false_pulsar_q3 - normal_q3).round(3))
dif_quantile
# In[199]:
def q5():
return dif_quantile
# Food for thought:
#
# * Do the values you found make sense?
# * What might this say about the distribution of the `false_pulsar_mean_profile_standardized` variable?
# * Fun fact: some hypothesis tests for normality of the data use this very approach.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import unittest
from typing import Dict, Tuple
from omop2obo.utils import *
class TestDataUtils(unittest.TestCase):
"""Class to test the downloading methods from the data utility script."""
def setUp(self):
# create some fake Pandas DataFrames
self.clin_data = pd.DataFrame({'CONCEPT_ID': ['4331309', '4331309', '37018594', '37018594', '442264'],
'CONCEPT_SOURCE_CODE': ['2265305', '2265305', '802510', '802510', '6817202'],
'UMLS_CUI': ['C0729608', 'C0729608', 'C4075981', 'C4075981', 'C0151936'],
'UMLS_CODE': ['2265305', '2265305', '802510', '802510', '6817202']
})
self.subset_clin_data = pd.DataFrame({'CONCEPT_ID': ['4331309', '4331309', '37018594', '37018594', '442264',
'4331309', '4331309', '37018594', '37018594', '442264',
'4331309', '4331309', '37018594', '37018594', '442264'],
'CODE': ['2265305', '2265305', '802510', '802510', '6817202',
'C0729608', 'C0729608', 'C4075981', 'C4075981', 'C0151936',
'2265305', '2265305', '802510', '802510', '6817202'],
'CODE_COLUMN': ['CONCEPT_SOURCE_CODE'] * 5 +
['UMLS_CUI'] * 5 +
['UMLS_CODE'] * 5
})
self.string_data = pd.DataFrame({'CONCEPT_ID': ['4331309', '37018594', '442264', '4029098', '4012199'],
'CONCEPT_LABEL': ['Myocarditis due to infectious agent',
'Complement level below reference range',
'Disorder of tendon',
'Disorder of tetrahydrobiopterin metabolism',
'Vulval pain'],
'CONCEPT_SYNONYM': ['Myocarditis due to infectious agent | Infective '
'myocarditis | Myocarditis due to infectious agent ('
'disorder)',
'Complement level below reference range | Complement '
'level below reference range (finding)',
'Disorder of tendon (disorder) | Disorder of tendon | '
'Tendon disorder',
'Disorder of tetrahydrobiopterin metabolism (disorder) | '
'Disorder of tetrahydrobiopterin metabolism',
'Vulval pain (finding) | Vulval pain | Pain of vulva']
})
# create data to verifying grouping function
self.group_data = pd.DataFrame({'CONCEPT_ID': ['442264', '4029098', '4141365', '133835', '133835'],
'CONCEPT_DBXREF_ONT_URI': ['http://purl.obolibrary.org/obo/MONDO_0100010',
'http://purl.obolibrary.org/obo/MONDO_0045014',
'http://purl.obolibrary.org/obo/MONDO_0043358',
'http://purl.obolibrary.org/obo/HP_0000964',
'http://purl.obolibrary.org/obo/MONDO_0002406'],
'CONCEPT_DBXREF_ONT_TYPE': ['MONDO', 'MONDO', 'MONDO', 'HP', 'MONDO'],
'CONCEPT_DBXREF_ONT_LABEL': ['tendinopathy',
'tetrahydrobiopterin metabolic process disease',
'engraftment syndrome', 'eczema', 'dermatitis'],
'CONCEPT_DBXREF_ONT_EVIDENCE': ['CONCEPT_DBXREF_sctid:68172002',
'CONCEPT_DBXREF_sctid:237913008',
'CONCEPT_DBXREF_sctid:426768001',
'CONCEPT_DBXREF_snomedct_us:43116000',
'CONCEPT_DBXREF_sctid:43116000']
})
# create sample dictionaries
self.sample_dicts = {
'hp': {
'dbxref': {'UMLS:C4022916': 'http://purl.obolibrary.org/obo/HP_0012400',
'UMLS:C4020882': 'http://purl.obolibrary.org/obo/HP_0000925',
'UMLS:C4021789': 'http://purl.obolibrary.org/obo/HP_0000925'},
'label': {'abnormal aldolase level': 'http://purl.obolibrary.org/obo/HP_0012400',
'abnormality of the vertebral column': 'http://purl.obolibrary.org/obo/HP_0000925',
'patulous urethra': 'http://purl.obolibrary.org/obo/HP_0025417'}},
'mondo': {
'dbxref': {'GARD:0009221': 'http://purl.obolibrary.org/obo/MONDO_0022509',
'DOID:5726': 'http://purl.obolibrary.org/obo/MONDO_0003611',
'UMLS:C3642324': 'http://purl.obolibrary.org/obo/MONDO_0003611'},
'label': {'asternia': 'http://purl.obolibrary.org/obo/MONDO_0022509',
'hyperekplexia 3': 'http://purl.obolibrary.org/obo/MONDO_0013827',
'color vision disorder': 'http://purl.obolibrary.org/obo/MONDO_0001703'}}
}
# create example result data
self.ont_data = {'hp': {'label': {'abetalipoproteinemia': 'http://purl.obolibrary.org/obo/HP_0008181'},
'dbxref': {'snomedct_us:190787008': 'http://purl.obolibrary.org/obo/HP_0008181'},
'dbxref_type': {'snomedct_us:190787008': 'DbXref'},
'synonym': {'wet lung': 'http://purl.obolibrary.org/obo/HP_0100598'},
'synonym_type': {'wet lung': 'hasExactSynonym'}}}
self.source_codes = {'snomed:190787008': 'DbXref*snomedct_us'}
return None
def test_data_frame_subsetter(self):
"""Tests the data_frame_subsetter method."""
# run method and test output
subset_data = data_frame_subsetter(self.clin_data, 'CONCEPT_ID',
['CONCEPT_SOURCE_CODE', 'UMLS_CUI', 'UMLS_CODE'])
self.assertIsInstance(subset_data, pd.DataFrame)
self.assertTrue(len(subset_data) == 9)
self.assertEqual(list(subset_data.columns), ['CONCEPT_ID', 'CODE', 'CODE_COLUMN'])
return None
def test_data_frame_supersetter(self):
"""Tests the data_frame_supersetter method."""
# run method and test output
subset_data = data_frame_supersetter(self.subset_clin_data, 'CONCEPT_ID', 'CODE_COLUMN', 'CODE')
self.assertIsInstance(subset_data, pd.DataFrame)
self.assertTrue(len(subset_data) == 3)
self.assertEqual(list(subset_data.columns), ['CONCEPT_ID', 'CONCEPT_SOURCE_CODE', 'UMLS_CODE', 'UMLS_CUI'])
return None
def test_column_splitter(self):
"""Tests the column_splitter method."""
# set-up input parameters
delimited_columns = ['CONCEPT_LABEL', 'CONCEPT_SYNONYM']
split_data = column_splitter(self.string_data, 'CONCEPT_ID', delimited_columns, '|')
# test method and output
self.assertIsInstance(split_data, pd.DataFrame)
self.assertTrue(len(split_data) == 13)
self.assertEqual(list(split_data.columns), ['CONCEPT_ID', 'CONCEPT_LABEL', 'CONCEPT_SYNONYM'])
return None
def test_aggregates_column_values(self):
"""Tests the aggregates_column_values method."""
# set-up input parameters
agg_data = aggregates_column_values(self.subset_clin_data, 'CONCEPT_ID', ['CODE', 'CODE_COLUMN'], '|')
# test method and output
self.assertIsInstance(agg_data, pd.DataFrame)
self.assertTrue(len(agg_data) == 3)
self.assertEqual(list(agg_data.columns), ['CONCEPT_ID', 'CODE', 'CODE_COLUMN'])
return None
def test_data_frame_grouper(self):
"""Tests the data_frame_grouper method."""
grouped_data = data_frame_grouper(self.group_data, 'CONCEPT_ID', 'CONCEPT_DBXREF_ONT_TYPE',
aggregates_column_values)
# test method and output
self.assertIsInstance(grouped_data, pd.DataFrame)
self.assertTrue(len(grouped_data) == 4)
self.assertEqual(list(grouped_data.columns), ['CONCEPT_ID', 'CONCEPT_DBXREF_HP_URI',
'CONCEPT_DBXREF_HP_LABEL', 'CONCEPT_DBXREF_HP_EVIDENCE',
'CONCEPT_DBXREF_MONDO_URI', 'CONCEPT_DBXREF_MONDO_LABEL',
'CONCEPT_DBXREF_MONDO_EVIDENCE'])
return None
def test_normalizes_source_codes(self):
"""Tests the normalizes_source_codes method."""
# set-up input data
data = pd.DataFrame(['reactome:r-hsa-937045', 'http://linkedlifedata.com/resource/umls/id/C0010323',
'snomedct_us:111395007', 'pesticides:derivatives/benazolin-ethyl'], columns=['CODE'])
# set-up input dictionary
source_code_dict = {'snomedct_us': 'snomed', 'http://linkedlifedata.com/resource/umls/id': 'umls'}
# test method
result = normalizes_source_codes(data['CODE'].to_frame(), source_code_dict)
self.assertIsInstance(result, pd.Series)
self.assertIn('reactome:r-hsa-937045', list(result))
self.assertIn('umls:c0010323', list(result))
self.assertIn('snomed:111395007', list(result))
self.assertIn('pesticides:derivatives:benazolin-ethyl', list(result))
return None
def test_merge_dictionaries(self):
"""Tests the merge_dictionaries method."""
# run method and test output
combined_dicts = merge_dictionaries(self.sample_dicts, 'dbxref')
self.assertIsInstance(combined_dicts, Dict)
self.assertTrue(len(combined_dicts.keys()) == 6)
self.assertTrue(len(combined_dicts.values()) == 6)
# test the method when reverse=True
combined_dicts_rev = merge_dictionaries(self.sample_dicts, 'dbxref', reverse=True)
self.assertIsInstance(combined_dicts_rev, Dict)
self.assertTrue(len(combined_dicts_rev.keys()) == 4)
self.assertTrue(len(combined_dicts_rev.values()) == 4)
return None
def test_ohdsi_ananke(self):
"""Tests the ohdsi_ananke method."""
# create input data
combo_dict_df = pd.DataFrame({'CODE': ['hp:0001901', 'hp:0011737', 'hp:0002883', 'hp:0002883'],
'CONCEPT_DBXREF_ONT_URI': ['http://purl.obolibrary.org/obo/HP_0001901',
'http://purl.obolibrary.org/obo/HP_0011737',
'http://purl.obolibrary.org/obo/HP_0002883',
'http://purl.obolibrary.org/obo/HP_0002883']
})
clinical_data = pd.DataFrame({'CONCEPT_ID': ['315763', '440965', '138117', '999999'],
'CODE': ['C0000005', 'C0000039', 'C5234707', 'C9999999'],
'CODE_COLUMN': ['UMLS_CUI', 'UMLS_CUI', 'UMLS_CUI', 'UMLS_CUI']
})
umls_cui_data = pd.DataFrame({'CUI': ['C0000005', 'C0000039', 'C5234707'],
'SAB': ['HPO', 'HPO', 'HPO'],
'CODE': ['hp:0001901', 'hp:0011737', 'hp:0002883']
})
# run method and test output
merged_data = ohdsi_ananke('CONCEPT_ID', ['hp'], combo_dict_df, clinical_data, umls_cui_data)
self.assertIsInstance(merged_data, pd.DataFrame)
self.assertTrue(len(merged_data) == 3)
self.assertTrue(list(merged_data.columns) == ['CONCEPT_ID', 'CODE', 'CODE_COLUMN', 'CONCEPT_DBXREF_ONT_URI'])
return None
def tests_normalizes_clinical_source_codes(self):
"""Tests the normalizes_clinical_source_codes method."""
# set input arguments
dbxref_dict = {'umls:c0008733': 'DbXref', 'snomedct_us:462165005': 'DbXref'}
source_dict = {'snomedct_us': 'snomed'}
# test method
results = normalizes_clinical_source_codes(dbxref_dict, source_dict)
self.assertIsInstance(results, Dict)
self.assertEqual(len(results), 2)
return None
def tests_filters_mapping_content_scenario1(self):
"""Tests the filters_mapping_content method - scenario 1."""
# create input values
# test set 1
input_1_exact = [['HP_0008181', 'HP_0008181'], ['abetalipoproteinemia', 'abetalipoproteinemia'],
['CONCEPT_DBXREF_snomed:190787008', 'CONCEPT_SOURCE_LABEL:abetalipoproteinemia']]
input_1_sim = [['HP_0008181'], ['abetalipoproteinemia'], ['HP_0008181_1.0']]
# test method -- input set 1
results_1 = filters_mapping_content(input_1_exact, input_1_sim, 0.25)
self.assertIsInstance(results_1[0], list)
self.assertEqual(len(results_1[0]), 3)
self.assertIsInstance(results_1[1], list)
self.assertEqual(len(results_1[1]), 3)
self.assertEqual(results_1[0], [['HP_0008181'], ['abetalipoproteinemia'],
'CONCEPT_DBXREF_snomed:190787008 | CONCEPT_SOURCE_LABEL:abetalipoproteinemia'])
self.assertEqual(results_1[1], [['HP_0008181'], ['abetalipoproteinemia'], 'HP_0008181_1.0'])
return None
def tests_filters_mapping_content_scenario2(self):
"""Tests the filters_mapping_content method - scenario 2."""
# create input values
# test set 2
input_2_exact = [['HP_0011276', 'HP_0000951'], ['vascular skin abnormality', 'abnormality of the skin'],
['ANCESTOR_DBXREF_snomed:11263005 | ANCESTOR_DBXREF_msh:d012871']]
input_2_sim = [['HP_0100309', 'HP_0100310'], ['subdural hemorrhage', 'epidural hemorrhage'],
['HP_0100309_0.75 | HP_0100310_0.786']]
# test method -- input set 2
results_2 = filters_mapping_content(input_2_exact, input_2_sim, 0.25)
self.assertIsInstance(results_2[0], list)
self.assertEqual(len(results_2[0]), 3)
self.assertIsInstance(results_2[1], list)
self.assertEqual(len(results_2[1]), 3)
self.assertEqual(results_2[0], [['HP_0011276', 'HP_0000951'],
['vascular skin abnormality', 'abnormality of the skin'],
'ANCESTOR_DBXREF_snomed:11263005 | ANCESTOR_DBXREF_msh:d012871'])
self.assertEqual(results_2[1], [['HP_0100309', 'HP_0100310'], ['subdural hemorrhage', 'epidural hemorrhage'],
'HP_0100309_0.75 | HP_0100310_0.786'])
return None
def tests_filters_mapping_content_scenario2_threshold(self):
"""Tests the filters_mapping_content method - scenario 2 with high thresholding."""
# create input values
# test set 2
input_2a_exact = [['HP_0011276', 'HP_0000951'], ['vascular skin abnormality', 'abnormality of the skin'],
['ANCESTOR_DBXREF_snomed:11263005 | ANCESTOR_DBXREF_msh:d012871']]
input_2a_sim = [['HP_0100309', 'HP_0100310'], ['subdural hemorrhage', 'epidural hemorrhage'],
['HP_0100309_0.75 | HP_0100310_0.786']]
# test method -- input set 2
results_2a = filters_mapping_content(input_2a_exact, input_2a_sim, 0.76)
self.assertIsInstance(results_2a[0], list)
self.assertEqual(len(results_2a[0]), 3)
self.assertIsInstance(results_2a[1], list)
self.assertEqual(len(results_2a[1]), 3)
self.assertEqual(results_2a[0], [['HP_0011276', 'HP_0000951'],
['vascular skin abnormality', 'abnormality of the skin'],
'ANCESTOR_DBXREF_snomed:11263005 | ANCESTOR_DBXREF_msh:d012871'])
self.assertEqual(results_2a[1], [['HP_0100310'], ['epidural hemorrhage'], 'HP_0100310_0.786'])
return None
def tests_filters_mapping_content_scenario3(self):
"""Tests the filters_mapping_content method - scenario 3."""
# create input values
# test set 3
input_3_exact = [['HP_0011276', 'HP_0000951'], ['vascular skin abnormality', 'abnormality of the skin'],
['CONCEPT_DBXREF_snomed:11263005 | CONCEPT_DBXREF_msh:d012871']]
input_3_sim = [['HP_0100309', 'HP_0100310'], ['subdural hemorrhage', 'epidural hemorrhage'],
['HP_0100309_0.278 | HP_0100310_0.266']]
# test method -- input set 3
results_3 = filters_mapping_content(input_3_exact, input_3_sim, 0.25)
self.assertIsInstance(results_3[0], list)
self.assertEqual(len(results_3[0]), 3)
self.assertIsInstance(results_3[1], list)
self.assertEqual(len(results_3[1]), 3)
self.assertEqual(results_3[0], [['HP_0011276', 'HP_0000951'],
['vascular skin abnormality', 'abnormality of the skin'],
'CONCEPT_DBXREF_snomed:11263005 | CONCEPT_DBXREF_msh:d012871'])
self.assertEqual(results_3[1], [['HP_0100309', 'HP_0100310'], ['subdural hemorrhage', 'epidural hemorrhage'],
'HP_0100309_0.278 | HP_0100310_0.266'])
return None
def tests_filters_mapping_content_scenario4(self):
"""Tests the filters_mapping_content method - scenario 4."""
# create input values
# test set 4
input_4_exact = [['HP_0011276', 'HP_0000951'], ['vascular skin abnormality', 'abnormality of the skin'],
['ANCESTOR_DBXREF_snomed:11263005 | ANCESTOR_DBXREF_msh:d012871']]
input_4_sim = [['HP_0100309', 'HP_0100310'], ['subdural hemorrhage', 'epidural hemorrhage'],
['HP_0100309_0.278 | HP_0100310_0.266']]
# test method -- input set 4
results_4 = filters_mapping_content(input_4_exact, input_4_sim, 0.25)
self.assertIsInstance(results_4[0], list)
self.assertEqual(len(results_4[0]), 3)
self.assertIsInstance(results_4[1], list)
self.assertEqual(len(results_4[1]), 3)
self.assertEqual(results_4[0], [['HP_0011276', 'HP_0000951'],
['vascular skin abnormality', 'abnormality of the skin'],
'ANCESTOR_DBXREF_snomed:11263005 | ANCESTOR_DBXREF_msh:d012871'])
self.assertEqual(results_4[1], [['HP_0100309', 'HP_0100310'], ['subdural hemorrhage', 'epidural hemorrhage'],
'HP_0100309_0.278 | HP_0100310_0.266'])
return None
def tests_filters_mapping_content_scenario5(self):
"""Tests the filters_mapping_content method - scenario 5."""
# create input values
# test set 5
input_5_exact = [['HP_0002011', 'HP_0002960', 'HP_0011096'],
['morphological central nervous system abnormality', 'peripheral demyelination'],
['ANCESTOR_DBXREF_snomed:23853001 | ANCESTOR_DBXREF_snomed:85828009',
'ANCESTOR_LABEL:demyelination']]
input_5_sim = [[], [], []]
# test method -- input set 5
results_5 = filters_mapping_content(input_5_exact, input_5_sim, 0.25)
self.assertIsInstance(results_5[0], list)
self.assertEqual(len(results_5[0]), 3)
self.assertIsInstance(results_5[1], list)
self.assertEqual(len(results_5[1]), 3)
self.assertEqual(results_5[0], [['HP_0002011', 'HP_0002960', 'HP_0011096'],
['morphological central nervous system abnormality',
'peripheral demyelination'],
'ANCESTOR_DBXREF_snomed:23853001 | ANCESTOR_DBXREF_snomed:85828009 | '
'ANCESTOR_LABEL:demyelination'])
self.assertEqual(results_5[1], [None, None, None])
return None
def tests_compiles_mapping_content_1(self):
"""Tests the compiles_mapping_content method - round 1."""
# create required input resources
data_row_1 = pd.Series({'CONCEPT_ID': '4098595',
'CONCEPT_DBXREF_HP_URI': 'http://purl.obolibrary.org/obo/HP_0008181',
'CONCEPT_DBXREF_HP_LABEL': 'abetalipoproteinemia',
'CONCEPT_DBXREF_HP_EVIDENCE': 'CONCEPT_DBXREF_snomed:190787008',
'CONCEPT_STR_HP_URI': 'http://purl.obolibrary.org/obo/HP_0008181',
'CONCEPT_STR_HP_LABEL': 'abetalipoproteinemia',
'CONCEPT_STR_HP_EVIDENCE': 'CONCEPT_SOURCE_LABEL:abetalipoproteinemia',
'HP_SIM_ONT_URI': 'HP_0008181',
'HP_SIM_ONT_LABEL': 'abetalipoproteinemia',
'HP_SIM_ONT_EVIDENCE': 'HP_0008181_1.0'})
data_row_2 = pd.Series({'CONCEPT_ID': '4098595', 'CONCEPT_DBXREF_HP_URI': '', 'CONCEPT_DBXREF_HP_LABEL': '',
'CONCEPT_DBXREF_HP_EVIDENCE': '', 'CONCEPT_STR_HP_URI': '', 'CONCEPT_STR_HP_LABEL': '',
'CONCEPT_STR_HP_EVIDENCE': '', 'HP_SIM_ONT_URI': '', 'HP_SIM_ONT_LABEL': '',
'HP_SIM_ONT_EVIDENCE': ''})
# test method
results = compiles_mapping_content(data_row_1, 'HP', 0.75)
self.assertIsInstance(results[0], list)
self.assertIsInstance(results[1], list)
self.assertEqual(len(results[0]), 3)
self.assertEqual(len(results[1]), 3)
self.assertIsInstance(results[0][0], list)
self.assertIsInstance(results[0][1], list)
self.assertIsInstance(results[0][2], str)
results = compiles_mapping_content(data_row_2, 'HP', 0.75)
self.assertIsInstance(results[0], list)
self.assertIsInstance(results[1], list)
self.assertEqual(len(results[0]), 3)
self.assertEqual(len(results[1]), 3)
self.assertEqual(results[0][0], None)
self.assertEqual(results[0][1], None)
self.assertEqual(results[0][2], None)
return None
def tests_compiles_mapping_content_2(self):
"""Tests the compiles_mapping_content method - round 2."""
# create required input resources
data_row_1 = pd.Series({'CONCEPT_ID': '4134318',
'CONCEPT_DBXREF_HP_URI': '',
'CONCEPT_DBXREF_HP_LABEL': '',
'CONCEPT_DBXREF_HP_EVIDENCE': '',
'CONCEPT_STR_HP_URI': 'http://purl.obolibrary.org/obo/HP_0041249',
'CONCEPT_STR_HP_LABEL': 'fractured nose',
'CONCEPT_STR_HP_EVIDENCE': 'CONCEPT_SYNONYM:fractured_nose',
'HP_SIM_ONT_URI': 'HP_0041249 | HP_0010939 | HP_0004646 | HP_0010941 | HP_0041162 | '
'HP_0041248',
'HP_SIM_ONT_LABEL': 'fractured nose | abnormality of the nasal bone | hypoplasia of '
'the nasal bone | aplasia of the nasal bone | fractured foot bone '
'| fractured carpal bone',
'HP_SIM_ONT_EVIDENCE': 'HP_0041249_1.0 | HP_0010939_0.379 | HP_0004646_0.352 | '
'HP_0010941_0.352 | HP_0041162_0.31 | HP_0041248_0.303'})
# test method
results = compiles_mapping_content(data_row_1, 'HP', 0.75)
self.assertIsInstance(results[0], list)
self.assertIsInstance(results[1], list)
self.assertEqual(len(results[0]), 3)
self.assertEqual(len(results[1]), 3)
self.assertEqual(results[0][2], 'CONCEPT_SYNONYM:fractured_nose')
self.assertEqual(results[1][2], 'HP_0041249_1.0')
return None
def tests_formats_mapping_evidence(self):
"""Tests the formats_mapping_evidence method."""
# prepare needed input data
ont_dict = {'label': {'abetalipoproteinemia': 'http://purl.obolibrary.org/obo/HP_0008181'},
'dbxref': {'snomedct_us:190787008': 'http://purl.obolibrary.org/obo/HP_0008181'},
'dbxref_type': {'snomedct_us:190787008': 'DbXref'},
'synonym': {'wet lung': 'http://purl.obolibrary.org/obo/HP_0100598'},
'synonym_type': {'wet lung': 'hasExactSynonym'}}
source_dict = {'snomed:190787008': 'DbXref*snomedct_us'}
result = ([['HP_0008181'], ['abetalipoproteinemia'],
'CONCEPT_DBXREF_snomed:190787008 | CONCEPT_DBXREF_umls:C0000744 | '
'CONCEPT_SOURCE_LABEL:abetalipoproteinemia | CONCEPT_SYNONYM:abetalipoproteinemia'],
[['HP_0008181'], ['abetalipoproteinemia'], 'HP_0008181_1.0'])
clin_data = {'CONCEPT_LABEL': 'Abetalipoproteinemia',
'CONCEPT_SOURCE_LABEL': 'Abetalipoproteinemia',
'CONCEPT_SYNONYM': 'Abetalipoproteinaemia | Apolipoprotein B deficiency',
'ANCESTOR_LABEL': 'Autosomal recessive hereditary disorder | Metabolic disorder | Finding'}
# test method
results = formats_mapping_evidence(ont_dict, source_dict, result, clin_data)
self.assertIsInstance(results, Tuple)
self.assertEqual(results[0], 'OBO_DbXref-OMOP_CONCEPT_CODE:snomed_190787008 | '
'OBO_DbXref-OMOP_CONCEPT_CODE:umls_C0000744 | '
'OBO_LABEL-OMOP_CONCEPT_LABEL:abetalipoproteinemia')
self.assertEqual(results[1], 'CONCEPT_SIMILARITY:HP_0008181_1.0')
return None
def tests_assigns_mapping_category_exact(self):
"""Tests the assigns_mapping_category method when evidence is exact."""
# set function input 1
mapping_info_1 = [['HP_0008181'], ['abetalipoproteinemia'],
'CONCEPT_DBXREF_snomed:190787008 | CONCEPT_DBXREF_umls:C0000744 | '
'CONCEPT_SOURCE_LABEL:abetalipoproteinemia | CONCEPT_SYNONYM:abetalipoproteinemia']
mapping_evidence_1 = 'OBO_DbXref-OMOP_CONCEPT_CODE:snomedct_us_190787008 | ' \
'OBO_DbXref-OMOP_CONCEPT_CODE:umls_C0000744 | ' \
'OBO_LABEL-OMOP_CONCEPT_LABEL:abetalipoproteinemia | ' \
'OBO_LABEL-OMOP_CONCEPT_SYNONYM:abetalipoproteinemia '
# set function inputs
mapping_info_2 = [['HP_0008181'], ['abetalipoproteinemia'], 'HP_0008181_1.0']
mapping_evidence_2 = 'CONCEPT_SIMILARITY:HP_0008181_1.0'
# test method - exact
results_1 = assigns_mapping_category(mapping_info_1, mapping_evidence_1)
self.assertIsInstance(results_1, str)
self.assertEqual(results_1, 'Automatic Exact - Concept')
# test method - similarity
results_2 = assigns_mapping_category(mapping_info_2, mapping_evidence_2)
self.assertIsInstance(results_2, str)
self.assertEqual(results_2, 'Manual Exact - Concept Similarity')
return None
def tests_assigns_mapping_category_similarity(self):
"""Tests the assigns_mapping_category method when evidence is from concept similarity."""
# set function inputs
mapping_info_2 = [['HP_0008181'], ['abetalipoproteinemia'], 'HP_0008181_1.0']
mapping_evidence_2 = 'CONCEPT_SIMILARITY:HP_0008181_1.0'
# test method - similarity
results_2 = assigns_mapping_category(mapping_info_2, mapping_evidence_2)
self.assertIsInstance(results_2, str)
self.assertEqual(results_2, 'Manual Exact - Concept Similarity')
return None
def tests_aggregates_mapping_results_full_SimResults(self):
"""Tests the aggregates_mapping_results method when there is similarity data."""
# set-up inputs
data1 = pd.DataFrame({'CONCEPT_ID': ['4098595'],
'CONCEPT_LABEL': ['Abetalipoproteinemia'],
'CONCEPT_SOURCE_LABEL': ['Abetalipoproteinemia'],
'CONCEPT_SYNONYM': ['Abetalipoproteinaemia | ABL - Abetalipoproteinaemia | '
'Abetalipoproteinemia (disorder) | Apolipoprotein B deficiency | '
'Abetalipoproteinemia | ABL - Abetalipoproteinemia'],
'CONCEPT_DBXREF_HP_URI': ['http://purl.obolibrary.org/obo/HP_0008181'],
'CONCEPT_DBXREF_HP_LABEL': ['abetalipoproteinemia'],
'CONCEPT_DBXREF_HP_EVIDENCE': ['CONCEPT_DBXREF_snomed:190787008'],
'CONCEPT_STR_HP_URI': ['http://purl.obolibrary.org/obo/HP_0008181'],
'CONCEPT_STR_HP_LABEL': ['abetalipoproteinemia'],
'CONCEPT_STR_HP_EVIDENCE': ['CONCEPT_SOURCE_LABEL:abetalipoproteinemia'],
'HP_SIM_ONT_URI': ['HP_0008181'],
'HP_SIM_ONT_LABEL': ['abetalipoproteinemia'],
'HP_SIM_ONT_EVIDENCE': ['HP_0008181_1.0']})
# test method when there is similarity data
results = aggregates_mapping_results(data1, ['hp'], self.ont_data, self.source_codes, 0.25)
self.assertIsInstance(results, pd.DataFrame)
self.assertEqual(len(results), 1)
self.assertEqual(len(results.columns), 21)
# check annotated values
self.assertEqual(results.at[0, 'AGGREGATED_HP_URI'], 'HP_0008181')
self.assertEqual(results.at[0, 'AGGREGATED_HP_LABEL'], 'abetalipoproteinemia')
self.assertEqual(results.at[0, 'AGGREGATED_HP_MAPPING'], 'Automatic Exact - Concept')
self.assertEqual(results.at[0, 'AGGREGATED_HP_EVIDENCE'], 'OBO_DbXref-OMOP_CONCEPT_CODE:snomed_190787008 | '
'OBO_LABEL-OMOP_CONCEPT_LABEL:abetalipoproteinemia')
self.assertEqual(results.at[0, 'SIMILARITY_HP_URI'], 'HP_0008181')
self.assertEqual(results.at[0, 'SIMILARITY_HP_LABEL'], 'abetalipoproteinemia')
self.assertEqual(results.at[0, 'SIMILARITY_HP_MAPPING'], 'Manual Exact - Concept Similarity')
self.assertEqual(results.at[0, 'SIMILARITY_HP_EVIDENCE'], 'CONCEPT_SIMILARITY:HP_0008181_1.0')
return None
def tests_aggregates_mapping_results_full_NoSimResults(self):
"""Tests the aggregates_mapping_results method when there is similarity data - no similarity data."""
# set-up inputs
data2 = pd.DataFrame({'CONCEPT_ID': ['4098595'],
'CONCEPT_LABEL': ['Abetalipoproteinemia'],
'CONCEPT_SOURCE_LABEL': ['Abetalipoproteinemia'],
'CONCEPT_SYNONYM': ['Abetalipoproteinaemia | ABL - Abetalipoproteinaemia | '
'Abetalipoproteinemia (disorder) | Apolipoprotein B deficiency | '
'Abetalipoproteinemia | ABL - Abetalipoproteinemia'],
'CONCEPT_DBXREF_HP_URI': ['http://purl.obolibrary.org/obo/HP_0008181'],
'CONCEPT_DBXREF_HP_LABEL': ['abetalipoproteinemia'],
'CONCEPT_DBXREF_HP_EVIDENCE': ['CONCEPT_DBXREF_snomed:190787008'],
'CONCEPT_STR_HP_URI': ['http://purl.obolibrary.org/obo/HP_0008181'],
'CONCEPT_STR_HP_LABEL': ['abetalipoproteinemia'],
'CONCEPT_STR_HP_EVIDENCE': ['CONCEPT_SOURCE_LABEL:abetalipoproteinemia']})
# test method when there is no similarity data
results = aggregates_mapping_results(data2, ['hp'], self.ont_data, self.source_codes, 0.25)
self.assertIsInstance(results, pd.DataFrame)
self.assertEqual(len(results), 1)
self.assertEqual(len(results.columns), 18)
# check annotated values
self.assertEqual(results.at[0, 'AGGREGATED_HP_URI'], 'HP_0008181')
self.assertEqual(results.at[0, 'AGGREGATED_HP_LABEL'], 'abetalipoproteinemia')
self.assertEqual(results.at[0, 'AGGREGATED_HP_MAPPING'], 'Automatic Exact - Concept')
self.assertEqual(results.at[0, 'AGGREGATED_HP_EVIDENCE'], 'OBO_DbXref-OMOP_CONCEPT_CODE:snomed_190787008 | '
'OBO_LABEL-OMOP_CONCEPT_LABEL:abetalipoproteinemia')
self.assertEqual(results.at[0, 'SIMILARITY_HP_URI'], None)
self.assertEqual(results.at[0, 'SIMILARITY_HP_LABEL'], None)
self.assertEqual(results.at[0, 'SIMILARITY_HP_MAPPING'], None)
self.assertEqual(results.at[0, 'SIMILARITY_HP_EVIDENCE'], None)
return None
|
# Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run the lottery ticket experiment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def run_experiment(experiment, max_prune_iterations, presets=None):
"""Run the lottery ticket experiment for the specified number of iterations.
Args:
experiment: an object implementing ExperimentBase
    max_prune_iterations: The number of pruning iterations to perform.
presets: (optional) The presets to use for the first iteration of training.
In the form of a dictionary where each key is the name of a tensor and
each value is a numpy array of the values to which that tensor should
be initialized.
"""
# Run once normally.
initial, final_weights, train_acc = experiment.train_once(0, presets=presets)
# Create the initial masks with no weights pruned.
masks = {}
for k, v in initial.items():
masks[k] = np.ones(v.shape)
# Begin the training loop.
for iteration in range(1, max_prune_iterations + 1):
if experiment.stop_pruning(train_acc):
break
# Prune the network.
masks = experiment.prune_masks(masks, final_weights)
# Train the network again.
_, final_weights, train_acc = experiment.train_once(iteration, presets=initial, masks=masks)
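# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a toy object exposing the
# three methods run_experiment calls above (train_once, stop_pruning,
# prune_masks) so the control flow can be exercised without the project's real
# ExperimentBase. The 20% magnitude pruning and the fake accuracy schedule are
# assumptions made purely for illustration.
# ---------------------------------------------------------------------------
class _ToyExperiment(object):
  """Minimal stand-in that only mimics the interface used by run_experiment."""

  def train_once(self, iteration, presets=None, masks=None):
    # Pretend to train: draw random weights, apply the mask, fake an accuracy.
    weights = {'w': np.random.randn(10, 10)}
    if masks is not None:
      weights = {k: v * masks[k] for k, v in weights.items()}
    train_acc = 0.5 + 0.05 * iteration
    return weights, weights, train_acc

  def stop_pruning(self, train_acc):
    return train_acc >= 0.9

  def prune_masks(self, masks, final_weights):
    # Zero out the 20% smallest-magnitude weights that are still unmasked.
    pruned = {}
    for k, w in final_weights.items():
      threshold = np.percentile(np.abs(w[masks[k] == 1]), 20)
      pruned[k] = np.where(np.abs(w) <= threshold, np.zeros(w.shape), masks[k])
    return pruned


if __name__ == '__main__':
  run_experiment(_ToyExperiment(), max_prune_iterations=5)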
|
from fastapi import FastAPI, Request, Security
import fastapi
from passlib.hash import pbkdf2_sha256
from fastapi.encoders import jsonable_encoder
from fastapi.exceptions import HTTPException
from fastapi.security import OAuth2PasswordBearer
import uuid
import os
from dotenv import load_dotenv, find_dotenv
from pydantic import BaseModel
import jwt
from app import cluster
load_dotenv(find_dotenv())
JWT_SECRET = os.environ.get('SECRET_KEY')
oauth2schema = OAuth2PasswordBearer(tokenUrl='/api/v1/user/login')
user_collection = cluster.web.users
class User(BaseModel):
_id: str
name: str
email: str
username: str
password: str
access_level: int
class UserCommands:
async def register(self, man:User):
data = man
user = {
"_id":uuid.uuid4().hex,
"name":data.name,
"email":data.email,
"username":data.username,
"password":data.password,
"access_level":0
# 'token': access_security.create_access_token(subject={'email':user['email'], 'password':user['password']})
}
user['password'] = pbkdf2_sha256.encrypt(user['password'])
if user_collection.find_one({"email":user['email']}):
raise HTTPException(status_code=400, detail='Email already in use')
if user_collection.find_one({"username":user['username']}):
raise HTTPException(status_code=400, detail='Username already in base')
try:
user_collection.insert_one(user)
except:
raise HTTPException(status_code=400, detail='Sign up failed. Contact administration')
return await self.get_token(user)
async def get_token(self, user:dict):
token = jwt.encode(user, JWT_SECRET)
return dict(access_token = token)
async def login(self, email:str, password:str):
user = user_collection.find_one({'email':email})
if user is None:
raise HTTPException(status_code=401, detail='No such email')
if user and pbkdf2_sha256.verify(password, user['password']):
return await self.get_token(user)
raise HTTPException(status_code=401, detail='Ivalid login data')
async def get_user_by_token(self,token:str = fastapi.Depends(oauth2schema)):
try:
payload = jwt.decode(token, JWT_SECRET, algorithms=['HS256'])
user = user_collection.find_one({'_id':payload['_id']})
except:
            raise HTTPException(status_code=401, detail='Invalid email or password')
return user |
from .base_backbone import BaseBackboneWraper
__all__ = ['BaseBackboneWraper']
|
from django.conf.urls import url
from habit import views
urlpatterns = [
url('^create/$', views.HabitCreateView.as_view(), name='create'),
url('^log/complete/$', views.DailyLogCompleteView.as_view(), name='log-complete'),
url('^$', views.HabitView.as_view(), name='index'),
]
|
# Script that builds and launches a tkinter UI for labeling similar points between two images.
import os
import tkinter as tk
from glob import glob
from random import choices
import torch
from PIL import ImageTk, Image
# Globals used to define state that event handlers might operate on.
imgs_list = []
widgets = None
cur_img_1, cur_img_2 = None, None
pil_img_1, pil_img_2 = None, None
pending_labels = []
mode_select_image_1 = True
img_count = 1
img_loc_1 = None
output_location = "results"
def update_mode_label():
global widgets, mode_select_image_1, img_count
image_widget_1, image_widget_2, mode_label = widgets
mode_str = "Select point in image 1" if mode_select_image_1 else "Select point in image 2"
mode_label.config(text="%s; Saved images: %i" % (mode_str, img_count))
# Handles key presses, which are interpreted as requests to categorize a currently active image patch.
def key_press(event):
if event.char == '\t':
next_images()
update_mode_label()
def click(event):
global img_loc_1, mode_select_image_1, pil_img_1, pil_img_2, img_count
x, y = event.x, event.y
if x > 512 or y > 512:
print(f"Bounds error {x} {y}")
return
print(f"Detected click. {x} {y}")
if mode_select_image_1:
img_loc_1 = x, y
mode_select_image_1 = False
else:
ofolder = f'{output_location}/{img_count}'
os.makedirs(ofolder)
pil_img_1.save(os.path.join(ofolder, "1.jpg"))
pil_img_2.save(os.path.join(ofolder, "2.jpg"))
torch.save([img_loc_1, (x,y)], os.path.join(ofolder, "coords.pth"))
img_count = img_count + 1
mode_select_image_1 = True
next_images()
update_mode_label()
def load_image_into_pane(img_path, pane, size=512):
pil_img = Image.open(img_path)
pil_img = pil_img.resize((size,size))
tk_picture = ImageTk.PhotoImage(pil_img)
pane.image = tk_picture
pane.configure(image=tk_picture)
return pil_img
def next_images():
global imgs_list, widgets, cur_img_1, cur_img_2, pil_img_1, pil_img_2
image_widget_1, image_widget_2, mode_label = widgets
cur_img_1, cur_img_2 = choices(imgs_list, k=2)
pil_img_1 = load_image_into_pane(cur_img_1, image_widget_1)
pil_img_2 = load_image_into_pane(cur_img_2, image_widget_2)
if __name__ == '__main__':
os.makedirs(output_location, exist_ok=True)
window = tk.Tk()
window.title("Image pair labeler UI")
window.geometry('1024x620+100+100')
# Load images
imgs_list = glob("E:\\4k6k\\datasets\\ns_images\\imagesets\\imageset_1024_square_with_new\\*.jpg")
# Photo view.
image_widget_1 = tk.Label(window)
image_widget_1.place(x=0, y=0, width=512, height=512)
image_widget_2 = tk.Label(window)
image_widget_2.place(x=512, y=0, width=512, height=512)
# Labels
mode_label = tk.Label(window, text="", anchor="w")
mode_label.place(x=20, y=590, width=400, height=20)
widgets = (image_widget_1, image_widget_2, mode_label)
window.bind("<Tab>", key_press) # Skip current patch
window.bind("<Button-1>", click)
next_images()
update_mode_label()
window.mainloop()
|
# Count the multiples of X in [A, B]: B // X - A // X counts multiples in (A, B],
# so add 1 when A itself is divisible by X.
A, B, X = map(int, input().split())
total = B // X - A // X
if A % X == 0: total += 1
print(total) |