max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---
html_form_parser/__init__.py | gkunde/py_html_form_parser | 0 | 12795951 | <gh_stars>0
import re
from typing import List
from bs4 import BeautifulSoup, Tag
from html_form_parser.models.form_data import FormData
from html_form_parser.models.form_data_entry import FormDataEntry
from html_form_parser.parsers import form_data_entry_parser
class HtmlFormParser:
"""
    Parse and extract HTML forms from an HTML page. (A usage sketch is appended at the end of this module.)
"""
def __init__(self, markup: str = None, parser: str = None):
"""
:param markup: A string containing HTML markup.
:param parser: A string containing a valid BeautifulSoup parsing library name.
"""
self.forms = []
if markup is not None:
self.parse(markup, parser)
def parse(self, markup: str, parser: str = None) -> List[FormData]:
"""
        Convert an HTML page into FormData objects.
        :param markup: A string containing HTML markup.
        :param parser: A string property to select a BeautifulSoup parser.
        :returns: A collection of FormData objects. The same objects are
stored within the object.
"""
if parser is None:
parser = "html5lib"
parsers = [
form_data_entry_parser.SelectableInputFormElementParser(),
form_data_entry_parser.ColorInputFormElementParser(),
form_data_entry_parser.RangeInputFormElementParser(),
form_data_entry_parser.SubmitInputFormElementParser(),
form_data_entry_parser.ButtonInputFormElementParser(),
form_data_entry_parser.ImageInputFormElementParser(),
form_data_entry_parser.ButtonFormElementParser(),
form_data_entry_parser.InputFormElementParser(),
form_data_entry_parser.SelectFormElementParser(),
form_data_entry_parser.TextareaFormElementParser(),
form_data_entry_parser.FormDataEntryParser(),
]
bs4_parser = BeautifulSoup(markup, parser)
parsed_forms = bs4_parser.find_all("form")
parsed_fields = bs4_parser.find_all(("button", "input", "select", "textarea", ))
form_id_map = {}
for index, parsed_form in enumerate(parsed_forms):
if "id" in parsed_form.attrs:
form_id_map[parsed_form.attrs["id"]] = index
self.forms.append(self._create_form_data(parsed_form))
# Fields associate to the nearest containing form node, or specify their form owner by attribute.
# https://html.spec.whatwg.org/multipage/form-control-infrastructure.html#association-of-controls-and-forms
for parsed_field in parsed_fields:
form_index = None
if "form" in parsed_field.attrs:
form_index = form_id_map.get(parsed_field.attrs["form"], None)
if form_index is None:
parent_form = parsed_field.find_parent("form")
if parent_form is not None:
                    form_index = parsed_forms.index(parent_form)
if form_index is not None:
self.forms[form_index].fields.extend(
self._create_form_data_field(parsed_field, parsers))
return self.forms
def _create_form_data(self, parsed_form: Tag) -> FormData:
"""
Create Form Data from parsed form node object.
:param parsed_form: A BeautifulSoup object containing a form.
:returns: A FormData object
"""
form_data = FormData()
for key, val in parsed_form.attrs.items():
match_key = key.lower()
if match_key == "name":
form_data.name = val
elif match_key == "action":
form_data.action = val.strip()
elif match_key == "method":
form_data.method = val.strip().upper()
elif match_key == "enctype":
form_data.enctype = val.strip()
return form_data
def _create_form_data_field(self, parsed_form_field: Tag, field_parsers: List[form_data_entry_parser.InputFormElementParser] = None) -> List[FormDataEntry]:
"""
        Create Form Data Entries from a parsed form input element.
:param parsed_form_field: A BeautifulSoup object containing an input field.
:param field_parsers: A collection of HTML input element parsers.
:returns: A collection of Form Data Entry objects
"""
field_type = parsed_form_field.attrs.get("type", None)
for parser in field_parsers:
if field_type is not None and parser.suitable(parsed_form_field.name, field_type.strip().lower()):
return parser.parse(parsed_form_field)
elif parser.suitable(parsed_form_field.name, None):
return parser.parse(parsed_form_field)
return []
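# --- Hedged usage sketch (not part of the original module) ---
# Assumes bs4 and html5lib are installed and that FormData exposes the `action`,
# `method`, and `fields` attributes set above; the sample markup is made up.
if __name__ == "__main__":
    sample_markup = (
        '<form id="login" action="/login" method="post">'
        '<input type="text" name="user" value="alice">'
        '</form>'
    )
    form_parser = HtmlFormParser(sample_markup)
    for parsed in form_parser.forms:
        print(parsed.action, parsed.method, len(parsed.fields))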
| 2.953125 | 3 |
xicam/gui/connections/__init__.py | JulReinhardt/Xi-cam | 6 | 12795952 | <reponame>JulReinhardt/Xi-cam
import requests
from qtpy.QtCore import *
from qtpy.QtGui import *
from qtpy.QtWidgets import *
from xicam.gui.static import path
from xicam.gui.widgets.searchlineedit import SearchLineEdit
from copy import deepcopy
from xicam.plugins import SettingsPlugin, manager
class ConnectionSettingsPlugin(SettingsPlugin):
"""
A built-in settings plugin to configure connections to other hosts
"""
def __init__(self):
# Setup UI
self.widget = QWidget()
self.widget.setLayout(QHBoxLayout())
self.listview = QListView()
self.connectionsmodel = QStandardItemModel()
self.listview.setModel(self.connectionsmodel)
self.plugintoolbar = QToolBar()
self.plugintoolbar.setOrientation(Qt.Vertical)
self.plugintoolbar.addAction(QIcon(str(path("icons/plus.png"))), "Add plugin", self.add_credential)
self.plugintoolbar.addAction(QIcon(str(path("icons/minus.png"))), "Remove plugin", self.remove_credential)
self.widget.layout().addWidget(self.listview)
self.widget.layout().addWidget(self.plugintoolbar)
super(ConnectionSettingsPlugin, self).__init__(QIcon(str(path("icons/server.png"))), "Connections", self.widget)
def add_credential(self):
"""
Open the CamMart install dialog
"""
self._dialog = CredentialDialog()
self._dialog.sigAddCredential.connect(self._add_credential)
self._dialog.exec_()
def remove_credential(self):
"""
Removes a credential
"""
if self.listview.selectedIndexes():
self.connectionsmodel.removeRow(self.listview.selectedIndexes()[0].row())
def _add_credential(self, name: str, credential: dict):
item = QStandardItem(name)
item.credential = credential
item.name = name
self.connectionsmodel.appendRow(item)
self.connectionsmodel.dataChanged.emit(item.index(), item.index())
def toState(self):
credentials = deepcopy(self.credentials)
for name, credential in credentials.items():
if credential.get("savepassword", False):
credential["password"] = None
return credentials
def fromState(self, state):
self.connectionsmodel.clear()
for name, credential in state.items():
item = QStandardItem(name)
item.credential = credential
item.name = name
self.connectionsmodel.appendRow(item)
self.listview.reset()
@property
def credentials(self):
return {
self.connectionsmodel.item(i).name: self.connectionsmodel.item(i).credential
for i in range(self.connectionsmodel.rowCount())
}
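# Hedged illustration (not part of the plugin): the state handled by toState/fromState
# above is a plain dict keyed by profile name; the values here are made-up examples.
_EXAMPLE_CONNECTION_STATE = {
    "my-beamline": {
        "host": "example.lbl.gov",
        "username": "alice",
        "password": None,  # toState above nulls the password when "savepassword" is set
        "savepassword": True,
    }
}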
class CredentialDialog(QDialog):
sigAddCredential = Signal(str, dict)
sigConnect = Signal(dict)
def __init__(self, addmode=True):
super(CredentialDialog, self).__init__()
# Set size and position
# self.setGeometry(0, 0, 800, 500)
frameGm = self.frameGeometry()
screen = QApplication.desktop().screenNumber(QApplication.desktop().cursor().pos())
centerPoint = QApplication.desktop().screenGeometry(screen).center()
frameGm.moveCenter(centerPoint)
self.move(frameGm.topLeft())
# Setup fields
self.host = QLineEdit()
self.profiles = QComboBox()
self.profiles.addItem("New...")
self.profilename = QLineEdit()
self.username = QLineEdit()
self.password = QLineEdit()
self.password.setEchoMode(QLineEdit.Password)
self.savepassword = QCheckBox("Save Password")
self.rememberprofile = QCheckBox("Remember Profile")
# Setup dialog buttons
self.addButton = QPushButton("&Add")
self.connectButton = QPushButton("C&onnect")
self.cancelButton = QPushButton("&Cancel")
self.addButton.clicked.connect(self.add)
self.connectButton.clicked.connect(self.connect)
self.cancelButton.clicked.connect(self.close)
self.profiles.currentTextChanged.connect(self.loadProfile)
self.buttonboxWidget = QDialogButtonBox()
if addmode:
self.buttonboxWidget.addButton(self.addButton, QDialogButtonBox.AcceptRole)
else:
self.buttonboxWidget.addButton(self.connectButton, QDialogButtonBox.AcceptRole)
self.buttonboxWidget.addButton(self.cancelButton, QDialogButtonBox.RejectRole)
# Compose main layout
mainLayout = QFormLayout()
if not addmode:
mainLayout.addRow("Profile", self.profiles)
mainLayout.addRow("Profile", self.profilename)
mainLayout.addRow("Host", self.host)
mainLayout.addRow("Username", self.username)
mainLayout.addRow("Password", self.password)
mainLayout.addRow(self.savepassword)
if not addmode:
mainLayout.addRow(self.rememberprofile)
mainLayout.addRow(self.buttonboxWidget)
# Populate profiles
for name, credential in manager.get_plugin_by_name("Connections", "SettingsPlugin").credentials.items():
self.profiles.addItem(name)
self.setLayout(mainLayout)
self.setWindowTitle("Add Connection...")
# Set modality
self.setModal(True)
def loadProfile(self):
profilename = self.profiles.currentText()
if profilename == "New...":
self.username.setEnabled(True)
self.password.setEnabled(True)
self.host.setEnabled(True)
self.savepassword.setEnabled(True)
self.rememberprofile.setVisible(True)
else:
credential = manager.get_plugin_by_name("Connections", "SettingsPlugin").credentials[profilename]
self.username.setText(credential["username"])
self.host.setText(credential["host"])
self.password.setText(credential["password"])
self.savepassword.setChecked(credential["savepassword"])
self.profilename.setText(profilename)
self.username.setEnabled(False)
self.password.setEnabled(False)
self.host.setEnabled(False)
self.savepassword.setEnabled(False)
self.rememberprofile.setVisible(False)
def add(self):
self.sigAddCredential.emit(
self.profilename.text(),
{
"host": self.host.text(),
"username": self.username.text(),
"password": self.password.text(),
"savepassword": False,
},
)
self.accept()
def connect(self):
if self.rememberprofile.isChecked():
self.add()
self.sigConnect.emit(
{
"host": self.host.text(),
"username": self.username.text(),
"password": self.password.text(),
"savepassword": False,
}
)
self.accept() # Segfault?
class ConnectDelegate(QItemDelegate):
def __init__(self, parent):
super(ConnectDelegate, self).__init__(parent)
self._parent = parent
def paint(self, painter, option, index):
if not self._parent.indexWidget(index):
button = QToolButton(self.parent())
button.setAutoRaise(True)
button.setText("Delete Operation")
            button.setIcon(QIcon(str(path("icons/trash.png"))))
sp = QSizePolicy(QSizePolicy.Minimum, QSizePolicy.Minimum)
sp.setWidthForHeight(True)
button.setSizePolicy(sp)
button.clicked.connect(index.data())
self._parent.setIndexWidget(index, button)
| 1.84375 | 2 |
testing_dataset/download_vot2018lt.py | tsingqguo/AttackTracker | 11 | 12795953 | import json
import os
import requests
data_file = './description.json'
with open(data_file) as f:
data = json.load(f)
home_page = data['homepage']
seqs = data['sequences']
for v in seqs:
link = '%s%s' % (home_page,v['annotations']['url'])
print('download %s' % link)
os.system('wget %s -O %s_ann.zip' % (link, v['name']))
link = '%s%s' % (home_page,v['channels']['color']['url'])
print('download %s' % link)
os.system('wget %s -O %s_chn.zip' % (link, v['name']))
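# Hedged alternative (not part of the original script): `requests` is imported above
# but never used; the same files could be fetched without shelling out to wget, e.g.:
def fetch(url, filename):
    resp = requests.get(url, stream=True)
    resp.raise_for_status()
    with open(filename, 'wb') as fh:
        for chunk in resp.iter_content(chunk_size=1 << 20):
            fh.write(chunk)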
| 2.75 | 3 |
effects/police.py | vexofp/hyperion | 725 | 12795954 | import hyperion
import time
import colorsys
# Get the parameters
rotationTime = float(hyperion.args.get('rotation-time', 2.0))
colorOne = hyperion.args.get('color_one', (255,0,0))
colorTwo = hyperion.args.get('color_two', (0,0,255))
colorsCount = hyperion.args.get('colors_count', hyperion.ledCount/2)
reverse = bool(hyperion.args.get('reverse', False))
# Check parameters
rotationTime = max(0.1, rotationTime)
colorsCount = min(hyperion.ledCount/2, colorsCount)
# Initialize the led data
hsv1 = colorsys.rgb_to_hsv(colorOne[0]/255.0, colorOne[1]/255.0, colorOne[2]/255.0)
hsv2 = colorsys.rgb_to_hsv(colorTwo[0]/255.0, colorTwo[1]/255.0, colorTwo[2]/255.0)
colorBlack = (0,0,0)
ledData = bytearray()
for i in range(hyperion.ledCount):
if i <= colorsCount:
rgb = colorsys.hsv_to_rgb(hsv1[0], hsv1[1], hsv1[2])
elif (i >= hyperion.ledCount/2-1) & (i < (hyperion.ledCount/2) + colorsCount):
rgb = colorsys.hsv_to_rgb(hsv2[0], hsv2[1], hsv2[2])
else:
rgb = colorBlack
ledData += bytearray((int(255*rgb[0]), int(255*rgb[1]), int(255*rgb[2])))
# Calculate the sleep time and rotation increment
increment = 3
sleepTime = rotationTime / hyperion.ledCount
while sleepTime < 0.05:
increment *= 2
sleepTime *= 2
increment %= hyperion.ledCount
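# Worked example (added note, values assumed): with 50 LEDs (150 bytes of RGB data) and
# rotationTime = 2.0s, sleepTime starts at 0.04s; one doubling gives increment = 6 bytes
# (2 LEDs) and sleepTime = 0.08s, and 150 / 6 = 25 shifts of 0.08s complete one
# revolution in the requested 2.0s.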
# Switch direction if needed
if reverse:
increment = -increment
# Start the write data loop
while not hyperion.abort():
hyperion.setColor(ledData)
ledData = ledData[-increment:] + ledData[:-increment]
time.sleep(sleepTime)
| 2.90625 | 3 |
labml_nn/transformers/alibi/experiment.py | BioGeek/annotated_deep_learning_paper_implementations | 3,714 | 12795955 | """
---
title: Attention with Linear Biases (ALiBi) Experiment
summary: This experiment trains an Attention with Linear Biases (ALiBi) based model on Tiny Shakespeare dataset.
---
# [Attention with Linear Biases (ALiBi)](index.html) Experiment
This is an annotated PyTorch experiment to train a [ALiBi model](index.html).
This is based on
[our GPT model](../gpt/index.html).
[](https://app.labml.ai/run/e87bec2a074911ec82cdd1759f10c925)
"""
import torch
from torch.utils.data import DataLoader
from labml import experiment, tracker
from labml.configs import option, calculate
from labml_helpers.datasets.text import SequentialUnBatchedDataset
from labml_nn.transformers.alibi import AlibiMultiHeadAttention
from labml_nn.experiments.nlp_autoregression import transpose_batch
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.gpt import Configs as GPTConfigs
class Configs(GPTConfigs):
"""
## Configurations
We extend [GPT configurations](../gpt/index.html) and change the attention mechanism.
"""
# ALiBi based transformer (defined below)
transformer: TransformerConfigs = 'GPT_ALiBi'
# Longer validation set
valid_seq_len: int = 128
valid_loader = 'shuffled_longer_valid_loader'
def other_metrics(self, output: torch.Tensor, target: torch.Tensor):
"""
Log losses at the initial and final tokens
"""
# If there are more tokens that the training sequence length (during validation),
if self.seq_len < output.shape[0]:
# Log the loss at training sequence length
tracker.add(f'loss.{self.seq_len - 1}.', self.loss_func(output[self.seq_len - 1], target[self.seq_len - 1]))
# Log the loss at the first token
tracker.add(f'loss.0.', self.loss_func(output[0], target[0]))
# Log the loss at the final token
tracker.add(f'loss.{int(output.shape[0]) - 1}.', self.loss_func(output[-1], target[-1]))
def _alibi_mha(c: TransformerConfigs):
"""
Create an ALiBi attention module
"""
return AlibiMultiHeadAttention(c.n_heads, c.d_model, dropout_prob=c.dropout)
# Set all attention mechanisms to ALiBi
calculate(TransformerConfigs.encoder_attn, 'alibi_mha', _alibi_mha)
calculate(TransformerConfigs.decoder_attn, 'alibi_mha', _alibi_mha)
calculate(TransformerConfigs.decoder_mem_attn, 'alibi_mha', _alibi_mha)
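# Hedged illustration (independent of the labml_nn implementation used above): ALiBi adds
# a per-head linear penalty to the attention logits. For `n_heads` heads (assumed to be a
# power of two) the commonly used slopes form the geometric sequence 2^(-8/n), 2^(-16/n), ...
def _alibi_bias_sketch(n_heads: int, seq_len: int) -> torch.Tensor:
    slopes = torch.tensor([2 ** (-8 * (h + 1) / n_heads) for h in range(n_heads)])
    # signed distance of each key position from the query position (negative for past keys)
    distance = torch.arange(seq_len)[None, :] - torch.arange(seq_len)[:, None]
    return slopes[:, None, None] * distance[None, :, :]  # shape: [heads, query, key]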
@option(Configs.valid_loader)
def shuffled_longer_valid_loader(c: Configs):
"""
Shuffled validation data loader with `valid_seq_len` sequence length
"""
return DataLoader(SequentialUnBatchedDataset(text=c.text.valid,
dataset=c.text,
seq_len=c.valid_seq_len),
batch_size=c.batch_size,
collate_fn=transpose_batch,
shuffle=True)
@option(Configs.transformer, 'GPT_ALiBi')
def _transformer_configs(c: Configs):
"""
### ALiBi based Transformer configurations
"""
# We use our
# [configurable transformer implementation](../configs.html#TransformerConfigs)
conf = TransformerConfigs()
# Set the vocabulary sizes for embeddings and generating logits
conf.n_src_vocab = c.n_tokens
conf.n_tgt_vocab = c.n_tokens
# GPT uses GELU activation for position wise feedforward
conf.ffn.activation = 'GELU'
# ALiBi doesn't use positional embeddings
conf.src_embed = 'no_pos'
conf.tgt_embed = 'no_pos'
# Set all attention mechanisms to ALiBi
conf.encoder_attn = 'alibi_mha'
conf.decoder_attn = 'alibi_mha'
conf.decoder_mem_attn = 'alibi_mha'
#
return conf
def main():
# Create experiment
experiment.create(name="gpt_alibi")
# Create configs
conf = Configs()
# Override configurations
experiment.configs(conf, {
# Use character level tokenizer
'tokenizer': 'character',
# Prompt separator is blank
'prompt_separator': '',
# Starting prompt for sampling
'prompt': 'It is ',
# Use Tiny Shakespeare dataset
'text': 'tiny_shakespeare',
# 'text': 'tiny_shakespeare_no_split',
        # Use a context size of $64$
'seq_len': 64,
        # Use a longer validation context of $80$
'valid_seq_len': 80,
        # Train for $128$ epochs
'epochs': 128,
# Batch size $128$
'batch_size': 128,
# Switch between training and validation for $10$ times
# per epoch
'inner_iterations': 10,
# Transformer configurations
'transformer.d_model': 128,
'transformer.ffn.d_ff': 512,
'transformer.n_heads': 8,
'transformer.n_layers': 4,
'transformer.dropout': 0.1,
})
# Set models for saving and loading
experiment.add_pytorch_models({'model': conf.model})
# Start the experiment
with experiment.start():
# Run training
conf.run()
#
if __name__ == '__main__':
main()
| 2.609375 | 3 |
code_week8_615_621/remove_duplicates_from_sorted_list.py | dylanlee101/leetcode | 0 | 12795956 | <gh_stars>0
'''
Given a sorted linked list, delete all duplicate elements so that each element appears only once.
Example 1:
Input: 1->1->2
Output: 1->2
Example 2:
Input: 1->1->2->3->3
Output: 1->2->3
Source: LeetCode
Link: https://leetcode-cn.com/problems/remove-duplicates-from-sorted-list
'''
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        temp = head
        while temp and temp.next:
            if temp.val == temp.next.val:
                temp.next = temp.next.next
            else:
                temp = temp.next
        return head | 3.6875 | 4 |
semia/generator.py | ddlee-cn/SemIA | 3 | 12795957 | <reponame>ddlee-cn/SemIA
from torch.autograd import Variable as Vb
from semia.network import *
class Encoder(BaseNetwork):
"""
Encoder for both Condition Signal(Segmentation map) and Image(+Noise)
params: num_down, base_nc, out_nc
return: [features] + [Code]
"""
def __init__(self, num_down, base_nc, in_nc, out_nc,
input_FiLM=False, out_feats=False, out_shapes=False, use_VAE=False,
use_attn=False, code_in=None, code_fc=None):
super().__init__()
self.num_down = num_down
self.base_nc = base_nc
self.in_nc = in_nc
self.out_nc = out_nc
self.input_FiLM = input_FiLM # using input_FiLM as affine transformation
self.out_feats = out_feats # output feats
self.out_shapes = out_shapes # output feats shape for inverse
self.use_VAE = use_VAE # produce distribution for code
self.use_attn = use_attn # use attention mechanism
if self.use_VAE:
self.vae_tail_fc_in = code_in # the flattened feat_cond(smallest) length
self.vae_tail_fc_nc = code_fc # mu and logvar length
# Similar to InGAN, increase kernel_size of entry block to 7
self.head_block = ConvBaseBlock(self.in_nc, self.base_nc, kernel=7, pad=3)
down_block = []
for i in range(self.num_down):
# double channels after reduce spatial size
nc_factor = 2 ** i
down_block.append(DownConvBlock(self.base_nc * nc_factor, self.base_nc * nc_factor * 2))
self.down_block = nn.ModuleList(down_block)
if self.use_VAE:
self.tail_block = VAEBlock(self.vae_tail_fc_in, self.vae_tail_fc_nc)
else:
self.tail_block = ConvBaseBlock(self.base_nc * (2 ** self.num_down),
self.out_nc)
if self.use_attn:
attn_layers = []
for i in range(self.num_down):
# double channels after reduce spatial size
nc_factor = 2 ** (i + 1)
attn_layers.append(Cond_Attn(self.base_nc * nc_factor))
self.attn_layers = nn.ModuleList(attn_layers)
def calc_mean_std(self, feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def affine_transformation(self, X, alpha, beta):
x = X.clone()
mean, std = self.calc_mean_std(x)
mean = mean.expand_as(x)
std = std.expand_as(x)
return alpha * ((x - mean) / std) + beta
def reparameterize(self, mu, logvar):
if self.training:
std = logvar.mul(0.5).exp_()
eps = Vb(std.data.new(std.size()).normal_())
return eps.mul(std).add_(mu)
else:
return Vb(mu.data.new(mu.size()).normal_())
def forward(self, input, noise=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None, q_feats=None):
# input: cond or prev_img
if self.input_FiLM:
assert len(FiLM_alphas) == len(FiLM_betas) == self.num_down, "FiLM_alphas and FiLM_betas mismatch"
if self.use_attn:
assert len(k_feats) == len(q_feats) == self.num_down, "k_feats and q_feats mismatch"
feats, shapes, attn_maps = None, None, None
if self.out_feats:
feats = []
if self.out_shapes:
shapes = []
# do not store attn_maps
# if self.use_attn:
# attn_maps = []
if noise is not None:
input = torch.cat((input, noise), 1)
x = self.head_block(input)
for i in range(self.num_down):
if self.out_shapes:
# Output feature shape before DownSample
shapes.append(x.shape[-2:])
x = self.down_block[i](x)
if self.input_FiLM:
x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i])
if self.use_attn:
x, attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i])
# attn_maps.append(attn_map)
if self.out_feats:
# Out feat after DownSample and FiLM/Attention
feats.append(x)
if self.use_VAE:
mu, logvar = self.tail_block(x)
out = self.reparameterize(mu, logvar)
out = out.view()
else:
out = self.tail_block(x)
return out, feats, shapes, attn_maps
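# Hedged illustration (not part of the original module; shapes and sizes are made up):
# affine_transformation above instance-normalizes a feature map and then rescales and
# shifts it per channel, i.e. a FiLM-style modulation. Assumes the blocks imported from
# semia.network accept (in_nc, out_nc) as used in Encoder.__init__.
if __name__ == "__main__":
    _enc = Encoder(num_down=2, base_nc=4, in_nc=3, out_nc=8)
    _feat = torch.randn(1, 4, 8, 8)
    _alpha = torch.ones(1, 4, 1, 1) * 2.0
    _beta = torch.zeros(1, 4, 1, 1)
    _mod = _enc.affine_transformation(_feat, _alpha, _beta)
    # per-channel mean is ~0 and std is ~2 after the modulation
    print(_mod.mean(dim=(0, 2, 3)), _mod.std(dim=(0, 2, 3)))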
class Decoder(BaseNetwork):
"""
Decoder for Image
input: feature from encoder
parmas: num_up, base_nc, in_nc
return: Image
U-Net skip connections help little.
"""
def __init__(self, num_up, base_nc, in_nc, out_nc,
input_FiLM=False, out_feats=False, in_shapes=False, skip_feats=False, use_attn=False):
super().__init__()
self.num_up = num_up
self.base_nc = base_nc
self.in_nc = in_nc
self.out_nc = out_nc
self.input_FiLM = input_FiLM # whether input FiLMed factors
self.out_feats = out_feats # whether output decoder features
self.in_shapes = in_shapes # whether interpolate feats according to in_shapes
self.skip_feats = skip_feats # whether concat skip feats from encoder
self.use_attn = use_attn # use attention mechanism
# Decoder's head block out_nc = Encoder's tail block in_nc
# self.base_nc * (2 ** self.num_up) = self.base_nc * (2 ** self.num_down)
self.head_block = ConvBaseBlock(self.in_nc, self.base_nc * (2 ** self.num_up))
up_block = []
for i in range(self.num_up):
nc_factor = 2 ** (self.num_up - i)
if skip_feats:
# double UpConv input channel, and half output channel
# for concating skip feats from encoder
# torch.cat(feat, skip_feat) -> feat_next
# 256 -> 64, 128 -> 32
up_block.append(UpConvBlock(self.base_nc * nc_factor * 2, int(self.base_nc * nc_factor // 2)))
else:
up_block.append(UpConvBlock(self.base_nc * nc_factor, int(self.base_nc * nc_factor // 2)))
self.up_block = nn.ModuleList(up_block)
# Similar to InGAN, increase kernel_size of tail block of decoder to 7
# Due to blurry edges, reduce the tail block kernel size back to 3
self.tail_block = ConvBaseBlock(self.base_nc, self.out_nc, kernel=3, pad=1)
if self.use_attn:
attn_layers = []
for i in range(self.num_up):
# double channels after reduce spatial size
nc_factor = 2 ** (self.num_up - i)
attn_layers.append(Cond_Attn(self.base_nc * nc_factor))
self.attn_layers = nn.ModuleList(attn_layers)
def calc_mean_std(self, feat, eps=1e-5):
# eps is a small value added to the variance to avoid divide-by-zero.
size = feat.size()
assert (len(size) == 4)
N, C = size[:2]
feat_var = feat.view(N, C, -1).var(dim=2) + eps
feat_std = feat_var.sqrt().view(N, C, 1, 1)
feat_mean = feat.view(N, C, -1).mean(dim=2).view(N, C, 1, 1)
return feat_mean, feat_std
def affine_transformation(self, X, alpha, beta):
x = X.clone()
mean, std = self.calc_mean_std(x)
mean = mean.expand_as(x)
std = std.expand_as(x)
return alpha * ((x - mean) / std) + beta
def forward(self, code, skip_feats=None, in_shapes=None, FiLM_alphas=None, FiLM_betas=None, k_feats=None,
q_feats=None):
# code: code_img or code_cond
if skip_feats is not None:
assert len(skip_feats) == self.num_up, "skip feats number mismatch"
if self.in_shapes:
if in_shapes is not None:
assert len(in_shapes) == self.num_up, "in_shapes number mismatch self.num_up"
else:
raise ValueError("in_shapes not in Input")
if self.input_FiLM:
assert len(FiLM_alphas) == len(FiLM_betas) == self.num_up, "FiLM_alphas and FiLM_betas mismatch"
if self.use_attn:
assert len(k_feats) == len(q_feats) == self.num_up, "k_feats and q_feats mismatch"
feats, attn_maps = None, None
if self.out_feats:
feats = []
# if self.use_attn:
# attn_maps = []
x = self.head_block(code)
for i in range(self.num_up):
if self.input_FiLM:
x = self.affine_transformation(x, FiLM_alphas[i], FiLM_betas[i])
if self.use_attn:
x, attn_map = self.attn_layers[i](x, k_feats[i], q_feats[i])
# attn_maps.append(attn_map)
if self.out_feats:
# Out feat before UpSample/Concat and after FiLM/Attention
feats.append(x)
if skip_feats is not None:
# merge skip feats before UpSample
skip_feat = skip_feats[self.num_up - i - 1]
if self.input_FiLM:
# also apply FiLM params on skip_feats
skip_feat = self.affine_transformation(skip_feat,
FiLM_alphas[i], FiLM_betas[i])
if self.use_attn:
skip_feat, attn_map = self.attn_layers[i](skip_feat, k_feats[i], q_feats[i])
# attn_maps.append(attn_map)
x = torch.cat((x, skip_feat), 1)
x = self.up_block[i](x)
if self.in_shapes:
# interpolate feature size after UpSample
# print(x.shape, in_shapes[self.num_up-i-1])
# torch.Size([1, 64, 6, 10]) torch.Size([6, 10])
# torch.Size([1, 32, 12, 20]) torch.Size([12, 20])
# torch.Size([1, 16, 24, 40]) torch.Size([25, 40])
x = F.interpolate(x, size=in_shapes[self.num_up - i - 1], mode='nearest')
out = self.tail_block(x)
return out, feats
class Cond_Attn(nn.Module):
"""
Cond-Attention Module
Attetion module may replace SFT module, but takes much more memory and brings a lot computational burden
cond_feats as Key
aug_cond_feats as Query
image_feats as Value
"""
def __init__(self, in_dim, bottleneck_factor=32):
super(Cond_Attn, self).__init__()
self.chanel_in = in_dim
self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1)
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // bottleneck_factor, kernel_size=1)
self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1) #
def forward(self, x, k, q):
"""
inputs :
x : input feature maps( B X C X W X H)
k : cond feature maps( B X C X W X H)
q : aug cond feature maps( B X C X W X H)
k -> q as Transformation
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize, C, width, height = x.size()
proj_query = self.query_conv(q).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X (W*H) X C
proj_key = self.key_conv(k).view(m_batchsize, -1, width * height) # B X C x (*W*H)
energy = torch.bmm(proj_query, proj_key) # transpose check
attention = self.softmax(energy) # BX (N) X (N) every pixel has W*H scores
proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X N
out = torch.bmm(proj_value, attention.permute(0, 2, 1))
out = out.view(m_batchsize, C, width, height)
out = self.gamma * out + x
return out, attention
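# Hedged shape check (not part of the original module): Cond_Attn keeps the spatial
# shape of its input and attends over all W*H positions; the sizes below are made up.
if __name__ == "__main__":
    _attn = Cond_Attn(in_dim=64)
    _x = torch.randn(1, 64, 8, 8)   # image feature (value)
    _k = torch.randn(1, 64, 8, 8)   # condition feature (key)
    _q = torch.randn(1, 64, 8, 8)   # augmented condition feature (query)
    _out, _map = _attn(_x, _k, _q)
    assert _out.shape == (1, 64, 8, 8) and _map.shape == (1, 64, 64)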
class FiLM(BaseNetwork):
"""
FiLMModule(Semantic Feature Translation layer)
Our version seperates the scaling and shiftting, just keep the original naming
"""
def __init__(self, base_nc, num_feats, double=False, reverse=False):
super().__init__()
self.base_nc = base_nc
self.num_feats = num_feats
self.reverse = reverse # assume feats from big to small(more ncs)
self.double = double # whether the rel_feats are concated, instead of diff/ratio
bottlenecks = []
for i in range(num_feats):
# nc_factor
nc_factor = 2 ** (num_feats - i)
if reverse:
nc_factor = 2 ** (i + 1)
# use base_nc * nc_factor // 2 as bottleneck depth
# while Guided-pix2pix use fixed 100 across all feature maps
bottlenecks.append(self.bottleneck_layer(base_nc * nc_factor, base_nc * nc_factor // 2))
self.bottlenecks = torch.nn.ModuleList(bottlenecks)
def bottleneck_layer(self, nc, bottleneck_depth):
if self.double:
block_list = [ConvBaseBlock(nc * 2, nc, kernel=1, pad=0)]
else:
block_list = []
# add a resnet block in bottleneck layer for alpha and beta
# update: remove bn in FiLM module
block_list += [ResnetBlock(nc, use_bn=False, use_bias=True),
nn.utils.spectral_norm(nn.Conv2d(nc, bottleneck_depth, kernel_size=1)),
nn.ReLU(True),
nn.utils.spectral_norm(nn.Conv2d(bottleneck_depth, nc, kernel_size=1))]
main = nn.Sequential(*block_list)
return main
def forward(self, feats):
assert len(feats) == self.num_feats
params = []
for i in range(self.num_feats):
            # attach FiLM source features to the main graph instead of calling detach()
# update: no need for add 1 for relative feats ratio
# alpha & beta separate
params.append(self.bottlenecks[i](feats[i]))
return params
class SemIAGenerator(BaseNetwork):
"""
netG
input: real image(src_img), input_seg(src_seg), aug_seg(tgt_seg)
output: fake_image(tgt_img)
also output FiLM parameters(alpha, beta) for fixed-point loss and visualization
"""
def __init__(self, opt):
super(SemIAGenerator, self).__init__()
self.num_down = opt.num_down # Encoder feat layer num
self.num_up = opt.num_up # Decoder feat layer num
# self.neck_depth = neck_depth # FiLM layer bottleneck depth
self.base_nc = opt.base_nc # base channel size for conv layers
self.cond_nc = opt.cond_nc # Condition channel size, 3 for seg
self.im_nc = opt.im_nc # Image channel size, commonly 3
self.opt = opt # use FiLM or Cond-Attn
code_c_nc, code_i_nc = self.base_nc * (2 ** self.num_down), self.base_nc * (2 ** self.num_down)
self.encoder_c = Encoder(self.num_down, self.base_nc, self.cond_nc, code_c_nc, out_feats=True)
# use noise + z_prev instead of torch.cat(noise+prev, prev) as input
self.encoder_i = Encoder(self.num_down, self.base_nc, self.im_nc, code_i_nc,
input_FiLM=self.opt.E_use_FiLM, use_attn=self.opt.E_use_attn,
out_feats=True, out_shapes=False)
self.decoder_i = Decoder(self.num_up, self.base_nc, code_i_nc, self.im_nc,
skip_feats=self.opt.D_use_skip, input_FiLM=self.opt.D_use_FiLM,
use_attn=self.opt.D_use_attn)
if self.opt.E_use_FiLM or self.opt.D_use_FiLM:
self.FiLM_c2i_alpha = FiLM(self.base_nc, self.num_up, reverse=True)
self.FiLM_c2i_beta = FiLM(self.base_nc, self.num_up, reverse=True)
def forward(self, x, cond=None, cond_aug=None):
# print(x.shape, cond.shape)
# Condition + FiLM(Feat_img) -> code_cond
# _ denotes out_feats of ecnoder_c is None
_, feats_cond, _, _ = self.encoder_c(cond)
_, feats_cond_aug, _, _ = self.encoder_c(cond_aug)
if self.opt.E_use_FiLM or self.opt.D_use_FiLM:
# Relative feats between cond and cond_aug
rel_feats_ratio = [] # use for alpha(multiplier of FiLM)
for f_c, f_c_a in zip(feats_cond, feats_cond_aug):
rel_feats_ratio.append(torch.div(f_c_a + 1e-14, f_c + 1e-14)) # feats_cond_aug / feats_cond
rel_feats_diff = [] # use for beta(bias of FiLM)
for f_c, f_c_a in zip(feats_cond, feats_cond_aug):
                rel_feats_diff.append(torch.add(f_c_a, -f_c))  # feats_cond_aug - feats_cond
# cond2img in Decoder: apply FiLM alpha and beta
# Feat_cond -> alpha, beta
alpha_conds = self.FiLM_c2i_alpha(rel_feats_ratio)
beta_conds = self.FiLM_c2i_beta(rel_feats_diff)
rel_feats_list = [] # for visualization
alpha_beta_list = [] # for fixed-point loss in zero-reconstruction
for fr, fd, a, b in zip(rel_feats_ratio, rel_feats_diff, alpha_conds, beta_conds):
# shift rel_feats_ratio, alpha to around 0 for visualization
rel_feats_list.append([fr.clone() - 1, fd.clone(), a.clone() - 1, b.clone()])
alpha_beta_list.append([a, b])
E_param_dict = {"FiLM_alphas": None, "FiLM_betas": None, "k_feats": None, "q_feats": None}
if self.opt.E_use_FiLM:
E_param_dict["FiLM_alphas"], E_param_dict["FiLM_betas"] = alpha_conds, beta_conds
if self.opt.E_use_attn:
E_param_dict["k_feats"], E_param_dict["q_feats"] = feats_cond, feats_cond_aug
# Noise + Prev_img -> Feat_img, code_img
code_i, feats_img, _, attn_maps = self.encoder_i(x, **E_param_dict)
if not self.opt.D_use_skip:
feats_img = None
# code_img + FiLM(Feat_cond) -> Fake_img
# _ denotes out_feats of decoder_i is None
D_param_dict = {"FiLM_alphas": None, "FiLM_betas": None, "k_feats": None, "q_feats": None}
if self.opt.D_use_FiLM:
alpha_conds.reverse()
beta_conds.reverse()
D_param_dict["FiLM_alphas"], D_param_dict["FiLM_betas"] = alpha_conds, beta_conds
if self.opt.D_use_attn:
feats_cond.reverse()
feats_cond_aug.reverse()
D_param_dict["k_feats"], D_param_dict["q_feats"] = feats_cond, feats_cond_aug
fake_img, _ = self.decoder_i(code_i, skip_feats=feats_img, **D_param_dict)
if self.opt.E_use_FiLM or self.opt.D_use_FiLM:
return fake_img, rel_feats_list, alpha_beta_list
else:
return fake_img, attn_maps
| 2.21875 | 2 |
teamcat_service/doraemon/doraemon/ci/pagefactory/ci_service_pageworker.py | zhangyin2088/Teamcat | 6 | 12795958 | #coding=utf-8
'''
Created on 2015-9-24
@author: Devuser
'''
from doraemon.ci.pagefactory.ci_pageworker import CIPageWorker
from doraemon.ci.viewmodels.ci_left_nav_bar import CIServiceLeftNavBar
from doraemon.ci.viewmodels.ci_sub_nav_bar import CIServiceSubNavBar
from doraemon.ci.viewmodels.vm_ci_deploy_service import VM_CIDeployService
from doraemon.ci.pagefactory.ci_template_path import CIServicePath
from doraemon.project.pagefactory.project_common_pageworker import ProjectCommonControllPageWorker
from business.ci.ci_service import CIService
from doraemon.ci.models import CIDeployService
class CIServicePageWorker(CIPageWorker):
'''
    Project page generator
'''
def __init__(self, request):
'''
Constructor
'''
CIPageWorker.__init__(self, request)
self.pagemodel = CIServiceLeftNavBar
self.subpage_model = CIServiceSubNavBar
def get_ci_service_fullpage(self, request,sub_nav_action):
dm_products = CIService.get_products_include_me(request)
left_nav_bar = self.get_service_left_bar(request)
sub_nav_bar = self.get_service_sub_navbar(request, dm_products, sub_nav_action)
ci_service_webpart = self.get_ci_service_list_webpart(request,sub_nav_action)
page_fileds = {"left_nav_bar":left_nav_bar, "sub_nav_bar":sub_nav_bar, "ci_service_webpart":ci_service_webpart}
return self.get_page(page_fileds,CIServicePath.service_index_path, request)
def get_ci_service_config_page(self, request,service_id):
dm_products = CIService.get_products_include_me(request)
left_nav_bar = self.get_service_left_bar(request)
sub_nav_bar = self.get_service_sub_navbar(request, dm_products,0)
ci_service_config_webpart = self.ci_service_config_webpart(request,service_id)
page_fileds = {"left_nav_bar":left_nav_bar, "sub_nav_bar":sub_nav_bar, "ci_service_config":ci_service_config_webpart}
return self.get_page(page_fileds,CIServicePath.service_index_path,request)
def ci_service_config_webpart(self, request,service_id):
        service = CIDeployService.objects.get(id=int(service_id))
vm_service=VM_CIDeployService(service,0)
ci_service_project=ProjectCommonControllPageWorker.get_myproject_dropdown_list(self, request,service.Project)
pagefileds = {"service":vm_service,"ci_service_project":ci_service_project}
return self.get_webpart(pagefileds, CIServicePath.service_config_page)
def get_ci_service_list_webpart(self, request,sub_nav_action):
service_list_controll = self.get_ci_service_list_controll(request, sub_nav_action)
pagefileds = {"ci_service_listcontroll":service_list_controll}
return self.get_webpart(pagefileds, CIServicePath.service_list_webpart)
def get_ci_service_list_controll(self, request,sub_nav_action):
dm_ci_services = CIService.get_product_ci_services(request,sub_nav_action)
ci_services = self.get_ci_services(request, dm_ci_services)
pagefileds = {"ci_services":ci_services}
return self.get_webpart(pagefileds, CIServicePath.service_list_controll)
def get_service_left_bar(self, request):
return self.get_left_nav_bar(request, self.pagemodel, CIServicePath.left_nav_template_path)
def get_service_sub_navbar(self, request, dm_products, sub_nav_action):
return self.get_sub_nav_bar(request, self.subpage_model, CIServicePath.sub_nav_template_path, sub_nav_action=sub_nav_action, products=dm_products)
def get_ci_services(self,request,dm_ci_services):
result=list()
for service in dm_ci_services:
temp=VM_CIDeployService(service,0)
result.append(temp)
return result
| 1.9375 | 2 |
app/auth/login.py | ishiundu/bucketlist-cp2 | 3 | 12795959 | <filename>app/auth/login.py
import json
import jwt
from app.models.bucketlist_models import Users
from config import Config
from datetime import datetime, timedelta
from flask import jsonify, request
from flask_restful import abort, Resource
class Index(Resource):
def get(self):
return({"message": "Welcome to the Bucketlist API"
"Register a new User by"
"sending a POST request to auth/register"
"Login by sending a post request to"
"POST auth/login to get started"})
class Login(Resource):
def get(self):
return jsonify({"message": "To login,"
"send a POST request to /auth/login"})
def post(self):
data = json.loads(request.get_data(as_text=True))
if not data:
abort(
400,
message="No parameters passed. Please fill all fields")
else:
username = data['username']
password = data['password']
if not username or not password:
abort(400,
message="Kindly fill in the missing details")
user = Users.query.filter_by(username=username).first()
if not user:
abort(400, message="User does not exist")
if user.check_password(password):
payload = {
'sub': user.user_id,
'exp': datetime.utcnow() + timedelta(minutes=30)
}
token = jwt.encode(
payload, Config.SECRET_KEY, algorithm='HS256')
return jsonify({"message": "Welcome {}".format(user.username),
"token": token.decode('utf-8')})
else:
abort(400, message="Invalid password")
| 2.5625 | 3 |
troposphere_mate/mediaconnect.py | tsuttsu305/troposphere_mate-project | 0 | 12795960 | <filename>troposphere_mate/mediaconnect.py
# -*- coding: utf-8 -*-
"""
This code is auto generated from troposphere_mate.code_generator.__init__.py scripts.
"""
import sys
if sys.version_info.major >= 3 and sys.version_info.minor >= 5: # pragma: no cover
from typing import Union, List, Any
import troposphere.mediaconnect
from troposphere.mediaconnect import (
Encryption as _Encryption,
FailoverConfig as _FailoverConfig,
Source as _Source,
VpcInterfaceAttachment as _VpcInterfaceAttachment,
)
from troposphere import Template, AWSHelperFn
from troposphere_mate.core.mate import preprocess_init_kwargs, Mixin
from troposphere_mate.core.sentiel import REQUIRED, NOTHING
class FailoverConfig(troposphere.mediaconnect.FailoverConfig, Mixin):
def __init__(self,
title=None,
RecoveryWindow=NOTHING, # type: int
State=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
RecoveryWindow=RecoveryWindow,
State=State,
**kwargs
)
super(FailoverConfig, self).__init__(**processed_kwargs)
class Encryption(troposphere.mediaconnect.Encryption, Mixin):
def __init__(self,
title=None,
Algorithm=REQUIRED, # type: Union[str, AWSHelperFn]
RoleArn=REQUIRED, # type: Union[str, AWSHelperFn]
ConstantInitializationVector=NOTHING, # type: Union[str, AWSHelperFn]
DeviceId=NOTHING, # type: Union[str, AWSHelperFn]
KeyType=NOTHING, # type: Union[str, AWSHelperFn]
Region=NOTHING, # type: Union[str, AWSHelperFn]
ResourceId=NOTHING, # type: Union[str, AWSHelperFn]
SecretArn=NOTHING, # type: Union[str, AWSHelperFn]
Url=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Algorithm=Algorithm,
RoleArn=RoleArn,
ConstantInitializationVector=ConstantInitializationVector,
DeviceId=DeviceId,
KeyType=KeyType,
Region=Region,
ResourceId=ResourceId,
SecretArn=SecretArn,
Url=Url,
**kwargs
)
super(Encryption, self).__init__(**processed_kwargs)
class Source(troposphere.mediaconnect.Source, Mixin):
def __init__(self,
title=None,
Decryption=NOTHING, # type: _Encryption
Description=NOTHING, # type: Union[str, AWSHelperFn]
EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn]
IngestIp=NOTHING, # type: Union[str, AWSHelperFn]
IngestPort=NOTHING, # type: int
MaxBitrate=NOTHING, # type: int
MaxLatency=NOTHING, # type: int
Name=NOTHING, # type: Union[str, AWSHelperFn]
Protocol=NOTHING, # type: Union[str, AWSHelperFn]
SourceArn=NOTHING, # type: Union[str, AWSHelperFn]
StreamId=NOTHING, # type: Union[str, AWSHelperFn]
VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn]
WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
Decryption=Decryption,
Description=Description,
EntitlementArn=EntitlementArn,
IngestIp=IngestIp,
IngestPort=IngestPort,
MaxBitrate=MaxBitrate,
MaxLatency=MaxLatency,
Name=Name,
Protocol=Protocol,
SourceArn=SourceArn,
StreamId=StreamId,
VpcInterfaceName=VpcInterfaceName,
WhitelistCidr=WhitelistCidr,
**kwargs
)
super(Source, self).__init__(**processed_kwargs)
class Flow(troposphere.mediaconnect.Flow, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
Name=REQUIRED, # type: Union[str, AWSHelperFn]
Source=REQUIRED, # type: _Source
AvailabilityZone=NOTHING, # type: Union[str, AWSHelperFn]
SourceFailoverConfig=NOTHING, # type: _FailoverConfig
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
Name=Name,
Source=Source,
AvailabilityZone=AvailabilityZone,
SourceFailoverConfig=SourceFailoverConfig,
**kwargs
)
super(Flow, self).__init__(**processed_kwargs)
class FlowEntitlement(troposphere.mediaconnect.FlowEntitlement, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
Description=REQUIRED, # type: Union[str, AWSHelperFn]
FlowArn=REQUIRED, # type: Union[str, AWSHelperFn]
Name=REQUIRED, # type: Union[str, AWSHelperFn]
Subscribers=REQUIRED, # type: List[Union[str, AWSHelperFn]]
DataTransferSubscriberFeePercent=NOTHING, # type: int
Encryption=NOTHING, # type: _Encryption
EntitlementStatus=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
Description=Description,
FlowArn=FlowArn,
Name=Name,
Subscribers=Subscribers,
DataTransferSubscriberFeePercent=DataTransferSubscriberFeePercent,
Encryption=Encryption,
EntitlementStatus=EntitlementStatus,
**kwargs
)
super(FlowEntitlement, self).__init__(**processed_kwargs)
class VpcInterfaceAttachment(troposphere.mediaconnect.VpcInterfaceAttachment, Mixin):
def __init__(self,
title=None,
VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
VpcInterfaceName=VpcInterfaceName,
**kwargs
)
super(VpcInterfaceAttachment, self).__init__(**processed_kwargs)
class FlowOutput(troposphere.mediaconnect.FlowOutput, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
FlowArn=REQUIRED, # type: Union[str, AWSHelperFn]
Protocol=REQUIRED, # type: Union[str, AWSHelperFn]
CidrAllowList=NOTHING, # type: List[Union[str, AWSHelperFn]]
Description=NOTHING, # type: Union[str, AWSHelperFn]
Destination=NOTHING, # type: Union[str, AWSHelperFn]
Encryption=NOTHING, # type: _Encryption
MaxLatency=NOTHING, # type: int
Name=NOTHING, # type: Union[str, AWSHelperFn]
Port=NOTHING, # type: int
RemoteId=NOTHING, # type: Union[str, AWSHelperFn]
SmoothingLatency=NOTHING, # type: int
StreamId=NOTHING, # type: Union[str, AWSHelperFn]
VpcInterfaceAttachment=NOTHING, # type: _VpcInterfaceAttachment
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
FlowArn=FlowArn,
Protocol=Protocol,
CidrAllowList=CidrAllowList,
Description=Description,
Destination=Destination,
Encryption=Encryption,
MaxLatency=MaxLatency,
Name=Name,
Port=Port,
RemoteId=RemoteId,
SmoothingLatency=SmoothingLatency,
StreamId=StreamId,
VpcInterfaceAttachment=VpcInterfaceAttachment,
**kwargs
)
super(FlowOutput, self).__init__(**processed_kwargs)
class FlowSource(troposphere.mediaconnect.FlowSource, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
Description=REQUIRED, # type: Union[str, AWSHelperFn]
Name=REQUIRED, # type: Union[str, AWSHelperFn]
Decryption=NOTHING, # type: _Encryption
EntitlementArn=NOTHING, # type: Union[str, AWSHelperFn]
FlowArn=NOTHING, # type: Union[str, AWSHelperFn]
IngestPort=NOTHING, # type: int
MaxBitrate=NOTHING, # type: int
MaxLatency=NOTHING, # type: int
Protocol=NOTHING, # type: Union[str, AWSHelperFn]
StreamId=NOTHING, # type: Union[str, AWSHelperFn]
VpcInterfaceName=NOTHING, # type: Union[str, AWSHelperFn]
WhitelistCidr=NOTHING, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
Description=Description,
Name=Name,
Decryption=Decryption,
EntitlementArn=EntitlementArn,
FlowArn=FlowArn,
IngestPort=IngestPort,
MaxBitrate=MaxBitrate,
MaxLatency=MaxLatency,
Protocol=Protocol,
StreamId=StreamId,
VpcInterfaceName=VpcInterfaceName,
WhitelistCidr=WhitelistCidr,
**kwargs
)
super(FlowSource, self).__init__(**processed_kwargs)
class FlowVpcInterface(troposphere.mediaconnect.FlowVpcInterface, Mixin):
def __init__(self,
title, # type: str
template=None, # type: Template
validation=True, # type: bool
FlowArn=REQUIRED, # type: Union[str, AWSHelperFn]
Name=REQUIRED, # type: Union[str, AWSHelperFn]
RoleArn=REQUIRED, # type: Union[str, AWSHelperFn]
SecurityGroupIds=REQUIRED, # type: List[Union[str, AWSHelperFn]]
SubnetId=REQUIRED, # type: Union[str, AWSHelperFn]
**kwargs):
processed_kwargs = preprocess_init_kwargs(
title=title,
template=template,
validation=validation,
FlowArn=FlowArn,
Name=Name,
RoleArn=RoleArn,
SecurityGroupIds=SecurityGroupIds,
SubnetId=SubnetId,
**kwargs
)
super(FlowVpcInterface, self).__init__(**processed_kwargs)
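# Hedged usage sketch (not part of the generated module): builds a minimal Flow with the
# wrappers above; the resource names and protocol value are made-up examples.
if __name__ == "__main__":
    from troposphere import Template
    tpl = Template()
    source = Source(Name="primary-source", Protocol="rtp", WhitelistCidr="10.0.0.0/16")
    Flow("DemoFlow", template=tpl, Name="demo-flow", Source=source)
    print(tpl.to_json())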
| 1.875 | 2 |
tests/test_bitable_sample.py | chyroc/pylark | 7 | 12795961 | # Code generated by lark_sdk_gen. DO NOT EDIT.
import unittest
import pylark
import pytest
from tests.test_conf import app_all_permission, app_no_permission
from tests.test_helper import mock_get_tenant_access_token_failed
def mock(*args, **kwargs):
raise pylark.PyLarkError(scope="scope", func="func", code=1, msg="mock-failed")
def mock_raw_request(*args, **kwargs):
raise pylark.PyLarkError(
scope="scope", func="func", code=1, msg="mock-raw-request-failed"
)
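# The three test classes below exercise the same bitable endpoints against three failure
# points: token acquisition (get_tenant_access_token), the wrapped module method itself,
# and the underlying raw HTTP request (cli.raw_request).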
# mock get token
class TestBitableSampleMockGetTokenFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBitableSampleMockGetTokenFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.cli.auth.get_tenant_access_token = mock_get_tenant_access_token_failed
self.cli.auth.get_app_access_token = mock_get_tenant_access_token_failed
self.module_cli = self.cli.bitable
def test_mock_get_token_get_bitable_view_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_view(pylark.CreateBitableViewReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_bitable_record_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record(pylark.GetBitableRecordReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_record(
pylark.BatchCreateBitableRecordReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_bitable_record(
pylark.BatchUpdateBitableRecordReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_record(
pylark.BatchDeleteBitableRecordReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_bitable_field_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_update_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_bitable_table_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_table(pylark.CreateBitableTableReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_table(
pylark.BatchCreateBitableTableReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq())
assert "msg=failed" in f"{e}"
def test_mock_get_token_batch_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_table(
pylark.BatchDeleteBitableTableReq()
)
assert "msg=failed" in f"{e}"
def test_mock_get_token_get_bitable_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq())
assert "msg=failed" in f"{e}"
# mock self func
class TestBitableSampleMockSelfFuncFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBitableSampleMockSelfFuncFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.module_cli = self.cli.bitable
def test_mock_self_func_get_bitable_view_list(self):
origin_func = self.module_cli.get_bitable_view_list
self.module_cli.get_bitable_view_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_view_list(pylark.GetBitableViewListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_view_list = origin_func
def test_mock_self_func_create_bitable_view(self):
origin_func = self.module_cli.create_bitable_view
self.module_cli.create_bitable_view = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_view(pylark.CreateBitableViewReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_bitable_view = origin_func
def test_mock_self_func_delete_bitable_view(self):
origin_func = self.module_cli.delete_bitable_view
self.module_cli.delete_bitable_view = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_view(pylark.DeleteBitableViewReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_bitable_view = origin_func
def test_mock_self_func_get_bitable_record_list(self):
origin_func = self.module_cli.get_bitable_record_list
self.module_cli.get_bitable_record_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record_list(pylark.GetBitableRecordListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_record_list = origin_func
def test_mock_self_func_get_bitable_record(self):
origin_func = self.module_cli.get_bitable_record
self.module_cli.get_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record(pylark.GetBitableRecordReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_record = origin_func
def test_mock_self_func_create_bitable_record(self):
origin_func = self.module_cli.create_bitable_record
self.module_cli.create_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_record(pylark.CreateBitableRecordReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_bitable_record = origin_func
def test_mock_self_func_batch_create_bitable_record(self):
origin_func = self.module_cli.batch_create_bitable_record
self.module_cli.batch_create_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_record(
pylark.BatchCreateBitableRecordReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_create_bitable_record = origin_func
def test_mock_self_func_update_bitable_record(self):
origin_func = self.module_cli.update_bitable_record
self.module_cli.update_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_record(pylark.UpdateBitableRecordReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_bitable_record = origin_func
def test_mock_self_func_batch_update_bitable_record(self):
origin_func = self.module_cli.batch_update_bitable_record
self.module_cli.batch_update_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_bitable_record(
pylark.BatchUpdateBitableRecordReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_update_bitable_record = origin_func
def test_mock_self_func_delete_bitable_record(self):
origin_func = self.module_cli.delete_bitable_record
self.module_cli.delete_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_record(pylark.DeleteBitableRecordReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_bitable_record = origin_func
def test_mock_self_func_batch_delete_bitable_record(self):
origin_func = self.module_cli.batch_delete_bitable_record
self.module_cli.batch_delete_bitable_record = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_record(
pylark.BatchDeleteBitableRecordReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_delete_bitable_record = origin_func
def test_mock_self_func_get_bitable_field_list(self):
origin_func = self.module_cli.get_bitable_field_list
self.module_cli.get_bitable_field_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_field_list(pylark.GetBitableFieldListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_field_list = origin_func
def test_mock_self_func_create_bitable_field(self):
origin_func = self.module_cli.create_bitable_field
self.module_cli.create_bitable_field = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_field(pylark.CreateBitableFieldReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_bitable_field = origin_func
def test_mock_self_func_update_bitable_field(self):
origin_func = self.module_cli.update_bitable_field
self.module_cli.update_bitable_field = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_field(pylark.UpdateBitableFieldReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.update_bitable_field = origin_func
def test_mock_self_func_delete_bitable_field(self):
origin_func = self.module_cli.delete_bitable_field
self.module_cli.delete_bitable_field = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_field(pylark.DeleteBitableFieldReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_bitable_field = origin_func
def test_mock_self_func_get_bitable_table_list(self):
origin_func = self.module_cli.get_bitable_table_list
self.module_cli.get_bitable_table_list = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_table_list(pylark.GetBitableTableListReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_table_list = origin_func
def test_mock_self_func_create_bitable_table(self):
origin_func = self.module_cli.create_bitable_table
self.module_cli.create_bitable_table = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_table(pylark.CreateBitableTableReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.create_bitable_table = origin_func
def test_mock_self_func_batch_create_bitable_table(self):
origin_func = self.module_cli.batch_create_bitable_table
self.module_cli.batch_create_bitable_table = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_table(
pylark.BatchCreateBitableTableReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_create_bitable_table = origin_func
def test_mock_self_func_delete_bitable_table(self):
origin_func = self.module_cli.delete_bitable_table
self.module_cli.delete_bitable_table = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_table(pylark.DeleteBitableTableReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.delete_bitable_table = origin_func
def test_mock_self_func_batch_delete_bitable_table(self):
origin_func = self.module_cli.batch_delete_bitable_table
self.module_cli.batch_delete_bitable_table = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_table(
pylark.BatchDeleteBitableTableReq()
)
assert "msg=mock-failed" in f"{e}"
self.module_cli.batch_delete_bitable_table = origin_func
def test_mock_self_func_get_bitable_meta(self):
origin_func = self.module_cli.get_bitable_meta
self.module_cli.get_bitable_meta = mock
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_meta(pylark.GetBitableMetaReq())
assert "msg=mock-failed" in f"{e}"
self.module_cli.get_bitable_meta = origin_func
# mock raw request
class TestBitableSampleMockRawRequestFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBitableSampleMockRawRequestFailed, self).__init__(*args, **kwargs)
self.cli = app_all_permission.ins()
self.module_cli = self.cli.bitable
self.cli.raw_request = mock_raw_request
def test_mock_raw_request_get_bitable_view_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_view_list(
pylark.GetBitableViewListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_view(
pylark.CreateBitableViewReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_view(
pylark.DeleteBitableViewReq(
app_token="x",
table_id="x",
view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_bitable_record_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record_list(
pylark.GetBitableRecordListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record(
pylark.GetBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_record(
pylark.CreateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_record(
pylark.BatchCreateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_record(
pylark.UpdateBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_bitable_record(
pylark.BatchUpdateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_record(
pylark.DeleteBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_record(
pylark.BatchDeleteBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_bitable_field_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_field_list(
pylark.GetBitableFieldListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_field(
pylark.CreateBitableFieldReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_update_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_field(
pylark.UpdateBitableFieldReq(
app_token="x",
table_id="x",
field_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_field(
pylark.DeleteBitableFieldReq(
app_token="x",
table_id="x",
field_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_bitable_table_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_table_list(
pylark.GetBitableTableListReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_table(
pylark.CreateBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_table(
pylark.BatchCreateBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_table(
pylark.DeleteBitableTableReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_batch_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_table(
pylark.BatchDeleteBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
def test_mock_raw_request_get_bitable_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_meta(
pylark.GetBitableMetaReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
assert "mock-raw-request-failed" in e.value.msg
# real request
class TestBitableSampleRealRequestFailed(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestBitableSampleRealRequestFailed, self).__init__(*args, **kwargs)
self.cli = app_no_permission.ins()
self.module_cli = self.cli.bitable
def test_real_request_get_bitable_view_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_view_list(
pylark.GetBitableViewListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_view(
pylark.CreateBitableViewReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_bitable_view(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_view(
pylark.DeleteBitableViewReq(
app_token="x",
table_id="x",
view_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_bitable_record_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record_list(
pylark.GetBitableRecordListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_record(
pylark.GetBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_record(
pylark.CreateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_create_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_record(
pylark.BatchCreateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_record(
pylark.UpdateBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_update_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_update_bitable_record(
pylark.BatchUpdateBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_record(
pylark.DeleteBitableRecordReq(
app_token="x",
table_id="x",
record_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_delete_bitable_record(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_record(
pylark.BatchDeleteBitableRecordReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_bitable_field_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_field_list(
pylark.GetBitableFieldListReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_field(
pylark.CreateBitableFieldReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_update_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.update_bitable_field(
pylark.UpdateBitableFieldReq(
app_token="x",
table_id="x",
field_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_bitable_field(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_field(
pylark.DeleteBitableFieldReq(
app_token="x",
table_id="x",
field_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_bitable_table_list(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_table_list(
pylark.GetBitableTableListReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.create_bitable_table(
pylark.CreateBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_create_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_create_bitable_table(
pylark.BatchCreateBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.delete_bitable_table(
pylark.DeleteBitableTableReq(
app_token="x",
table_id="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_batch_delete_bitable_table(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.batch_delete_bitable_table(
pylark.BatchDeleteBitableTableReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
def test_real_request_get_bitable_meta(self):
with pytest.raises(pylark.PyLarkError) as e:
self.module_cli.get_bitable_meta(
pylark.GetBitableMetaReq(
app_token="x",
)
)
assert e.type is pylark.PyLarkError
assert e.value.code > 0
| 1.953125 | 2 |
irop_hook.py | PLSysSec/pitchfork-angr | 5 | 12795962 | import angr
from taint import is_tainted, taintedUnconstrainedBits
import logging
l = logging.getLogger(__name__)
class IROpHook(angr.SimStatePlugin):
"""
Allows hooking the computation of operations performed in the symbolic execution.
(requires our fork of angr to actually respect the hook)
"""
def do_op(self, state, irop, args):
"""
irop: an angr.vex.engines.SimIROp
args: arguments to irop, which will all be claripy objects (instances of claripy.ast.Base)
return: claripy object to use as the result of the operation;
or None to refrain from hooking the operation, and let angr proceed normally
"""
if any(is_tainted(a) for a in args):
#l.debug("Replacing operation {} on {} with unconstrained secret".format(irop, args))
return taintedUnconstrainedBits(state, "secret", irop._output_size_bits)
return None
@angr.SimStatePlugin.memo
def copy(self, memo):
return IROpHook()
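# Usage sketch (illustrative, not part of this file): the patched angr engine
# mentioned in the class docstring is expected to consult this plugin. Registering
# it on a state would look roughly like the following; the plugin name 'irop_hook'
# is an assumption for the example.
#
#   state = proj.factory.entry_state()
#   state.register_plugin('irop_hook', IROpHook())
#   # the forked engine can then call state.irop_hook.do_op(state, irop, args)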
| 2.390625 | 2 |
people/views.py | GreenBankObservatory/django-perf | 0 | 12795963 |
import inspect
from django.shortcuts import render
from people.get_people import (
get_people_naive,
get_people_select_related_only,
get_people_select_related,
get_people_qs_only,
get_people_values,
)
def index(request):
return render(request, "people/index.html")
def list_people_naive(request):
"""List person/observatory/site using "naive" iteration of Person QuerySet"""
people = get_people_naive()
return render(
request,
"people/list_people_fastest.html",
{
"people": people,
"title": r"Naive QuerySet.all()",
"description": "get_people_naive",
"func_source": inspect.getsource(get_people_naive),
},
)
def list_people_select_related_only(request):
    """List person/observatory/site via iteration of QuerySet using select_related only"""
people = get_people_select_related_only()
return render(
request,
"people/list_people_fastest.html",
{
"people": people,
"title": r"Using QuerySet.select_related()",
"description": "get_people_select_related_only",
"func_source": inspect.getsource(get_people_select_related_only),
},
)
def list_people_select_related(request):
"""List person/observatory/site via iteration of QuerySet using select_related"""
people = get_people_select_related()
return render(
request,
"people/list_people_fastest.html",
{
"people": people,
"title": r"Using QuerySet.only()",
"description": "get_people_select_related",
"func_source": inspect.getsource(get_people_select_related),
},
)
def list_people_qs_only(request):
"""List person/observatory/site via iteration of QuerySet.only()"""
people = get_people_qs_only()
return render(
request,
"people/list_people_fastest.html",
{
"people": people,
"title": r"Using QuerySet.select_related() <i>and</i> QuerySet.only()",
"description": "get_people_qs_only",
"func_source": inspect.getsource(get_people_qs_only),
},
)
def list_people_values(request):
"""List person/observatory/site using explict retrieval of relevant values"""
people = get_people_values()
return render(
request,
"people/list_people_fastest.html",
{
"people": people,
"title": r"Using QuerySet.values()",
"description": "get_people_values",
"func_source": inspect.getsource(get_people_values),
},
)
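# Illustrative sketch (assumption): the get_people_* helpers imported at the top live
# in people/get_people.py, which is not shown here. A select_related variant would
# typically look something like the following, with the Person model and its
# observatory/site relations assumed for the example:
#
#   def get_people_select_related():
#       return Person.objects.select_related("observatory", "observatory__site")
#
# Each view above renders the helper's source via inspect.getsource so the page can
# show which query strategy produced the listing.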
| 2.21875 | 2 |
LeetCode-All-Solution/Python3/LC-0217-Contains-Duplicate.py | YuweiYin/Algorithm_YuweiYin | 0 | 12795964 |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""=================================================================
@Project : Algorithm_YuweiYin/LeetCode-All-Solution/Python3
@File : LC-0217-Contains-Duplicate.py
@Author : [YuweiYin](https://github.com/YuweiYin)
@Date : 2022-02-25
=================================================================="""
import sys
import time
from typing import List
# import functools
"""
LeetCode - 0217 - (Easy) - Contains Duplicate
https://leetcode.com/problems/contains-duplicate/
Description & Requirement:
Given an integer array nums,
return true if any value appears at least twice in the array,
and return false if every element is distinct.
Example 1:
Input: nums = [1,2,3,1]
Output: true
Example 2:
Input: nums = [1,2,3,4]
Output: false
Example 3:
Input: nums = [1,1,1,3,3,4,3,2,4,2]
Output: true
Constraints:
1 <= nums.length <= 10^5
-10^9 <= nums[i] <= 10^9
"""
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
# exception case
if not isinstance(nums, list) or len(nums) <= 0:
return False # Error input type
if len(nums) == 1:
return False
# main method: (scan once, store numbers in hash set, if duplicate, stop and return True)
return self._containsDuplicate(nums)
def _containsDuplicate(self, nums: List[int]) -> bool:
hash_set = set()
for num in nums:
if num in hash_set:
return True
hash_set.add(num)
return False
def main():
# Example 1: Output: true
# nums = [1, 2, 3, 1]
# Example 2: Output: false
# nums = [1, 2, 3, 4]
# Example 3: Output: true
nums = [1, 1, 1, 3, 3, 4, 3, 2, 4, 2]
# init instance
solution = Solution()
# run & time
start = time.process_time()
ans = solution.containsDuplicate(nums)
end = time.process_time()
# show answer
print('\nAnswer:')
print(ans)
# show time consumption
print('Running Time: %.5f ms' % ((end - start) * 1000))
if __name__ == "__main__":
sys.exit(main())
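# Note (equivalent approach): since only membership matters, the same check can be
# written as a one-liner at the cost of always building the full set:
#   return len(set(nums)) != len(nums)
# Both versions run in O(n) time with O(n) extra space.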
| 3.890625 | 4 |
model/model.py | sreeji10/movie-recomendation-flask-python | 3 | 12795965 | import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
def get_title_from_index(index):
return df[df.index == index]["title"].values[0]
def get_index_from_title(title):
return df[df.title.str.lower() == title.lower()]["index"].values[0]
def combine_features(row):
try:
return row['keywords'] +" "+row['cast']+" "+row["genres"]+" "+row["director"]
except:
print ("Error:", row )
def check_movie(title):
if title in all_movies:
return True
return False
def get_recommendations(title, n):
names = []
movie_user_likes = title
movie_index = get_index_from_title(movie_user_likes)
similar_movies = list(enumerate(cosine_sim[movie_index]))
sorted_similar_movies = sorted(similar_movies,key=lambda x:x[1],reverse=True)
for element in sorted_similar_movies[1:n+1]:
names.append(get_title_from_index(element[0]))
return names
df = pd.read_csv("./data/movie_dataset.csv")
features = ['keywords','cast','genres','director']
all_movies = df.original_title.str.lower().tolist()
for feature in features:
df[feature] = df[feature].fillna('')
df["combined_features"] = df.apply(combine_features,axis=1)
cv = CountVectorizer()
count_matrix = cv.fit_transform(df["combined_features"])
cosine_sim = cosine_similarity(count_matrix)
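# Usage sketch (illustrative): once the similarity matrix above is built, a caller can
# guard on check_movie() and then ask for the top-N most similar titles. The title used
# here is only an example and may not exist in movie_dataset.csv.
#
#   if check_movie("avatar"):
#       for title in get_recommendations("avatar", 5):
#           print(title)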
| 3.15625 | 3 |
kwat/array_array/__init__.py | KwatME/ccal | 5 | 12795966 | from .apply import apply
from .separate_and_apply import separate_and_apply
| 1.15625 | 1 |
calories/filters.py | DHEERAJPRAKASH/CALORIE_TRACKER | 0 | 12795967 | import django_filters
from django_filters import CharFilter
from .models import *
class FoodFilter(django_filters.FilterSet):
food_name = CharFilter(field_name = 'name' , lookup_expr = 'icontains',label='search food items')
class Meta:
model = Food
fields = ['food_name']
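# Usage sketch (assumption: a typical django-filter view, not part of this file):
#
#   def food_list(request):
#       food_filter = FoodFilter(request.GET, queryset=Food.objects.all())
#       return render(request, "calories/food_list.html", {"filter": food_filter})
#
# The 'icontains' lookup above makes the food-name search case-insensitive.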
| 1.945313 | 2 |
setup.py | akaraspt/ms-gait-calibrate | 2 | 12795968 |
from setuptools import setup, find_packages
install_requires = [
'numpy',
'pandas',
'scipy',
'matplotlib',
'scikit-learn',
'flask',
'flask-script',
'flask-bootstrap',
'werkzeug',
'bokeh',
'Jinja2',
]
setup(
name='gait-calibrate',
version='1.1',
description='A Python toolkit for personalized gait calibration',
author='<NAME>',
author_email='<EMAIL>',
packages=find_packages(),
install_requires=install_requires,
) | 1.320313 | 1 |
tasks-around-university/rest/maingame/views.py | JaliJuhola/tasks-around-tampere | 0 | 12795969 |
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
#from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from rest.maingame.models import Hotspot, Player, Group, Lobby, LobbyPlayer
from rest.maingame.serializers import HotspotSerializer, PlayerSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest.maingame.channels import WaitingPlayersToJoinChannels
import uuid
from rest.common.channels import PUSHER_CLIENT
import json
from django.utils import timezone
from rest.push_the_buttons.models import PushTheButtonsMainGame
from rest.geocache.models import GeocacheMainGame
from rest.alias.models import AliasMainGame
class AuthView(APIView):
"""
    Issue and look up player authentication tokens.
"""
queryset = Player.objects.all()
serializer_class = PlayerSerializer
def get(self, request, format=None):
identifier = request.GET["id"]
if not identifier:
return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST)
try:
player = Player.objects.get(id=identifier)
except Player.DoesNotExist:
return Response({'message': 'player not found'}, status=status.HTTP_400_BAD_REQUEST)
return Response({'token': player.token})
def post(self, request, format=None):
serializer = PlayerSerializer(data=request.data)
token = <KEY>()
if serializer.is_valid():
serializer.save(token=token)
return Response({'token': token}, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class PlayerGroupView(APIView):
"""
    List the players in the requesting player's group, or join a group by id.
"""
queryset = Player.objects.all()
serializer_class = PlayerSerializer
def get(self, request):
identifier = request.user.group.id
if not identifier:
return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST)
players = Player.objects.filter(group__id=identifier)
serializer = PlayerSerializer(players, many=True)
return Response(serializer.data)
def post(self, request, format=None):
identifier = request.data['group_id']
if not identifier:
return Response({'id': 'This field is required!'}, status=status.HTTP_400_BAD_REQUEST)
try:
group = Group.objects.get(id=identifier)
request.user.group = group
request.user.save()
players_on_game = Player.objects.filter(group=group).count()
WaitingPlayersToJoinChannels.new_push_available(request.user.name, players_on_game, group.id, group.name)
except Group.DoesNotExist:
return Response({'message': 'invalid group_id'}, status=status.HTTP_400_BAD_REQUEST)
return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': request.user.group.name, 'group_id': request.user.group.id}, status=status.HTTP_201_CREATED)
class PlayerView(APIView):
"""
    Return the requesting player's profile and group details.
"""
queryset = Player.objects.all()
serializer_class = PlayerSerializer
def get(self, request):
group_name = request.user.group.name
player_name = request.user.name
is_leader = request.user.leader
player_id = request.user.id
group_id = request.user.group.id
return Response({'group': {'name': group_name, 'id': group_id}, 'player': {'name': player_name, 'id': player_id, 'leader': is_leader}})
class GroupView(APIView):
"""
    Create a new group and make the requesting player its leader.
"""
queryset = Player.objects.all()
serializer_class = PlayerSerializer
def post(self, request):
group_name = request.data['group_name']
player = request.user
group = Group.objects.create(name=group_name)
player.group = group
player.leader = True
player.save()
return Response({ 'player_id': request.user.id, 'player_name': request.user.name, 'group_name': group.name, 'group_id': group.id}, status=status.HTTP_201_CREATED)
class LobbyView(APIView):
"""
    Create or join a minigame lobby, and poll which players are present.
"""
queryset = Lobby.objects.all()
serializer_class = PlayerSerializer
def post(self, request):
minigame_name = request.data['minigame_name']
group = request.user.group
player = request.user
player.last_connection = timezone.now() + timezone.timedelta(seconds=20)
lobby, created = Lobby.objects.get_or_create(group=group, minigame=minigame_name, closed=False)
lobby_player, created = LobbyPlayer.objects.get_or_create(lobby=lobby, player=player)
lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20)
lobby_player.save()
lobby.save()
player.save()
return Response({'lobby_id': lobby.id})
def patch(self, request):
lobby_id = request.data['lobby_id']
player = request.user
player.last_connection = timezone.now() + timezone.timedelta(seconds=20)
lobby = Lobby.objects.get(id=int(lobby_id))
lobby_player = LobbyPlayer.objects.get(lobby=lobby, player=player)
lobby_player.joined_since = timezone.now() + timezone.timedelta(seconds=20)
lobby_player.save()
player.save()
players_in_lobby = LobbyPlayer.objects.filter(lobby=lobby, joined_since__gte=timezone.now())
response_array = []
for player_in_lobby in players_in_lobby:
player = player_in_lobby.player
response_array.append({'id': player.id, 'name': player.name, 'x': player.x, 'y': player.y, 'group_id': player.group.id, 'avatar': player.icon_name})
return Response({'players': response_array, 'closed': lobby.closed})
class LobbyExitView(APIView):
"""
    Close an open minigame lobby.
"""
queryset = Lobby.objects.all()
serializer_class = PlayerSerializer
def post(self, request):
lobby_id = request.data['lobby_id']
lobby = Lobby.objects.get(id=int(lobby_id))
lobby.closed = True
lobby.save()
return Response({'status': True})
class AvatarView(APIView):
"""
    Set the requesting player's avatar icon.
"""
queryset = Lobby.objects.all()
serializer_class = PlayerSerializer
def post(self, request):
icon_name = request.data['icon_name']
request.user.icon_name = icon_name
request.user.save()
return Response({'status': True})
class PlayerLocationView(APIView):
def post(self, request):
x_cord = request.data['x']
y_cord = request.data['y']
player = request.user
player.last_connection = timezone.now() + timezone.timedelta(seconds=20)
player.x = x_cord
player.y = y_cord
player.save()
players = Player.objects.filter(group=player.group, last_connection__gte=timezone.now())
response_array = []
for player in players:
player_type = 1
if player == request.user:
player_type= 3
elif player.leader:
player_type = 2
response_array.append({'name': player.name, 'type': player_type, 'location': {'longitude': player.x, 'latitude': player.y}, 'avatar': "../assets/testmarker.png"})
return Response({'players': response_array})
class MinigameProgressionView(APIView):
def get(self, request):
TOTAL_MINIGAMES = 4
total_score = 0
minigames_completed = 0
push_the_buttons_group_max = 0
push_the_buttons_max = 0
push_the_buttons_group_count = 0
alias_group_max = 0
alias_max = 0
alias_group_count = 0
quiklash_group_max = 0
quiklash_max = 0
quiklash_group_count = 0
geocache_group_max = 0
geocache_max = 0
geocache_group_count = 0
# Push the buttons scores
ptbmg = PushTheButtonsMainGame.objects.filter(game_ended=True).order_by('-current_score')
ptbmg_group = ptbmg.filter(group=request.user.group).order_by('-current_score')
if ptbmg_group.first():
push_the_buttons_group_max = ptbmg_group.first().current_score
push_the_buttons_group_count = ptbmg_group.count()
minigames_completed = minigames_completed + 1
total_score = total_score + push_the_buttons_group_max
if ptbmg.first():
push_the_buttons_max = ptbmg.first().current_score
# geocache scores
gcmg = GeocacheMainGame.objects.filter(game_ended=True).order_by('-current_score')
gcmg_group= gcmg.filter(group=request.user.group).order_by('-current_score')
if gcmg_group.first():
geocache_group_max = gcmg_group.first().current_score
geocache_group_count = gcmg_group.count()
minigames_completed = minigames_completed + 1
total_score = total_score + geocache_max
if gcmg.first():
geocache_max = gcmg.first().current_score
# alias scores
amg = AliasMainGame.objects.filter(game_ended=True).order_by('-current_score')
amg_group= amg.filter(group=request.user.group).order_by('-current_score')
if amg_group.first():
alias_group_max = amg_group.first().current_score
alias_group_count = amg_group.count()
minigames_completed = minigames_completed + 1
total_score = total_score + geocache_max
if amg.first():
alias_max = amg.first().current_score
return Response({'Push the buttons': {'group': push_the_buttons_group_max, 'world': push_the_buttons_max, 'count': push_the_buttons_group_count}, 'Alias': {'group': alias_group_max, 'world': alias_max, 'count': alias_group_count}, 'Quiklash': {'group': quiklash_group_max, 'world': quiklash_max, 'count': quiklash_group_count}, 'GeoCache': {'group': geocache_group_max, 'world': geocache_max, 'count': geocache_group_count}, 'total_score': total_score, 'completion_percentage': minigames_completed/TOTAL_MINIGAMES})
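# Routing sketch (assumption: the project's actual urls.py is not part of this file).
# These DRF APIViews would typically be exposed with as_view(), for example:
#
#   urlpatterns = [
#       path("auth", AuthView.as_view()),
#       path("player/group", PlayerGroupView.as_view()),
#       path("lobby", LobbyView.as_view()),
#       path("minigame/progression", MinigameProgressionView.as_view()),
#   ]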
| 2.171875 | 2 |
app.py | clearminds/flask-tts | 0 | 12795970 | # -*- coding: utf-8 -*-
import os
from config import Config
from flask import Flask, send_from_directory
from werkzeug.contrib.fixers import ProxyFix
import logging
from gtts import gTTS
from pydub import AudioSegment
import hashlib
try:
from urllib.parse import unquote_plus
except:
from urllib import unquote_plus
config = Config()
app = Flask(__name__)
logging.getLogger('flask_tts').setLevel(logging.DEBUG)
STORAGE_DIR = os.environ['STORAGE_DIR']
@app.route('/generate/<lang>/<text>')
def generate(lang, text):
lang = lang.lower()
text = unquote_plus(text)
tts = gTTS(text=text, lang=lang)
filename = lang+'_'+hashlib.sha1(text.encode('punycode')).hexdigest()+'.mp3'
if os.path.isfile(STORAGE_DIR+filename):
return send_from_directory(STORAGE_DIR, filename)
tts.save(STORAGE_DIR+filename)
sound = AudioSegment.from_file(STORAGE_DIR+filename, format='mp3')
sound = sound.apply_gain(+8.0)
sound.export(STORAGE_DIR+filename,
format="mp3",
bitrate="48k",
parameters=['-ac','2','-ar', '16000'])
return send_from_directory(STORAGE_DIR, filename)
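# Example request (illustrative): text is URL-encoded with '+' for spaces, so
#   GET /generate/en/Hello+world
# synthesizes "Hello world" with gTTS, boosts it by 8 dB, re-encodes it as a
# 48 kbps, 16 kHz stereo MP3, and caches the file under STORAGE_DIR keyed by a
# sha1 of the text, so repeated requests are served from disk.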
if __name__ == '__main__':
# Be sure to set config.debug_mode to False in production
port = int(os.environ.get("PORT", config.port))
if port != config.port:
config.debug = False
app.wsgi_app = ProxyFix(app.wsgi_app)
app.run(host='0.0.0.0', debug=config.debug_mode, port=port)
| 2.15625 | 2 |
pySpatialTools/Discretization/Discretization_3d/__init__.py | tgquintela/pySpatialTools | 8 | 12795971 |
"""
3D discretization
=================
Space discretization module that groups functions to discretize 3-dimensional
spaces into regions, making it easier to retrieve elements by region or to
define neighbourhoods from fixed regions.
"""
| 2.15625 | 2 |
Chapter_14/simulation_model.py | pauldevos/Mastering-Object-Oriented-Python-Second-Edition | 108 | 12795972 |
#!/usr/bin/env python3.7
"""
Mastering Object-Oriented Python 2e
Code Examples for Mastering Object-Oriented Python 2nd Edition
Chapter 14. Example 1 -- simulation model.
"""
from dataclasses import dataclass, astuple, asdict, field
from typing import Tuple, Iterator
from pathlib import Path
import csv
# Mock Object Model
# =====================
# A set of class hierarchies that we'll use for several examples.
# The content is mostly mocks.
class DealerRule:
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class Hit17(DealerRule):
"""Hits soft 17"""
pass
class Stand17(DealerRule):
"""Stands on soft 17"""
pass
class SplitRule:
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class ReSplit(SplitRule):
"""Simplistic resplit anything."""
pass
class NoReSplit(SplitRule):
"""Simplistic no resplit."""
pass
class NoReSplitAces(SplitRule):
"""One card only to aces; no resplit."""
pass
@dataclass
class Table:
decks: int
limit: int
dealer: DealerRule
split: SplitRule
payout: Tuple[int, int]
class PlayerStrategy:
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
class SomeStrategy(PlayerStrategy):
pass
class AnotherStrategy(PlayerStrategy):
pass
class BettingStrategy:
def __repr__(self) -> str:
return f"{self.__class__.__name__}()"
def bet(self) -> int:
raise NotImplementedError("No bet method")
def record_win(self) -> None:
pass
def record_loss(self) -> None:
pass
class Flat(BettingStrategy):
pass
class Martingale(BettingStrategy):
pass
class OneThreeTwoSix(BettingStrategy):
pass
@dataclass
class Player:
play: PlayerStrategy
betting: BettingStrategy
max_rounds: int
init_stake: int
rounds: int = field(init=False)
stake: float = field(init=False)
def __post_init__(self):
self.reset()
def reset(self) -> None:
self.rounds = self.max_rounds
self.stake = self.init_stake
# A mock simulation which is built from the above mock objects.
import random
@dataclass
class Simulate:
"""Mock simulation."""
table: Table
player: Player
samples: int
def __iter__(self) -> Iterator[Tuple]:
"""Yield statistical samples."""
x, y = self.table.payout
blackjack_payout = x / y
for count in range(self.samples):
self.player.reset()
while self.player.stake > 0 and self.player.rounds > 0:
self.player.rounds -= 1
outcome = random.random()
if outcome < 0.579:
self.player.stake -= 1
elif 0.579 <= outcome < 0.883:
self.player.stake += 1
elif 0.883 <= outcome < 0.943:
# a "push"
pass
else:
# 0.943 <= outcome
self.player.stake += blackjack_payout
yield astuple(self.table) + astuple(self.player)
def check(path: Path) -> None:
"""
Validate unit test result file can be read.
:param path: Path to the example output
"""
with path.open("r") as results:
rdr = csv.reader(results)
outcomes = (float(row[10]) for row in rdr)
first = next(outcomes)
sum_0, sum_1 = 1, first
value_min = value_max = first
for value in outcomes:
sum_0 += 1 # value**0
sum_1 += value # value**1
value_min = min(value_min, value)
value_max = max(value_max, value)
mean = sum_1 / sum_0
print(
f"{path}\nMean = {mean:.1f}\n"
f"House Edge = { 1 - mean / 50:.1%}\n"
f"Range = {value_min:.1f} {value_max:.1f}"
)
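# Usage sketch (illustrative): wire the mock objects together, run the simulation,
# dump the samples to CSV, and validate the file with check(). The file name and the
# 50-unit stake are arbitrary choices for the example (check() reports the edge
# relative to a 50-unit stake).
#
#   table = Table(decks=6, limit=50, dealer=Hit17(), split=ReSplit(), payout=(3, 2))
#   player = Player(play=SomeStrategy(), betting=Flat(), max_rounds=100, init_stake=50)
#   path = Path("blackjack.csv")
#   with path.open("w", newline="") as target:
#       csv.writer(target).writerows(Simulate(table, player, samples=1000))
#   check(path)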
| 3.71875 | 4 |
CursoIntensivoPython/Aula15_visualizacao_de_dados/die_visual.py | SweydAbdul/estudos-python | 0 | 12795973 |
import pygal
from CursoIntensivoPython.Aula15_visualizacao_de_dados.die import Die
# Create a D6
die = Die()
# Roll the die several times and store the results in a list
results = []
for roll_num in range(100):
result = die.roll()
results.append(result)
# Analyze the results
frequencies = []
for value in range(1, die.num_sides+1):
frequency = results.count(value)
frequencies.append(frequency)
# Visualize the results
hist = pygal.Bar()
hist.title = "Results of rolling one D6 100 times."
hist.x_labels = [1, 2, 3, 4, 5, 6]
hist.x_title = "Results"
hist.y_title = "Frequency of result"
hist.add('D6', frequencies)
hist.render_to_file('die_visual.svg')
| 3.328125 | 3 |
Medio 2/ex043.py | Gustavsantos/python1 | 0 | 12795974 | peso = float(input('What is your weight? (kg) '))
altura = float(input('What is your height? (m) '))
imc = peso / (altura * altura)
print('Your BMI is {:.1f}'.format(imc))
if imc <= 18.5:
    print('You are underweight!')
elif imc <= 25:
    print('Your weight is ideal!')
elif imc <= 30:
    print('You are overweight!')
elif imc <= 40:
    print('You are obese, BE CAREFUL!!')
else:
    print('You are morbidly obese, BE CAREFUL!!')
| 3.8125 | 4 |
arrival/arrival/galaxy.py | paiv/icfpc2020 | 0 | 12795975 | import ctypes
import sys
from pathlib import Path
from .space import SpaceClient
_known_tokens = 'ap cons nil neg c b s isnil car eq mul add lt div i t f cdr SCAN number FUN DEF galaxy GG'
_Tokens = {s:i for i, s in enumerate(_known_tokens.split(), 1)}
class AlienProxy:
def __init__(self):
pass
class MachineImage:
TOKENS = dict(_Tokens)
def emit_call(self, *args):
ap, num, gg = map(self.TOKENS.__getitem__, 'ap number GG'.split())
def emit(fn, args):
fringe = [(fn, args)]
while fringe:
fn, args = fringe.pop()
if fn is None:
yield from self.encode_lists(args)
elif isinstance(args, (list, tuple)) and (len(args) == 0):
yield self.TOKENS[fn]
else:
yield ap
fringe.append((None, args[-1]))
fringe.append((fn, args[:-1]))
return list(emit(args[0], args[1:]))
def encode_lists(self, data):
ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split())
def encode(data):
fringe = [data]
while fringe:
item = fringe.pop()
if isinstance(item, tuple) and (len(item) == 1):
fringe.append(item[0])
elif isinstance(item, list) and (len(item) == 0):
yield nil
elif isinstance(item, (list, tuple)):
yield ap
yield ap
yield cons
fringe.append(item[1:])
fringe.append(item[0])
else:
yield num
yield int(item)
return list(encode(data))
class _partial:
def __init__(self, arg):
self.arg = arg
def __repr__(self):
return f'Partial({repr(self.arg)})'
def decode_lists(self, data):
ap, cons, num, nil, gg = map(self.TOKENS.__getitem__, 'ap cons number nil GG'.split())
def reduce(stack):
while (stack[-3] == '$') and (stack[-2] != '$'):
head, tail = stack[-2], stack[-1]
if head == cons:
xs = self._partial(tail)
elif isinstance(head, self._partial):
if isinstance(tail, list):
xs = [head.arg, *tail]
elif isinstance(tail, tuple):
xs = (head.arg, *tail)
else:
xs = (head.arg, tail)
else:
raise Exception((head, tail))
stack[-3:] = [xs]
stack = ['$', '$']
i = 0
while True:
# print('** ', i, repr(stack), '--', repr(data[i]))
x = data[i]
i += 1
if x == gg: break
elif x == ap: stack.append('$')
elif x == nil: stack.append([]); reduce(stack)
elif x == num: stack.append(data[i]); i += 1; reduce(stack)
else: stack.append(x)
return stack[-1]
def run_tests(self):
gg, = map(self.TOKENS.__getitem__, 'GG'.split())
test_cases = [
[],
[42],
(2, 7),
[(3, 1)],
[[],[],[]],
[0, [42, 11, 12], 3, (8, 9)],
]
for data in test_cases:
image = MachineImage().encode_lists(data)
image += [gg]
rev = MachineImage().decode_lists(image)
assert rev == data, (rev, data)
class Galaxy:
def __init__(self, target='release', api_host=None, api_key=None):
self.state = []
fn = 'libgalaxy' + ('.dylib' if sys.platform == 'darwin' else '.so')
build_target = (target + '/') if target else ''
fn = next(Path(__file__).parent.resolve().parent.glob('**/' + build_target + fn))
print(repr(str(fn)))
self.galexy = ctypes.cdll.LoadLibrary(fn)
p64 = ctypes.POINTER(ctypes.c_int64)
u32 = ctypes.c_uint32
self.galexy.evaluate.argtypes = (u32, p64)
self.galexy.evaluate.restype = p64
self.galexy.load_machine.argtypes = (p64,)
self.galexy.load_machine.restype = None
self.space = SpaceClient(api_host=api_host, api_key=api_key)
def _interact(self, state, event):
flag, state, data = self._evaluate(state, event)
if (flag == 0):
return (state, data)
return self._interact(state, self._send_to_alien(data))
def _evaluate(self, state, event):
self.galexy.load_machine(None)
image = MachineImage().emit_call('galaxy', state, event)
data = (ctypes.c_int64 * len(image))(*image)
res = self.galexy.evaluate(len(image), data)
res = MachineImage().decode_lists(res)
# print('<', repr(res))
return res
def _send_to_alien(self, data):
print('<~', repr(data))
res = self.space.send(data)
print('~>', repr(res))
return res
def _render_frame(self, images):
self.frame = images
def eval_step(self, mouse):
print('>', (self.state))
print('>', (mouse or (0, 0)))
(new_state, images) = self._interact(self.state, mouse or (0, 0))
print('<', (new_state))
# print('<', (images))
self.state = new_state
self._render_frame(images)
if __name__ == '__main__':
g = Galaxy()
r = g.eval_step((0,0))
print(repr(r))
| 2.5 | 2 |
violation/fields/rule.py | adepeter/django-violations | 1 | 12795976 | from django import forms
class RulesModelMultipleChoiceField(forms.ModelMultipleChoiceField):
def label_from_instance(self, obj):
return '%(rule_name)s' % {'rule_name': obj.name}
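# Usage sketch (assumption: a Rule model and a ModelForm exist elsewhere in the app):
#
#   class ViolationForm(forms.ModelForm):
#       rules = RulesModelMultipleChoiceField(queryset=Rule.objects.all())
#
# label_from_instance() above controls how each Rule is rendered in the widget,
# here simply as the rule's name.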
| 1.984375 | 2 |
harness/training/model_trainer.py | cmu-sei/augur-code | 0 | 12795977 | # Augur: A Step Towards Realistic Drift Detection in Production MLSystems - Code
# Copyright 2022 Carnegie Mellon University.
#
# NO WARRANTY. THIS CARNEGIE MELLON UNIVERSITY AND SOFTWARE ENGINEERING INSTITUTE MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. CARNEGIE MELLON UNIVERSITY MAKES NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED, AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF THE MATERIAL. CARNEGIE MELLON UNIVERSITY DOES NOT MAKE ANY WARRANTY OF ANY KIND WITH RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
#
# Released under a MIT (SEI)-style license, please see license.txt or contact <EMAIL> for full terms.
#
# [DISTRIBUTION STATEMENT A] This material has been approved for public release and unlimited distribution. Please see Copyright notice for non-US Government use and distribution.
#
# Carnegie Mellon® is registered in the U.S. Patent and Trademark Office by Carnegie Mellon University.
#
# This Software includes and/or makes use of the following Third-Party Software subject to its own license:
# 1. Tensorflow (https://github.com/tensorflow/tensorflow/blob/master/LICENSE) Copyright 2014 The Regents of the University of California.
# 2. Pandas (https://github.com/pandas-dev/pandas/blob/main/LICENSE) Copyright 2021 AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team, and open source contributors.
# 3. scikit-learn (https://github.com/scikit-learn/scikit-learn/blob/main/COPYING) Copyright 2021 The scikit-learn developers.
# 4. numpy (https://github.com/numpy/numpy/blob/main/LICENSE.txt) Copyright 2021 NumPy Developers.
# 5. scipy (https://github.com/scipy/scipy/blob/main/LICENSE.txt) Copyright 2021 SciPy Developers.
# 6. statsmodels (https://github.com/statsmodels/statsmodels/blob/main/LICENSE.txt) Copyright 2018 <NAME>, Scipy developers, statsmodels Developers.
# 7. matplotlib (https://github.com/matplotlib/matplotlib/blob/main/LICENSE/LICENSE) Copyright 2016 Matplotlib development team.
#
# DM22-0044
from sklearn.model_selection import KFold
import tensorflow.keras.callbacks as tfcb
from utils.logging import print_and_log
class ModelTrainer:
"""Has functionalities to train a model"""
def __init__(self, model_module, config_params):
self.model_module = model_module
self.config_params = config_params
self.evaluation_input = None
self.evaluation_output = None
@staticmethod
def get_callbacks(patience=2):
"""Gets helper callbacks to save checkpoints and allow early stopping when needed."""
file_path = ".model_weights.hdf5"
es = tfcb.EarlyStopping('val_loss', patience=patience, mode="min")
msave = tfcb.ModelCheckpoint(file_path, save_best_only=True)
return [es, msave]
def train(self, training_set):
"""Train."""
print_and_log("TRAINING")
model = self.model_module.create_model()
epochs = self.config_params.get("epochs")
batch_size = self.config_params.get("batch_size")
print_and_log(f'Starting training with hyper parameters: epochs: {epochs}, batch size: {batch_size}')
validation_data = None
callbacks = None
if training_set.has_validation():
print_and_log("Validation data found")
validation_data = (training_set.x_validation, training_set.y_validation)
callbacks = self.get_callbacks(patience=5)
history = model.fit(training_set.x_train, training_set.y_train,
epochs=epochs,
validation_data=validation_data,
batch_size=batch_size,
callbacks=callbacks)
print_and_log(f'Final training result ({len(history.history.get("loss"))} epochs): '
f'loss: {history.history.get("loss")[-1]}, '
f'accuracy: {history.history.get("accuracy")[-1]}')
if training_set.has_validation():
print_and_log(f'Validation: val_loss: {history.history.get("val_loss")[-1]}, '
f'val_accuracy: {history.history.get("val_accuracy")[-1]}')
print("Done training!", flush=True)
return model, history
def evaluate(self, trained_model, evaluation_input=None, evaluation_output=None):
"""Does an evaluation."""
print_and_log("EVALUATION")
print("Starting evaluation", flush=True)
if self.evaluation_input is not None:
evaluation_input = self.evaluation_input
if self.evaluation_output is not None:
evaluation_output = self.evaluation_output
if evaluation_input is None or evaluation_output is None:
raise Exception("Evaluation input or output not passed properly to evaluate.")
batch_size = self.config_params.get("batch_size")
scores = trained_model.evaluate(evaluation_input, evaluation_output, batch_size=batch_size)
print(f'Done! Evaluation loss and acc: {scores}')
return scores
def cross_validate(self, full_dataset, num_folds=5):
"""k-fold cross-validation to check how model is performing by selecting different sets to train/validate."""
# Define the K-fold Cross Validator
print_and_log("CROSS VALIDATION")
kfold = KFold(n_splits=num_folds, shuffle=True)
# K-fold Cross Validation model evaluation
acc_per_fold = []
loss_per_fold = []
fold_no = 1
for train_index, test_index in kfold.split(full_dataset.get_single_input(), full_dataset.get_output()):
# Generate a print
print('------------------------------------------------------------------------')
print_and_log(f'Training for fold {fold_no} ...')
training_set = self.model_module.get_fold_data(full_dataset, train_index, test_index)
# Fit data to model
print_and_log(f'Training fold samples: {training_set.num_train_samples}')
model, history = self.train(training_set)
# Generate generalization metrics
print_and_log(f'Evaluation fold samples: {training_set.num_validation_samples}')
scores = self.evaluate(model, training_set.x_validation, training_set.y_validation)
print_and_log(f'Score for fold {fold_no}: {model.metrics_names[0]} of {scores[0]}; '
f'{model.metrics_names[1]} of {scores[1] * 100}%')
acc_per_fold.append(scores[1] * 100)
loss_per_fold.append(scores[0])
# Increase fold number
fold_no = fold_no + 1
print_and_log("Done with cross-validation!")
def split_and_train(self, dataset_instance):
"""Splits a dataset and trains the configured model, returning it."""
training_set = self.model_module.split_data(dataset_instance, self.config_params.get("validation_size"))
print_and_log(f'Dataset samples {dataset_instance.get_number_of_samples()}, '
f'training samples: {len(training_set.x_train[0])}, '
f'validation samples: {len(training_set.x_validation[0])}')
trained_model, history = self.train(training_set)
# Store evaluation input/outputs as the validation split, in case evaluation is done later.
self.evaluation_input = training_set.x_validation
self.evaluation_output = training_set.y_validation
return trained_model
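# Usage sketch (illustrative): the model_module passed to ModelTrainer is expected to
# expose create_model(), split_data(), and get_fold_data() as used above. The names
# below are assumptions for the example, not part of this file.
#
#   trainer = ModelTrainer(my_model_module, {"epochs": 10, "batch_size": 32,
#                                            "validation_size": 0.2})
#   model = trainer.split_and_train(dataset)   # dataset: a project dataset instance
#   trainer.evaluate(model)                    # reuses the stored validation split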
| 1.164063 | 1 |
temp/trail.py | fauzaanirsyaadi/Travel | 0 | 12795978 |
from flask import Flask, render_template,request, url_for,redirect,send_file,session,abort
from flask_sqlalchemy import SQLAlchemy
from io import BytesIO
from sqlalchemy.orm import scoped_session,sessionmaker
from base64 import b64encode
import base64
from sqlalchemy import func
import sqlite3
from sqlalchemy.sql import text
#from flaskblog import
#from flaskblog.models import User, Posts
#from flaskblog.forms import RegistrationForm, LoginForm
import os
#import login_user
#from flask import flask_login
#from flask import Login_Manager, logged_in,login_user,logout_user,current_user,login_required
from sqlalchemy import or_
print("golu")
#from flask.ext.login import LoginManager
#lm = LoginManager()
#lm.init_app(app)
#lm.login_view = 'login'
#from app.admin import admin_blueprint
from datetime import datetime
today = datetime.now()
print(today)
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI']= 'mysql://root:@localhost/busservisenew'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SESSION_TYPE']='filesystem'
app.secret_key = <KEY>'
#login_manager=Login_Manager()
print('jhgj')
db = SQLAlchemy(app)
class Posts(db.Model):
ID=db.Column(db.Integer, primary_key=True, autoincrement=True)
firstname=db.Column(db.String(50), unique=False, nullable=True)
lastname=db.Column(db.String(50), unique=False, nullable=True)
email=db.Column(db.String(50), unique=False, nullable=True)
password=db.Column(db.String(50), unique=False, nullable=True)
month=db.Column(db.String(10), unique=False, nullable=True)
day=db.Column(db.String(40),unique=False, nullable=True)
gender=db.Column(db.String(4),unique=False, nullable=True)
year=db.Column(db.String(10),unique=False, nullable=True)
date=db.Column(db.String(40),unique=False, nullable=True)
ima=db.Column(db.LargeBinary,unique=False, nullable=True)
class Busesdata(db.Model):
id=db.Column(db.Integer, primary_key=True, autoincrement=True)
busname=db.Column(db.String(50), unique=False, nullable=True)
seats=db.Column(db.String(50), unique=False, nullable=True)
ticket_per_seat=db.Column(db.String(50), unique=False, nullable=True)
date=db.Column(db.String(50), unique=False, nullable=True)
city=db.Column(db.String(50), unique=False, nullable=True)
type=db.Column(db.String(10), unique=False, nullable=True)
ima=db.Column(db.LargeBinary)
day=db.Column(db.String(20),nullable=True)
monday=db.Column(db.String(20),nullable=True)
tuesday=db.Column(db.String(20),nullable=True)
wednesday=db.Column(db.String(20),nullable=True)
thursday=db.Column(db.String(20),nullable=True)
friday=db.Column(db.String(20),nullable=True)
saterday=db.Column(db.String(20),nullable=True)
@app.route("/")
def hello():
return render_template('index.html')
@app.route("/name",methods=['GET','POST'])
def home():
if (request.method == 'POST'):
FIRST=request.form.get('firstname')
LAST=request.form.get('lastname')
EMAIL=request.form.get('email')
Month=request.form.get('month')
DAY=request.form.get('day')
YEAR=request.form.get('year')
GANDER=request.form.get('gender')
PASSWORD=request.form.get('password')
file=request.files['ima']
        if FIRST==" " or LAST==" " or EMAIL==" " or Month==" " or GANDER==" " or PASSWORD==" " or file==" ":
            error = "Please fill in all the fields"
            return render_template('name.html', error=error)
else:
entry=Posts(day=DAY,year=YEAR,gender=GANDER,month=Month,firstname=FIRST,lastname=LAST,email=EMAIL,password=PASSWORD,ima=file.read(),date=today)
db.session.add(entry)
db.session.commit()
return render_template('name.html')
return render_template('name.html')
@app.route('/busesdata' ,methods=['GET','POST'])
def busesdata():
    added="BUS SUCCESSFULLY ADDED"
selectall="PLEASE SELECT ALL"
if request.method=="POST":
BUSNAME=request.form.get('busname')
SEATS=request.form.get('seats')
TICKECT_PER_SEAT=request.form.get('ticket_per_seat')
DATE=request.form.get('date')
CITY=request.form.get('city')
TYPE=request.form.get('type')
file=request.files['ima']
DAY=request.form.get("son")
MONDAY=request.form.get("mon")
TUESADAY=request.form.get("tue")
WEDNESDAY=request.form.get("wed")
THURSDAY=request.form.get("thu")
FRIDAY=request.form.get("fri")
SATERDAY=request.form.get("sat")
if BUSNAME==None or SEATS==None or TICKECT_PER_SEAT==None or file==None :
            return render_template('busesdata.html', selectall=selectall)
else:
print(MONDAY)
            entry1=Busesdata(busname=BUSNAME,seats=SEATS,ticket_per_seat=TICKECT_PER_SEAT,date=DATE, city=CITY,type=TYPE,ima=file.read(),day=DAY,monday=MONDAY,tuesday=TUESADAY,wednesday=WEDNESDAY,thursday=THURSDAY,friday=FRIDAY,saterday=SATERDAY)
db.session.add(entry1)
db.session.commit()
return render_template('busesdata.html', added=added)
return render_template('busesdata.html')
@app.route('/index')
def image():
event = Posts.query.filter_by(firstname='ghar').first()
image = b64encode(event.ima)
image = base64.b64encode(event.ima).decode('ascii')
return render_template('index.html',data=list, image=image)
@app.route('/loginreal', methods=['POST','GET'])
def loginreal():
    invalid="invalid username or password"
if request.method=='POST':
username=request.form['username']
password=request.form['password']
ragisted=Posts.query.filter_by(firstname=username,password=password).first()
if ragisted is None:
return render_template('loginreal.html',invalid=invalid)
else:
session['ragisted']=True
event = Posts.query.filter_by(firstname=username).first()
#image = b64encode(event.ima)
image = base64.b64encode(event.ima).decode('ascii')
return render_template('index.html',data=list, image=image,username=username)
#return render_template('/index.html')
return render_template('loginreal.html')
@app.route('/loginpage', methods=['POST','GET'])
#@loginreal():
def loginpage():
if request.method=="POST":
#return render_template('loginreal.html')
loginreal()
return render_template('loginreal.html')
@app.route('/booking', methods=["POST","GET"])
def booking1():
global person
noperson="no person selected "
person=request.form.get('person')
city = request.form.get('city')
date1=Busesdata.query.filter_by(city=city).all()
for date in date1:
print(date.date, date.id)
day=(date.day)
monday=print(date.monday)
tuesday=print(date.tuesday)
wednesday=print(date.wednesday)
friday=print(date.friday)
saterday=print(date.saterday)
if date.day=='sunday':
import datetime
today = datetime.date.today()
sunday = today + datetime.timedelta((6 - today.weekday() % 7))
print(sunday)
entry1 = Busesdata(date=sunday)
update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.day:sunday})
db.session.commit()
if date.monday=='monday':
import datetime
today = datetime.date.today()
monday = today + datetime.timedelta((0 - today.weekday() % 7))
if monday<today:
monday=today+datetime.timedelta(7+today.weekday()%7)
print(monday)
entry1 = Busesdata(date=monday)
update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.monday:monday})
db.session.commit()
if date.tuesday=='tuesday':
import datetime
today = datetime.date.today()
tuesday = today + datetime.timedelta((1 - today.weekday() % 7))
print(tuesday)
entry1 = Busesdata(date=tuesday)
db.session.commit()
update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.tuesday:tuesday})
if date.thursday=='thursday':
import datetime
today = datetime.date.today()
thursday = today + datetime.timedelta((3 - today.weekday() % 7))
print(thursday)
entry1 = Busesdata(date=thursday)
update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.thursday:thursday})
db.session.commit()
if date.wednesday=='wednesday':
import datetime
today = datetime.date.today()
wednesday = today + datetime.timedelta((2 - today.weekday() % 7))
print(wednesday)
entry1 = Busesdata(date=wednesday)
update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.wednesday:wednesday})
db.session.commit()
if date.friday=='friday':
import datetime
today = datetime.date.today()
friday = today + datetime.timedelta((4 - today.weekday() % 7))
print(friday)
entry1 = Busesdata(date=friday)
update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.friday:friday})
db.session.commit()
if date.saterday=='saterday':
import datetime
today = datetime.date.today()
saterday = today + datetime.timedelta((5 - today.weekday() % 7))
print(saterday)
update5 = Busesdata.query.filter_by(id=date.id).update({Busesdata.saterday:saterday})
db.session.commit()
error="select person"
if request.method=="POST":
#return redirect(url_for('mybooking1',id=id))
#busname=request.form['busname']
date=request.form['date']
global z
city=request.form.get('city')
person=request.form.get('person')
if person=='0':
return render_template('booking.html',noperson=noperson)
else:
id=Bookingdata.query.all()
            seat=Busesdata.query.filter_by(city=city).filter(or_(
                Busesdata.day == date, Busesdata.monday == date, Busesdata.tuesday == date,
                Busesdata.wednesday == date, Busesdata.thursday == date,
                Busesdata.friday == date, Busesdata.saterday == date)).all()
for seat1 in seat:
if seat1==None:
return render_template('booking.html', rows=seat)
else:
return render_template('booking.html', rows=seat,date=date)
#(int(float((seat1.seats)))-(float(result1)))
#return redirect(url_for(booking))
#print(x)
#if x<1:
#error="NO BUS AVELABLE"
#return render_template('booking.html',error=error)
#print(x)
#name=seat.busname
#date=seat.date
#prise=seat.ticket_per_seat
#city=seat.city
#type=seat.type
#return render_template('booking.html', rows=seat)
return render_template('booking.html')
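
# Editorial sketch (hypothetical helper, not called by the routes above): every
# per-day block in booking1() tries to compute "the next date that falls on
# weekday k". Note the original expressions use (k - today.weekday() % 7), which
# Python parses as k - (today.weekday() % 7); the corrected arithmetic is shown here.
def next_weekday(weekday_index):
    import datetime
    today = datetime.date.today()
    return today + datetime.timedelta((weekday_index - today.weekday()) % 7)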
class Bookingdata(db.Model):
id=db.Column(db.String(20) ,primary_key=True ,nullable=True)
busname=db.Column(db.String(50), unique=False, nullable=True)
seats=db.Column(db.String(50), unique=False, nullable=True)
collection=db.Column(db.String(50), unique=False, nullable=True)
bookingdate=db.Column(db.String(50), unique=False, nullable=True)
#day=db.Column(db.String(20),nullable=True)
@app.route('/booking', methods=["POST","GET"])
@app.route('/mybooking/<string:id>/<string:date>',methods=['POST','GET'])
def mybooking1(id,date):
    sucessfull="BOOKED SUCCESSFULLY"
today = datetime.now().strftime("%Y-%m-%d")
seat1=Busesdata.query.filter_by(id=id).all()
for seat in seat1:
print('g')
if person==0:
x=request.form.get('person')
print(x)
x=seat.seats-int(person)
today2=seat.date
print(today2)
X = int(person) * int(seat.ticket_per_seat)
if today2 != today:
update5 = Busesdata.query.filter_by(id=id).update({Busesdata.seats: '50'})
update5 = Busesdata.query.filter_by(id=id).update({Busesdata.date: today})
#delete=Booking.query.filter_by(id=id).delete()
db.session.commit()
if request.method=="POST":
seatfill=int(person)
money=int(X)
PERSON=person
BUSNAME=seat.busname
TYPE=seat.type
ID=seat.id
Bookingdate = request.form.get('bookingdate')
update5=Busesdata.query.filter_by(id=id).update({Busesdata.seats:x})
entry1=Bookingdata(seats=PERSON,id=ID,busname=BUSNAME,collection=X,bookingdate=Bookingdate)
db.session.add(entry1)
db.session.commit()
return render_template('mybooking.html',X=X,date=date,sucessfull=sucessfull)
return render_template('mybooking.html',X=X,date=date)
@app.route('/schedule1')
def viewbookingdata():
buses=Busesdata.query.all()
booking=Bookingdata.query.all()
#for buses in buses:
# n=(int(float(buses.totelseats)-int(float(buses.seats))))
#print(n)
return render_template('schedule1.html' ,buses=buses,booking=booking)
@app.route('/schedule2/<string:id>')
def viewbookingdata2(id):
buses=Busesdata.query.all()
b=Bookingdata.query.with_entities(func.sum(Bookingdata.seats)).filter_by(id = id).first()
for b in b:
print(b)
collection=Bookingdata.query.with_entities(func.sum(Bookingdata.collection)).filter_by(id = id).first()
for collection in collection:
print('a')
#number_trained = db.session.execute(text("select sum seats from Bookingdata where location=id").first())
#print(sum)
#booking=Bookingdata.query.all()
name1= Bookingdata.query.filter_by(id=id).first()
name=name1.busname
date=name1.bookingdate
#for buses in buses:
# n=(int(float(buses.totelseats)-int(float(buses.seats))))
#print(n)
return render_template('schedule2.html',date=date ,buses=buses,b=b,name=name,collection=collection)
@app.route('/admin' ,methods=['GET','POST'])
def login():
error = None
if request.method == 'POST':
if request.form['user'] != 'admin' or request.form['password'] != '<PASSWORD>':
error = 'Invalid Credentials. Please try again.'
else:
session['logged_in']=True
return render_template('welcomeadmin.html')
#app.secret_key=os.unrandom(12)
return render_template('admin.html', error=error)
@app.route('/dataediting')
def dataediting():
event = Busesdata.query.all()
return render_template('dataediting.html',event=event)
@app.route('/dataediting/<string:id>')
def dataediting1(id):
delete1=Busesdata.query.filter_by(id=id).delete()
db.session.commit()
print(delete1)
print(id)
print('golu')
return redirect (url_for('dataediting'))
@app.route('/busesdata1/<string:id>',methods=['POST','GET'])
def busesdata1(id):
    added="BUS EDITED SUCCESSFULLY"
select=Busesdata.query.filter_by(id=id).first()
global busname
global ticket_per_seat
global city
global day
global type
busname1=select.busname
ticket_per_seat1=select.ticket_per_seat
seat1=select.seats
city1=select.city
day1=select.day
type1=select.type
if request.method=="POST":
BUSNAME = request.form.get('busname')
CITY = request.form.get('city')
SEAT = request.form.get('seats')
TICKET_PER_SEAT = request.form.get('ticket_per_seat')
update=Busesdata.query.filter_by(id=id).update({Busesdata.busname:BUSNAME})#,{city:CITY},{seat:SEAT},{day:DAY},{ticket_per_seat:TICKER_PER_SEAT})
update1=Busesdata.query.filter_by(id=id).update({Busesdata.city:CITY})
update2=Busesdata.query.filter_by(id=id).update({Busesdata.seats:SEAT})
update3=Busesdata.query.filter_by(id=id).update({Busesdata.ticket_per_seat:TICKET_PER_SEAT})
db.session.commit()
return render_template('busesdata1.html',added=added)
return render_template('busesdata1.html', busname1=busname1, ticket_per_seat1=ticket_per_seat1, seat1=seat1, city1=city1,day1=day1, type1=type1)
@app.route('/busesdata1',methods=['POST','GET'])
def updated():
BUSNAME=request.form.get('busname')
CITY=request.form.get('city')
SEAT=request.form.get('seats')
DAY=request.form.get('day')
TICKER_PER_SEAT=request.form.get('ticket_per_seat')
@app.route('/download')
def download():
file_data=Posts.query.filter_by(ID=1).first()
return send_file(BytesIO(file_data.ima),attachment_filename='sak.PNG' ,as_attachment=True)
@app.route("/newlogin")
def loginsubmit():
return render_template('newlogin.html')
@app.route("/schedule")
def schedule():
event2=Busesdata.query.all()
for event in event2:
image = b64encode(event.ima)
image = base64.b64encode(event.ima).decode('ascii')
return render_template('schedule.html' ,rows=event2,data=list,image=image)
app.run(debug=True)
| 2.078125 | 2 |
stranding/stranding.py | 23andMe/stranding | 6 | 12795979 | <reponame>23andMe/stranding
import logging
import warnings
from Bio.pairwise2 import align, format_alignment
from Bio.Seq import Seq
from seqseek import Chromosome
from .exceptions import (MissingReferenceFlank,
InconsistentAlignment,
Unstrandable,
FlanksTooShort)
LOGGER = logging.getLogger(__name__)
DEFAULT_MIN_FLANK_LENGTH = 15
DEFAULT_WINDOW_EXTENSION = 0
FORWARD_STRAND = 1
REVERSE_STRAND = -1
# empirically derived default values from stranding hundreds of thousands of flanks
# from an Illumina beadchip. Two points are awarded for each matching base and one
# point is subtracted for each mismatch. Gaps are strongly discouraged with a 5 point
# penalty.
DEFAULT_MATCH_SCORE = 2
DEFAULT_MISMATCH_PENALTY = -1
DEFAULT_GAP_OPEN_PENALTY = -5
DEFAULT_TOLERANCE = 0.77
class GenomeStranding(object):
def __init__(self,
min_flank_length=DEFAULT_MIN_FLANK_LENGTH,
tolerance=DEFAULT_TOLERANCE,
match_score=DEFAULT_MATCH_SCORE,
mismatch_penalty=DEFAULT_MISMATCH_PENALTY,
gap_open_penalty=DEFAULT_GAP_OPEN_PENALTY):
self.min_flank_length = min_flank_length
self.tolerance = tolerance
self.match_score = match_score
self.mismatch_penalty = mismatch_penalty
self.gap_open_penalty = gap_open_penalty
if self.min_flank_length < DEFAULT_MIN_FLANK_LENGTH:
warnings.warn('Short flank lengths may lead to inaccurate alignments')
def is_high_scoring(self, score, query):
if len(query) < self.min_flank_length:
return False
return score > len(query) * self.match_score * self.tolerance
def is_perfect_score(self, score, query):
if len(query) < self.min_flank_length:
return False
return score == len(query) * self.match_score
def align(self, ref, query, score_only=True):
"""
Drops to biopython's pairwise2.align.localms to perform a local alignment
between the reference and query sequences using the specified (or default)
score and penalty values.
        score_only=True instructs biopython to only return the integer score.
This is claimed to be faster and less memory intensive.
Otherwise a tuple of (align1, align2, score, begin, end) is returned.
"""
alignment = align.localms(ref, query, self.match_score, self.mismatch_penalty,
self.gap_open_penalty, self.mismatch_penalty,
score_only=score_only)
if score_only and not alignment:
# when biopython doesn't find any alignments in score_only mode it returns
# an empty list which we treat as a score of 0
return 0
return alignment
def align_and_log(self, ref, query):
alignments = self.align(ref, query, False)
for alignment_tuple in alignments:
a1, a2, score, begin, end = alignment_tuple
if self.is_high_scoring(score, query):
LOGGER.error(format_alignment(*alignment_tuple))
def strand_flanks(self, _5p, _3p, build, chr_name, pos, window=DEFAULT_WINDOW_EXTENSION):
"""
This is a flank stranding algorithm for sequences mapped to a human genome
reference assembly. Mapping coordinates are required! This is not BLAT or BLAST.
Given one or both flanks and genome mapping coordinates it determines
if the flanking sequence(s) correspond to the forward or reverse strand of the
specified reference assembly.
It can optionally look beyond exact mapping coordinates to search nearby regions
(up to the `window` size specified) but takes longer as local alignments are
expensive on long sequences.
The `tolerance` setting defines the minimum alignment score relative to the
query sequence length. This is also impacted by changes to the alignment
scoring parameters.
When `tolerance` is 1.0 and `window` is 0.0 the algorithm will only check for
exact sequence matches at the specified coordinates. This is the most performant
use case as no alignments are performed.
Otherwise, the algorithm will load the reference sequences for the 5' and 3'
flanks at the specified coordinates extending in each direction extended by
`window`. These sequences and their reverse complements are aligned and
scored against the query flanks. Alignments scoring above
`len(query flank) * match_score * tolerance` are accepted.
(a perfect alignment has a score of `len(query flank) * match_score`)
A return value of 1 indicates that alignments were accepted against the forward
reference sequence and the flanks are on the forward strand of the specified
reference assembly.
A return value of -1 indicates that alignments were accepted against the
reverse complement of the forward reference sequence and the flanks correspond
to the "reverse" or "minus" strand of the specified reference assembly.
An InconsistentAlignment exception is raised if alignments are accepted on
both strands. An Unstrandable exception is raised if no alignments are accepted.
"""
# sanity checks
if pos == 0:
raise Unstrandable('Position 0 is unmapped')
elif chr_name in ('0', 0):
raise Unstrandable('Chromosome 0 is unmapped')
elif max(len(_5p), len(_3p)) < self.min_flank_length:
raise FlanksTooShort('At least one flank must be longer than the specified'
' minimum flank length of %d' % self.min_flank_length)
# chromosome-specific conventions
loop = chr_name == 'MT'
chr_name = chr_name if chr_name != 'XY' else 'X'
max_length = max(len(_5p), len(_3p))
# reference sequences
try:
chromosome = Chromosome(chr_name, build, loop=loop)
ref_5p = chromosome.sequence(pos - window - max_length, pos + window)
ref_3p = chromosome.sequence(pos + 1 - window, pos + max_length + window + 1)
except ValueError:
raise MissingReferenceFlank(
'Could not find flanks for %s %d %d' % (chr_name, pos, window))
# exact comparisons are cheap so try this first
if window == 0:
if _3p == ref_3p:
return FORWARD_STRAND
if _5p == ref_5p:
return FORWARD_STRAND
ref_5p_RC = str(Seq(ref_5p).reverse_complement())
ref_3p_RC = str(Seq(ref_3p).reverse_complement())
if window == 0:
if _3p == ref_5p_RC:
return REVERSE_STRAND
if _5p == ref_3p_RC:
return REVERSE_STRAND
if window == 0 and self.tolerance == 1.0:
raise Unstrandable('Strict stranding failed')
# alignments are expensive so try to do as few as possible
fwd_5p_score = self.align(ref_5p, _5p)
if self.is_perfect_score(fwd_5p_score, _5p):
return FORWARD_STRAND
fwd_3p_score = self.align(ref_3p, _3p)
if self.is_perfect_score(fwd_3p_score, _3p):
return FORWARD_STRAND
rev_5p_score = self.align(ref_5p_RC, _3p)
if self.is_perfect_score(rev_5p_score, _3p):
return REVERSE_STRAND
rev_3p_score = self.align(ref_3p_RC, _5p)
if self.is_perfect_score(rev_3p_score, _5p):
return REVERSE_STRAND
is_fwd = self.is_high_scoring(fwd_5p_score, _5p) or self.is_high_scoring(fwd_3p_score, _3p)
is_rev = self.is_high_scoring(rev_5p_score, _3p) or self.is_high_scoring(rev_3p_score, _5p)
if is_fwd and is_rev:
# Alignments were accepted on both strands (!)
# The flanks may be too short or the tolerance may be too loose.
LOGGER.error('Forward alignments')
self.align_and_log(ref_5p, _5p)
self.align_and_log(ref_3p, _3p)
LOGGER.error('Reverse alignments')
self.align_and_log(ref_5p_RC, _3p)
self.align_and_log(ref_3p_RC, _5p)
raise InconsistentAlignment('Inconsistent alignments')
elif is_fwd:
return FORWARD_STRAND
elif is_rev:
return REVERSE_STRAND
raise Unstrandable('No matching alignments')
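

if __name__ == '__main__':
    # Editorial sketch: illustrate the acceptance-threshold arithmetic described in
    # the docstrings above without touching reference data (strand_flanks would need
    # seqseek genome files). The query flank below is a made-up 20-mer. Because this
    # module uses relative imports, run it as a module, e.g. `python -m stranding.stranding`.
    stranding = GenomeStranding()
    query = 'ACGT' * 5
    perfect = len(query) * stranding.match_score
    threshold = len(query) * stranding.match_score * stranding.tolerance
    print('perfect score: %d, acceptance threshold: %.1f' % (perfect, threshold))
    print('high scoring at perfect score:', stranding.is_high_scoring(perfect, query))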
| 2.375 | 2 |
source/utils.py | ogencoglu/Language-agnostic_BERT_COVID19_Twitter | 2 | 12795980 | '''
utility functions
'''
__author__ = '<NAME>'
import os
from os.path import join
from os.path import abspath
import json
import pandas as pd
import numpy as np
from configs import config as cf
def is_available(filename):
'''
[filename] : str
'''
return os.path.isfile(filename)
def chunks(lst, n):
'''
Yield successive n-sized chunks from list
[lst] : python list
[n] : int
'''
for i in range(0, len(lst), n):
yield lst[i:i + n]
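    # Editorial example: list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]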
def read_intent_dataset(verbose=True):
'''
Load 'Intent' dataset
[verbose] : bool, verbosity level
'''
# read as a pandas dataframe
data = []
for lang in ['en', 'es', 'fr']:
for ds in ['train', 'test', 'eval']:
path = abspath(join(cf.INTENT_DIR, lang, '{}.tsv'.format(ds)))
df = pd.read_csv(path, header=None, sep='\t',
names=['text', 'class'])
data.append(df)
data = pd.concat(data)
# merge certain categories (see configs.py) and rename columns
data['class'] = data['class'].replace(cf.intent_label_map)
# remove trivial (too easy) categories
for cat in ['hi', 'okay_thanks']:
data = data[data['class'] != 'intent:{}'.format(cat)]
if verbose:
print('\t"Intent" data shape={}'.format(data.shape))
return data
def read_questions_dataset(verbose=True):
'''
Load 'Questions' dataset
[verbose] : bool, verbosity level
'''
# read as a pandas dataframe
data_path = abspath(join(cf.QUESTIONS_DIR, 'final_master_dataset.csv'))
data = pd.read_csv(data_path, delimiter=',',
usecols=['Question', 'Category'])
data.rename(columns={'Question': 'text', 'Category': 'class'},
inplace=True)
data = data[~data['class'].isna()] # remove unannotated rows
# split label into class and subclass, keep only class
data[['class', 'subclass']] = data['class'].str.split('-', 1, expand=True)
data['class'] = data['class'].str.strip()
data.drop(['subclass'], axis=1, inplace=True)
data = data[[i in cf.questions_relevant_categories for i in data['class']]]
if verbose:
print('\t"Questions" data shape={}'.format(data.shape))
return data
def merge_datasets(embeddings='labse', verbose=True):
'''
Merge 'Intent' and 'Questions' datasets
[embeddings] : str, type of embeddings to load ('bert' or 'labse')
[verbose] : bool, verbosity level
'''
# load datasets
intent = read_intent_dataset(verbose=False)
questions = read_questions_dataset(verbose=False)
merged = pd.concat([intent, questions])
# load corresponding embeddings
if embeddings == 'labse':
emb_to_load = (cf.intent_embeddings, cf.questions_embeddings)
elif embeddings == 'bert':
emb_to_load = (cf.intent_embeddings_bert, cf.questions_embeddings_bert)
else:
raise ValueError("embeddings argument can be 'bert' or 'labse'")
print(f'{embeddings} embeddings loaded.')
intent_embeddings = np.load(abspath(join(cf.INTENT_DIR, emb_to_load[0])))
questions_embeddings = np.load(abspath(join(cf.QUESTIONS_DIR,
emb_to_load[1])))
merged_embeddings = np.vstack([intent_embeddings, questions_embeddings])
assert merged.shape[0] == merged_embeddings.shape[0]
if verbose:
print('Full data shape={}'.format(merged.shape))
return merged, merged_embeddings
# _____________ Logging related functions _____________
def convert(o):
if isinstance(o, np.int64):
return int(o)
raise TypeError
def save_logs(logs_dict, dict_name):
'''
Save best hyperparameters dictionary to "logs" directory
[logs_dict] : dict
[dict_name] : str
'''
json.dump(logs_dict,
open('{}/{}.json'.format(cf.LOGS_DIR,
dict_name),
'w'), default=convert)
print('Best hyper-parameters saved...')
return None
def load_logs(dict_name):
'''
Load best hyperparameters dictionary from "logs" directory
[dict_name] : str
'''
log_path = '{}/{}.json'.format(cf.LOGS_DIR, dict_name)
if not is_available(log_path):
raise ValueError('Hyperparameters are not available. '
'Please run train.py in "hyper_opt" mode before full '
'training.')
    with open(log_path) as logs_json:
logs = json.load(logs_json)
print('Best hyperparameters loaded...')
return logs
| 2.46875 | 2 |
Mundo2/URI_1042.py | NOBarbosa/Exercicios_Python | 0 | 12795981 | <reponame>NOBarbosa/Exercicios_Python
n = input().split()
lista = [int(i) for i in n]
lista.sort() # sort orders the list in ascending order
print(*lista, sep='\n') # * unpacks the list so every element is printed
print()
print(*n, sep='\n') | 3.671875 | 4 |
fraud_poc/live/predict.py | leosmerling-hopeit/fraud-poc | 2 | 12795982 | <gh_stars>1-10
# AUTOGENERATED! DO NOT EDIT! File to edit: 08_predict.ipynb (unless otherwise specified).
__all__ = ['OrderInfo', '__steps__', '__api__', 'logger', 'model', 'db', 'features', '__init_event__',
'lookup_features', 'predict', '__postprocess__']
# Cell
from typing import Dict, Optional
from datetime import datetime, timezone, timedelta
import os
import json
import pickle
import aioredis
import asyncio
import pandas as pd
import numpy as np
import xgboost as xgb
from dataclasses import dataclass
from hopeit.dataobjects import dataobject
from hopeit.server.serialization import serialize, Serialization, deserialize
from hopeit.server.compression import Compression
from hopeit.app.context import EventContext, PostprocessHook
from hopeit.app.events import Spawn, SHUFFLE
from hopeit.app.api import event_api
from hopeit.app.logger import app_logger
# Cell
@dataobject
@dataclass
class OrderInfo:
order_id: str
customer_id: str
order_date: datetime
email: str
ip_addr: str
order_amount: float
location_lat: float
location_long: float
# Cell
__steps__ = ['lookup_features', 'predict']
__api__ = event_api(
summary="Live: Predict Fraud",
payload=(OrderInfo, "Order Information"),
responses={
        200: (dict, "features used for prediction containing `is_fraud` field as result of prediction"),
404: (str, "customer or email not found (this example only works for known customer_id and email)")
}
)
logger = app_logger()
model = None
db = None
features = ['order_amount',
'num_email_by_customer_id', 'same_email_by_customer_id', 'known_email_by_customer_id',
'num_ip_addr_by_customer_id', 'same_ip_addr_by_customer_id', 'known_ip_addr_by_customer_id',
'num_customer_id_by_email', 'same_customer_id_by_email', 'known_customer_id_by_email',
'order_amount_mean_by_customer_id',
'order_amount_std_by_customer_id', 'order_amount_min_by_customer_id', 'order_amount_max_by_customer_id',
'order_amount_sum_by_customer_id',
'order_amount_mean_by_email',
'order_amount_std_by_email', 'order_amount_min_by_email', 'order_amount_max_by_email',
'order_amount_sum_by_email']
# Cell
async def __init_event__(context: EventContext):
global model, db
if model is None:
file_name = os.path.join(context.env['model']['path'], context.env['model']['name'])
logger.info(context, f"Loading model for prediction from {file_name}...")
with open(file_name, 'rb') as fb:
model = pickle.load(fb)
if db is None:
address = context.env['db']['url']
logger.info(context, f"Connecting to database {address}...")
db = await aioredis.create_redis_pool(address)
# Cell
async def _lookup_db(key: str):
item = await db.get(key)
if item is None:
return None
return deserialize(item, Serialization.PICKLE4, Compression.LZ4, dict)
# Cell
async def lookup_features(order: OrderInfo, context: EventContext) -> Optional[dict]:
logger.info(context, "Looking up features in database...")
assert db, "Connection to database missing."
customer_id_features, email_features = await asyncio.gather(
_lookup_db(order.customer_id),
_lookup_db(order.email)
)
if customer_id_features is None or email_features is None:
return None
return {
**_update_features(order, email_features, 'email'),
**_update_features(order, customer_id_features, 'customer_id'),
**order.to_dict()
}
def _append(data: dict, k: str, new_item: str):
x = data.get(k)
if isinstance(x, str):
x = json.loads(x)
x.append(new_item)
data[k] = list(set(x[-10:]))
def _update_features(order: OrderInfo, data: dict, by: str):
_append(data, f'order_amount_by_{by}', order.order_amount)
_append(data, f'ip_addr_by_{by}', order.ip_addr)
_append(data, f'email_by_{by}', order.email)
_append(data, f'customer_id_by_{by}', order.customer_id)
_calc_counts(data, 'customer_id')
_calc_counts(data, 'email')
_calc_amount_stats(data, 'customer_id')
_calc_amount_stats(data, 'email')
return data
def _calc_counts(data: dict, by: str):
for col in ['ip_addr', 'customer_id', 'email']:
x = data.get(f'{col}_by_{by}')
if x is not None:
data[f'num_{col}_by_{by}'] = len(x)
def _calc_amount_stats(data: dict, by: str):
col = 'order_amount'
x = data.get(f'{col}_by_{by}')
if x is not None:
x = np.array(x)
data[f'{col}_max_by_{by}'] = np.max(x)
data[f'{col}_min_by_{by}'] = np.min(x)
data[f'{col}_mean_by_{by}'] = np.mean(x)
data[f'{col}_std_by_{by}'] = np.std(x)
data[f'{col}_sum_by_{by}'] = np.sum(x)
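# Editorial example (made-up values): with data = {'order_amount_by_email': [10.0, 30.0]},
# _calc_amount_stats(data, 'email') adds order_amount_max/min/mean/std/sum_by_email
# = 30.0 / 10.0 / 20.0 / 10.0 / 40.0 to the same dict (np.std is the population std).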
# Cell
async def predict(data: dict, context: EventContext) -> dict:
df = pd.DataFrame([data], columns=features)
x = xgb.DMatrix(df)
y = model.predict(x)
data['is_fraud'] = y[0].item()
return data
# Cell
async def __postprocess__(payload: Optional[dict], context: EventContext, response: PostprocessHook) -> dict:
if payload is None:
response.status = 404
return "customer or email not found (this example only works for known customer_id and email)"
return payload
| 2.015625 | 2 |
src/ODM2Sensor/settings/production.py | UCHIC/ODM2Sensor | 7 | 12795983 | # TODO: write configuration for production | 1.070313 | 1 |
order/admin.py | Habeebhassan/zarawa_express | 0 | 12795984 | from django.contrib import admin
from .models import Order
# Register your models here.
class OrderAdmin(admin.ModelAdmin):
list_display = ('id', 'name', 'phone_number', 'submitted')
list_filter = ('name', 'submitted')
readonly_fields = ('submitted',)
fieldsets = (
(None, {'fields': ('name', 'phone_number', 'order_details')
}),
('Pick-Up Details', {
'classes': ('collapse',),
'fields': ('pickup_name', 'pickup_address', 'pickup_phone')
}),
('Recipient Details', {
'classes': ('collapse',),
'fields': ('recipient_name', 'dropoff_address', 'recipient_phone', 'submitted')
}),
('Order Admin', {
'classes': ('collapse',),
'fields': ('username',)
})
)
admin.site.register(Order, OrderAdmin)
| 1.820313 | 2 |
rapt/cmds/add/buildpack.py | yougov/rapt | 1 | 12795985 | import click
from rapt.connection import get_vr
from rapt.models import query, models
from rapt.util import edit_yaml, dump_yaml
from pprint import pformat
@click.command()
def buildpack():
tmpl = {
'repo_url': '',
'repo_type': ['git', 'hg'],
'description': '',
'order': 0,
}
vr = get_vr()
info = {
'available buildpacks': [
bp.repo_url for bp in query('buildpack', vr)
]
}
config = edit_yaml(dump_yaml(tmpl),
dump_yaml(info))
click.echo('Creating buildpack with following config:\n')
click.echo(pformat(config))
click.echo()
if click.confirm('Create buildpack?'):
bp = models.Buildpack(vr, config)
bp.create()
        click.echo('Created %s %s!' % (bp.repo_url, bp.resource_uri))
| 2.203125 | 2 |
dcclient/rpc.py | asgard-lab/driver | 0 | 12795986 | # Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" RPC class used to communicate with the hardware
"""
import gzip
from StringIO import StringIO as sio
import os
from requests_toolbelt import MultipartEncoder
import requests
#import pdb
class RPC:
""" RPC class. Used to connect to the client and pass the XML files.
"""
def __init__(self, username, password, host, method):
self.username = username
self.password = password
self.host = host
self.method = method
def _create_url(self):
""" Internal method that returns the switches' URLs given the cfg
attributes.
"""
return self.method + '://' + self.host + '/System/File/file_config.html'
def send_xml(self, xml_content):
""" Method used to send a given xml file to the switches
"""
# set url being used
url = self._create_url()
ziped = sio()
with gzip.GzipFile(fileobj=ziped, mode='w') as gzip_file:
gzip_file.write(xml_content)
run_data = ziped.getvalue()
ziped.close()
# pdb.set_trace()
fields = (('page', 'file_upload'),
('running_part', '1'),
('file_to_upload', ('file_to_upload',
run_data,
'application/octet-stream')))
m = MultipartEncoder(fields=fields, boundary='-----boundary-----')
r = requests.post(url=url,
data=m,
auth=(self.username, self.password),
headers={'Content-type': m.content_type},
verify=False)
print r.text
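

if __name__ == '__main__':
    # Editorial sketch: the credentials, host and scheme below are placeholders,
    # not real switch settings; this only shows how the upload URL is composed.
    example = RPC('admin', 'secret', '192.0.2.1', 'https')
    print(example._create_url())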
| 2.0625 | 2 |
python/decorator/vanishing_ret_fixed.py | zeroam/TIL | 0 | 12795987 | <reponame>zeroam/TIL
def debug_transformer(func):
def wrapper():
print(f'Function `{func.__name__}` called')
ret = func()
print(f'Function `{func.__name__}` finished')
return ret
return wrapper
@debug_transformer
def walkout():
print('Bye Felical')
@debug_transformer
def get_bob():
return 'Bob'
bob = get_bob()
print(bob) | 2.34375 | 2 |
example_project/example_project/views.py | groupe-conseil-nutshimit-nippour/django-geoprisma | 0 | 12795988 | # -*- coding: utf-8 -*-
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from geoprisma import views as geoprisma_views
@login_required
def maprender(request, *args, **kwargs):
wsName = kwargs.get('wsName')
viewId = kwargs.get('viewId')
if not viewId:
viewId = ""
renderContext = geoprisma_views.maprender(request, wsName, viewId)
if isinstance(renderContext, dict):
templateName = renderContext.get("templateName")
return render(request, "example_project/" + templateName , renderContext)
else:
return renderContext
| 2.015625 | 2 |
model.py | marcelkunze/trackml | 3 | 12795989 | <filename>model.py
########################################################################
# ====================== TrackML CHALLENGE MODEL =====================
########################################################################
# Author: <NAME>
# Date: Dec. 2018
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from __future__ import unicode_literals
import numpy as np
import pandas as pd
import os
import ctypes
_dllPath = os.path.dirname(__file__)
libPath = os.path.join(_dllPath, 'libmodel.so')
_model = ctypes.cdll.LoadLibrary(libPath)
#_model = ctypes.CDLL(libPath)
_workPath = os.path.dirname(os.path.realpath(__file__))
print('model.py:' + _workPath)
_model.processInitHits.argtypes = (ctypes.c_int, ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_double),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int))
_model.processInitCells.argtypes = (ctypes.c_int,ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_int),ctypes.POINTER(ctypes.c_double))
_model.processSetup.argtypes = (ctypes.c_int,ctypes.c_char_p,ctypes.c_char_p)
def setup(number,datapath):
global _model
global _workPath
c_string = _workPath.encode('utf-8')
d_string = datapath.encode('utf-8')
_model.processSetup(ctypes.c_int(number),d_string,c_string)
def initHits(nhits,x,y,z,v,l,m):
global _model
array_type1 = ctypes.c_double * nhits
array_type2 = ctypes.c_double * nhits
array_type3 = ctypes.c_double * nhits
array_type4 = ctypes.c_int * nhits
array_type5 = ctypes.c_int * nhits
array_type6 = ctypes.c_int * nhits
result = _model.processInitHits(ctypes.c_int(nhits),array_type1(*x),array_type2(*y),array_type3(*z),array_type4(*v),array_type5(*l),array_type6(*m))
def initCells(ncells,hit_id,ch0,ch1,value):
global _model
array_type1 = ctypes.c_int * ncells
array_type2 = ctypes.c_int * ncells
array_type3 = ctypes.c_int * ncells
array_type4 = ctypes.c_double * ncells
result = _model.processInitCells(ctypes.c_int(ncells),array_type1(*hit_id),array_type2(*ch0),array_type3(*ch1),array_type4(*value))
def findTracks():
global _model
global _workPath
step = 1
c_string = _workPath.encode('utf-8')
result = _model.processFindTracks(ctypes.c_int(step),c_string)
def readTruth():
global _model
_model.processReadTruth()
def readStarts():
global _model
_model.processReadStarts()
def readHits():
global _model
_model.processReadHits()
def readCells():
global _model
_model.processReadCells()
def readBlacklist():
global _model
_model.processReadBlacklist()
def readWhitelist():
global _model
_model.processReadWhitelist()
def sortTracks():
global _model
_model.processSortTracks()
def finish():
global _model
_model.processFinish()
class Model:
def __init__(self):
self.workpath = os.path.dirname(os.path.realpath(__file__))
self.datapath = os.path.dirname(os.path.realpath(__file__))
def predict_one_event(self, event_id, event, cells):
# Instantiate the tracker
setup(event_id,self.datapath)
# Read hits data
x = event.x.values
y = event.y.values
z = event.z.values
v = event.volume_id.values
l = event.layer_id.values
m = event.module_id.values
nhits = event.shape[0]
initHits(nhits,x, y, z, v, l, m)
# Read cells data
hit_id = cells.hit_id.values
ch0 = cells.ch0.values
ch1 = cells.ch1.values
value = cells.value.values
ncells = cells.shape[0]
initCells(ncells,hit_id,ch0,ch1,value)
# Run the traking code
findTracks()
# Delete the tracker
finish()
# Read the submission file
filename = self.workpath+'/submission'+str(event_id)+'.csv'
sub = pd.read_csv(filename);
return sub
| 2.25 | 2 |
src/morphforgecontrib/indev/meshtools/core.py | mikehulluk/morphforge | 1 | 12795990 | <filename>src/morphforgecontrib/indev/meshtools/core.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
import os
from morphforge.morphology.importer.import_array_swc import NewSWCLoader
from morphforgecontrib.indev.highqualitymesh.create_mesh import MeshFromGTS
from morphforge.morphology.mesh.writer_ply import MeshWriterPLY
from morphforge.morphology.mesh.mesh import TriangleMesh
from morphforge.core.mgrs.locmgr import LocMgr
from morphforgecontrib.morphology.util.axontrimmer import AxonTrimmer
from morphforgecontrib.morphology.util.morphologytranslator import MorphologyTranslator
from morphforgecontrib.morphology.util.minimum_diameter import MorphologyMinimumDiameter
class MeshGenerationOptions:
minimum_diameter = 'MinimumDiameter'
class Context(object):
def __init__(self, src_zip_file, dst_zip_file):
self.color_aliases = {}
self.region_color_defaults = {}
self.currentplyscope = None
self.global_options = {
#MeshGenerationOptions.minimum_diameter: 1.0
}
self.src_zip_file = src_zip_file
self.dst_zip_file = dst_zip_file
self.op_files = []
self.op_dir = '/tmp/mf/meshbuilder/'
LocMgr.ensure_dir_exists(self.op_dir)
def has_option_set(self, key):
return key in self.global_options
def get_option(self, key):
return self.global_options[key]
def get_color(self, alias):
return self.color_aliases[alias]
def add_alias(self, id, color):
assert not id in self.color_aliases
self.color_aliases[id] = color
def set_default_region_color(self, rgn_id, color):
self.region_color_defaults[rgn_id] = color
def new_ply_block(self):
assert self.currentplyscope is None
self.currentplyscope = PlyScope(global_scope=self)
def close_ply_block(self, plyfilename):
self.currentplyscope.finalise(plyfilename=plyfilename)
self.currentplyscope = None
for f in self.op_files:
self.dst_zip_file.write(f)
self.op_files = []
def getFileObjRead(self, filename):
possible_filenames = [filename, 'src/' + filename]
for pf in possible_filenames:
try:
return self.src_zip_file.open(pf, 'r')
except KeyError:
pass
raise ValueError("Can't find file: %s" % filename)
def getFileObjWrite(self, filename):
filename = os.path.join(self.op_dir, filename)
d = os.path.dirname(filename)
if not os.path.exists(d):
os.makedirs(d)
self.op_files.append(filename)
return open(filename, 'w')
class PlyScope(object):
def __init__(self, global_scope):
self.global_scope = global_scope
self.region_colors = {}
self.meshes = []
def get_region_color(self, rgn):
assert isinstance(rgn, int)
# Local colors?
if rgn in self.region_colors:
return self.region_colors[rgn]
if None in self.region_colors:
return self.region_colors[None]
# Global colors?:
if rgn in self.global_scope.region_color_defaults:
return self.global_scope.region_color_defaults[rgn]
if None in self.global_scope.region_color_defaults:
return self.global_scope.region_color_defaults[None]
assert False, 'What do I do with region: %d ' % rgn
# return ColorDef(200,50, np.min((rgn*20,255)))
def include_file(self, filename, options):
src_obj = self.global_scope.getFileObjRead(filename)
morphs = NewSWCLoader.load_swc_set(src=src_obj)
# Hack: only first:
# morphs = [morphs[0]]
for m in morphs:
m = m.to_tree()
# Create the color LUT:
bi_dict = m.region_number_to_name_bidict
rgn_colors = {}
for rgn in m.get_regions():
rgn_name = rgn.name
rgn_int = bi_dict.region_name_to_int(rgn_name)
rgn_color = self.get_region_color(rgn_int)
print '%s -> %s' % (rgn_name, rgn_int), rgn_color
rgn_colors[rgn_name] = rgn_color
# Check for ignored Region:
if None in rgn_colors.values():
for v in rgn_colors.values():
if v is not None:
print 'Partly ignoring Structure:',
for (k, v) in rgn_colors.iteritems():
print k, v
assert False, 'Partly ignored structure!'
continue
# Apply the options:
if 'trim' in options:
m = AxonTrimmer.trim_axon_from_morphology(m,
max_dist_to_parent=options['trim'])
if 'offset' in options:
m = MorphologyTranslator.translate(morphology=m,
offset=options['offset'])
if self.global_scope.has_option_set(MeshGenerationOptions.minimum_diameter):
m = MorphologyMinimumDiameter.ensure(m,
min_diameter=self.global_scope.get_option(MeshGenerationOptions.minimum_diameter))
mesh = MeshFromGTS.build(m, plot=False,
region_color_map=rgn_colors)
self.meshes.append(mesh)
def set_region_color(self, region, color):
self.region_colors[region] = color
def finalise(self, plyfilename):
m = TriangleMesh.merge(meshes=self.meshes)
ply = MeshWriterPLY.build_string(m)
with self.global_scope.getFileObjWrite(plyfilename) as f:
f.write(ply)
class ColorDef(object):
def __init__(self, r, g, b):
self.r = r
self.g = g
self.b = b
def __str__(self):
return '<ColorDef: (%d,%d,%d)>' % (self.r, self.g, self.b)
class RegionColorDef(object):
def __init__(self, rgn, color_def):
assert isinstance(rgn, int)
assert isinstance(color_def, ColorDef)
self.rgn = rgn
self.color_def = color_def
| 1.054688 | 1 |
predict.py | event-driven-robotics/movenet.pytorch | 0 | 12795991 | """
@Fire
https://github.com/fire717
"""
from lib import init, Data, MoveNet, Task
from config import cfg
from lib.utils.utils import arg_parser
# Script to create and save as images all the various outputs of the model
def main(cfg):
init(cfg)
model = MoveNet(num_classes=cfg["num_classes"],
width_mult=cfg["width_mult"],
mode='train')
data = Data(cfg)
test_loader = data.getTestDataloader()
# _,test_loader = data.getTrainValDataloader()
run_task = Task(cfg, model)
run_task.modelLoad("/home/ggoyal/data/mpii/output/e300_valacc0.86824.pth")
# run_task.modelLoad("/home/ggoyal/data/mpii/output/e1000_valacc0.66665.pth")
# run_task.modelLoad("output/mbv2_e105_valacc0.80255.pth") # for coco
# run_task.modelLoad(cfg["newest_ckpt"])
run_task.predict(test_loader, cfg["predict_output_path"])
# run_task.predict(test_loader, "output/predict")
if __name__ == '__main__':
cfg = arg_parser(cfg)
main(cfg) | 2.4375 | 2 |
python/elec-p2sh-hodl.py | brianddk/redd | 7 | 12795992 | #!/usr/bin/env python3
# [rights] Copyright 2020 brianddk at github https://github.com/brianddk
# [license] Apache 2.0 License https://www.apache.org/licenses/LICENSE-2.0
# [repo] github.com/brianddk/reddit/blob/master/python/elec-p2sh-hodl.py
# [btc] BTC-b32: bc1qwc2203uym96u0nmq04pcgqfs9ldqz9l3mz8fpj
# [tipjar] github.com/brianddk/reddit/blob/master/tipjar/tipjar.txt
# [txid] a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93
# [ref] https://live.blockcypher.com/btc-testnet/tx/{txid}/
# [req] python -m pip install electrum
# [note] with open(r"..\reddit\python\hodl.py", 'r') as s: exec(s.read())
from electrum.transaction import TxOutpoint, PartialTxInput, PartialTxOutput, PartialTransaction
from electrum.bitcoin import deserialize_privkey, opcodes, push_script
from electrum.crypto import hash_160, sha256d
from electrum.constants import set_testnet
from electrum.ecc import ECPrivkey
# The basic bitcoinlib utility scripts
x = lambda h: bytes.fromhex(h)
lx = lambda h: bytes.fromhex(h)[::-1]
b2x = lambda b: (b.hex() if 'hex' in dir(b) else hex(b)).replace('0x','')
b2lx = lambda b: b[::-1].hex().replace('0x','')
# Very simple bitcoin script comiler
compile = lambda s: "".join([
opcodes[i].hex() if i in dir(opcodes) else push_script(i) for i in s])
# Electrum assumes P2SH is multisig, this subclass corrects that
class P2SHPartialTransaction(PartialTransaction):
def __init__(self):
PartialTransaction.__init__(self)
@classmethod
def get_preimage_script(cls, txin: 'PartialTxInput') -> str:
return b2x(txin.redeem_script)
# Set testnet
set_testnet()
# I removed the R-value grinding to use "legacy" sig processing
# This is the original TXID we are trying to hit
otxid = 'a8110bbdd40d65351f615897d98c33cbe33e4ebedb4ba2fc9e8c644423dadc93'
# Basic constants to build the TXNIN
wif = 'cQNjiPwYKMBr2oB3bWzf3rgBsu198xb8Nxxe51k6D3zVTA98L25N'
txid = x('6d500966f9e494b38a04545f0cea35fc7b3944e341a64b804fed71cdee11d434')
vout = 1
sats = 9999
script_type = 'p2sh'
binzero = 2**32
sequence = binzero - 3
address = 'tb1qv9hg20f0g08d460l67ph6p4ukwt7m0ttqzj7mk'
sats_less_fees = sats - 200
locktime = 1602565200
# Build the Transaction Input
_, privkey, compressed = deserialize_privkey(wif)
pubkey = ECPrivkey(privkey).get_public_key_hex(compressed=compressed)
prevout = TxOutpoint(txid=txid, out_idx=vout)
txin = PartialTxInput(prevout=prevout)
txin.nsequence = sequence
txin.script_type = script_type
expiry = b2x(lx(b2x(locktime)))
redeem_script = compile([
expiry, 'OP_CHECKLOCKTIMEVERIFY', 'OP_DROP', pubkey, 'OP_CHECKSIG'])
txin.redeem_script = x(redeem_script)
# Build the Transaction Output
txout = PartialTxOutput.from_address_and_value(address, sats_less_fees)
# Build and sign the transaction
tx = P2SHPartialTransaction.from_io([txin], [txout], locktime=locktime)
tx.version = 1
sig = tx.sign_txin(0, privkey)
txin.script_sig = x(compile([sig , redeem_script]))
# Get the serialized txn and compute txid
txn = tx.serialize()
txid = b2lx(sha256d(x(txn)))
# Ensure we arrived at where we intended
if txid != otxid:
    print("Did not achieve target TXID hash")
    print("Perhaps R-value hashing needs to be reverted")
    print("See: https://redd.it/jf97pc")
# Display results
print("pubk:", pubkey)
print("priv:", b2x(privkey))
print("txid:", txid)
print("txn:", txn)
| 2.046875 | 2 |
image_rw_matplot.py | lotlucky/opencv | 0 | 12795993 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : image_rw_matplot.py
# @Date : 2019-03-14
# @Author : wudan
import cv2
import numpy as np
from matplotlib import pyplot as plt
"""
OpenCV handles images in BGR channel order, while matplotlib expects RGB
"""
img = cv2.imread('zhuzhu_1.jpeg',1)
b,g,r = cv2.split(img)
img2 = cv2.merge([r,g,b])
plt.subplot(121),plt.imshow(img) # colors look wrong: BGR data rendered as RGB
plt.subplot(122),plt.imshow(img2) # colors look correct after swapping channels
plt.show()
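# Editorial note: the manual split/merge above is equivalent to a single
# color-space conversion, e.g. img2 = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)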
| 2.671875 | 3 |
network-delay-time/network-delay-time.py | rams1996/Graphs | 0 | 12795994 | from typing import List


class Solution:
def networkDelayTime(self, times: List[List[int]], N: int, K: int) -> int:
import heapq
from collections import defaultdict
createGraph=defaultdict(list)
for i in times:
createGraph[i[0]].append((i[2],i[1]))
heap=[]
heapq.heapify(heap)
time=float('-inf')
visited=set()
visited.add(K)
for edge in createGraph[K]:
heapq.heappush(heap,edge)
while heap:
currEdge=heapq.heappop(heap)
# print(currEdge)
if currEdge[1] not in visited:
for edge in createGraph[currEdge[1]]:
heapq.heappush(heap,(edge[0]+currEdge[0],edge[1]))
visited.add(currEdge[1])
time=max(time,currEdge[0])
if len(visited)!=N:
return -1
return time
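

if __name__ == "__main__":
    # Editorial sanity check on the classic LeetCode 743 example; the expected
    # answer for this graph with N=4 nodes starting from K=2 is 2.
    print(Solution().networkDelayTime([[2, 1, 1], [2, 3, 1], [3, 4, 1]], 4, 2))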
| 3.0625 | 3 |
deal/linter/_extractors/__init__.py | m4ta1l/deal | 1 | 12795995 | <reponame>m4ta1l/deal
# app
from .asserts import get_asserts
from .common import get_name
from .contracts import get_contracts
from .exceptions import get_exceptions
from .exceptions_stubs import get_exceptions_stubs
from .globals import get_globals
from .imports import get_imports
from .pre import get_pre
from .prints import get_prints
from .returns import get_returns, has_returns
from .value import get_value
__all__ = [
'get_asserts',
'get_contracts',
'get_exceptions_stubs',
'get_exceptions',
'get_globals',
'get_imports',
'get_name',
'get_pre',
'get_prints',
'get_returns',
'get_value',
'has_returns',
]
| 1.289063 | 1 |
vmtkScripts/vmtksurfaceregiondrawing.py | michelebucelli/vmtk | 1 | 12795996 | <reponame>michelebucelli/vmtk<filename>vmtkScripts/vmtksurfaceregiondrawing.py
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtksurfaceregiondrawing.py,v $
## Language: Python
## Date: $Date: 2006/05/26 12:35:13 $
## Version: $Revision: 1.9 $
## Copyright (c) <NAME>, <NAME>. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
import sys
from vmtk import vmtkrenderer
from vmtk import pypes
class vmtkSurfaceRegionDrawing(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Surface = None
self.vmtkRenderer = None
self.OwnRenderer = 0
self.Representation = 'edges'
self.Actor = None
self.ContourWidget = None
self.Interpolator = None
self.OutsideValue = 0.0
self.InsideValue = 1.0
self.OverwriteOutsideValue = 0
self.ComputeDistance = 0
self.TagSmallestRegion = 1
self.CellData = 1
self.ArrayName = 'CellEntityIds'
self.Array = None
self.SetScriptName('vmtksurfaceregiondrawing')
self.SetScriptDoc('draw a closed contour on a surface and generate a new tag inside it')
self.SetInputMembers([
['Surface','i','vtkPolyData',1,'','the input surface','vmtksurfacereader'],
['CellData','celldata','bool',1,'','toggle writing point or cell data array'],
['InsideValue','inside','float',1,'','value with which the surface within the contour is filled'],
['OutsideValue','outside','float',1,'','value with which the surface outside the contour is filled'],
['OverwriteOutsideValue','overwriteoutside','bool',1,'','overwrite outside value also when a tag array already exists in the input surface'],
            ['ArrayName','array','str',1,'','the name of the array where the generated scalars are stored'],
['TagSmallestRegion','tagsmallestregion','bool',1,'','toggle tagging the smallest or largest region'],
['ComputeDistance','computedistance','bool',1,'','fill the array with the distance to the contour'],
['vmtkRenderer','renderer','vmtkRenderer',1,'','external renderer']
])
self.SetOutputMembers([
['Surface','o','vtkPolyData',1,'','the output surface','vmtksurfacewriter']
])
def SetSurfaceRepresentation(self, representation):
if representation == 'surface':
self.Actor.GetProperty().SetRepresentationToSurface()
self.Actor.GetProperty().EdgeVisibilityOff()
elif representation == 'edges':
self.Actor.GetProperty().SetRepresentationToSurface()
self.Actor.GetProperty().EdgeVisibilityOn()
elif representation == 'wireframe':
self.Actor.GetProperty().SetRepresentationToWireframe()
self.Actor.GetProperty().EdgeVisibilityOff()
self.Representation = representation
def RepresentationCallback(self, obj):
if not self.Actor:
return
if self.Representation == 'surface':
representation = 'edges'
elif self.Representation == 'edges':
representation = 'wireframe'
elif self.Representation == 'wireframe':
representation = 'surface'
self.SetSurfaceRepresentation(representation)
self.vmtkRenderer.RenderWindow.Render()
def ScalarsCallback(self, obj):
rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation())
pointIds = vtk.vtkIdList()
self.Interpolator.GetContourPointIds(rep,pointIds)
points = vtk.vtkPoints()
points.SetNumberOfPoints(pointIds.GetNumberOfIds())
for i in range(pointIds.GetNumberOfIds()):
pointId = pointIds.GetId(i)
point = self.Surface.GetPoint(pointId)
points.SetPoint(i,point)
selectionFilter = vtk.vtkSelectPolyData()
selectionFilter.SetInputData(self.Surface)
selectionFilter.SetLoop(points)
selectionFilter.GenerateSelectionScalarsOn()
if self.TagSmallestRegion:
selectionFilter.SetSelectionModeToSmallestRegion()
else:
selectionFilter.SetSelectionModeToLargestRegion()
selectionFilter.Update()
selectionScalars = selectionFilter.GetOutput().GetPointData().GetScalars()
selectionScalars.SetName('SelectionFilter')
if self.CellData:
self.Surface.GetPointData().AddArray(selectionScalars)
pointDataToCellDataFilter = vtk.vtkPointDataToCellData()
pointDataToCellDataFilter.SetInputData(self.Surface)
pointDataToCellDataFilter.PassPointDataOn()
pointDataToCellDataFilter.Update()
self.Surface = pointDataToCellDataFilter.GetPolyDataOutput()
selectionScalars = self.Surface.GetCellData().GetArray('SelectionFilter')
for i in range(self.Array.GetNumberOfTuples()):
selectionValue = selectionScalars.GetTuple1(i)
if self.ComputeDistance:
contourValue = self.Array.GetTuple1(i)
if (not contourValue < 0.0 and selectionValue < 0.0) or (contourValue < 0.0 and selectionValue < contourValue):
self.Array.SetTuple1(i,selectionValue)
else:
if selectionValue <= 0.0:
self.Array.SetTuple1(i,self.InsideValue)
if self.CellData:
self.Surface.GetPointData().RemoveArray('SelectionFilter')
self.Surface.GetCellData().RemoveArray('SelectionFilter')
self.Surface.Modified()
self.ContourWidget.Initialize()
def DeleteContourCallback(self, obj):
self.ContourWidget.Initialize()
def InteractCallback(self, obj):
# BUG: enable the widget, but immediately after it is disabled again
if self.ContourWidget.GetEnabled() == 1:
self.ContourWidget.SetEnabled(0)
else:
self.ContourWidget.SetEnabled(1)
def Execute(self):
if self.Surface == None:
self.PrintError('Error: no Surface.')
if not self.vmtkRenderer:
self.vmtkRenderer = vmtkrenderer.vmtkRenderer()
self.vmtkRenderer.Initialize()
self.OwnRenderer = 1
self.vmtkRenderer.RegisterScript(self)
triangleFilter = vtk.vtkTriangleFilter()
triangleFilter.SetInputData(self.Surface)
triangleFilter.Update()
self.Surface = triangleFilter.GetOutput()
if self.CellData:
self.Array = self.Surface.GetCellData().GetArray(self.ArrayName)
else:
self.Array = self.Surface.GetPointData().GetArray(self.ArrayName)
if self.Array == None or self.OverwriteOutsideValue:
self.Array = vtk.vtkDoubleArray()
self.Array.SetNumberOfComponents(1)
if self.CellData:
self.Array.SetNumberOfTuples(self.Surface.GetNumberOfCells())
else:
self.Array.SetNumberOfTuples(self.Surface.GetNumberOfPoints())
self.Array.SetName(self.ArrayName)
self.Array.FillComponent(0,self.OutsideValue)
if self.CellData:
self.Surface.GetCellData().AddArray(self.Array)
else:
self.Surface.GetPointData().AddArray(self.Array)
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputData(self.Surface)
mapper.ScalarVisibilityOn()
if self.CellData:
self.Surface.GetCellData().SetActiveScalars(self.ArrayName)
mapper.SetScalarModeToUseCellData()
else:
self.Surface.GetPointData().SetActiveScalars(self.ArrayName)
mapper.SetScalarModeToUsePointData()
arrayRange = [e for e in self.Array.GetValueRange(0)]
if self.InsideValue > arrayRange[1]:
arrayRange[1] = self.InsideValue
elif self.InsideValue < arrayRange[0]:
arrayRange[0] = self.InsideValue
self.Actor = vtk.vtkActor()
self.Actor.SetMapper(mapper)
self.Actor.GetMapper().SetScalarRange(arrayRange[0],arrayRange[1])
self.vmtkRenderer.Renderer.AddActor(self.Actor)
self.vmtkRenderer.Render()
self.ContourWidget = vtk.vtkContourWidget()
self.ContourWidget.SetInteractor(self.vmtkRenderer.RenderWindowInteractor)
rep = vtk.vtkOrientedGlyphContourRepresentation.SafeDownCast(self.ContourWidget.GetRepresentation())
rep.GetLinesProperty().SetColor(1, 0.2, 0)
rep.GetLinesProperty().SetLineWidth(3.0)
pointPlacer = vtk.vtkPolygonalSurfacePointPlacer()
pointPlacer.AddProp(self.Actor)
pointPlacer.GetPolys().AddItem(self.Surface)
pointPlacer.SnapToClosestPointOn()
rep.SetPointPlacer(pointPlacer)
self.Interpolator = vtk.vtkPolygonalSurfaceContourLineInterpolator()
self.Interpolator.GetPolys().AddItem(self.Surface)
rep.SetLineInterpolator(self.Interpolator)
self.ContourWidget.EnabledOn()
self.InputInfo("Drawing contour ...\n")
self.vmtkRenderer.AddKeyBinding('w','Change surface representation.',self.RepresentationCallback)
self.vmtkRenderer.AddKeyBinding('space','Generate scalars',self.ScalarsCallback)
self.vmtkRenderer.AddKeyBinding('d','Delete contour',self.DeleteContourCallback)
#self.vmtkRenderer.AddKeyBinding('i','Start interaction',self.InteractCallback)
self.vmtkRenderer.Render()
if self.OwnRenderer:
self.vmtkRenderer.Deallocate()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
| 2.078125 | 2 |
tests/exception_test.py | gglin001/poptorch | 128 | 12795997 | <gh_stars>100-1000
#!/usr/bin/env python3
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
import pytest
import torch
import poptorch
def harness(setting, Model, args):
opts = poptorch.Options()
if setting == "true":
opts.Precision.enableFloatingPointExceptions(True)
elif setting == "false":
opts.Precision.enableFloatingPointExceptions(False)
poptorch_model = poptorch.inferenceModel(Model(), opts)
if setting == "true":
with pytest.raises(poptorch.Error):
poptorch_model(*args)
else:
poptorch_model(*args)
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Floating point exception not supported on model")
@pytest.mark.parametrize("setting", {"default", "true", "false"})
def test_div0(setting):
class Model(torch.nn.Module):
def forward(self, x, y):
return x / y
x = torch.ones(10, 10)
y = torch.zeros(10, 10)
harness(setting, Model, [x, y])
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Floating point exception not supported on model")
@pytest.mark.parametrize("setting", {"default", "true", "false"})
def test_mul0inf(setting):
class Model(torch.nn.Module):
def forward(self, x, y):
return x * y
x = torch.zeros(10, 10)
y = torch.div(torch.ones(10, 10), torch.zeros(10, 10))
harness(setting, Model, [x, y])
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Floating point exception not supported on model")
@pytest.mark.parametrize("setting", {"default", "true", "false"})
def test_nonreal(setting):
class Model(torch.nn.Module):
def forward(self, x):
return torch.sqrt(x)
x = torch.Tensor([-1, -2])
harness(setting, Model, [x])
@pytest.mark.parametrize("setting", {"default", "true", "false"})
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Floating point exception not supported on model")
def test_nan(setting):
class Model(torch.nn.Module):
def forward(self, x, y):
return x > y
x = torch.ones(10, 10)
y = torch.div(torch.zeros(10, 10), torch.zeros(10, 10))
harness(setting, Model, [x, y])
@pytest.mark.parametrize("setting", {"default", "true", "false"})
@pytest.mark.skipif(not poptorch.ipuHardwareIsAvailable(),
reason="Floating point exception not supported on model")
def test_ovf(setting):
class Model(torch.nn.Module):
def forward(self, x):
return torch.exp(x)
x = torch.Tensor([3800, 4203])
harness(setting, Model, [x])
| 2.15625 | 2 |
src/orbmatch_video.py | mtc-20/augmented-reality | 0 | 12795998 | <gh_stars>0
'''
Created on Saturday, 3rd October 2020 8:13:01 pm
@author: mtc-20
Coded on VS Code 2019
------
Overview:
------
Last Modified: Sat Oct 03 2020
'''
import cv2
import numpy as np
def display(frame):
cv2.imshow("check", frame)
cv2.waitKey(0)
if cv2.waitKey(1):
cv2.destroyAllWindows()
# Load ORB, BF and model objects
orb = cv2.ORB_create()
bf = cv2.BFMatcher(cv2.NORM_HAMMING2, crossCheck = True)
model = cv2.imread('./../index0_s.jpg')
# Detect and compute keypoints
kp_model, des_model = orb.detectAndCompute(model, None)
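
# Editorial sketch (not used by the loop below): the conventional Lowe ratio test
# takes each descriptor's two best matches from knnMatch instead of comparing
# neighbouring entries of bf.match(); it assumes a matcher created WITHOUT
# crossCheck, since knnMatch needs two candidates per query descriptor.
def lowe_ratio_matches(matcher, des_query, des_train, ratio=0.75):
    pairs = matcher.knnMatch(des_query, des_train, k=2)
    good = []
    for pair in pairs:
        if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
            good.append(pair[0])
    return good
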
try:
# Initialize camera
cap = cv2.VideoCapture(0)
cv2.namedWindow('Result', cv2.WINDOW_NORMAL)
cv2.resizeWindow('Result', 640,480)
while True:
ret, frame = cap.read()
# frame = cv2.flip(frame, 1)
kp_frame, des_frame = orb.detectAndCompute(frame, None)
matches = bf.match(des_model, des_frame)
good = []
for index, m in enumerate(matches):
if index < len(matches) - 1 and m.distance < 0.75 * matches[index+1].distance:
good.append(m)
if len(good) < 5:
# print(len(good))
cv2.imshow("Result", frame)
k = cv2.waitKey(1)
else:
out = cv2.drawMatches(model, kp_model, frame, kp_frame, good[:], 0, flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv2.imshow("Result", out)
k = cv2.waitKey(1)
if k%256 ==27:
print("[INFO] User induced exit...")
break
cap.release()
cv2.destroyAllWindows()
except Exception as e:
print("[ERR] ", e)
print("[INFO] Closing...")
cap.release()
cv2.destroyAllWindows() | 2.34375 | 2 |
architect/test_jit.py | MIT-REALM/architect | 2 | 12795999 | <filename>architect/test_jit.py<gh_stars>1-10
import time
import jax
import jax.numpy as jnp
def f(x):
return jnp.dot(x, x.T)
N_trials = 1
total_time = 0.0
x = jnp.ones((1000, 1))
f = jax.jit(f)
start = time.perf_counter()
f(x)
end = time.perf_counter()
print(f"jit_time: {end - start}")
g = jax.vmap(f)
y = jnp.ones((1000, 1000, 1))
start = time.perf_counter()
g(y)
end = time.perf_counter()
print(f"jit_time: {end - start}")
y = jnp.ones((1001, 1000, 1))
start = time.perf_counter()
g(y)
end = time.perf_counter()
print(f"jit_time: {end - start}")
for i in range(N_trials):
start = time.perf_counter()
g(y)
end = time.perf_counter()
total_time += end - start
print(f"total_time: {total_time}")
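
# Editorial note: JAX dispatches work asynchronously, so the timings above may
# largely measure dispatch/compile rather than the actual computation. A hedged
# variant that waits for the device result before stopping the clock:
start = time.perf_counter()
g(y).block_until_ready()
end = time.perf_counter()
print(f"blocked_time: {end - start}")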
| 2.546875 | 3 |
stree/Splitter.py | Doctorado-ML/STree | 7 | 12796000 | <gh_stars>1-10
"""
Oblique decision tree classifier based on SVM nodes
Splitter class
"""
import os
import warnings
import random
from math import log, factorial
import numpy as np
from sklearn.feature_selection import SelectKBest, mutual_info_classif
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.exceptions import ConvergenceWarning
from mufs import MUFS
class Snode:
"""
    Node of the tree that keeps the SVM classifier and, when testing, the
    dataset assigned to it
Parameters
----------
clf : SVC
Classifier used
X : np.ndarray
input dataset in train time (only in testing)
y : np.ndarray
input labes in train time
features : np.array
features used to compute hyperplane
impurity : float
impurity of the node
title : str
label describing the route to the node
weight : np.ndarray, optional
weights applied to input dataset in train time, by default None
scaler : StandardScaler, optional
scaler used if any, by default None
"""
def __init__(
self,
clf: SVC,
X: np.ndarray,
y: np.ndarray,
features: np.array,
impurity: float,
title: str,
weight: np.ndarray = None,
scaler: StandardScaler = None,
):
self._clf = clf
self._title = title
self._belief = 0.0
# Only store dataset in Testing
self._X = X if os.environ.get("TESTING", "NS") != "NS" else None
self._y = y
self._down = None
self._up = None
self._class = None
self._feature = None
self._sample_weight = (
weight if os.environ.get("TESTING", "NS") != "NS" else None
)
self._features = features
self._impurity = impurity
self._partition_column: int = -1
self._scaler = scaler
@classmethod
def copy(cls, node: "Snode") -> "Snode":
return cls(
node._clf,
node._X,
node._y,
node._features,
node._impurity,
node._title,
node._sample_weight,
node._scaler,
)
def set_partition_column(self, col: int):
self._partition_column = col
def get_partition_column(self) -> int:
return self._partition_column
def set_down(self, son):
self._down = son
def set_title(self, title):
self._title = title
def set_classifier(self, clf):
self._clf = clf
def set_features(self, features):
self._features = features
def set_impurity(self, impurity):
self._impurity = impurity
def get_title(self) -> str:
return self._title
def get_classifier(self) -> SVC:
return self._clf
def get_impurity(self) -> float:
return self._impurity
def get_features(self) -> np.array:
return self._features
def set_up(self, son):
self._up = son
def is_leaf(self) -> bool:
return self._up is None and self._down is None
def get_down(self) -> "Snode":
return self._down
def get_up(self) -> "Snode":
return self._up
def make_predictor(self):
"""Compute the class of the predictor and its belief based on the
subdataset of the node only if it is a leaf
"""
if not self.is_leaf():
return
classes, card = np.unique(self._y, return_counts=True)
if len(classes) > 1:
max_card = max(card)
self._class = classes[card == max_card][0]
self._belief = max_card / np.sum(card)
else:
self._belief = 1
try:
self._class = classes[0]
except IndexError:
self._class = None
def __str__(self) -> str:
count_values = np.unique(self._y, return_counts=True)
if self.is_leaf():
return (
f"{self._title} - Leaf class={self._class} belief="
f"{self._belief: .6f} impurity={self._impurity:.4f} "
f"counts={count_values}"
)
return (
f"{self._title} feaures={self._features} impurity="
f"{self._impurity:.4f} "
f"counts={count_values}"
)
class Siterator:
"""Stree preorder iterator"""
def __init__(self, tree: Snode):
self._stack = []
self._push(tree)
def __iter__(self):
# To complete the iterator interface
return self
def _push(self, node: Snode):
if node is not None:
self._stack.append(node)
def __next__(self) -> Snode:
if len(self._stack) == 0:
raise StopIteration()
node = self._stack.pop()
self._push(node.get_up())
self._push(node.get_down())
return node
class Splitter:
"""
Splits a dataset in two based on different criteria
Parameters
----------
clf : SVC, optional
classifier, by default None
criterion : str, optional
The function to measure the quality of a split (only used if
max_features != num_features). Supported criteria are “gini” for the
        Gini impurity and “entropy” for the information gain, by default
        None
feature_select : str, optional
The strategy used to choose the feature set at each node (only used if
max_features < num_features). Supported strategies are: “best”: sklearn
SelectKBest algorithm is used in every node to choose the max_features
best features. “random”: The algorithm generates 5 candidates and
        chooses the best (max. info. gain) of them. "mutual": Chooses the best
features w.r.t. their mutual info with the label. "cfs": Apply
Correlation-based Feature Selection. "fcbf": Apply Fast Correlation-
Based, by default None
criteria : str, optional
        Decides (just in case of a multiclass classification) which column
(class) use to split the dataset in a node. max_samples is
incompatible with 'ovo' multiclass_strategy, by default None
min_samples_split : int, optional
The minimum number of samples required to split an internal node. 0
(default) for any, by default None
random_state : optional
Controls the pseudo random number generation for shuffling the data for
probability estimates. Ignored when probability is False.Pass an int
for reproducible output across multiple function calls, by
default None
normalize : bool, optional
If standardization of features should be applied on each node with the
        samples that reach it, by default False
Raises
------
ValueError
clf has to be a sklearn estimator
ValueError
criterion must be gini or entropy
ValueError
criteria has to be max_samples or impurity
ValueError
splitter must be in {random, best, mutual, cfs, fcbf}
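    Examples
    --------
    A minimal, illustrative construction (parameter values chosen only for
    demonstration)::
        clf = SVC(kernel="linear", random_state=0)
        splitter = Splitter(clf=clf, criterion="entropy",
                            feature_select="best", criteria="max_samples",
                            min_samples_split=0, random_state=0)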
"""
def __init__(
self,
clf: SVC = None,
criterion: str = None,
feature_select: str = None,
criteria: str = None,
min_samples_split: int = None,
random_state=None,
normalize=False,
):
self._clf = clf
self._random_state = random_state
if random_state is not None:
random.seed(random_state)
self._criterion = criterion
self._min_samples_split = min_samples_split
self._criteria = criteria
self._feature_select = feature_select
self._normalize = normalize
if clf is None:
raise ValueError(f"clf has to be a sklearn estimator, got({clf})")
if criterion not in ["gini", "entropy"]:
raise ValueError(
f"criterion must be gini or entropy got({criterion})"
)
if criteria not in [
"max_samples",
"impurity",
]:
raise ValueError(
f"criteria has to be max_samples or impurity; got ({criteria})"
)
if feature_select not in ["random", "best", "mutual", "cfs", "fcbf"]:
raise ValueError(
"splitter must be in {random, best, mutual, cfs, fcbf} got "
f"({feature_select})"
)
self.criterion_function = getattr(self, f"_{self._criterion}")
self.decision_criteria = getattr(self, f"_{self._criteria}")
self.fs_function = getattr(self, f"_fs_{self._feature_select}")
def _fs_random(
self, dataset: np.array, labels: np.array, max_features: int
) -> tuple:
"""Return the best of five random feature set combinations
Parameters
----------
dataset : np.array
array of samples
labels : np.array
labels of the dataset
max_features : int
number of features of the subspace
(< number of features in dataset)
Returns
-------
tuple
indices of the features selected
"""
# Random feature reduction
n_features = dataset.shape[1]
features_sets = self._generate_spaces(n_features, max_features)
return self._select_best_set(dataset, labels, features_sets)
@staticmethod
def _fs_best(
dataset: np.array, labels: np.array, max_features: int
) -> tuple:
"""Return the variabes with higher f-score
Parameters
----------
dataset : np.array
array of samples
labels : np.array
labels of the dataset
max_features : int
number of features of the subspace
(< number of features in dataset)
Returns
-------
tuple
indices of the features selected
"""
return (
SelectKBest(k=max_features)
.fit(dataset, labels)
.get_support(indices=True)
)
@staticmethod
def _fs_mutual(
dataset: np.array, labels: np.array, max_features: int
) -> tuple:
"""Return the best features with mutual information with labels
Parameters
----------
dataset : np.array
array of samples
labels : np.array
labels of the dataset
max_features : int
number of features of the subspace
(< number of features in dataset)
Returns
-------
tuple
indices of the features selected
"""
# return best features with mutual info with the label
feature_list = mutual_info_classif(dataset, labels)
return tuple(
sorted(
range(len(feature_list)), key=lambda sub: feature_list[sub]
)[-max_features:]
)
@staticmethod
def _fs_cfs(
dataset: np.array, labels: np.array, max_features: int
) -> tuple:
"""Correlattion-based feature selection with max_features limit
Parameters
----------
dataset : np.array
array of samples
labels : np.array
labels of the dataset
max_features : int
number of features of the subspace
(< number of features in dataset)
Returns
-------
tuple
indices of the features selected
"""
mufs = MUFS(max_features=max_features, discrete=False)
return mufs.cfs(dataset, labels).get_results()
@staticmethod
def _fs_fcbf(
dataset: np.array, labels: np.array, max_features: int
) -> tuple:
"""Fast Correlation-based Filter algorithm with max_features limit
Parameters
----------
dataset : np.array
array of samples
labels : np.array
labels of the dataset
max_features : int
number of features of the subspace
(< number of features in dataset)
Returns
-------
tuple
indices of the features selected
"""
mufs = MUFS(max_features=max_features, discrete=False)
return mufs.fcbf(dataset, labels, 5e-4).get_results()
def partition_impurity(self, y: np.array) -> np.array:
return self.criterion_function(y)
@staticmethod
def _gini(y: np.array) -> float:
_, count = np.unique(y, return_counts=True)
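        # Illustrative example: for y = [0, 0, 1, 1] the counts are [2, 2], so
        # the expression below evaluates to 1 - (0.5**2 + 0.5**2) = 0.5.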
return 1 - np.sum(np.square(count / np.sum(count)))
@staticmethod
def _entropy(y: np.array) -> float:
"""Compute entropy of a labels set
Parameters
----------
y : np.array
set of labels
Returns
-------
float
entropy
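        Examples
        --------
        Illustrative value (using the base-n_classes logarithm as below): for
        y = [0, 0, 1, 1] the proportions are [0.5, 0.5] and the entropy is 1.0.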
"""
n_labels = len(y)
if n_labels <= 1:
return 0
counts = np.bincount(y)
proportions = counts / n_labels
n_classes = np.count_nonzero(proportions)
if n_classes <= 1:
return 0
entropy = 0.0
# Compute standard entropy.
for prop in proportions:
if prop != 0.0:
entropy -= prop * log(prop, n_classes)
return entropy
def information_gain(
self, labels: np.array, labels_up: np.array, labels_dn: np.array
) -> float:
"""Compute information gain of a split candidate
Parameters
----------
labels : np.array
labels of the dataset
labels_up : np.array
labels of one side
labels_dn : np.array
labels on the other side
Returns
-------
float
information gain
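        Examples
        --------
        Illustrative value with the entropy criterion: splitting
        labels = [0, 0, 1, 1] into labels_up = [0, 0] and labels_dn = [1, 1]
        gives 1.0 - 0.5 * 0.0 - 0.5 * 0.0 = 1.0.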
"""
imp_prev = self.criterion_function(labels)
card_up = card_dn = imp_up = imp_dn = 0
if labels_up is not None:
card_up = labels_up.shape[0]
imp_up = self.criterion_function(labels_up)
if labels_dn is not None:
card_dn = labels_dn.shape[0] if labels_dn is not None else 0
imp_dn = self.criterion_function(labels_dn)
samples = card_up + card_dn
if samples == 0:
return 0.0
else:
result = (
imp_prev
- (card_up / samples) * imp_up
- (card_dn / samples) * imp_dn
)
return result
def _select_best_set(
self, dataset: np.array, labels: np.array, features_sets: list
) -> list:
"""Return the best set of features among feature_sets, the criterion is
the information gain
Parameters
----------
dataset : np.array
array of samples (# samples, # features)
labels : np.array
array of labels
features_sets : list
list of features sets to check
Returns
-------
list
best feature set
"""
max_gain = 0
selected = None
warnings.filterwarnings("ignore", category=ConvergenceWarning)
for feature_set in features_sets:
self._clf.fit(dataset[:, feature_set], labels)
node = Snode(
self._clf, dataset, labels, feature_set, 0.0, "subset"
)
self.partition(dataset, node, train=True)
y1, y2 = self.part(labels)
gain = self.information_gain(labels, y1, y2)
if gain > max_gain:
max_gain = gain
selected = feature_set
return selected if selected is not None else feature_set
@staticmethod
def _generate_spaces(features: int, max_features: int) -> list:
"""Generate at most 5 feature random combinations
Parameters
----------
        features : int
            number of features in the dataset
        max_features : int
            number of features in each combination
Returns
-------
list
            list with up to 5 combinations of features randomly selected
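        Examples
        --------
        Illustrative sizing: with features=4 and max_features=2 there are
        4!/(2!*2!) = 6 possible combinations, so min(5, 6) = 5 are sampled.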
"""
comb = set()
# Generate at most 5 combinations
number = factorial(features) / (
factorial(max_features) * factorial(features - max_features)
)
set_length = min(5, number)
while len(comb) < set_length:
comb.add(
tuple(sorted(random.sample(range(features), max_features)))
)
return list(comb)
def _get_subspaces_set(
self, dataset: np.array, labels: np.array, max_features: int
) -> tuple:
"""Compute the indices of the features selected by splitter depending
        on the self._feature_select hyperparameter
Parameters
----------
dataset : np.array
array of samples
labels : np.array
labels of the dataset
max_features : int
number of features of the subspace
(<= number of features in dataset)
Returns
-------
tuple
indices of the features selected
"""
# No feature reduction
n_features = dataset.shape[1]
if n_features == max_features:
return tuple(range(n_features))
# select features as selected in constructor
return self.fs_function(dataset, labels, max_features)
def get_subspace(
self, dataset: np.array, labels: np.array, max_features: int
) -> tuple:
"""Re3turn a subspace of the selected dataset of max_features length.
Depending on hyperparameter
Parameters
----------
dataset : np.array
array of samples (# samples, # features)
labels : np.array
labels of the dataset
max_features : int
number of features to form the subspace
Returns
-------
tuple
tuple with the dataset with only the features selected and the
indices of the features selected
"""
indices = self._get_subspaces_set(dataset, labels, max_features)
return dataset[:, indices], indices
def _impurity(self, data: np.array, y: np.array) -> np.array:
"""return column of dataset to be taken into account to split dataset
Parameters
----------
data : np.array
distances to hyper plane of every class
y : np.array
vector of labels (classes)
Returns
-------
np.array
column of dataset to be taken into account to split dataset
"""
max_gain = 0
selected = -1
for col in range(data.shape[1]):
tup = y[data[:, col] > 0]
tdn = y[data[:, col] <= 0]
info_gain = self.information_gain(y, tup, tdn)
if info_gain > max_gain:
selected = col
max_gain = info_gain
return selected
@staticmethod
def _max_samples(data: np.array, y: np.array) -> np.array:
"""return column of dataset to be taken into account to split dataset
Parameters
----------
data : np.array
distances to hyper plane of every class
y : np.array
            vector of labels (classes)
Returns
-------
np.array
column of dataset to be taken into account to split dataset
"""
# select the class with max number of samples
_, samples = np.unique(y, return_counts=True)
return np.argmax(samples)
def partition(self, samples: np.array, node: Snode, train: bool):
"""Set the criteria to split arrays. Compute the indices of the samples
that should go to one side of the tree (up)
Parameters
----------
samples : np.array
array of samples (# samples, # features)
node : Snode
Node of the tree where partition is going to be made
train : bool
Train time - True / Test time - False
"""
# data contains the distances of every sample to every class hyperplane
# array of (m, nc) nc = # classes
data = self._distances(node, samples)
if data.shape[0] < self._min_samples_split:
# there aren't enough samples to split
self._up = np.ones((data.shape[0]), dtype=bool)
return
if data.ndim > 1:
# split criteria for multiclass
# Convert data to a (m, 1) array selecting values for samples
if train:
# in train time we have to compute the column to take into
# account to split the dataset
col = self.decision_criteria(data, node._y)
node.set_partition_column(col)
else:
                # in predict time just use the column computed in train time
# is taking the classifier of class <col>
col = node.get_partition_column()
if col == -1:
# No partition is producing information gain
data = np.ones(data.shape)
data = data[:, col]
self._up = data > 0
def part(self, origin: np.array) -> list:
"""Split an array in two based on indices (self._up) and its complement
partition has to be called first to establish up indices
Parameters
----------
origin : np.array
dataset to split
Returns
-------
list
list with two splits of the array
"""
down = ~self._up
return [
origin[self._up] if any(self._up) else None,
origin[down] if any(down) else None,
]
def _distances(self, node: Snode, data: np.ndarray) -> np.array:
"""Compute distances of the samples to the hyperplane of the node
Parameters
----------
node : Snode
node containing the svm classifier
data : np.ndarray
samples to compute distance to hyperplane
Returns
-------
np.array
array of shape (m, nc) with the distances of every sample to
the hyperplane of every class. nc = # of classes
"""
X_transformed = data[:, node._features]
if self._normalize:
X_transformed = node._scaler.transform(X_transformed)
return node._clf.decision_function(X_transformed)
| 2.828125 | 3 |
Conference/app/admin.py | amit-sgwn/Conference | 0 | 12796001 | from django.contrib import admin
from app.models import *
class TrackAdmin(admin.ModelAdmin):
list_display=('title','description',)
class SessionAdmin(admin.ModelAdmin):
list_display = ('title','status',)
search_fields = ['title','abstract']
list_filter = ('track','speaker',)
actions = ['make_approved',]
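    # Bulk admin action: marks every selected Session as approved ('a') and
    # reports the number of updated rows through the admin messages framework.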
def make_approved(self,request,queryset):
row_updated = queryset.update(status = 'a')
if row_updated == 1:
message_bit = "1 session was "
else:
message_bit = "%s session were "%row_updated
self.message_user(request,"%s approved"%message_bit)
make_approved.short_description = "Mark session(s) as approved"
class SpeakerAdmin(admin.ModelAdmin):
list_display = ('name','bio',)
fieldsets = (
("General Information ",{"fields": ("name","bio",)}),
("Social Media",{
"classes":("collapse"),
"fields":("twitter","facebook"),
"description":"Add social media here"})
)
admin.site.register(Speaker,SpeakerAdmin)
admin.site.register(Track,TrackAdmin)
admin.site.register(Session,SessionAdmin)
| 2.015625 | 2 |
hms-core/tests/test_hms_core/test_data_objects.py | PacktPublishing/Hands-On-Software-Engineering-with-Python | 40 | 12796002 | #!/usr/bin/env python
"""
Defines unit-tests for the module at hms_core.data_objects.
"""
#######################################
# Any needed from __future__ imports #
# Create an "__all__" list to support #
# "from module import member" use #
#######################################
__all__ = [
# Test-case classes
# Child test-modules
]
#######################################
# Module metadata/dunder-names #
#######################################
__author__ = '<NAME>'
__copyright__ = 'Copyright 2018, all rights reserved'
__status__ = 'Development'
#######################################
# Standard library imports needed #
#######################################
import os
import sys
import unittest
from datetime import datetime
from uuid import UUID, uuid4
#######################################
# Third-party imports needed #
#######################################
#######################################
# Local imports needed #
#######################################
from idic.unit_testing import *
#######################################
# Initialization needed before member #
# definition can take place #
#######################################
#######################################
# Module-level Constants #
#######################################
LocalSuite = unittest.TestSuite()
#######################################
# Import the module being tested #
#######################################
import hms_core.data_objects as data_objects
from hms_core.data_objects import *
#######################################
# Constants for test-methods #
#######################################
GoodBooleanOrIntEquivalents = [
True, False, 1, 0
]
BadBooleanOrIntEquivalents = [
'true', '', (1,2), tuple()
]
GoodDateTimes = [
# - actual datetime values
datetime.now(), datetime.fromtimestamp(1234567890),
datetime.now().timestamp(),
# - timestamp numbers
1234567890, 1234567890.123456,
# - strings
'2001-01-01 12:34:56', '3001-01-01 12:34:56',
'1911-01-01 12:34:56',
# - datetimes outside the UNIX epoch, just in case
datetime.strptime(
'2001-01-01 12:34:56', BaseDataObject._data_time_string
),
datetime.strptime(
'3001-01-01 12:34:56', BaseDataObject._data_time_string
),
datetime.strptime(
'1911-01-01 12:34:56', BaseDataObject._data_time_string
),
]
BadDateTimes = [
# - invalid types
(1,2), tuple(), True, False, object(),
# - invalid values
'true', '', '1911-01-01 12:34:56.123456'
]
GoodOIDs = [
# - actual UUID values
uuid4(), str(uuid4()),
UUID('dc3a7fdf-2183-49cc-aa00-af9239950254'),
UUID('ffffffff-ffff-ffff-ffff-ffffffffffff'),
UUID('00000000-0000-0000-0000-000000000000'),
# - strings
'dc3a7fdf-2183-49cc-aa00-af9239950254',
'ffffffff-ffff-ffff-ffff-ffffffffffff',
'00000000-0000-0000-0000-000000000000',
'dc3a7fdf218349ccaa00af9239950254',
'ffffffffffffffffffffffffffffffff',
'00000000000000000000000000000000',
]
BadOIDs = [
# - invalid types
(1,2), tuple(), True, False, object(),
# - invalid values
'true', '', '1911-01-01 12:34:56.123456'
]
#######################################
# Code-coverage test-case and #
# decorator-methods #
#######################################
class testdata_objectsCodeCoverage(ModuleCoverageTest):
_testNamespace = 'hms_core'
_testModule = data_objects
LocalSuite.addTests(
unittest.TestLoader().loadTestsFromTestCase(
testdata_objectsCodeCoverage
)
)
#######################################
# Test-cases in the module #
#######################################
class BaseDataObjectDerived(BaseDataObject):
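    # Minimal concrete subclass used only for testing: it defers to the
    # BaseDataObject implementations so that the base class's behaviour
    # (including its NotImplementedError messages) can be exercised directly.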
def __init__(self,
oid=None, created=None, modified=None, is_active=None,
is_deleted=None, is_dirty=None, is_new=None
):
BaseDataObject.__init__(
self, oid, created, modified, is_active, is_deleted,
is_dirty, is_new
)
def _create(self):
return BaseDataObject._create(self)
def _update(self):
return BaseDataObject._update(self)
def matches(self, **criteria):
return BaseDataObject.matches(self, **criteria)
def to_data_dict(self):
return BaseDataObject.to_data_dict(self)
@classmethod
def delete(cls, *oids):
pass
@classmethod
def from_data_dict(cls, data_dict):
pass
@classmethod
def get(cls, *oids, **criteria):
pass
@classmethod
def sort(cls, objects, sort_by):
pass
@testdata_objectsCodeCoverage.AddMethodTesting
@testdata_objectsCodeCoverage.AddPropertyTesting
class testBaseDataObject(unittest.TestCase):
###################################
# Tests of class methods #
###################################
def test__init__(self):
# Tests the __init__ method of the BaseDataObject class
# - All we need to do here is prove that the various
# setter- and deleter-method calls are operating as
# expected.
# - deleters first
test_object = BaseDataObjectDerived()
self.assertEquals(test_object._created, None)
self.assertEquals(test_object._is_active, True)
self.assertEquals(test_object._is_deleted, False)
self.assertEquals(test_object._is_dirty, False)
self.assertEquals(test_object._is_new, True)
self.assertEquals(test_object._modified, None)
self.assertEquals(test_object._oid, None)
# - setters
oid = uuid4()
created = GoodDateTimes[0]
modified = GoodDateTimes[1]
is_active = False
is_deleted = True
is_dirty = True
is_new = False
test_object = BaseDataObjectDerived(
oid, created, modified, is_active, is_deleted,
is_dirty, is_new
)
self.assertEquals(test_object.oid, oid)
self.assertEquals(test_object.created, created)
self.assertEquals(test_object.is_active, is_active)
self.assertEquals(test_object.is_deleted, is_deleted)
self.assertEquals(test_object.is_dirty, is_dirty)
self.assertEquals(test_object.is_new, is_new)
self.assertEquals(test_object.modified, modified)
def test_del_created(self):
# Tests the _del_created method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._created = 'unexpected value'
test_object._del_created()
self.assertEquals(
test_object._created, None,
'BaseDataObject._del_created should leave None in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(
test_object._created,
type(test_object._created).__name__
)
)
def test_del_is_active(self):
# Tests the _del_is_active method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._is_active = 'unexpected value'
test_object._del_is_active()
self.assertEquals(
test_object._is_active, True,
'BaseDataObject._del_is_active should leave None in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(
test_object._is_active,
type(test_object._is_active).__name__
)
)
def test_del_is_deleted(self):
# Tests the _del_is_deleted method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._is_deleted = 'unexpected value'
test_object._del_is_deleted()
self.assertEquals(
test_object._is_deleted, False,
'BaseDataObject._del_is_deleted should leave None in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(
test_object._is_deleted,
type(test_object._is_deleted).__name__
)
)
def test_del_is_dirty(self):
# Tests the _del_is_dirty method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._is_dirty = 'unexpected value'
test_object._del_is_dirty()
self.assertEquals(
test_object._is_dirty, False,
'BaseDataObject._del_is_dirty should leave None in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(
test_object._is_dirty,
type(test_object._is_dirty).__name__
)
)
def test_del_is_new(self):
# Tests the _del_is_new method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._is_new = 'unexpected value'
test_object._del_is_new()
self.assertEquals(
test_object._is_new, True,
'BaseDataObject._del_is_new should leave None in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(test_object._is_new, type(test_object._is_new).__name__)
)
def test_del_modified(self):
# Tests the _del_modified method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._modified = 'unexpected value'
test_object._del_modified()
self.assertEquals(
test_object._modified, None,
'BaseDataObject._del_modified should leave None in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(
test_object._modified,
type(test_object._modified).__name__
)
)
def test_del_oid(self):
# Tests the _del_oid method of the BaseDataObject class
test_object = BaseDataObjectDerived()
test_object._oid = 'unexpected value'
test_object._del_oid()
self.assertEquals(
test_object._oid, None,
'BaseDataObject._del_oid should leave None in the '
'underlying storage attribute, but "%s" (%s) was '
'found instead' %
(test_object._oid, type(test_object._oid).__name__)
)
def test_get_created(self):
# Tests the _get_created method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._created = expected
actual = test_object.created
self.assertEquals(actual, expected,
'_get_created was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
test_object._created = None
self.assertEqual(type(test_object._get_created()), datetime,
'BaseDataObject._get_created should return a '
'datetime value if it\'s retrieved from an instance '
'with an underlying None value'
)
def test_get_is_active(self):
# Tests the _get_is_active method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._is_active = expected
actual = test_object.is_active
self.assertEquals(actual, expected,
'_get_is_active was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
def test_get_is_deleted(self):
# Tests the _get_is_deleted method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._is_deleted = expected
actual = test_object.is_deleted
self.assertEquals(actual, expected,
'_get_is_deleted was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
def test_get_is_dirty(self):
# Tests the _get_is_dirty method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._is_dirty = expected
actual = test_object.is_dirty
self.assertEquals(actual, expected,
'_get_is_dirty was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
def test_get_is_new(self):
# Tests the _get_is_new method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._is_new = expected
actual = test_object.is_new
self.assertEquals(actual, expected,
'_get_is_new was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
def test_get_modified(self):
# Tests the _get_modified method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._modified = expected
actual = test_object.modified
self.assertEquals(actual, expected,
'_get_modified was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
test_object._modified = None
self.assertEqual(type(test_object._get_modified()), datetime,
'BaseDataObject._get_modified should return a '
'datetime value if it\'s retrieved from an instance '
'with an underlying None value'
)
def test_get_oid(self):
# Tests the _get_oid method of the BaseDataObject class
test_object = BaseDataObjectDerived()
expected = 'expected value'
test_object._oid = expected
actual = test_object.oid
self.assertEquals(actual, expected,
'_get_oid was expected to return "%s" (%s), but '
'returned "%s" (%s) instead' %
(
expected, type(expected).__name__,
actual, type(actual).__name__
)
)
test_object._oid = None
self.assertEqual(type(test_object._get_oid()), UUID,
'BaseDataObject._get_oid should return a UUID value '
'if it\'s retrieved from an instance with an '
'underlying None value'
)
def test_set_created(self):
# Tests the _set_created method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for created in GoodDateTimes:
if type(created) == datetime:
expected = created
elif type(created) in (int, float):
expected = datetime.fromtimestamp(created)
elif type(created) == str:
expected = datetime.strptime(
created, BaseDataObject._data_time_string
)
test_object._set_created(created)
actual = test_object.created
self.assertEqual(
actual, expected,
'Setting created to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
created, type(created).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for created in BadDateTimes:
try:
test_object._set_created(created)
self.fail(
'BaseDataObject objects should not accept "%s" '
'(%s) as created values, but it was allowed to '
'be set' %
(created, type(created).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed a created value of '
'"%s" (%s), but %s was raised instead:\n'
' %s' %
(
created, type(created).__name__,
error.__class__.__name__, error
)
)
def test_set_is_active(self):
# Tests the _set_is_active method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for is_active in GoodBooleanOrIntEquivalents:
test_object._set_is_active(is_active)
expected = True if is_active else False
actual = test_object.is_active
self.assertEqual(
actual, expected,
'Setting is_active to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
is_active, type(is_active).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for is_active in BadBooleanOrIntEquivalents:
try:
test_object._set_is_active(is_active)
self.fail(
'BaseDataObject objects should not accept '
'"%s" (%s) as valid is_active values, but it '
'was allowed to be set' %
(is_active, type(is_active).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed an is_active value '
'of "%s" (%s), but %s was raised instead:\n'
' %s' %
(
is_active, type(is_active).__name__,
error.__class__.__name__, error
)
)
def test_set_is_deleted(self):
# Tests the _set_is_deleted method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for is_deleted in GoodBooleanOrIntEquivalents:
test_object._set_is_deleted(is_deleted)
expected = True if is_deleted else False
actual = test_object.is_deleted
self.assertEqual(
actual, expected,
'Setting is_deleted to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
is_deleted, type(is_deleted).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for is_deleted in BadBooleanOrIntEquivalents:
try:
test_object._set_is_deleted(is_deleted)
self.fail(
'BaseDataObject objects should not accept '
'"%s" (%s) as valid is_deleted values, but it '
'was allowed to be set' %
(is_deleted, type(is_deleted).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed an is_deleted value '
'of "%s" (%s), but %s was raised instead:\n'
' %s' %
(
is_deleted, type(is_deleted).__name__,
error.__class__.__name__, error
)
)
def test_set_is_dirty(self):
# Tests the _set_is_dirty method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for is_dirty in GoodBooleanOrIntEquivalents:
test_object._set_is_dirty(is_dirty)
expected = True if is_dirty else False
actual = test_object.is_dirty
self.assertEqual(
actual, expected,
'Setting is_dirty to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
is_dirty, type(is_dirty).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for is_dirty in BadBooleanOrIntEquivalents:
try:
test_object._set_is_dirty(is_dirty)
self.fail(
'BaseDataObject objects should not accept '
'"%s" (%s) as valid is_dirty values, but it '
'was allowed to be set' %
(is_dirty, type(is_dirty).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed an is_dirty value '
'of "%s" (%s), but %s was raised instead:\n'
' %s' %
(
is_dirty, type(is_dirty).__name__,
error.__class__.__name__, error
)
)
def test_set_is_new(self):
# Tests the _set_is_new method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for is_new in GoodBooleanOrIntEquivalents:
test_object._set_is_new(is_new)
expected = True if is_new else False
actual = test_object.is_new
self.assertEqual(
actual, expected,
'Setting is_new to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
is_new, type(is_new).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for is_new in BadBooleanOrIntEquivalents:
try:
test_object._set_is_new(is_new)
self.fail(
'BaseDataObject objects should not accept '
'"%s" (%s) as valid is_new values, but it '
'was allowed to be set' %
(is_new, type(is_new).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed an is_new value '
'of "%s" (%s), but %s was raised instead:\n'
' %s' %
(
is_new, type(is_new).__name__,
error.__class__.__name__, error
)
)
def test_set_modified(self):
# Tests the _set_modified method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for modified in GoodDateTimes:
if type(modified) == datetime:
expected = modified
elif type(modified) in (int, float):
expected = datetime.fromtimestamp(modified)
elif type(modified) == str:
expected = datetime.strptime(
modified, BaseDataObject._data_time_string
)
test_object._set_modified(modified)
actual = test_object.modified
self.assertEqual(
actual, expected,
'Setting modified to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" (%s) '
'was returned instead' %
(
modified, type(modified).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for modified in BadDateTimes:
try:
test_object._set_modified(modified)
self.fail(
'BaseDataObject objects should not accept "%s" '
'(%s) as modified values, but it was allowed to '
'be set' %
(modified, type(modified).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed a modified value of '
'"%s" (%s), but %s was raised instead:\n'
' %s' %
(
modified, type(modified).__name__,
error.__class__.__name__, error
)
)
def test_set_oid(self):
# Tests the _set_oid method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Test all "good" values
for oid in GoodOIDs:
if type(oid) == UUID:
expected = oid
elif type(oid) == str:
expected = UUID(oid)
test_object._set_oid(oid)
actual = test_object.oid
self.assertEqual(
actual, expected,
'Setting oid to "%s" (%s) should return '
'"%s" (%s) through the property, but "%s" '
'(%s) was returned instead.' %
(
oid, type(oid).__name__,
expected, type(expected).__name__,
actual, type(actual).__name__,
)
)
# - Test all "bad" values
for oid in BadOIDs:
try:
test_object._set_oid(oid)
self.fail(
                    'BaseDataObject objects should not accept '
'"%s" (%s) as a valid oid, but it was '
'allowed to be set' %
(oid, type(oid).__name__)
)
except (TypeError, ValueError):
pass
except Exception as error:
self.fail(
'BaseDataObject objects should raise TypeError '
'or ValueError if passed a value of "%s" (%s) '
'as an oid, but %s was raised instead:\n'
' %s' %
(
oid, type(oid).__name__,
error.__class__.__name__, error
)
)
def testsave(self):
# Tests the save method of the BaseDataObject class
test_object = BaseDataObjectDerived()
# - Set things up to force a call to _create:
test_object._is_new = True
for dirty in (True, False, None):
test_object._is_dirty = dirty
try:
test_object.save()
except NotImplementedError as error:
if str(error) != (
'BaseDataObjectDerived has not implemented '
'_create, as required by BaseDataObject'
):
self.fail(
'Calling _create should return a known '
'error-message, but the message returned '
'was not what was expected'
)
except Exception as error:
self.fail(
'BaseDataObject.save did not raise the '
'expected error while being tested'
)
# - Set things up to force a call to _update:
test_object._is_new = False
for dirty in (True, False, None):
test_object._is_dirty = dirty
try:
test_object.save()
except NotImplementedError as error:
if str(error) != (
'BaseDataObjectDerived has not implemented '
'_update, as required by BaseDataObject'
):
self.fail(
                        'Calling _update should return a known '
'error-message, but the message returned '
'was not what was expected'
)
except Exception as error:
self.fail(
'BaseDataObject.save did not raise the '
'expected error while being tested'
)
###################################
# Tests of class properties #
###################################
def testcreated(self):
# Tests the created property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.created.fget,
BaseDataObject._get_created,
'BaseDataObject.created is expected to use the '
'_get_created method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.created.fset,
BaseDataObject._set_created,
'BaseDataObject.created is expected to use the '
'_set_created method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.created.fdel,
BaseDataObject._del_created,
'BaseDataObject.created is expected to use the '
'_del_created method as its deleter-method'
)
def testis_active(self):
# Tests the is_active property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.is_active.fget,
BaseDataObject._get_is_active,
'BaseDataObject.is_active is expected to use the '
'_get_is_active method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.is_active.fset,
BaseDataObject._set_is_active,
'BaseDataObject.is_active is expected to use the '
'_set_is_active method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.is_active.fdel,
BaseDataObject._del_is_active,
'BaseDataObject.is_active is expected to use the '
'_del_is_active method as its deleter-method'
)
def testis_deleted(self):
# Tests the is_deleted property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.is_deleted.fget,
BaseDataObject._get_is_deleted,
'BaseDataObject.is_deleted is expected to use the '
'_get_is_deleted method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.is_deleted.fset,
BaseDataObject._set_is_deleted,
'BaseDataObject.is_deleted is expected to use the '
'_set_is_deleted method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.is_deleted.fdel,
BaseDataObject._del_is_deleted,
'BaseDataObject.is_deleted is expected to use the '
'_del_is_deleted method as its deleter-method'
)
def testis_dirty(self):
# Tests the is_dirty property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.is_dirty.fget,
BaseDataObject._get_is_dirty,
'BaseDataObject.is_dirty is expected to use the '
'_get_is_dirty method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.is_dirty.fset,
BaseDataObject._set_is_dirty,
'BaseDataObject.is_dirty is expected to use the '
'_set_is_dirty method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.is_dirty.fdel,
BaseDataObject._del_is_dirty,
'BaseDataObject.is_dirty is expected to use the '
'_del_is_dirty method as its deleter-method'
)
def testis_new(self):
# Tests the is_new property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.is_new.fget,
BaseDataObject._get_is_new,
'BaseDataObject.is_new is expected to use the '
'_get_is_new method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.is_new.fset,
BaseDataObject._set_is_new,
'BaseDataObject.is_new is expected to use the '
'_set_is_new method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.is_new.fdel,
BaseDataObject._del_is_new,
'BaseDataObject.is_new is expected to use the '
'_del_is_new method as its deleter-method'
)
def testmodified(self):
# Tests the modified property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.modified.fget,
BaseDataObject._get_modified,
'BaseDataObject.modified is expected to use the '
'_get_modified method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.modified.fset,
BaseDataObject._set_modified,
'BaseDataObject.modified is expected to use the '
'_set_modified method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.modified.fdel,
BaseDataObject._del_modified,
'BaseDataObject.modified is expected to use the '
'_del_modified method as its deleter-method'
)
def testoid(self):
# Tests the oid property of the BaseDataObject class
# - Assert that the getter is correct:
self.assertEqual(
BaseDataObject.oid.fget,
BaseDataObject._get_oid,
'BaseDataObject.oid is expected to use the '
'_get_oid method as its getter-method'
)
# - Assert that the setter is correct:
self.assertEqual(
BaseDataObject.oid.fset,
BaseDataObject._set_oid,
'BaseDataObject.oid is expected to use the '
'_set_oid method as its setter-method'
)
# - Assert that the deleter is correct:
self.assertEqual(
BaseDataObject.oid.fdel,
BaseDataObject._del_oid,
'BaseDataObject.oid is expected to use the '
'_del_oid method as its deleter-method'
)
# def testproperty_name(self):
# # Tests the property_name property of the BaseDataObject class
# # - Assert that the getter is correct:
# self.assertEqual(
# BaseDataObject.property_name.fget,
# BaseDataObject._get_property_name,
# 'BaseDataObject.property_name is expected to use the '
# '_get_property_name method as its getter-method'
# )
# # - If property_name is not expected to be publicly settable,
# # the second item here (BaseDataObject._set_property_name) should
# # be changed to None, and the failure message adjusted
# # accordingly:
# # - Assert that the setter is correct:
# self.assertEqual(
# BaseDataObject.property_name.fset,
# BaseDataObject._set_property_name,
# 'BaseDataObject.property_name is expected to use the '
# '_set_property_name method as its setter-method'
# )
# # - If property_name is not expected to be publicly deletable,
# # the second item here (BaseDataObject._del_property_name) should
# # be changed to None, and the failure message adjusted
# # accordingly:
# # - Assert that the deleter is correct:
# self.assertEqual(
# BaseDataObject.property_name.fdel,
# BaseDataObject._del_property_name,
# 'BaseDataObject.property_name is expected to use the '
# '_del_property_name method as its deleter-method'
# )
LocalSuite.addTests(
unittest.TestLoader().loadTestsFromTestCase(
testBaseDataObject
)
)
#######################################
# Child-module test-cases to execute #
#######################################
# import child_module
# LocalSuite.addTests(child_module.LocalSuite._tests)
#######################################
# Imports to resolve circular #
# dependencies. Avoid if possible. #
#######################################
#######################################
# Initialization that needs to #
# happen after member definition. #
#######################################
#######################################
# Code to execute if file is called #
# or run directly. #
#######################################
if __name__ == '__main__':
import time
results = unittest.TestResult()
testStartTime = time.time()
LocalSuite.run(results)
results.runTime = time.time() - testStartTime
PrintTestResults(results)
if not results.errors and not results.failures:
SaveTestReport(results, 'hms_core.data_objects',
'hms_core.data_objects.test-results')
| 2.0625 | 2 |
runtime/test/specs/V1_3_cts_only/concat_invalid_rank.mod.py | aosp-goes-brrbrr/packages_modules_NeuralNetworks | 0 | 12796003 | <reponame>aosp-goes-brrbrr/packages_modules_NeuralNetworks
#
# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
input0 = Input("input0", "TENSOR_FLOAT32", "{1, 1, 1, 1, 2}")
input1 = Input("input1", "TENSOR_FLOAT32", "{1, 1, 1, 1, 2}")
axis = 4
output0 = Output("output0", "TENSOR_FLOAT32", "{1, 1, 1, 1, 4}")
model = Model().Operation("CONCATENATION", input0, input1,
axis).To(output0).IntroducedIn("V1_0")
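# The inputs are rank-5 tensors concatenated along axis 4, which exceeds the
# maximum rank supported by CONCATENATION, so this example is expected to fail
# validation (hence ExpectFailure below).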
Example({
input0: [1, 2],
input1: [3, 4],
output0: [1, 2, 3, 4],
}).ExpectFailure()
| 1.75 | 2 |
python/handwritten_baseline/pipeline/model/scripts/train_predict_optimize.py | UKPLab/cdcr-beyond-corpus-tailored | 10 | 12796004 | <filename>python/handwritten_baseline/pipeline/model/scripts/train_predict_optimize.py
import copy
import json
import pickle
import pprint
from logging import Logger
from pathlib import Path
from typing import Dict, Optional, List, Union, Tuple
import numpy as np
import optuna
import pandas as pd
import seaborn as sns
from joblib import dump, delayed, Parallel, load
from optuna import Trial
from optuna.samplers import TPESampler
from sklearn.feature_selection import RFECV
from sklearn.model_selection import RepeatedKFold, cross_val_score, KFold
from sklearn.pipeline import Pipeline
from tabulate import tabulate
from python.handwritten_baseline.pipeline.model.classifier_clustering.pairwise_classifier_wrapper import \
PredictOnTransformClassifierWrapper
from python.handwritten_baseline.pipeline.model.data_prep.pipeline_data_input import get_X_and_y_for_pipeline
from python.handwritten_baseline.pipeline.model.feature_extr import LEMMA_EXTR, TFIDF_EXTR, TIME_EXTR, LOCATION_EXTR, \
SENTENCE_EMBEDDING_EXTR, ACTION_PHRASE_EMBEDDING_EXTR, WIKIDATA_EMBEDDING_EXTR
from python.handwritten_baseline.pipeline.model.scripts import SVC_HUBER, LOGISTIC_REGRESSION, _TYPE, _KWARGS, XGBOOST, \
_FIT_PARAMS, MLP
from python.handwritten_baseline.pipeline.model.scripts.feature_importance import get_feature_names_from_pipeline, \
analyze_feature_importance
from python.handwritten_baseline.pipeline.model.scripts.pipeline_instantiation import instantiate_pipeline, \
CLUSTERING_PIPELINE_STEP_NAME, CLASSIFIER_PIPELINE_STEP_NAME
from python.handwritten_baseline.pipeline.model.scripts.prediction_analysis import perform_prediction_analysis
from python.handwritten_baseline.pipeline.model.scripts.scoring import CrossDocCorefScoring, MentionPairScoring
from python.pipeline import RUN_WORKING_DIR, MAX_CORES
from python.util.config import write_config
from python.util.optuna import EarlyStoppingCallback, PlotCallback
from python.util.util import get_dict_hash
def load_data(path):
# load preprocessed dataset from file
with open(path, "rb") as f:
data = pickle.load(f)
return data
def sample_classifier_config_with_optuna(trial: Trial, classifier_name: str) -> Dict:
"""
Uses optuna to sample a config with classifier hyperparameters.
:param trial: Optuna trial
:param classifier_name: The classifier to use (and sample hyperparameters for). Testing them separately seems to
make more sense to me.
:return: classifier config
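    For illustration, with classifier_name == LOGISTIC_REGRESSION the returned
    dictionary has the shape
    {_TYPE: "SGDClassifier", _KWARGS: {"loss": "log", "alpha": <sampled>, ...}}.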
"""
if classifier_name in [SVC_HUBER, LOGISTIC_REGRESSION]:
if classifier_name == SVC_HUBER:
# modified_huber results in a quadratically smoothed SVM with gamma = 2
loss = "modified_huber"
elif classifier_name == LOGISTIC_REGRESSION:
loss = "log"
else:
raise ValueError
# alpha range follows the suggestions of the sklearn documentation
classifier_config = {_TYPE: "SGDClassifier",
_KWARGS: {"loss": loss,
"alpha": trial.suggest_loguniform("alpha", 1e-7, 1e-1),
"max_iter": 1000,
"early_stopping": True,
"validation_fraction": 0.1,
"n_iter_no_change": 5}}
elif classifier_name == XGBOOST:
classifier_config = {_TYPE: "ConvenientXGBClassifier",
_KWARGS: {"n_jobs": 1,
"n_estimators": 1000, # we use early stopping, so this is the maximum
"learning_rate": trial.suggest_loguniform("learning_rate", 1e-4, 1e0),
# learning rate
"min_child_weight": trial.suggest_float("min_child_weight", 1, 10),
# min required instance weight at a child
"max_depth": trial.suggest_int("max_depth", 3, 12), # max tree depth
"gamma": trial.suggest_loguniform("gamma", 1e-3, 1e0),
# Minimum loss reduction required to make a further partition on a leaf node of the tree.
"max_delta_step": trial.suggest_loguniform("max_delta_step", 1e-3, 1e2),
# Maximum delta step we allow each leaf output to be. Reported to help with imbalanced data.
"subsample": trial.suggest_float("subsample", 0.5, 1.0),
"colsample_bytree": trial.suggest_float("colsample_bytree", 0.5, 1.0),
"colsample_bylevel": trial.suggest_float("colsample_bylevel", 0.5, 1.0),
# recommended to use for imbalanced datasets (which we definitely have)
"scale_pos_weight": trial.suggest_loguniform("scale_pos_weight", 1.0, 10),
"objective": "binary:logistic",
"eval_metric": "logloss",
},
_FIT_PARAMS: {"early_stopping_rounds": 5,
"eval_metric": "logloss",
"validation_fraction": 0.1,
"verbose": False}}
elif classifier_name == MLP:
num_hidden_layers = trial.suggest_int("num_hidden_layers", 1, 2)
last_hidden_layer_size = trial.suggest_int("last_hidden_layer_size", 5, 50)
hidden_layer_sizes = [2 ** (num_hidden_layers - i - 1) * last_hidden_layer_size for i in
range(num_hidden_layers)]
classifier_config = {_TYPE: "MLPClassifier",
_KWARGS: {"hidden_layer_sizes": tuple(hidden_layer_sizes),
"activation": "relu",
"solver": "adam",
"learning_rate_init": trial.suggest_loguniform("learning_rate_init", 1e-4, 1e-1),
"max_iter": 1000,
"shuffle": True,
"early_stopping": True,
"n_iter_no_change": 5,
"validation_fraction": 0.1}}
else:
raise ValueError
return classifier_config
def sample_clustering_config_with_optuna(trial: Trial) -> Dict:
"""
Uses optuna to sample a config dictionary with clustering parameters.
:param trial: optuna trial
:return: config dictionary
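    For illustration, the returned dictionary always contains the keys
    threshold, linkage_method, cluster_criterion and cluster_depth
    (cluster_depth is 0 unless the criterion is 'inconsistent').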
"""
cluster_criterion = trial.suggest_categorical("cluster_criterion", ['inconsistent', 'distance', 'maxclust'])
cluster_depth = 0 if not cluster_criterion == 'inconsistent' else trial.suggest_int("cluster_depth", low=1, high=10)
clustering_config = {"threshold": trial.suggest_uniform("threshold", 0, 1),
"linkage_method": trial.suggest_categorical("linkage_method", ['single', 'complete', 'average', 'weighted', 'centroid', 'median', 'ward']),
"cluster_criterion": cluster_criterion,
"cluster_depth": cluster_depth}
return clustering_config
def get_feature_extractors_config_with_all_and_defaults() -> Dict:
"""
Returns config section for all feature extractors with default values.
:return:
"""
return {
LEMMA_EXTR: {},
TFIDF_EXTR: {},
TIME_EXTR: {},
LOCATION_EXTR: {},
SENTENCE_EMBEDDING_EXTR: {},
ACTION_PHRASE_EMBEDDING_EXTR: {},
WIKIDATA_EMBEDDING_EXTR: {}
}
def optimize_hyperparameters(config_data: Dict,
config_model: Dict,
config_hyperopt: Dict,
config_global: Dict,
logger: Logger):
"""
To be used for hyperparameter optimization of the mention pair classifier and agglomerative clustering.
:param config_data:
:param config_model:
:param config_hyperopt:
:param config_global:
:param logger:
:return:
"""
# During the hyperparameter optimization, use a fixed random seed for the Optuna sampling, CV splits and classifier.
optimization_random_seed = 0
# If False, hyperparameters for mention pair classification are optimized. If True, hyperparameters for clustering
# are optimized. The latter case needs a full classifier configuration, see below.
with_clustering = config_hyperopt["with_clustering"]
classifier = config_model["classifier"] # type: Union[str, Dict]
# ------------- validate parameters ---------------
if not with_clustering and (classifier is None or type(classifier) is dict):
raise ValueError("To optimize the mention pair classifier, the 'classifier' config parameter must be the name of the classifier to optimize.")
if with_clustering and (type(classifier) is str or not classifier):
raise ValueError("To optimize the clustering step, the 'classifier' config parameter must be a complete classifier configuration in the form of a dictionary.")
# ------------- create base config to more or less use in each optimization step ------------
extractors = config_model["features"].get("extractors", None)
if extractors is None:
extractors = get_feature_extractors_config_with_all_and_defaults()
# Pass this to filter extracted features so that only those from preliminary feature selection are used.
# None means "use all features", an empty list means no features at all!
selected_features = config_model["features"].get("selected_features", None) # type: Optional[List]
pairs_config = config_data["pairs"]
base_config = {"random_seed": optimization_random_seed,
"features": {
"extractors": extractors,
"selected_features": selected_features
},
"pairs": pairs_config
}
# ------------- get going with optimization now ---------------
serialization_dir = config_global[RUN_WORKING_DIR]
train_data = load_data(config_data["train_data_path"])
doc_partitioning = config_data["doc_partitioning"]
oracle_mention_pair_generation = config_data["oracle_mention_pair_generation"]
train_X, train_y = get_X_and_y_for_pipeline(logger,
train_data,
doc_partitioning=doc_partitioning,
oracle_mention_pair_generation=oracle_mention_pair_generation)
# for cross-validation, make 6 splits at most and fall back to leave-one-out (here: one instance = one partition)
# if there are few partitions
cv_num_splits = min(6, len(train_X))
cv_num_repeats = config_hyperopt["cv_num_repeats"]
cv_n_jobs = config_global[MAX_CORES]
if cv_n_jobs > 1 and ((cv_num_splits * cv_num_repeats) % cv_n_jobs) != 0:
logger.warning(f"Inefficient cross-validation parameter choices, expect idling CPUs ({cv_num_splits} folds * {cv_num_repeats} repeats % {cv_n_jobs} CPUs != 0)")
def objective(trial: Trial):
# config dictionaries are modified during instantiation, so we need to deepcopy the originals to not lose them
config = copy.deepcopy(base_config)
if with_clustering:
assert type(classifier) is not str
config["classifier"] = copy.deepcopy(classifier)
config["clustering"] = sample_clustering_config_with_optuna(trial)
else:
assert type(classifier) is str
config["classifier"] = sample_classifier_config_with_optuna(trial, classifier)
# store the config in the trial so that we can retrieve it later and use it to instantiate the best model -
# don't ask me why it needs to be stored as a string, using the dict object did not work
trial.set_user_attr("config", json.dumps(config))
# instantiate feature pipeline and classifier, transform the features
pipeline, scoring = instantiate_pipeline(logger,
config,
with_clustering=with_clustering,
use_caching=True,
scorer_should_return_single_scalar=True,
serialization_dir=serialization_dir / "pipeline" / f"trial_{trial.number:03}")
cv = RepeatedKFold(n_splits=cv_num_splits,
n_repeats=cv_num_repeats,
random_state=optimization_random_seed)
f1_scores_cv = cross_val_score(estimator=pipeline,
X=train_X,
y=train_y,
n_jobs=cv_n_jobs,
cv=cv,
scoring=scoring,
verbose=0)
mean_f1 = f1_scores_cv.mean()
return mean_f1
logger.info("Starting optimization.")
callbacks = []
if "early_stopping" in config_hyperopt:
callbacks.append(EarlyStoppingCallback(logger, **config_hyperopt["early_stopping"]))
callbacks.append(PlotCallback(serialization_dir=serialization_dir / "plots"))
sampler = TPESampler(seed=optimization_random_seed)
study = optuna.create_study(sampler=sampler, direction="maximize")
optuna_timeout_seconds = pd.to_timedelta(config_hyperopt["timeout"]).total_seconds()
optuna_n_trials = config_hyperopt["n_trials"]
study.optimize(objective,
n_trials=optuna_n_trials,
timeout=optuna_timeout_seconds,
callbacks=callbacks)
best_trial = study.best_trial
best_config = json.loads(best_trial.user_attrs["config"])
logger.info("Best trial: " + repr(best_trial))
logger.info("Best config:\n" + pprint.pformat(best_config))
# write best config to file
best_config_file = serialization_dir / "best_model_config.yaml"
write_config(best_config, best_config_file)
def train(config_data: Dict,
config_model: Dict,
config_training: Dict,
config_global: Dict,
logger: Logger) -> None:
"""
Trains n classifier+clustering pipelines with a given configuration.
:param config_data:
:param config_model:
:param config_training:
:param config_global:
:param logger:
:return:
"""
serialization_dir = config_global[RUN_WORKING_DIR]
num_models_to_train = config_training["num_models_to_train"]
with_clustering = config_training["with_clustering"]
train_data = load_data(config_data["train_data_path"])
doc_partitioning = config_data["doc_partitioning"]
oracle_mention_pair_generation = config_data["oracle_mention_pair_generation"]
train_X, train_y = get_X_and_y_for_pipeline(logger,
train_data,
doc_partitioning=doc_partitioning,
oracle_mention_pair_generation=oracle_mention_pair_generation)
base_pipeline_config = {**config_model,
"pairs": config_data["pairs"]}
if base_pipeline_config["features"]["extractors"] is None:
base_pipeline_config["features"]["extractors"] = get_feature_extractors_config_with_all_and_defaults()
def fit_save_and_report(random_seed: int) -> Pipeline:
pipeline_config = copy.deepcopy(base_pipeline_config)
pipeline_config["random_seed"] = random_seed
pipeline, scoring = instantiate_pipeline(logger,
pipeline_config,
with_clustering=with_clustering,
scorer_should_return_single_scalar=False,
serialization_dir=serialization_dir / "pipeline" / f"seed_{random_seed:03}")
pipeline.fit(X=train_X, y=train_y)
return pipeline
# train pipelines in parallel
logger.info(f"Training {num_models_to_train} separate models...")
jobs = [delayed(fit_save_and_report)(random_seed) for random_seed in range(num_models_to_train)]
pipelines = Parallel(n_jobs=config_global[MAX_CORES])(jobs)
if config_training["analyze_feature_importance"]:
logger.info("Analyzing feature importance")
analyze_feature_importance(pipelines, serialization_dir, logger)
logger.info("Saving pipelines to disk")
model_dir = serialization_dir / "serialized_models"
model_dir.mkdir(exist_ok=True)
for i, p in enumerate(pipelines):
dump(p, model_dir / f"{i}.pipeline.joblib")
def evaluate(model_serialization_dir: Path,
config_data: Dict,
config_evaluate: Dict,
config_global: Dict,
logger: Logger) -> pd.DataFrame:
"""
Predicts and evaluates
:param model_serialization_dir: path to the directory containing serialized models and scorers
:param config_data:
:param config_evaluate:
:param config_global:
:param logger:
:return: metrics Dataframe
"""
serialization_dir = Path(config_global[RUN_WORKING_DIR])
logger.info("Finding and loading model pipelines from disk.")
pipelines = {} # type: Dict[int, Pipeline]
    for p in model_serialization_dir.iterdir():
        # only consider serialized pipeline files; anything else in the directory is skipped
        if "".join(p.suffixes) != ".pipeline.joblib":
            continue
        i = int(p.stem.split(".")[0])
        pipelines[i] = load(p)
# find out if we are dealing with mention pair classification or clustering pipelines
last_pipeline_step_names = {p.steps[-1][0] for p in pipelines.values()}
if len(last_pipeline_step_names) > 1:
raise ValueError("All pipelines must be of the same type (mention pair classification or clustering)")
last_pipeline_step_name = list(last_pipeline_step_names)[0]
# prepare scorers
if last_pipeline_step_name == CLASSIFIER_PIPELINE_STEP_NAME:
is_clustering_pipeline = False
# collect mention pair scorer parameters
if not "pairs" in config_data:
raise ValueError("Scoring mention pairs requires a 'pairs' config.")
config_pairs = config_data["pairs"]
mpg_prediction_config = config_pairs.pop("mpg_prediction")
if mpg_prediction_config is not None:
logger.warning("'mpg_prediction' was specified for a mention pair scoring scenario. Depending on those parameters, evaluation results are not representative. I hope you know what you're doing.")
elif last_pipeline_step_name == CLUSTERING_PIPELINE_STEP_NAME:
is_clustering_pipeline = True
# if present, inject hard document clusters into the last pipeline stage (the clustering stage)
hard_document_clusters_file = config_evaluate["hard_document_clusters_file"]
if hard_document_clusters_file is not None:
hard_document_clusters_file = Path(hard_document_clusters_file)
assert hard_document_clusters_file.exists() and hard_document_clusters_file.is_file()
with hard_document_clusters_file.open("rb") as f:
hard_document_clusters = pickle.load(f)
# the format in the pickle file is topic_subtopic-part-1_..._subtopic-part-n_doc-id to be used with the Barhom et al. system, so we split on underscores and pick the last value to obtain the document id
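            # hypothetical example: an entry like "36_1ecb_XYZ" would be reduced to the document id "XYZ"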
hard_document_clusters = [{doc_id.split("_")[-1] for doc_id in cluster} for cluster in hard_document_clusters]
logger.info(f"Using hard document clustering ({len(hard_document_clusters)} clusters given).")
for p in pipelines.values():
p.steps[-1][1].set_params(hard_document_clusters=hard_document_clusters)
else:
raise ValueError("Could not identify last pipeline step.")
# load and prepare data
eval_data = load_data(config_data["eval_data_path"])
doc_partitioning = config_data["doc_partitioning"]
oracle_mention_pair_generation = config_data["oracle_mention_pair_generation"]
eval_X, eval_y = get_X_and_y_for_pipeline(logger,
eval_data,
doc_partitioning=doc_partitioning,
oracle_mention_pair_generation=oracle_mention_pair_generation)
def predict_and_evaluate(i, pipeline):
# write scoring outputs into separate folder for each model
i_serialization_dir = serialization_dir / str(i)
i_serialization_dir.mkdir(exist_ok=True)
# instantiate scorer which fits the pipeline
if is_clustering_pipeline:
scorer = CrossDocCorefScoring(metrics="all", serialization_dir=i_serialization_dir)
else:
scorer = MentionPairScoring(mpg_prediction_config,
serialization_dir=i_serialization_dir)
metrics, outcomes = scorer(pipeline, eval_X, eval_y)
metrics["model"] = i
return metrics, outcomes
# predict in parallel
logger.info(f"Predicting/evaluating {len(pipelines)} separate models...")
jobs = [delayed(predict_and_evaluate)(i, pipeline) for i, pipeline in pipelines.items()]
metrics_and_outcomes = Parallel(n_jobs=config_global[MAX_CORES])(jobs)
metrics, outcomes = list(zip(*metrics_and_outcomes))
# for classifiers only: detailed prediction analysis for each coref link type and prediction examples
if not is_clustering_pipeline and config_evaluate["perform_prediction_analysis"]:
logger.info(f"Performing prediction analysis")
num_samples_per_quadrant = config_evaluate["num_samples_per_quadrant"]
perform_prediction_analysis(dataset=eval_data,
outcomes=outcomes,
num_samples_per_quadrant=num_samples_per_quadrant,
serialization_dir=serialization_dir)
# aggregate metrics: min/max/mean/std
metrics = pd.concat(metrics)
if is_clustering_pipeline:
group_by = ["meta-doc", "metric"]
else:
group_by = ["metric"]
metrics_agg = metrics.groupby(group_by)[["f1", "precision", "recall"]].describe(percentiles=[])
metrics_agg.drop(columns=["count", "50%"], level=1, inplace=True)
# write metrics to disk
metrics.to_csv(serialization_dir / "metrics_unaggregated.csv", index=True)
metrics_agg.to_csv(serialization_dir / "metrics_aggregated.csv", index=True)
metrics_agg_str = tabulate(metrics_agg, headers="keys")
with (serialization_dir / "metrics_aggregated_pretty.txt").open("w") as f:
f.write(metrics_agg_str)
logger.info("\n" + metrics_agg_str)
return metrics_agg
def feature_selection(config_data: Dict,
config_global: Dict,
logger: Logger):
"""
Runs feature selection on the EVALUATION split.
    Uses 7 runs of 6-fold cross-validation for recursive feature elimination with a Random Forest mention pair classifier to
find the most useful features.
:param config_data:
:param config_global:
:param logger:
:return:
"""
serialization_dir = config_global[RUN_WORKING_DIR]
eval_data_path = config_data["eval_data_path"]
oracle_mention_pair_generation = config_data["oracle_mention_pair_generation"]
data = load_data(eval_data_path)
X, y = get_X_and_y_for_pipeline(logger,
data,
doc_partitioning=None,
oracle_mention_pair_generation=oracle_mention_pair_generation)
config_base = {
"classifier": {_TYPE: "RandomForest",
_KWARGS: {"n_estimators": 100}},
"features": {
"extractors": get_feature_extractors_config_with_all_and_defaults(),
"selected_features": None
},
"pairs": config_data["pairs"]
}
    def run_rfecv_iteration(random_seed: int,
                            n_splits: int = 6) -> Tuple[List[str], np.ndarray, np.ndarray]:
        # RFECV needs X to be a matrix-like of shape (n_samples, n_features). This means we cannot use our pipeline as is,
# because our X's are not matrix-like. So we run our pipeline up to the point where we input the feature matrix +
# labels into the mention pair classifier, and feed that to RFECV. To do that, we need to chop up the pipeline.
config = copy.deepcopy(config_base)
config["random_seed"] = random_seed
pipeline, scoring = instantiate_pipeline(logger,
config,
with_clustering=False,
scorer_should_return_single_scalar=True,
serialization_dir=serialization_dir / "pipeline")
# remove the classifier at the end of the pipeline
classifier_wrapper = pipeline.steps.pop(-1)[1] # type: PredictOnTransformClassifierWrapper
assert type(classifier_wrapper) is PredictOnTransformClassifierWrapper
random_forest_clf = classifier_wrapper.classifier_
# obtain feature matrix and labels
conflated_X = pipeline.fit_transform(X, y)
actual_X, actual_y = classifier_wrapper._take_apart_X(conflated_X)
cv = KFold(n_splits=n_splits, random_state=random_seed, shuffle=True)
# We set min_impurity_decrease depending on the number of instances to obtain a useful feature selection result.
# min_impurity_decrease was determined based on a series of manual experiments with a varying number of features
# producing random and zero values. For 1e3 instances, values between 1e-7 and 1e-1 were tested, and 0.0015
# produced plots closest to the optimal expected result (i.e. significant peak around the number of non-garbage
# features). Similar experiments were conducted for 1e4 and 1e5 instances. We interpolate between these data points.
num_instances = len(actual_y)
xp = np.log10([1e3, 1e5])
fp = np.log10([0.0015, 0.00025])
min_impurity_decrease = 10**np.interp(np.log10(num_instances), xp, fp)
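        # e.g. for 1e4 instances this interpolation yields roughly 6e-4 (the geometric mean of the two anchor values)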
random_forest_clf.set_params(min_impurity_decrease=min_impurity_decrease)
logger.info("Running feature selection...")
selector = RFECV(estimator=random_forest_clf,
n_jobs=config_global[MAX_CORES],
cv=cv,
scoring="f1_weighted", # use f1_weighted because we have very imbalanced data
verbose=1)
selector.fit(actual_X, actual_y)
logger.info("Done.")
feature_names = get_feature_names_from_pipeline(pipeline)
support = selector.support_
grid_scores = selector.grid_scores_
assert len(support) == len(feature_names)
return feature_names, support, grid_scores
# When using oracle mention pair generation, a randomly determined subset of all mention pairs is used. This has a
    # big influence on the results. We therefore make sure to run multiple RFECV iterations with different random seeds for
# the mention pair generation and aggregate those.
results = []
for seed in range(7):
results.append(run_rfecv_iteration(seed))
feature_names, supports, grid_scores = list(zip(*results))
# assert that all results are compatible
assert len(set(len(s) for s in supports)) == 1
assert len(set(get_dict_hash(fn) for fn in feature_names)) == 1
# collect selections in DataFrame
selections = pd.DataFrame(np.vstack(supports).transpose(), index=pd.Index(feature_names[0], name="feature-name"))
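    # keep a feature only if it was selected in more than half of the RFECV runs (majority vote)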
selected_features = selections.loc[selections.mean(axis=1) > 0.5].index.values
# write to file(s)
selections.to_csv(str(serialization_dir / "selected_features_unaggregated.csv"))
with (serialization_dir / "selected_features.txt").open("w") as f:
f.write("\n".join(selected_features))
logger.info("Selected features: " + "\n".join(selected_features))
# collect scores
df_grid_scores = []
for m in grid_scores:
# number of features and CV-score for that number of features
x_and_y = np.vstack([np.arange(1, len(m) + 1), m]).transpose()
df_grid_scores.append(x_and_y)
df_grid_scores = pd.DataFrame(np.vstack(df_grid_scores))
df_grid_scores.columns = ["num-features", "weighted-f1"]
df_grid_scores.to_csv(str(serialization_dir / "grid_scores.csv"))
# plot feature selection results
plot_destination = serialization_dir / "rfecv_plot.png"
ax = sns.lineplot(x="num-features", y="weighted-f1", data=df_grid_scores)
fig = ax.get_figure()
fig.savefig(str(plot_destination)) | 1.828125 | 2 |
src/bakalarishell/main.py | Hackrrr/BakalariAPI | 5 | 12796005 | from __future__ import annotations
import argparse
import asyncio
import getpass
import inspect
import json
import logging
import logging.config
import os
import threading
import time
import traceback
import warnings
import webbrowser
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from typing import IO, TYPE_CHECKING, Any, Callable, cast
import bakalariapi
import platformdirs
import requests
import rich
from bakalariapi.utils import cs_timedelta, parseHTML
from prompt_toolkit.input import create_input
from prompt_toolkit.key_binding import KeyPress
from prompt_toolkit.keys import Keys
from rich.console import Console
from rich.logging import RichHandler
from rich.progress import BarColumn, Progress, TaskID, TimeRemainingColumn
from rich.syntax import Syntax
from rich.traceback import install as tb_install
from urllib3.exceptions import InsecureRequestWarning
# Takový hack na to, aby `bakalarishell` šel spustit také přímo ze zdrojové složky
# Pokud se `bakalarishell` spustí jako modul (= přes `import`), tak vše proběhne v pořádku
# Pokud se ale spustí přes "python main.py" nebo "python bakalarishell" (kde "bakalarishell"
# je složka), tak relativní `import` selže ("ImportError: attempted relative import with no
# known parent package") a `shell` se naimportuje "přímo" (resp. ne relativně), což už je v pořádku.
# Pozn.: Pokud někdo dumá nad tím, proč zde tedy není jen druhá možnost, tak to je
# kvůli tomu, že ta zase pro změnu nefugnuje při importu jako modul, jelikož v tom případě
# hledá modul `shell` jako "globální" modul (ne jako "lokální" ve složce), tudíž selže.
if TYPE_CHECKING:
from . import shell
else:
try:
from . import shell
except ImportError:
import shell
tb_install(show_locals=True)
cls = shell.cls
api: bakalariapi.BakalariAPI
shell_instance: shell.Shell
dirs = platformdirs.PlatformDirs(
appauthor="BakalariAPI", appname="bakalarishell", roaming=True
)
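# names of the files kept inside the platformdirs user data directory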
CONFIG_FILE = "config.json"
TIME_FILE = "_lasttime"
@dataclass
class Args:
url: str | None = None
username: str | None = None
password: str | None = None
browser: str | None = None
executable_path: str | None = None
verbose: int = 0
test: int | None = None
auto_run: bool = False
no_init: bool = False
no_import: bool = False
disable_config: bool = False
commands: list[str] = field(default_factory=list)
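# filled in by main() once command-line arguments and the stored config have been merged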
args: Args
class RichTask:
def __init__(self, progress: Progress, task_id: TaskID) -> None:
self.progress = progress
self.task_id = task_id
def start(self):
self.progress.start_task(self.task_id)
def update(
self,
total: float | None = None,
completed: float | None = None,
advance: float | None = None,
description: str | None = None,
visible: bool | None = None,
refresh: bool = False,
**fields,
):
self.progress.update(
self.task_id,
total=total,
completed=completed,
advance=advance,
description=description,
visible=visible,
refresh=refresh,
**fields,
)
def finish(self):
task = self.progress.tasks[self.task_id]
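        # rich treats a task with a non-None finished_time as finished, so this marks the bar as done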
task.finished_time = 0
##################################################
##### FUNKCE #####
##################################################
def rich_print(
*objects: Any,
sep: str = " ",
end: str = "\n",
file: IO[str] | None = None,
flush: bool = False,
color: str | None = None,
**kwargs,
):
c = rich.get_console() if file is None else Console(file=file)
if color is not None:
# Pravděpodobně někdy bude problém, že se vše převádí na string, ale zatím to problém není, tak to neřeším eShrug
objects = tuple(map(lambda x: f"[{color}]{x}[/{color}]", objects))
return c.print(*objects, sep=sep, end=end, **kwargs)
def partial_init_notice():
rich_print(
'Tuto akci nelze vykonat, jelikož shell se nachází v omezeném módu. Pro přepnutí do online módu můžete zkusit příkaz "init".',
color="yellow",
)
def dialog_ano_ne(
text: str = "", default: bool | None = None, color: str | None = None
) -> bool:
message = f"{text} Ano/Ne{'' if default is None else (' (Ano)' if default else ' (Ne)')}: "
while True:
# ano/true/yes/1 / ne/false/no/0
if color is not None:
rich_print(message, end="", color=color)
inpt = input()
else:
inpt = input(message)
if len(inpt) == 0:
if default is None:
continue
return default
input_letter = inpt[0].lower()
if input_letter in "aty1":
return True
if input_letter in "nf0":
return False
def dialog_cislo(text: str = "", default: int | None = None):
print(text, "" if default is None else f"({default})")
while True:
inpt = input()
if not inpt:
if default is None:
continue
return default
if inpt.isdecimal():
return int(inpt)
print("Špatná hodnota")
def print_keys(keys: list[tuple[str, str] | str], enter_pokracovani=True):
output = ["Enter - Pokračování"] if enter_pokracovani else []
for key in keys:
if isinstance(key, tuple):
if key[1] == "":
output.append(key[0])
else:
output.append(f"[{key[1]}]{key[0]}[/{key[1]}]")
else:
output.append(key)
rich_print(", ".join(output))
def show(obj: bakalariapi.objects.BakalariObject, title: str | None = None):
if title is not None:
print(title)
if isinstance(obj, bakalariapi.Komens):
rich_print(obj.format(True))
print("\n\n")
print_keys([("P - Potrvrdí přečtení zprávy", "" if obj.confirmed else "green")])
def komens_key_handler(key_press: KeyPress, done: Callable):
if key_press.key == "p":
print("Potvrzuji zprávu...")
obj.confirm(api)
print("Zpráva potvrzena")
asyncio.run(keyhandler(komens_key_handler))
elif isinstance(obj, bakalariapi.Grade):
rich_print(obj.format(True))
print("\n\n")
asyncio.run(keyhandler(None))
elif isinstance(obj, bakalariapi.Meeting):
rich_print(obj.format(True))
print("\n\n")
is_before = obj.is_before_start
delta = obj.start_time_delta
color = ""
# Delta totiž může být očividně i negativní
if not is_before and delta >= timedelta(hours=-1):
color = "red"
elif is_before and delta <= timedelta(minutes=5):
color = "yellow"
elif is_before and delta <= timedelta(minutes=30):
color = "green"
print_keys(
[("O - Otevře schůzku v prohlížeči", color), "Z - Zobrazí HTML pozvánky"]
)
def meeting_key_handler(key_press: KeyPress, done: Callable):
key = key_press.key.lower()
if key == "o":
webbrowser.open(obj.join_url)
elif key == "z":
c = Console()
c.print(Syntax(str(parseHTML(obj.content).prettify()), "html"))
asyncio.run(keyhandler(meeting_key_handler))
# elif isinstance(obj, bakalariapi.Student):
# pass
elif isinstance(obj, bakalariapi.Homework):
rich_print(obj.format(True))
print("\n\n")
print_keys(
[
("H - Označí úkol jako hotový", "" if obj.done else "green"),
"N - Označí úkol jako nehotový",
"Z - Zobrazí HTML úkolu",
]
)
def homework_key_handler(key_press: KeyPress, done: Callable):
key = key_press.key.lower()
if key == "h":
obj.mark_as_done(api, True)
print("Úkol označen jako hotový")
elif key == "n":
obj.mark_as_done(api, False)
print("Úkol označen jako nehotový")
elif key == "z":
c = Console()
c.print(Syntax(str(parseHTML(obj.content).prettify()), "html"))
asyncio.run(keyhandler(homework_key_handler))
else:
raise Exception(f"Undefined type '{type(obj)}' to show")
async def keyhandler(
handler: Callable[[KeyPress, Callable[[], None]], None] | None,
*,
done_on_enter: bool = True,
mask_keyboard_interrupt: bool = False,
):
"""
Začne zaznamenávat zmáčklé klávesy, které následně passuje do dané funkce.
Args:
handler:
Funkce do které se passují zaznamenané klávesy.
Bere 2 argumenty:
key_press:
Zaznamenaný stisk klávesy.
done:
Funkce, která při zavolání ukončí záznam kláves.
Pokud je `None`, nic se nevolá.
Hodnota `None` má smysl pouze pokud parametr `done_on_enter` je `True`.
done_on_enter:
Pokud True, tak se při klávese Enter ukončí záznam kláves.
Pozn.: Pokud True, tak se funkce v parametru handler nevolá.
mask_keyboard_interrupt:
Pokud `True`, tak `KeyboardInterrupt` bude potlačen.
Pokud `False`, `KeyboardInterrupt` bude propagován.
Pozn.: Ve skutečnosti je `KeyboardInterrupt` simulován, jelikož z asyncio loopu `KeyboardInterrupt` nepřichází.
Příklad:
```
def handler(keys_press: KeyPress, done: Callable):
if key_press.key == "q":
done()
asyncio.run(keyhandler(handler))
```
Nebo, pokud máme asynchoní funkci, lepší řešení pro poslední řádku je:
```
await keyhandler(handler)
```
"""
evnt = asyncio.Event()
inpt = create_input()
done = lambda: evnt.set()
def key_handler_proc(keys: list[KeyPress]):
for key_press in keys:
if done_on_enter and key_press.key == Keys.Enter:
done()
# elif key_press.key == Keys.F4:
# for key_press in keys:
# if key_press.key == Keys.Escape:
# raise SystemExit
elif not mask_keyboard_interrupt and key_press.key == Keys.ControlC:
raise KeyboardInterrupt
elif handler is not None:
handler(key_press, done)
with inpt.raw_mode():
with inpt.attach(lambda: key_handler_proc(inpt.read_keys())):
await evnt.wait()
def get_io_filepath(file: str) -> str:
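    """Vrátí cestu k danému souboru `file` v uživatelské (data) složce."""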
return os.path.join(dirs.user_data_dir, file)
def get_io_file(file: str, create_file: bool, mode: str = "r+") -> IO:
"""Vrátí file handler na daný soubor `file` v uživatelské (data) složce."""
path = get_io_filepath(file)
if not os.path.exists(path):
if not create_file:
raise FileNotFoundError()
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "x", encoding="utf-8"):
pass
return open(path, mode, encoding="utf-8")
def save_config():
with get_io_file(CONFIG_FILE, True) as f:
# Indent, protože chci, aby to šlo přehledně upravit i z editoru (i když k tomu nejspíše nikdy nedojde)
# (a navíc alespoň nemusí řešit formátování při "config show")
json.dump(args.__dict__, f, indent=4)
def disable_ssl():
def patch(f: Callable):
def patched(*args, **kwargs):
# `cast()` protože jsem zatím nepřišel na způsob, jak dostat hint při patchování metod (pomocí `ParamSpec`u)
session = cast(bakalariapi.sessions.RequestsSession, args[0])
bound = inspect.signature(f).bind(*args, **kwargs)
bound.apply_defaults()
login = bound.arguments["login"]
bound.arguments["login"] = False
x = f(*bound.args, **bound.kwargs)
session.session.verify = False
if login:
session.login()
return x
return patched
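    # monkeypatch RequestsSession.__init__ so every newly created session skips certificate verification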
bakalariapi.sessions.RequestsSession.__init__ = patch(
bakalariapi.sessions.RequestsSession.__init__
)
# Když nastavíme `verify` na `False` (v `requests` modulu), `urllib3` si začne stěžovat
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
##################################################
##### PŘÍKAZO-FUNKCE #####
##################################################
def Init() -> bool:
def partial_init_mode():
rich_print(
"\nInicilizace neproběhla úspěšně a shell poběží v omezeném módu.\nPro přepnutí do plného módu zkuste opětovat inicializaci pomocí příkazu 'init'.",
color="yellow",
)
def ask_import() -> bool:
try:
if args.no_import:
if dialog_ano_ne(
"Server není dostupný; Chce importovat uložená data?",
True,
"yellow",
):
Command_Import()
else:
partial_init_mode()
else:
rich_print(
"Server není dostupný; Uložená data byla již importována, je tedy možné pracovat se starými daty",
color="yellow",
)
partial_init_mode()
except KeyboardInterrupt:
partial_init_mode()
return False
if args.url is None:
try:
args.url = input("URL adresa serveru: ")
api.server_info.url = args.url
except KeyboardInterrupt:
rich_print("\nNebyla zadána adresa serveru", color="red")
partial_init_mode()
return False
if args.username is None:
try:
args.username = input("Přihlašovací jméno: ")
api.username = args.username
except KeyboardInterrupt:
rich_print("\nNebylo zadáno přihlašovací jméno", color="red")
partial_init_mode()
return False
if args.password is None:
try:
args.password = getpass.getpass("Heslo: ")
except KeyboardInterrupt:
rich_print(
"\nHeslo nebylo zadáno, předpokládá se prázdné heslo", color="yellow"
)
args.password = ""
api.password = args.password
try:
rich_print(
f"Kontrola stavu serveru a přihlašovacích údajů pro uživatele [cyan]{api.username}[/cyan]...",
highlight=False,
)
try:
if not api.is_login_valid():
rich_print("Přihlašovací údaje jsou neplatné", color="red")
partial_init_mode()
return False
except requests.exceptions.SSLError:
# rich.get_console().print_exception()
try:
if dialog_ano_ne(
"Nepodařilo se navázat zabezpečené připojení k serveru. Chcete pokračovat s nezabezpečeným připojením?",
False,
"yellow",
):
disable_ssl()
api.session_manager.kill_all(False)
print(
"Deaktivovalo se zabezpečené připojení, inicializace nyní proběhne znovu..."
)
return Init()
else:
return ask_import()
except KeyboardInterrupt:
partial_init_mode()
return False
except requests.exceptions.RequestException:
return ask_import()
except KeyboardInterrupt:
rich_print("Inicializace byla předčasně ukončena", color="yellow")
partial_init_mode()
return False
rich_print("Server běží a přihlašovací údaje jsou správné", color="green")
print("Nastavuji...")
try:
with warnings.catch_warnings():
# Nechceme dostat `VersionMismatchWarning`, protože v `SeverInfo()` kontrolujeme verzi manuálně
warnings.simplefilter("ignore")
api.init()
except KeyboardInterrupt:
rich_print(
"Nebyly získány informace o stavu serveru, ale žádné funkce by tímto neměli být ovlivněny",
color="yellow",
)
return True
print("Nastaveno:")
ServerInfo()
return True
def ServerInfo():
rich_print(
f"Typ uživatele: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.type == '' else f'[cyan]{api.user_info.type}[/cyan]'}\n"
f"Uživatelký hash: {'[bright_black]Není k dispozici[/bright_black]' if api.user_info.hash == '' else f'[cyan]{api.user_info.hash}[/cyan]'}\n"
f"Verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version is None else f'[cyan]{api.server_info.version}[/cyan]'}\n"
f"Datum verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.version_date is None else '[cyan]'+api.server_info.version_date.strftime('%d. %m. %Y')+'[/cyan] [bright_black]('+cs_timedelta((datetime.now() - api.server_info.version_date), 'd')+' stará verze)[/bright_black]'}\n"
f"Evidenční číslo verze Bakalářů: {'[bright_black]Není k dispozici[/bright_black]' if api.server_info.evid_number is None else f'[cyan]{api.server_info.evid_number}[/cyan]'}\n",
highlight=False,
)
if not (api.server_info.version is None) and not api.is_version_supported():
rich_print(
"*** Jiná verze Bakalářů! Všechny funkce nemusejí fungovat správně! ***",
highlight=False,
color="yellow",
)
def Command_Komens(limit: int | None = None, force_fresh: bool = False):
def fresh() -> list[bakalariapi.Komens]:
if api.is_partial_init:
partial_init_notice()
return []
output: list[bakalariapi.Komens] = []
with Progress() as progress:
task = RichTask(
progress, progress.add_task("Získávání zpráv", start=False, total=0)
)
unresolved = api._parse(
bakalariapi.modules.komens.getter_komens_ids(api)
).get(bakalariapi.UnresolvedID)[:limit]
task.update(total=len(unresolved))
for unresolved_id in unresolved:
output.append(api._resolve(unresolved_id).get(bakalariapi.Komens)[0])
task.update(advance=1)
return output
if force_fresh:
zpravy = fresh()
else:
zpravy = api.get_komens(bakalariapi.GetMode.CACHED)
if len(zpravy) == 0:
print("Žádné zprávy v Lootingu, zkouším načíst ze serveru")
zpravy = fresh()
length = len(zpravy)
if length == 0:
print("Nebyly nalezeny žádné aktualní schůzky")
return
cls()
count = 1
for zprava in zpravy:
try:
show(zprava, f"*** Zpráva {count} z {length} ***")
count += 1
cls()
except KeyboardInterrupt:
print("\n")
break
def Command_Znamky(force_fresh: bool = False):
print("Získávám známky...")
try:
znamky = api.get_grades(
bakalariapi.GetMode.FRESH
if force_fresh
else bakalariapi.GetMode.CACHED_OR_FRESH
)
except bakalariapi.exceptions.PartialInitError:
partial_init_notice()
return
length = len(znamky)
print(f"Známky získány ({length}), zobrazuji...")
cls()
count = 1
for znamka in znamky:
try:
show(znamka, f"*** Známka {count} z {length} ***")
count += 1
cls()
except KeyboardInterrupt:
print("\n")
break
def Command_Schuzky(force_fresh: bool = False):
def fresh():
if api.is_partial_init:
partial_init_notice()
return []
output = []
with Progress() as progress:
task = RichTask(
progress, progress.add_task("Získávání schůzek", start=False, total=0)
)
unresolved = api._parse(
bakalariapi.modules.meetings.getter_future_meetings_ids(api)
).get(bakalariapi.UnresolvedID)
task.update(total=len(unresolved))
for unresolved_id in unresolved:
output.append(api._resolve(unresolved_id).get(bakalariapi.Meeting)[0])
task.update(advance=1)
return output
if force_fresh:
schuzky = fresh()
else:
schuzky = api.get_meetings(bakalariapi.GetMode.CACHED)
if len(schuzky) == 0:
print("Žádné schůzky v Lootingu, zkouším načíst ze serveru")
schuzky = fresh()
length = len(schuzky)
if length == 0:
print("Nebyly nalezeny žádné aktualní schůzky")
return
cls()
count = 1
for schuzka in schuzky:
try:
show(schuzka, f"*** Schůzka {count} z {length} ***")
count += 1
cls()
except KeyboardInterrupt:
print("\n")
break
def Command_Studenti(force_fresh: bool = False):
print("Získávám studenty...")
try:
studenti = api.get_students(
bakalariapi.GetMode.FRESH
if force_fresh
else bakalariapi.GetMode.CACHED_OR_FRESH
)
except bakalariapi.exceptions.PartialInitError:
partial_init_notice()
return
length = len(studenti)
print(f"Studenti získáni, počet studentů je {length}")
try:
count = dialog_cislo("Kolik zobrazit výsledků najednou?", 25)
except KeyboardInterrupt:
return
offset = 0
cls()
while offset < length:
try:
for _ in range(count):
if offset >= length:
break
print(studenti[offset].format())
offset += 1
input(
f"Pro pokračování stiskni klávasu... (Již zobrazeno {offset} výsledků z {length})"
)
cls()
except KeyboardInterrupt:
print("\n")
break
def Command_Ukoly(fast: bool = False, force_fresh: bool = False):
print("Načítání úkolů...")
try:
if fast:
ukoly = api.get_homeworks(
bakalariapi.GetMode.FRESH
if force_fresh
else bakalariapi.GetMode.CACHED_OR_FRESH,
fast_mode=True,
)
else:
ukoly = api.get_homeworks(
bakalariapi.GetMode.FRESH
if force_fresh
else bakalariapi.GetMode.CACHED_OR_FRESH,
fast_mode=False,
unfinished_only=False,
only_first_page=False,
)
except bakalariapi.exceptions.PartialInitError:
partial_init_notice()
return
hotove = 0
nehotove = 0
for ukol in ukoly:
if ukol.done:
hotove += 1
else:
nehotove += 1
if hotove + nehotove == 0:
print("Nebyly nalezeny žádné aktualní úkoly")
return
print(f"Úkoly načteny (hotové {hotove}, nehotové {nehotove})")
    zobraz_hotove = fast or dialog_ano_ne("Chcete zobrazit již hotové úkoly?")
count = 1
for ukol in ukoly:
try:
if not zobraz_hotove and ukol.done:
continue
cls()
show(
ukol,
f"*** Domácí úkol {count} z {hotove + nehotove if zobraz_hotove else nehotove} ***",
)
count += 1
except KeyboardInterrupt:
print("\n")
break
def Command_Konec(nice: bool = True):
shell_instance.stop_loop()
api.kill(nice)
def Command_Export(file_name: str = "main"):
print("Generace JSON dat...")
with get_io_file(file_name, True) as f:
json.dump(api.looting.export_data(), f, ensure_ascii=False)
# Odstraníme data, která jsou případně po JSONu, co jsme teď napsali (třeba pozůstatek po předchozím JSONu, pokud byl delší jak náš současný)
f.truncate()
print(f"JSON data vygenerována a zapsána do souboru '{file_name}'")
def Command_Import(file_name: str = "main"):
try:
with get_io_file(file_name, False) as f:
api.looting.import_data(json.loads(f.read()))
except FileNotFoundError:
rich_print(
f"Data nebyla načtena, jelikož soubor '{file_name}' neexistuje",
color="yellow",
)
else:
print(f"Data ze souboru '{file_name}' byla načtena")
def Command_Config(namespace: dict[str, Any]):
cmd = namespace["cmd"]
config_path = get_io_filepath(CONFIG_FILE)
if cmd == "show":
if os.path.exists(config_path):
with open(config_path, "r") as f:
rich_print(Syntax(f.read(), "json"))
else:
print("Žádná konfigurace není uložená")
elif cmd == "save":
save_config()
print("Konfigurace uložena")
elif cmd == "remove":
if os.path.exists(config_path):
os.remove(config_path)
print("Konfigurace byla vymazána")
else:
print("Nic se nevykonalo, jelikož konfigurace není uložená")
elif cmd == "check":
if os.path.exists(config_path):
s = os.stat(config_path)
rich_print(
f"Konfigurace je uložená z data {datetime.fromtimestamp(s.st_mtime).strftime('%d. %m. %Y, %H:%M:%S')}, velikost konfigurace je {s.st_size}B"
)
else:
print("Žádná konfigurace není uložená")
elif cmd == "open":
dirname = os.path.dirname(config_path) # = dirs.user_data_dir()
if os.path.exists(dirname):
webbrowser.open(os.path.realpath(dirname))
else:
print("Nelze otevřít konfigurační složku, jelikož neexistuje")
##################################################
##### TESTY #####
##################################################
def RunTest(ID: int):
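    # look up a module-level function named "Test<ID>" and run it, reporting success or failure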
m = __import__(__name__)
t = f"Test{ID}"
if hasattr(m, t):
rich_print(f"Zahajuji test {ID}")
try:
o = getattr(m, t)()
rich_print(
f"Test {ID} skončil" + ("" if o is None else "; Výsledek testu:")
)
if o is not None:
rich_print(o)
except:
rich_print("Test skončil neúspěchem:", color="red")
traceback.print_exc()
else:
rich_print(f"Test {ID} nenalezen", color="red")
def Test0():
print("Spouštím testování...")
with api.session_manager.get_session_or_create(
bakalariapi.sessions.RequestsSession
) as session:
try:
while True:
last = session.get(
api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO)
).json()["data"]["remainingTime"]
print("\r", end="")
while True:
print(
"Současný zbývající čas: " + str(last) + " " * 20, end="\r"
) # Some spaces to rewrite previous text...
session.get(
api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_EXTEND)
)
current = float(
session.get(
api.get_endpoint(bakalariapi.bakalari.Endpoint.SESSION_INFO)
).json()["data"]["remainingTime"]
)
if last < current:
print("\n")
break
last = current
time.sleep(1)
print(
"Sezení bylo prodlouženo, když zbývalo "
+ str(last)
+ " (+ max 1s) do konce a bylo prodlouženo na "
+ str(current)
)
except KeyboardInterrupt:
print("Testování ukončeno")
def Test1():
# "Kopírování"
print("Vytváření kopie dat skrze export/import...")
data = api.looting.export_data()
new = bakalariapi.looting.Looting()
new.import_data(data)
print("Kopie vytvořena")
# Porovnávání
typ_mismatch = 0
id_len_mismatch = 0
id_mismatch = 0
print("=" * 30)
print(f"Počet typů v datech (old): {len(api.looting.data)}")
print(f"Počet typů v datech (new): {len(api.looting.data)}")
print("Porovnávání zahájeno...")
for typ_old, typ_new in zip(api.looting.data.keys(), new.data.keys()):
if typ_old != typ_new:
print(f"Neshodující se typy! Old: '{typ_old}'; New: '{typ_new}'")
typ_mismatch += 1
continue
old_id_len = len(api.looting.data[typ_old])
new_id_len = len(new.data[typ_new])
if old_id_len != new_id_len:
print(
f"Neshodující se počet záznamů pro typ {typ_old}! Old: {old_id_len}; New: {new_id_len}"
)
id_len_mismatch += 1
for id_old, obj_old, id_new, obj_new in zip(
api.looting.data[typ_old].keys(),
api.looting.data[typ_old].values(),
new.data[typ_new].keys(),
new.data[typ_new].values(),
):
if id_old != id_new:
print(
f"Neshodující se ID! Old: '{id_old}'; New: '{id_new}' (typ: {typ_old}; ID type (old): {type(id_old)}; ID type (new): {type(id_new)})"
)
id_mismatch += 1
print(
f"Porovnávání dokončeno:\nChyb u typů:\t{typ_mismatch}\nChyb u ID:\t{id_mismatch}"
)
return (typ_mismatch, id_mismatch, id_len_mismatch)
def Test2():
print("Získávám IDčka online schůzek...")
IDs = api._parse(
bakalariapi.modules.meetings.getter_meetings_ids(
api, datetime(1, 1, 1), datetime(9999, 12, 31, 23, 59, 59)
)
).get(bakalariapi.UnresolvedID)
la = len(IDs)
print(f"IDčka online schůzek získany ({la})")
print()
error: list[bakalariapi.UnresolvedID[bakalariapi.Meeting]] = []
try:
with Progress() as progress:
task = RichTask(progress, progress.add_task("Získávání schůzek", total=la))
for ID in IDs:
task.update(description=f"Schůzka {ID.ID}")
try:
api._resolve(ID)
except bakalariapi.exceptions.BakalariQuerrySuccessError as e:
progress.log(f"Online schůzku {ID.ID} se nepodařilo načíst")
error.append(ID)
finally:
task.update(advance=1)
except KeyboardInterrupt:
pass
finally:
le = len(error)
print(
f"Úspěšné pokusy: {la - le}; Neúspěšné pokusy: {le}; Chybovost: {le/la*100:.2f}%"
)
def Test3():
print("Tento test již není podporován... Sadge")
return
# return API.GetHomeworksIDs()
def Test4():
print("Tento test již není podporován... Sadge")
return
# return API.MarkHomeworkAsDone(input("ID Úkolu: "), input("ID Studenta: "), True)
def Test5():
print("Tento test již není podporován... Sadge")
return
# homeworks = API.GetHomeworks()
# print("Úkoly načteny...")
# zobrazHotove = AnoNeDialog("Chte zobrazit již hotové úkoly?")
# cls()
# for homework in homeworks:
# if not zobrazHotove and homework.Done:
# continue
# print("*** Domácí úkol ***")
# print(homework.Format())
# print("\n\n")
# input("Pro pokračování stiskni klávasu...")
# cls()
def Test6():
count_total = 0
count_invalid = 0
try:
while True:
count_total += 1
output = api.get_homeworks(
bakalariapi.GetMode.FRESH,
fast_mode=False,
unfinished_only=False,
only_first_page=False,
)
if len(output) <= 20:
count_invalid += 1
print("==============================")
print(f"Nepodařil se se pokus číslo {count_total}")
print(f"Nepodařených pokusů je {count_invalid} z {count_total}")
probrallity = (count_total - count_invalid) / count_total * 100
print("Pravděpodobnost úspěšnosti je %.2f%%" % probrallity)
print("==============================")
time.sleep(5)
except KeyboardInterrupt:
print("==============================")
print(f"Nepodařených pokusů bylo {count_invalid} z celkových {count_total}")
probrallity = (count_total - count_invalid) / count_total * 100
print("Konečná ravděpodobnost úspěšnosti je %.2f%%" % probrallity)
##################################################
##### MAIN #####
##################################################
def main():
global api
global args
def load_args_from_config() -> dict | None:
global args
with get_io_file(CONFIG_FILE, True) as f:
parsed = json.load(f)
return parsed
parser = argparse.ArgumentParser(
description="Shell integrující funkcionalitu BakalářiAPI",
epilog="Ano, ano, ano... Actually je to web scraper, ale API zní líp :)",
)
if parser.prog == "":
parser.prog = "bakalarishell"
parser.add_argument(
"url",
help="URL na bakaláře (př. https://bakalari.skola.cz); Pokud není tento argument přítomen, program se zeptá za běhu",
nargs="?",
default=None,
)
parser.add_argument(
metavar="jmeno",
help="Přihlašovací jméno; Pokud není tento argument přítomen, program se zeptá za běhu",
dest="username",
nargs="?",
default=None,
)
parser.add_argument(
metavar="heslo",
nargs="?",
help="Přihlašovací heslo; Pokud není tento argument přítomen, program se zeptá za běhu",
dest="password",
default=None,
)
parser.add_argument(
"-b",
"--browser",
choices=[x.name.lower() for x in bakalariapi.Browser],
type=str.lower, # => case-insensitive
help="Specifikuje WebDriver prohlížeče, který použít",
default=None,
)
parser.add_argument(
"-e",
"--executablePath",
help="Cesta ke spustitelnému webdriveru pro prohlížeč, který je specifikovaný pomocí '-b'",
dest="executable_path",
default=None,
)
parser.add_argument(
"-t",
"--test",
type=int,
help="Test, který se má spustit",
# dest="test",
metavar="ID",
default=None,
)
parser.add_argument(
"-a",
"--auto-run",
help="Pokud je tato flaga přítomna, spustí se automatické úlohy",
action="store_true",
dest="auto_run",
default=None,
)
parser.add_argument(
"-n",
"--no-init",
help="Pokud je tato flaga přítomna, nebude BakalariAPI instance automaticky inicializována",
action="store_true",
dest="no_init",
default=None,
)
parser.add_argument(
"--no-import",
help="Pokud je tato flaga přítomna, nebude proveden import dat (z hlavního souboru)",
action="store_true",
dest="no_import",
default=None,
)
parser.add_argument(
"-v",
"--verbose",
help="Zapne shell v 'ukecaném módu'; Lze opakovat vícekrát pro větší 'ukecanost' (max 5)",
action="count",
default=None,
)
parser.add_argument(
"-d",
"--disable-config",
help="Soubor s konfigurací se bude ignorovat, tudíž se brát v potaz pouze argumenty z příkazové řádky",
action="store_true",
dest="disable_config",
default=None,
)
parser.add_argument(
"-c",
"--command",
help="Vykoná daný příkaz po zapnutí shellu (po autorunu); Lze opakovat vícekrát",
action="append",
dest="commands",
default=None,
)
# Všechny argumenty pro argparse MUSÍ mít "default=None", jinak se neprofiltrují
# a nelze pro daný argument načíst hodnotu z configu (protože hodnota z configu
# se přepíše hodnotou "None" z argparse)
parsed = {k: v for k, v in vars(parser.parse_args()).items() if v is not None}
# Jelikož hodnoty filtrujeme, tak pokud i po filtrování je "disable_config"
# v "parsed" tak má hodnotu `True`, tudíž se můžeme dotazovat (jen) přes `in`
if not ("disable_config" in parsed):
from_config = load_args_from_config()
if from_config is not None:
parsed = from_config | parsed
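            # dict union: values given on the command line override those loaded from the config file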
args = Args(**parsed)
# Verbose:
# 0 - Nic
# 1 - Warning; Pouze BakalářiAPI
# 2 - Info; Pouze BakalářiAPI
# 3 - Debug; Pouze BakalářiAPI
# 4 - Info
    # 5 - NOTSET
if args.verbose != 0:
logging.basicConfig(
level=[
None,
"WARNING",
"INFO",
"DEBUG",
"INFO",
"NOTSET",
][args.verbose],
datefmt="[%X]",
handlers=[RichHandler()],
)
logging.info(
"Logging zapnut na levelu %s (%s)",
args.verbose,
logging.getLevelName(logging.root.level),
)
if args.verbose < 4:
for logger in [
logging.getLogger(name) for name in logging.root.manager.loggerDict
]:
if logger.name.startswith("bakalariapi"):
continue
logger.propagate = False
# logging.getLogger("bakalariapi").propagate = True
selenium: bakalariapi.SeleniumHandler | None = None
if args.browser is not None:
selenium = bakalariapi.SeleniumHandler(
bakalariapi.Browser[args.browser.upper()],
args.executable_path,
)
api = bakalariapi.BakalariAPI(args.url, args.username, args.password, selenium)
successful_init = False
if not args.no_init:
successful_init = Init()
if not args.no_import:
try:
with get_io_file("main", False) as f:
api.looting.import_data(json.loads(f.read()))
except FileNotFoundError:
pass
if args.test is not None:
RunTest(args.test)
prepare_shell()
# Chceme `main()` locals, ne `prepare_shell()` locals
shell_instance.PYTHON_EXEC_LOCALS = locals()
print()
    rich_print(
        "Bakalarishell připraven - verze BakalářiAPI je "
        + (
            f"[green_yellow]{bakalariapi.__version__}[/green_yellow]"
            if "dev" in bakalariapi.__version__
            else f"[bright_cyan]{bakalariapi.__version__}[/bright_cyan]"
        )
    )
lasttime: datetime = datetime.max
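    # datetime.max acts as "no previous run recorded"; the real timestamp is read from TIME_FILE below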
try:
with get_io_file(TIME_FILE, False) as f:
lasttime = datetime.fromisoformat(f.read())
except FileNotFoundError:
pass
if args.auto_run:
if successful_init:
def task_ukoly(api: bakalariapi.BakalariAPI, task: RichTask):
length = len(
api.get_homeworks(bakalariapi.GetMode.FRESH, fast_mode=True)
)
task.update(total=length, completed=length)
def task_komens(api: bakalariapi.BakalariAPI, task: RichTask):
unresolved = api._parse(
bakalariapi.modules.komens.getter_komens_ids(
api,
from_date=None if lasttime is None else lasttime - timedelta(5),
)
).get(bakalariapi.UnresolvedID)
task.update(total=len(unresolved))
task.start()
for unresolved_id in unresolved:
api._resolve(unresolved_id)
task.update(advance=1)
def task_znamky(api: bakalariapi.BakalariAPI, task: RichTask):
length = len(api.get_all_grades())
task.update(total=length, completed=length)
def task_schuzky(api: bakalariapi.BakalariAPI, task: RichTask):
unresolved = api._parse(
bakalariapi.modules.meetings.getter_future_meetings_ids(api)
).get(bakalariapi.UnresolvedID)
task.update(total=len(unresolved))
task.start()
for unresolved_id in unresolved:
api._resolve(unresolved_id)
task.update(advance=1)
@dataclass
class Task:
description: str
function: Callable[[bakalariapi.BakalariAPI, RichTask], None]
start: bool = True
tasks: list[Task] = [
Task("Získání Komens zpráv", task_komens, False),
Task("Získání schůzek", task_schuzky, False),
Task("Získání úkolů", task_ukoly),
Task("Získání známek", task_znamky),
]
def autorun():
with Progress(
"[progress.description]{task.description}",
BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
"{task.completed}/{task.total}",
TimeRemainingColumn(),
) as progress:
threads: list[threading.Thread] = []
for task in tasks:
thread = threading.Thread(
target=task.function,
args=(
api,
RichTask(
progress,
progress.add_task(
task.description, start=task.start, total=0
),
),
),
)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
print()
autorun()
else:
rich_print(
"Autorun nebyl spuštěn kvůli nepodařené/nekompletní inicializaci",
color="yellow",
)
if "exit" not in args.commands and (not args.no_import or args.auto_run):
print()
today = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)
today_aware = (
datetime.now()
.astimezone()
.replace(hour=0, minute=0, second=0, microsecond=0)
)
first = True
for znamka in filter(
lambda x: min(lasttime, today - timedelta(5)) < x.date1 and x.grade != "?",
api.looting.get(bakalariapi.Grade),
):
if first:
first = False
print("Poslední známky:")
note = znamka.note1.strip() or znamka.note2.strip()
rich_print(
f"Z předmětu [magenta]{znamka.subject}[/magenta] známka [bright_green]{znamka.grade}[/bright_green] ze dne {znamka.date1.strftime('%d. %m. %Y')}"
+ ("" if note == "" else f" - {note}")
)
first = True
        for znamka in filter(
            lambda x: x.grade == "?", api.looting.get(bakalariapi.Grade)
        ):
            if first:
                first = False
                print("Nadcházející klasifikace:")
            rich_print(
                f"Z předmětu [magenta]{znamka.subject}[/magenta] na {znamka.date1.strftime('%d. %m. %Y')}"
            )
first = True
for schuzka in filter(
lambda x: today_aware < x.start_time
and x.start_time < today_aware + timedelta(2),
api.looting.get(bakalariapi.Meeting),
):
if first:
first = False
print("Dnešní a zítřejší schůzky:")
rich_print(
f"{schuzka.start_time.strftime('%H:%M %d. %m. %Y')} - {'[bright_black]Neznámý[/bright_black]' if schuzka.owner is None else f'[magenta]{schuzka.owner.name}[/magenta]'} \"{schuzka.name.strip()}\""
)
first = True
for ukol in filter(lambda x: not x.done, api.looting.get(bakalariapi.Homework)):
if first:
first = False
print("Úkoly:")
ukol._sort_by_date
rich_print(
f"Z předmětu [magenta]{ukol.subject}[/magenta] na {ukol.submission_date.strftime('%d. %m.')} - {ukol.content}"
)
first = True
        for komens in filter(
            lambda x: (x.need_confirm and not x.confirmed)
            or min(lasttime, today - timedelta(5)) < x.time,
            api.looting.get(bakalariapi.Komens),
        ):
            if first:
                first = False
                print("Komens zprávy:")
            rich_print(
                f"Komens zpráva od [magenta]{komens.sender}[/magenta] z {komens.time.strftime('%H:%M %d. %m. %Y')}"
                + (
                    " [yellow](nepotvrzená)[/yellow]"
                    if (komens.need_confirm and not komens.confirmed)
                    else ""
                )
            )
with get_io_file(TIME_FILE, True) as f:
f.write(datetime.now().isoformat())
if len(args.commands) != 0:
if successful_init:
print("Vykonávám zadané příkazy...")
for command in args.commands:
print(command)
shell_instance.proc_string(command)
else:
rich_print(
"Zadané příkazy nebyly spuštěny kvůli nepodařené/nekompletní inicializaci",
color="yellow",
)
try:
shell_instance.start_loop()
except (shell.DummyShellError, KeyboardInterrupt):
Command_Konec(False)
def prepare_shell():
global shell_instance
predefined_commands = [x for x in shell.ShellPredefinedCommands]
predefined_commands.remove(shell.ShellPredefinedCommands.EXIT)
_globals = globals()
_globals["p"] = rich_print
_globals["i"] = rich.inspect
shell_instance = shell.Shell(
# prompt="[bright_green]BakalariAPI Shell[/bright_green][yellow]>[/yellow]",
prompt="BakalariAPI Shell>",
allow_python_exec=True,
python_exec_prefix=" ",
python_exec_globals=_globals,
python_exec_locals=locals(),
predefined_commands=predefined_commands,
command_exception_traceback=True,
command_exception_traceback_locals=True,
command_exception_reraise=False,
raise_on_ctrlc=True,
end_on_ctrlc=True,
dummy_shell="exit" in args.commands,
)
parser_fresh = shell.ShellArgumentParser(add_help=False)
parser_fresh.add_argument(
"-f",
"--fresh",
help="Pokud je tato flaga přítomna, vynutí se získání dat ze serveru",
default=False,
action="store_true",
dest="force_fresh",
)
parser = shell.ShellArgumentParser(parents=[parser_fresh])
parser.add_argument(
"limit",
type=int,
nargs="?",
default=None,
help="Limituje počet zpráv, které se načtou a tím i zrychlí proces",
)
shell_instance.add_command(
shell.Command(
"komens",
Command_Komens,
short_help="Zobrazí komens zprávy",
argparser=parser,
spread_arguments=True,
aliases=["zpravy"],
)
)
shell_instance.add_command(
shell.Command(
"znamky",
Command_Znamky,
short_help="Zobrazí známky",
argparser=shell.ShellArgumentParser(parents=[parser_fresh]),
)
)
shell_instance.add_command(
shell.Command(
"schuzky",
Command_Schuzky,
short_help="Zobrazí (nadcházející) schůzky",
argparser=shell.ShellArgumentParser(parents=[parser_fresh]),
)
)
shell_instance.add_command(
shell.Command(
"studenti",
Command_Studenti,
short_help="Zobrazí studenty",
argparser=shell.ShellArgumentParser(parents=[parser_fresh]),
)
)
parser = shell.ShellArgumentParser()
parser.add_argument("ID", help="ID testu, který se má spustit")
shell_instance.add_command(
shell.Command(
"test",
RunTest,
argparser=parser,
short_help="Spustí daný test",
spread_arguments=True,
)
)
parser = shell.ShellArgumentParser(parents=[parser_fresh])
parser.add_argument(
"-s",
"--slow",
help="Pokud je tato flaga přítomna, úkoly budou získány v 'pomalém módu'",
action="store_false",
dest="fast",
default=True,
)
shell_instance.add_command(
shell.Command(
"ukoly",
Command_Ukoly,
argparser=parser,
short_help="Zobrazí úkoly",
spread_arguments=True,
)
)
shell_instance.add_command(
shell.Command(
"server",
ServerInfo,
short_help="Zobrazí informace o serveru",
)
)
parser = shell.ShellArgumentParser()
parser.add_argument(
"-f",
"--force",
help="Pokud je tato flaga přítomna, neprovede se odlášení sessionů a aplikace se tedy rychleji ukončí",
action="store_false",
default=True,
dest="nice",
)
shell_instance.add_command(
shell.Command(
"exit",
Command_Konec,
argparser=parser,
short_help="Ukončí shell",
spread_arguments=True,
)
)
parser = shell.ShellArgumentParser()
parser.add_argument(
"file_name",
nargs="?",
help="ID/jméno exportu",
default="main",
metavar="ID",
)
shell_instance.add_command(
shell.Command(
"export",
Command_Export,
argparser=parser,
short_help="Exportuje data z daného souboru",
spread_arguments=True,
)
)
parser = shell.ShellArgumentParser()
parser.add_argument(
"file_name",
nargs="?",
help="ID/jméno importu",
default="main",
metavar="ID",
)
shell_instance.add_command(
shell.Command(
"import",
Command_Import,
argparser=parser,
short_help="Importuje data z daného souboru",
spread_arguments=True,
)
)
shell_instance.add_command(
shell.Command("init", Init, short_help="Provede (opětovnou) inicializaci")
)
parser = shell.ShellArgumentParser()
subparsers = parser.add_subparsers(
required=True,
metavar="příkaz",
dest="cmd",
parser_class=shell.ShellArgumentParser,
)
subparsers.add_parser(
"show",
help="Zobrazí uloženou konfiguraci",
)
subparsers.add_parser(
"save",
help="Uloží současnou konfiguraci",
)
subparsers.add_parser(
"remove",
help="Odstraní uloženou konfiguraci",
)
subparsers.add_parser(
"check",
help="Zobrazí údaje o uložené konfiguraci",
)
subparsers.add_parser(
"open",
help="Otevře konfigurační složku",
)
shell_instance.add_command(
shell.Command(
"config",
Command_Config,
argparser=parser,
short_help="Příkaz na práci s uloženou konfigurací",
spread_arguments=False,
)
)
if __name__ == "__main__":
main()
| 1.828125 | 2 |
controller/ui.py | timmyArch/timmyPaste | 2 | 12796006 |
from controller.base import *
class UI(FlaskView):
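    # FlaskView-based class view (flask-classy style): index() serves the landing page, get() shows a
    # paste or the "new paste" form, raw() returns plain text and post() stores a new paste.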
def index(self):
return render_template('index.haml')
def get(self, key=None):
        try:
            if key == 'new':
                return render_template('new.haml')
            elif key:
                return self.__show(key)
        except CodeNotFound:
            flash = "Couldn't find syntax element. Redirect back!"
            return render_template('new.haml', flash=flash)
@route('/<key>/raw')
def raw(self, key):
return Response(Code.find(key).code, mimetype="text/plain")
def post(self):
try:
            hide = request.form.get('hide') == 'true'
            return redirect('/' + Code.new(request.form.get('code'), hide))
        except Exception:
            return render_template('new.haml', flash="""
                Error while creating the paste. Please retry.""")
def __show(self, key):
        # the key may carry a filename suffix, e.g. "abc123.py" -> paste "abc123" highlighted as Python
        keylist = key.split('.')
        ckey = keylist if len(keylist) > 1 else (key, 'txt')
        a = Code.find(ckey[0])
        try:
            hcode = a.highlight('.' + ckey[1])
            flash = False
        except Exception:
            hcode = a.highlight('.txt')
            flash = """
            Sorry, but that lexer doesn't exist. Please use only a filename
            suffix like .rb or .py
            """
return render_template('show.haml',
key=ckey[0],
flash=flash,
code=hcode)
| 2.4375 | 2 |
source/run_tests.py | andrzejgorski/pascalis | 2 | 12796007 | #!/usr/bin/env python
import subprocess
import re
tests_raw = subprocess.check_output(["ls", "tests"])
tests = re.sub("[^\w]", " ", tests_raw).split()
OUTPUTS = {
'program': "1",
'test_if': "1",
'test_if_false': "",
'test_if_el': "1",
'test_if_el_false': "0",
'test_eq_int': "1",
'test_eq_int_false': "0",
'test_neq_int': "0",
'test_neq_int_false': "1",
'test_eq_bool_true_true': "1",
'test_eq_bool_true_false': "0",
'test_eq_bool_false_false': "1",
'test_eq_bool_false_true': "0",
'test_neq_bool_true_true': "0",
'test_neq_bool_true_false': "1",
'test_neq_bool_false_false': "0",
'test_neq_bool_false_true': "1",
'test_and_true_true': "1",
'test_and_true_false': "0",
'test_and_false_true': "0",
'test_and_false_false': "0",
'test_or_true_true': "1",
'test_or_true_false': "1",
'test_or_false_true': "1",
'test_or_false_false': "0",
'test_lt_true': "1",
'test_lt_false': "0",
'test_gt_true': "1",
'test_gt_false': "0",
'test_le_true': "1",
'test_le_false': "0",
'test_ge_true': "1",
'test_ge_false': "0",
'test_eq_char_false': "0",
'test_eq_char_true': "1",
'test_eq_string_true': "1",
'test_print_string': "printed string",
'test_print_char': 'c',
'test_print_true': 'verum',
'test_print_false': 'falsum',
'test_le_string': 'falsum',
'test_add_strings': 'first second',
'test_python_str_cut_out': 'cut out',
'test_python_str_cut_out_left': 'out',
'test_python_str_cut_out_right': 'cut',
'test_python_str_left_right': 'ut ou',
'test_python_str_left_1': 'ut out',
'test_python_str_right_6': 'cut ou',
'test_string_elem': 'j',
'test_string_elem_2': 'n',
'test_string_length_0': '0',
'test_string_length_6': '6',
'test_char_ord': '99',
'test_decl_int': '1',
'test_decl_bool': 'verum',
'test_decl_str': 'michal ma nowy strych',
'test_decl_char': 'x',
'test_eq_var_int': 'falsum',
'test_change_string_content': 'mleko',
'test_change_one_letter_string': 'jajka',
'test_array_construct': '13',
'test_sub_str_var': 't',
'test_block_variables': 'titulus 2titulus 1',
'test_stmt_decl': 'titulus',
'test_x_plus_1': '11',
'test_x_minus_1': '9',
'test_while': '1112',
'test_10_minus_1': '9',
'test_for_loop': '12345678910',
'test_bool_not': 'falsum',
'test_array_write': '10\n20\n30\n40\n50\n60\n70\n80\n90\n100\n',
'test_procedure': 'Maslo z orzechamiLubie placki',
'test_global_variable': 'teraz ty!raz dwa trzy',
'test_procedure_with_param': '1110',
'test_procedure_with_two_params': 'ala ma kota',
'test_procedure_with_variable_param': 'verum',
'test_procedure_with_many_variables': '24',
'test_procedure_recursion': '3628800',
'test_function_return_0': '0',
'test_function_recursive': '3628800',
'test_function_recursive2': '3628800',
'test_variables_strange': '109',
'test_function_recursive_with_decls': 'falsumverum verum verum verum verum verum',
'test_function_with_variable_params': '10000 10000',
'test_array_function_param': 'verum falsum',
'test_array_length': '10',
'test_function_return_array': '1: 0\n2: 2\n3: 3\n4: 12\n5: 21\n6: 52\n7: 111\n8: 123\n9: 432\n10: 23423\n',
'test_lege_int_simple': '123',
'test_lege_minus_int': '-10',
'test_lege_int_white_spaces': '80',
'test_lege_int_white_spaces2': '-12',
'test_lege_3_ints': '1251',
'test_lege_char': 'c',
'test_lege_10_char': 'abc\n10\nyhb',
'test_lege_string': 'De vita Pascalis etiam pellicula perfecta est anno 1972, <NAME> moderatore.',
'test_lege_3_strings': 'kochammojestudia',
'test_dict_sample': '10',
'test_dict_param': '1',
'test_dict_variable_param': '10',
'test_function_return_dict': '10',
'test_function_declaration_in_function': 'verum',
}
INPUTS = {
'test_lege_int_simple': 'cat_123',
'test_lege_minus_int': 'cat_minus_10',
'test_lege_int_white_spaces': 'int_with_white_space',
'test_lege_int_white_spaces2': 'white_spaces2',
'test_lege_3_ints': '3_ints',
'test_lege_char': 'c',
'test_lege_10_char': '10_chars',
'test_lege_string': 'string_input',
'test_lege_3_strings': '3_strings',
}
def decorate_green(value):
return '\033[1;32m{}\033[1;m'.format(value)
def decorate_red(value):
return '\033[1;31m{}\033[1;m'.format(value)
def decorate_yellow(value):
return '\033[1;33m{}\033[1;m'.format(value)
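# e.g. decorate_green("ok") == "\033[1;32mok\033[1;m", which renders green on ANSI-capable terminals.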
correct = 0
for numb, test in enumerate(tests, 1):
try:
if test in INPUTS:
command = ["./TestPascalis -s tests/{} < inputs/{}".format(test, INPUTS[test])]
output = subprocess.check_output(command, shell=True)
else:
output = subprocess.check_output(["./TestPascalis", "-s", "tests/{}".format(test)])
except subprocess.CalledProcessError as exc:
        print '{}. program {} {}'.format(numb, test, decorate_red('cannot be executed'))
print exc
continue
try:
OUTPUTS[test]
except KeyError:
print "{}. Program {} results {}".format(numb, decorate_yellow(test), output)
print "There is defaule value for this program"
continue
if output == OUTPUTS[test]:
correct += 1
print "{}. Test {} is {}.".format(numb, test, decorate_green("ok"))
else:
print "{}. Test {} is {}.".format(numb, test, decorate_red("failed"))
print "output = {}, excepcted = {}".format(output, OUTPUTS[test])
test_count = len(tests)
missing_tests = set(OUTPUTS.keys()) - set(tests)
if missing_tests:
for test in missing_tests:
print 'missing test {}'.format(decorate_red(test))
else:
if correct == test_count:
print decorate_green("Everything is ok.")
print decorate_green("{}/{} tests passed.".format(correct, correct))
else:
print decorate_red("{}/{} tests passed.".format(correct, test_count))
| 2.46875 | 2 |
LeetCode/Session3/FindTilt.py | shobhitmishra/CodingProblems | 0 | 12796008 | class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
tilt = 0
def findTilt(self, root: TreeNode) -> int:
self.findTiltHelper(root)
return self.tilt
def findTiltHelper(self, root):
if not root:
return 0
leftSum = self.findTiltHelper(root.left)
rightSum = self.findTiltHelper(root.right)
self.tilt += abs(leftSum - rightSum)
return leftSum + rightSum + root.val
root = TreeNode(10)
root.left = TreeNode(3)
root.left.left = TreeNode(2)
root.left.right = TreeNode(8)
root.left.right.left = TreeNode(7)
root.left.right.right = TreeNode(9)
root.right = TreeNode(15)
root.right.left = TreeNode(13)
root.right.right = TreeNode(17)
root.right.right.right = TreeNode(19)
ob = Solution()
print(ob.findTilt(root))
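# Illustrative extra check (not part of the original exercise): for a two-node
# tree 1 <- 2, the only non-zero tilt is at the root, |2 - 0| = 2.
small = TreeNode(1)
small.left = TreeNode(2)
assert Solution().findTilt(small) == 2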
| 3.421875 | 3 |
put_json_to_dynamodb.py | kykasper/konomania-bot | 0 | 12796009 | import json
import codecs
import pandas as pd
import boto3
csv_path = "./fixed_tweets.csv"
save_path = "./fixed_tweets.json"
df = pd.read_csv(csv_path, header=None, encoding = "utf-8")
df.columns =["tweet"]
df_json = df.to_dict(orient='records')
resource = boto3.resource('dynamodb', region_name='ap-northeast-1')
# Connect to the DynamoDB table
table = resource.Table('konomania-tweet')
# Load the JSON object created in the step 3 using put_item method
for i, tweet in enumerate(df_json):
if i > 1: break
tweet["id"] = i
table.put_item(Item=tweet)
# Test
# response = table.get_item(Key={'seat_no': 'A 314216'})
# response | 2.734375 | 3 |
2018/day07/part2.py | zagura/aoc-2017 | 2 | 12796010 | <reponame>zagura/aoc-2017
#!/usr/bin/python3
# Example line: Step A must be finished before step L can begin.
edges = [(ord(x[1]) - ord('A'), ord(x[7]) - ord('A')) for x in
map(lambda x: x.split(), open('input.in').readlines())]
workers = 5
for e in edges:
print('{} → {}'.format(chr(ord('A') + e[0]),chr(ord('A') + e[1])))
class Node(object):
def __init__(self, no):
self.id = no
self.inputs = {}
self.outputs = {}
self.begin_time = 0
self.end_time = -1
self.busy = -1
def insert_source(self, source_id, source):
self.inputs[source_id] = source
def insert_target(self, target_id, target):
self.outputs[target_id] = target
def __repr__(self):
return str({ 'in': self.inputs.keys(), 'out': self.outputs.keys(), 'id': [self.id]})
graph = {}
# for l in range(ord('Z') - ord('A') + 1):
# graph[l] = Node(l)
for source, target in edges:
if source not in graph:
graph[source] = Node(source)
if target not in graph:
graph[target] = Node(target)
graph[source].insert_target(target, graph[target])
graph[target].insert_source(source, graph[source])
output = []
nodes_to_insert = []
graph_len = len(graph)
time_point = 0
workers = [ -1 for i in range(6)]
while(len(output) < graph_len):
# print(len(output))
# print(len(graph))
for w in range(len(workers)):
nodes_to_insert = []
for node in graph:
# print('{} : {} → {}'.format(node, len(graph[node].inputs), len(graph[node].outputs)))
# print('{}: {}'.format(node, graph[node]))
if len(graph[node].inputs) == 0:
nodes_to_insert.append(node)
#print(nodes_to_insert)
if len(nodes_to_insert) == 0:
print('Total time: {} .'.format(time_point))
break
nodes_to_insert.sort()
limit = min(len(workers), len(nodes_to_insert))
processed_nodes = nodes_to_insert[:limit]
for n in processed_nodes:
if n in graph:
if w != 0 and workers[w] == -1 and graph[n].busy == -1:
print('Assigning {} to worker {} at time point: {}'.format(chr(n+ord('A')), w, time_point))
graph[n].begin_time = time_point
graph[n].end_time = time_point + n + 1 + 60
workers[w] = n
graph[n].busy = w
if time_point == graph[n].end_time and graph[n].busy >= 0 and w == 0:
for k in graph[n].outputs:
out = graph[n].outputs[k]
del out.inputs[n]
print("Removing {} TP {}.".format(n, time_point))
output.append(n)
workers[graph[n].busy] = -1
graph[n].busy = -1
del graph[n]
time_point += 1
print('Total time: {} .'.format(time_point))
| 2.984375 | 3 |
keeper/api/auth.py | lsst-sqre/ltd-keeper | 5 | 12796011 | """Authentication routes."""
from __future__ import annotations
from flask import g
from flask_accept import accept_fallback
from keeper.api import api
from keeper.auth import password_auth
from keeper.logutils import log_route
from ._models import AuthTokenResponse
@api.route("/token")
@accept_fallback
@log_route()
@password_auth.login_required
def get_auth_token() -> str:
"""Obtain a token for API users.
**Example request**
.. code-block:: http
GET /token HTTP/1.1
Accept: */*
Accept-Encoding: gzip, deflate
Authorization: Basic dXNlcjpwYXNz
Connection: keep-alive
Host: localhost:5000
User-Agent: HTTPie/0.9.3
**Example response**
.. code-block:: http
HTTP/1.0 200 OK
Content-Length: 139
Content-Type: application/json
Date: Tue, 09 Feb 2016 20:23:11 GMT
Server: Werkzeug/0.11.3 Python/3.5.0
{
"token": "<KEY>..."
}
:reqheader Authorization: ``username:password``
:>json string token: Token string. Use this token in the basic auth
``username`` field.
:statuscode 200: No errors.
:statuscode 401: Not authenticated.
"""
return AuthTokenResponse(token=g.user.generate_auth_token()).json()
| 2.84375 | 3 |
tests/acceptance/selene_page_factory_test.py | pupsikpic/selene | 572 | 12796012 | <filename>tests/acceptance/selene_page_factory_test.py
# MIT License
#
# Copyright (c) 2015-2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
from selene import have
from selene.support.shared import browser
from tests.integration.helpers.givenpage import GivenPage
empty_page = 'file://{}/../resources/empty.html'.format(
os.path.abspath(os.path.dirname(__file__))
)
def setup_function():
browser.quit()
def teardown_function():
browser.config.browser_name = 'chrome'
browser.quit()
def test_can_init_default_browser_on_visit():
browser.open(empty_page)
GivenPage(browser.driver).opened_with_body(
'''
<h1 id="header">Selene</h1>'''
)
browser.element("#header").should(have.exact_text("Selene"))
assert browser.driver.name == 'chrome'
def test_can_init_custom_browser_on_visit():
browser.config.browser_name = 'firefox'
browser.open(empty_page)
GivenPage(browser.driver).opened_with_body(
'''
<a id="selene_link">Selene site</a>
'''
)
browser.element("#selene_link").should(have.exact_text("Selene site"))
assert browser.driver.name == 'firefox'
def test_can_init_default_browser_after_custom():
browser.open(empty_page)
GivenPage(browser.driver).opened_with_body(
'''
<h1 id="header">Selene</h1>
'''
)
browser.element("#header").should(have.exact_text("Selene"))
assert browser.driver.name == 'chrome'
| 2.09375 | 2 |
testflows/_core/exceptions.py | testflows/TestFlows-Core | 3 | 12796013 | <filename>testflows/_core/exceptions.py<gh_stars>1-10
import sys
import traceback
def exception(exc_type=None, exc_value=None, exc_traceback=None):
"""Get exception string.
"""
if (exc_type, exc_value, exc_traceback) == (None, None, None):
exc_type, exc_value, exc_traceback = sys.exc_info()
return "".join(traceback.format_exception(exc_type, exc_value, exc_traceback)).rstrip()
class TestFlowsException(Exception):
"""Base exception class.
"""
pass
class ResultException(TestFlowsException):
"""Result exception.
"""
pass
class DummyTestException(TestFlowsException):
"""Dummy test exception.
"""
pass
class TestIteration(TestFlowsException):
"""Repeat test.
"""
def __init__(self, repeat, retry, *args, **kwargs):
self.repeat = repeat
self.retry = retry
super(TestIteration, self).__init__(*args, **kwargs)
class TestRerunIndividually(TestFlowsException):
"""Repeat tests individually.
"""
def __init__(self, tests, *args, **kwargs):
self.tests = tests
super(TestRerunIndividually, self).__init__(*args, **kwargs)
class TestFlowsError(TestFlowsException):
"""Base error exception class.
"""
pass
class RequirementError(TestFlowsError):
"""Requirement error.
"""
pass
class SpecificationError(TestFlowsError):
"""Specification error.
"""
pass
class DescriptionError(TestFlowsError):
"""Description error.
"""
pass
class ArgumentError(TestFlowsError):
"""Argument error.
"""
pass
| 2.53125 | 3 |
services/Compute/ec2_standalone.py | asurion/Hibernate | 9 | 12796014 | <reponame>asurion/Hibernate
from utils.randomGen import generateRandomString
from asyncProducerUtil.utils.connect import Connect
class ElasticComputeDefinition(Connect):
api_name = 'ec2'
table_name = 'scheduler_state_logs'
def ec2_def(self, i, schedule):
OS = i.platform
if OS is None:
OS = 'linux'
tenancy = i.placement['Tenancy']
if tenancy =='default':
tenancy = 'shared'
PLATFORM = []
BUSINESS_UNIT = []
p = next((item for item in i.tags if item["Key"] == "PLATFORM"), None)
if p: PLATFORM = p['Value']
b = next((item for item in i.tags if item["Key"] == "BUSINESS_UNIT"), None)
if b: BUSINESS_UNIT = b['Value']
if isinstance(schedule.get('daysActive'), list):
daysActive = ','.join(schedule.get('daysActive'))
else:
daysActive = schedule.get('daysActive')
return {
"uuid": generateRandomString(16),
"resource_id": i.instance_id,
"Account": self.account,
"resource_type": "ec2",
"Region": self.region,
"InstanceType": i.instance_type,
"OperatingSystem": OS,
"Tenancy": tenancy,
"PLATFORM": PLATFORM,
"BUSINESS_UNIT": BUSINESS_UNIT,
"StopTime": int(schedule.get('stop_time')),
"StartTime": int(schedule.get('start_time')),
"instance_count": 1,
"schedule": daysActive,
"tz": schedule.get('tz'),
"TotalHours": schedule.get('TotalHours')
}
def __init__(self, account, region):
Connect.__init__(self, account, region)
self.resource = Connect.resource_connect(self, self.api_name)
def generate_rows(self, schedules):
ec2_instances = []
for s in schedules:
i = self.resource.Instance(s['resource_id'])
try:
ec2_table_row = self.ec2_def(i, s)
ec2_instances.append(ec2_table_row)
except Exception as e:
print e
return {
self.table_name: ec2_instances
} | 2.28125 | 2 |
gridspectra.py | mahdiqezlou/vw_spectra | 0 | 12796015 | <filename>gridspectra.py
# -*- coding: utf-8 -*-
"""Class to generate spectra in the positions where there is a DLA, as known from the grid generation."""
from __future__ import print_function
import numpy as np
import hdfsim
import h5py
import vw_spectra
import os.path as path
class GridSpectra(vw_spectra.VWSpectra):
"""Generate metal line spectra from simulation snapshot"""
def __init__(self,num, base, numlos=5000, res = 1., cdir = None, dla=True, savefile="grid_spectra_DLA.hdf5", savedir=None, gridfile="boxhi_grid_H2.hdf5"):
#Load halos to push lines through them
f = hdfsim.get_file(num, base, 0)
self.box = f["Header"].attrs["BoxSize"]
f.close()
if savedir == None:
savedir = path.join(base,"snapdir_"+str(num).rjust(3,'0'))
gridfile = path.join(savedir,gridfile)
self.NumLos = numlos
#All through y axis
axis = np.ones(self.NumLos)
#Load grid positions
self.dlaind = self._load_dla_index(gridfile)
#Re-seed for repeatability
np.random.seed(23)
cofm = self.get_cofm()
vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True)
if dla:
self.replace_not_DLA(ndla=numlos, thresh=10**20.3)
else:
self.replace_not_DLA(ndla=numlos, thresh=10**17)
print("Found DLAs")
def get_cofm(self, num = None):
"""Find a bunch of sightline positions known to be where a DLA or an LLS is."""
if num == None:
num = self.NumLos
#Get some random indices into the box.
index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,num)
cofm = np.array([self.dlaind[0,index],self.dlaind[0,index],self.dlaind[1,index]]).T
#Randomize positions within a cell
cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5)
cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5)
#Some sightlines could end up being through the same cell, in rare cases.
#This is only a problem if you want to compare to a quasar survey with pixels large
#compared to the grid size.
return cofm
def _load_dla_index(self, gridfile, dla=False):
"""Load the positions of DLAs or LLS from savefile"""
#Load the DLA/LLS positions
f=h5py.File(gridfile,'r')
grid_file=f["HaloData"]
ngrid = np.array(grid_file["ngrid"])
self.celsz = 1.*self.box/ngrid[0]
grp = f["abslists"]
#This is needed to make the dimensions right
ind = (grp["DLA"][0,:],grp["DLA"][1,:],grp["DLA"][2,:])
if not dla:
ind_lls = (grp["LLS"][0,:],grp["LLS"][1,:],grp["LLS"][2,:])
f.close()
        yslab = (ind[1]+0.5)*self.celsz
        zslab = (ind[2]+0.5)*self.celsz
        if not dla:
            # only append the LLS positions when they were actually loaded above
            yslab_lls = (ind_lls[1]+0.5)*self.celsz
            yslab = np.append(yslab,yslab_lls)
            zslab_lls = (ind_lls[2]+0.5)*self.celsz
            zslab = np.append(zslab,zslab_lls)
        return np.array((yslab, zslab))
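# Usage sketch (illustrative values; the snapshot number and base path depend on the simulation):
#     spec = GridSpectra(num=5, base="/path/to/sim", numlos=5000,
#                        savefile="grid_spectra_DLA.hdf5")
# draws 5000 sightlines through grid cells known to host DLAs/LLSs and saves the
# resulting spectra under the snapshot directory (savedir defaults to base/snapdir_XXX).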
class TestGridSpectra(GridSpectra, vw_spectra.VWSpectra):
"""This specialised class tests the spectral generation code by loading several sightlines in a single cell and finding
their average value, compared to the value in the cell."""
def __init__(self,num, base, numlos=5000, res = 1., seed=23,cdir = None, dla=True, savefile="grid_spectra_DLA.hdf5", savedir=None, gridfile="boxhi_grid_H2.hdf5"):
#Load halos to push lines through them
f = hdfsim.get_file(num, base, 0)
self.box = f["Header"].attrs["BoxSize"]
f.close()
if savedir == None:
savedir = path.join(base,"snapdir_"+str(num).rjust(3,'0'))
gridfile = path.join(savedir,gridfile)
self.NumLos = numlos
#All through y axis
axis = np.ones(self.NumLos)
#Load grid positions
self.dlaind = self._load_dla_index(gridfile, dla)
self.dlaval = self._load_dla_val(gridfile, dla)
#Re-seed for repeatability
np.random.seed(seed)
cofm = self.get_cofm()
vw_spectra.VWSpectra.__init__(self,num, base, cofm, axis, res, cdir, savefile=savefile,savedir=savedir, reload_file=True)
def get_cofm(self, num = None):
"""Find a bunch of sightline positions through a single cell containing a DLA."""
if num == None:
num = self.NumLos
#Get a single random position
self.index = np.random.random_integers(0,np.size(self.dlaind[0,:])-1,1)*np.ones(num,dtype=np.int)
cofm = np.array([self.dlaind[0,self.index],self.dlaind[0,self.index],self.dlaind[1,self.index]]).T
#Randomize positions within a cell
cofm[:,1] += self.celsz*(np.random.random_sample(num)-0.5)
cofm[:,2] += self.celsz*(np.random.random_sample(num)-0.5)
#Some sightlines could end up being through the same cell, in rare cases.
#This is only a problem if you want to compare to a quasar survey with pixels large
#compared to the grid size.
return cofm
def check_mean(self):
"""Compute difference between the mean column of the spectra in this cell and the grid value."""
dlaval = self.dlaval[self.index][0]
colden = self.get_col_density("H",1)
specval = np.sum(colden)/self.NumLos
print("From spectra:",specval)
print("From grid:",10**dlaval)
print("different:",specval/10**dlaval)
def _load_dla_val(self, gridfile, dla=True):
"""Load the values of DLAs or LLS from savefile"""
#Load the DLA/LLS positions
f=h5py.File(gridfile,'r')
grp = f["abslists"]
#This is needed to make the dimensions right
if dla:
nhi = np.array(grp["DLA_val"])
else:
nhi = np.array(grp["LLS_val"])
f.close()
return nhi
| 2.625 | 3 |
demos/princomp_test.py | bmcmenamin/hebbnets | 0 | 12796016 | """
Test princomp extraction from CLI
"""
import argparse
import os
import numpy as np
from demo_utils import get_random_data
from hebbnets.networks import MultilayerHahNetwork
np.set_printoptions(suppress=True)
def _argparse():
parser = argparse.ArgumentParser(
prog="Testing HebbNet principal components",
description="Testing HebbNet principal components by decomposing random data"
)
parser.add_argument(
"--num_samples",
help="Number of samples for synthetic data",
default=25,
type=int,
required=False
)
parser.add_argument(
"--data_dimension",
help="Dimension of synthetic data",
default=100,
type=int,
required=False
)
parser.add_argument(
"--data_latent_dimension",
help="Latent dimension of synthetic data",
default=3,
type=int,
required=False
)
parser.add_argument(
"--num_pc",
help="Number of principle components to extract",
default=2,
type=int,
required=False
)
return parser.parse_args()
def get_top_princomps(data_array, num_pcs):
U, S, V = np.linalg.svd(np.array(data_array))
_idx = np.argsort(S)[-num_pcs:]
return V[_idx, :].T
def main(args):
# Make data
demo_data = get_random_data(
args.num_samples,
args.data_dimension,
latent_dim=args.data_latent_dimension
)
# Build/train network
hah_network = MultilayerHahNetwork(
args.data_dimension,
[args.num_pc],
has_bias=False,
act_type='linear',
)
hah_network.train(demo_data, num_epochs=1000)
    # Compare the learned input weights with the true principal components
real_princomps = get_top_princomps(demo_data, args.num_pc)
hebb_princomps = np.squeeze(hah_network.layers[0].input_weights)
hebb_princomps /= np.linalg.norm(hebb_princomps, axis=0, keepdims=True)
# Show the inner product of top two PCs with learned input weights
    inner_prod_mat = real_princomps.T @ hebb_princomps
    prod_as_string = np.array_str(
        inner_prod_mat,
        suppress_small=True,
        precision=4
    )
    print(prod_as_string)
if __name__ == "__main__":
args = _argparse()
main(args)
| 2.578125 | 3 |
Chapter01/bias_variance.py | bpbpublications/Getting-started-with-Deep-Learning-for-Natural-Language-Processing | 0 | 12796017 | # -*- coding: utf-8 -*-
"""
## Author: <NAME>
## Copyright: Copyright 2018-2019, Packt Publishing Limited
## Version: 0.0.1
## Maintainer: <NAME>
## Email: <EMAIL>
## Linkedin: https://www.linkedin.com/in/linus1/
## Contributor : {if you debug, append your name here}
## Contributor Email : {if you debug, append your email here}
## Status: active
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
np.random.seed(0)
def true_fun(X):
"""
    given X it will provide its mapping to Y by using the function np.cos(1.5 * np.pi * X)
:param X:
:return:
"""
return np.cos(1.5 * np.pi * X)
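# e.g. true_fun(0.0) == 1.0 and true_fun(1/3) == cos(pi/2) ~= 0 (up to float rounding).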
if __name__ == '__main__':
n_samples = 30
degrees = [1, 3, 9, 15]
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
"""
Evaluating and plotting for each degree of freedom
"""
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using cross-validation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="neg_mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
# predicting on test data
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
# plotting the True and predicted function
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, edgecolor='b', s=20, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\n TEST MSE = {:.2e}".format(
degrees[i], -scores.mean()))
plt.show()
| 3.171875 | 3 |
fjord/settings/test.py | DESHRAJ/fjord | 0 | 12796018 | <reponame>DESHRAJ/fjord<filename>fjord/settings/test.py
DEBUG = TEMPLATE_DEBUG = True
CELERY_ALWAYS_EAGER = True
SESSION_COOKIE_SECURE = False
| 0.933594 | 1 |
days/day101/Bite 10. Practice exceptions/divide.py | alex-vegan/100daysofcode-with-python-course | 2 | 12796019 | <filename>days/day101/Bite 10. Practice exceptions/divide.py
def positive_divide(numerator, denominator):
try:
result = numerator / denominator
except ZeroDivisionError:
return 0
except Exception as x:
raise x
if result < 0:
raise ValueError
return result
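# Usage sketch (illustrative, not part of the original bite):
#     positive_divide(4, 2)    # -> 2.0
#     positive_divide(4, 0)    # -> 0 (ZeroDivisionError is swallowed)
#     positive_divide(-4, 2)   # raises ValueError (negative result)
#     positive_divide(4, "x")  # re-raises the underlying TypeError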
| 3.0625 | 3 |
embed.py | Top34051/stargan-zsvc | 8 | 12796020 | import numpy as np
import argparse
from utils import Audio
def sample_wav_audio(path):
audio = Audio()
mel = audio.audio_to_mel(path)
samples = audio.mel_sample(mel, width=128, k=5)
return samples
def save_embeddings(name, samples):
audio = Audio()
avg_embed = np.zeros(256, dtype=np.float32)
for mel in samples:
embed = audio.mel_to_embed(mel)
avg_embed += embed
avg_embed = avg_embed / 5
np.save(f'./embeddings/{name}.npy', avg_embed)
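# Example invocation (hypothetical paths):
#     python embed.py --path ./data/speaker1.wav --name speaker1
# samples five 128-frame mel windows, averages their 256-d speaker embeddings,
# and writes the result to ./embeddings/speaker1.npy.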
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--path', action='store', type=str, required=True)
parser.add_argument('--name', action='store', type=str, required=True)
args = parser.parse_args()
samples = sample_wav_audio(args.path)
save_embeddings(args.name, samples) | 3.015625 | 3 |
python-lib/modellightgbm/dku_lightgbm.py | shippeo/dss-plugin-model-lightgbm | 3 | 12796021 | <reponame>shippeo/dss-plugin-model-lightgbm
from lightgbm import LGBMClassifier, LGBMRegressor
class DkuLGBMClassifier(LGBMClassifier):
def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100,
subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001,
min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0,
reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split',
early_stopping_rounds=None, early_stopping=None):
self.early_stopping_rounds = early_stopping_rounds
super(DkuLGBMClassifier, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators,
subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight,
              min_child_samples=min_child_samples, subsample=subsample, subsample_freq=subsample_freq, colsample_bytree=colsample_bytree, reg_alpha=reg_alpha,
reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type)
def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None,
eval_class_weight=None, eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto', callbacks=None):
return super(DkuLGBMClassifier, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight,
eval_class_weight=eval_class_weight, eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose,
feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds)
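# Usage sketch (X_train/y_train/X_valid/y_valid are placeholder arrays, not part of the plugin):
#     clf = DkuLGBMClassifier(n_estimators=500, early_stopping_rounds=20)
#     clf.fit(X_train, y_train, eval_set=[(X_valid, y_valid)], eval_metric="auc")
# When no eval_set is given, fit() falls back to evaluating on the training data
# itself, so early stopping still has a validation signal.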
class DkuLGBMRegressor(LGBMRegressor):
def __init__(self, boosting_type='gbdt', num_leaves=31, max_depth=-1, learning_rate=0.1, n_estimators=100,
subsample_for_bin=200000, objective=None, class_weight=None, min_split_gain=0.0, min_child_weight=0.001,
min_child_samples=20, subsample=1.0, subsample_freq=0, colsample_bytree=1.0, reg_alpha=0.0,
reg_lambda=0.0, random_state=None, n_jobs=-1, silent=True, importance_type='split',
early_stopping_rounds=None, early_stopping=None):
self.early_stopping_rounds = early_stopping_rounds
super(DkuLGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, max_depth=max_depth, learning_rate=learning_rate, n_estimators=n_estimators,
subsample_for_bin=subsample_for_bin, objective=objective, class_weight=class_weight, min_split_gain=min_split_gain, min_child_weight=min_child_weight,
              min_child_samples=min_child_samples, subsample=subsample, subsample_freq=subsample_freq, colsample_bytree=colsample_bytree, reg_alpha=reg_alpha,
reg_lambda=reg_lambda, random_state=random_state, n_jobs=n_jobs, silent=silent, importance_type=importance_type)
def fit(self, X, y, sample_weight=None, init_score=None, eval_set=None, eval_names=None, eval_sample_weight=None,
eval_init_score=None, eval_metric=None, early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto', callbacks=None):
return super(DkuLGBMRegressor, self).fit(X, y, init_score=init_score, eval_set=eval_set or [(X, y)], eval_names=eval_names, eval_sample_weight=eval_sample_weight,
eval_init_score=eval_init_score, eval_metric=eval_metric, verbose=verbose,
feature_name=feature_name, categorical_feature=categorical_feature, callbacks=callbacks, early_stopping_rounds=self.early_stopping_rounds) | 2.140625 | 2 |
experiments/recall_rate_vs_accuracy.py | happygirlzt/soft_alignment_model_bug_deduplication | 2 | 12796022 | import argparse
import logging
import os
import pickle
import random
import ujson
import sys
import math
from ctypes import c_ulong
from multiprocessing import Array, Queue
from multiprocessing.sharedctypes import RawArray
from queue import Empty
from time import time
import numpy as np
import resource
from scipy.sparse import csr_matrix
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
from sklearn.metrics.pairwise import cosine_similarity
from data.bug_report_database import BugReportDatabase
from data.preprocessing import concatenateSummaryAndDescription
from experiments.sparse_vector import TokenizerStemmer
from nltk import TreebankWordTokenizer, SnowballStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
def loadData(filePath):
f = open(filePath, 'r')
bugIds = set()
duplicateByBugId = {}
pairs = []
for l in f:
l = l.strip()
if len(l) == 0:
break
bug1Id, bug2Id, label = l.split(',')
label = int(label)
pairs.append((bug1Id, bug2Id, label))
bugIds.add(bug1Id)
bugIds.add(bug2Id)
if label == 1:
duplicateBug1List = duplicateByBugId.get(bug1Id, set())
if len(duplicateBug1List) == 0:
duplicateByBugId[bug1Id] = duplicateBug1List
duplicateBug1List.add(bug2Id)
duplicateBug2List = duplicateByBugId.get(bug2Id, set())
if len(duplicateBug2List) == 0:
duplicateByBugId[bug2Id] = duplicateBug2List
duplicateBug2List.add(bug1Id)
return bugIds, duplicateByBugId, pairs, ujson.loads(f.readline())['validations']
class Obj(object):
def __init__(self, dict):
for k, v in dict.items():
setattr(self, k, v)
def predictDeepLearningModel(bugEmbeddingsById, validationPairs):
batchSize = 1024
predictions = []
nBatches = math.ceil(float(len(validationPairs)) / batchSize)
firstBugPairs = []
secondBugPairs = []
for bug1, bug2 in validationPairs:
firstBugPairs.append(bugEmbeddingsById[bug1])
secondBugPairs.append(bugEmbeddingsById[bug2])
for batchIdx in range(nBatches):
batchStart = batchIdx * batchSize
bug1s = getVariable(torch.stack(firstBugPairs[batchStart: batchStart + batchSize]), args.cuda)
bug2s = getVariable(torch.stack(secondBugPairs[batchStart: batchStart + batchSize]), args.cuda)
if arguments.model == 'retrieval':
predictionInput = [bug1s, bug2s]
elif arguments.model == 'classification':
predictionInput = model[1](bug1s, bug2s)
output = predictionFunction(predictionInput).data.cpu().numpy()
for pr in output:
if isinstance(pr, (np.float32, np.uint8)):
predictions.append(pr)
else:
predictions.append(pr[-1])
return predictions
def parallel(start, duplicateBugs, q):
logger = logging.getLogger()
c = time()
logger.info(
"Process %s started to compute the similarity for %d duplicate bugs. Start idx: %d" % (os.getpid(), len(duplicateBugs), start))
for i, db in enumerate(duplicateBugs):
q.put([start + i, calculateSimiliratyScoreTFIDF(str(db), vectorByBug, bugIds)])
if i % 20 == 0 and i != 0:
logger.info("TF-IDF: Process %s processed %d Duplicate bug of %d in %f" % (
os.getpid(), i, len(duplicateBugs), time() - c))
c = time()
q.put([-1, None])
def calculateSimiliratyScoreTFIDF(duplicateBug, vectorByBug, bugIds):
batchSize = 1024
nPairs = len(bugIds)
nBatches = math.ceil(float(nPairs) / batchSize)
bugEmbedding1 = vectorByBug[duplicateBug]
similarityScores = []
nbDim = bugEmbedding1.shape[1]
for batchIdx in range(nBatches):
batchStart = batchIdx * batchSize
data1 = []
indices1 = []
ptrs1 = [0]
data2 = []
indices2 = []
ptrs2 = [0]
for otherBug in bugIds[batchStart: batchStart + batchSize]:
data1.extend(bugEmbedding1.data)
indices1.extend(bugEmbedding1.indices)
ptrs1.append(len(indices1))
bugEmbedding2 = vectorByBug[otherBug]
data2.extend(bugEmbedding2.data)
indices2.extend(bugEmbedding2.indices)
ptrs2.append(len(indices2))
matrix1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim))
matrix2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim))
score = cosine_similarity(matrix1, matrix2)
for i in range(score.shape[0]):
similarityScores.append(score[i][i])
return similarityScores
def predictTFIDF(pairs):
batchSize = 8192
nPairs = len(pairs)
nBatches = math.ceil(float(nPairs) / batchSize)
similarityScores = []
for batchIdx in range(nBatches):
batchStart = batchIdx * batchSize
data1 = []
indices1 = []
ptrs1 = [0]
data2 = []
indices2 = []
ptrs2 = [0]
for bug1, bug2 in pairs[batchStart: batchStart + batchSize]:
bugEmbedding1 = vectorByBug[bug1]
data1.extend(bugEmbedding1.data)
indices1.extend(bugEmbedding1.indices)
ptrs1.append(len(indices1))
bugEmbedding2 = vectorByBug[bug2]
data2.extend(bugEmbedding2.data)
indices2.extend(bugEmbedding2.indices)
ptrs2.append(len(indices2))
nbDim = vectorByBug[bug1].shape[1]
pairBug1 = csr_matrix((data1, indices1, ptrs1), shape=(len(ptrs1) - 1, nbDim))
pairBug2 = csr_matrix((data2, indices2, ptrs2), shape=(len(ptrs2) - 1, nbDim))
score = cosine_similarity(pairBug1, pairBug2)
for i in range(score.shape[0]):
similarityScores.append(score[i][i])
return (np.asarray(similarityScores) > args.retrieval_threshold).astype(int)
def chunks(l, n):
chunkSize = int(len(l) / n)
remaining = len(l) % n
chunks = []
begin = 0
for i in range(n):
if remaining != 0:
additional = 1
remaining -= 1
else:
additional = 0
end = begin + chunkSize + additional
chunks.append(l[begin:end])
begin = end
return chunks
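# e.g. chunks([1, 2, 3, 4, 5], 2) -> [[1, 2, 3], [4, 5]]; the remainder is spread
# one element at a time over the first chunks.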
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--recall_ratio_k', nargs='+', required=True,
help="list of the values of k to be used in the recall ratio. If k is empty list so recall rate "
"is not calculated")
parser.add_argument('--model', help="model")
parser.add_argument('--model_type', help="model type")
parser.add_argument('--bug_dataset', help="")
parser.add_argument('--input', required=True)
parser.add_argument('--retrieval_threshold', type=float, default=None, help="")
parser.add_argument('--nb_processes', type=int, default=8, help="")
parser.add_argument('--cuda', action="store_true", help="enable cuda.")
logging.basicConfig(format='%(asctime)s %(levelname)-4s %(message)s', level=logging.DEBUG,
datefmt='%Y-%m-%d %H:%M:%S', )
logger = logging.getLogger()
args = parser.parse_args()
print(args)
global bugIds
args.recall_ratio_k = [int(k) for k in args.recall_ratio_k]
bugIds, duplicateByBugId, pairs, validations = loadData(args.input)
biggestValidation = validations[-1]
bugReportDataset = BugReportDatabase.fromJson(args.bug_dataset)
bugIds = list(bugIds)
similarityListByDuplicate = []
if args.model_type == 'tfidf':
# Load Model
global vectorByBug
vectorByBug = {}
tfIdfVectorizer = pickle.load(open(args.model, 'rb'))
# Generate bag of words representation for each bug
texts = [concatenateSummaryAndDescription(bugReportDataset.getBug(bugId)) for bugId in bugIds]
vectors = tfIdfVectorizer.transform(texts)
for idx, bugId in enumerate(bugIds):
vectorByBug[bugId] = vectors[idx]
else:
# We can't import torch without allocating a GPU in Cedar cluster.
from experiments.duplicate_bug_detection_deep_learning import generateBugEmbeddings, \
calculateSimilarityScoresDL, \
CosinePrediction, getDataHandlerLexiconEmb, getModel
import torch
import torch.nn.functional as F
from util.torch_util import softmaxPrediction, getVariable
from data.dataset import BugDataExtractor
# Load Model and DataHandlers
arguments = Obj({
'load': args.model,
'cuda': args.cuda,
'summary_bidirectional': False,
'classifier_hidden_size': 300,
'classifier_mul_dif': True
})
dataHandlers, lexicons, embeddings, arguments = getDataHandlerLexiconEmb(arguments)
encoderContainer, model = getModel(dataHandlers, lexicons, embeddings, arguments)
encoderContainer.eval()
model.eval()
# Set the similarity and prediction functions
if arguments.model == 'classification':
similarityFunction = model[1]
if args.cuda:
similarityFunction.cuda()
predictionFunction = softmaxPrediction
elif arguments.model == 'retrieval':
similarityFunction = F.cosine_similarity
predictionFunction = CosinePrediction(args.retrieval_threshold, args.cuda)
if args.cuda:
model.cuda()
encoderContainer.cuda()
# Generate the embedding for each bug
logger.info("Generating Embeddings")
dataExtractor = BugDataExtractor(bugReportDataset, dataHandlers)
bugEmbeddingsById = generateBugEmbeddings(bugIds, dataExtractor, encoderContainer)
# Start to calculate all duplicate pairs recommend list
c = time()
logger.info("Calculating similarity scores")
dupDictItems = duplicateByBugId.items()
if args.model_type == 'tfidf':
# Calculating the score for tf-idf. We had to parallel this step because the sequential version was too slow.
import multiprocessing
logger.info("Calculating cosine similarity of tf-idf model using %d processes" % (args.nb_processes))
funcArgs = []
duplicateBugs = [duplicateBug for duplicateBug, listOfDuplicates in dupDictItems]
q = Queue()
processes = []
similarityScoresList = [0] * len(duplicateBugs)
startToWrite = 0
for idx, chunk in enumerate(chunks(duplicateBugs, args.nb_processes)):
arr = RawArray(c_ulong, [int(bugId) for bugId in chunk])
processes.append(multiprocessing.Process(target=parallel, args=(startToWrite, arr, q)))
startToWrite += len(chunk)
for p in processes:
p.start()
count = 0
while True:
try:
id, scoreList = q.get()
if id == -1:
# The process send a tuple (-1,None) when it is ending its work.
count += 1
# Break the loop when all processes were terminated
if count == len(processes):
break
else:
similarityScoresList[id] = scoreList
except Empty as e:
pass
logger.info(
"Total time to calculate cosine similarity of %d duplicate bugs: %s " % (len(dupDictItems), time() - c))
c = time()
for i, (duplicateBug, listOfDuplicates) in enumerate(dupDictItems):
# Calculate the similarity score of duplicate bug with each bug
if args.model_type == 'tfidf':
similarityScores = similarityScoresList.pop(0)
else:
similarityScores = calculateSimilarityScoresDL(duplicateBug, similarityFunction, bugEmbeddingsById, bugIds,
args.cuda)
# Remove pair (duplicateBug, duplicateBug) and create tuples with bug id and its similarity score.
bugScores = [(bugId, score) for bugId, score in zip(bugIds, similarityScores) if bugId != duplicateBug]
# Sort in descending order the bugs by probability of being duplicate
similarityList = sorted(bugScores, key=lambda x: x[1], reverse=True)
similarityListByDuplicate.append((duplicateBug, [t[0] for t in similarityList]))
if i % 200 == 0 and i != 0:
logger.info("Processed %d Duplicate bug of %d in %f" % (i, len(duplicateByBugId), time() - c))
c = time()
# For each different proportion, we calculate the recall rate and the precision, recall, accuracy
recallKs = sorted([int(k) for k in args.recall_ratio_k])
biggestKValue = recallKs[-1]
total = len(duplicateByBugId)
for validation in validations:
logger.info("Calculating metrics to a validation with proportion: %d" % validation['k'])
valitionBugIds = {}
# Prepare data to prediction
validationPairs = []
targets = []
bugIdsOfValidation = set()
for pairIndex in validation['indexes']:
bug1, bug2, label = pairs[pairIndex]
validationPairs.append((bug1, bug2))
valitionBugIds[bug1] = True
valitionBugIds[bug2] = True
bugIdsOfValidation.add(bug1)
bugIdsOfValidation.add(bug2)
targets.append(max(0, label))
logger.debug("Amount of duplicate pairs: %d\tAmount of pairs: %d" % (
np.count_nonzero(np.asarray(targets)), len(targets)))
logger.debug("Amount of bugs: %d" % (len(bugIdsOfValidation)))
logger.info("Predicting pair labels: %d" % validation['k'])
if args.model_type == 'tfidf':
predictions = predictTFIDF(validationPairs)
else:
predictions = predictDeepLearningModel(bugEmbeddingsById, validationPairs)
# Calculate Recall Rate
hitsPerRateK = [0] * len(recallKs)
logger.info("Calculating Recall Rate")
for duplicateBug, similarityList in similarityListByDuplicate:
pos = biggestKValue + 1
cur = 0
listOfDuplicates = duplicateByBugId[duplicateBug]
for bugId in similarityList:
if bugId not in bugIdsOfValidation:
continue
if bugId in listOfDuplicates:
pos = cur + 1
break
cur += 1
if cur >= biggestKValue:
break
for idx, k in enumerate(recallKs):
if k < pos:
continue
hitsPerRateK[idx] += 1
logger.info("Recall Rate Results:")
for k, hit in zip(recallKs, hitsPerRateK):
rate = float(hit) / total
logger.info("\t\t k=%d: %.3f (%d/%d) " % (k, rate, hit, total))
# Calculate Acc, precision, recall and f1
accum = accuracy_score(targets, predictions, normalize=False)
acc = accum / len(targets)
prec, recall, f1, _ = precision_recall_fscore_support(targets, predictions)
logger.info("Accuracy: %.3f (%d/%d)" % (acc * 100, accum, len(targets)))
logger.info("Precision: {}\tRecall: {}\tF1:{}".format(list(np.around(prec * 100, decimals=3)),
list(np.around(recall * 100, decimals=3)),
list(np.around(f1 * 100, decimals=3))))
logger.info("")
| 2.09375 | 2 |
examples/pruning_two_instances.py | laudv/veritas | 6 | 12796023 | <gh_stars>1-10
import xgboost as xgb
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import veritas
import veritas.xgb
# Generate a random dataset
np.random.seed(14)
N = 2000
x = np.random.randint(0, 100, size=(N, 1)).astype(float)
y = np.random.randint(0, 100, size=(N, 1)).astype(float)
dist = np.sqrt(x**2 + y**2)
s = x + y
target = ((dist < 50) & (s > 20)) | ((x+2*y) > 200)
# Plot the dataset
#plt.plot(x[target], y[target], '.', color="blue")
#plt.plot(x[~target], y[~target], '.', color="red")
#plt.show()
X = np.concatenate((x, y), axis=1)
# Train a model using XGBoost
xtrain = xgb.DMatrix(X, label=target, missing=None)
params = {
"learning_rate": 0.5,
"max_depth": 4,
"objective": "binary:hinge",
"eval_metric": "error",
"tree_method": "hist",
"seed": 1,
"nthread": 1,
}
bst = xgb.train(params, xtrain, 10, [(xtrain, "train")])
features = ["x", "y"]
feat2id = {f : i for i, f in enumerate(features)}
at = veritas.xgb.addtree_from_xgb_model(bst)
at.base_score = 0.5
# Check whether our "AddTree"'s predictions and XGBoost's match
pred_raw_at = np.array(at.predict(X))
pred_raw = bst.predict(xtrain, output_margin=True)
print("max error", max(pred_raw_at - pred_raw), "(should be no more than float32 rounding error)")
# Look in a 100×100 grid at the values produced by XGBoost
Xv = np.zeros((100*100, 2))
for i, xv in enumerate(range(100)):
for j, yv in enumerate(range(100)):
Xv[i*100+j, 0:2] = [xv, yv]
vs = bst.predict(xgb.DMatrix(Xv), output_margin=True)
fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(16, 6))
pred = (pred_raw.reshape((N,1)) > 0.0)
ax0.plot(x[pred&target], y[pred&target], '.', color="darkblue", alpha=0.5, label="true pos")
ax0.plot(x[~pred&~target], y[~pred&~target], '.', color="darkred", alpha=0.5, label="true neg")
ax0.plot(x[pred&~target], y[pred&~target], 'x', color="blue", label="false pos")
ax0.plot(x[~pred&target], y[~pred&target], 'x', color="red", label="false neg")
im = ax1.imshow(vs.reshape(100,100).T, origin="lower", cmap="Spectral")
fig.colorbar(im, ax=ax1)
plt.show()
# EXAMPLE 1
# Use VERITAS to find the two output configurations
# - one in box x: [25, 75], y: [50, 80]
# - one in box x: [0, 50], y: [0, 50]
# such that the difference in output is maximized
opt = veritas.Optimizer(minimize=at, maximize=at, matches=set(), match_is_reuse=True)
box0 = [
veritas.RealDomain(25, 75),
veritas.RealDomain(50, 80),
]
box1 = [
veritas.RealDomain(0, 50),
veritas.RealDomain(0, 50),
]
print("num reachable leafs before prune", opt.g0.num_vertices(), opt.g1.num_vertices())
opt.prune_box(box0, 0) # prune instance0 (minimized)
opt.prune_box(box1, 1) # prune instance1 (maximized)
print("num reachable leafs after prune", opt.g0.num_vertices(), opt.g1.num_vertices())
opt.steps(2000)
print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps()))
points = []
for sol in opt.solutions():
# convert Solution object to list of intervals indexes by feature id
intervals = opt.solution_to_intervals(sol, 4)
xv0 = sum(intervals[0][0])/2 # instance0: middle of first feature interval
yv0 = sum(intervals[0][1])/2 # instance0: middle of second feature interval
xv1 = sum(intervals[1][0])/2 # instance1: middle of first feature interval
yv1 = sum(intervals[1][1])/2 # instance1: middle of second feature interval
points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1])
points = np.array(points)
print(points)
#print(bst.predict(xgb.DMatrix(points), output_margin=True))
fig, ax = plt.subplots()
m, M = abs(min(points[:,2])), max(points[:,2])
im = ax.imshow(vs.reshape(100,100).T, origin="lower", cmap="Spectral")
ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color="blue"))
ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color="red"))
for p in points[:3]: # 3 best only
l, = ax.plot([p[0], p[2]], [p[1], p[3]])
ax.scatter([p[0]], [p[1]], marker="v", color=l.get_color()) # min
ax.scatter([p[2]], [p[3]], marker="^", color=l.get_color()) # max
fig.colorbar(im, ax=ax)
plt.show()
# EXAMPLE 2
# Use VERITAS to find the two output configurations
# - one in box x: [25, 75], y: [50, 80]
# - one in box x: [0, 50], y: [0, 50]
# such that the difference in output is maximized
# This time, share attribute x between the two instances
opt = veritas.Optimizer(minimize=at, maximize=at, matches=set([0]), match_is_reuse=True)
box0 = [
veritas.RealDomain(25, 75),
veritas.RealDomain(50, 80),
]
box1 = [
veritas.RealDomain(0, 50),
veritas.RealDomain(0, 50),
]
print("num reachable leafs before prune", opt.g0.num_vertices(), opt.g1.num_vertices())
opt.prune_box(box0, 0) # prune instance0 (minimized)
opt.prune_box(box1, 1) # prune instance1 (maximized)
print("num reachable leafs after prune", opt.g0.num_vertices(), opt.g1.num_vertices())
opt.steps(2000)
print((opt.num_solutions(), opt.num_rejected(), opt.num_candidate_cliques(), opt.num_steps()))
points = []
for sol in opt.solutions():
# convert Solution object to list of intervals indexes by feature id
intervals = opt.solution_to_intervals(sol, 4)
xv0 = sum(intervals[0][0])/2 # instance0: middle of first feature interval
yv0 = sum(intervals[0][1])/2 # instance0: middle of second feature interval
xv1 = sum(intervals[1][0])/2 # instance1: middle of first feature interval
yv1 = sum(intervals[1][1])/2 # instance1: middle of second feature interval
points.append([xv0, yv0, xv1, yv1, sol.output0, sol.output1])
points = np.array(points)
print(points)
#print(bst.predict(xgb.DMatrix(points), output_margin=True))
fig, ax = plt.subplots()
m, M = abs(min(points[:,2])), max(points[:,2])
im = ax.imshow(vs.reshape(100,100).T, origin="lower", cmap="Spectral")
ax.add_patch(Rectangle((0, 0), 50, 50, fill=False, color="blue"))
ax.add_patch(Rectangle((25, 50), 50, 30, fill=False, color="red"))
for p in points[:3]: # 3 best only
l, = ax.plot([p[0], p[2]], [p[1], p[3]])
ax.scatter([p[0]], [p[1]], marker="v", color=l.get_color()) # min
ax.scatter([p[2]], [p[3]], marker="^", color=l.get_color()) # max
fig.colorbar(im, ax=ax)
plt.show()
| 2.671875 | 3 |
create_academic.py | hulecom/hulecom.github.io | 0 | 12796024 | <reponame>hulecom/hulecom.github.io
import argparse
import os
#-- command line parameters
#-- Read the system arguments listed after the program
parser = argparse.ArgumentParser(
description="""Read a bibtex file to create website architecture for Hugo website
"""
)
parser.add_argument("--file","-f",
help="bibtex file")
#-- working data directory
parser.add_argument("--directory","-D",
help="Website directory for creating the file arborescence")
#-- output file
parser.add_argument("--overwrite","-O",
default=False, action="store_true",
help="Overwrite existing files")
args,_ = parser.parse_known_args()
listtoread = ["title", "year", "month", "author", "booktitle", "journal", "volume", "pages", "url", "abstract", "note"]
with open(args.file, "r") as file:
full_text = file.read()
articles = full_text.split("\n\n")
for article in articles:
lines = article.split("\n")
folder_name = lines[0].split("{")[1][:-1]
if not(os.path.isdir(os.path.join("content", args.directory, folder_name))):
os.mkdir(os.path.join("content", args.directory, folder_name))
with open(os.path.join("content", args.directory, folder_name, folder_name+".bib"), "w") as file:
file.writelines(article)
dic = {}
for info in listtoread:
dic[info] = ""
if "inproceedings" in lines[0]:
dic["publication_types"] = "1"
else:
dic["publication_types"] = "2"
for info in listtoread:
for line in lines:
if info in line.split("=")[0]:
dic[info] = line.split("{")[1][:-2]
break
dic["date"] = dic["year"] + "-" + dic["month"] + "-01"
list_author = [i.split(', ') for i in dic["author"].split(' and ')]
for i in range(len(list_author)):
if '-' in list_author[i][1]:
prenom = list_author[i][1].split('-')
list_author[i][1] = prenom[0][0] + ".-" + prenom[1][0] + "."
else:
list_author[i][1] = list_author[i][1][0] + "."
list_author = [i[0] + ', ' + i[1] for i in list_author]
list_author[list_author.index("<NAME>.")] = '**' + list_author[list_author.index("<NAME>.")] + '**'
dic["authors"] = ', '.join(list_author[:-1]) + ' and ' + list_author[-1]
if dic["journal"]:
dic["booktitle"] = dic["journal"]
if dic["volume"] and dic["pages"]:
dic["info"] = ', ' + dic["volume"] + ', ' + dic["pages"]
elif dic["volume"]:
dic["info"] = ', ' + dic["volume"]
elif dic["pages"]:
dic["info"] = ', ' + dic["pages"]
else:
dic["info"] = ''
if not(os.path.isdir(os.path.join("content", args.directory, folder_name))):
os.mkdir(os.path.join("content", args.directory, folder_name))
if args.overwrite or not(os.path.isfile(os.path.join("content", args.directory, "index.md"))):
text = '---\n'
text += 'title: "' + dic["title"] + '"\n'
text += 'date: ' + dic["date"] + '\n'
text += 'authors: "' + dic["authors"] + '"\n'
text += 'publication_types: "' + dic["publication_types"] + '"\n'
text += 'abstract: "' + dic["abstract"] + '"\n'
text += 'publication: "' + dic["booktitle"] + '"\n'
text += 'info: "' + dic["info"] + '"\n'
text += 'doi: "' + dic["url"] + '"\n'
text += 'note: "' + dic["note"] + '"\n'
text += 'folder_name: "' + folder_name +'"\n'
text += '---'
with open(os.path.join("content", args.directory, folder_name, "index.md"), "w") as file:
file.write(text)
| 3.015625 | 3 |
p2ner/components/scheduler/spullclient/spullclient/core.py | schristakidis/p2ner | 2 | 12796025 | <reponame>schristakidis/p2ner<filename>p2ner/components/scheduler/spullclient/spullclient/core.py
# -*- coding: utf-8 -*-
# Copyright 2012 <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import task, reactor
from twisted.internet.threads import deferToThread
from random import choice, uniform
from time import time
from p2ner.abstract.scheduler import Scheduler
from p2ner.base.Buffer import Buffer
from p2ner.base.BufferList import getMostDeprivedReq
from messages.buffermessage import BufferMessage
from messages.lpbmsg import LPBMessage
from messages.retransmitmessage import RetransmitMessage
from block import Block
EXPIRE_TIME = 0.5
def requestOne(requests):
blocksToRequest = {}
for peer in requests:
if len(requests[peer]) > 1:
blocksToRequest[peer] = [choice(requests[peer])]
else:
blocksToRequest[peer] = requests[peer]
return blocksToRequest
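# e.g. {peerA: [3, 7, 9], peerB: [4]} -> {peerA: [7], peerB: [4]} (one randomly
# chosen block per peer that had more than one pending request)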
class SPullClient(Scheduler):
def registerMessages(self):
self.messages = []
self.messages.append(BufferMessage())
self.messages.append(LPBMessage())
self.messages.append(RetransmitMessage())
self.blocks = []
self.blocks.append(Block())
def initScheduler(self):
self.log.info('initing scheduler')
self.running = False
self.registerMessages()
self.loopingCall = task.LoopingCall(self.shift)
self.reqInterval=self.stream.scheduler['reqInt']
self.frequency = 1.0/self.stream.scheduler['blocksec']
self.buffer = Buffer(buffersize=self.stream.scheduler['bufsize'],log=self.log)
def errback(self, failure): return failure
def produceBlock(self):
#print "PRODUCEBLOCK"
d = deferToThread(self.getRequestedBID)
d.addCallback(self.sendBlock)
d.addErrback(self.errback)
return d
def sendBlock(self, req):
if not req:
self.running = False
return None
bid, peer = req
#self.log.debug('sending block %d to %s',bid,peer)
self.trafficPipe.call("sendblock", self, bid, peer)
def getRequestedBID(self):
#print "GETREQUESTEDBID"
while True:
#print self.bufferlist
peer = getMostDeprivedReq(self.bufferlist, self.buffer)
if peer is None:
self.running = False
#print "STOP SERVING\n\n"
return None
bl = self.buffer.bIDListCompTrue(peer.s[self.stream.id]["request"])
if len(bl) > 0:
blockID = choice(bl)
peer.s[self.stream.id]["request"].remove(blockID)
peer.s[self.stream.id]["buffer"].update(blockID)
#print "SENDING BLOCK", blockID, peer
peer.s[self.stream.id]["luck"] = blockID
return (blockID, peer)
else:
peer.s[self.stream.id]["request"]=[]
def start(self):
self.log.info('scheduler is starting')
self.loopingCall.start(self.frequency)
def stop(self):
self.log.info('scheduler is stopping')
#reactor.callLater(0, self.stream.stop)
try:
self.loopingCall.stop()
except:
pass
def makeRequests(self, receivingBlocks, missingBlocks, neighbours):
#print 'neighbours:',neighbours
#print "COMPUTING REQUESTS"
#print missingBlocks
#exclude receiving
def dd(self, receivingBlocks, missingBlocks, neighbours):
for bid in missingBlocks:
if bid in receivingBlocks:
missingBlocks.remove(bid)
elif bid < self.buffer.flpb:
missingBlocks.remove(bid)
#print 'missing blocks:',missingBlocks
tmpBlocksToRequest = {}
requestableBlocks = {}
for peer in neighbours:
if self.stream.id not in peer.s:
                    print str(self.stream.id), 'in continue 1', peer, peer.s
print neighbours
continue
if "buffer" not in peer.s[self.stream.id]:
print 'in continue 2'
continue
buffer = peer.s[self.stream.id]["buffer"]
#print 'neigh buffer:',buffer
tempReq = buffer.bIDListCompTrue(missingBlocks)
tmpBlocksToRequest[peer] = tempReq
#print 'temp:',tempReq
for b in tempReq:
if b in requestableBlocks:
requestableBlocks[b].append(peer)
else:
requestableBlocks[b] = [peer]
keys = tmpBlocksToRequest.keys()
blocksToRequest = {}
for k in keys:
blocksToRequest[k] = []
#take out blocks with only 1 source
reqBlockList = requestableBlocks.keys()
for b in reqBlockList:
if len(requestableBlocks[b]) == 1:
peer = requestableBlocks[b][0]
blocksToRequest[peer].append(b)
del requestableBlocks[b]
#while There are blocks to request
while len(requestableBlocks) > 0:
#get the block with less sources
block = min([ (len(requestableBlocks[x]),x) for x in requestableBlocks])[1]
#get the peer with min(less possible requests, less requests so far)
peer = min([ (min(len(tmpBlocksToRequest[x]),len(blocksToRequest[x])),x) for x in tmpBlocksToRequest if block in tmpBlocksToRequest[x]])[1]
del requestableBlocks[block]
blocksToRequest[peer].append(block)
#print "BLOCKSTOREQUESTSSSS", blocksToRequest
peerWithRequests = 0
for peer in blocksToRequest:
if len(blocksToRequest[peer]):
peerWithRequests+=1
if peerWithRequests > self.reqInterval:
blocksToRequest = requestOne(blocksToRequest)
return blocksToRequest
return deferToThread(dd, self, receivingBlocks, missingBlocks, neighbours)
#return dd(self, receivingBlocks, missingBlocks, neighbours)
def sendRequests(self, requests):
for peer in self.overlay.getNeighbours():
reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, requests.get(peer), peer, self.controlPipe)
def sendLPB(self, peer):
self.log.warning('sending LPB message to %s',peer)
LPBMessage.send(self.stream.id, self.buffer.lpb, peer, self.controlPipe)
def shift(self, norequests = False):
n = self.overlay.getNeighbours()
outID = self.buffer.shift()
if not norequests:
#send buffers
if self.buffer.lpb % self.reqInterval == 0:
d = self.trafficPipe.call("getreceiving", self)
d.addCallback (self.makeRequests, self.buffer.getFalseBIDList(), n)
d.addCallback(self.sendRequests)
d.addErrback(self.errback)
else:
#print 'sending buffer'
reactor.callLater(uniform(0,0.05), BufferMessage.send, self.stream.id, self.buffer, None, n, self.controlPipe)
#self.log.debug('%s',self.buffer)
#print self.buffer
#push block to output
outdata = self.trafficPipe.call("popblockdata", self, outID)
outdata.addCallback(self.output.write)
def isRunning(self):
return self.loopingCall.running
def askFragments(self,bid,fragments,peer):
print 'should ask from ',peer,fragments,bid
RetransmitMessage.send(self.stream.id,fragments,bid,peer,self.controlPipe)
def retransmit(self,block,fragments,peer):
print 'should retransmit to ',peer,block,fragments
b={}
b['blockid']=block
b['fragments']=fragments
self.trafficPipe.call('sendFragments',self,b,peer)
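# Illustrative sketch (not part of the original scheduler): the block-assignment
# strategy used in makeRequests()/dd() above, reduced to plain dictionaries.
# Blocks available from a single peer are assigned first; the remaining blocks are
# then assigned rarest-first to the peer that currently has the fewest requests.
def _assign_blocks_example(availability):
    # availability: {block_id: [peer, peer, ...]} for every missing block
    assignments = {}
    for peers in availability.values():
        for peer in peers:
            assignments.setdefault(peer, [])
    # single-source blocks leave no choice
    for block in list(availability.keys()):
        peers = availability[block]
        if len(peers) == 1:
            assignments[peers[0]].append(block)
            del availability[block]
    # remaining blocks: rarest block first, then the least-loaded peer serving it
    while availability:
        block = min(availability, key=lambda b: len(availability[b]))
        peer = min(availability[block], key=lambda p: len(assignments[p]))
        assignments[peer].append(block)
        del availability[block]
    return assignments
# e.g. _assign_blocks_example({1: ['p1'], 2: ['p1', 'p2'], 3: ['p2']}) -> {'p1': [1, 2], 'p2': [3]}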
| 2 | 2 |
pyramid_openapi3/tests/test_add_formatter.py | niteoweb/pyramid_openapi | 74 | 12796026 | """Tests for registering custom formatters."""
from pyramid.testing import DummyRequest
from pyramid.testing import testConfig
def test_add_formatter() -> None:
"""Test registration of a custom formatter."""
with testConfig() as config:
request = DummyRequest()
config.include("pyramid_openapi3")
config.pyramid_openapi3_add_formatter("foormatter", lambda x: x)
formatter = request.registry.settings["pyramid_openapi3_formatters"].get(
"foormatter", None
)
assert formatter("foo") == "foo"
| 2.484375 | 2 |
test/m366_22_test.py | schwehr/ais-areanotice-py | 7 | 12796027 | #!/usr/bin/env python
"""Test USCG specific 8:367:22 area notice message Version 23 samples."""
import datetime
import unittest
from ais_area_notice import m366_22
# from m366_22 import AreaNotice
# from m366_22 import AreaNoticeCircle
# from m366_22 import AreaNoticeRectangle
# from m366_22 import AreaNoticeSector
# from m366_22 import AreaNoticePoly
# from m366_22 import AreaNoticeText
# from m366_22 import SHAPES
class TestAreaNotice(unittest.TestCase):
def testEmptyInit(self):
self.assertRaises(m366_22.Error, m366_22.AreaNotice)
def testInitWithAreaType(self):
area_type = 1
now = datetime.datetime.utcnow()
an = m366_22.AreaNotice(area_type=area_type, when=now)
self.assertFalse(an.areas)
self.assertEqual(an.area_type, area_type)
self.assertEqual(an.when.year, now.year)
self.assertEqual(an.when.month, now.month)
self.assertEqual(an.when.day, now.day)
self.assertEqual(an.when.hour, now.hour)
self.assertEqual(an.when.minute, now.minute)
self.assertEqual(an.when.second, 0)
self.assertIsNone(an.duration_min)
self.assertIsNone(an.link_id)
self.assertIsNone(an.mmsi)
class TestVersion23Samples(unittest.TestCase):
@unittest.skip('TODO(schwehr): Fix this failure.')
def testCircle(self):
# TODO(grepjohnson): Why are there two messages?
aivdm = (
'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MK4lh<7=B42l0000,2*7F'
#'!AIVDM,1,1,0,A,85M:Ih1KUQU6jAs85`0MKFaH;k4>42l0000,2*0E'
)
an = m366_22.AreaNotice(nmea_strings=[aivdm])
# self.assertEqual(an., )
if __name__ == '__main__':
unittest.main()
| 2.71875 | 3 |
model/CNN_data/load_train_data.py | nicolepanek/Thermophile_classification | 0 | 12796028 | <gh_stars>0
import pandas as pd
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
from sklearn.model_selection import train_test_split
def load_data():
#Load data
X = torch.load('/gscratch/stf/jgershon/tensor_x.pt')
Y = torch.load('/gscratch/stf/jgershon/tensor_y.pt')
return X,Y
def split_data(X,Y, save_data=False):
assert X.size()[0] == Y.size()[0]
#Convert y back from one hot encoding
Y = torch.argmax(Y,dim=1)
print('new Y: ',Y[:10])
print('X load: ',X.size())
print('Y load: ',Y.size())
# Split data tensors into dev and test sets
X_train, X_test, y_train, y_test = train_test_split( \
X, Y, test_size = 0.20, random_state=42)
print('X_train: ', X_train.size())
print('X_test: ',X_test.size())
print('y_train: ', y_train.size())
print('y_test: ',y_test.size())
if save_data:
torch.save(X_train,'/gscratch/stf/jgershon/X_train.pt')
torch.save(X_test,'/gscratch/stf/jgershon/X_test.pt')
torch.save(y_train,'/gscratch/stf/jgershon/y_train.pt')
torch.save(y_test,'/gscratch/stf/jgershon/y_test.pt')
trainset = TensorDataset(X_train, y_train)
testset = TensorDataset(X_test, y_test)
return trainset, testset
def make_data_loader(trainset, testset, batchsize=100):
    assert isinstance(batchsize, int), 'batchsize must be an int'
# Prepare train and test loaders
train_loader = torch.utils.data.DataLoader(trainset,
batch_size = batchsize,
shuffle = True,
num_workers=2)
test_loader = torch.utils.data.DataLoader(testset,
batch_size = batchsize,
shuffle = True,
num_workers=2)
return train_loader, test_loader
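# Example wiring of the three helpers above (a sketch; it assumes the tensor files
# referenced in load_data() exist on disk, which is specific to the original cluster):
if __name__ == '__main__':
    X, Y = load_data()
    trainset, testset = split_data(X, Y, save_data=False)
    train_loader, test_loader = make_data_loader(trainset, testset, batchsize=100)
    for batch_x, batch_y in train_loader:
        # one mini-batch of inputs and integer class labels
        print(batch_x.shape, batch_y.shape)
        break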
| 2.59375 | 3 |
test/media/qa_test/test_create_thumbnail.py | yunfan/bce-sdk-python | 22 | 12796029 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
########################################################################
#
# Copyright 2015 Baidu, Inc.
#
########################################################################
"""
File: test_create_thumbnail.py
Date: 2015/06/10 15:15:40
"""
import os
import sys
import unittest
import json
import time
import media_config
import re
import mediaBase
_NOW_PATH = os.path.dirname(os.path.abspath(__file__)) + '/'
_COMMON_PATH = _NOW_PATH + '../../../'
sys.path.insert(0, _COMMON_PATH)
from baidubce.services.media import media_client
from baidubce.exception import BceHttpClientError
from baidubce.exception import BceServerError
from baidubce.exception import BceClientError
import nose
from nose import tools
from nose.tools import assert_raises
from nose.tools import assert_is_none
from nose.tools import raises
class TestCreateThumbnail(mediaBase.MediaBase):
"""test create thumbnail"""
def __init__(self):
"""construction """
mediaBase.MediaBase.__init__(self)
self.pre = self.prefix + 'createthumb'
self.pipeline_name = self.pre
self.container = 'mp4'
self.capacity = 1
self.key = '10s.mp4'
self.key_prefix = '/00mingxioutput'
self.target_format = 'jpg'
self.sizing_policy = 'keep'
self.width_in_pixel = 640
self.height_in_pixel = 400
self.mode = 'manual'
self.start_time_in_second = 0
self.end_time_in_second = 50
self.interval_in_second = 10
self.client = media_client.MediaClient(media_config.config)
def setUp(self):
"""create env"""
time.sleep(2)
succ = True
try:
resp = self.client.create_pipeline(self.pipeline_name, self.sourceBucket,
self.targetBucket)
except Exception as e:
print(e.message)
succ = False
finally:
nose.tools.assert_true(succ)
def tearDown(self):
"""clear env"""
time.sleep(2)
resp = self.client.list_pipelines()
for each_pipeline in resp.pipelines:
pipeline_name = each_pipeline.pipeline_name
if (pipeline_name.startswith(self.pre)):
resp = self.client.list_thumbnail_jobs_by_pipeline(pipeline_name)
if resp.thumbnails:
for each_job in resp.thumbnails:
while(1):
resp = self.client.get_thumbnail_job(each_job.job_id)
if resp.job_status != 'SUCCESS' and resp.job_status != 'FAILED':
print('please wait ....\n')
time.sleep(5)
else:
break
resp = self.client.delete_pipeline(pipeline_name)
def test_create_thumbnail_normal(self):
"""create thumbnail normal"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': self.target_format,
'sizingPolicy': self.sizing_policy,
'widthInPixel': self.width_in_pixel,
'heightInPixel': self.height_in_pixel,
}
capture = {'mode': self.mode,
'startTimeInSecond': self.start_time_in_second,
'endTimeInSecond': self.end_time_in_second,
'intervalInSecond': self.interval_in_second
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target, capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_with_pipeline_deleted(self):
"""create thumbnail with delete pipeline"""
resp = self.client.delete_pipeline(self.pipeline_name)
nose.tools.assert_is_not_none(resp)
source = {'key': self.key}
try:
self.client.create_thumbnail_job(self.pipeline_name, source)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('pipeline has been deleted')
else:
assert True == False, 'not throw BceServerError'
def test_create_thumbnail_with_pipeline_not_exist(self):
"""create thumbnail with pipeline not exist"""
source = {'key': self.key}
try:
self.client.create_thumbnail_job('not_exist_pipeline', source)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('The requested pipeline does not exist')
else:
assert True == False, 'not throw BceServerError'
def test_create_thumbnail_with_pipeline_none(self):
"""create thumbnail with pipeline none"""
source = {'key': self.key}
try:
self.client.create_thumbnail_job(None, source)
except ValueError as e:
assert e.message.startswith('arg "pipeline_name" should not be None')
def test_create_thumbnail_with_pipeline_empty(self):
"""create thumbnail with pipeline empty"""
source = {'key': self.key}
with nose.tools.assert_raises_regexp(BceClientError,
'pipeline_name can\'t be empty string'):
self.client.create_thumbnail_job('', source)
    def test_create_thumbnail_with_key_is_chinese(self):
        """create thumbnail job with key in Chinese"""
self.key = 'test--*--中文.mp4'
source = {'key': self.key}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp)
def test_create_thumbnail_with_key_is_multiple_chars(self):
"""create thumbnail job with key is multiple chars"""
self.key = 'job_测试_123.mp4'
source = {'key': self.key}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp)
def test_create_thumbnail_with_key_not_exist(self):
"""create thumbnail with key not exist"""
source = {'key': 'not_exist.mp4'}
try:
self.client.create_thumbnail_job(self.pipeline_name, source)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('bos object: not_exist.mp4 does not exist')
else:
assert True == False, 'not throw BceServerError'
def test_create_thumbnail_with_key_include_folder(self):
"""create thumbnail with key include folder"""
source = {'key': 'media/info/jobtest.mp4'}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp)
def test_create_thumbnail_with_key_long_name(self):
"""create thumbnail with key long name"""
source = {'key': 'longname12longname12longname12longname12longname12longname12.mp4'}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp)
def test_create_thumbnail_keyprefix_none(self):
"""create thumbnail with key prefix is none"""
source = {'key': self.key}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_keyprefix_keydot(self):
"""create thumbnail with key prefix key dot"""
source = {'key': 'test.thumbnail.csdn.mp4'}
resp = self.client.create_thumbnail_job(self.pipeline_name, source)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_format_png(self):
"""create thumbnail with png pic"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': self.sizing_policy,
'widthInPixel': self.width_in_pixel,
'heightInPixel': self.height_in_pixel,
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_format_not_in_enum(self):
"""create thumbnail format not in enum"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'txt',
'sizingPolicy': self.sizing_policy,
'widthInPixel': self.width_in_pixel,
'heightInPixel': self.height_in_pixel,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('Could not read JSON: Can not construct')
else:
assert True == False
def test_create_thumbnail_sizingpolicy_in_enum(self):
"""create thumbnail with png pic"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': self.width_in_pixel,
'heightInPixel': self.height_in_pixel,
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_sizingpolicy_not_in_enum(self):
"""create thumbnail format not in enum"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'notsizing',
'widthInPixel': self.width_in_pixel,
'heightInPixel': self.height_in_pixel,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('Could not read JSON: Can not construct')
else:
assert True == False
def test_create_thumbnail_widthinpixel_equal_2000(self):
"""create thumbnail with width pixel equal 2000"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': 2000,
'heightInPixel': self.height_in_pixel,
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_widthinpixel_lessthan_10(self):
"""create thumbnail with width pixel less than 10"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': 5,
'heightInPixel': self.height_in_pixel,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=')
else:
assert True == False
def test_create_thumbnail_widthinpixel_morethan_2000(self):
"""create thumbnail with width pixel more than 2000"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': 2001,
'heightInPixel': self.height_in_pixel,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('target.widthInPixel:target.widthInPixel=')
else:
assert True == False
def test_create_thumbnail_heightinpixel_equal_2000(self):
"""create thumbnail withheight pixel equal 2000"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': self.width_in_pixel,
'heightInPixel': 2000,
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_heightinpixel_lessthan_10(self):
"""create thumbnail with height pixel less than 10"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': self.width_in_pixel,
'heightInPixel': 5,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=')
else:
assert True == False
def test_create_thumbnail_heightinpixel_morethan_2000(self):
"""create thumbnail with height pixel more than 2000"""
source = {'key': self.key}
target = {'keyPrefix': self.key_prefix,
'format': 'png',
'sizingPolicy': 'shrinkToFit',
'widthInPixel': self.width_in_pixel,
'heightInPixel': 2001,
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, target)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('target.heightInPixel:target.heightInPixel=')
else:
assert True == False
def test_create_thumbnail_mode_is_auto(self):
"""create thumbnail with mode is auto"""
source = {'key': self.key}
capture = {'mode': 'auto'}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_mode_not_in_enum(self):
"""create thumbnail with mode not in enum"""
source = {'key': self.key}
capture = {'mode': 'notmode'}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('Could not read JSON: Can not')
else:
assert True == False
def test_create_thumbnail_start_time_lessthan_0(self):
"""create thumbnail with start time less than 0"""
source = {'key': self.key}
capture = {'startTimeInSecond': -1}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'capture.startTimeInSecond:capture.startTimeInSecond')
else:
assert True == False
def test_create_thumbnail_start_time_float(self):
"""create thumbnail with start time float"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond': 1.25,
'endTimeInSecond': 50,
'intervalInSecond': 10}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_mode_manual_none_starttime(self):
"""create thumbnail mode is manual with start time is none"""
source = {'key': self.key}
capture = {
'mode': 'manual'
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('start time is required in manual mode')
else:
assert True == False
def test_create_thumbnail_end_time_lessthan_0(self):
"""create thumbnail with end time less than 0"""
source = {'key': self.key}
capture = {
'mode': 'auto',
'startTimeInSecond': 0,
'endTimeInSecond': -1,
'intervalInSecond': 10
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'capture.endTimeInSecond:capture.endTimeInSecond')
else:
assert True == False
def test_create_thumbnail_end_time_float(self):
"""create thumbnail with end time float"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond': 1,
'endTimeInSecond': 48.34,
'intervalInSecond': 10}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_mode_auto_with_starttime(self):
"""create thumbnail mode is auto with end time"""
source = {'key': self.key}
capture = {
'mode': 'auto',
'startTimeInSecond': 10
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'cannot specify start time, end time, interval or frame number in auto mode')
else:
assert True == False
def test_create_thumbnail_mode_auto_with_endtime(self):
"""create thumbnail mode is auto with end time"""
source = {'key': self.key}
capture = {
'mode': 'auto',
'endTimeInSecond': 10
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'cannot specify start time, end time, interval or frame number in auto mode')
else:
assert True == False
def test_create_thumbnail_mode_auto_with_interval(self):
"""create thumbnail mode is auto with interval time"""
source = {'key': self.key}
capture = {
'mode': 'auto',
'intervalInSecond': 10
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'cannot specify start time, end time, interval or frame number in auto mode')
else:
assert True == False
def test_create_thumbnail_mode_manual_with_null_endtime(self):
"""create thumbnail mode is manual with end time none"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond': 10
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_mode_manual_with_endtime_less_starttime(self):
"""create thumbnail mode is manual with endtime less than start time"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond':20,
'endTimeInSecond':10,
'intervalInSecond': 5
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith('start time cannot larger than end time')
else:
assert True == False
def test_create_thumbnail_mode_manual_endtime_null(self):
"""create thumbnail mode is manual with endtime null"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond':100,
'intervalInSecond': 5
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_mode_manual_interval_null(self):
"""create thumbnail mode is manual with interval null"""
source = {'key': '测试视频.mp4'}
capture = {
'mode': 'manual',
'startTimeInSecond':10,
'endTimeInSecond': 20
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
print(resp)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_interval_less_0(self):
"""create thumbnail mode is manual with interver null"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond':1,
'endTimeInSecond':50,
'intervalInSecond': -1
}
try:
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
except BceHttpClientError as e:
if isinstance(e.last_error, BceServerError):
assert e.last_error.message.startswith(
'capture.intervalInSecond:capture.intervalInSecond')
else:
assert True == False
def test_create_thumbnail_interval_float(self):
"""create thumbnail mode is manual with interver float"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond':1,
'endTimeInSecond':50,
'intervalInSecond': 1.56
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
def test_create_thumbnail_start_equal_end(self):
"""create thumbnail start time equal end time"""
source = {'key': self.key}
capture = {
'mode': 'manual',
'startTimeInSecond':10,
'endTimeInSecond':10,
'intervalInSecond': 1.56
}
resp = self.client.create_thumbnail_job(self.pipeline_name, source, capture=capture)
nose.tools.assert_is_not_none(resp.job_id)
| 2.078125 | 2 |
ML/Pytorch/GANs/5. ProGAN/test.py | ZonePG/Machine-Learning-Collection | 9 | 12796030 | <filename>ML/Pytorch/GANs/5. ProGAN/test.py
def func(x=1, y=2, **kwargs):
print(x, y)
print(func(x=3, y=4))
| 1.984375 | 2 |
uranai.py | heeeedgehog/chat2021 | 0 | 12796031 | <reponame>heeeedgehog/chat2021
import re
import time
import random
import IPython
from google.colab import output
n = 0
def chat(text, **kw): # chat function (rewrite this part)
global n
n += 1
return 'ほ' * n
# icon settings
BOT_ICON = 'https://3.bp.blogspot.com/-qbORCFE5qhk/UmTBJwEYKjI/AAAAAAAAZYY/nbjieynFcLQ/s800/job_uranaishi.png'
YOUR_ICON = 'https://3.bp.blogspot.com/-nHZhTWISMxk/Vw5KxMQxRhI/AAAAAAAA5tQ/HR_btIW3k1ISG3GGNG1HFpsgk38wSuGzwCLcB/s800/nuigurumi_bear.png'
def run_chat(chat = chat, start='こんにちは!占いの館へようこそ!この館では、3つの占いを通してあなたを必ずハッピーにします!では早速、占いをはじめましょう!', **kw):
def display_bot(bot_text):
with output.redirect_to_element('#output'):
bot_name = kw.get('bot_name', 'Master')
bot_icon = kw.get('bot_icon', BOT_ICON)
display(IPython.display.HTML(f'''
<div class="sb-box">
<div class="icon-img icon-img-left">
<img src="{bot_icon}" width="60px">
</div><!-- /.icon-img icon-img-left -->
<div class="icon-name icon-name-left">{bot_name}</div>
<div class="sb-side sb-side-left">
<div class="sb-txt sb-txt-left">
{bot_text}
</div><!-- /.sb-txt sb-txt-left -->
</div><!-- /.sb-side sb-side-left -->
</div><!-- /.sb-box -->
'''))
def display_you(your_text):
with output.redirect_to_element('#output'):
your_name = kw.get('your_name', 'あなた')
your_icon = kw.get('your_icon', YOUR_ICON)
display(IPython.display.HTML(f'''
<div class="sb-box">
<div class="icon-img icon-img-right">
<img src="{your_icon}" width="60px">
</div><!-- /.icon-img icon-img-right -->
<div class="icon-name icon-name-right">{your_name}</div>
<div class="sb-side sb-side-right">
<div class="sb-txt sb-txt-right">
{your_text}
</div><!-- /.sb-txt sb-txt-right -->
</div><!-- /.sb-side sb-side-right -->
</div><!-- /.sb-box -->
'''))
display(IPython.display.HTML('''
<style>
/* 全体 */
.sb-box {
position: relative;
overflow: hidden;
}
/* アイコン画像 */
.icon-img {
position: absolute;
overflow: hidden;
top: 0;
width: 80px;
height: 80px;
}
/* アイコン画像(左) */
.icon-img-left {
left: 0;
}
/* アイコン画像(右) */
.icon-img-right {
right: 0;
}
/* アイコン画像 */
.icon-img img {
border-radius: 50%;
border: 2px solid #eee;
}
/* アイコンネーム */
.icon-name {
position: absolute;
width: 80px;
text-align: center;
top: 83px;
color: #fff;
font-size: 10px;
}
/* アイコンネーム(左) */
.icon-name-left {
left: 0;
}
/* アイコンネーム(右) */
.icon-name-right {
right: 0;
}
/* 吹き出し */
.sb-side {
position: relative;
float: left;
margin: 0 105px 40px 105px;
}
.sb-side-right {
float: right;
}
/* 吹き出し内のテキスト */
.sb-txt {
position: relative;
border: 2px solid #eee;
border-radius: 6px;
background: #eee;
color: #333;
font-size: 15px;
line-height: 1.7;
padding: 18px;
}
.sb-txt>p:last-of-type {
padding-bottom: 0;
margin-bottom: 0;
}
/* 吹き出しの三角 */
.sb-txt:before {
content: "";
position: absolute;
border-style: solid;
top: 16px;
z-index: 3;
}
.sb-txt:after {
content: "";
position: absolute;
border-style: solid;
top: 15px;
z-index: 2;
}
/* 吹き出しの三角(左) */
.sb-txt-left:before {
left: -7px;
border-width: 7px 10px 7px 0;
border-color: transparent #eee transparent transparent;
}
.sb-txt-left:after {
left: -10px;
border-width: 8px 10px 8px 0;
border-color: transparent #eee transparent transparent;
}
/* 吹き出しの三角(右) */
.sb-txt-right:before {
right: -7px;
border-width: 7px 0 7px 10px;
border-color: transparent transparent transparent #eee;
}
.sb-txt-right:after {
right: -10px;
border-width: 8px 0 8px 10px;
border-color: transparent transparent transparent #eee;
}
/* 767px(iPad)以下 */
@media (max-width: 767px) {
.icon-img {
width: 60px;
height: 60px;
}
/* アイコンネーム */
.icon-name {
width: 60px;
top: 62px;
font-size: 9px;
}
/* 吹き出し(左) */
.sb-side-left {
margin: 0 0 30px 78px;
/* 吹き出し(左)の上下左右の余白を狭く */
}
/* 吹き出し(右) */
.sb-side-right {
margin: 0 78px 30px 0;
/* 吹き出し(右)の上下左右の余白を狭く */
}
/* 吹き出し内のテキスト */
.sb-txt {
padding: 12px;
/* 吹き出し内の上下左右の余白を-6px */
}
}
</style>
<script>
var inputPane = document.getElementById('input');
inputPane.addEventListener('keydown', (e) => {
if(e.keyCode == 13) {
google.colab.kernel.invokeFunction('notebook.Convert', [inputPane.value], {});
inputPane.value=''
}
});
</script>
<div id='output' style='background: #66d;'></div>
<div style='text-align: right'><textarea id='input' style='width: 100%; background: #eee;'></textarea></div>
'''))
def convert(your_text):
display_you(your_text)
bot_text = chat(your_text, **kw)
time.sleep(random.randint(0,4))
display_bot(bot_text)
output.register_callback('notebook.Convert', convert)
if start is not None:
display_bot(start)
# frame: a dictionary holding the conversation state
# 'name', 'birthday', 'asking'
frame = {}
TYPE = []
def number(x):
number = list(x)
number = [''.join( x for x in number if x not in '\n')]
number = sum([[*word] for word in number], [])
m = re.compile('^[0-9]+$')
result = [s for s in number if m.match(s)]
number = list(map(int, result))
#sn = sum(int(c) for c in number)
return len(number)
def match(x):
Match = list(x)
Match = ''.join( x for x in Match if x not in '\n')
pattern = r'\d\d'
result = re.match(pattern, Match)
if result == None:
return 'None'
def soulnumber(X):
number = [''.join( x for x in X if x not in '\n')]
number = sum([[*word] for word in number], [])
m = re.compile('^[0-9]+$')
result = [s for s in number if m.match(s)]
number = list(map(int, result))
sn = sum(int(c) for c in number)
    if sn % 11 == 0: # repdigit sums (11, 22, 33, 44) are kept as-is
        return sn
    if sn > 9: # two or more digits
        return soulnumber(str(sn)) # reduce again recursively
return sn
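# Worked example (illustrative dates): a birth date of 2000-01-23 gives the digits
# 2+0+0+0+0+1+2+3 = 8, so the soul number is 8; 1999-12-31 gives 35 -> 3+5 = 8 as well,
# while repdigit sums such as 11, 22, 33 or 44 are returned unchanged by the check above.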
def uranai(input_text):
    global frame # refer to the state kept outside this function
    if 'asking' in frame: # store the answer to the question currently being asked
frame[frame['asking']] = input_text
del frame['asking']
if 'name' not in frame:
        frame['asking'] = 'name' # ask for the name
return 'あなたの名前は?'
if frame['name'] == '\n':
del frame['name']
frame['asking'] = 'name'
return '名前が入力されていないようです。もう一度、あなたのお名前を入力してください。'
if 'name' in frame and 'year' not in frame:
        frame['asking'] = 'year' # ask for the birth year
return 'あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。'
if 'name' in frame and (number(frame['year']) != 4 or match(frame['year']) == 'None'):
del frame['year']
        frame['asking'] = 'year' # ask for the birth year again
return '正しく入力されていないようです。もう一度、あなたの生まれた年を西暦(4桁)で教えてください(ex:平成12年生まれの場合は2000と入力)。'
if 'name' in frame and 'year' in frame and 'month' not in frame:
        frame['asking'] = 'month' # ask for the birth month
return 'あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。'
if 'name' in frame and 'year' in frame and (number(frame['month']) != 2 or match(frame['month']) == 'None'):
del frame['month']
        frame['asking'] = 'month' # ask for the birth month again
return '正しく入力されていないようです。もう一度、あなたの生まれた月を2桁で教えてください(ex:1月生まれの場合は01と入力)。'
if 'name' in frame and 'year' in frame and 'month' in frame and 'day' not in frame:
        frame['asking'] = 'day' # ask for the birth day
return 'あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。'
if 'name' in frame and 'year' in frame and 'month' in frame and (number(frame['day']) != 2 or match(frame['day']) == 'None'):
del frame['day']
        frame['asking'] = 'day' # ask for the birth day again
return '正しく入力されていないようです。もう一度、あなたの生まれた日を2桁で教えてください(ex:1日生まれの場合は01と入力)。'
if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' not in frame: # 占います
frame['asking'] = 'type'
return 'この館では、計算したソウルナンバーをもとに3つの占いができます!Aでは性格やタイプを、Bでは同じソウルナンバーを持つ有名人を、Cではラッキーカラーを診断します!!!A,B,Cのうちどれか1文字を入力してください。'
if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and frame['type'] != '\nA' and frame['type'] != '\nB' and frame['type'] != '\nC': # 占います
del frame['type']
frame['asking'] = 'type'
return '正しく入力されていないようです。もう一度、A,B,Cのうちどれか1文字を入力してください。'
if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' in frame and 'manzoku' not in frame:
if frame['type'] == '\nA':
#number = list(frame['year']) + list(frame['month']) + list(frame['day'])
TYPE.append('A')
soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day']))
if soul == 1:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが1のあなたは、素晴らしい行動力の持ち主で、頭の回転が速く、周りからも頼られる存在ですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 2:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが2のあなたは、さっぱりした兄貴肌・姉貴肌的な性格で、バランス調整力が高い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 3:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが3のあなたは、平和主義者で洞察力が高く、周りからも慕われる存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 4:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが4のあなたは、外向的で積極的なリーダー気質で、周りに影響力を与えられるような存在のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 5:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが5のあなたは、真面目で曲がったことが嫌いで、自分の道を突き進む人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 6:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが6のあなたは、社交的で、情け深く、頭の回転が速い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 7:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが7のあなたは、優しく、家庭的で、探求心が強い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 8:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが8のあなたは、穏やかな性格で純粋な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 9:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが9のあなたは、さびしがり屋さんで、やんちゃな部分もある、憎めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 11:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが11のあなたは、直感が鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 22:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが22のあなたは、判断力が強く、諦めない人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 33:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが33のあなたは、天才肌な人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
else:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが44のあなたは、問題解決能力が高く、リーダー気質で、考えが鋭い人のようですね!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
if frame['type'] == '\nB':
#number = list(frame['year']) + list(frame['month']) + list(frame['day'])
TYPE.append('B')
soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day']))
if soul == 1:
frame['asking'] = 'manzoku'
return 'あなたと同じ1のソウルナンバーを持つ有名人には、お笑いタレントの春日俊彰さんや俳優の成田凌さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 2:
frame['asking'] = 'manzoku'
return 'あなたと同じ2のソウルナンバーを持つ有名人には、歌手の和田アキ子さんや俳優の山﨑賢人さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 3:
frame['asking'] = 'manzoku'
return 'あなたと同じ3のソウルナンバーを持つ有名人には、俳優の生瀬勝久さんや女優の天海祐希さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 4:
frame['asking'] = 'manzoku'
return 'あなたと同じ4のソウルナンバーを持つ有名人には、お笑いタレントの渡辺直美さんや女優の米倉涼子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 5:
frame['asking'] = 'manzoku'
return 'あなたと同じ5のソウルナンバーを持つ有名人には、予備校講師の林修先生やタレントの国分太一さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 6:
frame['asking'] = 'manzoku'
return 'あなたと同じ6のソウルナンバーを持つ有名人には、女優の深田恭子さんや歌手の米津玄師さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 7:
frame['asking'] = 'manzoku'
return 'あなたと同じ7のソウルナンバーを持つ有名人には、女優の新垣結衣さんや長澤まさみさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 8:
frame['asking'] = 'manzoku'
return 'あなたと同じ8のソウルナンバーを持つ有名人には、プロフィギュアスケーターの浅田真央さんやプロ野球選手の大谷翔平選手など多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 9:
frame['asking'] = 'manzoku'
return 'あなたと同じ9のソウルナンバーを持つ有名人には、女優の北川景子さんやお笑いタレントの松本人志さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 11:
frame['asking'] = 'manzoku'
return 'あなたと同じ11のソウルナンバーを持つ有名人には、お笑いタレントの上田晋也さんや女優の杉咲花さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 22:
frame['asking'] = 'manzoku'
return 'あなたと同じ22のソウルナンバーを持つ有名人には、お笑いタレントの博多大吉さんや女優の小池栄子さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 33:
frame['asking'] = 'manzoku'
return 'あなたと同じ33のソウルナンバーを持つ有名人には、俳優の福山雅治さんや歌手のあいみょんさんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
else:
frame['asking'] = 'manzoku'
return 'あなたと同じ44のソウルナンバーを持つ有名人には、アイドルの岸優太さんや女優の中村静香さんなど多くの有名人がいらっしゃいます!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
if frame['type'] == '\nC':
TYPE.append('C')
soul = soulnumber(list(frame['year']) + list(frame['month']) + list(frame['day']))
if soul == 1:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが1のあなたのラッキーカラーは、レッドです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 2:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが2のあなたのラッキーカラーは、ホワイト、オレンジ、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 3:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが3のあなたのラッキーカラーは、イエローです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 4:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが4のあなたのラッキーカラーは、グリーン、ブラウン、ブルーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 5:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが5のあなたのラッキーカラーは、グリーンとピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 6:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが6のあなたのラッキーカラーは、ピンクです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 7:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが7のあなたのラッキーカラーは、ネイビーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 8:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが8のあなたのラッキーカラーは、オレンジです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 9:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが9のあなたのラッキーカラーは、パープルとホワイトです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 11:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが11のあなたのラッキーカラーは、シルバーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 22:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが22のあなたのラッキーカラーは、ゴールド、シルバー、グリーンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
elif soul == 33:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが33のあなたのラッキーカラーは、レインボーです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
else:
frame['asking'] = 'manzoku'
return 'ソウルナンバーが44のあなたのラッキーカラーは、ブラウンです!参考にしてみてください!この占い結果に満足できた場合はYを、満足できなかった場合はNを入力してください。'
if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' in frame and frame['manzoku'] != '\nY' and frame['manzoku'] != '\nN':
del frame['manzoku']
frame['asking'] = 'manzoku'
return '正しく入力されていないようです。もう一度、YかNのどちらか1文字を入力してください。'
if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' in frame and frame['manzoku'] == '\nY':
return 'よかったです!また占いしにきてくださいね!'
if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' in frame and frame['manzoku'] == '\nN' and len(TYPE) < 3:
#TYPE.append(frame['type'])
del frame['type']
del frame['manzoku']
frame['asking'] = 'type'
return 'ではもう1度、A(性格やタイプ)、B(同じナンバーを持つ有名人)、C(ラッキーカラー)を選択し、A,B,Cのうちどれか1文字を入力してください。次の占いであなたをハッピーにさせてみせます!'
if 'name' in frame and 'year' in frame and 'month' in frame and 'day' in frame and 'type' in frame and frame['manzoku'] == '\nN' and len(TYPE) >= 3:
return 'A,B,Cの占いであなたをハッピーにさせることができずに申し訳ないです。でも占いでは見つけることのできなかったあなたの魅力は必ずあるはずです!!元気を出してください!!!'
return output_text
def start():
run_chat(chat=uranai)
| 2.8125 | 3 |
mex/apps.py | coblo/mex | 5 | 12796032 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.apps import AppConfig
from django.contrib.admin import AdminSite
class MexConfig(AppConfig):
name = "mex"
verbose_name = settings.MEX_BRAND
def ready(self):
AdminSite.site_title = settings.MEX_BRAND
AdminSite.site_header = settings.MEX_BRAND
AdminSite.index_title = ""
| 1.6875 | 2 |
cryptoalarm-app/python/cryptoalarm/monitor.py | nesfit/jane-cryptoalarm | 3 | 12796033 | #!/usr/bin/env python3
"""
This module specifies the class Monitor, designated for the processing of cryptocurrency blocks
"""
import threading
import logging
from timeit import default_timer as timer
from datetime import datetime, timedelta
from .coin import BTC, BCH, DASH, ZEC, LTC, ETH
from .database import Database
from .notifier import Notifier
logger = logging.getLogger(__name__)
class Monitor():
"""
    Monitor controls the processing of cryptocurrency blocks
"""
stop = threading.Event()
coins = []
threads = []
database = None
notifier = None
def __init__(self, config):
"""
Construct new Monitor object
:param config: configuration dict
"""
self.config = config
self.database = Database(config['db'], self.config)
self.notifier = Notifier(config, self.database)
for coin in config['coins']:
coin_inst = coin(config, self.stop)
coin_inst.db_id = self.database.get_coin(coin_inst.__class__.__name__)['id']
self.coins.append(coin_inst)
def shutdown(self, signum, frame):
"""
Terminate threads of each component
"""
        logger.info('Shutting down')
self.stop.set()
for thread in self.threads:
thread.join()
self.notifier.process_remaining()
def test_connection(self):
"""
Test connectivity of all components
"""
self.notifier.test_connection()
for coin in self.coins:
if not coin.test_connection():
raise ConnectionError('{}: node unreachable'.format(coin.__class__.__name__))
def start(self):
"""
Start thread for every coin and notifier
"""
for coin in self.coins:
logger.info('%s: monitoring started', coin)
thread = threading.Thread(target=self.worker, args=(coin,))
self.threads.append(thread)
thread.start()
thread = threading.Thread(target=self.notifier.worker, args=(self.stop,))
self.threads.append(thread)
thread.start()
def set_last_blocks(self):
"""
Set the current block of each coin as the last processed
"""
for coin in self.coins:
number, block_hash = coin.get_last_block_number()
self.database.insert_block(coin, number, block_hash)
logger.info('%s: setting %s as last processed block', coin, number)
def process_block(self, database, coin, number):
"""
        Process the transactions of <coin> in block number <number>
:param database: Database object
:param coin: Coin object
:param number: block number
:return: number of the next block
"""
time_start = timer()
coin.get_block(number)
block_id = database.insert_block(coin, number, coin.get_block_hash())
logger.info('%s: processing block: %s', coin, number)
cnt = 0
for tx_hash in coin.get_block_transactions():
addresses = coin.get_transaction_io(tx_hash)
self.notifier.add_transaction(coin, number, block_id, tx_hash, addresses)
cnt += 1
time_total = timer() - time_start
logger.debug('%s: processed %d transactions in %.4fs', coin, cnt, time_total)
return number + 1
def last_processed_block(self, database, coin):
"""
        Get the last processed block of <coin>
:param database: Database object
:param coin: Coin object
:return: number of last processed block
"""
number = database.get_last_block_number(coin)
while True:
hash_saved = database.get_block_hash(coin, number)
hash_node = coin.get_block_hash(number)
if hash_saved == hash_node or hash_saved is None:
break
database.delete_block(coin, number)
number -= 1
#print("last_processed_block> ", number)
return number
def worker(self, coin):
"""
Process new blocks of cryptocurrency <coin> until stop event is set.
:param coin: a class inherited from Coin
"""
database = Database(self.config['db'], self.config)
while not self.stop.is_set():
current_number = self.last_processed_block(database, coin) + 1
last_number, _ = coin.get_last_block_number()
#print(current_number, last_number)
while current_number <= last_number:
if self.stop.is_set():
break
try:
current_number = self.process_block(database, coin, current_number)
except InterruptedError:
break
until_next_block = (coin.get_block_creation_time() + coin.get_block_time() - datetime.now()).total_seconds()
if until_next_block < 0: # should be already generated
until_next_block = (coin.get_block_time() * 0.05).total_seconds() # wait only brief time (5% of block time) before trying again
self.stop.wait(timeout=until_next_block)
logger.info('%s: terminating', coin)
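# Illustrative wiring sketch (not part of the original module; the signal handling and
# config loading shown here are assumptions, only the 'db' and 'coins' keys follow __init__):
#   import signal
#   monitor = Monitor(config)
#   signal.signal(signal.SIGTERM, monitor.shutdown)
#   monitor.test_connection()
#   monitor.set_last_blocks()   # optional: start monitoring from the current chain tip
#   monitor.start()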
| 2.875 | 3 |
examples/opencv_stream/sender.py | Rubilmax/python-p2p | 2 | 12796034 | <filename>examples/opencv_stream/sender.py
import sys
import os
# allow the example to be run without installing the package, from this repository's root directory
sys.path.append(os.path.abspath(os.path.join('.')))
"""
OpenCV Stream sender example
This example is made to be run from one python shell,
waiting for connections from receivers.
"""
import cv2
import queue
from peerpy import Peer, protocol
cam = cv2.VideoCapture(0)
with Peer(timeout=1) as peer:
while True:
ret, frame = cam.read()
if not ret:
print("Failed grabbing camera frame")
break
k = cv2.waitKey(1)
if k % 256 == 27:
print("Escape hit, closing...")
break
peer.broadcast(frame)
#cv2.imshow("Webcam", frame)
cam.release()
# cv2.destroyAllWindows()
| 2.65625 | 3 |
streaming_event_compliance/services/build_automata/case_thread.py | lvzheqi/StreamingEventCompliance | 3 | 12796035 | <filename>streaming_event_compliance/services/build_automata/case_thread.py<gh_stars>1-10
from streaming_event_compliance import app
from streaming_event_compliance.objects.variable.globalvar import gVars, CL, T, C
from streaming_event_compliance.objects.automata import automata
from streaming_event_compliance.objects.exceptions.exception import ThreadException
from streaming_event_compliance.objects.logging.server_logging import ServerLogging
import threading
import traceback
import sys
check_executing_order = {}
WINDOW_SIZE = app.config['WINDOW_SIZE']
MAXIMUN_WINDOW_SIZE = app.config['MAXIMUN_WINDOW_SIZE']
def run_build(case_id):
func_name = sys._getframe().f_code.co_name
ServerLogging().log_info(func_name, str(threading.current_thread()))
try:
if C.lock_List.get(case_id).acquire():
ServerLogging().log_info(func_name, "server", case_id, "Acquiring lock")
windows_memory = C.dictionary_cases.get(case_id)[0: MAXIMUN_WINDOW_SIZE + 1]
C.dictionary_cases.get(case_id).pop(0)
C.lock_List.get(case_id).release()
ServerLogging().log_info(func_name, "server", case_id, "Released lock")
executing_order4test(case_id, windows_memory)
calculate_connection_for_different_prefix_automata(windows_memory)
ServerLogging().log_info(func_name, "server", case_id, "Calculating connections")
except Exception:
ServerLogging().log_error(func_name, "server", case_id, "Error with Caselock")
raise ThreadException(traceback.format_exc())
def calculate_connection_for_different_prefix_automata(windowsMemory):
"""
    Description:
        This function calculates the connections for each configured window size from the windowsMemory.
    :param windowsMemory: :`list` activities belonging to the same case_id as the current event; the
                            current event is stored in the last position, i.e.
                            event == windowsMemory[MAXIMUN_WINDOW_SIZE].
"""
for ws in WINDOW_SIZE:
source_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws: MAXIMUN_WINDOW_SIZE])
sink_node = ','.join(windowsMemory[MAXIMUN_WINDOW_SIZE - ws + 1: MAXIMUN_WINDOW_SIZE + 1])
if CL.lock_list.get((source_node, sink_node)):
if CL.lock_list.get((source_node, sink_node)).acquire():
try:
if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') == -1:
gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0))
elif source_node.find('*') == -1:
gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1))
elif source_node.find('*') != -1 and sink_node.find('*') == -1:
gVars.autos.get(ws).update_automata(automata.Connection('NONE', sink_node, 1))
CL.lock_list.get((source_node, sink_node)).release()
except Exception as ec:
raise ec
else:
lock = threading.RLock()
CL.lock_list[source_node, sink_node] = lock
if CL.lock_list.get((source_node, sink_node)).acquire():
try:
if windowsMemory[MAXIMUN_WINDOW_SIZE] == '~!@#$%' and source_node.find('*') == -1:
gVars.autos.get(ws).update_automata(automata.Connection(source_node, '~!@#$%', 0))
elif source_node.find('*') == -1:
gVars.autos.get(ws).update_automata(automata.Connection(source_node, sink_node, 1))
CL.lock_list.get((source_node, sink_node)).release()
except Exception as ec:
raise ec
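# Worked example of the slicing above (illustrative values, assuming MAXIMUN_WINDOW_SIZE = 4):
# for windowsMemory = ['a', 'b', 'c', 'd', 'e'] and ws = 2, the source node is 'c,d' and the
# sink node is 'd,e', i.e. the two most recent length-2 windows around the new event 'e'.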
def executing_order4test(case_id, windows_memory):
global check_executing_order
'''--------For Testing: Before releasing lock, which thread used it will be stored-------'''
if check_executing_order.get(case_id):
check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE])
else:
check_executing_order[case_id] = []
check_executing_order.get(case_id).append(windows_memory[MAXIMUN_WINDOW_SIZE])
'''--------For Testing: Before releasing lock, which thread used it will be stored-------'''
| 2.046875 | 2 |
reduce_demo.py | zdenek-nemec/python-tutorial-socratica | 1 | 12796036 | <reponame>zdenek-nemec/python-tutorial-socratica
import functools
numbers = list(range(1, 11))
print(numbers)
product = functools.reduce(lambda x, y: x*y, numbers)
print(product)
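# reduce folds the list left to right: ((((1*2)*3)*4)*...*10) == 3628800, i.e. 10!,
# equivalent to math.factorial(10) or math.prod(numbers) on Python 3.8+.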
| 3.515625 | 4 |
common/plotter.py | tianluyuan/pyutils | 1 | 12796037 | <reponame>tianluyuan/pyutils<gh_stars>1-10
import functools
import numpy as np
from scipy import optimize
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import colors
import matplotlib.patheffects as path_effects
import matplotlib.pyplot as plt
from collections import namedtuple
CoordSys = namedtuple('CoordSys', 'det eq gal')
COORD = CoordSys('Detector', 'Equatorial', 'Galactic')
def multipage(filename, figs=None, dpi=200):
""" http://stackoverflow.com/questions/26368876/saving-all-open-matplotlib-figures-in-one-file-at-once
"""
pp = PdfPages(filename)
if figs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
for fig in figs:
fig.savefig(pp, format='pdf')
pp.close()
def colorlines(x, y, ncolors=5, cmapname='viridis_r', **kwargs):
"""Plot a line plot in which the lines change colors as the data is
stepped through.
*ncolors* specifies the number of different colors to use
"""
cmap = plt.get_cmap(cmapname)
norm = colors.Normalize(vmin=0, vmax=ncolors-1)
for i in range(ncolors):
chunksize = len(x)//ncolors
low = i*chunksize
# add 1 to keep lines connected
high = min((i+1)*chunksize+1, len(x))
plt.plot(x[low:high], y[low:high], color=cmap(norm(i)), **kwargs)
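# Illustrative usage sketch (synthetic data, not from the original module): plot a sine
# curve whose colour steps through five viridis shades as the data progresses.
#   t = np.linspace(0, 10, 500)
#   colorlines(t, np.sin(t), ncolors=5, lw=2)
#   plt.show()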
def pdf(func):
""" decorator to save all plots generated by func to pdf
"""
@functools.wraps(func)
def pdfwrapper(*args, **kwargs):
if 'pdffile' in kwargs and kwargs['pdffile'] is not None:
plt.close('all')
ret = func(*args, **kwargs)
multipage(kwargs['pdffile'])
return ret
else:
return func(*args, **kwargs)
return pdfwrapper
def new_lims(curr_lim, bounds):
"""checks whether the current limit exceeds the bounds and returns
the appropriate new limits based on bounds or the current
limit. Reverse order (i.e. left > right) is allowed and accounted for.
"""
lb, rt = curr_lim
if lb <= rt:
# normal ordering
combined = sorted(curr_lim+bounds)
# no overlap
if tuple(combined[:2]) == (lb, rt) or tuple(combined[2:]) == (lb, rt):
return bounds[0], bounds[1]
return combined[1], combined[2]
else:
# reverse ordering
combined = sorted(curr_lim+bounds, reverse=True)
# no overlap
if tuple(combined[:2]) == (lb, rt) or tuple(combined[2:]) == (lb, rt):
return bounds[0], bounds[1]
return combined[1], combined[2]
def restrict_axes(ax, xlim=None, ylim=None):
"""Given a matplotlib axis *ax*, restricts the axis limits to xlim and
ylim if they exceed the bounds (xlim, ylim). If the axis limit
does not overlap with (xlim, ylim), the new limits are set to
(xlim, ylim). Otherwise limits are kept as is.
*xlim* and *ylim* are the restricted ranges and should be passed as tuples
"""
if xlim is not None:
ax.set_xlim(*new_lims(ax.get_xlim(), xlim), auto=None)
if ylim is not None:
ax.set_ylim(*new_lims(ax.get_ylim(), ylim), auto=None)
def contour_levels(x, y, cls=(0.95, 0.68), bins=None):
"""given 2D datapoints, return values of the pdf corresponding to the
passed confidence levels
"""
if bins is None:
bins = int(np.sqrt(len(x)))
# Make a 2d normed histogram
    H, xedges, yedges = np.histogram2d(x, y, bins=bins, density=True)  # density=True replaces the removed 'normed' argument
norm=H.sum() # Find the norm of the sum
# Take histogram bin membership as proportional to Likelihood
# This is true when data comes from a Markovian process
def objective(limit, target):
w = np.where(H>limit)
count = H[w]
return count.sum() - target
levels = [optimize.bisect(objective, H.min(), H.max(), args=(cl*norm,))
for cl in cls]
levels.append(H.max())
return levels
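# Illustrative usage sketch (synthetic data): find the histogram density thresholds that
# enclose roughly 95% and 68% of a 2D Gaussian sample, then draw them as contours on a
# matching normalised 2D histogram.
#   x, y = np.random.normal(size=(2, 10000))
#   levels = contour_levels(x, y, cls=(0.95, 0.68))   # ascending density thresholds
#   H, xe, ye = np.histogram2d(x, y, bins=int(np.sqrt(len(x))), density=True)
#   plt.contour(H.T, levels, extent=(xe[0], xe[-1], ye[0], ye[-1]))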
def hp_ticklabels(coord, zoom=False, lonra=None, latra=None, rot=None):
""" labels coordinates on a healpy map
zoom: indicates zoomed-in cartview
lonra: longitude range of zoomed-in map
latra: latitude range of zoom-in map
rot: center of zoomed in map
lcoord: label of coordinate system
"""
import healpy as hp
# coordinate labels
ax = plt.gca()
if zoom:
# location of other, fixed coordinate
lon_offset = rot[0]+lonra[0]
lat_offset = rot[1]+latra[0]
# lonlat coordinates for labels
lons = np.arange(np.round(lon_offset),
lon_offset+lonra[1]-lonra[0], 2)
lats = np.arange(np.round(lat_offset),
lat_offset+latra[1]-latra[0], 2)
else:
lon_offset = -180
lat_offset = 0
# lonlat coordinates for labels
lons = np.arange(-150, 181, 30)
lats = np.arange(-90, 91, 30)
# actual text at those coordinates
if coord == COORD.det:
llats = 90-lats
else:
llats = lats
# white outline around text
pe = [path_effects.Stroke(linewidth=1.5, foreground='white'),
path_effects.Normal()]
for _ in zip(lats, llats):
hp.projtext(lon_offset, _[0], "{:.0f}$^\circ$".format(_[1]),
lonlat=True, path_effects=pe)
if zoom:
for _ in lons:
hp.projtext(_, lat_offset,
"{:.0f}$^\circ$".format(_), lonlat=True,
path_effects=pe)
else:
ax.annotate(r"$\bf{-180^\circ}$", xy=(1.7, 0.625), size="medium")
ax.annotate(r"$\bf{180^\circ}$", xy=(-1.95, 0.625), size="medium")
ax.annotate(coord, xy=(0.8, -0.05),
size="medium", xycoords="axes fraction")
| 2.578125 | 3 |
JsonCodeTools/jinja2_codegen/templates/base/backend_api.py | kamlam/EAGLE-Open-Model-Profile-and-Tools-1 | 0 | 12796038 | import sys
from flask import Response, Blueprint
from flask.views import MethodView
from backend.backend import save_state, load_state
setattr(sys.modules[__name__], __name__, Blueprint(__name__, __name__))
urls = [("/backend/save_state/" , "BackendSaveState", ["POST"]),
("/backend/load_state/" , "BackendLoadState", ["POST"])
]
class Successful(Response):
def __init__(self, message, info=''):
super(Successful, self).__init__()
self.status = '200 '+message
self.status_code = 200
self.headers = {'Content-Type': 'application/json'}
self.data = info
#/backend/save_state/
class BackendSaveState(MethodView):
def post(self):
print "Save state operation"
retval = save_state()
if retval:
return Successful("Successful operation",'Saved state')
#/backend/load_state/
class BackendLoadState(MethodView):
def post(self):
print "Load state operation"
retval = load_state()
if retval:
return Successful("Successful operation",'Loaded state')
for element in urls:
getattr(sys.modules[__name__], __name__).add_url_rule(element[0], view_func = globals()[element[1]].as_view(''+element[1]+'_api'), methods=element[2])
| 2.34375 | 2 |
website/API/generate_pairs.py | marcinkaczmarek10/pairs_generator_flask | 0 | 12796039 | <reponame>marcinkaczmarek10/pairs_generator_flask<filename>website/API/generate_pairs.py
import operator
import itertools
from flask import jsonify, abort, Blueprint, request
from website.database.DB import SessionFactory, SessionContextManager
from website.database.models import UsersPerson, RandomPair, DrawCount
from website.generate_pairs.generate_random_pairs import generate_random_pairs, Person
from website.utils.data_serializers import ResultSchema, RandomPersonSchema
from website.utils.login_manager import token_required
from website.utils.email_sending import send_mail_to_pairs, MailError
from website.generate_pairs.routes import limiter
api = Blueprint('api', __name__)
@api.route('/results')
@token_required
def get_results(user):
query = SessionFactory.session.query(
RandomPair).outerjoin(
DrawCount, RandomPair.draw_count == DrawCount.id).filter(
DrawCount.user_id == user.id).all()
schema = ResultSchema(many=True)
pretty_result = schema.dump(query)
item_getter = operator.itemgetter('draw_count')
sorted_pretty_result = sorted(pretty_result, key=item_getter)
grouped_result = [list(g) for k, g in itertools.groupby(sorted_pretty_result, item_getter)]
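    # groupby only merges adjacent items, hence the sort by 'draw_count' above;
    # e.g. three rows with draw_count values 1, 1, 2 become [[row, row], [row]].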
return jsonify(grouped_result), 200
@api.route('/pairs')
@token_required
def get_user_pairs(user):
query = SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).all()
schema = RandomPersonSchema(many=True)
user_pairs = schema.dump(query)
return jsonify(user_pairs), 200
@api.route('/generate-pairs', methods=['POST'])
@token_required
def post_generate_pairs(user):
req = request.get_json()
user_random_people_pool = [
Person(person['person_name'], person['person_email']) for person in req
]
if len(user_random_people_pool) > 1:
user_results = generate_random_pairs(user_random_people_pool)
draw_count = DrawCount(user_id=user.id)
with SessionContextManager() as sessionCM:
sessionCM.add(draw_count)
is_draw_count = SessionFactory.session.query(
DrawCount).filter_by(user_id=user.id).order_by(DrawCount.id.desc()).first()
if is_draw_count:
for [first_person, second_person] in user_results:
user_random_pairs = RandomPair(
first_person_name=first_person.name,
first_person_email=first_person.email,
second_person_name=second_person.name,
second_person_email=second_person.email,
draw_count=is_draw_count.id
)
with SessionContextManager() as sessionCM:
sessionCM.add(user_random_pairs)
with SessionContextManager():
SessionFactory.session.query(UsersPerson).filter_by(user_id=user.id).delete()
return jsonify({'Message': 'Your pairs were created!'}), 200
return abort(403, description='You cannot do this')
@api.route('/delete-pair', methods=['DELETE'])
@token_required
def delete_pair(user):
pair = request.get_json()
pair_id = pair['pair']
query = SessionFactory.session.query(UsersPerson).filter_by(id=pair_id).first()
if query:
with SessionContextManager() as session:
session.delete(query)
return jsonify({'message': 'Pair deleted!'}), 200
return jsonify({'message': 'There is no pair!'}), 404
@api.route('/delete-results', methods=['DELETE'])
@token_required
def delete_results(user):
result = request.get_json()
draw_id = result['draw_count']
query = SessionFactory.session.query(RandomPair).filter_by(draw_count=draw_id).all()
if query:
for row in query:
with SessionContextManager() as session:
session.delete(row)
return jsonify({'message': 'Result deleted!'}), 200
return jsonify({'message': 'There is no result!'}), 404
@api.route('/send-email', methods=['POST'])
@token_required
@limiter.limit('20/day')
def send_email_to_chosen(user):
req = request.get_json()
query = SessionFactory.session.query(RandomPair).filter_by(draw_count=req['draw_count']).all()
schema = ResultSchema(many=True)
recipients = schema.dump(query)
if query:
try:
send_mail_to_pairs(recipients, req['title'], req['body'])
return jsonify({'message': 'Emails sent!'}), 200
except MailError:
return jsonify({'message': 'Could not send mails!'}), 500
return jsonify({'message': 'There are no results'}), 404
| 2.359375 | 2 |
utils/multi_threading.py | othmbela/fifa-21-web-scraping | 2 | 12796040 | import threading
class MultiThreading(object):
def __init__(self, scrapers):
self.scrapers = scrapers
def run(self):
threads = []
        for scraper in self.scrapers:
            t = threading.Thread(target=scraper.start)
t.start()
threads.append(t)
for thread in threads:
thread.join() | 3.359375 | 3 |
shisell/extenders/__init__.py | Soluto/shisell-python | 2 | 12796041 | from .create_scoped import create_scoped
from .with_context import with_context
from .with_extras import with_extra, with_extras, Extras
from .with_filter import with_filter, Filter
from .with_identities import with_identity, with_identities, Identities
from .with_meta import with_meta
| 1.023438 | 1 |
data types and variables LAB/special numbers.py | nrgxtra/fundamentals | 0 | 12796042 | <filename>data types and variables LAB/special numbers.py
n = int(input())
for i in range(1, n + 1):
sum_of_digits = 0
for cd in range(len(str(i))):
sum_of_digits += int(str(i)[cd])
if (sum_of_digits == 5) or (sum_of_digits == 7) or (sum_of_digits == 11):
print(f'{i} -> True')
else:
print(f'{i} -> False')
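# Sample run (worked by hand): with n = 20 the only numbers whose digit sum is 5, 7 or 11
# are 5, 7, 14 and 16, so those lines read "-> True" and every other i prints "-> False".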
| 3.84375 | 4 |
app/vehicle.py | harnoor-g/trademebot | 0 | 12796043 | #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>> this class creates an instance of each vehicle that has been parsed
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
class Vehicle:
num_vehicles = 0
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>> init method creates a range of variables to be processed
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def __init__(self, title, odometer, price_info, desc, link):
self.title = title
self.odometer = odometer
self.price_info = price_info
self.desc = desc
self.search_link = link
self.year = self.findYear()
self.make_model = self.findMakeModel()
self.kms = self.findKMS()
self.listing_type = self.check_listing_type()
self.reserve_price, self.buy_now_price = self.findPrice()
Vehicle.num_vehicles += 1
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>> returns the year of the vehicle obtained from the title as an int
#>> includes hard coded values based on source code
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def findYear(self):
return int(self.title.split()[0])
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>> returns the make and model of the vehicle
#>> includes hard coded values based on source code
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def findMakeModel(self):
title_list = self.title.split()
if len(title_list) == 3:
            return title_list[1] + " " + title_list[2]
make_model = ""
        title_iter = iter(title_list)
        next(title_iter)  # skip the year token at the start of the title
        for title in title_iter:
            make_model += title + " "
return make_model.strip()
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>> returns an integer value of the odometer
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def findKMS(self):
without_km = self.odometer[:-2]
return int(without_km.replace(",", ""))
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>> returns two values, reserve_price(return_arg=1) and buy_now_price(return_arg=2) based on the listing type
#>> remove_delimiters(): removes the commas and periods from the the price info string and splits into list format
#>> get_price(): based on the index of buy now or reserve price derived from source code, removes $ and returns an int of the price
#>> reserve and buy now values are returned based on the listing type
#>> includes hard coded values based on source code
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
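    #>> Illustrative example only (the real strings depend on the scraped listing markup):
    #>> a price_info such as "Reserve met  $5,000.00  Buy Now  $7,500.00" splits on the double
    #>> space into four parts, so get_price(1) -> 5000 and get_price(3) -> 7500.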
def findPrice(self):
price_info_list = self.price_info.split(" ")
def remove_delimiters(value):
return value.replace(",", "").replace(".", " ").split()
def get_price(index_of_price):
return int(remove_delimiters((price_info_list[index_of_price])[1:])[0])
if self.listing_type == "auction1":
return get_price(1), get_price(3)
elif self.listing_type == "auction2":
return get_price(1), None
elif self.listing_type == "classified":
return None, get_price(1)
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>> returns the listing type of the vehicle
#>> based on preference
#>> includes hard coded values based on source code
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def check_listing_type(self):
info = self.price_info.split(" ")
if len(info) == 4:
return "auction1"
elif len(info) == 2:
if info[0] == "Reserve met" or info[0] == "No reserve" or info[0] == "Reserve not met":
return "auction2"
else:
return "classified"
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>> returns a representation of the vehicle title, odometer and price info
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def info(self):
return f"{self.title} - {self.odometer} - {self.price_info}"
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>> returns a representation of the vehicle title
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def title_info(self):
return f"{self.title}"
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#>> returns a representation of the vehicle title, odometer and price info
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
def __str__(self):
return f"{self.title} - {self.odometer} - {self.price_info}" | 3.59375 | 4 |
pypeerassets/transactions.py | sparklecoin/pypeerassets | 0 | 12796044 | <gh_stars>0
'''transaction assembly/dissasembly'''
from decimal import Decimal
from math import ceil
from time import time
from btcpy.structs.address import Address
from btcpy.structs.script import (
NulldataScript,
P2pkhScript,
ScriptSig,
StackData,
)
from btcpy.structs.transaction import (
Locktime,
PeercoinMutableTx,
MutableTransaction,
Transaction,
TxIn,
TxOut,
)
from pypeerassets.kutil import Kutil
from pypeerassets.networks import net_query
from pypeerassets.provider import Provider
def calculate_tx_fee(tx_size: int) -> Decimal:
'''return tx fee from tx size in bytes'''
min_fee = Decimal(0.01) # minimum
return Decimal(ceil(tx_size / 1000) * min_fee)
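# Worked example (illustrative): a 1,200-byte transaction spans two started kilobytes,
# so calculate_tx_fee(1200) evaluates to ceil(1.2) * Decimal(0.01), i.e. approximately 0.02 coins.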
def nulldata_script(data: bytes) -> NulldataScript:
'''create nulldata (OP_return) script'''
stack = StackData.from_bytes(data)
return NulldataScript(stack)
def p2pkh_script(network: str, address: str) -> P2pkhScript:
'''create pay-to-key-hash (P2PKH) script'''
network_params = net_query(network)
addr = Address.from_string(network=network_params.btcpy_constants,
string=address)
return P2pkhScript(addr)
def tx_output(network: str, value: Decimal, n: int,
script: ScriptSig) -> TxOut:
'''create TxOut object'''
network_params = net_query(network)
return TxOut(network=network_params.btcpy_constants,
value=int(value * network_params.denomination),
n=n, script_pubkey=script)
def make_raw_transaction(
network: str,
inputs: list,
outputs: list,
locktime: Locktime,
timestamp: int=int(time()),
version: int=1,
) -> MutableTransaction:
'''create raw transaction'''
network_params = net_query(network)
if network_params.network_name.startswith("peercoin"):
return PeercoinMutableTx(
version=version,
timestamp=timestamp,
ins=inputs,
outs=outputs,
locktime=locktime,
network=network_params.btcpy_constants,
)
return MutableTransaction(
version=version,
ins=inputs,
outs=outputs,
locktime=locktime,
network=network_params.btcpy_constants,
)
def find_parent_outputs(provider: Provider, utxo: TxIn) -> TxOut:
'''due to design of the btcpy library, TxIn object must be converted to TxOut object before signing'''
index = utxo.txout # utxo index
return TxOut.from_json(provider.getrawtransaction(utxo.txid, 1)['vout'][index])
def sign_transaction(provider: Provider, unsigned_tx: MutableTransaction,
key: Kutil) -> Transaction:
'''sign transaction with Kutil'''
parent_output = find_parent_outputs(provider, unsigned_tx.ins[0])
return key.sign_transaction(parent_output, unsigned_tx)
| 2.046875 | 2 |
day5/image.py | termoshtt/sevendayshpc | 39 | 12796045 | import glob
import numpy as np
from matplotlib import pyplot as plt
for filename in glob.glob("*.dat"):
print(filename)
name = filename.split(".")[0]
data = np.loadtxt(filename, delimiter=",")
size = int(np.sqrt(len(data)))
data = data.reshape((size, size))
fig, ax = plt.subplots(figsize=(5.12, 5.12))
ax.imshow(data)
plt.tick_params(
bottom=False, left=False, right=False, top=False,
labelbottom=False, labelleft=False, labelright=False, labeltop=False
)
plt.tight_layout()
plt.savefig(name + ".png")
plt.close() | 2.78125 | 3 |
code/main2_xgb.py | wayinone/NYtaxi | 0 | 12796046 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 6 00:25:27 2017
@author: Wayne
"""
import pandas as pd
import xgboost as xgb
import numpy as np
from sklearn.model_selection import train_test_split
import pickle
#%%
mydf1= mydf[outliers.outliers==False]
z = np.log(data.trip_duration+1)
X = mydf1
Xtest = testdf
data_test = xgb.DMatrix(Xtest)
#%%
rmse = lambda z,zp:np.sqrt(np.mean((z-zp)**2))
#%%
parms = {'max_depth':14, #maximum depth of a tree
'objective':'reg:linear',
'eta' :0.025,
'subsample':0.8,#SGD will use this percentage of data
         'lambda': 4, # L2 regularization term, >1 more conservative
         'colsample_bytree': 0.9,
'colsample_bylevel':1,
'min_child_weight': 10,
'nthread' :3} #number of cpu core to use
#%% split training set to validation set
Xtrain, Xval, Ztrain, Zval = train_test_split(X, z, test_size=0.2, random_state=1)
#Xcv,Xv,Zcv,Zv = train_test_split(Xval, Zval, test_size=0.5, random_state=1)
data_tr = xgb.DMatrix(Xtrain, label=Ztrain)
data_val = xgb.DMatrix(Xval , label=Zval)
evallist = [(data_tr, 'train'), (data_val, 'valid')]
model = xgb.train(parms, data_tr, num_boost_round=881, evals = evallist,
early_stopping_rounds=30, maximize=False,
verbose_eval=100)
print('score = %1.5f, n_boost_round =%d.'%(model.best_score,model.best_iteration))
#%% training all the data
data_train = xgb.DMatrix(X, label=z)
evallist = [(data_train, 'train')]
model = xgb.train(parms, data_train, num_boost_round=880, evals = evallist,
maximize=False,
verbose_eval=100)
#%%
#%%
ztest = model.predict(data_test)
#%%
ytest = np.exp(ztest)-1
submission = pd.DataFrame({'id': test.id, 'trip_duration': ytest})
submission.to_csv('submission_1.csv', index=False)
#%%
with open('filename.pickle', 'rb') as handle:
b = pickle.load(handle)
#%%
for d in (mydf,testdf):
print(d.Temp.mean())
#%%
print('Id is unique.') if train.id.nunique() == train.shape[0] else print('oops')
print('Train and test sets are distinct.') if len(np.intersect1d(train.id.values, test.id.values))== 0 else print('oops')
print('We do not need to worry about missing values.') if train.count().min() == train.shape[0] and test.count().min() == test.shape[0] else print('oops')
print('The store_and_fwd_flag has only two values {}.'.format(str(set(train.store_and_fwd_flag.unique()) | set(test.store_and_fwd_flag.unique()))))
#%% Kmeans
from sklearn.cluster import MiniBatchKMeans
coords = np.vstack((mydf[['pickup_latitude', 'pickup_longitude']].values,
mydf[['dropoff_latitude', 'dropoff_longitude']].values,
testdf[['pickup_latitude', 'pickup_longitude']].values,
testdf[['dropoff_latitude', 'dropoff_longitude']].values))
sample_ind = np.random.permutation(len(coords))[:500000]
kmeans = MiniBatchKMeans(n_clusters=20, batch_size=10000).fit(coords[sample_ind])
for df in (mydf,testdf):
df.loc[:, 'pickup_loc'] = kmeans.predict(df[['pickup_latitude', 'pickup_longitude']])
df.loc[:, 'dropoff_loc'] = kmeans.predict(df[['dropoff_latitude', 'dropoff_longitude']])
#%%
train_loc = [None]*2;test_loc=[None]*2
for i,loc in enumerate(['pickup_loc','dropoff_loc']):
train_loc[i]= pd.get_dummies(mydf[loc], prefix=loc, prefix_sep='_')
test_loc[i] = pd.get_dummies(testdf[loc], prefix=loc, prefix_sep='_')
train_loc = pd.concat(train_loc,axis=1)
test_loc = pd.concat(test_loc,axis=1)
#%%
mydf1 = pd.concat([mydf,train_loc],axis = 1)
testdf1 = pd.concat([testdf,test_loc],axis = 1)
#%%
mydf1 = mydf1[mydf1['outliers']==False]
mydf1 = mydf1.drop(['id','outliers'],axis=1)
z = mydf1.log_trip_duration
X = mydf1.drop(['log_trip_duration'],axis=1)
Xtest = testdf1.drop('id',axis=1)
#%%
X = X.drop(['pickup_loc','dropoff_loc'],axis=1)
#%%
Xtest=Xtest.drop(['pickup_loc','dropoff_loc'],axis=1) | 2.34375 | 2 |
CNIC-C/detector.py | CSnode/Multimodal-Captioning | 2 | 12796047 | # -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import cPickle
import ipdb
class Detector():
def __init__(self,weight_file_path,n_labels):
self.image_mean=[103.939,116.779,123.68]
self.n_labels=n_labels
with open(weight_file_path)as f:
self.pretrained_weights=cPickle.load(f)
def get_weight(self,layer_name):
layer=self.pretrained_weights[layer_name]
return layer[0]
def get_bias(self,layer_name):
layer=self.pretrained_weights[layer_name]
return layer[1]
def get_conv_weight(self,name):
f=self.get_weight(name)
return f.transpose((2,3,1,0))
def conv_layer(self,bottom,name):
with tf.variable_scope(name)as scope:
w=self.get_conv_weight(name)
b=self.get_bias(name)
conv_weights=tf.get_variable("W",shape=w.shape,initializer=tf.constant_initializer(w))
conv_biases=tf.get_variable("b",shape=b.shape,initializer=tf.constant_initializer(b))
conv=tf.nn.conv2d(bottom,conv_weights,[1,1,1,1],padding='SAME')
bias=tf.nn.bias_add(conv,conv_biases)
relu=tf.nn.relu(bias,name=name)
return relu
def new_conv_layer(self,bottom,filter_shape,name):
with tf.variable_scope(name)as scope:
w=tf.get_variable("W",shape=filter_shape,initializer=tf.random_normal_initializer(0.,0.01))
b=tf.get_variable("b",shape=filter_shape[-1],initializer=tf.constant_initializer(0.))
conv=tf.nn.conv2d(bottom,w,[1,1,1,1],padding='SAME')
bias=tf.nn.bias_add(conv,b)
return bias
def fc_layer(self,bottom,name,create=False):
shape=bottom.get_shape().as_list()
dim=np.prod(shape[1:])
x=tf.reshape(bottom,[-1,dim])
cw=self.get_weight(name)
b=self.get_bias(name)
if name=="fc6":
cw=cw.reshape((4096,512,7,7))
cw=cw.transpose((2,3,1,0))
cw=cw.reshape((25088,4096))
else:
cw=cw.transpose((1,0))
with tf.variable_scope(name)as scope:
cw=tf.get_variable("W",shape=cw.shape,initializer=tf.constant_initializer(cw))
b=tf.get_variable("b",shape=b.shape,initializer=tf.constant_initializer(b))
fc=tf.nn.bias_add(tf.matmul(x,cw),b,name=scope)
return fc
def new_fc_layer(self,bottom,input_size,output_size,name):
        shape=bottom.get_shape().as_list()
dim=np.prod(shape[1:])
x=tf.reshape(bottom,[-1,dim])
with tf.variable_scope(name)as scope:
w=tf.get_variable("W",shape=[input_size,output_size],initializer=tf.random_normal_initializer(0.,0.01))
b=tf.get_variable("b",shape=[output_size],initializer=tf.constant_initializer(0.))
fc=tf.nn.bias_add(tf.matmul(x,w),b,name=scope)
return fc
def inference(self,rgb,train=False):
rgb*=255.
r,g,b=tf.split(rgb,3,3)
bgr=tf.concat([b-self.image_mean[0],g-self.image_mean[1],r-self.image_mean[2]],3)
relu1_1=self.conv_layer(bgr,"conv1_1")
relu1_2=self.conv_layer(relu1_1,"conv1_2")
pool1=tf.nn.max_pool(relu1_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool1')
relu2_1=self.conv_layer(pool1,"conv2_1")
relu2_2=self.conv_layer(relu2_1,"conv2_2")
pool2=tf.nn.max_pool(relu2_2,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool2')
relu3_1=self.conv_layer(pool2,"conv3_1")
relu3_2=self.conv_layer(relu3_1,"conv3_2")
relu3_3=self.conv_layer(relu3_2,"conv3_3")
pool3=tf.nn.max_pool(relu3_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool3')
relu4_1=self.conv_layer(pool3,"conv4_1")
relu4_2=self.conv_layer(relu4_1,"conv4_2")
relu4_3=self.conv_layer(relu4_2,"conv4_3")
pool4=tf.nn.max_pool(relu4_3,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name='pool4')
relu5_1=self.conv_layer(pool4,"conv5_1")
relu5_2=self.conv_layer(relu5_1,"conv5_2")
relu5_3=self.conv_layer(relu5_2,"conv5_3")
conv6=self.new_conv_layer(relu5_3,[3,3,512,1024],"conv6")
gap=tf.reduce_mean(conv6,[1,2])
with tf.variable_scope("GAP"):
gap_w=tf.get_variable("W",shape=[1024,self.n_labels],initializer=tf.random_normal_initializer(0.,0.01))
output=tf.matmul(gap,gap_w)
return pool1,pool2,pool3,pool4,relu5_3,conv6,gap,output
def get_classmap(self,label,conv6):
conv6_resized=tf.image.resize_bilinear(conv6,[224,224])
with tf.variable_scope("GAP",reuse=True):
label_w=tf.gather(tf.transpose(tf.get_variable("W")),label)
label_w=tf.reshape(label_w,[-1,1024,1])
conv6_resized=tf.reshape(conv6_resized,[-1,224*224,1024])
classmap=tf.matmul(conv6_resized,label_w)
classmap=tf.reshape(classmap,[-1,224,224])
return classmap
| 2.453125 | 2 |
analysis/linearprobe.py | paulgavrikov/torchbox | 0 | 12796048 | <reponame>paulgavrikov/torchbox
import torch
import torchmetrics
from torch.nn import Flatten, LazyLinear, Softmax
class LinearProbe:
    """Fit a linear classifier (probe) on the activations of a single layer of a frozen model."""

    def __init__(self, device="cpu", verbose=False):
        self.device = device
        self.max_epochs = 10
        self.verbose = verbose
        self.rep = None  # last activation captured by the forward hook

    def _hook(self, module, inp, out):
        # forward hook: remember the output of the probed layer
        self.rep = out

    def fit_all(self):
        pass

    def _fit_probe(self, model, probe, train_loader, optimizer, criterion):
        for epoch in range(self.max_epochs):
            total_loss = 0
            for batch_idx, (inputs, targets) in enumerate(train_loader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                with torch.no_grad():
                    _ = model(inputs)  # fills self.rep via the registered hook
                optimizer.zero_grad()
                outputs = probe(self.rep)
                loss = criterion(outputs, targets)
                total_loss += loss.item()
                loss.backward()
                optimizer.step()
            if self.verbose:
                print(f"epoch: {epoch}/{self.max_epochs} train-loss: {total_loss / len(train_loader)}")

    def fit_layer(self, model, trainloader, valloader, classes, layer,
                  criterion=torch.nn.CrossEntropyLoss(), optimizer=torch.optim.Adam):
        model.eval()

        # define the probe: flatten the layer activation and map it to the class scores
        probe = torch.nn.Sequential(
            torch.nn.Flatten(),
            torch.nn.LazyLinear(classes),
            torch.nn.Softmax(dim=1)
        ).to(self.device)

        criterion = criterion.to(self.device)
        optimizer = optimizer(probe.parameters(), lr=1e-4)

        # register hook on the requested layer (and clear any stale hooks first)
        for name, module in model.named_modules():
            module._forward_hooks.clear()
        handle = layer.register_forward_hook(self._hook)

        # dequeue trainloader through the model and train the probe
        self._fit_probe(model, probe, trainloader, optimizer, criterion)

        # dequeue valloader through the probe and measure loss / acc
        probe.eval()
        acc = torchmetrics.Accuracy().to(self.device)
        total_loss = 0
        with torch.no_grad():
            for batch_idx, (inputs, targets) in enumerate(valloader):
                inputs, targets = inputs.to(self.device), targets.to(self.device)
                _ = model(inputs)
                outputs = probe(self.rep)
                loss = criterion(outputs, targets)
                acc(outputs, targets)
                total_loss += loss.item()

        handle.remove()
        return total_loss / len(valloader), acc.compute().item()
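# Illustrative usage sketch (the model, loaders, and layer below are placeholders, not part of this repo):
#   probe = LinearProbe(device="cuda", verbose=True)
#   val_loss, val_acc = probe.fit_layer(model, train_loader, val_loader,
#                                       classes=10, layer=model.layer4)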
| 2.5 | 2 |
problems/reversing/obfuscatedPython1/debug/obfuscatedPython1_explained.py | wanqizhu/treectf | 1 | 12796049 | <reponame>wanqizhu/treectf<gh_stars>1-10
""" woo pretty code!!!
"description": "There are two characters wrong with the code. What's that pretty face doing?"
treeCTF{110_105_99_101}
Actually though, the code may look fancy, but all it's doing is some index shuffling on an array of characters
but instead of returning the result (j), line 7 is
return; j[:why:-1]
Note the semicolon, which acts as a statement separator (the same as a line break), so j[:why:-1] is never executed
Removing all the spaces gives the answer
['n', 'i', 'c', 'e']
Also, when we define x, we misspelled 'eval'
x=evl(...) should be eval(...)
"""
def sup(x,why):
j=[]
for i in x:
j.append(x[int((x.index(i)^ord(i))*1.337)%len(x)])
return; j[:why:-1] # remove the semicolon to return the answer
# all x is, is ['i', 'm', 'a', 'e', 'f', 'n', 'e', 'c']
# run through github.com/wanqizhu/pyfuck
x=['i','m','an'[0],"""emotional"""[0],'friend'[0],"""hellotherehowareyoudoingthisisanicefoxfenn"""[-1],'e','c'];
# this just calls the function sup(x, 3)
sup(x,3) | 2.859375 | 3 |
utilities/utility.py | kwanj-k/flask_sm | 0 | 12796050 | <reponame>kwanj-k/flask_sm<filename>utilities/utility.py
from helpers.database import db_session
class Utility(object):
def save(self):
"""Function for saving new objects"""
db_session.add(self)
db_session.commit()
def delete(self):
"""Function for deleting objects"""
db_session.delete(self)
db_session.commit()
def update_entity_fields(entity, **kwargs):
    """Update arbitrary attributes on an entity from keyword arguments."""
    for key, value in kwargs.items():
        setattr(entity, key, value)
return entity
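# Hypothetical usage sketch (the model and field names are placeholders, not from this repo):
#   store = update_entity_fields(store, name="Downtown branch", location="Nairobi")
#   store.save()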
| 2.6875 | 3 |