code (string, lengths 22 to 1.05M) | apis (list, lengths 1 to 3.31k) | extract_api (string, lengths 75 to 3.25M)
---|---|---|
import Deck
class Blackjack(object):
    def __init__(self):
        self.start()
    def start(self):
        deck = Deck.Deck(1)
        deck.display_decks()
# Note: __init__ already calls start(), so this line runs the demo a second time.
Blackjack().start()
|
[
"Deck.Deck"
] |
[((122, 134), 'Deck.Deck', 'Deck.Deck', (['(1)'], {}), '(1)\n', (131, 134), False, 'import Deck\n')]
|
import sqlalchemy.orm
import xml.etree.ElementTree
from ietf.sql.rfc import (Abstract, Author, FileFormat, IsAlso, Keyword,
ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream,
UpdatedBy, Updates,)
import ietf.xml.parse as parse
def _add_keyword(session: sqlalchemy.orm.session.Session,
word: str,
) -> Keyword:
"""Create Keyword instances without violating uniqueness restraint."""
keyword = session.query(Keyword).filter(Keyword.word == word).one_or_none()
if keyword is None:
keyword = Keyword(word)
session.add(keyword)
return keyword
def add_all(session: sqlalchemy.orm.session.Session,
root: xml.etree.ElementTree.Element):
"""Add all RFC entries from XML `root` to sqlalchemy `session`."""
entries = parse.findall(root, 'rfc-entry')
for entry in entries:
doc_id = parse.find_doc_id(entry)
title = parse.find_title(entry)
authors = parse.find_author(entry)
year, month, day = parse.find_date(entry)
formats = parse.find_format(entry)
keywords = parse.find_keywords(entry)
abstract_pars = parse.find_abstract(entry)
draft = parse.find_draft(entry)
notes = parse.find_notes(entry)
obsoletes = parse.find_obsoletes(entry)
obsoleted_by = parse.find_obsoleted_by(entry)
updates = parse.find_updates(entry)
updated_by = parse.find_updated_by(entry)
is_also = parse.find_is_also(entry)
see_also = parse.find_see_also(entry)
cur_status = parse.find_current_status(entry)
pub_status = parse.find_publication_status(entry)
streams = parse.find_stream(entry)
area = parse.find_area(entry)
wg = parse.find_wg_acronym(entry)
errata = parse.find_errata_url(entry)
doi = parse.find_doi(entry)
rfc = Rfc(
# Create the Rfc object with its single-column values set
id=doc_id,
title=title,
date_year=year, date_month=month, date_day=day,
draft=draft,
notes=notes,
current_status=cur_status,
publication_status=pub_status,
area=area,
wg_acronym=wg,
errata_url=errata,
doi=doi,
)
for author in authors:
# Add authors to rfc
rfc.authors.append(Author(name=author['name'],
title=author['title'],
organization=author['organization'],
org_abbrev=author['org_abbrev']))
for entry in formats:
# Add formats to rfc
filetype, char_count, page_count = entry
rfc.formats.append(FileFormat(filetype=filetype,
char_count=char_count,
page_count=page_count))
for word in keywords:
# Add keywords to rfc
keyword = _add_keyword(session, word)
rfc.keywords.append(keyword)
for par in abstract_pars:
# Add abstract to rfc
rfc.abstract.append(Abstract(par=par))
for doc in obsoletes:
# Add obsoletes to rfc
doc_type, doc_id = doc
rfc.obsoletes.append(Obsoletes(doc_id=doc_id, doc_type=doc_type))
for doc in obsoleted_by:
# Add obsoleted_by to rfc
doc_type, doc_id = doc
rfc.obsoleted_by.append(ObsoletedBy(doc_id=doc_id,
doc_type=doc_type))
for doc in updates:
# Add updates to rfc
doc_type, doc_id = doc
rfc.updates.append(Updates(doc_id=doc_id, doc_type=doc_type))
for doc in updated_by:
# Add updated_by to rfc
doc_type, doc_id = doc
rfc.updated_by.append(UpdatedBy(doc_id=doc_id, doc_type=doc_type))
for doc in is_also:
# Add is_also to rfc
doc_type, doc_id = doc
rfc.is_also.append(IsAlso(doc_id=doc_id, doc_type=doc_type))
for doc in see_also:
# Add see_also to rfc
doc_type, doc_id = doc
rfc.see_also.append(SeeAlso(doc_id=doc_id, doc_type=doc_type))
for value in streams:
# Add stream to rfc
rfc.stream.append(Stream(value))
session.add(rfc)
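# Illustrative only (not in the original module): one way add_all might be
# driven; the engine URL and index filename below are assumptions.
#
# import xml.etree.ElementTree as ET
# from sqlalchemy import create_engine
# from sqlalchemy.orm import sessionmaker
#
# engine = create_engine('sqlite:///rfc.db')
# session = sessionmaker(bind=engine)()
# root = ET.parse('rfc-index.xml').getroot()
# add_all(session, root)
# session.commit()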
|
[
"ietf.xml.parse.find_obsoleted_by",
"ietf.xml.parse.find_see_also",
"ietf.xml.parse.find_obsoletes",
"ietf.xml.parse.find_current_status",
"ietf.sql.rfc.Stream",
"ietf.sql.rfc.FileFormat",
"ietf.sql.rfc.Rfc",
"ietf.sql.rfc.IsAlso",
"ietf.sql.rfc.Author",
"ietf.xml.parse.find_format",
"ietf.xml.parse.find_errata_url",
"ietf.xml.parse.find_updates",
"ietf.sql.rfc.SeeAlso",
"ietf.xml.parse.find_draft",
"ietf.xml.parse.find_publication_status",
"ietf.xml.parse.find_title",
"ietf.xml.parse.find_date",
"ietf.xml.parse.find_doc_id",
"ietf.xml.parse.find_area",
"ietf.xml.parse.find_doi",
"ietf.xml.parse.find_is_also",
"ietf.xml.parse.find_keywords",
"ietf.xml.parse.findall",
"ietf.xml.parse.find_stream",
"ietf.sql.rfc.UpdatedBy",
"ietf.sql.rfc.Updates",
"ietf.sql.rfc.Obsoletes",
"ietf.xml.parse.find_notes",
"ietf.xml.parse.find_wg_acronym",
"ietf.sql.rfc.ObsoletedBy",
"ietf.xml.parse.find_updated_by",
"ietf.sql.rfc.Abstract",
"ietf.xml.parse.find_author",
"ietf.xml.parse.find_abstract",
"ietf.sql.rfc.Keyword"
] |
[((843, 875), 'ietf.xml.parse.findall', 'parse.findall', (['root', '"""rfc-entry"""'], {}), "(root, 'rfc-entry')\n", (856, 875), True, 'import ietf.xml.parse as parse\n'), ((590, 603), 'ietf.sql.rfc.Keyword', 'Keyword', (['word'], {}), '(word)\n', (597, 603), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((919, 943), 'ietf.xml.parse.find_doc_id', 'parse.find_doc_id', (['entry'], {}), '(entry)\n', (936, 943), True, 'import ietf.xml.parse as parse\n'), ((960, 983), 'ietf.xml.parse.find_title', 'parse.find_title', (['entry'], {}), '(entry)\n', (976, 983), True, 'import ietf.xml.parse as parse\n'), ((1002, 1026), 'ietf.xml.parse.find_author', 'parse.find_author', (['entry'], {}), '(entry)\n', (1019, 1026), True, 'import ietf.xml.parse as parse\n'), ((1054, 1076), 'ietf.xml.parse.find_date', 'parse.find_date', (['entry'], {}), '(entry)\n', (1069, 1076), True, 'import ietf.xml.parse as parse\n'), ((1095, 1119), 'ietf.xml.parse.find_format', 'parse.find_format', (['entry'], {}), '(entry)\n', (1112, 1119), True, 'import ietf.xml.parse as parse\n'), ((1139, 1165), 'ietf.xml.parse.find_keywords', 'parse.find_keywords', (['entry'], {}), '(entry)\n', (1158, 1165), True, 'import ietf.xml.parse as parse\n'), ((1190, 1216), 'ietf.xml.parse.find_abstract', 'parse.find_abstract', (['entry'], {}), '(entry)\n', (1209, 1216), True, 'import ietf.xml.parse as parse\n'), ((1233, 1256), 'ietf.xml.parse.find_draft', 'parse.find_draft', (['entry'], {}), '(entry)\n', (1249, 1256), True, 'import ietf.xml.parse as parse\n'), ((1273, 1296), 'ietf.xml.parse.find_notes', 'parse.find_notes', (['entry'], {}), '(entry)\n', (1289, 1296), True, 'import ietf.xml.parse as parse\n'), ((1317, 1344), 'ietf.xml.parse.find_obsoletes', 'parse.find_obsoletes', (['entry'], {}), '(entry)\n', (1337, 1344), True, 'import ietf.xml.parse as parse\n'), ((1368, 1398), 'ietf.xml.parse.find_obsoleted_by', 'parse.find_obsoleted_by', (['entry'], {}), '(entry)\n', (1391, 1398), True, 'import ietf.xml.parse as parse\n'), ((1417, 1442), 'ietf.xml.parse.find_updates', 'parse.find_updates', (['entry'], {}), '(entry)\n', (1435, 1442), True, 'import ietf.xml.parse as parse\n'), ((1464, 1492), 'ietf.xml.parse.find_updated_by', 'parse.find_updated_by', (['entry'], {}), '(entry)\n', (1485, 1492), True, 'import ietf.xml.parse as parse\n'), ((1511, 1536), 'ietf.xml.parse.find_is_also', 'parse.find_is_also', (['entry'], {}), '(entry)\n', (1529, 1536), True, 'import ietf.xml.parse as parse\n'), ((1556, 1582), 'ietf.xml.parse.find_see_also', 'parse.find_see_also', (['entry'], {}), '(entry)\n', (1575, 1582), True, 'import ietf.xml.parse as parse\n'), ((1604, 1636), 'ietf.xml.parse.find_current_status', 'parse.find_current_status', (['entry'], {}), '(entry)\n', (1629, 1636), True, 'import ietf.xml.parse as parse\n'), ((1658, 1694), 'ietf.xml.parse.find_publication_status', 'parse.find_publication_status', (['entry'], {}), '(entry)\n', (1687, 1694), True, 'import ietf.xml.parse as parse\n'), ((1713, 1737), 'ietf.xml.parse.find_stream', 'parse.find_stream', (['entry'], {}), '(entry)\n', (1730, 1737), True, 'import ietf.xml.parse as parse\n'), ((1753, 1775), 'ietf.xml.parse.find_area', 'parse.find_area', (['entry'], {}), '(entry)\n', (1768, 1775), True, 'import ietf.xml.parse as parse\n'), ((1789, 1817), 'ietf.xml.parse.find_wg_acronym', 'parse.find_wg_acronym', (['entry'], {}), '(entry)\n', (1810, 1817), True, 'import ietf.xml.parse as parse\n'), ((1835, 1863), 
'ietf.xml.parse.find_errata_url', 'parse.find_errata_url', (['entry'], {}), '(entry)\n', (1856, 1863), True, 'import ietf.xml.parse as parse\n'), ((1878, 1899), 'ietf.xml.parse.find_doi', 'parse.find_doi', (['entry'], {}), '(entry)\n', (1892, 1899), True, 'import ietf.xml.parse as parse\n'), ((1915, 2137), 'ietf.sql.rfc.Rfc', 'Rfc', ([], {'id': 'doc_id', 'title': 'title', 'date_year': 'year', 'date_month': 'month', 'date_day': 'day', 'draft': 'draft', 'notes': 'notes', 'current_status': 'cur_status', 'publication_status': 'pub_status', 'area': 'area', 'wg_acronym': 'wg', 'errata_url': 'errata', 'doi': 'doi'}), '(id=doc_id, title=title, date_year=year, date_month=month, date_day=day,\n draft=draft, notes=notes, current_status=cur_status, publication_status\n =pub_status, area=area, wg_acronym=wg, errata_url=errata, doi=doi)\n', (1918, 2137), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((2437, 2562), 'ietf.sql.rfc.Author', 'Author', ([], {'name': "author['name']", 'title': "author['title']", 'organization': "author['organization']", 'org_abbrev': "author['org_abbrev']"}), "(name=author['name'], title=author['title'], organization=author[\n 'organization'], org_abbrev=author['org_abbrev'])\n", (2443, 2562), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((2820, 2895), 'ietf.sql.rfc.FileFormat', 'FileFormat', ([], {'filetype': 'filetype', 'char_count': 'char_count', 'page_count': 'page_count'}), '(filetype=filetype, char_count=char_count, page_count=page_count)\n', (2830, 2895), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((3236, 3253), 'ietf.sql.rfc.Abstract', 'Abstract', ([], {'par': 'par'}), '(par=par)\n', (3244, 3253), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((3388, 3431), 'ietf.sql.rfc.Obsoletes', 'Obsoletes', ([], {'doc_id': 'doc_id', 'doc_type': 'doc_type'}), '(doc_id=doc_id, doc_type=doc_type)\n', (3397, 3431), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((3575, 3620), 'ietf.sql.rfc.ObsoletedBy', 'ObsoletedBy', ([], {'doc_id': 'doc_id', 'doc_type': 'doc_type'}), '(doc_id=doc_id, doc_type=doc_type)\n', (3586, 3620), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((3797, 3838), 'ietf.sql.rfc.Updates', 'Updates', ([], {'doc_id': 'doc_id', 'doc_type': 'doc_type'}), '(doc_id=doc_id, doc_type=doc_type)\n', (3804, 3838), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((3976, 4019), 'ietf.sql.rfc.UpdatedBy', 'UpdatedBy', ([], {'doc_id': 'doc_id', 'doc_type': 'doc_type'}), '(doc_id=doc_id, doc_type=doc_type)\n', (3985, 4019), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((4148, 4188), 'ietf.sql.rfc.IsAlso', 'IsAlso', ([], {'doc_id': 'doc_id', 'doc_type': 'doc_type'}), '(doc_id=doc_id, doc_type=doc_type)\n', (4154, 4188), False, 'from ietf.sql.rfc import Abstract, 
Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((4320, 4361), 'ietf.sql.rfc.SeeAlso', 'SeeAlso', ([], {'doc_id': 'doc_id', 'doc_type': 'doc_type'}), '(doc_id=doc_id, doc_type=doc_type)\n', (4327, 4361), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n'), ((4455, 4468), 'ietf.sql.rfc.Stream', 'Stream', (['value'], {}), '(value)\n', (4461, 4468), False, 'from ietf.sql.rfc import Abstract, Author, FileFormat, IsAlso, Keyword, ObsoletedBy, Obsoletes, Rfc, SeeAlso, Stream, UpdatedBy, Updates\n')]
|
import re
import pickle
with open('words2num_dict.pickle', 'rb') as handle:
words2num = pickle.load(handle)
num2words = {v: k for k, v in words2num.items()}
freq_raw = []
with open('output.txt') as f:
for i in range(31):
f.readline()
for line in f:
        line = re.sub(r'\[\w+\]', '', line)
line = re.sub('\n', '', line)
line = re.sub('{', '', line)
line = re.sub('}', '', line)
freq_raw.append(line.strip())
num_word_list = []
freq_list = []
for i in range(len(freq_raw)):
if freq_raw[i] != '':
num_list = freq_raw[i].split(' ')
#print(num_list)
for j in range(len(num_list)-2):
num_list[j] = int(num_list[j])
num_list[-1] = int(num_list[-1])
#print(num_list[-1])
num_word_list.append(num_list[:-2])
freq_list.append(num_list[-1])
words_tuple = []
for i in range(len(num_word_list)):
words = []
for j in range(len(num_word_list[i])):
if(num_word_list[i][j] != len(num2words)+5):
words.append(num2words[num_word_list[i][j]].strip())
words_tuple.append(tuple((words, freq_list[i])))
'''
for i in words_tuple:
print(i)
'''
with open('words_tuple.pickle', 'wb') as handle:
pickle.dump(words_tuple, handle)
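# Illustrative only (not in the original script): reading the dump back.
# with open('words_tuple.pickle', 'rb') as handle:
#     words_tuple = pickle.load(handle)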
|
[
"pickle.dump",
"pickle.load",
"re.sub"
] |
[((97, 116), 'pickle.load', 'pickle.load', (['handle'], {}), '(handle)\n', (108, 116), False, 'import pickle\n'), ((1190, 1222), 'pickle.dump', 'pickle.dump', (['words_tuple', 'handle'], {}), '(words_tuple, handle)\n', (1201, 1222), False, 'import pickle\n'), ((299, 329), 're.sub', 're.sub', (['"""\\\\[\\\\w+\\\\]"""', '""""""', 'line'], {}), "('\\\\[\\\\w+\\\\]', '', line)\n", (305, 329), False, 'import re\n'), ((340, 362), 're.sub', 're.sub', (['"""\n"""', '""""""', 'line'], {}), "('\\n', '', line)\n", (346, 362), False, 'import re\n'), ((376, 397), 're.sub', 're.sub', (['"""{"""', '""""""', 'line'], {}), "('{', '', line)\n", (382, 397), False, 'import re\n'), ((411, 432), 're.sub', 're.sub', (['"""}"""', '""""""', 'line'], {}), "('}', '', line)\n", (417, 432), False, 'import re\n')]
|
import os
from pathlib import Path
import numpy as np
import pandas as pd
import spacy
from spacy.compat import pickle
import lz4.frame
from tqdm import tqdm
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping
from ehr_classification.tokenizer import get_features, get_custom_tokenizer
from ehr_classification.classifier_model import compile_lstm
def run_multiple_models(df,
features,
weights,
word_vectors,
max_note_length=2000,
batch_size=64,
gpu_device='0'
):
    '''
    Run one model per target in `weights` and add prediction columns to `df`.
    :param df: DataFrame that receives `<target>_predictions` and `<target>_raw` columns
    :param features: feature array fed to the model (see get_features)
    :param weights: dict mapping target name to a weights file path
    :param word_vectors: word vectors used to build the tokenizer
    :param max_note_length: maximum note length in tokens
    :param batch_size: prediction batch size
    :param gpu_device: CUDA device to run on
    :return: df with the added prediction columns
    '''
# use specified gpu device
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_device)
nlp = get_custom_tokenizer(word_vectors)
embeddings = nlp.vocab.vectors.data
model = compile_lstm(embeddings,
{'nr_hidden': 64, 'max_length': max_note_length, 'nr_class': 4},
{'dropout': 0.5, 'lr': 0.0001})
for target, weight in tqdm(list(weights.items())):
model.load_weights(weight)
print(f'Predicting {target}.')
predictions = model.predict(features, batch_size=batch_size, verbose=True)
print(f'Done predicting {target}.')
df[(target + '_predictions')] = predictions[0]
df[(target + '_raw')] = predictions[1]
return df
def run_multiple_models_pickle(infile,
outfile,
word_vectors,
overwrite=False,
**kwargs
):
# only run when not already there
outfile = Path(outfile)
if not outfile.exists() or overwrite:
outfile.touch()
from .utils import lz4_load
data_dict = lz4_load(infile)
predictions = run_multiple_models(df=data_dict['meta'],
features=data_dict['data'],
word_vectors=word_vectors,
**kwargs)
print('Writing to file')
predictions.to_parquet(outfile)
print('Done writing to file')
def run_multiple_models_parquet(infile,
outfile,
word_vectors,
note_column='NoteTXT',
max_note_length=2000,
**kwargs
):
def select_rows(df): # Remove rows with empty note text
df = pd.DataFrame(df.loc[df[note_column].notnull()])
return df
eval_data = pd.read_parquet(infile)
lz4_file = infile.replace('.parquet', '.pic.lz4')
if Path(lz4_file).exists():
print('Loading features')
with lz4.frame.open(lz4_file, mode='r') as f:
            eval_features = pickle.load(f)
else:
print('Extracting tokens')
tokenizer = get_custom_tokenizer(word_vectors)
note_texts = eval_data[note_column]
tokens = list(tokenizer.pipe(note_texts))
print('Extracting features')
eval_features = get_features(tokens, max_note_length)
eval_data = select_rows(eval_data)
eval_data = run_multiple_models(df=eval_data,
features=eval_features,
word_vectors=word_vectors,
**kwargs)
print('Writing to file')
eval_data.to_parquet(outfile)
print('Done writing to file')
def run_current_models(infile, outfile, classifier_type, input_type='parquet', **kwargs):
# use models and vectors path from environment (or use defaults)
models_path = os.getenv("PREDICT_EHR_MODELS")
if not models_path:
models_path = '/mnt/obi0/phi/ehr/models/'
vectors_path = os.getenv("PREDICT_EHR_VECTORS")
if not vectors_path:
vectors_path = '/mnt/obi0/phi/ehr/word_vectors/filtered_20-05-23.bigram'
if classifier_type == 'event':
weights = {
'Event_PCI': f'{models_path}/Events/PCI/LSTM_CNN_BEST_model.hdf5',
'Event_ACS': f'{models_path}/Events/ACS/LSTM_CNN_BEST_model.hdf5',
'Event_HF': f'{models_path}/Events/HF/LSTM_CNN_BEST_model.hdf5',
'Event_IS': f'{models_path}/Events/IS/LSTM_CNN_BEST_model.hdf5'
}
elif classifier_type == 'history':
weights = {
'History_CAD': f'{models_path}/History/CAD/LSTM_CNN_BEST_model.hdf5',
'History_CAD_UI': f'{models_path}/History/CAD_UI/LSTM_CNN_BEST_model.hdf5',
'History_HF': f'{models_path}/History/HF/LSTM_CNN_BEST_model.hdf5',
'History_HF_UI': f'{models_path}/History/HF_UI/LSTM_CNN_BEST_model.hdf5',
}
else:
raise NotImplementedError
print(f'Predicting using weights: {weights}')
if input_type == 'parquet':
run_multiple_models_parquet(infile=infile,
outfile=outfile,
weights=weights,
word_vectors=vectors_path,
**kwargs)
elif input_type == 'pickle':
run_multiple_models_pickle(infile=infile,
outfile=outfile,
weights=weights,
word_vectors=vectors_path,
**kwargs)
def predict(output_directory,
classifier_type: ('note classifier, `event` or `history`', 'option', 't') = 'event',
gpu: ('gpu to use', 'option', 'g') = 0,
gpu_offset: ('subtract gpu offset', 'option', 's') = 0,
input_type: ('input type, can be `parquet` or `pickle`', 'option', 'i') = 'parquet',
*file_names):
"""Takes one or more parquet files and writes tokenized text to output file.
# set environment variables for models and word vectors
export PREDICT_EHR_VECTORS=en_core_web_lg
export PREDICT_EHR_MODELS=PATH/TO/MODELS
# run predictions for events on one or more parquet files
predict_ehr -t event out_dir text1.parquet
predict_ehr -t event out_dir text1.parquet text2.parquet text3.parquet
# run on multiple files in parallel with 4 gpus, using text that has been tokenized before:
'parallel -j 4 predict_ehr . -t event -g {%} -s 1 -i pickle {} ::: *.pic.lz4'
'parallel -j 4 predict_ehr . -t history -g {%} -s 1 -i pickle {} ::: *.pic.lz4'
"""
print(f'Predicting with the following input files: {file_names}')
for infile in file_names:
input_file = Path(infile)
assert Path(output_directory).exists()
output_file = Path(output_directory) / (input_file.name + '.predictions.pq')
print('Processing', infile)
run_current_models(infile,
str(output_file),
classifier_type=classifier_type,
gpu_device=int(gpu) - int(gpu_offset),
input_type=input_type)
def predict_():
"""Entry point for console_scripts
"""
import plac
plac.call(predict)
def train_model(train_texts,
train_labels,
validation_texts,
validation_labels,
model_name,
output_path='.',
max_note_length=2000,
learning_rate=0.0001,
epochs=150,
batch_size=64,
gpu_device='0',
save_best_only=True,
**kwargs):
"""
Train a model with train_texts and train_labels and validate on validation_texts and validation_labels.
train_texts: array of notes to be used for model training.
train_labels: a binary label to be used for training. The index should correspond to the train_texts
"""
# use specified gpu device
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_device)
# use word vectors from environment variable (or defaults)
vectors_path = os.getenv("PREDICT_EHR_VECTORS")
if not vectors_path:
vectors_path = '/mnt/obi0/phi/ehr/word_vectors/filtered_20-05-23.bigram'
nlp = get_custom_tokenizer(vectors_path)
embeddings = nlp.vocab.vectors.data
print('Parsing texts...')
train_docs = list(nlp.pipe(train_texts, batch_size=2000))
validation_docs = list(nlp.pipe(validation_texts, batch_size=2000))
train_x = get_features(train_docs, max_note_length)
validation_x = get_features(validation_docs, max_note_length)
    # the model has two output heads, so labels are duplicated for each
    train_labels = [train_labels, train_labels]
    validation_labels = [validation_labels, validation_labels]
model = compile_lstm(embeddings, {'max_length': max_note_length}, {'lr': learning_rate})
# define callbacks
checkpoint_file = model_name + '_{epoch:02d}-{val_loss:.2f}.hdf5'
checkpoint_path = os.path.join(output_path, 'checkpoints', checkpoint_file)
print(f'Saving checkpoints to {checkpoint_path}')
checkpoint_callback = ModelCheckpoint(
filepath=checkpoint_path,
monitor='val_loss', save_best_only=save_best_only, save_weights_only=True
)
tensorboard_path = os.path.join(output_path, 'tensorboard', model_name)
print(f'Writing tensorboard output to {tensorboard_path}')
tensorboard_callback = TensorBoard(
log_dir=tensorboard_path,
write_graph=False, profile_batch=0
)
early_stopping_callback = EarlyStopping(monitor='val_loss', patience=50)
print('Training...')
model.fit(train_x,
train_labels,
validation_data=(validation_x, validation_labels),
epochs=epochs,
batch_size=batch_size,
callbacks=[checkpoint_callback, tensorboard_callback, early_stopping_callback])
return model
def train(labels_path, model_name, output_path,
epochs: ('number of epochs', 'option', 'e') = 150,
gpu: ('gpu to use', 'option', 'g') = 0,
gpu_offset: ('subtract gpu offset', 'option', 's') = 0,
      testrun: ('do short testrun on 100 samples', 'flag', 't') = False,
all_checkpoints: ('save all or best checkpoint only', 'flag', 'a') = False):
"""Basic training method that takes parquet file with labeled data, splits into training and validation set
and trains model (with early stopping).
# first configure a spacy model to use as word vector mapping
export PREDICT_EHR_VECTORS=en_core_web_lg
# then train a classifier model given labels
train_ehr --gpu 0 mgb_predictions_event/Event_PCI_labels.parquet Event_PCI mimic_models_event
"""
if not Path(output_path).exists():
Path(output_path).mkdir(parents=True)
print('Processing', labels_path)
labels_df = pd.read_parquet(labels_path)
# shuffle the labels
labels_df = labels_df.sample(frac=1, random_state=42)
if testrun:
labels_df = labels_df.iloc[:100]
# split into two sets for training and validation
train_df, validation_df = np.array_split(labels_df, 2)
print(f'Train data shape: {train_df.shape}')
print(f'Validation data shape: {validation_df.shape}')
print(f'Training model: {model_name}')
model = train_model(train_texts=train_df['NoteTXT'],
train_labels=train_df['label'],
validation_texts=validation_df['NoteTXT'],
validation_labels=validation_df['label'],
model_name=model_name,
output_path=output_path,
epochs=int(epochs),
save_best_only=not all_checkpoints,
gpu_device=int(gpu) - int(gpu_offset))
model.save_weights(os.path.join(output_path, model_name + '.hdf5'))
def train_():
"""Entry point for console_scripts
"""
import plac
plac.call(train)
if __name__ == "__main__":
predict_()
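# Illustrative only (not in the original module): invoking the pipeline
# directly rather than through the console script; paths are placeholders.
#
# os.environ['PREDICT_EHR_MODELS'] = '/path/to/models'
# os.environ['PREDICT_EHR_VECTORS'] = 'en_core_web_lg'
# run_current_models('notes.parquet', 'notes.predictions.pq',
#                    classifier_type='event', gpu_device=0)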
|
[
"ehr_classification.classifier_model.compile_lstm",
"tensorflow.keras.callbacks.TensorBoard",
"plac.call",
"tensorflow.keras.callbacks.ModelCheckpoint",
"ehr_classification.tokenizer.get_custom_tokenizer",
"pathlib.Path",
"tensorflow.keras.callbacks.EarlyStopping",
"pandas.read_parquet",
"numpy.array_split",
"ehr_classification.tokenizer.get_features",
"os.path.join",
"os.getenv",
"spacy.compat.pickle.load"
] |
[((1076, 1110), 'ehr_classification.tokenizer.get_custom_tokenizer', 'get_custom_tokenizer', (['word_vectors'], {}), '(word_vectors)\n', (1096, 1110), False, 'from ehr_classification.tokenizer import get_features, get_custom_tokenizer\n'), ((1163, 1288), 'ehr_classification.classifier_model.compile_lstm', 'compile_lstm', (['embeddings', "{'nr_hidden': 64, 'max_length': max_note_length, 'nr_class': 4}", "{'dropout': 0.5, 'lr': 0.0001}"], {}), "(embeddings, {'nr_hidden': 64, 'max_length': max_note_length,\n 'nr_class': 4}, {'dropout': 0.5, 'lr': 0.0001})\n", (1175, 1288), False, 'from ehr_classification.classifier_model import compile_lstm\n'), ((2008, 2021), 'pathlib.Path', 'Path', (['outfile'], {}), '(outfile)\n', (2012, 2021), False, 'from pathlib import Path\n'), ((2998, 3021), 'pandas.read_parquet', 'pd.read_parquet', (['infile'], {}), '(infile)\n', (3013, 3021), True, 'import pandas as pd\n'), ((4063, 4094), 'os.getenv', 'os.getenv', (['"""PREDICT_EHR_MODELS"""'], {}), "('PREDICT_EHR_MODELS')\n", (4072, 4094), False, 'import os\n'), ((4188, 4220), 'os.getenv', 'os.getenv', (['"""PREDICT_EHR_VECTORS"""'], {}), "('PREDICT_EHR_VECTORS')\n", (4197, 4220), False, 'import os\n'), ((7503, 7521), 'plac.call', 'plac.call', (['predict'], {}), '(predict)\n', (7512, 7521), False, 'import plac\n'), ((8458, 8490), 'os.getenv', 'os.getenv', (['"""PREDICT_EHR_VECTORS"""'], {}), "('PREDICT_EHR_VECTORS')\n", (8467, 8490), False, 'import os\n'), ((8607, 8641), 'ehr_classification.tokenizer.get_custom_tokenizer', 'get_custom_tokenizer', (['vectors_path'], {}), '(vectors_path)\n', (8627, 8641), False, 'from ehr_classification.tokenizer import get_features, get_custom_tokenizer\n'), ((8861, 8902), 'ehr_classification.tokenizer.get_features', 'get_features', (['train_docs', 'max_note_length'], {}), '(train_docs, max_note_length)\n', (8873, 8902), False, 'from ehr_classification.tokenizer import get_features, get_custom_tokenizer\n'), ((8922, 8968), 'ehr_classification.tokenizer.get_features', 'get_features', (['validation_docs', 'max_note_length'], {}), '(validation_docs, max_note_length)\n', (8934, 8968), False, 'from ehr_classification.tokenizer import get_features, get_custom_tokenizer\n'), ((9093, 9178), 'ehr_classification.classifier_model.compile_lstm', 'compile_lstm', (['embeddings', "{'max_length': max_note_length}", "{'lr': learning_rate}"], {}), "(embeddings, {'max_length': max_note_length}, {'lr': learning_rate}\n )\n", (9105, 9178), False, 'from ehr_classification.classifier_model import compile_lstm\n'), ((9291, 9348), 'os.path.join', 'os.path.join', (['output_path', '"""checkpoints"""', 'checkpoint_file'], {}), "(output_path, 'checkpoints', checkpoint_file)\n", (9303, 9348), False, 'import os\n'), ((9429, 9549), 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'checkpoint_path', 'monitor': '"""val_loss"""', 'save_best_only': 'save_best_only', 'save_weights_only': '(True)'}), "(filepath=checkpoint_path, monitor='val_loss',\n save_best_only=save_best_only, save_weights_only=True)\n", (9444, 9549), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping\n'), ((9591, 9643), 'os.path.join', 'os.path.join', (['output_path', '"""tensorboard"""', 'model_name'], {}), "(output_path, 'tensorboard', model_name)\n", (9603, 9643), False, 'import os\n'), ((9734, 9807), 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'tensorboard_path', 'write_graph': '(False)', 'profile_batch': '(0)'}), '(log_dir=tensorboard_path, 
write_graph=False, profile_batch=0)\n', (9745, 9807), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping\n'), ((9860, 9906), 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(50)'}), "(monitor='val_loss', patience=50)\n", (9873, 9906), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard, EarlyStopping\n'), ((11180, 11208), 'pandas.read_parquet', 'pd.read_parquet', (['labels_path'], {}), '(labels_path)\n', (11195, 11208), True, 'import pandas as pd\n'), ((11433, 11461), 'numpy.array_split', 'np.array_split', (['labels_df', '(2)'], {}), '(labels_df, 2)\n', (11447, 11461), True, 'import numpy as np\n'), ((12277, 12293), 'plac.call', 'plac.call', (['train'], {}), '(train)\n', (12286, 12293), False, 'import plac\n'), ((3300, 3334), 'ehr_classification.tokenizer.get_custom_tokenizer', 'get_custom_tokenizer', (['word_vectors'], {}), '(word_vectors)\n', (3320, 3334), False, 'from ehr_classification.tokenizer import get_features, get_custom_tokenizer\n'), ((3490, 3527), 'ehr_classification.tokenizer.get_features', 'get_features', (['tokens', 'max_note_length'], {}), '(tokens, max_note_length)\n', (3502, 3527), False, 'from ehr_classification.tokenizer import get_features, get_custom_tokenizer\n'), ((6981, 6993), 'pathlib.Path', 'Path', (['infile'], {}), '(infile)\n', (6985, 6993), False, 'from pathlib import Path\n'), ((12145, 12192), 'os.path.join', 'os.path.join', (['output_path', "(model_name + '.hdf5')"], {}), "(output_path, model_name + '.hdf5')\n", (12157, 12192), False, 'import os\n'), ((3083, 3097), 'pathlib.Path', 'Path', (['lz4_file'], {}), '(lz4_file)\n', (3087, 3097), False, 'from pathlib import Path\n'), ((3220, 3234), 'spacy.compat.pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3231, 3234), False, 'from spacy.compat import pickle\n'), ((7063, 7085), 'pathlib.Path', 'Path', (['output_directory'], {}), '(output_directory)\n', (7067, 7085), False, 'from pathlib import Path\n'), ((7009, 7031), 'pathlib.Path', 'Path', (['output_directory'], {}), '(output_directory)\n', (7013, 7031), False, 'from pathlib import Path\n'), ((11053, 11070), 'pathlib.Path', 'Path', (['output_path'], {}), '(output_path)\n', (11057, 11070), False, 'from pathlib import Path\n'), ((11089, 11106), 'pathlib.Path', 'Path', (['output_path'], {}), '(output_path)\n', (11093, 11106), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-10 23:25
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('stardate', '0009_auto_20170301_0155'),
]
operations = [
migrations.RemoveField(
model_name='blog',
name='social_auth',
),
]
|
[
"django.db.migrations.RemoveField"
] |
[((293, 354), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""blog"""', 'name': '"""social_auth"""'}), "(model_name='blog', name='social_auth')\n", (315, 354), False, 'from django.db import migrations\n')]
|
# Generated by Django 3.1 on 2020-08-17 21:14
from django.db import migrations, models
import profile.models
class Migration(migrations.Migration):
dependencies = [
('profile', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='profile',
name='photo',
field=models.ImageField(blank=True, null=True, upload_to=profile.models.upload_path),
),
]
|
[
"django.db.models.ImageField"
] |
[((343, 421), 'django.db.models.ImageField', 'models.ImageField', ([], {'blank': '(True)', 'null': '(True)', 'upload_to': 'profile.models.upload_path'}), '(blank=True, null=True, upload_to=profile.models.upload_path)\n', (360, 421), False, 'from django.db import migrations, models\n')]
|
""" Tests File"""
import requests
from website import API_KEY
def test_api_key_is_not_null():
""" Get API_KEY value and compare """
assert API_KEY != None
def test_request_api_key():
""" Test a request with api_key value """
assert requests.get(f'https://api.themoviedb.org/3/movie/76341?api_key={API_KEY}').status_code == 200
|
[
"requests.get"
] |
[((250, 325), 'requests.get', 'requests.get', (['f"""https://api.themoviedb.org/3/movie/76341?api_key={API_KEY}"""'], {}), "(f'https://api.themoviedb.org/3/movie/76341?api_key={API_KEY}')\n", (262, 325), False, 'import requests\n')]
|
# Generated by Django 3.1.3 on 2020-11-26 02:30
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
dependencies = [
('users', '0006_auto_20201126_0321'),
]
operations = [
migrations.AddField(
model_name='images',
name='date',
field=models.DateTimeField(default=datetime.datetime(2020, 11, 26, 2, 30, 10, 7725, tzinfo=utc)),
),
]
|
[
"datetime.datetime"
] |
[((413, 473), 'datetime.datetime', 'datetime.datetime', (['(2020)', '(11)', '(26)', '(2)', '(30)', '(10)', '(7725)'], {'tzinfo': 'utc'}), '(2020, 11, 26, 2, 30, 10, 7725, tzinfo=utc)\n', (430, 473), False, 'import datetime\n')]
|
from django import forms
from projects.models import *
from django.views.decorators.csrf import csrf_exempt
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.contrib.auth.models import User
import urllib2
import json
class Project(forms.ModelForm):
"""docstring for Project"""
class Meta:
model = Projects
exclude = ['created_on', 'updated_on']
widgets = {
'description': forms.Textarea(attrs={'rows':4, 'cols':15}),
            'name': forms.TextInput(attrs={'autofocus': 'autofocus'})
}
@staticmethod
@csrf_exempt
def List_all_project(request):
prject_list = Projects.objects.all()
        paginator = Paginator(prject_list, 10)  # Show 10 projects per page
page_range = paginator.page_range
page = request.GET.get('page')
try:
projects = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
projects = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
            projects = paginator.page(paginator.num_pages)
return render_to_response('projects.html', {'resource':'projects', 'project_list' : projects, 'page_range' : page_range}, context_instance=RequestContext(request))
@staticmethod
@csrf_exempt
def get_repo_info(request):
response = urllib2.urlopen('https://api.github.com/users/moztn/repos')
data = json.load(response)
        paginator = Paginator(data, 10)  # Show 10 repos per page
page_range = paginator.page_range
page = request.GET.get('page')
try:
projects = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
projects = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
            projects = paginator.page(paginator.num_pages)
return render_to_response('projects.html', {'data':projects}, context_instance=RequestContext(request))
@staticmethod
@csrf_exempt
def get_repo_details(request):
repo_name = request.GET.get('repo')
repo_url = 'https://api.github.com/repos/moztn/'+repo_name
response = urllib2.urlopen(repo_url)
data = json.load(response)
contrib_url = data['contributors_url']
contrib_response = urllib2.urlopen(contrib_url)
contrib_data = json.load(contrib_response)
language_url = data['languages_url']
language_response = urllib2.urlopen(language_url)
language_data = json.load(language_response)
activity_url = data['events_url']
activity_response = urllib2.urlopen(activity_url)
act_data = json.load(activity_response)
        paginator = Paginator(act_data, 10)  # Show 10 events per page
page_range = paginator.page_range
page = request.GET.get('page')
try:
activity_data = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
activity_data = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
            activity_data = paginator.page(paginator.num_pages)
return render_to_response('projects_details.html', {'data':data, 'contrib_data':contrib_data, 'language_data':language_data, 'activity_data':activity_data}, context_instance=RequestContext(request))
|
[
"json.load",
"django.forms.TextInput",
"django.core.paginator.Paginator",
"urllib2.urlopen",
"django.forms.Textarea",
"django.template.RequestContext"
] |
[((798, 824), 'django.core.paginator.Paginator', 'Paginator', (['prject_list', '(10)'], {}), '(prject_list, 10)\n', (807, 824), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((1473, 1532), 'urllib2.urlopen', 'urllib2.urlopen', (['"""https://api.github.com/users/moztn/repos"""'], {}), "('https://api.github.com/users/moztn/repos')\n", (1488, 1532), False, 'import urllib2\n'), ((1542, 1561), 'json.load', 'json.load', (['response'], {}), '(response)\n', (1551, 1561), False, 'import json\n'), ((1579, 1598), 'django.core.paginator.Paginator', 'Paginator', (['data', '(10)'], {}), '(data, 10)\n', (1588, 1598), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((2290, 2315), 'urllib2.urlopen', 'urllib2.urlopen', (['repo_url'], {}), '(repo_url)\n', (2305, 2315), False, 'import urllib2\n'), ((2325, 2344), 'json.load', 'json.load', (['response'], {}), '(response)\n', (2334, 2344), False, 'import json\n'), ((2408, 2436), 'urllib2.urlopen', 'urllib2.urlopen', (['contrib_url'], {}), '(contrib_url)\n', (2423, 2436), False, 'import urllib2\n'), ((2454, 2481), 'json.load', 'json.load', (['contrib_response'], {}), '(contrib_response)\n', (2463, 2481), False, 'import json\n'), ((2544, 2573), 'urllib2.urlopen', 'urllib2.urlopen', (['language_url'], {}), '(language_url)\n', (2559, 2573), False, 'import urllib2\n'), ((2592, 2620), 'json.load', 'json.load', (['language_response'], {}), '(language_response)\n', (2601, 2620), False, 'import json\n'), ((2680, 2709), 'urllib2.urlopen', 'urllib2.urlopen', (['activity_url'], {}), '(activity_url)\n', (2695, 2709), False, 'import urllib2\n'), ((2723, 2751), 'json.load', 'json.load', (['activity_response'], {}), '(activity_response)\n', (2732, 2751), False, 'import json\n'), ((2767, 2790), 'django.core.paginator.Paginator', 'Paginator', (['act_data', '(10)'], {}), '(act_data, 10)\n', (2776, 2790), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((566, 611), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'rows': 4, 'cols': 15}"}), "(attrs={'rows': 4, 'cols': 15})\n", (580, 611), False, 'from django import forms\n'), ((622, 671), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'autofocus': 'autofocus'}"}), "(attrs={'autofocus': 'autofocus'})\n", (637, 671), False, 'from django import forms\n'), ((1376, 1399), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (1390, 1399), False, 'from django.template import RequestContext\n'), ((2090, 2113), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (2104, 2113), False, 'from django.template import RequestContext\n'), ((3389, 3412), 'django.template.RequestContext', 'RequestContext', (['request'], {}), '(request)\n', (3403, 3412), False, 'from django.template import RequestContext\n')]
|
import os
import sys
import hashlib
import requests
def md5_str(szText):
return str(hashlib.md5(szText).hexdigest())
def MD5(szText):
m = hashlib.md5()
m.update(szText)
return m.digest()
def md5_file(filePath):
with open(filePath, 'rb') as fh:
m = hashlib.md5()
while True:
data = fh.read(8192)
if not data:
break
m.update(data)
return m.hexdigest()
# print md5_str("cf43e9194c5e4f7c8cb11469e1d0691c")
# print MD5("cf43e9194c5e4f7c8cb11469e1d0691c")
# print len(MD5("cf43e9194c5e4f7c8cb11469e1d0691c"))
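# Illustrative only (not in the original script), same Python 2 style:
# print md5_file(sys.argv[0])   # hex digest of this script's bytes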
|
[
"hashlib.md5"
] |
[((157, 170), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (168, 170), False, 'import hashlib\n'), ((294, 307), 'hashlib.md5', 'hashlib.md5', ([], {}), '()\n', (305, 307), False, 'import hashlib\n'), ((95, 114), 'hashlib.md5', 'hashlib.md5', (['szText'], {}), '(szText)\n', (106, 114), False, 'import hashlib\n')]
|
import math
import warnings
from collections import OrderedDict
from enum import Enum
import efel
import matplotlib.pyplot as plt
import numpy as np
from lib.Model import Model
from lib.NrnModel import NrnModel
class Level(Enum):
HIGH = 0.5
MID = 5.0
LOW = 10.0
VLOW = 50.0
EFEL_NAME_MAP = {
"AP Amplitude": "AP_amplitude",
"AP Height": "AP_height",
"AP Width": "AP_width",
"AHP Absolute Depth": "AHP_depth_abs",
"AHP time from peak": "AHP_time_from_peak",
"Spike Count": "Spikecount",
"Time to First Spike": "time_to_first_spike",
}
EFEL2NAME_MAP = {v: k for k, v in EFEL_NAME_MAP.items()}
def _zero_valued_dict(keys):
return dict.fromkeys(keys, 0)
class EfelMeasurements():
    def __init__(self, model: Model, config: dict):
self.cell = model
self.voltage = None
self.t = None
self.delay = None
self.duration = None
self.current_amplitude = None
self.Tstop = None
self.trace = {}
self._setup(config)
def _setup(self, config):
self.voltage, self.t = self.cell.stimulateCell(
float(config["Amplitude"]), float(
config["Duration"]), float(config["Delay"]),
float(
config["T stop"]), config["Stimulus Section"], config["Recording Section"],
clampAt=float(config["Stimulus Position"]), recordAt=float(config["Recording Position"]), init=float(config["Vinit"]))
self.delay = float(config["Delay"])
self.duration = float(config["Duration"])
self.Tstop = float(config["T stop"])
self.current_amplitude = float(config["Amplitude"])
self._initialize()
def _initialize(self):
# start = sorted(self._closeMatches(self.t,delay,0.025),key=lambda x: x[0])[0][0]
# end = sorted(self._closeMatches(self.t,delay+duration,0.025),key=lambda x: x[0])[0][0]
# print(t[2]-t[1])
efel.setDoubleSetting('stimulus_current', self.current_amplitude)
efel.setIntSetting("strict_stiminterval", True)
self.trace['T'] = self.t
self.trace['V'] = self.voltage
# max because delay may be less than 5ms
self.trace['stim_start'] = [max(self.delay-5, 0)]
self.trace['stim_end'] = [self.Tstop]
return self.voltage, self.t
    def get_measurements(self, outputDict: dict, featureNames: list):
traces = [self.trace]
efel_feature_names = self._convert_to_efel_names(featureNames)
warnings.filterwarnings("ignore", category=RuntimeWarning)
check_peaks = efel.getFeatureValues(traces, ["Spikecount_stimint"])
if check_peaks[0]["Spikecount_stimint"][0] == 0:
return _zero_valued_dict(featureNames)
amplitudes = efel.getFeatureValues(traces, ["AP_amplitude"])
if (amplitudes[0]["AP_amplitude"] is None):
# print("efel failed",len(traces_results[0]["AP_amplitude"]) , len(traces_results[0]["AP_height"]))
print(f"n spikes are {check_peaks[0]['Spikecount_stimint'][0]}")
return _zero_valued_dict(featureNames)
traces_results = efel.getFeatureValues(traces, efel_feature_names)
warnings.filterwarnings("default", category=RuntimeWarning)
for trace_results in traces_results:
# trace_result is a dictionary, with as keys the requested eFeatures
for feature_name, feature_values in trace_results.items():
if len(feature_values) > 0:
outputDict[EFEL2NAME_MAP[feature_name]
] = np.mean(feature_values)
else:
print(f"{feature_name} failed")
print(f"{feature_name} equals {feature_values}")
outputDict[EFEL2NAME_MAP[feature_name]] = 0
if "Time to First Spike" in list(outputDict.keys()):
if outputDict["Time to First Spike"] !=0:
outputDict["Time to First Spike"] +=self.delay
self.measurements = outputDict
# for name in featureNames:
# if name == "Input Resistance":
# self.measurements[name] = self.inputResistance(-0.5,
# plotting=False, printing=False)
# elif name == "Rheobase":
# self.measurements[name] = self.Rheobase(
# Level.VLOW, 1, plotting=False, printing=False)
# elif name == "Time Constant":
# self.measurements[name] = self.timeConstant(
# -0.5, plotting=False, printing=False)
return self.measurements
def _closeMatches(self, lst: list, findVal, tolerance):
""" find a list of closest matches to a specific value with a spicified tolerance
Args:
:param lst: target list to search into
:param findVal: target value
:param tolerance: accepted error in matches
:return: list of (value,index) pairs
"""
# matches = [(val,index) for index,val in enumerate(lst) if abs(val - findVal) < tolerance]
matches = [(val, index) for index, val in enumerate(lst)
if math.isclose(val, findVal, abs_tol=tolerance)]
return matches
def _convert_to_efel_names(self, regular_feature_names: list):
efel_feature_names = []
for fName in regular_feature_names:
if fName not in list(EFEL_NAME_MAP.keys()):
raise ValueError(
f" Feature: '{fName}' is not availabe in Efel or not spelled well")
efel_feature_names.append(EFEL_NAME_MAP[fName])
return efel_feature_names
if __name__ == '__main__':
fig, ax = plt.subplots()
for i in range(1):
delay = 150 # 150
duration = 1
current = 21
efel.setDoubleSetting('stimulus_current', current)
# efel.setDoubleSetting('interp_step', 0.025)
# efel.setIntSetting("strict_stiminterval", True)
testEFEL = EfelMeasurements()
testEFEL.stimulateCell(current, duration, delay,
testEFEL.iseg, 0.5, 500)
testEFEL.get_measurements(["Spikecount", "time_to_first_spike", "AP_amplitude",
"AP_height", 'AP_width', 'AHP_depth_abs', "AHP_time_from_peak"])
testEFEL.model.graphVolt(
testEFEL.voltage, testEFEL.t, "trace", ax, color=np.random.rand(3,))
# ax.set_color("red")
plt.show()
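# Illustrative only (not in the original module): the config dict read by
# _setup needs at least these keys; the values shown are placeholders.
#
# config = {"Amplitude": 21, "Duration": 1, "Delay": 150, "T stop": 500,
#           "Stimulus Section": "iseg", "Recording Section": "soma",
#           "Stimulus Position": 0.5, "Recording Position": 0.5, "Vinit": -65}
# measurements = EfelMeasurements(model, config)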
|
[
"matplotlib.pyplot.show",
"warnings.filterwarnings",
"efel.setDoubleSetting",
"efel.setIntSetting",
"efel.getFeatureValues",
"numpy.mean",
"math.isclose",
"numpy.random.rand",
"matplotlib.pyplot.subplots"
] |
[((5889, 5903), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5901, 5903), True, 'import matplotlib.pyplot as plt\n'), ((6657, 6667), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6665, 6667), True, 'import matplotlib.pyplot as plt\n'), ((2056, 2121), 'efel.setDoubleSetting', 'efel.setDoubleSetting', (['"""stimulus_current"""', 'self.current_amplitude'], {}), "('stimulus_current', self.current_amplitude)\n", (2077, 2121), False, 'import efel\n'), ((2130, 2177), 'efel.setIntSetting', 'efel.setIntSetting', (['"""strict_stiminterval"""', '(True)'], {}), "('strict_stiminterval', True)\n", (2148, 2177), False, 'import efel\n'), ((2619, 2677), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'RuntimeWarning'}), "('ignore', category=RuntimeWarning)\n", (2642, 2677), False, 'import warnings\n'), ((2700, 2753), 'efel.getFeatureValues', 'efel.getFeatureValues', (['traces', "['Spikecount_stimint']"], {}), "(traces, ['Spikecount_stimint'])\n", (2721, 2753), False, 'import efel\n'), ((2884, 2931), 'efel.getFeatureValues', 'efel.getFeatureValues', (['traces', "['AP_amplitude']"], {}), "(traces, ['AP_amplitude'])\n", (2905, 2931), False, 'import efel\n'), ((3251, 3300), 'efel.getFeatureValues', 'efel.getFeatureValues', (['traces', 'efel_feature_names'], {}), '(traces, efel_feature_names)\n', (3272, 3300), False, 'import efel\n'), ((3309, 3368), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""default"""'], {'category': 'RuntimeWarning'}), "('default', category=RuntimeWarning)\n", (3332, 3368), False, 'import warnings\n'), ((6004, 6054), 'efel.setDoubleSetting', 'efel.setDoubleSetting', (['"""stimulus_current"""', 'current'], {}), "('stimulus_current', current)\n", (6025, 6054), False, 'import efel\n'), ((5358, 5403), 'math.isclose', 'math.isclose', (['val', 'findVal'], {'abs_tol': 'tolerance'}), '(val, findVal, abs_tol=tolerance)\n', (5370, 5403), False, 'import math\n'), ((6603, 6620), 'numpy.random.rand', 'np.random.rand', (['(3)'], {}), '(3)\n', (6617, 6620), True, 'import numpy as np\n'), ((3711, 3734), 'numpy.mean', 'np.mean', (['feature_values'], {}), '(feature_values)\n', (3718, 3734), True, 'import numpy as np\n')]
|
# Implementation of our blockchain
# <NAME>
# Object Oriented blockchain
# The container + chain where our blocks live
# Bring in some needed libraries
from datetime import datetime
import hashlib
import json
from urllib.parse import urlparse
import requests
from timeit import default_timer as timer
class Blockchain:
# Initialize the chain and the genesis block
def __init__(self):
self.chain = []
self.transactions = []
self.difficulty = "0000"
self.difficultyArray = []
self.createBlock(1, "0", None) # Genesis block
self.nodes = set()
self.users = {}
# This dict keeps track of all clients/miners using the chain
def addUser(self, userId, publickey, miner=False):
self.users[userId] = {"publicKey": publickey, "isMiner": miner}
# Either add or subtract a "0" from the difficulty
def changeDifficulty(self, increase=True):
if increase:
self.difficulty += "0"
else:
self.difficulty = self.difficulty[:-1]
def getLength(self):
return len(self.chain)
    # Block format is a dictionary
    # hash_solution is the hash string that solved the puzzle
def createBlock(self, nonce, previous_hash, hash_solution):
block = {
"blockNum": len(self.chain) + 1,
"timestamp": str(datetime.now().replace(microsecond=0)),
"nonce": nonce,
"hashSolution": hash_solution,
"previousHash": previous_hash,
"transactions": self.transactions,
}
# Empty the transactions
self.transactions = []
self.chain.append(block)
self.difficultyArray.append(self.difficulty)
return block
# Returns the last block in the chain
def getPreviousBlock(self):
return self.chain[-1]
# Solving the hash with the nonce
def proofOfWork(self, previous_nonce):
new_nonce = 1
proof_of_work = False
start = timer()
while proof_of_work is False:
# We can define our own proof-of-work puzzle (n**2 - pn**2) in this case
hash_solution = hashlib.sha256(
str((new_nonce ** 2 - previous_nonce ** 2) + len(self.chain)).encode(
"utf-8"
)
).hexdigest()
if hash_solution[: len(self.difficulty)] == self.difficulty:
proof_of_work = True
else:
new_nonce += 1
end = timer()
return new_nonce, hash_solution, round(end - start, 6)
# Hash the contents of the entire block
def hash(self, block):
encoded_block = json.dumps(block, sort_keys=True).encode("utf-8")
return hashlib.sha256(encoded_block).hexdigest()
# Check if chain has all valid blocks
def isChainValid(self, chain):
previous_block = chain[0]
block_index = 1
while block_index < len(chain):
block = chain[block_index]
if block["previousHash"] != self.hash(previous_block):
print("No!")
return False, block_index
previous_nonce = previous_block["nonce"]
nonce = block["nonce"]
hash_operation = hashlib.sha256(
str((nonce ** 2 - previous_nonce ** 2) +
block_index).encode("utf-8")
).hexdigest()
try:
difficultyAtBlock = self.difficultyArray[block_index]
if hash_operation[:len(difficultyAtBlock)] != difficultyAtBlock:
return False, block_index
except:
print(len(self.difficultyArray), len(self.chain))
# Move forward in the chain if everything checks out
previous_block = block
block_index += 1
return True, len(self.chain)
# Creates a transaction and returns the future next block number
def addTransaction(self, sender, receiver, data):
self.transactions.append(
{"sender": sender, "receiver": receiver, "document": data}
)
previous_block = self.getPreviousBlock()
return previous_block["blockNum"] + 1
# Returns the address of a new node on the network
def addNode(self, addressOfNode):
parsed_url = urlparse(addressOfNode)
self.nodes.add(parsed_url.netloc)
def getNumNodes(self):
return len(self.nodes)
# Find the best chain-by-consensus on network (longest chain)
def replaceChain(self):
network = self.nodes
longest_chain = None
max_length = len(self.chain)
for node in network:
try:
response = requests.get(f"http://{node}/get_chain_json")
if response.status_code == 200:
length = response.json()["length"]
chain = response.json()["chain"]
difficulties = list(response.json()["difficulties"])
print(len(difficulties), difficulties)
if length > max_length: # (self.isChainValid(chain)):
print("yes!")
max_length = length
longest_chain = chain
chain_difficulties = difficulties
except:
continue
if longest_chain:
self.chain = longest_chain
self.difficultyArray = chain_difficulties
return True
return False
# Functions to append bogus blocks to chain and remove
def simulateFakeBlocks(self):
for _ in range(2):
self.chain.append(
{
"blockNum": len(self.chain) + 1,
"timestamp": "Never",
"nonce": -1,
"previousHash": "FAKE BLOCK",
"transactions": [
{
"sender": "You",
"receiver": "Theif",
"document": {"Your Bank Account": 123456789},
}
],
}
)
def pruneFakeBlocks(self):
is_valid, last_valid_block = self.isChainValid(self.chain)
if not is_valid:
self.chain = self.chain[:last_valid_block]
return True, last_valid_block
return False, last_valid_block
# --- Testing Functions below ---
# bc = Blockchain()
# print(bc.isChainValid(bc.chain))
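# Illustrative only (not in the original file), extending the tests above:
# bc = Blockchain()
# prev = bc.getPreviousBlock()
# nonce, solution, seconds = bc.proofOfWork(prev["nonce"])
# bc.addTransaction("Alice", "Bob", {"doc": 1})
# bc.createBlock(nonce, bc.hash(prev), solution)
# bc.simulateFakeBlocks()
# print(bc.pruneFakeBlocks())  # expect (True, <last valid block index>)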
|
[
"timeit.default_timer",
"json.dumps",
"hashlib.sha256",
"requests.get",
"datetime.datetime.now",
"urllib.parse.urlparse"
] |
[((1980, 1987), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1985, 1987), True, 'from timeit import default_timer as timer\n'), ((2487, 2494), 'timeit.default_timer', 'timer', ([], {}), '()\n', (2492, 2494), True, 'from timeit import default_timer as timer\n'), ((4322, 4345), 'urllib.parse.urlparse', 'urlparse', (['addressOfNode'], {}), '(addressOfNode)\n', (4330, 4345), False, 'from urllib.parse import urlparse\n'), ((2655, 2688), 'json.dumps', 'json.dumps', (['block'], {'sort_keys': '(True)'}), '(block, sort_keys=True)\n', (2665, 2688), False, 'import json\n'), ((2721, 2750), 'hashlib.sha256', 'hashlib.sha256', (['encoded_block'], {}), '(encoded_block)\n', (2735, 2750), False, 'import hashlib\n'), ((4711, 4756), 'requests.get', 'requests.get', (['f"""http://{node}/get_chain_json"""'], {}), "(f'http://{node}/get_chain_json')\n", (4723, 4756), False, 'import requests\n'), ((1340, 1354), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1352, 1354), False, 'from datetime import datetime\n')]
|
"""
Get singer songs by qq music.
Refer: https://github.com/yangjianxin1/QQMusicSpider
``` bash
cd crawler/qq_music
python crawler.py initdb
python crawler.py crawler -s {singer_mid}
```
"""
import sqlite3
import sys
import time
import click
import requests
from requests.adapters import HTTPAdapter
sys.path.append("../../")
import config
session = requests.Session()
adapters = HTTPAdapter(max_retries=3)
session.mount('https://', adapters)
SONG_BY_SINGER_URL = "https://u.y.qq.com/cgi-bin/musicu.fcg?data=%7B%22comm%22%3A%7B%22ct%22%3A24%2C%22cv%22%3A0%7D%2C%22singerSongList%22%3A%7B%22method%22%3A%22GetSingerSongList%22%2C%22param%22%3A%7B%22order%22%3A1%2C%22singerMid%22%3A%22{singer_mid}%22%2C%22begin%22%3A{begin}%2C%22num%22%3A{num}%7D%2C%22module%22%3A%22musichall.song_list_server%22%7D%7D"
def init_db(filename):
table_sql = """CREATE TABLE `song`(
id INT PRIMARY KEY,
mid VARCHAR(100) NOT NULL,
singer_mid VARCHAR(100) NOT NULL,
name VARCHAR(255) NOT NULL,
title VARCHAR(255) NOT NULL,
created_at INT NOT NULL)"""
if filename is None:
filename = config.DB_PATH
conn = sqlite3.connect(filename)
cursor = conn.cursor()
cursor.execute(table_sql)
cursor.close()
conn.close()
def get_song_from_qq(singer_mid: str, offset: int, limit: int):
"""
Get music data list from qq
Args:
singer_mid: singer mid
offset:
limit:
Returns:
song data
"""
try:
resp = session.get(url=SONG_BY_SINGER_URL.format(singer_mid=singer_mid, begin=offset, num=limit))
data = resp.json()
if data["code"] == 0:
return data["singerSongList"]["data"]["songList"]
else:
print(f"Error in req for singer {singer_mid}, offset: {offset}, limit: {limit}")
return []
except Exception as e:
print(f"Exception in get song from qq. errors: {e}")
return []
def save_to_db(filename, singer_mid, data):
now_time = int(time.time())
params = []
for song in data:
song_info = song["songInfo"]
item = [
song_info["mid"], singer_mid,
song_info["name"], song_info["title"], now_time
]
params.append(item)
conn = sqlite3.connect(filename)
cursor = None
try:
cursor = conn.cursor()
cursor.executemany(
"INSERT INTO song(mid, singer_mid, name, title, created_at) "
"VALUES (?,?,?,?,?)",
params
)
conn.commit()
return True
except Exception as e:
conn.rollback()
print(f"Exception save data to db, errors: {e}")
return False
finally:
if cursor:
cursor.close()
conn.close()
def handler(filename, singer_mid):
offset = 0
limit = 100
while 1:
data = get_song_from_qq(singer_mid, offset, limit)
if data:
st = save_to_db(filename, singer_mid, data)
click.echo(f"Save data for offset: {offset}, limit: {limit}, status: {st}")
else:
break
offset += limit
return True
@click.group()
def cli():
pass
@cli.command(help="Initial database")
@click.option("--filename", "-f", default=None)
def initdb(filename):
click.echo("Begin to initial db.")
init_db(filename)
click.echo("Finished initial.")
@cli.command(help="Crawler music for singer")
@click.option("--filename", "-f", default=None)
@click.option("--singer", "-s", help="The singer mid", default=None)
def crawler(filename, singer):
if singer is None:
click.echo("You must need provide singer mid!")
return
if filename is None:
filename = config.DB_PATH
handler(filename, singer)
if __name__ == '__main__':
cli()
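# Hedged read-back sketch (not part of the original script): after a crawl,
# the saved rows can be inspected straight from the SQLite file, e.g.:
#   conn = sqlite3.connect(config.DB_PATH)
#   for row in conn.execute("SELECT mid, name, title FROM song LIMIT 10"):
#       print(row)
#   conn.close()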
|
[
"sys.path.append",
"requests.adapters.HTTPAdapter",
"requests.Session",
"click.option",
"click.echo",
"time.time",
"sqlite3.connect",
"click.group"
] |
[((333, 358), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (348, 358), False, 'import sys\n'), ((384, 402), 'requests.Session', 'requests.Session', ([], {}), '()\n', (400, 402), False, 'import requests\n'), ((414, 440), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': '(3)'}), '(max_retries=3)\n', (425, 440), False, 'from requests.adapters import HTTPAdapter\n'), ((3173, 3186), 'click.group', 'click.group', ([], {}), '()\n', (3184, 3186), False, 'import click\n'), ((3248, 3294), 'click.option', 'click.option', (['"""--filename"""', '"""-f"""'], {'default': 'None'}), "('--filename', '-f', default=None)\n", (3260, 3294), False, 'import click\n'), ((3463, 3509), 'click.option', 'click.option', (['"""--filename"""', '"""-f"""'], {'default': 'None'}), "('--filename', '-f', default=None)\n", (3475, 3509), False, 'import click\n'), ((3511, 3578), 'click.option', 'click.option', (['"""--singer"""', '"""-s"""'], {'help': '"""The singer mid"""', 'default': 'None'}), "('--singer', '-s', help='The singer mid', default=None)\n", (3523, 3578), False, 'import click\n'), ((1167, 1192), 'sqlite3.connect', 'sqlite3.connect', (['filename'], {}), '(filename)\n', (1182, 1192), False, 'import sqlite3\n'), ((2296, 2321), 'sqlite3.connect', 'sqlite3.connect', (['filename'], {}), '(filename)\n', (2311, 2321), False, 'import sqlite3\n'), ((3321, 3355), 'click.echo', 'click.echo', (['"""Begin to initial db."""'], {}), "('Begin to initial db.')\n", (3331, 3355), False, 'import click\n'), ((3382, 3413), 'click.echo', 'click.echo', (['"""Finished initial."""'], {}), "('Finished initial.')\n", (3392, 3413), False, 'import click\n'), ((2039, 2050), 'time.time', 'time.time', ([], {}), '()\n', (2048, 2050), False, 'import time\n'), ((3641, 3688), 'click.echo', 'click.echo', (['"""You must need provide singer mid!"""'], {}), "('You must need provide singer mid!')\n", (3651, 3688), False, 'import click\n'), ((3021, 3096), 'click.echo', 'click.echo', (['f"""Save data for offset: {offset}, limit: {limit}, status: {st}"""'], {}), "(f'Save data for offset: {offset}, limit: {limit}, status: {st}')\n", (3031, 3096), False, 'import click\n')]
|
from bs4 import BeautifulSoup as Soup
import re
string = '<p>Please click <a href="http://www.dr-chuck.com">here</a></p>'
match = re.findall(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', string)
html = Soup(string, 'html.parser')
bsm = [a['href'] for a in html.find_all('a')]
match2 = re.findall(r'"(http.*)"', string)
print(match)
print(match2)
print(bsm)
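# Hedged extra check (the sample string below is an assumption, not from the
# source): the parser approach also captures relative hrefs that the
# absolute-URL regex above cannot match.
string2 = '<p><a href="/docs">docs</a> and <a href="https://example.com">home</a></p>'
html2 = Soup(string2, 'html.parser')
print([a['href'] for a in html2.find_all('a')])  # ['/docs', 'https://example.com']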
|
[
"bs4.BeautifulSoup",
"re.findall"
] |
[((131, 194), 're.findall', 're.findall', (['"""https?://(?:[-\\\\w.]|(?:%[\\\\da-fA-F]{2}))+"""', 'string'], {}), "('https?://(?:[-\\\\w.]|(?:%[\\\\da-fA-F]{2}))+', string)\n", (141, 194), False, 'import re\n'), ((201, 228), 'bs4.BeautifulSoup', 'Soup', (['string', '"""html.parser"""'], {}), "(string, 'html.parser')\n", (205, 228), True, 'from bs4 import BeautifulSoup as Soup\n'), ((288, 322), 're.findall', 're.findall', (['""""(http.*.*)\\""""', 'string'], {}), '(\'"(http.*.*)"\', string)\n', (298, 322), False, 'import re\n')]
|
import subprocess
import sys
def start():
    # 0x78 == 0b01111000 == ord('x'), the agreed eXit code.
    # Return the raw exit code so the launcher loop below can compare it;
    # the original returned a bool, which never equals 0x78.
    return subprocess.call((sys.executable, "main.py"))
if __name__ == "__main__":
print("Executing Bot initialisation.")
while True:
# Start the bot
print("Starting Bot;")
code = start()
        if code != 0x78:
print(f'\nRe-executing Bot initialisation. Exit code {code}')
continue
else: # Exit if bot script returns special shutdown code 0x78
print(f'\nShutting down. Exit code {code}')
break
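# Hedged companion sketch (an assumption about main.py, which is not shown
# here): the bot signals a clean shutdown by exiting with the agreed code:
#   import sys
#   sys.exit(0x78)  # the launcher loop above sees 0x78 and stops restarting
# Any other exit code (crash, update, etc.) makes the loop relaunch the bot.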
|
[
"subprocess.call"
] |
[((89, 133), 'subprocess.call', 'subprocess.call', (["(sys.executable, 'main.py')"], {}), "((sys.executable, 'main.py'))\n", (104, 133), False, 'import subprocess\n')]
|
from tkinter import *
from tkinter import ttk
from View.Datepicker import Datepicker
from Controller.Controller import Controller
from tkinter.filedialog import askopenfilename
"""
Autor: <NAME> (418002863)
Email: <EMAIL>
"""
class GUI(ttk.Frame):
fields_current_query = dict()
def __init__(self, master):
self.control = Controller()
# Main frame
super().__init__(master)
super().grid(row=0, column=0, sticky=(N, W, E, S))
# Master settings
master.title("Pólizas")
master.columnconfigure(0, weight=1)
master.rowconfigure(0, weight=1)
# Notebook
self.notebook = ttk.Notebook(self, padding=10)
self.notebook.grid(row=0, column=0, sticky=(N, W, E, S))
# Consultas frame
self.fr_consultas = ttk.Frame(self.notebook)
self.notebook.add(self.fr_consultas, text="Consultas", padding=10)
        # Add records / import frame
self.fr_agregar = ttk.Frame(self.notebook)
self.notebook.add(self.fr_agregar, text="Agregar / Importar", padding=10)
# Ayuda frame
self.fr_ayuda = ttk.Frame(self.notebook)
self.notebook.add(self.fr_ayuda, text="Ayuda", padding=10)
# Cliente
self.lf_cliente = ttk.Labelframe(self.fr_consultas, text="Cliente")
self.lf_cliente.grid(row=0, column=0, rowspan=3, columnspan=6)
self.val_id_cliente = BooleanVar()
self.ch_id_cliente = ttk.Checkbutton(self.lf_cliente, variable=self.val_id_cliente)
self.ch_id_cliente.grid(row=1, column=0)
self.la_id_cliente = ttk.Label(self.lf_cliente, text="id_cliente")
self.la_id_cliente.grid(row=1, column=1)
self.id_cliente = StringVar()
self.en_id_cliente = ttk.Entry(self.lf_cliente, textvariable=self.id_cliente)
self.en_id_cliente.grid(row=1, column=2)
self.val_nombre = BooleanVar()
self.ch_nombre = ttk.Checkbutton(self.lf_cliente, variable=self.val_nombre)
self.ch_nombre.grid(row=1, column=3)
self.la_nombre = ttk.Label(self.lf_cliente, text="Nombre")
self.la_nombre.grid(row=1, column=4)
self.nombre = StringVar()
self.en_nombre = ttk.Entry(self.lf_cliente, width=34, textvariable=self.nombre)
self.en_nombre.grid(row=1, column=5)
self.val_direccion = BooleanVar()
self.ch_direccion = ttk.Checkbutton(self.lf_cliente, variable=self.val_direccion)
self.ch_direccion.grid(row=2, column=0)
self.la_direccion = ttk.Label(self.lf_cliente, text="Dirección")
self.la_direccion.grid(row=2, column=1)
self.direccion = StringVar()
self.en_direccion = ttk.Entry(self.lf_cliente, width=72, textvariable=self.direccion)
self.en_direccion.grid(row=2, column=2, columnspan=4)
for child in self.lf_cliente.winfo_children():
child.grid_configure(padx=5, pady=5)
# Factura
self.lf_factura = ttk.LabelFrame(self.fr_consultas, text="Factura")
self.lf_factura.grid(row=0, column=6, rowspan=3, columnspan=3)
self.val_id_factura = BooleanVar()
self.ch_id_factura = ttk.Checkbutton(self.lf_factura, variable=self.val_id_factura)
self.ch_id_factura.grid(row=1, column=6)
self.la_id_factura = ttk.Label(self.lf_factura, text="id_factura")
self.la_id_factura.grid(row=1, column=7)
self.id_factura = StringVar()
self.en_id_factura = ttk.Entry(self.lf_factura, textvariable=self.id_factura)
self.en_id_factura.grid(row=1, column=8)
self.val_costo_vehiculo = BooleanVar()
self.ch_costo_vehiculo = ttk.Checkbutton(self.lf_factura, variable=self.val_costo_vehiculo)
self.ch_costo_vehiculo.grid(row=2, column=6)
self.la_costo_vehiculo = ttk.Label(self.lf_factura, text="Costo del\nautomóvil")
self.la_costo_vehiculo.grid(row=2, column=7)
self.costo_vehiculo = StringVar()
self.en_costo_vehiculo = ttk.Entry(self.lf_factura, textvariable=self.costo_vehiculo)
self.en_costo_vehiculo.grid(row=2, column=8)
for child in self.lf_factura.winfo_children():
child.grid_configure(padx=5, pady=5)
# Vehículo
self.lf_vehiculo = ttk.LabelFrame(self.fr_consultas, text="Vehículo")
self.lf_vehiculo.grid(row=3, column=6, rowspan=4, columnspan=3)
self.val_placas = BooleanVar()
self.ch_placas = ttk.Checkbutton(self.lf_vehiculo, variable=self.val_placas)
self.ch_placas.grid(row=4, column=0)
self.la_placas = ttk.Label(self.lf_vehiculo, text="Placas")
self.la_placas.grid(row=4, column=1)
self.placas = StringVar()
self.en_placas = ttk.Entry(self.lf_vehiculo, textvariable=self.placas)
self.en_placas.grid(row=4, column=2)
self.val_marca = BooleanVar()
self.ch_marca = ttk.Checkbutton(self.lf_vehiculo, variable=self.val_marca)
self.ch_marca.grid(row=5, column=0)
self.la_marca = ttk.Label(self.lf_vehiculo, text="Marca")
self.la_marca.grid(row=5, column=1)
self.marca = StringVar()
self.en_marca = ttk.Entry(self.lf_vehiculo, textvariable=self.marca)
self.en_marca.grid(row=5, column=2)
self.val_modelo = BooleanVar()
self.ch_modelo = ttk.Checkbutton(self.lf_vehiculo, variable=self.val_modelo)
self.ch_modelo.grid(row=6, column=0)
self.la_modelo = ttk.Label(self.lf_vehiculo, text="Modelo")
self.la_modelo.grid(row=6, column=1)
self.modelo = StringVar()
self.en_modelo = ttk.Entry(self.lf_vehiculo, textvariable=self.modelo)
self.en_modelo.grid(row=6, column=2)
for child in self.lf_vehiculo.winfo_children():
child.grid_configure(padx=5, pady=5)
# Póliza
self.lf_poliza = ttk.LabelFrame(self.fr_consultas, text="Póliza")
self.lf_poliza.grid(row=3, column=0, rowspan=3, columnspan=6)
self.val_costo_seguro = BooleanVar()
self.ch_costo_seguro = ttk.Checkbutton(self.lf_poliza, variable=self.val_costo_seguro)
self.ch_costo_seguro.grid(row=4, column=3)
self.la_costo_seguro = ttk.Label(self.lf_poliza, text="Costo del\nseguro")
self.la_costo_seguro.grid(row=4, column=4)
self.costo_seguro = StringVar()
self.en_costo_seguro = ttk.Entry(self.lf_poliza, textvariable=self.costo_seguro)
self.en_costo_seguro.grid(row=4, column=5)
self.val_prima_asegurada = BooleanVar()
self.ch_prima_asegurada = ttk.Checkbutton(self.lf_poliza, variable=self.val_prima_asegurada)
self.ch_prima_asegurada.grid(row=5, column=3)
self.la_prima_asegurada = ttk.Label(self.lf_poliza, text="Prima asegurada")
self.la_prima_asegurada.grid(row=5, column=4)
self.prima_asegurada = StringVar()
self.en_prima_asegurada = ttk.Entry(self.lf_poliza, textvariable=self.prima_asegurada)
self.en_prima_asegurada.grid(row=5, column=5)
self.val_fecha_apertura = BooleanVar()
self.ch_fecha_apertura = ttk.Checkbutton(self.lf_poliza, variable=self.val_fecha_apertura)
self.ch_fecha_apertura.grid(row=4, column=6)
self.la_fecha_apertura = ttk.Label(self.lf_poliza, text="Fecha de\napertura")
self.la_fecha_apertura.grid(row=4, column=7)
self.fecha_apertura = StringVar()
self.en_fecha_apertura = Datepicker(self.lf_poliza, datevar=self.fecha_apertura)
self.en_fecha_apertura.grid(row=4, column=8)
self.val_fecha_vencimiento = BooleanVar()
self.ch_fecha_vencimiento = ttk.Checkbutton(self.lf_poliza, variable=self.val_fecha_vencimiento)
self.ch_fecha_vencimiento.grid(row=5, column=6)
self.la_fecha_vencimiento = ttk.Label(self.lf_poliza, text="Fecha de\nvencimiento")
self.la_fecha_vencimiento.grid(row=5, column=7)
self.fecha_vencimiento = StringVar()
self.en_fecha_vencimiento = Datepicker(self.lf_poliza, datevar=self.fecha_vencimiento)
self.en_fecha_vencimiento.grid(row=5, column=8)
for child in self.lf_poliza.winfo_children():
child.grid_configure(padx=5, pady=5)
# Table
self.fr_tabla = ttk.Frame(self.fr_consultas, width=900, height=180)
self.fr_tabla.grid(row=7, column=0, rowspan=8, columnspan=10)
self.fr_tabla.grid_propagate(0)
self.tabla = ttk.Treeview(self.fr_tabla, height=12, selectmode=BROWSE)
self.tabla.grid(row=7, column=0, sticky=N+S+W+E)
self.tabla.bind("<<TreeviewSelect>>", self.populate_fields)
# Scroll bars
self.vscroll = ttk.Scrollbar(self.fr_tabla, orient=VERTICAL)
self.vscroll.grid(row=7, column=9, rowspan=7, sticky=W+N+S)
self.hscroll = ttk.Scrollbar(self.fr_tabla, orient=HORIZONTAL)
self.hscroll.grid(row=14, column=0, columnspan=9, sticky=W+E+N)
# Scroll bars binding
self.vscroll.configure(command=self.tabla.yview)
self.hscroll.configure(command=self.tabla.xview)
self.tabla.configure(yscrollcommand=self.vscroll.set)
self.tabla.configure(xscrollcommand=self.hscroll.set)
# Buttons
self.bo_mostrar = ttk.Button(self.fr_consultas, text="Mostrar todo", width=16,
command=self.show_all)
self.bo_mostrar.grid(row=1, column=9, sticky=W)
self.bo_limpiar = ttk.Button(self.fr_consultas, text="Limpiar campos", width=16,
command=self.limpiar_campos)
self.bo_limpiar.grid(row=2, column=9, sticky=W)
self.bo_buscar = ttk.Button(self.fr_consultas, text="Buscar", width=16)
self.bo_buscar.grid(row=3, column=9, sticky=W)
self.bo_actualizar = ttk.Button(self.fr_consultas, text="Actualizar", width=16)
self.bo_actualizar.grid(row=4, column=9, sticky=W)
self.bo_eliminar = ttk.Button(self.fr_consultas, text="Eliminar", width=16)
self.bo_eliminar.grid(row=5, column=9, sticky=W)
# Padding of elements in consultas frame
for child in self.fr_consultas.winfo_children():
child.grid_configure(padx=5, pady=5)
# Ayuda frame widgets
self.la_ayuda = ttk.Label(self.fr_ayuda,
text="Licenciatura en Matemáticas Aplicadas\n\n"
"Proyecto final para la materia de Manejo de Datos.\n"
"Profesor: <NAME>\n\n"
"Autor: <NAME> (418002863)\n")
self.la_ayuda.grid(row=0, column=0)
# Padding of elements in ayuda frame
for child in self.fr_ayuda.winfo_children():
child.grid_configure(padx=5, pady=5)
# Agregar / importar frame widgets
self.la_instruccion = ttk.Label(self.fr_agregar,
text="NOTA: \n"
"Los campos marcados con * no pueden estar vacíos.\n"
"Los campos marcados con + pueden dejarse en blanco y se generan "
"automáticamente.")
self.la_instruccion.grid(row=0, column=0, pady=20)
self.lf_ag_cliente = ttk.Labelframe(self.fr_agregar, text="Cliente")
self.lf_ag_cliente.grid(row=4, column=0, rowspan=3, columnspan=8, sticky=(E, W))
self.la_ag_id_cliente = ttk.Label(self.lf_ag_cliente, text="id_cliente+")
self.la_ag_id_cliente.grid(row=1, column=1)
self.ag_id_cliente = StringVar()
self.en_ag_id_cliente = ttk.Entry(self.lf_ag_cliente, textvariable=self.ag_id_cliente)
self.en_ag_id_cliente.grid(row=1, column=2)
self.la_ag_nombre = ttk.Label(self.lf_ag_cliente, text="Nombre")
self.la_ag_nombre.grid(row=1, column=4)
self.ag_nombre = StringVar()
self.en_ag_nombre = ttk.Entry(self.lf_ag_cliente, width=35, textvariable=self.ag_nombre)
self.en_ag_nombre.grid(row=1, column=5)
self.la_ag_direccion = ttk.Label(self.lf_ag_cliente, text="Dirección")
self.la_ag_direccion.grid(row=2, column=1)
self.ag_direccion = StringVar()
self.en_ag_direccion = ttk.Entry(self.lf_ag_cliente, width=68, textvariable=self.ag_direccion)
self.en_ag_direccion.grid(row=2, column=2, columnspan=4)
self.bo_ag_cliente = ttk.Button(self.lf_ag_cliente, width=18,
text="Agregar cliente", command=self.insert_cliente)
self.bo_ag_cliente.grid(row=1, column=6)
self.bo_importar_clientes = ttk.Button(self.lf_ag_cliente, width=18,
text="Importar clientes", command=self.importar_clientes)
self.bo_importar_clientes.grid(row=2, column=6)
for child in self.lf_ag_cliente.winfo_children():
child.grid_configure(padx=5, pady=5)
self.lf_ag_vehiculo = ttk.Labelframe(self.fr_agregar, text="Vehículo")
self.lf_ag_vehiculo.grid(row=7, column=0, rowspan=3, columnspan=8, sticky=(E, W))
self.la_ag_placas = ttk.Label(self.lf_ag_vehiculo, text="Placas*")
self.la_ag_placas.grid(row=1, column=1)
self.ag_placas = StringVar()
self.en_ag_placas = ttk.Entry(self.lf_ag_vehiculo, textvariable=self.ag_placas)
self.en_ag_placas.grid(row=1, column=2)
# self.la_ag_id_factura = ttk.Label(self.lf_ag_vehiculo, text="id_factura")
# self.la_ag_id_factura.grid(row=2, column=1)
#
# self.ag_id_factura = StringVar()
# self.en_ag_id_factura = ttk.Entry(self.lf_ag_vehiculo, textvariable=self.ag_id_factura)
# self.en_ag_id_factura.grid(row=2, column=2)
self.la_ag_marca = ttk.Label(self.lf_ag_vehiculo, text="Marca")
self.la_ag_marca.grid(row=1, column=3)
self.ag_marca = StringVar()
self.en_ag_marca = ttk.Entry(self.lf_ag_vehiculo, textvariable=self.ag_marca)
self.en_ag_marca.grid(row=1, column=4)
self.la_ag_modelo = ttk.Label(self.lf_ag_vehiculo, text="Modelo")
self.la_ag_modelo.grid(row=2, column=3)
self.ag_modelo = StringVar()
self.en_ag_modelo = ttk.Entry(self.lf_ag_vehiculo, textvariable=self.ag_modelo)
self.en_ag_modelo.grid(row=2, column=4)
self.bo_ag_vehiculo = ttk.Button(self.lf_ag_vehiculo, width=18,
text="Agregar vehículo", command=self.insert_vehiculo)
self.bo_ag_vehiculo.grid(row=1, column=6)
        self.bo_importar_vehiculo = ttk.Button(self.lf_ag_vehiculo, width=18,
                                                   text="Importar vehículos",
                                                   command=self.importar_vehiculos)
self.bo_importar_vehiculo.grid(row=2, column=6)
for child in self.lf_ag_vehiculo.winfo_children():
child.grid_configure(padx=5, pady=5)
self.lf_ag_factura = ttk.Labelframe(self.fr_agregar, text="Factura")
self.lf_ag_factura.grid(row=10, column=0, rowspan=3, columnspan=8, sticky=(E, W))
self.la_ag_id_factura2 = ttk.Label(self.lf_ag_factura, text="id_factura+")
self.la_ag_id_factura2.grid(row=1, column=1)
self.ag_id_factura2 = StringVar()
self.en_ag_id_factura2 = ttk.Entry(self.lf_ag_factura, textvariable=self.ag_id_factura2)
self.en_ag_id_factura2.grid(row=1, column=2)
self.la_ag_placas2 = ttk.Label(self.lf_ag_factura, text="Placas*")
self.la_ag_placas2.grid(row=2, column=1)
self.ag_placas2 = StringVar()
self.en_ag_placas2 = ttk.Entry(self.lf_ag_factura, textvariable=self.ag_placas2)
self.en_ag_placas2.grid(row=2, column=2)
self.la_ag_costo = ttk.Label(self.lf_ag_factura, text="Costo del vehículo*")
self.la_ag_costo.grid(row=1, column=3)
self.ag_costo = StringVar()
self.en_ag_costo = ttk.Entry(self.lf_ag_factura, textvariable=self.ag_costo)
self.en_ag_costo.grid(row=1, column=4)
self.bo_ag_factura = ttk.Button(self.lf_ag_factura, width=18,
text="Agregar factura", command=self.insert_factura)
self.bo_ag_factura.grid(row=1, column=5)
        self.bo_importar_facturas = ttk.Button(self.lf_ag_factura, width=18,
                                                   text="Importar facturas",
                                                   command=self.importar_facturas)
self.bo_importar_facturas.grid(row=2, column=5)
for child in self.lf_ag_factura.winfo_children():
child.grid_configure(padx=5, pady=5)
self.lf_ag_poliza = ttk.Labelframe(self.fr_agregar, text="Póliza")
self.lf_ag_poliza.grid(row=1, column=0, rowspan=3, columnspan=8, sticky=(E, W))
self.la_ag_id_cliente2 = ttk.Label(self.lf_ag_poliza, text="id_cliente*")
self.la_ag_id_cliente2.grid(row=1, column=1)
self.ag_id_cliente2 = StringVar()
self.en_ag_id_cliente2 = ttk.Entry(self.lf_ag_poliza, textvariable=self.ag_id_cliente2)
self.en_ag_id_cliente2.grid(row=1, column=2)
self.la_ag_id_factura3 = ttk.Label(self.lf_ag_poliza, text="id_factura*")
self.la_ag_id_factura3.grid(row=2, column=1)
self.ag_id_factura3 = StringVar()
self.en_ag_id_factura3 = ttk.Entry(self.lf_ag_poliza, textvariable=self.ag_id_factura3)
self.en_ag_id_factura3.grid(row=2, column=2)
self.la_ag_costo_seguro = ttk.Label(self.lf_ag_poliza, text="Costo del seguro+")
self.la_ag_costo_seguro.grid(row=1, column=3)
self.ag_costo_seguro = StringVar()
self.en_ag_costo_seguro = ttk.Entry(self.lf_ag_poliza, textvariable=self.ag_costo_seguro)
self.en_ag_costo_seguro.grid(row=1, column=4)
self.la_ag_prima = ttk.Label(self.lf_ag_poliza, text="Prima asegurada+")
self.la_ag_prima.grid(row=2, column=3)
self.ag_prima = StringVar()
self.en_ag_prima = ttk.Entry(self.lf_ag_poliza, textvariable=self.ag_prima)
self.en_ag_prima.grid(row=2, column=4)
self.la_ag_apertura = ttk.Label(self.lf_ag_poliza, text="Fecha de apertura+")
self.la_ag_apertura.grid(row=1, column=5)
self.ag_apertura = StringVar()
self.en_ag_apertura = Datepicker(self.lf_ag_poliza, datevar=self.ag_apertura)
self.en_ag_apertura.grid(row=1, column=6)
self.la_ag_vencimiento = ttk.Label(self.lf_ag_poliza, text="Fecha de vencimiento+")
self.la_ag_vencimiento.grid(row=2, column=5)
self.ag_vencimiento = StringVar()
self.en_ag_vencimiento = Datepicker(self.lf_ag_poliza, datevar=self.ag_vencimiento)
        self.en_ag_vencimiento.grid(row=2, column=6)
self.bo_gen_poliza = ttk.Button(self.lf_ag_poliza, width=18,
text="Generar póliza", command=self.gen_poliza)
self.bo_gen_poliza.grid(row=1, column=7)
for child in self.lf_ag_poliza.winfo_children():
child.grid_configure(padx=5, pady=5)
# Padding of elements in agregar / importar frame
for child in self.fr_agregar.winfo_children():
child.grid_configure(padx=5, pady=5)
def insert_cliente(self):
data = dict()
if self.ag_id_cliente.get():
data['id_cliente'] = int(self.ag_id_cliente.get())
if self.ag_nombre.get():
data['nombre'] = self.ag_nombre.get()
if self.ag_direccion.get():
data['direccion'] = self.ag_direccion.get()
self.control.insert_cliente(**data)
self.show_new_cliente(**data)
def show_new_cliente(self, **data):
new_cliente = self.control.last_cliente(**data)
self.ag_id_cliente.set(new_cliente['id_cliente'])
self.ag_nombre.set(new_cliente['nombre'])
self.ag_direccion.set(new_cliente['direccion'])
def insert_vehiculo(self):
data = dict()
if self.ag_placas.get():
data['placas'] = self.ag_placas.get()
# if self.ag_id_factura.get():
# data['id_factura'] = int(self.ag_id_factura.get())
if self.ag_marca.get():
data['marca'] = self.ag_marca.get()
if self.ag_modelo.get():
data['modelo'] = self.ag_modelo.get()
self.control.insert_vehiculo(**data)
self.show_new_vehiculo(**data)
def show_new_vehiculo(self, **data):
new_vehiculo = self.control.last_vehiculo(**data)
self.ag_placas.set(new_vehiculo['placas'])
self.ag_marca.set(new_vehiculo['marca'])
self.ag_modelo.set(new_vehiculo['modelo'])
def insert_factura(self):
data = dict()
if self.ag_id_factura2.get():
data['id_factura'] = int(self.ag_id_factura2.get())
if self.ag_placas2.get():
data['placas'] = self.ag_placas2.get()
if self.ag_costo.get():
data['costo_vehiculo'] = float(self.ag_costo.get())
self.control.insert_factura(**data)
self.show_new_factura(**data)
def show_new_factura(self, **data):
new_factura = self.control.last_factura(**data)
self.ag_id_factura2.set(new_factura['id_factura'])
self.ag_placas2.set(new_factura['placas'])
self.ag_costo.set(new_factura['costo_vehiculo'])
def gen_poliza(self):
data = dict()
if self.ag_id_cliente2.get():
data['id_cliente'] = int(self.ag_id_cliente2.get())
if self.ag_id_factura3.get():
data['id_factura'] = int(self.ag_id_factura3.get())
if self.ag_prima.get():
data['prima_asegurada'] = float(self.ag_prima.get())
if self.ag_costo_seguro.get():
data['costo_seguro'] = float(self.ag_costo_seguro.get())
if self.ag_apertura.get():
data['fecha_apertura'] = self.ag_apertura.get()
if self.ag_vencimiento.get():
data['fecha_vencimiento'] = self.ag_vencimiento.get()
self.control.gen_poliza(**data)
self.show_new_poliza(**data)
def show_new_poliza(self, **data):
new_poliza = self.control.last_poliza(**data)
self.ag_id_cliente2.set(new_poliza['id_cliente'])
self.ag_id_factura3.set(new_poliza['id_factura'])
self.ag_prima.set(new_poliza['prima_asegurada'])
self.ag_costo_seguro.set(new_poliza['costo_seguro'])
self.ag_apertura.set(new_poliza['fecha_apertura'])
self.ag_vencimiento.set(new_poliza['fecha_vencimiento'])
def importar_clientes(self):
path = askopenfilename()
self.control.insert_clientes(path)
def importar_vehiculos(self):
path = askopenfilename()
self.control.insert_vehiculos(path)
def importar_facturas(self):
path = askopenfilename()
self.control.insert_facturas(path)
def get_active_fields(self):
active_fields = dict()
active_fields["id_cliente"] = self.val_id_cliente.get()
active_fields["nombre"] = self.val_nombre.get()
active_fields["direccion"] = self.val_direccion.get()
active_fields["placas"] = self.val_placas.get()
active_fields["marca"] = self.val_marca.get()
active_fields["modelo"] = self.val_modelo.get()
active_fields["id_factura"] = self.val_id_factura.get()
active_fields["costo_vehiculo"] = self.val_costo_vehiculo.get()
active_fields["prima_asegurada"] = self.val_prima_asegurada.get()
active_fields["costo_seguro"] = self.val_costo_seguro.get()
active_fields["fecha_apertura"] = self.val_fecha_apertura.get()
active_fields["fecha_vencimiento"] = self.val_fecha_vencimiento.get()
return active_fields
def show_all(self):
self.clear_results()
        # Set columns
all_fields = {
# Clientes
"id_cliente": True,
"nombre": True,
"direccion": True,
# Facturas
"id_factura": True,
"costo_vehiculo": True,
# Pólizas
"costo_seguro": True,
"prima_asegurada": True,
"fecha_apertura": True,
"fecha_vencimiento": True,
# Vehículos
"placas": True,
"marca": True,
"modelo": True
}
self.set_columnas(all_fields)
self.fields_current_query = all_fields
# Query
rows = self.control.query_all(all_fields)
        # Add rows
for i, row in enumerate(rows):
self.tabla.insert("", END, text=str(i+1), values=row)
def set_columnas(self, fields):
# Set columns
self.tabla.configure(columns=tuple(fields))
for column in fields.keys():
self.tabla.column(column, width=15)
# Set headings
self.tabla.heading("#0", text="No.")
if fields.get("id_cliente", False):
self.tabla.heading("id_cliente", text="id_cliente")
if fields.get("nombre", False):
self.tabla.heading("nombre", text="Nombre")
if fields.get("direccion", False):
self.tabla.heading("direccion", text="Dirección")
if fields.get("placas", False):
self.tabla.heading("placas", text="Placas")
if fields.get("modelo", False):
self.tabla.heading("modelo", text="Modelo")
if fields.get("marca", False):
self.tabla.heading("marca", text="Marca")
if fields.get("id_factura", False):
self.tabla.heading("id_factura", text="id_factura")
if fields.get("costo_vehiculo", False):
self.tabla.heading("costo_vehiculo", text="Costo del vehículo")
if fields.get("prima_asegurada", False):
self.tabla.heading("prima_asegurada", text="Prima asegurada")
if fields.get("costo_seguro", False):
self.tabla.heading("costo_seguro", text="Costo del seguro")
if fields.get("fecha_apertura", False):
self.tabla.heading("fecha_apertura", text="Fecha de apertura")
if fields.get("fecha_vencimiento", False):
self.tabla.heading("fecha_vencimiento", text="Fecha de vencimiento")
def clear_results(self):
for child in self.tabla.get_children():
self.tabla.delete(child)
def limpiar_campos(self):
        # Clear client fields
self.val_id_cliente.set(False)
self.id_cliente.set("")
self.val_nombre.set(False)
self.nombre.set("")
self.val_direccion.set(False)
self.direccion.set("")
        # Clear vehicle fields
self.val_placas.set(False)
self.placas.set("")
self.val_marca.set(False)
self.marca.set("")
self.val_modelo.set(False)
self.modelo.set("")
        # Clear invoice fields
self.val_id_factura.set(False)
self.id_factura.set("")
self.val_costo_vehiculo.set(False)
self.costo_vehiculo.set("")
        # Clear policy fields
self.val_costo_seguro.set(False)
self.costo_seguro.set("")
self.val_prima_asegurada.set(False)
self.prima_asegurada.set("")
self.val_fecha_apertura.set(False)
self.fecha_apertura.set("")
self.val_fecha_vencimiento.set(False)
self.fecha_vencimiento.set("")
def populate_fields(self, event):
row_id = self.tabla.selection()[0]
if self.fields_current_query["id_cliente"]:
self.id_cliente.set(str(self.tabla.set(row_id, "id_cliente")))
if self.fields_current_query["nombre"]:
self.nombre.set(str(self.tabla.set(row_id, "nombre")))
if self.fields_current_query["direccion"]:
self.direccion.set(str(self.tabla.set(row_id, "direccion")))
if self.fields_current_query["placas"]:
self.placas.set(str(self.tabla.set(row_id, "placas")))
if self.fields_current_query["marca"]:
self.marca.set(str(self.tabla.set(row_id, "marca")))
if self.fields_current_query["modelo"]:
self.modelo.set(str(self.tabla.set(row_id, "modelo")))
if self.fields_current_query["id_factura"]:
self.id_factura.set(str(self.tabla.set(row_id, "id_factura")))
if self.fields_current_query["costo_vehiculo"]:
self.costo_vehiculo.set(str(self.tabla.set(row_id, "costo_vehiculo")))
if self.fields_current_query["prima_asegurada"]:
self.prima_asegurada.set(str(self.tabla.set(row_id, "prima_asegurada")))
if self.fields_current_query["costo_seguro"]:
self.costo_seguro.set(str(self.tabla.set(row_id, "costo_seguro")))
if self.fields_current_query["fecha_apertura"]:
self.fecha_apertura.set(str(self.tabla.set(row_id, "fecha_apertura")))
if self.fields_current_query["fecha_vencimiento"]:
self.fecha_vencimiento.set(str(self.tabla.set(row_id, "fecha_vencimiento")))
if __name__ == '__main__':
root = Tk()
GUI(root)
root.mainloop()
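# Hedged standalone sketch of the Treeview pattern used in populate_fields
# above (selection binding plus per-cell reads); the names are illustrative:
#   tree = ttk.Treeview(root, columns=("placas",), selectmode=BROWSE)
#   tree.heading("placas", text="Placas")
#   tree.insert("", END, text="1", values=("ABC-123",))
#   def on_select(event):
#       item = tree.selection()[0]        # id of the selected row
#       print(tree.set(item, "placas"))   # read one cell of that row
#   tree.bind("<<TreeviewSelect>>", on_select)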
|
[
"tkinter.ttk.Label",
"tkinter.ttk.Entry",
"tkinter.ttk.Labelframe",
"tkinter.ttk.Scrollbar",
"View.Datepicker.Datepicker",
"tkinter.filedialog.askopenfilename",
"tkinter.ttk.Frame",
"Controller.Controller.Controller",
"tkinter.ttk.Treeview",
"tkinter.ttk.Button",
"tkinter.ttk.Checkbutton",
"tkinter.ttk.Notebook",
"tkinter.ttk.LabelFrame"
] |
[((341, 353), 'Controller.Controller.Controller', 'Controller', ([], {}), '()\n', (351, 353), False, 'from Controller.Controller import Controller\n'), ((656, 686), 'tkinter.ttk.Notebook', 'ttk.Notebook', (['self'], {'padding': '(10)'}), '(self, padding=10)\n', (668, 686), False, 'from tkinter import ttk\n'), ((807, 831), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.notebook'], {}), '(self.notebook)\n', (816, 831), False, 'from tkinter import ttk\n'), ((979, 1003), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.notebook'], {}), '(self.notebook)\n', (988, 1003), False, 'from tkinter import ttk\n'), ((1133, 1157), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.notebook'], {}), '(self.notebook)\n', (1142, 1157), False, 'from tkinter import ttk\n'), ((1270, 1319), 'tkinter.ttk.Labelframe', 'ttk.Labelframe', (['self.fr_consultas'], {'text': '"""Cliente"""'}), "(self.fr_consultas, text='Cliente')\n", (1284, 1319), False, 'from tkinter import ttk\n'), ((1464, 1526), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_cliente'], {'variable': 'self.val_id_cliente'}), '(self.lf_cliente, variable=self.val_id_cliente)\n', (1479, 1526), False, 'from tkinter import ttk\n'), ((1606, 1651), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_cliente'], {'text': '"""id_cliente"""'}), "(self.lf_cliente, text='id_cliente')\n", (1615, 1651), False, 'from tkinter import ttk\n'), ((1769, 1825), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_cliente'], {'textvariable': 'self.id_cliente'}), '(self.lf_cliente, textvariable=self.id_cliente)\n', (1778, 1825), False, 'from tkinter import ttk\n'), ((1941, 1999), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_cliente'], {'variable': 'self.val_nombre'}), '(self.lf_cliente, variable=self.val_nombre)\n', (1956, 1999), False, 'from tkinter import ttk\n'), ((2071, 2112), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_cliente'], {'text': '"""Nombre"""'}), "(self.lf_cliente, text='Nombre')\n", (2080, 2112), False, 'from tkinter import ttk\n'), ((2218, 2280), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_cliente'], {'width': '(34)', 'textvariable': 'self.nombre'}), '(self.lf_cliente, width=34, textvariable=self.nombre)\n', (2227, 2280), False, 'from tkinter import ttk\n'), ((2397, 2458), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_cliente'], {'variable': 'self.val_direccion'}), '(self.lf_cliente, variable=self.val_direccion)\n', (2412, 2458), False, 'from tkinter import ttk\n'), ((2536, 2580), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_cliente'], {'text': '"""Dirección"""'}), "(self.lf_cliente, text='Dirección')\n", (2545, 2580), False, 'from tkinter import ttk\n'), ((2695, 2760), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_cliente'], {'width': '(72)', 'textvariable': 'self.direccion'}), '(self.lf_cliente, width=72, textvariable=self.direccion)\n', (2704, 2760), False, 'from tkinter import ttk\n'), ((2973, 3022), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self.fr_consultas'], {'text': '"""Factura"""'}), "(self.fr_consultas, text='Factura')\n", (2987, 3022), False, 'from tkinter import ttk\n'), ((3167, 3229), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_factura'], {'variable': 'self.val_id_factura'}), '(self.lf_factura, variable=self.val_id_factura)\n', (3182, 3229), False, 'from tkinter import ttk\n'), ((3309, 3354), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_factura'], {'text': '"""id_factura"""'}), "(self.lf_factura, text='id_factura')\n", (3318, 3354), False, 'from tkinter import ttk\n'), ((3472, 3528), 'tkinter.ttk.Entry', 'ttk.Entry', 
(['self.lf_factura'], {'textvariable': 'self.id_factura'}), '(self.lf_factura, textvariable=self.id_factura)\n', (3481, 3528), False, 'from tkinter import ttk\n'), ((3660, 3726), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_factura'], {'variable': 'self.val_costo_vehiculo'}), '(self.lf_factura, variable=self.val_costo_vehiculo)\n', (3675, 3726), False, 'from tkinter import ttk\n'), ((3814, 3872), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_factura'], {'text': '"""Costo del\nautomóvil"""'}), '(self.lf_factura, text="""Costo del\nautomóvil""")\n', (3823, 3872), False, 'from tkinter import ttk\n'), ((3999, 4059), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_factura'], {'textvariable': 'self.costo_vehiculo'}), '(self.lf_factura, textvariable=self.costo_vehiculo)\n', (4008, 4059), False, 'from tkinter import ttk\n'), ((4266, 4316), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self.fr_consultas'], {'text': '"""Vehículo"""'}), "(self.fr_consultas, text='Vehículo')\n", (4280, 4316), False, 'from tkinter import ttk\n'), ((4454, 4513), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_vehiculo'], {'variable': 'self.val_placas'}), '(self.lf_vehiculo, variable=self.val_placas)\n', (4469, 4513), False, 'from tkinter import ttk\n'), ((4585, 4627), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_vehiculo'], {'text': '"""Placas"""'}), "(self.lf_vehiculo, text='Placas')\n", (4594, 4627), False, 'from tkinter import ttk\n'), ((4733, 4786), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_vehiculo'], {'textvariable': 'self.placas'}), '(self.lf_vehiculo, textvariable=self.placas)\n', (4742, 4786), False, 'from tkinter import ttk\n'), ((4896, 4954), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_vehiculo'], {'variable': 'self.val_marca'}), '(self.lf_vehiculo, variable=self.val_marca)\n', (4911, 4954), False, 'from tkinter import ttk\n'), ((5024, 5065), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_vehiculo'], {'text': '"""Marca"""'}), "(self.lf_vehiculo, text='Marca')\n", (5033, 5065), False, 'from tkinter import ttk\n'), ((5168, 5220), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_vehiculo'], {'textvariable': 'self.marca'}), '(self.lf_vehiculo, textvariable=self.marca)\n', (5177, 5220), False, 'from tkinter import ttk\n'), ((5331, 5390), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_vehiculo'], {'variable': 'self.val_modelo'}), '(self.lf_vehiculo, variable=self.val_modelo)\n', (5346, 5390), False, 'from tkinter import ttk\n'), ((5462, 5504), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_vehiculo'], {'text': '"""Modelo"""'}), "(self.lf_vehiculo, text='Modelo')\n", (5471, 5504), False, 'from tkinter import ttk\n'), ((5610, 5663), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_vehiculo'], {'textvariable': 'self.modelo'}), '(self.lf_vehiculo, textvariable=self.modelo)\n', (5619, 5663), False, 'from tkinter import ttk\n'), ((5859, 5907), 'tkinter.ttk.LabelFrame', 'ttk.LabelFrame', (['self.fr_consultas'], {'text': '"""Póliza"""'}), "(self.fr_consultas, text='Póliza')\n", (5873, 5907), False, 'from tkinter import ttk\n'), ((6055, 6118), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_poliza'], {'variable': 'self.val_costo_seguro'}), '(self.lf_poliza, variable=self.val_costo_seguro)\n', (6070, 6118), False, 'from tkinter import ttk\n'), ((6202, 6253), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_poliza'], {'text': '"""Costo del\nseguro"""'}), "(self.lf_poliza, text='Costo del\\nseguro')\n", (6211, 6253), False, 'from tkinter import ttk\n'), ((6377, 6434), 'tkinter.ttk.Entry', 
'ttk.Entry', (['self.lf_poliza'], {'textvariable': 'self.costo_seguro'}), '(self.lf_poliza, textvariable=self.costo_seguro)\n', (6386, 6434), False, 'from tkinter import ttk\n'), ((6570, 6636), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_poliza'], {'variable': 'self.val_prima_asegurada'}), '(self.lf_poliza, variable=self.val_prima_asegurada)\n', (6585, 6636), False, 'from tkinter import ttk\n'), ((6726, 6775), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_poliza'], {'text': '"""Prima asegurada"""'}), "(self.lf_poliza, text='Prima asegurada')\n", (6735, 6775), False, 'from tkinter import ttk\n'), ((6908, 6968), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_poliza'], {'textvariable': 'self.prima_asegurada'}), '(self.lf_poliza, textvariable=self.prima_asegurada)\n', (6917, 6968), False, 'from tkinter import ttk\n'), ((7105, 7170), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_poliza'], {'variable': 'self.val_fecha_apertura'}), '(self.lf_poliza, variable=self.val_fecha_apertura)\n', (7120, 7170), False, 'from tkinter import ttk\n'), ((7258, 7313), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_poliza'], {'text': '"""Fecha de\napertura"""'}), '(self.lf_poliza, text="""Fecha de\napertura""")\n', (7267, 7313), False, 'from tkinter import ttk\n'), ((7440, 7495), 'View.Datepicker.Datepicker', 'Datepicker', (['self.lf_poliza'], {'datevar': 'self.fecha_apertura'}), '(self.lf_poliza, datevar=self.fecha_apertura)\n', (7450, 7495), False, 'from View.Datepicker import Datepicker\n'), ((7636, 7704), 'tkinter.ttk.Checkbutton', 'ttk.Checkbutton', (['self.lf_poliza'], {'variable': 'self.val_fecha_vencimiento'}), '(self.lf_poliza, variable=self.val_fecha_vencimiento)\n', (7651, 7704), False, 'from tkinter import ttk\n'), ((7798, 7856), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_poliza'], {'text': '"""Fecha de\nvencimiento"""'}), '(self.lf_poliza, text="""Fecha de\nvencimiento""")\n', (7807, 7856), False, 'from tkinter import ttk\n'), ((7992, 8050), 'View.Datepicker.Datepicker', 'Datepicker', (['self.lf_poliza'], {'datevar': 'self.fecha_vencimiento'}), '(self.lf_poliza, datevar=self.fecha_vencimiento)\n', (8002, 8050), False, 'from View.Datepicker import Datepicker\n'), ((8252, 8303), 'tkinter.ttk.Frame', 'ttk.Frame', (['self.fr_consultas'], {'width': '(900)', 'height': '(180)'}), '(self.fr_consultas, width=900, height=180)\n', (8261, 8303), False, 'from tkinter import ttk\n'), ((8436, 8493), 'tkinter.ttk.Treeview', 'ttk.Treeview', (['self.fr_tabla'], {'height': '(12)', 'selectmode': 'BROWSE'}), '(self.fr_tabla, height=12, selectmode=BROWSE)\n', (8448, 8493), False, 'from tkinter import ttk\n'), ((8665, 8710), 'tkinter.ttk.Scrollbar', 'ttk.Scrollbar', (['self.fr_tabla'], {'orient': 'VERTICAL'}), '(self.fr_tabla, orient=VERTICAL)\n', (8678, 8710), False, 'from tkinter import ttk\n'), ((8803, 8850), 'tkinter.ttk.Scrollbar', 'ttk.Scrollbar', (['self.fr_tabla'], {'orient': 'HORIZONTAL'}), '(self.fr_tabla, orient=HORIZONTAL)\n', (8816, 8850), False, 'from tkinter import ttk\n'), ((9238, 9326), 'tkinter.ttk.Button', 'ttk.Button', (['self.fr_consultas'], {'text': '"""Mostrar todo"""', 'width': '(16)', 'command': 'self.show_all'}), "(self.fr_consultas, text='Mostrar todo', width=16, command=self.\n show_all)\n", (9248, 9326), False, 'from tkinter import ttk\n'), ((9442, 9538), 'tkinter.ttk.Button', 'ttk.Button', (['self.fr_consultas'], {'text': '"""Limpiar campos"""', 'width': '(16)', 'command': 'self.limpiar_campos'}), "(self.fr_consultas, text='Limpiar campos', width=16, command=self\n 
.limpiar_campos)\n", (9452, 9538), False, 'from tkinter import ttk\n'), ((9653, 9707), 'tkinter.ttk.Button', 'ttk.Button', (['self.fr_consultas'], {'text': '"""Buscar"""', 'width': '(16)'}), "(self.fr_consultas, text='Buscar', width=16)\n", (9663, 9707), False, 'from tkinter import ttk\n'), ((9793, 9851), 'tkinter.ttk.Button', 'ttk.Button', (['self.fr_consultas'], {'text': '"""Actualizar"""', 'width': '(16)'}), "(self.fr_consultas, text='Actualizar', width=16)\n", (9803, 9851), False, 'from tkinter import ttk\n'), ((9939, 9995), 'tkinter.ttk.Button', 'ttk.Button', (['self.fr_consultas'], {'text': '"""Eliminar"""', 'width': '(16)'}), "(self.fr_consultas, text='Eliminar', width=16)\n", (9949, 9995), False, 'from tkinter import ttk\n'), ((10264, 10445), 'tkinter.ttk.Label', 'ttk.Label', (['self.fr_ayuda'], {'text': '"""Licenciatura en Matemáticas Aplicadas\n\nProyecto final para la materia de Manejo de Datos.\nProfesor: <NAME>\n\nAutor: <NAME> (418002863)\n"""'}), '(self.fr_ayuda, text=\n """Licenciatura en Matemáticas Aplicadas\n\nProyecto final para la materia de Manejo de Datos.\nProfesor: <NAME>\n\nAutor: <NAME> (418002863)\n"""\n )\n', (10273, 10445), False, 'from tkinter import ttk\n'), ((10864, 11050), 'tkinter.ttk.Label', 'ttk.Label', (['self.fr_agregar'], {'text': '"""NOTA: \nLos campos marcados con * no pueden estar vacíos.\nLos campos marcados con + pueden dejarse en blanco y se generan automáticamente."""'}), '(self.fr_agregar, text=\n """NOTA: \nLos campos marcados con * no pueden estar vacíos.\nLos campos marcados con + pueden dejarse en blanco y se generan automáticamente."""\n )\n', (10873, 11050), False, 'from tkinter import ttk\n'), ((11312, 11359), 'tkinter.ttk.Labelframe', 'ttk.Labelframe', (['self.fr_agregar'], {'text': '"""Cliente"""'}), "(self.fr_agregar, text='Cliente')\n", (11326, 11359), False, 'from tkinter import ttk\n'), ((11482, 11531), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_cliente'], {'text': '"""id_cliente+"""'}), "(self.lf_ag_cliente, text='id_cliente+')\n", (11491, 11531), False, 'from tkinter import ttk\n'), ((11658, 11720), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_cliente'], {'textvariable': 'self.ag_id_cliente'}), '(self.lf_ag_cliente, textvariable=self.ag_id_cliente)\n', (11667, 11720), False, 'from tkinter import ttk\n'), ((11802, 11846), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_cliente'], {'text': '"""Nombre"""'}), "(self.lf_ag_cliente, text='Nombre')\n", (11811, 11846), False, 'from tkinter import ttk\n'), ((11961, 12029), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_cliente'], {'width': '(35)', 'textvariable': 'self.ag_nombre'}), '(self.lf_ag_cliente, width=35, textvariable=self.ag_nombre)\n', (11970, 12029), False, 'from tkinter import ttk\n'), ((12110, 12157), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_cliente'], {'text': '"""Dirección"""'}), "(self.lf_ag_cliente, text='Dirección')\n", (12119, 12157), False, 'from tkinter import ttk\n'), ((12281, 12352), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_cliente'], {'width': '(68)', 'textvariable': 'self.ag_direccion'}), '(self.lf_ag_cliente, width=68, textvariable=self.ag_direccion)\n', (12290, 12352), False, 'from tkinter import ttk\n'), ((12448, 12546), 'tkinter.ttk.Button', 'ttk.Button', (['self.lf_ag_cliente'], {'width': '(18)', 'text': '"""Agregar cliente"""', 'command': 'self.insert_cliente'}), "(self.lf_ag_cliente, width=18, text='Agregar cliente', command=\n self.insert_cliente)\n", (12458, 12546), False, 'from tkinter import ttk\n'), ((12668, 12771), 
'tkinter.ttk.Button', 'ttk.Button', (['self.lf_ag_cliente'], {'width': '(18)', 'text': '"""Importar clientes"""', 'command': 'self.importar_clientes'}), "(self.lf_ag_cliente, width=18, text='Importar clientes', command=\n self.importar_clientes)\n", (12678, 12771), False, 'from tkinter import ttk\n'), ((13009, 13057), 'tkinter.ttk.Labelframe', 'ttk.Labelframe', (['self.fr_agregar'], {'text': '"""Vehículo"""'}), "(self.fr_agregar, text='Vehículo')\n", (13023, 13057), False, 'from tkinter import ttk\n'), ((13177, 13223), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_vehiculo'], {'text': '"""Placas*"""'}), "(self.lf_ag_vehiculo, text='Placas*')\n", (13186, 13223), False, 'from tkinter import ttk\n'), ((13338, 13397), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_vehiculo'], {'textvariable': 'self.ag_placas'}), '(self.lf_ag_vehiculo, textvariable=self.ag_placas)\n', (13347, 13397), False, 'from tkinter import ttk\n'), ((13818, 13862), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_vehiculo'], {'text': '"""Marca"""'}), "(self.lf_ag_vehiculo, text='Marca')\n", (13827, 13862), False, 'from tkinter import ttk\n'), ((13974, 14032), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_vehiculo'], {'textvariable': 'self.ag_marca'}), '(self.lf_ag_vehiculo, textvariable=self.ag_marca)\n', (13983, 14032), False, 'from tkinter import ttk\n'), ((14109, 14154), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_vehiculo'], {'text': '"""Modelo"""'}), "(self.lf_ag_vehiculo, text='Modelo')\n", (14118, 14154), False, 'from tkinter import ttk\n'), ((14269, 14328), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_vehiculo'], {'textvariable': 'self.ag_modelo'}), '(self.lf_ag_vehiculo, textvariable=self.ag_modelo)\n', (14278, 14328), False, 'from tkinter import ttk\n'), ((14408, 14509), 'tkinter.ttk.Button', 'ttk.Button', (['self.lf_ag_vehiculo'], {'width': '(18)', 'text': '"""Agregar vehículo"""', 'command': 'self.insert_vehiculo'}), "(self.lf_ag_vehiculo, width=18, text='Agregar vehículo', command=\n self.insert_vehiculo)\n", (14418, 14509), False, 'from tkinter import ttk\n'), ((14633, 14701), 'tkinter.ttk.Button', 'ttk.Button', (['self.lf_ag_vehiculo'], {'width': '(18)', 'text': '"""Importar vehículos"""'}), "(self.lf_ag_vehiculo, width=18, text='Importar vehículos')\n", (14643, 14701), False, 'from tkinter import ttk\n'), ((14944, 14991), 'tkinter.ttk.Labelframe', 'ttk.Labelframe', (['self.fr_agregar'], {'text': '"""Factura"""'}), "(self.fr_agregar, text='Factura')\n", (14958, 14991), False, 'from tkinter import ttk\n'), ((15116, 15165), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_factura'], {'text': '"""id_factura+"""'}), "(self.lf_ag_factura, text='id_factura+')\n", (15125, 15165), False, 'from tkinter import ttk\n'), ((15295, 15358), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_factura'], {'textvariable': 'self.ag_id_factura2'}), '(self.lf_ag_factura, textvariable=self.ag_id_factura2)\n', (15304, 15358), False, 'from tkinter import ttk\n'), ((15442, 15487), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_factura'], {'text': '"""Placas*"""'}), "(self.lf_ag_factura, text='Placas*')\n", (15451, 15487), False, 'from tkinter import ttk\n'), ((15605, 15664), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_factura'], {'textvariable': 'self.ag_placas2'}), '(self.lf_ag_factura, textvariable=self.ag_placas2)\n', (15614, 15664), False, 'from tkinter import ttk\n'), ((15742, 15799), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_factura'], {'text': '"""Costo del vehículo*"""'}), "(self.lf_ag_factura, text='Costo 
del vehículo*')\n", (15751, 15799), False, 'from tkinter import ttk\n'), ((15911, 15968), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_factura'], {'textvariable': 'self.ag_costo'}), '(self.lf_ag_factura, textvariable=self.ag_costo)\n', (15920, 15968), False, 'from tkinter import ttk\n'), ((16046, 16144), 'tkinter.ttk.Button', 'ttk.Button', (['self.lf_ag_factura'], {'width': '(18)', 'text': '"""Agregar factura"""', 'command': 'self.insert_factura'}), "(self.lf_ag_factura, width=18, text='Agregar factura', command=\n self.insert_factura)\n", (16056, 16144), False, 'from tkinter import ttk\n'), ((16266, 16332), 'tkinter.ttk.Button', 'ttk.Button', (['self.lf_ag_factura'], {'width': '(18)', 'text': '"""Importar facturas"""'}), "(self.lf_ag_factura, width=18, text='Importar facturas')\n", (16276, 16332), False, 'from tkinter import ttk\n'), ((16573, 16619), 'tkinter.ttk.Labelframe', 'ttk.Labelframe', (['self.fr_agregar'], {'text': '"""Póliza"""'}), "(self.fr_agregar, text='Póliza')\n", (16587, 16619), False, 'from tkinter import ttk\n'), ((16742, 16790), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_poliza'], {'text': '"""id_cliente*"""'}), "(self.lf_ag_poliza, text='id_cliente*')\n", (16751, 16790), False, 'from tkinter import ttk\n'), ((16920, 16982), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_poliza'], {'textvariable': 'self.ag_id_cliente2'}), '(self.lf_ag_poliza, textvariable=self.ag_id_cliente2)\n', (16929, 16982), False, 'from tkinter import ttk\n'), ((17070, 17118), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_poliza'], {'text': '"""id_factura*"""'}), "(self.lf_ag_poliza, text='id_factura*')\n", (17079, 17118), False, 'from tkinter import ttk\n'), ((17248, 17310), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_poliza'], {'textvariable': 'self.ag_id_factura3'}), '(self.lf_ag_poliza, textvariable=self.ag_id_factura3)\n', (17257, 17310), False, 'from tkinter import ttk\n'), ((17399, 17453), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_poliza'], {'text': '"""Costo del seguro+"""'}), "(self.lf_ag_poliza, text='Costo del seguro+')\n", (17408, 17453), False, 'from tkinter import ttk\n'), ((17586, 17649), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_poliza'], {'textvariable': 'self.ag_costo_seguro'}), '(self.lf_ag_poliza, textvariable=self.ag_costo_seguro)\n', (17595, 17649), False, 'from tkinter import ttk\n'), ((17732, 17785), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_poliza'], {'text': '"""Prima asegurada+"""'}), "(self.lf_ag_poliza, text='Prima asegurada+')\n", (17741, 17785), False, 'from tkinter import ttk\n'), ((17897, 17953), 'tkinter.ttk.Entry', 'ttk.Entry', (['self.lf_ag_poliza'], {'textvariable': 'self.ag_prima'}), '(self.lf_ag_poliza, textvariable=self.ag_prima)\n', (17906, 17953), False, 'from tkinter import ttk\n'), ((18032, 18087), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_poliza'], {'text': '"""Fecha de apertura+"""'}), "(self.lf_ag_poliza, text='Fecha de apertura+')\n", (18041, 18087), False, 'from tkinter import ttk\n'), ((18208, 18263), 'View.Datepicker.Datepicker', 'Datepicker', (['self.lf_ag_poliza'], {'datevar': 'self.ag_apertura'}), '(self.lf_ag_poliza, datevar=self.ag_apertura)\n', (18218, 18263), False, 'from View.Datepicker import Datepicker\n'), ((18348, 18406), 'tkinter.ttk.Label', 'ttk.Label', (['self.lf_ag_poliza'], {'text': '"""Fecha de vencimiento+"""'}), "(self.lf_ag_poliza, text='Fecha de vencimiento+')\n", (18357, 18406), False, 'from tkinter import ttk\n'), ((18536, 18594), 'View.Datepicker.Datepicker', 'Datepicker', 
(['self.lf_ag_poliza'], {'datevar': 'self.ag_vencimiento'}), '(self.lf_ag_poliza, datevar=self.ag_vencimiento)\n', (18546, 18594), False, 'from View.Datepicker import Datepicker\n'), ((18679, 18771), 'tkinter.ttk.Button', 'ttk.Button', (['self.lf_ag_poliza'], {'width': '(18)', 'text': '"""Generar póliza"""', 'command': 'self.gen_poliza'}), "(self.lf_ag_poliza, width=18, text='Generar póliza', command=self\n .gen_poliza)\n", (18689, 18771), False, 'from tkinter import ttk\n'), ((22457, 22474), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {}), '()\n', (22472, 22474), False, 'from tkinter.filedialog import askopenfilename\n'), ((22568, 22585), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {}), '()\n', (22583, 22585), False, 'from tkinter.filedialog import askopenfilename\n'), ((22679, 22696), 'tkinter.filedialog.askopenfilename', 'askopenfilename', ([], {}), '()\n', (22694, 22696), False, 'from tkinter.filedialog import askopenfilename\n')]
|
#!/usr/bin/python3
"""
Given four lists A, B, C, D of integer values, compute how many tuples (i, j,
k, l) there are such that A[i] + B[j] + C[k] + D[l] is zero.
To make problem a bit easier, all A, B, C, D have same length of N where
0 ≤ N ≤ 500. All integers are in the range of -2^28 to 2^28 - 1 and the result
is guaranteed to be at most 2^31 - 1.
Example:
Input:
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
Output:
2
Explanation:
The two tuples are:
1. (0, 0, 0, 1) -> A[0] + B[0] + C[0] + D[1] = 1 + (-2) + (-1) + 2 = 0
2. (1, 1, 0, 0) -> A[1] + B[1] + C[0] + D[0] = 2 + (-1) + (-1) + 0 = 0
"""
from collections import defaultdict
class Solution:
def fourSumCount(self, A, B, C, D):
"""
Brute force with map: O(N^3)
O(N^3) is pretty large, O(N^2) or O(N log N)?
O(N^2) to sum cartesian product (A, B) to construct index
similar to C, D.
Then index loop up
:type A: List[int]
:type B: List[int]
:type C: List[int]
:type D: List[int]
:rtype: int
"""
N = len(A)
AB = defaultdict(int)
CD = defaultdict(int)
for i in range(N):
for j in range(N):
AB[A[i] + B[j]] += 1
CD[C[i] + D[j]] += 1
ret = 0
# O(N^2)
for gross, count in AB.items():
target = 0 - gross
ret += count * CD[target]
return ret
if __name__ == "__main__":
A = [ 1, 2]
B = [-2,-1]
C = [-1, 2]
D = [ 0, 2]
assert Solution().fourSumCount(A, B, C, D) == 2
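    # Hedged equivalent sketch (not in the original solution) using
    # Counter + product: the same O(N^2) meet-in-the-middle idea as above.
    from collections import Counter
    from itertools import product
    AB = Counter(a + b for a, b in product(A, B))
    assert sum(AB[-(c + d)] for c, d in product(C, D)) == 2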
|
[
"collections.defaultdict"
] |
[((1100, 1116), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1111, 1116), False, 'from collections import defaultdict\n'), ((1130, 1146), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (1141, 1146), False, 'from collections import defaultdict\n')]
|
from django.shortcuts import render
from moviesapp.models import Movies
from moviesapp.forms import MoviesForm
def index(request):
return render(request, 'moviesapp/index.html')
def moviesList(request):
    movie_list = Movies.objects.all()
    movies_dict = {'movies': movie_list}
    return render(request, 'moviesapp/moviesList.html', movies_dict)
def addMovies(request):
    moviesForm = MoviesForm()
    movies_dict = {'movies': moviesForm}
    if request.method == 'POST':
        moviesForm = MoviesForm(request.POST)
        movies_dict = {'movies': moviesForm}
        if moviesForm.is_valid():
            moviesForm.save()
            # Report success only when the form actually validated and saved
            movies_dict['success'] = "Successful Movie Registration"
            return render(request, 'moviesapp/addMovies.html', movies_dict)
    return render(request, 'moviesapp/addMovies.html', movies_dict)
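# Hedged wiring sketch (this would live in moviesapp/urls.py; the route paths
# and names below are assumptions, not taken from the source):
#   from django.urls import path
#   from moviesapp import views
#   urlpatterns = [
#       path('', views.index, name='index'),
#       path('movies/', views.moviesList, name='moviesList'),
#       path('movies/add/', views.addMovies, name='addMovies'),
#   ]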
|
[
"django.shortcuts.render",
"moviesapp.forms.MoviesForm",
"moviesapp.models.Movies.objects.all"
] |
[((143, 182), 'django.shortcuts.render', 'render', (['request', '"""moviesapp/index.html"""'], {}), "(request, 'moviesapp/index.html')\n", (149, 182), False, 'from django.shortcuts import render\n'), ((224, 244), 'moviesapp.models.Movies.objects.all', 'Movies.objects.all', ([], {}), '()\n', (242, 244), False, 'from moviesapp.models import Movies\n'), ((294, 351), 'django.shortcuts.render', 'render', (['request', '"""moviesapp/moviesList.html"""', 'movies_dict'], {}), "(request, 'moviesapp/moviesList.html', movies_dict)\n", (300, 351), False, 'from django.shortcuts import render\n'), ((441, 453), 'moviesapp.forms.MoviesForm', 'MoviesForm', ([], {}), '()\n', (451, 453), False, 'from moviesapp.forms import MoviesForm\n'), ((777, 833), 'django.shortcuts.render', 'render', (['request', '"""moviesapp/addMovies.html"""', 'movies_dict'], {}), "(request, 'moviesapp/addMovies.html', movies_dict)\n", (783, 833), False, 'from django.shortcuts import render\n'), ((542, 566), 'moviesapp.forms.MoviesForm', 'MoviesForm', (['request.POST'], {}), '(request.POST)\n', (552, 566), False, 'from moviesapp.forms import MoviesForm\n'), ((710, 766), 'django.shortcuts.render', 'render', (['request', '"""moviesapp/addMovies.html"""', 'movies_dict'], {}), "(request, 'moviesapp/addMovies.html', movies_dict)\n", (716, 766), False, 'from django.shortcuts import render\n')]
|
import streamlit as st
from help import health_analysis
from load_chatmodel import load
from data import main_data
from get_medical import get_links
import torch
from tensorflow.keras.preprocessing.sequence import pad_sequences
encoder_net, decoder_net = load()
data = main_data()
max_len, vocab_enc, vocab_dec = data.len_all()
tok_enc = data.tok_enc
tok_dec = data.tok_dec
st.title("Animo")
st.write("A Guide To Mental Heath")
html_temp1 = """
<div style="background-color:#000000 ;padding:10px; background:rgba(255,255,255,0.2); box-shadow: 0 5px 15px rgba(0,0,0,0.5)">
<h2 style="color:black;text-align:center;font-family: "Lucida Console", Courier, monospace;">“Anything that’s human is mentionable, and anything that is mentionable can be more manageable. When we can talk about our feelings, they become less overwhelming, less upsetting, and less scary.” <NAME></h2>
</div>"""
st.markdown(html_temp1, unsafe_allow_html=True)
st.write("")
html_temp2 = """
<div style="background-color:#000000 ;padding:10px; background:rgba(255,255,255,0.2); box-shadow: 0 5px 15px rgba(0,0,0,0.5)">
<h2 style="color:black;text-align:center;font-family: "Lucida Console", Courier, monospace;">Animo is the latin translation of mind and noticing the sync where mental health is the well being of one's mind .We bring you a one stop guide to answer all your questions regarding mental health. We aim to provide and connect every individual with the vast expanse of mental health .Enter your queries in the space provided below and we'll be there at your service!!</h2>
</div>"""
st.markdown(html_temp2, unsafe_allow_html=True)
page_bg_img = '''
<style>
body {
background-image: url("https://www.homemaidsimple.com/wp-content/uploads/2018/04/Mental-health-stigma-1.jpg");
background-size: cover;
height: 100vh;
background-position: center;
}
</style>
'''
st.markdown(page_bg_img, unsafe_allow_html=True)
html_temp3 = """
<div style="background-color:#000000 ;padding:10px; background:rgba(255,255,255,0.2); box-shadow: 0 5px 15px rgba(0,0,0,0.5) ">
<h2 style="color:black;text-align:center;font-family: "Lucida Console", Courier, monospace;">Let's Talk</h2>
</div>"""
st.markdown(html_temp3, unsafe_allow_html=True)
st.write("")
st.write("Ask as many questions as you want and without the fear of judgement. We are hear for you clear any doubts you have about your health.")
st.write("Note: Enter your question in brief!!")
st.write("")
question = st.text_input("Question")
if question:
    # Encode as a batch of one text; keep `question` itself as the raw string
    # so the health-analysis block below can still call question.lower().
    seq_ids = tok_enc.texts_to_sequences([question])
    seq = torch.tensor(pad_sequences(seq_ids, padding='pre', maxlen=100), dtype=torch.long)
with torch.no_grad():
hidden, cell = encoder_net(seq)
outputs = [1]
for _ in range(100):
previous_word = torch.LongTensor([outputs[-1]]).to("cpu")
with torch.no_grad():
output, hidden, cell = decoder_net(previous_word, hidden, cell)
best_guess = output.argmax(1).item()
outputs.append(best_guess)
# Model predicts it's the end of the sentence
if output.argmax(1).item() == 2:
break
if question:
question = question.lower()
sent = [question]
health_model = health_analysis(sent)
if health_model.recommend():
html_temp4 = """
<div style="background-color:#000000 ;padding:10px; background:rgba(255,255,255,0.2); box-shadow: 0 5px 15px rgba(0,0,0,0.5)">
<h2 style="color:black;text-align:center;font-family: "Lucida Console", Courier, monospace;">From our conversation we would like to recommend for you whats best for your health. We also provide a service which recommends trained mental health experts,counsellors and psychiatrists in your area.")
</h2>
</div>"""
st.markdown(html_temp4, unsafe_allow_html=True)
html_temp5 = """
<div style="background-color:#000000 ;padding:10px; background:rgba(255,255,255,0.2); box-shadow: 0 5px 15px rgba(0,0,0,0.5)">
<h2 style="color:black;text-align:center;font-family: "Lucida Console", Courier, monospace;">Please enter your pincode in the box below!!</h2>
</div>"""
st.markdown(html_temp5, unsafe_allow_html=True)
pin = st.text_input("Pin")
st.write("""Mental Health : is a loaded term. It can trigger a dizzying array of reactions when we hear it. In a country like India where
Mental health is probably the next pandemic but the awareness is still very low. Our main objective is to educate the population still oblivious to the issues regarding mental health.""")
if pin:
for i in get_links(pin):
st.write(i)
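# A note on running this file (assuming it is saved as app.py): Streamlit apps
# are launched through the Streamlit CLI rather than `python app.py`:
#   streamlit run app.py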
|
[
"streamlit.markdown",
"data.main_data",
"streamlit.text_input",
"get_medical.get_links",
"torch.LongTensor",
"help.health_analysis",
"streamlit.write",
"streamlit.title",
"tensorflow.keras.preprocessing.sequence.pad_sequences",
"load_chatmodel.load",
"torch.no_grad"
] |
[((265, 271), 'load_chatmodel.load', 'load', ([], {}), '()\n', (269, 271), False, 'from load_chatmodel import load\n'), ((280, 291), 'data.main_data', 'main_data', ([], {}), '()\n', (289, 291), False, 'from data import main_data\n'), ((387, 404), 'streamlit.title', 'st.title', (['"""Animo"""'], {}), "('Animo')\n", (395, 404), True, 'import streamlit as st\n'), ((406, 441), 'streamlit.write', 'st.write', (['"""A Guide To Mental Health"""'], {}), "('A Guide To Mental Health')\n", (414, 441), True, 'import streamlit as st\n'), ((906, 953), 'streamlit.markdown', 'st.markdown', (['html_temp1'], {'unsafe_allow_html': '(True)'}), '(html_temp1, unsafe_allow_html=True)\n', (917, 953), True, 'import streamlit as st\n'), ((955, 967), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (963, 967), True, 'import streamlit as st\n'), ((1595, 1642), 'streamlit.markdown', 'st.markdown', (['html_temp2'], {'unsafe_allow_html': '(True)'}), '(html_temp2, unsafe_allow_html=True)\n', (1606, 1642), True, 'import streamlit as st\n'), ((1887, 1935), 'streamlit.markdown', 'st.markdown', (['page_bg_img'], {'unsafe_allow_html': '(True)'}), '(page_bg_img, unsafe_allow_html=True)\n', (1898, 1935), True, 'import streamlit as st\n'), ((2210, 2257), 'streamlit.markdown', 'st.markdown', (['html_temp3'], {'unsafe_allow_html': '(True)'}), '(html_temp3, unsafe_allow_html=True)\n', (2221, 2257), True, 'import streamlit as st\n'), ((2259, 2271), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (2267, 2271), True, 'import streamlit as st\n'), ((2273, 2428), 'streamlit.write', 'st.write', (['"""Ask as many questions as you want and without the fear of judgement. We are here for you to clear any doubts you have about your health."""'], {}), "(\n    'Ask as many questions as you want and without the fear of judgement. We are here for you to clear any doubts you have about your health.'\n    )\n", (2281, 2428), True, 'import streamlit as st\n'), ((2422, 2470), 'streamlit.write', 'st.write', (['"""Note: Enter your question in brief!!"""'], {}), "('Note: Enter your question in brief!!')\n", (2430, 2470), True, 'import streamlit as st\n'), ((2472, 2484), 'streamlit.write', 'st.write', (['""""""'], {}), "('')\n", (2480, 2484), True, 'import streamlit as st\n'), ((2497, 2522), 'streamlit.text_input', 'st.text_input', (['"""Question"""'], {}), "('Question')\n", (2510, 2522), True, 'import streamlit as st\n'), ((3282, 3303), 'help.health_analysis', 'health_analysis', (['sent'], {}), '(sent)\n', (3297, 3303), False, 'from help import health_analysis\n'), ((2618, 2668), 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['seq_input'], {'padding': '"""pre"""', 'maxlen': '(100)'}), "(seq_input, padding='pre', maxlen=100)\n", (2631, 2668), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), ((2699, 2714), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2712, 2714), False, 'import torch\n'), ((3860, 3907), 'streamlit.markdown', 'st.markdown', (['html_temp4'], {'unsafe_allow_html': '(True)'}), '(html_temp4, unsafe_allow_html=True)\n', (3871, 3907), True, 'import streamlit as st\n'), ((4252, 4299), 'streamlit.markdown', 'st.markdown', (['html_temp5'], {'unsafe_allow_html': '(True)'}), '(html_temp5, unsafe_allow_html=True)\n', (4263, 4299), True, 'import streamlit as st\n'), ((4315, 4335), 'streamlit.text_input', 'st.text_input', (['"""Pin"""'], {}), "('Pin')\n", (4328, 4335), True, 'import streamlit as st\n'), ((4347, 4691), 'streamlit.write', 'st.write', (['"""Mental Health is a loaded term. It can trigger a dizzying array of reactions when we hear it. In a country like India where \n    Mental health is probably the next pandemic but the awareness is still very low. Our main objective is to educate the population still oblivious to the issues regarding mental health."""'], {}), '(\n    """Mental Health is a loaded term. It can trigger a dizzying array of reactions when we hear it. In a country like India where \n    Mental health is probably the next pandemic but the awareness is still very low. Our main objective is to educate the population still oblivious to the issues regarding mental health."""\n    )\n', (4355, 4691), True, 'import streamlit as st\n'), ((2889, 2904), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2902, 2904), False, 'import torch\n'), ((4722, 4736), 'get_medical.get_links', 'get_links', (['pin'], {}), '(pin)\n', (4731, 4736), False, 'from get_medical import get_links\n'), ((2831, 2862), 'torch.LongTensor', 'torch.LongTensor', (['[outputs[-1]]'], {}), '([outputs[-1]])\n', (2847, 2862), False, 'import torch\n'), ((4755, 4766), 'streamlit.write', 'st.write', (['i'], {}), '(i)\n', (4763, 4766), True, 'import streamlit as st\n')]
|
import uuid
import unittest
import influxgraph
from influxdb import InfluxDBClient
class InfluxGraphLogFileTestCase(unittest.TestCase):
def setUp(self):
self.db_name = 'fakey'
self.client = InfluxDBClient(database=self.db_name)
self.client.create_database(self.db_name)
_logger = influxgraph.classes.finder.logger
_logger.handlers = []
def tearDown(self):
self.client.drop_database(self.db_name)
def test_create_log_file_should_succeed(self):
config = { 'influxdb' : { 'host' : 'localhost',
'port' : 8086,
'user' : 'root',
'pass' : '<PASSWORD>',
'db' : self.db_name,
'log_file' : '/tmp/fakey',
'log_level' : 'debug',
},
}
finder = influxgraph.InfluxDBFinder(config)
self.assertTrue(finder)
def test_create_root_log_file_should_fail(self):
_config = { 'influxdb' : { 'host' : 'localhost',
'port' : 8086,
'user' : 'root',
'pass' : '<PASSWORD>',
'db' : self.db_name,
'log_file' : '/' + str(uuid.uuid4()),
'log_level' : 'debug',
},
}
finder = influxgraph.InfluxDBFinder(_config)
self.assertTrue(finder)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"uuid.uuid4",
"influxdb.InfluxDBClient",
"influxgraph.InfluxDBFinder"
] |
[((1704, 1719), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1717, 1719), False, 'import unittest\n'), ((212, 249), 'influxdb.InfluxDBClient', 'InfluxDBClient', ([], {'database': 'self.db_name'}), '(database=self.db_name)\n', (226, 249), False, 'from influxdb import InfluxDBClient\n'), ((987, 1021), 'influxgraph.InfluxDBFinder', 'influxgraph.InfluxDBFinder', (['config'], {}), '(config)\n', (1013, 1021), False, 'import influxgraph\n'), ((1604, 1639), 'influxgraph.InfluxDBFinder', 'influxgraph.InfluxDBFinder', (['_config'], {}), '(_config)\n', (1630, 1639), False, 'import influxgraph\n'), ((1439, 1451), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1449, 1451), False, 'import uuid\n')]
|
import discord
from discord.ext import commands
import datetime as dt
import utilities.db as DB
import utilities.facility as Facility
from bot import MichaelBot
class CustomCommand(commands.Cog, name = "Custom Commands", command_attrs = {"cooldown_after_parsing": True}):
"""Commands that support adding custom commands."""
def __init__(self, bot):
self.bot : MichaelBot = bot
self.emoji = '✨'
self.__flags__ = [
"--description",
"--message",
"--channel",
"--reply",
"--addroles",
"--rmvroles"
]
@commands.Cog.listener("on_message")
async def _message(self, message : discord.Message):
        # This is kind of oofy, but whatever conditions live in `events.py`, you'll need to filter them out here as well.
if message.author == self.bot.user or isinstance(message.channel, discord.DMChannel):
return
guild_prefix = self.bot._prefixes[message.guild.id] if self.bot.user.id != 649822097492803584 else '!'
if message.content.startswith(guild_prefix):
            async with self.bot.pool.acquire() as conn:
                # The message has the format <prefix>command some_random_bs.
                # To get the command, split the content and take the first element,
                # which will be <prefix>command only.
                # To remove the prefix, trim the string based on the prefix's length.
existed = await DB.CustomCommand.get_command(conn, message.guild.id, message.content.split()[0][len(guild_prefix):])
if existed is not None:
if existed["channel"] is not None:
channel = message.guild.get_channel(existed["channel"])
else:
channel = message.channel
                    # Can only reply to the same channel
                    reference = None
                    if existed["is_reply"] and existed["channel"] == message.channel.id:
                        reference = message
try:
await channel.send(existed["message"], reference = reference)
except discord.Forbidden:
# For now, we're just silently ignore this.
# Might change to raising a command error though.
pass
if len(existed["addroles"]) > 0:
addroles_list = [message.guild.get_role(role) for role in existed["addroles"]]
try:
await message.author.add_roles(*addroles_list)
except discord.Forbidden:
await channel.send("Failed to add roles.")
if len(existed["rmvroles"]) > 0:
rmvroles_list = [message.guild.get_role(role) for role in existed["rmvroles"]]
try:
await message.author.remove_roles(*rmvroles_list)
except discord.Forbidden:
await channel.send("Failed to remove roles.")
@commands.group(aliases = ['ccmd', 'customcmd'], invoke_without_command = True)
@commands.cooldown(rate = 1, per = 5.0, type = commands.BucketType.guild)
@commands.bot_has_permissions(add_reactions = True, read_message_history = True, send_messages = True)
async def ccommand(self, ctx):
'''
View custom commands for this guild.
**Usage:** {usage}
**Cooldown:** 5 seconds per 1 use (guild)
**Example:** {prefix}{command_name}
**You need:** None.
**I need:** `Add Reactions`, `Read Message History`, `Send Messages`.
'''
async with self.bot.pool.acquire() as conn:
custom_commands = await DB.CustomCommand.get_commands(conn, ctx.guild.id)
if custom_commands == [None] * len(custom_commands):
return await ctx.reply("*Cricket noises*", mention_author = False)
from templates.navigate import listpage_generator
def title_formatter(command):
embed = Facility.get_default_embed(
title = "Custom Commands",
timestamp = dt.datetime.utcnow()
).set_author(
name = ctx.guild.name,
icon_url = ctx.guild.icon_url
)
return embed
def item_formatter(embed, command):
embed.add_field(
name = command["name"],
value = f"*{command['description']}*" if command["description"] != "" else "*None*",
inline = False
)
page = listpage_generator(3, custom_commands, title_formatter, item_formatter)
await page.start(ctx)
@ccommand.command()
@commands.cooldown(rate = 1, per = 5.0, type = commands.BucketType.guild)
@commands.has_guild_permissions(manage_guild = True)
@commands.bot_has_permissions(read_message_history = True, send_messages = True)
async def add(self, ctx : commands.Context, name, *, input):
'''
Add a custom command to the guild.
The `input` is in the form of arguments commonly used within terminals.
There are 5 arguments, one of which is required:
- `--description`: The command's description.
- **`--message`: This is required. The command's response.**
- `--channel`: The channel the command will send the response to. Must be ID.
- `--reply`: A flag indicating whether the message will be a reply.
- `--addroles`: The roles the bot will add to the command invoker. Must be IDs.
        - `--rmvroles`: The roles the bot will remove from the command invoker. Must be IDs.
Order is not important.
**Usage:** {usage}
**Cooldown:** 5 seconds per 1 use (guild)
**Example 1:** {prefix}{command_name} test --message Hello
**Example 2:** {prefix}{command_name} test2 --description Give some cool roles --message Enjoy :D --reply --addroles 704527865173114900 644339804141518848
**You need:** `Manage Server`.
**I need:** `Read Message History`, `Send Messages`.
'''
builtin_existed = ctx.bot.get_command(name)
if builtin_existed is not None:
return await ctx.reply("This command's name already existed within the bot. Please choose a different one.")
async with self.bot.pool.acquire() as conn:
existed = await DB.CustomCommand.get_command(conn, ctx.guild.id, name)
if existed is not None:
return await ctx.reply(f"This guild already has a command with the name `{name}`. Please choose a different one.")
arguments = Facility.flag_parse(input, self.__flags__)
description = arguments["--description"]
message = arguments["--message"]
channel = arguments["--channel"]
is_reply = arguments["--reply"]
addroles = arguments["--addroles"]
rmvroles = arguments["--rmvroles"]
addroles_list = []
rmvroles_list = []
if isinstance(description, bool):
return await ctx.reply("`--description` is not a flag but rather an argument.")
if message is None:
return await ctx.reply("`--message` is a required argument.")
if isinstance(message, bool):
return await ctx.reply("`--message` is not a flag but rather an argument.")
if isinstance(channel, bool):
return await ctx.reply("`--channel` must be an existed channel's ID.")
elif channel is not None:
try:
channel = int(channel)
except ValueError:
return await ctx.reply("`--channel` must be an existed channel's ID.")
dchannel = ctx.guild.get_channel(channel)
if dchannel is None:
return await ctx.reply("`--channel` must be an existed channel's ID.")
        # I decided to make `--reply` both a flag and an argument (though the argument carries no info).
        is_reply = is_reply is not None
if isinstance(addroles, bool) or isinstance(rmvroles, bool):
return await ctx.reply("`--addroles`/`--rmvroles` is not a flag but rather an argument.")
if isinstance(addroles, str):
addroles_list = []
for role in addroles.split():
try:
drole = ctx.guild.get_role(int(role))
except ValueError:
return await ctx.reply("`--addroles` must contain existed roles' ID.")
if drole is not None and drole < ctx.guild.get_member(self.bot.user.id).top_role:
addroles_list.append(int(role))
if isinstance(rmvroles, str):
rmvroles_list = []
for role in rmvroles.split():
try:
drole = ctx.guild.get_role(int(role))
except ValueError:
return await ctx.reply("`--rmvroles` must contain existed roles' ID.")
if drole is not None and drole < ctx.guild.get_member(self.bot.user.id).top_role:
rmvroles_list.append(int(role))
async with conn.transaction():
await DB.CustomCommand.add(conn, ctx.guild.id, name, {
"description": description,
"message": message,
"channel": channel,
"is_reply": is_reply,
"addroles": addroles_list,
"rmvroles": rmvroles_list
})
await ctx.reply(f"Added command `{name}`.", mention_author = False)
@ccommand.command()
@commands.cooldown(rate = 1, per = 5.0, type = commands.BucketType.guild)
@commands.has_guild_permissions(manage_guild = True)
@commands.bot_has_permissions(read_message_history = True, send_messages = True)
async def remove(self, ctx, name):
'''
Remove a custom command from the guild.
**Usage:** {usage}
**Cooldown:** 5 seconds per 1 use (guild)
**Example:** {prefix}{command_name} test
**You need:** `Manage Server`.
**I need:** `Read Message History`, `Send Messages`.
'''
builtin_existed = ctx.bot.get_command(name)
if builtin_existed is not None:
return await ctx.reply("This command's name somehow matches the bot's default commands. Contact the developer.")
async with self.bot.pool.acquire() as conn:
existed = await DB.CustomCommand.get_command(conn, ctx.guild.id, name)
if existed is None:
return await ctx.reply(f"There is no such command in this guild.")
async with conn.transaction():
await DB.CustomCommand.remove(conn, ctx.guild.id, name)
await ctx.reply(f"Removed command `{name}`.", mention_author = False)
@ccommand.command()
async def edit(self, ctx):
        # Make edit the same as creation, with a few catches:
# - name is not changeable; it'll be ignored if provided.
# - if any arguments is not provided, it'll retain the old behavior.
# - to clear an optional argument (say --addroles), you need to provide the string "clear" (case-insensitive).
# - to toggle, simply provide the argument again.
pass
def setup(bot : MichaelBot):
bot.add_cog(CustomCommand(bot))
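# A minimal, hypothetical sketch of the merge rule described in `edit` above,
# operating on plain dicts rather than the cog's real storage (names and the
# None-means-unprovided convention are assumptions, not verified behavior).
# Toggling of boolean flags is deliberately left out of this sketch.
def _merge_edit(old: dict, new: dict) -> dict:
    merged = dict(old)
    for key, value in new.items():
        if value is None:
            continue  # argument not provided: retain the old behavior
        if isinstance(value, str) and value.lower() == "clear":
            merged[key] = None  # "clear" (case-insensitive) wipes an optional field
        else:
            merged[key] = value
    return merged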
|
[
"discord.ext.commands.Cog.listener",
"utilities.db.CustomCommand.get_commands",
"discord.ext.commands.cooldown",
"utilities.db.CustomCommand.get_command",
"discord.ext.commands.bot_has_permissions",
"datetime.datetime.utcnow",
"discord.ext.commands.has_guild_permissions",
"discord.ext.commands.group",
"templates.navigate.listpage_generator",
"utilities.db.CustomCommand.add",
"utilities.facility.flag_parse",
"utilities.db.CustomCommand.remove"
] |
[((623, 658), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', (['"""on_message"""'], {}), "('on_message')\n", (644, 658), False, 'from discord.ext import commands\n'), ((3312, 3386), 'discord.ext.commands.group', 'commands.group', ([], {'aliases': "['ccmd', 'customcmd']", 'invoke_without_command': '(True)'}), "(aliases=['ccmd', 'customcmd'], invoke_without_command=True)\n", (3326, 3386), False, 'from discord.ext import commands\n'), ((3396, 3462), 'discord.ext.commands.cooldown', 'commands.cooldown', ([], {'rate': '(1)', 'per': '(5.0)', 'type': 'commands.BucketType.guild'}), '(rate=1, per=5.0, type=commands.BucketType.guild)\n', (3413, 3462), False, 'from discord.ext import commands\n'), ((3474, 3573), 'discord.ext.commands.bot_has_permissions', 'commands.bot_has_permissions', ([], {'add_reactions': '(True)', 'read_message_history': '(True)', 'send_messages': '(True)'}), '(add_reactions=True, read_message_history=True,\n send_messages=True)\n', (3502, 3573), False, 'from discord.ext import commands\n'), ((5064, 5130), 'discord.ext.commands.cooldown', 'commands.cooldown', ([], {'rate': '(1)', 'per': '(5.0)', 'type': 'commands.BucketType.guild'}), '(rate=1, per=5.0, type=commands.BucketType.guild)\n', (5081, 5130), False, 'from discord.ext import commands\n'), ((5142, 5191), 'discord.ext.commands.has_guild_permissions', 'commands.has_guild_permissions', ([], {'manage_guild': '(True)'}), '(manage_guild=True)\n', (5172, 5191), False, 'from discord.ext import commands\n'), ((5199, 5274), 'discord.ext.commands.bot_has_permissions', 'commands.bot_has_permissions', ([], {'read_message_history': '(True)', 'send_messages': '(True)'}), '(read_message_history=True, send_messages=True)\n', (5227, 5274), False, 'from discord.ext import commands\n'), ((10289, 10355), 'discord.ext.commands.cooldown', 'commands.cooldown', ([], {'rate': '(1)', 'per': '(5.0)', 'type': 'commands.BucketType.guild'}), '(rate=1, per=5.0, type=commands.BucketType.guild)\n', (10306, 10355), False, 'from discord.ext import commands\n'), ((10367, 10416), 'discord.ext.commands.has_guild_permissions', 'commands.has_guild_permissions', ([], {'manage_guild': '(True)'}), '(manage_guild=True)\n', (10397, 10416), False, 'from discord.ext import commands\n'), ((10424, 10499), 'discord.ext.commands.bot_has_permissions', 'commands.bot_has_permissions', ([], {'read_message_history': '(True)', 'send_messages': '(True)'}), '(read_message_history=True, send_messages=True)\n', (10452, 10499), False, 'from discord.ext import commands\n'), ((4924, 4995), 'templates.navigate.listpage_generator', 'listpage_generator', (['(3)', 'custom_commands', 'title_formatter', 'item_formatter'], {}), '(3, custom_commands, title_formatter, item_formatter)\n', (4942, 4995), False, 'from templates.navigate import listpage_generator\n'), ((7012, 7054), 'utilities.facility.flag_parse', 'Facility.flag_parse', (['input', 'self.__flags__'], {}), '(input, self.__flags__)\n', (7031, 7054), True, 'import utilities.facility as Facility\n'), ((3997, 4046), 'utilities.db.CustomCommand.get_commands', 'DB.CustomCommand.get_commands', (['conn', 'ctx.guild.id'], {}), '(conn, ctx.guild.id)\n', (4026, 4046), True, 'import utilities.db as DB\n'), ((6757, 6811), 'utilities.db.CustomCommand.get_command', 'DB.CustomCommand.get_command', (['conn', 'ctx.guild.id', 'name'], {}), '(conn, ctx.guild.id, name)\n', (6785, 6811), True, 'import utilities.db as DB\n'), ((11158, 11212), 'utilities.db.CustomCommand.get_command', 'DB.CustomCommand.get_command', (['conn', 'ctx.guild.id', 'name'], {}), '(conn, ctx.guild.id, name)\n', (11186, 11212), True, 'import utilities.db as DB\n'), ((9843, 10043), 'utilities.db.CustomCommand.add', 'DB.CustomCommand.add', (['conn', 'ctx.guild.id', 'name', "{'description': description, 'message': message, 'channel': channel,\n 'is_reply': is_reply, 'addroles': addroles_list, 'rmvroles': rmvroles_list}"], {}), "(conn, ctx.guild.id, name, {'description': description,\n 'message': message, 'channel': channel, 'is_reply': is_reply,\n 'addroles': addroles_list, 'rmvroles': rmvroles_list})\n", (9863, 10043), True, 'import utilities.db as DB\n'), ((11394, 11443), 'utilities.db.CustomCommand.remove', 'DB.CustomCommand.remove', (['conn', 'ctx.guild.id', 'name'], {}), '(conn, ctx.guild.id, name)\n', (11417, 11443), True, 'import utilities.db as DB\n'), ((4431, 4451), 'datetime.datetime.utcnow', 'dt.datetime.utcnow', ([], {}), '()\n', (4449, 4451), True, 'import datetime as dt\n')]
|
import datetime
import os
import sqlalchemy.orm as sa_orm
from app import factories
from app.etl import transformers
from app.util.json import load_json_file
def test_make_response_from_nested_schema(session: sa_orm.Session, data_dir):
schema = load_json_file(os.path.join(data_dir, 'nested_schema.json'))
form = factories.FormFactory(schema=schema)
user = factories.UserFactory()
session.add_all([form, user])
session.flush()
get_node_path_map = transformers.get_node_path_map_cache(session)
result = factories.make_response(get_node_path_map, form.id)
assert result
assert type(result) == dict
# test a selection of the nested values
assert result['root']
assert type(result['root']) == dict
assert result['root']['primary_care_doctor_phone'] is not None
assert type(result['root']['primary_care_doctor_phone']) == str
assert result['root']['emergency_visits'] is not None
assert type(result['root']['emergency_visits']) == int
assert result['root']['medical_conditions']['mobility_help']['walker'] is not None
assert type(result['root']['medical_conditions']['mobility_help']['walker']) == bool
assert result['root']['date_of_birth'] is not None
assert type(result['root']['date_of_birth']) == str
parsed_date = datetime.datetime.strptime(result['root']['date_of_birth'], '%Y-%m-%d')
assert parsed_date
|
[
"app.factories.FormFactory",
"app.factories.UserFactory",
"datetime.datetime.strptime",
"app.factories.make_response",
"app.etl.transformers.get_node_path_map_cache",
"os.path.join"
] |
[((325, 361), 'app.factories.FormFactory', 'factories.FormFactory', ([], {'schema': 'schema'}), '(schema=schema)\n', (346, 361), False, 'from app import factories\n'), ((374, 397), 'app.factories.UserFactory', 'factories.UserFactory', ([], {}), '()\n', (395, 397), False, 'from app import factories\n'), ((478, 523), 'app.etl.transformers.get_node_path_map_cache', 'transformers.get_node_path_map_cache', (['session'], {}), '(session)\n', (514, 523), False, 'from app.etl import transformers\n'), ((537, 588), 'app.factories.make_response', 'factories.make_response', (['get_node_path_map', 'form.id'], {}), '(get_node_path_map, form.id)\n', (560, 588), False, 'from app import factories\n'), ((1311, 1382), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["result['root']['date_of_birth']", '"""%Y-%m-%d"""'], {}), "(result['root']['date_of_birth'], '%Y-%m-%d')\n", (1337, 1382), False, 'import datetime\n'), ((268, 312), 'os.path.join', 'os.path.join', (['data_dir', '"""nested_schema.json"""'], {}), "(data_dir, 'nested_schema.json')\n", (280, 312), False, 'import os\n')]
|
from glob import glob
import os, random
class userAgents(object):
def __init__(self):
files_ = glob(f'{os.path.dirname(os.path.realpath(__file__))}/assets/*.txt')
self.uas,self.Chrome,self.Edge,self.Firefox,self.Opera,self.Safari = [],[],[],[],[],[]
for file_ in files_:
            with open(file_, 'r') as f:
                # splitlines() drops the trailing empty record; the context manager closes the file
                records_ = f.read().splitlines()
            for rec in records_:
                if not rec:
                    continue  # skip blank lines so empty user-agent strings are never stored
                self.uas.append(rec)
if 'Chrome' in file_:
self.Chrome.append(rec)
elif 'Edge' in file_:
self.Edge.append(rec)
elif 'Firefox' in file_:
self.Firefox.append(rec)
elif 'Opera' in file_:
self.Opera.append(rec)
elif 'Safari' in file_:
self.Safari.append(rec)
def random(self,engine=None):
if engine == 'Chrome':
return random.choice(self.Chrome)
elif engine == 'Edge':
return random.choice(self.Edge)
elif engine == 'Firefox':
return random.choice(self.Firefox)
elif engine == 'Opera':
return random.choice(self.Opera)
elif engine == 'Safari':
return random.choice(self.Safari)
else:
return random.choice(self.uas)
def count(self):
return len(self.uas)
if __name__ == "__main__":
ua = userAgents()
print(ua.random('Firefox'))
print(ua.count())
|
[
"os.path.realpath",
"random.choice"
] |
[((754, 780), 'random.choice', 'random.choice', (['self.Chrome'], {}), '(self.Chrome)\n', (767, 780), False, 'import os, random\n'), ((816, 840), 'random.choice', 'random.choice', (['self.Edge'], {}), '(self.Edge)\n', (829, 840), False, 'import os, random\n'), ((879, 906), 'random.choice', 'random.choice', (['self.Firefox'], {}), '(self.Firefox)\n', (892, 906), False, 'import os, random\n'), ((124, 150), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (140, 150), False, 'import os, random\n'), ((943, 968), 'random.choice', 'random.choice', (['self.Opera'], {}), '(self.Opera)\n', (956, 968), False, 'import os, random\n'), ((1006, 1032), 'random.choice', 'random.choice', (['self.Safari'], {}), '(self.Safari)\n', (1019, 1032), False, 'import os, random\n'), ((1052, 1075), 'random.choice', 'random.choice', (['self.uas'], {}), '(self.uas)\n', (1065, 1075), False, 'import os, random\n')]
|
import uuid
from django.contrib.auth import get_user_model
from django.db import models
from django.utils.translation import gettext_lazy as _
from investments.models import TimestampedModel
UserModel = get_user_model()
class Tag(TimestampedModel):
uuid = models.UUIDField(default=uuid.uuid4, primary_key=True)
name = models.CharField(_("Name"), max_length=254)
author = models.ForeignKey(UserModel, related_name="tags", on_delete=models.CASCADE)
class Meta:
verbose_name = _("Tag")
verbose_name_plural = _("Tags")
def __str__(self):
return f"{self.name} - {self.author}"
|
[
"django.db.models.ForeignKey",
"django.db.models.UUIDField",
"django.contrib.auth.get_user_model",
"django.utils.translation.gettext_lazy"
] |
[((206, 222), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (220, 222), False, 'from django.contrib.auth import get_user_model\n'), ((265, 319), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'default': 'uuid.uuid4', 'primary_key': '(True)'}), '(default=uuid.uuid4, primary_key=True)\n', (281, 319), False, 'from django.db import models\n'), ((388, 463), 'django.db.models.ForeignKey', 'models.ForeignKey', (['UserModel'], {'related_name': '"""tags"""', 'on_delete': 'models.CASCADE'}), "(UserModel, related_name='tags', on_delete=models.CASCADE)\n", (405, 463), False, 'from django.db import models\n'), ((348, 357), 'django.utils.translation.gettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (349, 357), True, 'from django.utils.translation import gettext_lazy as _\n'), ((504, 512), 'django.utils.translation.gettext_lazy', '_', (['"""Tag"""'], {}), "('Tag')\n", (505, 512), True, 'from django.utils.translation import gettext_lazy as _\n'), ((543, 552), 'django.utils.translation.gettext_lazy', '_', (['"""Tags"""'], {}), "('Tags')\n", (544, 552), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
from mltoolkit.mldp.utils.constants.vocabulary import PAD, START, END, UNK
from mltoolkit.mldp import PyTorchPipeline
from mltoolkit.mldp.steps.readers import CsvReader
from mltoolkit.mldp.steps.transformers.nlp import TokenProcessor, VocabMapper, \
SeqLenComputer, Padder, SeqWrapper
from mltoolkit.mldp.steps.general import ChunkAccumulator
from mltoolkit.mldp.steps.transformers.general import Shuffler
from mltoolkit.mldp.steps.transformers.field import FieldRenamer
from fewsum.data_pipelines.steps import RevMapper, AmazonTransformer,\
SummMapper, GoldSummRevIndxsCreator, NumpyFormatter
from fewsum.utils.fields import ModelF, GoldDataF
from csv import QUOTE_NONE
from fewsum.data_pipelines.steps.props import SummRougeProp, DummyProp, SummLenProp,\
POVProp
def assemble_tuning_pipeline(word_vocab, max_groups_per_batch=1, tok_func=None,
lowercase=False):
"""
The pipeline yields tokenized reviews and summaries that can be used for
training (fine-tuning of the model).
"""
assert START in word_vocab and END in word_vocab
reader = CsvReader(sep='\t', encoding='utf-8', engine='python',
chunk_size=None,
use_lists=True, quating=QUOTE_NONE)
chunk_accum = ChunkAccumulator(new_size=max_groups_per_batch)
ama_spec_trans = AmazonTransformer(fnames_to_copy=[GoldDataF.PROD_ID,
GoldDataF.CAT,
])
summ_mapper = SummMapper(fname=ModelF.SUMMS,
new_indx_fname=ModelF.SUMM_GROUP_INDX)
token_processor = TokenProcessor(fnames=[ModelF.REV, ModelF.SUMM],
tok_func=tok_func, lowercase=lowercase)
vocab_mapper = VocabMapper({ModelF.REV: word_vocab,
ModelF.SUMM: word_vocab})
fname_renamer = FieldRenamer({GoldDataF.PROD_ID: ModelF.GROUP_ID,
GoldDataF.CAT: ModelF.CAT,
ModelF.SUMMS: ModelF.SUMM})
seq_wrapper = SeqWrapper(fname=[ModelF.REV, ModelF.SUMM],
start_el=word_vocab[START].id,
end_el=word_vocab[END].id)
padder = Padder(fname=[ModelF.REV, ModelF.SUMM],
new_mask_fname=[ModelF.REV_MASK, ModelF.SUMM_MASK],
pad_symbol=word_vocab[PAD].id, padding_mode='right')
indxs_creator = GoldSummRevIndxsCreator()
# rev_mapper = RevMapper(group_rev_indxs_fname=ModelF.GROUP_REV_INDXS,
# group_rev_mask_fname=ModelF.GROUP_REV_INDXS_MASK,
# rev_mask_fname=ModelF.REV_MASK)
# props
len_prop = SummLenProp(summ_fname=ModelF.SUMM, rev_fname=ModelF.REV,
group_rev_indxs_fname=ModelF.GROUP_REV_INDXS,
summ_group_indx_fname=ModelF.SUMM_GROUP_INDX,
new_fname=ModelF.LEN_PROP)
pov_prop = POVProp(text_fname=ModelF.SUMM, new_fname=ModelF.POV_PROP)
rouge_prop = SummRougeProp(summ_fname=ModelF.SUMM, rev_fname=ModelF.REV,
group_rev_indxs_fname=ModelF.GROUP_REV_INDXS,
summ_group_indx_fname=ModelF.SUMM_GROUP_INDX,
new_fname=ModelF.ROUGE_PROP)
rating_prop = DummyProp(fname=ModelF.SUMM, new_fname=ModelF.RATING_PROP,
fval=0.)
np_formatter = NumpyFormatter([ModelF.LEN_PROP, ModelF.RATING_PROP,
ModelF.POV_PROP, ModelF.ROUGE_PROP])
pipeline = PyTorchPipeline(reader=reader, error_on_invalid_chunk=False)
# pipeline.add_step(shuffler)
pipeline.add_step(chunk_accum)
pipeline.add_step(ama_spec_trans)
pipeline.add_step(summ_mapper)
pipeline.add_step(fname_renamer)
pipeline.add_step(indxs_creator)
# props
pipeline.add_step(rating_prop)
pipeline.add_step(rouge_prop)
pipeline.add_step(token_processor)
# the props below require tokenization
pipeline.add_step(len_prop)
pipeline.add_step(pov_prop)
pipeline.add_step(vocab_mapper)
pipeline.add_step(seq_wrapper)
pipeline.add_step(padder)
pipeline.add_step(np_formatter)
return pipeline
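# Hedged usage sketch; the vocabulary object and the batch-iteration API are
# assumed from the imports above rather than verified against mltoolkit:
#   pipeline = assemble_tuning_pipeline(word_vocab, max_groups_per_batch=4,
#                                       tok_func=my_tokenizer, lowercase=True)
#   batches are then drawn through the PyTorchPipeline interface.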
|
[
"mltoolkit.mldp.steps.readers.CsvReader",
"mltoolkit.mldp.steps.transformers.nlp.VocabMapper",
"mltoolkit.mldp.steps.transformers.nlp.Padder",
"mltoolkit.mldp.PyTorchPipeline",
"fewsum.data_pipelines.steps.GoldSummRevIndxsCreator",
"mltoolkit.mldp.steps.transformers.field.FieldRenamer",
"fewsum.data_pipelines.steps.props.SummLenProp",
"fewsum.data_pipelines.steps.props.DummyProp",
"fewsum.data_pipelines.steps.SummMapper",
"mltoolkit.mldp.steps.transformers.nlp.TokenProcessor",
"fewsum.data_pipelines.steps.props.POVProp",
"fewsum.data_pipelines.steps.props.SummRougeProp",
"mltoolkit.mldp.steps.transformers.nlp.SeqWrapper",
"mltoolkit.mldp.steps.general.ChunkAccumulator",
"fewsum.data_pipelines.steps.AmazonTransformer",
"fewsum.data_pipelines.steps.NumpyFormatter"
] |
[((1107, 1218), 'mltoolkit.mldp.steps.readers.CsvReader', 'CsvReader', ([], {'sep': '"""\t"""', 'encoding': '"""utf-8"""', 'engine': '"""python"""', 'chunk_size': 'None', 'use_lists': '(True)', 'quating': 'QUOTE_NONE'}), "(sep='\\t', encoding='utf-8', engine='python', chunk_size=None,\n use_lists=True, quating=QUOTE_NONE)\n", (1116, 1218), False, 'from mltoolkit.mldp.steps.readers import CsvReader\n'), ((1280, 1327), 'mltoolkit.mldp.steps.general.ChunkAccumulator', 'ChunkAccumulator', ([], {'new_size': 'max_groups_per_batch'}), '(new_size=max_groups_per_batch)\n', (1296, 1327), False, 'from mltoolkit.mldp.steps.general import ChunkAccumulator\n'), ((1350, 1418), 'fewsum.data_pipelines.steps.AmazonTransformer', 'AmazonTransformer', ([], {'fnames_to_copy': '[GoldDataF.PROD_ID, GoldDataF.CAT]'}), '(fnames_to_copy=[GoldDataF.PROD_ID, GoldDataF.CAT])\n', (1367, 1418), False, 'from fewsum.data_pipelines.steps import RevMapper, AmazonTransformer, SummMapper, GoldSummRevIndxsCreator, NumpyFormatter\n'), ((1549, 1618), 'fewsum.data_pipelines.steps.SummMapper', 'SummMapper', ([], {'fname': 'ModelF.SUMMS', 'new_indx_fname': 'ModelF.SUMM_GROUP_INDX'}), '(fname=ModelF.SUMMS, new_indx_fname=ModelF.SUMM_GROUP_INDX)\n', (1559, 1618), False, 'from fewsum.data_pipelines.steps import RevMapper, AmazonTransformer, SummMapper, GoldSummRevIndxsCreator, NumpyFormatter\n'), ((1671, 1763), 'mltoolkit.mldp.steps.transformers.nlp.TokenProcessor', 'TokenProcessor', ([], {'fnames': '[ModelF.REV, ModelF.SUMM]', 'tok_func': 'tok_func', 'lowercase': 'lowercase'}), '(fnames=[ModelF.REV, ModelF.SUMM], tok_func=tok_func,\n lowercase=lowercase)\n', (1685, 1763), False, 'from mltoolkit.mldp.steps.transformers.nlp import TokenProcessor, VocabMapper, SeqLenComputer, Padder, SeqWrapper\n'), ((1817, 1879), 'mltoolkit.mldp.steps.transformers.nlp.VocabMapper', 'VocabMapper', (['{ModelF.REV: word_vocab, ModelF.SUMM: word_vocab}'], {}), '({ModelF.REV: word_vocab, ModelF.SUMM: word_vocab})\n', (1828, 1879), False, 'from mltoolkit.mldp.steps.transformers.nlp import TokenProcessor, VocabMapper, SeqLenComputer, Padder, SeqWrapper\n'), ((1933, 2041), 'mltoolkit.mldp.steps.transformers.field.FieldRenamer', 'FieldRenamer', (['{GoldDataF.PROD_ID: ModelF.GROUP_ID, GoldDataF.CAT: ModelF.CAT, ModelF.\n SUMMS: ModelF.SUMM}'], {}), '({GoldDataF.PROD_ID: ModelF.GROUP_ID, GoldDataF.CAT: ModelF.CAT,\n ModelF.SUMMS: ModelF.SUMM})\n', (1945, 2041), False, 'from mltoolkit.mldp.steps.transformers.field import FieldRenamer\n'), ((2125, 2230), 'mltoolkit.mldp.steps.transformers.nlp.SeqWrapper', 'SeqWrapper', ([], {'fname': '[ModelF.REV, ModelF.SUMM]', 'start_el': 'word_vocab[START].id', 'end_el': 'word_vocab[END].id'}), '(fname=[ModelF.REV, ModelF.SUMM], start_el=word_vocab[START].id,\n end_el=word_vocab[END].id)\n', (2135, 2230), False, 'from mltoolkit.mldp.steps.transformers.nlp import TokenProcessor, VocabMapper, SeqLenComputer, Padder, SeqWrapper\n'), ((2299, 2447), 'mltoolkit.mldp.steps.transformers.nlp.Padder', 'Padder', ([], {'fname': '[ModelF.REV, ModelF.SUMM]', 'new_mask_fname': '[ModelF.REV_MASK, ModelF.SUMM_MASK]', 'pad_symbol': 'word_vocab[PAD].id', 'padding_mode': '"""right"""'}), "(fname=[ModelF.REV, ModelF.SUMM], new_mask_fname=[ModelF.REV_MASK,\n ModelF.SUMM_MASK], pad_symbol=word_vocab[PAD].id, padding_mode='right')\n", (2305, 2447), False, 'from mltoolkit.mldp.steps.transformers.nlp import TokenProcessor, VocabMapper, SeqLenComputer, Padder, SeqWrapper\n'), ((2505, 2530), 'fewsum.data_pipelines.steps.GoldSummRevIndxsCreator', 'GoldSummRevIndxsCreator', ([], {}), '()\n', (2528, 2530), False, 'from fewsum.data_pipelines.steps import RevMapper, AmazonTransformer, SummMapper, GoldSummRevIndxsCreator, NumpyFormatter\n'), ((2775, 2960), 'fewsum.data_pipelines.steps.props.SummLenProp', 'SummLenProp', ([], {'summ_fname': 'ModelF.SUMM', 'rev_fname': 'ModelF.REV', 'group_rev_indxs_fname': 'ModelF.GROUP_REV_INDXS', 'summ_group_indx_fname': 'ModelF.SUMM_GROUP_INDX', 'new_fname': 'ModelF.LEN_PROP'}), '(summ_fname=ModelF.SUMM, rev_fname=ModelF.REV,\n group_rev_indxs_fname=ModelF.GROUP_REV_INDXS, summ_group_indx_fname=\n ModelF.SUMM_GROUP_INDX, new_fname=ModelF.LEN_PROP)\n', (2786, 2960), False, 'from fewsum.data_pipelines.steps.props import SummRougeProp, DummyProp, SummLenProp, POVProp\n'), ((3048, 3106), 'fewsum.data_pipelines.steps.props.POVProp', 'POVProp', ([], {'text_fname': 'ModelF.SUMM', 'new_fname': 'ModelF.POV_PROP'}), '(text_fname=ModelF.SUMM, new_fname=ModelF.POV_PROP)\n', (3055, 3106), False, 'from fewsum.data_pipelines.steps.props import SummRougeProp, DummyProp, SummLenProp, POVProp\n'), ((3124, 3313), 'fewsum.data_pipelines.steps.props.SummRougeProp', 'SummRougeProp', ([], {'summ_fname': 'ModelF.SUMM', 'rev_fname': 'ModelF.REV', 'group_rev_indxs_fname': 'ModelF.GROUP_REV_INDXS', 'summ_group_indx_fname': 'ModelF.SUMM_GROUP_INDX', 'new_fname': 'ModelF.ROUGE_PROP'}), '(summ_fname=ModelF.SUMM, rev_fname=ModelF.REV,\n group_rev_indxs_fname=ModelF.GROUP_REV_INDXS, summ_group_indx_fname=\n ModelF.SUMM_GROUP_INDX, new_fname=ModelF.ROUGE_PROP)\n', (3137, 3313), False, 'from fewsum.data_pipelines.steps.props import SummRougeProp, DummyProp, SummLenProp, POVProp\n'), ((3416, 3484), 'fewsum.data_pipelines.steps.props.DummyProp', 'DummyProp', ([], {'fname': 'ModelF.SUMM', 'new_fname': 'ModelF.RATING_PROP', 'fval': '(0.0)'}), '(fname=ModelF.SUMM, new_fname=ModelF.RATING_PROP, fval=0.0)\n', (3425, 3484), False, 'from fewsum.data_pipelines.steps.props import SummRougeProp, DummyProp, SummLenProp, POVProp\n'), ((3532, 3625), 'fewsum.data_pipelines.steps.NumpyFormatter', 'NumpyFormatter', (['[ModelF.LEN_PROP, ModelF.RATING_PROP, ModelF.POV_PROP, ModelF.ROUGE_PROP]'], {}), '([ModelF.LEN_PROP, ModelF.RATING_PROP, ModelF.POV_PROP,\n ModelF.ROUGE_PROP])\n', (3546, 3625), False, 'from fewsum.data_pipelines.steps import RevMapper, AmazonTransformer, SummMapper, GoldSummRevIndxsCreator, NumpyFormatter\n'), ((3673, 3733), 'mltoolkit.mldp.PyTorchPipeline', 'PyTorchPipeline', ([], {'reader': 'reader', 'error_on_invalid_chunk': '(False)'}), '(reader=reader, error_on_invalid_chunk=False)\n', (3688, 3733), False, 'from mltoolkit.mldp import PyTorchPipeline\n')]
|
import themis.monitoring.emr_monitoring
import themis.scaling.emr_scaling
from themis.util import aws_common
from themis import config
from themis.model.aws_model import *
class EmrCluster(Scalable, Monitorable):
def __init__(self, id=None):
super(EmrCluster, self).__init__(id)
self.type = None
self.ip = None
self.ip_public = None
self.monitoring_data = {}
def fetch_data(self):
if self.needs_scaling():
self.monitoring_data = themis.monitoring.emr_monitoring.collect_info(self)
return self.monitoring_data
def needs_scaling(self, params=None):
app_config = config.get_config()
cluster_ids = app_config.general.get_autoscaling_clusters()
return self.id in cluster_ids
def perform_scaling(self, params=None):
themis.scaling.emr_scaling.perform_scaling(self)
class EmrClusterType(object):
PRESTO = aws_common.CLUSTER_TYPE_PRESTO
HIVE = aws_common.CLUSTER_TYPE_HIVE
|
[
"themis.config.get_config"
] |
[((652, 671), 'themis.config.get_config', 'config.get_config', ([], {}), '()\n', (669, 671), False, 'from themis import config\n')]
|
import sys
import os
import logging
from flask import Flask
HOME_SERVER_DIR = os.path.dirname(os.path.abspath(__file__))
#the absolute path of this script
app_path = os.path.dirname(os.path.realpath(__file__))
#config for the project
from homeserver.server_config import load_config, setup_logging
config = load_config(os.path.join(app_path, 'server.ini'))
# update logger settings
logger =setup_logging(config, logging.DEBUG)
#load devices and connect them
from homeserver.device_handler import DeviceHandler
device_handler = DeviceHandler( os.path.join( app_path, 'device_configs') )
def create_app(config):
#init the app
app = Flask(__name__)
#add the config parameters to the app config
app.config.update(config._values)
#load the webpage routes
from homeserver.server import api_routes
app.register_blueprint(api_routes)
#start voice control
# from homeserver.voice_control.voice_controller import VoiceController
# app.voice_controller = VoiceController(start=True)
# app.device_handler.add_interface(app.voice_controller)
return app
app = create_app(config)
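# Hedged convenience entry point (not part of the original module): run the
# Flask development server when this file is executed directly.
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)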
|
[
"os.path.abspath",
"homeserver.server_config.setup_logging",
"os.path.realpath",
"flask.Flask",
"os.path.join"
] |
[((399, 435), 'homeserver.server_config.setup_logging', 'setup_logging', (['config', 'logging.DEBUG'], {}), '(config, logging.DEBUG)\n', (412, 435), False, 'from homeserver.server_config import load_config, setup_logging\n'), ((98, 123), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (113, 123), False, 'import os\n'), ((189, 215), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (205, 215), False, 'import os\n'), ((327, 363), 'os.path.join', 'os.path.join', (['app_path', '"""server.ini"""'], {}), "(app_path, 'server.ini')\n", (339, 363), False, 'import os\n'), ((552, 592), 'os.path.join', 'os.path.join', (['app_path', '"""device_configs"""'], {}), "(app_path, 'device_configs')\n", (564, 592), False, 'import os\n'), ((646, 661), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (651, 661), False, 'from flask import Flask\n')]
|
#!/usr/bin/env python3.7
import numpy
import librosa
import soundfile
import sys
def main():
dest = "tests/test_1_note_Csharp3.wav"
tone = librosa.tone(138.59, sr=22050, length=44100)
soundfile.write(dest, tone, 22050)
print("Created {0} with note C#3".format(dest))
dest = "tests/test_1_note_E4.wav"
tone = librosa.tone(329.63, sr=22050, length=44100)
soundfile.write(dest, tone, 22050)
print("Created {0} with note E4".format(dest))
dest = "tests/test_2_notes_E2_F3.wav"
tone = numpy.zeros(44100)
tone += librosa.tone(82.41, sr=22050, length=44100)
tone += librosa.tone(174.61, sr=22050, length=44100)
soundfile.write(dest, tone, 22050)
print("Created {0} with notes E2, F3".format(dest))
dest = "tests/test_2_notes_G3_Asharp4.wav"
tone = numpy.zeros(44100)
tone += librosa.tone(196, sr=22050, length=44100)
tone += librosa.tone(466.16, sr=22050, length=44100)
soundfile.write(dest, tone, 22050)
print("Created {0} with notes G3, A#4".format(dest))
dest = "tests/test_3_notes_G2_B2_G#3.wav"
tone = numpy.zeros(44100)
tone += librosa.tone(98, sr=22050, length=44100)
tone += librosa.tone(123.47, sr=22050, length=44100)
tone += librosa.tone(207.65, sr=22050, length=44100)
soundfile.write(dest, tone, 22050)
print("Created {0} with notes G2, B2, G#3".format(dest))
return 0
if __name__ == "__main__":
sys.exit(main())
|
[
"librosa.tone",
"numpy.zeros",
"soundfile.write"
] |
[((216, 260), 'librosa.tone', 'librosa.tone', (['(138.59)'], {'sr': '(22050)', 'length': '(44100)'}), '(138.59, sr=22050, length=44100)\n', (228, 260), False, 'import librosa\n'), ((265, 299), 'soundfile.write', 'soundfile.write', (['dest', 'tone', '(22050)'], {}), '(dest, tone, 22050)\n', (280, 299), False, 'import soundfile\n'), ((402, 446), 'librosa.tone', 'librosa.tone', (['(329.63)'], {'sr': '(22050)', 'length': '(44100)'}), '(329.63, sr=22050, length=44100)\n', (414, 446), False, 'import librosa\n'), ((451, 485), 'soundfile.write', 'soundfile.write', (['dest', 'tone', '(22050)'], {}), '(dest, tone, 22050)\n', (466, 485), False, 'import soundfile\n'), ((591, 609), 'numpy.zeros', 'numpy.zeros', (['(44100)'], {}), '(44100)\n', (602, 609), False, 'import numpy\n'), ((622, 665), 'librosa.tone', 'librosa.tone', (['(82.41)'], {'sr': '(22050)', 'length': '(44100)'}), '(82.41, sr=22050, length=44100)\n', (634, 665), False, 'import librosa\n'), ((678, 722), 'librosa.tone', 'librosa.tone', (['(174.61)'], {'sr': '(22050)', 'length': '(44100)'}), '(174.61, sr=22050, length=44100)\n', (690, 722), False, 'import librosa\n'), ((727, 761), 'soundfile.write', 'soundfile.write', (['dest', 'tone', '(22050)'], {}), '(dest, tone, 22050)\n', (742, 761), False, 'import soundfile\n'), ((877, 895), 'numpy.zeros', 'numpy.zeros', (['(44100)'], {}), '(44100)\n', (888, 895), False, 'import numpy\n'), ((908, 949), 'librosa.tone', 'librosa.tone', (['(196)'], {'sr': '(22050)', 'length': '(44100)'}), '(196, sr=22050, length=44100)\n', (920, 949), False, 'import librosa\n'), ((962, 1006), 'librosa.tone', 'librosa.tone', (['(466.16)'], {'sr': '(22050)', 'length': '(44100)'}), '(466.16, sr=22050, length=44100)\n', (974, 1006), False, 'import librosa\n'), ((1011, 1045), 'soundfile.write', 'soundfile.write', (['dest', 'tone', '(22050)'], {}), '(dest, tone, 22050)\n', (1026, 1045), False, 'import soundfile\n'), ((1161, 1179), 'numpy.zeros', 'numpy.zeros', (['(44100)'], {}), '(44100)\n', (1172, 1179), False, 'import numpy\n'), ((1192, 1232), 'librosa.tone', 'librosa.tone', (['(98)'], {'sr': '(22050)', 'length': '(44100)'}), '(98, sr=22050, length=44100)\n', (1204, 1232), False, 'import librosa\n'), ((1245, 1289), 'librosa.tone', 'librosa.tone', (['(123.47)'], {'sr': '(22050)', 'length': '(44100)'}), '(123.47, sr=22050, length=44100)\n', (1257, 1289), False, 'import librosa\n'), ((1302, 1346), 'librosa.tone', 'librosa.tone', (['(207.65)'], {'sr': '(22050)', 'length': '(44100)'}), '(207.65, sr=22050, length=44100)\n', (1314, 1346), False, 'import librosa\n'), ((1351, 1385), 'soundfile.write', 'soundfile.write', (['dest', 'tone', '(22050)'], {}), '(dest, tone, 22050)\n', (1366, 1385), False, 'import soundfile\n')]
|
# Copyright 2020. ThingsBoard
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
# Importing models and REST client class from Community Edition version
from tb_rest_client.rest_client_ce import *
from tb_rest_client.rest import ApiException
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(module)s - %(lineno)d - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# ThingsBoard REST API URL
url = "http://localhost:8080"
# Default Tenant Administrator credentials
username = "<EMAIL>"
password = "<PASSWORD>"
# Creating the REST client object with context manager to get auto token refresh
with RestClientCE(base_url=url) as rest_client:
try:
# Auth with credentials
rest_client.login(username=username, password=password)
# Creating an Asset
asset = Asset(name="Building 1", type="building")
asset = rest_client.save_asset(asset)
logging.info("Asset was created:\n%r\n", asset)
# creating a Device
device = Device(name="Thermometer 1", type="thermometer")
device = rest_client.save_device(device)
logging.info(" Device was created:\n%r\n", device)
# Creating relations from device to asset
relation = EntityRelation(_from=asset.id, to=device.id, type="Contains")
relation = rest_client.save_relation(relation)
logging.info(" Relation was created:\n%r\n", relation)
except ApiException as e:
logging.exception(e)
|
[
"logging.info",
"logging.exception",
"logging.basicConfig"
] |
[((816, 972), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s - %(levelname)s - %(module)s - %(lineno)d - %(message)s"""', 'datefmt': '"""%Y-%m-%d %H:%M:%S"""'}), "(level=logging.DEBUG, format=\n '%(asctime)s - %(levelname)s - %(module)s - %(lineno)d - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S')\n", (835, 972), False, 'import logging\n'), ((1529, 1578), 'logging.info', 'logging.info', (['"""Asset was created:\n%r\n"""', 'asset'], {}), '("""Asset was created:\n%r\n""", asset)\n', (1541, 1578), False, 'import logging\n'), ((1730, 1782), 'logging.info', 'logging.info', (['""" Device was created:\n%r\n"""', 'device'], {}), '(""" Device was created:\n%r\n""", device)\n', (1742, 1782), False, 'import logging\n'), ((1977, 2033), 'logging.info', 'logging.info', (['""" Relation was created:\n%r\n"""', 'relation'], {}), '(""" Relation was created:\n%r\n""", relation)\n', (1989, 2033), False, 'import logging\n'), ((2070, 2090), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (2087, 2090), False, 'import logging\n')]
|
#!/usr/bin/env python2
"""
grammar_gen.py - Use pgen2 to generate tables from Oil's grammar.
"""
from __future__ import print_function
import os
import sys
from _devbuild.gen.id_kind_asdl import Id, Kind
from _devbuild.gen.syntax_asdl import source
from core import alloc
from core import meta
from core.util import log
from frontend import lexer, match, reader
from pgen2 import parse, pgen
# Used at grammar BUILD time.
OPS = {
'(': Id.Op_LParen,
')': Id.Op_RParen,
'[': Id.Op_LBracket,
']': Id.Op_RBracket, # Problem: in OilOuter, this is OP_RBracket.
# OK I think the metalanguage needs to be
# extended to take something other than ']'
# It needs proper token names!
'{': Id.Op_LBrace,
'}': Id.Op_RBrace,
'$[': Id.Left_DollarBracket,
'${': Id.Left_DollarBrace,
'$(': Id.Left_DollarParen,
'$/': Id.Left_DollarSlash,
'@[': Id.Left_AtBracket,
'.': Id.Expr_Dot,
'->': Id.Expr_RArrow,
# TODO: Add Ellipsis.
'...': Id.Expr_Dot,
# TODO: do we need div= and xor= ?
}
# TODO: We should be able to remove all these.
TERMINALS = {
'NAME': Id.Expr_Name,
'NUMBER': Id.Expr_Digits,
# The grammar seems something like 'for' or '>='
# These need to be looked up at "_Classify" time?
#'STRING': Id.Expr_Name,
'NEWLINE': Id.Op_Newline,
'ENDMARKER': Id.Eof_Real,
}
if 0: # unused because the grammar compile keeps track of keywords!
KEYWORDS = {
'div': Id.Expr_Div,
'xor': Id.Expr_Xor,
'and': Id.Expr_And,
'or': Id.Expr_Or,
'not': Id.Expr_Not,
'for': Id.Expr_For,
'is': Id.Expr_Is,
'in': Id.Expr_In,
'if': Id.Expr_If,
'else': Id.Expr_Else,
'match': Id.Expr_Match,
'func': Id.Expr_Func,
}
class OilTokenDef(object):
def __init__(self, arith_ops):
self.arith_ops = arith_ops
def GetTerminalNum(self, label):
"""
e.g. translate Expr_Name in the grammar to 178
"""
id_ = TERMINALS.get(label) or getattr(Id, label)
#log('Id %s = %d', id_, id_.enum_id)
assert id_.enum_id < 256, id_
return id_.enum_id
def GetOpNum(self, value):
id_ = OPS.get(value) or self.arith_ops[value]
assert id_.enum_id < 256, id_
return id_.enum_id
def MakeOilLexer(code_str, arena):
arena.PushSource(source.MainFile('pgen2_main'))
line_reader = reader.StringLineReader(code_str, arena)
line_lexer = lexer.LineLexer(match.MATCHER, '', arena)
lex = lexer.Lexer(line_lexer, line_reader)
return lex
def main(argv):
action = argv[1]
argv = argv[2:]
# Common initialization
arith_ops = {}
for _, token_str, id_ in meta.ID_SPEC.LexerPairs(Kind.Arith):
arith_ops[token_str] = id_
if 0:
from pprint import pprint
pprint(arith_ops)
tok_def = OilTokenDef(arith_ops)
if action == 'marshal': # generate the grammar and parse it
grammar_path = argv[0]
out_dir = argv[1]
basename, _ = os.path.splitext(os.path.basename(grammar_path))
# HACK for find:
if basename == 'find':
from tools.find import parse as find_parse
tok_def = find_parse.TokenDef()
with open(grammar_path) as f:
gr = pgen.MakeGrammar(f, tok_def=tok_def)
marshal_path = os.path.join(out_dir, basename + '.marshal')
with open(marshal_path, 'wb') as out_f:
gr.dump(out_f)
nonterm_path = os.path.join(out_dir, basename + '_nt.py')
with open(nonterm_path, 'w') as out_f:
gr.dump_nonterminals(out_f)
log('Compiled %s -> %s and %s', grammar_path, marshal_path, nonterm_path)
#gr.report()
elif action == 'parse': # generate the grammar and parse it
# Remove build dependency
from oil_lang import expr_parse
grammar_path = argv[0]
start_symbol = argv[1]
code_str = argv[2]
# For choosing lexer and semantic actions
grammar_name, _ = os.path.splitext(os.path.basename(grammar_path))
with open(grammar_path) as f:
gr = pgen.MakeGrammar(f, tok_def=tok_def)
arena = alloc.Arena()
lex = MakeOilLexer(code_str, arena)
is_expr = grammar_name in ('calc', 'grammar')
p = expr_parse.ExprParser(gr)
try:
pnode, _ = p.Parse(lex, gr.symbol2number[start_symbol])
except parse.ParseError as e:
log('Parse Error: %s', e)
return 1
from frontend import parse_lib
names = parse_lib.MakeGrammarNames(gr)
p_printer = expr_parse.ParseTreePrinter(names) # print raw nodes
p_printer.Print(pnode)
if is_expr:
from oil_lang import expr_to_ast
tr = expr_to_ast.Transformer(gr)
if start_symbol == 'eval_input':
ast_node = tr.Expr(pnode)
else:
ast_node = tr.OilAssign(pnode)
ast_node.PrettyPrint()
print()
elif action == 'stdlib-test':
# This shows how deep Python's parse tree is. It doesn't use semantic
# actions to prune on the fly!
import parser # builtin module
t = parser.expr('1+2')
print(t)
t2 = parser.st2tuple(t)
print(t2)
else:
raise RuntimeError('Invalid action %r' % action)
if __name__ == '__main__':
try:
sys.exit(main(sys.argv))
except RuntimeError as e:
print('FATAL: %s' % e, file=sys.stderr)
sys.exit(1)
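# Invocation patterns, read off the argv handling in main() above:
#   grammar_gen.py marshal <grammar_path> <out_dir>
#   grammar_gen.py parse <grammar_path> <start_symbol> <code_str>
#   grammar_gen.py stdlib-test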
|
[
"frontend.reader.StringLineReader",
"frontend.lexer.LineLexer",
"pgen2.pgen.MakeGrammar",
"core.meta.ID_SPEC.LexerPairs",
"core.util.log",
"parser.st2tuple",
"tools.find.parse.TokenDef",
"pprint.pprint",
"os.path.join",
"parser.expr",
"oil_lang.expr_parse.ParseTreePrinter",
"_devbuild.gen.syntax_asdl.source.MainFile",
"os.path.basename",
"core.alloc.Arena",
"oil_lang.expr_to_ast.Transformer",
"frontend.lexer.Lexer",
"sys.exit",
"oil_lang.expr_parse.ExprParser",
"frontend.parse_lib.MakeGrammarNames"
] |
[((2440, 2480), 'frontend.reader.StringLineReader', 'reader.StringLineReader', (['code_str', 'arena'], {}), '(code_str, arena)\n', (2463, 2480), False, 'from frontend import lexer, match, reader\n'), ((2496, 2537), 'frontend.lexer.LineLexer', 'lexer.LineLexer', (['match.MATCHER', '""""""', 'arena'], {}), "(match.MATCHER, '', arena)\n", (2511, 2537), False, 'from frontend import lexer, match, reader\n'), ((2546, 2582), 'frontend.lexer.Lexer', 'lexer.Lexer', (['line_lexer', 'line_reader'], {}), '(line_lexer, line_reader)\n', (2557, 2582), False, 'from frontend import lexer, match, reader\n'), ((2722, 2757), 'core.meta.ID_SPEC.LexerPairs', 'meta.ID_SPEC.LexerPairs', (['Kind.Arith'], {}), '(Kind.Arith)\n', (2745, 2757), False, 'from core import meta\n'), ((2393, 2422), '_devbuild.gen.syntax_asdl.source.MainFile', 'source.MainFile', (['"""pgen2_main"""'], {}), "('pgen2_main')\n", (2408, 2422), False, 'from _devbuild.gen.syntax_asdl import source\n'), ((2833, 2850), 'pprint.pprint', 'pprint', (['arith_ops'], {}), '(arith_ops)\n', (2839, 2850), False, 'from pprint import pprint\n'), ((3307, 3351), 'os.path.join', 'os.path.join', (['out_dir', "(basename + '.marshal')"], {}), "(out_dir, basename + '.marshal')\n", (3319, 3351), False, 'import os\n'), ((3437, 3479), 'os.path.join', 'os.path.join', (['out_dir', "(basename + '_nt.py')"], {}), "(out_dir, basename + '_nt.py')\n", (3449, 3479), False, 'import os\n'), ((3562, 3635), 'core.util.log', 'log', (['"""Compiled %s -> %s and %s"""', 'grammar_path', 'marshal_path', 'nonterm_path'], {}), "('Compiled %s -> %s and %s', grammar_path, marshal_path, nonterm_path)\n", (3565, 3635), False, 'from core.util import log\n'), ((3036, 3066), 'os.path.basename', 'os.path.basename', (['grammar_path'], {}), '(grammar_path)\n', (3052, 3066), False, 'import os\n'), ((3182, 3203), 'tools.find.parse.TokenDef', 'find_parse.TokenDef', ([], {}), '()\n', (3201, 3203), True, 'from tools.find import parse as find_parse\n'), ((3250, 3286), 'pgen2.pgen.MakeGrammar', 'pgen.MakeGrammar', (['f'], {'tok_def': 'tok_def'}), '(f, tok_def=tok_def)\n', (3266, 3286), False, 'from pgen2 import parse, pgen\n'), ((4075, 4088), 'core.alloc.Arena', 'alloc.Arena', ([], {}), '()\n', (4086, 4088), False, 'from core import alloc\n'), ((4189, 4214), 'oil_lang.expr_parse.ExprParser', 'expr_parse.ExprParser', (['gr'], {}), '(gr)\n', (4210, 4214), False, 'from oil_lang import expr_parse\n'), ((4415, 4445), 'frontend.parse_lib.MakeGrammarNames', 'parse_lib.MakeGrammarNames', (['gr'], {}), '(gr)\n', (4441, 4445), False, 'from frontend import parse_lib\n'), ((4462, 4496), 'oil_lang.expr_parse.ParseTreePrinter', 'expr_parse.ParseTreePrinter', (['names'], {}), '(names)\n', (4489, 4496), False, 'from oil_lang import expr_parse\n'), ((5270, 5281), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5278, 5281), False, 'import sys\n'), ((3947, 3977), 'os.path.basename', 'os.path.basename', (['grammar_path'], {}), '(grammar_path)\n', (3963, 3977), False, 'import os\n'), ((4025, 4061), 'pgen2.pgen.MakeGrammar', 'pgen.MakeGrammar', (['f'], {'tok_def': 'tok_def'}), '(f, tok_def=tok_def)\n', (4041, 4061), False, 'from pgen2 import parse, pgen\n'), ((4610, 4637), 'oil_lang.expr_to_ast.Transformer', 'expr_to_ast.Transformer', (['gr'], {}), '(gr)\n', (4633, 4637), False, 'from oil_lang import expr_to_ast\n'), ((4993, 5011), 'parser.expr', 'parser.expr', (['"""1+2"""'], {}), "('1+2')\n", (5004, 5011), False, 'import parser\n'), ((5034, 5052), 'parser.st2tuple', 'parser.st2tuple', (['t'], {}), '(t)\n', (5049, 5052), False, 'import parser\n'), ((4326, 4351), 'core.util.log', 'log', (['"""Parse Error: %s"""', 'e'], {}), "('Parse Error: %s', e)\n", (4329, 4351), False, 'from core.util import log\n')]
|
#!/usr/bin/env python3
# TODO: how will commands handle incorrectly cased names? will need to be able to do that, preferably without losing original case in messages.
# TODO: initial 'all clear'? here, or in main?
# TODO: save 'seen' persistently upon changes?
# TODO: commands, reporting/unique players (later), saving 'seen' to disk
# figure out clearing of state after a disconnect (or is that a 'main' thing?)
# TODO: inverted warnings, for population info
import news_reel_monitor
import bisect
import datetime
from enum import Enum, auto
UNCLANNED_PLACEHOLDER = 'unclanned'
class DataSetCategory(Enum):
ADMINISTRATOR = 'admin'
ALLY_CLAN = 'allyclan'
ALLY_PLAYER = 'allyplayer'
IGNORE_CLAN = 'ignoreclan'
IGNORE_PLAYER = 'ignoreplayer'
IGNORE_HOLDING = 'ignoreholding'
class DataMappingCategory(Enum):
FILTER_PLAYER = 'filterplayer'
FILTER_CLAN = 'filterclan'
FILTER_HOLDING = 'filterholding'
def oxford_comma_delimited_string(entries):
count = len(entries)
if count:
        if count == 1:
            return entries[0]
        if count == 2:
            # exactly two entries take no comma
            return f"{entries[0]} and {entries[1]}"
        return f"{', '.join(entries[:-1])}, and {entries[-1]}"
return ''
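# Illustrative examples for the helper above (values are hypothetical):
#   oxford_comma_delimited_string(['ore']) -> 'ore'
#   oxford_comma_delimited_string(['ore', 'wood', 'gold']) -> 'ore, wood, and gold'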
class AlertBot(object):
def __init__(self, monitor):
self.holding_alert = {}
self.monitor = monitor
self.seen_players = {}
# TODO: the 'unique' stuff
self._data_sets = {}
self._data_mappings = {}
def data_set(self, category):
data = self._data_sets.get(category)
if data is None:
self._data_sets[category] = data = set()
return data
def data_mapping(self, category):
data = self._data_mappings.get(category)
if data is None:
self._data_mappings[category] = data = {}
return data
def is_friendly(self, name, clan):
name = name.lower()
        # clan may be None for unclanned players; guard before lowercasing
        clan = clan.lower() if clan is not None else None
return (
name in self.data_set(DataSetCategory.ALLY_PLAYER) or
clan in self.data_set(DataSetCategory.ALLY_CLAN) or
name in self.data_set(DataSetCategory.IGNORE_PLAYER) or
clan in self.data_set(DataSetCategory.IGNORE_CLAN)
)
def filter_player(self, name):
# TODO: offensive/stupid player names
filtered_name = self.data_mapping(DataMappingCategory.FILTER_PLAYER).get(name.lower())
if filtered_name is not None:
return filtered_name
return name
def filter_clan(self, clan):
if clan is None:
            return UNCLANNED_PLACEHOLDER
# TODO: offensive/stupid clan names
filtered_clan = self.data_mapping(DataMappingCategory.FILTER_CLAN).get(clan.lower())
if filtered_clan is not None:
return filtered_clan
return clan
def filter_holding(self, holding):
# TODO: change it to change how TTS pronounces it? to fix the capitalization of certain cities?
filtered_holding = self.data_mapping(DataMappingCategory.FILTER_HOLDING).get(holding.lower())
if filtered_holding is not None:
return filtered_holding
return holding
def _get_alerts(self, full_status, all_warnings_on_change=False):
any_alert_changed = False
prioritized_warnings = []
notices = []
total_enemies = 0
if full_status:
all_warnings_on_change = True
        # For simplicity, always check all holdings: we only report new events,
        # and a full scan is needed for 'all clear' messages in any case.
for holding in self.monitor.holdings():
holding_string = self.filter_holding(holding)
if holding_string == holding:
# unfiltered, fix the case instead
holding_string = self.monitor.cased_holding_name.get(holding)
# Get the full holding message
last_alert = self.holding_alert.get(holding)
if last_alert is None:
self.holding_alert[holding] = last_alert = f'{holding_string} is clear'
holding_state = self.monitor.holding_state(holding)
enemies_by_clan = {}
enemy_count = 0
most_numerous_clan_enemy_count = 0
most_numerous_clan = None
for name in holding_state.players:
clan, rank = self.monitor.get_player_clan_info(name)
if self.is_friendly(name, clan):
continue
enemies = enemies_by_clan.get(clan)
if enemies is None:
enemies_by_clan[clan] = enemies = set()
enemies.add(name)
                clan_enemy_count = len(enemies)
                # each enemy player counts once toward the holding total;
                # clan_enemy_count is this clan's running total
                enemy_count += 1
                # a new highest clan total wins; on a tie, prefer the clan that is
                # alphabetically earlier (prioritizing clans over unclanned
                # 'None' entries)
                if clan_enemy_count > most_numerous_clan_enemy_count or (clan_enemy_count == most_numerous_clan_enemy_count and (
                        not most_numerous_clan or (clan and clan < most_numerous_clan))):
                    most_numerous_clan_enemy_count = clan_enemy_count
                    most_numerous_clan = clan
if enemy_count:
total_enemies += enemy_count
if len(enemies_by_clan) == 1:
clan, enemies = next(iter(enemies_by_clan.items()))
clan_string = self.filter_clan(clan)
if clan_string == clan:
# unfiltered, fix the case instead
clan_string = self.monitor.cased_clan_name.get(clan)
if len(enemies) == 1:
name = next(iter(enemies))
name_string = self.filter_player(name)
if name_string == name:
# unfiltered, fix the case instead
name_string = self.monitor.cased_player_name.get(name)
alert = f'{holding_string} has enemy {name_string} from {clan_string}'
else:
alert = f'{holding_string} has {enemy_count} enemies from {clan_string}'
else:
clan_string = self.filter_clan(most_numerous_clan)
if clan_string == most_numerous_clan:
# unfiltered, fix the case instead
clan_string = self.monitor.cased_clan_name.get(most_numerous_clan)
alert = f'{holding_string} has {enemy_count} enemies, mostly from {clan_string}'
is_warning = True
else:
alert = f'{holding_string} is clear'
is_warning = False
this_alert_changed = (last_alert != alert)
if this_alert_changed or (is_warning and all_warnings_on_change):
if this_alert_changed:
any_alert_changed = True
# this is a new alert, add it to the list to be output
if is_warning:
# just for sorting the messages by enemy count and holding name
bisect.insort(prioritized_warnings, (-enemy_count, holding, alert))
else:
# for sorting by holding name
bisect.insort(notices, (holding, alert))
#print(f'CHANGED! "{last_alert}" != {alert}')
self.holding_alert[holding] = alert
alerts = []
if any_alert_changed or full_status:
warnings = [entry[2] for entry in prioritized_warnings]
notices = [entry[1] for entry in notices]
#print(f'ALERT CHANGED: {warnings} ____ {notices}')
if warnings:
alerts.append(f'WARNING: {oxford_comma_delimited_string(warnings)}')
# if everything is clear, and either we want a status
# update or this is indeed new (because a new notice exists)
if not total_enemies and (full_status or notices):
alerts.append('NOTICE: all clear')
elif notices:
alerts.append(f'NOTICE: {oxford_comma_delimited_string(notices)}')
# TODO: remove debug divider
#print('----------------')
return alerts
def check_for_changes(self, full_status=False, all_warnings_on_change=False):
now = datetime.datetime.now()
changed_proximity, changed_resources = self.monitor.check_for_changes()
if changed_proximity:
for holding, player_state in changed_proximity.items():
# check the new events for 'seen' functionality
for name, state in player_state.items():
present, is_current = state
if is_current:
# by checking if it's current, we're sure that this is the latest
# location, for situations where the player has left multiple holdings
# within the contents of a single update.
self.seen_players[name] = (now, holding)
return (now, self._get_alerts(full_status=full_status, all_warnings_on_change=all_warnings_on_change))
# get the status without checking
def status(self):
now = datetime.datetime.now()
return (now, self._get_alerts(full_status=True))
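# Illustrative usage sketch (the monitor constructor name below is hypothetical;
# only the AlertBot API is taken from this module):
#   monitor = news_reel_monitor.NewsReelMonitor(...)  # hypothetical constructor
#   bot = AlertBot(monitor)
#   when, alerts = bot.check_for_changes(full_status=True)
#   for alert in alerts:
#       print(when, alert)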
|
[
"bisect.insort",
"datetime.datetime.now"
] |
[((8892, 8915), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8913, 8915), False, 'import datetime\n'), ((9824, 9847), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9845, 9847), False, 'import datetime\n'), ((7605, 7672), 'bisect.insort', 'bisect.insort', (['prioritized_warnings', '(-enemy_count, holding, alert)'], {}), '(prioritized_warnings, (-enemy_count, holding, alert))\n', (7618, 7672), False, 'import bisect\n'), ((7768, 7808), 'bisect.insort', 'bisect.insort', (['notices', '(holding, alert)'], {}), '(notices, (holding, alert))\n', (7781, 7808), False, 'import bisect\n')]
|
# Copyright(C) 2020 Horus View and Explore B.V.
import psycopg2
from horus_db import Frames, Recordings, Frame, Recording
# This example shows how to iterate over all the recordings
def get_connection():
return psycopg2.connect(
"dbname=HorusWebMoviePlayer user=postgres password=<PASSWORD>")
connection = get_connection()
recordings = Recordings(connection)
cursor = recordings.all()
recording = Recording(cursor)
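# Recording(cursor) is assumed (from the loop below) to return None once the
# cursor is exhausted, which is what terminates the iteration.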
while recording is not None:
print(" ", recording.id, " ", recording.directory)
recording = Recording(cursor)
|
[
"psycopg2.connect",
"horus_db.Recording",
"horus_db.Recordings"
] |
[((353, 375), 'horus_db.Recordings', 'Recordings', (['connection'], {}), '(connection)\n', (363, 375), False, 'from horus_db import Frames, Recordings, Frame, Recording\n'), ((416, 433), 'horus_db.Recording', 'Recording', (['cursor'], {}), '(cursor)\n', (425, 433), False, 'from horus_db import Frames, Recordings, Frame, Recording\n'), ((219, 304), 'psycopg2.connect', 'psycopg2.connect', (['"""dbname=HorusWebMoviePlayer user=postgres password=<PASSWORD>"""'], {}), "('dbname=HorusWebMoviePlayer user=postgres password=<PASSWORD>'\n )\n", (235, 304), False, 'import psycopg2\n'), ((535, 552), 'horus_db.Recording', 'Recording', (['cursor'], {}), '(cursor)\n', (544, 552), False, 'from horus_db import Frames, Recordings, Frame, Recording\n')]
|
'''
###############################################################################
# twist_controller.py #
# --------------------------------------------------------------------------- #
# #
# Description: #
# ------------ #
# This module contains the source for the command controller for the throttle #
# brakes and steering for the Self-Driving Car System. #
# #
# Change Log: #
# ----------- #
# +--------------------+---------------+------------------------------------+ #
# | Date | Author | Description | #
# +--------------------+---------------+------------------------------------+ #
# | 2/24/2018 | <NAME> | Initial pass on the code | #
# +--------------------+---------------+------------------------------------+ #
# | 2/27/2018 | <NAME> | Integrated a velocity controller | #
# | | | that works better than a PID | #
# +--------------------+---------------+------------------------------------+ #
# | 2/28/2018 | <NAME> | Remove a few irrelevant lines of | #
# | | | code and added comments | #
# +--------------------+---------------+------------------------------------+ #
# | 3/13/2018 | <NAME> | Changed twist cmd update interface | #
# | | | to "set_target_*" for clarity | #
# +--------------------+---------------+------------------------------------+ #
# | 3/29/2018 | <NAME> | Updated the velocity_controller | #
# +--------------------+---------------+------------------------------------+ #
# | 4/12/2018 | <NAME> | Reverted some changes to carry max | #
# | | | accel values for thresholding in | #
# | | | the velocity controller | #
# +--------------------+---------------+------------------------------------+ #
###############################################################################
'''
# Debug prints - to be removed
import rospy
# For steering control
from yaw_controller import YawController
# For throttle/brake control
from velocity_controller import VelocityController
class Controller(object):
def __init__(self, wheel_base=0.0, steer_ratio=0.0, min_speed=0.0,
max_lat_accel=0.0, max_steer_angle=0.0, vehicle_mass=1e-6,
max_accel=0.0, max_decel=0.0, max_input_accel=0.0,
max_input_decel=0.0, deadband=0.0, fuel_capacity=0.0,
wheel_radius=0.0):
'''
Initializes the controller object
'''
# Steering controller
self.steering_controller = YawController(wheel_base=wheel_base,
steer_ratio=steer_ratio,
min_speed=min_speed,
max_lat_accel=max_lat_accel,
max_steer_angle=max_steer_angle)
# Throttle/Brake Controller
self.throttle_controller = VelocityController(
vehicle_mass=vehicle_mass,
max_accel=max_accel,
max_decel=max_decel,
max_input_accel=max_input_accel,
max_input_decel=max_input_decel,
wheel_radius=wheel_radius,
deadband=deadband,
fuel_capacity=fuel_capacity)
# Vehicle Status variables
self.cur_linear_velocity = 0
self.cur_angular_velocity = 0
# Desired state variables
self.target_linear_velocity = 0
self.target_angular_velocity = 0
def set_current_linear_velocity(self, vel=0):
'''
Sets the current linear velocity of the vehicle for the controller
to use
        Args:
            float: vel - the current linear velocity (m/s)
Complexity: O(1)
'''
self.cur_linear_velocity = vel
def set_current_angular_velocity(self, vel=0):
'''
Sets the current angular velocity of the vehicle for the controller
to use
        Args:
            float: vel - the current angular velocity (rad/s)
Complexity: O(1)
'''
self.cur_angular_velocity = vel
def set_target_linear_velocity(self, vel=0):
'''
Sets the target linear velocity of the vehicle for the controller
to use
        Args:
            float: vel - the target linear velocity (m/s)
Complexity: O(1)
'''
self.target_linear_velocity = vel
def set_target_angular_velocity(self, vel=0):
'''
Sets the target angular velocity of the vehicle for the controller
to use
        Args:
            float: vel - the target angular velocity (rad/s)
Complexity: O(1)
'''
self.target_angular_velocity = vel
def control(self):
'''
        Returns the desired throttle, brake and steering values
        Returns:
            tuple<float>: (throttle, brake, steer)
Complexity: O(1)
'''
# Values to return
throttle = 0.0
brake = 0.0
steer = 0.0
# Run steering controller
steer = self.steering_controller.get_steering(
self.target_linear_velocity,
self.target_angular_velocity,
self.cur_linear_velocity
)
# Run throttle controller
throttle, brake = self.throttle_controller.get_throttle_brake(
self.target_linear_velocity,
self.target_angular_velocity,
self.cur_linear_velocity
)
# Hand back values
return throttle, brake, steer
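# Illustrative usage sketch (the parameter values below are hypothetical, not
# taken from the original source):
#   controller = Controller(wheel_base=2.8, steer_ratio=14.8, min_speed=0.1,
#                           max_lat_accel=3.0, max_steer_angle=8.0)
#   controller.set_current_linear_velocity(10.0)
#   controller.set_target_linear_velocity(11.0)
#   controller.set_target_angular_velocity(0.1)
#   throttle, brake, steer = controller.control()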
|
[
"yaw_controller.YawController",
"velocity_controller.VelocityController"
] |
[((3209, 3358), 'yaw_controller.YawController', 'YawController', ([], {'wheel_base': 'wheel_base', 'steer_ratio': 'steer_ratio', 'min_speed': 'min_speed', 'max_lat_accel': 'max_lat_accel', 'max_steer_angle': 'max_steer_angle'}), '(wheel_base=wheel_base, steer_ratio=steer_ratio, min_speed=\n min_speed, max_lat_accel=max_lat_accel, max_steer_angle=max_steer_angle)\n', (3222, 3358), False, 'from yaw_controller import YawController\n'), ((3622, 3863), 'velocity_controller.VelocityController', 'VelocityController', ([], {'vehicle_mass': 'vehicle_mass', 'max_accel': 'max_accel', 'max_decel': 'max_decel', 'max_input_accel': 'max_input_accel', 'max_input_decel': 'max_input_decel', 'wheel_radius': 'wheel_radius', 'deadband': 'deadband', 'fuel_capacity': 'fuel_capacity'}), '(vehicle_mass=vehicle_mass, max_accel=max_accel,\n max_decel=max_decel, max_input_accel=max_input_accel, max_input_decel=\n max_input_decel, wheel_radius=wheel_radius, deadband=deadband,\n fuel_capacity=fuel_capacity)\n', (3640, 3863), False, 'from velocity_controller import VelocityController\n')]
|
from typing import List, Set, Optional, Tuple
from random import randrange, shuffle, random
from RatInterface import Rat, MazeInfo
from SimpleRats import AlwaysLeftRat, RandomRat
from Localizer import Localizer, NonLocalLocalizer, OneDimensionalLocalizer, TwoDimensionalOneStepLocalizer
from graphviz import Graph
class SimpleMaze:
"""
A simple maze is a vector of vectors of edges. It supports one
rat at a time. It has one start and one end. WLOG, the start is
always the first element and the end is one after the last. There is no
concept of compass directions in this maze, and there is no
policing to prevent crossing paths.
"""
def __init__(self, edges: List[List[int]], fill_back_steps: bool):
"""
Initialise with a set of edges. If fill_back_steps is true, we
generate backward edges to make it an undirected graph.
"""
validate_edges(edges, fill_back_steps)
self.all_edges = edges
def __str__(self):
return "SimpleMaze(%s)" % self.all_edges
def maze(self) -> List[List[int]]:
return self.all_edges
def solve(self, rat: Rat, max_iterations: int, info: Optional[MazeInfo] = None) -> bool:
"""
Tries to solve the maze. Returns the number of iterations used.
If it exceeds max_iterations, returns max_iterations + 1. If it
fails for any other reason, returns 0.
"""
# always start from the beginning
pos = 0
iterations = 0
# set the last_pos such that the back path is the last in the first list
last_pos = self.all_edges[pos][-1]
#print("pos=%i last_pos=%i" % (pos, last_pos))
# keep going until the end
end = len(self.all_edges)
while (pos < end) and (iterations <= max_iterations):
# find the edges from the current node
edges = self.all_edges[pos]
# one of these edges should point back to where we came from
            if edges.count(last_pos) != 1:
                print("Problem: no edge from %i to %i" % (pos, last_pos))
                return 0  # malformed maze: give up rather than raise in index()
            back = edges.index(last_pos)
# supply maze info for rats that need it. There is only one rat,
# so supply rat number zero
num_edges = len(edges)
if info:
info.set_pos(pos, back, num_edges, rat)
# get the rat to choose a direction
turn = rat.turn(num_edges, info)
if (turn >= num_edges) or (turn < 0):
return 0 # give up
# going in some direction
direction = (turn + back) % num_edges
last_pos = pos
pos = edges[direction]
iterations = iterations + 1
#print("pos=%i last_pos=%i" % (pos, last_pos))
# hit the end, or failed with an iteration count that is too high
# (technically we should worry about the case where we hit max
# iterations with a valid exit, but this is unlikely and does not
# matter much).
return iterations
def validate_edges(edges: List[List[int]], fill_back_steps: bool):
"""
validate and optionally fill back steps
"""
end = len(edges)
if end < 1:
raise Exception("Must be at least one node")
has_end = False
edge_from = 0
for node in edges:
if len(set(node)) != len(node):
raise Exception("Must not have duplicate edges")
for edge_to in node:
if edge_to == end:
has_end = True # OK to have multiple routes to end
elif edge_to > end:
raise Exception("Edge out of range")
elif edge_to == edge_from:
raise Exception("Not allowed to have edges to self")
elif fill_back_steps:
# make sure we have a return edge matching this
ensure_edge(edges, edge_to, edge_from)
# next node
edge_from = edge_from + 1
# We validate that at least one node has an edge leading to the
# exit. However, we do not currently check that there is a clear
# path to any such node.
if not has_end:
raise Exception("No edge to the end node")
def ensure_edge(maze: List[List[int]], edge_from: int, edge_to: int):
"""
Validates that we have an edge (and if necessary inserts one)
"""
node = maze[edge_from]
count = node.count(edge_to)
if count == 1:
return # already have this edge. Nothing more to do
elif count > 1:
raise Exception("Edges must be unique")
# We need this edge. Append it (no attempt to avoid crossing paths)
node.append(edge_to)
def random_maze(allow_loops: float, local: Localizer) -> List[List[int]]:
"""
Creates a random maze with the specified number of nodes.
"""
    # Do NOT write maze = [[]] * node_count, as that makes every element refer
    # to the same list object!
    node_count = local.node_count()
    maze = [[] for _ in range(node_count)]
# Remember all the nodes that connect to the origin. So far, just
# contains the origin, which is zero by definition.
accessible = { 0 }
# First do a random walk until we hit the end. There may be loops,
# but we don't worry about that. Just make sure there are no duplicate
# edges. Also, create bidirectional edges as we go.
edge_from = 0
while edge_from != node_count:
edge_to = local.random_step(edge_from, True)
add_bidirectional_edges(maze, accessible, edge_from, edge_to, allow_loops)
edge_from = edge_to
# We now have a working maze, but not a very interesting one, in that it
# just has one random path from start to end. Add some blind alleys and
# ensure that every node has at least one edge, which somehow connects to
# the original random walk, hence the start (and the end)
for i in range(node_count):
if not (i in accessible):
# random walk from i until we hit the accessible set
new_path = { i }
edge_from = i
while not (edge_from in accessible):
edge_to = local.random_step(edge_from, False) # avoid the exit
add_bidirectional_edges(maze, new_path, edge_from, edge_to, allow_loops)
edge_from = edge_to
# all these nodes are now accessible
accessible.update(new_path)
# We now have a maze with some blind alleys and all nodes are accessible.
# Shuffle the edges in each node (we do not want the first edge to always
# be the one that leads to the exit) and return it.
for node in maze:
shuffle(node)
return maze
def add_bidirectional_edges(
maze: List[List[int]],
accessible: Set[int],
edge_from: int,
edge_to: int,
allow_loops: float):
"""
Adds (or at least ensures the existence of) bidirectional edges, and adds
the end node to a set of accessible nodes. If allow_loops is zero, we prevent
loops (avoid adding an edge that leads to an accessible node). If it is one,
we allow them. If between zero and one, we randomly allow them or not.
"""
if edge_to != edge_from and allow_edge(allow_loops, edge_to, accessible):
ensure_edge(maze, edge_from, edge_to)
if edge_to != len(maze): # do not need back path from the exit
ensure_edge(maze, edge_to, edge_from)
accessible.add(edge_to)
def allow_edge(allow_loops: float, edge_to: int, accessible: Set[int]) -> bool:
EPSILON = 1e-10
if allow_loops > 1.0 - EPSILON:
return True
elif not (edge_to in accessible):
return True
elif allow_loops < EPSILON:
return False
elif random() < allow_loops:
return True
else:
return False
def render_graph(maze: List[List[int]], file_name):
"""
Generate a PDF file showing the maze as an undirected graph. Uses
GraphViz, which must be installed and on the PATH. Note that
the resulting graph shows only the nodes and their connections. The
ordering of edges around each node is determined by GraphViz itself.
You therefore cannot rely on this rendering to tell you whether to
turn left or right at each node.
"""
    if len(maze) > 25:
        raise Exception("render_graph can only handle up to 25 nodes (plus the exit node)")
dot = Graph()
this = 0
edges = []
unknowns = 0
A = ord('A')
a = ord('a')
for node in maze:
id = str(chr(A + this))
if this == 0:
dot.node(id, "Start (A)")
else:
dot.node(id, id)
for edge in node:
# avoid duplicating edges by only showing to > from
if edge > this:
edge_str = id + str(chr(A + edge))
edges.append(edge_str)
elif edge < 0:
unknown_id = str(chr(a + unknowns))
unknowns = unknowns + 1
edge_str = id + unknown_id
edges.append(edge_str)
dot.node(unknown_id, "Unknown")
this = this + 1
# The final node is not in the list, as it exists only as the destination
# of one or more edge
id = str(chr(A + len(maze)))
dot.node(id, "End (%s)" % id)
#print(edges)
dot.edges(edges)
#print(dot.source)
dot.render(file_name, view=True)
def are_equal_mazes(left: List[List[int]], right: List[List[int]],
left_start: int = 0, right_start: int = 0) -> bool:
"""
Test whether two mazes are the same. The nodes may not be in the same
order, and the edges may be rotated, but the topology should be the same.
Handle negative nodes as a wildcard, matching anything.
"""
return are_nodes_equal(left, right, left_start, right_start, -1, -1, set())
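# For example (illustrative): a node whose edges read [1, 2, 3] can match a
# node whose corresponding edges appear rotated as [3, 1, 2], since only the
# cyclic ordering of edges around each node is significant.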
def are_nodes_equal(
left: List[List[int]],
right: List[List[int]],
left_node: int,
right_node: int,
left_back: int,
right_back: int,
already_checked: Set[Tuple[int, int]]) -> bool:
#print("are_nodes_equal(%i, %i, %i, %i)"
# % (left_node, right_node, left_back, right_back))
# Treat negative nodes as wildcards, matching anything
if left_node < 0 or right_node < 0:
return True
# Only match nodes that are out of range if both are
left_ended = left_node >= len(left)
right_ended = right_node >= len(right)
if left_ended != right_ended:
#print("not equal: one of %i and %i is the end" % (left_node, right_node))
return False
elif left_ended:
return True
# Avoid recursing for ever if there are loops in the mazes
if (left_node, right_node) in already_checked:
return True
already_checked.add((left_node, right_node))
# Got two real nodes. Make sure they have the same number of edges
left_edges = left[left_node]
right_edges = right[right_node]
edge_count = len(left_edges)
if edge_count != len(right_edges):
#print("not equal: %i has %i edges and %i has %i" % (left_node, len(left_edges), right_node, len(right_edges)))
return False
# May both be empty (unlikely, as this would make a very trivial maze)
if not left_edges:
return True
# We rely on the back pointer to tell us the relative rotation.
if left_back >= 0 and left_back in left_edges and right_back >= 0 and right_back in right_edges:
left_index = left_edges.index(left_back)
right_index = right_edges.index(right_back)
rotation = right_index - left_index
return are_edges_equal(left_edges, right_edges, right, left,
left_node, right_node, rotation, already_checked)
# if no back-pointer defined, just try all the possibilities
else:
for r in range(edge_count):
if are_edges_equal(left_edges, right_edges, right, left,
left_node, right_node, r, already_checked):
return True
#print("not equal: no possible rotation of %i and %i works" % (left_node, right_node))
return False
def are_edges_equal(
left_edges: List[int],
right_edges: List[int],
left: List[List[int]],
right: List[List[int]],
left_node: int,
right_node: int,
rotation: int,
already_checked: Set[Tuple[int, int]]) -> bool:
#print("are_edges_equal(%s, %s, %i, %i, %i)"
# % (left_edges, right_edges, left_node, right_node, rotation))
edge_count = len(left_edges)
assert(edge_count == len(right_edges))
for i, left_edge in enumerate(left_edges):
right_edge = right_edges[(i + rotation) % edge_count]
if not are_nodes_equal(right, left, left_edge, right_edge,
left_node, right_node, already_checked):
return False
return True
def test_fill_back_steps():
maze = SimpleMaze([[1, 3], [2], [3, 0]], True)
print("test_fill_back_steps: %s" % maze)
assert(maze.maze() == [[1, 3, 2], [2, 0], [3, 0, 1]])
def test_equal_mazes():
maze1 = SimpleMaze([[1, 3], [2], [0, 3]], True)
maze2 = SimpleMaze([[2, 3], [0, 3], [1]], True)
#print(maze1.maze())
#print(maze2.maze())
assert(are_equal_mazes(maze1.maze(), maze2.maze()))
print("test_equal_mazes succeeded")
def test_unequal_mazes():
maze1 = SimpleMaze([[1, 3, 2], [2, 0], [0, 3, 1]], False)
maze2 = SimpleMaze([[2, 3, 1], [3, 0, 2], [1, 0]], False)
#print(maze1.maze())
#print(maze2.maze())
assert(not are_equal_mazes(maze1.maze(), maze2.maze()))
print("test_unequal_mazes succeeded")
def test_left_rat():
rat = AlwaysLeftRat()
maze = SimpleMaze([[1, 3], [2], [3, 0]], True)
MAX_ITER = 10
iter = maze.solve(rat, MAX_ITER)
print("test_left_rat solved in %i iterations" % iter)
assert(iter > 0 and iter <= MAX_ITER)
def test_left_rat_fail():
rat = AlwaysLeftRat()
# this maze has a loop in it (0 -> 1 -> 2 -> 0)
maze = SimpleMaze([[1, 3], [2], [0, 3]], True)
MAX_ITER = 10
iter = maze.solve(rat, MAX_ITER)
print("test_left_rat_fail timed out as desired after %i iterations" % iter)
assert(iter > MAX_ITER)
def test_random_rat():
rat = RandomRat()
maze = SimpleMaze([[1, 3], [2], [3, 0]], True)
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_random_rat solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
def test_big_maze():
rat = RandomRat()
maze = SimpleMaze([[5, 3], [6], [5, 3, 17, 14, 13, 20],
[2, 0, 4, 14, 13, 5, 17, 12], [7, 3], [0, 14, 9, 2, 6, 3],
[5, 13, 1], [8, 4, 19, 10], [14, 7], [14, 5, 17], [7, 13],
[15, 16], [3, 15], [6, 17, 10, 3, 16, 2], [5, 9, 2, 8, 3, 19],
[12, 11, 18], [11, 13], [13, 2, 9, 3], [15], [14, 7]], False)
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_big_maze solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
def test_random_maze():
maze = SimpleMaze(random_maze(0.5, NonLocalLocalizer(25)), False)
#print(maze)
render_graph(maze.maze(), "temp/random_maze")
rat = RandomRat()
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_random_maze solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
def test_random_noloop_maze():
maze = SimpleMaze(random_maze(0.0, NonLocalLocalizer(25)), False)
#print(maze)
render_graph(maze.maze(), "temp/random_noloop_maze")
rat = AlwaysLeftRat()
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_random_noloop_maze solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
def test_random_1d_maze():
maze = SimpleMaze(random_maze(0.5, OneDimensionalLocalizer(25, 5)), False)
#print(maze)
render_graph(maze.maze(), "temp/random_1d_maze")
rat = RandomRat()
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_random_1d_maze solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
def test_random_2d_maze():
maze = SimpleMaze(random_maze(0.1, TwoDimensionalOneStepLocalizer(25, 5)), False)
#print(maze)
render_graph(maze.maze(), "temp/random_2d_maze")
rat = RandomRat()
MAX_ITER = 1000
iter = maze.solve(rat, MAX_ITER)
print("test_random_2d_maze solved in %i iterations" % iter)
assert(iter > 0 and iter < MAX_ITER)
if __name__ == "__main__":
test_fill_back_steps()
test_equal_mazes()
test_unequal_mazes()
test_left_rat()
test_left_rat_fail()
test_random_rat()
test_big_maze()
test_random_maze()
test_random_noloop_maze()
test_random_1d_maze()
test_random_2d_maze()
|
[
"Localizer.TwoDimensionalOneStepLocalizer",
"Localizer.OneDimensionalLocalizer",
"SimpleRats.RandomRat",
"random.shuffle",
"Localizer.NonLocalLocalizer",
"SimpleRats.AlwaysLeftRat",
"random.random",
"graphviz.Graph"
] |
[((8627, 8634), 'graphviz.Graph', 'Graph', ([], {}), '()\n', (8632, 8634), False, 'from graphviz import Graph\n'), ((13970, 13985), 'SimpleRats.AlwaysLeftRat', 'AlwaysLeftRat', ([], {}), '()\n', (13983, 13985), False, 'from SimpleRats import AlwaysLeftRat, RandomRat\n'), ((14237, 14252), 'SimpleRats.AlwaysLeftRat', 'AlwaysLeftRat', ([], {}), '()\n', (14250, 14252), False, 'from SimpleRats import AlwaysLeftRat, RandomRat\n'), ((14562, 14573), 'SimpleRats.RandomRat', 'RandomRat', ([], {}), '()\n', (14571, 14573), False, 'from SimpleRats import AlwaysLeftRat, RandomRat\n'), ((14823, 14834), 'SimpleRats.RandomRat', 'RandomRat', ([], {}), '()\n', (14832, 14834), False, 'from SimpleRats import AlwaysLeftRat, RandomRat\n'), ((15516, 15527), 'SimpleRats.RandomRat', 'RandomRat', ([], {}), '()\n', (15525, 15527), False, 'from SimpleRats import AlwaysLeftRat, RandomRat\n'), ((15883, 15898), 'SimpleRats.AlwaysLeftRat', 'AlwaysLeftRat', ([], {}), '()\n', (15896, 15898), False, 'from SimpleRats import AlwaysLeftRat, RandomRat\n'), ((16262, 16273), 'SimpleRats.RandomRat', 'RandomRat', ([], {}), '()\n', (16271, 16273), False, 'from SimpleRats import AlwaysLeftRat, RandomRat\n'), ((16640, 16651), 'SimpleRats.RandomRat', 'RandomRat', ([], {}), '()\n', (16649, 16651), False, 'from SimpleRats import AlwaysLeftRat, RandomRat\n'), ((6868, 6881), 'random.shuffle', 'shuffle', (['node'], {}), '(node)\n', (6875, 6881), False, 'from random import randrange, shuffle, random\n'), ((15405, 15426), 'Localizer.NonLocalLocalizer', 'NonLocalLocalizer', (['(25)'], {}), '(25)\n', (15422, 15426), False, 'from Localizer import Localizer, NonLocalLocalizer, OneDimensionalLocalizer, TwoDimensionalOneStepLocalizer\n'), ((15765, 15786), 'Localizer.NonLocalLocalizer', 'NonLocalLocalizer', (['(25)'], {}), '(25)\n', (15782, 15786), False, 'from Localizer import Localizer, NonLocalLocalizer, OneDimensionalLocalizer, TwoDimensionalOneStepLocalizer\n'), ((16139, 16169), 'Localizer.OneDimensionalLocalizer', 'OneDimensionalLocalizer', (['(25)', '(5)'], {}), '(25, 5)\n', (16162, 16169), False, 'from Localizer import Localizer, NonLocalLocalizer, OneDimensionalLocalizer, TwoDimensionalOneStepLocalizer\n'), ((16510, 16547), 'Localizer.TwoDimensionalOneStepLocalizer', 'TwoDimensionalOneStepLocalizer', (['(25)', '(5)'], {}), '(25, 5)\n', (16540, 16547), False, 'from Localizer import Localizer, NonLocalLocalizer, OneDimensionalLocalizer, TwoDimensionalOneStepLocalizer\n'), ((7967, 7975), 'random.random', 'random', ([], {}), '()\n', (7973, 7975), False, 'from random import randrange, shuffle, random\n')]
|
import os
#from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#from tensorflow.keras.models import Sequential
#from tensorflow.keras.layers import Activation, Dropout, Flatten, Dense
#from tensorflow.keras import optimizers
#from tensorflow.keras.utils import np_utils
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D,Input
import tensorflow.keras.callbacks
from tensorflow.keras.optimizers import SGD
import numpy as np
#from smallcnn import save_history
"""
classes = ['Tulip', 'Snowdrop', 'LilyValley', 'Bluebell', 'Crocus',
'Iris', 'Tigerlily', 'Daffodil', 'Fritillary', 'Sunflower',
'Daisy', 'ColtsFoot', 'Dandelion', 'Cowslip', 'Buttercup',
'Windflower', 'Pansy']
"""
classes = ['Dog', 'Cat', 'Raccoon', 'Macaque']
#IMAGE_SIZE = 150
BATCH_SIZE = 32
#number of images per batch
NUM_TRAINING_SAMPLES = 4000
#total number of training images
NUM_VALIDATION_SAMPLES = 1000
#total number of test images
EPOCHS = 50
#number of epochs
N_CLASSES = len(classes)
#number of classes
IMG_ROWS, IMG_COLS = 150, 150
#image size
CHANNELS = 3
#number of image channels (3 for RGB)
train_data_dir = 'data/train'
#training data directory
validation_data_dir = 'data/validation'
#test data directory
result_dir = 'results'
#results directory
if not os.path.exists(result_dir):
os.mkdir(result_dir)
def save_history(history, result_file):
loss = history.history['loss']
acc = history.history['acc']
val_loss = history.history['val_loss']
val_acc = history.history['val_acc']
nb_epoch = len(acc)
with open(result_file, "w") as fp:
fp.write("epoch\tloss\tacc\tval_loss\tval_acc\n")
for i in range(nb_epoch):
fp.write("%d\t%f\t%f\t%f\t%f\n" % (i, loss[i], acc[i], val_loss[i], val_acc[i]))
def train_top_model():
    #build an FCNN that takes InceptionV3 bottleneck features as input and outputs the correct class
    input_tensor = Input(shape=(IMG_ROWS, IMG_COLS, CHANNELS))
    #input tensor (a rank-3 tensor: image height x width x RGB channels)
    base_model = InceptionV3(weights='imagenet', include_top=False, input_tensor=input_tensor)
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    #flatten the output tensor (here via global average pooling)
    x = Dense(1024, activation='relu')(x)
    #fully connected layer: 1024 units, ReLU activation
predictions = Dense(N_CLASSES, activation='softmax')(x)
model = Model(inputs=base_model.input, outputs=predictions)
for layer in base_model.layers:
layer.trainable = False
model.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy',metrics=['accuracy'])
model.summary()
train_datagen = ImageDataGenerator(rescale=1.0 / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
rotation_range=10
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
train_generator = train_datagen.flow_from_directory(directory=train_data_dir,
target_size=(IMG_ROWS, IMG_COLS),
batch_size=BATCH_SIZE,
class_mode='categorical',
shuffle=True
)
validation_generator = test_datagen.flow_from_directory(directory=validation_data_dir,
target_size=(IMG_ROWS, IMG_COLS),
batch_size=BATCH_SIZE,
class_mode='categorical',
shuffle=True
)
hist = model.fit_generator(generator=train_generator,
steps_per_epoch=NUM_TRAINING_SAMPLES//BATCH_SIZE,
epochs=EPOCHS,
verbose=1,
validation_data=validation_generator,
validation_steps=NUM_VALIDATION_SAMPLES//BATCH_SIZE,
)
#model.save('vermins_fc_model.hdf5')
model.save(os.path.join(result_dir, 'vermins_fc_model.h5'))
save_history(hist, os.path.join(result_dir, 'history_extractor.txt'))
if __name__ == '__main__':
#save_bottleneck_features()
train_top_model()
|
[
"os.mkdir",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.applications.inception_v3.InceptionV3",
"tensorflow.keras.optimizers.SGD",
"os.path.exists",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Input",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"os.path.join"
] |
[((1356, 1382), 'os.path.exists', 'os.path.exists', (['result_dir'], {}), '(result_dir)\n', (1370, 1382), False, 'import os\n'), ((1388, 1408), 'os.mkdir', 'os.mkdir', (['result_dir'], {}), '(result_dir)\n', (1396, 1408), False, 'import os\n'), ((1947, 1990), 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(IMG_ROWS, IMG_COLS, CHANNELS)'}), '(shape=(IMG_ROWS, IMG_COLS, CHANNELS))\n', (1952, 1990), False, 'from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Input\n'), ((2049, 2126), 'tensorflow.keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_tensor': 'input_tensor'}), "(weights='imagenet', include_top=False, input_tensor=input_tensor)\n", (2060, 2126), False, 'from tensorflow.keras.applications.inception_v3 import InceptionV3\n'), ((2350, 2401), 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'base_model.input', 'outputs': 'predictions'}), '(inputs=base_model.input, outputs=predictions)\n', (2355, 2401), False, 'from tensorflow.keras.models import Model\n'), ((2636, 2751), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)', 'rotation_range': '(10)'}), '(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True, rotation_range=10)\n', (2654, 2751), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2963, 3000), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (2981, 3000), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), ((2160, 2184), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), '()\n', (2182, 2184), False, 'from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Input\n'), ((2216, 2246), 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (2221, 2246), False, 'from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Input\n'), ((2296, 2334), 'tensorflow.keras.layers.Dense', 'Dense', (['N_CLASSES'], {'activation': '"""softmax"""'}), "(N_CLASSES, activation='softmax')\n", (2301, 2334), False, 'from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Input\n'), ((4424, 4471), 'os.path.join', 'os.path.join', (['result_dir', '"""vermins_fc_model.h5"""'], {}), "(result_dir, 'vermins_fc_model.h5')\n", (4436, 4471), False, 'import os\n'), ((4496, 4545), 'os.path.join', 'os.path.join', (['result_dir', '"""history_extractor.txt"""'], {}), "(result_dir, 'history_extractor.txt')\n", (4508, 4545), False, 'import os\n'), ((2512, 2540), 'tensorflow.keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.0001)', 'momentum': '(0.9)'}), '(lr=0.0001, momentum=0.9)\n', (2515, 2540), False, 'from tensorflow.keras.optimizers import SGD\n')]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: pyatv/protocols/mrp/protobuf/AudioFadeResponseMessage.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n;pyatv/protocols/mrp/protobuf/AudioFadeResponseMessage.proto\x1a\x32pyatv/protocols/mrp/protobuf/ProtocolMessage.proto\"0\n\x18\x41udioFadeResponseMessage\x12\x14\n\x0c\x66\x61\x64\x65\x44uration\x18\x01 \x01(\x03:M\n\x18\x61udioFadeResponseMessage\x12\x10.ProtocolMessage\x18Y \x01(\x0b\x32\x19.AudioFadeResponseMessage')
AUDIOFADERESPONSEMESSAGE_FIELD_NUMBER = 89
audioFadeResponseMessage = DESCRIPTOR.extensions_by_name['audioFadeResponseMessage']
_AUDIOFADERESPONSEMESSAGE = DESCRIPTOR.message_types_by_name['AudioFadeResponseMessage']
AudioFadeResponseMessage = _reflection.GeneratedProtocolMessageType('AudioFadeResponseMessage', (_message.Message,), {
'DESCRIPTOR' : _AUDIOFADERESPONSEMESSAGE,
'__module__' : 'pyatv.protocols.mrp.protobuf.AudioFadeResponseMessage_pb2'
# @@protoc_insertion_point(class_scope:AudioFadeResponseMessage)
})
_sym_db.RegisterMessage(AudioFadeResponseMessage)
if _descriptor._USE_C_DESCRIPTORS == False:
pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension(audioFadeResponseMessage)
DESCRIPTOR._options = None
_AUDIOFADERESPONSEMESSAGE._serialized_start=115
_AUDIOFADERESPONSEMESSAGE._serialized_end=163
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"pyatv.protocols.mrp.protobuf.ProtocolMessage_pb2.ProtocolMessage.RegisterExtension",
"google.protobuf.descriptor_pool.Default",
"google.protobuf.reflection.GeneratedProtocolMessageType"
] |
[((522, 548), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (546, 548), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((1315, 1531), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""AudioFadeResponseMessage"""', '(_message.Message,)', "{'DESCRIPTOR': _AUDIOFADERESPONSEMESSAGE, '__module__':\n 'pyatv.protocols.mrp.protobuf.AudioFadeResponseMessage_pb2'}"], {}), "('AudioFadeResponseMessage', (\n _message.Message,), {'DESCRIPTOR': _AUDIOFADERESPONSEMESSAGE,\n '__module__': 'pyatv.protocols.mrp.protobuf.AudioFadeResponseMessage_pb2'})\n", (1355, 1531), True, 'from google.protobuf import reflection as _reflection\n'), ((1697, 1827), 'pyatv.protocols.mrp.protobuf.ProtocolMessage_pb2.ProtocolMessage.RegisterExtension', 'pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2.ProtocolMessage.RegisterExtension', (['audioFadeResponseMessage'], {}), '(\n audioFadeResponseMessage)\n', (1796, 1827), True, 'from pyatv.protocols.mrp.protobuf import ProtocolMessage_pb2 as pyatv_dot_protocols_dot_mrp_dot_protobuf_dot_ProtocolMessage__pb2\n'), ((696, 722), 'google.protobuf.descriptor_pool.Default', '_descriptor_pool.Default', ([], {}), '()\n', (720, 722), True, 'from google.protobuf import descriptor_pool as _descriptor_pool\n')]
|
#!/usr/bin/env python3
#
# Copyright 2018-2020 Internet Corporation for Assigned Names and Numbers.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, you can obtain one at https://mozilla.org/MPL/2.0/.
#
# Developed by Sinodun IT (sinodun.com)
#
# Find the node ID given server and node names.
#
# Usage: dsv-find-node-id <server-name> <node-name>
#
import psycopg2
description = 'find node ID from Postgres.'
def add_args(parser):
parser.add_argument('servername',
help='the server name',
metavar='SERVERNAME')
parser.add_argument('nodename',
help='the node name',
metavar='NODENAME')
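# main() expects cfg to provide a 'postgres' section; the shape below is
# inferred from the code, with placeholder values:
#   cfg = {'postgres': {'host': ..., 'database': ..., 'user': ..., 'password': ...}}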
def main(args, cfg):
conn = None
try:
pgcfg = cfg['postgres']
conn = psycopg2.connect(host=pgcfg['host'],
dbname=pgcfg['database'],
user=pgcfg['user'],
password=pgcfg['password'])
with conn.cursor() as cur:
cur.execute('SELECT node.id FROM node '
'INNER JOIN node_server ON node_server.id = node.server_id '
'WHERE (node_server.name=%(server)s OR '
' node_server.altname=%(server)s) '
'AND (node.name=%(node)s OR node.altname=%(node)s)',
{'server': args.servername, 'node': args.nodename})
res = cur.fetchone()
conn.close()
if res:
print(res[0])
return 0
return 1
except Exception:
if conn is not None:
conn.rollback()
conn.close()
raise
|
[
"psycopg2.connect"
] |
[((888, 1003), 'psycopg2.connect', 'psycopg2.connect', ([], {'host': "pgcfg['host']", 'dbname': "pgcfg['database']", 'user': "pgcfg['user']", 'password': "pgcfg['password']"}), "(host=pgcfg['host'], dbname=pgcfg['database'], user=pgcfg[\n 'user'], password=pgcfg['password'])\n", (904, 1003), False, 'import psycopg2\n')]
|
from gksdudaovld import KoEngMapper as Mapper
from SimpleTester import SimpleTester
tester = SimpleTester("./test_en2ko.txt", Mapper.conv_en2ko)
print(tester.start().log)
# tester = SimpleTester("./test_ko2en.txt", Mapper.conv_ko2en)
# print(tester.start().log)
|
[
"SimpleTester.SimpleTester"
] |
[((94, 145), 'SimpleTester.SimpleTester', 'SimpleTester', (['"""./test_en2ko.txt"""', 'Mapper.conv_en2ko'], {}), "('./test_en2ko.txt', Mapper.conv_en2ko)\n", (106, 145), False, 'from SimpleTester import SimpleTester\n')]
|
"""Test the Nanoleaf config flow."""
from unittest.mock import patch
from pynanoleaf import InvalidToken, NotAuthorizingNewTokens, Unavailable
from homeassistant import config_entries
from homeassistant.components.nanoleaf.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_TOKEN
from homeassistant.core import HomeAssistant
TEST_NAME = "Canvas ADF9"
TEST_HOST = "192.168.0.100"
TEST_TOKEN = "<KEY>"
TEST_OTHER_TOKEN = "<KEY>"
TEST_DEVICE_ID = "5E:2E:EA:XX:XX:XX"
TEST_OTHER_DEVICE_ID = "5E:2E:EA:YY:YY:YY"
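# The tests below patch the integration's pynanoleaf entry points
# (Nanoleaf.authorize, pynanoleaf_get_info) so device behaviour can be
# simulated without any network access.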
async def test_user_unavailable_user_step(hass: HomeAssistant) -> None:
"""Test we handle Unavailable errors when host is not available in user step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Unavailable("message"),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "cannot_connect"}
assert not result2["last_step"]
async def test_user_unavailable_link_step(hass: HomeAssistant) -> None:
"""Test we abort if the device becomes unavailable in the link step."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "link"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Unavailable("message"),
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result3["type"] == "abort"
assert result3["reason"] == "cannot_connect"
async def test_user_unavailable_setup_finish(hass: HomeAssistant) -> None:
"""Test we abort if the device becomes unavailable during setup_finish."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "link"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
), patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
side_effect=Unavailable("message"),
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result3["type"] == "abort"
assert result3["reason"] == "cannot_connect"
async def test_user_not_authorizing_new_tokens(hass: HomeAssistant) -> None:
"""Test we handle NotAuthorizingNewTokens errors."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
assert not result["last_step"]
assert result["step_id"] == "user"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=NotAuthorizingNewTokens("message"),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["errors"] is None
assert result2["step_id"] == "link"
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
)
assert result3["type"] == "form"
assert result3["errors"] is None
assert result3["step_id"] == "link"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=NotAuthorizingNewTokens("message"),
):
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result4["type"] == "form"
assert result4["step_id"] == "link"
assert result4["errors"] == {"base": "not_allowing_new_tokens"}
async def test_user_exception(hass: HomeAssistant) -> None:
"""Test we handle Exception errors."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result2["type"] == "form"
assert result2["step_id"] == "user"
assert result2["errors"] == {"base": "unknown"}
assert not result2["last_step"]
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
):
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{
CONF_HOST: TEST_HOST,
},
)
assert result3["step_id"] == "link"
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Exception,
):
result4 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result4["type"] == "form"
assert result4["step_id"] == "link"
assert result4["errors"] == {"base": "unknown"}
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
return_value=None,
), patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
side_effect=Exception,
):
result5 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result5["type"] == "abort"
assert result5["reason"] == "unknown"
async def test_zeroconf_discovery(hass: HomeAssistant) -> None:
"""Test zeroconfig discovery flow init."""
zeroconf = "_nanoleafms._tcp.local"
with patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
return_value={"name": TEST_NAME},
), patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value={},
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"host": TEST_HOST,
"name": f"{TEST_NAME}.{zeroconf}",
"type": zeroconf,
"properties": {"id": TEST_DEVICE_ID},
},
)
assert result["type"] == "form"
assert result["step_id"] == "link"
async def test_homekit_discovery_link_unavailable(
hass: HomeAssistant,
) -> None:
"""Test homekit discovery and abort if device is unavailable."""
homekit = "_hap._tcp.local"
with patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
return_value={"name": TEST_NAME},
), patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value={},
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HOMEKIT},
data={
"host": TEST_HOST,
"name": f"{TEST_NAME}.{homekit}",
"type": homekit,
"properties": {"id": TEST_DEVICE_ID},
},
)
assert result["type"] == "form"
assert result["step_id"] == "link"
context = next(
flow["context"]
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
assert context["title_placeholders"] == {"name": TEST_NAME}
assert context["unique_id"] == TEST_NAME
with patch(
"homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize",
side_effect=Unavailable("message"),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
async def test_import_config(hass: HomeAssistant) -> None:
"""Test configuration import."""
with patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
return_value={"name": TEST_NAME},
), patch(
"homeassistant.components.nanoleaf.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
}
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
async def test_import_config_invalid_token(hass: HomeAssistant) -> None:
"""Test configuration import with invalid token."""
with patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
side_effect=InvalidToken("message"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_HOST: TEST_HOST, CONF_TOKEN: TEST_TOKEN},
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_token"
async def test_import_last_discovery_integration_host_zeroconf(
hass: HomeAssistant,
) -> None:
"""
Test discovery integration import from < 2021.4 (host) with zeroconf.
Device is last in Nanoleaf config file.
"""
zeroconf = "_nanoleafapi._tcp.local"
with patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value={TEST_HOST: {"token": TEST_TOKEN}},
), patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
return_value={"name": TEST_NAME},
), patch(
"homeassistant.components.nanoleaf.config_flow.os.remove",
return_value=None,
) as mock_remove, patch(
"homeassistant.components.nanoleaf.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"host": TEST_HOST,
"name": f"{TEST_NAME}.{zeroconf}",
"type": zeroconf,
"properties": {"id": TEST_DEVICE_ID},
},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
}
mock_remove.assert_called_once()
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
async def test_import_not_last_discovery_integration_device_id_homekit(
hass: HomeAssistant,
) -> None:
"""
Test discovery integration import from >= 2021.4 (device_id) with homekit.
Device is not the only one in the Nanoleaf config file.
"""
homekit = "_hap._tcp.local"
with patch(
"homeassistant.components.nanoleaf.config_flow.load_json",
return_value={
TEST_DEVICE_ID: {"token": TEST_TOKEN},
TEST_OTHER_DEVICE_ID: {"token": TEST_OTHER_TOKEN},
},
), patch(
"homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info",
return_value={"name": TEST_NAME},
), patch(
"homeassistant.components.nanoleaf.config_flow.save_json",
return_value=None,
) as mock_save_json, patch(
"homeassistant.components.nanoleaf.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_HOMEKIT},
data={
"host": TEST_HOST,
"name": f"{TEST_NAME}.{homekit}",
"type": homekit,
"properties": {"id": TEST_DEVICE_ID},
},
)
assert result["type"] == "create_entry"
assert result["title"] == TEST_NAME
assert result["data"] == {
CONF_HOST: TEST_HOST,
CONF_TOKEN: TEST_TOKEN,
}
mock_save_json.assert_called_once()
await hass.async_block_till_done()
assert len(mock_setup_entry.mock_calls) == 1
|
[
"unittest.mock.patch",
"pynanoleaf.Unavailable",
"pynanoleaf.NotAuthorizingNewTokens",
"pynanoleaf.InvalidToken"
] |
[((1584, 1680), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize"""'], {'return_value': 'None'}), "('homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize',\n    return_value=None)\n", (1589, 1680), False, 'from unittest.mock import patch\n'), ((2596, 2692), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize"""'], {'return_value': 'None'}), "('homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize',\n    return_value=None)\n", (2601, 2692), False, 'from unittest.mock import patch\n'), ((2974, 3070), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize"""'], {'return_value': 'None'}), "('homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize',\n    return_value=None)\n", (2979, 3070), False, 'from unittest.mock import patch\n'), ((5160, 5260), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize"""'], {'side_effect': 'Exception'}), "('homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize',\n    side_effect=Exception)\n", (5165, 5260), False, 'from unittest.mock import patch\n'), ((5630, 5726), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize"""'], {'return_value': 'None'}), "('homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize',\n    return_value=None)\n", (5635, 5726), False, 'from unittest.mock import patch\n'), ((5971, 6071), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize"""'], {'side_effect': 'Exception'}), "('homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize',\n    side_effect=Exception)\n", (5976, 6071), False, 'from unittest.mock import patch\n'), ((6354, 6450), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize"""'], {'return_value': 'None'}), "('homeassistant.components.nanoleaf.config_flow.Nanoleaf.authorize',\n    return_value=None)\n", (6359, 6450), False, 'from unittest.mock import patch\n'), ((6471, 6572), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info"""'], {'side_effect': 'Exception'}), "('homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info',\n    side_effect=Exception)\n", (6476, 6572), False, 'from unittest.mock import patch\n'), ((6958, 7070), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info"""'], {'return_value': "{'name': TEST_NAME}"}), "('homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info',\n    return_value={'name': TEST_NAME})\n", (6963, 7070), False, 'from unittest.mock import patch\n'), ((7091, 7176), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.load_json"""'], {'return_value': '{}'}), "('homeassistant.components.nanoleaf.config_flow.load_json',\n    return_value={})\n", (7096, 7176), False, 'from unittest.mock import patch\n'), ((7833, 7945), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info"""'], {'return_value': "{'name': TEST_NAME}"}), "('homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info',\n    return_value={'name': TEST_NAME})\n", (7838, 7945), False, 'from unittest.mock import patch\n'), ((7966, 8051), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.load_json"""'], {'return_value': '{}'}), "('homeassistant.components.nanoleaf.config_flow.load_json',\n    return_value={})\n", (7971, 8051), False, 'from unittest.mock import patch\n'), ((9199, 9311), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info"""'], {'return_value': "{'name': TEST_NAME}"}), "('homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info',\n    return_value={'name': TEST_NAME})\n", (9204, 9311), False, 'from unittest.mock import patch\n'), ((9332, 9411), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.async_setup_entry"""'], {'return_value': '(True)'}), "('homeassistant.components.nanoleaf.async_setup_entry', return_value=True)\n", (9337, 9411), False, 'from unittest.mock import patch\n'), ((10808, 10925), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.load_json"""'], {'return_value': "{TEST_HOST: {'token': TEST_TOKEN}}"}), "('homeassistant.components.nanoleaf.config_flow.load_json',\n    return_value={TEST_HOST: {'token': TEST_TOKEN}})\n", (10813, 10925), False, 'from unittest.mock import patch\n'), ((10946, 11058), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info"""'], {'return_value': "{'name': TEST_NAME}"}), "('homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info',\n    return_value={'name': TEST_NAME})\n", (10951, 11058), False, 'from unittest.mock import patch\n'), ((11079, 11166), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.os.remove"""'], {'return_value': 'None'}), "('homeassistant.components.nanoleaf.config_flow.os.remove',\n    return_value=None)\n", (11084, 11166), False, 'from unittest.mock import patch\n'), ((11202, 11281), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.async_setup_entry"""'], {'return_value': '(True)'}), "('homeassistant.components.nanoleaf.async_setup_entry', return_value=True)\n", (11207, 11281), False, 'from unittest.mock import patch\n'), ((12304, 12481), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.load_json"""'], {'return_value': "{TEST_DEVICE_ID: {'token': TEST_TOKEN}, TEST_OTHER_DEVICE_ID: {'token':\n    TEST_OTHER_TOKEN}}"}), "('homeassistant.components.nanoleaf.config_flow.load_json',\n    return_value={TEST_DEVICE_ID: {'token': TEST_TOKEN},\n    TEST_OTHER_DEVICE_ID: {'token': TEST_OTHER_TOKEN}})\n", (12309, 12481), False, 'from unittest.mock import patch\n'), ((12533, 12645), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info"""'], {'return_value': "{'name': TEST_NAME}"}), "('homeassistant.components.nanoleaf.config_flow.pynanoleaf_get_info',\n    return_value={'name': TEST_NAME})\n", (12538, 12645), False, 'from unittest.mock import patch\n'), ((12666, 12753), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.config_flow.save_json"""'], {'return_value': 'None'}), "('homeassistant.components.nanoleaf.config_flow.save_json',\n    return_value=None)\n", (12671, 12753), False, 'from unittest.mock import patch\n'), ((12792, 12871), 'unittest.mock.patch', 'patch', (['"""homeassistant.components.nanoleaf.async_setup_entry"""'], {'return_value': '(True)'}), "('homeassistant.components.nanoleaf.async_setup_entry', return_value=True)\n", (12797, 12871), False, 'from unittest.mock import patch\n'), ((923, 945), 'pynanoleaf.Unavailable', 'Unavailable', (['"""message"""'], {}), "('message')\n", (934, 945), False, 'from pynanoleaf import InvalidToken, NotAuthorizingNewTokens, Unavailable\n'), ((2065, 2087), 'pynanoleaf.Unavailable', 'Unavailable', (['"""message"""'], {}), "('message')\n", (2076, 2087), False, 'from pynanoleaf import InvalidToken, NotAuthorizingNewTokens, Unavailable\n'), ((3195, 3217), 'pynanoleaf.Unavailable', 'Unavailable', (['"""message"""'], {}), "('message')\n", (3206, 3217), False, 'from pynanoleaf import InvalidToken, NotAuthorizingNewTokens, Unavailable\n'), ((3956, 3990), 'pynanoleaf.NotAuthorizingNewTokens', 'NotAuthorizingNewTokens', (['"""message"""'], {}), "('message')\n", (3979, 3990), False, 'from pynanoleaf import InvalidToken, NotAuthorizingNewTokens, Unavailable\n'), ((4610, 4644), 'pynanoleaf.NotAuthorizingNewTokens', 'NotAuthorizingNewTokens', (['"""message"""'], {}), "('message')\n", (4633, 4644), False, 'from pynanoleaf import InvalidToken, NotAuthorizingNewTokens, Unavailable\n'), ((8889, 8911), 'pynanoleaf.Unavailable', 'Unavailable', (['"""message"""'], {}), "('message')\n", (8900, 8911), False, 'from pynanoleaf import InvalidToken, NotAuthorizingNewTokens, Unavailable\n'), ((10188, 10211), 'pynanoleaf.InvalidToken', 'InvalidToken', (['"""message"""'], {}), "('message')\n", (10200, 10211), False, 'from pynanoleaf import InvalidToken, NotAuthorizingNewTokens, Unavailable\n')]
|
import urllib
from bs4 import BeautifulSoup
from urllib import request
page = urllib.request.urlopen('https://www.python.org')
st = page.read()
soup = BeautifulSoup(st, 'html.parser')
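# 'html.parser' is Python's built-in parser; 'lxml' is a faster drop-in
# alternative if it is installed.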
print("Text in the first a tag:")
li = soup.find_all("li")
for i in li:
a = i.find('a')
print(a.attrs['href'])
|
[
"bs4.BeautifulSoup",
"urllib.request.urlopen"
] |
[((80, 128), 'urllib.request.urlopen', 'urllib.request.urlopen', (['"""https://www.python.org"""'], {}), "('https://www.python.org')\n", (102, 128), False, 'import urllib\n'), ((153, 185), 'bs4.BeautifulSoup', 'BeautifulSoup', (['st', '"""html.parser"""'], {}), "(st, 'html.parser')\n", (166, 185), False, 'from bs4 import BeautifulSoup\n')]
|
#!/usr/bin/env python3
import os
import sys
import unittest
from ietf.cmd.mirror import assemble_rsync
class TestAssembleRsync(unittest.TestCase):
boilerplate = ['rsync', '-az', '--delete-during']
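    # Each entry pairs a document type with the rsync argv (minus the local
    # destination path) that assemble_rsync is expected to build for it.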
rsync_no_path = (('charter', boilerplate +
['ietf.org::everything-ftp/ietf/']),
('conflict', boilerplate +
['rsync.ietf.org::everything-ftp/conflict-reviews/']),
('draft', boilerplate +
["--exclude='*.xml'",
"--exclude='*.pdf'",
'rsync.ietf.org::internet-drafts']),
('iana', boilerplate +
['rsync.ietf.org::everything-ftp/iana/']),
('iesg', boilerplate + ['rsync.ietf.org::iesg-minutes/']),
('rfc', boilerplate +
["--exclude='tar*'",
"--exclude='search*'",
"--exclude='PDF-RFC*'",
"--exclude='tst/'",
"--exclude='pdfrfc/'",
"--exclude='internet-drafts/'",
"--exclude='ien/'",
'ftp.rfc-editor.org::everything-ftp/in-notes/']),
('status', boilerplate +
['rsync.ietf.org::everything-ftp/status-changes/']))
def test_assemble_rsync(self):
test_path = '/sample/path'
for doc_type, cmd_array in self.rsync_no_path:
expected_path = test_path + '/' + doc_type
expected_cmd = cmd_array + [expected_path]
returned_cmd, returned_path = assemble_rsync(doc_type, test_path,
False)
self.assertEqual(expected_cmd, returned_cmd)
self.assertEqual(expected_path, returned_path)
def test_assemble_rsync_flat(self):
expected_path = '/sample/path'
for doc_type, cmd_array in self.rsync_no_path:
expected_cmd = cmd_array + [expected_path]
returned_cmd, returned_path = assemble_rsync(doc_type,
expected_path, True)
self.assertEqual(expected_cmd, returned_cmd)
self.assertEqual(expected_path, returned_path)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"ietf.cmd.mirror.assemble_rsync"
] |
[((2361, 2376), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2374, 2376), False, 'import unittest\n'), ((1657, 1699), 'ietf.cmd.mirror.assemble_rsync', 'assemble_rsync', (['doc_type', 'test_path', '(False)'], {}), '(doc_type, test_path, False)\n', (1671, 1699), False, 'from ietf.cmd.mirror import assemble_rsync\n'), ((2108, 2153), 'ietf.cmd.mirror.assemble_rsync', 'assemble_rsync', (['doc_type', 'expected_path', '(True)'], {}), '(doc_type, expected_path, True)\n', (2122, 2153), False, 'from ietf.cmd.mirror import assemble_rsync\n')]
|
# Subscribers are created with ZMQ.SUB socket types.
# A zmq subscriber can connect to many publishers.
import sys
import zmq
import base64
import simplejson as json
port = "5563"
if len(sys.argv) > 1:
port = sys.argv[1]
    int(port)  # validate that the port is numeric (raises ValueError otherwise); port itself stays a str
# Socket to talk to server
context = zmq.Context()
socket = context.socket(zmq.SUB)
print("Collecting updates from server...")
#socket.connect("tcp://localhost:%s" % port)
socket.connect("tcp://127.0.0.1:%s" % port)
# Subscribe to all topics. You could instead create multiple workers, each
# responsible for reading from one or more predefined topics; if you have
# used AWS SNS, this is a similar concept.
socket.subscribe("")
while True:
# Receives a string format message
print(socket.recv())
#data = socket.recv()
#dumps the json object into an element
#json_str = json.dumps(data)
#load the json to a string
#resp = json.loads(json_str)
#print the resp
#print (resp)
#print(resp["payload"])
#extract an element in the response
#print(resp['payload'])
#dataEncoded = base64.b64decode(socket.recv())
#print(dataEncoded)
# extract an element in the response
#print (dataEncoded['name'])
#print (dataEncoded['value'])
|
[
"zmq.Context"
] |
[((279, 292), 'zmq.Context', 'zmq.Context', ([], {}), '()\n', (290, 292), False, 'import zmq\n')]
|
import numpy as np
import itertools
from scintillations.stream import modulate as apply_turbulence
from scintillations.stream import transverse_speed
from streaming.stream import Stream, BlockStream
from streaming.signal import *
import streaming.signal
import logging
from acoustics.signal import impulse_response_real_even
import auraliser.tools
logger = auraliser.tools.create_logger(__name__)
def apply_atmospheric_attenuation(signal, fs, distance, nhop, atmosphere, ntaps, inverse=False, distance_reducer=np.mean):
"""Apply atmospheric attenuation to signal.
:param distance: Iterable with distances.
:param fs: Sample frequency.
:param atmosphere: Atmosphere.
:param ntaps: Amount of filter taps.
    :param inverse: If True, apply the inverse filter (undo the attenuation).
:rtype: :class:`streaming.Stream`
Compute and apply the attenuation due to atmospheric absorption.
    The attenuation can change with distance. Because the attenuation is a
    magnitude-only response, we design the filter as a linear-phase FIR filter.
.. note:: The filter delay is compensated by dropping the first `ntaps//2` samples.
"""
# Partition `distance` into blocks, and reduce with `distance_reducer`.
distance = distance.blocks(nhop).map(distance_reducer)
ir = Stream(atmosphere.impulse_response(d, fs, ntaps=ntaps, inverse=inverse) for d in distance)
signal = convolve_overlap_save(signal, ir, nhop, ntaps)
signal = signal.samples().drop(int(ntaps//2)) # Linear phase, correct for group delay caused by FIR filter.
return signal
def apply_reflection_strength(emission, nhop, spectra, effective, ntaps, force_hard):
"""Apply mirror source strength.
:param signal: Signal.
:param nblock: Amount of samples per block.
:param spectra: Spectrum per block.
:param effective: Whether the source is effective or not.
:param ntaps: Amount of filter taps.
:param force_hard: Whether to force a hard ground.
:returns: Signal with correct strength.
.. warning:: This operation will cause a delay that may vary over time.
"""
if effective is not None:
# We have an effectiveness value for each hop (which is a block of samples)
emission = BlockStream(map(lambda x,y: x *y, emission.blocks(nhop), effective), nblock=nhop)
if force_hard:
logger.info("apply_reflection_strength: Hard ground.")
else:
logger.info("apply_reflection_strength: Soft ground.")
impulse_responses = Stream(impulse_response_real_even(s, ntaps) for s in spectra)
emission = convolve_overlap_save(emission, impulse_responses, nhop, ntaps)
# Filter has a delay we need to correct for.
emission = emission.samples().drop(int(ntaps//2))
return emission
#def apply_ground_reflection(signal, ir, nblock):
#"""Apply ground reflection strength.
#:param signal: Signal before ground reflection strength is applied.
#:param ir: Impulse response per block.
#:param nblock: Amount of samples per block.
#:returns: Signal after reflection strength is applied.
#:type: :class:`streaming.BlockStream`
#"""
#signal = convolve(signal=signal, impulse_responses=ir, nblock=nblock)
def apply_doppler(signal, delay, fs, initial_value=0.0, inverse=False):
"""Apply Doppler shift.
:param signal: Signal before Doppler shift.
:param delay: Propagation delay.
:param fs: Constant sample frequency.
:returns: Doppler-shifted signal.
:rtype: :class:`streaming.Stream`
"""
if inverse:
delay = delay * -1 # Unary operators are not yet implemented in Stream
return vdl(signal, times(1./fs), delay, initial_value=initial_value)
def apply_spherical_spreading(signal, distance, inverse=False):#, nblock):
"""Apply spherical spreading.
    :param signal: Signal. Iterable.
    :param distance: Distance. Iterable.
    :param inverse: If True, undo the spreading (multiply by distance instead of dividing).
"""
if inverse:
return signal * distance
else:
return signal / distance
#def undo_reflection(signal, nhop, impedance, angle, ntaps, force_hard):
#"""Undo reflection
#:param signal: Signal.
#:param nhop: Hop size.
#:param impedance: Fixed impedance.
#:param angle: Angle per hop.
#:param ntaps: Taps.
#:param force_hard: Whether to assume infinite impedance.
#"""
#if force_hard:
#tf =
#strength = Stream(reflection_factor_plane_wave(impedance, a) for a in angles.samples())
#tf = 1. / (1. + strength)
#impulse_responses = Stream(atmosphere.impulse_response(d, fs, ntaps=ntaps, inverse=inverse) for d in distance)
#signal = convolve_overlap_save(signal, impulse_responses, nhop, ntaps)
def nextpow2(x):
return int(2**np.ceil(np.log2(x)))
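# Example (not in the original): nextpow2(1000) == 1024, which is handy for
# choosing FFT block sizes for the overlap-save convolutions above.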
|
[
"numpy.log2",
"acoustics.signal.impulse_response_real_even"
] |
[((2442, 2478), 'acoustics.signal.impulse_response_real_even', 'impulse_response_real_even', (['s', 'ntaps'], {}), '(s, ntaps)\n', (2468, 2478), False, 'from acoustics.signal import impulse_response_real_even\n'), ((4702, 4712), 'numpy.log2', 'np.log2', (['x'], {}), '(x)\n', (4709, 4712), True, 'import numpy as np\n')]
|
import subprocess
import sys
import time
from pathlib import Path
import click
from .database import Job, db
from .lint import lint
from .pdf_reader import pdf_workers
from .utils import add_files
dir = Path(__file__).resolve().parent.parent
@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
if ctx.invoked_subcommand is None:
click.echo("I was invoked without subcommand")
@cli.command()
@click.option("-c", "--count", default=3, help="Number of workers.")
@click.option("-d", "--debug", default=False, is_flag=True, help="Debug.")
@click.option("-f", "--files", default=0, help="Number of files for debug.")
def worker(count, debug, files):
if files != 0:
add_files([dir.joinpath("files", "ege2016rus.pdf")] * files)
pdf_workers(workers_count=count, debug=debug, files=files)
@cli.command()
@click.argument("files", nargs=-1, type=click.Path())
def add(files):
all_files = []
for file_path in files:
full_file_path = dir.joinpath(file_path)
if not full_file_path.is_file() or not full_file_path.exists():
continue
all_files.append(full_file_path)
add_files(all_files)
PROMPT = "❯"
CHAR_SLEEP = 0.05
def slowprint(command):
for char in command + "\n":
sys.stdout.write(char)
sys.stdout.flush()
time.sleep(CHAR_SLEEP)
def show(command, execute=True):
slowprint(command)
if execute:
start = time.time()
subprocess.call(["python3", "-m", *command.split()])
print(f"took {int(time.time() - start)}s", end="")
@cli.command()
def debug_worker_speed():
db.drop_tables([Job])
db.create_tables([Job])
show("exam_reader worker -c 2 -d -f 2")
@cli.command()
def debug_worker():
db.drop_tables([Job])
db.create_tables([Job])
show("exam_reader worker -c 2 -d")
@cli.command("lint")
def lint_command():
lint()
"""
termtosvg docs/source/static/debug_worker_speed.svg \
--command='python3 -m exam_reader debug-worker-speed' \
--screen-geometry=80x3
"""
|
[
"sys.stdout.write",
"click.option",
"click.echo",
"time.sleep",
"time.time",
"pathlib.Path",
"sys.stdout.flush",
"click.Path",
"click.group"
] |
[((248, 288), 'click.group', 'click.group', ([], {'invoke_without_command': '(True)'}), '(invoke_without_command=True)\n', (259, 288), False, 'import click\n'), ((435, 502), 'click.option', 'click.option', (['"""-c"""', '"""--count"""'], {'default': '(3)', 'help': '"""Number of workers."""'}), "('-c', '--count', default=3, help='Number of workers.')\n", (447, 502), False, 'import click\n'), ((504, 577), 'click.option', 'click.option', (['"""-d"""', '"""--debug"""'], {'default': '(False)', 'is_flag': '(True)', 'help': '"""Debug."""'}), "('-d', '--debug', default=False, is_flag=True, help='Debug.')\n", (516, 577), False, 'import click\n'), ((579, 654), 'click.option', 'click.option', (['"""-f"""', '"""--files"""'], {'default': '(0)', 'help': '"""Number of files for debug."""'}), "('-f', '--files', default=0, help='Number of files for debug.')\n", (591, 654), False, 'import click\n'), ((370, 416), 'click.echo', 'click.echo', (['"""I was invoked without subcommand"""'], {}), "('I was invoked without subcommand')\n", (380, 416), False, 'import click\n'), ((896, 908), 'click.Path', 'click.Path', ([], {}), '()\n', (906, 908), False, 'import click\n'), ((1280, 1302), 'sys.stdout.write', 'sys.stdout.write', (['char'], {}), '(char)\n', (1296, 1302), False, 'import sys\n'), ((1311, 1329), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1327, 1329), False, 'import sys\n'), ((1338, 1360), 'time.sleep', 'time.sleep', (['CHAR_SLEEP'], {}), '(CHAR_SLEEP)\n', (1348, 1360), False, 'import time\n'), ((1451, 1462), 'time.time', 'time.time', ([], {}), '()\n', (1460, 1462), False, 'import time\n'), ((206, 220), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (210, 220), False, 'from pathlib import Path\n'), ((1550, 1561), 'time.time', 'time.time', ([], {}), '()\n', (1559, 1561), False, 'import time\n')]
|
from setuptools import setup, find_packages
import sys
import os.path
# Must be one line or PyPI will cut it off
DESC = ("A colormap tool")
LONG_DESC = open("README.rst").read()
setup(
name="viscm",
version="0.10.0",
description=DESC,
long_description=LONG_DESC,
author="<NAME>, <NAME>, <NAME>",
author_email="<EMAIL>, <EMAIL>, <EMAIL>",
url="https://github.com/1313e/viscm",
license="MIT",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
    ],
packages=find_packages(),
install_requires=["colorspacious>=1.1.0",
"matplotlib>=3.2.0",
"numpy>=1.8",
"pyqt5==5.12.*",
"scipy>=1.0.0",
"cmasher>=1.5.0",
"guipy>=0.0.2",
"qtpy>=1.9.0"],
python_requires='>=3.5, <4',
package_data={'viscm': ['examples/*']},
entry_points={
'console_scripts': [
"viscm = viscm.gui:main"]},
)
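# Typical usage (not part of the original file): `pip install -e .` performs a
# development install and exposes the `viscm` console script defined above.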
|
[
"setuptools.find_packages"
] |
[((700, 715), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (713, 715), False, 'from setuptools import setup, find_packages\n')]
|
from unittest.mock import Mock
import pytest
from directory_header_footer import context_processors
from directory_constants.constants import urls as default_urls
@pytest.fixture
def sso_user():
return Mock(
id=1,
email='<EMAIL>'
)
@pytest.fixture
def request_logged_in(rf, sso_user):
request = rf.get('/')
request.sso_user = sso_user
return request
@pytest.fixture
def request_logged_out(rf):
request = rf.get('/')
request.sso_user = None
return request
def test_sso_logged_in(request_logged_in):
context = context_processors.sso_processor(request_logged_in)
assert context['sso_is_logged_in'] is True
def test_sso_profile_url(request_logged_in, settings):
settings.SSO_PROFILE_URL = 'http://www.example.com/profile/'
context = context_processors.sso_processor(request_logged_in)
assert context['sso_profile_url'] == settings.SSO_PROFILE_URL
def test_sso_register_url(request_logged_in, settings):
settings.SSO_PROXY_SIGNUP_URL = 'http://www.example.com/signup/'
context = context_processors.sso_processor(request_logged_in)
assert context['sso_register_url'] == (
'http://www.example.com/signup/?next=http://testserver/'
)
def test_sso_logged_out(request_logged_out):
context = context_processors.sso_processor(request_logged_out)
assert context['sso_is_logged_in'] is False
def test_sso_login_url(request_logged_in, settings):
settings.SSO_PROXY_LOGIN_URL = 'http://www.example.com/login/'
expected = 'http://www.example.com/login/?next=http://testserver/'
context = context_processors.sso_processor(request_logged_in)
assert context['sso_login_url'] == expected
def test_sso_logout_url(request_logged_in, settings):
settings.SSO_PROXY_LOGOUT_URL = 'http://www.example.com/logout/'
context = context_processors.sso_processor(request_logged_in)
assert context['sso_logout_url'] == (
'http://www.example.com/logout/?next=http://testserver/'
)
def test_sso_user(request_logged_in, sso_user):
context = context_processors.sso_processor(request_logged_in)
assert context['sso_user'] == sso_user
def test_header_footer_context_processor(settings):
settings.HEADER_FOOTER_CONTACT_US_URL = 'http://bones.com'
settings.HEADER_FOOTER_CSS_ACTIVE_CLASSES = {'fab': True}
context = context_processors.header_footer_context_processor(None)
assert context == {
'header_footer_contact_us_url': 'http://bones.com',
'header_footer_css_active_classes': {'fab': True},
}
def test_urls_processor(rf, settings):
settings.GREAT_HOME = 'http://home.com'
settings.GREAT_EXPORT_HOME = 'http://export.com'
settings.EXPORTING_NEW = 'http://export.com/new'
settings.EXPORTING_OCCASIONAL = 'http://export.com/occasional'
settings.EXPORTING_REGULAR = 'http://export.com/regular'
settings.GUIDANCE_MARKET_RESEARCH = 'http://market-research.com'
settings.GUIDANCE_CUSTOMER_INSIGHT = 'http://customer-insight.com'
settings.GUIDANCE_FINANCE = 'http://finance.com'
settings.GUIDANCE_BUSINESS_PLANNING = 'http://business-planning.com'
settings.GUIDANCE_GETTING_PAID = 'http://getting-paid.com'
settings.GUIDANCE_OPERATIONS_AND_COMPLIANCE = 'http://compliance.com'
settings.SERVICES_FAB = 'http://export.com/fab'
settings.SERVICES_SOO = 'http://export.com/soo'
settings.SERVICES_EXOPPS = 'http://export.com/exopps'
settings.SERVICES_GET_FINANCE = 'http://export.com/get-finance'
settings.SERVICES_EVENTS = 'http://export.com/events'
settings.INFO_ABOUT = 'http://about.com'
settings.INFO_CONTACT_US_DIRECTORY = 'http://contact.com'
settings.INFO_PRIVACY_AND_COOKIES = 'http://privacy-and-cookies.com'
settings.INFO_TERMS_AND_CONDITIONS = 'http://terms-and-conditions.com'
settings.INFO_DIT = 'http://dit.com'
settings.CUSTOM_PAGE = 'http://custom.com'
actual = context_processors.urls_processor(None)
expected_urls = {
'great_home': 'http://home.com',
'great_export_home': 'http://export.com',
'new_to_exporting': 'http://export.com/new',
'occasional_exporter': 'http://export.com/occasional',
'regular_exporter': 'http://export.com/regular',
'guidance_market_research': 'http://market-research.com',
'guidance_customer_insight': 'http://customer-insight.com',
'guidance_finance': 'http://finance.com',
'guidance_business_planning': 'http://business-planning.com',
'guidance_getting_paid': 'http://getting-paid.com',
'guidance_operations_and_compliance': 'http://compliance.com',
'services_fab': 'http://export.com/fab',
'services_soo': 'http://export.com/soo',
'services_exopps': 'http://export.com/exopps',
'services_get_finance': 'http://export.com/get-finance',
'services_events': 'http://export.com/events',
'info_about': 'http://about.com',
'info_contact_us': 'http://contact.com',
'info_privacy_and_cookies': 'http://privacy-and-cookies.com',
'info_terms_and_conditions': 'http://terms-and-conditions.com',
'info_dit': 'http://dit.com',
'custom_page': 'http://custom.com',
}
assert actual == {
'header_footer_urls': expected_urls
}
def test_urls_processor_defaults(rf, settings):
actual = context_processors.urls_processor(None)
expected_urls = {
'great_home': default_urls.GREAT_HOME,
'great_export_home': default_urls.GREAT_EXPORT_HOME,
'new_to_exporting': default_urls.EXPORTING_NEW,
'occasional_exporter': default_urls.EXPORTING_OCCASIONAL,
'regular_exporter': default_urls.EXPORTING_REGULAR,
'guidance_market_research': default_urls.GUIDANCE_MARKET_RESEARCH,
'guidance_customer_insight': default_urls.GUIDANCE_CUSTOMER_INSIGHT,
'guidance_finance': default_urls.GUIDANCE_FINANCE,
'guidance_business_planning': default_urls.GUIDANCE_BUSINESS_PLANNING,
'guidance_getting_paid': default_urls.GUIDANCE_GETTING_PAID,
'guidance_operations_and_compliance': (
default_urls.GUIDANCE_OPERATIONS_AND_COMPLIANCE),
'services_fab': default_urls.SERVICES_FAB,
'services_soo': default_urls.SERVICES_SOO,
'services_exopps': default_urls.SERVICES_EXOPPS,
'services_get_finance': default_urls.SERVICES_GET_FINANCE,
'services_events': default_urls.SERVICES_EVENTS,
'info_about': default_urls.INFO_ABOUT,
'info_contact_us': default_urls.INFO_CONTACT_US_DIRECTORY,
'info_privacy_and_cookies': default_urls.INFO_PRIVACY_AND_COOKIES,
'info_terms_and_conditions': default_urls.INFO_TERMS_AND_CONDITIONS,
'info_dit': default_urls.INFO_DIT,
'custom_page': default_urls.CUSTOM_PAGE,
}
assert actual == {'header_footer_urls': expected_urls}
def test_urls_processor_defaults_explicitly_none(rf, settings):
settings.GREAT_HOME = None
settings.GREAT_EXPORT_HOME = None
settings.EXPORTING_NEW = None
settings.EXPORTING_OCCASIONAL = None
settings.EXPORTING_REGULAR = None
settings.GUIDANCE_MARKET_RESEARCH = None
settings.GUIDANCE_CUSTOMER_INSIGHT = None
settings.GUIDANCE_BUSINESS_PLANNING = None
settings.GUIDANCE_GETTING_PAID = None
settings.GUIDANCE_OPERATIONS_AND_COMPLIANCE = None
settings.SERVICES_FAB = None
settings.SERVICES_SOO = None
settings.SERVICES_EXOPPS = None
settings.SERVICES_GET_FINANCE = None
settings.SERVICES_EVENTS = None
settings.INFO_ABOUT = None
settings.INFO_CONTACT_US_DIRECTORY = None
settings.INFO_PRIVACY_AND_COOKIES = None
settings.INFO_TERMS_AND_CONDITIONS = None
settings.INFO_DIT = None
settings.CUSTOM_PAGE = None
actual = context_processors.urls_processor(None)
expected_urls = {
'great_home': default_urls.GREAT_HOME,
'great_export_home': default_urls.GREAT_EXPORT_HOME,
'new_to_exporting': default_urls.EXPORTING_NEW,
'occasional_exporter': default_urls.EXPORTING_OCCASIONAL,
'regular_exporter': default_urls.EXPORTING_REGULAR,
'guidance_market_research': default_urls.GUIDANCE_MARKET_RESEARCH,
'guidance_customer_insight': default_urls.GUIDANCE_CUSTOMER_INSIGHT,
'guidance_finance': default_urls.GUIDANCE_FINANCE,
'guidance_business_planning': default_urls.GUIDANCE_BUSINESS_PLANNING,
'guidance_getting_paid': default_urls.GUIDANCE_GETTING_PAID,
'guidance_operations_and_compliance': (
default_urls.GUIDANCE_OPERATIONS_AND_COMPLIANCE
),
'services_fab': default_urls.SERVICES_FAB,
'services_soo': default_urls.SERVICES_SOO,
'services_exopps': default_urls.SERVICES_EXOPPS,
'services_get_finance': default_urls.SERVICES_GET_FINANCE,
'services_events': default_urls.SERVICES_EVENTS,
'info_about': default_urls.INFO_ABOUT,
'info_contact_us': default_urls.INFO_CONTACT_US_DIRECTORY,
'info_privacy_and_cookies': default_urls.INFO_PRIVACY_AND_COOKIES,
'info_terms_and_conditions': default_urls.INFO_TERMS_AND_CONDITIONS,
'info_dit': default_urls.INFO_DIT,
'custom_page': default_urls.CUSTOM_PAGE,
}
assert actual == {'header_footer_urls': expected_urls}
|
[
"directory_header_footer.context_processors.header_footer_context_processor",
"directory_header_footer.context_processors.sso_processor",
"unittest.mock.Mock",
"directory_header_footer.context_processors.urls_processor"
] |
[((209, 236), 'unittest.mock.Mock', 'Mock', ([], {'id': '(1)', 'email': '"""<EMAIL>"""'}), "(id=1, email='<EMAIL>')\n", (213, 236), False, 'from unittest.mock import Mock\n'), ((569, 620), 'directory_header_footer.context_processors.sso_processor', 'context_processors.sso_processor', (['request_logged_in'], {}), '(request_logged_in)\n', (601, 620), False, 'from directory_header_footer import context_processors\n'), ((804, 855), 'directory_header_footer.context_processors.sso_processor', 'context_processors.sso_processor', (['request_logged_in'], {}), '(request_logged_in)\n', (836, 855), False, 'from directory_header_footer import context_processors\n'), ((1067, 1118), 'directory_header_footer.context_processors.sso_processor', 'context_processors.sso_processor', (['request_logged_in'], {}), '(request_logged_in)\n', (1099, 1118), False, 'from directory_header_footer import context_processors\n'), ((1295, 1347), 'directory_header_footer.context_processors.sso_processor', 'context_processors.sso_processor', (['request_logged_out'], {}), '(request_logged_out)\n', (1327, 1347), False, 'from directory_header_footer import context_processors\n'), ((1603, 1654), 'directory_header_footer.context_processors.sso_processor', 'context_processors.sso_processor', (['request_logged_in'], {}), '(request_logged_in)\n', (1635, 1654), False, 'from directory_header_footer import context_processors\n'), ((1842, 1893), 'directory_header_footer.context_processors.sso_processor', 'context_processors.sso_processor', (['request_logged_in'], {}), '(request_logged_in)\n', (1874, 1893), False, 'from directory_header_footer import context_processors\n'), ((2071, 2122), 'directory_header_footer.context_processors.sso_processor', 'context_processors.sso_processor', (['request_logged_in'], {}), '(request_logged_in)\n', (2103, 2122), False, 'from directory_header_footer import context_processors\n'), ((2360, 2416), 'directory_header_footer.context_processors.header_footer_context_processor', 'context_processors.header_footer_context_processor', (['None'], {}), '(None)\n', (2410, 2416), False, 'from directory_header_footer import context_processors\n'), ((3934, 3973), 'directory_header_footer.context_processors.urls_processor', 'context_processors.urls_processor', (['None'], {}), '(None)\n', (3967, 3973), False, 'from directory_header_footer import context_processors\n'), ((5376, 5415), 'directory_header_footer.context_processors.urls_processor', 'context_processors.urls_processor', (['None'], {}), '(None)\n', (5409, 5415), False, 'from directory_header_footer import context_processors\n'), ((7809, 7848), 'directory_header_footer.context_processors.urls_processor', 'context_processors.urls_processor', (['None'], {}), '(None)\n', (7842, 7848), False, 'from directory_header_footer import context_processors\n')]
|
"""
A toy example of playing against random bot on Mocsár
Using env "mocsar" and 'human_mode'. It implies using random agent.
"""
import rlcard3
# Make environment and enable human mode
env = rlcard3.make(env_id='mocsar', config={'human_mode': True})
# Reset environment
state = env.reset()
while not env.is_over():
legal_actions = state['legal_actions']
legal_actions.insert(0, 0)
action = input('>> You choose action (integer): ')
if action == '-1':
print('Break the game...')
break
while not action.isdigit() \
or int(action) not in legal_actions:
print('Action illegal...')
action = input('>> Re-choose action (integer): ')
state, reward, done = env.step(int(action))
|
[
"rlcard3.make"
] |
[((194, 252), 'rlcard3.make', 'rlcard3.make', ([], {'env_id': '"""mocsar"""', 'config': "{'human_mode': True}"}), "(env_id='mocsar', config={'human_mode': True})\n", (206, 252), False, 'import rlcard3\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 14 13:04:49 2017
@author: acer
"""
from skpy import Skype
from getpass import getpass
Skype("pandey.divyanshu34", getpass(), ".tokens-pandey.divyanshu34")
sk = Skype(connect=False)
print(sk.contacts)
|
[
"getpass.getpass",
"skpy.Skype"
] |
[((208, 228), 'skpy.Skype', 'Skype', ([], {'connect': '(False)'}), '(connect=False)\n', (213, 228), False, 'from skpy import Skype\n'), ((162, 171), 'getpass.getpass', 'getpass', ([], {}), '()\n', (169, 171), False, 'from getpass import getpass\n')]
|
"""
爬虫的用途:12306抢票,短信轰炸,数据获取
分类:通用爬虫:是搜索引擎抓取系统的重要部分,主要是把互联网上的页面下载到本地作为一个镜像备份
聚焦爬虫:对特定需求进行数据获取,会对页面的内容进行筛选,保证只抓取和需求相关的网页信息
Http:端口号80
Https: 端口号443
使用第三方的requests进行请求:支持python2和3,在urllib中2和3的语法有些不一样
"""
import requests
kw = {'wd': '长城'}  # search keyword: "Great Wall"
# The headers disguise the request as one made by a browser.
# Without this, the site can tell the request comes from a Python script
# rather than a normal browser request.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
response = requests.get("https://www.baidu.com/s?", params=kw, headers=headers)
# response.text returns the body decoded to a str (unicode)
print(response.text)
# response.content returns the raw bytes; decode() turns them into a str
print(response.content)
print(response.content.decode())
# the full URL that was requested
print(response.url)
# the character encoding of the response
print(response.encoding)
# the HTTP status code
print(response.status_code)
# save the response body
with open('baidu.html', 'wb') as f:
f.write(response.content)
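# Not in the original: a common follow-up is response.raise_for_status(),
# which raises requests.HTTPError for 4xx/5xx responses.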
|
[
"requests.get"
] |
[((484, 552), 'requests.get', 'requests.get', (['"""https://www.baidu.com/s?"""'], {'params': 'kw', 'headers': 'headers'}), "('https://www.baidu.com/s?', params=kw, headers=headers)\n", (496, 552), False, 'import requests\n')]
|
import markovify
# with open("esenin.txt", 'r', encoding='utf-8') as f0, \
# open("kish.txt", 'r', encoding='utf-8') as f1, \
# open("kino.txt", 'r', encoding='utf-8') as f2, \
# open("kukr.txt", 'r', encoding='utf-8') as f3, \
# open("dataset.txt", 'a', encoding='utf-8') as f:
# f.write(f0.read())
# f.write(f1.read())
# f.write(f2.read())
# f.write(f3.read())
with open("dataset.txt", 'r', encoding='utf-8') as f:
text = f.read()
text_model = markovify.Text(text)
my_file = open("result.txt", 'a', encoding='utf-8')
for i in range(10):
my_file.write(text_model.make_short_sentence(280))
my_file.close()
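# Not in the original: markovify.Text also accepts a state_size argument
# (default 2); larger values follow the source text more closely at the
# cost of variety in the generated sentences.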
|
[
"markovify.Text"
] |
[((489, 509), 'markovify.Text', 'markovify.Text', (['text'], {}), '(text)\n', (503, 509), False, 'import markovify\n')]
|
"""halfway URL Configuration"""
from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import path, include
urlpatterns = [
path('admin', admin.site.urls),
path('users/login', auth_views.LoginView.as_view(), name='login'),
path('users/logout', auth_views.LogoutView.as_view(next_page='/'), name='logout'),
path('', include('page_maker.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
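# Note: static() only returns URL patterns when settings.DEBUG is True, so
# media files are served this way during development only.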
|
[
"django.contrib.auth.views.LogoutView.as_view",
"django.contrib.auth.views.LoginView.as_view",
"django.urls.path",
"django.urls.include",
"django.conf.urls.static.static"
] |
[((488, 549), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (494, 549), False, 'from django.conf.urls.static import static\n'), ((252, 282), 'django.urls.path', 'path', (['"""admin"""', 'admin.site.urls'], {}), "('admin', admin.site.urls)\n", (256, 282), False, 'from django.urls import path, include\n'), ((308, 338), 'django.contrib.auth.views.LoginView.as_view', 'auth_views.LoginView.as_view', ([], {}), '()\n', (336, 338), True, 'from django.contrib.auth import views as auth_views\n'), ((380, 424), 'django.contrib.auth.views.LogoutView.as_view', 'auth_views.LogoutView.as_view', ([], {'next_page': '"""/"""'}), "(next_page='/')\n", (409, 424), True, 'from django.contrib.auth import views as auth_views\n'), ((455, 481), 'django.urls.include', 'include', (['"""page_maker.urls"""'], {}), "('page_maker.urls')\n", (462, 481), False, 'from django.urls import path, include\n')]
|
from django.shortcuts import render
from cms.models import Pages
from django.http import HttpResponse
from django.http import HttpResponseNotFound
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
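# Note: Django passes the HttpRequest object as the first argument to these
# view functions; it is named `self` here, but `request` is the usual name.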
def slash(self):
response = ''
for Page in Pages.objects.all():
redirection = "<a href=/" + str(Page.id) + ">" + Page.name + "</a>"
        response += (str(Page.id) + ' : ' + redirection + "\n")
return(HttpResponse(response))
def number(self, num):
try:
Page = Pages.objects.get(id=str(num))
return(HttpResponse(Page.page))
except ObjectDoesNotExist:
return(HttpResponse("Resource not in database"))
def notfound(self):
return(HttpResponseNotFound("NOT FOUND"))
|
[
"django.http.HttpResponseNotFound",
"cms.models.Pages.objects.all",
"django.http.HttpResponse"
] |
[((280, 299), 'cms.models.Pages.objects.all', 'Pages.objects.all', ([], {}), '()\n', (297, 299), False, 'from cms.models import Pages\n'), ((452, 474), 'django.http.HttpResponse', 'HttpResponse', (['response'], {}), '(response)\n', (464, 474), False, 'from django.http import HttpResponse\n'), ((716, 749), 'django.http.HttpResponseNotFound', 'HttpResponseNotFound', (['"""NOT FOUND"""'], {}), "('NOT FOUND')\n", (736, 749), False, 'from django.http import HttpResponseNotFound\n'), ((571, 594), 'django.http.HttpResponse', 'HttpResponse', (['Page.page'], {}), '(Page.page)\n', (583, 594), False, 'from django.http import HttpResponse\n'), ((642, 682), 'django.http.HttpResponse', 'HttpResponse', (['"""Resource not in database"""'], {}), "('Resource not in database')\n", (654, 682), False, 'from django.http import HttpResponse\n')]
|
from __future__ import print_function
import torch
''' Env
pip install -U torch torchvision
pip install -U cython
pip install -U 'git+https://github.com/facebookresearch/fvcore.git' 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
git clone https://github.com/facebookresearch/detectron2 detectron2_repo
pip install -e detectron2_repo
'''
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# import some common libraries
import numpy as np
import cv2
import random
# import some common detectron2 utilities
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
im = cv2.imread("./input.jpg")
cfg = get_cfg()
cfg.merge_from_file("../../detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
# Find a model from detectron2's model zoo. You can either use the https://dl.fbaipublicfiles.... url, or use the following shorthand
cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x/137849600/model_final_f10217.pkl"
predictor = DefaultPredictor(cfg)
outputs = predictor(im)
print(outputs["instances"].pred_classes)
print(outputs["instances"].pred_boxes)
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
#v.get_image()
v.save("output.jpg")
|
[
"detectron2.engine.DefaultPredictor",
"detectron2.utils.logger.setup_logger",
"cv2.imread",
"detectron2.config.get_cfg",
"detectron2.data.MetadataCatalog.get"
] |
[((459, 473), 'detectron2.utils.logger.setup_logger', 'setup_logger', ([], {}), '()\n', (471, 473), False, 'from detectron2.utils.logger import setup_logger\n'), ((779, 804), 'cv2.imread', 'cv2.imread', (['"""./input.jpg"""'], {}), "('./input.jpg')\n", (789, 804), False, 'import cv2\n'), ((812, 821), 'detectron2.config.get_cfg', 'get_cfg', ([], {}), '()\n', (819, 821), False, 'from detectron2.config import get_cfg\n'), ((1261, 1282), 'detectron2.engine.DefaultPredictor', 'DefaultPredictor', (['cfg'], {}), '(cfg)\n', (1277, 1282), False, 'from detectron2.engine import DefaultPredictor\n'), ((1421, 1463), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['cfg.DATASETS.TRAIN[0]'], {}), '(cfg.DATASETS.TRAIN[0])\n', (1440, 1463), False, 'from detectron2.data import MetadataCatalog\n')]
|
# Generated by Django 3.1.7 on 2021-04-06 12:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0004_auto_20210405_1042'),
]
operations = [
migrations.AlterField(
model_name='book',
name='title',
field=models.CharField(max_length=300),
),
]
|
[
"django.db.models.CharField"
] |
[((333, 365), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(300)'}), '(max_length=300)\n', (349, 365), False, 'from django.db import migrations, models\n')]
|
from keckdrpframework.core.framework import Framework
from keckdrpframework.config.framework_config import ConfigClass
from keckdrpframework.models.arguments import Arguments
from keckdrpframework.utils.drpf_logger import getLogger
import subprocess
import time
import argparse
import sys
import traceback
import pkg_resources
import logging.config
from pathlib import Path
from datetime import datetime
from glob import glob
# the preferred way to import the pipeline is a direct import
from iqmon.pipelines.ingest import IngestPipeline
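# NOTE (not in the original): `queues`, `Event`, and `FrameworkInterface` are
# used below without being imported, and some functions reference argparse
# attributes (`input`, `overwrite`, `verbose`) that `_parseArguments` does not
# define; in keckdrpframework these helpers live in the framework package.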
def _parseArguments(in_args):
description = "Ingest pipeline CLI"
# this is a simple case where we provide a frame and a configuration file
parser = argparse.ArgumentParser(prog=f"{in_args[0]}", description=description)
parser.add_argument('-c', dest="config_file", type=str, help="Configuration file")
parser.add_argument('-frames', nargs='*', type=str, help='input image file (full path, list ok)', default=None)
# in this case, we are loading an entire directory, and ingesting all the files in that directory
parser.add_argument('-infiles', dest="infiles", help="Input files", nargs="*")
parser.add_argument('-d', '--directory', dest="dirname", type=str, help="Input directory", nargs='?', default=None)
# after ingesting the files, do we want to continue monitoring the directory?
parser.add_argument('-m', '--monitor', dest="monitor", action='store_true', default=False)
# special arguments, ignore
parser.add_argument("-i", "--ingest_data_only", dest="ingest_data_only", action="store_true",
help="Ingest data and terminate")
parser.add_argument("-w", "--wait_for_event", dest="wait_for_event", action="store_true", help="Wait for events")
parser.add_argument("-W", "--continue", dest="continuous", action="store_true",
help="Continue processing, wait for ever")
parser.add_argument("-s", "--start_queue_manager_only", dest="queue_manager_only", action="store_true",
help="Starts queue manager only, no processing",
)
args = parser.parse_args(in_args[1:])
return args
##-----------------------------------------------------------------------------
## Setup Framework
##-----------------------------------------------------------------------------
def setup_framework(args, pipeline=IngestPipeline):
# START HANDLING OF CONFIGURATION FILES ##########
pkg = 'iqmon'
framework_config_file = "configs/framework.cfg"
framework_config_fullpath = pkg_resources.resource_filename(pkg, framework_config_file)
framework_logcfg_file = 'configs/logger_ingest.cfg'
framework_logcfg_fullpath = pkg_resources.resource_filename(pkg, framework_logcfg_file)
# add PIPELINE specific config files
if args.config_file is None:
pipeline_config_file = 'configs/pipeline.cfg'
pipeline_config_fullpath = pkg_resources.resource_filename(pkg, pipeline_config_file)
pipeline_config = ConfigClass(pipeline_config_fullpath, default_section='DEFAULT')
else:
pipeline_config = ConfigClass(args.pipeline_config_file, default_section='DEFAULT')
# END HANDLING OF CONFIGURATION FILES ##########
try:
framework = Framework(IngestPipeline, framework_config_fullpath)
logging.config.fileConfig(framework_logcfg_fullpath)
framework.config.instrument = pipeline_config
except Exception as e:
print("Failed to initialize framework, exiting ...", e)
traceback.print_exc()
sys.exit(1)
# this part defines a specific logger for the pipeline, so that we can
# separate the output of the pipeline from the output of the framework
framework.context.pipeline_logger = getLogger(framework_logcfg_fullpath, name="pipeline")
framework.logger = getLogger(framework_logcfg_fullpath, name="DRPF")
framework.logger.info("Framework initialized")
return framework
##-----------------------------------------------------------------------------
## Analyze One File
##-----------------------------------------------------------------------------
def analyze_one():
args = _parseArguments(sys.argv)
p = Path(args.input).expanduser().absolute()
if p.exists() is False:
print(f'Unable to find file: {p}')
return
args.name = f"{p}"
pkg = 'iqmon'
framework_config_file = "configs/framework.cfg"
framework_config_fullpath = pkg_resources.resource_filename(pkg, framework_config_file)
cfg = ConfigClass(framework_config_fullpath)
queue = queues.get_event_queue(cfg.queue_manager_hostname,
cfg.queue_manager_portnr,
cfg.queue_manager_auth_code)
if queue is None:
print("Failed to connect to Queue Manager")
return
if args.overwrite is True:
pending = queue.get_pending()
event = Event("set_overwrite", args)
queue.put(event)
pending = queue.get_pending()
event = Event("next_file", args)
queue.put(event)
##-----------------------------------------------------------------------------
## Watch Directory
##-----------------------------------------------------------------------------
def watch_directory():
args = _parseArguments(sys.argv)
framework = setup_framework(args, pipeline=IngestPipeline)
now = datetime.utcnow()
data_path = framework.config.instrument.get('FileHandling', 'ingest_dir')
data_path = data_path.replace('YYYY', f'{now.year:4d}')
data_path = data_path.replace('MM', f'{now.month:02d}')
data_path = data_path.replace('DD', f'{now.day:02d}')
framework.logger.info(f'Setting data path: {data_path}')
data_path = Path(data_path).expanduser()
if data_path.exists() is False:
data_path.mkdir(parents=True, exist_ok=True)
framework.logger.info(f'Ingesting files from {data_path}')
infiles = data_path.glob(framework.config['DEFAULT']['file_type'])
framework.ingest_data(str(data_path), infiles, True)
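    # The positional flags presumably mirror the CLI options parsed above
    # (queue_manager_only, ingest_data_only, wait_for_event, continuous).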
framework.start(False, False, False, True)
##-----------------------------------------------------------------------------
## Change Watched Directory
##-----------------------------------------------------------------------------
def change_directory():
args = _parseArguments(sys.argv)
    if args.input != '':
newdir = Path(args.input).expanduser().absolute()
else:
now = datetime.utcnow()
data_path = framework.config.instrument.get('FileHandling', 'ingest_dir')
data_path = data_path.replace('YYYY', f'{now.year:4d}')
data_path = data_path.replace('MM', f'{now.month:02d}')
data_path = data_path.replace('DD', f'{now.day:02d}')
newdir = Path(data_path).expanduser()
args.input = str(newdir)
if newdir.exists() is False:
newdir.mkdir(parents=True)
pkg = 'iqmon'
framework_config_file = "configs/framework.cfg"
framework_config_fullpath = pkg_resources.resource_filename(pkg, framework_config_file)
cfg = ConfigClass(framework_config_fullpath)
queue = queues.get_event_queue(cfg.queue_manager_hostname,
cfg.queue_manager_portnr,
cfg.queue_manager_auth_code)
if queue is None:
print("Failed to connect to Queue Manager")
else:
pending = queue.get_pending()
event = Event("set_file_type", args)
queue.put(event)
event = Event("update_directory", args)
queue.put(event)
##-----------------------------------------------------------------------------
## List Queue
##-----------------------------------------------------------------------------
def list_queue():
args = _parseArguments(sys.argv)
pkg = 'iqmon'
framework_config_file = "configs/framework.cfg"
framework_config_fullpath = pkg_resources.resource_filename(pkg, framework_config_file)
cfg = ConfigClass(framework_config_fullpath)
drpif = FrameworkInterface(cfg)
# Print pending Events
if drpif.is_queue_ok():
events = drpif.pending_events()
print(f'Found {len(events)} in queue')
if args.verbose is True:
for event in events:
print(event)
else:
print ("Pending events: Queue not available", drpif.queue)
##-----------------------------------------------------------------------------
## Clear Queue
##-----------------------------------------------------------------------------
def clear_queue():
args = _parseArguments(sys.argv)
pkg = 'iqmon'
framework_config_file = "configs/framework.cfg"
framework_config_fullpath = pkg_resources.resource_filename(pkg, framework_config_file)
cfg = ConfigClass(framework_config_fullpath)
drpif = FrameworkInterface(cfg)
# Print pending Events
if drpif.is_queue_ok():
events = drpif.pending_events()
print(f'Found {len(events)} in queue')
else:
print ("Pending events: Queue not available", drpif.queue)
if drpif.is_queue_ok():
drpif.stop_event_queue()
print ("Queue manager stopped")
else:
print ("Queue manager already stopped")
if __name__ == "__main__":
analyze_one()
|
[
"traceback.print_exc",
"argparse.ArgumentParser",
"keckdrpframework.core.framework.Framework",
"pkg_resources.resource_filename",
"datetime.datetime.utcnow",
"pathlib.Path",
"keckdrpframework.config.framework_config.ConfigClass",
"sys.exit",
"keckdrpframework.utils.drpf_logger.getLogger"
] |
[((730, 800), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'prog': 'f"""{in_args[0]}"""', 'description': 'description'}), "(prog=f'{in_args[0]}', description=description)\n", (753, 800), False, 'import argparse\n'), ((2612, 2671), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['pkg', 'framework_config_file'], {}), '(pkg, framework_config_file)\n', (2643, 2671), False, 'import pkg_resources\n'), ((2764, 2823), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['pkg', 'framework_logcfg_file'], {}), '(pkg, framework_logcfg_file)\n', (2795, 2823), False, 'import pkg_resources\n'), ((3847, 3900), 'keckdrpframework.utils.drpf_logger.getLogger', 'getLogger', (['framework_logcfg_fullpath'], {'name': '"""pipeline"""'}), "(framework_logcfg_fullpath, name='pipeline')\n", (3856, 3900), False, 'from keckdrpframework.utils.drpf_logger import getLogger\n'), ((3925, 3974), 'keckdrpframework.utils.drpf_logger.getLogger', 'getLogger', (['framework_logcfg_fullpath'], {'name': '"""DRPF"""'}), "(framework_logcfg_fullpath, name='DRPF')\n", (3934, 3974), False, 'from keckdrpframework.utils.drpf_logger import getLogger\n'), ((4566, 4625), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['pkg', 'framework_config_file'], {}), '(pkg, framework_config_file)\n', (4597, 4625), False, 'import pkg_resources\n'), ((4637, 4675), 'keckdrpframework.config.framework_config.ConfigClass', 'ConfigClass', (['framework_config_fullpath'], {}), '(framework_config_fullpath)\n', (4648, 4675), False, 'from keckdrpframework.config.framework_config import ConfigClass\n'), ((5526, 5543), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5541, 5543), False, 'from datetime import datetime\n'), ((7170, 7229), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['pkg', 'framework_config_file'], {}), '(pkg, framework_config_file)\n', (7201, 7229), False, 'import pkg_resources\n'), ((7241, 7279), 'keckdrpframework.config.framework_config.ConfigClass', 'ConfigClass', (['framework_config_fullpath'], {}), '(framework_config_fullpath)\n', (7252, 7279), False, 'from keckdrpframework.config.framework_config import ConfigClass\n'), ((8089, 8148), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['pkg', 'framework_config_file'], {}), '(pkg, framework_config_file)\n', (8120, 8148), False, 'import pkg_resources\n'), ((8160, 8198), 'keckdrpframework.config.framework_config.ConfigClass', 'ConfigClass', (['framework_config_fullpath'], {}), '(framework_config_fullpath)\n', (8171, 8198), False, 'from keckdrpframework.config.framework_config import ConfigClass\n'), ((8904, 8963), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['pkg', 'framework_config_file'], {}), '(pkg, framework_config_file)\n', (8935, 8963), False, 'import pkg_resources\n'), ((8975, 9013), 'keckdrpframework.config.framework_config.ConfigClass', 'ConfigClass', (['framework_config_fullpath'], {}), '(framework_config_fullpath)\n', (8986, 9013), False, 'from keckdrpframework.config.framework_config import ConfigClass\n'), ((2993, 3051), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['pkg', 'pipeline_config_file'], {}), '(pkg, pipeline_config_file)\n', (3024, 3051), False, 'import pkg_resources\n'), ((3079, 3143), 'keckdrpframework.config.framework_config.ConfigClass', 'ConfigClass', (['pipeline_config_fullpath'], {'default_section': '"""DEFAULT"""'}), "(pipeline_config_fullpath, default_section='DEFAULT')\n", (3090, 3143), False, 'from keckdrpframework.config.framework_config import ConfigClass\n'), ((3182, 3247), 'keckdrpframework.config.framework_config.ConfigClass', 'ConfigClass', (['args.pipeline_config_file'], {'default_section': '"""DEFAULT"""'}), "(args.pipeline_config_file, default_section='DEFAULT')\n", (3193, 3247), False, 'from keckdrpframework.config.framework_config import ConfigClass\n'), ((3337, 3389), 'keckdrpframework.core.framework.Framework', 'Framework', (['IngestPipeline', 'framework_config_fullpath'], {}), '(IngestPipeline, framework_config_fullpath)\n', (3346, 3389), False, 'from keckdrpframework.core.framework import Framework\n'), ((6620, 6637), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (6635, 6637), False, 'from datetime import datetime\n'), ((3609, 3630), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3628, 3630), False, 'import traceback\n'), ((3640, 3651), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3648, 3651), False, 'import sys\n'), ((5883, 5898), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (5887, 5898), False, 'from pathlib import Path\n'), ((6932, 6947), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (6936, 6947), False, 'from pathlib import Path\n'), ((4305, 4321), 'pathlib.Path', 'Path', (['args.input'], {}), '(args.input)\n', (4309, 4321), False, 'from pathlib import Path\n'), ((6553, 6569), 'pathlib.Path', 'Path', (['args.input'], {}), '(args.input)\n', (6557, 6569), False, 'from pathlib import Path\n')]
|
# -*- coding: utf-8 -*-
import explainaboard.error_analysis as ea
import numpy
import os
def get_aspect_value(sample_list, dict_aspect_func):
dict_span2aspect_val = {}
dict_span2aspect_val_pred = {}
for aspect, fun in dict_aspect_func.items():
dict_span2aspect_val[aspect] = {}
dict_span2aspect_val_pred[aspect] = {}
# maintain it for print error case
dict_sid2sent = {}
sample_id = 0
for info_list in sample_list:
#
#
#
# word_list = word_segment(sent).split(" ")
# Sentence Entities Paragraph True Relation Label Predicted Relation Label
# Sentence Length Paragraph Length Number of Entities in Ground Truth Relation Average Distance of Entities
sent, entities, paragraph, true_label, pred_label, sent_length, para_length, n_entity, avg_distance = info_list
dict_sid2sent[str(sample_id)] = ea.format4json2(entities + "|||" + sent)
sent_pos = ea.tuple2str((sample_id, true_label))
sent_pos_pred = ea.tuple2str((sample_id, pred_label))
# Sentence Length: sentALen
aspect = "sLen"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(sent_length)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(sent_length)
# Paragraph Length: pLen
aspect = "pLen"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(para_length)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(para_length)
# Number of Entity: nEnt
aspect = "nEnt"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(n_entity)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(n_entity)
# Average Distance: avgDist
aspect = "avgDist"
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = float(avg_distance)
dict_span2aspect_val_pred[aspect][sent_pos_pred] = float(avg_distance)
# Tag: tag
aspect = "tag" ############## MUST Be Gold Tag for text classification task
if aspect in dict_aspect_func.keys():
dict_span2aspect_val[aspect][sent_pos] = true_label
dict_span2aspect_val_pred[aspect][sent_pos_pred] = true_label
sample_id += 1
# print(dict_span2aspect_val["bleu"])
return dict_span2aspect_val, dict_span2aspect_val_pred, dict_sid2sent
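# Shape note (not in the original): each returned dict maps
# aspect -> {"(sample_id, label)" -> attribute value}; the first is keyed by
# the gold label and the second by the predicted label, so gold and predicted
# samples can be bucketed with the same functions.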
def evaluate(task_type="ner", analysis_type="single", systems=[], dataset_name = 'dataset_name', model_name = 'model_name', output_filename="./output.json", is_print_ci=False,
is_print_case=False, is_print_ece=False):
path_text = systems[0] if analysis_type == "single" else ""
path_comb_output = "model_name" + "/" + path_text.split("/")[-1]
dict_aspect_func, dict_precomputed_path, obj_json = ea.load_task_conf(task_dir=os.path.dirname(__file__))
sample_list, sent_list, entity_list, true_list, pred_list = file_to_list(path_text)
error_case_list = []
if is_print_case:
error_case_list = get_error_case(sent_list, entity_list, true_list, pred_list)
print(" -*-*-*- the number of error casse:\t", len(error_case_list))
dict_span2aspect_val, dict_span2aspect_val_pred, dict_sid2sent = get_aspect_value(sample_list, dict_aspect_func)
holistic_performance = ea.accuracy(true_list, pred_list)
holistic_performance = format(holistic_performance, '.3g')
# Confidence Interval of Holistic Performance
confidence_low, confidence_up = 0, 0
if is_print_ci:
confidence_low, confidence_up = ea.compute_confidence_interval_acc(true_list, pred_list, n_times=1000)
dict_span2aspect_val, dict_span2aspect_val_pred, dict_sid2sent = get_aspect_value(sample_list, dict_aspect_func)
print("------------------ Holistic Result----------------------")
print(holistic_performance)
# print(f1(list_true_tags_token, list_pred_tags_token)["f1"])
dict_bucket2span = {}
dict_bucket2span_pred = {}
dict_bucket2f1 = {}
aspect_names = []
for aspect, func in dict_aspect_func.items():
# print(aspect, dict_span2aspect_val[aspect])
dict_bucket2span[aspect] = ea.select_bucketing_func(func[0], func[1], dict_span2aspect_val[aspect])
# print(aspect, dict_bucket2span[aspect])
# exit()
dict_bucket2span_pred[aspect] = ea.bucket_attribute_specified_bucket_interval(dict_span2aspect_val_pred[aspect],
dict_bucket2span[aspect].keys())
# dict_bucket2span_pred[aspect] = __select_bucketing_func(func[0], func[1], dict_span2aspect_val_pred[aspect])
dict_bucket2f1[aspect] = get_bucket_acc_with_error_case(dict_bucket2span[aspect],
dict_bucket2span_pred[aspect], dict_sid2sent,
is_print_ci, is_print_case)
aspect_names.append(aspect)
print("aspect_names: ", aspect_names)
print("------------------ Breakdown Performance")
for aspect in dict_aspect_func.keys():
ea.print_dict(dict_bucket2f1[aspect], aspect)
print("")
# Calculate databias w.r.t numeric attributes
dict_aspect2bias = {}
for aspect, aspect2Val in dict_span2aspect_val.items():
        if not isinstance(list(aspect2Val.values())[0], str):
dict_aspect2bias[aspect] = numpy.average(list(aspect2Val.values()))
print("------------------ Dataset Bias")
for k, v in dict_aspect2bias.items():
print(k + ":\t" + str(v))
print("")
dict_fine_grained = {}
for aspect, metadata in dict_bucket2f1.items():
dict_fine_grained[aspect] = []
for bucket_name, v in metadata.items():
# print("---------debug--bucket name old---")
# print(bucket_name)
bucket_name = ea.beautify_interval(bucket_name)
# print("---------debug--bucket name new---")
# print(bucket_name)
# bucket_value = format(v[0]*100,'.4g')
bucket_value = format(v[0], '.4g')
n_sample = v[1]
confidence_low_bucket = format(v[2], '.4g')
confidence_up_bucket = format(v[3], '.4g')
bucket_error_case = v[4]
# instantiation
dict_fine_grained[aspect].append({"bucket_name": bucket_name, "bucket_value": bucket_value, "num": n_sample,
"confidence_low": confidence_low_bucket,
"confidence_up": confidence_up_bucket,
"bucket_error_case": bucket_error_case})
obj_json["task"] = task_type
obj_json["data"]["language"] = "English"
obj_json["data"]["name"] = dataset_name
obj_json["data"]["bias"] = dict_aspect2bias
obj_json["data"]["output"] = path_comb_output
obj_json["model"]["name"] = model_name
obj_json["model"]["results"]["overall"]["error_case"] = error_case_list
obj_json["model"]["results"]["overall"]["performance"] = holistic_performance
obj_json["model"]["results"]["overall"]["confidence_low"] = confidence_low
obj_json["model"]["results"]["overall"]["confidence_up"] = confidence_up
obj_json["model"]["results"]["fine_grained"] = dict_fine_grained
raise NotImplementedError('RE is not fully implemented yet, see below')
# ece = 0
# dic_calibration = None
# if is_print_ece:
# ece, dic_calibration = process_all(path_text,
# size_of_bin=10, dataset=corpus_type, model=model_name)
# obj_json["model"]["results"]["calibration"] = dic_calibration
# # print(dic_calibration)
# ea.save_json(obj_json, output_filename)
#
# def main():
#
# parser = argparse.ArgumentParser(description='Interpretable Evaluation for NLP')
#
#
# parser.add_argument('--task', type=str, required=True,
# help="absa")
#
# parser.add_argument('--ci', type=str, required=False, default= False,
# help="True|False")
#
# parser.add_argument('--case', type=str, required=False, default= False,
# help="True|False")
#
# parser.add_argument('--ece', type=str, required=False, default= False,
# help="True|False")
#
#
# parser.add_argument('--type', type=str, required=False, default="single",
# help="analysis type: single|pair|combine")
# parser.add_argument('--systems', type=str, required=True,
# help="the directories of system outputs. Multiple one should be separated by comma, for example, system1,system2 (no space)")
#
# parser.add_argument('--output', type=str, required=True,
# help="analysis output file")
# args = parser.parse_args()
#
#
# is_print_ci = args.ci
# is_print_case = args.case
# is_print_ece = args.ece
#
# task = args.task
# analysis_type = args.type
# systems = args.systems.split(",")
# output = args.output
#
#
# print("task", task)
# print("type", analysis_type)
# print("systems", systems)
# # sample_list = file_to_list_re(systems[0])
# # print(sample_list[0])
# evaluate(task_type=task, analysis_type=analysis_type, systems=systems, output=output, is_print_ci = is_print_ci, is_print_case = is_print_case, is_print_ece = is_print_ece)
#
# # python eval_spec.py --task re --systems ./test_re.tsv --output ./a.json
# if __name__ == '__main__':
# main()
def get_bucket_acc_with_error_case(dict_bucket2span, dict_bucket2span_pred, dict_sid2sent, is_print_ci, is_print_case):
# The structure of span_true or span_pred
# 2345|||Positive
# 2345 represents sentence id
# Positive represents the "label" of this instance
dict_bucket2f1 = {}
for bucket_interval, spans_true in dict_bucket2span.items():
spans_pred = []
if bucket_interval not in dict_bucket2span_pred.keys():
raise ValueError("Predict Label Bucketing Errors")
else:
spans_pred = dict_bucket2span_pred[bucket_interval]
# loop over samples from a given bucket
error_case_bucket_list = []
if is_print_case:
for info_true, info_pred in zip(spans_true, spans_pred):
sid_true, label_true = info_true.split("|||")
sid_pred, label_pred = info_pred.split("|||")
if sid_true != sid_pred:
continue
sent_entities = dict_sid2sent[sid_true]
if label_true != label_pred:
error_case_info = label_true + "|||" + label_pred + "|||" + sent_entities
error_case_bucket_list.append(error_case_info)
accuracy_each_bucket = ea.accuracy(spans_pred, spans_true)
confidence_low, confidence_up = 0, 0
if is_print_ci:
confidence_low, confidence_up = ea.compute_confidence_interval_acc(spans_pred, spans_true)
dict_bucket2f1[bucket_interval] = [accuracy_each_bucket, len(spans_true), confidence_low, confidence_up,
error_case_bucket_list]
return ea.sort_dict(dict_bucket2f1)
def get_error_case(sent_list, entity_list, true_label_list, pred_label_list):
error_case_list = []
for sent, entities, true_label, pred_label in zip(sent_list, entity_list, true_label_list, pred_label_list):
if true_label != pred_label:
error_case_list.append(true_label + "|||" + pred_label + "|||" + entities + "|||" + ea.format4json2(sent))
return error_case_list
def file_to_list(file_path):
    sample_list = []
    true_list = []
    pred_list = []
    sent_list = []
    entity_list = []
    # use a context manager so the file handle is closed; row 0 is a header
    with open(file_path, "r") as fin:
        for idx, line in enumerate(fin):
            if idx == 0:
                continue
            info_list = line.rstrip("\n").split("\t")
            sample_list.append(list(info_list))
            true_list.append(info_list[3])
            pred_list.append(info_list[4])
            sent_list.append(info_list[0])
            entity_list.append(info_list[1])
    return sample_list, sent_list, entity_list, true_list, pred_list
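# Usage sketch for file_to_list (hypothetical file name and field values):
# each data row is tab-separated, matching the column comment near the top of
# this file, and row 0 is treated as a header and skipped.
#
# with open("re_predictions.tsv", "w") as f:
#     f.write("sent\tentities\tparagraph\ttrue\tpred\tsLen\tpLen\tnEnt\tavgDist\n")
#     f.write("Bob works at Acme.\tBob|Acme\tsome paragraph\torg-aff\torg-aff\t4\t40\t2\t2.0\n")
# samples, sents, entities, trues, preds = file_to_list("re_predictions.tsv")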
|
[
"explainaboard.error_analysis.compute_confidence_interval_acc",
"explainaboard.error_analysis.select_bucketing_func",
"os.path.dirname",
"explainaboard.error_analysis.print_dict",
"explainaboard.error_analysis.beautify_interval",
"explainaboard.error_analysis.sort_dict",
"explainaboard.error_analysis.format4json2",
"explainaboard.error_analysis.accuracy",
"explainaboard.error_analysis.tuple2str"
] |
[((3461, 3494), 'explainaboard.error_analysis.accuracy', 'ea.accuracy', (['true_list', 'pred_list'], {}), '(true_list, pred_list)\n', (3472, 3494), True, 'import explainaboard.error_analysis as ea\n'), ((11223, 11251), 'explainaboard.error_analysis.sort_dict', 'ea.sort_dict', (['dict_bucket2f1'], {}), '(dict_bucket2f1)\n', (11235, 11251), True, 'import explainaboard.error_analysis as ea\n'), ((909, 949), 'explainaboard.error_analysis.format4json2', 'ea.format4json2', (["(entities + '|||' + sent)"], {}), "(entities + '|||' + sent)\n", (924, 949), True, 'import explainaboard.error_analysis as ea\n'), ((970, 1007), 'explainaboard.error_analysis.tuple2str', 'ea.tuple2str', (['(sample_id, true_label)'], {}), '((sample_id, true_label))\n', (982, 1007), True, 'import explainaboard.error_analysis as ea\n'), ((1032, 1069), 'explainaboard.error_analysis.tuple2str', 'ea.tuple2str', (['(sample_id, pred_label)'], {}), '((sample_id, pred_label))\n', (1044, 1069), True, 'import explainaboard.error_analysis as ea\n'), ((3710, 3780), 'explainaboard.error_analysis.compute_confidence_interval_acc', 'ea.compute_confidence_interval_acc', (['true_list', 'pred_list'], {'n_times': '(1000)'}), '(true_list, pred_list, n_times=1000)\n', (3744, 3780), True, 'import explainaboard.error_analysis as ea\n'), ((4313, 4385), 'explainaboard.error_analysis.select_bucketing_func', 'ea.select_bucketing_func', (['func[0]', 'func[1]', 'dict_span2aspect_val[aspect]'], {}), '(func[0], func[1], dict_span2aspect_val[aspect])\n', (4337, 4385), True, 'import explainaboard.error_analysis as ea\n'), ((5288, 5333), 'explainaboard.error_analysis.print_dict', 'ea.print_dict', (['dict_bucket2f1[aspect]', 'aspect'], {}), '(dict_bucket2f1[aspect], aspect)\n', (5301, 5333), True, 'import explainaboard.error_analysis as ea\n'), ((10823, 10858), 'explainaboard.error_analysis.accuracy', 'ea.accuracy', (['spans_pred', 'spans_true'], {}), '(spans_pred, spans_true)\n', (10834, 10858), True, 'import explainaboard.error_analysis as ea\n'), ((2987, 3012), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3002, 3012), False, 'import os\n'), ((6051, 6084), 'explainaboard.error_analysis.beautify_interval', 'ea.beautify_interval', (['bucket_name'], {}), '(bucket_name)\n', (6071, 6084), True, 'import explainaboard.error_analysis as ea\n'), ((10972, 11030), 'explainaboard.error_analysis.compute_confidence_interval_acc', 'ea.compute_confidence_interval_acc', (['spans_pred', 'spans_true'], {}), '(spans_pred, spans_true)\n', (11006, 11030), True, 'import explainaboard.error_analysis as ea\n'), ((11603, 11624), 'explainaboard.error_analysis.format4json2', 'ea.format4json2', (['sent'], {}), '(sent)\n', (11618, 11624), True, 'import explainaboard.error_analysis as ea\n')]
|
from libs.chromosome.chromosome_modifier import ChromosomeModifier
from libs.chromosome.mutation_types import MutationTypes
class MutationService:
def __init__(self, algorithm_configuration):
self.__algorithm_configuration = algorithm_configuration
self.__chromosome_modifier = ChromosomeModifier(algorithm_configuration.chromosome_config,
algorithm_configuration.left_range_number,
algorithm_configuration.right_range_number)
def handle_mut(self, pop_to_mut):
return [self.__apply_mut(chromosome) for chromosome in pop_to_mut]
def __apply_mut(self, chromosome):
mut_type = self.__algorithm_configuration.chromosome_config.mut_type
if mut_type == MutationTypes.INDICES_SWAP.name:
return self.__chromosome_modifier.mutation_indices_swap(chromosome)
if mut_type == MutationTypes.STEADY.name:
return self.__chromosome_modifier.mutation_steady(chromosome)
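# Usage sketch (hypothetical names: the factory and population below are not
# defined in this snippet). MutationService only needs a configuration object
# exposing chromosome_config.mut_type plus the left/right range numbers, and
# it maps the configured mutation over every chromosome in the population:
#
# config = build_algorithm_configuration()   # hypothetical factory
# service = MutationService(config)
# mutated_population = service.handle_mut(population)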
|
[
"libs.chromosome.chromosome_modifier.ChromosomeModifier"
] |
[((321, 478), 'libs.chromosome.chromosome_modifier.ChromosomeModifier', 'ChromosomeModifier', (['algorithm_configuration.chromosome_config', 'algorithm_configuration.left_range_number', 'algorithm_configuration.right_range_number'], {}), '(algorithm_configuration.chromosome_config,\n algorithm_configuration.left_range_number, algorithm_configuration.\n right_range_number)\n', (339, 478), False, 'from libs.chromosome.chromosome_modifier import ChromosomeModifier\n')]
|
import contextlib
import os
import pathlib
import shutil
import subprocess
import sys
import pytest
from cookiecutter.main import cookiecutter
_template_dir = pathlib.Path(__file__).parent.parent
_base_cookiecutter_args = {
"project_name": "my-python-package",
"package_name": "my_python_package",
"friendly_name": "My Python Package",
"author": "<NAME>",
"email": "<EMAIL>",
"github_user": "federicober",
"version": "0.1.0",
"dockerized": "false",
"docs_backend": "sphinx",
}
@contextlib.contextmanager
def change_dir(dir_name):
cwd = os.getcwd()
try:
os.chdir(dir_name)
yield
finally:
os.chdir(cwd)
@pytest.fixture(scope="session")
def default_generated_project(tmpdir_factory):
base_temp_dir = tmpdir_factory.mktemp("default_generated_project")
subprocess.check_call(
[
sys.executable,
"-m",
"cookiecutter",
"--no-input",
"--output-dir",
str(base_temp_dir),
str(_template_dir),
],
stderr=subprocess.STDOUT,
)
project_dir = base_temp_dir / "my-python-package"
with change_dir(project_dir):
yield project_dir
@pytest.fixture()
def tmp_generated_project(default_generated_project, tmp_path):
shutil.copytree(default_generated_project, tmp_path, dirs_exist_ok=True)
with change_dir(tmp_path):
yield tmp_path
@pytest.fixture()
def custom_generated_project(tmp_path, request):
cookiecutter_args = _base_cookiecutter_args.copy()
if hasattr(request, "param"):
cookiecutter_args.update(request.param)
cookiecutter(
str(_template_dir),
output_dir=str(tmp_path),
no_input=True,
extra_context=cookiecutter_args,
)
project_dir = tmp_path / cookiecutter_args["project_name"]
with change_dir(project_dir):
yield project_dir
|
[
"os.getcwd",
"pytest.fixture",
"pathlib.Path",
"shutil.copytree",
"os.chdir"
] |
[((680, 711), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (694, 711), False, 'import pytest\n'), ((1227, 1243), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1241, 1243), False, 'import pytest\n'), ((1442, 1458), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (1456, 1458), False, 'import pytest\n'), ((580, 591), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (589, 591), False, 'import os\n'), ((1312, 1384), 'shutil.copytree', 'shutil.copytree', (['default_generated_project', 'tmp_path'], {'dirs_exist_ok': '(True)'}), '(default_generated_project, tmp_path, dirs_exist_ok=True)\n', (1327, 1384), False, 'import shutil\n'), ((161, 183), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (173, 183), False, 'import pathlib\n'), ((609, 627), 'os.chdir', 'os.chdir', (['dir_name'], {}), '(dir_name)\n', (617, 627), False, 'import os\n'), ((663, 676), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (671, 676), False, 'import os\n')]
|
import pandas as pd
import numpy as np
import re
import sys
def top_extract(s):
top = []
for i in range (1,len(s)+1):
if s[i-1].lower() == 'top':
top.append(s[i])
return top
def base_extract(s):
base = []
for i in range (1,len(s)+1):
if s[i-1].lower() == 'base':
base.append(s[i])
return base
def middle_extract(s):
middle = []
for i in range (1,len(s)+1):
if s[i-1].lower() == 'middle':
middle.append(s[i])
return middle
def note_extract(s):
result = []
location = ['top', 'middle', 'base']
for i in range (1,len(s)+1):
for ll in location:
if s[i-1].lower() == ll:
result.append(s[i])
return result
# Strip the numeric tag and the top/middle/base marker from the front of each note
# If nothing is left, set the value to NaN
def delete_note_tag(s):
s = re.split('-1|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20',s)[1]
if len(s)==0:
s = np.nan
return s
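# Illustration (hypothetical inputs): re.split() above splits on every digit
# token and [1] keeps the text right after the first one, so
#   delete_note_tag('-1')          -> ''           -> NaN
#   delete_note_tag('3top lemon')  -> 'top lemon'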
def notes_table(ori_data):
data = ori_data.loc[:, ['title', 'notes_1', 'notes_2', 'notes_3', 'notes_4', 'notes_5',
'notes_6', 'notes_7', 'notes_8', 'notes_9', 'notes_10',
'notes_11', 'notes_12', 'notes_13', 'notes_14', 'notes_15',
'notes_16', 'notes_17', 'notes_18', 'notes_19', 'notes_20']]
split_data = data
split_data.fillna('-1', inplace=True)
for i in range(1, 21):
split_data['notes_{}'.format(str(i))] = data['notes_{}'.format(str(i))].apply(
lambda s: delete_note_tag(s))
note_in_perfume = pd.DataFrame(columns=['perfume_name', 'note_name'])
rows, cols = data.shape
    # Process every note and record the note-to-perfume mapping
for row in range(0, rows):
cur_perfume = split_data['title'][row]
i = 1
while i < 21:
if pd.isnull(data['notes_{}'.format(str(i))][row]):
i = 21
else:
new = pd.DataFrame({'perfume_name': cur_perfume,
'note_name': data['notes_{}'.format(str(i))][row]}, index=[1])
note_in_perfume = note_in_perfume.append(new, ignore_index=True)
i += 1
    # Collect all notes into a set to get a deduplicated note table
note_list = list(set(note_in_perfume['note_name'].tolist()))
note_table = pd.DataFrame(note_list, columns=['note_name'])
note_table.to_csv('nnnnew_note.csv', index=False)
note_in_perfume.to_csv('note_in_perfume.csv', index=False)
'''
data = ori_data['title']
for i in range(1, 21):
data['notes_{}'.format(str(i))] = data['notes_{}'.format(str(i))]
data = ori_data
split_data = data
split_data.fillna('-1', inplace=True)
for i in range(1, 21):
split_data['notes_{}'.format(str(i))] = data['notes_{}'.format(str(i))].apply(
lambda s: re.split('-1|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20', s))
notes = split_data['notes_1']
for i in range(2, 21):
notes = notes + split_data['notes_{}'.format(str(i))]
notes = notes.apply(lambda s: list(filter(lambda x: x != '', s)))
    # Extract all the notes and merge them into a single column
test_notes = notes.apply(note_extract)
#top_notes = notes.apply(top_extract)
#middle_notes = notes.apply(middle_extract)
#base_notes = notes.apply(base_extract)
'''
return
def perfume_table(original_data):
rows, cols = original_data.shape
data = pd.DataFrame(columns=['title', 'brand', 'date', 'image', 'description', 'target'])
    data['title'] = data['title'].astype(str)
data['brand'] = original_data['brand']
data['date'] = original_data['date']
    data['image'] = data['image'].astype(str)
    data['description'] = data['description'].astype(str)
data['target'] = 0
# perfume_name, brand, date, image, description, target
    # Process the title: strip the 'for women/men' suffix and encode it in target
target_dict = {'for women': 0,
'for men': 1,
'for women and men': 2}
for r in range(0, rows):
item = original_data['title'][r]
if 'for men' in item:
tt = target_dict['for men']
title = item[0:(item.find('for') - 1)]
data.loc[r, 'title'] = title
elif 'for women' in item:
if 'for women and men' in item:
tt = target_dict['for women and men']
else:
tt = target_dict['for women']
title = item[0:(item.find('for') - 1)]
data.loc[r, 'title'] = title
else:
tt = 3
            data.loc[r, 'title'] = item  # fixed: 'title' is undefined/stale in this branch
data.loc[r, 'target'] = tt
data['target'] = data['target'].astype(dtype=int)
data.rename(columns={'title': 'perfume_name'}, inplace=True)
data.to_csv('nnnnew_perfume.csv', index = False)
return
# Convert all the CSV rows into SQL INSERT statements
def insert_perfume_data_into_sql():
pp_index = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/pp_index.csv')
pp_df = pp_index[['Unnamed: 0', 'perfume_name', 'brand', 'date', 'target']]
d = pp_df.values.tolist()
k_list = [0, 10000, 20000, 30000, 40000, 51212]
k = 0
while k in range(0, 5):
k_1 = k_list[k]
k_2 = k_list[k + 1]
result = 'INSERT INTO ttperfume(ttperfume_id, ttperfume_name, ttbrand, ttdate, tttarget) VALUES'
i = k_1
while i in range(k_1, k_2):
if pd.isna(d[i][1]):
d[i][1] = d[i][2]
if "'" in d[i][1]:
d[i][1] = d[i][1].replace("'", "''")
if "'" in d[i][2]:
d[i][2] = d[i][2].replace("'", "''")
if i != k_2 - 1:
dd = '(' + str(d[i][0]) + ", '" + str(d[i][1]) + "', '" + str(d[i][2]) + "', " + str(
d[i][3]) + ", " + str(d[i][4]) + '),'
else:
dd = '(' + str(d[i][0]) + ", '" + str(d[i][1]) + "', '" + str(d[i][2]) + "', " + str(
d[i][3]) + ", " + str(d[i][4]) + ');'
result = result + dd
i += 1
# result = result.replace('"',"'",10086)
name = 'ttttpp_index_' + str(k_1) + '_' + str(k_2) + 'k.txt'
fh = open(name, 'w')
fh.write(result)
fh.close()
k += 1
return
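# Illustration of insert_perfume_data_into_sql with a hypothetical row
# [7, 'No. 5', 'Chanel', 1921, 0]: the loop above appends the fragment
#     (7, 'No. 5', 'Chanel', 1921, 0),
# and the final row of each chunk ends in ');' instead of '),'.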
# Process note_in_perfume:
# deduplicate, then convert the CSV into INSERT statement strings
def process_n_in_p():
note_df = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/note_index.csv')
nn = note_df.set_index('note_name')
note_dic = nn.to_dict()['Unnamed: 0']
# key: perfume_name value:perfume_id
pp1 = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/perfume_for_index.csv')
pp12 = pp1.set_index('title')
pp_dic = pp12.to_dict()['Unnamed: 0']
    # key: perfume_id value: perfume_name, kept for verification
pp22 = pp1.set_index('Unnamed: 0')
p2_dic = pp22.to_dict()['title']
n_in_p = pd.read_csv('/Users/woody/UNSW-MIT/21T2-COMP9900/note_in_perfume.csv')
np_index = pd.DataFrame(columns=['perfume_id', 'note_id'])
for r in range(0, n_in_p.shape[0]):
pp = n_in_p['perfume_name'][r]
nn = n_in_p['note_name'][r]
pi = pp_dic[pp]
ni = note_dic[nn]
        # Important: build a one-row DataFrame first, then append it as the last row
new = pd.DataFrame({'perfume_id': pi,
'note_id': ni},
                            index=[1])  # custom index 1; setting an index here is optional
np_index = np_index.append(new, ignore_index=True)
    # Save the mapping as CSV
np_index.to_csv('np_index.csv')
    # While the perfume_id stays the same, append note ids to one list;
    # when it changes, dedupe the previous list via set() and write everything out
ex_p = np_index['perfume_id'][0]
ex_n = np_index['note_id'][0]
cur_pn_list = [ex_n]
nn_pp = pd.DataFrame(columns=['perfume_id', 'note_id'])
for r in range(1, np_index.shape[0]):
# for r in range(1,30):
cur_p = np_index['perfume_id'][r]
cur_n = np_index['note_id'][r]
if ex_p == cur_p:
cur_pn_list.append(cur_n)
else:
aset = list(set(cur_pn_list))
cur_pn_list = [cur_n]
# print(ex_p)
# print(aset)
for ni in aset:
new = pd.DataFrame({'perfume_id': ex_p, 'note_id': ni}, index=[1])
nn_pp = nn_pp.append(new, ignore_index=True)
ex_p = cur_p
nn_pp.to_csv('nn_pp.csv')
np_list = nn_pp.values.tolist()
for k1 in range(0, len(np_list), 50000):
k2 = k1 + 50000
result = 'INSERT INTO note_in_perfume(perfume_id, note_id) VALUES'
for i in range(k1, k2):
le = len(str(np_list[i]))
q = '(' + str(np_list[i])[1:le - 1] + ')'
result = result + q
if i != k2 - 1:
result = result + ','
else:
result = result + ';'
name = '50nip_' + str(k1 / 10000) + '_' + str(k2 / 10000) + 'w.txt'
fh = open(name, 'w')
fh.write(result)
fh.close()
k1 = 350000
k2 = len(np_list)
result = 'INSERT INTO note_in_perfume(perfume_id, note_id) VALUES'
for i in range(k1, k2):
le = len(str(np_list[i]))
q = '(' + str(np_list[i])[1:le - 1] + ')'
result = result + q
if i != k2 - 1:
result = result + ','
else:
result = result + ';'
name = '50nip_' + str(k1 / 10000) + '_' + str(k2 / 10000) + 'w.txt'
fh = open(name, 'w')
fh.write(result)
fh.close()
return
if __name__ == '__main__':
operation = sys.argv[1]
original_perfume = pd.read_csv('perfume.csv')
###########################
    # Build the notes table
if operation == 'notes_table':
notes_table(original_perfume)
if operation == 'perfume_table':
perfume_table(original_perfume)
|
[
"pandas.DataFrame",
"pandas.isna",
"re.split",
"pandas.read_csv"
] |
[((1603, 1654), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['perfume_name', 'note_name']"}), "(columns=['perfume_name', 'note_name'])\n", (1615, 1654), True, 'import pandas as pd\n'), ((2329, 2375), 'pandas.DataFrame', 'pd.DataFrame', (['note_list'], {'columns': "['note_name']"}), "(note_list, columns=['note_name'])\n", (2341, 2375), True, 'import pandas as pd\n'), ((3435, 3521), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['title', 'brand', 'date', 'image', 'description', 'target']"}), "(columns=['title', 'brand', 'date', 'image', 'description',\n 'target'])\n", (3447, 3521), True, 'import pandas as pd\n'), ((4897, 4960), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/woody/UNSW-MIT/21T2-COMP9900/pp_index.csv"""'], {}), "('/Users/woody/UNSW-MIT/21T2-COMP9900/pp_index.csv')\n", (4908, 4960), True, 'import pandas as pd\n'), ((6335, 6400), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/woody/UNSW-MIT/21T2-COMP9900/note_index.csv"""'], {}), "('/Users/woody/UNSW-MIT/21T2-COMP9900/note_index.csv')\n", (6346, 6400), True, 'import pandas as pd\n'), ((6539, 6611), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/woody/UNSW-MIT/21T2-COMP9900/perfume_for_index.csv"""'], {}), "('/Users/woody/UNSW-MIT/21T2-COMP9900/perfume_for_index.csv')\n", (6550, 6611), True, 'import pandas as pd\n'), ((6831, 6901), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/woody/UNSW-MIT/21T2-COMP9900/note_in_perfume.csv"""'], {}), "('/Users/woody/UNSW-MIT/21T2-COMP9900/note_in_perfume.csv')\n", (6842, 6901), True, 'import pandas as pd\n'), ((6918, 6965), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['perfume_id', 'note_id']"}), "(columns=['perfume_id', 'note_id'])\n", (6930, 6965), True, 'import pandas as pd\n'), ((7625, 7672), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['perfume_id', 'note_id']"}), "(columns=['perfume_id', 'note_id'])\n", (7637, 7672), True, 'import pandas as pd\n'), ((9456, 9482), 'pandas.read_csv', 'pd.read_csv', (['"""perfume.csv"""'], {}), "('perfume.csv')\n", (9467, 9482), True, 'import pandas as pd\n'), ((853, 923), 're.split', 're.split', (['"""-1|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20"""', 's'], {}), "('-1|0|1|2|3|4|5|6|7|8|9|10|11|12|13|14|15|16|17|18|19|20', s)\n", (861, 923), False, 'import re\n'), ((7189, 7247), 'pandas.DataFrame', 'pd.DataFrame', (["{'perfume_id': pi, 'note_id': ni}"], {'index': '[1]'}), "({'perfume_id': pi, 'note_id': ni}, index=[1])\n", (7201, 7247), True, 'import pandas as pd\n'), ((5388, 5404), 'pandas.isna', 'pd.isna', (['d[i][1]'], {}), '(d[i][1])\n', (5395, 5404), True, 'import pandas as pd\n'), ((8086, 8146), 'pandas.DataFrame', 'pd.DataFrame', (["{'perfume_id': ex_p, 'note_id': ni}"], {'index': '[1]'}), "({'perfume_id': ex_p, 'note_id': ni}, index=[1])\n", (8098, 8146), True, 'import pandas as pd\n')]
|
######################################
# tooltip.py
# @author: <NAME>
# 6/24/2015
######################################
from Tkinter import Toplevel, TclError, Label, LEFT, SOLID
class ToolTip(object):
"""
Displays text in a label below a passed widget
:param widget: The widget tooltip will be binding text to
"""
def __init__(self, widget):
self.widget = widget
self.tipWindow = None
self.x = self.y = 0
self.text = ''
# noinspection PyProtectedMember
def show_tip(self, text):
"""
Create and pack the tooltip, bound to the ``'<Enter>'`` event when
:py:func:`createToolTip` is called
:param str text: string to place inside label
"""
self.text = text
if self.tipWindow or not self.text:
return
x, y, cx, cy = self.widget.bbox('insert') # @UnusedVariable
# Larger button should have the tip placed lower
if self.widget.winfo_height() > 70:
x = x + self.widget.winfo_rootx() + 50
y = y + cy + self.widget.winfo_rooty() + 50
else:
x = x + self.widget.winfo_rootx() + 27
y = y + cy + self.widget.winfo_rooty() + 27
self.tipWindow = tw = Toplevel(self.widget)
tw.wm_overrideredirect(1)
tw.wm_geometry('+%d+%d' % (x, y))
try:
# For Mac OS
tw.tk.call('::Tk::unsupported::MacWindowStyle',
'style', tw._w,
'help', 'noActivates')
except TclError:
pass
label = Label(tw, text=self.text, justify=LEFT,
background='#ffffe0', relief=SOLID, borderwidth=1,
font=('tahoma', '8', 'normal'))
label.pack(ipadx=1)
def hide_tip(self):
"""
Hide or destroy the tool tip label when the mouse leaves widget.
Bound to the ``'<Leave>'`` event when :py:func:`createToolTip` is called
"""
tw = self.tipWindow
self.tipWindow = None
if tw:
tw.destroy()
def create_tool_tip(widget, text):
"""
Create an instance of :py:class:`ToolTip` and bind the ``'<Enter>'`` and
``'<Leave>'`` events for displaying to the widget passed
:param widget: the widget for the tooltip to be displayed below
:param str text: text contained in the tooltip
"""
tool_tip = ToolTip(widget)
# noinspection PyUnusedLocal
def enter(event):
tool_tip.show_tip(text)
# noinspection PyUnusedLocal
def leave(event):
tool_tip.hide_tip()
widget.bind('<Enter>', enter)
widget.bind('<Leave>', leave)
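# Minimal usage sketch (hypothetical widget and text, assuming a Python 2
# environment where Tkinter is available):
if __name__ == '__main__':
    from Tkinter import Tk, Button
    root = Tk()
    button = Button(root, text='Hover me')
    button.pack()
    create_tool_tip(button, 'This tooltip appears below the button')
    root.mainloop()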
|
[
"Tkinter.Label",
"Tkinter.Toplevel"
] |
[((1267, 1288), 'Tkinter.Toplevel', 'Toplevel', (['self.widget'], {}), '(self.widget)\n', (1275, 1288), False, 'from Tkinter import Toplevel, TclError, Label, LEFT, SOLID\n'), ((1606, 1732), 'Tkinter.Label', 'Label', (['tw'], {'text': 'self.text', 'justify': 'LEFT', 'background': '"""#ffffe0"""', 'relief': 'SOLID', 'borderwidth': '(1)', 'font': "('tahoma', '8', 'normal')"}), "(tw, text=self.text, justify=LEFT, background='#ffffe0', relief=SOLID,\n borderwidth=1, font=('tahoma', '8', 'normal'))\n", (1611, 1732), False, 'from Tkinter import Toplevel, TclError, Label, LEFT, SOLID\n')]
|
from pathlib import Path
def get_project_path():
return Path(__file__).parent
def get_data_path():
project_path = get_project_path()
parent_dir = project_path.parent
return parent_dir / 'data'
def get_results_path():
project_path = get_project_path()
parent_dir = project_path.parent
return parent_dir / 'results'
# get string path
def string_path(path_arg):
if not isinstance(path_arg, str):
if hasattr(path_arg, 'as_posix'):
path_arg = path_arg.as_posix()
else:
raise TypeError('Cannot convert variable to string path')
else:
path_arg = path_arg.replace('\\', '/')
return path_arg
image_formats = ('bmp', 'jpeg', 'tif', 'png', 'tiff')
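# Usage sketch (hypothetical paths): string_path accepts either a str or any
# object with an as_posix() method, e.g. a pathlib.Path, and normalizes
# backslashes to forward slashes.
if __name__ == '__main__':
    print(string_path(get_data_path() / 'raw'))   # Path -> posix string
    print(string_path('C:\\data\\raw'))          # str  -> 'C:/data/raw'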
|
[
"pathlib.Path"
] |
[((62, 76), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (66, 76), False, 'from pathlib import Path\n')]
|
#!/usr/bin/env python
#Copyright (c) 2014, <NAME> <<EMAIL>>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ccl_c_dir = "ccl_single_pass_c_code/"
import subprocess
import signal
import glob
import os
import re
import numpy as np
import threading
from PIL import Image
from cola import ComponentLabeling as cola
from converter import Img_conv
class CompareCP:
def get_error_cnt (self):
return self.__error_cnt
def get_report (self):
return self.__report
def __init__(self, timeout, max_x, max_y, file_show=None):
self.__timeout = timeout
self.__report = ""
self.__error_cnt = 0
self.__c_box = []
# convert images
try:
            os.mkdir(ccl_c_dir + "/img")
        except OSError:
            pass
for files in glob.glob(ccl_c_dir + "/img/*.pbm"):
os.remove(files)
destreg = re.compile(r".*/(.*)$")
file_chk=""
file_cnt=0
if file_show is None:
for files in glob.glob("../img/*.pbm"):
if files != "../img/sim_in.pbm" and files != "../img\\sim_in.pbm":
img = Img_conv(files, max_x, max_y, 0.5)
m = destreg.match(files)
if m.group(1) is not None:
file_cnt+=1
file_chk+="img/" + m.group(1) + "\n"
img.save(ccl_c_dir + "/img/" + m.group(1))
else:
img = Img_conv(file_show, max_x, max_y, 0.5)
m = destreg.match(file_show)
if m.group(1) is not None:
file_cnt+=1
file_chk+="img/" + m.group(1) + "\n"
img.save(ccl_c_dir + "/img/" + m.group(1))
f = open(ccl_c_dir + "/test_batch_01.txt", "w")
f.write(str(file_cnt) + "\n" + file_chk)
f.close()
del f
self.get_c_box()
if file_show is None:
for files in glob.glob("../img/*.pbm"):
if files != "../img/sim_in.pbm" and files != "../img\\sim_in.pbm":
file_cnt+=1
pycola = cola(files, max_x=max_x, max_y=max_y);
self.chk_file(files, pycola)
del pycola
else:
pycola = cola(file_show, max_x=max_x, max_y=max_y);
c_boxes=self.chk_file(file_show, pycola)
print((str(c_boxes)))
pycola.plot_sp_add('Boxes C', None, c_boxes)
def chk_file(self, files, pycola):
self.__report += "Check file: " + files + "\n"
py_boxes = pycola.get_boxes().copy()
c_boxes = {}
box_cnt = 0
for b in py_boxes:
((py_start_x, py_start_y), (py_end_x, py_end_y)) = py_boxes[b]
found = False
for bc in self.__c_box:
(stim_file, c_start_y, c_start_x, c_end_y, c_end_x) = bc
c_end_x -= 1
c_end_y -= 1
c_boxes[str(c_start_x) + str(c_start_y) + str(c_end_x) + str(c_end_y)] = ((c_start_x, c_start_y), (c_end_x, c_end_y))
box_cnt += 1
if stim_file == files[3:] and py_start_x == c_start_x and py_start_y == c_start_y and py_end_x == c_end_x and py_end_y == c_end_y:
found = True
self.__c_box.remove(bc)
break
if not found:
self.__report += "\033[91mError\033[0m" + " Python Box: ((" + str(py_start_x)
self.__report += ", " + str(py_start_y) + "), (" + str(py_end_x) + ", " + str(py_end_y) + ")"
self.__report += " not in C implementation\n"
self.__error_cnt += 1
for bc in self.__c_box:
(stim_file, c_start_y, c_start_x, c_end_y, c_end_x) = bc
c_end_x -= 1
c_end_y -= 1
if stim_file == files[3:]:
self.__report += "\033[91mError\033[0m" + " C Box: ((" + str(c_start_x)
self.__report += ", " + str(c_start_y) + "), (" + str(c_end_x) + ", " + str(c_end_y) + ")"
self.__report += " not in Python implementation\n"
self.__error_cnt += 1
del pycola
return c_boxes
def get_c_box(self):
c_box = C_parser()
c_box.start()
while not c_box.done:
c_box.event.wait(self.__timeout)
if not c_box.event.is_set():
break;
if not c_box.done:
self.__report += "\033[91mError\033[0m" + " Verification with C Code timedout\n"
self.__error_cnt += 1
else:
self.__c_box = c_box.getMessages()
del c_box
class CompareF:
def get_py_lable (self):
return self.__py_lable
def get_hdl_lable (self):
return self.__hdl_lable
def get_hdl_boxes (self):
return self.__hdl_boxes
def get_error_cnt (self):
return self.__error_cnt
def get_report (self):
return self.__report
def get_pycola(self):
return self.__pycola
def __init__(self, stim_file, passone, timeout, wdir, hdl_file, box_only,
resolution, max_x, max_y, continuous, run_only=False):
self.__timeout = timeout
self.__wdir = wdir
self.__max_x__ = max_x
self.__max_y__ = max_y
self.__passone__ = passone
self.__continuous__ = continuous
self.__resolution__ = resolution
self.__hdl_file__ = hdl_file
        self.__stim_file__ = stim_file
        self.__box_only__ = box_only
self.__regmeta = re.compile(r".*metavalue detected.*")
self.__py_colas = {}
self.__py_lables = {}
self.__hdl_lables = {}
self.__px_boxes = {}
self.__hdl_boxes = {}
self.__report = ""
self.__error_cnt=0
if not run_only:
self.__prepare__()
else:
#write stimulus file
j = Image.fromarray(self.__stim_file__.astype(np.uint8))
j.mode = "1";
j.save("../img/sim_in.pbm")
del j
def __prepare__(self):
self.__pycola = cola(self.__stim_file__, max_x=self.__max_x__,
max_y=self.__max_y__);
#labels of first pass
if self.__passone__:
self.__py_lable = self.__pycola.get_lable_f()
else:
self.__py_lable = self.__pycola.get_lable_s()
#generate empty array to store results of vhdl output
self.__hdl_lable = -1*np.ones(self.__py_lable.shape, dtype=np.int)
if not self.__continuous__:
self.__py_colas[self.__stim_file__] = self.__pycola
self.__py_lables[self.__stim_file__] = self.__py_lable
self.__hdl_lables[self.__stim_file__] = self.__hdl_lable
#write test image file for vhdl
j = Image.fromarray(self.__pycola.get_img().astype(np.uint8))
j.mode = "1";
j.save("../img/sim_in.pbm")
del j
#if stim_file != "../img/sim_in.pbm":
# shutil.copy(stim_file, "../img/sim_in.pbm")
        if not self.__box_only__:
self.verify_labels(self.__hdl_file__, self.__stim_file__,
self.__resolution__, self.__continuous__)
if not self.__passone__:
if self.__hdl_file__ == "tb_labeling":
self.run_boxes("tb_labeling_box", self.__stim_file__,
self.__resolution__, self.__continuous__)
elif self.__hdl_file__ == "tb_labeling_cont":
self.run_boxes("tb_labeling_box_cont", self.__stim_file__,
self.__resolution__, self.__continuous__)
else:
self.run_boxes(self.__hdl_file__, self.__stim_file__,
self.__resolution__, self.__continuous__)
def verify_labels(self, hdl_file, stim_file, resolution="ns", continuous=False):
vsim = VSIM_parser(hdl_file, "vhdl/", resolution)
vsim.start()
#compile some regex pattern
if continuous:
regline = re.compile(r"File: '([^']+)' Label: ([0-9]+).*")
else:
regline = re.compile(r"(Label:) ([0-9]+).*")
# index of picture
pos_x=0
pos_y=0
while not vsim.done:
vsim.event.wait(self.__timeout)
if not vsim.event.is_set():
break;
messages = vsim.getMessages()
for message in messages:
(time, severity, text) = message
if severity == "Note":
res = regline.match(text)
if res is None:
print(("unparsed text: " + text))
elif res.group(2) is not None:
label = int(res.group(2))
if continuous:
img_file = res.group(1)[3:]
stim_file = img_file
if img_file not in self.__py_lables:
pos_x = 0
pos_y = 0
self.__py_colas[img_file] = cola(stim_file, max_x=self.__max_x__, max_y=self.__max_y__);
self.__py_lables[img_file] = self.__py_colas[img_file].get_lable_s()
self.__hdl_lables[img_file] = -1*np.ones(self.__py_lables[img_file].shape, dtype=np.int)
if pos_y >= len(self.__py_lables[stim_file]):
self.__report += stim_file + ": additional pixel (x=" + str(pos_x) +", y=" + str(pos_y) +")\n"
self.__error_cnt += 1
else:
self.__hdl_lables[stim_file][pos_y][pos_x] = label
if self.__py_lables[stim_file][pos_y][pos_x] != label:
self.__report += ("\033[91mError\033[0m" + " File: "+ stim_file +" at pixel x=" + str(pos_x) + " y=" +
str(pos_y) + " expected: " + str(self.__py_lables[stim_file][pos_y][pos_x]) + " vhdl: " +
str(label) + " at time: " + str(time) + "\n")
self.__error_cnt += 1
pos_x = pos_x + 1
if pos_x == len(self.__py_lable[0]):
pos_y = pos_y + 1
pos_x = 0
elif res.group(2) is not None:
self.__report = "\033[91mError\033[0m" + "Unknown Message: " + text + "\n"
else:
metaval = self.__regmeta.match(text)
if not(severity == "Warning" and metaval is not None):
self.__report += severity + " " + text + "\n"
if severity != "Note" and severity != "Warning":
                        # self.__error_cnt += 1
                        # TODO: report this separately
                        pass
if not vsim.done:
            self.__report = self.__report + stim_file + ": Output of data reached timeout in 2-pass simulation. Simulation aborted\n"
self.__error_cnt += 1
for files in self.__py_lables:
if len(self.__py_lables[files][0]) > pos_y and pos_x != 0:
self.__report = self.__report + files + ": Not all pixels processed. First unprocessed pixel: x=" + str(pos_x+1) + " y=" + str(pos_y+1) + "\n"
self.__error_cnt += 1
del vsim
def run_boxes(self, hdl_file, stim_file, resolution="ns",
continuous=False, compare=True):
vsim = VSIM_parser(hdl_file, self.__wdir, resolution)
vsim.start()
if continuous:
regline = re.compile(r"File: '([^']+)' Box: \(([0-9]+), ([0-9]+)\), \(([0-9]+), ([0-9]+)\).*|Box: (error).*")
else:
regline = re.compile(r"(Box): \(([0-9]+), ([0-9]+)\), \(([0-9]+), ([0-9]+)\).*|Box: (error).*")
cnt={}
if (stim_file not in self.__px_boxes) and compare:
self.__px_boxes[stim_file] = self.__py_colas[stim_file].get_boxes().copy()
self.__hdl_boxes[stim_file] = {}
cnt[stim_file] = 0
elif not compare:
self.__hdl_boxes[stim_file] = {}
cnt[stim_file] = 0
while not vsim.done:
vsim.event.wait(self.__timeout)
if not vsim.event.is_set():
break;
messages = vsim.getMessages()
for message in messages:
(time, severity, text) = message
#print ("test:" + str(message))
if severity == "Note":
res = regline.match(text)
if res is None:
print(("unparsed text: \""+text+ "\""))
elif res.group(6) is not None:
self.__error_cnt += 1
self.__report = "Recognised error with to small heap\n"
elif res.group(2) is not None:
img_file = res.group(1)[3:]
if continuous:
self.__px_boxes[img_file] = self.__px_boxes[stim_file]
self.__hdl_boxes[img_file] = self.__hdl_boxes[stim_file]
cnt[stim_file] = cnt[img_file]
stim_file = img_file
start_x = int(res.group(2))
start_y = int(res.group(3))
end_x = int(res.group(4))
end_y = int(res.group(5))
self.__hdl_boxes[stim_file][cnt[stim_file]] = ((start_x, start_y), (end_x, end_y))
cnt[stim_file] += 1
if compare:
found = False
for b in self.__px_boxes[stim_file]:
((py_start_x, py_start_y), (py_end_x, py_end_y)) = self.__px_boxes[stim_file][b]
if py_start_x == start_x and py_start_y == start_y and py_end_x == end_x and py_end_y == end_y:
found = True
del self.__px_boxes[stim_file][b]
break
if not found:
self.__report += "\033[91mError\033[0m" + " File: '" + stim_file
self.__report += "' VHDL found box ((" + str(start_x) + ", "
self.__report += str(start_y) + "), (" + str(end_x) + ", "
self.__report += str(end_y) + ")) but python not\n"
self.__error_cnt += 1
elif res.group(3) is not None:
self.__report = "\033[91mError\033[0m" + "Unknown Message: " + text
else:
metaval = self.__regmeta.match(text)
if not(severity == "Warning" and metaval is not None):
self.__report += severity + " " + text + "\n"
if severity != "Note" and severity != "Warning":
                        # self.__error_cnt += 1
                        # TODO: report this separately
                        pass
if compare:
for f in self.__px_boxes:
if self.__px_boxes[f] != {}:
for b in self.__px_boxes[f]:
((start_x, start_y), (end_x, end_y)) = self.__px_boxes[f][b]
self.__report += "\033[91mError\033[0m" + " File: '" + f
self.__report += "' VHDL missing box ((" + str(start_x) + ", "
self.__report += str(start_y) + "), (" + str(end_x) + ", " + str(end_y) + "))\n"
self.__error_cnt += 1
if not vsim.done:
            self.__report = self.__report + stim_file + ": Output of data reached timeout in simulation of 2-pass with boundbox calculation. Simulation aborted\n"
self.__error_cnt += 1
del vsim
class Exec_parser(threading.Thread):
    ## Executes a binary file and parses its output
    #
    # Use event.wait to wait for new messages or for the done signal.
    # The boolean done tells you whether the simulation has finished.
    # @param cmd command to execute
    # @param cwd working directory
    # @param regex used to parse each line of stdout; the match result is
    # passed as parameter to eval_line
def __init__(self, cmd, cwd=".", regex = None):
super(Exec_parser, self).__init__()
self.__cmd = cmd;
self.__cwd = cwd
self.event = threading.Event()
self.__sema = threading.Semaphore()
self.__messages = []
self.done = False
self.__stop = False
# store parsed messages
        # overwrite these values
self.__regline = re.compile(regex)
print(("Exec: " + str(cmd)))
print(("CWD: " + self.__cwd))
self.__proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, cwd=self.__cwd)
def add_message(self, m):
with self.__sema:
self.__messages.append(m)
self.event.set()
## Get all Messages stored in the message queue
def getMessages(self):
with self.__sema:
ret_msg = self.__messages
self.__messages = []
self.event.clear()
return ret_msg
def __del__(self):
self.__stop = True
os.kill(self.__proc.pid, signal.SIGKILL)
    ## This method has to evaluate the result of the regex for each line
    # you need to override this method
    def eval_line(self, res):
        pass
def run(self):
line = ' ';
while not self.__stop and line != '':
#apply precompile regex pattern
line = self.__proc.stdout.readline().decode()
res = self.__regline.match(line)
if res is not None:
self.eval_line(res)
# notify the event if done
with self.__sema:
self.event.set()
self.done = True
class VSIM_parser(Exec_parser):
vsim="vsim"
## Executes Modelsim and parses the output
#
    # Use event.wait to wait for new messages or for the done signal.
    # The boolean done tells you whether the simulation has finished.
    # @param hdl_entity entity which should be executed
    # @param cwd working directory; this has to be the directory where the vlib is stored
def __init__(self, hdl_entity, cwd=".", resolution="ns"):
super(VSIM_parser, self).__init__([self.vsim, "-c", "-do", "run -all;quit", "-t", resolution, hdl_entity], cwd, r"# Time: ([0-9]+ [fpnum]s).*|# \*\* (Note|Warning|Error|Failure): (.*)")
self.__msg = []
    ## This method has to evaluate the result of the regex for each line
def eval_line(self, res):
if res.group(1) is not None:
# this is the output of a time info
for m in self.__msg:
(severity, text) = m
self.add_message((res.group(1), severity, text))
self.__msg = []
else:
if res.group(2) is not None:
severity = res.group(2)
if res.group(3) is not None:
self.__msg.append((severity, res.group(3)))
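# Illustration (hypothetical Modelsim output lines) of the regex handed to
# Exec_parser above:
#   "# Time: 100 ns  Iteration: 0"  -> group(1) == "100 ns"
#   "# ** Note: Label: 3"           -> group(2) == "Note", group(3) == "Label: 3"
# so buffered (severity, text) pairs are stamped with the next Time line.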
class C_parser(Exec_parser):
## Executes Cpp Code and parses the output
#
    # Use event.wait to wait for new messages or for the done signal.
    # The boolean done tells you whether the simulation has finished.
    # @param cwd working directory; this has to be the directory containing the ccl binary
def __init__(self, cwd=ccl_c_dir):
super(C_parser, self).__init__(["./ccl"], cwd, r"Processing file '([^']+)' and .*|Completed object:\[([0-9]+), ([0-9]+)\]x\[([0-9]+), ([0-9]+)\].*")
self.__file=""
    ## This method has to evaluate the result of the regex for each line
def eval_line(self, res):
if res.group(1) is not None:
# filename of analyzed file
self.__file = res.group(1)
else:
if res.group(2) is not None and res.group(3) is not None and res.group(4) is not None and res.group(5) is not None :
self.add_message((self.__file, int(res.group(2)), int(res.group(3)), int(res.group(4)), int(res.group(5))))
if __name__== "__main__":
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-s", "--sim-file", dest="sim_file",
help="filename for which the simulation should run and the result be visualized")
parser.add_option("-p", "--pass-one", dest="passone", action="store_true",
help="only the first pass will be analyzed otherwise the lables after the second pass and the boundboxes will be analyzed")
parser.add_option("-u", "--uart-tb", dest="uart_tb", action="store_true",
help="Simulates the uart_tb and compare the output with the python implementation")
parser.add_option("-n", "--continuous", dest="continuous", action="store_true",
help="Sends all Pictures in one continuous stream to the DUT")
parser.add_option("-t", "--timeout", dest="timeout", type="float", default=120.0,
help="seconds (as float) how long the time between two outputs before abort the simulation")
parser.add_option("-c", dest="c", action="store_true",
help="Checks the Python boundbox calculation against the cpp")
parser.add_option("-v", "--vhdl-dut", dest="v", action="store_true",
help="Checks the Python boundbox calculation against the vhdl DUT")
parser.add_option("--no-lables", dest="nl", action="store_true",
help="Don't check lables")
parser.add_option("-x", "--max-x", dest="max_x", type="int", default=32,
help="Max width of image send to ccl")
parser.add_option("-y", "--max-y", dest="max_y", type="int", default=32,
help="Max height of image send to ccl")
parser.add_option("-d", "--input-dir", dest="indir" , default="../img/",
help="Input dir used to check all Files")
parser.add_option("-e", "--file-extension", dest="fext", default="pbm",
help="File extension for the input dir run (default \"pbm\")")
(option, args) = parser.parse_args()
fext = option.fext
indir = option.indir
box_only = False
hdl_file = "tb_labeling"
resolution = "ns"
if option.uart_tb:
hdl_file = "tb_com_uart"
resolution = "ps"
box_only = True
if option.passone:
hdl_file = "tb_labeling_p1"
wdir="vhdl/"
if option.v:
wdir="vhdl/ccl_dut/"
box_only = True
if option.nl:
box_only = True
if (not option.c) and option.sim_file:
if option.passone:
comp_first=CompareF(option.sim_file, option.passone, option.timeout, wdir,
hdl_file, box_only, resolution, option.max_x,
option.max_y, False)
comp_first.get_pycola().plot_fp_add('First Pass HDL',
comp_first.get_hdl_lable())
else:
comp_first=CompareF(option.sim_file, False, option.timeout, wdir,
hdl_file, box_only, resolution, option.max_x,
option.max_y, False)
errors = comp_first.get_error_cnt()
print(str(errors) + " errors reported")
print("error report: \n" + comp_first.get_report())
if box_only:
boxes = comp_first.get_hdl_boxes()
if len(boxes) == 1:
for k in boxes:
comp_first.get_pycola().plot_sp_add('Boxes HDL', None, boxes[k])
elif len(boxes) == 0:
comp_first.get_pycola().plot_sp_add('Boxes HDL', None, None)
else:
print ("more outputs received than expected")
print((str(boxes)))
else:
boxes = comp_first.get_hdl_boxes()
if len(boxes) <= 1:
for k in boxes:
comp_first.get_pycola().plot_sp_add('Second Pass HDL',
comp_first.get_hdl_lable(), boxes[k])
elif len(boxes) == 0:
comp_first.get_pycola().plot_sp_add('Second Pass HDL',
comp_first.get_hdl_lable(), None)
else:
print ("more outputs received than expected")
print((str(boxes)))
else:
        # run verification of all available stimuli files and generate a report
# count errors
errors=0
chkdfiles=""
err_by_file={}
report=""
if option.c:
cmp_cp = CompareCP(option.timeout, option.max_x, option.max_y, option.sim_file)
errors = cmp_cp.get_error_cnt()
print((cmp_cp.get_report()))
elif option.continuous:
cnt = 0
filenames = ""
for files in glob.glob(indir + "/*." + option.fext):
if files != indir + "/sim_in." + fext and files != indir + "\\sim_in."+fext:
filenames += "../" + files + "\n"
cnt += 1
f = open("../img/continuous.files", 'w')
f.write(str(cnt) + "\n")
f.write(str(option.max_x) + "\n")
f.write(str(option.max_y) + "\n")
f.write(filenames)
f.close()
hdl_file="tb_labeling_cont"
comp_first=CompareF(files, option.passone, option.timeout, wdir, hdl_file,
box_only, resolution, option.max_x, option.max_y, True)
errors = errors + comp_first.get_error_cnt()
print((comp_first.get_report()))
else:
#run vhdl simulation for each input file
for files in glob.glob(indir + "/*."+fext):
if files != indir + "/sim_in." +fext and files != indir + "\\sim_in." +fext:
print(("\n\nStart verification with input of " + files+"\n"))
chkdfiles = chkdfiles + files +'\n'
comp_first=CompareF(files, option.passone, option.timeout, wdir,
hdl_file, box_only, resolution, option.max_x, option.max_y, False)
errors = errors + comp_first.get_error_cnt()
err_by_file[files] = comp_first.get_error_cnt()
print((comp_first.get_report()))
print("Verification with the following files:")
for filename in err_by_file:
if err_by_file[filename] == 0:
print(("\033[92m" + filename + "\033[0m"))
else:
print(("\033[91m" + filename + " errors: " + str(err_by_file[filename]) + "\033[0m"))
if errors == 0:
print("\033[92mVerification successful\033[0m")
else:
print(report)
print(("\033[91mVerification failed\033[0m with " + str(errors) + " errors"))
if wdir == "vhdl/ccl_dut/":
print(("The verification is only valid if you run ./mk_build.sh in "+wdir))
print("Don't forget to run ./mk_synthesis.sh before a synthesis run")
|
[
"os.mkdir",
"subprocess.Popen",
"os.remove",
"optparse.OptionParser",
"converter.Img_conv",
"numpy.ones",
"os.kill",
"threading.Event",
"glob.glob",
"cola.ComponentLabeling",
"threading.Semaphore",
"re.compile"
] |
[((21936, 21950), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (21948, 21950), False, 'from optparse import OptionParser\n'), ((2045, 2080), 'glob.glob', 'glob.glob', (["(ccl_c_dir + '/img/*.pbm')"], {}), "(ccl_c_dir + '/img/*.pbm')\n", (2054, 2080), False, 'import glob\n'), ((2130, 2152), 're.compile', 're.compile', (['""".*/(.*)$"""'], {}), "('.*/(.*)$')\n", (2140, 2152), False, 'import re\n'), ((6778, 6814), 're.compile', 're.compile', (['""".*metavalue detected.*"""'], {}), "('.*metavalue detected.*')\n", (6788, 6814), False, 'import re\n'), ((7382, 7450), 'cola.ComponentLabeling', 'cola', (['self.__stim_file__'], {'max_x': 'self.__max_x__', 'max_y': 'self.__max_y__'}), '(self.__stim_file__, max_x=self.__max_x__, max_y=self.__max_y__)\n', (7386, 7450), True, 'from cola import ComponentLabeling as cola\n'), ((18098, 18115), 'threading.Event', 'threading.Event', ([], {}), '()\n', (18113, 18115), False, 'import threading\n'), ((18138, 18159), 'threading.Semaphore', 'threading.Semaphore', ([], {}), '()\n', (18157, 18159), False, 'import threading\n'), ((18334, 18351), 're.compile', 're.compile', (['regex'], {}), '(regex)\n', (18344, 18351), False, 'import re\n'), ((18449, 18510), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'cwd': 'self.__cwd'}), '(cmd, stdout=subprocess.PIPE, cwd=self.__cwd)\n', (18465, 18510), False, 'import subprocess\n'), ((18930, 18970), 'os.kill', 'os.kill', (['self.__proc.pid', 'signal.SIGKILL'], {}), '(self.__proc.pid, signal.SIGKILL)\n', (18937, 18970), False, 'import os\n'), ((1960, 1988), 'os.mkdir', 'os.mkdir', (["(ccl_c_dir + '/img')"], {}), "(ccl_c_dir + '/img')\n", (1968, 1988), False, 'import os\n'), ((2094, 2110), 'os.remove', 'os.remove', (['files'], {}), '(files)\n', (2103, 2110), False, 'import os\n'), ((2249, 2274), 'glob.glob', 'glob.glob', (['"""../img/*.pbm"""'], {}), "('../img/*.pbm')\n", (2258, 2274), False, 'import glob\n'), ((2708, 2746), 'converter.Img_conv', 'Img_conv', (['file_show', 'max_x', 'max_y', '(0.5)'], {}), '(file_show, max_x, max_y, 0.5)\n', (2716, 2746), True, 'from converter import Img_conv as Img_conv\n'), ((3188, 3213), 'glob.glob', 'glob.glob', (['"""../img/*.pbm"""'], {}), "('../img/*.pbm')\n", (3197, 3213), False, 'import glob\n'), ((3513, 3554), 'cola.ComponentLabeling', 'cola', (['file_show'], {'max_x': 'max_x', 'max_y': 'max_y'}), '(file_show, max_x=max_x, max_y=max_y)\n', (3517, 3554), True, 'from cola import ComponentLabeling as cola\n'), ((7751, 7795), 'numpy.ones', 'np.ones', (['self.__py_lable.shape'], {'dtype': 'np.int'}), '(self.__py_lable.shape, dtype=np.int)\n', (7758, 7795), True, 'import numpy as np\n'), ((9301, 9348), 're.compile', 're.compile', (['"""File: \'([^\']+)\' Label: ([0-9]+).*"""'], {}), '("File: \'([^\']+)\' Label: ([0-9]+).*")\n', (9311, 9348), False, 'import re\n'), ((9386, 9419), 're.compile', 're.compile', (['"""(Label:) ([0-9]+).*"""'], {}), "('(Label:) ([0-9]+).*')\n", (9396, 9419), False, 'import re\n'), ((13049, 13161), 're.compile', 're.compile', (['"""File: \'([^\']+)\' Box: \\\\(([0-9]+), ([0-9]+)\\\\), \\\\(([0-9]+), ([0-9]+)\\\\).*|Box: (error).*"""'], {}), '(\n "File: \'([^\']+)\' Box: \\\\(([0-9]+), ([0-9]+)\\\\), \\\\(([0-9]+), ([0-9]+)\\\\).*|Box: (error).*"\n )\n', (13059, 13161), False, 'import re\n'), ((13193, 13291), 're.compile', 're.compile', (['"""(Box): \\\\(([0-9]+), ([0-9]+)\\\\), \\\\(([0-9]+), ([0-9]+)\\\\).*|Box: (error).*"""'], {}), "(\n '(Box): \\\\(([0-9]+), ([0-9]+)\\\\), \\\\(([0-9]+), ([0-9]+)\\\\).*|Box: 
(error).*'\n )\n", (13203, 13291), False, 'import re\n'), ((26581, 26619), 'glob.glob', 'glob.glob', (["(indir + '/*.' + option.fext)"], {}), "(indir + '/*.' + option.fext)\n", (26590, 26619), False, 'import glob\n'), ((27432, 27463), 'glob.glob', 'glob.glob', (["(indir + '/*.' + fext)"], {}), "(indir + '/*.' + fext)\n", (27441, 27463), False, 'import glob\n'), ((2385, 2419), 'converter.Img_conv', 'Img_conv', (['files', 'max_x', 'max_y', '(0.5)'], {}), '(files, max_x, max_y, 0.5)\n', (2393, 2419), True, 'from converter import Img_conv as Img_conv\n'), ((3359, 3396), 'cola.ComponentLabeling', 'cola', (['files'], {'max_x': 'max_x', 'max_y': 'max_y'}), '(files, max_x=max_x, max_y=max_y)\n', (3363, 3396), True, 'from cola import ComponentLabeling as cola\n'), ((10381, 10440), 'cola.ComponentLabeling', 'cola', (['stim_file'], {'max_x': 'self.__max_x__', 'max_y': 'self.__max_y__'}), '(stim_file, max_x=self.__max_x__, max_y=self.__max_y__)\n', (10385, 10440), True, 'from cola import ComponentLabeling as cola\n'), ((10608, 10663), 'numpy.ones', 'np.ones', (['self.__py_lables[img_file].shape'], {'dtype': 'np.int'}), '(self.__py_lables[img_file].shape, dtype=np.int)\n', (10615, 10663), True, 'import numpy as np\n')]
|
import re
import pytest
from pathlib import Path
from utilities import subprocess_runner, remove_ansible_warnings
TEST_CASES = [
"../class6/collateral/roles_test/test_pb1.yml",
"../class6/collateral/roles_test/test_pb2.yml",
"../class6/collateral/roles_test/test_pb3.yml",
"../class6/collateral/roles_test/test_pb4.yml",
"../class6/collateral/roles_test/test_pb5.yml",
"../class6/collateral/tasks/include_import_tags.yml",
"../class6/collateral/tasks/include_import_when.yml",
"../class6/collateral/tasks/include_tasks_loop.yml",
"../class6/collateral/tasks/standalone_pb.yml",
"../class6/collateral/tasks/standalone_pb2.yml",
"../class6/collateral/tasks/standalone_pb3.yml",
# Expected to fail
# "../class6/collateral/tasks/standalone_pb4.yml",
"../class6/collateral/vars/test_vars1.yml",
"../class6/collateral/vars/test_vars2.yml",
"../class6/collateral/vars/test_vars3.yml",
]
@pytest.mark.parametrize("test_case", TEST_CASES)
def test_runner_collateral(test_case):
path_obj = Path(test_case)
script = path_obj.name
script_dir = path_obj.parents[0]
cmd_list = ["ansible-playbook", script]
std_out, std_err, return_code = subprocess_runner(cmd_list, script_dir)
std_err = remove_ansible_warnings(std_err)
assert return_code == 0
assert std_err == ""
@pytest.mark.parametrize("exercise", ["exercise1a.yml", "exercise1b.yml"])
def test_class6_ex1a_1b(exercise):
base_path = "../class6/exercises/exercise1"
cmd_list = ["ansible-playbook", exercise]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert "localhost : ok=9" in std_out
@pytest.mark.parametrize(
"tags,result",
[(None, "ok=13"), ("foo1", "ok=5"), ("foo2", "ok=5"), ("foo3", "ok=5")],
)
def test_class6_ex1c(tags, result):
base_path = "../class6/exercises/exercise1"
if tags:
cmd_list = ["ansible-playbook", "exercise1c.yml", "--tags", tags]
else:
cmd_list = ["ansible-playbook", "exercise1c.yml"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert result in std_out
@pytest.mark.parametrize("exercise", ["exercise2a.yml", "exercise2b.yml"])
def test_class6_ex2a_2b(exercise):
base_path = "../class6/exercises/exercise2"
cmd_list = ["ansible-playbook", exercise]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert "localhost : ok=2" in std_out
@pytest.mark.parametrize(
"tags,result",
[(None, "ok=4"), ("foo1", "ok=2"), ("foo2", "ok=2"), ("foo3", "ok=2")],
)
def test_class6_ex2c(tags, result):
base_path = "../class6/exercises/exercise2"
if tags:
cmd_list = ["ansible-playbook", "exercise2c.yml", "--tags", tags]
else:
cmd_list = ["ansible-playbook", "exercise2c.yml"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert result in std_out
def test_class6_ex3():
"""Should be idempotent on the second execution."""
base_path = "../class6/exercises/exercise3"
cmd_list = ["ansible-playbook", "exercise3.yml"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
# Execute script again
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert re.search(r"^cisco1\s+:\s+ok=2.*skipped=1.*$", std_out, flags=re.M)
assert re.search(r"^cisco2\s+:\s+ok=2.*skipped=1.*$", std_out, flags=re.M)
assert re.search(r"^cisco5\s+:\s+ok=2.*skipped=1.*$", std_out, flags=re.M)
assert re.search(r"^cisco6\s+:\s+ok=2.*skipped=1.*$", std_out, flags=re.M)
def test_class6_ex4():
"""Should be idempotent on the second execution."""
base_path = "../class6/exercises/exercise4"
cmd_list = ["ansible-playbook", "exercise4.yml", "-f 12"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
# Execute script again
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert re.search(r"^cisco1\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^cisco2\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^cisco5\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^cisco6\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^arista5\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^arista6\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^arista7\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^arista8\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^nxos1\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
assert re.search(r"^nxos2\s+:\s+ok=2.*skipped=3.*$", std_out, flags=re.M)
def test_class6_ex5():
"""Should be idempotent on the second execution."""
base_path = "../class6/exercises/exercise5"
cmd_list = ["ansible-playbook", "exercise5.yml", "-f 12"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
# Execute script again
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert re.search(r"^cisco1\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^cisco2\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^cisco5\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^cisco6\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^arista5\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^arista6\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^arista7\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^arista8\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^nxos1\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
assert re.search(r"^nxos2\s+:\s+ok=2.*skipped=2.*$", std_out, flags=re.M)
def test_class6_ex6():
base_path = "../class6/exercises/exercise6"
cmd_list = ["ansible-playbook", "exercise6.yml"]
std_out, std_err, return_code = subprocess_runner(cmd_list, exercise_dir=base_path)
std_err = remove_ansible_warnings(std_err)
assert std_err == ""
assert return_code == 0
assert re.search(r"^cisco5\s+:\s+ok=1 ", std_out, flags=re.M)
assert re.search(r"^cisco6\s+:\s+ok=1 ", std_out, flags=re.M)
|
[
"utilities.remove_ansible_warnings",
"pathlib.Path",
"utilities.subprocess_runner",
"pytest.mark.parametrize",
"re.search"
] |
[((948, 996), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""test_case"""', 'TEST_CASES'], {}), "('test_case', TEST_CASES)\n", (971, 996), False, 'import pytest\n'), ((1354, 1427), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""exercise"""', "['exercise1a.yml', 'exercise1b.yml']"], {}), "('exercise', ['exercise1a.yml', 'exercise1b.yml'])\n", (1377, 1427), False, 'import pytest\n'), ((1806, 1921), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tags,result"""', "[(None, 'ok=13'), ('foo1', 'ok=5'), ('foo2', 'ok=5'), ('foo3', 'ok=5')]"], {}), "('tags,result', [(None, 'ok=13'), ('foo1', 'ok=5'),\n ('foo2', 'ok=5'), ('foo3', 'ok=5')])\n", (1829, 1921), False, 'import pytest\n'), ((2388, 2461), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""exercise"""', "['exercise2a.yml', 'exercise2b.yml']"], {}), "('exercise', ['exercise2a.yml', 'exercise2b.yml'])\n", (2411, 2461), False, 'import pytest\n'), ((2840, 2955), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""tags,result"""', "[(None, 'ok=4'), ('foo1', 'ok=2'), ('foo2', 'ok=2'), ('foo3', 'ok=2')]"], {}), "('tags,result', [(None, 'ok=4'), ('foo1', 'ok=2'), (\n 'foo2', 'ok=2'), ('foo3', 'ok=2')])\n", (2863, 2955), False, 'import pytest\n'), ((1051, 1066), 'pathlib.Path', 'Path', (['test_case'], {}), '(test_case)\n', (1055, 1066), False, 'from pathlib import Path\n'), ((1211, 1250), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list', 'script_dir'], {}), '(cmd_list, script_dir)\n', (1228, 1250), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((1265, 1297), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (1288, 1297), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((1593, 1644), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (1610, 1644), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((1659, 1691), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (1682, 1691), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((2204, 2255), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (2221, 2255), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((2270, 2302), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (2293, 2302), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((2627, 2678), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (2644, 2678), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((2693, 2725), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (2716, 2725), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((3237, 3288), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (3254, 3288), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((3303, 3335), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (3326, 3335), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((3636, 3687), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (3653, 3687), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((3702, 3734), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (3725, 3734), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((3851, 3902), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (3868, 3902), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((3917, 3949), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (3940, 3949), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((4014, 4082), 're.search', 're.search', (['"""^cisco1\\\\s+:\\\\s+ok=2.*skipped=1.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco1\\\\s+:\\\\s+ok=2.*skipped=1.*$', std_out, flags=re.M)\n", (4023, 4082), False, 'import re\n'), ((4093, 4161), 're.search', 're.search', (['"""^cisco2\\\\s+:\\\\s+ok=2.*skipped=1.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco2\\\\s+:\\\\s+ok=2.*skipped=1.*$', std_out, flags=re.M)\n", (4102, 4161), False, 'import re\n'), ((4172, 4240), 're.search', 're.search', (['"""^cisco5\\\\s+:\\\\s+ok=2.*skipped=1.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco5\\\\s+:\\\\s+ok=2.*skipped=1.*$', std_out, flags=re.M)\n", (4181, 4240), False, 'import re\n'), ((4251, 4319), 're.search', 're.search', (['"""^cisco6\\\\s+:\\\\s+ok=2.*skipped=1.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco6\\\\s+:\\\\s+ok=2.*skipped=1.*$', std_out, flags=re.M)\n", (4260, 4319), False, 'import re\n'), ((4546, 4597), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (4563, 4597), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((4612, 4644), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (4635, 4644), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((4761, 4812), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (4778, 4812), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((4827, 4859), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (4850, 4859), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((4924, 4992), 're.search', 're.search', (['"""^cisco1\\\\s+:\\\\s+ok=2.*skipped=3.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco1\\\\s+:\\\\s+ok=2.*skipped=3.*$', std_out, flags=re.M)\n", (4933, 4992), False, 'import re\n'), ((5003, 5071), 're.search', 're.search', (['"""^cisco2\\\\s+:\\\\s+ok=2.*skipped=3.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco2\\\\s+:\\\\s+ok=2.*skipped=3.*$', std_out, flags=re.M)\n", (5012, 5071), False, 'import re\n'), ((5082, 5150), 're.search', 're.search', (['"""^cisco5\\\\s+:\\\\s+ok=2.*skipped=3.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco5\\\\s+:\\\\s+ok=2.*skipped=3.*$', std_out, flags=re.M)\n", (5091, 5150), False, 'import re\n'), ((5161, 5229), 're.search', 're.search', (['"""^cisco6\\\\s+:\\\\s+ok=2.*skipped=3.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco6\\\\s+:\\\\s+ok=2.*skipped=3.*$', std_out, flags=re.M)\n", (5170, 5229), False, 'import re\n'), ((5240, 5309), 're.search', 're.search', (['"""^arista5\\\\s+:\\\\s+ok=2.*skipped=3.*$"""', 'std_out'], {'flags': 're.M'}), "('^arista5\\\\s+:\\\\s+ok=2.*skipped=3.*$', std_out, flags=re.M)\n", (5249, 5309), False, 'import re\n'), ((5320, 5389), 're.search', 're.search', (['"""^arista6\\\\s+:\\\\s+ok=2.*skipped=3.*$"""', 'std_out'], {'flags': 're.M'}), "('^arista6\\\\s+:\\\\s+ok=2.*skipped=3.*$', std_out, flags=re.M)\n", (5329, 5389), False, 'import re\n'), ((5400, 5469), 're.search', 're.search', (['"""^arista7\\\\s+:\\\\s+ok=2.*skipped=3.*$"""', 'std_out'], {'flags': 're.M'}), "('^arista7\\\\s+:\\\\s+ok=2.*skipped=3.*$', std_out, flags=re.M)\n", (5409, 5469), False, 'import re\n'), ((5480, 5549), 're.search', 're.search', (['"""^arista8\\\\s+:\\\\s+ok=2.*skipped=3.*$"""', 'std_out'], {'flags': 're.M'}), "('^arista8\\\\s+:\\\\s+ok=2.*skipped=3.*$', std_out, flags=re.M)\n", (5489, 5549), False, 'import re\n'), ((5560, 5627), 're.search', 're.search', (['"""^nxos1\\\\s+:\\\\s+ok=2.*skipped=3.*$"""', 'std_out'], {'flags': 're.M'}), "('^nxos1\\\\s+:\\\\s+ok=2.*skipped=3.*$', std_out, flags=re.M)\n", (5569, 5627), False, 'import re\n'), ((5638, 5705), 're.search', 're.search', (['"""^nxos2\\\\s+:\\\\s+ok=2.*skipped=3.*$"""', 'std_out'], {'flags': 're.M'}), "('^nxos2\\\\s+:\\\\s+ok=2.*skipped=3.*$', std_out, flags=re.M)\n", (5647, 5705), False, 'import re\n'), ((5932, 5983), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (5949, 5983), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((5998, 6030), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (6021, 6030), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((6147, 6198), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (6164, 6198), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((6213, 6245), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (6236, 6245), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((6310, 6378), 're.search', 're.search', (['"""^cisco1\\\\s+:\\\\s+ok=2.*skipped=2.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco1\\\\s+:\\\\s+ok=2.*skipped=2.*$', std_out, flags=re.M)\n", (6319, 6378), False, 'import re\n'), ((6389, 6457), 're.search', 're.search', (['"""^cisco2\\\\s+:\\\\s+ok=2.*skipped=2.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco2\\\\s+:\\\\s+ok=2.*skipped=2.*$', std_out, flags=re.M)\n", (6398, 6457), False, 'import re\n'), ((6468, 6536), 're.search', 're.search', (['"""^cisco5\\\\s+:\\\\s+ok=2.*skipped=2.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco5\\\\s+:\\\\s+ok=2.*skipped=2.*$', std_out, flags=re.M)\n", (6477, 6536), False, 'import re\n'), ((6547, 6615), 're.search', 're.search', (['"""^cisco6\\\\s+:\\\\s+ok=2.*skipped=2.*$"""', 'std_out'], {'flags': 're.M'}), "('^cisco6\\\\s+:\\\\s+ok=2.*skipped=2.*$', std_out, flags=re.M)\n", (6556, 6615), False, 'import re\n'), ((6626, 6695), 're.search', 're.search', (['"""^arista5\\\\s+:\\\\s+ok=2.*skipped=2.*$"""', 'std_out'], {'flags': 're.M'}), "('^arista5\\\\s+:\\\\s+ok=2.*skipped=2.*$', std_out, flags=re.M)\n", (6635, 6695), False, 'import re\n'), ((6706, 6775), 're.search', 're.search', (['"""^arista6\\\\s+:\\\\s+ok=2.*skipped=2.*$"""', 'std_out'], {'flags': 're.M'}), "('^arista6\\\\s+:\\\\s+ok=2.*skipped=2.*$', std_out, flags=re.M)\n", (6715, 6775), False, 'import re\n'), ((6786, 6855), 're.search', 're.search', (['"""^arista7\\\\s+:\\\\s+ok=2.*skipped=2.*$"""', 'std_out'], {'flags': 're.M'}), "('^arista7\\\\s+:\\\\s+ok=2.*skipped=2.*$', std_out, flags=re.M)\n", (6795, 6855), False, 'import re\n'), ((6866, 6935), 're.search', 're.search', (['"""^arista8\\\\s+:\\\\s+ok=2.*skipped=2.*$"""', 'std_out'], {'flags': 're.M'}), "('^arista8\\\\s+:\\\\s+ok=2.*skipped=2.*$', std_out, flags=re.M)\n", (6875, 6935), False, 'import re\n'), ((6946, 7013), 're.search', 're.search', (['"""^nxos1\\\\s+:\\\\s+ok=2.*skipped=2.*$"""', 'std_out'], {'flags': 're.M'}), "('^nxos1\\\\s+:\\\\s+ok=2.*skipped=2.*$', std_out, flags=re.M)\n", (6955, 7013), False, 'import re\n'), ((7024, 7091), 're.search', 're.search', (['"""^nxos2\\\\s+:\\\\s+ok=2.*skipped=2.*$"""', 'std_out'], {'flags': 're.M'}), "('^nxos2\\\\s+:\\\\s+ok=2.*skipped=2.*$', std_out, flags=re.M)\n", (7033, 7091), False, 'import re\n'), ((7253, 7304), 'utilities.subprocess_runner', 'subprocess_runner', (['cmd_list'], {'exercise_dir': 'base_path'}), '(cmd_list, exercise_dir=base_path)\n', (7270, 7304), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((7319, 7351), 'utilities.remove_ansible_warnings', 'remove_ansible_warnings', (['std_err'], {}), '(std_err)\n', (7342, 7351), False, 'from utilities import subprocess_runner, remove_ansible_warnings\n'), ((7416, 7471), 're.search', 're.search', (['"""^cisco5\\\\s+:\\\\s+ok=1 """', 'std_out'], {'flags': 're.M'}), "('^cisco5\\\\s+:\\\\s+ok=1 ', std_out, flags=re.M)\n", (7425, 7471), False, 'import re\n'), ((7482, 7537), 're.search', 're.search', (['"""^cisco6\\\\s+:\\\\s+ok=1 """', 'std_out'], {'flags': 're.M'}), "('^cisco6\\\\s+:\\\\s+ok=1 ', std_out, flags=re.M)\n", (7491, 7537), False, 'import re\n')]
|
from decimal import Decimal
from django.conf import settings
from shop.models import Product
from coupons.models import Coupon
class Cart(object):
def __init__(self, request):
"""
        Initialize the cart
        :param request: current request; the cart is kept in its session
"""
self.session = request.session
cart = self.session.get(settings.CART_SESSION_ID)
if not cart:
# save an empty cart in the session
cart = self.session[settings.CART_SESSION_ID] = {}
self.cart = cart
# store current applied coupon
self.coupon_id = self.session.get('coupon_id')
def add(self, product, quantity=1, update_quantity=False):
"""
        Add a product to the cart or update the quantity of an existing item
        :param product: product which needs to be added
        :param quantity: quantity to add, 1 by default
        :param update_quantity: replace the quantity instead of adding to it, False by default
:return:
"""
        # We use product_id as the key to remember what has been added to the cart so far.
        # product_id is converted to a string because Django serializes session data to JSON,
        # and JSON only allows string keys; values may be integers but not Decimal
product_id = str(product.id)
if product_id not in self.cart:
self.cart[product_id] = {'quantity': 0,
'price': str(product.price)}
if update_quantity:
self.cart[product_id]['quantity'] = quantity
else:
self.cart[product_id]['quantity'] += quantity
self.save()
def save(self):
# update the session cart
self.session[settings.CART_SESSION_ID] = self.cart
# mark the session as "modified" to make sure it is saved
self.session.modified = True
def remove(self, product):
"""
Remove a product from the cart
        :param product: product object which needs to be removed
:return:
"""
product_id = str(product.id)
if product_id in self.cart:
del self.cart[product_id]
self.save()
    def __iter__(self):
        """
        Iterate over the product ids in the cart and get the products from the backend
        :return:
        """
        product_ids = self.cart.keys()
        # get the product objects and add them to the cart
        products = Product.objects.filter(id__in=product_ids)
        # work on copies of the cart items so that Product objects and Decimal
        # prices never leak into the JSON-serialized session data
        cart = {key: dict(value) for key, value in self.cart.items()}
        for product in products:
            cart[str(product.id)]['product'] = product
        for item in cart.values():
            item['price'] = Decimal(item['price'])
            item['total_price'] = item['price'] * item['quantity']
            yield item
def __len__(self):
"""
count all the items in the cart
:return:
"""
return sum(item['quantity'] for item in self.cart.values())
    # TODO: Update the UI for get_total_price / get_total_price_after_discount once discounts are added;
    # currently the cart detail and invoice pages still show the old total after a discount is applied
    def get_total_price(self):
        # We are not going through __iter__ here, so prices are still strings
        # and have to be converted to Decimal before multiplying
        return sum(item['quantity'] * Decimal(item['price']) for item in self.cart.values())
def clear(self):
"""
To empty the cart
:return:
"""
self.session[settings.CART_SESSION_ID] = {}
self.session.modified = True
@property
def coupon(self):
if self.coupon_id:
return Coupon.objects.get(id=self.coupon_id)
return None
def get_discount(self):
if self.coupon:
return (self.coupon.discount / Decimal('100')) \
* self.get_total_price()
return Decimal('0')
def get_total_price_after_discount(self):
return self.get_total_price() - self.get_discount()
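# ---------------------------------------------------------------------------
# Illustrative only (not part of the original module): how the coupon maths
# works out. With a cart total of Decimal('50.00') and a coupon whose
# discount field is 20 (per cent):
#     get_discount()                   -> (20 / Decimal('100')) * 50.00 = 10.00
#     get_total_price_after_discount() -> 50.00 - 10.00 = 40.00
# Keeping everything in Decimal avoids the float rounding issues that storing
# session prices as strings is meant to prevent.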
|
[
"shop.models.Product.objects.filter",
"coupons.models.Coupon.objects.get",
"decimal.Decimal"
] |
[((2370, 2412), 'shop.models.Product.objects.filter', 'Product.objects.filter', ([], {'id__in': 'product_ids'}), '(id__in=product_ids)\n', (2392, 2412), False, 'from shop.models import Product\n'), ((3788, 3800), 'decimal.Decimal', 'Decimal', (['"""0"""'], {}), "('0')\n", (3795, 3800), False, 'from decimal import Decimal\n'), ((2575, 2597), 'decimal.Decimal', 'Decimal', (["item['price']"], {}), "(item['price'])\n", (2582, 2597), False, 'from decimal import Decimal\n'), ((3557, 3594), 'coupons.models.Coupon.objects.get', 'Coupon.objects.get', ([], {'id': 'self.coupon_id'}), '(id=self.coupon_id)\n', (3575, 3594), False, 'from coupons.models import Coupon\n'), ((3241, 3263), 'decimal.Decimal', 'Decimal', (["item['price']"], {}), "(item['price'])\n", (3248, 3263), False, 'from decimal import Decimal\n'), ((3711, 3725), 'decimal.Decimal', 'Decimal', (['"""100"""'], {}), "('100')\n", (3718, 3725), False, 'from decimal import Decimal\n')]
|
# Generated by Django 2.2.6 on 2020-08-20 14:01
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('core', '0042_conference_treasurer'),
]
operations = [
migrations.AddField(
model_name='conference',
name='waiver_avail_date',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='conference',
name='waiver_deadline',
field=models.DateField(default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='conference',
name='waiver_link',
field=models.CharField(default='www.bmun.org', max_length=300),
preserve_default=False,
),
]
|
[
"django.db.models.DateField",
"django.db.models.CharField"
] |
[((377, 428), 'django.db.models.DateField', 'models.DateField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (393, 428), False, 'from django.db import migrations, models\n'), ((597, 648), 'django.db.models.DateField', 'models.DateField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (613, 648), False, 'from django.db import migrations, models\n'), ((813, 869), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""www.bmun.org"""', 'max_length': '(300)'}), "(default='www.bmun.org', max_length=300)\n", (829, 869), False, 'from django.db import migrations, models\n')]
|
from django.utils.translation import gettext as _
NA = 99
RATE_CHOICES = (
(0, _('0. Need more info')),
(1, _('1. Poor')),
(2, _('2. Not so good')),
(3, _('3. Is o.k.')),
(4, _('4. Good')),
(5, _('5. Excellent')),
(NA, _('n/a - choose not to answer')),
)
RATE_CHOICES_DICT = dict(RATE_CHOICES)
RATE_CHOICE_NA = RATE_CHOICES_DICT[NA]
NO = 0
MAYBE = 1
YES = 2
RECOMMENDATION_CHOICES = (
(NO, _('No')),
(MAYBE, _('Maybe')),
(YES, _('Yes')),
)
DISAGREE = 0
AGREE = 1
OPINION_CHOICES = (
(AGREE, _('Agree')),
(DISAGREE, _('Disagree')),
)
PRIVATE = 'private'
REVIEWER = 'reviewers'
VISIBILITY_HELP_TEXT = {
PRIVATE: _('Visible only to staff.'),
REVIEWER: _('Visible to other reviewers and staff.'),
}
VISIBILITY = {
PRIVATE: _('Private'),
REVIEWER: _('Reviewers and Staff'),
}
|
[
"django.utils.translation.gettext"
] |
[((672, 699), 'django.utils.translation.gettext', '_', (['"""Visible only to staff."""'], {}), "('Visible only to staff.')\n", (673, 699), True, 'from django.utils.translation import gettext as _\n'), ((715, 757), 'django.utils.translation.gettext', '_', (['"""Visible to other reviewers and staff."""'], {}), "('Visible to other reviewers and staff.')\n", (716, 757), True, 'from django.utils.translation import gettext as _\n'), ((790, 802), 'django.utils.translation.gettext', '_', (['"""Private"""'], {}), "('Private')\n", (791, 802), True, 'from django.utils.translation import gettext as _\n'), ((818, 842), 'django.utils.translation.gettext', '_', (['"""Reviewers and Staff"""'], {}), "('Reviewers and Staff')\n", (819, 842), True, 'from django.utils.translation import gettext as _\n'), ((85, 107), 'django.utils.translation.gettext', '_', (['"""0. Need more info"""'], {}), "('0. Need more info')\n", (86, 107), True, 'from django.utils.translation import gettext as _\n'), ((118, 130), 'django.utils.translation.gettext', '_', (['"""1. Poor"""'], {}), "('1. Poor')\n", (119, 130), True, 'from django.utils.translation import gettext as _\n'), ((141, 160), 'django.utils.translation.gettext', '_', (['"""2. Not so good"""'], {}), "('2. Not so good')\n", (142, 160), True, 'from django.utils.translation import gettext as _\n'), ((171, 186), 'django.utils.translation.gettext', '_', (['"""3. Is o.k."""'], {}), "('3. Is o.k.')\n", (172, 186), True, 'from django.utils.translation import gettext as _\n'), ((197, 209), 'django.utils.translation.gettext', '_', (['"""4. Good"""'], {}), "('4. Good')\n", (198, 209), True, 'from django.utils.translation import gettext as _\n'), ((220, 237), 'django.utils.translation.gettext', '_', (['"""5. Excellent"""'], {}), "('5. Excellent')\n", (221, 237), True, 'from django.utils.translation import gettext as _\n'), ((249, 280), 'django.utils.translation.gettext', '_', (['"""n/a - choose not to answer"""'], {}), "('n/a - choose not to answer')\n", (250, 280), True, 'from django.utils.translation import gettext as _\n'), ((427, 434), 'django.utils.translation.gettext', '_', (['"""No"""'], {}), "('No')\n", (428, 434), True, 'from django.utils.translation import gettext as _\n'), ((449, 459), 'django.utils.translation.gettext', '_', (['"""Maybe"""'], {}), "('Maybe')\n", (450, 459), True, 'from django.utils.translation import gettext as _\n'), ((472, 480), 'django.utils.translation.gettext', '_', (['"""Yes"""'], {}), "('Yes')\n", (473, 480), True, 'from django.utils.translation import gettext as _\n'), ((542, 552), 'django.utils.translation.gettext', '_', (['"""Agree"""'], {}), "('Agree')\n", (543, 552), True, 'from django.utils.translation import gettext as _\n'), ((570, 583), 'django.utils.translation.gettext', '_', (['"""Disagree"""'], {}), "('Disagree')\n", (571, 583), True, 'from django.utils.translation import gettext as _\n')]
|
"""
Simple audio clustering
1. Get the embeddings - at an interval of 0.5s each
2. Get the VAD - variable interval
3. Get embeddings for a VAD interval -> Take average of the embeddings
4. Get the ground truth for embedding for each speaker - marked 0.5s interval
5. L2 Normalize the embeddings before taking a distance measure
6. Clustering - Speaker Verification Task
1. Fix the ground truth embedding as the centroid for each speaker
2. Cluster all the points to the closest centroid
3. Verify the output
"""
import os
import argparse
import json
import yaml
import pickle
import numpy as np
import pandas as pd
import utils
import isat_diarization as isat_d
import constants
def dist_emb(emb_1, emb_2, dist_type="euclid"):
"""
Distance between two embeddings
"""
dist = None
if dist_type == "euclid":
# Euclidean distance
dist = np.linalg.norm(emb_1 - emb_2)
elif dist_type == "cosine":
# Cosine similarity
dist = np.dot(emb_1, emb_2) / (np.linalg.norm(emb_1) * np.linalg.norm(emb_2))
return dist
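# ---------------------------------------------------------------------------
# Not in the original file: a small sketch of steps 3 and 5 from the module
# docstring (average the fixed-interval embeddings falling inside one VAD
# segment, then L2-normalize the mean). The segment format (start/end in
# seconds) and the 0.5 s embedding hop are assumptions taken from the
# docstring, not from a verified spec.
def vad_segment_embedding(embeddings, vad_start, vad_end, hop=0.5):
    first = int(vad_start / hop)
    last = max(first + 1, int(vad_end / hop))
    segment = np.asarray(embeddings[first:last])
    mean_emb = segment.mean(axis=0)
    norm = np.linalg.norm(mean_emb)
    return mean_emb / norm if norm > 0 else mean_emb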
def cluster_gt(embeddings, vad, dict_gt_speakers):
dict_clusters = {
val: {
"embedding_id": key,
"embedding_val": embeddings[key],
} for key, val in dict_gt_speakers.items()
}
list_emb = [(dict_gt_speakers[key], embeddings[key]) for key, val in dict_gt_speakers.items()]
labels = []
for emb_index, emb_actual in enumerate(embeddings):
min_dist = np.inf
label = "NoSpeaker"
for speaker, emb_ref in list_emb:
dist = dist_emb(emb_ref, emb_actual)
if min_dist > dist:
min_dist = dist
label = speaker
labels.append(label)
return labels
def normalize_embeddings(embeddings):
    """
    L2-normalize each embedding (step 5 of the module docstring).
    https://numpy.org/doc/stable/reference/generated/numpy.linalg.norm.html
    """
    # the original computed one scalar norm and returned the input unchanged;
    # normalize row-wise so distances are taken between unit vectors
    l2_norm = np.linalg.norm(embeddings, ord=2, axis=-1, keepdims=True)
    return embeddings / np.maximum(l2_norm, 1e-12)
def get_embeddings(audio_path, dir_target, src="gen"):
"""
:param src: "gen" for generate, "file" for read from file
"""
embeddings = None
if src == "gen":
print(f"Generating embeddings")
embeddings = isat_d.gen_embeddings(audio_path, dir_target)
elif src == "file":
embeddings_path = os.path.join(dir_target, "embeddings.pkl")
with open(embeddings_path, "rb") as fh:
embeddings = pickle.load(fh)
print(f"Loaded embeddings from: {embeddings_path}")
print(f"embeddings: type: {type(embeddings)}")
embeddings_data = embeddings.data
return embeddings_data
def get_vad(vad_path):
with open(vad_path, "rb") as fh:
vad = json.load(fh)
print(f"Loaded vad from: {vad_path}")
print(f"vad: type: {type(vad)}")
return vad
def get_gt_emb():
dict_gt = {
0: "A",
20: "B",
30: "C",
}
return dict_gt
def yml_dump():
dict_gt = {
0: {
"audio_path": "x.wav",
"output_path": "../outputs",
"num_speakers": 2,
"ground_truths": [
{
"start": 2.1,
"end": 3.1,
"id": 123,
"name": "Krishna"
},
{
"start": 4.4,
"end": 7.1,
"id": 500,
"name": "Gauranga"
}
]
},
1: {
"audio_path": "y.wav",
"output_path": "../outputs",
"num_speakers": 2,
"ground_truths": [
{
"start": 2.1,
"end": 3.1,
"id": 123,
"name": "Krishna"
},
{
"start": 4.4,
"end": 7.1,
"id": 500,
"name": "Gauranga"
}
]
}
}
with open("../data/spkr_diarization_gt_temp.yml", "w") as fh:
yaml.dump(dict_gt, fh)
def round_off_embedding(start_time, float_embed_width=0.5):
    """Convert a start time to the index of the nearest embedding window.
    round_off_embedding(1.3)
    3
    round_off_embedding(2.6)
    5
    round_off_embedding(3.0)
    6
    round_off_embedding(4.1)
    8
    round_off_embedding(4.1, 0.25)
    16
    """
    reciprocal = int(1 / float_embed_width)
    embed_id = round(start_time * reciprocal)
    return embed_id
def get_embed_from_start_end(dict_all_gt):
dict_all_embed_gt = {}
for file_index, dict_gt in dict_all_gt.items():
dict_embed_gt = {
"ground_truths": [],
"audio_path": dict_gt["audio_path"],
"output_path": dict_gt["output_path"],
"num_speakers": dict_gt["num_speakers"]
}
list_ground_truths = []
for spkr_index, dict_spkr in enumerate(dict_gt["ground_truths"]):
start = dict_spkr["start"]
# end = dict_spkr["end"]
# id = dict_spkr["id"]
# name = dict_spkr["name"]
embed_start_id = round_off_embedding(start)
dict_gt = {
"embed_start_id": embed_start_id,
"id": dict_spkr["id"],
"name": dict_spkr["name"]
}
list_ground_truths.append(dict_gt)
dict_embed_gt["ground_truths"] = list_ground_truths
dict_all_embed_gt[file_index] = dict_embed_gt
return dict_all_embed_gt
def cluster_all(gt_yml_fp):
dict_all_embed_gt = read_ground_truths(gt_yml_fp)
status = "Done"
for file_index, dict_gt in dict_all_embed_gt.items():
list_ground_truths = dict_gt["ground_truths"]
audio_path = dict_gt["audio_path"]
output_path = dict_gt["output_path"]
dict_emb_gt = {dict_spkr["embed_start_id"]: dict_spkr["name"] for dict_spkr in list_ground_truths}
# for spkr_index, dict_spkr in enumerate(list_ground_truths):
# dict_emb_gt[dict_spkr["embed_start_id"]] = dict_spkr["name"]
if not os.path.exists(output_path):
os.makedirs(output_path)
run_clustering(audio_path, output_path, dict_emb_gt)
return status
def read_ground_truths(gt_yml_fp):
with open(gt_yml_fp, "r") as fh:
        dict_all_gt = yaml.safe_load(fh)  # yaml.load needs an explicit Loader in newer PyYAML; safe_load avoids arbitrary object construction
print(dict_all_gt)
dict_all_embed_gt = get_embed_from_start_end(dict_all_gt)
print(dict_all_embed_gt)
return dict_all_embed_gt
def run_clustering(audio_path, output_path, dict_gt):
embeddings = get_embeddings(audio_path, output_path)
# vad_path = os.path.join(output_path, "vad.json")
# vad = get_vad(vad_path)
vad = None
labels = cluster_gt(embeddings, vad, dict_gt)
print(utils.print_list(labels, "Clustered Embeddings"))
df = pd.DataFrame()
df["embed_index"] = [x for x in range(len(labels))]
df["labels"] = labels
out_path = os.path.join(output_path, "cluster_labels.csv")
df.to_csv(out_path, index=False)
return df
def run_yaml(args):
gt_yml_fp = args.get("gt_yml_fp", "../data/spkr_diarization_gt.yml")
cluster_all(gt_yml_fp)
def run(args):
audio_path = args.get("audio_path", "../no/audio")
output_path = args.get("output_path", "../outputs")
dict_gt = get_gt_emb()
run_clustering(audio_path, output_path, dict_gt)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--audio_path",
type=str,
help="audio filepath",
default="../data/panel_discussion_0045_15s.wav")
parser.add_argument("--output_path",
type=str,
help="output_path",
default="../outputs/panel_discussion_0045_15s_5")
parser.add_argument("--gt_yml_fp",
type=str,
help="ground truth yaml file path",
default="../data/spkr_diarization_gt.yml")
parser.add_argument("--config_path",
type=str,
help="config_path",
default="../configs/config_5.yml")
# parser.add_argument("-v", "--verbose", action="store_true",
# help="increase output verbosity")
args = parser.parse_args()
dict_args = vars(args)
dir_output = dict_args.get("output_path", "../outputs")
if not os.path.exists(dir_output):
os.makedirs(dir_output)
else:
print(f"ATTENTION: directory: [{dir_output}] already exists.")
return dict_args
def main():
args = parse_args()
run_yaml(args)
# yml_dump()
# print(round_off_embedding(4.1, 0.25))
# print(round_off_embedding(4.1, 0.35))
# print(round_off_embedding(4.1, 0.5))
# print(round_off_embedding(4.35, 0.25))
# print(round_off_embedding(4.35, 0.35))
# print(round_off_embedding(4.35, 0.5))
# read_ground_truths()
if __name__ == '__main__':
main()
|
[
"pandas.DataFrame",
"yaml.load",
"json.load",
"argparse.ArgumentParser",
"os.makedirs",
"yaml.dump",
"os.path.exists",
"isat_diarization.gen_embeddings",
"utils.print_list",
"pickle.load",
"numpy.linalg.norm",
"numpy.dot",
"os.path.join"
] |
[((1913, 1946), 'numpy.linalg.norm', 'np.linalg.norm', (['embeddings'], {'ord': '(2)'}), '(embeddings, ord=2)\n', (1927, 1946), True, 'import numpy as np\n'), ((6918, 6932), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (6930, 6932), True, 'import pandas as pd\n'), ((7031, 7078), 'os.path.join', 'os.path.join', (['output_path', '"""cluster_labels.csv"""'], {}), "(output_path, 'cluster_labels.csv')\n", (7043, 7078), False, 'import os\n'), ((7495, 7520), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7518, 7520), False, 'import argparse\n'), ((886, 915), 'numpy.linalg.norm', 'np.linalg.norm', (['(emb_1 - emb_2)'], {}), '(emb_1 - emb_2)\n', (900, 915), True, 'import numpy as np\n'), ((2209, 2254), 'isat_diarization.gen_embeddings', 'isat_d.gen_embeddings', (['audio_path', 'dir_target'], {}), '(audio_path, dir_target)\n', (2230, 2254), True, 'import isat_diarization as isat_d\n'), ((2697, 2710), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (2706, 2710), False, 'import json\n'), ((4086, 4108), 'yaml.dump', 'yaml.dump', (['dict_gt', 'fh'], {}), '(dict_gt, fh)\n', (4095, 4108), False, 'import yaml\n'), ((6424, 6437), 'yaml.load', 'yaml.load', (['fh'], {}), '(fh)\n', (6433, 6437), False, 'import yaml\n'), ((6858, 6906), 'utils.print_list', 'utils.print_list', (['labels', '"""Clustered Embeddings"""'], {}), "(labels, 'Clustered Embeddings')\n", (6874, 6906), False, 'import utils\n'), ((8544, 8570), 'os.path.exists', 'os.path.exists', (['dir_output'], {}), '(dir_output)\n', (8558, 8570), False, 'import os\n'), ((8580, 8603), 'os.makedirs', 'os.makedirs', (['dir_output'], {}), '(dir_output)\n', (8591, 8603), False, 'import os\n'), ((2306, 2348), 'os.path.join', 'os.path.join', (['dir_target', '"""embeddings.pkl"""'], {}), "(dir_target, 'embeddings.pkl')\n", (2318, 2348), False, 'import os\n'), ((6181, 6208), 'os.path.exists', 'os.path.exists', (['output_path'], {}), '(output_path)\n', (6195, 6208), False, 'import os\n'), ((6222, 6246), 'os.makedirs', 'os.makedirs', (['output_path'], {}), '(output_path)\n', (6233, 6246), False, 'import os\n'), ((992, 1012), 'numpy.dot', 'np.dot', (['emb_1', 'emb_2'], {}), '(emb_1, emb_2)\n', (998, 1012), True, 'import numpy as np\n'), ((2422, 2437), 'pickle.load', 'pickle.load', (['fh'], {}), '(fh)\n', (2433, 2437), False, 'import pickle\n'), ((1016, 1037), 'numpy.linalg.norm', 'np.linalg.norm', (['emb_1'], {}), '(emb_1)\n', (1030, 1037), True, 'import numpy as np\n'), ((1040, 1061), 'numpy.linalg.norm', 'np.linalg.norm', (['emb_2'], {}), '(emb_2)\n', (1054, 1061), True, 'import numpy as np\n')]
|
# Generated by Django 2.2.10 on 2020-11-04 21:03
from django.db import migrations, models
import versatileimagefield.fields
class Migration(migrations.Migration):
dependencies = [("page", "0001_initial")]
operations = [
migrations.AddField(
model_name="page", name="in_menu", field=models.BooleanField(default=False)
),
migrations.AddField(
model_name="page",
name="picture",
field=versatileimagefield.fields.VersatileImageField(
blank=True, null=True, upload_to="page/page/", verbose_name="Image"
),
),
]
|
[
"django.db.models.BooleanField"
] |
[((315, 349), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (334, 349), False, 'from django.db import migrations, models\n')]
|
import numpy as np
from distributions.distribution import Distribution
class NonParametric(Distribution):
"""
Provides functions for a non-parametric forecast distribution.
"""
@staticmethod
    def pdf(x, pdf_x, x_eval):
        """
        Computes the PDF of the non-parametric distribution at x given the PDF at evaluation points,
        by linear interpolation (the original left this method as a stub; this mirrors cdf below).
        """
        # Linear interpolation
        insertion_points = np.searchsorted(x_eval, x)
        r = np.minimum(insertion_points, len(x_eval) - 1)
        l = np.maximum(0, insertion_points - 1)
        idx = np.arange(len(x))
        slope = (pdf_x[r, idx] - pdf_x[l, idx]) / np.maximum(x_eval[r] - x_eval[l], 1e-6)
        return pdf_x[l, idx] + slope * (x - x_eval[l])
@staticmethod
def cdf(x, cdf_x, x_eval):
"""
Computes the CDF of the non-parametric distribution at x given the CDF at evaluation points,
by linear interpolation.
"""
# Linear interpolation
insertion_points = np.searchsorted(x_eval, x)
r = np.minimum(insertion_points, len(x_eval) - 1)
l = np.maximum(0, insertion_points - 1)
idx = np.arange(len(x))
slope = (cdf_x[r, idx] - cdf_x[l, idx]) / np.maximum(x_eval[r] - x_eval[l], 1e-6)
return cdf_x[l, idx] + slope * (x - x_eval[l])
@staticmethod
def mean(pdf_x, x_eval):
"""
Computes the mean of the non-parametric distribution by integrating the PDF at evaluation points,
using the trapezoidal rule.
"""
return np.trapz(
y=x_eval[:, np.newaxis] * pdf_x,
x=x_eval[:, np.newaxis],
axis=0
)
@staticmethod
def var(pdf_x, x_eval):
"""
Computes the variance of the non-parametric distribution by integrating the PDF at evaluation points,
using the trapezoidal rule.
"""
return np.trapz(
y=x_eval[:, np.newaxis] ** 2 * pdf_x,
x=x_eval[:, np.newaxis],
axis=0
) - np.trapz(
y=x_eval[:, np.newaxis] * pdf_x,
x=x_eval[:, np.newaxis],
axis=0
) ** 2
@staticmethod
def percentile(p, cdf_x, x_eval):
"""
Computes the p-percentile of the non-parametric distribution given the CDF at evaluation points,
by linear interpolation.
"""
# Linear interpolation
insertion_points = []
for i in range(cdf_x.shape[1]):
insertion_points.append(np.searchsorted(cdf_x[:, i], p / 100))
insertion_points = np.array(insertion_points)
r = np.minimum(insertion_points, len(cdf_x) - 1)
l = np.maximum(0, insertion_points - 1)
idx = np.arange(cdf_x.shape[1])
slope = (x_eval[r] - x_eval[l]) / np.maximum(cdf_x[r, idx] - cdf_x[l, idx], 1e-6)
return x_eval[l] + slope * (p / 100 - cdf_x[l, idx])
@staticmethod
def crps(x, cdf_x, x_eval):
"""
Computes the Continuous Ranked Probability Score (CRPS) of the non-parametric distribution with true value x,
using the trapezoidal rule.
"""
return np.trapz(
y=(cdf_x - (x_eval[:, np.newaxis] >= x[np.newaxis, :])) ** 2,
x=x_eval[:, np.newaxis],
axis=0
)
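# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module): the grid and density
# below are made up for the example - a discretized standard normal, whose
# mean and variance the trapezoidal integrals should recover.
if __name__ == "__main__":
    x_eval = np.linspace(-5, 5, 501)
    pdf_x = np.exp(-x_eval[:, np.newaxis] ** 2 / 2) / np.sqrt(2 * np.pi)
    print(NonParametric.mean(pdf_x, x_eval))  # ~0.0
    print(NonParametric.var(pdf_x, x_eval))   # ~1.0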
|
[
"numpy.trapz",
"numpy.maximum",
"numpy.searchsorted",
"numpy.array",
"numpy.arange"
] |
[((522, 548), 'numpy.searchsorted', 'np.searchsorted', (['x_eval', 'x'], {}), '(x_eval, x)\n', (537, 548), True, 'import numpy as np\n'), ((619, 654), 'numpy.maximum', 'np.maximum', (['(0)', '(insertion_points - 1)'], {}), '(0, insertion_points - 1)\n', (629, 654), True, 'import numpy as np\n'), ((1061, 1135), 'numpy.trapz', 'np.trapz', ([], {'y': '(x_eval[:, np.newaxis] * pdf_x)', 'x': 'x_eval[:, np.newaxis]', 'axis': '(0)'}), '(y=x_eval[:, np.newaxis] * pdf_x, x=x_eval[:, np.newaxis], axis=0)\n', (1069, 1135), True, 'import numpy as np\n'), ((2091, 2117), 'numpy.array', 'np.array', (['insertion_points'], {}), '(insertion_points)\n', (2099, 2117), True, 'import numpy as np\n'), ((2187, 2222), 'numpy.maximum', 'np.maximum', (['(0)', '(insertion_points - 1)'], {}), '(0, insertion_points - 1)\n', (2197, 2222), True, 'import numpy as np\n'), ((2237, 2262), 'numpy.arange', 'np.arange', (['cdf_x.shape[1]'], {}), '(cdf_x.shape[1])\n', (2246, 2262), True, 'import numpy as np\n'), ((2658, 2766), 'numpy.trapz', 'np.trapz', ([], {'y': '((cdf_x - (x_eval[:, np.newaxis] >= x[np.newaxis, :])) ** 2)', 'x': 'x_eval[:, np.newaxis]', 'axis': '(0)'}), '(y=(cdf_x - (x_eval[:, np.newaxis] >= x[np.newaxis, :])) ** 2, x=\n x_eval[:, np.newaxis], axis=0)\n', (2666, 2766), True, 'import numpy as np\n'), ((737, 777), 'numpy.maximum', 'np.maximum', (['(x_eval[r] - x_eval[l])', '(1e-06)'], {}), '(x_eval[r] - x_eval[l], 1e-06)\n', (747, 777), True, 'import numpy as np\n'), ((1414, 1493), 'numpy.trapz', 'np.trapz', ([], {'y': '(x_eval[:, np.newaxis] ** 2 * pdf_x)', 'x': 'x_eval[:, np.newaxis]', 'axis': '(0)'}), '(y=x_eval[:, np.newaxis] ** 2 * pdf_x, x=x_eval[:, np.newaxis], axis=0)\n', (1422, 1493), True, 'import numpy as np\n'), ((2305, 2353), 'numpy.maximum', 'np.maximum', (['(cdf_x[r, idx] - cdf_x[l, idx])', '(1e-06)'], {}), '(cdf_x[r, idx] - cdf_x[l, idx], 1e-06)\n', (2315, 2353), True, 'import numpy as np\n'), ((1542, 1616), 'numpy.trapz', 'np.trapz', ([], {'y': '(x_eval[:, np.newaxis] * pdf_x)', 'x': 'x_eval[:, np.newaxis]', 'axis': '(0)'}), '(y=x_eval[:, np.newaxis] * pdf_x, x=x_eval[:, np.newaxis], axis=0)\n', (1550, 1616), True, 'import numpy as np\n'), ((2025, 2062), 'numpy.searchsorted', 'np.searchsorted', (['cdf_x[:, i]', '(p / 100)'], {}), '(cdf_x[:, i], p / 100)\n', (2040, 2062), True, 'import numpy as np\n')]
|
# from collections import ChainMap
# food_types = {'Vegetables': 15, 'Dairy': 20, 'Meat': 3, 'Cereals': 9, 'Fruits': 11, 'Fish': 7}
# countries = {'USA': 25, 'Australia': 15, 'Canada': 15, 'France': 6, 'India': 4}
# discount = {'gold': 20, 'regular': 10}
# chain = ChainMap(food_types, countries)
# food_types['Sweets'] = 10
# # some missing lines
# countries['USA'] = 35
# chain = chain.new_child(discount)
# print(chain)
# def range_sum(numbers, start, end):
# return sum([x for x in numbers if start <= x <= end])
# input_numbers = [int(i) for i in input().split()]
# a, b = map(int, input().split())
# print(range_sum(input_numbers, a, b))
# passwords = input().split()
# # passwords = ['<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>', '<PASSWORD>']
# passwords.sort(key=len)
# for i in passwords:
# print(i, len(i))
from datetime import datetime
def get_weekday(datetime_obj):
d = datetime.strptime(datetime_obj, "%Y-%m-%d")
return d.strftime("%A")
print(get_weekday('2019-12-31'))
# JetBrains Academy solution
# def get_weekday(datetime_obj):
# return datetime_obj.strftime("%A")
def get_release_date(release_str):
s = release_str.replace("Day of release: ", "")
release = datetime.strptime(s, "%d %B %Y")
return release.strftime("%Y-%m-%d %H:%M:%S")
print(get_release_date("Day of release: 4 July 2019"))
|
[
"datetime.datetime.strptime"
] |
[((917, 960), 'datetime.datetime.strptime', 'datetime.strptime', (['datetime_obj', '"""%Y-%m-%d"""'], {}), "(datetime_obj, '%Y-%m-%d')\n", (934, 960), False, 'from datetime import datetime\n'), ((1231, 1263), 'datetime.datetime.strptime', 'datetime.strptime', (['s', '"""%d %B %Y"""'], {}), "(s, '%d %B %Y')\n", (1248, 1263), False, 'from datetime import datetime\n')]
|
from functools import wraps
from unittest.mock import patch
from auth import get_user_token_string
def mock_decorator(f):
"""Fake decorator for mocking other decorators."""
@wraps(f)
def decorated_function(*args, **kwargs):
return f(*args, **kwargs)
return decorated_function
def mock_auth_test(test_for, **kwargs):
"""Fake auth function for testing"""
return True
def mock_function_that_does_nothing(var):
return
get_auth_string_patch = patch(
"boxwise_flask.auth_helper.get_auth_string_from_header", get_user_token_string
)
requires_auth_patch = patch("boxwise_flask.auth_helper.requires_auth", mock_decorator)
authorization_test_patch = patch(
"boxwise_flask.auth_helper.authorization_test", mock_auth_test
)
add_user_to_request_context_patch = patch(
"boxwise_flask.auth_helper.add_user_to_request_context",
mock_function_that_does_nothing,
)
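# ---------------------------------------------------------------------------
# Illustrative usage (not part of the original module; the test hooks below
# are made up): the objects above are standard unittest.mock patchers, so a
# test module can start and stop them explicitly, e.g.
#     def setup_module(_module):
#         requires_auth_patch.start()
#         authorization_test_patch.start()
#     def teardown_module(_module):
#         requires_auth_patch.stop()
#         authorization_test_patch.stop()
# requires_auth must be patched before the decorated view modules are
# imported, because decorators are applied at import time.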
|
[
"unittest.mock.patch",
"functools.wraps"
] |
[((485, 574), 'unittest.mock.patch', 'patch', (['"""boxwise_flask.auth_helper.get_auth_string_from_header"""', 'get_user_token_string'], {}), "('boxwise_flask.auth_helper.get_auth_string_from_header',\n get_user_token_string)\n", (490, 574), False, 'from unittest.mock import patch\n'), ((599, 663), 'unittest.mock.patch', 'patch', (['"""boxwise_flask.auth_helper.requires_auth"""', 'mock_decorator'], {}), "('boxwise_flask.auth_helper.requires_auth', mock_decorator)\n", (604, 663), False, 'from unittest.mock import patch\n'), ((691, 760), 'unittest.mock.patch', 'patch', (['"""boxwise_flask.auth_helper.authorization_test"""', 'mock_auth_test'], {}), "('boxwise_flask.auth_helper.authorization_test', mock_auth_test)\n", (696, 760), False, 'from unittest.mock import patch\n'), ((803, 902), 'unittest.mock.patch', 'patch', (['"""boxwise_flask.auth_helper.add_user_to_request_context"""', 'mock_function_that_does_nothing'], {}), "('boxwise_flask.auth_helper.add_user_to_request_context',\n mock_function_that_does_nothing)\n", (808, 902), False, 'from unittest.mock import patch\n'), ((186, 194), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (191, 194), False, 'from functools import wraps\n')]
|
import numpy as np
from math import log, sqrt, ceil
import random
import string
from copy import copy
import pyximport
from tabulate import tabulate
pyximport.install()
from ..util import math_functions
import matplotlib.pyplot as plt
import textwrap
from textwrap import dedent
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
from joblib import Parallel, delayed
class FuzzyNode:
    feature = None
    is_terminal = None
    classification = None
# __slots__ = ['is_terminal',
# 'classification',
# 'feature'
# 'partitioning']
class FuzzyPartitioning:
def __init__(self):
self.partitions = []
self.gain = None
# __slots__ = ['partitions', 'gain']
class FuzzyPartition:
__slots__ = ['f', 'node', 'properties', 'ranges']
class FuzzySetProperties:
__slots__ = ['cardinality',
'entropy',
'data',
'memberships']
# noinspection PyAttributeOutsideInit,PyPropertyAccess,PyUnresolvedReferences
class RandomFuzzyTree:
    def __init__(self,
                 n_jobs=1,
                 p="sqrt",
                 terminal_n_threshold=10,
                 categorical_features=(),  # avoid a shared mutable default argument
                 a_cut=0.5,
                 test_generation_file=None,
                 test_indentation_level=1):
self.test_generation_file = test_generation_file
self.test_cases_generated = 0
self.n_jobs = n_jobs
self.p = p
self.is_fit = False
self.terminal_n_threshold = terminal_n_threshold
self.categorical_features = categorical_features
self.a_cut = a_cut
        self.test_indentation_level = test_indentation_level
def fit(self, data, ranges, copy_data=False, classes=(1, 2)):
self.classes = classes
if copy_data:
data = np.copy(data)
self.ranges = ranges
self.n_feature = self.count_features(data)
if self.p == "sqrt":
self.p = ceil(sqrt(self.n_feature))
elif self.p == "log":
self.p = ceil(log(self.n_feature, 2))
elif self.p == "all":
self.p = self.n_feature
tree = self.build_tree(data, np.array([1.0 for d in data]))
self.root = tree
self.is_fit = True
def predict(self, x):
assert self.is_fit
memberships = self.predict_memberships(x)
        # max() over the dict alone would return the largest class label,
        # not the class with the highest membership
        return max(memberships, key=memberships.get)
def predict_memberships(self, x):
memberships = dict([(c, 0) for c in self.classes])
self.forward_pass(memberships, x, self.root)
return memberships
def score(self, data):
correct = 0
for x in data:
if self.predict(x[:-1]) == x[-1]:
correct += 1
return correct / data.shape[0]
def build_tree(self, data, memberships, lvl=0, ranges=None):
        if ranges is None:
ranges = self.ranges
# print("\t\t Bulting tree lvl %d" % (lvl + 1) )
regular_features = self.get_regular_features(data)
if len(regular_features) != 0:
node = self.select_partitioning(data, memberships, regular_features, ranges)
else:
node = self.generate_leaf(data, memberships)
if node.is_terminal or self.is_terminal(node, data, memberships):
node.is_terminal = True
node.classification = self.classification(data, memberships)
else:
for p in node.partitioning.partitions:
next_ranges = copy(ranges)
next_ranges[node.feature] = p.ranges[node.feature]
p.node = self.build_tree(p.properties.data,
p.properties.memberships,
lvl + 1,
next_ranges)
return node
def generate_leaf(self, data, memberships):
node = FuzzyNode()
node.is_terminal = True
node.classification = self.classification(data, memberships)
return node
def select_partitioning(self, data, memberships, regular_features, ranges):
node = FuzzyNode()
features = np.random.choice(regular_features,
min(self.p, len(regular_features)),
replace=False)
feature_partitionings = {}
for feature in features:
feature_partitionings[feature] = \
self.best_partitioning(feature, data, memberships, ranges)
node.feature = max(feature_partitionings,
key=lambda x: feature_partitionings[x].gain)
node.partitioning = feature_partitionings[node.feature]
        # information gain = parent fuzzy entropy minus the weighted child
        # entropy (self.gain returns the negative weighted child entropy)
        node.partitioning.gain = self._fuzzy_entropy(data, memberships) + node.partitioning.gain
return node
def get_regular_features(self, data):
regular_features = []
for i in range(len(self.ranges)):
curr_range = self.ranges[i]
inds = np.logical_and(data[:, i] != curr_range[0], data[:, i] != curr_range[1]).nonzero()[0]
if curr_range[0] != curr_range[1] and inds.shape[0] != 0:
regular_features.append(i)
return regular_features
def is_terminal(self, node, data, memberships):
if memberships.shape[0] == 0:
return True
empty_partitions = 0
for partition in node.partitioning.partitions:
if partition.properties.memberships.shape[0] <= 1:
empty_partitions += 1
if empty_partitions >= 2:
return True
data_classes = data[:, -1]
all_same = True
for i in range(1, data_classes.shape[0]):
if int(data_classes[i]) != int(data_classes[0]):
all_same = False
break
if all_same:
return True
if abs(node.partitioning.gain) <= 0.000001:
return True
else:
return False
def forward_pass(self,
result_memberships,
x,
node,
membership=1):
if node.is_terminal:
for c in self.classes:
result_memberships[c] += node.classification[c] * membership
else:
for partition in node.partitioning.partitions:
next_membership = membership * partition.f(x[node.feature])
next_node = partition.node
self.forward_pass(result_memberships,
x,
next_node,
next_membership)
@staticmethod
def count_features(data):
return data.shape[1] - 1
def classification(self, data, memberships):
classification_val = {}
for c in self.classes:
inds = (data[:, -1] == c).nonzero()[0]
classification_val[c] = np.sum(memberships[inds])
return classification_val
def best_partitioning(self, feature, data, memberships, ranges):
if feature in self.categorical_features:
max_partitioning = FuzzyPartitioning()
max_category = int(self.ranges[feature][1])
min_category = int(self.ranges[feature][0])
for category in range(min_category, max_category + 1):
                partition = FuzzyPartition()
                partition.properties = FuzzySetProperties()
                # categorical partitions keep the parent ranges; without this,
                # build_tree would later read an unset `ranges` attribute
                partition.ranges = copy(ranges)
                def f(x, category=category):
                    # bind the current category eagerly; a plain closure would
                    # only ever see the last value of the loop variable
                    if int(x) == category:
                        return 1
                    else:
                        return 0
                partition.f = f
                inds = (data[:, feature] == category).nonzero()[0]
                partition.properties.data = data[inds, :]
                max_partitioning.partitions.append(partition)
self.set_properties(max_partitioning.partitions,
data,
feature,
memberships)
max_partitioning.gain = \
self.gain(max_partitioning.partitions, memberships)
else:
points = np.unique(data[:, feature])
L, U = self.ranges[feature]
point_partitionings = {}
regular_point_occured = False
last_point = None
meaningful_length = (U - L) / 10
for p in points:
if last_point is None or p - last_point > meaningful_length:
if p != L and p != U:
curr_partitioning = self.partitioning(data, feature, p, memberships, ranges)
if self.count_zero(curr_partitioning) < 2:
regular_point_occured = True
point_partitionings[p] = \
curr_partitioning
last_point = p
if not regular_point_occured:
midpoint = L + (U - L) / 2
max_partitioning = self.partitioning(data,
feature,
midpoint,
memberships,
ranges)
max_partitioning.midpoint = midpoint
else:
max_partitioning_key = max(point_partitionings,
key=lambda x: point_partitionings[x].gain)
max_partitioning = point_partitionings[max_partitioning_key]
max_partitioning.midpoint = max_partitioning_key
self.print_partitioning(max_partitioning, data, feature, ranges)
return max_partitioning
def count_zero(self, partitioning):
cnt = 0
for part in partitioning.partitions:
if part.properties.entropy == 0:
cnt += 1
return cnt
def partitioning(self, data, feature, p, memberships, ranges):
part = FuzzyPartitioning()
L, U = self.ranges[feature]
W_left = 2 * (p - L)
W_middle_left = (p - L)
W_middle_right = (U - p)
W_right = 2 * (U - p)
# TODO: generalize to more
left_partition = FuzzyPartition()
left_partition.f = math_functions.triangular(L,
W_left)
left_partition.ranges = copy(ranges)
left_partition.ranges[feature] = L, p
left_partition.properties = []
middle_partition = FuzzyPartition()
middle_partition.f = \
math_functions.composite_triangular(p,
W_middle_left,
W_middle_right)
middle_partition.ranges = copy(ranges)
middle_partition.ranges[feature] = L, U
middle_partition.properties = []
right_partition = FuzzyPartition()
right_partition.f = math_functions.triangular(U,
W_right)
right_partition.ranges = copy(ranges)
right_partition.ranges[feature] = p, U
right_partition.properties = []
part.partitions = [left_partition,
middle_partition,
right_partition]
self.set_properties(part.partitions, data, feature, memberships)
part.gain = self.gain(part.partitions, memberships)
return part
def print_partitioning(self, partitioning, data, feature, ranges):
rng = ranges[feature]
data = data[data[:, feature].argsort()]
data_table = []
for d in data:
data_arr = [d[-1]]
for partition in partitioning.partitions:
data_arr.append(round(partition.f(d[feature]), 2))
data_table.append(data_arr)
print(tabulate(data_table,
headers=['Class', 'First', 'Second', 'Third'],
tablefmt='orgtbl'))
for partition in partitioning.partitions:
            partition_sums = {}
            for d in data:
                # sum memberships per class for points inside the partition core;
                # the original looped over self.classes without using it and
                # dropped each class's first point by initializing it to 0
                if partition.f(d[feature]) >= 0.5:
                    partition_sums[d[-1]] = partition_sums.get(d[-1], 0) + partition.f(d[feature])
print(partition_sums)
print("Gain: ", partitioning.gain)
xs = np.arange(rng[0], rng[1], 0.05).tolist()
for partition in partitioning.partitions:
ys = []
for x in xs:
ys.append(partition.f(x))
plt.plot(xs, ys, color="g")
xs = []
ys = []
zs = []
for d in data:
xs.append(d[feature])
ys.append(0.5)
zs.append(d[-1])
plt.scatter(xs, ys, c=zs)
plt.show()
def set_properties(self, partitions, data, feature, memberships):
for partition in partitions:
prop = self._fuzzy_set_properties(data,
feature,
partition,
memberships)
partition.properties = prop
def gain(self, partitions, memberships):
data_cardinality = np.sum(memberships)
if len(partitions) == 0:
raise ValueError("Empty partitions")
properties = [part.properties for part in partitions]
gain_value = 0
for prop in properties:
gain_value -= (prop.cardinality / data_cardinality) * prop.entropy
return gain_value
def _fuzzy_set_properties(self, data, feature, partition, memberships):
if data.shape.__contains__(0):
raise ValueError("Empty array")
membership_f = np.vectorize(partition.f)
data_at_feature = np.copy(data[:, feature])
set_memberships = membership_f(data_at_feature)
set_memberships = np.multiply(memberships, set_memberships)
non_zero_inds = (set_memberships >= self.a_cut).nonzero()[0]
set_memberships = set_memberships[non_zero_inds]
set_data = data[non_zero_inds, :]
cardinality = np.sum(set_memberships)
entropy = self._fuzzy_entropy(set_data,
set_memberships,
cardinality)
properties = FuzzySetProperties()
properties.cardinality = cardinality
properties.entropy = entropy
non_zero_inds = (set_memberships >= self.a_cut).nonzero()[0]
set_data = data[non_zero_inds, :]
set_memberships = set_memberships[non_zero_inds]
properties.data = set_data
properties.memberships = set_memberships
return properties
def _fuzzy_entropy(self, data, memberships, cardinality=None):
if self.should_generate_tests(data):
self.generate_fuzzy_entropy_test(data,
memberships,
cardinality)
if data.shape.__contains__(0):
return 0
# raise ValueError("Empty array")
entropy = 0
if cardinality is None:
cardinality = np.sum(memberships)
if cardinality != 0:
for c in self.classes:
inds = (data[:, -1] == c).nonzero()[0]
memberships_at_inds = memberships[inds]
proba = np.sum(memberships_at_inds) / cardinality
if proba != 0:
entropy -= proba * log(proba, 2)
return entropy
def should_generate_tests(self, data):
return self.test_generation_file is not None and \
20 < data.shape[0] < 50 and \
self.test_cases_generated < 3
def generate_fuzzy_entropy_test(self, data, memberships, cardinality):
self.test_cases_generated += 1
test_cases_file = open(self.test_generation_file, "a")
print("\t\tGenerating tests")
data = data[:, (-2, -1)].tolist()
memberships = memberships.tolist()
indentation = [" " for i in range(self.test_indentation_level)]
indentation = "".join(indentation)
print("", file=test_cases_file)
test_id = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))
print("%sdef testFuzzyEntropy_generated_%s(self):" % (indentation, test_id), file=test_cases_file)
wrapper = textwrap.TextWrapper(initial_indent="%s " % indentation, width=80,
subsequent_indent=' ' * 24)
data_str = "data = np.array(%s)" % (data)
print(wrapper.fill(data_str), file=test_cases_file)
        memberships_str = "memberships = np.array(%s)" % (memberships)
print(wrapper.fill(memberships_str), file=test_cases_file)
print("%s cardinality = %s" % (indentation, cardinality), file=test_cases_file)
result = "self.tree._fuzzy_entropy(data, memberships, cardinality)"
print("%s self.assertAlmostEqual(%s, 0, 2)" % (indentation, result), file=test_cases_file)
print("", file=test_cases_file)
test_cases_file.close()
def __str__(self):
raise NotImplementedError()
|
[
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.vectorize",
"numpy.copy",
"numpy.multiply",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.plot",
"math.sqrt",
"copy.copy",
"random.choice",
"numpy.logical_and",
"textwrap.TextWrapper",
"numpy.array",
"tabulate.tabulate",
"numpy.arange",
"math.log",
"pyximport.install",
"numpy.unique"
] |
[((149, 168), 'pyximport.install', 'pyximport.install', ([], {}), '()\n', (166, 168), False, 'import pyximport\n'), ((10474, 10486), 'copy.copy', 'copy', (['ranges'], {}), '(ranges)\n', (10478, 10486), False, 'from copy import copy\n'), ((10860, 10872), 'copy.copy', 'copy', (['ranges'], {}), '(ranges)\n', (10864, 10872), False, 'from copy import copy\n'), ((11159, 11171), 'copy.copy', 'copy', (['ranges'], {}), '(ranges)\n', (11163, 11171), False, 'from copy import copy\n'), ((12980, 13005), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'c': 'zs'}), '(xs, ys, c=zs)\n', (12991, 13005), True, 'import matplotlib.pyplot as plt\n'), ((13014, 13024), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13022, 13024), True, 'import matplotlib.pyplot as plt\n'), ((13470, 13489), 'numpy.sum', 'np.sum', (['memberships'], {}), '(memberships)\n', (13476, 13489), True, 'import numpy as np\n'), ((13979, 14004), 'numpy.vectorize', 'np.vectorize', (['partition.f'], {}), '(partition.f)\n', (13991, 14004), True, 'import numpy as np\n'), ((14032, 14057), 'numpy.copy', 'np.copy', (['data[:, feature]'], {}), '(data[:, feature])\n', (14039, 14057), True, 'import numpy as np\n'), ((14141, 14182), 'numpy.multiply', 'np.multiply', (['memberships', 'set_memberships'], {}), '(memberships, set_memberships)\n', (14152, 14182), True, 'import numpy as np\n'), ((14375, 14398), 'numpy.sum', 'np.sum', (['set_memberships'], {}), '(set_memberships)\n', (14381, 14398), True, 'import numpy as np\n'), ((16677, 16778), 'textwrap.TextWrapper', 'textwrap.TextWrapper', ([], {'initial_indent': "('%s ' % indentation)", 'width': '(80)', 'subsequent_indent': "(' ' * 24)"}), "(initial_indent='%s ' % indentation, width=80,\n subsequent_indent=' ' * 24)\n", (16697, 16778), False, 'import textwrap\n'), ((1880, 1893), 'numpy.copy', 'np.copy', (['data'], {}), '(data)\n', (1887, 1893), True, 'import numpy as np\n'), ((2237, 2268), 'numpy.array', 'np.array', (['[(1.0) for d in data]'], {}), '([(1.0) for d in data])\n', (2245, 2268), True, 'import numpy as np\n'), ((6951, 6976), 'numpy.sum', 'np.sum', (['memberships[inds]'], {}), '(memberships[inds])\n', (6957, 6976), True, 'import numpy as np\n'), ((8179, 8206), 'numpy.unique', 'np.unique', (['data[:, feature]'], {}), '(data[:, feature])\n', (8188, 8206), True, 'import numpy as np\n'), ((11954, 12044), 'tabulate.tabulate', 'tabulate', (['data_table'], {'headers': "['Class', 'First', 'Second', 'Third']", 'tablefmt': '"""orgtbl"""'}), "(data_table, headers=['Class', 'First', 'Second', 'Third'],\n tablefmt='orgtbl')\n", (11962, 12044), False, 'from tabulate import tabulate\n'), ((12781, 12808), 'matplotlib.pyplot.plot', 'plt.plot', (['xs', 'ys'], {'color': '"""g"""'}), "(xs, ys, color='g')\n", (12789, 12808), True, 'import matplotlib.pyplot as plt\n'), ((15426, 15445), 'numpy.sum', 'np.sum', (['memberships'], {}), '(memberships)\n', (15432, 15445), True, 'import numpy as np\n'), ((2031, 2051), 'math.sqrt', 'sqrt', (['self.n_feature'], {}), '(self.n_feature)\n', (2035, 2051), False, 'from math import log, sqrt, ceil\n'), ((3545, 3557), 'copy.copy', 'copy', (['ranges'], {}), '(ranges)\n', (3549, 3557), False, 'from copy import copy\n'), ((12590, 12621), 'numpy.arange', 'np.arange', (['rng[0]', 'rng[1]', '(0.05)'], {}), '(rng[0], rng[1], 0.05)\n', (12599, 12621), True, 'import numpy as np\n'), ((16478, 16531), 'random.choice', 'random.choice', (['(string.ascii_uppercase + string.digits)'], {}), '(string.ascii_uppercase + string.digits)\n', (16491, 16531), False, 'import random\n'), 
((2109, 2131), 'math.log', 'log', (['self.n_feature', '(2)'], {}), '(self.n_feature, 2)\n', (2112, 2131), False, 'from math import log, sqrt, ceil\n'), ((15646, 15673), 'numpy.sum', 'np.sum', (['memberships_at_inds'], {}), '(memberships_at_inds)\n', (15652, 15673), True, 'import numpy as np\n'), ((5027, 5099), 'numpy.logical_and', 'np.logical_and', (['(data[:, i] != curr_range[0])', '(data[:, i] != curr_range[1])'], {}), '(data[:, i] != curr_range[0], data[:, i] != curr_range[1])\n', (5041, 5099), True, 'import numpy as np\n'), ((15758, 15771), 'math.log', 'log', (['proba', '(2)'], {}), '(proba, 2)\n', (15761, 15771), False, 'from math import log, sqrt, ceil\n')]
|
#! /usr/bin/env python3
import argparse
from pathlib import Path
import json
import sys
from scriptutil import calc
C0_OFF = "Task: C0, Corunner: OFF"
C0_ON = "Task: C0, Corunner: ON"
C1_OFF = "Task: C1, Corunner: OFF"
C1_ON = "Task: C1, Corunner: ON"
def getopts(argv):
parser = argparse.ArgumentParser()
parser.add_argument("file1", type=Path)
parser.add_argument("file2", type=Path)
return parser.parse_args(argv[1:])
def gen_stats(data):
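    """Render a LaTeX tabular of per-EA maxima (corunner ON/OFF, per core)
    and the ratio R computed by scriptutil.calc."""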
text = r"""
\begin{tabular}{ |c|r|r|r||r|r|r| }\hline
& \multicolumn{3}{c||}{\textbf{Core 1}} & \multicolumn{3}{c|}{\textbf{Core 2}} \\\hline
\textbf{EA} & \textbf{max(a)} \textit{(ms)} & \textbf{max(b)} \textit{(ms)} & %
$\bm{R(a, b)}$ \textit{(\%)}& %
\textbf{max(c)} \textit{(ms)} & \textbf{max(d)} \textit{(ms)} & %
$\bm{R(c, d)}$ \textit{(\%)} \\\hline
"""
for ea, info in sorted(data.items()):
values = {
C0_OFF: 0.0,
C0_ON: 0.0,
C1_OFF: 0.0,
C1_ON: 0.0,
}
for value, sample in zip(info["values"], info["sample"]):
assert sample in values, f"Unknown sample {sample}"
values[sample] = max(values[sample], value)
r0 = calc(values[C0_OFF], values[C0_ON])
r1 = calc(values[C1_OFF], values[C1_ON])
text += f"${ea}$ & "
text += f"{values[C0_OFF]:.3f} & {values[C0_ON]:.3f} & "
if r0 > 0.01:
text += r'\textbf{' + f"{r0:.3f} " + r'}'
else:
text += f"{r0:.3f}"
text += ' & '
text += f"{values[C1_OFF]:.3f} & {values[C1_ON]:.3f} &"
if r1 > 0.01:
text += r'\textbf{' + f"{r1:.3f} " + r'} '
else:
text += f"{r1:.3f}"
text += ' \\\\\n'
text += r"""\hline
\end{tabular}
"""
print(text)
def main(argv):
args = getopts(argv)
with open(args.file1, "r") as inp:
d1 = json.load(inp)
with open(args.file2, "r") as inp:
d2 = json.load(inp)
def collect_values(info):
values = {
C0_OFF: 0.0,
C0_ON: 0.0,
C1_OFF: 0.0,
C1_ON: 0.0,
}
for value, sample in zip(info["values"], info["sample"]):
assert sample in values, f"Unknown sample {sample}"
values[sample] = max(values[sample], value)
return values
text = r"""
\begin{tabular}{ |c|r|r|r||r|r|r| }\hline
& \multicolumn{3}{c||}{\textbf{Core 1}} & \multicolumn{3}{c|}{\textbf{Core 2}} \\\hline
\textbf{EA} & $\Delta_{max(a)}$ \textit{(ms)} & $\Delta_{max(b)}$ \textit{(ms)} & %
$\Delta_{R(a, b)}$ \textit{(\%)}& %
$\Delta_{max(c)}$ \textit{(ms)} & $\Delta_{max(d)}$ \textit{(ms)} & %
$\Delta_{R(c, d)}$ \textit{(\%)} \\\hline
"""
for ea in sorted(d1):
info1 = d1[ea]
info2 = d2[ea]
vals1 = collect_values(info1)
vals2 = collect_values(info2)
r0_1 = calc(vals1[C0_OFF], vals1[C0_ON])
r0_2 = calc(vals2[C0_OFF], vals2[C0_ON])
r1_1 = calc(vals1[C1_OFF], vals1[C1_ON])
r1_2 = calc(vals2[C1_OFF], vals2[C1_ON])
text += f"${ea}$ & "
text += f"{vals1[C0_OFF]-vals2[C0_OFF]:+.3f} & {vals1[C0_ON]-vals2[C0_ON]:+.3f} & "
text += f"{r0_1-r0_2:+.3f} & "
text += f"{vals1[C1_OFF]-vals2[C1_OFF]:+.3f} & {vals1[C1_ON]-vals2[C1_ON]:+.3f} & "
text += f"{r1_1-r1_2:+.3f}"
text += ' \\\\\n'
text += r"""\hline
\end{tabular}
"""
print(text)
if __name__ == "__main__":
main(sys.argv)
|
[
"scriptutil.calc",
"json.load",
"argparse.ArgumentParser"
] |
[((288, 313), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (311, 313), False, 'import argparse\n'), ((1213, 1248), 'scriptutil.calc', 'calc', (['values[C0_OFF]', 'values[C0_ON]'], {}), '(values[C0_OFF], values[C0_ON])\n', (1217, 1248), False, 'from scriptutil import calc\n'), ((1262, 1297), 'scriptutil.calc', 'calc', (['values[C1_OFF]', 'values[C1_ON]'], {}), '(values[C1_OFF], values[C1_ON])\n', (1266, 1297), False, 'from scriptutil import calc\n'), ((1904, 1918), 'json.load', 'json.load', (['inp'], {}), '(inp)\n', (1913, 1918), False, 'import json\n'), ((1971, 1985), 'json.load', 'json.load', (['inp'], {}), '(inp)\n', (1980, 1985), False, 'import json\n'), ((2911, 2944), 'scriptutil.calc', 'calc', (['vals1[C0_OFF]', 'vals1[C0_ON]'], {}), '(vals1[C0_OFF], vals1[C0_ON])\n', (2915, 2944), False, 'from scriptutil import calc\n'), ((2960, 2993), 'scriptutil.calc', 'calc', (['vals2[C0_OFF]', 'vals2[C0_ON]'], {}), '(vals2[C0_OFF], vals2[C0_ON])\n', (2964, 2993), False, 'from scriptutil import calc\n'), ((3009, 3042), 'scriptutil.calc', 'calc', (['vals1[C1_OFF]', 'vals1[C1_ON]'], {}), '(vals1[C1_OFF], vals1[C1_ON])\n', (3013, 3042), False, 'from scriptutil import calc\n'), ((3058, 3091), 'scriptutil.calc', 'calc', (['vals2[C1_OFF]', 'vals2[C1_ON]'], {}), '(vals2[C1_OFF], vals2[C1_ON])\n', (3062, 3091), False, 'from scriptutil import calc\n')]
|
"""
Multivariate version of exchange rate prediction.
"""
import os
os.system("clear")
import sys
sys.path.append("./core/containers/")
sys.path.append("./core/models/")
sys.path.append("./core/tools/")
import datetime
import keras
import pandas as pd
import numpy as np
import matplotlib
# TODO: add auto-detect
# for mac OS: os.name == "posix" and sys.platform == "darwin"
# Use this identifier to automatically decide the following.
on_server = bool(int(input("Are you on a server without graphic output? [0/1] >>> ")))
if on_server:
matplotlib.use(
"agg",
warn=False,
force=True
)
from matplotlib import pyplot as plt
import sklearn
from bokeh.plotting import figure
from bokeh.layouts import row, column
from bokeh.models import HoverTool
from bokeh.io import show, output_file
from typing import Union, List
# import config
# import methods
# from methods import *
# from models import *
from multi_config import *
from multivariate_container import MultivariateContainer
from multivariate_lstm import MultivariateLSTM
from bokeh_visualize import advanced_visualize as bvis
def train_new_model():
"""
Train a new model.
"""
print(f"Control: Building new container from {file_dir}...")
print(f"\tTarget is {target}")
# Build up containers.
container = MultivariateContainer(
file_dir,
target,
load_multi_ex,
CON_config)
print(chr(9608))
print("Control: Building up models...")
model = MultivariateLSTM(container, NN_config)
print(chr(9608))
model.fit_model(epochs=int(input("Training epochs >>> ")))
save_destination = input("Folder name to save model? [Enter] Using default >>> ")
print("Control: Saving model training result...")
if save_destination == "":
model.save_model()
else:
model.save_model(file_dir=save_destination)
print(chr(9608))
def visualize_training_result():
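    """Load a saved model, forecast the train and test sets, invert the
    differencing, and plot predictions against the ground truth."""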
print(f"Contro;: Building up container from {file_dir}...")
container = MultivariateContainer(
file_dir,
target,
load_multi_ex,
CON_config)
print(chr(9608))
print("Control: Building empty model...")
model = MultivariateLSTM(container, NN_config, create_empty=True)
print(chr(9608))
load_target = input("Model folder name >>> ")
load_target = f"./saved_models/{load_target}/"
print(f"Control: Loading model from {load_target}...")
model.load_model(
folder_dir=load_target
)
print(chr(9608))
# Forecast testing set.
yhat = model.predict(model.container.test_X)
yhat = model.container.invert_difference(
yhat,
range(
model.container.num_obs-len(yhat),
model.container.num_obs
),
fillnone=True
)
    # Forecast training set.
train_yhat = model.predict(model.container.train_X)
train_yhat = model.container.invert_difference(
train_yhat, range(len(train_yhat)), fillnone=True
)
# Visualize
plt.close()
plt.plot(yhat, linewidth=0.6, alpha=0.6, label="Test set yhat")
plt.plot(train_yhat, linewidth=0.6, alpha=0.6, label="Train set yhat")
plt.plot(model.container.ground_truth_y, linewidth=1.2, alpha=0.3, label="actual")
plt.legend()
action = input("Plot result? \n\t[P] plot result. \n\t[S] save result. \n\t>>>")
assert action.lower() in ["p", "s"], "Invalid command."
if action.lower() == "p":
plt.show()
elif action.lower() == "s":
fig_name = str(datetime.datetime.now())
plt.savefig(f"./figure/{fig_name}.svg")
print(f"Control: figure saved to ./figure/{fig_name}.svg")
if __name__ == "__main__":
print("""
=====================================================================
Hey, you are using the Multivariate Exchange Rate Forecasting Model
This is a neural network developed to forecast economic indicators
The model is based on Keras
@Spikey
Version. 0.0.1, Sep. 11 2018
Important files
Configuration file: ./multi_config.py
Model definition file: ./models.py
""")
task = input("""
What to do?
[N] Train new model.
[R] Restore saved model and continue training.
[V] Visualize training result using matplotlib.
[B] Visualize training result using bokeh.
[Q] Quit.
>>> """)
assert task.lower() in ["n", "r", "v", "q", "b"], "Invalid task."
if task.lower() == "n":
train_new_model()
elif task.lower() == "r":
raise NotImplementedError
elif task.lower() == "v":
visualize_training_result()
elif task.lower() == "b":
bvis(
file_dir=file_dir,
target=target,
load_multi_ex=load_multi_ex,
CON_config=CON_config,
NN_config=NN_config
)
elif task.lower() == "q":
quit()
|
[
"sys.path.append",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"os.system",
"multivariate_container.MultivariateContainer",
"matplotlib.use",
"bokeh_visualize.advanced_visualize",
"multivariate_lstm.MultivariateLSTM",
"datetime.datetime.now",
"matplotlib.pyplot.savefig"
] |
[((63, 81), 'os.system', 'os.system', (['"""clear"""'], {}), "('clear')\n", (72, 81), False, 'import os\n'), ((93, 130), 'sys.path.append', 'sys.path.append', (['"""./core/containers/"""'], {}), "('./core/containers/')\n", (108, 130), False, 'import sys\n'), ((131, 164), 'sys.path.append', 'sys.path.append', (['"""./core/models/"""'], {}), "('./core/models/')\n", (146, 164), False, 'import sys\n'), ((165, 197), 'sys.path.append', 'sys.path.append', (['"""./core/tools/"""'], {}), "('./core/tools/')\n", (180, 197), False, 'import sys\n'), ((537, 582), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {'warn': '(False)', 'force': '(True)'}), "('agg', warn=False, force=True)\n", (551, 582), False, 'import matplotlib\n'), ((1323, 1389), 'multivariate_container.MultivariateContainer', 'MultivariateContainer', (['file_dir', 'target', 'load_multi_ex', 'CON_config'], {}), '(file_dir, target, load_multi_ex, CON_config)\n', (1344, 1389), False, 'from multivariate_container import MultivariateContainer\n'), ((1501, 1539), 'multivariate_lstm.MultivariateLSTM', 'MultivariateLSTM', (['container', 'NN_config'], {}), '(container, NN_config)\n', (1517, 1539), False, 'from multivariate_lstm import MultivariateLSTM\n'), ((2026, 2092), 'multivariate_container.MultivariateContainer', 'MultivariateContainer', (['file_dir', 'target', 'load_multi_ex', 'CON_config'], {}), '(file_dir, target, load_multi_ex, CON_config)\n', (2047, 2092), False, 'from multivariate_container import MultivariateContainer\n'), ((2206, 2263), 'multivariate_lstm.MultivariateLSTM', 'MultivariateLSTM', (['container', 'NN_config'], {'create_empty': '(True)'}), '(container, NN_config, create_empty=True)\n', (2222, 2263), False, 'from multivariate_lstm import MultivariateLSTM\n'), ((3030, 3041), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3039, 3041), True, 'from matplotlib import pyplot as plt\n'), ((3046, 3109), 'matplotlib.pyplot.plot', 'plt.plot', (['yhat'], {'linewidth': '(0.6)', 'alpha': '(0.6)', 'label': '"""Test set yhat"""'}), "(yhat, linewidth=0.6, alpha=0.6, label='Test set yhat')\n", (3054, 3109), True, 'from matplotlib import pyplot as plt\n'), ((3114, 3184), 'matplotlib.pyplot.plot', 'plt.plot', (['train_yhat'], {'linewidth': '(0.6)', 'alpha': '(0.6)', 'label': '"""Train set yhat"""'}), "(train_yhat, linewidth=0.6, alpha=0.6, label='Train set yhat')\n", (3122, 3184), True, 'from matplotlib import pyplot as plt\n'), ((3189, 3276), 'matplotlib.pyplot.plot', 'plt.plot', (['model.container.ground_truth_y'], {'linewidth': '(1.2)', 'alpha': '(0.3)', 'label': '"""actual"""'}), "(model.container.ground_truth_y, linewidth=1.2, alpha=0.3, label=\n 'actual')\n", (3197, 3276), True, 'from matplotlib import pyplot as plt\n'), ((3276, 3288), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3286, 3288), True, 'from matplotlib import pyplot as plt\n'), ((3473, 3483), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3481, 3483), True, 'from matplotlib import pyplot as plt\n'), ((3572, 3611), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""./figure/{fig_name}.svg"""'], {}), "(f'./figure/{fig_name}.svg')\n", (3583, 3611), True, 'from matplotlib import pyplot as plt\n'), ((3539, 3562), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3560, 3562), False, 'import datetime\n'), ((4697, 4812), 'bokeh_visualize.advanced_visualize', 'bvis', ([], {'file_dir': 'file_dir', 'target': 'target', 'load_multi_ex': 'load_multi_ex', 'CON_config': 'CON_config', 'NN_config': 'NN_config'}), 
'(file_dir=file_dir, target=target, load_multi_ex=load_multi_ex,\n CON_config=CON_config, NN_config=NN_config)\n', (4701, 4812), True, 'from bokeh_visualize import advanced_visualize as bvis\n')]
|
import numpy as np
a = np.array([
[1, 2, 3],
[4, 5, 6]
])
print("print(a)")
print(a)
print()
print("print(a.T)")
print(a.T)
print()
print("print(a.dot(2))")
print(a.dot(2))
print()
print("print(a.dot(np.array([2, 2, 2])))")
print(a.dot(np.array([2, 2, 2])))
print()
|
[
"numpy.array"
] |
[((24, 56), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (32, 56), True, 'import numpy as np\n'), ((250, 269), 'numpy.array', 'np.array', (['[2, 2, 2]'], {}), '([2, 2, 2])\n', (258, 269), True, 'import numpy as np\n')]
|
"""
Sample Python 3.5 application that has plugin support.
It dynamically loads plugins from the 'plugins' directory.
Two types of plugins are supported: commands and hooks.
A command is executed if it matches a cmdline argument.
A hook is executed before and after each command...
Example usage:
main.py print upper print lower print
"""
import sys
import logging
from app.args import get_args
from app.processor import process_commands
def main(argv):
print('My Plugin Demo')
logging.basicConfig(level=logging.DEBUG)
logging.debug("Starting")
args = get_args(argv)
input_obj = "HeLLo WOrLD!" # TODO: this could perhaps be stdin...
process_commands(input_obj, args.commands)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
[
"app.processor.process_commands",
"app.args.get_args",
"logging.debug",
"logging.basicConfig"
] |
[((497, 537), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (516, 537), False, 'import logging\n'), ((542, 567), 'logging.debug', 'logging.debug', (['"""Starting"""'], {}), "('Starting')\n", (555, 567), False, 'import logging\n'), ((579, 593), 'app.args.get_args', 'get_args', (['argv'], {}), '(argv)\n', (587, 593), False, 'from app.args import get_args\n'), ((669, 711), 'app.processor.process_commands', 'process_commands', (['input_obj', 'args.commands'], {}), '(input_obj, args.commands)\n', (685, 711), False, 'from app.processor import process_commands\n')]
|
import random
import numpy
import torch
from backobs.integration import extend as backobs_extend
from backobs.integration import (
extend_with_access_unreduced_loss as backobs_extend_with_access_unreduced_loss,
)
def set_deepobs_seed(seed=0):
"""Set all seeds used by DeepOBS."""
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
def set_up_problem(
tproblem_cls,
batch_size,
force_no_l2_reg=True,
seed=None,
extend=False,
unreduced_loss=False,
):
"""Create problem with neural network, and set to train mode."""
    if seed is not None:
        set_deepobs_seed(seed)
if force_no_l2_reg:
tproblem = tproblem_cls(batch_size, l2_reg=0.0)
else:
tproblem = tproblem_cls(batch_size)
tproblem.set_up()
tproblem.train_init_op()
if unreduced_loss and not extend:
raise ValueError("To use unreduced_loss, enable the extend option.")
if extend:
if unreduced_loss:
backobs_extend_with_access_unreduced_loss(tproblem)
else:
tproblem = backobs_extend(tproblem)
return tproblem
def get_reduction_factor(loss, unreduced_loss):
"""Return the factor used to reduce the individual losses."""
mean_loss = unreduced_loss.flatten().mean()
sum_loss = unreduced_loss.flatten().sum()
if torch.allclose(mean_loss, sum_loss):
raise RuntimeError(
"Cannot determine reduction factor. ",
"Results from 'mean' and 'sum' reduction are identical. ",
f"'mean': {mean_loss}, 'sum': {sum_loss}",
)
if torch.allclose(loss, mean_loss):
factor = 1.0 / unreduced_loss.numel()
elif torch.allclose(loss, sum_loss):
factor = 1.0
else:
raise RuntimeError(
"Reductions 'mean' or 'sum' do not match with loss. ",
f"'mean': {mean_loss}, 'sum': {sum_loss}, loss: {loss}",
)
return factor
atol = 1e-5
rtol = 1e-5
def report_nonclose_values(x, y):
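    """Print every element pair of x and y that is not close within atol/rtol."""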
x_numpy = x.data.cpu().numpy().flatten()
y_numpy = y.data.cpu().numpy().flatten()
close = numpy.isclose(x_numpy, y_numpy, atol=atol, rtol=rtol)
where_not_close = numpy.argwhere(numpy.logical_not(close))
for idx in where_not_close:
x, y = x_numpy[idx], y_numpy[idx]
print("{} versus {}. Ratio of {}".format(x, y, y / x))
def check_sizes_and_values(*plists, atol=atol, rtol=rtol):
check_sizes(*plists)
list1, list2 = plists
check_values(list1, list2, atol=atol, rtol=rtol)
def check_sizes(*plists):
for i in range(len(plists) - 1):
assert len(plists[i]) == len(plists[i + 1])
for params in zip(*plists):
for i in range(len(params) - 1):
assert params[i].size() == params[i + 1].size()
def check_values(list1, list2, atol=atol, rtol=rtol):
for i, (g1, g2) in enumerate(zip(list1, list2)):
print(i)
print(g1.size())
report_nonclose_values(g1, g2)
assert torch.allclose(g1, g2, atol=atol, rtol=rtol)
|
[
"numpy.random.seed",
"torch.manual_seed",
"numpy.logical_not",
"numpy.isclose",
"random.seed",
"backobs.integration.extend_with_access_unreduced_loss",
"torch.allclose",
"backobs.integration.extend"
] |
[((296, 313), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (307, 313), False, 'import random\n'), ((318, 341), 'numpy.random.seed', 'numpy.random.seed', (['seed'], {}), '(seed)\n', (335, 341), False, 'import numpy\n'), ((346, 369), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (363, 369), False, 'import torch\n'), ((1348, 1383), 'torch.allclose', 'torch.allclose', (['mean_loss', 'sum_loss'], {}), '(mean_loss, sum_loss)\n', (1362, 1383), False, 'import torch\n'), ((1608, 1639), 'torch.allclose', 'torch.allclose', (['loss', 'mean_loss'], {}), '(loss, mean_loss)\n', (1622, 1639), False, 'import torch\n'), ((2117, 2170), 'numpy.isclose', 'numpy.isclose', (['x_numpy', 'y_numpy'], {'atol': 'atol', 'rtol': 'rtol'}), '(x_numpy, y_numpy, atol=atol, rtol=rtol)\n', (2130, 2170), False, 'import numpy\n'), ((1696, 1726), 'torch.allclose', 'torch.allclose', (['loss', 'sum_loss'], {}), '(loss, sum_loss)\n', (1710, 1726), False, 'import torch\n'), ((2208, 2232), 'numpy.logical_not', 'numpy.logical_not', (['close'], {}), '(close)\n', (2225, 2232), False, 'import numpy\n'), ((2992, 3036), 'torch.allclose', 'torch.allclose', (['g1', 'g2'], {'atol': 'atol', 'rtol': 'rtol'}), '(g1, g2, atol=atol, rtol=rtol)\n', (3006, 3036), False, 'import torch\n'), ((995, 1046), 'backobs.integration.extend_with_access_unreduced_loss', 'backobs_extend_with_access_unreduced_loss', (['tproblem'], {}), '(tproblem)\n', (1036, 1046), True, 'from backobs.integration import extend_with_access_unreduced_loss as backobs_extend_with_access_unreduced_loss\n'), ((1084, 1108), 'backobs.integration.extend', 'backobs_extend', (['tproblem'], {}), '(tproblem)\n', (1098, 1108), True, 'from backobs.integration import extend as backobs_extend\n')]
|
import copy
import numpy as np
import os
import torch
import pickle
from detectron2.data import MetadataCatalog
from detectron2.data import detection_utils as utils
from detectron2.structures import (
BitMasks,
Boxes,
BoxMode,
Instances,
PolygonMasks,
polygons_to_bitmask,
)
import pycocotools.mask as mask_util
from PIL import Image
import torchvision.transforms as transforms
from . import GaussianBlur
__all__ = ["PlaneRCNNMapper"]
def convert_PIL_to_numpy(image, format):
"""
Convert PIL image to numpy array of target format.
Args:
image (PIL.Image): a PIL image
format (str): the format of output image
Returns:
(np.ndarray): also see `read_image`
"""
if format is not None:
# PIL only supports RGB, so convert to RGB and flip channels over below
conversion_format = format
if format in ["BGR", "YUV-BT.601"]:
conversion_format = "RGB"
image = image.convert(conversion_format)
image = np.asarray(image)
# PIL squeezes out the channel dimension for "L", so make it HWC
if format == "L":
image = np.expand_dims(image, -1)
# handle formats not supported by PIL
elif format == "BGR":
# flip channels if needed
image = image[:, :, ::-1]
elif format == "YUV-BT.601":
image = image / 255.0
image = np.dot(image, np.array(_M_RGB2YUV).T)
return image
def annotations_to_instances(
annos, image_size, mask_format="polygon", max_num_planes=20
):
"""
Create an :class:`Instances` object used by the models,
from instance annotations in the dataset dict.
Args:
annos (list[dict]): a list of annotations, one per instance.
image_size (tuple): height, width
Returns:
        Instances: It will contain fields "gt_boxes", "gt_classes",
"gt_masks", "gt_keypoints", if they can be obtained from `annos`.
"""
boxes = [
BoxMode.convert(obj["bbox"], BoxMode(obj["bbox_mode"]), BoxMode.XYXY_ABS)
for obj in annos
]
target = Instances(image_size)
boxes = target.gt_boxes = Boxes(boxes)
boxes.clip(image_size)
classes = [obj["category_id"] for obj in annos]
classes = torch.tensor(classes, dtype=torch.int64)
target.gt_classes = classes
if len(annos) and "segmentation" in annos[0]:
segms = [obj["segmentation"] for obj in annos]
if mask_format == "polygon":
masks = PolygonMasks(segms)
else:
assert mask_format == "bitmask", mask_format
masks = []
for segm in segms:
if isinstance(segm, list):
# polygon
masks.append(polygons_to_bitmask(segm, *image_size))
elif isinstance(segm, dict):
# COCO RLE
masks.append(mask_util.decode(segm))
elif isinstance(segm, np.ndarray):
assert (
segm.ndim == 2
), "Expect segmentation of 2 dimensions, got {}.".format(segm.ndim)
# mask array
masks.append(segm)
else:
raise ValueError(
"Cannot convert segmentation of type '{}' to BitMasks!"
"Supported types are: polygons as list[list[float] or ndarray],"
" COCO-style RLE as a dict, or a full-image segmentation mask "
"as a 2D ndarray.".format(type(segm))
)
# torch.from_numpy does not support array with negative stride.
masks = BitMasks(
torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
)
target.gt_masks = masks
if len(annos) and "plane" in annos[0]:
plane = [torch.tensor(obj["plane"]) for obj in annos]
plane_idx = [torch.tensor([i]) for i in range(len(plane))]
target.gt_planes = torch.stack(plane, dim=0)
target.gt_plane_idx = torch.stack(plane_idx, dim=0)
return target
class PlaneRCNNMapper:
"""
A callable which takes a dict produced by the detection dataset, and applies transformations,
including image resizing and flipping. The transformation parameters are parsed from cfg file
and depending on the is_train condition.
Note that for our existing models, mean/std normalization is done by the model instead of here.
"""
def __init__(self, cfg, is_train=True, dataset_names=None):
self.cfg = cfg
# fmt: off
self.img_format = cfg.INPUT.FORMAT
self.depth_on = cfg.MODEL.DEPTH_ON
self.camera_on = cfg.MODEL.CAMERA_ON
self.load_proposals = cfg.MODEL.LOAD_PROPOSALS
self._eval_gt_box = cfg.TEST.EVAL_GT_BOX
self._augmentation = cfg.DATALOADER.AUGMENTATION
# fmt: on
if self.load_proposals:
raise ValueError("Loading proposals not yet supported")
self.is_train = is_train
assert dataset_names is not None
if self.camera_on:
kmeans_trans_path = cfg.MODEL.CAMERA_HEAD.KMEANS_TRANS_PATH
kmeans_rots_path = cfg.MODEL.CAMERA_HEAD.KMEANS_ROTS_PATH
assert os.path.exists(kmeans_trans_path)
assert os.path.exists(kmeans_rots_path)
with open(kmeans_trans_path, "rb") as f:
self.kmeans_trans = pickle.load(f)
with open(kmeans_rots_path, "rb") as f:
self.kmeans_rots = pickle.load(f)
if self._augmentation:
color_jitter = transforms.ColorJitter(0.8, 0.8, 0.8, 0.2)
augmentation = [
transforms.RandomApply([color_jitter], p=0.2),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([GaussianBlur([0.1, 2.0])], p=0.5),
transforms.ToTensor(),
]
self.img_transform = transforms.Compose(augmentation)
def __call__(self, dataset_dict):
"""
Transform the dataset_dict according to the configured transformations.
Args:
dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
Returns:
dict: a new dict that's going to be processed by the model.
It currently does the following:
1. Read the image from "file_name"
2. Transform the image and annotations
3. Prepare the annotations to :class:`Instances`
"""
dataset_dict = copy.deepcopy(dataset_dict)
for i in range(2):
image = utils.read_image(
dataset_dict[str(i)]["file_name"], format=self.img_format
)
utils.check_image_size(dataset_dict[str(i)], image)
if self.is_train and self._augmentation:
image = Image.fromarray(image)
dataset_dict[str(i)]["image"] = self.img_transform(image) * 255.0
image_shape = dataset_dict[str(i)]["image"].shape[1:]
else:
image_shape = image.shape[:2]
dataset_dict[str(i)]["image"] = torch.as_tensor(
image.transpose(2, 0, 1).astype("float32")
)
# Can use uint8 if it turns out to be slow some day
if self.depth_on:
if "depth_head" in self.cfg.MODEL.FREEZE:
dataset_dict[str(i)]["depth"] = torch.as_tensor(
np.zeros((480, 640)).astype("float32")
)
else:
# load depth map
house, img_id = dataset_dict[str(i)]["image_id"].split("_", 1)
depth_path = os.path.join(
"/Pool1/users/jinlinyi/dataset/mp3d_rpnet_v4_sep20/observations",
house,
img_id + ".pkl",
)
with open(depth_path, "rb") as f:
obs = pickle.load(f)
# This assertion is to check dataset is clean
# assert((obs['color_sensor'][:,:,:3][:,:,::-1].transpose(2, 0, 1)-dataset_dict[str(i)]["image"].numpy()).sum()==0)
depth = obs["depth_sensor"]
dataset_dict[str(i)]["depth"] = torch.as_tensor(
depth.astype("float32")
)
if self.camera_on:
relative_pose = dataset_dict["rel_pose"]
x, y, z = relative_pose["position"]
w, xi, yi, zi = relative_pose["rotation"]
dataset_dict["rel_pose"]["tran_cls"] = torch.LongTensor(
self.xyz2class(x, y, z)
)
dataset_dict["rel_pose"]["rot_cls"] = torch.LongTensor(
self.quat2class(w, xi, yi, zi)
)
if not self.is_train and not self._eval_gt_box:
return dataset_dict
if not self._eval_gt_box:
for i in range(2):
if "annotations" in dataset_dict[str(i)]:
annos = [
self.transform_annotations(obj)
for obj in dataset_dict[str(i)].pop("annotations")
if obj.get("iscrowd", 0) == 0
]
# Should not be empty during training
instances = annotations_to_instances(annos, image_shape)
dataset_dict[str(i)]["instances"] = instances[
instances.gt_boxes.nonempty()
]
else:
for i in range(2):
if "annotations" in dataset_dict[str(i)]:
annos = [
self.transform_annotations(obj)
for obj in dataset_dict[str(i)]["annotations"]
if obj.get("iscrowd", 0) == 0
]
# Should not be empty during training
instances = annotations_to_instances(annos, image_shape)
dataset_dict[str(i)]["instances"] = instances[
instances.gt_boxes.nonempty()
]
return dataset_dict
def transform_annotations(self, annotation, transforms=None, image_size=None):
"""
Apply image transformations to the annotations.
After this method, the box mode will be set to XYXY_ABS.
"""
return annotation
def xyz2class(self, x, y, z):
return self.kmeans_trans.predict([[x, y, z]])
def quat2class(self, w, xi, yi, zi):
return self.kmeans_rots.predict([[w, xi, yi, zi]])
def class2xyz(self, cls):
assert (cls >= 0).all() and (cls < self.kmeans_trans.n_clusters).all()
return self.kmeans_trans.cluster_centers_[cls]
def class2quat(self, cls):
assert (cls >= 0).all() and (cls < self.kmeans_rots.n_clusters).all()
return self.kmeans_rots.cluster_centers_[cls]
|
[
"pycocotools.mask.decode",
"pickle.load",
"detectron2.structures.Instances",
"os.path.join",
"detectron2.structures.polygons_to_bitmask",
"detectron2.structures.PolygonMasks",
"os.path.exists",
"torchvision.transforms.Compose",
"copy.deepcopy",
"numpy.asarray",
"detectron2.structures.Boxes",
"torchvision.transforms.RandomApply",
"detectron2.structures.BoxMode",
"torchvision.transforms.ColorJitter",
"torch.stack",
"numpy.zeros",
"numpy.expand_dims",
"torchvision.transforms.RandomGrayscale",
"numpy.array",
"PIL.Image.fromarray",
"numpy.ascontiguousarray",
"torch.tensor",
"torchvision.transforms.ToTensor"
] |
[((1016, 1033), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1026, 1033), True, 'import numpy as np\n'), ((2086, 2107), 'detectron2.structures.Instances', 'Instances', (['image_size'], {}), '(image_size)\n', (2095, 2107), False, 'from detectron2.structures import BitMasks, Boxes, BoxMode, Instances, PolygonMasks, polygons_to_bitmask\n'), ((2138, 2150), 'detectron2.structures.Boxes', 'Boxes', (['boxes'], {}), '(boxes)\n', (2143, 2150), False, 'from detectron2.structures import BitMasks, Boxes, BoxMode, Instances, PolygonMasks, polygons_to_bitmask\n'), ((2245, 2285), 'torch.tensor', 'torch.tensor', (['classes'], {'dtype': 'torch.int64'}), '(classes, dtype=torch.int64)\n', (2257, 2285), False, 'import torch\n'), ((1141, 1166), 'numpy.expand_dims', 'np.expand_dims', (['image', '(-1)'], {}), '(image, -1)\n', (1155, 1166), True, 'import numpy as np\n'), ((4025, 4050), 'torch.stack', 'torch.stack', (['plane'], {'dim': '(0)'}), '(plane, dim=0)\n', (4036, 4050), False, 'import torch\n'), ((4081, 4110), 'torch.stack', 'torch.stack', (['plane_idx'], {'dim': '(0)'}), '(plane_idx, dim=0)\n', (4092, 4110), False, 'import torch\n'), ((6618, 6645), 'copy.deepcopy', 'copy.deepcopy', (['dataset_dict'], {}), '(dataset_dict)\n', (6631, 6645), False, 'import copy\n'), ((1997, 2022), 'detectron2.structures.BoxMode', 'BoxMode', (["obj['bbox_mode']"], {}), "(obj['bbox_mode'])\n", (2004, 2022), False, 'from detectron2.structures import BitMasks, Boxes, BoxMode, Instances, PolygonMasks, polygons_to_bitmask\n'), ((2481, 2500), 'detectron2.structures.PolygonMasks', 'PolygonMasks', (['segms'], {}), '(segms)\n', (2493, 2500), False, 'from detectron2.structures import BitMasks, Boxes, BoxMode, Instances, PolygonMasks, polygons_to_bitmask\n'), ((3886, 3912), 'torch.tensor', 'torch.tensor', (["obj['plane']"], {}), "(obj['plane'])\n", (3898, 3912), False, 'import torch\n'), ((3952, 3969), 'torch.tensor', 'torch.tensor', (['[i]'], {}), '([i])\n', (3964, 3969), False, 'import torch\n'), ((5309, 5342), 'os.path.exists', 'os.path.exists', (['kmeans_trans_path'], {}), '(kmeans_trans_path)\n', (5323, 5342), False, 'import os\n'), ((5362, 5394), 'os.path.exists', 'os.path.exists', (['kmeans_rots_path'], {}), '(kmeans_rots_path)\n', (5376, 5394), False, 'import os\n'), ((5660, 5702), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', (['(0.8)', '(0.8)', '(0.8)', '(0.2)'], {}), '(0.8, 0.8, 0.8, 0.2)\n', (5682, 5702), True, 'import torchvision.transforms as transforms\n'), ((6007, 6039), 'torchvision.transforms.Compose', 'transforms.Compose', (['augmentation'], {}), '(augmentation)\n', (6025, 6039), True, 'import torchvision.transforms as transforms\n'), ((5484, 5498), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5495, 5498), False, 'import pickle\n'), ((5586, 5600), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5597, 5600), False, 'import pickle\n'), ((5748, 5793), 'torchvision.transforms.RandomApply', 'transforms.RandomApply', (['[color_jitter]'], {'p': '(0.2)'}), '([color_jitter], p=0.2)\n', (5770, 5793), True, 'import torchvision.transforms as transforms\n'), ((5811, 5844), 'torchvision.transforms.RandomGrayscale', 'transforms.RandomGrayscale', ([], {'p': '(0.2)'}), '(p=0.2)\n', (5837, 5844), True, 'import torchvision.transforms as transforms\n'), ((5937, 5958), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5956, 5958), True, 'import torchvision.transforms as transforms\n'), ((6940, 6962), 'PIL.Image.fromarray', 'Image.fromarray', (['image'], {}), 
'(image)\n', (6955, 6962), False, 'from PIL import Image\n'), ((7806, 7912), 'os.path.join', 'os.path.join', (['"""/Pool1/users/jinlinyi/dataset/mp3d_rpnet_v4_sep20/observations"""', 'house', "(img_id + '.pkl')"], {}), "('/Pool1/users/jinlinyi/dataset/mp3d_rpnet_v4_sep20/observations',\n house, img_id + '.pkl')\n", (7818, 7912), False, 'import os\n'), ((1397, 1417), 'numpy.array', 'np.array', (['_M_RGB2YUV'], {}), '(_M_RGB2YUV)\n', (1405, 1417), True, 'import numpy as np\n'), ((2732, 2770), 'detectron2.structures.polygons_to_bitmask', 'polygons_to_bitmask', (['segm', '*image_size'], {}), '(segm, *image_size)\n', (2751, 2770), False, 'from detectron2.structures import BitMasks, Boxes, BoxMode, Instances, PolygonMasks, polygons_to_bitmask\n'), ((8088, 8102), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8099, 8102), False, 'import pickle\n'), ((2881, 2903), 'pycocotools.mask.decode', 'mask_util.decode', (['segm'], {}), '(segm)\n', (2897, 2903), True, 'import pycocotools.mask as mask_util\n'), ((3737, 3760), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['x'], {}), '(x)\n', (3757, 3760), True, 'import numpy as np\n'), ((7570, 7590), 'numpy.zeros', 'np.zeros', (['(480, 640)'], {}), '((480, 640))\n', (7578, 7590), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Python-Future Documentation Extensions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for automatically documenting filters and tests.
Based on the Jinja2 documentation extensions.
:copyright: Copyright 2008 by <NAME>.
:license: BSD.
"""
import collections
import os
import re
import inspect
from itertools import islice
from types import BuiltinFunctionType
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import prepare_docstring
from sphinx.application import TemplateBridge
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
def parse_rst(state, content_offset, doc):
node = nodes.section()
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
state.memo.title_styles = []
state.memo.section_level = 0
state.nested_parse(doc, content_offset, node, match_titles=1)
state.memo.title_styles = surrounding_title_styles
state.memo.section_level = surrounding_section_level
return node.children
class FutureStyle(Style):
title = 'Future Style'
default_style = ""
styles = {
Comment: 'italic #0B6A94', # was: #0066ff',
Comment.Preproc: 'noitalic #B11414',
Comment.Special: 'italic #505050',
Keyword: 'bold #D15E27',
Keyword.Type: '#D15E27',
Operator.Word: 'bold #B80000',
Name.Builtin: '#333333',
Name.Function: '#333333',
Name.Class: 'bold #333333',
Name.Namespace: 'bold #333333',
Name.Entity: 'bold #363636',
Name.Attribute: '#686868',
Name.Tag: 'bold #686868',
Name.Decorator: '#686868',
String: '#AA891C',
Number: '#444444',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
def setup(app):
pass
# uncomment for inline toc. links are broken unfortunately
##app.connect('doctree-resolved', inject_toc)
|
[
"docutils.nodes.section"
] |
[((767, 782), 'docutils.nodes.section', 'nodes.section', ([], {}), '()\n', (780, 782), False, 'from docutils import nodes\n')]
|
from time import time
class CallController:
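    """Decorator that drops calls arriving within ``max_call_interval``
    seconds of the previous accepted call (simple rate limiting)."""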
def __init__(self, max_call_interval):
self._max_call_interval = max_call_interval
self._last_call = time()
def __call__(self, function):
def wrapped(*args, **kwargs):
now = time()
if now - self._last_call > self._max_call_interval:
self._last_call = now
                return function(*args, **kwargs)  # propagate the wrapped function's result
return wrapped
class Wrap:
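    """Hold a callable together with two arguments; ``get()`` applies it lazily."""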
__func: callable = None
__a = None
__b = None
def __init__(self, func, a, b):
self.__func = func
self.__a = a
self.__b = b
print("Building wrap: " + str(func))
def get(self):
return self.__func(self.__a, self.__b)
|
[
"time.time"
] |
[((168, 174), 'time.time', 'time', ([], {}), '()\n', (172, 174), False, 'from time import time\n'), ((266, 272), 'time.time', 'time', ([], {}), '()\n', (270, 272), False, 'from time import time\n')]
|
import re
import urllib.parse
from typing import Union
_scheme_regex = re.compile(r"^(?:https?)?$", flags=re.IGNORECASE | re.ASCII)
_netloc_regex = re.compile(r"^(?:www\.)?mangaupdates\.com$", flags=re.IGNORECASE | re.ASCII)
_query_regex = re.compile(r"id=(\d+)(?:&|$)", flags=re.IGNORECASE | re.ASCII)
def get_id_from_url(url: str) -> str:
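    """Extract the numeric series id from a mangaupdates.com series URL."""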
if "//" not in url:
url = "//" + url
url_res = urllib.parse.urlparse(url)
if not _scheme_regex.match(url_res.scheme):
raise ValueError("URL is not valid.")
if not _netloc_regex.match(url_res.netloc):
raise ValueError("URL is not a valid mangaupdates.com link.")
if url_res.path != "/series.html":
raise ValueError("URL is not a manga series page.")
id_query = _query_regex.search(url_res.query)
if id_query is None:
raise ValueError("ID not found in the URL.")
return id_query.group(1)
def get_url_by_id(id_: Union[str, int]) -> str:
return "https://www.mangaupdates.com/series.html?id=" + str(id_)
|
[
"re.compile"
] |
[((73, 132), 're.compile', 're.compile', (['"""^(?:https?)?$"""'], {'flags': '(re.IGNORECASE | re.ASCII)'}), "('^(?:https?)?$', flags=re.IGNORECASE | re.ASCII)\n", (83, 132), False, 'import re\n'), ((150, 227), 're.compile', 're.compile', (['"""^(?:www\\\\.)?mangaupdates\\\\.com$"""'], {'flags': '(re.IGNORECASE | re.ASCII)'}), "('^(?:www\\\\.)?mangaupdates\\\\.com$', flags=re.IGNORECASE | re.ASCII)\n", (160, 227), False, 'import re\n'), ((242, 304), 're.compile', 're.compile', (['"""id=(\\\\d+)(?:&|$)"""'], {'flags': '(re.IGNORECASE | re.ASCII)'}), "('id=(\\\\d+)(?:&|$)', flags=re.IGNORECASE | re.ASCII)\n", (252, 304), False, 'import re\n')]
|
import pytest
from flag_engine.features.schemas import (
FeatureStateSchema,
MultivariateFeatureOptionSchema,
MultivariateFeatureStateValueSchema,
)
from flag_engine.utils.exceptions import InvalidPercentageAllocation
def test_can_load_multivariate_feature_option_dict_without_id_field():
MultivariateFeatureOptionSchema().load({"value": 1})
def test_can_load_multivariate_feature_state_value_without_id_field():
MultivariateFeatureStateValueSchema().load(
{
"multivariate_feature_option": {"value": 1},
"percentage_allocation": 10,
}
)
def test_dumping_fs_schema_raises_invalid_percentage_allocation_for_invalid_allocation():
# Given
data = {
"multivariate_feature_state_values": [
{"multivariate_feature_option": 12, "percentage_allocation": 100},
{"multivariate_feature_option": 9, "percentage_allocation": 80},
],
"feature_state_value": "value",
}
# Then
with pytest.raises(InvalidPercentageAllocation):
FeatureStateSchema().dump(data)
def test_dumping_fs_schema_works_for_valid_allocation():
# Given
data = {
"multivariate_feature_state_values": [
{"multivariate_feature_option": 12, "percentage_allocation": 20},
{"multivariate_feature_option": 9, "percentage_allocation": 80},
],
"feature_state_value": "value",
}
# Then
FeatureStateSchema().dump(data)
|
[
"pytest.raises",
"flag_engine.features.schemas.MultivariateFeatureOptionSchema",
"flag_engine.features.schemas.FeatureStateSchema",
"flag_engine.features.schemas.MultivariateFeatureStateValueSchema"
] |
[((1003, 1045), 'pytest.raises', 'pytest.raises', (['InvalidPercentageAllocation'], {}), '(InvalidPercentageAllocation)\n', (1016, 1045), False, 'import pytest\n'), ((308, 341), 'flag_engine.features.schemas.MultivariateFeatureOptionSchema', 'MultivariateFeatureOptionSchema', ([], {}), '()\n', (339, 341), False, 'from flag_engine.features.schemas import FeatureStateSchema, MultivariateFeatureOptionSchema, MultivariateFeatureStateValueSchema\n'), ((438, 475), 'flag_engine.features.schemas.MultivariateFeatureStateValueSchema', 'MultivariateFeatureStateValueSchema', ([], {}), '()\n', (473, 475), False, 'from flag_engine.features.schemas import FeatureStateSchema, MultivariateFeatureOptionSchema, MultivariateFeatureStateValueSchema\n'), ((1445, 1465), 'flag_engine.features.schemas.FeatureStateSchema', 'FeatureStateSchema', ([], {}), '()\n', (1463, 1465), False, 'from flag_engine.features.schemas import FeatureStateSchema, MultivariateFeatureOptionSchema, MultivariateFeatureStateValueSchema\n'), ((1055, 1075), 'flag_engine.features.schemas.FeatureStateSchema', 'FeatureStateSchema', ([], {}), '()\n', (1073, 1075), False, 'from flag_engine.features.schemas import FeatureStateSchema, MultivariateFeatureOptionSchema, MultivariateFeatureStateValueSchema\n')]
|
import torch
from torch import nn, Tensor
from torch.nn import functional as F
from abc import abstractmethod
from typing import List, Callable, Union, Any, TypeVar, Tuple
from .util import reparameterize
class BaseVAE(nn.Module):
def __init__(self,
name: str,
latent_dim: int) -> None:
super(BaseVAE, self).__init__()
self.name = name
self.latent_dim = latent_dim
@abstractmethod
def encode(self, input: Tensor) -> List[Tensor]:
raise NotImplementedError
@abstractmethod
def decode(self, input: Tensor, **kwargs) -> Any:
raise NotImplementedError
def get_sandwich_layers(self) -> List[nn.Module]:
raise NotImplementedError
@abstractmethod
def get_encoder(self) -> List[nn.Module]:
raise NotImplementedError
def forward(self, x: Tensor, **kwargs) -> List[Tensor]:
mu, log_var = self.encode(x)
z = reparameterize(mu, log_var)
y = self.decode(z, **kwargs)
return [y, x, mu, log_var, z]
def sample(self,
num_samples: int,
current_device: int, **kwargs) -> Tensor:
"""
Samples from the latent space and return the corresponding
image space map.
:param num_samples: (Int) Number of samples
:param current_device: (Int) Device to run the model
:return: (Tensor)
"""
z = torch.randn(num_samples,
self.latent_dim)
z = z.to(current_device)
samples = self.decode(z)
return samples
def generate(self, x: Tensor, **kwargs) -> Tensor:
"""
Given an input image x, returns the reconstructed image
:param x: (Tensor) [B x C x H x W]
:return: (Tensor) [B x C x H x W]
"""
return self.forward(x)[0]
def loss_function(self,
recons: Tensor,
input: Tensor,
mu: Tensor,
log_var: Tensor,
z: Tensor,
objective: str = 'default',
beta: float = 1.0,
gamma: float = 1.0,
target_capacity: float = 25.0,
**kwargs) -> dict:
recons_loss = F.mse_loss(recons, input)
result = {'loss': recons_loss,
'Reconstruction_Loss': recons_loss}
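        # Closed-form KL divergence between N(mu, sigma^2) and N(0, I):
        # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2), averaged over the batch.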
kld_loss = torch.mean(-0.5 * torch.sum(1 +
log_var - mu ** 2 - log_var.exp(), dim=1), dim=0)
result['KLD_Loss'] = kld_loss
if objective == 'default':
# O.G. beta loss term applied directly to KLD
result['loss'] += beta * kld_loss
elif objective == 'controlled_capacity':
# Use controlled capacity increase from
# https://arxiv.org/pdf/1804.03599.pdf
capacity_loss = torch.abs(kld_loss - target_capacity)
result['Capacity_Loss'] = capacity_loss
result['loss'] += gamma * capacity_loss
else:
raise ValueError(f'unknown objective "{objective}"')
return result
|
[
"torch.nn.functional.mse_loss",
"torch.abs",
"torch.randn"
] |
[((1428, 1469), 'torch.randn', 'torch.randn', (['num_samples', 'self.latent_dim'], {}), '(num_samples, self.latent_dim)\n', (1439, 1469), False, 'import torch\n'), ((2306, 2331), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['recons', 'input'], {}), '(recons, input)\n', (2316, 2331), True, 'from torch.nn import functional as F\n'), ((2933, 2970), 'torch.abs', 'torch.abs', (['(kld_loss - target_capacity)'], {}), '(kld_loss - target_capacity)\n', (2942, 2970), False, 'import torch\n')]
|
import socket
host = "192.168.0.103"
port_windows = 7899
# Reconnect once per loop iteration: read a single message, print it, retry.
while True:
    socket_windows = socket.socket()
    try:
        socket_windows.connect((host, port_windows))
        temp = socket_windows.recv(1024).decode()
        if temp:
            print(temp)
    except Exception:
        pass  # connection or read failed; retry on the next iteration
    finally:
        socket_windows.close()  # close each socket so descriptors are not leaked
|
[
"socket.socket"
] |
[((105, 120), 'socket.socket', 'socket.socket', ([], {}), '()\n', (118, 120), False, 'import socket\n')]
|
from datetime import datetime, timedelta, timezone
import discord
import humanize
from apscheduler.jobstores.base import ConflictingIdError
from data.model import Case
from data.services import guild_service, user_service
from discord import app_commands
from discord.ext import commands
from discord.utils import escape_markdown, escape_mentions
from utils import GIRContext, cfg, transform_context
from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly
from utils.mod import (add_ban_case, add_kick_case, notify_user,
prepare_editreason_log, prepare_liftwarn_log,
prepare_mute_log, prepare_removepoints_log,
prepare_unban_log, prepare_unmute_log,
submit_public_log, warn)
from utils.views import warn_autocomplete
from utils.views.confirm import SecondStaffConfirm
class ModActions(commands.Cog):
def __init__(self, bot):
self.bot = bot
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="warn a user")
@app_commands.describe(user="User to warn")
@app_commands.describe(points="Points to warn the user with")
@app_commands.describe(reason="Reason for warning")
@transform_context
async def warn(self, ctx: GIRContext, user: ModsAndAboveMemberOrUser, points: app_commands.Range[int, 1, 600], reason: str):
if points < 1: # can't warn for negative/0 points
raise commands.BadArgument(message="Points can't be lower than 1.")
await warn(ctx, target_member=user, mod=ctx.author, points=points, reason=reason)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Kick a user")
@app_commands.describe(member="User to kick")
@app_commands.describe(reason="Reason for kicking")
@transform_context
async def kick(self, ctx: GIRContext, member: ModsAndAboveMember, reason: str) -> None:
reason = escape_markdown(reason)
reason = escape_mentions(reason)
db_guild = guild_service.get_guild()
log = add_kick_case(target_member=member, mod=ctx.author, reason=reason, db_guild=db_guild)
await notify_user(member, f"You were kicked from {ctx.guild.name}", log)
await member.kick(reason=reason)
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, member, log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Kick a user")
@app_commands.describe(member="User to kick")
@transform_context
async def roblox(self, ctx: GIRContext, member: ModsAndAboveMember) -> None:
reason = "This Discord server is for iOS jailbreaking, not Roblox. Please join https://discord.gg/jailbreak instead, thank you!"
db_guild = guild_service.get_guild()
log = add_kick_case(target_member=member, mod=ctx.author, reason=reason, db_guild=db_guild)
await notify_user(member, f"You were kicked from {ctx.guild.name}", log)
await member.kick(reason=reason)
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, member, log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Mute a user")
@app_commands.describe(member="User to mute")
@app_commands.describe(duration="Duration of the mute (i.e 10m, 1h, 1d...)")
@app_commands.describe(reason="Reason for muting")
@transform_context
async def mute(self, ctx: GIRContext, member: ModsAndAboveMember, duration: Duration, reason: str = "No reason.") -> None:
reason = escape_markdown(reason)
reason = escape_mentions(reason)
now = datetime.now(tz=timezone.utc)
delta = duration
if delta is None:
raise commands.BadArgument("Please input a valid duration!")
if member.is_timed_out():
raise commands.BadArgument("This user is already muted.")
time = now + timedelta(seconds=delta)
if time > now + timedelta(days=14):
raise commands.BadArgument("Mutes can't be longer than 14 days!")
db_guild = guild_service.get_guild()
case = Case(
_id=db_guild.case_id,
_type="MUTE",
date=now,
mod_id=ctx.author.id,
mod_tag=str(ctx.author),
reason=reason,
)
case.until = time
case.punishment = humanize.naturaldelta(
time - now, minimum_unit="seconds")
try:
await member.timeout(time, reason=reason)
ctx.tasks.schedule_untimeout(member.id, time)
except ConflictingIdError:
raise commands.BadArgument(
"The database thinks this user is already muted.")
guild_service.inc_caseid()
user_service.add_case(member.id, case)
log = prepare_mute_log(ctx.author, member, case)
await ctx.respond_or_edit(embed=log, delete_after=10)
log.remove_author()
log.set_thumbnail(url=member.display_avatar)
dmed = await notify_user(member, f"You have been muted in {ctx.guild.name}", log)
await submit_public_log(ctx, db_guild, member, log, dmed)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Unmute a user")
@app_commands.describe(member="User to unmute")
@app_commands.describe(reason="Reason for unmuting")
@transform_context
async def unmute(self, ctx: GIRContext, member: ModsAndAboveMember, reason: str) -> None:
db_guild = guild_service.get_guild()
if not member.is_timed_out():
raise commands.BadArgument("This user is not muted.")
await member.edit(timed_out_until=None)
try:
ctx.tasks.cancel_unmute(member.id)
except Exception:
pass
case = Case(
_id=db_guild.case_id,
_type="UNMUTE",
mod_id=ctx.author.id,
mod_tag=str(ctx.author),
reason=reason,
)
guild_service.inc_caseid()
user_service.add_case(member.id, case)
log = prepare_unmute_log(ctx.author, member, case)
await ctx.respond_or_edit(embed=log, delete_after=10)
dmed = await notify_user(member, f"You have been unmuted in {ctx.guild.name}", log)
await submit_public_log(ctx, db_guild, member, log, dmed)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Ban a user")
@app_commands.describe(user="User to ban")
@app_commands.describe(reason="Reason for banning")
@transform_context
async def ban(self, ctx: GIRContext, user: ModsAndAboveMemberOrUser, reason: str):
reason = escape_markdown(reason)
reason = escape_mentions(reason)
db_guild = guild_service.get_guild()
member_is_external = isinstance(user, discord.User)
# if the ID given is of a user who isn't in the guild, try to fetch the profile
if member_is_external:
if self.bot.ban_cache.is_banned(user.id):
raise commands.BadArgument("That user is already banned!")
self.bot.ban_cache.ban(user.id)
log = await add_ban_case(user, ctx.author, reason, db_guild)
if not member_is_external:
if cfg.ban_appeal_url is None:
await notify_user(user, f"You have been banned from {ctx.guild.name}", log)
else:
await notify_user(user, f"You have been banned from {ctx.guild.name}\n\nIf you would like to appeal your ban, please fill out this form: <{cfg.ban_appeal_url}>", log)
await user.ban(reason=reason)
else:
# hackban for user not currently in guild
await ctx.guild.ban(discord.Object(id=user.id))
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, user, log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Ban a user anonymously")
@app_commands.describe(user="User to ban")
@app_commands.describe(reason="Reason for banning")
@transform_context
async def staffban(self, ctx: GIRContext, user: ModsAndAboveMemberOrUser, reason: str):
reason = escape_markdown(reason)
reason = escape_mentions(reason)
db_guild = guild_service.get_guild()
member_is_external = isinstance(user, discord.User)
# if the ID given is of a user who isn't in the guild, try to fetch the profile
if member_is_external:
if self.bot.ban_cache.is_banned(user.id):
raise commands.BadArgument("That user is already banned!")
confirm_embed = discord.Embed(description=f"{ctx.author.mention} wants to staff ban {user.mention} with reason `{reason}`. Another Moderator needs to click Yes to submit this ban.\n\nClicking Yes means this was discussed amongst the staff team and will hide the banning Moderator. This should not be used often.", color=discord.Color.blurple())
view = SecondStaffConfirm(ctx, ctx.author)
await ctx.respond_or_edit(view=view, embed=confirm_embed)
await view.wait()
if not view.value:
await ctx.send_warning(f"Cancelled staff banning {user.mention}.")
return
self.bot.ban_cache.ban(user.id)
log = await add_ban_case(user, ctx.author, reason, db_guild)
log.set_field_at(1, name="Mod", value=f"{ctx.guild.name} Staff")
if not member_is_external:
if cfg.ban_appeal_url is None:
await notify_user(user, f"You have been banned from {ctx.guild.name}", log)
else:
await notify_user(user, f"You have been banned from {ctx.guild.name}\n\nIf you would like to appeal your ban, please fill out this form: <{cfg.ban_appeal_url}>", log)
await user.ban(reason=reason)
else:
# hackban for user not currently in guild
await ctx.guild.ban(discord.Object(id=user.id))
await ctx.interaction.message.delete()
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, user, log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Unban a user")
@app_commands.describe(user="User to unban")
@app_commands.describe(reason="Reason for unbanning")
@transform_context
async def unban(self, ctx: GIRContext, user: UserOnly, reason: str) -> None:
if ctx.guild.get_member(user.id) is not None:
raise commands.BadArgument(
"You can't unban someone already in the server!")
reason = escape_markdown(reason)
reason = escape_mentions(reason)
if not self.bot.ban_cache.is_banned(user.id):
raise commands.BadArgument("That user isn't banned!")
try:
await ctx.guild.unban(discord.Object(id=user.id), reason=reason)
except discord.NotFound:
raise commands.BadArgument(f"{user} is not banned.")
self.bot.ban_cache.unban(user.id)
db_guild = guild_service.get_guild()
case = Case(
_id=db_guild.case_id,
_type="UNBAN",
mod_id=ctx.author.id,
mod_tag=str(ctx.author),
reason=reason,
)
guild_service.inc_caseid()
user_service.add_case(user.id, case)
log = prepare_unban_log(ctx.author, user, case)
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, user, log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Purge channel messages")
@app_commands.describe(amount="Number of messages to purge")
@transform_context
async def purge(self, ctx: GIRContext, amount: app_commands.Range[int, 1, 100]) -> None:
if amount <= 0:
raise commands.BadArgument(
"Number of messages to purge must be greater than 0")
elif amount >= 100:
amount = 100
msgs = [message async for message in ctx.channel.history(limit=amount)]
await ctx.channel.purge(limit=amount)
await ctx.send_success(f'Purged {len(msgs)} messages.', delete_after=10)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Marks a warn and lifted and removes points")
@app_commands.describe(member="Member to lift warn of")
@app_commands.describe(case_id="Case ID of the warn to lift")
@app_commands.autocomplete(case_id=warn_autocomplete)
@app_commands.describe(reason="Reason for lifting the warn")
@transform_context
async def liftwarn(self, ctx: GIRContext, member: ModsAndAboveMember, case_id: str, reason: str) -> None:
cases = user_service.get_cases(member.id)
case = cases.cases.filter(_id=case_id).first()
reason = escape_markdown(reason)
reason = escape_mentions(reason)
# sanity checks
if case is None:
raise commands.BadArgument(
message=f"{member} has no case with ID {case_id}")
elif case._type != "WARN":
raise commands.BadArgument(
message=f"{member}'s case with ID {case_id} is not a warn case.")
elif case.lifted:
raise commands.BadArgument(
message=f"Case with ID {case_id} already lifted.")
u = user_service.get_user(id=member.id)
if u.warn_points - int(case.punishment) < 0:
raise commands.BadArgument(
message=f"Can't lift Case #{case_id} because it would make {member.mention}'s points negative.")
# passed sanity checks, so update the case in DB
case.lifted = True
case.lifted_reason = reason
case.lifted_by_tag = str(ctx.author)
case.lifted_by_id = ctx.author.id
case.lifted_date = datetime.now()
cases.save()
# remove the warn points from the user in DB
user_service.inc_points(member.id, -1 * int(case.punishment))
dmed = True
# prepare log embed, send to #public-mod-logs, user, channel where invoked
log = prepare_liftwarn_log(ctx.author, member, case)
dmed = await notify_user(member, f"Your warn has been lifted in {ctx.guild}.", log)
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, guild_service.get_guild(), member, log, dmed)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Edit case reason")
@app_commands.describe(member="Member to edit case of")
@app_commands.describe(case_id="Case ID of the case to edit")
@app_commands.autocomplete(case_id=warn_autocomplete)
@app_commands.describe(new_reason="New reason for the case")
@transform_context
async def editreason(self, ctx: GIRContext, member: ModsAndAboveMemberOrUser, case_id: str, new_reason: str) -> None:
# retrieve user's case with given ID
cases = user_service.get_cases(member.id)
case = cases.cases.filter(_id=case_id).first()
new_reason = escape_markdown(new_reason)
new_reason = escape_mentions(new_reason)
# sanity checks
if case is None:
raise commands.BadArgument(
message=f"{member} has no case with ID {case_id}")
old_reason = case.reason
case.reason = new_reason
case.date = datetime.now()
cases.save()
dmed = True
log = prepare_editreason_log(ctx.author, member, case, old_reason)
dmed = await notify_user(member, f"Your case was updated in {ctx.guild.name}.", log)
public_chan = ctx.guild.get_channel(
guild_service.get_guild().channel_public)
found = False
async for message in public_chan.history(limit=200):
if message.author.id != ctx.me.id:
continue
if len(message.embeds) == 0:
continue
embed = message.embeds[0]
if embed.footer.text is None:
continue
if len(embed.footer.text.split(" ")) < 2:
continue
if f"#{case_id}" == embed.footer.text.split(" ")[1]:
for i, field in enumerate(embed.fields):
if field.name == "Reason":
embed.set_field_at(
i, name="Reason", value=new_reason)
await message.edit(embed=embed)
found = True
if found:
await ctx.respond_or_edit(f"We updated the case and edited the embed in {public_chan.mention}.", embed=log, delete_after=10)
else:
await ctx.respond_or_edit(f"We updated the case but weren't able to find a corresponding message in {public_chan.mention}!", embed=log, delete_after=10)
log.remove_author()
log.set_thumbnail(url=member.display_avatar)
await public_chan.send(member.mention if not dmed else "", embed=log)
@mod_and_up()
@app_commands.guilds(cfg.guild_id)
@app_commands.command(description="Edit case reason")
@app_commands.describe(member="Member to remove points from")
@app_commands.describe(points="Amount of points to remove")
@app_commands.describe(reason="Reason for removing points")
@transform_context
async def removepoints(self, ctx: GIRContext, member: ModsAndAboveMember, points: app_commands.Range[int, 1, 600], reason: str) -> None:
reason = escape_markdown(reason)
reason = escape_mentions(reason)
if points < 1:
raise commands.BadArgument("Points can't be lower than 1.")
u = user_service.get_user(id=member.id)
if u.warn_points - points < 0:
raise commands.BadArgument(
message=f"Can't remove {points} points because it would make {member.mention}'s points negative.")
# passed sanity checks, so update the case in DB
# remove the warn points from the user in DB
user_service.inc_points(member.id, -1 * points)
db_guild = guild_service.get_guild()
case = Case(
_id=db_guild.case_id,
_type="REMOVEPOINTS",
mod_id=ctx.author.id,
mod_tag=str(ctx.author),
punishment=str(points),
reason=reason,
)
# increment DB's max case ID for next case
guild_service.inc_caseid()
# add case to db
user_service.add_case(member.id, case)
# prepare log embed, send to #public-mod-logs, user, channel where invoked
log = prepare_removepoints_log(ctx.author, member, case)
dmed = await notify_user(member, f"Your points were removed in {ctx.guild.name}.", log)
await ctx.respond_or_edit(embed=log, delete_after=10)
await submit_public_log(ctx, db_guild, member, log, dmed)
async def setup(bot):
await bot.add_cog(ModActions(bot))
|
[
"data.services.guild_service.get_guild",
"utils.mod.prepare_liftwarn_log",
"discord.utils.escape_markdown",
"data.services.guild_service.inc_caseid",
"utils.mod.submit_public_log",
"discord.utils.escape_mentions",
"utils.mod.notify_user",
"discord.app_commands.describe",
"data.services.user_service.add_case",
"discord.Color.blurple",
"utils.mod.prepare_editreason_log",
"utils.framework.mod_and_up",
"utils.mod.prepare_unmute_log",
"datetime.timedelta",
"discord.app_commands.command",
"discord.app_commands.guilds",
"utils.mod.prepare_unban_log",
"utils.views.confirm.SecondStaffConfirm",
"datetime.datetime.now",
"utils.mod.add_kick_case",
"utils.mod.prepare_mute_log",
"discord.app_commands.autocomplete",
"utils.mod.prepare_removepoints_log",
"discord.ext.commands.BadArgument",
"data.services.user_service.get_user",
"utils.mod.add_ban_case",
"discord.Object",
"utils.mod.warn",
"data.services.user_service.inc_points",
"humanize.naturaldelta",
"data.services.user_service.get_cases"
] |
[((1002, 1014), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (1012, 1014), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((1020, 1053), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (1039, 1053), False, 'from discord import app_commands\n'), ((1059, 1106), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""warn a user"""'}), "(description='warn a user')\n", (1079, 1106), False, 'from discord import app_commands\n'), ((1112, 1154), 'discord.app_commands.describe', 'app_commands.describe', ([], {'user': '"""User to warn"""'}), "(user='User to warn')\n", (1133, 1154), False, 'from discord import app_commands\n'), ((1160, 1220), 'discord.app_commands.describe', 'app_commands.describe', ([], {'points': '"""Points to warn the user with"""'}), "(points='Points to warn the user with')\n", (1181, 1220), False, 'from discord import app_commands\n'), ((1226, 1276), 'discord.app_commands.describe', 'app_commands.describe', ([], {'reason': '"""Reason for warning"""'}), "(reason='Reason for warning')\n", (1247, 1276), False, 'from discord import app_commands\n'), ((1665, 1677), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (1675, 1677), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((1683, 1716), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (1702, 1716), False, 'from discord import app_commands\n'), ((1722, 1769), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Kick a user"""'}), "(description='Kick a user')\n", (1742, 1769), False, 'from discord import app_commands\n'), ((1775, 1819), 'discord.app_commands.describe', 'app_commands.describe', ([], {'member': '"""User to kick"""'}), "(member='User to kick')\n", (1796, 1819), False, 'from discord import app_commands\n'), ((1825, 1875), 'discord.app_commands.describe', 'app_commands.describe', ([], {'reason': '"""Reason for kicking"""'}), "(reason='Reason for kicking')\n", (1846, 1875), False, 'from discord import app_commands\n'), ((2472, 2484), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (2482, 2484), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((2490, 2523), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (2509, 2523), False, 'from discord import app_commands\n'), ((2529, 2576), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Kick a user"""'}), "(description='Kick a user')\n", (2549, 2576), False, 'from discord import app_commands\n'), ((2582, 2626), 'discord.app_commands.describe', 'app_commands.describe', ([], {'member': '"""User to kick"""'}), "(member='User to kick')\n", (2603, 2626), False, 'from discord import app_commands\n'), ((3267, 3279), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (3277, 3279), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((3285, 3318), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (3304, 3318), False, 'from discord import app_commands\n'), ((3324, 3371), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Mute a user"""'}), "(description='Mute a 
user')\n", (3344, 3371), False, 'from discord import app_commands\n'), ((3377, 3421), 'discord.app_commands.describe', 'app_commands.describe', ([], {'member': '"""User to mute"""'}), "(member='User to mute')\n", (3398, 3421), False, 'from discord import app_commands\n'), ((3427, 3502), 'discord.app_commands.describe', 'app_commands.describe', ([], {'duration': '"""Duration of the mute (i.e 10m, 1h, 1d...)"""'}), "(duration='Duration of the mute (i.e 10m, 1h, 1d...)')\n", (3448, 3502), False, 'from discord import app_commands\n'), ((3508, 3557), 'discord.app_commands.describe', 'app_commands.describe', ([], {'reason': '"""Reason for muting"""'}), "(reason='Reason for muting')\n", (3529, 3557), False, 'from discord import app_commands\n'), ((5331, 5343), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (5341, 5343), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((5349, 5382), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (5368, 5382), False, 'from discord import app_commands\n'), ((5388, 5437), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Unmute a user"""'}), "(description='Unmute a user')\n", (5408, 5437), False, 'from discord import app_commands\n'), ((5443, 5489), 'discord.app_commands.describe', 'app_commands.describe', ([], {'member': '"""User to unmute"""'}), "(member='User to unmute')\n", (5464, 5489), False, 'from discord import app_commands\n'), ((5495, 5546), 'discord.app_commands.describe', 'app_commands.describe', ([], {'reason': '"""Reason for unmuting"""'}), "(reason='Reason for unmuting')\n", (5516, 5546), False, 'from discord import app_commands\n'), ((6529, 6541), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (6539, 6541), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((6547, 6580), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (6566, 6580), False, 'from discord import app_commands\n'), ((6586, 6632), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Ban a user"""'}), "(description='Ban a user')\n", (6606, 6632), False, 'from discord import app_commands\n'), ((6638, 6679), 'discord.app_commands.describe', 'app_commands.describe', ([], {'user': '"""User to ban"""'}), "(user='User to ban')\n", (6659, 6679), False, 'from discord import app_commands\n'), ((6685, 6735), 'discord.app_commands.describe', 'app_commands.describe', ([], {'reason': '"""Reason for banning"""'}), "(reason='Reason for banning')\n", (6706, 6735), False, 'from discord import app_commands\n'), ((8063, 8075), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (8073, 8075), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((8081, 8114), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (8100, 8114), False, 'from discord import app_commands\n'), ((8120, 8178), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Ban a user anonymously"""'}), "(description='Ban a user anonymously')\n", (8140, 8178), False, 'from discord import app_commands\n'), ((8184, 8225), 'discord.app_commands.describe', 'app_commands.describe', ([], {'user': '"""User to ban"""'}), "(user='User to ban')\n", (8205, 8225), False, 'from discord import 
app_commands\n'), ((8231, 8281), 'discord.app_commands.describe', 'app_commands.describe', ([], {'reason': '"""Reason for banning"""'}), "(reason='Reason for banning')\n", (8252, 8281), False, 'from discord import app_commands\n'), ((10357, 10369), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (10367, 10369), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((10375, 10408), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (10394, 10408), False, 'from discord import app_commands\n'), ((10414, 10462), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Unban a user"""'}), "(description='Unban a user')\n", (10434, 10462), False, 'from discord import app_commands\n'), ((10468, 10511), 'discord.app_commands.describe', 'app_commands.describe', ([], {'user': '"""User to unban"""'}), "(user='User to unban')\n", (10489, 10511), False, 'from discord import app_commands\n'), ((10517, 10569), 'discord.app_commands.describe', 'app_commands.describe', ([], {'reason': '"""Reason for unbanning"""'}), "(reason='Reason for unbanning')\n", (10538, 10569), False, 'from discord import app_commands\n'), ((11770, 11782), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (11780, 11782), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((11788, 11821), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (11807, 11821), False, 'from discord import app_commands\n'), ((11827, 11885), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Purge channel messages"""'}), "(description='Purge channel messages')\n", (11847, 11885), False, 'from discord import app_commands\n'), ((11891, 11950), 'discord.app_commands.describe', 'app_commands.describe', ([], {'amount': '"""Number of messages to purge"""'}), "(amount='Number of messages to purge')\n", (11912, 11950), False, 'from discord import app_commands\n'), ((12468, 12480), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (12478, 12480), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((12486, 12519), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (12505, 12519), False, 'from discord import app_commands\n'), ((12525, 12603), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Marks a warn and lifted and removes points"""'}), "(description='Marks a warn and lifted and removes points')\n", (12545, 12603), False, 'from discord import app_commands\n'), ((12609, 12663), 'discord.app_commands.describe', 'app_commands.describe', ([], {'member': '"""Member to lift warn of"""'}), "(member='Member to lift warn of')\n", (12630, 12663), False, 'from discord import app_commands\n'), ((12669, 12729), 'discord.app_commands.describe', 'app_commands.describe', ([], {'case_id': '"""Case ID of the warn to lift"""'}), "(case_id='Case ID of the warn to lift')\n", (12690, 12729), False, 'from discord import app_commands\n'), ((12735, 12787), 'discord.app_commands.autocomplete', 'app_commands.autocomplete', ([], {'case_id': 'warn_autocomplete'}), '(case_id=warn_autocomplete)\n', (12760, 12787), False, 'from discord import app_commands\n'), ((12793, 12852), 'discord.app_commands.describe', 'app_commands.describe', ([], 
{'reason': '"""Reason for lifting the warn"""'}), "(reason='Reason for lifting the warn')\n", (12814, 12852), False, 'from discord import app_commands\n'), ((14679, 14691), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (14689, 14691), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((14697, 14730), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (14716, 14730), False, 'from discord import app_commands\n'), ((14736, 14788), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Edit case reason"""'}), "(description='Edit case reason')\n", (14756, 14788), False, 'from discord import app_commands\n'), ((14794, 14848), 'discord.app_commands.describe', 'app_commands.describe', ([], {'member': '"""Member to edit case of"""'}), "(member='Member to edit case of')\n", (14815, 14848), False, 'from discord import app_commands\n'), ((14854, 14914), 'discord.app_commands.describe', 'app_commands.describe', ([], {'case_id': '"""Case ID of the case to edit"""'}), "(case_id='Case ID of the case to edit')\n", (14875, 14914), False, 'from discord import app_commands\n'), ((14920, 14972), 'discord.app_commands.autocomplete', 'app_commands.autocomplete', ([], {'case_id': 'warn_autocomplete'}), '(case_id=warn_autocomplete)\n', (14945, 14972), False, 'from discord import app_commands\n'), ((14978, 15037), 'discord.app_commands.describe', 'app_commands.describe', ([], {'new_reason': '"""New reason for the case"""'}), "(new_reason='New reason for the case')\n", (14999, 15037), False, 'from discord import app_commands\n'), ((17291, 17303), 'utils.framework.mod_and_up', 'mod_and_up', ([], {}), '()\n', (17301, 17303), False, 'from utils.framework import mod_and_up, ModsAndAboveMemberOrUser, Duration, ModsAndAboveMember, UserOnly\n'), ((17309, 17342), 'discord.app_commands.guilds', 'app_commands.guilds', (['cfg.guild_id'], {}), '(cfg.guild_id)\n', (17328, 17342), False, 'from discord import app_commands\n'), ((17348, 17400), 'discord.app_commands.command', 'app_commands.command', ([], {'description': '"""Edit case reason"""'}), "(description='Edit case reason')\n", (17368, 17400), False, 'from discord import app_commands\n'), ((17406, 17466), 'discord.app_commands.describe', 'app_commands.describe', ([], {'member': '"""Member to remove points from"""'}), "(member='Member to remove points from')\n", (17427, 17466), False, 'from discord import app_commands\n'), ((17472, 17530), 'discord.app_commands.describe', 'app_commands.describe', ([], {'points': '"""Amount of points to remove"""'}), "(points='Amount of points to remove')\n", (17493, 17530), False, 'from discord import app_commands\n'), ((17536, 17594), 'discord.app_commands.describe', 'app_commands.describe', ([], {'reason': '"""Reason for removing points"""'}), "(reason='Reason for removing points')\n", (17557, 17594), False, 'from discord import app_commands\n'), ((2008, 2031), 'discord.utils.escape_markdown', 'escape_markdown', (['reason'], {}), '(reason)\n', (2023, 2031), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((2049, 2072), 'discord.utils.escape_mentions', 'escape_mentions', (['reason'], {}), '(reason)\n', (2064, 2072), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((2093, 2118), 'data.services.guild_service.get_guild', 'guild_service.get_guild', ([], {}), '()\n', (2116, 2118), False, 'from data.services import guild_service, 
user_service\n'), ((2134, 2224), 'utils.mod.add_kick_case', 'add_kick_case', ([], {'target_member': 'member', 'mod': 'ctx.author', 'reason': 'reason', 'db_guild': 'db_guild'}), '(target_member=member, mod=ctx.author, reason=reason, db_guild\n =db_guild)\n', (2147, 2224), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((2888, 2913), 'data.services.guild_service.get_guild', 'guild_service.get_guild', ([], {}), '()\n', (2911, 2913), False, 'from data.services import guild_service, user_service\n'), ((2929, 3019), 'utils.mod.add_kick_case', 'add_kick_case', ([], {'target_member': 'member', 'mod': 'ctx.author', 'reason': 'reason', 'db_guild': 'db_guild'}), '(target_member=member, mod=ctx.author, reason=reason, db_guild\n =db_guild)\n', (2942, 3019), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((3725, 3748), 'discord.utils.escape_markdown', 'escape_markdown', (['reason'], {}), '(reason)\n', (3740, 3748), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((3766, 3789), 'discord.utils.escape_mentions', 'escape_mentions', (['reason'], {}), '(reason)\n', (3781, 3789), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((3805, 3834), 'datetime.datetime.now', 'datetime.now', ([], {'tz': 'timezone.utc'}), '(tz=timezone.utc)\n', (3817, 3834), False, 'from datetime import datetime, timedelta, timezone\n'), ((4254, 4279), 'data.services.guild_service.get_guild', 'guild_service.get_guild', ([], {}), '()\n', (4277, 4279), False, 'from data.services import guild_service, user_service\n'), ((4544, 4601), 'humanize.naturaldelta', 'humanize.naturaldelta', (['(time - now)'], {'minimum_unit': '"""seconds"""'}), "(time - now, minimum_unit='seconds')\n", (4565, 4601), False, 'import humanize\n'), ((4892, 4918), 'data.services.guild_service.inc_caseid', 'guild_service.inc_caseid', ([], {}), '()\n', (4916, 4918), False, 'from data.services import guild_service, user_service\n'), ((4927, 4965), 'data.services.user_service.add_case', 'user_service.add_case', (['member.id', 'case'], {}), '(member.id, case)\n', (4948, 4965), False, 'from data.services import guild_service, user_service\n'), ((4981, 5023), 'utils.mod.prepare_mute_log', 'prepare_mute_log', (['ctx.author', 'member', 'case'], {}), '(ctx.author, member, case)\n', (4997, 5023), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((5683, 5708), 'data.services.guild_service.get_guild', 'guild_service.get_guild', ([], {}), '()\n', (5706, 5708), False, 'from data.services import guild_service, user_service\n'), ((6167, 6193), 'data.services.guild_service.inc_caseid', 'guild_service.inc_caseid', ([], {}), '()\n', (6191, 6193), False, 'from data.services import guild_service, user_service\n'), ((6202, 6240), 'data.services.user_service.add_case', 'user_service.add_case', (['member.id', 'case'], {}), '(member.id, case)\n', (6223, 6240), False, 'from data.services import guild_service, user_service\n'), ((6256, 6300), 'utils.mod.prepare_unmute_log', 'prepare_unmute_log', (['ctx.author', 
'member', 'case'], {}), '(ctx.author, member, case)\n', (6274, 6300), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((6863, 6886), 'discord.utils.escape_markdown', 'escape_markdown', (['reason'], {}), '(reason)\n', (6878, 6886), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((6904, 6927), 'discord.utils.escape_mentions', 'escape_mentions', (['reason'], {}), '(reason)\n', (6919, 6927), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((6947, 6972), 'data.services.guild_service.get_guild', 'guild_service.get_guild', ([], {}), '()\n', (6970, 6972), False, 'from data.services import guild_service, user_service\n'), ((8414, 8437), 'discord.utils.escape_markdown', 'escape_markdown', (['reason'], {}), '(reason)\n', (8429, 8437), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((8455, 8478), 'discord.utils.escape_mentions', 'escape_mentions', (['reason'], {}), '(reason)\n', (8470, 8478), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((8498, 8523), 'data.services.guild_service.get_guild', 'guild_service.get_guild', ([], {}), '()\n', (8521, 8523), False, 'from data.services import guild_service, user_service\n'), ((9203, 9238), 'utils.views.confirm.SecondStaffConfirm', 'SecondStaffConfirm', (['ctx', 'ctx.author'], {}), '(ctx, ctx.author)\n', (9221, 9238), False, 'from utils.views.confirm import SecondStaffConfirm\n'), ((10852, 10875), 'discord.utils.escape_markdown', 'escape_markdown', (['reason'], {}), '(reason)\n', (10867, 10875), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((10893, 10916), 'discord.utils.escape_mentions', 'escape_mentions', (['reason'], {}), '(reason)\n', (10908, 10916), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((11290, 11315), 'data.services.guild_service.get_guild', 'guild_service.get_guild', ([], {}), '()\n', (11313, 11315), False, 'from data.services import guild_service, user_service\n'), ((11514, 11540), 'data.services.guild_service.inc_caseid', 'guild_service.inc_caseid', ([], {}), '()\n', (11538, 11540), False, 'from data.services import guild_service, user_service\n'), ((11549, 11585), 'data.services.user_service.add_case', 'user_service.add_case', (['user.id', 'case'], {}), '(user.id, case)\n', (11570, 11585), False, 'from data.services import guild_service, user_service\n'), ((11601, 11642), 'utils.mod.prepare_unban_log', 'prepare_unban_log', (['ctx.author', 'user', 'case'], {}), '(ctx.author, user, case)\n', (11618, 11642), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((13002, 13035), 'data.services.user_service.get_cases', 'user_service.get_cases', (['member.id'], {}), '(member.id)\n', (13024, 13035), False, 'from data.services import guild_service, user_service\n'), ((13109, 13132), 'discord.utils.escape_markdown', 'escape_markdown', (['reason'], {}), '(reason)\n', (13124, 13132), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((13150, 13173), 'discord.utils.escape_mentions', 'escape_mentions', (['reason'], {}), '(reason)\n', (13165, 13173), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((13634, 
13669), 'data.services.user_service.get_user', 'user_service.get_user', ([], {'id': 'member.id'}), '(id=member.id)\n', (13655, 13669), False, 'from data.services import guild_service, user_service\n'), ((14111, 14125), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (14123, 14125), False, 'from datetime import datetime, timedelta, timezone\n'), ((14388, 14434), 'utils.mod.prepare_liftwarn_log', 'prepare_liftwarn_log', (['ctx.author', 'member', 'case'], {}), '(ctx.author, member, case)\n', (14408, 14434), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((15244, 15277), 'data.services.user_service.get_cases', 'user_service.get_cases', (['member.id'], {}), '(member.id)\n', (15266, 15277), False, 'from data.services import guild_service, user_service\n'), ((15355, 15382), 'discord.utils.escape_markdown', 'escape_markdown', (['new_reason'], {}), '(new_reason)\n', (15370, 15382), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((15404, 15431), 'discord.utils.escape_mentions', 'escape_mentions', (['new_reason'], {}), '(new_reason)\n', (15419, 15431), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((15676, 15690), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (15688, 15690), False, 'from datetime import datetime, timedelta, timezone\n'), ((15747, 15807), 'utils.mod.prepare_editreason_log', 'prepare_editreason_log', (['ctx.author', 'member', 'case', 'old_reason'], {}), '(ctx.author, member, case, old_reason)\n', (15769, 15807), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((17776, 17799), 'discord.utils.escape_markdown', 'escape_markdown', (['reason'], {}), '(reason)\n', (17791, 17799), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((17817, 17840), 'discord.utils.escape_mentions', 'escape_mentions', (['reason'], {}), '(reason)\n', (17832, 17840), False, 'from discord.utils import escape_markdown, escape_mentions\n'), ((17950, 17985), 'data.services.user_service.get_user', 'user_service.get_user', ([], {'id': 'member.id'}), '(id=member.id)\n', (17971, 17985), False, 'from data.services import guild_service, user_service\n'), ((18299, 18346), 'data.services.user_service.inc_points', 'user_service.inc_points', (['member.id', '(-1 * points)'], {}), '(member.id, -1 * points)\n', (18322, 18346), False, 'from data.services import guild_service, user_service\n'), ((18367, 18392), 'data.services.guild_service.get_guild', 'guild_service.get_guild', ([], {}), '()\n', (18390, 18392), False, 'from data.services import guild_service, user_service\n'), ((18686, 18712), 'data.services.guild_service.inc_caseid', 'guild_service.inc_caseid', ([], {}), '()\n', (18710, 18712), False, 'from data.services import guild_service, user_service\n'), ((18746, 18784), 'data.services.user_service.add_case', 'user_service.add_case', (['member.id', 'case'], {}), '(member.id, case)\n', (18767, 18784), False, 'from data.services import guild_service, user_service\n'), ((18883, 18933), 'utils.mod.prepare_removepoints_log', 'prepare_removepoints_log', (['ctx.author', 'member', 'case'], {}), '(ctx.author, member, case)\n', (18907, 18933), False, 'from utils.mod import 
add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((1506, 1567), 'discord.ext.commands.BadArgument', 'commands.BadArgument', ([], {'message': '"""Points can\'t be lower than 1."""'}), '(message="Points can\'t be lower than 1.")\n', (1526, 1567), False, 'from discord.ext import commands\n'), ((1583, 1658), 'utils.mod.warn', 'warn', (['ctx'], {'target_member': 'user', 'mod': 'ctx.author', 'points': 'points', 'reason': 'reason'}), '(ctx, target_member=user, mod=ctx.author, points=points, reason=reason)\n', (1587, 1658), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((2234, 2300), 'utils.mod.notify_user', 'notify_user', (['member', 'f"""You were kicked from {ctx.guild.name}"""', 'log'], {}), "(member, f'You were kicked from {ctx.guild.name}', log)\n", (2245, 2300), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((2420, 2465), 'utils.mod.submit_public_log', 'submit_public_log', (['ctx', 'db_guild', 'member', 'log'], {}), '(ctx, db_guild, member, log)\n', (2437, 2465), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((3029, 3095), 'utils.mod.notify_user', 'notify_user', (['member', 'f"""You were kicked from {ctx.guild.name}"""', 'log'], {}), "(member, f'You were kicked from {ctx.guild.name}', log)\n", (3040, 3095), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((3215, 3260), 'utils.mod.submit_public_log', 'submit_public_log', (['ctx', 'db_guild', 'member', 'log'], {}), '(ctx, db_guild, member, log)\n', (3232, 3260), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((3905, 3959), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""Please input a valid duration!"""'], {}), "('Please input a valid duration!')\n", (3925, 3959), False, 'from discord.ext import commands\n'), ((4013, 4064), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""This user is already muted."""'], {}), "('This user is already muted.')\n", (4033, 4064), False, 'from discord.ext import commands\n'), ((4087, 4111), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'delta'}), '(seconds=delta)\n', (4096, 4111), False, 'from datetime import datetime, timedelta, timezone\n'), ((4174, 4233), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""Mutes can\'t be longer than 14 days!"""'], {}), '("Mutes can\'t be longer than 14 days!")\n', (4194, 4233), False, 'from discord.ext import commands\n'), ((5190, 5258), 'utils.mod.notify_user', 'notify_user', (['member', 'f"""You have been muted in {ctx.guild.name}"""', 'log'], {}), "(member, f'You have been 
muted in {ctx.guild.name}', log)\n", (5201, 5258), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((5273, 5324), 'utils.mod.submit_public_log', 'submit_public_log', (['ctx', 'db_guild', 'member', 'log', 'dmed'], {}), '(ctx, db_guild, member, log, dmed)\n', (5290, 5324), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((5766, 5813), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""This user is not muted."""'], {}), "('This user is not muted.')\n", (5786, 5813), False, 'from discord.ext import commands\n'), ((6386, 6456), 'utils.mod.notify_user', 'notify_user', (['member', 'f"""You have been unmuted in {ctx.guild.name}"""', 'log'], {}), "(member, f'You have been unmuted in {ctx.guild.name}', log)\n", (6397, 6456), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((6471, 6522), 'utils.mod.submit_public_log', 'submit_public_log', (['ctx', 'db_guild', 'member', 'log', 'dmed'], {}), '(ctx, db_guild, member, log, dmed)\n', (6488, 6522), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((7344, 7392), 'utils.mod.add_ban_case', 'add_ban_case', (['user', 'ctx.author', 'reason', 'db_guild'], {}), '(user, ctx.author, reason, db_guild)\n', (7356, 7392), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((8013, 8056), 'utils.mod.submit_public_log', 'submit_public_log', (['ctx', 'db_guild', 'user', 'log'], {}), '(ctx, db_guild, user, log)\n', (8030, 8056), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((9518, 9566), 'utils.mod.add_ban_case', 'add_ban_case', (['user', 'ctx.author', 'reason', 'db_guild'], {}), '(user, ctx.author, reason, db_guild)\n', (9530, 9566), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((10307, 10350), 'utils.mod.submit_public_log', 'submit_public_log', (['ctx', 'db_guild', 'user', 'log'], {}), '(ctx, db_guild, user, log)\n', (10324, 10350), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((10746, 10816), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""You can\'t unban someone already in the server!"""'], {}), '("You can\'t unban someone already in the server!")\n', (10766, 10816), False, 'from discord.ext import 
commands\n'), ((10990, 11037), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""That user isn\'t banned!"""'], {}), '("That user isn\'t banned!")\n', (11010, 11037), False, 'from discord.ext import commands\n'), ((11720, 11763), 'utils.mod.submit_public_log', 'submit_public_log', (['ctx', 'db_guild', 'user', 'log'], {}), '(ctx, db_guild, user, log)\n', (11737, 11763), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((12109, 12183), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""Number of messages to purge must be greater than 0"""'], {}), "('Number of messages to purge must be greater than 0')\n", (12129, 12183), False, 'from discord.ext import commands\n'), ((13242, 13313), 'discord.ext.commands.BadArgument', 'commands.BadArgument', ([], {'message': 'f"""{member} has no case with ID {case_id}"""'}), "(message=f'{member} has no case with ID {case_id}')\n", (13262, 13313), False, 'from discord.ext import commands\n'), ((13741, 13868), 'discord.ext.commands.BadArgument', 'commands.BadArgument', ([], {'message': 'f"""Can\'t lift Case #{case_id} because it would make {member.mention}\'s points negative."""'}), '(message=\n f"Can\'t lift Case #{case_id} because it would make {member.mention}\'s points negative."\n )\n', (13761, 13868), False, 'from discord.ext import commands\n'), ((14456, 14526), 'utils.mod.notify_user', 'notify_user', (['member', 'f"""Your warn has been lifted in {ctx.guild}."""', 'log'], {}), "(member, f'Your warn has been lifted in {ctx.guild}.', log)\n", (14467, 14526), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((15500, 15571), 'discord.ext.commands.BadArgument', 'commands.BadArgument', ([], {'message': 'f"""{member} has no case with ID {case_id}"""'}), "(message=f'{member} has no case with ID {case_id}')\n", (15520, 15571), False, 'from discord.ext import commands\n'), ((15830, 15901), 'utils.mod.notify_user', 'notify_user', (['member', 'f"""Your case was updated in {ctx.guild.name}."""', 'log'], {}), "(member, f'Your case was updated in {ctx.guild.name}.', log)\n", (15841, 15901), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((17883, 17936), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""Points can\'t be lower than 1."""'], {}), '("Points can\'t be lower than 1.")\n', (17903, 17936), False, 'from discord.ext import commands\n'), ((18043, 18172), 'discord.ext.commands.BadArgument', 'commands.BadArgument', ([], {'message': 'f"""Can\'t remove {points} points because it would make {member.mention}\'s points negative."""'}), '(message=\n f"Can\'t remove {points} points because it would make {member.mention}\'s points negative."\n )\n', (18063, 18172), False, 'from discord.ext import commands\n'), ((18955, 19029), 'utils.mod.notify_user', 'notify_user', (['member', 'f"""Your points were removed in {ctx.guild.name}."""', 'log'], {}), "(member, f'Your points were removed in {ctx.guild.name}.', log)\n", (18966, 19029), False, 'from utils.mod import add_ban_case, add_kick_case, 
notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((19107, 19158), 'utils.mod.submit_public_log', 'submit_public_log', (['ctx', 'db_guild', 'member', 'log', 'dmed'], {}), '(ctx, db_guild, member, log, dmed)\n', (19124, 19158), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((4136, 4154), 'datetime.timedelta', 'timedelta', ([], {'days': '(14)'}), '(days=14)\n', (4145, 4154), False, 'from datetime import datetime, timedelta, timezone\n'), ((4794, 4865), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""The database thinks this user is already muted."""'], {}), "('The database thinks this user is already muted.')\n", (4814, 4865), False, 'from discord.ext import commands\n'), ((7230, 7282), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""That user is already banned!"""'], {}), "('That user is already banned!')\n", (7250, 7282), False, 'from discord.ext import commands\n'), ((8781, 8833), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['"""That user is already banned!"""'], {}), "('That user is already banned!')\n", (8801, 8833), False, 'from discord.ext import commands\n'), ((9163, 9186), 'discord.Color.blurple', 'discord.Color.blurple', ([], {}), '()\n', (9184, 9186), False, 'import discord\n'), ((11180, 11226), 'discord.ext.commands.BadArgument', 'commands.BadArgument', (['f"""{user} is not banned."""'], {}), "(f'{user} is not banned.')\n", (11200, 11226), False, 'from discord.ext import commands\n'), ((13384, 13475), 'discord.ext.commands.BadArgument', 'commands.BadArgument', ([], {'message': 'f"""{member}\'s case with ID {case_id} is not a warn case."""'}), '(message=\n f"{member}\'s case with ID {case_id} is not a warn case.")\n', (13404, 13475), False, 'from discord.ext import commands\n'), ((14627, 14652), 'data.services.guild_service.get_guild', 'guild_service.get_guild', ([], {}), '()\n', (14650, 14652), False, 'from data.services import guild_service, user_service\n'), ((15960, 15985), 'data.services.guild_service.get_guild', 'guild_service.get_guild', ([], {}), '()\n', (15983, 15985), False, 'from data.services import guild_service, user_service\n'), ((7494, 7563), 'utils.mod.notify_user', 'notify_user', (['user', 'f"""You have been banned from {ctx.guild.name}"""', 'log'], {}), "(user, f'You have been banned from {ctx.guild.name}', log)\n", (7505, 7563), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((7604, 7775), 'utils.mod.notify_user', 'notify_user', (['user', 'f"""You have been banned from {ctx.guild.name}\n\nIf you would like to appeal your ban, please fill out this form: <{cfg.ban_appeal_url}>"""', 'log'], {}), '(user,\n f"""You have been banned from {ctx.guild.name}\n\nIf you would like to appeal your ban, please fill out this form: <{cfg.ban_appeal_url}>"""\n , log)\n', (7615, 7775), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((7908, 7934), 'discord.Object', 
'discord.Object', ([], {'id': 'user.id'}), '(id=user.id)\n', (7922, 7934), False, 'import discord\n'), ((9742, 9811), 'utils.mod.notify_user', 'notify_user', (['user', 'f"""You have been banned from {ctx.guild.name}"""', 'log'], {}), "(user, f'You have been banned from {ctx.guild.name}', log)\n", (9753, 9811), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((9852, 10023), 'utils.mod.notify_user', 'notify_user', (['user', 'f"""You have been banned from {ctx.guild.name}\n\nIf you would like to appeal your ban, please fill out this form: <{cfg.ban_appeal_url}>"""', 'log'], {}), '(user,\n f"""You have been banned from {ctx.guild.name}\n\nIf you would like to appeal your ban, please fill out this form: <{cfg.ban_appeal_url}>"""\n , log)\n', (9863, 10023), False, 'from utils.mod import add_ban_case, add_kick_case, notify_user, prepare_editreason_log, prepare_liftwarn_log, prepare_mute_log, prepare_removepoints_log, prepare_unban_log, prepare_unmute_log, submit_public_log, warn\n'), ((10155, 10181), 'discord.Object', 'discord.Object', ([], {'id': 'user.id'}), '(id=user.id)\n', (10169, 10181), False, 'import discord\n'), ((11086, 11112), 'discord.Object', 'discord.Object', ([], {'id': 'user.id'}), '(id=user.id)\n', (11100, 11112), False, 'import discord\n'), ((13532, 13603), 'discord.ext.commands.BadArgument', 'commands.BadArgument', ([], {'message': 'f"""Case with ID {case_id} already lifted."""'}), "(message=f'Case with ID {case_id} already lifted.')\n", (13552, 13603), False, 'from discord.ext import commands\n')]
|
'''
------------------------------------------------------------------------
MOMENTUM.PY
------------------------------------------------------------------------
If the Extreme, Overtraded or Wix indicators issue long or short signals,
trigger the executor
'''
'''
------------------------------------------------------------------------
IMPORTS
------------------------------------------------------------------------
'''
# Standard library imports
pass
# Third party imports
import pandas as pd
import numpy as np
# Local application imports
from ccat import wix
from ccat import overtraded
from ccat import extreme
# from ccat import height
# from ccat import ema
# from ccat import df_x_df
'''
------------------------------------------------------------------------
CLASSES
------------------------------------------------------------------------
'''
class Momentum:
def __init__(self,
df_bucket:pd.DataFrame,
len_ma_top_wix:int,
len_ma_bottom_wix:int,
len_ma_top_Extreme:int,
len_ma_bottom_Extreme:int,
len_rsi:int,
overbought:int,
oversold:int,
peak:int,
trough:int,
col:str = 'price_close'):
# Shared
self.df_bucket = df_bucket
# Wix
self.len_ma_top_wix = len_ma_top_wix
self.len_ma_bottom_wix = len_ma_bottom_wix
# Extreme
self.len_ma_top_Extreme = len_ma_top_Extreme
self.len_ma_bottom_Extreme = len_ma_bottom_Extreme
# Overtraded
self.len_rsi = len_rsi
self.overbought = overbought
self.oversold = oversold
self.peak = peak
self.trough = trough
self.col = col
def wixes(self):
'''Get Wix signal'''
w = wix.Wix(
df_bucket = self.df_bucket,
len_ma_top = self.len_ma_top_wix,
len_ma_bottom = self.len_ma_bottom_wix)
df_wix = w.get()
return df_wix
def extreme(self):
'''Get Extreme signal
'''
e = extreme.Extreme(
df_bucket = self.df_bucket,
len_ma_top = self.len_ma_top_Extreme,
len_ma_bottom = self.len_ma_bottom_Extreme)
df_extreme = e.get()
return df_extreme
def overtraded(self):
'''Get Overtraded signal
'''
o = overtraded.Overtraded(
df_bucket = self.df_bucket,
len_rsi = self.len_rsi,
overbought = self.overbought,
oversold = self.oversold,
peak = self.peak,
trough = self.trough,
col = self.col)
df_overtraded = o.get()
return df_overtraded
def merge(self):
        ''' Merges the Wix, Overtraded and Extreme signal dataframes into
        df_out and compiles a combined signal column
'''
# Initialize df_out dataframe
self.df_out = pd.DataFrame()
# Read the individual signals used in the strategy
df_w = self.wixes()
df_o = self.overtraded()
df_e = self.extreme()
# Merge the three dataframes
# self.df_out = pd.merge(df_w, df_o, on='id')
# Merge the three dataframes
        self.df_out = pd.merge(
            pd.merge(df_w, df_o, on='id'),
            df_e, on='id')
cols = [
'signal_wix',
'signal_overtraded',
'signal_extreme']
# Compiled signal
self.df_out['signal'] = self.df_out[cols].sum(axis=1)
def signals(self):
'''Triggers the chain of methods and returns the df_out
dataframe
'''
self.merge()
return self.df_out
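# Editorial sketch (not part of the original module): 'signal' is the plain
# sum of the three component signals, so assuming each component emits values
# in {-1, 0, 1} (an assumption, not verified here), the sum ranges from -3 to
# 3 and a hypothetical executor could threshold it, e.g. with the `m` built
# in the demo below:
#
#     df = m.signals()
#     df['side'] = np.where(df['signal'] >= 2, 'long',
#                  np.where(df['signal'] <= -2, 'short', 'flat'))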
'''
------------------------------------------------------------------------
__MAIN__
------------------------------------------------------------------------
'''
if __name__ == '__main__':
from ccat import config as cnf
from ccat import bucket
# Create a momentum strategy for the 1d BTCUSD candles on Bitmex
# Settings
market_id = 1 # Bitmex
timeframe_id = 6 # 1d
time_end = cnf.now()
count = 500
len_ma_top_wix = 40
len_ma_bottom_wix = 40
len_ma_top_Extreme = 40
len_ma_bottom_Extreme = 40
len_rsi = 40
overbought = 60
oversold = 40
peak = 92
trough = 32
col = 'price_close'
# Get a bucket object from Bucket
b = bucket.Bucket(market_id=market_id, timeframe_id=timeframe_id)
# Update the table
b.update()
# Get a dataframe with all the data for the market and timeframe
df_bucket = b.read_until(count = count, time_end = time_end)
m = Momentum(
df_bucket = df_bucket,
len_ma_top_wix=len_ma_top_wix,
len_ma_bottom_wix=len_ma_bottom_wix,
len_ma_top_Extreme=len_ma_top_Extreme,
len_ma_bottom_Extreme=len_ma_bottom_Extreme,
len_rsi=len_rsi,
overbought=overbought,
oversold=oversold,
peak=peak,
trough=trough,
col=col)
df_signal = m.signals()
df_s = df_signal[['id', 'signal']]
df_b = df_bucket[['id', 'time_close', 'price_close']]
# print(df_s)
df_out = pd.merge(df_b, df_s, on='id')
print(df_out)
|
[
"pandas.DataFrame",
"ccat.bucket.Bucket",
"ccat.wix.Wix",
"pandas.merge",
"ccat.extreme.Extreme",
"ccat.config.now",
"ccat.overtraded.Overtraded"
] |
[((4125, 4134), 'ccat.config.now', 'cnf.now', ([], {}), '()\n', (4132, 4134), True, 'from ccat import config as cnf\n'), ((4422, 4483), 'ccat.bucket.Bucket', 'bucket.Bucket', ([], {'market_id': 'market_id', 'timeframe_id': 'timeframe_id'}), '(market_id=market_id, timeframe_id=timeframe_id)\n', (4435, 4483), False, 'from ccat import bucket\n'), ((5192, 5221), 'pandas.merge', 'pd.merge', (['df_b', 'df_s'], {'on': '"""id"""'}), "(df_b, df_s, on='id')\n", (5200, 5221), True, 'import pandas as pd\n'), ((1806, 1913), 'ccat.wix.Wix', 'wix.Wix', ([], {'df_bucket': 'self.df_bucket', 'len_ma_top': 'self.len_ma_top_wix', 'len_ma_bottom': 'self.len_ma_bottom_wix'}), '(df_bucket=self.df_bucket, len_ma_top=self.len_ma_top_wix,\n len_ma_bottom=self.len_ma_bottom_wix)\n', (1813, 1913), False, 'from ccat import wix\n'), ((2082, 2206), 'ccat.extreme.Extreme', 'extreme.Extreme', ([], {'df_bucket': 'self.df_bucket', 'len_ma_top': 'self.len_ma_top_Extreme', 'len_ma_bottom': 'self.len_ma_bottom_Extreme'}), '(df_bucket=self.df_bucket, len_ma_top=self.\n len_ma_top_Extreme, len_ma_bottom=self.len_ma_bottom_Extreme)\n', (2097, 2206), False, 'from ccat import extreme\n'), ((2388, 2567), 'ccat.overtraded.Overtraded', 'overtraded.Overtraded', ([], {'df_bucket': 'self.df_bucket', 'len_rsi': 'self.len_rsi', 'overbought': 'self.overbought', 'oversold': 'self.oversold', 'peak': 'self.peak', 'trough': 'self.trough', 'col': 'self.col'}), '(df_bucket=self.df_bucket, len_rsi=self.len_rsi,\n overbought=self.overbought, oversold=self.oversold, peak=self.peak,\n trough=self.trough, col=self.col)\n', (2409, 2567), False, 'from ccat import overtraded\n'), ((2891, 2905), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2903, 2905), True, 'import pandas as pd\n'), ((3231, 3260), 'pandas.merge', 'pd.merge', (['df_w', 'df_o'], {'on': '"""id"""'}), "(df_w, df_o, on='id')\n", (3239, 3260), True, 'import pandas as pd\n')]
|
from datetime import datetime
from multiprocessing import Array
from queue import Empty, Full
import numpy as np
from arrayqueues.portable_queue import PortableQueue
class ArrayView:
def __init__(self, array, max_bytes, dtype, el_shape, i_item=0):
self.dtype = dtype
self.el_shape = el_shape
self.nbytes_el = self.dtype.itemsize * np.product(self.el_shape)
self.n_items = int(np.floor(max_bytes / self.nbytes_el))
self.total_shape = (self.n_items,) + self.el_shape
self.i_item = i_item
self.view = np.frombuffer(array, dtype, np.product(self.total_shape)).reshape(
self.total_shape
)
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(self, other.__class__):
return self.el_shape == other.el_shape and self.dtype == other.dtype
return False
def push(self, element):
self.view[self.i_item, ...] = element
i_inserted = self.i_item
self.i_item = (self.i_item + 1) % self.n_items
# a tuple is returned to maximise performance
return self.dtype, self.el_shape, i_inserted
def pop(self, i_item):
return self.view[i_item, ...]
def fits(self, item):
if isinstance(item, np.ndarray):
return item.dtype == self.dtype and item.shape == self.el_shape
return (
item[0] == self.dtype
and item[1] == self.el_shape
and item[2] < self.n_items
)
class ArrayQueue:
"""A drop-in replacement for the multiprocessing queue, usable
only for numpy arrays, which removes the need for pickling and
should provide higher speeds and lower memory usage
"""
def __init__(self, max_mbytes=10):
self.maxbytes = int(max_mbytes * 1000000)
self.array = Array("c", self.maxbytes)
self.view = None
self.queue = PortableQueue()
self.read_queue = PortableQueue()
self.last_item = 0
def check_full(self):
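        # drain read_queue to learn the most recent slot the consumer has
        # finished with; if the writer's next slot is that same one, the ring
        # buffer has wrapped around unread data and the queue is full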
while True:
try:
self.last_item = self.read_queue.get(timeout=0.00001)
except Empty:
break
if self.view.i_item == self.last_item:
raise Full(
"Queue of length {} full when trying to insert {},"
" last item read was {}".format(
self.view.n_items, self.view.i_item, self.last_item
)
)
def put(self, element):
if self.view is None or not self.view.fits(element):
self.view = ArrayView(
self.array.get_obj(), self.maxbytes, element.dtype, element.shape
)
self.last_item = 0
else:
self.check_full()
qitem = self.view.push(element)
self.queue.put(qitem)
def get(self, **kwargs):
aritem = self.queue.get(**kwargs)
if self.view is None or not self.view.fits(aritem):
self.view = ArrayView(self.array.get_obj(), self.maxbytes, *aritem)
self.read_queue.put(aritem[2])
return self.view.pop(aritem[2])
def clear(self):
"""Empties the queue without the need to read all the existing
elements
:return: nothing
"""
self.view = None
while True:
try:
_ = self.queue.get_nowait()
except Empty:
break
while True:
try:
_ = self.read_queue.get_nowait()
except Empty:
break
self.last_item = 0
def empty(self):
return self.queue.empty()
def qsize(self):
return self.queue.qsize()
class TimestampedArrayQueue(ArrayQueue):
"""A small extension to support timestamps saved alongside arrays"""
def put(self, element, timestamp=None):
if self.view is None or not self.view.fits(element):
self.view = ArrayView(
self.array.get_obj(), self.maxbytes, element.dtype, element.shape
)
else:
self.check_full()
qitem = self.view.push(element)
if timestamp is None:
timestamp = datetime.now()
self.queue.put((timestamp, qitem))
def get(self, **kwargs):
timestamp, aritem = self.queue.get(**kwargs)
if self.view is None or not self.view.fits(aritem):
self.view = ArrayView(self.array.get_obj(), self.maxbytes, *aritem)
self.read_queue.put(aritem[2])
return timestamp, self.view.pop(aritem[2])
class IndexedArrayQueue(ArrayQueue):
"""A small extension to support timestamps saved alongside arrays"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.counter = 0
def put(self, element, timestamp=None):
if self.view is None or not self.view.fits(element):
self.view = ArrayView(
self.array.get_obj(), self.maxbytes, element.dtype, element.shape
)
else:
self.check_full()
qitem = self.view.push(element)
if timestamp is None:
timestamp = datetime.now()
self.queue.put((timestamp, self.counter, qitem))
self.counter += 1
def get(self, **kwargs):
timestamp, index, aritem = self.queue.get(**kwargs)
if self.view is None or not self.view.fits(aritem):
self.view = ArrayView(self.array.get_obj(), self.maxbytes, *aritem)
self.read_queue.put(aritem[2])
return timestamp, index, self.view.pop(aritem[2])
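# A minimal usage sketch (illustrative addition, not part of the original
# module): a producer process pushes numpy frames through an ArrayQueue and
# the parent reads them back. Assumes a fork-style multiprocessing start
# method so the shared Array is inherited by the child process.
def _produce(queue, n=3):
    for i in range(n):
        queue.put(np.full((2, 2), i, dtype=np.uint8))
if __name__ == "__main__":
    from multiprocessing import Process
    q = ArrayQueue(max_mbytes=1)
    producer = Process(target=_produce, args=(q,))
    producer.start()
    for _ in range(3):
        print(q.get(timeout=5))
    producer.join()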
|
[
"multiprocessing.Array",
"arrayqueues.portable_queue.PortableQueue",
"numpy.floor",
"numpy.product",
"datetime.datetime.now"
] |
[((1929, 1954), 'multiprocessing.Array', 'Array', (['"""c"""', 'self.maxbytes'], {}), "('c', self.maxbytes)\n", (1934, 1954), False, 'from multiprocessing import Array\n'), ((2001, 2016), 'arrayqueues.portable_queue.PortableQueue', 'PortableQueue', ([], {}), '()\n', (2014, 2016), False, 'from arrayqueues.portable_queue import PortableQueue\n'), ((2043, 2058), 'arrayqueues.portable_queue.PortableQueue', 'PortableQueue', ([], {}), '()\n', (2056, 2058), False, 'from arrayqueues.portable_queue import PortableQueue\n'), ((443, 468), 'numpy.product', 'np.product', (['self.el_shape'], {}), '(self.el_shape)\n', (453, 468), True, 'import numpy as np\n'), ((496, 532), 'numpy.floor', 'np.floor', (['(max_bytes / self.nbytes_el)'], {}), '(max_bytes / self.nbytes_el)\n', (504, 532), True, 'import numpy as np\n'), ((4287, 4301), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4299, 4301), False, 'from datetime import datetime\n'), ((5257, 5271), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5269, 5271), False, 'from datetime import datetime\n'), ((670, 698), 'numpy.product', 'np.product', (['self.total_shape'], {}), '(self.total_shape)\n', (680, 698), True, 'import numpy as np\n')]
|
# ------------------------------
# 460. LFU Cache
#
# Description:
# Design and implement a data structure for Least Frequently Used (LFU) cache. It should support the following operations: get and put.
# get(key) - Get the value (will always be positive) of the key if the key exists in the cache, otherwise return -1.
# put(key, value) - Set or insert the value if the key is not already present. When the cache reaches its capacity, it should invalidate the least frequently used item before inserting a new item. For the purpose of this problem, when there is a tie (i.e., two or more keys that have the same frequency), the least recently used key would be evicted.
#
# Follow up:
# Could you do both operations in O(1) time complexity?
#
# Example:
# LFUCache cache = new LFUCache( 2 /* capacity */ );
#
# cache.put(1, 1);
# cache.put(2, 2);
# cache.get(1); // returns 1
# cache.put(3, 3); // evicts key 2
# cache.get(2); // returns -1 (not found)
# cache.get(3); // returns 3.
# cache.put(4, 4); // evicts key 1.
# cache.get(1); // returns -1 (not found)
# cache.get(3); // returns 3
# cache.get(4); // returns 4
#
# Version: 1.0
# 11/02/18 by Jianfa
# ------------------------------
from collections import OrderedDict
class LFUCache:
def __init__(self, capacity):
"""
:type capacity: int
"""
self.capacity = capacity
self.valueDict = {} # value of key
self.countDict = {} # count of key
self.frequencyDict = {} # {fre, OrderedDict} keys of every frequency number. OrderedDict can be sorted so use it to record recently used order
self.frequencyDict[1] = OrderedDict()
self.min = -1 # least frequency so far
def get(self, key):
"""
:type key: int
:rtype: int
"""
if key not in self.valueDict:
return -1
count = self.countDict[key]
self.countDict[key] = count + 1
del self.frequencyDict[count][key] # remove key in previous frequencyDict[count]
if count == self.min and len(self.frequencyDict[count]) == 0: # If least frequency needs to add 1
self.min += 1
if count+1 not in self.frequencyDict:
self.frequencyDict[count+1] = OrderedDict()
self.frequencyDict[count+1][key] = 1 # {fre, {key:1}} add {key:1} to frequencyDict
return self.valueDict[key]
def put(self, key, value):
"""
:type key: int
:type value: int
:rtype: void
"""
if self.capacity <= 0:
return
if key in self.valueDict:
self.valueDict[key] = value
self.get(key) # Add frequency
return
if len(self.valueDict) >= self.capacity: # It's over capacity
leastFreq = self.frequencyDict[self.min].popitem(last=False)
self.valueDict.pop(leastFreq[0])
self.valueDict[key] = value # key is not in valueDict, so add it
self.countDict[key] = 1 # update countDict with {key:1}
self.min = 1 # least frequency becomes to 1 again
self.frequencyDict[self.min][key] = 1
# Your LFUCache object will be instantiated and called as such:
# obj = LFUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
# Used for testing
if __name__ == "__main__":
    cache = LFUCache(2)
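    # walk through the example from the description above (illustrative check)
    cache.put(1, 1)
    cache.put(2, 2)
    assert cache.get(1) == 1
    cache.put(3, 3)  # evicts key 2
    assert cache.get(2) == -1
    assert cache.get(3) == 3
    cache.put(4, 4)  # evicts key 1
    assert cache.get(1) == -1
    assert cache.get(3) == 3
    assert cache.get(4) == 4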
# ------------------------------
# Summary:
# Follow idea from https://leetcode.com/problems/lfu-cache/discuss/94521/JAVA-O(1)-very-easy-solution-using-3-HashMaps-and-LinkedHashSet
# Used a data structure from collections.OrderedDict
|
[
"collections.OrderedDict"
] |
[((1685, 1698), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1696, 1698), False, 'from collections import OrderedDict\n'), ((2322, 2335), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2333, 2335), False, 'from collections import OrderedDict\n')]
|
import sys
from calendar import monthrange
from contextlib import contextmanager
from csv import DictReader, DictWriter
from datetime import date, datetime, timedelta, MINYEAR
from enum import Enum
from gzip import GzipFile
from importlib.resources import open_binary as open_binary_package
from io import TextIOWrapper
from pathlib import Path
from re import compile as re_compile, IGNORECASE
from typing import Callable, Generator, IO, List, NamedTuple, Union
from urllib.parse import urlencode, urljoin
import requests
from weather.configuration import get_setting, get_logger
# bring the data package into scope
from weather.domain import data
log = get_logger(__name__)
DataPath = Union[str, Path]
DictionaryWriter = Callable[[dict], None]
class CsvDictWriter:
def __init__(self, fields: List[str]):
if not fields or len(fields) == 0:
raise ValueError("Dictionary fields are required...")
self._fields = fields.copy()
@property
def fields(self):
return self._fields.copy()
@contextmanager
def file_writer(self, data_path: DataPath) -> DictionaryWriter:
data_path = Path(data_path) if isinstance(data_path, str) else data_path
if not data_path.exists():
mode = "w"
elif not data_path.is_file():
raise ValueError("CSV filename exists and is not writable...")
else:
mode = "a"
with data_path.open(mode) as fp:
dict_writer = self._get_dict_writer(fp, mode == "w")
yield lambda d: dict_writer.writerow(d)
@contextmanager
def stdout(self) -> DictionaryWriter:
dict_writer = self._get_dict_writer(sys.stdout, True)
yield lambda content: dict_writer.writerow(content)
def _get_dict_writer(self, fp: IO, include_headers: bool = False) -> DictWriter:
dict_writer = DictWriter(fp, fieldnames=self._fields, extrasaction='ignore')
if include_headers:
dict_writer.writeheader()
return dict_writer
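# A minimal usage sketch (illustrative addition, not part of the original
# module); the path and field names here are made up. Keys outside `fields`
# are silently dropped because the writer uses extrasaction='ignore'.
def _csv_dict_writer_demo(csv_path: DataPath) -> None:
    writer = CsvDictWriter(["name", "alias"])
    with writer.file_writer(csv_path) as write_row:
        write_row({"name": "Fargo, ND", "alias": "fargo_nd", "ignored": 1})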
class DateRange(NamedTuple('_DateRange', low=date, high=date)):
def __new__(cls, low: date, high: date = None):
if not low:
error = "{}: a low date is required.".format(cls.__name__)
raise ValueError(error)
if not high:
high = low
elif high < low:
error = "{}: high date ({}) cannot be less than low date ({}).".format(cls.__name__, high, low)
raise ValueError(error)
# It looks like there is an open issue with PyCharm, PY-39755, that falsely reports
# "unexpected arguments" when calling the super class.
# noinspection PyArgumentList
return super().__new__(cls, low, high)
def __str__(self):
return "{}(low={},high={})".format(self.__class__.__name__, self.low, self.high)
def __eq__(self, other) -> bool:
if isinstance(other, DateRange):
return self.low == other.low and self.high == other.high
        return NotImplemented
    def __contains__(self, other) -> bool:
        if isinstance(other, DateRange):
            return self.low <= other.low and self.high >= other.high
        return False
def total_days(self) -> int:
return (self.high - self.low).days
def get_dates(self) -> Generator[date, None, None]:
if self.low == self.high:
yield self.low
else:
one_day = timedelta(days=1)
ts = self.low
while ts <= self.high:
yield ts
ts += one_day
def spans_years(self) -> bool:
return self.low.year < self.high.year
def as_neutral_date_range(self) -> 'DateRange':
def neutral_day(_date) -> int:
if 2 != _date.month:
is_leap_day = False
else:
is_leap_day = (29 == _date.day)
# MINYEAR and the following year are not leap years
return 28 if is_leap_day else _date.day
low = date(MINYEAR, self.low.month, neutral_day(self.low))
high = date(MINYEAR + 1 if self.spans_years() else MINYEAR, self.high.month, neutral_day(self.high))
return DateRange(low, high)
def with_month_offset(self, low_months: int, high_month: int) -> 'DateRange':
pass
def with_low_month_offset(self, months: int) -> 'DateRange':
pass
def with_high_month_offset(self, months: int) -> 'DateRange':
pass
@staticmethod
def _days_in_month(year: int, month: int):
return monthrange(year, month)[1]
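# A minimal sketch (illustrative addition, not part of the original module):
# a DateRange spanning a year boundary collapses onto MINYEAR/MINYEAR + 1,
# so histories captured in different years can be compared like-for-like.
def _date_range_demo() -> None:
    dr = DateRange(date(2019, 12, 30), date(2020, 1, 2))
    assert dr.spans_years() and dr.total_days() == 3
    neutral = dr.as_neutral_date_range()
    assert (neutral.low.year, neutral.high.year) == (MINYEAR, MINYEAR + 1)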
class Location(NamedTuple):
name: str
alias: str
longitude: str
latitude: str
tz: str
def __eq__(self, other):
"""In weather data the location is identified by name and alias which allows this to work."""
if isinstance(other, Location):
            return (self.name, self.alias) == (other.name, other.alias)
        return NotImplemented
def __hash__(self):
"""Since equality is base on name and alias this will work for a hash identifier."""
return hash((self.name, self.alias))
def __ne__(self, other):
"""Be explicit as to what not equal to means."""
return not self.__eq__(other)
def __repr__(self) -> str:
return "(name='{}', alias={}, longitude={}, latitude={}, tz={})" \
.format(self.name, self.alias, self.longitude, self.latitude, self.tz)
def is_name(self, name: str, case_sensitive=False) -> bool:
return name == self.name if case_sensitive else name.casefold() == self.name.casefold()
def is_alias(self, alias: str, case_sensitive=False) -> bool:
return alias == self.alias if case_sensitive else alias.casefold() == self.alias.casefold()
def is_considered(self, value: str) -> bool:
return self.is_name(value) or self.is_alias(value)
class Field(Enum):
NAME = "name"
LONGITUDE = "longitude"
LATITUDE = "latitude"
ALIAS = "alias"
TZ = "tz"
def to_dict(self) -> dict:
return {
Location.Field.NAME.value: self.name,
Location.Field.ALIAS.value: self.alias.casefold() if self.alias else self.alias,
Location.Field.LONGITUDE.value: self.longitude,
Location.Field.LATITUDE.value: self.latitude,
Location.Field.TZ.value: self.tz
}
@staticmethod
def from_dict(dictionary: dict) -> 'Location':
def get_field(field_: Location.Field) -> str:
data_ = dictionary.get(field_.value)
if not data_:
raise ValueError("The location {} is required.".format(field_.value))
return str(data_)
return Location(name=get_field(Location.Field.NAME),
alias=get_field(Location.Field.ALIAS).casefold(),
longitude=get_field(Location.Field.LONGITUDE),
latitude=get_field(Location.Field.LATITUDE),
tz=get_field(Location.Field.TZ))
class CityDB:
class Record(NamedTuple):
name: str
state: str
longitude: str
latitude: str
tz: str
zips: str
@staticmethod
def from_dict(db_row: dict) -> 'CityDB.Record':
return CityDB.Record(name=db_row["city"],
state=db_row["state"],
longitude=db_row["long"],
latitude=db_row["lat"],
tz=db_row["tz"],
zips=db_row["zips"])
def to_location(self) -> Location:
return Location(name="{}, {}".format(self.name, self.state),
alias="{} {}".format(self.name, self.state).replace(" ", "_").casefold(),
longitude=self.longitude,
latitude=self.latitude,
tz=self.tz)
def __init__(self):
self._city_db: List[CityDB.Record] = []
# PyCharm is having issues figuring out the import api
# noinspection PyTypeChecker
with open_binary_package(data, 'cities_db.csv.gz') as pkg_file:
with GzipFile(mode="rb", fileobj=pkg_file) as gzip_file:
for row in DictReader(TextIOWrapper(gzip_file, encoding="UTF-8")):
self._city_db.append(CityDB.Record.from_dict(row))
def find(self, city: str = None, state: str = None, zip_code: str = None) -> List['CityDB.Record']:
city_finder = re_compile(city.replace('*', '.*'), IGNORECASE) if city else None
state_finder = re_compile(state, IGNORECASE) if state else None
zip_code_finder = re_compile(zip_code.replace('*', '.*')) if zip_code else None
matches = []
for record in self._city_db:
if city_finder and not city_finder.match(record.name):
continue
if state_finder and not state_finder.match(record.state):
continue
            if zip_code_finder and not any(zip_code_finder.match(z) for z in record.zips.split()):
                continue
            matches.append(record)
return matches
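# A minimal usage sketch (illustrative addition, not part of the original
# module); the search terms are made up and assume matching rows exist in
# the packaged cities_db.csv.gz.
def _city_db_demo() -> List[Location]:
    db = CityDB()
    return [record.to_location() for record in db.find(city="Fargo", state="ND")]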
class WeatherProviderAPI:
RECORDED = "recorded"
ERROR = "error"
API_CALLS_MADE = "api_calls_made"
API_USAGE_LIMIT = 900
API_REQUESTS_MADE_TODAY_HEADER = "X-Forecast-API-Calls"
def __init__(self, key: str = None):
self._key = key if key else get_setting("domain", "history_api_key")
self._url = urljoin("https://api.darksky.net/forecast/", self._key) + "/"
self._api_calls_made = 0
@property
def url(self) -> str:
return self._url
@property
def key(self) -> str:
return self._key
def recorded(self, location: Location, when: datetime) -> dict:
"""
The returned dictionary should always contain either RECORDED_KEY
or ERROR_KEY. Optionally it can contain API_CALLS_MADE_KEY.
"""
def mk_error(reason: str) -> dict:
return {
WeatherProviderAPI.ERROR: reason,
WeatherProviderAPI.API_CALLS_MADE: self._api_calls_made
}
if self._api_calls_made > self.API_USAGE_LIMIT:
return mk_error("You've made too many API requests to Dark Sky today...")
url = urljoin(self.url, "{},{},{}".format(location.latitude, location.longitude, when.isoformat()))
log.debug("url: %s", url)
try:
response = requests.get(url, urlencode({"exclude": "currently,flags"}))
if response.ok:
api_calls = response.headers.get(self.API_REQUESTS_MADE_TODAY_HEADER.lower())
log.debug("api calls: %s", api_calls)
if not api_calls:
log.error("Yikes... Didn't find {} header!!!".format(self.API_REQUESTS_MADE_TODAY_HEADER))
self._api_calls_made = self.API_USAGE_LIMIT + 1
else:
self._api_calls_made = int(api_calls)
return {
WeatherProviderAPI.RECORDED: response.json(),
WeatherProviderAPI.API_CALLS_MADE: self._api_calls_made
}
else:
return mk_error("HTTP {}: {}".format(response.status_code, response.reason))
except Exception as error:
return mk_error(str(error))
class FullHistory(NamedTuple):
date: date
daily: dict
hourly: List[dict]
|
[
"weather.configuration.get_logger",
"urllib.parse.urljoin",
"weather.configuration.get_setting",
"re.compile",
"urllib.parse.urlencode",
"importlib.resources.open_binary",
"typing.NamedTuple",
"pathlib.Path",
"datetime.timedelta",
"io.TextIOWrapper",
"gzip.GzipFile",
"calendar.monthrange",
"csv.DictWriter"
] |
[((658, 678), 'weather.configuration.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (668, 678), False, 'from weather.configuration import get_setting, get_logger\n'), ((2038, 2083), 'typing.NamedTuple', 'NamedTuple', (['"""_DateRange"""'], {'low': 'date', 'high': 'date'}), "('_DateRange', low=date, high=date)\n", (2048, 2083), False, 'from typing import Callable, Generator, IO, List, NamedTuple, Union\n'), ((1864, 1926), 'csv.DictWriter', 'DictWriter', (['fp'], {'fieldnames': 'self._fields', 'extrasaction': '"""ignore"""'}), "(fp, fieldnames=self._fields, extrasaction='ignore')\n", (1874, 1926), False, 'from csv import DictReader, DictWriter\n'), ((1144, 1159), 'pathlib.Path', 'Path', (['data_path'], {}), '(data_path)\n', (1148, 1159), False, 'from pathlib import Path\n'), ((3394, 3411), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (3403, 3411), False, 'from datetime import date, datetime, timedelta, MINYEAR\n'), ((4502, 4525), 'calendar.monthrange', 'monthrange', (['year', 'month'], {}), '(year, month)\n', (4512, 4525), False, 'from calendar import monthrange\n'), ((8111, 8156), 'importlib.resources.open_binary', 'open_binary_package', (['data', '"""cities_db.csv.gz"""'], {}), "(data, 'cities_db.csv.gz')\n", (8130, 8156), True, 'from importlib.resources import open_binary as open_binary_package\n'), ((8609, 8638), 're.compile', 're_compile', (['state', 'IGNORECASE'], {}), '(state, IGNORECASE)\n', (8619, 8638), True, 'from re import compile as re_compile, IGNORECASE\n'), ((9466, 9506), 'weather.configuration.get_setting', 'get_setting', (['"""domain"""', '"""history_api_key"""'], {}), "('domain', 'history_api_key')\n", (9477, 9506), False, 'from weather.configuration import get_setting, get_logger\n'), ((9527, 9582), 'urllib.parse.urljoin', 'urljoin', (['"""https://api.darksky.net/forecast/"""', 'self._key'], {}), "('https://api.darksky.net/forecast/', self._key)\n", (9534, 9582), False, 'from urllib.parse import urlencode, urljoin\n'), ((8187, 8224), 'gzip.GzipFile', 'GzipFile', ([], {'mode': '"""rb"""', 'fileobj': 'pkg_file'}), "(mode='rb', fileobj=pkg_file)\n", (8195, 8224), False, 'from gzip import GzipFile\n'), ((10530, 10571), 'urllib.parse.urlencode', 'urlencode', (["{'exclude': 'currently,flags'}"], {}), "({'exclude': 'currently,flags'})\n", (10539, 10571), False, 'from urllib.parse import urlencode, urljoin\n'), ((8277, 8319), 'io.TextIOWrapper', 'TextIOWrapper', (['gzip_file'], {'encoding': '"""UTF-8"""'}), "(gzip_file, encoding='UTF-8')\n", (8290, 8319), False, 'from io import TextIOWrapper\n')]
|
# jsb/plugs/core/user.py
#
#
""" users related commands. """
## jsb imports
from jsb.utils.generic import getwho
from jsb.utils.exception import handle_exception
from jsb.utils.name import stripname
from jsb.lib.users import users
from jsb.lib.commands import cmnds
from jsb.lib.examples import examples
## basic imports
import logging
## user-whoami command
def handle_whoami(bot, ievent):
""" no arguments - get your username. """
ievent.reply('%s' % bot.users.getname(ievent.auth))
cmnds.add('user-whoami', handle_whoami, ['OPER', 'USER', 'GUEST'])
examples.add('user-whoami', 'get your username', 'user-whoami')
## user-meet command
def handle_meet(bot, ievent):
""" arguments: <nick> - introduce a new user to the bot. """
try: nick = ievent.args[0]
except IndexError:
ievent.missing('<nick>')
return
if bot.users.exist(nick):
ievent.reply('there is already a user with username %s' % nick)
return
userhost = getwho(bot, nick)
logging.warn("users - meet - userhost is %s" % userhost)
if not userhost:
ievent.reply("can't find userhost of %s" % nick)
return
username = bot.users.getname(userhost)
if username:
ievent.reply('we already have a user with userhost %s (%s)' % (userhost, username))
return
result = 0
name = stripname(nick.lower())
result = bot.users.add(name, [userhost, ], ['USER', 'GUEST'])
if result: ievent.reply('%s - %s - (%s) added to user database' % (nick, userhost, name))
else: ievent.reply('add failed')
cmnds.add('user-meet', handle_meet, ['OPER', 'MEET'])
examples.add('user-meet', '<nick> .. introduce <nick> to the bot', 'user-meet dunker')
## user-add command
def handle_adduser(bot, ievent):
""" arguments: <name> <userhost> - introduce a new user to the bot. """
try: (name, userhost) = ievent.args
except ValueError:
ievent.missing('<name> <userhost>')
return
username = bot.users.getname(userhost)
if username:
ievent.reply('we already have a user with userhost %s (%s)' % (userhost, username))
return
result = 0
name = stripname(name.lower())
result = bot.users.add(name, [userhost, ], ['USER', 'GUEST'])
if result: ievent.reply('%s added to user database' % name)
else: ievent.reply('add failed')
cmnds.add('user-add', handle_adduser, 'OPER')
examples.add('user-add', 'add user to the bot', 'user-add dunker bart@localhost')
## user-merge command
def handle_merge(bot, ievent):
""" arguments: <name> <nick> - merge the userhost belonging to <nick> into an already existing user. """
if len(ievent.args) != 2:
ievent.missing('<name> <nick>')
return
name, nick = ievent.args
name = name.lower()
if bot.users.gotperm(name, 'OPER') and not bot.users.allowed(ievent.userhost, 'OPER'):
ievent.reply("only OPER perm can merge with OPER user")
return
if name == 'owner' and not bot.ownercheck(ievent.userhost):
ievent.reply("you are not the owner")
return
if not bot.users.exist(name):
ievent.reply("we have no user %s" % name)
return
userhost = getwho(bot, nick)
if not userhost:
ievent.reply("can't find userhost of %s" % nick)
return
if bot.ownercheck(userhost):
ievent.reply("can't merge with owner")
return
result = bot.users.merge(name, userhost)
if result: ievent.reply('%s merged' % nick)
else: ievent.reply('merge failed')
cmnds.add('user-merge', handle_merge, ['OPER', 'MEET'])
examples.add('user-merge', '<name> <nick> .. merge record with <name> with userhost from <nick>', 'user-merge bart dunker')
## user-import command
def handle_import(bot, ievent):
""" arguments: <userhost> - merge the userhost into user giving the command. """
if len(ievent.args) != 1:
ievent.missing('<userhost>')
return
userhost = ievent.args[0]
if bot.ownercheck(userhost):
ievent.reply("can't merge owner")
return
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("i don't know you %s" % ievent.userhost)
return
result = bot.users.merge(name, userhost)
if result: ievent.reply('%s imported' % userhost)
else: ievent.reply('import failed')
cmnds.add('user-import', handle_import, ['IMPORT', 'OPER'])
examples.add('user-import', 'user-import <userhost> .. merge record with \
<name> with userhost from the person giving the command (self merge)', 'user-import <EMAIL>')
## user-del command
def handle_delete(bot, ievent):
""" arguments: <name> - remove user. """
if not bot.ownercheck(ievent.userhost):
ievent.reply('only owner can use delete')
return
if len(ievent.args) == 0:
ievent.missing('<name>')
return
name = ievent.args[0]
result = 0
name = stripname(name)
name = name.lower()
try:
result = bot.users.delete(name)
if result:
ievent.reply('%s deleted' % name)
return
except KeyError: pass
ievent.reply('no %s item in database' % name)
cmnds.add('user-del', handle_delete, 'OPER')
examples.add('user-del', 'user-del <name> .. delete user with <username>' , 'user-del dunker')
## user-undel command
def handle_undelete(bot, ievent):
""" arguments: <name> - remove user. """
if not bot.ownercheck(ievent.userhost):
ievent.reply('only owner can use delete')
return
if len(ievent.args) == 0:
ievent.missing('<name>')
return
name = ievent.args[0]
result = 0
name = stripname(name)
name = name.lower()
user = bot.users.grab(name)
if user:
user.data.deleted = False
user.save()
ievent.reply('%s undeleted' % name)
return
else: ievent.reply('no %s item in database' % name)
cmnds.add('user-undel', handle_undelete, 'OPER')
examples.add('user-undel', 'user-del <name> .. undelete user with <username>' , 'user-undel dunker')
## user-scan command
def handle_userscan(bot, ievent):
""" arguments: <searchtxt> - scan for user. """
try:name = ievent.args[0]
except IndexError:
ievent.missing('<txt>')
return
name = name.lower()
names = bot.users.names()
result = []
for i in names:
if i.find(name) != -1: result.append(i)
if result: ievent.reply("users matching %s: " % name, result)
else: ievent.reply('no users matched')
cmnds.add('user-scan', handle_userscan, 'OPER')
examples.add('user-scan', '<txt> .. search database for matching usernames', 'user-scan dunk')
## user-names command
def handle_names(bot, ievent):
""" no arguments - show registered users. """
ievent.reply("usernames: ", bot.users.names())
cmnds.add('user-names', handle_names, 'OPER')
examples.add('user-names', 'show names of registered users', 'user-names')
## user-name command
def handle_name(bot, ievent):
""" no arguments - show name of user giving the command. """
ievent.reply('your name is %s' % bot.users.getname(ievent.auth))
cmnds.add('user-name', handle_name, ['USER', 'GUEST'])
examples.add('user-name', 'show name of user giving the commands', 'user-name')
## user-getname command
def handle_getname(bot, ievent):
""" arguments: <nick> - fetch username of nick. """
try: nick = ievent.args[0]
except IndexError:
ievent.missing("<nick>")
return
userhost = getwho(bot, nick)
if not userhost:
ievent.reply("can't find userhost of %s" % nick)
return
name = bot.users.getname(userhost)
if not name:
ievent.reply("can't find user for %s" % userhost)
return
ievent.reply(name)
cmnds.add('user-getname', handle_getname, ['USER', 'GUEST'])
examples.add('user-getname', 'user-getname <nick> .. get the name of <nick>', 'user-getname dunker')
## user-addperm command
def handle_addperm(bot, ievent):
""" arguments: <name> <permission> - add permission to user. """
if len(ievent.args) != 2:
ievent.missing('<name> <perm>')
return
name, perm = ievent.args
perm = perm.upper()
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
result = 0
if bot.users.gotperm(name, perm):
ievent.reply('%s already has permission %s' % (name, perm))
return
result = bot.users.adduserperm(name, perm)
if result: ievent.reply('%s perm added' % perm)
else: ievent.reply('perm add failed')
cmnds.add('user-addperm', handle_addperm, 'OPER')
examples.add('user-addperm', 'user-addperm <name> <perm> .. add permissions to user <name>', 'user-addperm dunker rss')
## user-getperms command
def handle_getperms(bot, ievent):
""" arguments: <name> - get permissions of name. """
try: name = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
perms = bot.users.getuserperms(name)
if perms: ievent.reply("permissions of %s: " % name, perms)
else: ievent.reply('%s has no permissions set' % name)
cmnds.add('user-getperms', handle_getperms, 'OPER')
examples.add('user-getperms', 'user-getperms <name> .. get permissions of <name>', 'user-getperms dunker')
## user-perms command
def handle_perms(bot, ievent):
""" no arguments - get permissions of the user given the command. """
if ievent.rest:
ievent.reply("use getperms to get the permissions of somebody else")
return
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("can't find username for %s" % ievent.userhost)
return
perms = bot.users.getuserperms(name)
if perms: ievent.reply("you have permissions: ", perms)
cmnds.add('user-perms', handle_perms, ['USER', 'GUEST'])
examples.add('user-perms', 'get permissions', 'user-perms')
## user-delperm command
def handle_delperm(bot, ievent):
""" arguments: <name> <perm> - delete permission from user. """
if len(ievent.args) != 2:
ievent.missing('<name> <perm>')
return
name, perm = ievent.args
perm = perm.upper()
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
result = bot.users.deluserperm(name, perm)
if result: ievent.reply('%s perm removed' % perm)
else: ievent.reply("%s has no %s permission" % (name, perm))
cmnds.add('user-delperm', handle_delperm, 'OPER')
examples.add('user-delperm', 'delete from user <name> permission <perm>', 'user-delperm dunker rss')
## user-addstatus command
def handle_addstatus(bot, ievent):
""" arguments: <name> <status> - add status to a user. """
if len(ievent.args) != 2:
ievent.missing('<name> <status>')
return
name, status = ievent.args
status = status.upper()
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
if bot.users.gotstatus(name, status):
ievent.reply('%s already has status %s' % (name, status))
return
result = bot.users.adduserstatus(name, status)
if result: ievent.reply('%s status added' % status)
else: ievent.reply('add failed')
cmnds.add('user-addstatus', handle_addstatus, 'OPER')
examples.add('user-addstatus', 'user-addstatus <name> <status>', 'user-addstatus dunker #dunkbots')
## user-getstatus command
def handle_getstatus(bot, ievent):
""" arguments: <name> - get status of a user. """
try: name = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
status = bot.users.getuserstatuses(name)
if status: ievent.reply("status of %s: " % name, status)
else: ievent.reply('%s has no status set' % name)
cmnds.add('user-getstatus', handle_getstatus, 'OPER')
examples.add('user-getstatus', 'user-getstatus <name> .. get status of <name>', 'user-getstatus dunker')
## user-status command
def handle_status(bot, ievent):
""" no arguments - get status of user given the command. """
status = bot.users.getstatuses(ievent.userhost)
if status: ievent.reply("you have status: ", status)
else: ievent.reply('you have no status set')
cmnds.add('user-status', handle_status, ['USER', 'GUEST'])
examples.add('user-status', 'get status', 'user-status')
## user-delstatus command
def handle_delstatus(bot, ievent):
""" arguments: <name> <status> - delete status. """
if len(ievent.args) != 2:
ievent.missing('<name> <status>')
return
name, status = ievent.args
status = status.upper()
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
result = bot.users.deluserstatus(name, status)
if result: ievent.reply('%s status deleted' % status)
else: ievent.reply("%s has no %s status" % (name, status))
cmnds.add('user-delstatus', handle_delstatus, 'OPER')
examples.add('user-delstatus', '<name> <status>', 'user-delstatus dunker #dunkbots')
## user-adduserhost command
def handle_adduserhost(bot, ievent):
""" arguments: <name> <userhost> - add to userhosts of user. """
if len(ievent.args) != 2:
ievent.missing('<name> <userhost>')
return
name, userhost = ievent.args
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
if bot.users.gotuserhost(name, userhost):
ievent.reply('%s already has userhost %s' % (name, userhost))
return
result = bot.users.adduserhost(name, userhost)
if result: ievent.reply('userhost added')
else: ievent.reply('add failed')
cmnds.add('user-adduserhost', handle_adduserhost, 'OPER')
examples.add('user-adduserhost', 'user-adduserhost <name> <userhost>', 'user-adduserhost dunker <EMAIL>')
## user-deluserhost command
def handle_deluserhost(bot, ievent):
""" arguments: <name> <userhost> - remove from userhosts of name. """
if len(ievent.args) != 2:
ievent.missing('<name> <userhost>')
return
name, userhost = ievent.args
name = name.lower()
if bot.ownercheck(userhost):
        ievent.reply("can't delete userhosts from owner")
return
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
result = bot.users.deluserhost(name, userhost)
if result: ievent.reply('userhost removed')
else: ievent.reply("%s has no %s in userhost list" % (name, userhost))
cmnds.add('user-deluserhost', handle_deluserhost, 'OPER')
examples.add('user-deluserhost', 'user-deluserhost <name> <userhost> .. delete from usershosts of <name> userhost <userhost>','user-deluserhost dunker <EMAIL>')
## user-getuserhosts command
def handle_getuserhosts(bot, ievent):
""" arguments: <name> - get userhosts of a user. """
try: who = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
who = who.lower()
userhosts = bot.users.getuserhosts(who)
if userhosts: ievent.reply("userhosts of %s: " % who, userhosts)
else: ievent.reply("can't find user %s" % who)
cmnds.add('user-getuserhosts', handle_getuserhosts, 'OPER')
examples.add('user-getuserhosts', 'user-getuserhosts <name> .. get userhosts of <name>', 'user-getuserhosts dunker')
## user-userhosts command
def handle_userhosts(bot, ievent):
""" no arguments - get userhosts of user giving the command. """
userhosts = bot.users.gethosts(ievent.userhost)
if userhosts: ievent.reply("you have userhosts: ", userhosts)
else: ievent.reply('no userhosts found')
cmnds.add('user-userhosts', handle_userhosts, ['USER', 'GUEST'])
examples.add('user-userhosts', 'get userhosts', 'user-userhosts')
## user-getemail command
def handle_getemail(bot, ievent):
""" arguments: <user> - get email addres of a user. """
try: name = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
name = name.lower()
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
email = bot.users.getuseremail(name)
if email: ievent.reply(email)
else: ievent.reply('no email set')
cmnds.add('user-getemail', handle_getemail, ['USER', ])
examples.add('user-getemail', 'user-getemail <name> .. get email from user <name>', 'user-getemail dunker')
## user-setemail command
def handle_setemail(bot, ievent):
""" arguments: <name> <email> - set email of a user. """
try: name, email = ievent.args
except ValueError:
ievent.missing('<name> <email>')
return
if not bot.users.exist(name):
ievent.reply("can't find user %s" % name)
return
bot.users.setemail(name, email)
ievent.reply('email set')
cmnds.add('user-setemail', handle_setemail, 'OPER')
examples.add('user-setemail', 'user-setemail <name> <email>.. set email of user <name>', 'user-setemail dunker <EMAIL>')
## user-email command
def handle_email(bot, ievent):
""" no arguments - show email of user giving the command. """
if len(ievent.args) != 0:
ievent.reply('use getemail to get the email address of an user .. email shows your own mail address')
return
email = bot.users.getemail(ievent.userhost)
if email: ievent.reply(email)
else: ievent.reply('no email set')
cmnds.add('user-email', handle_email, ['USER', 'GUEST'])
examples.add('user-email', 'get email', 'user-email')
## user-delemail command
def handle_delemail(bot, ievent):
""" no arguments - reset email of user giving the command. """
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("can't find user for %s" % ievent.userhost)
return
result = bot.users.delallemail(name)
if result: ievent.reply('email removed')
else: ievent.reply('delete failed')
cmnds.add('user-delemail', handle_delemail, 'OPER')
examples.add('user-delemail', 'reset email', 'user-delemail')
## user-addpermit command
def handle_addpermit(bot, ievent):
""" arguments: <name> <permit> - allow another user to perform actions on your data. """
try: who, what = ievent.args
except ValueError:
ievent.missing("<name> <permit>")
return
if not bot.users.exist(who):
ievent.reply("can't find username of %s" % who)
return
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("i dont know %s" % ievent.userhost)
return
if bot.users.gotpermit(name, (who, what)):
ievent.reply('%s is already allowed to do %s' % (who, what))
return
result = bot.users.adduserpermit(name, who, what)
if result: ievent.reply('permit added')
else: ievent.reply('add failed')
cmnds.add('user-addpermit', handle_addpermit, ['USER', 'GUEST'])
examples.add('user-addpermit', 'user-addpermit <nick> <what> .. permit nick access to <what> .. use setperms to add permissions', 'user-addpermit dunker todo')
## user-permit command
def handle_permit(bot, ievent):
""" no arguments - get permit list of user giving the command. """
if ievent.rest:
ievent.reply("use the user-addpermit command to allow somebody something .. use getname <nick> to get the username of somebody .. this command shows what permits you have")
return
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("can't find user for %s" % ievent.userhost)
return
permits = bot.users.getuserpermits(name)
if permits: ievent.reply("you permit the following: ", permits)
else: ievent.reply("you don't have any permits")
cmnds.add('user-permit', handle_permit, ['USER', 'GUEST'])
examples.add('user-permit', 'show permit of user giving the command', 'user-permit')
## user-delpermit command
def handle_userdelpermit(bot, ievent):
""" arguments: <name> <permit> - remove (name, permit) from permit list. """
try: who, what = ievent.args
except ValueError:
ievent.missing("<name> <permit>")
return
if not bot.users.exist(who):
ievent.reply("can't find registered name of %s" % who)
return
name = bot.users.getname(ievent.auth)
if not name:
ievent.reply("i don't know you %s" % ievent.userhost)
return
if not bot.users.gotpermit(name, (who, what)):
ievent.reply('%s is already not allowed to do %s' % (who, what))
return
result = bot.users.deluserpermit(name, (who, what))
if result: ievent.reply('%s denied' % what)
else: ievent.reply('delete failed')
cmnds.add('user-delpermit', handle_userdelpermit, ['USER', 'GUEST'])
examples.add('user-delpermit', 'user-delpermit <name> <permit>', 'user-delpermit dunker todo')
## user-check command
def handle_check(bot, ievent):
""" arguments: <nick> - get data of a user based on nick name. """
try: nick = ievent.args[0]
except IndexError:
ievent.missing('<nick>')
return
userhost = getwho(bot, nick)
if not userhost:
ievent.reply("can't find userhost of %s" % nick)
return
name = bot.users.getname(userhost)
if not name:
ievent.reply("can't find user")
return
userhosts = bot.users.getuserhosts(name)
perms = bot.users.getuserperms(name)
email = bot.users.getuseremail(name)
permits = bot.users.getuserpermits(name)
status = bot.users.getuserstatuses(name)
ievent.reply('userrecord of %s = userhosts: %s perms: %s email: %s permits: %s status: %s' % (name, str(userhosts), str(perms), str(email), str(permits), str(status)))
cmnds.add('user-check', handle_check, 'OPER')
examples.add('user-check', 'user-check <nick>', 'user-check dunker')
## user-show command
def handle_show(bot, ievent):
""" arguments: <name> - get data of a user based on username. """
try: name = ievent.args[0]
except IndexError:
ievent.missing('<name>')
return
name = name.lower()
user = bot.users.byname(name)
if not user:
ievent.reply("can't find user %s" % name)
return
userhosts = str(user.data.userhosts)
perms = str(user.data.perms)
email = str(user.data.email)
permits = str(user.data.permits)
status = str(user.data.status)
ievent.reply('userrecord of %s = userhosts: %s perms: %s email: %s permits: %s status: %s' % (name, userhosts, perms, email, permits, status))
cmnds.add('user-show', handle_show, 'OPER')
examples.add('user-show', 'user-show <name> .. show data of <name>', 'user-show dunker')
## user-match command
def handle_match(bot, ievent):
""" arguments: <userhost> - get data of user based on userhost. """
try: userhost = ievent.args[0]
except IndexError:
ievent.missing('<userhost>')
return
user = bot.users.getuser(userhost)
if not user:
ievent.reply("can't find user with userhost %s" % userhost)
return
userhosts = str(user.data.userhosts)
perms = str(user.data.perms)
email = str(user.data.email)
permits = str(user.data.permits)
status = str(user.data.status)
ievent.reply('userrecord of %s = userhosts: %s perms: %s email: %s permits: %s status: %s' % (userhost, userhosts, perms, email, permits, status))
cmnds.add('user-match', handle_match, ['OPER', ])
examples.add('user-match', 'user-match <userhost>', 'user-match test@test')
## user-allstatus command
def handle_getuserstatus(bot, ievent):
""" arguments: <status> - list users with <status>. """
try: status = ievent.args[0].upper()
except IndexError:
ievent.missing('<status>')
return
result = bot.users.getstatususers(status)
if result: ievent.reply("users with %s status: " % status, result)
else: ievent.reply("no users with %s status found" % status)
cmnds.add('user-allstatus', handle_getuserstatus, 'OPER')
examples.add('user-allstatus', 'user-allstatus <status> .. get all users with <status> status', 'user-allstatus #dunkbots')
## user-allperm command
def handle_getuserperm(bot, ievent):
""" arguments: <perm> - list users with permission <perm>. """
try: perm = ievent.args[0].upper()
except IndexError:
ievent.missing('<perm>')
return
result = bot.users.getpermusers(perm)
if result: ievent.reply('users with %s permission: ' % perm, result)
else: ievent.reply("no users with %s permission found" % perm)
cmnds.add('user-allperm', handle_getuserperm, 'OPER')
examples.add('user-allperm', 'user-allperm <perm> .. get users with <perm> permission', 'user-allperm rss')
## user-search command
def handle_usersearch(bot, ievent):
""" arguments: <searchtxt> - search for user matching given userhost. """
try: what = ievent.args[0]
except IndexError:
ievent.missing('<searchtxt>')
return
result = bot.users.usersearch(what)
if result:
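        # usersearch is assumed to yield (name, userhost) pairs for display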
res = ["(%s) %s" % u for u in result]
ievent.reply('users matching %s: ' % what, res)
else: ievent.reply('no userhost matching %s found' % what)
cmnds.add('user-search', handle_usersearch, 'OPER')
examples.add('user-search', 'search users userhosts', 'user-search gozerbot')
|
[
"logging.warn",
"jsb.lib.commands.cmnds.add",
"jsb.utils.generic.getwho",
"jsb.lib.examples.examples.add",
"jsb.utils.name.stripname"
] |
[((501, 567), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-whoami"""', 'handle_whoami', "['OPER', 'USER', 'GUEST']"], {}), "('user-whoami', handle_whoami, ['OPER', 'USER', 'GUEST'])\n", (510, 567), False, 'from jsb.lib.commands import cmnds\n'), ((568, 631), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-whoami"""', '"""get your username"""', '"""user-whoami"""'], {}), "('user-whoami', 'get your username', 'user-whoami')\n", (580, 631), False, 'from jsb.lib.examples import examples\n'), ((1572, 1625), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-meet"""', 'handle_meet', "['OPER', 'MEET']"], {}), "('user-meet', handle_meet, ['OPER', 'MEET'])\n", (1581, 1625), False, 'from jsb.lib.commands import cmnds\n'), ((1626, 1716), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-meet"""', '"""<nick> .. introduce <nick> to the bot"""', '"""user-meet dunker"""'], {}), "('user-meet', '<nick> .. introduce <nick> to the bot',\n 'user-meet dunker')\n", (1638, 1716), False, 'from jsb.lib.examples import examples\n'), ((2352, 2397), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-add"""', 'handle_adduser', '"""OPER"""'], {}), "('user-add', handle_adduser, 'OPER')\n", (2361, 2397), False, 'from jsb.lib.commands import cmnds\n'), ((2398, 2483), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-add"""', '"""add user to the bot"""', '"""user-add dunker bart@localhost"""'], {}), "('user-add', 'add user to the bot',\n 'user-add dunker bart@localhost')\n", (2410, 2483), False, 'from jsb.lib.examples import examples\n'), ((3533, 3588), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-merge"""', 'handle_merge', "['OPER', 'MEET']"], {}), "('user-merge', handle_merge, ['OPER', 'MEET'])\n", (3542, 3588), False, 'from jsb.lib.commands import cmnds\n'), ((3589, 3720), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-merge"""', '"""<name> <nick> .. merge record with <name> with userhost from <nick>"""', '"""user-merge bart dunker"""'], {}), "('user-merge',\n '<name> <nick> .. merge record with <name> with userhost from <nick>',\n 'user-merge bart dunker')\n", (3601, 3720), False, 'from jsb.lib.examples import examples\n'), ((4333, 4392), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-import"""', 'handle_import', "['IMPORT', 'OPER']"], {}), "('user-import', handle_import, ['IMPORT', 'OPER'])\n", (4342, 4392), False, 'from jsb.lib.commands import cmnds\n'), ((4393, 4568), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-import"""', '"""user-import <userhost> .. merge record with <name> with userhost from the person giving the command (self merge)"""', '"""user-import <EMAIL>"""'], {}), "('user-import',\n 'user-import <userhost> .. merge record with <name> with userhost from the person giving the command (self merge)'\n , 'user-import <EMAIL>')\n", (4405, 4568), False, 'from jsb.lib.examples import examples\n'), ((5150, 5194), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-del"""', 'handle_delete', '"""OPER"""'], {}), "('user-del', handle_delete, 'OPER')\n", (5159, 5194), False, 'from jsb.lib.commands import cmnds\n'), ((5195, 5292), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-del"""', '"""user-del <name> .. delete user with <username>"""', '"""user-del dunker"""'], {}), "('user-del', 'user-del <name> .. delete user with <username>',\n 'user-del dunker')\n", (5207, 5292), False, 'from jsb.lib.examples import examples\n'), ((5887, 5935), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-undel"""', 'handle_undelete', '"""OPER"""'], {}), "('user-undel', handle_undelete, 'OPER')\n", (5896, 5935), False, 'from jsb.lib.commands import cmnds\n'), ((5936, 6039), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-undel"""', '"""user-del <name> .. undelete user with <username>"""', '"""user-undel dunker"""'], {}), "('user-undel',\n 'user-del <name> .. undelete user with <username>', 'user-undel dunker')\n", (5948, 6039), False, 'from jsb.lib.examples import examples\n'), ((6494, 6541), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-scan"""', 'handle_userscan', '"""OPER"""'], {}), "('user-scan', handle_userscan, 'OPER')\n", (6503, 6541), False, 'from jsb.lib.commands import cmnds\n'), ((6542, 6640), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-scan"""', '"""<txt> .. search database for matching usernames"""', '"""user-scan dunk"""'], {}), "('user-scan', '<txt> .. search database for matching usernames',\n 'user-scan dunk')\n", (6554, 6640), False, 'from jsb.lib.examples import examples\n'), ((6794, 6839), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-names"""', 'handle_names', '"""OPER"""'], {}), "('user-names', handle_names, 'OPER')\n", (6803, 6839), False, 'from jsb.lib.commands import cmnds\n'), ((6840, 6914), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-names"""', '"""show names of registered users"""', '"""user-names"""'], {}), "('user-names', 'show names of registered users', 'user-names')\n", (6852, 6914), False, 'from jsb.lib.examples import examples\n'), ((7103, 7157), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-name"""', 'handle_name', "['USER', 'GUEST']"], {}), "('user-name', handle_name, ['USER', 'GUEST'])\n", (7112, 7157), False, 'from jsb.lib.commands import cmnds\n'), ((7158, 7237), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-name"""', '"""show name of user giving the commands"""', '"""user-name"""'], {}), "('user-name', 'show name of user giving the commands', 'user-name')\n", (7170, 7237), False, 'from jsb.lib.examples import examples\n'), ((7737, 7797), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-getname"""', 'handle_getname', "['USER', 'GUEST']"], {}), "('user-getname', handle_getname, ['USER', 'GUEST'])\n", (7746, 7797), False, 'from jsb.lib.commands import cmnds\n'), ((7798, 7902), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-getname"""', '"""user-getname <nick> .. get the name of <nick>"""', '"""user-getname dunker"""'], {}), "('user-getname',\n 'user-getname <nick> .. get the name of <nick>', 'user-getname dunker')\n", (7810, 7902), False, 'from jsb.lib.examples import examples\n'), ((8575, 8624), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-addperm"""', 'handle_addperm', '"""OPER"""'], {}), "('user-addperm', handle_addperm, 'OPER')\n", (8584, 8624), False, 'from jsb.lib.commands import cmnds\n'), ((8625, 8752), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-addperm"""', '"""user-addperm <name> <perm> .. add permissions to user <name>"""', '"""user-addperm dunker rss"""'], {}), "('user-addperm',\n 'user-addperm <name> <perm> .. add permissions to user <name>',\n 'user-addperm dunker rss')\n", (8637, 8752), False, 'from jsb.lib.examples import examples\n'), ((9253, 9304), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-getperms"""', 'handle_getperms', '"""OPER"""'], {}), "('user-getperms', handle_getperms, 'OPER')\n", (9262, 9304), False, 'from jsb.lib.commands import cmnds\n'), ((9305, 9420), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-getperms"""', '"""user-getperms <name> .. get permissions of <name>"""', '"""user-getperms dunker"""'], {}), "('user-getperms',\n 'user-getperms <name> .. get permissions of <name>', 'user-getperms dunker'\n )\n", (9317, 9420), False, 'from jsb.lib.examples import examples\n'), ((9900, 9956), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-perms"""', 'handle_perms', "['USER', 'GUEST']"], {}), "('user-perms', handle_perms, ['USER', 'GUEST'])\n", (9909, 9956), False, 'from jsb.lib.commands import cmnds\n'), ((9957, 10016), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-perms"""', '"""get permissions"""', '"""user-perms"""'], {}), "('user-perms', 'get permissions', 'user-perms')\n", (9969, 10016), False, 'from jsb.lib.examples import examples\n'), ((10572, 10621), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-delperm"""', 'handle_delperm', '"""OPER"""'], {}), "('user-delperm', handle_delperm, 'OPER')\n", (10581, 10621), False, 'from jsb.lib.commands import cmnds\n'), ((10622, 10726), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-delperm"""', '"""delete from user <name> permission <perm>"""', '"""user-delperm dunker rss"""'], {}), "('user-delperm', 'delete from user <name> permission <perm>',\n 'user-delperm dunker rss')\n", (10634, 10726), False, 'from jsb.lib.examples import examples\n'), ((11386, 11439), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-addstatus"""', 'handle_addstatus', '"""OPER"""'], {}), "('user-addstatus', handle_addstatus, 'OPER')\n", (11395, 11439), False, 'from jsb.lib.commands import cmnds\n'), ((11440, 11543), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-addstatus"""', '"""user-addstatus <name> <status>"""', '"""user-addstatus dunker #dunkbots"""'], {}), "('user-addstatus', 'user-addstatus <name> <status>',\n 'user-addstatus dunker #dunkbots')\n", (11452, 11543), False, 'from jsb.lib.examples import examples\n'), ((12043, 12096), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-getstatus"""', 'handle_getstatus', '"""OPER"""'], {}), "('user-getstatus', handle_getstatus, 'OPER')\n", (12052, 12096), False, 'from jsb.lib.commands import cmnds\n'), ((12097, 12205), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-getstatus"""', '"""user-getstatus <name> .. get status of <name>"""', '"""user-getstatus dunker"""'], {}), "('user-getstatus',\n 'user-getstatus <name> .. get status of <name>', 'user-getstatus dunker')\n", (12109, 12205), False, 'from jsb.lib.examples import examples\n'), ((12483, 12541), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-status"""', 'handle_status', "['USER', 'GUEST']"], {}), "('user-status', handle_status, ['USER', 'GUEST'])\n", (12492, 12541), False, 'from jsb.lib.commands import cmnds\n'), ((12542, 12598), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-status"""', '"""get status"""', '"""user-status"""'], {}), "('user-status', 'get status', 'user-status')\n", (12554, 12598), False, 'from jsb.lib.examples import examples\n'), ((13160, 13213), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-delstatus"""', 'handle_delstatus', '"""OPER"""'], {}), "('user-delstatus', handle_delstatus, 'OPER')\n", (13169, 13213), False, 'from jsb.lib.commands import cmnds\n'), ((13214, 13302), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-delstatus"""', '"""<name> <status>"""', '"""user-delstatus dunker #dunkbots"""'], {}), "('user-delstatus', '<name> <status>',\n 'user-delstatus dunker #dunkbots')\n", (13226, 13302), False, 'from jsb.lib.examples import examples\n'), ((13947, 14004), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-adduserhost"""', 'handle_adduserhost', '"""OPER"""'], {}), "('user-adduserhost', handle_adduserhost, 'OPER')\n", (13956, 14004), False, 'from jsb.lib.commands import cmnds\n'), ((14005, 14114), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-adduserhost"""', '"""user-adduserhost <name> <userhost>"""', '"""user-adduserhost dunker <EMAIL>"""'], {}), "('user-adduserhost', 'user-adduserhost <name> <userhost>',\n 'user-adduserhost dunker <EMAIL>')\n", (14017, 14114), False, 'from jsb.lib.examples import examples\n'), ((14776, 14833), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-deluserhost"""', 'handle_deluserhost', '"""OPER"""'], {}), "('user-deluserhost', handle_deluserhost, 'OPER')\n", (14785, 14833), False, 'from jsb.lib.commands import cmnds\n'), ((14834, 15004), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-deluserhost"""', '"""user-deluserhost <name> <userhost> .. delete from usershosts of <name> userhost <userhost>"""', '"""user-deluserhost dunker <EMAIL>"""'], {}), "('user-deluserhost',\n 'user-deluserhost <name> <userhost> .. delete from usershosts of <name> userhost <userhost>'\n , 'user-deluserhost dunker <EMAIL>')\n", (14846, 15004), False, 'from jsb.lib.examples import examples\n'), ((15409, 15468), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-getuserhosts"""', 'handle_getuserhosts', '"""OPER"""'], {}), "('user-getuserhosts', handle_getuserhosts, 'OPER')\n", (15418, 15468), False, 'from jsb.lib.commands import cmnds\n'), ((15469, 15593), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-getuserhosts"""', '"""user-getuserhosts <name> .. get userhosts of <name>"""', '"""user-getuserhosts dunker"""'], {}), "('user-getuserhosts',\n 'user-getuserhosts <name> .. get userhosts of <name>',\n 'user-getuserhosts dunker')\n", (15481, 15593), False, 'from jsb.lib.examples import examples\n'), ((15882, 15946), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-userhosts"""', 'handle_userhosts', "['USER', 'GUEST']"], {}), "('user-userhosts', handle_userhosts, ['USER', 'GUEST'])\n", (15891, 15946), False, 'from jsb.lib.commands import cmnds\n'), ((15947, 16012), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-userhosts"""', '"""get userhosts"""', '"""user-userhosts"""'], {}), "('user-userhosts', 'get userhosts', 'user-userhosts')\n", (15959, 16012), False, 'from jsb.lib.examples import examples\n'), ((16474, 16527), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-getemail"""', 'handle_getemail', "['USER']"], {}), "('user-getemail', handle_getemail, ['USER'])\n", (16483, 16527), False, 'from jsb.lib.commands import cmnds\n'), ((16530, 16645), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-getemail"""', '"""user-getemail <name> .. get email from user <name>"""', '"""user-getemail dunker"""'], {}), "('user-getemail',\n 'user-getemail <name> .. get email from user <name>',\n 'user-getemail dunker')\n", (16542, 16645), False, 'from jsb.lib.examples import examples\n'), ((17040, 17091), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-setemail"""', 'handle_setemail', '"""OPER"""'], {}), "('user-setemail', handle_setemail, 'OPER')\n", (17049, 17091), False, 'from jsb.lib.commands import cmnds\n'), ((17092, 17220), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-setemail"""', '"""user-setemail <name> <email>.. set email of user <name>"""', '"""user-setemail dunker <EMAIL>"""'], {}), "('user-setemail',\n 'user-setemail <name> <email>.. set email of user <name>',\n 'user-setemail dunker <EMAIL>')\n", (17104, 17220), False, 'from jsb.lib.examples import examples\n'), ((17611, 17667), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-email"""', 'handle_email', "['USER', 'GUEST']"], {}), "('user-email', handle_email, ['USER', 'GUEST'])\n", (17620, 17667), False, 'from jsb.lib.commands import cmnds\n'), ((17668, 17721), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-email"""', '"""get email"""', '"""user-email"""'], {}), "('user-email', 'get email', 'user-email')\n", (17680, 17721), False, 'from jsb.lib.examples import examples\n'), ((18116, 18167), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-delemail"""', 'handle_delemail', '"""OPER"""'], {}), "('user-delemail', handle_delemail, 'OPER')\n", (18125, 18167), False, 'from jsb.lib.commands import cmnds\n'), ((18168, 18229), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-delemail"""', '"""reset email"""', '"""user-delemail"""'], {}), "('user-delemail', 'reset email', 'user-delemail')\n", (18180, 18229), False, 'from jsb.lib.examples import examples\n'), ((19001, 19065), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-addpermit"""', 'handle_addpermit', "['USER', 'GUEST']"], {}), "('user-addpermit', handle_addpermit, ['USER', 'GUEST'])\n", (19010, 19065), False, 'from jsb.lib.commands import cmnds\n'), ((19066, 19234), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-addpermit"""', '"""user-addpermit <nick> <what> .. permit nick access to <what> .. use setperms to add permissions"""', '"""user-addpermit dunker todo"""'], {}), "('user-addpermit',\n 'user-addpermit <nick> <what> .. permit nick access to <what> .. use setperms to add permissions'\n , 'user-addpermit dunker todo')\n", (19078, 19234), False, 'from jsb.lib.examples import examples\n'), ((19876, 19934), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-permit"""', 'handle_permit', "['USER', 'GUEST']"], {}), "('user-permit', handle_permit, ['USER', 'GUEST'])\n", (19885, 19934), False, 'from jsb.lib.commands import cmnds\n'), ((19935, 20023), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-permit"""', '"""show permit of user giving the command"""', '"""user-permit"""'], {}), "('user-permit', 'show permit of user giving the command',\n 'user-permit')\n", (19947, 20023), False, 'from jsb.lib.examples import examples\n'), ((20812, 20880), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-delpermit"""', 'handle_userdelpermit', "['USER', 'GUEST']"], {}), "('user-delpermit', handle_userdelpermit, ['USER', 'GUEST'])\n", (20821, 20880), False, 'from jsb.lib.commands import cmnds\n'), ((20881, 20979), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-delpermit"""', '"""user-delpermit <name> <permit>"""', '"""user-delpermit dunker todo"""'], {}), "('user-delpermit', 'user-delpermit <name> <permit>',\n 'user-delpermit dunker todo')\n", (20893, 20979), False, 'from jsb.lib.examples import examples\n'), ((21832, 21877), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-check"""', 'handle_check', '"""OPER"""'], {}), "('user-check', handle_check, 'OPER')\n", (21841, 21877), False, 'from jsb.lib.commands import cmnds\n'), ((21878, 21946), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-check"""', '"""user-check <nick>"""', '"""user-check dunker"""'], {}), "('user-check', 'user-check <nick>', 'user-check dunker')\n", (21890, 21946), False, 'from jsb.lib.examples import examples\n'), ((22640, 22683), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-show"""', 'handle_show', '"""OPER"""'], {}), "('user-show', handle_show, 'OPER')\n", (22649, 22683), False, 'from jsb.lib.commands import cmnds\n'), ((22684, 22776), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-show"""', '"""user-show <name> .. show data of <name>"""', '"""user-show dunker"""'], {}), "('user-show', 'user-show <name> .. show data of <name>',\n 'user-show dunker')\n", (22696, 22776), False, 'from jsb.lib.examples import examples\n'), ((23480, 23527), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-match"""', 'handle_match', "['OPER']"], {}), "('user-match', handle_match, ['OPER'])\n", (23489, 23527), False, 'from jsb.lib.commands import cmnds\n'), ((23530, 23605), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-match"""', '"""user-match <userhost>"""', '"""user-match test@test"""'], {}), "('user-match', 'user-match <userhost>', 'user-match test@test')\n", (23542, 23605), False, 'from jsb.lib.examples import examples\n'), ((24030, 24087), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-allstatus"""', 'handle_getuserstatus', '"""OPER"""'], {}), "('user-allstatus', handle_getuserstatus, 'OPER')\n", (24039, 24087), False, 'from jsb.lib.commands import cmnds\n'), ((24088, 24219), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-allstatus"""', '"""user-allstatus <status> .. get all users with <status> status"""', '"""user-allstatus #dunkbots"""'], {}), "('user-allstatus',\n 'user-allstatus <status> .. get all users with <status> status',\n 'user-allstatus #dunkbots')\n", (24100, 24219), False, 'from jsb.lib.examples import examples\n'), ((24635, 24688), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-allperm"""', 'handle_getuserperm', '"""OPER"""'], {}), "('user-allperm', handle_getuserperm, 'OPER')\n", (24644, 24688), False, 'from jsb.lib.commands import cmnds\n'), ((24689, 24804), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-allperm"""', '"""user-allperm <perm> .. get users with <perm> permission"""', '"""user-allperm rss"""'], {}), "('user-allperm',\n 'user-allperm <perm> .. get users with <perm> permission',\n 'user-allperm rss')\n", (24701, 24804), False, 'from jsb.lib.examples import examples\n'), ((25264, 25315), 'jsb.lib.commands.cmnds.add', 'cmnds.add', (['"""user-search"""', 'handle_usersearch', '"""OPER"""'], {}), "('user-search', handle_usersearch, 'OPER')\n", (25273, 25315), False, 'from jsb.lib.commands import cmnds\n'), ((25316, 25393), 'jsb.lib.examples.examples.add', 'examples.add', (['"""user-search"""', '"""search users userhosts"""', '"""user-search gozerbot"""'], {}), "('user-search', 'search users userhosts', 'user-search gozerbot')\n", (25328, 25393), False, 'from jsb.lib.examples import examples\n'), ((985, 1002), 'jsb.utils.generic.getwho', 'getwho', (['bot', 'nick'], {}), '(bot, nick)\n', (991, 1002), False, 'from jsb.utils.generic import getwho\n'), ((1007, 1063), 'logging.warn', 'logging.warn', (["('users - meet - userhost is %s' % userhost)"], {}), "('users - meet - userhost is %s' % userhost)\n", (1019, 1063), False, 'import logging\n'), ((3194, 3211), 'jsb.utils.generic.getwho', 'getwho', (['bot', 'nick'], {}), '(bot, nick)\n', (3200, 3211), False, 'from jsb.utils.generic import getwho\n'), ((4900, 4915), 'jsb.utils.name.stripname', 'stripname', (['name'], {}), '(name)\n', (4909, 4915), False, 'from jsb.utils.name import stripname\n'), ((5632, 5647), 'jsb.utils.name.stripname', 'stripname', (['name'], {}), '(name)\n', (5641, 5647), False, 'from jsb.utils.name import stripname\n'), ((7471, 7488), 'jsb.utils.generic.getwho', 'getwho', (['bot', 'nick'], {}), '(bot, nick)\n', (7477, 7488), False, 'from jsb.utils.generic import getwho\n'), ((21220, 21237), 'jsb.utils.generic.getwho', 'getwho', (['bot', 'nick'], {}), '(bot, nick)\n', (21226, 21237), False, 'from jsb.utils.generic import getwho\n')]
|
# -*- coding: utf-8 -*-
"""
Names database models
A structure for holding a (user-provided) name for an Individual.
--------------------
"""
import uuid
from app.extensions import db, HoustonModel, Timestamp
import logging
import app.extensions.logging as AuditLog
log = logging.getLogger(__name__) # pylint: disable=invalid-name
class NamePreferringUsersJoin(db.Model, HoustonModel):
name_guid = db.Column(
db.GUID, db.ForeignKey('name.guid', ondelete='CASCADE'), primary_key=True
)
user_guid = db.Column(
db.GUID, db.ForeignKey('user.guid', ondelete='CASCADE'), primary_key=True
)
name = db.relationship('Name', back_populates='preferring_user_joins')
user = db.relationship('User')
class Name(db.Model, HoustonModel, Timestamp):
"""
Names database model. For a name (one of possibly many) on an Individual.
"""
def __init__(self, *args, **kwargs):
AuditLog.user_create_object(
log, self, f"for Individual {kwargs.get('individual_guid')}"
)
super().__init__(*args, **kwargs)
guid = db.Column(
db.GUID, default=uuid.uuid4, primary_key=True
) # pylint: disable=invalid-name
value = db.Column(db.String(), index=True, nullable=False)
context = db.Column(db.String(), index=True, nullable=False)
individual_guid = db.Column(
db.GUID, db.ForeignKey('individual.guid'), index=True, nullable=False
)
individual = db.relationship('Individual', back_populates='names')
creator_guid = db.Column(
db.GUID, db.ForeignKey('user.guid'), index=True, nullable=False
)
creator = db.relationship(
'User',
backref=db.backref(
'names_created',
primaryjoin='User.guid == Name.creator_guid',
order_by='Name.guid',
),
)
preferring_user_joins = db.relationship(
'NamePreferringUsersJoin', back_populates='name'
)
# this will ensure individual+context is unique (one context per individual)
__table_args__ = (db.UniqueConstraint(context, individual_guid),)
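    # e.g. flushing two Name rows that share the same (context, individual_guid)
    # pair raises sqlalchemy.exc.IntegrityError.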
def __repr__(self):
return (
'<{class_name}('
'guid={self.guid}, '
"context='{self.context}', "
'value={self.value} '
')>'.format(class_name=self.__class__.__name__, self=self)
)
def get_preferring_users(self):
return [join.user for join in self.preferring_user_joins]
def add_preferring_user(self, user):
if user in self.get_preferring_users():
raise ValueError(f'{user} already in list')
pref_join = NamePreferringUsersJoin(name_guid=self.guid, user_guid=user.guid)
with db.session.begin(subtransactions=True):
db.session.add(pref_join)
def add_preferring_users(self, users):
if not users or not isinstance(users, list):
return
for user in set(users): # forces unique
self.add_preferring_user(user)
def remove_preferring_user(self, user):
found = None
for pref_join in self.preferring_user_joins:
if pref_join.user_guid == user.guid:
found = pref_join
if found:
with db.session.begin(subtransactions=True):
db.session.delete(found)
return True
return False
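    # A minimal usage sketch of the preferring-users API above (`name` and
    # `alice` are hypothetical, already-persisted objects):
    #   name.add_preferring_user(alice)
    #   assert alice in name.get_preferring_users()
    #   name.remove_preferring_user(alice)  # True when a join row was deleted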
def delete(self):
AuditLog.delete_object(log, self, f'from Individual {self.individual.guid}')
with db.session.begin(subtransactions=True):
for join in self.preferring_user_joins:
db.session.delete(join)
db.session.delete(self)
|
[
"app.extensions.db.String",
"app.extensions.db.ForeignKey",
"app.extensions.db.relationship",
"app.extensions.db.session.add",
"app.extensions.db.backref",
"app.extensions.db.session.begin",
"app.extensions.db.UniqueConstraint",
"app.extensions.db.Column",
"app.extensions.logging.delete_object",
"app.extensions.db.session.delete",
"logging.getLogger"
] |
[((274, 301), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (291, 301), False, 'import logging\n'), ((632, 695), 'app.extensions.db.relationship', 'db.relationship', (['"""Name"""'], {'back_populates': '"""preferring_user_joins"""'}), "('Name', back_populates='preferring_user_joins')\n", (647, 695), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((707, 730), 'app.extensions.db.relationship', 'db.relationship', (['"""User"""'], {}), "('User')\n", (722, 730), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((1091, 1147), 'app.extensions.db.Column', 'db.Column', (['db.GUID'], {'default': 'uuid.uuid4', 'primary_key': '(True)'}), '(db.GUID, default=uuid.uuid4, primary_key=True)\n', (1100, 1147), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((1459, 1512), 'app.extensions.db.relationship', 'db.relationship', (['"""Individual"""'], {'back_populates': '"""names"""'}), "('Individual', back_populates='names')\n", (1474, 1512), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((1864, 1929), 'app.extensions.db.relationship', 'db.relationship', (['"""NamePreferringUsersJoin"""'], {'back_populates': '"""name"""'}), "('NamePreferringUsersJoin', back_populates='name')\n", (1879, 1929), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((435, 481), 'app.extensions.db.ForeignKey', 'db.ForeignKey', (['"""name.guid"""'], {'ondelete': '"""CASCADE"""'}), "('name.guid', ondelete='CASCADE')\n", (448, 481), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((550, 596), 'app.extensions.db.ForeignKey', 'db.ForeignKey', (['"""user.guid"""'], {'ondelete': '"""CASCADE"""'}), "('user.guid', ondelete='CASCADE')\n", (563, 596), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((1217, 1228), 'app.extensions.db.String', 'db.String', ([], {}), '()\n', (1226, 1228), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((1283, 1294), 'app.extensions.db.String', 'db.String', ([], {}), '()\n', (1292, 1294), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((1375, 1407), 'app.extensions.db.ForeignKey', 'db.ForeignKey', (['"""individual.guid"""'], {}), "('individual.guid')\n", (1388, 1407), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((1561, 1587), 'app.extensions.db.ForeignKey', 'db.ForeignKey', (['"""user.guid"""'], {}), "('user.guid')\n", (1574, 1587), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((2048, 2093), 'app.extensions.db.UniqueConstraint', 'db.UniqueConstraint', (['context', 'individual_guid'], {}), '(context, individual_guid)\n', (2067, 2093), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((3384, 3460), 'app.extensions.logging.delete_object', 'AuditLog.delete_object', (['log', 'self', 'f"""from Individual {self.individual.guid}"""'], {}), "(log, self, f'from Individual {self.individual.guid}')\n", (3406, 3460), True, 'import app.extensions.logging as AuditLog\n'), ((1685, 1784), 'app.extensions.db.backref', 'db.backref', (['"""names_created"""'], {'primaryjoin': '"""User.guid == Name.creator_guid"""', 'order_by': '"""Name.guid"""'}), "('names_created', primaryjoin='User.guid == Name.creator_guid',\n order_by='Name.guid')\n", (1695, 1784), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((2704, 2742), 'app.extensions.db.session.begin', 'db.session.begin', ([], {'subtransactions': '(True)'}), '(subtransactions=True)\n', (2720, 2742), 
False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((2756, 2781), 'app.extensions.db.session.add', 'db.session.add', (['pref_join'], {}), '(pref_join)\n', (2770, 2781), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((3474, 3512), 'app.extensions.db.session.begin', 'db.session.begin', ([], {'subtransactions': '(True)'}), '(subtransactions=True)\n', (3490, 3512), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((3618, 3641), 'app.extensions.db.session.delete', 'db.session.delete', (['self'], {}), '(self)\n', (3635, 3641), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((3227, 3265), 'app.extensions.db.session.begin', 'db.session.begin', ([], {'subtransactions': '(True)'}), '(subtransactions=True)\n', (3243, 3265), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((3283, 3307), 'app.extensions.db.session.delete', 'db.session.delete', (['found'], {}), '(found)\n', (3300, 3307), False, 'from app.extensions import db, HoustonModel, Timestamp\n'), ((3582, 3605), 'app.extensions.db.session.delete', 'db.session.delete', (['join'], {}), '(join)\n', (3599, 3605), False, 'from app.extensions import db, HoustonModel, Timestamp\n')]
|
from __future__ import division
from math import radians as rad, cos, sin
from PIL import Image, ImageDraw
from random import randint
width,height = 1280,1280
def randColour(s=0,e=255):
return (randint(s,e),randint(s,e),randint(s,e))
def drange(start, stop, step):
r = start
while r < stop:
yield r
r += step
def convert(r,theta):
return [r*cos(theta), r*sin(theta)]
def rotate(x,y, angle):
return [x * cos(angle) - y * sin(angle),x*sin(angle) + y*cos(angle)]
class DrawImage(object):
"""docstring for DrawImage"""
width = 1280
height = 1280
def __init__(self):
self.size = (self.width, self.height)
self.mid = (self.width/2, self.height/2)
def create(self, size=(1280,1280), colour=(0,0,0)):
self.image = Image.new("RGB", size, colour)
self.pixels = self.image.load()
self.size = size
self.width = size[0]
self.height = size[1]
self.mid = (self.width/2, self.height/2)
def open(self, name):
self.image = Image.open(name)
self.pixels = self.image.load()
self.size = self.image.size
self.width = self.size[0]
self.height = self.size[1]
self.mid = (self.width/2, self.height/2)
def plot(self, spot, colour=(255,255,255)):
x,y = spot[0], self.height - spot[1]
if x >= self.width or y >= self.height:
return
if x<0 or y < 0:
return
self.pixels[x,y] = colour
def plotRadians(self, function, start=0, end=100, offset=(0,0), steps=1.0, scale=1.0, colour=(255,255,255), rotation=0.0):
for i in drange(start, end, steps):
t = rad(i)
r = function(t) * scale
if type(r) is tuple:
x,y = r
else:
x,y = convert(r,t)
x,y = rotate(x,y,rad(rotation))
self.plot((x+offset[0],y+offset[1]), colour)
def addOver(self, image):
for x in xrange(self.width):
for y in xrange(self.height):
if x < image.width and y < image.height and self.pixels[x,y] != (0,0,0):
image.plot((x,y), self.pixels[x,y])
self.image = image.image
self.pixels = image.pixels
self.size = image.size
self.width = image.width
self.height = image.height
def addUnder(self, image):
w,h = image.size
for x in xrange(image.width):
for y in xrange(image.height):
if x < w and y < h and image.pixels[x,y] != (0,0,0):
self.plot((x,y), image.pixels[x,y])
def show(self):
self.image.show()
    def save(self, name, imageType = None):
        # Bug fix: the original compared the builtin `type` to None, which is
        # always False; the check must be on the `imageType` argument.
        if imageType is None:
            self.image.save(name)
        else:
            self.image.save(name, imageType)
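# A usage sketch (illustrative, not part of the original module): draw a
# four-petal rose curve; the lambda, scale, and filename are arbitrary choices.
#   img = DrawImage()
#   img.create()
#   img.plotRadians(lambda t: cos(4 * t), end=360, offset=img.mid,
#                   steps=0.1, scale=500, colour=randColour())
#   img.save('rose.png')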
|
[
"PIL.Image.new",
"random.randint",
"math.radians",
"math.sin",
"PIL.Image.open",
"math.cos"
] |
[((200, 213), 'random.randint', 'randint', (['s', 'e'], {}), '(s, e)\n', (207, 213), False, 'from random import randint\n'), ((213, 226), 'random.randint', 'randint', (['s', 'e'], {}), '(s, e)\n', (220, 226), False, 'from random import randint\n'), ((226, 239), 'random.randint', 'randint', (['s', 'e'], {}), '(s, e)\n', (233, 239), False, 'from random import randint\n'), ((794, 824), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'size', 'colour'], {}), "('RGB', size, colour)\n", (803, 824), False, 'from PIL import Image, ImageDraw\n'), ((1046, 1062), 'PIL.Image.open', 'Image.open', (['name'], {}), '(name)\n', (1056, 1062), False, 'from PIL import Image, ImageDraw\n'), ((377, 387), 'math.cos', 'cos', (['theta'], {}), '(theta)\n', (380, 387), False, 'from math import radians as rad, cos, sin\n'), ((391, 401), 'math.sin', 'sin', (['theta'], {}), '(theta)\n', (394, 401), False, 'from math import radians as rad, cos, sin\n'), ((1684, 1690), 'math.radians', 'rad', (['i'], {}), '(i)\n', (1687, 1690), True, 'from math import radians as rad, cos, sin\n'), ((444, 454), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (447, 454), False, 'from math import radians as rad, cos, sin\n'), ((461, 471), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (464, 471), False, 'from math import radians as rad, cos, sin\n'), ((474, 484), 'math.sin', 'sin', (['angle'], {}), '(angle)\n', (477, 484), False, 'from math import radians as rad, cos, sin\n'), ((489, 499), 'math.cos', 'cos', (['angle'], {}), '(angle)\n', (492, 499), False, 'from math import radians as rad, cos, sin\n'), ((1867, 1880), 'math.radians', 'rad', (['rotation'], {}), '(rotation)\n', (1870, 1880), True, 'from math import radians as rad, cos, sin\n')]
|
"""
Tests for typehinting.py
"""
# -----------------------------------------------------------------------------
# IMPORTS
# -----------------------------------------------------------------------------
import pytest
from hsr4hci.typehinting import (
BaseLinearModel,
BaseLinearModelCV,
RegressorModel,
)
# -----------------------------------------------------------------------------
# TEST CASES
# -----------------------------------------------------------------------------
def test__regressor_model() -> None:
"""
Test `hsr4hci.typehinting.RegressorModel`.
"""
with pytest.raises(TypeError) as type_error:
RegressorModel() # type: ignore
assert 'Protocols cannot be instantiated' in str(type_error)
def test__base_linear_model() -> None:
"""
Test `hsr4hci.typehinting.BaseLinearModel`.
"""
with pytest.raises(TypeError) as type_error:
BaseLinearModel() # type: ignore
assert 'Protocols cannot be instantiated' in str(type_error)
def test__base_linear_model_cv() -> None:
"""
Test `hsr4hci.typehinting.BaseLinearModelCV`.
"""
with pytest.raises(TypeError) as type_error:
BaseLinearModelCV() # type: ignore
assert 'Protocols cannot be instantiated' in str(type_error)
|
[
"hsr4hci.typehinting.BaseLinearModel",
"pytest.raises",
"hsr4hci.typehinting.RegressorModel",
"hsr4hci.typehinting.BaseLinearModelCV"
] |
[((606, 630), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (619, 630), False, 'import pytest\n'), ((654, 670), 'hsr4hci.typehinting.RegressorModel', 'RegressorModel', ([], {}), '()\n', (668, 670), False, 'from hsr4hci.typehinting import BaseLinearModel, BaseLinearModelCV, RegressorModel\n'), ((867, 891), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (880, 891), False, 'import pytest\n'), ((915, 932), 'hsr4hci.typehinting.BaseLinearModel', 'BaseLinearModel', ([], {}), '()\n', (930, 932), False, 'from hsr4hci.typehinting import BaseLinearModel, BaseLinearModelCV, RegressorModel\n'), ((1134, 1158), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1147, 1158), False, 'import pytest\n'), ((1182, 1201), 'hsr4hci.typehinting.BaseLinearModelCV', 'BaseLinearModelCV', ([], {}), '()\n', (1199, 1201), False, 'from hsr4hci.typehinting import BaseLinearModel, BaseLinearModelCV, RegressorModel\n')]
|
'''
Strategy: over a single trading day, prices rose in 42% of cases, fell in 46%,
and showed no change in the remaining 12% (claim checked per ticker below).
Algorithm: portfolio optimization with the PyPortfolioOpt library
(max Sharpe ratio, risk, return, funds remaining)
'''
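# A minimal sketch (an illustration, not part of the original) of the
# up/down/unchanged split the claim above rests on; `closes` is assumed to be
# a pandas Series of daily closing prices:
#   rtn = closes.pct_change().dropna()
#   up, down, flat = (rtn > 0).mean(), (rtn < 0).mean(), (rtn == 0).mean()
# Each value is the fraction of trading days in that bucket; the docstring
# asserts roughly 0.42 / 0.46 / 0.12.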
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import FinanceDataReader as fdr
import datetime
from pykrx import stock
import requests
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from pypfopt import expected_returns
from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices
# Load every KOSPI & KOSDAQ ticker as of today
today = datetime.datetime.today().strftime("%Y%m%d")
kospi = stock.get_market_fundamental_by_ticker(today, market='KOSPI').index
kosdaq = stock.get_market_fundamental_by_ticker(today, market='KOSDAQ').index
stocks = kospi.append(kosdaq)
def up_down_zero(code): # return the fractions of up / down / unchanged days for a ticker over the past year
today = datetime.datetime.today().strftime("%Y-%m-%d")
year = today[0:4]
month_day = today[4:]
one_year_ago = str(int(year) - 1) + month_day
data = fdr.DataReader(code, one_year_ago)[['Close']]
    data_rtn = data.pct_change().dropna()  # drop the NaN produced for the first row
    up = 0
    nothing = 0
    down = 0
    # Iterate over the returns directly; the original indexed by position over
    # data.index, which counted the leading NaN as a "down" day.
    for r in data_rtn.Close:
        if r > 0:
            up = up + 1
        elif r == 0:
            nothing = nothing + 1
        else:
            down = down + 1
    total_days = len(data_rtn.index)
return up / total_days, down / total_days, nothing / total_days
def get_up_down_zero_df(stocks): # given a list of tickers, return a DataFrame of up/down/unchanged probabilities
up_list = []
down_list = []
zero_list = []
for i in stocks:
temp = up_down_zero(i)
up_list.append(temp[0])
down_list.append(temp[1])
zero_list.append(temp[2])
    # Build the result DataFrame
    up_down_zero_df = pd.DataFrame()
    up_down_zero_df['종목 코드'] = stocks # ticker code
    up_down_zero_df['상승 확률'] = up_list # fraction of days with a positive daily return
    up_down_zero_df['하락 확률'] = down_list # fraction of days with a negative daily return
    up_down_zero_df['변동 없는 확률'] = zero_list # fraction of days with a zero daily return
up_down_zero_df['상승 확률 높은 순위'] = up_down_zero_df['상승 확률'].rank(ascending=False)
up_down_zero_df = up_down_zero_df.sort_values(by='상승 확률 높은 순위')
return up_down_zero_df
up_down_zero_df = get_up_down_zero_df(stocks)
# `idx_list` was never defined above; assumed here to be the indices of the
# top-ranked tickers (top 20 is an arbitrary illustrative cut).
idx_list = up_down_zero_df.index[:20]
symbol_udz = []
for i in idx_list:
    symbol_udz.append(up_down_zero_df.loc[i][0])
# Store the selected (high up-probability) tickers
assets = np.array(symbol_udz)
start_date = '2018-07-21'
end_date = '2021-07-21'
df = pd.DataFrame()
for stock in assets:
df[stock] = fdr.DataReader(stock, start_date, end_date)['Close']
df_dropna = df.dropna(axis = 1)
mu = expected_returns.mean_historical_return(df_dropna)
S = risk_models.sample_cov(df_dropna)
ef = EfficientFrontier(mu, S, solver="SCS")
weights = ef.max_sharpe()
cleaned_weights = ef.clean_weights()
print(ef.portfolio_performance(verbose=True))
portfolio_val = 15000000
latest_prices = get_latest_prices(df_dropna)
weights = cleaned_weights
da = DiscreteAllocation(weights, latest_prices, total_portfolio_value=portfolio_val)
allocation, leftover = da.lp_portfolio(verbose=False)
rmse = da._allocation_rmse_error(verbose=False)  # needed by the RMSE print below
print('Discrete Allocaion: ', allocation)
print('Funds Remaining: ', leftover, ' KRW')
discrete_allocation_list = []
for symbol in allocation:
discrete_allocation_list.append(allocation.get(symbol))
portfolio_df = pd.DataFrame(columns = ['company_Ticker', 'Discrete_val_'+str(portfolio_val)])
portfolio_df['company_Ticker'] = allocation
portfolio_df['Discrete_val_'+str(portfolio_val)] = discrete_allocation_list
portfolio_df_sorted = portfolio_df.sort_values('Discrete_val_'+str(portfolio_val), ascending = False)
portfolio_df_sorted = portfolio_df_sorted.reset_index(drop=True)
print('Funds Remaining: ', leftover, ' KRW')
print(ef.portfolio_performance(verbose=True))
print('Allocation has RMSE: {:.3f}'.format(rmse))
|
[
"pandas.DataFrame",
"pypfopt.risk_models.sample_cov",
"pykrx.stock.get_market_fundamental_by_ticker",
"datetime.datetime.today",
"pypfopt.efficient_frontier.EfficientFrontier",
"pypfopt.discrete_allocation.get_latest_prices",
"numpy.array",
"pypfopt.expected_returns.mean_historical_return",
"pypfopt.discrete_allocation.DiscreteAllocation",
"FinanceDataReader.DataReader"
] |
[((2378, 2398), 'numpy.array', 'np.array', (['symbol_udz'], {}), '(symbol_udz)\n', (2386, 2398), True, 'import numpy as np\n'), ((2454, 2468), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2466, 2468), True, 'import pandas as pd\n'), ((2596, 2646), 'pypfopt.expected_returns.mean_historical_return', 'expected_returns.mean_historical_return', (['df_dropna'], {}), '(df_dropna)\n', (2635, 2646), False, 'from pypfopt import expected_returns\n'), ((2651, 2684), 'pypfopt.risk_models.sample_cov', 'risk_models.sample_cov', (['df_dropna'], {}), '(df_dropna)\n', (2673, 2684), False, 'from pypfopt import risk_models\n'), ((2691, 2729), 'pypfopt.efficient_frontier.EfficientFrontier', 'EfficientFrontier', (['mu', 'S'], {'solver': '"""SCS"""'}), "(mu, S, solver='SCS')\n", (2708, 2729), False, 'from pypfopt.efficient_frontier import EfficientFrontier\n'), ((2881, 2909), 'pypfopt.discrete_allocation.get_latest_prices', 'get_latest_prices', (['df_dropna'], {}), '(df_dropna)\n', (2898, 2909), False, 'from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices\n'), ((2941, 3020), 'pypfopt.discrete_allocation.DiscreteAllocation', 'DiscreteAllocation', (['weights', 'latest_prices'], {'total_portfolio_value': 'portfolio_val'}), '(weights, latest_prices, total_portfolio_value=portfolio_val)\n', (2959, 3020), False, 'from pypfopt.discrete_allocation import DiscreteAllocation, get_latest_prices\n'), ((604, 665), 'pykrx.stock.get_market_fundamental_by_ticker', 'stock.get_market_fundamental_by_ticker', (['today'], {'market': '"""KOSPI"""'}), "(today, market='KOSPI')\n", (642, 665), False, 'from pykrx import stock\n'), ((681, 743), 'pykrx.stock.get_market_fundamental_by_ticker', 'stock.get_market_fundamental_by_ticker', (['today'], {'market': '"""KOSDAQ"""'}), "(today, market='KOSDAQ')\n", (719, 743), False, 'from pykrx import stock\n'), ((1789, 1803), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1801, 1803), True, 'import pandas as pd\n'), ((551, 576), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (574, 576), False, 'import datetime\n'), ((1013, 1047), 'FinanceDataReader.DataReader', 'fdr.DataReader', (['code', 'one_year_ago'], {}), '(code, one_year_ago)\n', (1027, 1047), True, 'import FinanceDataReader as fdr\n'), ((2504, 2547), 'FinanceDataReader.DataReader', 'fdr.DataReader', (['stock', 'start_date', 'end_date'], {}), '(stock, start_date, end_date)\n', (2518, 2547), True, 'import FinanceDataReader as fdr\n'), ((856, 881), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (879, 881), False, 'import datetime\n')]
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_json_generic
short_description: Configure Fortinet's FortiOS and FortiGate with the JSON generic method.
description:
    - This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
      user to set and modify the json feature and generic category.
      Examples include all parameters; values need to be adjusted to datasources before usage.
Tested with FOS v6.0.4
version_added: "2.9"
author:
- <NAME> (@frankshen01)
- <NAME> (@fgtdev-hblu)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
enable_log:
description:
- Enable/Disable logging for task.
type: bool
required: false
default: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
json_generic:
description:
- json generic
default: null
type: dict
suboptions:
dictbody:
description:
- Body with YAML list of key/value format
type: dict
jsonbody:
description:
- Body with JSON string format, will always give priority to jsonbody
type: str
method:
description:
- HTTP methods
type: str
choices:
- GET
- PUT
- POST
- DELETE
path:
description:
- URL path, e.g./api/v2/cmdb/firewall/address
type: str
specialparams:
description:
- Extra URL parameters, e.g.start=1&count=10
type: str
'''
EXAMPLES = '''
---
# host
# [fortigates]
# fortigate01 ansible_host=192.168.52.177 ansible_user="admin" ansible_password="<PASSWORD>"
# [fortigates:vars]
# ansible_network_os=fortinet.fortios.fortios
# sample1.yml
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: test add with string
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "POST"
path: "/api/v2/cmdb/firewall/address"
jsonbody: |
{
"name": "111",
"type": "geography",
"fqdn": "",
"country": "AL",
"comment": "ccc",
"visibility": "enable",
"associated-interface": "port1",
"allow-routing": "disable"
}
register: info
- name: display vars
debug: msg="{{info}}"
# sample2.yml
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: test delete
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "DELETE"
path: "/api/v2/cmdb/firewall/address/111"
register: info
- name: display vars
debug: msg="{{info}}"
- name: test add with dict
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "POST"
path: "/api/v2/cmdb/firewall/address"
dictbody:
name: "111"
type: "geography"
fqdn: ""
country: "AL"
comment: "ccc"
visibility: "enable"
associated-interface: "port1"
allow-routing: "disable"
register: info
- name: display vars
debug: msg="{{info}}"
- name: test delete
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "DELETE"
path: "/api/v2/cmdb/firewall/address/111"
register: info
- name: display vars
debug: msg="{{info}}"
- name: test add with string
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "POST"
path: "/api/v2/cmdb/firewall/address"
jsonbody: |
{
"name": "111",
"type": "geography",
"fqdn": "",
"country": "AL",
"comment": "ccc",
"visibility": "enable",
"associated-interface": "port1",
"allow-routing": "disable"
}
register: info
- name: display vars
debug: msg="{{info}}"
- name: test speical params
fortios_json_generic:
vdom: "{{ vdom }}"
json_generic:
method: "PUT"
path: "/api/v2/cmdb/firewall/policy/1"
specialparams: "action=move&after=2"
register: info
- name: display vars
debug: msg="{{info}}"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
import json
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def json_generic(data, fos):
vdom = data['vdom']
json_generic_data = data['json_generic']
# Give priority to jsonbody
data = ""
if json_generic_data['jsonbody']:
data = json.loads(json_generic_data['jsonbody'])
else:
if json_generic_data['dictbody']:
data = json_generic_data['dictbody']
return fos.jsonraw(json_generic_data['method'],
json_generic_data['path'],
data=data,
specific_params=json_generic_data['specialparams'],
vdom=vdom)
def is_successful_status(resp):
return 'status' in resp and resp['status'] == 'success' \
or 'http_method' in resp and resp['http_method'] == 'DELETE' \
and 'http_status' in resp and resp['http_status'] == 404
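# For reference, two responses the predicate above treats as successful
# (key names follow the RETURN block; the values are illustrative only):
#   {'status': 'success', 'http_method': 'POST', 'http_status': 200}
#   {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}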
def fortios_json(data, fos):
if data['json_generic']:
resp = json_generic(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"access_token": {"required": False, "type": "str", "no_log": True},
"enable_log": {"required": False, "type": bool},
"vdom": {"required": False, "type": "str", "default": "root"},
"json_generic": {
"required": False, "type": "dict", "default": None,
"options": {
"dictbody": {"required": False, "type": "dict"},
"jsonbody": {"required": False, "type": "str"},
"method": {"required": True, "type": "str",
"choices": ["GET", "PUT", "POST",
"DELETE"]},
"path": {"required": True, "type": "str"},
"specialparams": {"required": False, "type": "str"}
}
}
}
check_legacy_fortiosapi()
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
if module._socket_path:
connection = Connection(module._socket_path)
if 'access_token' in module.params:
connection.set_option('access_token', module.params['access_token'])
if 'enable_log' in module.params:
connection.set_option('enable_log', module.params['enable_log'])
else:
connection.set_option('enable_log', False)
fos = FortiOSHandler(connection, module)
is_error, has_changed, result = fortios_json(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Unable to precess the request, please provide correct parameters and make sure the path exists.", meta=result)
if __name__ == '__main__':
main()
|
[
"json.loads",
"ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios.check_legacy_fortiosapi",
"ansible.module_utils.connection.Connection",
"ansible.module_utils.basic.AnsibleModule",
"ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios.FortiOSHandler"
] |
[((9973, 9998), 'ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios.check_legacy_fortiosapi', 'check_legacy_fortiosapi', ([], {}), '()\n', (9996, 9998), False, 'from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi\n'), ((10012, 10074), 'ansible.module_utils.basic.AnsibleModule', 'AnsibleModule', ([], {'argument_spec': 'fields', 'supports_check_mode': '(False)'}), '(argument_spec=fields, supports_check_mode=False)\n', (10025, 10074), False, 'from ansible.module_utils.basic import AnsibleModule\n'), ((8342, 8383), 'json.loads', 'json.loads', (["json_generic_data['jsonbody']"], {}), "(json_generic_data['jsonbody'])\n", (8352, 8383), False, 'import json\n'), ((10152, 10183), 'ansible.module_utils.connection.Connection', 'Connection', (['module._socket_path'], {}), '(module._socket_path)\n', (10162, 10183), False, 'from ansible.module_utils.connection import Connection\n'), ((10511, 10545), 'ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios.FortiOSHandler', 'FortiOSHandler', (['connection', 'module'], {}), '(connection, module)\n', (10525, 10545), False, 'from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler\n')]
|
import time
import logging
import os
import adb
from .. import app_test
from ..dummy_driver import TouchAction
logger = logging.getLogger(__name__)
IMG_TO_MV = 'IMG_1555.jpg'
ANDROID_PIC_DIR = '/sdcard/Pictures'
ANDROID_DL_DIR = '/sdcard/Download'
class App(app_test.AppTest):
def __init__(self, **kwargs):
extra_cap = kwargs.setdefault('extra_cap', {})
extra_cap.setdefault('noReset', False)
super().__init__('com.amaze.filemanager.debug',
'com.amaze.filemanager.activities.MainActivity',
**kwargs)
self.grant_permissions(['WRITE_EXTERNAL_STORAGE'])
self.res_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'res', 'amaze')
    def _tap_properties(self, el):
        # Tap 800 px right of the row's left edge, at its bottom edge
        # (presumably where the item's overflow/properties control sits).
        rect = el.rect
        x = rect['x'] + 800
        y = rect['y'] + rect['height']
        TouchAction(self.driver).tap(x=x, y=y).perform()
def actions(self):
for img in os.listdir(self.res_dir):
adb.push(os.path.join(self.res_dir, img), ANDROID_PIC_DIR, True)
adb.shell(['mv', ANDROID_PIC_DIR + '/' + IMG_TO_MV, ANDROID_DL_DIR])
# time.sleep(30) # cov
self.may_start_profiler()
time.sleep(1)
self.find_element_by_name('Download').click()
time.sleep(2)
img_to_mv = self.find_element_by_name(IMG_TO_MV)
self._tap_properties(img_to_mv)
time.sleep(1)
self.find_element_by_name('Cut').click()
time.sleep(1)
self.back()
time.sleep(2)
self.swipe()
self.find_element_by_name('Pictures').click()
time.sleep(2)
self.swipe('down')
time.sleep(2)
self.find_element_by_res_id('paste').click()
time.sleep(2)
img_to_del = self.find_element_by_res_id('firstline')
self._tap_properties(img_to_del)
time.sleep(1)
self.find_element_by_name('Delete').click()
time.sleep(1)
self.find_element_by_res_id('md_buttonDefaultPositive').click()
time.sleep(2)
self.back()
time.sleep(2)
self.may_stop_profiler()
|
[
"os.path.dirname",
"time.sleep",
"adb.shell",
"os.path.join",
"os.listdir",
"logging.getLogger"
] |
[((122, 149), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (139, 149), False, 'import logging\n'), ((959, 983), 'os.listdir', 'os.listdir', (['self.res_dir'], {}), '(self.res_dir)\n', (969, 983), False, 'import os\n'), ((1070, 1138), 'adb.shell', 'adb.shell', (["['mv', ANDROID_PIC_DIR + '/' + IMG_TO_MV, ANDROID_DL_DIR]"], {}), "(['mv', ANDROID_PIC_DIR + '/' + IMG_TO_MV, ANDROID_DL_DIR])\n", (1079, 1138), False, 'import adb\n'), ((1215, 1228), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1225, 1228), False, 'import time\n'), ((1291, 1304), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1301, 1304), False, 'import time\n'), ((1410, 1423), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1420, 1423), False, 'import time\n'), ((1481, 1494), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1491, 1494), False, 'import time\n'), ((1523, 1536), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1533, 1536), False, 'import time\n'), ((1620, 1633), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1630, 1633), False, 'import time\n'), ((1669, 1682), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1679, 1682), False, 'import time\n'), ((1745, 1758), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1755, 1758), False, 'import time\n'), ((1870, 1883), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1880, 1883), False, 'import time\n'), ((1944, 1957), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1954, 1957), False, 'import time\n'), ((2039, 2052), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2049, 2052), False, 'import time\n'), ((2081, 2094), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (2091, 2094), False, 'import time\n'), ((678, 703), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (693, 703), False, 'import os\n'), ((1006, 1037), 'os.path.join', 'os.path.join', (['self.res_dir', 'img'], {}), '(self.res_dir, img)\n', (1018, 1037), False, 'import os\n')]
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.11
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2,6,0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_CameraMindVision', [dirname(__file__)])
except ImportError:
from . import _CameraMindVision
return _CameraMindVision
if fp is not None:
try:
_mod = imp.load_module('_CameraMindVision', fp, pathname, description)
finally:
fp.close()
return _mod
_CameraMindVision = swig_import_helper()
del swig_import_helper
else:
from . import _CameraMindVision
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
if (name == "thisown"): return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name,None)
if method: return method(self,value)
if (not static):
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
if (name == "thisown"): return self.this.own()
method = class_type.__swig_getmethods__.get(name,None)
if method: return method(self)
raise AttributeError(name)
def _swig_repr(self):
try: strthis = "proxy of " + self.this.__repr__()
except: strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object : pass
_newclass = 0
class SwigPyIterator(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, SwigPyIterator, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, SwigPyIterator, name)
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _CameraMindVision.delete_SwigPyIterator
__del__ = lambda self : None;
def value(self): return _CameraMindVision.SwigPyIterator_value(self)
def incr(self, n=1): return _CameraMindVision.SwigPyIterator_incr(self, n)
def decr(self, n=1): return _CameraMindVision.SwigPyIterator_decr(self, n)
def distance(self, *args): return _CameraMindVision.SwigPyIterator_distance(self, *args)
def equal(self, *args): return _CameraMindVision.SwigPyIterator_equal(self, *args)
def copy(self): return _CameraMindVision.SwigPyIterator_copy(self)
    def next(self): return _CameraMindVision.SwigPyIterator_next(self)
    def __next__(self): return _CameraMindVision.SwigPyIterator___next__(self)
def previous(self): return _CameraMindVision.SwigPyIterator_previous(self)
def advance(self, *args): return _CameraMindVision.SwigPyIterator_advance(self, *args)
def __eq__(self, *args): return _CameraMindVision.SwigPyIterator___eq__(self, *args)
def __ne__(self, *args): return _CameraMindVision.SwigPyIterator___ne__(self, *args)
def __iadd__(self, *args): return _CameraMindVision.SwigPyIterator___iadd__(self, *args)
def __isub__(self, *args): return _CameraMindVision.SwigPyIterator___isub__(self, *args)
def __add__(self, *args): return _CameraMindVision.SwigPyIterator___add__(self, *args)
def __sub__(self, *args): return _CameraMindVision.SwigPyIterator___sub__(self, *args)
def __iter__(self): return self
SwigPyIterator_swigregister = _CameraMindVision.SwigPyIterator_swigregister
SwigPyIterator_swigregister(SwigPyIterator)
CAPTURE_RETRY_TIME = _CameraMindVision.CAPTURE_RETRY_TIME
class CameraMindVision(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, CameraMindVision, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, CameraMindVision, name)
__repr__ = _swig_repr
def __init__(self, _mode=0, _single_mode=True, _grabTimeout=5000, _strobe_enable=True, _trigger_delay=0.018,
_packetSize=9000, _interPacketDelay=1000, _intp_method=7, _debug=False,
_is_hardware_trigger=False):
this = _CameraMindVision.new_CameraMindVision(_mode, _single_mode, _grabTimeout, _strobe_enable, _trigger_delay, _packetSize, _interPacketDelay,
_intp_method, _debug, _is_hardware_trigger)
try: self.this.append(this)
except: self.this = this
__swig_destroy__ = _CameraMindVision.delete_CameraMindVision
__del__ = lambda self : None;
def init_SDK(self): return _CameraMindVision.CameraMindVision_init_SDK(self)
def open(self): return _CameraMindVision.CameraMindVision_open(self)
def capture_one_frame(self): return _CameraMindVision.CameraMindVision_capture_one_frame(self)
def is_connected(self): return _CameraMindVision.CameraMindVision_is_connected(self)
def release_camera(self): return _CameraMindVision.CameraMindVision_release_camera(self)
def reboot_camera(self): return _CameraMindVision.CameraMindVision_reboot_camera(self)
def get_frame_count(self): return _CameraMindVision.CameraMindVision_get_frame_count(self)
def get_error_frame_count(self): return _CameraMindVision.CameraMindVision_get_error_frame_count(self)
def get_frame_rate(self): return _CameraMindVision.CameraMindVision_get_frame_rate(self)
def set_wb(self, *args): return _CameraMindVision.CameraMindVision_set_wb(self, *args)
def set_wb_red(self, *args): return _CameraMindVision.CameraMindVision_set_wb_red(self, *args)
def set_wb_green(self, *args): return _CameraMindVision.CameraMindVision_set_wb_green(self, *args)
def set_wb_blue(self, *args): return _CameraMindVision.CameraMindVision_set_wb_blue(self, *args)
def set_shutter(self, *args): return _CameraMindVision.CameraMindVision_set_shutter(self, *args)
def get_shutter(self): return _CameraMindVision.CameraMindVision_get_shutter(self)
def get_white_balance_red(self): return _CameraMindVision.CameraMindVision_get_white_balance_red(self)
def get_white_balance_green(self): return _CameraMindVision.CameraMindVision_get_white_balance_green(self)
def get_white_balance_blue(self): return _CameraMindVision.CameraMindVision_get_white_balance_blue(self)
def get_firmware_version(self): return _CameraMindVision.CameraMindVision_get_firmware_version(self)
def get_camera_id(self): return _CameraMindVision.CameraMindVision_get_camera_id(self)
def get_camera_temperature(self): return _CameraMindVision.CameraMindVision_get_camera_temperature(self)
def get_frame_w_h(self, *args): return _CameraMindVision.CameraMindVision_get_frame_w_h(self, *args)
def save_parmeter(self): return _CameraMindVision.CameraMindVision_save_parmeter(self)
def load_parmeter(self): return _CameraMindVision.CameraMindVision_load_parmeter(self)
def get_image_in_numpy(self): return _CameraMindVision.CameraMindVision_get_image_in_numpy(self)
CameraMindVision_swigregister = _CameraMindVision.CameraMindVision_swigregister
CameraMindVision_swigregister(CameraMindVision)
# This file is compatible with both classic and new-style classes.
|
[
"os.path.dirname",
"imp.load_module"
] |
[((655, 718), 'imp.load_module', 'imp.load_module', (['"""_CameraMindVision"""', 'fp', 'pathname', 'description'], {}), "('_CameraMindVision', fp, pathname, description)\n", (670, 718), False, 'import imp\n'), ((459, 476), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (466, 476), False, 'from os.path import dirname\n')]
|
#!/usr/bin/env python3
import libvirt
##
## vstb_launch.py
## EU INPUT
##
## Created by <NAME> on 28/10/2017
## Copyright (c) 2017 <NAME>. All rights reserved.
##
class VSTB(object):
'''
    This class defines, starts, and then destroys and undefines the vSTB VMs
'''
def __init__(self, base_path, domains):
self.base_path = base_path
self.conn = libvirt.open("qemu:///system")
self.domains = domains
    def define_domains(self):
        '''
        This method loads the proper XML file for each domain and then defines the domain
        '''
for d in self.domains:
path = str("%s/%s/%s.xml" % (self.base_path, d, d))
vm_xml = self.read_file(path)
self.conn.defineXML(vm_xml)
def launch_domains(self):
'''
        This method starts each domain
'''
for d in self.domains:
dom = self.conn.lookupByName(d)
dom.create()
def stop_domains(self):
'''
        This method stops each domain (stopping means the VM is destroyed)
'''
for d in self.domains:
dom = self.conn.lookupByName(d)
dom.destroy()
def undefine_domains(self):
'''
        This method undefines each domain
'''
for d in self.domains:
dom = self.conn.lookupByName(d)
dom.undefine()
def read_file(self, file_path):
'''
        This method reads a file from the filesystem
'''
data = ""
with open(file_path, 'r') as data_file:
data = data_file.read()
return data
if __name__ == '__main__':
print("########################################")
print("###### vSTB VM Launcher ######")
print("########################################")
images_path = "/home/ubuntu/Scrivania/images"
components = ['es','ea','cp','pa','dms','dmc','vdi']
vstb = VSTB(images_path, components)
print(">>>> Defining Domains... <<<<")
    vstb.define_domains()
print(">>>> [ DONE ] Defining Domains <<<<")
print(">>>> Starting Domains... <<<<")
vstb.launch_domains()
print(">>>> [ DONE ] Starting Domains <<<<")
print("########################################")
print("##### vSTB Running #####")
print("########################################")
input("<<<< Press enter to stop the vSTB >>>>")
print(">>>> Stopping Domains... <<<<")
vstb.stop_domains()
print(">>>> [ DONE ] Stopping Domains <<<<")
print(">>>> Undefining Domains... <<<<")
vstb.undefine_domains()
print(">>>> [ DONE ] Undefining Domains <<<<")
print("########################################")
print("##### vSTB Stopped #####")
print("########################################")
print(">>>> Bye <<<<")
|
[
"libvirt.open"
] |
[((379, 409), 'libvirt.open', 'libvirt.open', (['"""qemu:///system"""'], {}), "('qemu:///system')\n", (391, 409), False, 'import libvirt\n')]
|
# pylint: disable=invalid-name,protected-access
from copy import deepcopy
from unittest import TestCase
import codecs
import gzip
import logging
import os
import shutil
from keras import backend as K
import numpy
from numpy.testing import assert_allclose
from deep_qa.common.checks import log_keras_version_info
from deep_qa.data.instances.instance import TextInstance
from deep_qa.data.tokenizers import tokenizers
from deep_qa.common.params import Params
class DeepQaTestCase(TestCase): # pylint: disable=too-many-public-methods
TEST_DIR = './TMP_TEST/'
TRAIN_FILE = TEST_DIR + 'train_file'
VALIDATION_FILE = TEST_DIR + 'validation_file'
TEST_FILE = TEST_DIR + 'test_file'
TRAIN_BACKGROUND = TEST_DIR + 'train_background'
VALIDATION_BACKGROUND = TEST_DIR + 'validation_background'
SNLI_FILE = TEST_DIR + 'snli_file'
PRETRAINED_VECTORS_FILE = TEST_DIR + 'pretrained_glove_vectors_file'
PRETRAINED_VECTORS_GZIP = TEST_DIR + 'pretrained_glove_vectors_file.gz'
def setUp(self):
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
level=logging.DEBUG)
log_keras_version_info()
os.makedirs(self.TEST_DIR, exist_ok=True)
def tearDown(self):
shutil.rmtree(self.TEST_DIR)
TextInstance.tokenizer = tokenizers["words"](Params({}))
K.clear_session()
def get_model_params(self, additional_arguments=None):
params = Params({})
params['save_models'] = False
params['model_serialization_prefix'] = self.TEST_DIR
params['train_files'] = [self.TRAIN_FILE]
params['validation_files'] = [self.VALIDATION_FILE]
params['embeddings'] = {'words': {'dimension': 6}, 'characters': {'dimension': 2}}
params['encoder'] = {"default": {'type': 'bow'}}
params['num_epochs'] = 1
params['validation_split'] = 0.0
if additional_arguments:
for key, value in additional_arguments.items():
params[key] = deepcopy(value)
return params
def get_model(self, model_class, additional_arguments=None):
params = self.get_model_params(additional_arguments)
return model_class(params)
def ensure_model_trains_and_loads(self, model_class, args: Params):
args['save_models'] = True
# Our loading tests work better if you're not using data generators. Unless you
# specifically request it in your test, we'll avoid using them here, and if you _do_ use
# them, we'll skip some of the stuff below that isn't compatible.
args.setdefault('data_generator', None)
model = self.get_model(model_class, args)
model.train()
# load the model that we serialized
loaded_model = self.get_model(model_class, args)
loaded_model.load_model()
# verify that original model and the loaded model predict the same outputs
if model._uses_data_generators():
# We shuffle the data in the data generator. Instead of making that logic more
# complicated, we'll just pass on the loading tests here. See comment above.
pass
else:
model_predictions = model.model.predict(model.validation_arrays[0])
loaded_model_predictions = loaded_model.model.predict(model.validation_arrays[0])
for model_prediction, loaded_prediction in zip(model_predictions, loaded_model_predictions):
assert_allclose(model_prediction, loaded_prediction)
# We should get the same result if we index the data from the original model and the loaded
# model.
_, indexed_validation_arrays = loaded_model.load_data_arrays(model.validation_files)
if model._uses_data_generators():
# As above, we'll just pass on this.
pass
else:
model_predictions = model.model.predict(model.validation_arrays[0])
loaded_model_predictions = loaded_model.model.predict(indexed_validation_arrays[0])
for model_prediction, loaded_prediction in zip(model_predictions, loaded_model_predictions):
assert_allclose(model_prediction, loaded_prediction)
return model, loaded_model
@staticmethod
def one_hot(index, length):
vector = numpy.zeros(length)
vector[index] = 1
return vector
def write_snli_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\ttext 1\thypothesis1\tentails\n')
train_file.write('2\ttext 2\thypothesis2\tcontradicts\n')
train_file.write('3\ttext3\thypothesis3\tentails\n')
train_file.write('4\ttext 4\thypothesis4\tneutral\n')
train_file.write('5\ttext5\thypothesis 5\tentails\n')
train_file.write('6\ttext6\thypothesis6\tcontradicts\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\ttext 1 with extra words\thypothesis1\tentails\n')
validation_file.write('2\ttext 2\tlonger hypothesis 2\tcontradicts\n')
validation_file.write('3\ttext3\thypothesis withreallylongfakeword\tentails\n')
def write_sequence_tagging_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('cats###N\tare###V\tanimals###N\t.###N\n')
train_file.write('dogs###N\tare###V\tanimals###N\t.###N\n')
train_file.write('snakes###N\tare###V\tanimals###N\t.###N\n')
train_file.write('birds###N\tare###V\tanimals###N\t.###N\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('horses###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('blue###N\tcows###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('monkeys###N\tare###V\tanimals###N\t.###N\n')
validation_file.write('caterpillars###N\tare###V\tanimals###N\t.###N\n')
def write_verb_semantics_files(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')
train_file.write('this####mixture####is####converted####into####sugar####inside####leaf'
'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')
train_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('root####absorb####water\t1,1\t2,2\tMOVE\t-1,-1\t0,0\n')
validation_file.write('this####mixture####is####converted####into####sugar####inside####leaf'
'\t2,3\t5,5\tCREATE\t7,7\t-1,-1\n')
validation_file.write('lakes####contain####water\t1,1\t2,2\tNONE\t-1,-1\t-1,-1\n')
def write_true_false_model_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tq1a1\t0\n')
validation_file.write('2\tq1a2\t1\n')
validation_file.write('3\tq1a3\t0\n')
validation_file.write('4\tq1a4\t0\n')
validation_file.write('5\tq2a1\t0\n')
validation_file.write('6\tq2a2\t0\n')
validation_file.write('7\tq2a3\t1\n')
validation_file.write('8\tq2a4\t0\n')
validation_file.write('9\tq3a1\t0\n')
validation_file.write('10\tq3a2\t0\n')
validation_file.write('11\tq3a3\t0\n')
validation_file.write('12\tq3a4\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tsentence1\t0\n')
train_file.write('2\tsentence2 word2 word3\t1\n')
train_file.write('3\tsentence3 word2\t0\n')
train_file.write('4\tsentence4\t1\n')
train_file.write('5\tsentence5\t0\n')
train_file.write('6\tsentence6\t0\n')
with codecs.open(self.TEST_FILE, 'w', 'utf-8') as test_file:
test_file.write('1\ttestsentence1\t0\n')
test_file.write('2\ttestsentence2 word2 word3\t1\n')
test_file.write('3\ttestsentence3 word2\t0\n')
test_file.write('4\ttestsentence4\t1\n')
test_file.write('5\ttestsentence5 word4\t0\n')
test_file.write('6\ttestsentence6\t0\n')
def write_additional_true_false_model_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tq4a1\t0\n')
validation_file.write('2\tq4a2\t1\n')
validation_file.write('3\tq4a3\t0\n')
validation_file.write('4\tq4a4\t0\n')
validation_file.write('5\tq5a1\t0\n')
validation_file.write('6\tq5a2\t0\n')
validation_file.write('7\tq5a3\t1\n')
validation_file.write('8\tq5a4\t0\n')
validation_file.write('9\tq6a1\t0\n')
validation_file.write('10\tq6a2\t0\n')
validation_file.write('11\tq6a3\t0\n')
validation_file.write('12\tq6a4\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tsentence7\t0\n')
train_file.write('2\tsentence8 word4 word5\t1\n')
train_file.write('3\tsentence9 word4\t0\n')
train_file.write('4\tsentence10\t1\n')
train_file.write('5\tsentence11 word3 word2\t0\n')
train_file.write('6\tsentence12\t0\n')
def write_question_answer_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tquestion1\tanswer1###answer2\t0\n')
with codecs.open(self.VALIDATION_BACKGROUND, 'w', 'utf-8') as validation_background:
validation_background.write('1\tvb1\tvb2\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\ta b e i d\tanswer 1###answer2\t0\n')
train_file.write('2\ta b c d\tanswer3###answer4\t1\n')
train_file.write('3\te d w f d s a b\tanswer5###answer6###answer9\t2\n')
train_file.write('4\te fj k w q\tanswer7###answer8\t0\n')
with codecs.open(self.TRAIN_BACKGROUND, 'w', 'utf-8') as train_background:
train_background.write('1\tsb1\tsb2\n')
train_background.write('2\tsb3\n')
train_background.write('3\tsb4\n')
train_background.write('4\tsb5\tsb6\n')
def write_who_did_what_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tHe went to the store to buy goods, because he wanted to.'
'\tHe bought xxxxx\tgoods###store\t0\n')
validation_file.write('1\tShe hiking on the weekend with her friend.'
'\tShe went xxxxx\thiking###friend###weekend###her friend\t0\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# document, question, answers
train_file.write('1\tFred hit the ball with the bat.\tHe hit the ball with the xxxxx\tbat###ball\t0\n')
train_file.write('1\tShe walked the dog today.\tThe xxxxx was walked today.\tShe###dog###today\t1\n')
train_file.write('1\tHe kept typing at his desk.\tHe typed at his xxxxx\tdesk###kept\t0\n')
train_file.write('1\tThe pup at the bone but not the biscuit.\tThe pup ate the xxxxx\t'
'bone###biscuit\t0\n')
def write_tuple_inference_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tss<>v f d<>oo o<>c$$$s<>v ff<>o i###ss r<>v<>o e<>o ee\t'
'ss ss<>ve gg<>o sd<>ccs\t0\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# document, question, answers
train_file.write('1\tss<>v<>oo o<>c$$$s e<>ff<>o ii i###ss r<>rr<>o e<>o ee\t'
'ss<>ve gg<>o sd<>ccs\t0\n')
train_file.write('2\tsg g<>vg<>oo o<>c$$$s e<>v ff<>o ii i###ss<>v rr<>o e<>o ee'
'###hh kk<>hdj d<>hh\tss ss<>ve gg<>o sd<>ccs\t2\n')
train_file.write('3\ts r<>v f d<>o ss<>c$$$s e<>v ff<>o ss i$$$r<>v ss<>s o e<>o ee\t'
'ss ss<>v g<>o sd<>ccs\t0\n')
train_file.write('4\tty y<>cf fv ss<>s ss<>c$$$rt e<>vv f<>oss i i###ss<>v<>os e<>o ee\t'
'ss ss<>ve gg<>o sd<>ccs\t1\n')
def write_span_prediction_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tquestion 1 with extra words\t'
'passage with answer and a reallylongword\t13,18\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tquestion 1\tpassage1 with answer1\t14,20\n')
train_file.write('2\tquestion 2\tpassage2 with answer2\t0,8\n')
train_file.write('3\tquestion 3\tpassage3 with answer3\t9,13\n')
train_file.write('4\tquestion 4\tpassage4 with answer4\t14,20\n')
def write_sentence_selection_files(self):
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
validation_file.write('1\tWhere is Paris?\tParis is the capital of France.###It '
'is by the Seine.###It is quite old###this is a '
'very long sentence meant to test that loading '
'and padding works properly in the model.\t1\n')
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write('1\tWho won Super Bowl 50?\tSuper Bowl 50 was in Santa '
'Clara.###The Patriots beat the Broncos.\t1\n')
train_file.write('2\tWhen is Thanksgiving?\tFolk tales tell '
'of the Pilgrims celebrating the holiday.###Many '
'people eat a lot.###It is in November.\t2\n')
train_file.write('3\tWhen were computers invented?\tThe ancient Chinese used '
'abacuses.###Alan Turing cracked Enigma.###It is hard to '
'pinpoint an inventor of the computer.\t2\n')
def write_pretrained_vector_files(self):
# write the file
with codecs.open(self.PRETRAINED_VECTORS_FILE, 'w', 'utf-8') as vector_file:
vector_file.write('word2 0.21 0.57 0.51 0.31\n')
vector_file.write('sentence1 0.81 0.48 0.19 0.47\n')
# compress the file
with open(self.PRETRAINED_VECTORS_FILE, 'rb') as f_in:
with gzip.open(self.PRETRAINED_VECTORS_GZIP, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def write_sentence_data(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
train_file.write("This is a sentence for language modelling.\n")
train_file.write("Here's another one for language modelling.\n")
def write_original_snli_data(self):
with codecs.open(self.TRAIN_FILE, 'w', 'utf-8') as train_file:
# pylint: disable=line-too-long
train_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")
train_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")
train_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")
# pylint: enable=line-too-long
with codecs.open(self.VALIDATION_FILE, 'w', 'utf-8') as validation_file:
# pylint: disable=line-too-long
validation_file.write("""{"annotator_labels": ["neutral"],"captionID": "3416050480.jpg#4", "gold_label": "neutral", "pairID": "3416050480.jpg#4r1n", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is training his horse for a competition.", "sentence2_binary_parse": "( ( A person ) ( ( is ( ( training ( his horse ) ) ( for ( a competition ) ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (VP (VBG training) (NP (PRP$ his) (NN horse)) (PP (IN for) (NP (DT a) (NN competition))))) (. .)))"}\n""")
validation_file.write("""{"annotator_labels": ["contradiction"], "captionID": "3416050480.jpg#4", "gold_label": "contradiction", "pairID": "3416050480.jpg#4r1c", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is at a diner, ordering an omelette.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is ( at ( a diner ) ) ) , ) ( ordering ( an omelette ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (PP (IN at) (NP (DT a) (NN diner))) (, ,) (S (VP (VBG ordering) (NP (DT an) (NN omelette))))) (. .)))"}\n""")
validation_file.write("""{"annotator_labels": ["entailment"], "captionID": "3416050480.jpg#4", "gold_label": "entailment", "pairID": "3416050480.jpg#4r1e", "sentence1": "A person on a horse jumps over a broken down airplane.", "sentence1_binary_parse": "( ( ( A person ) ( on ( a horse ) ) ) ( ( jumps ( over ( a ( broken ( down airplane ) ) ) ) ) . ) )", "sentence1_parse": "(ROOT (S (NP (NP (DT A) (NN person)) (PP (IN on) (NP (DT a) (NN horse)))) (VP (VBZ jumps) (PP (IN over) (NP (DT a) (JJ broken) (JJ down) (NN airplane)))) (. .)))", "sentence2": "A person is outdoors, on a horse.", "sentence2_binary_parse": "( ( A person ) ( ( ( ( is outdoors ) , ) ( on ( a horse ) ) ) . ) )", "sentence2_parse": "(ROOT (S (NP (DT A) (NN person)) (VP (VBZ is) (ADVP (RB outdoors)) (, ,) (PP (IN on) (NP (DT a) (NN horse)))) (. .)))"}\n""")
# pylint: enable=line-too-long
|
[
"copy.deepcopy",
"gzip.open",
"os.makedirs",
"logging.basicConfig",
"codecs.open",
"numpy.testing.assert_allclose",
"numpy.zeros",
"deep_qa.common.params.Params",
"deep_qa.common.checks.log_keras_version_info",
"shutil.rmtree",
"shutil.copyfileobj",
"keras.backend.clear_session"
] |
[((1030, 1143), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n    '%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG\n    )\n", (1049, 1143), False, 'import logging\n'), ((1170, 1194), 'deep_qa.common.checks.log_keras_version_info', 'log_keras_version_info', ([], {}), '()\n', (1192, 1194), False, 'from deep_qa.common.checks import log_keras_version_info\n'), ((1203, 1244), 'os.makedirs', 'os.makedirs', (['self.TEST_DIR'], {'exist_ok': '(True)'}), '(self.TEST_DIR, exist_ok=True)\n', (1214, 1244), False, 'import os\n'), ((1278, 1306), 'shutil.rmtree', 'shutil.rmtree', (['self.TEST_DIR'], {}), '(self.TEST_DIR)\n', (1291, 1306), False, 'import shutil\n'), ((1380, 1397), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (1395, 1397), True, 'from keras import backend as K\n'), ((1475, 1485), 'deep_qa.common.params.Params', 'Params', (['{}'], {}), '({})\n', (1481, 1485), False, 'from deep_qa.common.params import Params\n'), ((4340, 4359), 'numpy.zeros', 'numpy.zeros', (['length'], {}), '(length)\n', (4351, 4359), False, 'import numpy\n'), ((1360, 1370), 'deep_qa.common.params.Params', 'Params', (['{}'], {}), '({})\n', (1366, 1370), False, 'from deep_qa.common.params import Params\n'), ((4454, 4496), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (4465, 4496), False, 'import codecs\n'), ((4927, 4974), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (4938, 4974), False, 'import codecs\n'), ((5316, 5358), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (5327, 5358), False, 'import codecs\n'), ((5678, 5725), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (5689, 5725), False, 'import codecs\n'), ((6133, 6175), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (6144, 6175), False, 'import codecs\n'), ((6546, 6593), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (6557, 6593), False, 'import codecs\n'), ((7034, 7081), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (7045, 7081), False, 'import codecs\n'), ((7718, 7760), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (7729, 7760), False, 'import codecs\n'), ((8107, 8148), 'codecs.open', 'codecs.open', (['self.TEST_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TEST_FILE, 'w', 'utf-8')\n", (8118, 8148), False, 'import codecs\n'), ((8574, 8621), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (8585, 8621), False, 'import codecs\n'), ((9258, 9300), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (9269, 9300), False, 'import codecs\n'), ((9706, 9753), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (9717, 9753), False, 'import codecs\n'), ((9861, 9914), 'codecs.open', 'codecs.open', (['self.VALIDATION_BACKGROUND', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_BACKGROUND, 'w', 'utf-8')\n", (9872, 9914), False, 'import codecs\n'), ((10011, 10053), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (10022, 10053), False, 'import codecs\n'), ((10374, 10422), 'codecs.open', 'codecs.open', (['self.TRAIN_BACKGROUND', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_BACKGROUND, 'w', 'utf-8')\n", (10385, 10422), False, 'import codecs\n'), ((10696, 10743), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (10707, 10743), False, 'import codecs\n'), ((11129, 11171), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (11140, 11171), False, 'import codecs\n'), ((11773, 11820), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (11784, 11820), False, 'import codecs\n'), ((12016, 12058), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (12027, 12058), False, 'import codecs\n'), ((12819, 12866), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (12830, 12866), False, 'import codecs\n'), ((13056, 13098), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (13067, 13098), False, 'import codecs\n'), ((13483, 13530), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (13494, 13530), False, 'import codecs\n'), ((13908, 13950), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (13919, 13950), False, 'import codecs\n'), ((14697, 14752), 'codecs.open', 'codecs.open', (['self.PRETRAINED_VECTORS_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.PRETRAINED_VECTORS_FILE, 'w', 'utf-8')\n", (14708, 14752), False, 'import codecs\n'), ((15156, 15198), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (15167, 15198), False, 'import codecs\n'), ((15422, 15464), 'codecs.open', 'codecs.open', (['self.TRAIN_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.TRAIN_FILE, 'w', 'utf-8')\n", (15433, 15464), False, 'import codecs\n'), ((18223, 18270), 'codecs.open', 'codecs.open', (['self.VALIDATION_FILE', '"""w"""', '"""utf-8"""'], {}), "(self.VALIDATION_FILE, 'w', 'utf-8')\n", (18234, 18270), False, 'import codecs\n'), ((2041, 2056), 'copy.deepcopy', 'deepcopy', (['value'], {}), '(value)\n', (2049, 2056), False, 'from copy import deepcopy\n'), ((3500, 3552), 'numpy.testing.assert_allclose', 'assert_allclose', (['model_prediction', 'loaded_prediction'], {}), '(model_prediction, loaded_prediction)\n', (3515, 3552), False, 'from numpy.testing import assert_allclose\n'), ((4184, 4236), 'numpy.testing.assert_allclose', 'assert_allclose', (['model_prediction', 'loaded_prediction'], {}), '(model_prediction, loaded_prediction)\n', (4199, 4236), False, 'from numpy.testing import assert_allclose\n'), ((15003, 15048), 'gzip.open', 'gzip.open', (['self.PRETRAINED_VECTORS_GZIP', '"""wb"""'], {}), "(self.PRETRAINED_VECTORS_GZIP, 'wb')\n", (15012, 15048), False, 'import gzip\n'), ((15075, 15106), 'shutil.copyfileobj', 'shutil.copyfileobj', (['f_in', 'f_out'], {}), '(f_in, f_out)\n', (15093, 15106), False, 'import shutil\n')]
|
# Copyright (c) 2015 <NAME>
# See the file LICENSE for copying permission.
# This object is intended to run from script bassist.py
import logging
import logging.config
import argparse
import ConfigParser
from ..parser import host as parser_host
from ..flavor import directory as flavor_directory
class Script(object):
'''This module contains things that are available for use of our packages'
scripts. This includes setting up argparse, logger, etc.'''
def set_logging(self):
'''If we're running from the root of the project, this stuff will work.
If not, load logging_config.'''
try:
logging.config.fileConfig('log.conf', disable_existing_loggers=False)
except ConfigParser.NoSectionError:
# probably no log.conf file
logging.basicConfig(
format='%(message)s',
)
self.logger = logging.getLogger(__name__)
try:
from log_override import LOG_OVERRIDES
logging.config.dictConfig(LOG_OVERRIDES)
except:
self.logger.debug('unable to load log_override; ignoring')
def set_arg_parser(self):
self.arg_parser = argparse.ArgumentParser( description=self.description )
self.required_args = self.arg_parser.add_argument_group('required arguments')
self.required_args.add_argument(
'-f', '--flavor-db',
required=True,
help='The path to the directory containing the flavor ZODB files')
self.required_args.add_argument(
'-n', '--flavor-name',
required=True,
help=self.flavor_arg_description)
self.required_args.add_argument(
'-s', '--scanner-directory',
required=True,
help='The path to the directory containing scanner results')
def read_flavors(self):
self.logger.debug('reading flavors')
self.flavors = flavor_directory.Directory(self.args.flavor_db).db
self.requested_flavor = self.flavors.get_obj_from_name(self.args.flavor_name)
self.logger.debug('retrieved requested flavor %s', self.requested_flavor)
def parse(self):
self.logger.debug('importing parsers')
self.parsed_host = parser_host.Host(self.args.scanner_directory)
self.logger.debug('finished importing parsers')
for parser in self.parsed_host.parsers:
self.logger.debug('parser log: %s', parser.log)
self.logger.debug('parsing: %s', parser.path)
parser.parse()
def finish(self):
self.flavors.close()
|
[
"argparse.ArgumentParser",
"logging.basicConfig",
"logging.config.dictConfig",
"logging.config.fileConfig",
"logging.getLogger"
] |
[((911, 938), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (928, 938), False, 'import logging\n'), ((1201, 1254), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'self.description'}), '(description=self.description)\n', (1224, 1254), False, 'import argparse\n'), ((637, 706), 'logging.config.fileConfig', 'logging.config.fileConfig', (['"""log.conf"""'], {'disable_existing_loggers': '(False)'}), "('log.conf', disable_existing_loggers=False)\n", (662, 706), False, 'import logging\n'), ((1016, 1056), 'logging.config.dictConfig', 'logging.config.dictConfig', (['LOG_OVERRIDES'], {}), '(LOG_OVERRIDES)\n', (1041, 1056), False, 'import logging\n'), ((803, 844), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(message)s"""'}), "(format='%(message)s')\n", (822, 844), False, 'import logging\n')]
|