filename | text
---|---|
the-stack_106_17153
|
import argparse
import numpy as np
import healpy
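# Minimal illustrative sketch (not part of the original script): it applies the
# same per-region aggregation as the __main__ block below to a tiny in-memory
# weight map and jackknife definition. The region key "jk_2" and the toy values
# are assumptions made up for this example.
def _demo_jackknife_weights():
    m_demo = np.array([1.0, 2.0, healpy.UNSEEN, 4.0, 5.0, 6.0])
    jk_demo = {"nside": 1, "jk_2": np.array([[0, 1, 2], [3, 4, 5]])}
    out = {}
    for res in (k for k in jk_demo if k != "nside"):
        out[res] = np.zeros(jk_demo[res].shape[0], dtype=np.float64)
        for i, idx in enumerate(jk_demo[res]):
            w = m_demo[idx]
            out[res][i] = np.sum(w[w != healpy.UNSEEN])
    return out  # {"jk_2": array([ 3., 15.])}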
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--weight-paths", nargs="+", required=True)
    parser.add_argument("--jk-def", required=True)
    parser.add_argument("--output", required=True)
    args = parser.parse_args()
    jk_def = np.load(args.jk_def)
    print("Loading ", args.weight_paths[0])
    m = healpy.read_map(args.weight_paths[0], dtype=np.float64)
    for p in args.weight_paths[1:]:
        print("Loading ", p)
        m *= healpy.read_map(p, dtype=np.float64)
    jk_resolutions = [k for k in jk_def if k != "nside"]
    jk_weights = {}
    for jk_res in jk_resolutions:
        jk_weights[jk_res] = np.zeros(jk_def[jk_res].shape[0], dtype=np.float64)
        for i, idx in enumerate(jk_def[jk_res]):
            w = m[idx]
            w = w[w != healpy.UNSEEN]
            jk_weights[jk_res][i] = np.sum(w)
    np.savez(args.output, **jk_weights)
|
the-stack_106_17154
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Filename: mb_pendulum.py
# @Date: 2019-06-16-18-38
# @Author: Hany Abdulsamad
# @Contact: [email protected]
import gym
from trajopt.gps import MBGPS
# pendulum env
env = gym.make('Pendulum-TO-v0')
env._max_episode_steps = 150
alg = MBGPS(env, nb_steps=150,
kl_bound=1.,
init_ctl_sigma=4.,
activation=range(150))
# run gps
trace = alg.run(nb_iter=50)
# plot dists
alg.plot()
# plot objective
import matplotlib.pyplot as plt
plt.figure()
plt.plot(trace)
plt.show()
|
the-stack_106_17156
|
# -*- coding: utf-8 -*-
def clear_not_using_groups():
import sqlite3
bot_db = sqlite3.connect('Bot_db')
cursor = bot_db.cursor()
cursor.execute("SELECT g_tt.id FROM groups_tt as g_tt LEFT OUTER JOIN users AS u ON g_tt.url = u.url WHERE u.id ISNULL")
ids = cursor.fetchall()
for id in ids:
cursor.execute("DELETE FROM groups_tt WHERE id = ?", (id[0],))
bot_db.commit()
print('deleted: ' + str(id[0]))
cursor.close()
bot_db.close()
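# Illustrative sketch (not part of the original module): the LEFT OUTER JOIN /
# ISNULL query used above selects groups_tt rows whose url is not referenced by
# any user. The in-memory schema below is a simplified assumption made up for
# demonstration.
def _demo_orphaned_groups():
    import sqlite3
    db = sqlite3.connect(':memory:')
    cur = db.cursor()
    cur.execute("CREATE TABLE groups_tt (id INTEGER, url TEXT)")
    cur.execute("CREATE TABLE users (id INTEGER, url TEXT)")
    cur.executemany("INSERT INTO groups_tt VALUES (?, ?)", [(1, 'a'), (2, 'b')])
    cur.execute("INSERT INTO users VALUES (1, 'a')")
    cur.execute("SELECT g_tt.id FROM groups_tt AS g_tt "
                "LEFT OUTER JOIN users AS u ON g_tt.url = u.url "
                "WHERE u.id ISNULL")
    rows = cur.fetchall()
    db.close()
    return rows  # [(2,)] -- group 'b' has no users and would be deleted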
def update_db():
print('-----------------------------------')
import requests
import sqlite3
from time import time
cookies = {'_gat': '1', '_ga': 'GA1.2.76615387.1488377623', '_culture': 'ru'}
tic = time()
bot_db = sqlite3.connect('Bot_db')
cursor = bot_db.cursor()
cursor.execute("SELECT id, url FROM groups_tt")
urls = cursor.fetchall()
cursor.execute("SELECT g_tt.id FROM groups_tt as g_tt LEFT OUTER JOIN users AS u ON g_tt.url = u.url WHERE u.id ISNULL")
ids = [id[0] for id in cursor.fetchall()]
print('count of groups: ' + str(len(urls)))
count = 0
for url in urls:
if url[0] in ids:
count += 1
continue
soup = requests.get(url[1], cookies=cookies).text
cursor.execute("UPDATE groups_tt SET soup = ? WHERE url = ?", (soup, url[1]))
bot_db.commit()
cursor.close()
bot_db.close()
print('count of skipped: ' + str(count))
print('success')
print('update time: ' + str(time() - tic))
print('-----------------------------------\n')
def update_ia():
import requests
import sqlite3
from bs4 import BeautifulSoup
cookies = {'_gat': '1', '_ga': 'GA1.2.76615387.1488377623', '_culture': 'ru'}
bot_db = sqlite3.connect('Bot_db')
cursor = bot_db.cursor()
cursor.execute("SELECT id, soup FROM groups_tt")
soups = cursor.fetchall()
cursor.execute("SELECT g_tt.id FROM groups_tt as g_tt LEFT OUTER JOIN users AS u ON g_tt.url = u.url WHERE u.id ISNULL")
ids = [id[0] for id in cursor.fetchall()]
for soup in soups:
if soup[0] in ids:
continue
url_soup = BeautifulSoup(soup[1], "lxml")
att_url = url_soup.find('a', text=u'пром. аттестация').get('href')
ia_soup = requests.get('http://timetable.spbu.ru' + att_url, cookies=cookies).text
cursor.execute("UPDATE groups_tt SET interim_attestation = ? WHERE id = ?", (ia_soup, soup[0]))
bot_db.commit()
cursor.close()
bot_db.close()
def clear_db():
import sqlite3
bot_db = sqlite3.connect('Bot_db')
cursor = bot_db.cursor()
cursor.execute("DELETE FROM users")
bot_db.commit()
cursor.execute("DELETE FROM skips")
bot_db.commit()
cursor.execute("DELETE FROM lessons")
bot_db.commit()
cursor.execute("DELETE FROM groups_tt")
bot_db.commit()
cursor.close()
bot_db.close()
if __name__ == '__main__':
import time
if time.localtime().tm_hour == 18:
print('-----------------------------------')
import os
os.system('python3.5 sender.py')
elif time.localtime().tm_hour == 17:
time.sleep(2400)
update_db()
if time.localtime().tm_hour == 0:
update_ia()
|
the-stack_106_17157
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.4.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### COVID-19 Global Stats
# This notebook lets you examine and plot the data shared through the JHU CSSE git repo
#
# git clone https://github.com/CSSEGISandData/COVID-19.git
#
# The git repo provides country-by-country (and province by province, for some countries) daily stats for confirmed COVID-19 cases, and deaths.
#
# This notebook pulls just the country-level data out of the repo. An accompanying notebook does the same for the US state-by-state stats.
import pandas as pd
import numpy as np
from pathlib import Path
# %autosave 0
# You will need to set 'repo' to the absolute pathname for the COVID-19 repo, on your system.
repo = Path.home() / "data-stuff/COVID-19"
data_dir = Path(repo) / "csse_covid_19_data/csse_covid_19_time_series"
confirmed = pd.read_csv(data_dir / "time_series_covid19_confirmed_global.csv")
deaths = pd.read_csv(data_dir / "time_series_covid19_deaths_global.csv")
cols_to_drop = ["Province/State", "Lat", "Long"]
confirmed = confirmed.drop(columns=cols_to_drop).rename(columns={"Country/Region": "country"})
deaths = deaths.drop(columns=cols_to_drop).rename(columns={"Country/Region": "country"})
confirmed = confirmed.groupby("country").agg(np.sum).transpose()
deaths = deaths.groupby("country").agg(np.sum).transpose()
confirmed.index = pd.to_datetime(confirmed.index)
deaths.index = pd.to_datetime(deaths.index)
# +
# list(confirmed.columns)
# -
countries = ["China", "Italy", "Iran", "US", "Germany", "France", "Spain", "United Kingdom"]
options = {"logy": True, "figsize": (13,8)}
confirmed[countries].last("30D").plot(title="Confirmed Cases, by Country", **options)
deaths[countries].last("30D").plot(title="Deaths, by Country", **options)
confirmed[countries].last("10D")
deaths[countries].last("10D")
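# A small illustrative extension (not in the original notebook): daily new
# confirmed cases can be derived from the cumulative series with a first
# difference; negative data corrections are clipped to zero before plotting.
new_cases = confirmed[countries].diff().clip(lower=0)
new_cases.last("30D").plot(title="Daily New Confirmed Cases, by Country", **options)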
|
the-stack_106_17158
|
import base64
import web3
from snet.sdk.payment_channel import PaymentChannel
from snet.snet_cli.utils import get_contract_object, get_contract_deployment_block
BLOCKS_PER_BATCH = 20000
class MPEContract:
def __init__(self, w3):
self.web3 = w3
self.contract = get_contract_object(self.web3, "MultiPartyEscrow.json")
self.event_topics = [self.web3.sha3(
text="ChannelOpen(uint256,uint256,address,address,address,bytes32,uint256,uint256)").hex()]
self.deployment_block = get_contract_deployment_block(
self.web3, "MultiPartyEscrow.json")
def get_past_open_channels(self, account, service, starting_block_number=0, to_block_number=None):
if to_block_number is None:
to_block_number = self.web3.eth.getBlock("latest")["number"]
if starting_block_number == 0:
starting_block_number = self.deployment_block
logs = []
from_block = starting_block_number
while from_block <= to_block_number:
to_block = min(from_block + BLOCKS_PER_BATCH, to_block_number)
logs = logs + self.web3.eth.getLogs({"fromBlock": from_block, "toBlock": to_block,
"address": self.contract.address, "topics": self.event_topics})
from_block = to_block + 1
event_abi = self.contract._find_matching_event_abi(
event_name="ChannelOpen")
group = service.metadata.get_group_id(service.group['group_name'])
channels_opened = list(filter(
lambda channel: channel.sender == account.address and channel.signer == account.signer_address and channel.recipient == service.group[
"payment_address"] and channel.groupId == group,
[web3.utils.events.get_event_data(
event_abi, l)["args"] for l in logs]
))
return list(map(lambda channel: PaymentChannel(channel["channelId"], self.web3, account, service, self), channels_opened))
def balance(self, address):
return self.contract.functions.balances(address).call()
def deposit(self, account, amount_in_cogs):
return account.send_transaction(self.contract.functions.deposit, amount_in_cogs)
def open_channel(self, account, service, amount, expiration):
return account.send_transaction(self.contract.functions.openChannel, account.signer_address, service.group["payment_address"], base64.b64decode(str(service.group["group_id"])), amount, expiration)
def deposit_and_open_channel(self, account, service, amount, expiration):
already_approved_amount = account.allowance()
if amount > already_approved_amount:
account.approve_transfer(amount)
return account.send_transaction(self.contract.functions.depositAndOpenChannel, account.signer_address, service.group["payment_address"], base64.b64decode(str(service.group["group_id"])), amount, expiration)
def channel_add_funds(self, account, channel_id, amount):
self._fund_escrow_account(account, amount)
return account.send_transaction(self.contract.functions.channelAddFunds, channel_id, amount)
def channel_extend(self, account, channel_id, expiration):
return account.send_transaction(self.contract.functions.channelExtend, channel_id, expiration)
def channel_extend_and_add_funds(self, account, channel_id, expiration, amount):
self._fund_escrow_account(account, amount)
return account.send_transaction(self.contract.functions.channelExtendAndAddFunds, channel_id, expiration, amount)
def _fund_escrow_account(self, account, amount):
current_escrow_balance = self.balance(account.address)
if amount > current_escrow_balance:
account.deposit_to_escrow_account(amount - current_escrow_balance)
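# Illustrative helper (not part of the original class): the block-range
# pagination pattern used in get_past_open_channels, shown in isolation. It
# yields inclusive (from_block, to_block) windows covering at most
# BLOCKS_PER_BATCH + 1 blocks each, matching the loop above.
def _block_ranges(start_block, end_block, batch=BLOCKS_PER_BATCH):
    from_block = start_block
    while from_block <= end_block:
        to_block = min(from_block + batch, end_block)
        yield from_block, to_block
        from_block = to_block + 1
# e.g. list(_block_ranges(0, 45000, 20000)) -> [(0, 20000), (20001, 40001), (40002, 45000)]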
|
the-stack_106_17159
|
import logging
from collections import OrderedDict as _o
logger = logging.getLogger(__name__)
default_ns_order = ['WM', 'UN', 'HUME', 'SOFIA', 'CWMS']
class Concept(object):
"""A concept/entity of interest that is the argument of a Statement
Parameters
----------
name : str
The name of the concept, possibly a canonicalized name.
db_refs : dict
Dictionary of database identifiers associated with this concept.
"""
def __init__(self, name, db_refs=None):
self.name = name
self.db_refs = db_refs if db_refs else {}
def matches(self, other):
return self.matches_key() == other.matches_key()
def matches_key(self):
key = self.entity_matches_key()
return str(key)
def entity_matches(self, other):
return self.entity_matches_key() == other.entity_matches_key()
def entity_matches_key(self):
# Get the grounding first
db_ns, db_id = self.get_grounding()
# If there's no grounding, just use the name as key
if not db_ns and not db_id:
return self.name
return str((db_ns, db_id))
def equals(self, other):
matches = (self.name == other.name) and \
(self.db_refs == other.db_refs)
return matches
def get_grounding(self, ns_order=None):
# There are the following possibilities here:
# 1. a single unscored entry (str)
# 2. a list of scored entries with one element per entry
# (list of tuple(str, float))
# 3. a list of entries with each entry consisting of a tuple
# of 4 scored groundings (list of tuple(tuple(str, float)))
ns_order = ns_order if ns_order else default_ns_order
for db_ns in ns_order:
# If there is no such entry, we continue
db_id = self.db_refs.get(db_ns)
# Note, this includes an empty list in case that comes up
if not db_id:
continue
# Case 1, a simple string ID
if isinstance(db_id, str):
return db_ns, db_id
# Cases 2 and 3 where db_id here is a list
elif isinstance(db_id, (list, tuple)):
first_entry = db_id[0]
# Case 2: each entry is a grounding and a score
if len(first_entry) == 2:
top_entry = \
sorted(db_id, key=lambda x: x[1],
reverse=True)[0][0]
return db_ns, top_entry
# Case 3: each entry is a tuple with 4 elements
# each of which is a tuple consisting of a grounding
# and a score
else:
top_entry = get_top_compositional_grounding(db_id)
return db_ns, tuple([gr[0] if gr is not None else None
for gr in top_entry])
else:
continue
return None, None
def isa(self, other, ontology):
# Get the namespaces for the comparison
(self_ns, self_id) = self.get_grounding()
(other_ns, other_id) = other.get_grounding()
# If one of the agents isn't grounded to a relevant namespace,
# there can't be an isa relationship
if not all((self_ns, self_id, other_ns, other_id)):
return False
# Check for isa relationship
return ontology.isa(self_ns, self_id, other_ns, other_id)
def is_opposite(self, other, ontology):
# Get the namespaces for the comparison
(self_ns, self_id) = self.get_grounding()
(other_ns, other_id) = other.get_grounding()
# If one of the agents isn't grounded to a relevant namespace,
# there can't be an is_opposite relationship
if not all((self_ns, self_id, other_ns, other_id)):
return False
# Check for is_opposite relationship
return ontology.is_opposite(self_ns, self_id,
other_ns, other_id)
def refinement_of(self, other, ontology, entities_refined=False):
# Make sure the Agent types match
if type(self) != type(other):
return False
# Check that the basic entity of the agent either matches or is related
# to the entity of the other agent. If not, no match.
# If the entities, match, then we can continue
if entities_refined:
return True
if self.entity_matches(other):
return True
if self.isa(other, ontology):
return True
return False
def to_json(self):
json_dict = _o({'name': self.name})
json_dict['db_refs'] = self.db_refs
return json_dict
@classmethod
def _from_json(cls, json_dict):
name = json_dict.get('name')
db_refs = json_dict.get('db_refs', {})
if not name:
logger.error('Concept missing name.')
return None
# This fixes the fact that scored lists of groundings
# are deserialized as lists of lists instead of lists
# of tuples.
for key, val in db_refs.items():
if isinstance(val, list):
db_refs[key] = [tuple(v) for v in val]
concept = Concept(name, db_refs=db_refs)
return concept
def __str__(self):
return self.name
def __repr__(self):
return str(self)
def get_top_compositional_grounding(groundings):
def sort_key(entry):
scores = [grounding[1] for grounding in entry
if grounding is not None]
return sum(scores)/len(scores)
top_grounding = max(groundings, key=sort_key)
return top_grounding
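# Illustrative usage sketch (not part of the original module): a Concept with a
# scored list of groundings (case 2 in get_grounding) returns the highest
# scoring entry for the first namespace found in the default order. The
# grounding strings below are made up for demonstration.
def _demo_get_grounding():
    concept = Concept('rainfall',
                      db_refs={'WM': [('wm/concept/rain', 0.6),
                                      ('wm/concept/flood', 0.9)]})
    return concept.get_grounding()  # ('WM', 'wm/concept/flood')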
|
the-stack_106_17160
|
import sys
import re
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
import numpy as np
import itertools
def systematize_error(e, model_name, errors, bigger_errors):
pattern = re.compile('Observation: \{(.+)\}, Prediction: (.+), True label: (.+)')
m = re.findall(pattern, e)
overpredicted = m[0][1]
underpredicted = m[0][2]
obs = m[0][0]
pattern = re.compile("\'w\': \'(.+)\', \'pos\'.+")
m = re.findall(pattern, obs)
token = m[0]
store_error_info(errors, model_name, obs, overpredicted, token, underpredicted)
pattern = re.compile("(.+?)_.+")
if underpredicted != 'UNK':
pos = re.findall(pattern,underpredicted)[0]
else:
pos = 'UNK'
if not overpredicted.startswith(pos):
store_error_info(bigger_errors, model_name, obs, overpredicted, token, underpredicted)
def store_error_info(errors, model_name, obs, overpredicted, token, underpredicted):
if not token in errors[model_name]['token']:
errors[model_name]['token'][token] = []
errors[model_name]['token'][token].append({'obs': obs, 'pred': overpredicted, 'true': underpredicted})
if not overpredicted in errors[model_name]['overpredicted']:
errors[model_name]['overpredicted'][overpredicted] = []
errors[model_name]['overpredicted'][overpredicted].append(
{'obs': obs, 'pred': overpredicted, 'true': underpredicted})
if not underpredicted in errors[model_name]['underpredicted']:
errors[model_name]['underpredicted'][underpredicted] = []
errors[model_name]['underpredicted'][underpredicted].append(
{'obs': obs, 'pred': overpredicted, 'true': underpredicted})
def analyze_errors(errors, min_frequency):
for model in errors:
list_of_misclassified_tokens = sort_mistakes_by_len(errors, model,"token")
list_of_underpredicted_labels = sort_mistakes_by_len(errors,model,"underpredicted")
list_of_overpredicted_labels = sort_mistakes_by_len(errors,model,"overpredicted")
print("Errors made by {}".format(model))
num_errors = sum([n for n,t in list_of_misclassified_tokens])
print("Total mistakes: {}".format(num_errors))
#print("Wrongly classified token orthographies: {}".format(len(errors[model]['token'])))
print("Tokens that were misclassified most times:")
report_most_common_mistakes(list_of_misclassified_tokens, min_frequency)
print("True labels which were missed most often:")
report_most_common_mistakes(list_of_underpredicted_labels, min_frequency)
print("Labels which were most often predicted instead of a true label:")
report_most_common_mistakes(list_of_overpredicted_labels, min_frequency)
print('Reporting differences between two models')
for error_type in ['underpredicted','overpredicted']:
y_true=[]
y_pred=[]
for i, model_name in enumerate(errors.keys()):
model = errors[model_name]
diffset = find_error_diff(errors, error_type, i)
print("{} ONLY by {}:".format(error_type, model_name))
for e in diffset:
report = "{} ({})\t".format(e,len(model[error_type][e]))
other = {}
for ee in model[error_type][e]:
if error_type == 'underpredicted':
if not ee['pred'] in other:
other[ee['pred']] = 0
other[ee['pred']] += 1
elif error_type == 'overpredicted':
if not ee['true'] in other:
other[ee['true']] = 0
other[ee['true']] += 1
if len(model[error_type][e]) >= min_frequency:
y_pred.append(ee['pred'])
y_true.append(ee['true'])
if error_type == "underpredicted":
report += 'Predicted instead: '
elif error_type == "overpredicted":
report += 'The true labels were: '
for ee in other:
report += "{} ({}) ".format(ee,other[ee])
print(report)
#if len(y_true) > 0:
#classes = list(set(y_pred+y_true))
#cm = ConfusionMatrixDisplay.from_predictions(y_true, y_pred,labels=classes,display_labels=classes,xticks_rotation="vertical")
#fig, ax = plt.subplots(figsize=(20,20))
#cm.plot(ax=ax,xticks_rotation="vertical")
#plt.savefig(model_name+'-'+error_type+'-confmatrix.png')
def plotConfusionMatrix(cm, classes, normalize=False, title='Confusion Matrix', cmap = plt.cm.Blues):
plt.figure()
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
plt.ylabel('Actual Class')
plt.xlabel('Predicted Class')
# if normalize:
# cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# print('Normalized Confusion Matrix')
# else:
# print('Un-normalized Confusion Matrix')
#print(cm)
# thresh = cm.max()/2
#
# for i,j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
# plt.text(j,i, cm[i,j], horizontalalignment='center', color='white' if cm[i,j] > thresh else 'black', fontsize=25, fontweight='bold')
# plt.tight_layout()
# plt.ylabel('Actual Class')
# plt.xlabel('Predicted Class')
return plt
def find_error_diff(errors, k, i):
set1 = set()
set2 = set()
for j, model in enumerate(errors.keys()):
if j == 0:
set1 = set(errors[model][k].keys())
elif j == 1:
set2 = set(errors[model][k].keys())
if set1 and set2:
if i == 0:
return set1 - set2
elif i == 1:
return set2 - set1
else:
return set1
def report_most_common_mistakes(list_of_mistakes, m):
for n, t in list_of_mistakes:
if n > m:
print("{} ({})".format(t, n))
else:
break
def sort_mistakes_by_len(errors, model, key):
list_of_mistakes = []
for t in errors[model][key]:
list_of_mistakes.append((len(errors[model][key][t]), t))
list_of_mistakes = sorted(list_of_mistakes, reverse=True)
return list_of_mistakes
def find_errors_neural(pred,gold):
errors = []
for i, p in enumerate(pred):
tok_p, lp = p.split(' ')
g = gold[i]
tok_g, gl = g.split('\t')
# if tok_p != tok_g:
# print(tok_p + '\t' + tok_g)
if gl != lp:
w = "{{'w': '{}', 'pos': NEURAL}}".format(tok_p)
errors.append("Observation: {}, Prediction: {}, "
"True label: {}".format(w,lp.strip(),gl.strip()))
return errors
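# Illustrative sketch (not invoked by the command-line script below):
# systematize_error expects lines of the form produced by find_errors_neural;
# the sample observation and labels here are made up for demonstration.
def _demo_systematize_error():
    errors_demo = {'demo': {'token': {}, 'overpredicted': {}, 'underpredicted': {}}}
    bigger_demo = {'demo': {'token': {}, 'overpredicted': {}, 'underpredicted': {}}}
    line = ("Observation: {'w': 'cats', 'pos': NEURAL}, "
            "Prediction: VERB_inf, True label: NOUN_pl")
    systematize_error(line, 'demo', errors_demo, bigger_demo)
    # The token 'cats' is now recorded with its over- and under-predicted labels.
    return errors_demo['demo']['token']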
if sys.argv[3] == 'classic':
with open(sys.argv[2], 'r') as f:
errors1 = f.readlines()
else:
with open(sys.argv[2], 'r') as f:
predictions_ = [ ln for ln in f.readlines() if ln != '\n' ]
with open(sys.argv[3], 'r') as f:
gold = [g for g in f.readlines() if g != '\n']
predictions = [p for p in predictions_ if not (p == '\n' or p.startswith('# 1.0'))]
errors1 = find_errors_neural(predictions, gold)
errors = {sys.argv[2]: {'token':{}, 'overpredicted':{}, 'underpredicted':{}}}
bigger_errors = {sys.argv[2]: {'token': {}, 'overpredicted': {}, 'underpredicted': {}}}
for e in errors1:
systematize_error(e,sys.argv[2],errors, bigger_errors)
if len(sys.argv) > 5:
with open(sys.argv[3], 'r') as f:
errors2 = f.readlines()
errors[sys.argv[3]] = {'token':{}, 'overpredicted':{}, 'underpredicted':{}}
bigger_errors[sys.argv[3]] = {'token': {}, 'overpredicted': {}, 'underpredicted': {}}
for e in errors2:
systematize_error(e,sys.argv[3],errors,bigger_errors)
min_freq = int(sys.argv[1])
analyze_errors(errors, min_freq)
print('Errors between types with different prefix:')
analyze_errors(bigger_errors,0)
|
the-stack_106_17161
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020 Wu Yi-Chiao (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import os
import sys
import yaml
import copy
import fnmatch
import datetime
import numpy as np
class ConfigInfo(object):
def __init__(self, yml_path, c_text):
self.flag = True
self.yml_path = yml_path # file path of yml format config file
self.c_text = c_text # dictionary of text
conf_user, self.fconf_user = self._load_record()
self.conf, self.fconf = self._load_config()
# check the number of sub set
n_subset = conf_user[c_text['n_subset']]
for t_idx in range(len(self.conf)):
n_setlist = len(self.conf[t_idx][c_text['t_set']])
if n_setlist != n_subset:
print('The number of sub set lists of "%s(%d)" is not the same as %s in %s.yml(%d)!' \
% (self.conf[t_idx][c_text['t_type']], n_setlist,
c_text['n_subset'], c_text['recordf'], n_subset))
sys.exit(0)
self.n_subset = n_subset
def _check_file_exist(self, filename):
if not os.path.exists(filename):
print("%s doesn't exist!"%filename)
return False
return True
def _yaml_dump(self, filename, data, encoding='utf-8'):
with open(filename, "w", encoding=encoding) as yf:
yaml.safe_dump(data, yf)
def _yaml_load(self, filename, encoding='utf-8'):
if not os.path.exists(filename):
print("%s doesn't exist!" % filename)
sys.exit(0)
with open(filename, "r", encoding=encoding) as yf:
return yaml.safe_load(yf)
def _load_record(self):
# fconf_user: the config file of the information of all users
# conf_user (dict):
# MAX_count: the total number of the testing subsets
# Subject_name: the list of users
# Subject_set: the corresponding subset index of each user
# count: the index of the current subset
# time: last updated time
fconf_user = "%s%s.yml" % (self.yml_path, self.c_text['recordf'])
if not self._check_file_exist(fconf_user):
sys.exit(0)
conf_user = self._yaml_load(fconf_user)
return conf_user, fconf_user
def _load_config(self):
# fconf: the config file of all evaluations information
# conf (list of dict):
# method: evaluated methods
# set: the name of each subset
# type: the type of the test (MOS, SIM, XAB, PK)
fconf = "%s%s.yml"%(self.yml_path, self.c_text['configf'])
if not self._check_file_exist(fconf):
sys.exit(0)
conf = self._yaml_load(fconf)
return conf, fconf
class ParserConf(ConfigInfo):
"""
Class to parse config files
"""
def __init__(self, data_path, template_path, yml_path, c_text):
super().__init__(yml_path, c_text)
self.data_path = data_path
self.template_path = template_path
self.t_info = self._load_sysinfo()
def _get_spkidx(self, t_path, spk):
for idx, item in enumerate(t_path):
if spk == item:
break
if idx == 0:
print('The path format of %s for speaker %s is wrong! It should be "/spk/"!' % ('/'.join(t_path), spk))
sys.exit(0)
return idx
def _get_pairidx(self, t_path, srcspk, tarspk):
for idx, item in enumerate(t_path):
if fnmatch.fnmatch(item, '%s*%s' % (srcspk, tarspk)):
break
if idx == 0:
print('The path format for %s and %s is wrong! It should be "/spk/spk/" or "/spk-spk/"!' % (srcspk, tarspk))
sys.exit(0)
return idx
def _check_idx(self, item, name, idx):
if idx >= len(item):
msg = "idx '%s' (%d) is out of the length of %s\n" % (name, idx, str(item))
msg += "The template path cannot be correctly splitted by slash.\n"
msg += "Please check the template path in 'test_system.yml'."
raise ValueError(msg)
def _load_sysinfo(self):
sysinfof = "%s%s.yml"%(self.yml_path, self.c_text['systemf'])
if not self._check_file_exist(sysinfof):
sys.exit(0)
sysinfo = self._yaml_load(sysinfof)
t_paths = sysinfo[self.c_text['templatedir']]
srcspk = self.c_text['srcspk']
tarspk = self.c_text['tarspk']
for system in list(t_paths.keys()):
t_paths[system] = t_paths[system].replace('\\', '/')
t_path = t_paths[system].split('/')
if srcspk in t_paths[system]:
if tarspk in t_paths[system]: # voice conversion
if tarspk in t_path and srcspk in t_path:
# path format: /srcspk/tarspk/ or /tarspk/srcspk/
srcidx = t_path.index(srcspk)
taridx = t_path.index(tarspk)
t_paths[system] = {'src':srcidx, 'tar':taridx, 'split':None,
'src_sub':None, 'tar_sub':None }
else:
# path format /srcspk-tarspk/ or /tarspk-srcspk/
pairidx = self._get_pairidx(t_path, srcspk, tarspk)
symbol = t_path[pairidx].replace(srcspk, "").replace(tarspk, "")
subsrc = t_path[pairidx].split(symbol).index(srcspk)
subtar = t_path[pairidx].split(symbol).index(tarspk)
t_paths[system] = {'src':pairidx, 'tar':pairidx, 'split':symbol,
'src_sub':subsrc, 'tar_sub':subtar }
else: # source speaker only
spkidx = self._get_spkidx(t_path, srcspk)
t_paths[system] = {'src':spkidx, 'tar':None, 'split':None,
'src_sub':None, 'tar_sub':None }
elif tarspk in t_paths[system]: # target speaker only
spkidx = self._get_spkidx(t_path, tarspk)
t_paths[system] = {'src':None, 'tar':spkidx, 'split':None,
'src_sub':None, 'tar_sub':None }
else:
print('%s or %s is not in the template path of %s of %s!'\
% (srcspk, tarspk, system, sysinfof))
sys.exit(0)
return sysinfo
def _load_divide_list(self, flistf, ref=None, reflen=0):
# load file list and divide it into sub set lists
if not self._check_file_exist(flistf):
sys.exit(0)
with open(flistf, "r") as f:
file_list = f.readlines()
flen = len(file_list)
if ref != None:
if reflen != flen:
print('The list lengths of %s(%d) and %s(%d) should be the same!' \
% (ref, reflen, flistf, flen))
sys.exit(0)
file_list= [file.strip() for file in file_list]
file_lists = np.reshape(file_list, (-1, self.n_subset))
file_lists = [file_lists[:,i].tolist() for i in range(self.n_subset)]
return file_lists, flen
def _parse_spkinfo(self, pathinfo, genderinfo, t_dict, filename):
filename = filename.replace('\\', '/')
item = filename.split('/')
if pathinfo['tar'] == None: # only source speaker
self._check_idx(item, 'src', pathinfo['src'])
t_dict['srcspk'] = item[pathinfo['src']]
t_dict['gender'] = genderinfo[t_dict['srcspk']]
elif pathinfo['src'] == None: # only target speaker
self._check_idx(item, 'tar', pathinfo['tar'])
t_dict['tarspk'] = item[pathinfo['tar']]
t_dict['gender'] = genderinfo[t_dict['tarspk']]
else: # voice conversion
if pathinfo['src'] == pathinfo['tar']: # format /srcspk/tarspk/ or /tarspk/srcspk/
self._check_idx(item, 'src', pathinfo['src'])
spkpair = item[pathinfo['src']].split(pathinfo['split'])
t_dict['srcspk'] = spkpair[pathinfo['src_sub']]
t_dict['tarspk'] = spkpair[pathinfo['tar_sub']]
else: # format /srcspk-tarspk/ or /tarspk-srcspk/
self._check_idx(item, 'src', pathinfo['src'])
self._check_idx(item, 'tar', pathinfo['tar'])
t_dict['srcspk'] = item[pathinfo['src']]
t_dict['tarspk'] = item[pathinfo['tar']]
t_dict['conversion'] = True
srcgender = genderinfo[t_dict['srcspk']]
targender = genderinfo[t_dict['tarspk']]
if srcgender != targender:
t_dict['xgender'] = True
t_dict['gender'] = genderinfo[t_dict['tarspk']]
t_dict['pair'] = '%s-%s' % (srcgender, targender)
def _parse_mos(self, sysinfo, template, t_system, file_lists, conf):
for set_idx, file_list in enumerate(file_lists): # for each sub set
for filename in file_list: # for each file
t_dict = copy.deepcopy(template)
t_dict['File'] = filename
t_dict[self.c_text['system']] = t_system
self._parse_spkinfo(sysinfo[self.c_text['templatedir']][t_system],
sysinfo[self.c_text['spk']],
t_dict, filename)
conf[set_idx].append(t_dict)
def _parse_sim(self, sysinfo, template, t_system, file_lists, ref_lists, conf):
for set_idx, file_list in enumerate(file_lists): # for each sub set
for filename, refname in zip(file_list, ref_lists[set_idx]): # for each file
t_dict = copy.deepcopy(template)
t_dict['File'] = filename
t_dict['File_ans'] = refname
t_dict[self.c_text['system']] = t_system
self._parse_spkinfo(sysinfo[self.c_text['templatedir']][t_system],
sysinfo[self.c_text['spk']],
t_dict, filename)
conf[set_idx].append(t_dict)
def _parse_xab(self, sysinfo, template, t_system,
systemA, systemB, systemX,
fileA_lists, fileB_lists, fileX_lists, conf):
for set_idx in range(len(fileA_lists)): # for each sub set
listA = fileA_lists[set_idx]
listB = fileB_lists[set_idx]
listX = fileX_lists[set_idx]
assert len(listA)==len(listB)==len(listX)
for file_idx in range(len(listA)): # for each file
t_dict = copy.deepcopy(template)
t_dict['FileA'] = listA[file_idx]
t_dict['FileB'] = listB[file_idx]
t_dict['FileX'] = listX[file_idx]
t_dict[self.c_text['system']] = t_system
t_dict[self.c_text['system']+'A'] = systemA
t_dict[self.c_text['system']+'B'] = systemB
t_dict[self.c_text['system']+'X'] = systemX
self._parse_spkinfo(sysinfo[self.c_text['templatedir']][systemA],
sysinfo[self.c_text['spk']],
t_dict, listA[file_idx])
conf[set_idx].append(t_dict)
def _parse_pk(self, sysinfo, template, t_system,
systemA, systemB,
fileA_lists, fileB_lists, conf):
for set_idx in range(len(fileA_lists)): # for each sub set
listA = fileA_lists[set_idx]
listB = fileB_lists[set_idx]
assert len(listA)==len(listB)
for file_idx in range(len(listA)): # for each file
t_dict = copy.deepcopy(template)
t_dict['FileA'] = listA[file_idx]
t_dict['FileB'] = listB[file_idx]
t_dict[self.c_text['system']] = t_system
t_dict[self.c_text['system']+'A'] = systemA
t_dict[self.c_text['system']+'B'] = systemB
self._parse_spkinfo(sysinfo[self.c_text['templatedir']][systemA],
sysinfo[self.c_text['spk']],
t_dict, listA[file_idx])
conf[set_idx].append(t_dict)
def subset_gen(self):
print('Test files will be divided into %d sub-sets.' % self.n_subset)
for t_idx in range(len(self.conf)): # for each evaluation type
t_type = self.conf[t_idx][self.c_text['t_type']]
t_systems = self.conf[t_idx][self.c_text['system']]
t_sets = self.conf[t_idx][self.c_text['t_set']]
assert len(t_sets) == self.n_subset
tempf = '%s/%s.yml' % (self.template_path, t_type)
template = self._yaml_load(tempf)
conf = [[] for i in range(self.n_subset)]
for t_system in t_systems: # for each test method
if t_type == 'MOS' or t_type == 'SIM':
# load divided file list
flistf = '%s%s.list' % (self.data_path, t_system)
file_lists, flen = self._load_divide_list(flistf)
if t_type == 'MOS':
self._parse_mos(self.t_info, template, t_system, file_lists, conf)
elif t_type == 'SIM':
t_reference = self.conf[t_idx][self.c_text['reference']]
reflistf = '%s%s.list' % (self.data_path, t_reference)
ref_lists, _ = self._load_divide_list(reflistf, flistf, flen)
self._parse_sim(self.t_info, template, t_system, file_lists, ref_lists, conf)
elif t_type == 'XAB' or t_type == 'PK':
# parse systemA, systemB
items = t_system.split('-')
systemA = items[0].strip('*')
systemB = items[1]
# load divided file list
listAf = '%s%s.list' % (self.data_path, systemA)
fileA_lists, flen = self._load_divide_list(listAf)
listBf = '%s%s.list' % (self.data_path, systemB)
fileB_lists, _ = self._load_divide_list(listBf, listAf, flen)
if t_type == 'PK':
self._parse_pk(self.t_info, template, t_system,
systemA, systemB,
fileA_lists, fileB_lists, conf)
elif t_type == 'XAB':
systemX = items[2]
listXf = '%s%s.list' % (self.data_path, systemX)
fileX_lists, _ = self._load_divide_list(listXf, listAf, flen)
self._parse_xab(self.t_info, template, t_system,
systemA, systemB, systemX,
fileA_lists, fileB_lists, fileX_lists, conf)
else:
print('Type %s is not supported! Please check %s!!' % (t_type, self.fconf))
for set_idx, t_set in enumerate(t_sets): # for each sub set
subsetf = '%s%s' % (self.yml_path, t_set)
self._yaml_dump(subsetf, conf[set_idx])
class UserInfo(ConfigInfo):
"""
Class of all information of each user
"""
def __init__(self, yml_path, c_text):
super().__init__(yml_path, c_text)
self.t_idxs = range(len(self.conf))
def _conf_user_save(self, conf_user, fconf_user, name, t_set):
conf_user[self.c_text['user_name']].append(name)
conf_user[self.c_text['t_subset']].append(t_set)
conf_user[self.c_text['date']] = datetime.datetime.now()
self._yaml_dump(fconf_user, conf_user)
def _check_progress(self, recordf_user):
return list(filter(lambda item: item[self.c_text['t_finish']] == False, recordf_user))
def _load_test_data(self, conf, fconf, name, t_set):
test_type = [] # the list of test type
test_set = [] # the list of user yml corresponding to each test type
test_system = [] # the list of methods corresponding to each test type
total_dict = [] # the result list of dicts of all tests
eval_dict = [] # the result list of dicts of the uncompleted tests
for t_idx in range(len(conf)):
test_type.append(conf[t_idx][self.c_text['t_type']])
test_system.append(conf[t_idx][self.c_text['system']])
# load template record yml file
if not self._check_file_exist(self.yml_path + conf[t_idx][self.c_text['t_set']][t_set]):
print('Please check the "set" setting in %s' % fconf)
sys.exit(0)
recordf_user = self._yaml_load(self.yml_path + conf[t_idx][self.c_text['t_set']][t_set])
# create user record yml file
user_set = conf[t_idx][self.c_text['t_set']][t_set].replace(
".yml", "_%s.yml" % name)
test_set.append(self.yml_path + user_set)
if not self._check_file_exist(os.path.dirname(test_set[t_idx])):
print('Please check the "set" setting in %s' % fconf)
sys.exit(0)
self._yaml_dump(test_set[t_idx], recordf_user)
total_dict.append(recordf_user)
eval_dict.append(recordf_user)
self.test_type = test_type
self.test_system = test_system
self.test_set = test_set
self.eval_dict = eval_dict
self.total_dict = total_dict
def _reload_test_data(self, conf, fconf_user, name, t_set):
test_type = [] # the list of test type
test_set = [] # the list of user yml corresponding to each test type
test_system = [] # the list of methods corresponding to each test type
total_dict = [] # the result list of dicts of all tests
eval_dict = [] # the result list of dicts of the uncompleted tests
for t_idx in range(len(conf)):
test_type.append(conf[t_idx][self.c_text['t_type']])
test_system.append(conf[t_idx][self.c_text['system']])
# load user record yml file (record_user)
user_set = conf[t_idx][self.c_text['t_set']][t_set].replace(
".yml", "_%s.yml"%name)
test_set.append(self.yml_path + user_set)
if not self._check_file_exist(test_set[t_idx]):
print('User %s data lost!! Please check %s' % (name, fconf_user))
sys.exit(0)
recordf_user = self._yaml_load(test_set[t_idx])
total_dict.append(recordf_user)
#CHECK PROGRESS
# remaining unfinished parts
r_recordf_user = self._check_progress(recordf_user)
eval_dict.append(r_recordf_user)
if len(r_recordf_user) == 0:
self.finished[t_idx] = True
if len(r_recordf_user) < len(recordf_user):
self.initial[t_idx] = False
self.test_type = test_type
self.test_system = test_system
self.test_set = test_set
self.eval_dict = eval_dict
self.total_dict = total_dict
def check_user(self, name):
"""LOAD AND CHECK USER PROGRESS
Args:
name (str): the name of the user
Return:
flag (bool): True: all tests of the user have been completed
False: new user or the tests are not completed
"""
#LOAD RECORD
conf_user, _ = self._load_record()
#LOAD USER INFO
self.finished = [False]*len(self.conf)
self.initial = [True]*len(self.conf)
if name in conf_user[self.c_text['user_name']]:
# load user (conf_user)
t_set = conf_user[self.c_text['t_subset']][conf_user[self.c_text['user_name']].index(name)]
# reload testing data
self._reload_test_data(self.conf, self.fconf_user, name, t_set)
else:
# create new user (conf_user)
conf_user[self.c_text['subset_idx']] += 1
if conf_user[self.c_text['subset_idx']] >= conf_user[self.c_text['n_subset']]:
conf_user[self.c_text['subset_idx']] = 0
t_set = conf_user[self.c_text['subset_idx']]
# load testing data
self._load_test_data(self.conf, self.fconf, name, t_set)
self._conf_user_save(conf_user, self.fconf_user, name, t_set)
self.flag = (sum(self.finished)==len(self.conf))
return self.flag
def save_result(self, t_idx):
"""SAVE RESULTS
Args:
t_idx (int): the index of test
"""
self._yaml_dump(self.test_set[t_idx], self.total_dict[t_idx])
#CHECK PROGRESS
if len(self._check_progress(self.total_dict[t_idx])) == 0:
self.finished[t_idx] = True
# USER RESULT CLASS
class UserResult(UserInfo):
"""
Class of results of each user
"""
def __init__(self, user_name, user_set, test_type, yml_path, c_text):
super().__init__(yml_path, c_text)
self.name = user_name
self.t_set = user_set
self.t_type = test_type
self._load_result()
# remaining unfinished parts
r_recordf_user = self._check_progress(self.recordf_user)
if len(r_recordf_user) == 0:
self.finished = True
else:
self.finished = False
def _load_result(self):
# load user yml
frecordf_user = "%s%s_%d_%s.yml" % (self.yml_path,
self.t_type,
self.t_set,
self.name)
if not self._check_file_exist(frecordf_user):
sys.exit(0)
self.recordf_user = self._yaml_load(frecordf_user)
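# Illustrative sketch (not part of the original module): _load_divide_list
# reshapes a flat file list into n_subset interleaved sub-lists. With six files
# and two subsets the split looks like this (the file names are made up).
def _demo_divide_list(n_subset=2):
    file_list = ['f0.wav', 'f1.wav', 'f2.wav', 'f3.wav', 'f4.wav', 'f5.wav']
    file_lists = np.reshape(file_list, (-1, n_subset))
    return [file_lists[:, i].tolist() for i in range(n_subset)]
    # -> [['f0.wav', 'f2.wav', 'f4.wav'], ['f1.wav', 'f3.wav', 'f5.wav']]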
|
the-stack_106_17162
|
from flask import render_template,redirect,url_for, abort,request,flash
from . import main
from .forms import CommentsForm, UpdateProfile, CaseForm
from ..models import User, Case,Comment
from flask_login import login_required, current_user
from .. import db, photos
import markdown2
@main.route('/')
def index():
'''
View root page function that returns the index page and its data
'''
posts = Case.query.order_by(Case.posted.desc()).all()
title = 'Home - Welcome to the Caseing site'
return render_template('index.html', title = title, posts=posts)
@main.route('/post/<int:post_id>')
def post(post_id):
'''
View function that returns a single post page and its comments
'''
post = Case.query.filter_by(id=post_id).one()
post_comments = Comment.get_comments(post_id)
title = ''
return render_template('post.html', title = title, post=post, post_comments=post_comments )
@main.route('/add', methods=['GET', 'POST'])
@login_required
def add():
'''
View function that returns the add post page and its data
'''
form = CaseForm()
if form.validate_on_submit():
title = form.title.data
content = form.content.data
new_post = Case(title=title,content=content, user=current_user)
db.session.add(new_post)
db.session.commit()
return redirect(url_for('main.index'))
return render_template('add.html', form=form)
@main.route('/pitch/comments/new/<int:id>',methods = ['GET','POST'])
@login_required
def new_comment(id):
form = CommentsForm()
if form.validate_on_submit():
new_comment = Comment(case_id =id,comment=form.comment.data)
new_comment.save_comments()
return redirect(url_for('main.post',post_id=id))
return render_template('new_comment.html',comment_form=form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
def update_post(post_id):
post = Case.query.get_or_404(post_id)
if post.user != current_user:
abort(403)
form = CaseForm()
if form.validate_on_submit():
post.title = form.title.data
post.content = form.content.data
db.session.commit()
flash('Your post has been updated!', 'success')
return redirect(url_for('main.post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.content.data = post.content
return render_template('add.html', title='Update Post', form=form)
@main.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Case.query.get_or_404(post_id)
if post.user != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('main.index'))
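# Illustrative sketch (not part of the original module): the index view above
# can be exercised with Flask's test client. The `create_app` application
# factory and the 'testing' config name are assumptions about how this app is
# constructed elsewhere in the project.
def _demo_index_route():
    from app import create_app  # assumed application factory location
    app = create_app('testing')
    with app.test_client() as client:
        response = client.get('/')
        return response.status_code  # 200 expected once templates and DB are configured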
|
the-stack_106_17163
|
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Auto Model class. """
import warnings
from collections import OrderedDict
from .configuration_auto import (
AlbertConfig,
AutoConfig,
BartConfig,
BertConfig,
BertGenerationConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
EncoderDecoderConfig,
FlaubertConfig,
FSMTConfig,
FunnelConfig,
GPT2Config,
LayoutLMConfig,
LongformerConfig,
LxmertConfig,
MBartConfig,
MobileBertConfig,
OpenAIGPTConfig,
PegasusConfig,
ReformerConfig,
RetriBertConfig,
RobertaConfig,
T5Config,
TransfoXLConfig,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
replace_list_option_in_docstrings,
)
from .configuration_marian import MarianConfig
from .configuration_utils import PretrainedConfig
from .file_utils import add_start_docstrings
from .modeling_albert import (
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from .modeling_bart import (
BartForConditionalGeneration,
BartForQuestionAnswering,
BartForSequenceClassification,
BartModel,
)
from .modeling_bert import (
BertForMaskedLM,
BertForMultipleChoice,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLMHeadModel,
BertModel,
)
from .modeling_bert_generation import BertGenerationDecoder, BertGenerationEncoder
from .modeling_camembert import (
CamembertForCausalLM,
CamembertForMaskedLM,
CamembertForMultipleChoice,
CamembertForQuestionAnswering,
CamembertForSequenceClassification,
CamembertForTokenClassification,
CamembertModel,
)
from .modeling_ctrl import CTRLLMHeadModel, CTRLModel
from .modeling_distilbert import (
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
from .modeling_dpr import DPRQuestionEncoder
from .modeling_electra import (
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
)
from .modeling_encoder_decoder import EncoderDecoderModel
from .modeling_flaubert import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel
from .modeling_funnel import (
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
)
from .modeling_gpt2 import GPT2LMHeadModel, GPT2Model
from .modeling_layoutlm import LayoutLMForMaskedLM, LayoutLMForTokenClassification, LayoutLMModel
from .modeling_longformer import (
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
)
from .modeling_lxmert import LxmertForPreTraining, LxmertModel
from .modeling_marian import MarianMTModel
from .modeling_mbart import MBartForConditionalGeneration
from .modeling_mobilebert import (
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
from .modeling_openai import OpenAIGPTLMHeadModel, OpenAIGPTModel
from .modeling_pegasus import PegasusForConditionalGeneration
from .modeling_rag import ( # noqa: F401 - need to import all RagModels to be in globals() function
RagModel,
RagSequenceForGeneration,
RagTokenForGeneration,
)
from .modeling_reformer import (
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerModel,
ReformerModelWithLMHead,
)
from .modeling_retribert import RetriBertModel
from .modeling_roberta import (
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
)
from .modeling_t5 import T5ForConditionalGeneration, T5Model
from .modeling_transfo_xl import TransfoXLLMHeadModel, TransfoXLModel
from .modeling_xlm import (
XLMForMultipleChoice,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from .modeling_xlm_roberta import (
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
)
from .modeling_xlnet import (
XLNetForMultipleChoice,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
)
from .utils import logging
logger = logging.get_logger(__name__)
MODEL_MAPPING = OrderedDict(
[
(RetriBertConfig, RetriBertModel),
(T5Config, T5Model),
(DistilBertConfig, DistilBertModel),
(AlbertConfig, AlbertModel),
(CamembertConfig, CamembertModel),
(XLMRobertaConfig, XLMRobertaModel),
(BartConfig, BartModel),
(LongformerConfig, LongformerModel),
(RobertaConfig, RobertaModel),
(LayoutLMConfig, LayoutLMModel),
(BertConfig, BertModel),
(OpenAIGPTConfig, OpenAIGPTModel),
(GPT2Config, GPT2Model),
(MobileBertConfig, MobileBertModel),
(TransfoXLConfig, TransfoXLModel),
(XLNetConfig, XLNetModel),
(FlaubertConfig, FlaubertModel),
(FSMTConfig, FSMTModel),
(XLMConfig, XLMModel),
(CTRLConfig, CTRLModel),
(ElectraConfig, ElectraModel),
(ReformerConfig, ReformerModel),
(FunnelConfig, FunnelModel),
(LxmertConfig, LxmertModel),
(BertGenerationConfig, BertGenerationEncoder),
(DPRConfig, DPRQuestionEncoder),
]
)
MODEL_FOR_PRETRAINING_MAPPING = OrderedDict(
[
(LayoutLMConfig, LayoutLMForMaskedLM),
(RetriBertConfig, RetriBertModel),
(T5Config, T5ForConditionalGeneration),
(DistilBertConfig, DistilBertForMaskedLM),
(AlbertConfig, AlbertForPreTraining),
(CamembertConfig, CamembertForMaskedLM),
(XLMRobertaConfig, XLMRobertaForMaskedLM),
(BartConfig, BartForConditionalGeneration),
(FSMTConfig, FSMTForConditionalGeneration),
(LongformerConfig, LongformerForMaskedLM),
(RobertaConfig, RobertaForMaskedLM),
(BertConfig, BertForPreTraining),
(OpenAIGPTConfig, OpenAIGPTLMHeadModel),
(GPT2Config, GPT2LMHeadModel),
(MobileBertConfig, MobileBertForPreTraining),
(TransfoXLConfig, TransfoXLLMHeadModel),
(XLNetConfig, XLNetLMHeadModel),
(FlaubertConfig, FlaubertWithLMHeadModel),
(XLMConfig, XLMWithLMHeadModel),
(CTRLConfig, CTRLLMHeadModel),
(ElectraConfig, ElectraForPreTraining),
(LxmertConfig, LxmertForPreTraining),
]
)
MODEL_WITH_LM_HEAD_MAPPING = OrderedDict(
[
(LayoutLMConfig, LayoutLMForMaskedLM),
(T5Config, T5ForConditionalGeneration),
(DistilBertConfig, DistilBertForMaskedLM),
(AlbertConfig, AlbertForMaskedLM),
(CamembertConfig, CamembertForMaskedLM),
(XLMRobertaConfig, XLMRobertaForMaskedLM),
(MarianConfig, MarianMTModel),
(FSMTConfig, FSMTForConditionalGeneration),
(BartConfig, BartForConditionalGeneration),
(LongformerConfig, LongformerForMaskedLM),
(RobertaConfig, RobertaForMaskedLM),
(BertConfig, BertForMaskedLM),
(OpenAIGPTConfig, OpenAIGPTLMHeadModel),
(GPT2Config, GPT2LMHeadModel),
(MobileBertConfig, MobileBertForMaskedLM),
(TransfoXLConfig, TransfoXLLMHeadModel),
(XLNetConfig, XLNetLMHeadModel),
(FlaubertConfig, FlaubertWithLMHeadModel),
(XLMConfig, XLMWithLMHeadModel),
(CTRLConfig, CTRLLMHeadModel),
(ElectraConfig, ElectraForMaskedLM),
(EncoderDecoderConfig, EncoderDecoderModel),
(ReformerConfig, ReformerModelWithLMHead),
(FunnelConfig, FunnelForMaskedLM),
]
)
MODEL_FOR_CAUSAL_LM_MAPPING = OrderedDict(
[
(CamembertConfig, CamembertForCausalLM),
(XLMRobertaConfig, XLMRobertaForCausalLM),
(RobertaConfig, RobertaForCausalLM),
(BertConfig, BertLMHeadModel),
(OpenAIGPTConfig, OpenAIGPTLMHeadModel),
(GPT2Config, GPT2LMHeadModel),
(TransfoXLConfig, TransfoXLLMHeadModel),
(XLNetConfig, XLNetLMHeadModel),
(
XLMConfig,
XLMWithLMHeadModel,
), # XLM can be MLM and CLM => model should be split similar to BERT; leave here for now
(CTRLConfig, CTRLLMHeadModel),
(ReformerConfig, ReformerModelWithLMHead),
(BertGenerationConfig, BertGenerationDecoder),
]
)
MODEL_FOR_MASKED_LM_MAPPING = OrderedDict(
[
(LayoutLMConfig, LayoutLMForMaskedLM),
(DistilBertConfig, DistilBertForMaskedLM),
(AlbertConfig, AlbertForMaskedLM),
(BartConfig, BartForConditionalGeneration),
(CamembertConfig, CamembertForMaskedLM),
(XLMRobertaConfig, XLMRobertaForMaskedLM),
(LongformerConfig, LongformerForMaskedLM),
(RobertaConfig, RobertaForMaskedLM),
(BertConfig, BertForMaskedLM),
(MobileBertConfig, MobileBertForMaskedLM),
(FlaubertConfig, FlaubertWithLMHeadModel),
(XLMConfig, XLMWithLMHeadModel),
(ElectraConfig, ElectraForMaskedLM),
(ReformerConfig, ReformerForMaskedLM),
(FunnelConfig, FunnelForMaskedLM),
]
)
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = OrderedDict(
[
(T5Config, T5ForConditionalGeneration),
(PegasusConfig, PegasusForConditionalGeneration),
(MarianConfig, MarianMTModel),
(MBartConfig, MBartForConditionalGeneration),
(BartConfig, BartForConditionalGeneration),
(FSMTConfig, FSMTForConditionalGeneration),
(EncoderDecoderConfig, EncoderDecoderModel),
]
)
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = OrderedDict(
[
(DistilBertConfig, DistilBertForSequenceClassification),
(AlbertConfig, AlbertForSequenceClassification),
(CamembertConfig, CamembertForSequenceClassification),
(XLMRobertaConfig, XLMRobertaForSequenceClassification),
(BartConfig, BartForSequenceClassification),
(LongformerConfig, LongformerForSequenceClassification),
(RobertaConfig, RobertaForSequenceClassification),
(BertConfig, BertForSequenceClassification),
(XLNetConfig, XLNetForSequenceClassification),
(MobileBertConfig, MobileBertForSequenceClassification),
(FlaubertConfig, FlaubertForSequenceClassification),
(XLMConfig, XLMForSequenceClassification),
(ElectraConfig, ElectraForSequenceClassification),
(FunnelConfig, FunnelForSequenceClassification),
]
)
MODEL_FOR_QUESTION_ANSWERING_MAPPING = OrderedDict(
[
(DistilBertConfig, DistilBertForQuestionAnswering),
(AlbertConfig, AlbertForQuestionAnswering),
(CamembertConfig, CamembertForQuestionAnswering),
(BartConfig, BartForQuestionAnswering),
(LongformerConfig, LongformerForQuestionAnswering),
(XLMRobertaConfig, XLMRobertaForQuestionAnswering),
(RobertaConfig, RobertaForQuestionAnswering),
(BertConfig, BertForQuestionAnswering),
(XLNetConfig, XLNetForQuestionAnsweringSimple),
(FlaubertConfig, FlaubertForQuestionAnsweringSimple),
(MobileBertConfig, MobileBertForQuestionAnswering),
(XLMConfig, XLMForQuestionAnsweringSimple),
(ElectraConfig, ElectraForQuestionAnswering),
(ReformerConfig, ReformerForQuestionAnswering),
(FunnelConfig, FunnelForQuestionAnswering),
]
)
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict(
[
(LayoutLMConfig, LayoutLMForTokenClassification),
(DistilBertConfig, DistilBertForTokenClassification),
(CamembertConfig, CamembertForTokenClassification),
(FlaubertConfig, FlaubertForTokenClassification),
(XLMConfig, XLMForTokenClassification),
(XLMRobertaConfig, XLMRobertaForTokenClassification),
(LongformerConfig, LongformerForTokenClassification),
(RobertaConfig, RobertaForTokenClassification),
(BertConfig, BertForTokenClassification),
(MobileBertConfig, MobileBertForTokenClassification),
(XLNetConfig, XLNetForTokenClassification),
(AlbertConfig, AlbertForTokenClassification),
(ElectraConfig, ElectraForTokenClassification),
(FunnelConfig, FunnelForTokenClassification),
]
)
MODEL_FOR_MULTIPLE_CHOICE_MAPPING = OrderedDict(
[
(CamembertConfig, CamembertForMultipleChoice),
(ElectraConfig, ElectraForMultipleChoice),
(XLMRobertaConfig, XLMRobertaForMultipleChoice),
(LongformerConfig, LongformerForMultipleChoice),
(RobertaConfig, RobertaForMultipleChoice),
(BertConfig, BertForMultipleChoice),
(DistilBertConfig, DistilBertForMultipleChoice),
(MobileBertConfig, MobileBertForMultipleChoice),
(XLNetConfig, XLNetForMultipleChoice),
(AlbertConfig, AlbertForMultipleChoice),
(XLMConfig, XLMForMultipleChoice),
(FlaubertConfig, FlaubertForMultipleChoice),
(FunnelConfig, FunnelForMultipleChoice),
]
)
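# Illustrative sketch (not part of the original module): each mapping above is
# keyed by a configuration class and maps to the corresponding model class, so
# a direct lookup mirrors the isinstance-based dispatch done by the Auto classes.
def _demo_mapping_lookup():
    return (MODEL_MAPPING[BertConfig] is BertModel,
            MODEL_FOR_MASKED_LM_MAPPING[RobertaConfig] is RobertaForMaskedLM)  # (True, True)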
AUTO_MODEL_PRETRAINED_DOCSTRING = r"""
The model class to instantiate is selected based on the :obj:`model_type` property of the config object
(either passed as an argument or loaded from :obj:`pretrained_model_name_or_path` if possible), or when it's
missing, by falling back to using pattern matching on :obj:`pretrained_model_name_or_path`:
List options
The model is set in evaluation mode by default using ``model.eval()`` (so for instance, dropout modules are
deactivated). To train the model, you should first set it back in training mode with ``model.train()``
Args:
pretrained_model_name_or_path:
Can be either:
- A string with the `shortcut name` of a pretrained model to load from cache or download, e.g.,
``bert-base-uncased``.
- A string with the `identifier name` of a pretrained model that was user-uploaded to our S3, e.g.,
``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
model_args (additional positional arguments, `optional`):
Will be passed along to the underlying model ``__init__()`` method.
config (:class:`~transformers.PretrainedConfig`, `optional`):
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `shortcut name` string of a
pretrained model).
- The model was saved using :meth:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
state_dict (`Dict[str, torch.Tensor]`, `optional`):
A state dictionary to use instead of a state dictionary loaded from saved weights file.
This option can be used if you want to create a model from a pretrained configuration but load your own
weights. In this case though, you should check if using
:func:`~transformers.PreTrainedModel.save_pretrained` and
:func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a TensorFlow checkpoint save file (see docstring of
``pretrained_model_name_or_path`` argument).
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g.,
:obj:`{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each
request.
output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error
messages.
local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
use_cdn(:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not to use Cloudfront (a Content Delivery Network, or CDN) when searching for the model on
our S3 (faster). Should be set to :obj:`False` for checkpoints larger than 20GB.
kwargs (additional keyword arguments, `optional`):
Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
"""
class AutoModel:
r"""
This is a generic model class that will be instantiated as one of the base model classes of the library
    when created with the :meth:`~transformers.AutoModel.from_pretrained` class method or the
    :meth:`~transformers.AutoModel.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModel is designed to be instantiated "
"using the `AutoModel.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModel.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the base model classes of the library from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use :meth:`~transformers.AutoModel.from_pretrained` to load
the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModel
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModel.from_config(config)
"""
for config_class, model_class in MODEL_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys())
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_MAPPING)
@add_start_docstrings(
"Instantiate one of the base model classes of the library from a pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModel
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModel.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModel.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModel.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
for config_class, model_class in MODEL_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_MAPPING.keys())
)
)
class AutoModelForPreTraining:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with the
    architecture used for pretraining this model---when created with the
:meth:`~transformers.AutoModelForPreTraining.from_pretrained` class method or the
:meth:`~transformers.AutoModelForPreTraining.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForPreTraining is designed to be instantiated "
"using the `AutoModelForPreTraining.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForPreTraining.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_PRETRAINING_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with the architecture used for pretraining this
model---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use
:meth:`~transformers.AutoModelForPreTraining.from_pretrained` to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForPreTraining
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForPreTraining.from_config(config)
"""
for config_class, model_class in MODEL_FOR_PRETRAINING_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys())
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_PRETRAINING_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with the architecture used for pretraining this ",
"model---from a pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForPreTraining
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelForPreTraining.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForPreTraining.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForPreTraining.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
for config_class, model_class in MODEL_FOR_PRETRAINING_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_PRETRAINING_MAPPING.keys())
)
)
class AutoModelWithLMHead:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
    language modeling head---when created with the
:meth:`~transformers.AutoModelWithLMHead.from_pretrained` class method or the
:meth:`~transformers.AutoModelWithLMHead.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
.. warning::
This class is deprecated and will be removed in a future version. Please use
:class:`~transformers.AutoModelForCausalLM` for causal language models,
:class:`~transformers.AutoModelForMaskedLM` for masked language models and
:class:`~transformers.AutoModelForSeq2SeqLM` for encoder-decoder models.
"""
def __init__(self):
raise EnvironmentError(
"AutoModelWithLMHead is designed to be instantiated "
"using the `AutoModelWithLMHead.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelWithLMHead.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_WITH_LM_HEAD_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a language modeling head---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use :meth:`~transformers.AutoModelWithLMHead.from_pretrained`
to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelWithLMHead
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelWithLMHead.from_config(config)
"""
warnings.warn(
"The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
"`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
"`AutoModelForSeq2SeqLM` for encoder-decoder models.",
FutureWarning,
)
for config_class, model_class in MODEL_WITH_LM_HEAD_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys())
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_WITH_LM_HEAD_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a language modeling head---from a pretrained ",
"model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelWithLMHead
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelWithLMHead.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelWithLMHead.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelWithLMHead.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
warnings.warn(
"The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
"`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
"`AutoModelForSeq2SeqLM` for encoder-decoder models.",
FutureWarning,
)
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
for config_class, model_class in MODEL_WITH_LM_HEAD_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_WITH_LM_HEAD_MAPPING.keys())
)
)
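# Migration sketch for the deprecation warning above, using the same checkpoints as the
# examples elsewhere in this module (assumed to be the intended replacements):
#   AutoModelWithLMHead.from_pretrained('gpt2')              -> AutoModelForCausalLM.from_pretrained('gpt2')
#   AutoModelWithLMHead.from_pretrained('bert-base-uncased') -> AutoModelForMaskedLM.from_pretrained('bert-base-uncased')
#   AutoModelWithLMHead.from_pretrained('t5-base')           -> AutoModelForSeq2SeqLM.from_pretrained('t5-base')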
class AutoModelForCausalLM:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
    causal language modeling head---when created with the
:meth:`~transformers.AutoModelForCausalLM.from_pretrained` class method or the
:meth:`~transformers.AutoModelForCausalLM.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForCausalLM is designed to be instantiated "
"using the `AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForCausalLM.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_CAUSAL_LM_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a causal language modeling head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use :meth:`~transformers.AutoModelForCausalLM.from_pretrained`
to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForCausalLM
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('gpt2')
>>> model = AutoModelForCausalLM.from_config(config)
"""
for config_class, model_class in MODEL_FOR_CAUSAL_LM_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys())
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_CAUSAL_LM_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a causal language modeling head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForCausalLM
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelForCausalLM.from_pretrained('gpt2')
>>> # Update configuration during loading
>>> model = AutoModelForCausalLM.from_pretrained('gpt2', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/gpt2_tf_model_config.json')
>>> model = AutoModelForCausalLM.from_pretrained('./tf_model/gpt2_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
for config_class, model_class in MODEL_FOR_CAUSAL_LM_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_CAUSAL_LM_MAPPING.keys())
)
)
class AutoModelForMaskedLM:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
    masked language modeling head---when created with the
:meth:`~transformers.AutoModelForMaskedLM.from_pretrained` class method or the
    :meth:`~transformers.AutoModelForMaskedLM.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForMaskedLM is designed to be instantiated "
"using the `AutoModelForMaskedLM.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForMaskedLM.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_MASKED_LM_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a masked language modeling head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use :meth:`~transformers.AutoModelForMaskedLM.from_pretrained`
to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForMaskedLM
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForMaskedLM.from_config(config)
"""
for config_class, model_class in MODEL_FOR_MASKED_LM_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MASKED_LM_MAPPING.keys())
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_MASKED_LM_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a masked language modeling head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForMaskedLM
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelForMaskedLM.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForMaskedLM.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForMaskedLM.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
for config_class, model_class in MODEL_FOR_MASKED_LM_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__, cls.__name__, ", ".join(c.__name__ for c in MODEL_FOR_MASKED_LM_MAPPING.keys())
)
)
class AutoModelForSeq2SeqLM:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
    sequence-to-sequence language modeling head---when created with the
:meth:`~transformers.AutoModelForSeq2SeqLM.from_pretrained` class method or the
:meth:`~transformers.AutoModelForSeq2SeqLM.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForSeq2SeqLM is designed to be instantiated "
"using the `AutoModelForSeq2SeqLM.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForSeq2SeqLM.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a sequence-to-sequence language modeling
head---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use :meth:`~transformers.AutoModelForSeq2SeqLM.from_pretrained`
to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForSeq2SeqLM
>>> # Download configuration from S3 and cache.
            >>> config = AutoConfig.from_pretrained('t5-base')
>>> model = AutoModelForSeq2SeqLM.from_config(config)
"""
for config_class, model_class in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a sequence-to-sequence language modeling "
"head---from a pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForSeq2SeqLM
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelForSeq2SeqLM.from_pretrained('t5-base')
>>> # Update configuration during loading
>>> model = AutoModelForSeq2SeqLM.from_pretrained('t5-base', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/t5_tf_model_config.json')
>>> model = AutoModelForSeq2SeqLM.from_pretrained('./tf_model/t5_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
for config_class, model_class in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.keys()),
)
)
class AutoModelForSequenceClassification:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
    sequence classification head---when created with the
:meth:`~transformers.AutoModelForSequenceClassification.from_pretrained` class method or the
:meth:`~transformers.AutoModelForSequenceClassification.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForSequenceClassification is designed to be instantiated "
"using the `AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForSequenceClassification.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a sequence classification head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use
:meth:`~transformers.AutoModelForSequenceClassification.from_pretrained` to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForSequenceClassification
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForSequenceClassification.from_config(config)
"""
for config_class, model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a sequence classification head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForSequenceClassification
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForSequenceClassification.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForSequenceClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
for config_class, model_class in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.keys()),
)
)
class AutoModelForQuestionAnswering:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
    question answering head---when created with the
    :meth:`~transformers.AutoModelForQuestionAnswering.from_pretrained` class method or the
:meth:`~transformers.AutoModelForQuestionAnswering.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForQuestionAnswering is designed to be instantiated "
"using the `AutoModelForQuestionAnswering.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForQuestionAnswering.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_QUESTION_ANSWERING_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a question answering head---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use
:meth:`~transformers.AutoModelForQuestionAnswering.from_pretrained` to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForQuestionAnswering
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForQuestionAnswering.from_config(config)
"""
for config_class, model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_QUESTION_ANSWERING_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a question answering head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForQuestionAnswering
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForQuestionAnswering.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForQuestionAnswering.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
for config_class, model_class in MODEL_FOR_QUESTION_ANSWERING_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()),
)
)
class AutoModelForTokenClassification:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
    token classification head---when created with the
:meth:`~transformers.AutoModelForTokenClassification.from_pretrained` class method or the
:meth:`~transformers.AutoModelForTokenClassification.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForTokenClassification is designed to be instantiated "
"using the `AutoModelForTokenClassification.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForTokenClassification.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a token classification head---from a configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use
:meth:`~transformers.AutoModelForTokenClassification.from_pretrained` to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForTokenClassification
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForTokenClassification.from_config(config)
"""
for config_class, model_class in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a token classification head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForTokenClassification
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForTokenClassification.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForTokenClassification.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
for config_class, model_class in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.keys()),
)
)
class AutoModelForMultipleChoice:
r"""
This is a generic model class that will be instantiated as one of the model classes of the library---with a
    multiple choice classification head---when created with the
:meth:`~transformers.AutoModelForMultipleChoice.from_pretrained` class method or the
:meth:`~transformers.AutoModelForMultipleChoice.from_config` class method.
This class cannot be instantiated directly using ``__init__()`` (throws an error).
"""
def __init__(self):
raise EnvironmentError(
"AutoModelForMultipleChoice is designed to be instantiated "
"using the `AutoModelForMultipleChoice.from_pretrained(pretrained_model_name_or_path)` or "
"`AutoModelForMultipleChoice.from_config(config)` methods."
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_MULTIPLE_CHOICE_MAPPING, use_model_types=False)
def from_config(cls, config):
r"""
Instantiates one of the model classes of the library---with a multiple choice classification head---from a
configuration.
Note:
Loading a model from its configuration file does **not** load the model weights.
It only affects the model's configuration. Use
:meth:`~transformers.AutoModelForMultipleChoice.from_pretrained` to load the model weights.
Args:
config (:class:`~transformers.PretrainedConfig`):
The model class to instantiate is selected based on the configuration class:
List options
Examples::
>>> from transformers import AutoConfig, AutoModelForMultipleChoice
>>> # Download configuration from S3 and cache.
>>> config = AutoConfig.from_pretrained('bert-base-uncased')
>>> model = AutoModelForMultipleChoice.from_config(config)
"""
for config_class, model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.items():
if isinstance(config, config_class):
return model_class(config)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()),
)
)
@classmethod
@replace_list_option_in_docstrings(MODEL_FOR_MULTIPLE_CHOICE_MAPPING)
@add_start_docstrings(
"Instantiate one of the model classes of the library---with a multiple choice classification head---from a "
"pretrained model.",
AUTO_MODEL_PRETRAINED_DOCSTRING,
)
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Examples::
>>> from transformers import AutoConfig, AutoModelForMultipleChoice
>>> # Download model and configuration from S3 and cache.
>>> model = AutoModelForMultipleChoice.from_pretrained('bert-base-uncased')
>>> # Update configuration during loading
>>> model = AutoModelForMultipleChoice.from_pretrained('bert-base-uncased', output_attentions=True)
>>> model.config.output_attentions
True
>>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
>>> config = AutoConfig.from_json_file('./tf_model/bert_tf_model_config.json')
>>> model = AutoModelForMultipleChoice.from_pretrained('./tf_model/bert_tf_checkpoint.ckpt.index', from_tf=True, config=config)
"""
config = kwargs.pop("config", None)
if not isinstance(config, PretrainedConfig):
config, kwargs = AutoConfig.from_pretrained(
pretrained_model_name_or_path, return_unused_kwargs=True, **kwargs
)
for config_class, model_class in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.items():
if isinstance(config, config_class):
return model_class.from_pretrained(pretrained_model_name_or_path, *model_args, config=config, **kwargs)
raise ValueError(
"Unrecognized configuration class {} for this kind of AutoModel: {}.\n"
"Model type should be one of {}.".format(
config.__class__,
cls.__name__,
", ".join(c.__name__ for c in MODEL_FOR_MULTIPLE_CHOICE_MAPPING.keys()),
)
)
|
the-stack_106_17165
|
#!/bin/python
#
# simple restful client tester for the API application.
#
import requests
import json
import uuid
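# Test fixtures: the API is assumed to be running locally on port 5000 (base_url below);
# a random UUID-based email keeps repeated runs from colliding on registration.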
base_url = 'http://localhost:5000/'
user_name = 'test'
user_email = str(uuid.uuid4()) + "@example.net"
user_pass = '1234'
# register new user
print('=> Registering new user')
register = requests.post(base_url + 'profile',
params={'user': user_name,
'email': user_email,
'password': user_pass})
print(register.text)
# creating new token
print('=> Create new token')
token_request = requests.post(base_url + 'token',
auth=(user_email, user_pass),
params={'key': 'master'})
print(token_request.text)
# get token for newly registered user.
token = token_request.json().get('unhashed')
# get profile
print('=> Fetch profile')
profile_request = requests.get(base_url + 'profile',
auth=(token, 'unused'))
print(profile_request.text)
# set username
print('=> Setting username')
profile_username_request = requests.put(base_url + 'profile',
auth=(token, 'unused'),
params={'username': str(uuid.uuid4())})
print(profile_username_request.text)
# create settings object
print('=> Create new setting')
new_setting = requests.post(base_url + 'setting',
auth=(token, 'unused'),
params={'name': 'setting1', 'json': '{1,2,3,4}'})
print(new_setting.text)
setting_name = new_setting.json().get('name')
# get setting
print('=> Fetching new setting')
setting = requests.get(base_url + 'setting/' + setting_name,
auth=(token, 'unused'))
print(setting.text)
# update setting
print('=> Updating new setting')
updated_setting = requests.put(base_url + 'setting/' + setting_name,
auth=(token, 'unused'),
params={'json': '{1,2,3,4,5,6,7}'})
print(updated_setting.text)
print('=> Delete setting')
print(requests.delete(base_url + 'setting/' + setting_name,
auth=(token, 'unused')).text)
# create mark
print('=> Create new mark')
mark_payload = {'type': 'bookmark',
'title': 'test',
'tags': 'tag1,tag2,tag3',
'url': 'http://example.org'
}
new_mark = requests.post(base_url + 'mark',
auth=(token, 'unused'),
params=mark_payload)
print(new_mark.text)
# get new mark id
mark_id = str(new_mark.json().get('id'))
# update mark
print('=> Update mark')
update_mark = requests.put(base_url + 'mark/' + mark_id,
auth=(token, 'unused'),
params={'title': 'updated title'})
print(update_mark.text)
# get all marks
print('=> Get all marks')
marks = requests.get(base_url + 'marks',
auth=(token, 'unused'))
print(marks.text)
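# Cleanup sketch -- assumes the API exposes DELETE on /mark/<id> symmetrically to the
# /setting/<name> delete used above; adjust the route if it differs.
print('=> Delete mark')
print(requests.delete(base_url + 'mark/' + mark_id,
                      auth=(token, 'unused')).text)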
|
the-stack_106_17166
|
import os
from django.db.models import Q
from django.urls import reverse
from django.conf import settings
from django.shortcuts import render
from django.core.cache import cache
from django.views.generic import View
from utils.mixin import LoginRequiredMixin
from utils.aliyun_utility import AliyunObjectStorage
from django.http import JsonResponse, HttpResponse, FileResponse
from Apps.ARExperiences.models import ARExperienceModelV2, ARExperienceResourceV2
from django.forms.models import model_to_dict
from utils.create_md5 import create_md5
import json
import time
# Create your views here.
aliyunStorage = AliyunObjectStorage()
dashboard_project_detail_field = ['project_id','project_name','app_uid','project_brief','project_status',
'project_permission','project_description','project_recommend','project_tags',
'project_weight','project_icon','create_time','update_time',
'project_header','project_preview']
dashboard_project_asset_detail_field =['project_id','json_url','bundle_url','bundle_size','platform_type',
'create_time','update_time']
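# Field whitelists handed to model_to_dict() below so the dashboard endpoints only expose
# the columns the front end needs.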
class DashboardProjectListView(LoginRequiredMixin, View):
def get(self, request, app_uid, arexperience_uid):
"""Show ARExperiences project detail"""
query_key = f"{request.user.user_uid}_{app_uid}_{arexperience_uid}"
data = cache.get(query_key)
if data is None:
arexperience = ARExperienceModelV2.objects.get(app_uid=app_uid, project_id=arexperience_uid)
data = model_to_dict(arexperience,dashboard_project_detail_field)
data['create_time']=arexperience.create_time
data['update_time']=arexperience.update_time
try:
resourcesQuerySet = ARExperienceResourceV2.objects.filter(project_id=arexperience_uid)
resources = []
for resource in resourcesQuerySet:
resources.append(model_to_dict(resource,dashboard_project_asset_detail_field))
data['resources'] = resources
            except Exception:
                # Resource lookup failed; expose an empty list instead of assigning an unused local.
                data['resources'] = []
cache.set(query_key,data,settings.API_CACHE_EXPIRED)
return JsonResponse({'code': 200, 'message': 'Success', 'data': data})
def post(self, request):
response = {}
method_type = request.POST.get('method_type')
if method_type == 'UpdateProject':
response = self.update_project(request)
elif method_type == 'UploadARResources':
response = self.upload_ar_resources(request)
return JsonResponse(response)
def update_project(self,request):
ret = {'code':200,'message':None,'data':None}
project_name = request.POST.get('project_name')
project_id = request.POST.get('project_id')
app_uid = request.POST.get('app_uid')
if not all([app_uid,project_id,project_name]):
ret['code'] = 201
ret['message'] = 'Incomplete data'
return ret
try:
arexperience = ARExperienceModelV2.objects.get(app_uid=app_uid,project_id=project_id)
except ARExperienceModelV2.DoesNotExist:
arexperience = None
ret['code'] = 201
ret['message'] = 'The ARExperience is not exsit!'
return ret
if arexperience.project_name != project_name:
arexperience.project_name = project_name
project_description = request.POST.get('project_description')
project_support_url = request.POST.get('project_support_url')
project_brief = request.POST.get('project_brief')
project_weight = request.POST.get('project_weight')
project_permission = request.POST.get('project_permission')
project_status = request.POST.get('project_status')
project_recommend = request.POST.get('project_recommend')
project_header= request.FILES.get("project_header")
project_icon= request.FILES.get("project_icon")
project_preview_will_delete_id = json.loads(request.POST.get('project_preview_delete_id'))
project_preview_count = int(request.POST.get("project_preview_count"))
project_tags = json.loads(request.POST.get('project_tags'))
if project_description is not None:
arexperience.project_description = project_description
if project_support_url is not None:
arexperience.project_support_url = project_support_url
if project_brief is not None:
arexperience.project_brief = project_brief
if project_weight is not None:
arexperience.project_weight = project_weight
if project_permission is not None:
arexperience.project_permission = project_permission
if project_status is not None:
arexperience.project_status = project_status
if project_recommend is not None:
arexperience.project_recommend = project_recommend
arexperience.project_tags = project_tags
if project_header is not None:
delete_img_from_oss(request.user.user_uid,app_uid,arexperience.project_id,arexperience.project_header)
project_header_image_url = save_image_to_oss(request.user.user_uid,app_uid,arexperience.project_id,project_header,custom_name="%s_banner" %(project_name))
arexperience.project_header = project_header_image_url
if project_icon is not None:
delete_img_from_oss(request.user.user_uid,app_uid,arexperience.project_id,arexperience.project_icon)
project_icon_image_url = save_image_to_oss(request.user.user_uid,app_uid,arexperience.project_id,project_icon,custom_name="%s_icon"%(project_name))
arexperience.project_icon = project_icon_image_url
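        # Remove any preview images the client flagged for deletion, then persist the remaining URLs.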
if project_preview_will_delete_id is not None and len(project_preview_will_delete_id)>0:
imgs = json.loads(arexperience.project_preview)
duplicate_imgs = json.loads(arexperience.project_preview)
for img_idx in range(0,len(project_preview_will_delete_id)):
idx = project_preview_will_delete_id[img_idx]
will_remove_img = imgs[idx]
delete_img_from_oss(request.user.user_uid,app_uid,project_id,will_remove_img)
duplicate_imgs.remove(will_remove_img)
arexperience.project_preview = json.dumps(duplicate_imgs)
if project_preview_count > 0:
if len(arexperience.project_preview)>0:
preview_img_url = json.loads(arexperience.project_preview)
else:
preview_img_url = []
for idx in range(0,project_preview_count):
preview_img = request.FILES.get('project_preview_%s' % (idx))
project_preview_image_url = save_image_to_oss(request.user.user_uid,app_uid,arexperience.project_id,preview_img)
preview_img_url.append(project_preview_image_url)
arexperience.project_preview = json.dumps(preview_img_url)
try:
arexperience.save()
except:
ret['code'] = 201
ret['message'] = 'Duplicate project name'
return ret
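        # Drop every cached view that embeds this project (dashboard and public API) so the edit is visible immediately.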
query_key = f"{request.user.user_uid}_{app_uid}_{project_id}"
query_app_uid_key = f"{request.user.user_uid}_{app_uid}"
cache.delete(query_app_uid_key)
cache.delete(query_key)
cache.delete(f"api_{project_id}_get_arexperience_detail")
cache.delete(f"api_{request.user.user_uid}_get_arexperience_page")
cache.delete(f"api_{request.user.user_uid}_{app_uid}_get_arexperiencepubliclist")
cache.delete(f"api_{request.user.user_uid}_{app_uid}_get_recommendList")
query_by_app_uid_cache_key = f"api_{request.user.user_uid}_{app_uid}_get_arexperiencebytagslist"
cache.delete(query_by_app_uid_cache_key)
ret['code'] = 200
ret['message'] = 'Success'
return ret
def upload_ar_resources(self,request):
"""Upload ARExperiences"""
try:
app_uid = request.POST.get('app_uid')
project_id = request.POST.get('project_id')
platform = request.POST.get('platform')
filesIndex = ['arexperience', 'json']
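            # Two files are expected per platform: the '.arexperience' asset bundle and its JSON manifest.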
files = []
file_size = 0
upload_response = ''
for fileidx in filesIndex:
files.append(request.FILES.get(fileidx))
user_uid = request.user.user_uid
assets_save_folder = os.path.join(str(user_uid), str(app_uid), str(project_id), settings.AREXPERIENCE_URL, platform)
assets_save_folder = assets_save_folder.replace('\\', '/')
try:
arexperience_assets = ARExperienceResourceV2.objects.get(project_id=project_id,platform_type=platform)
except Exception as e:
print(e)
arexperience_assets = ARExperienceResourceV2.objects.create(project_id=project_id,platform_type=platform)
for file in files:
oss_path = "%s/%s" % (assets_save_folder, file.name)
if 'json' in file.name:
arexperience_assets.json_url = os.path.join(settings.OSS_BASE_URL, oss_path)
elif 'arexperience' in file.name:
arexperience_assets.bundle_url = os.path.join(settings.OSS_BASE_URL, oss_path)
arexperience_assets.platform_type = platform
file_size += file.size
data_bytes = file.read()
upload_response = aliyunStorage._save(
name=oss_path, content=data_bytes, progress_callback=percentage)
arexperience_assets.bundle_size = round(file_size/1048576,2)
arexperience_assets.save()
arexperience = ARExperienceModelV2.objects.get(app_uid=app_uid, project_id=project_id)
arexperience.save()
data = {
'json_url': arexperience_assets.json_url,
'bundle_url': arexperience_assets.bundle_url,
'bundle_size': arexperience_assets.bundle_size,
'platform_type': arexperience_assets.platform_type,
}
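            # Invalidate all cached views that embed this project so clients fetch the new bundle immediately.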
query_key = f"{user_uid}_{app_uid}_{project_id}"
query_app_uid_key = f"{user_uid}_{app_uid}"
cache.delete(query_app_uid_key)
cache.delete(query_key)
query_by_app_uid_cache_key = f"api_{request.user.user_uid}_{app_uid}_get_arexperiencebytagslist"
cache.delete(query_by_app_uid_cache_key)
cache.delete(f"api_{project_id}_get_arexperience_detail")
cache.delete(f"api_{request.user.user_uid}_get_arexperience_page")
cache.delete(f"api_{project_id}_get_arresources")
public_project_list_cache_key = f"api_{request.user.user_uid}_{app_uid}_get_arexperiencepubliclist"
cache.delete(public_project_list_cache_key)
            return {'code': upload_response.status, 'message': 'The ARExperience resources were uploaded successfully!', 'data': data}
except Exception as e:
print(e)
return {'code': 201, 'message': e.__str__()}
def percentage(consumed_bytes, total_bytes):
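    """Aliyun OSS upload progress callback: prints the completed percentage in place."""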
if total_bytes:
rate = int(100 * (float(consumed_bytes) / float(total_bytes)))
print('\r{0}% '.format(rate), end='')
def save_image_to_oss(user_uid,app_uid,project_id,file,custom_name=None):
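    """Upload an image to OSS under <user_uid>/<app_uid>/<project_id>/ and return its public URL.
    A custom_name keeps the original extension; otherwise an MD5-derived .jpg name is used."""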
assets_save_folder = os.path.join(str(user_uid), str(app_uid), str(project_id))
assets_save_folder = assets_save_folder.replace('\\', '/')
if custom_name is not None:
fileName, fileExtension = os.path.splitext(file.name)
fileName = "%s%s"%(custom_name,fileExtension)
else:
fileName = f"{create_md5(file.name).replace('.jpg', '')}.jpg"
oss_path = "%s/%s" % (assets_save_folder, fileName)
from utils.aliyun_utility import AliyunObjectStorage
aliyunStorage = AliyunObjectStorage()
aliyunStorage._save(name=oss_path, content=file.read(), progress_callback=None)
return aliyunStorage.url(oss_path)
def delete_img_from_oss(user_uid,app_uid,project_id,url):
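    """Delete a previously uploaded image from OSS, deriving the object key from the URL's basename."""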
file = os.path.basename(url)
assets_save_folder = os.path.join(str(user_uid), str(app_uid), str(project_id))
assets_save_folder = assets_save_folder.replace('\\', '/')
oss_path = "%s/%s" % (assets_save_folder, file)
from utils.aliyun_utility import AliyunObjectStorage
aliyunStorage = AliyunObjectStorage()
aliyunStorage.delete(name=oss_path)
|
the-stack_106_17167
|
from setuptools import setup, find_packages
import os
BASE_DIR = os.path.dirname(__file__)
with open(os.path.join(BASE_DIR, 'requirements.txt')) as _file:
requirements = [line.strip() for line in _file]
setup(
name='pydicts',
version='v0.1.1',
packages=find_packages(),
install_requires=requirements,
license='MIT',
author='Victor Matheus de Castro Geraldo',
author_email='[email protected]',
description='Unofficial Python API for Dicio and DicionarioInformal'
)
|
the-stack_106_17169
|
#!/usr/bin/python
import calendar
import Tkinter
import ttk
import tkFont
def get_calendar(locale, fwday):
# instantiate proper calendar class
if locale is None:
return calendar.TextCalendar(fwday)
else:
return calendar.LocaleTextCalendar(fwday, locale)
class Calendar(ttk.Frame):
# XXX ToDo: cget and configure
datetime = calendar.datetime.datetime
timedelta = calendar.datetime.timedelta
def __init__(self, master=None, **kw):
"""
WIDGET-SPECIFIC OPTIONS
locale, firstweekday, year, month, selectbackground,
selectforeground
"""
        # remove custom options from kw before initializing ttk.Frame
fwday = kw.pop('firstweekday', calendar.MONDAY)
year = kw.pop('year', self.datetime.now().year)
month = kw.pop('month', self.datetime.now().month)
locale = kw.pop('locale', None)
sel_bg = kw.pop('selectbackground', '#ecffc4')
sel_fg = kw.pop('selectforeground', '#05640e')
self._date = self.datetime(year, month, 1)
self._selection = None # no date selected
ttk.Frame.__init__(self, master, **kw)
self._cal = get_calendar(locale, fwday)
self.__setup_styles() # creates custom styles
self.__place_widgets() # pack/grid used widgets
self.__config_calendar() # adjust calendar columns and setup tags
# configure a canvas, and proper bindings, for selecting dates
self.__setup_selection(sel_bg, sel_fg)
# store items ids, used for insertion later
self._items = [self._calendar.insert('', 'end', values='')
for _ in range(6)]
# insert dates in the currently empty calendar
self._build_calendar()
# set the minimal size for the widget
#self._calendar.bind('<Map>', self.__minsize)
def __setitem__(self, item, value):
if item in ('year', 'month'):
raise AttributeError("attribute '%s' is not writeable" % item)
elif item == 'selectbackground':
self._canvas['background'] = value
elif item == 'selectforeground':
            # 'fill' (not 'item') is the option that controls the selection text colour
            self._canvas.itemconfigure(self._canvas.text, fill=value)
else:
ttk.Frame.__setitem__(self, item, value)
def __getitem__(self, item):
if item in ('year', 'month'):
return getattr(self._date, item)
elif item == 'selectbackground':
return self._canvas['background']
elif item == 'selectforeground':
return self._canvas.itemcget(self._canvas.text, 'fill')
else:
r = ttk.tclobjs_to_py({item: ttk.Frame.__getitem__(self, item)})
return r[item]
def __setup_styles(self):
# custom ttk styles
style = ttk.Style(self.master)
arrow_layout = lambda dir: (
[('Button.focus', {'children': [('Button.%sarrow' % dir, None)]})]
)
style.layout('L.TButton', arrow_layout('left'))
style.layout('R.TButton', arrow_layout('right'))
def __place_widgets(self):
# header frame and its widgets
hframe = ttk.Frame(self)
lbtn = ttk.Button(hframe, style='L.TButton', command=self._prev_month)
rbtn = ttk.Button(hframe, style='R.TButton', command=self._next_month)
self._header = ttk.Label(hframe, width=15, anchor='center')
# the calendar
#self._calendar = ttk.Treeview(show='', selectmode='none', height=7)
self._calendar = ttk.Treeview(self, show='', selectmode='none', height=7)
# pack the widgets
hframe.pack(in_=self, side='top', pady=4, anchor='center')
lbtn.grid(in_=hframe)
self._header.grid(in_=hframe, column=1, row=0, padx=12)
rbtn.grid(in_=hframe, column=2, row=0)
self._calendar.pack(in_=self, expand=1, fill='both', side='bottom')
def __config_calendar(self):
cols = self._cal.formatweekheader(3).split()
self._calendar['columns'] = cols
self._calendar.tag_configure('header', background='grey90')
self._calendar.insert('', 'end', values=cols, tag='header')
# adjust its columns width
font = tkFont.Font(family="Helvetica",size=14,weight="bold")
maxwidth = max(font.measure(col) for col in cols)
for col in cols:
self._calendar.column(col, width=maxwidth, minwidth=maxwidth,
anchor='e')
def __setup_selection(self, sel_bg, sel_fg):
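        # The selection is drawn as a small Canvas overlay placed over the clicked Treeview
        # cell; clicking elsewhere or resizing the calendar hides it again.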
self._font = tkFont.Font(family="Helvetica",size=14,weight="bold")
self._canvas = canvas = Tkinter.Canvas(self._calendar,
background=sel_bg, borderwidth=0, highlightthickness=0)
canvas.text = canvas.create_text(0, 0, fill=sel_fg, anchor='w')
canvas.bind('<ButtonPress-1>', lambda evt: canvas.place_forget())
self._calendar.bind('<Configure>', lambda evt: canvas.place_forget())
self._calendar.bind('<ButtonPress-1>', self._pressed)
#def __minsize(self, evt):
# width, height = self._calendar.master.geometry().split('x')
# height = height[:height.index('+')]
# self._calendar.master.minsize(width, height)
def _build_calendar(self):
year, month = self._date.year, self._date.month
# update header text (Month, YEAR)
header = self._cal.formatmonthname(year, month, 0)
self._header['text'] = header.title()
# update calendar shown dates
cal = self._cal.monthdayscalendar(year, month)
for indx, item in enumerate(self._items):
week = cal[indx] if indx < len(cal) else []
fmt_week = [('%02d' % day) if day else '' for day in week]
self._calendar.item(item, values=fmt_week)
def _show_selection(self, text, bbox):
"""Configure canvas for a new selection."""
x, y, width, height = bbox
textw = self._font.measure(text)
canvas = self._canvas
canvas.configure(width=width, height=height)
canvas.coords(canvas.text, width - textw, height / 2 - 1)
canvas.itemconfigure(canvas.text, text=text)
canvas.place(in_=self._calendar, x=x, y=y)
# Callbacks
def _pressed(self, evt):
"""Clicked somewhere in the calendar."""
x, y, widget = evt.x, evt.y, evt.widget
item = widget.identify_row(y)
column = widget.identify_column(x)
if not column or not item in self._items:
# clicked in the weekdays row or just outside the columns
return
item_values = widget.item(item)['values']
if not len(item_values): # row is empty for this month
return
text = item_values[int(column[1]) - 1]
if not text: # date is empty
return
bbox = widget.bbox(item, column)
if not bbox: # calendar not visible yet
return
# update and then show selection
text = '%02d' % text
self._selection = (text, item, column)
self._show_selection(text, bbox)
def _prev_month(self):
"""Updated calendar to show the previous month."""
self._canvas.place_forget()
self._date = self._date - self.timedelta(days=1)
self._date = self.datetime(self._date.year, self._date.month, 1)
        self._build_calendar() # reconstruct calendar
def _next_month(self):
"""Update calendar to show the next month."""
self._canvas.place_forget()
year, month = self._date.year, self._date.month
self._date = self._date + self.timedelta(
days=calendar.monthrange(year, month)[1] + 1)
self._date = self.datetime(self._date.year, self._date.month, 1)
self._build_calendar() # reconstruct calendar
# Properties
@property
def selection(self):
"""Return a datetime representing the current selected date."""
if not self._selection:
return None
year, month = self._date.year, self._date.month
dateArray = [year, month, int(self._selection[0]),calendar.weekday(year, month, int(self._selection[0]))]
#return self.datetime(year, month, int(self._selection[0]))
return dateArray
def print_date(ttkcal):
#print "the date is:", ttkcal.selection
return ttkcal.selection
def chooseDate(ttkcal):
print(print_date(ttkcal))
quit()
def test():
import sys
root = Tkinter.Tk()
root.title('Ttk Calendar')
ttkcal = Calendar(firstweekday=calendar.SUNDAY)
ttkcal.pack(expand=1, fill='both')
if 'win' not in sys.platform:
style = ttk.Style()
style.theme_use('clam')
Tkinter.Button(root, text="Date", command=lambda: chooseDate(ttkcal)).pack()
root.mainloop()
if __name__ == '__main__':
test()
|
the-stack_106_17171
|
from __future__ import absolute_import
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.sites.shortcuts import get_current_site
from django.shortcuts import get_object_or_404, render
from django.views.decorators.csrf import csrf_protect
import django_comments
from django_comments import signals
from django_comments.views.utils import next_redirect, confirmation_view
@csrf_protect
@login_required
def flag(request, comment_id, next=None):
"""
Flags a comment. Confirmation on GET, action on POST.
Templates: :template:`comments/flag.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(django_comments.get_model(),
pk=comment_id,
site__pk=get_current_site(request).pk)
# Flag on POST
if request.method == 'POST':
perform_flag(request, comment)
return next_redirect(request, fallback=next or 'comments-flag-done',
c=comment.pk)
# Render a form on GET
else:
return render(request, 'comments/flag.html', {'comment': comment, "next": next})
@csrf_protect
@permission_required("django_comments.can_moderate")
def delete(request, comment_id, next=None):
"""
Deletes a comment. Confirmation on GET, action on POST. Requires the "can
moderate comments" permission.
Templates: :template:`comments/delete.html`,
Context:
comment
the flagged `comments.comment` object
"""
comment = get_object_or_404(django_comments.get_model(),
pk=comment_id,
site__pk=get_current_site(request).pk)
# Delete on POST
if request.method == 'POST':
# Flag the comment as deleted instead of actually deleting it.
perform_delete(request, comment)
return next_redirect(request, fallback=next or 'comments-delete-done',
c=comment.pk)
# Render a form on GET
else:
return render(request, 'comments/delete.html', {'comment': comment, "next": next})
@csrf_protect
@permission_required("django_comments.can_moderate")
def approve(request, comment_id, next=None):
"""
Approve a comment (that is, mark it as public and non-removed). Confirmation
on GET, action on POST. Requires the "can moderate comments" permission.
Templates: :template:`comments/approve.html`,
Context:
comment
the `comments.comment` object for approval
"""
comment = get_object_or_404(django_comments.get_model(),
pk=comment_id,
site__pk=get_current_site(request).pk)
    # Approve on POST
if request.method == 'POST':
# Flag the comment as approved.
perform_approve(request, comment)
return next_redirect(request, fallback=next or 'comments-approve-done',
c=comment.pk)
# Render a form on GET
else:
return render(request, 'comments/approve.html', {'comment': comment, "next": next})
# The following functions actually perform the various flag/approve/delete
# actions. They've been broken out into separate functions so that they
# may be called from admin actions.
def perform_flag(request, comment):
"""
Actually perform the flagging of a comment from a request.
"""
flag, created = django_comments.models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=django_comments.models.CommentFlag.SUGGEST_REMOVAL
)
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
def perform_delete(request, comment):
flag, created = django_comments.models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=django_comments.models.CommentFlag.MODERATOR_DELETION
)
comment.is_removed = True
comment.save()
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
def perform_approve(request, comment):
flag, created = django_comments.models.CommentFlag.objects.get_or_create(
comment=comment,
user=request.user,
flag=django_comments.models.CommentFlag.MODERATOR_APPROVAL,
)
comment.is_removed = False
comment.is_public = True
comment.save()
signals.comment_was_flagged.send(
sender=comment.__class__,
comment=comment,
flag=flag,
created=created,
request=request,
)
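# Illustrative sketch (not part of django_comments): because the perform_*
# helpers above only need a request and a comment, they can also be reused
# from a ModelAdmin action. The action name below is hypothetical.
#
#   def flag_selected_comments(modeladmin, request, queryset):
#       for comment in queryset:
#           perform_flag(request, comment)
#   flag_selected_comments.short_description = "Flag selected comments"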
# Confirmation views.
flag_done = confirmation_view(
template="comments/flagged.html",
doc='Displays a "comment was flagged" success page.'
)
delete_done = confirmation_view(
template="comments/deleted.html",
doc='Displays a "comment was deleted" success page.'
)
approve_done = confirmation_view(
template="comments/approved.html",
doc='Displays a "comment was approved" success page.'
)
|
the-stack_106_17173
|
import unittest
from atc_tools.time import TimeSequence
class TimeTest(unittest.TestCase):
def test_01_sequence(self):
seq = TimeSequence("2021/01/10 14:23", delta_seconds=3600)
t1 = seq.next()
t2 = seq.next()
t3 = next(seq)
t4 = seq.reverse(2)
self.assertEqual(t4,t1)
self.assertEqual((t3-t2).total_seconds(), 3600)
|
the-stack_106_17175
|
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard Professional Edition IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3PAAS-RC1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from .alarm_condition_spec import AlarmConditionSpec
class DurationAlarmConditionSpec(AlarmConditionSpec):
"""
Do not edit the class manually.
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'predicate': 'FilterPredicateValuelong',
'unit': 'str'
}
if hasattr(AlarmConditionSpec, "swagger_types"):
swagger_types.update(AlarmConditionSpec.swagger_types)
attribute_map = {
'predicate': 'predicate',
'unit': 'unit'
}
if hasattr(AlarmConditionSpec, "attribute_map"):
attribute_map.update(AlarmConditionSpec.attribute_map)
def __init__(self, predicate=None, unit=None, *args, **kwargs): # noqa: E501
"""DurationAlarmConditionSpec - a model defined in Swagger""" # noqa: E501
self._predicate = None
self._unit = None
self.discriminator = None
if predicate is not None:
self.predicate = predicate
if unit is not None:
self.unit = unit
AlarmConditionSpec.__init__(self, *args, **kwargs)
@property
def predicate(self):
"""Gets the predicate of this DurationAlarmConditionSpec. # noqa: E501
:return: The predicate of this DurationAlarmConditionSpec. # noqa: E501
:rtype: FilterPredicateValuelong
"""
return self._predicate
@predicate.setter
def predicate(self, predicate):
"""Sets the predicate of this DurationAlarmConditionSpec.
:param predicate: The predicate of this DurationAlarmConditionSpec. # noqa: E501
:type: FilterPredicateValuelong
"""
self._predicate = predicate
@property
def unit(self):
"""Gets the unit of this DurationAlarmConditionSpec. # noqa: E501
:return: The unit of this DurationAlarmConditionSpec. # noqa: E501
:rtype: str
"""
return self._unit
@unit.setter
def unit(self, unit):
"""Sets the unit of this DurationAlarmConditionSpec.
:param unit: The unit of this DurationAlarmConditionSpec. # noqa: E501
:type: str
"""
allowed_values = ["DAYS", "HOURS", "MICROSECONDS", "MILLISECONDS", "MINUTES", "NANOSECONDS", "SECONDS"] # noqa: E501
if unit not in allowed_values:
raise ValueError(
"Invalid value for `unit` ({0}), must be one of {1}" # noqa: E501
.format(unit, allowed_values)
)
self._unit = unit
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DurationAlarmConditionSpec, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DurationAlarmConditionSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
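# Illustrative sketch (not generated code): constructing and serialising the
# model; it assumes the parent AlarmConditionSpec needs no extra constructor
# arguments, and "SECONDS" is one of the allowed_values checked in the setter.
#
#   spec = DurationAlarmConditionSpec(unit="SECONDS")
#   spec.to_dict()  # includes 'unit': 'SECONDS' plus inherited fields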
|
the-stack_106_17176
|
"""
Octahedral Lambda
=================
"""
from ..metal_complex import MetalComplex
from ..vertices import MetalVertex, BiDentateLigandVertex
from ...topology_graph import Edge
class OctahedralLambda(MetalComplex):
"""
Represents a metal complex topology graph.
.. moldoc::
import moldoc.molecule as molecule
import stk
bb1 = stk.BuildingBlock(
smiles='[Fe+2]',
functional_groups=(
stk.SingleAtom(stk.Fe(0, charge=2))
for i in range(6)
),
position_matrix=[[0, 0, 0]],
)
bb2 = stk.BuildingBlock(
smiles=(
'C1=CC=NC(C2=CC=CC(C3=C'
'C=CC=N3)=C2)=C1'
),
functional_groups=[
stk.SmartsFunctionalGroupFactory(
smarts='[#6]~[#7X2]~[#6]',
bonders=(1, ),
deleters=(),
),
],
)
complex = stk.ConstructedMolecule(
topology_graph=stk.metal_complex.OctahedralLambda(
metals=bb1,
ligands=bb2,
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
complex.get_atoms(),
complex.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=(
1
if bond.get_order() == 9
else bond.get_order()
),
) for bond in complex.get_bonds()
),
)
Metal building blocks with at least six functional groups are
required for this topology.
Ligand building blocks with two functional groups are required for
this topology graph.
When using a :class:`dict` for initialization, a
:class:`.BuildingBlock` needs to be assigned to each of the
following numbers:
| metals: (0, )
| ligands: (0, 1, 2)
See :class:`.MetalComplex` for more details and examples.
"""
_metal_vertex_prototypes = (
MetalVertex(0, [0, 0, 0]),
)
_ligand_vertex_prototypes = (
BiDentateLigandVertex(1, [2.5, 2.5, 0]),
BiDentateLigandVertex(2, [0, -2.5, -2.5]),
BiDentateLigandVertex(3, [-2.5, 0, 2.5]),
)
# The ordering here matters for the stereochemistry.
# The first edge to appear between two vertices determines the
# directionality of the binding ligand.
_edge_prototypes = (
Edge(
id=0,
vertex1=_metal_vertex_prototypes[0],
vertex2=_ligand_vertex_prototypes[0],
position=[2.5, 0, 0],
),
Edge(
id=1,
vertex1=_metal_vertex_prototypes[0],
vertex2=_ligand_vertex_prototypes[0],
position=[0, 2.5, 0],
),
Edge(
id=2,
vertex1=_metal_vertex_prototypes[0],
vertex2=_ligand_vertex_prototypes[2],
position=[0, 0, 2.5],
),
Edge(
id=3,
vertex1=_metal_vertex_prototypes[0],
vertex2=_ligand_vertex_prototypes[2],
position=[-2.5, 0, 0],
),
Edge(
id=4,
vertex1=_metal_vertex_prototypes[0],
vertex2=_ligand_vertex_prototypes[1],
position=[0, -2.5, 0],
),
Edge(
id=5,
vertex1=_metal_vertex_prototypes[0],
vertex2=_ligand_vertex_prototypes[1],
position=[0, 0, -2.5],
),
)
|
the-stack_106_17177
|
# copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""HTML formatting drivers for ureports"""
from __future__ import generators
__docformat__ = "restructuredtext en"
from ..ureports import HTMLWriter
class DocbookWriter(HTMLWriter):
"""format layouts as HTML"""
def begin_format(self, layout):
"""begin to format a layout"""
super(HTMLWriter, self).begin_format(layout)
if self.snippet is None:
self.writeln('<?xml version="1.0" encoding="ISO-8859-1"?>')
self.writeln("""
<book xmlns:xi='http://www.w3.org/2001/XInclude'
lang='fr'>
""")
def end_format(self, layout):
"""finished to format a layout"""
if self.snippet is None:
self.writeln('</book>')
def visit_section(self, layout):
"""display a section (using <chapter> (level 0) or <section>)"""
if self.section == 0:
tag = "chapter"
else:
tag = "section"
self.section += 1
self.writeln(self._indent('<%s%s>' % (tag, self.handle_attrs(layout))))
self.format_children(layout)
self.writeln(self._indent('</%s>'% tag))
self.section -= 1
def visit_title(self, layout):
"""display a title using <title>"""
self.write(self._indent(' <title%s>' % self.handle_attrs(layout)))
self.format_children(layout)
self.writeln('</title>')
def visit_table(self, layout):
"""display a table as html"""
self.writeln(self._indent(' <table%s><title>%s</title>' \
% (self.handle_attrs(layout), layout.title)))
self.writeln(self._indent(' <tgroup cols="%s">'% layout.cols))
for i in range(layout.cols):
self.writeln(self._indent(' <colspec colname="c%s" colwidth="1*"/>' % i))
table_content = self.get_table_content(layout)
# write headers
if layout.cheaders:
self.writeln(self._indent(' <thead>'))
self._write_row(table_content[0])
self.writeln(self._indent(' </thead>'))
table_content = table_content[1:]
elif layout.rcheaders:
self.writeln(self._indent(' <thead>'))
self._write_row(table_content[-1])
self.writeln(self._indent(' </thead>'))
table_content = table_content[:-1]
# write body
self.writeln(self._indent(' <tbody>'))
for i in range(len(table_content)):
row = table_content[i]
self.writeln(self._indent(' <row>'))
for j in range(len(row)):
cell = row[j] or ' '
self.writeln(self._indent(' <entry>%s</entry>' % cell))
self.writeln(self._indent(' </row>'))
self.writeln(self._indent(' </tbody>'))
self.writeln(self._indent(' </tgroup>'))
self.writeln(self._indent(' </table>'))
def _write_row(self, row):
"""write content of row (using <row> <entry>)"""
self.writeln(' <row>')
for j in range(len(row)):
cell = row[j] or ' '
self.writeln(' <entry>%s</entry>' % cell)
self.writeln(self._indent(' </row>'))
def visit_list(self, layout):
"""display a list (using <itemizedlist>)"""
self.writeln(self._indent(' <itemizedlist%s>' % self.handle_attrs(layout)))
for row in list(self.compute_content(layout)):
self.writeln(' <listitem><para>%s</para></listitem>' % row)
self.writeln(self._indent(' </itemizedlist>'))
def visit_paragraph(self, layout):
"""display links (using <para>)"""
self.write(self._indent(' <para>'))
self.format_children(layout)
self.writeln('</para>')
def visit_span(self, layout):
"""display links (using <p>)"""
#TODO: translate in docbook
self.write('<literal %s>' % self.handle_attrs(layout))
self.format_children(layout)
self.write('</literal>')
def visit_link(self, layout):
"""display links (using <ulink>)"""
self.write('<ulink url="%s"%s>%s</ulink>' % (layout.url,
self.handle_attrs(layout),
layout.label))
def visit_verbatimtext(self, layout):
"""display verbatim text (using <programlisting>)"""
self.writeln(self._indent(' <programlisting>'))
        self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))
self.writeln(self._indent(' </programlisting>'))
def visit_text(self, layout):
"""add some text"""
        self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))
def _indent(self, string):
"""correctly indent string according to section"""
return ' ' * 2*(self.section) + string
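# Illustrative sketch (not part of logilab-common): the writer is driven like
# the other ureports writers; "layout" stands for a ureports layout tree and
# the format() signature is assumed from the HTMLWriter base class.
#
#   import sys
#   writer = DocbookWriter()
#   writer.format(layout, stream=sys.stdout)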
|
the-stack_106_17178
|
import libsubmit
from libsubmit.channels.ssh.ssh import SSHChannel as SSH
def connect_and_list(hostname, username):
conn = SSH(hostname, username=username)
ec, out, err = conn.execute_wait("echo $HOSTNAME")
conn.close()
return out
def test_push(conn, fname="test001.txt"):
with open(fname, 'w') as f:
f.write("Hello from parsl.ssh testing\n")
conn.push_file(fname, "/tmp")
ec, out, err = conn.execute_wait("ls /tmp/{0}".format(fname))
print(ec, out, err)
def test_pull(conn, fname="test001.txt"):
local = "foo"
conn.pull_file("/tmp/{0}".format(fname), local)
with open("{0}/{1}".format(local, fname), 'r') as f:
print(f.readlines())
if __name__ == "__main__":
libsubmit.set_stream_logger()
# This is for testing
conn = SSH("midway.rcc.uchicago.edu", username="yadunand")
test_push(conn)
test_pull(conn)
conn.close()
|
the-stack_106_17179
|
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Modifier_run2_miniAOD_devel_cff import run2_miniAOD_devel
from RecoJets.JetProducers.PileupJetIDParams_cfi import *
#_stdalgos_4x = cms.VPSet(full, cutbased,PhilV1)
_stdalgos_5x = cms.VPSet(full_5x,cutbased,PhilV1)
#_chsalgos_4x = cms.VPSet(full, cutbased)
_chsalgos_5x = cms.VPSet(full_5x_chs,cutbased)
_chsalgos_74x = cms.VPSet(full_74x_chs,cutbased)
_chsalgos_76x = cms.VPSet(full_76x_chs,cutbased)
_chsalgos_80x = cms.VPSet(full_80x_chs,cutbased)
_chsalgos_81x = cms.VPSet(full_81x_chs,cutbased)
_chsalgos_94x = cms.VPSet(full_94x_chs,cutbased)
_chsalgos_102x = cms.VPSet(full_102x_chs,cutbased)
_stdalgos = _chsalgos_81x
# Calculate+store variables and run MVAs
pileupJetId = cms.EDProducer('PileupJetIdProducer',
produceJetIds = cms.bool(True),
jetids = cms.InputTag(""),
runMvas = cms.bool(True),
jets = cms.InputTag("ak4PFJetsCHS"),
vertexes = cms.InputTag("offlinePrimaryVertices"),
algos = cms.VPSet(_stdalgos),
rho = cms.InputTag("fixedGridRhoFastjetAll"),
jec = cms.string("AK4PFchs"),
applyJec = cms.bool(True),
inputIsCorrected = cms.bool(False),
residualsFromTxt = cms.bool(False),
# residualsTxt = cms.FileInPath("RecoJets/JetProducers/data/download.url") # must be an existing file
)
run2_miniAOD_devel.toModify(pileupJetId, algos = _chsalgos_102x)
# Calculate variables, but don't run MVAs
pileupJetIdCalculator = pileupJetId.clone(
runMvas = cms.bool(False),
algos = cms.VPSet(cutbased),
)
# Run MVAs on precalculated variables
pileupJetIdEvaluator = pileupJetId.clone(
produceJetIds = cms.bool(False),
jetids = cms.InputTag("pileupJetIdCalculator"),
)
pileUpJetIDTask = cms.Task(pileupJetId,
pileupJetIdCalculator,
pileupJetIdEvaluator
)
|
the-stack_106_17184
|
"""Environment discovery"""
import os
import glob
from toposort import toposort_flatten
from .environment import Environment
__all__ = ('discover',)
def discover(glob_pattern):
"""
Find all files matching given glob_pattern,
parse them, and return list of environments:
>>> envs = discover("requirements/*.in")
>>> # import pprint; pprint.pprint(envs)
>>> envs == [
... {'name': 'base', 'refs': set()},
... {'name': 'py27', 'refs': set()},
... {'name': 'test35', 'refs': set()},
... {'name': 'test', 'refs': {'base'}},
... {'name': 'test27', 'refs': {'py27', 'test35'}},
... {'name': 'local', 'refs': {'test'}},
... {'name': 'local27', 'refs': {'test', 'test27'}},
... {'name': 'testwin', 'refs': {'test'}},
... ]
True
"""
in_paths = glob.glob(glob_pattern)
names = {
extract_env_name(path): path
for path in in_paths
}
return order_by_refs([
{'name': name, 'refs': Environment.parse_references(in_path)}
for name, in_path in names.items()
])
def extract_env_name(file_path):
"""Return environment name for given requirements file path"""
return os.path.splitext(os.path.basename(file_path))[0]
def order_by_refs(envs):
"""
    Return a topologically sorted list of environments.
    I.e. every referenced environment is placed before the environments that reference it.
"""
topology = {
env['name']: set(env['refs'])
for env in envs
}
by_name = {
env['name']: env
for env in envs
}
return [
by_name[name]
for name in toposort_flatten(topology)
]
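# Illustrative sketch (not part of this module): order_by_refs() places every
# referenced environment before the environments that reference it, e.g.
#
#   order_by_refs([
#       {'name': 'test', 'refs': {'base'}},
#       {'name': 'base', 'refs': set()},
#   ])
#   # -> [{'name': 'base', 'refs': set()}, {'name': 'test', 'refs': {'base'}}]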
|
the-stack_106_17185
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Geometry.DTGeometry.dtGeometry_cfi")
process.DTGeometryESModule.applyAlignment = False
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("CondCore.DBCommon.CondDBSetup_cfi")
process.load("DQMServices.Core.DQM_cfg")
process.source = cms.Source("EmptyIOVSource",
lastValue = cms.uint64(100),
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
interval = cms.uint64(90)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
process.eventInfoProvider = cms.EDFilter("EventCoordinatesSource",
eventInfoFolder = cms.untracked.string('EventInfo/')
)
process.MessageLogger = cms.Service("MessageLogger",
debugModules = cms.untracked.vstring('resolutionTest_step1',
'resolutionTest_step2',
'resolutionTest_step3'),
cout = cms.untracked.PSet(
threshold = cms.untracked.string('ERROR'),
default = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
resolution = cms.untracked.PSet(
limit = cms.untracked.int32(10000000)
),
noLineBreaks = cms.untracked.bool(True)
),
categories = cms.untracked.vstring('resolution'),
destinations = cms.untracked.vstring('cout')
)
from DQMServices.Core.DQMQualityTester import DQMQualityTester
process.qTester = DQMQualityTester(
prescaleFactor = cms.untracked.int32(1),
qtList = cms.untracked.FileInPath('DQM/DTMonitorClient/test/QualityTests_ttrig.xml')
)
#process.load("DQM.DTMonitorClient.dtResolutionTest_cfi")
#process.modulo1=process.resolutionTest.clone(
# histoTag2D = 'hResDistVsDist_STEP1',
# histoTag = 'hResDist_STEP1',
# STEP = 'STEP1',
# OutputMEsInRootFile = False,
# readFile = True,
# inputFile = '/afs/cern.ch/cms/CAF/CMSALCA/ALCA_MUONCALIB/DTCALIB/RUNPERIODTEMPLATE/ttrig/DTkFactValidation_RUNNUMBERTEMPLATE.root'
# )
# process.load("DQM.DTMonitorClient.dtResolutionTest_cfi")
# process.modulo2=process.resolutionTest.clone(
# histoTag2D = 'hResDistVsDist_STEP2',
# histoTag = 'hResDist_STEP2',
# STEP = 'STEP2',
# OutputMEsInRootFile = False,
# readFile = True,
# inputFile = '/afs/cern.ch/cms/CAF/CMSALCA/ALCA_MUONCALIB/DTCALIB/RUNPERIODTEMPLATE/ttrig/DTkFactValidation_RUNNUMBERTEMPLATE.root'
# )
process.load("DQM.DTMonitorClient.dtResolutionTest_cfi")
process.modulo=process.resolutionTest.clone(
histoTag2D = 'hResDistVsDist_STEP3',
histoTag = 'hResDist_STEP3',
STEP = 'STEP3',
OutputMEsInRootFile = True,
readFile = True,
inputFile = '/afs/cern.ch/cms/CAF/CMSALCA/ALCA_MUONCALIB/DTCALIB/RUNPERIODTEMPLATE/ttrig/DTkFactValidation_RUNNUMBERTEMPLATE.root',
OutputFileName = '/afs/cern.ch/cms/CAF/CMSALCA/ALCA_MUONCALIB/DTCALIB/RUNPERIODTEMPLATE/ttrig/SummaryResiduals_RUNNUMBERTEMPLATE.root'
)
process.secondStep = cms.Sequence(process.modulo*process.qTester)
process.p = cms.Path(process.secondStep)
process.DQM.collectorHost = ''
|
the-stack_106_17186
|
from django.db.models.query import prefetch_related_objects
from iaso.models import OrgUnit, GroupSet
from .comparisons import as_field_types, Diff, Comparison
def index_pyramid(orgunits):
orgunits_by_source_ref = {}
for orgunit in orgunits:
if orgunits_by_source_ref.get(orgunit.source_ref, None) is None:
orgunits_by_source_ref[orgunit.source_ref] = [orgunit]
else:
print("TWO ORG UNITS WITH THE SAME source_ref: %s (THIS SHOULD NOT HAPPEN!)" % orgunit.source_ref)
orgunits_by_source_ref[orgunit.source_ref].append(orgunit)
return orgunits_by_source_ref
class Differ:
def __init__(self, logger):
self.iaso_logger = logger
def load_pyramid(self, version, validation_status=None, top_org_unit=None, org_unit_types=None):
self.iaso_logger.info("loading pyramid ", version.data_source, version, top_org_unit, org_unit_types)
queryset = (
OrgUnit.objects.prefetch_related("groups")
.prefetch_related("groups__group_sets")
.select_related("org_unit_type")
.filter(version=version)
)
if validation_status:
queryset = queryset.filter(validation_status=validation_status)
if top_org_unit:
parent = OrgUnit.objects.get(id=top_org_unit)
queryset = queryset.hierarchy(parent)
if org_unit_types:
queryset = queryset.filter(org_unit_type__in=org_unit_types)
return queryset
def diff(
self,
version_ref,
version,
ignore_groups=False,
show_deleted_org_units=False,
validation_status=None,
validation_status_ref=None,
top_org_unit=None,
top_org_unit_ref=None,
org_unit_types=None,
org_unit_types_ref=None,
):
field_names = ["name", "geometry", "parent"]
if not ignore_groups:
for group_set in GroupSet.objects.filter(source_version=version):
field_names.append("groupset:" + group_set.source_ref + ":" + group_set.name)
self.iaso_logger.info("will compare the following fields ", field_names)
field_types = as_field_types(field_names)
orgunits_dhis2 = self.load_pyramid(
version, validation_status=validation_status, top_org_unit=top_org_unit, org_unit_types=org_unit_types
)
orgunit_refs = self.load_pyramid(
version_ref,
validation_status=validation_status_ref,
top_org_unit=top_org_unit_ref,
org_unit_types=org_unit_types_ref,
)
self.iaso_logger.info(
"comparing ", version_ref, "(", len(orgunits_dhis2), ")", " and ", version, "(", len(orgunit_refs), ")"
)
        # index org units by source_ref up front to speed up the lookups below
diffs = []
index = 0
orgunits_dhis2_by_ref = index_pyramid(orgunits_dhis2)
for orgunit_ref in orgunit_refs:
index = index + 1
orgunit_dhis2_with_ref = orgunits_dhis2_by_ref.get(orgunit_ref.source_ref, [])
status = "same"
orgunit_dhis2 = None
if len(orgunit_dhis2_with_ref) > 0:
orgunit_dhis2 = orgunit_dhis2_with_ref[0]
else:
status = "new"
if index % 100 == 0:
self.iaso_logger.info(index, "will compare ", orgunit_ref, " vs ", orgunit_dhis2)
comparisons = self.compare_fields(orgunit_dhis2, orgunit_ref, field_types)
all_same = all(map(lambda comp: comp.status == "same", comparisons))
if status != "new" and not all_same:
status = "modified"
elif status != "new" and all_same:
status = "same"
diff = Diff(
org_unit=orgunit_dhis2 if orgunit_dhis2 else orgunit_ref, status=status, comparisons=comparisons
)
diffs.append(diff)
if show_deleted_org_units:
target_set = set(orgunits_dhis2_by_ref.keys())
source_set = set([org_unit.source_ref for org_unit in orgunit_refs])
deleted_org_units_ids = target_set - source_set
for deleted_id in deleted_org_units_ids:
orgunit_dhis2 = orgunits_dhis2_by_ref.get(deleted_id)[0]
comparisons = []
for field in field_types:
comparison = Comparison(
before=field.access(orgunit_dhis2),
after=None,
field=field.field_name,
status="deleted",
distance=100,
)
comparisons.append(comparison)
used_to_exist = OrgUnit.objects.filter(source_ref=deleted_id, version=version).count() > 0
status = "deleted" if used_to_exist else "never_seen"
diff = Diff(orgunit_dhis2, status=status, comparisons=comparisons)
diffs.append(diff)
return diffs, field_names
def compare_fields(self, orgunit_dhis2, orgunit_ref, field_types):
comparisons = []
for field in field_types:
dhis2_value = field.access(orgunit_dhis2)
ref_value = field.access(orgunit_ref)
status = None
same = field.is_same(dhis2_value, ref_value)
if same:
status = "same"
else:
status = "modified"
if dhis2_value is None and ref_value is not None:
status = "new"
if not same and dhis2_value is not None and (ref_value is None or ref_value == []):
status = "deleted"
comparisons.append(
Comparison(
before=dhis2_value,
after=ref_value,
field=field.field_name,
status=status,
distance=0 if same else field.distance(dhis2_value, ref_value),
)
)
return comparisons
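# Illustrative sketch (not part of this module): "logger", "version_a" and
# "version_b" are placeholders for an iaso logger object and two source
# versions loaded elsewhere.
#
#   differ = Differ(logger)
#   diffs, field_names = differ.diff(version_ref=version_a, version=version_b,
#                                    show_deleted_org_units=True)
#   modified = [d for d in diffs if d.status == "modified"]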
|
the-stack_106_17192
|
# Copyright Splunk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from importlib import reload
from unittest import TestCase, mock
from opentelemetry import propagate, trace
from opentelemetry.baggage.propagation import W3CBaggagePropagator
from opentelemetry.instrumentation.propagators import get_global_response_propagator
from opentelemetry.propagate import get_global_textmap
from opentelemetry.propagators.composite import CompositePropagator
from opentelemetry.trace.propagation.tracecontext import TraceContextTextMapPropagator
from splunk_otel.options import _Options
from splunk_otel.propagators import _ServerTimingResponsePropagator
from splunk_otel.tracing import _configure_tracing
class TestPropagator(TestCase):
def test_sets_tracecontext_and_baggage_are_default_propagator(self):
reload(propagate)
_configure_tracing(_Options())
propagator = get_global_textmap()
self.assertIsInstance(propagator, CompositePropagator)
propagators = propagator._propagators # pylint: disable=protected-access
self.assertEqual(len(propagators), 2)
self.assertIsInstance(propagators[0], TraceContextTextMapPropagator)
self.assertIsInstance(propagators[1], W3CBaggagePropagator)
@mock.patch.dict(
os.environ,
{"OTEL_PROPAGATORS": "baggage"},
)
def test_set_custom_propagator(self):
reload(propagate)
_configure_tracing(_Options())
propagator = get_global_textmap()
self.assertIsInstance(propagator, CompositePropagator)
propagators = propagator._propagators # pylint: disable=protected-access
self.assertEqual(len(propagators), 1)
self.assertIsInstance(propagators[0], W3CBaggagePropagator)
def test_server_timing_is_default_response_propagator(self):
_configure_tracing(_Options())
        propagator = get_global_response_propagator()
        self.assertIsInstance(propagator, _ServerTimingResponsePropagator)
def test_server_timing_is_global_response_propagator_disabled_code(self):
_configure_tracing(_Options(trace_response_header_enabled=False))
self.assertIsNone(get_global_response_propagator())
@mock.patch.dict(
os.environ,
{"SPLUNK_TRACE_RESPONSE_HEADER_ENABLED": "false"},
)
def test_server_timing_is_global_response_propagator_disabled_env(self):
_configure_tracing(_Options())
self.assertIsNone(get_global_response_propagator())
class TestServerTimingResponsePropagator(TestCase):
def test_inject(self):
span = trace.NonRecordingSpan(
trace.SpanContext(
trace_id=1,
span_id=2,
is_remote=False,
trace_flags=trace.TraceFlags(1),
trace_state=trace.DEFAULT_TRACE_STATE,
),
)
ctx = trace.set_span_in_context(span)
prop = _ServerTimingResponsePropagator()
carrier = {}
prop.inject(carrier, ctx)
self.assertEqual(carrier["Access-Control-Expose-Headers"], "Server-Timing")
self.assertEqual(
carrier["Server-Timing"],
'traceparent;desc="00-00000000000000000000000000000001-0000000000000002-01"',
)
def test_inject_not_sampled(self):
span = trace.NonRecordingSpan(
trace.SpanContext(
trace_id=1,
span_id=2,
is_remote=False,
trace_flags=trace.TraceFlags(0),
trace_state=trace.DEFAULT_TRACE_STATE,
),
)
ctx = trace.set_span_in_context(span)
prop = _ServerTimingResponsePropagator()
carrier = {}
prop.inject(carrier, ctx)
self.assertEqual(carrier["Access-Control-Expose-Headers"], "Server-Timing")
self.assertEqual(
carrier["Server-Timing"],
'traceparent;desc="00-00000000000000000000000000000001-0000000000000002-00"',
)
|
the-stack_106_17193
|
# Load Packages
import pandas as pd
import numpy as np
import random
from matplotlib import pyplot as plt
from sklearn.model_selection import LeaveOneOut
from sklearn import preprocessing
from sklearn.metrics import accuracy_score, precision_score, recall_score
from sklearn.model_selection import train_test_split
from sklearn.cluster import KMeans
import tensorflow as tf
# tf.enable_eager_execution()
# tf.executing_eagerly()
from tensorflow.keras import layers, losses
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.models import Model
configuration = tf.compat.v1.ConfigProto(device_count={"GPU": 0})
session = tf.compat.v1.Session(config=configuration)
random.seed(12)
class Autoencoder(Model):
def __init__(self, latent_dim):
super(Autoencoder, self).__init__()
self.latent_dim = latent_dim
self.encoder = tf.keras.Sequential([
layers.Dense(latent_dim, activation=layers.LeakyReLU(alpha=0.3)),
])
self.decoder = tf.keras.Sequential([
layers.Dense(1024, activation=layers.LeakyReLU(alpha=0.3))
])
def call(self, x):
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
def findPMD(filepath, outputpath1, outputpath2):
"""
findPMD(filepath, outputpath1, outputpath2)
The main function in PMDfinder.
* filepath: input BED file path.
* outputpath1: the output bed file path.
* outputpath2: the output grange file path.
"""
# load DSS methylation data
methylation = pd.read_csv(filepath, sep='\t', comment='t', header = 0, low_memory=False)
# store the location and the percent methylation
a = list(map(float, methylation['X']))
b = list(map(float, methylation['N']))
meth_ratio = [i / j for i, j in zip(a, b)]
# geno_pos = list(map(float, methylation['pos']))
### Data Conversion
# convert methylation ratio to PMD/non-PMD level (y=4x(1-x))
def methRatio2PMDLevel(meth_ratio):
n = len(meth_ratio)
PMD_level = [0]*n
for i in range(n):
PMD_level[i] = 4 * meth_ratio[i] * (1 - meth_ratio[i])
return PMD_level
PMD_level = methRatio2PMDLevel(meth_ratio)
### Sequential Data Matrix
    # Extract sequential features with a sliding window
N = len(PMD_level)
X = np.zeros((N-1023, 1024))
for i in range(N-1023):
X[i, :] = PMD_level[i:i+1024]
X = X.astype(np.float32)
### Autoencoder
# latent is the last variable
latent_dim = 8
m = Autoencoder(latent_dim)
m.compile(optimizer='adam', loss=losses.MeanSquaredError())
# fit the model
m.fit(X, X, epochs=5, shuffle=True)
# get the encoded PMD
encoded_slicing_PMD = m.encoder(X).numpy()
### k-means
kmeans = KMeans(n_clusters=2, random_state=22).fit(encoded_slicing_PMD)
final_result = kmeans.labels_
### Post-processing steps
## Remove PMD that is less than 101 bp length
assign1 = [] # index for the location equal to 1
for i in range(len(final_result)):
if final_result[i] == 1:
assign1.append(i)
break_pts1 = [0] # index for the break point, the next equal to 1 is more than 1bp
for i in range(1, len(assign1)):
if assign1[i] - assign1[i-1] > 1:
break_pts1.append(i)
# small_PMD_intervals: identify region that is close with each other
small_PMD_intervals = []
for i in range(1, len(break_pts1)):
if assign1[break_pts1[i]-1] - assign1[break_pts1[i-1]] + 1 < 101:
small_PMD_intervals.append(i)
# change the PMD interval with less than 101 to Non-PMD
for interval in small_PMD_intervals:
final_result[assign1[break_pts1[interval-1] : break_pts1[interval]]] = 0
## Merge PMD that is less than 101 bp from the next one
# This need to check the non-PMD region length
assign2 = []
for i in range(len(final_result)):
if final_result[i] == 0:
assign2.append(i)
break_pts2 = [0]
for i in range(1, len(assign2)):
if assign2[i] - assign2[i-1] > 1:
break_pts2.append(i)
# small non_PMD intervals
small_non_PMD_intervals = []
for i in range(1, len(break_pts2)):
if assign2[break_pts2[i]-1] - assign2[break_pts2[i-1]] + 1 < 51:
small_non_PMD_intervals.append(i)
# change the PMD interval with less than 51 to Non-PMD
for interval in small_non_PMD_intervals:
final_result[assign2[break_pts2[interval-1] : break_pts2[interval]]] = 1
# file output
output_methylation = methylation[:len(methylation)-1023].copy()
output_methylation.loc[:, 'PMD_predict'] = pd.DataFrame(final_result)[0].map({1: 'Non-PMD', 0: 'PMD'})
output_methylation.to_csv(outputpath1, sep='\t', index = False, header=True)
# output grange file
df = pd.DataFrame(columns = ['chr', 'start', 'end', 'status'])
ncols = len(output_methylation)
i, j = 0, 0
while i < ncols:
if j == ncols:
df = df.append({'chr': output_methylation.iloc[i, 0], 'start': output_methylation.iloc[i, 1], 'end': output_methylation.iloc[j-1, 1], 'status': ti}, ignore_index = True)
break
ti = output_methylation.iloc[i, 4]
tj = output_methylation.iloc[j, 4]
if tj == ti:
j += 1
else:
df = df.append({'chr': output_methylation.iloc[i, 0], 'start': output_methylation.iloc[i, 1], 'end': output_methylation.iloc[j-1, 1], 'status': ti}, ignore_index = True)
i = j
df.to_csv(outputpath2, sep='\t', index = False, header=True)
# print(df)
print("Finished PMDfinder!")
# np.savetxt(outputpath1, outputpath2, final_result, delimiter=',')
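# Illustrative usage sketch (not part of the original module); the three paths
# are placeholders for a DSS-style methylation BED file and the two output
# files described in the docstring above.
#
#   findPMD('chr21_methylation.bed',
#           'chr21_PMD_calls.bed',
#           'chr21_PMD_granges.tsv')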
|
the-stack_106_17196
|
# coding: utf8
def extract_slices(input_tensor, slice_direction=0, slice_mode='single'):
"""Extracts the slices from three directions
    This function extracts slices from the preprocessed nifti image. The
    direction of extraction can be either sagittal (0), coronal (1) or
    axial (other). The output slices can be stored following two modes:
    single (1 channel) or rgb (3 identical channels).
Args:
input_tensor: tensor version of the nifti MRI.
        slice_direction: axis along which the slices are extracted (0: sagittal, 1: coronal, other: axial).
        slice_mode: 'single' or 'rgb'.
Returns:
        file: multiple tensors saved on disk, suffixes correspond to the
            indexes of the slices. Same location as the input file.
"""
import torch
import os
image_tensor = torch.load(input_tensor)
# reshape the tensor, delete the first dimension for slice-level
image_tensor = image_tensor.view(image_tensor.shape[1], image_tensor.shape[2], image_tensor.shape[3])
# sagital
# M and N correspond to the first and last slices (if need to remove)
M = 0
N = 0
slice_list_sag = range(M, image_tensor.shape[0] - N) # delete the first M slices and last N slices
basedir = os.getcwd()
input_tensor_filename = os.path.basename(input_tensor)
txt_idx = input_tensor_filename.rfind("_")
it_filename_prefix = input_tensor_filename[0:txt_idx]
it_filename_suffix = input_tensor_filename[txt_idx:]
output_file_original = []
output_file_rgb = []
if slice_direction == 0:
for index_slice, index_slice_list in zip(slice_list_sag, range(len(slice_list_sag))):
# for i in slice_list:
# sagital
slice_select_sag = image_tensor[index_slice, :, :]
extracted_slice_original_sag = slice_select_sag.unsqueeze(0) # shape should be 1 * W * L
# train for transfer learning, creating the fake RGB image.
slice_select_sag = (slice_select_sag - slice_select_sag.min()) / (slice_select_sag.max() - slice_select_sag.min())
extracted_slice_rgb_sag = torch.stack((slice_select_sag, slice_select_sag, slice_select_sag)) # shape should be 3 * W * L
# save into .pt format
if slice_mode == 'single':
output_file_original.append(
os.path.join(
basedir,
it_filename_prefix
+ '_axis-sag_channel-single_slice-'
+ str(index_slice)
+ it_filename_suffix
)
)
torch.save(extracted_slice_original_sag.clone(), output_file_original[index_slice_list])
elif slice_mode == 'rgb':
output_file_rgb.append(
os.path.join(
basedir,
it_filename_prefix
+ '_axis-sag_channel-rgb_slice-'
+ str(index_slice)
+ it_filename_suffix
)
)
torch.save(extracted_slice_rgb_sag.clone(), output_file_rgb[index_slice_list])
elif slice_direction == 1:
# cornal
slice_list_cor = range(M, image_tensor.shape[1] - N) # delete the first M slices and last N slices
for index_slice, index_slice_list in zip(slice_list_cor, range(len(slice_list_cor))):
# for i in slice_list:
# sagital
slice_select_cor = image_tensor[:, index_slice, :]
extracted_slice_original_cor = slice_select_cor.unsqueeze(0) # shape should be 1 * W * L
# train for transfer learning, creating the fake RGB image.
slice_select_cor = (slice_select_cor - slice_select_cor.min()) / (slice_select_cor.max() - slice_select_cor.min())
extracted_slice_rgb_cor = torch.stack((slice_select_cor, slice_select_cor, slice_select_cor)) # shape should be 3 * W * L
# save into .pt format
if slice_mode == 'single':
output_file_original.append(
os.path.join(
basedir,
it_filename_prefix
+ '_axis-cor_channel-single_slice-'
+ str(index_slice)
+ it_filename_suffix
)
)
torch.save(extracted_slice_original_cor.clone(), output_file_original[index_slice_list])
elif slice_mode == 'rgb':
output_file_rgb.append(
os.path.join(
basedir,
it_filename_prefix
+ '_axis-cor_channel-rgb_slice-'
+ str(index_slice)
+ it_filename_suffix
)
)
torch.save(extracted_slice_rgb_cor.clone(), output_file_rgb[index_slice_list])
else:
# axial
slice_list_axi = range(M, image_tensor.shape[2] - N) # delete the first M slices and last N slices
for index_slice, index_slice_list in zip(slice_list_axi, range(len(slice_list_axi))):
# for i in slice_list:
# sagital
slice_select_axi = image_tensor[:, :, index_slice]
extracted_slice_original_axi = slice_select_axi.unsqueeze(0) # shape should be 1 * W * L
# train for transfer learning, creating the fake RGB image.
slice_select_axi = (slice_select_axi - slice_select_axi.min()) / (slice_select_axi.max() - slice_select_axi.min())
extracted_slice_rgb_axi = torch.stack((slice_select_axi, slice_select_axi, slice_select_axi)) # shape should be 3 * W * L
# save into .pt format
if slice_mode == 'single':
output_file_original.append(
os.path.join(
basedir,
it_filename_prefix
+ '_axis-axi_channel-single_slice-'
+ str(index_slice)
+ it_filename_suffix
)
)
torch.save(extracted_slice_original_axi.clone(), output_file_original[index_slice_list])
elif slice_mode == 'rgb':
output_file_rgb.append(
os.path.join(
basedir,
it_filename_prefix
+ '_axis-axi_channel-rgb_slice-'
+ str(index_slice)
+ it_filename_suffix
)
)
torch.save(extracted_slice_rgb_axi.clone(), output_file_rgb[index_slice_list])
return output_file_rgb, output_file_original
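# Illustrative usage sketch (not part of the original module): the input is a
# tensor produced by save_as_pt() below; the nifti file name is a placeholder.
#
#   pt_file = save_as_pt('sub-01_T1w.nii.gz')
#   rgb_slices, single_slices = extract_slices(pt_file, slice_direction=0,
#                                              slice_mode='rgb')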
def extract_patches(input_tensor, patch_size, stride_size):
"""Extracts the patches
    This function extracts patches from the preprocessed nifti image. The patch
    size is provided as input, as well as the stride size. If the stride size is
    smaller than the patch size, consecutive patches overlap. If the stride size
    equals the patch size there is no overlap. Otherwise, unprocessed zones can
    exist.
Args:
input_tensor: tensor version of the nifti MRI.
patch_size: size of a single patch.
stride_size: size of the stride leading to next patch.
Returns:
        file: multiple tensors saved on disk, suffixes correspond to the
            indexes of the patches. Same location as the input file.
"""
import torch
import os
basedir = os.getcwd()
image_tensor = torch.load(input_tensor)
# use classifiers tensor.upfold to crop the patch.
patches_tensor = image_tensor.unfold(1, patch_size, stride_size).unfold(2, patch_size, stride_size).unfold(3, patch_size, stride_size).contiguous()
# the dimension of patch_tensor should be [1, patch_num1, patch_num2, patch_num3, patch_size1, patch_size2, patch_size3]
patches_tensor = patches_tensor.view(-1, patch_size, patch_size, patch_size)
input_tensor_filename = os.path.basename(input_tensor)
txt_idx = input_tensor_filename.rfind("_")
it_filename_prefix = input_tensor_filename[0:txt_idx]
it_filename_suffix = input_tensor_filename[txt_idx:]
output_patch = []
for index_patch in range(patches_tensor.shape[0]):
extracted_patch = patches_tensor[index_patch, ...].unsqueeze_(0) # add one dimension
# save into .pt format
output_patch.append(
os.path.join(
basedir,
it_filename_prefix
+ '_patchsize-'
+ str(patch_size)
+ '_stride-'
+ str(stride_size)
+ '_patch-'
+ str(index_patch)
+ it_filename_suffix
)
)
torch.save(extracted_patch.clone(), output_patch[index_patch])
return output_patch
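# Illustrative usage sketch (not part of the original module): using the same
# patch and stride size yields non-overlapping 50-voxel patches; the input
# tensor path is a placeholder.
#
#   patch_files = extract_patches('sub-01_T1w.pt', patch_size=50, stride_size=50)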
def save_as_pt(input_img):
"""Saves PyTorch tensor version of the nifti image
    This function converts the nifti image to a tensor (.pt) version of the image.
    The tensor version is saved at the same location as input_img.
    Args:
        input_img: path to the nifti image to convert.
    Returns:
        output_file (str): single tensor file saved on disk, at the same location as the input file.
"""
import torch
import os
import nibabel as nib
basedir = os.getcwd()
image_array = nib.load(input_img).get_fdata()
image_tensor = torch.from_numpy(image_array).unsqueeze(0).float()
# make sure the tensor dtype is torch.float32
output_file = os.path.join(basedir, os.path.basename(input_img).split('.nii.gz')[0] + '.pt')
# save
torch.save(image_tensor.clone(), output_file)
return output_file
|
the-stack_106_17198
|
"""Extensions of core argparse classes."""
import argparse
import glob
import inspect
import logging
import os
import re
import sys
from copy import deepcopy
from typing import Any, Callable, Dict, List, NoReturn, Optional, Sequence, Set, Tuple, Type, Union
from unittest.mock import patch
from .formatters import DefaultHelpFormatter, empty_help, formatter_context, get_env_var
from .jsonnet import ActionJsonnet
from .jsonschema import ActionJsonSchema
from .loaders_dumpers import check_valid_dump_format, dump_using_format, get_loader_exceptions, loaders, load_value, load_value_context, yaml_load
from .namespace import is_meta_key, Namespace, split_key, split_key_leaf, strip_meta
from .signatures import is_pure_dataclass, SignatureArguments
from .typehints import ActionTypeHint, is_class_object
from .typing import is_final_class
from .actions import (
ActionParser,
ActionConfigFile,
_ActionSubCommands,
_ActionPrintConfig,
_ActionConfigLoad,
_ActionLink,
_ActionHelpClassPath,
_is_branch_key,
_find_action,
_find_action_and_subcommand,
_find_parent_action,
_find_parent_action_and_subcommand,
_is_action_value_list,
filter_default_actions,
parent_parsers,
)
from .optionals import (
argcomplete_autocomplete,
argcomplete_namespace,
fsspec_support,
get_config_read_mode,
import_fsspec,
import_jsonnet,
omegaconf_support,
)
from .util import (
identity,
ParserError,
usage_and_exit_error_handler,
change_to_path_dir,
Path,
LoggerProperty,
_lenient_check_context,
lenient_check,
return_parser_if_captured,
)
__all__ = ['ActionsContainer', 'ArgumentParser']
class ActionsContainer(SignatureArguments, argparse._ActionsContainer, LoggerProperty):
"""Extension of argparse._ActionsContainer to support additional functionalities."""
_action_groups: Sequence['_ArgumentGroup'] # type: ignore
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.register('type', None, identity)
self.register('action', 'parsers', _ActionSubCommands)
def add_argument(self, *args, enable_path:bool=False, **kwargs):
"""Adds an argument to the parser or argument group.
All the arguments from `argparse.ArgumentParser.add_argument
<https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.add_argument>`_
are supported. Additionally it accepts:
Args:
enable_path: Whether to try parsing path/subconfig when argument is a complex type.
"""
parser = self.parser if hasattr(self, 'parser') else self # type: ignore
if 'action' in kwargs and isinstance(kwargs['action'], ActionParser):
if kwargs['action']._parser == parser:
raise ValueError('Parser cannot be added as a subparser of itself.')
return ActionParser._move_parser_actions(parser, args, kwargs)
if 'type' in kwargs:
if is_final_class(kwargs['type']) or is_pure_dataclass(kwargs['type']):
theclass = kwargs.pop('type')
nested_key = re.sub('^--', '', args[0])
if is_final_class(theclass):
kwargs.pop('help', None)
self.add_class_arguments(theclass, nested_key, **kwargs)
else:
self.add_dataclass_arguments(theclass, nested_key, **kwargs)
return _find_action(parser, nested_key)
if ActionTypeHint.is_supported_typehint(kwargs['type']):
if 'action' in kwargs:
raise ValueError('Type hint as type does not allow providing an action.')
if ActionTypeHint.is_subclass_typehint(kwargs['type']):
dest = re.sub('^--', '', args[0])
super().add_argument(f'--{dest}.help', action=_ActionHelpClassPath(baseclass=kwargs['type'])) # type: ignore
kwargs['action'] = ActionTypeHint(typehint=kwargs.pop('type'), enable_path=enable_path)
action = super().add_argument(*args, **kwargs)
if isinstance(action, ActionConfigFile) and getattr(self, '_print_config', None) is not None:
self.add_argument(self._print_config, action=_ActionPrintConfig) # type: ignore
if is_meta_key(action.dest):
raise ValueError(f'Argument with destination name "{action.dest}" not allowed.')
if action.help is None:
action.help = empty_help
if action.required:
parser.required_args.add(action.dest)
action._required = True # type: ignore
action.required = False
return action
def add_argument_group(self, *args, name:str=None, **kwargs):
"""Adds a group to the parser.
All the arguments from `argparse.ArgumentParser.add_argument_group
<https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.add_argument_group>`_
are supported. Additionally it accepts:
Args:
name: Name of the group. If set the group object will be included in the parser.groups dict.
Returns:
The group object.
Raises:
ValueError: If group with the same name already exists.
"""
parser = self.parser if hasattr(self, 'parser') else self # type: ignore
if name is not None and name in parser.groups:
raise ValueError(f'Group with name {name} already exists.')
group = _ArgumentGroup(parser, *args, **kwargs)
group.parser = parser
parser._action_groups.append(group)
if name is not None:
parser.groups[name] = group
return group
class _ArgumentGroup(ActionsContainer, argparse._ArgumentGroup):
"""Extension of argparse._ArgumentGroup to support additional functionalities."""
dest: Optional[str] = None
parser: Optional['ArgumentParser'] = None
class ArgumentParser(ActionsContainer, argparse.ArgumentParser):
"""Parser for command line, yaml/jsonnet files and environment variables."""
formatter_class: Type[DefaultHelpFormatter] # type: ignore
groups: Optional[Dict[str, '_ArgumentGroup']] = None
_subcommands_action: Optional[_ActionSubCommands] = None
def __init__(
self,
*args,
env_prefix: Optional[str] = None,
error_handler: Optional[Callable[['ArgumentParser', str], None]] = usage_and_exit_error_handler,
formatter_class: Type[DefaultHelpFormatter] = DefaultHelpFormatter,
logger: Optional[Union[bool, Dict[str, str], logging.Logger]] = None,
version: Optional[str] = None,
print_config: Optional[str] = '--print_config',
parser_mode: str = 'yaml',
dump_header: Optional[List[str]] = None,
default_config_files: Optional[List[str]] = None,
default_env: bool = yaml_load(os.environ.get('JSONARGPARSE_DEFAULT_ENV', 'False')),
default_meta: bool = True,
**kwargs
):
"""Initializer for ArgumentParser instance.
All the arguments from the initializer of `argparse.ArgumentParser
<https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser>`_
are supported. Additionally it accepts:
Args:
env_prefix: Prefix for environment variables.
error_handler: Handler for parsing errors, set to None to simply raise exception.
formatter_class: Class for printing help messages.
logger: Configures the logger, see :class:`.LoggerProperty`.
version: Program version string to add --version argument.
print_config: Add this as argument to print config, set None to disable.
parser_mode: Mode for parsing configuration files: ``'yaml'``, ``'jsonnet'`` or ones added via :func:`.set_loader`.
dump_header: Header to include as comment when dumping a config object.
default_config_files: Default config file locations, e.g. :code:`['~/.config/myapp/*.yaml']`.
default_env: Set the default value on whether to parse environment variables.
default_meta: Set the default value on whether to include metadata in config objects.
"""
super().__init__(*args, formatter_class=formatter_class, **kwargs)
if self.groups is None:
self.groups = {}
self.required_args: Set[str] = set()
self.save_path_content: Set[str] = set()
self.default_config_files = default_config_files
self.default_meta = default_meta
self.default_env = default_env
self.env_prefix = env_prefix
self.parser_mode = parser_mode
self.dump_header = dump_header
self.logger = logger
self.error_handler = error_handler
self._print_config = print_config
if version is not None:
self.add_argument('--version', action='version', version='%(prog)s '+version, help='Print version and exit.')
## Parsing methods ##
def parse_known_args(self, args=None, namespace=None):
"""Raises NotImplementedError to dissuade its use, since typos in configs would go unnoticed."""
caller = inspect.getmodule(inspect.stack()[1][0]).__package__
if caller not in {'jsonargparse', 'argcomplete'}:
raise NotImplementedError('parse_known_args not implemented to dissuade its use, since typos in configs would go unnoticed.')
if args is None:
args = sys.argv[1:]
else:
args = list(args)
if not all(isinstance(a, str) for a in args):
self.error(f'All arguments are expected to be strings: {args}')
self.args = args
if namespace is None:
namespace = Namespace()
namespace = argcomplete_namespace(caller, self, namespace)
try:
with patch('argparse.Namespace', Namespace), _lenient_check_context(caller), ActionTypeHint.subclass_arg_context(self), load_value_context(self.parser_mode):
namespace, args = self._parse_known_args(args, namespace)
except (argparse.ArgumentError, ParserError) as ex:
self.error(str(ex), ex)
return namespace, args
def _parse_optional(self, arg_string):
subclass_arg = ActionTypeHint.parse_subclass_arg(arg_string)
if subclass_arg:
return subclass_arg
if arg_string == self._print_config:
arg_string += '='
return super()._parse_optional(arg_string)
def _parse_common(
self,
cfg: Namespace,
env: Optional[bool],
defaults: bool,
with_meta: Optional[bool],
skip_check: bool,
skip_required: bool = False,
skip_subcommands: bool = False,
fail_no_subcommand: bool = True,
cfg_base: Optional[Namespace] = None,
log_message: Optional[str] = None,
) -> Namespace:
"""Common parsing code used by other parse methods.
Args:
cfg: The configuration object.
env: Whether to merge with the parsed environment, None to use parser's default.
defaults: Whether to merge with the parser's defaults.
with_meta: Whether to include metadata in config object, None to use parser's default.
skip_check: Whether to skip check if configuration is valid.
skip_required: Whether to skip check of required arguments.
skip_subcommands: Whether to skip subcommand processing.
fail_no_subcommand: Whether to fail if no subcommand given.
cfg_base: A base configuration object.
log_message: Message to log at INFO level after parsing.
Returns:
A config object with all parsed values.
"""
if env is None and self._default_env:
env = True
if not skip_subcommands:
_ActionSubCommands.handle_subcommands(self, cfg, env=env, defaults=defaults, fail_no_subcommand=fail_no_subcommand)
if cfg_base is not None:
cfg = self.merge_config(cfg, cfg_base)
if env:
with _ActionPrintConfig.skip_print_config():
cfg_env = self.parse_env(defaults=defaults, _skip_check=True, _skip_subcommands=True)
cfg = self.merge_config(cfg, cfg_env)
elif defaults:
cfg = self.merge_config(cfg, self.get_defaults(skip_check=True))
with _lenient_check_context():
ActionTypeHint.add_sub_defaults(self, cfg)
if not (with_meta or (with_meta is None and self._default_meta)):
cfg = strip_meta(cfg)
_ActionPrintConfig.print_config_if_requested(self, cfg)
_ActionLink.apply_parsing_links(self, cfg)
if not skip_check and not lenient_check.get():
with load_value_context(self.parser_mode):
self.check_config(cfg, skip_required=skip_required)
if log_message is not None:
self._logger.info(log_message)
return cfg
def parse_args( # type: ignore[override]
self,
args: Optional[Sequence[str]] = None,
namespace: Namespace = None,
env: Optional[bool] = None,
defaults: bool = True,
with_meta: Optional[bool] = None,
_skip_check: bool = False,
) -> Namespace:
"""Parses command line argument strings.
All the arguments from `argparse.ArgumentParser.parse_args
<https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.parse_args>`_
are supported. Additionally it accepts:
Args:
args: List of arguments to parse or None to use sys.argv.
env: Whether to merge with the parsed environment, None to use parser's default.
defaults: Whether to merge with the parser's defaults.
with_meta: Whether to include metadata in config object, None to use parser's default.
Returns:
A config object with all parsed values.
Raises:
ParserError: If there is a parsing error and error_handler=None.
"""
return_parser_if_captured(self)
argcomplete_autocomplete(self)
try:
cfg, unk = self.parse_known_args(args=args, namespace=namespace)
if unk:
self.error(f'Unrecognized arguments: {" ".join(unk)}')
parsed_cfg = self._parse_common(
cfg=cfg,
env=env,
defaults=defaults,
with_meta=with_meta,
skip_check=_skip_check,
log_message='Parsed command line arguments.',
)
except (TypeError, KeyError) as ex:
self.error(str(ex), ex)
return parsed_cfg
def parse_object(
self,
cfg_obj: Union[Namespace, Dict[str, Any]],
cfg_base: Optional[Namespace] = None,
env: Optional[bool] = None,
defaults: bool = True,
with_meta: Optional[bool] = None,
_skip_check: bool = False,
_skip_required: bool = False,
) -> Namespace:
"""Parses configuration given as an object.
Args:
cfg_obj: The configuration object.
env: Whether to merge with the parsed environment, None to use parser's default.
defaults: Whether to merge with the parser's defaults.
with_meta: Whether to include metadata in config object, None to use parser's default.
Returns:
A config object with all parsed values.
Raises:
ParserError: If there is a parsing error and error_handler=None.
"""
try:
cfg = self._apply_actions(cfg_obj)
parsed_cfg = self._parse_common(
cfg=cfg,
env=env,
defaults=defaults,
with_meta=with_meta,
skip_check=_skip_check,
skip_required=_skip_required,
cfg_base=cfg_base,
log_message='Parsed object.',
)
except (TypeError, KeyError) as ex:
self.error(str(ex), ex)
return parsed_cfg
def _load_env_vars(self, env: Dict[str, str], defaults: bool) -> Namespace:
cfg = Namespace()
actions = filter_default_actions(self._actions)
for action in actions:
env_var = get_env_var(self, action)
if env_var in env and isinstance(action, ActionConfigFile):
ActionConfigFile.apply_config(self, cfg, action.dest, env[env_var])
for action in actions:
env_var = get_env_var(self, action)
if env_var in env and isinstance(action, _ActionSubCommands):
env_val = env[env_var]
if env_val in action.choices:
cfg[action.dest] = subcommand = self._check_value_key(action, env_val, action.dest, cfg)
pcfg = action._name_parser_map[env_val].parse_env(env=env, defaults=defaults, _skip_check=True)
for k, v in vars(pcfg).items():
cfg[subcommand+'.'+k] = v
for action in actions:
env_var = get_env_var(self, action)
if env_var in env and not isinstance(action, ActionConfigFile):
env_val = env[env_var]
if _is_action_value_list(action):
if re.match('^ *\\[.+,.+] *$', env_val):
try:
env_val = load_value(env_val)
except get_loader_exceptions():
env_val = [env_val] # type: ignore
else:
env_val = [env_val] # type: ignore
cfg[action.dest] = self._check_value_key(action, env_val, action.dest, cfg)
return cfg
def parse_env(
self,
env: Dict[str, str] = None,
defaults: bool = True,
with_meta: Optional[bool] = None,
_skip_check: bool = False,
_skip_subcommands: bool = False,
) -> Namespace:
"""Parses environment variables.
Args:
env: The environment object to use, if None `os.environ` is used.
defaults: Whether to merge with the parser's defaults.
with_meta: Whether to include metadata in config object, None to use parser's default.
Returns:
A config object with all parsed values.
Raises:
ParserError: If there is a parsing error and error_handler=None.
"""
try:
if env is None:
env = dict(os.environ)
with load_value_context(self.parser_mode):
cfg = self._load_env_vars(env=env, defaults=defaults)
self._apply_actions(cfg)
parsed_cfg = self._parse_common(
cfg=cfg,
env=False,
defaults=defaults,
with_meta=with_meta,
skip_check=_skip_check,
skip_subcommands=_skip_subcommands,
log_message='Parsed environment variables.',
)
except (TypeError, KeyError) as ex:
self.error(str(ex), ex)
return parsed_cfg
def parse_path(
self,
cfg_path: str,
ext_vars: Optional[dict] = None,
env: Optional[bool] = None,
defaults: bool = True,
with_meta: Optional[bool] = None,
_skip_check: bool = False,
_fail_no_subcommand: bool = True,
) -> Namespace:
"""Parses a configuration file (yaml or jsonnet) given its path.
Args:
cfg_path: Path to the configuration file to parse.
ext_vars: Optional external variables used for parsing jsonnet.
env: Whether to merge with the parsed environment, None to use parser's default.
defaults: Whether to merge with the parser's defaults.
with_meta: Whether to include metadata in config object, None to use parser's default.
Returns:
A config object with all parsed values.
Raises:
ParserError: If there is a parsing error and error_handler=None.
"""
fpath = Path(cfg_path, mode=get_config_read_mode())
with change_to_path_dir(fpath):
cfg_str = fpath.get_content()
parsed_cfg = self.parse_string(cfg_str,
os.path.basename(cfg_path),
ext_vars,
env,
defaults,
with_meta=with_meta,
_skip_check=_skip_check,
_fail_no_subcommand=_fail_no_subcommand)
self._logger.info(f'Parsed {self.parser_mode} from path: {cfg_path}')
return parsed_cfg
def parse_string(
self,
cfg_str: str,
cfg_path: str = '',
ext_vars: Optional[dict] = None,
env: Optional[bool] = None,
defaults: bool = True,
with_meta: Optional[bool] = None,
_skip_check: bool = False,
_fail_no_subcommand: bool = True,
) -> Namespace:
"""Parses configuration (yaml or jsonnet) given as a string.
Args:
cfg_str: The configuration content.
cfg_path: Optional path to original config path, just for error printing.
ext_vars: Optional external variables used for parsing jsonnet.
env: Whether to merge with the parsed environment, None to use parser's default.
defaults: Whether to merge with the parser's defaults.
with_meta: Whether to include metadata in config object, None to use parser's default.
Returns:
A config object with all parsed values.
Raises:
ParserError: If there is a parsing error and error_handler=None.
"""
try:
with load_value_context(self.parser_mode):
cfg = self._load_config_parser_mode(cfg_str, cfg_path, ext_vars)
parsed_cfg = self._parse_common(
cfg=cfg,
env=env,
defaults=defaults,
with_meta=with_meta,
skip_check=_skip_check,
fail_no_subcommand=_fail_no_subcommand,
log_message=(f'Parsed {self.parser_mode} string.'),
)
except (TypeError, KeyError) as ex:
self.error(str(ex), ex)
return parsed_cfg
def _load_config_parser_mode(
self,
cfg_str: str,
cfg_path: str = '',
ext_vars: Optional[dict] = None,
) -> Namespace:
"""Loads a configuration string (yaml or jsonnet) into a namespace.
Args:
cfg_str: The configuration content.
cfg_path: Optional path to original config path, just for error printing.
ext_vars: Optional external variables used for parsing jsonnet.
Raises:
TypeError: If there is an invalid value according to the parser.
"""
try:
cfg_dict = load_value(cfg_str, path=cfg_path, ext_vars=ext_vars)
except get_loader_exceptions() as ex:
raise TypeError(f'Problems parsing config :: {ex}') from ex
cfg = self._apply_actions(cfg_dict)
return cfg
def link_arguments(
self,
source: Union[str, Tuple[str, ...]],
target: str,
compute_fn: Callable = None,
apply_on: str = 'parse',
):
"""Makes an argument value be derived from the values of other arguments.
        Refer to :ref:`argument-linking` for a detailed explanation and examples.
Args:
source: Key(s) from which the target value is derived.
target: Key to where the value is set.
compute_fn: Function to compute target value from source.
apply_on: At what point to set target value, 'parse' or 'instantiate'.
Raises:
ValueError: If an invalid parameter is given.
"""
_ActionLink(self, source, target, compute_fn, apply_on)
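    # A minimal usage sketch (comment only, not part of the original source). The class
    # names 'DataModule' and 'Model' are hypothetical and only illustrate how a value
    # given once propagates to another key:
    #
    #   parser.add_class_arguments(DataModule, 'data')
    #   parser.add_class_arguments(Model, 'model')
    #   parser.link_arguments('data.batch_size', 'model.batch_size')
    #   cfg = parser.parse_args(['--data.batch_size=8'])   # cfg.model.batch_size == 8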
## Methods for adding to the parser ##
def add_subparsers(self, **kwargs) -> NoReturn:
"""Raises a NotImplementedError since jsonargparse uses add_subcommands."""
raise NotImplementedError('In jsonargparse sub-commands are added using the add_subcommands method.')
def add_subcommands(self, required: bool = True, dest: str = 'subcommand', **kwargs) -> _ActionSubCommands:
"""Adds sub-command parsers to the ArgumentParser.
The aim is the same as `argparse.ArgumentParser.add_subparsers
<https://docs.python.org/3/library/argparse.html#argparse.ArgumentParser.add_subparsers>`_
the difference being that dest by default is 'subcommand' and the parsed
values of the sub-command are stored in a nested namespace using the
sub-command's name as base key.
Args:
required: Whether the subcommand must be provided.
dest: Destination key where the chosen subcommand name is stored.
**kwargs: All options that `argparse.ArgumentParser.add_subparsers` accepts.
"""
if 'description' not in kwargs:
kwargs['description'] = 'For more details of each subcommand add it as argument followed by --help.'
subcommands: _ActionSubCommands = super().add_subparsers(dest=dest, **kwargs) # type: ignore
if required:
self.required_args.add(dest)
subcommands._required = required # type: ignore
subcommands.required = False
subcommands.parent_parser = self # type: ignore
subcommands._env_prefix = self.env_prefix
self._subcommands_action = subcommands
return subcommands
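    # A minimal usage sketch (comment only, not part of the original source). 'fit',
    # 'fit_parser' and '--lr' are hypothetical and only illustrate where parsed values end up:
    #
    #   subcommands = parser.add_subcommands()
    #   subcommands.add_subcommand('fit', fit_parser)   # fit_parser: another ArgumentParser
    #   cfg = parser.parse_args(['fit', '--lr=0.1'])
    #   cfg.subcommand                                   # 'fit'
    #   cfg.fit.lr                                       # 0.1, nested under the subcommand name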
## Methods for serializing config objects ##
def dump(
self,
cfg: Namespace,
format: str = 'parser_mode',
skip_none: bool = True,
skip_default: bool = False,
skip_check: bool = False,
yaml_comments: bool = False,
) -> str:
"""Generates a yaml or json string for the given configuration object.
Args:
cfg: The configuration object to dump.
format: The output format: ``'yaml'``, ``'json'``, ``'json_indented'``, ``'parser_mode'`` or ones added via :func:`.set_dumper`.
skip_none: Whether to exclude entries whose value is None.
skip_default: Whether to exclude entries whose value is the same as the default.
skip_check: Whether to skip parser checking.
yaml_comments: Whether to add help content as comments. ``yaml_comments=True`` implies ``format='yaml'``.
Returns:
The configuration in yaml or json format.
Raises:
TypeError: If any of the values of cfg is invalid according to the parser.
"""
check_valid_dump_format(format)
cfg = deepcopy(cfg)
cfg = strip_meta(cfg)
_ActionLink.strip_link_target_keys(self, cfg)
if not skip_check:
with load_value_context(self.parser_mode):
self.check_config(cfg)
with load_value_context(self.parser_mode):
dump_kwargs = {'skip_check': skip_check, 'skip_none': skip_none}
self._dump_cleanup_actions(cfg, self._actions, dump_kwargs)
cfg = cfg.as_dict()
if skip_default:
self._dump_delete_default_entries(cfg, self.get_defaults().as_dict())
with formatter_context(self):
return dump_using_format(self, cfg, 'yaml_comments' if yaml_comments else format)
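    # A minimal usage sketch (comment only, not part of the original source); '--opt' is a
    # hypothetical argument used only to show the round trip:
    #
    #   cfg = parser.parse_args(['--opt=2'])
    #   parser.dump(cfg)                                      # yaml string by default
    #   parser.dump(cfg, format='json_indented', skip_default=True)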
def _dump_cleanup_actions(self, cfg, actions, dump_kwargs, prefix=''):
skip_none = dump_kwargs['skip_none']
for action in filter_default_actions(actions):
action_dest = prefix + action.dest
if (action.help == argparse.SUPPRESS and not isinstance(action, _ActionConfigLoad)) or \
isinstance(action, ActionConfigFile) or \
(skip_none and action_dest in cfg and cfg[action_dest] is None):
cfg.pop(action_dest, None)
elif isinstance(action, _ActionSubCommands):
cfg.pop(action_dest, None)
for key, subparser in action.choices.items():
self._dump_cleanup_actions(cfg, subparser._actions, dump_kwargs, prefix=prefix+key+'.')
elif isinstance(action, ActionTypeHint):
value = cfg.get(action_dest)
if value is not None:
value = action.serialize(value, dump_kwargs=dump_kwargs)
cfg.update(value, action_dest)
def _dump_delete_default_entries(self, subcfg, subdefaults):
for key in list(subcfg.keys()):
if key in subdefaults:
val = subcfg[key]
default = subdefaults[key]
class_object_val = None
if is_class_object(val):
if val['class_path'] != default.get('class_path'):
parser = ActionTypeHint.get_class_parser(val['class_path'])
default = {'init_args': parser.get_defaults().as_dict()}
class_object_val = val
val = val['init_args']
default = default.get('init_args')
if val == default:
del subcfg[key]
elif isinstance(val, dict) and isinstance(default, dict):
self._dump_delete_default_entries(val, default)
if class_object_val and class_object_val.get('init_args') == {}:
del class_object_val['init_args']
def save(
self,
cfg: Namespace,
path: str,
format: str = 'parser_mode',
skip_none: bool = True,
skip_check: bool = False,
overwrite: bool = False,
multifile: bool = True,
branch: str = None,
) -> None:
"""Writes to file(s) the yaml or json for the given configuration object.
Args:
cfg: The configuration object to save.
path: Path to the location where to save config.
format: The output format: ``'yaml'``, ``'json'``, ``'json_indented'``, ``'parser_mode'`` or ones added via :func:`.set_dumper`.
skip_none: Whether to exclude entries whose value is None.
skip_check: Whether to skip parser checking.
overwrite: Whether to overwrite existing files.
multifile: Whether to save multiple config files by using the __path__ metas.
Raises:
TypeError: If any of the values of cfg is invalid according to the parser.
"""
check_valid_dump_format(format)
def check_overwrite(path):
if not overwrite and os.path.isfile(path()):
raise ValueError('Refusing to overwrite existing file: '+path())
dump_kwargs = {'format': format, 'skip_none': skip_none, 'skip_check': skip_check}
if fsspec_support:
try:
path_sw = Path(path, mode='sw')
except TypeError:
pass
else:
if path_sw.is_fsspec:
if multifile:
raise NotImplementedError('multifile=True not supported for fsspec paths: '+path)
fsspec = import_fsspec('ArgumentParser.save')
with fsspec.open(path, 'w') as f:
f.write(self.dump(cfg, **dump_kwargs)) # type: ignore
return
path_fc = Path(path, mode='fc')
check_overwrite(path_fc)
if not multifile:
with open(path_fc(), 'w') as f:
f.write(self.dump(cfg, **dump_kwargs)) # type: ignore
else:
cfg = deepcopy(cfg)
_ActionLink.strip_link_target_keys(self, cfg)
if not skip_check:
with load_value_context(self.parser_mode):
self.check_config(strip_meta(cfg), branch=branch)
def save_paths(cfg):
for key in cfg.get_sorted_keys():
val = cfg[key]
if isinstance(val, (Namespace, dict)) and '__path__' in val:
action = _find_action(self, key)
if isinstance(action, (ActionJsonSchema, ActionJsonnet, ActionTypeHint, _ActionConfigLoad)):
val_path = Path(os.path.basename(val['__path__']()), mode='fc')
check_overwrite(val_path)
val_out = strip_meta(val)
if isinstance(val, Namespace):
val_out = val_out.as_dict()
if '__orig__' in val:
val_str = val['__orig__']
else:
is_json = str(val_path).lower().endswith('.json')
val_str = dump_using_format(self, val_out, 'json_indented' if is_json else format)
with open(val_path(), 'w') as f:
f.write(val_str)
cfg[key] = os.path.basename(val_path())
elif isinstance(val, Path) and key in self.save_path_content and 'r' in val.mode:
val_path = Path(os.path.basename(val()), mode='fc')
check_overwrite(val_path)
with open(val_path(), 'w') as f:
f.write(val.get_content())
cfg[key] = type(val)(str(val_path))
with change_to_path_dir(path_fc), formatter_context(self):
save_paths(cfg)
dump_kwargs['skip_check'] = True
with open(path_fc(), 'w') as f:
f.write(self.dump(cfg, **dump_kwargs)) # type: ignore
## Methods related to defaults ##
def set_defaults(self, *args, **kwargs) -> None:
"""Sets default values from dictionary or keyword arguments.
Args:
*args (dict): Dictionary defining the default values to set.
**kwargs: Sets default values based on keyword arguments.
Raises:
KeyError: If key not defined in the parser.
"""
if len(args) > 0:
for n in range(len(args)):
self._defaults.update(args[n])
for dest in args[n].keys():
action = _find_action(self, dest)
if action is None:
raise KeyError(f'No action for destination key "{dest}" to set its default.')
action.default = args[n][dest]
if isinstance(action, ActionTypeHint):
action.normalize_default()
if kwargs:
self.set_defaults(kwargs)
def _get_default_config_files(self) -> List[Tuple[Optional[str], Path]]:
default_config_files = []
for key, parser in parent_parsers.get():
for pattern in parser.default_config_files:
files = sorted(glob.glob(os.path.expanduser(pattern)))
default_config_files += [(key, v) for v in files]
for pattern in self.default_config_files:
files = sorted(glob.glob(os.path.expanduser(pattern)))
default_config_files += [(None, x) for x in files]
if len(default_config_files) > 0:
try:
return [(k, Path(v, mode=get_config_read_mode())) for k, v in default_config_files]
except TypeError:
pass
return []
def get_default(self, dest: str) -> Any:
"""Gets a single default value for the given destination key.
Args:
dest: Destination key from which to get the default.
Raises:
KeyError: If key or its default not defined in the parser.
"""
action, _ = _find_parent_action_and_subcommand(self, dest)
if action is None or dest != action.dest or action.dest == argparse.SUPPRESS:
raise KeyError(f'No action for destination key "{dest}" to get its default.')
def check_suppressed_default():
if action.default == argparse.SUPPRESS:
raise KeyError(f'Action for destination key "{dest}" does not specify a default.')
if not self._get_default_config_files():
check_suppressed_default()
return action.default
defaults = self.get_defaults()
if action.dest not in defaults:
check_suppressed_default()
return defaults.get(action.dest)
def get_defaults(self, skip_check: bool = False) -> Namespace:
"""Returns a namespace with all default values.
Args:
skip_check: Whether to skip check if configuration is valid.
Returns:
An object with all default values as attributes.
"""
cfg = Namespace()
for action in filter_default_actions(self._actions):
if action.default != argparse.SUPPRESS and action.dest != argparse.SUPPRESS:
cfg[action.dest] = deepcopy(action.default)
self._logger.info('Loaded default values from parser.')
default_config_files = self._get_default_config_files()
for key, default_config_file in default_config_files:
with change_to_path_dir(default_config_file), load_value_context(self.parser_mode):
cfg_file = self._load_config_parser_mode(default_config_file.get_content())
if key is not None:
cfg_file = cfg_file.get(key)
try:
with _ActionPrintConfig.skip_print_config():
cfg_file = self._parse_common(
cfg=cfg_file,
env=None,
defaults=False,
with_meta=None,
skip_check=skip_check,
skip_required=True,
)
except (TypeError, KeyError, ParserError) as ex:
raise ParserError(f'Problem in default config file "{default_config_file}" :: {ex.args[0]}') from ex
cfg = self.merge_config(cfg_file, cfg)
meta = cfg.get('__default_config__')
if isinstance(meta, list):
meta.append(default_config_file)
elif isinstance(meta, Path):
cfg['__default_config__'] = [meta, default_config_file]
else:
cfg['__default_config__'] = default_config_file
self._logger.info(f'Parsed configuration from default path: {default_config_file}')
ActionTypeHint.add_sub_defaults(self, cfg)
return cfg
## Other methods ##
def error(self, message: str, ex: Exception = None) -> NoReturn:
"""Logs error message if a logger is set, calls the error handler and raises a ParserError."""
self._logger.error(message)
if self._error_handler is not None:
self._error_handler(self, message)
if ex is None:
raise ParserError(message)
else:
raise ParserError(message) from ex
def check_config(
self,
cfg: Namespace,
skip_none: bool = True,
skip_required: bool = False,
branch: str = None,
) -> None:
"""Checks that the content of a given configuration object conforms with the parser.
Args:
cfg: The configuration object to check.
skip_none: Whether to skip checking of values that are None.
skip_required: Whether to skip checking required arguments.
branch: Base key in case cfg corresponds only to a branch.
Raises:
TypeError: If any of the values are not valid.
KeyError: If a key in cfg is not defined in the parser.
"""
cfg = ccfg = cfg.clone()
if isinstance(branch, str):
branch_cfg = cfg
cfg = Namespace()
cfg[branch] = branch_cfg
def check_required(cfg, parser, prefix=''):
for reqkey in parser.required_args:
try:
val = cfg[reqkey]
if val is None:
raise TypeError
except (KeyError, TypeError) as ex:
raise TypeError(f'Key "{prefix}{reqkey}" is required but not included in config object or its value is None.') from ex
subcommand, subparser = _ActionSubCommands.get_subcommand(parser, cfg, fail_no_subcommand=False)
if subcommand is not None and subparser is not None:
check_required(cfg.get(subcommand), subparser, subcommand+'.')
def check_values(cfg):
for key in cfg.get_sorted_keys():
val = cfg[key]
action = _find_action(self, key)
if action is None:
if _is_branch_key(self, key) or key.endswith('.class_path') or '.init_args' in key:
continue
action = _find_parent_action(self, key, exclude=_ActionConfigLoad)
if action and not ActionTypeHint.is_subclass_typehint(action):
continue
if action is not None:
if val is None and skip_none:
continue
try:
self._check_value_key(action, val, key, ccfg)
except TypeError as ex:
if not (val == {} and ActionTypeHint.is_subclass_typehint(action) and key not in self.required_args):
raise ex
else:
raise KeyError(f'No action for destination key "{key}" to check its value.')
try:
if not skip_required and not lenient_check.get():
check_required(cfg, self)
with load_value_context(self.parser_mode):
check_values(cfg)
except (TypeError, KeyError) as ex:
prefix = 'Configuration check failed :: '
message = ex.args[0]
if prefix not in message:
message = prefix+message
raise type(ex)(message) from ex
def instantiate_classes(
self,
cfg: Namespace,
instantiate_groups: bool = True,
) -> Namespace:
"""Recursively instantiates all subclasses defined by 'class_path' and 'init_args' and class groups.
Args:
cfg: The configuration object to use.
instantiate_groups: Whether class groups should be instantiated.
Returns:
A configuration object with all subclasses and class groups instantiated.
"""
components: List[Union[ActionTypeHint, _ActionConfigLoad, _ArgumentGroup]] = []
for action in filter_default_actions(self._actions):
if isinstance(action, ActionTypeHint) or \
(isinstance(action, _ActionConfigLoad) and is_pure_dataclass(action.basetype)):
components.append(action)
if instantiate_groups:
skip = set(c.dest for c in components)
groups = [g for g in self._action_groups if hasattr(g, 'instantiate_class') and g.dest not in skip]
components.extend(groups)
components.sort(key=lambda x: -len(split_key(x.dest))) # type: ignore
order = _ActionLink.instantiation_order(self)
components = _ActionLink.reorder(order, components)
cfg = strip_meta(cfg)
for component in components:
if isinstance(component, (ActionTypeHint, _ActionConfigLoad)):
try:
value, parent, key = cfg.get_value_and_parent(component.dest)
except KeyError:
pass
else:
if value is not None:
with load_value_context(self.parser_mode):
parent[key] = component.instantiate_classes(value)
_ActionLink.apply_instantiation_links(self, cfg, component.dest)
else:
with load_value_context(self.parser_mode):
component.instantiate_class(component, cfg)
_ActionLink.apply_instantiation_links(self, cfg, component.dest)
subcommand, subparser = _ActionSubCommands.get_subcommand(self, cfg, fail_no_subcommand=False)
if subcommand is not None and subparser is not None:
cfg[subcommand] = subparser.instantiate_classes(cfg[subcommand], instantiate_groups=instantiate_groups)
return cfg
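    # A minimal usage sketch (comment only, not part of the original source); 'model' is a
    # hypothetical subclass-typed destination key:
    #
    #   cfg = parser.parse_args(args)            # cfg.model holds class_path/init_args
    #   init = parser.instantiate_classes(cfg)
    #   init.model                               # now an instance of the configured class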
def strip_unknown(self, cfg: Namespace) -> Namespace:
"""Removes all unknown keys from a configuration object.
Args:
cfg: The configuration object to strip.
Returns:
The stripped configuration object.
"""
cfg = deepcopy(cfg)
del_keys = []
for key in cfg.keys():
if _find_action(self, key) is None and not is_meta_key(key):
del_keys.append(key)
for key in del_keys:
del cfg[key]
return cfg
def get_config_files(self, cfg: Namespace) -> List[str]:
"""Returns a list of loaded config file paths.
Args:
cfg: The configuration object.
Returns:
Paths to loaded config files.
"""
cfg_files = []
if '__default_config__' in cfg:
cfg_files.append(cfg['__default_config__'])
for action in filter_default_actions(self._actions):
if isinstance(action, ActionConfigFile) and action.dest in cfg and cfg[action.dest] is not None:
cfg_files.extend(p for p in cfg[action.dest] if p is not None)
return cfg_files
def format_help(self) -> str:
defaults = None
if len(self._default_config_files) > 0:
note = 'no existing default config file found.'
try:
defaults = self.get_defaults()
if '__default_config__' in defaults:
config_files = defaults['__default_config__']
if isinstance(config_files, list):
config_files = [str(x) for x in config_files]
note = f'default values below are the ones overridden by the contents of: {config_files}'
except ParserError as ex:
note = f'tried getting defaults considering default_config_files but failed due to: {ex}'
group = self._default_config_files_group
group.description = f'{self._default_config_files}, Note: {note}'
with formatter_context(self, defaults):
help_str = super().format_help()
return help_str
def _apply_actions(self, cfg: Union[Namespace, Dict[str, Any]], parent_key: str = '') -> Namespace:
"""Runs _check_value_key on actions present in config."""
if isinstance(cfg, dict):
cfg = Namespace(cfg)
if parent_key:
cfg_branch = cfg
cfg = Namespace()
cfg[parent_key] = cfg_branch
keys = [parent_key+'.'+k for k in cfg_branch.__dict__.keys()]
else:
keys = list(cfg.__dict__.keys())
config_keys: Set[str] = set()
num = 0
while num < len(keys):
key = keys[num]
num += 1
exclude = _ActionConfigLoad if key in config_keys else None
action, subcommand = _find_action_and_subcommand(self, key, exclude=exclude)
if action is None or isinstance(action, _ActionSubCommands):
value = cfg[key]
if isinstance(value, dict):
value = Namespace(value)
if isinstance(value, Namespace):
new_keys = value.__dict__.keys()
keys += [key+'.'+k for k in new_keys if key+'.'+k not in keys]
cfg[key] = value
continue
action_dest = action.dest if subcommand is None else subcommand+'.'+action.dest
with _lenient_check_context():
value = cfg[action_dest]
with load_value_context(self.parser_mode):
value = self._check_value_key(action, value, action_dest, cfg)
if isinstance(action, _ActionConfigLoad):
config_keys.add(action_dest)
keys.append(action_dest)
cfg[action_dest] = value
return cfg[parent_key] if parent_key else cfg
@staticmethod
def merge_config(cfg_from: Namespace, cfg_to: Namespace) -> Namespace:
"""Merges the first configuration into the second configuration.
Args:
cfg_from: The configuration from which to merge.
cfg_to: The configuration into which to merge.
Returns:
A new object with the merged configuration.
"""
del_keys = []
for key_class_path in [k for k in cfg_from.keys() if k.endswith('.class_path')]:
key_init_args = key_class_path[:-len('class_path')] + 'init_args'
if key_class_path in cfg_to and key_init_args in cfg_to and cfg_from[key_class_path] != cfg_to[key_class_path]:
del_keys.append(key_init_args)
cfg = cfg_to.clone()
for key in reversed(del_keys):
del cfg[key]
cfg.update(cfg_from)
return cfg
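    # A minimal usage sketch (comment only, not part of the original source); 'opt' is a
    # hypothetical key. Note that the first argument is merged *into* the second:
    #
    #   base = parser.get_defaults()
    #   override = parser.parse_string('opt: 2', _skip_check=True)
    #   merged = parser.merge_config(override, base)   # merged.opt == 2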
def _check_value_key(self, action: argparse.Action, value: Any, key: str, cfg: Namespace) -> Any:
"""Checks the value for a given action.
Args:
action: The action used for parsing.
value: The value to parse.
            key: The configuration key.
            cfg: The configuration object, used as context when checking the value.
Raises:
TypeError: If the value is not valid.
"""
if value is None and lenient_check.get():
return value
if action.choices is not None and isinstance(action, _ActionSubCommands):
leaf_key = split_key_leaf(key)[-1]
if leaf_key == action.dest:
return value
subparser = action._name_parser_map[leaf_key]
subparser.check_config(value)
elif isinstance(action, _ActionConfigLoad):
if isinstance(value, str):
fpath = None
if '.' in key:
parent = cfg.get(split_key_leaf(key)[0])
if isinstance(parent, Namespace):
fpath = parent.get('__path__')
with change_to_path_dir(fpath):
value = action.check_type(value, self)
elif hasattr(action, '_check_type'):
value = action._check_type(value, cfg=cfg) # type: ignore
elif action.type is not None:
try:
if action.nargs in {None, '?'} or action.nargs == 0:
value = action.type(value)
elif value is not None:
for k, v in enumerate(value):
value[k] = action.type(v)
except (TypeError, ValueError) as ex:
raise TypeError(f'Parser key "{key}": {ex}') from ex
return value
## Properties ##
@property
def error_handler(self):
"""Property for the error_handler function that is called when there are parsing errors.
:getter: Returns the current error_handler function.
:setter: Sets a new error_handler function (Callable[self, message:str] or None).
Raises:
ValueError: If an invalid value is given.
"""
return self._error_handler
@error_handler.setter
def error_handler(self, error_handler):
if callable(error_handler) or error_handler is None:
self._error_handler = error_handler
else:
raise ValueError('error_handler can be either a Callable or None.')
@property
def default_config_files(self):
"""Default config file locations.
:getter: Returns the current default config file locations.
:setter: Sets new default config file locations, e.g. :code:`['~/.config/myapp/*.yaml']`.
Raises:
ValueError: If an invalid value is given.
"""
return self._default_config_files
@default_config_files.setter
def default_config_files(self, default_config_files:Optional[List[str]]):
if default_config_files is None:
self._default_config_files = []
elif isinstance(default_config_files, list) and all(isinstance(x, str) for x in default_config_files):
self._default_config_files = default_config_files
else:
raise ValueError('default_config_files has to be None or List[str].')
if len(self._default_config_files) > 0:
if not hasattr(self, '_default_config_files_group'):
group_title = 'default config file locations'
group = _ArgumentGroup(self, title=group_title)
self._action_groups = [group] + self._action_groups # type: ignore
self._default_config_files_group = group
elif hasattr(self, '_default_config_files_group'):
self._action_groups = [g for g in self._action_groups if g != self._default_config_files_group]
delattr(self, '_default_config_files_group')
@property
def default_env(self) -> bool:
"""Whether by default environment variables parsing is enabled.
:getter: Returns the current default environment variables parsing setting.
:setter: Sets the default environment variables parsing setting.
Raises:
ValueError: If an invalid value is given.
"""
return self._default_env
@default_env.setter
def default_env(self, default_env:bool):
if isinstance(default_env, bool):
self._default_env = default_env
else:
raise ValueError('default_env has to be a boolean.')
@property
def default_meta(self) -> bool:
"""Whether by default metadata is included in config objects.
:getter: Returns the current default metadata setting.
:setter: Sets the default metadata setting.
Raises:
ValueError: If an invalid value is given.
"""
return self._default_meta
@default_meta.setter
def default_meta(self, default_meta:bool):
if isinstance(default_meta, bool):
self._default_meta = default_meta
else:
raise ValueError('default_meta has to be a boolean.')
@property
def env_prefix(self) -> Optional[str]:
"""The environment variables prefix property.
:getter: Returns the current environment variables prefix.
:setter: Sets the environment variables prefix.
Raises:
ValueError: If an invalid value is given.
"""
return self._env_prefix
@env_prefix.setter
def env_prefix(self, env_prefix: Optional[str]):
if env_prefix is None:
self._env_prefix = os.path.splitext(self.prog)[0]
elif isinstance(env_prefix, str):
self._env_prefix = env_prefix
else:
raise ValueError('env_prefix has to be a string or None.')
@property
def parser_mode(self) -> str:
"""Mode for parsing configuration files: ``'yaml'``, ``'jsonnet'`` or ones added via :func:`.set_loader`.
:getter: Returns the current parser mode.
:setter: Sets the parser mode.
Raises:
ValueError: If an invalid value is given.
"""
return self._parser_mode
@parser_mode.setter
def parser_mode(self, parser_mode: str):
if parser_mode not in loaders:
raise ValueError(f'The only accepted values for parser_mode are {set(loaders.keys())}.')
if parser_mode == 'jsonnet':
import_jsonnet('parser_mode=jsonnet')
self._parser_mode = parser_mode
if self._subcommands_action:
for subparser in self._subcommands_action._name_parser_map.values():
subparser.parser_mode = parser_mode
@property
def dump_header(self) -> Optional[List[str]]:
"""Header to include as comment when dumping a config object.
:getter: Returns the current dump header.
:setter: Sets the dump header.
Raises:
ValueError: If an invalid value is given.
"""
return self._dump_header
@dump_header.setter
def dump_header(self, dump_header: Optional[List[str]]):
if not (dump_header is None or (isinstance(dump_header, list) and all(isinstance(x, str) for x in dump_header))):
raise ValueError('Expected dump_header to be None or a list of strings.')
self._dump_header = dump_header
if omegaconf_support:
from .loaders_dumpers import set_loader
from .optionals import get_omegaconf_loader
set_loader('omegaconf', get_omegaconf_loader())
from .deprecated import parse_as_dict_patch, instantiate_subclasses_patch
instantiate_subclasses_patch()
if 'JSONARGPARSE_SKIP_DEPRECATION_PATCH' not in os.environ:
parse_as_dict_patch()
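if __name__ == '__main__':
    # A minimal self-contained sketch (not part of the original module) showing the
    # parse/dump round trip documented above. It assumes this module's parser class is
    # named ArgumentParser; '--opt' and the yaml content are made up for illustration.
    demo = ArgumentParser(prog='demo')
    demo.add_argument('--opt', type=int, default=1)
    cfg = demo.parse_args([])              # defaults only
    print(demo.dump(cfg), end='')          # 'opt: 1'
    cfg = demo.parse_string('opt: 2')      # same checks applied to a yaml string
    print(cfg.opt)                         # 2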
|
the-stack_106_17199
|
"""
Test cases for InventoryItem Model
"""
import logging
import unittest
import os
from flask_api import status
from werkzeug.exceptions import NotFound
from service.models import InventoryItem, DataValidationError, db
from service import app
DATABASE_URI = os.getenv(
"DATABASE_URI", "postgres://postgres:postgres@localhost:5432/postgres"
)
def _create_test_inventory_item(product_id, product_name, quantity, restock_threshold, supplier_name, supplier_id,
                                unit_price, supplier_status):
    """Create a single test InventoryItem with the given attributes."""
return InventoryItem(
product_id=product_id,
product_name=product_name,
quantity=quantity,
restock_threshold=restock_threshold,
supplier_name=supplier_name,
supplier_id=supplier_id,
supplier_status=supplier_status,
unit_price=unit_price
)
######################################################################
# I N V E N T O R Y M O D E L T E S T C A S E S
######################################################################
class TestInventoryItem(unittest.TestCase):
""" Test Cases for InventoryItem Model """
@classmethod
def setUpClass(cls):
""" These run once per Test suite """
app.config['TESTING'] = True
app.config['DEBUG'] = False
app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI
app.logger.setLevel(logging.CRITICAL)
InventoryItem.init_db(app)
def setUp(self):
db.drop_all() # clean up the last tests
db.create_all() # make our sqlalchemy tables
def tearDown(self):
db.session.remove()
db.drop_all()
def _create_test_inventory_items(self, count):
""" Factory method to create inventories in bulk """
inventory_items = []
for _ in range(count):
test_item = _create_test_inventory_item(
product_id=123, product_name="test product", quantity=100, restock_threshold=50,
supplier_name="test supplier", supplier_id=123, unit_price=12.50, supplier_status="enabled")
test_item.create()
inventory_items.append(test_item)
return inventory_items
######################################################################
# T E S T C A S E S
#####################################################################
def test_create_inventory_item(self):
""" Create an inventory item and add it to the database """
test_item = InventoryItem.all()
self.assertEqual(test_item, [])
test_item = _create_test_inventory_item(
product_id=123, product_name="test product", quantity=100, restock_threshold=50,
supplier_name="test supplier", supplier_id=123, unit_price=12.50, supplier_status="enabled"
)
self.assertTrue(test_item is not None)
self.assertEqual(test_item.inventory_id, None)
test_item.create()
        # Assert that it was assigned an id and shows up in the database
self.assertEqual(test_item.inventory_id, 1)
test_item = InventoryItem.all()
self.assertEqual(len(test_item), 1)
def test_find_inventory_item(self):
""" Find an inventory item by ID """
test_items = self._create_test_inventory_items(3)
logging.debug(test_items)
# make sure they got saved
self.assertEqual(len(InventoryItem.all()), 3)
# find the 2nd inventory item in the list
found_item = InventoryItem.find(test_items[1].inventory_id)
self.assertIsNot(found_item, None)
self.assertEqual(found_item.inventory_id, test_items[1].inventory_id)
self.assertEqual(found_item.product_name, test_items[1].product_name)
self.assertEqual(found_item.quantity, test_items[1].quantity)
def test_find_by_supplier_name(self):
""" Find inventory item by supplier name """
inventory_items = [
_create_test_inventory_item(
product_id=123, product_name="test product1", quantity=100, restock_threshold=50,
supplier_name="test supplier1", supplier_id=123, unit_price=12.50, supplier_status="enabled"),
_create_test_inventory_item(
product_id=123, product_name="test product2", quantity=100, restock_threshold=50,
supplier_name="test supplier2", supplier_id=125, unit_price=12.50, supplier_status="enabled"),
_create_test_inventory_item(
product_id=123, product_name="test product3", quantity=100, restock_threshold=50,
supplier_name="test supplier3", supplier_id=127, unit_price=12.50, supplier_status="enabled")]
for inventory_item in inventory_items:
inventory_item.create()
found_items = InventoryItem.find_by_supplier_name("test supplier1")
        # also verify the supplier_id on the matched item
        self.assertEqual(found_items[0].supplier_id, inventory_items[0].supplier_id)
        # same field checks as in the find_by_product_name test
self.assertEqual(found_items[0].inventory_id, inventory_items[0].inventory_id)
self.assertEqual(found_items[0].product_name, inventory_items[0].product_name)
self.assertEqual(found_items[0].quantity, inventory_items[0].quantity)
def test_find_by_product_name(self):
""" Find an inventory item by Name """
inventory_items = [
_create_test_inventory_item(
product_id=123, product_name="test product", quantity=100, restock_threshold=50,
supplier_name="test supplier1", supplier_id=123, unit_price=12.50, supplier_status="enabled"),
_create_test_inventory_item(
product_id=123, product_name="test product2", quantity=100, restock_threshold=50,
supplier_name="test supplier2", supplier_id=125, unit_price=12.50, supplier_status="enabled"),
_create_test_inventory_item(
product_id=123, product_name="test product3", quantity=100, restock_threshold=50,
supplier_name="test supplier3", supplier_id=127, unit_price=12.50, supplier_status="enabled")]
for inventory_item in inventory_items:
inventory_item.create()
found_items = InventoryItem.find_by_product_name("test product")
self.assertEqual(found_items[0].inventory_id, inventory_items[0].inventory_id)
self.assertEqual(found_items[0].product_name, inventory_items[0].product_name)
self.assertEqual(found_items[0].quantity, inventory_items[0].quantity)
def test_find_by_supplier_id(self):
""" Find an inventory item by supplier id """
inventory_items = [
_create_test_inventory_item(
product_id=123, product_name="test product", quantity=100, restock_threshold=50,
supplier_name="test supplier1", supplier_id=123, unit_price=12.50, supplier_status="enabled"),
_create_test_inventory_item(
product_id=124, product_name="test product2", quantity=100, restock_threshold=50,
supplier_name="test supplier2", supplier_id=125, unit_price=12.50, supplier_status="enabled"),
_create_test_inventory_item(
product_id=125, product_name="test product2.5", quantity=100, restock_threshold=50,
supplier_name="test supplier2", supplier_id=125, unit_price=12.50, supplier_status="enabled"),
_create_test_inventory_item(
product_id=127, product_name="test product3", quantity=100, restock_threshold=50,
supplier_name="test supplier3", supplier_id=127, unit_price=12.50, supplier_status="enabled")]
for inventory_item in inventory_items:
inventory_item.create()
matches = [item.serialize() for item in inventory_items if item.supplier_id == 125]
found_items = InventoryItem.find_by_supplier_id("125")
found_items = [item.serialize() for item in found_items]
self.assertEqual(found_items, matches)
def test_find_by_supplier_status(self):
""" Find an inventory item by supplier status """
inventory_items = [
_create_test_inventory_item(
product_id=123, product_name="test product", quantity=100, restock_threshold=50,
supplier_name="test supplier1", supplier_id=123, unit_price=12.50, supplier_status="enabled"),
_create_test_inventory_item(
product_id=124, product_name="test product2", quantity=100, restock_threshold=50,
supplier_name="test supplier2", supplier_id=125, unit_price=12.50, supplier_status="enabled"),
_create_test_inventory_item(
product_id=125, product_name="test product2.5", quantity=100, restock_threshold=50,
supplier_name="test supplier2", supplier_id=125, unit_price=12.50, supplier_status="disabled"),
_create_test_inventory_item(
product_id=127, product_name="test product3", quantity=100, restock_threshold=50,
supplier_name="test supplier3", supplier_id=127, unit_price=12.50, supplier_status="enabled")]
for inventory_item in inventory_items:
inventory_item.create()
matches = [item.serialize() for item in inventory_items if item.supplier_status == "enabled"]
found_items = InventoryItem.find_by_supplier_status("enabled")
found_items = [item.serialize() for item in found_items]
self.assertEqual(found_items, matches)
def test_update_a_inventory_item(self):
""" Update an inventory item """
test_item = self._create_test_inventory_items(1)[0]
        logging.debug(test_item)
self.assertEqual(test_item.inventory_id, 1)
        # Change it and save it
test_item.supplier_name = "new supplier"
original_id = test_item.inventory_id
test_item.save()
self.assertEqual(test_item.inventory_id, original_id)
self.assertEqual(test_item.supplier_name, "new supplier")
# Fetch it back and make sure the id hasn't changed
# but the data did change
found_items = InventoryItem.all()
self.assertEqual(len(found_items), 1)
self.assertEqual(found_items[0].inventory_id, 1)
self.assertEqual(found_items[0].supplier_name, "new supplier")
def test_delete_a_inventory_item(self):
""" Delete an inventory item """
test_item = self._create_test_inventory_items(1)[0]
self.assertEqual(len(InventoryItem.all()), 1)
# delete the inventory item and make sure it isn't in the database
test_item.delete()
self.assertEqual(len(InventoryItem.all()), 0)
def test_serialize_a_inventory_item(self):
""" Test serialization of an inventory item """
test_item = self._create_test_inventory_items(1)[0]
data = test_item.serialize()
self.assertNotEqual(data, None)
self.assertIn("inventory_id", data)
self.assertEqual(data["inventory_id"], test_item.inventory_id)
self.assertIn("product_name", data)
self.assertEqual(data["product_name"], test_item.product_name)
self.assertIn("quantity", data)
self.assertEqual(data["quantity"], test_item.quantity)
self.assertIn("supplier_name", data)
self.assertEqual(data["supplier_name"], test_item.supplier_name)
self.assertIn("supplier_id", data)
self.assertEqual(data["supplier_id"], test_item.supplier_id)
def test_deserialize_a_inventory_item(self):
""" Test deserialization of an inventory item """
data = {
"inventory_id": 1,
"product_id": 123,
"product_name": "test product1",
"quantity": 100,
"restock_threshold": 200,
"supplier_id": 123,
"supplier_name": "supplier test",
"supplier_status": "enabled",
"unit_price": 12.50,
}
inventory_item = InventoryItem()
inventory_item.deserialize(data)
self.assertNotEqual(inventory_item, None)
self.assertEqual(inventory_item.inventory_id, None)
self.assertEqual(inventory_item.product_name, "test product1")
self.assertEqual(inventory_item.quantity, 100)
self.assertEqual(inventory_item.supplier_id, 123)
self.assertEqual(inventory_item.unit_price, 12.50)
def test_deserialize_missing_data(self):
""" Test deserialization of an inventory item """
data = {"id": 1, "product_name": "test product2", "quantity": 100}
inventory_item = InventoryItem()
self.assertRaises(DataValidationError, inventory_item.deserialize, data)
def test_deserialize_bad_data(self):
""" Test deserialization of bad data """
data = "this is not a dictionary"
inventory_item = InventoryItem()
self.assertRaises(DataValidationError, inventory_item.deserialize, data)
def test_find_or_404_found(self):
""" Find or return 404 found """
test_items = self._create_test_inventory_items(5)
found_item = InventoryItem.find_or_404(test_items[1].inventory_id)
self.assertIsNot(found_item, None)
self.assertEqual(found_item.inventory_id, test_items[1].inventory_id)
self.assertEqual(found_item.product_name, test_items[1].product_name)
self.assertEqual(found_item.quantity, test_items[1].quantity)
def test_find_or_404_not_found(self):
""" Find or return 404 NOT found """
self.assertRaises(NotFound, InventoryItem.find_or_404, 0)
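if __name__ == "__main__":
    # Conventional entry point (not in the original file) so the suite can also be run
    # directly, in addition to a test runner such as nose or pytest. Like any run of this
    # suite, it assumes DATABASE_URI points at a reachable Postgres instance.
    unittest.main()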
|
the-stack_106_17201
|
import sys
import logging
from datetime import datetime
from .setup import put_to_s3
from .iam_aws import AssumedRoleSession
from .analizer import analizer_expose_sg, analizer_launch_days, security_groupUSE, ami_informationcreationDays, ami_informationOWNER, ami_informationNAME, vpc_informationLog, lb_informationLog, lb_informationLogV2
# SETUP LOGGING OPTIONS
logging.basicConfig(format="%(asctime)s %(message)s", stream=sys.stdout)
log = logging.getLogger("cloud-lusat-inventory-collector")
log.setLevel(logging.INFO)
def ENICollector(args):
print("starting")
try:
account_id = args.get('account_id')
role_spoke = args.get('role_assume')
session = AssumedRoleSession(account_id, role_spoke)
account_alias = session.client('iam').list_account_aliases()[
'AccountAliases'][0]
aws_regions = session.client('ec2').describe_regions()
for region in aws_regions['Regions']:
aws_region_name = region['RegionName']
client = session.client('ec2', region_name=aws_region_name)
log.info("getting ENI interfaces")
for data in client.describe_network_interfaces()['NetworkInterfaces']:
try:
data = {
"@timestamp": str(datetime.now()),
"cloud.account.id": account_id,
"cloud.region": aws_region_name,
"labels": "inventory",
"eni.ip.private": data.get('PrivateIpAddress'),
"eni.dns.private": data.get('PrivateDnsName'),
"eni.id": data.get('NetworkInterfaceId'),
"eni.owner.id": data.get('OwnerId'),
"eni.description": data.get('Description'),
"eni.vpc.id": data.get('VpcId'),
"eni.mac_address": data.get('MacAddress'),
"eni.type": data.get('InterfaceType'),
"eni.attachments": data.get('Attachment'),
"eni.status": data.get('Status'),
"eni.subnet.id": data.get('SubnetId'),
"eni.ip.addresses": data.get('PrivateIpAddresses'),
"tags": data.get('TagSet'),
"inventory.type": 'ENI',
"cloud.account.name": account_alias
}
log.info("sending to s3")
put_to_s3(data)
except Exception as e:
log.info("something goes wrong: "+str(e))
log.info("done ENI")
except Exception as e:
print(e)
log.info("something goes wrong: "+str(e))
def VPCCollector(args):
try:
account_id = args.get('account_id')
role_spoke = args.get('role_assume')
session = AssumedRoleSession(account_id, role_spoke)
account_alias = session.client('iam').list_account_aliases()[
'AccountAliases'][0]
aws_regions = session.client('ec2').describe_regions()
for region in aws_regions['Regions']:
aws_region_name = region['RegionName']
client = session.client('ec2', region_name=aws_region_name)
log.info("getting VPC interfaces")
for data in client.describe_vpcs()['Vpcs']:
try:
vpcid = data.get('VpcId')
data = {
"@timestamp": str(datetime.now()),
"vpc.dhcp.id": data.get('DhcpOptionsId'),
"vpc.id": data.get('VpcId'),
"vpc.owner.id": data.get('OwnerId'),
"vpc.state": data.get('State'),
"vpn.associations": data.get('CidrBlockAssociationSet'),
"tags": data.get('Tags'),
"inventory.type": 'VPC',
"labels": "inventory",
"cloud.account.id": account_id,
"cloud.region": aws_region_name,
"cloud.account.name": account_alias,
"vpc.flowlogs": str(vpc_informationLog(vpcid, aws_region_name, session)),
}
log.info("sending to ELK")
put_to_s3(data)
except Exception as e:
log.info("something goes wrong: "+str(e))
log.info("done VPC")
except Exception as e:
log.info("something goes wrong: "+str(e))
def SGCollector(args):
try:
account_id = args.get('account_id')
role_spoke = args.get('role_assume')
session = AssumedRoleSession(account_id, role_spoke)
account_alias = session.client('iam').list_account_aliases()[
'AccountAliases'][0]
aws_regions = session.client('ec2').describe_regions()
for region in aws_regions['Regions']:
aws_region_name = region['RegionName']
client = session.client('ec2', region_name=aws_region_name)
log.info("getting security groups")
for data in client.describe_security_groups()['SecurityGroups']:
try:
data = {
"@timestamp": str(datetime.now()),
"security_group.ip_permission": data['IpPermissions'],
"security_group.name": data['GroupName'],
"security_group.description": data['Description'],
"security_group.id": data['GroupId'],
"security_group.owner.id": data['OwnerId'],
"security_group.vpc.id": data.get('VpcId'),
"tags": data.get('Tags'),
"inventory.type": 'SG',
"labels": "inventory",
"cloud.account.id": account_id,
"cloud.region": aws_region_name,
"cloud.account.name": account_alias,
"security_group.analyzer.expose": analizer_expose_sg(data['IpPermissions']),
"security_group.status.use": security_groupUSE(data['GroupId'], aws_region_name, session)
}
if data['security_group.status.use'] == 0:
log.info("here you can send to sqs")
log.info("sending to ELK")
put_to_s3(data)
except Exception as e:
log.info("something goes wrong: "+str(e))
log.info("done SG")
except Exception as e:
log.info("something goes wrong: "+str(e))
def EC2Collector(args):
try:
account_id = args.get('account_id')
role_spoke = args.get('role_assume')
session = AssumedRoleSession(account_id, role_spoke)
account_alias = session.client('iam').list_account_aliases()[
'AccountAliases'][0]
aws_regions = session.client('ec2').describe_regions()
for region in aws_regions['Regions']:
aws_region_name = region['RegionName']
client = session.client('ec2', region_name=aws_region_name)
log.info("getting EC2 instances")
for data in client.describe_instances()['Reservations']:
try:
for instances in data['Instances']:
instances = {
"labels": "inventory",
"@timestamp": str(datetime.now()),
"instance.id": instances['InstanceId'],
"cloud.account.id": account_id,
"cloud.region": aws_region_name,
"instance.state": instances.get('State'),
"instance.running.days": str(analizer_launch_days(instances['LaunchTime'])),
"instance.launch_time": str(instances['LaunchTime'].replace(tzinfo=None)),
"instance.architecture": instances['Architecture'],
"instance.subnet": instances.get('SubnetId'),
"instance.dns.public": instances.get('PublicDnsName'),
"instance.dns.private": instances.get('PrivateDnsName'),
"instance.type": instances.get('InstanceType'),
"instance.image.id": instances.get('ImageId'),
"instance.image.owner.id": ami_informationOWNER(instances.get('ImageId'), aws_region_name, session),
"instance.image.name": ami_informationNAME(instances.get('ImageId'), aws_region_name, session),
"instance.image.create.days": str(ami_informationcreationDays(instances.get('ImageId'), aws_region_name, session)),
"inventory.type": 'EC2',
"tags": instances.get('Tags'),
"instance.security_group": instances.get('SecurityGroups'),
"instance.role": instances.get('IamInstanceProfile'),
"instance.ip.private": instances.get('PrivateIpAddress'),
"instance.ip.public": instances.get('PublicIpAddress'),
"instance.vpc": instances.get('VpcId'),
"account_id": account_id,
"cloud.account.name": account_alias,
"instance.key": instances.get('KeyName'),
}
log.info("sending to s3")
put_to_s3(instances)
except Exception as e:
log.error("something goes wrong: "+str(e))
log.info("done EC2")
except Exception as e:
log.error("something goes wrong: "+str(e))
def LBCollector(args):
try:
account_id = args.get('account_id')
role_spoke = args.get('role_assume')
session = AssumedRoleSession(account_id, role_spoke)
account_alias = session.client('iam').list_account_aliases()[
'AccountAliases'][0]
aws_regions = session.client('ec2').describe_regions()
for region in aws_regions['Regions']:
aws_region_name = region['RegionName']
client = session.client('elb', region_name=aws_region_name)
log.info("getting elastic load balancers")
for data in client.describe_load_balancers()['LoadBalancerDescriptions']:
print(data)
try:
lbname = data.get('LoadBalancerName')
data = {
"@timestamp": str(datetime.now()),
"loadbalancer.name": data.get('LoadBalancerName'),
"loadbalancer.dns": data.get('DNSName'),
"loadbalancer.vpc": data.get('VPCId'),
"loadbalancer.security_group": data.get('SecurityGroups'),
"inventory.type": 'LB',
"labels": "inventory",
"cloud.account.id": account_id,
"cloud.region": aws_region_name,
"cloud.account.name": account_alias,
"loadbalancer.access_log": str(lb_informationLog(lbname, aws_region_name, session)),
}
log.info("sending to ELK")
put_to_s3(data)
except Exception as e:
log.info("something goes wrong: "+str(e))
client = session.client('elbv2', region_name=aws_region_name)
log.info("getting application load balancers")
for data in client.describe_load_balancers()['LoadBalancers']:
try:
lbarn = data.get('LoadBalancerArn')
data = {
"@timestamp": str(datetime.now()),
"loadbalancer.name": data.get('LoadBalancerName'),
"loadbalancer.arn": data.get('LoadBalancerArn'),
"loadbalancer.dns": data.get('DNSName'),
"loadbalancer.vpc": data.get('VPCId'),
"loadbalancer.security_group": data.get('SecurityGroups'),
"loadbalancer.scheme": data.get('Scheme'),
"inventory.type": 'LB',
"labels": "inventory",
"cloud.account.id": account_id,
"cloud.region": aws_region_name,
"cloud.account.name": account_alias,
"loadbalancer.access_log": str(lb_informationLogV2(lbarn, aws_region_name, session)),
}
log.info("sending to ELK")
put_to_s3(data)
except Exception as e:
log.info("something goes wrong: "+str(e))
log.info("done LoadBalancers")
log.info("done "+aws_region_name)
except Exception as e:
log.info("something goes wrong: "+str(e))
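if __name__ == "__main__":
    # A minimal invocation sketch (not part of the original module). The account id and
    # role name below are placeholders; running this for real requires credentials that
    # can assume the spoke role, plus the S3 setup used by put_to_s3.
    demo_args = {
        "account_id": "123456789012",           # placeholder AWS account id
        "role_assume": "inventory-spoke-role",  # placeholder role name
    }
    ENICollector(demo_args)
    SGCollector(demo_args)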
|
the-stack_106_17202
|
"""PyTorch implementation of Wide-ResNet taken from
https://github.com/jeromerony/fast_adversarial/blob/master/fast_adv/models/cifar10/wide_resnet.py"""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.FiLM import FiLM_Layer
from models.DualBN import DualBN2d
class BasicBlockOAT(nn.Module):
def __init__(self, in_planes, out_planes, stride, dropRate=0.0, use2BN=False, FiLM_in_channels=1):
super(BasicBlockOAT, self).__init__()
self.use2BN = use2BN
if self.use2BN:
Norm2d = DualBN2d
else:
Norm2d = nn.BatchNorm2d
self.bn1 = Norm2d(in_planes)
self.relu1 = nn.ReLU(inplace=True)
self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = Norm2d(out_planes)
self.relu2 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1,
padding=1, bias=False)
self.droprate = dropRate
self.equalInOut = (in_planes == out_planes)
self.convShortcut = (not self.equalInOut) and nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
padding=0, bias=False) or None
self.film1 = FiLM_Layer(channels=in_planes, in_channels=FiLM_in_channels)
self.film2 = FiLM_Layer(channels=out_planes, in_channels=FiLM_in_channels)
def forward(self, x, _lambda, idx2BN=None):
if self.use2BN:
out = self.bn1(x, idx2BN)
else:
# print('x device:', x.get_device())
# print('bn1 device:', self.bn1.weight.get_device())
out = self.bn1(x)
out = self.film1(out, _lambda)
out = self.relu1(out)
if not self.equalInOut:
sc = self.convShortcut(out)
else:
sc = x
out = self.conv1(out)
if self.use2BN:
out = self.bn2(out, idx2BN)
else:
out = self.bn2(out)
out = self.film2(out, _lambda)
out = self.relu2(out)
if self.droprate > 0:
out = F.dropout(out, p=self.droprate, training=self.training)
out = self.conv2(out)
out = torch.add(sc, out)
return out
class WideResNetOAT(nn.Module):
def __init__(self, depth, num_classes, widen_factor=1, dropRate=0.0, FiLM_in_channels=1, use2BN=False):
super(WideResNetOAT, self).__init__()
nChannels = [16, 16 * widen_factor, 32 * widen_factor, 64 * widen_factor]
assert (depth - 4) % 6 == 0, 'depth should be 6n+4'
n = (depth - 4) // 6
block = BasicBlockOAT
self.use2BN = use2BN
# 1st conv before any network block
self.conv1 = nn.Conv2d(3, nChannels[0], kernel_size=3, stride=1,
padding=1, bias=False)
# 1st block
self.bundle1 = [block(nChannels[0], nChannels[1], 1, dropRate=dropRate, use2BN=use2BN, FiLM_in_channels=FiLM_in_channels)]
for _ in range(1, n):
self.bundle1.append(block(nChannels[1], nChannels[1], 1, dropRate=dropRate, use2BN=use2BN, FiLM_in_channels=FiLM_in_channels))
self.bundle1 = nn.ModuleList(self.bundle1)
# 2nd block
self.bundle2 = [block(nChannels[1], nChannels[2], 2, dropRate=dropRate, use2BN=use2BN, FiLM_in_channels=FiLM_in_channels)]
for _ in range(1, n):
self.bundle2.append(block(nChannels[2], nChannels[2], 1, dropRate=dropRate, use2BN=use2BN, FiLM_in_channels=FiLM_in_channels))
self.bundle2 = nn.ModuleList(self.bundle2)
# 3rd block
self.bundle3 = [block(nChannels[2], nChannels[3], 2, dropRate=dropRate, use2BN=use2BN, FiLM_in_channels=FiLM_in_channels)]
for _ in range(1, n):
self.bundle3.append(block(nChannels[3], nChannels[3], 1, dropRate=dropRate, use2BN=use2BN, FiLM_in_channels=FiLM_in_channels))
self.bundle3 = nn.ModuleList(self.bundle3)
# global average pooling and classifier
if self.use2BN:
self.bn1 = DualBN2d(nChannels[3])
else:
self.bn1 = nn.BatchNorm2d(nChannels[3])
self.relu = nn.ReLU(inplace=True)
self.fc = nn.Linear(nChannels[3], num_classes)
self.nChannels = nChannels[3]
self.bundles = [self.bundle1, self.bundle2, self.bundle3]
def forward(self, x, _lambda, idx2BN=None):
out = self.conv1(x)
for bundle in self.bundles:
for block in bundle:
out = block(out, _lambda, idx2BN)
if self.use2BN:
out = self.bn1(out, idx2BN)
else:
out = self.bn1(out)
out = self.relu(out)
out = F.avg_pool2d(out, 8)
out = out.view(-1, self.nChannels)
return self.fc(out)
def WRN16_8OAT(FiLM_in_channels=1, use2BN=False):
return WideResNetOAT(depth=16, num_classes=10, widen_factor=8, dropRate=0.3, use2BN=use2BN, FiLM_in_channels=FiLM_in_channels)
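# Hedged usage sketch (not part of the original file): it assumes CIFAR-10 sized 32x32 inputs,
# that FiLM_Layer accepts a per-sample conditioning tensor of shape (N, FiLM_in_channels), and
# that idx2BN counts how many leading samples use the "clean" BN branch (assumed convention).
def _example_forward():
    model = WRN16_8OAT(FiLM_in_channels=1, use2BN=True)
    x = torch.randn(4, 3, 32, 32)       # batch of 4 CIFAR-10 sized images
    _lambda = torch.rand(4, 1)          # per-sample trade-off value in [0, 1]
    idx2BN = 2                          # assumed: first 2 samples routed through the "clean" BN branch
    logits = model(x, _lambda, idx2BN)  # expected shape: (4, 10)
    return logits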
|
the-stack_106_17203
|
#*****************************************************#
# This file is part of GRIDOPT. #
# #
# Copyright (c) 2015, Tomas Tinoco De Rubira. #
# #
# GRIDOPT is released under the BSD 2-clause license. #
#*****************************************************#
import numpy as np
from .method_error import *
class PFmethod:
def __init__(self):
"""
Power flow method class.
"""
self._parameters = {}
self.results = {'solver name': 'unknown',
'solver status': 'unknown',
'solver message': 'none',
'solver iterations': 0,
'solver time': 0.,
'solver primal variables': None,
'solver dual variables': None,
'problem' : None,
'problem time' : 0.,
'network snapshot' : None}
def create_problem(self,net):
"""
Creates optimization problem.
Parameters
----------
net : |Network|
Returns
-------
prob : |Problem|
"""
return None
def get_info_printer(self):
"""
Gets function for printing information
about method progress.
Returns
-------
printer : Function
"""
return lambda solver,header: None
def get_results(self):
"""
Gets dictionary with results.
Returns
-------
results : dict
"""
return self.results
def set_solver_name(self, name):
"""
Sets solver name.
Parameters
----------
name : string
"""
self.results['solver name'] = name
def set_solver_status(self, status):
"""
Sets solver status.
Parameters
----------
status : string
"""
self.results['solver status'] = status
def set_solver_message(self, msg):
"""
Sets solver message.
Parameters
----------
msg : string
"""
self.results['solver message'] = msg
def set_solver_iterations(self, k):
"""
Sets solver iterations.
Parameters
----------
k : int
"""
self.results['solver iterations'] = k
def set_solver_time(self, t):
"""
Sets solver time in seconds.
Parameters
----------
t : float
"""
self.results['solver time'] = t
def set_solver_primal_variables(self, x):
"""
Sets solver primal variables.
Parameters
----------
x : vector
"""
self.results['solver primal variables'] = x
def set_solver_dual_variables(self, d):
"""
Sets solver dual variables.
Parameters
----------
d : list
"""
self.results['solver dual variables'] = d
def set_problem(self, p):
"""
Sets problem.
Parameters
----------
p : |Problem|
"""
self.results['problem'] = p
def set_problem_time(self, t):
"""
Sets problem construction time in seconds.
Parameters
----------
t : float
"""
self.results['problem time'] = t
def set_network_snapshot(self, net):
"""
Sets network snapshot.
Parameters
----------
net : |Network|
"""
self.results['network snapshot'] = net
def get_parameters(self):
"""
Gets method parameters.
Returns
-------
params : dict
"""
return self._parameters
def set_parameters(self,params=None,strparams=None):
"""
Sets method parameters.
Parameters
----------
params : dict
Name-value pairs
strparams: dict
Name-value pairs where value is a string
"""
invalid_params = []
SOLVER_PARAMS = 'solver_parameters'
# List of method/solver parameter dictionaries
dict_list = [self._parameters]
if SOLVER_PARAMS in self._parameters:
dict_list += list(self._parameters[SOLVER_PARAMS].values())
# Parameters
if params:
for key,value in list(params.items()):
if key == SOLVER_PARAMS:
continue
valid_key = False
for parameter_dict in dict_list:
if key in parameter_dict:
valid_key = True
parameter_dict[key] = value
if not valid_key:
invalid_params.append(key)
if SOLVER_PARAMS in params and SOLVER_PARAMS in self._parameters:
solver_params = params[SOLVER_PARAMS]
for solver_name in self._parameters[SOLVER_PARAMS].keys():
if solver_name in solver_params:
self._parameters[SOLVER_PARAMS][solver_name].update(solver_params[solver_name])
# String-based parameters (from command-line utility)
if strparams:
for key,valuestr in list(strparams.items()):
valid_key = False
for parameter_dict in dict_list:
if key in parameter_dict:
valid_key = True
value = parameter_dict[key]
if type(value) is float:
new_value = float(valuestr)
elif type(value) is int:
new_value = int(valuestr)
elif type(value) is bool:
if valuestr == 'True':
new_value = True
elif valuestr == 'False':
new_value = False
else:
raise PFmethodError_ParamNotBool()
else:
new_value = valuestr
parameter_dict[key] = new_value
if not valid_key:
invalid_params.append(key)
# Invalid params
if invalid_params:
raise PFmethodError_BadParams(invalid_params)
def set_results(self,results):
"""
Sets method results.
Parameters
----------
results : dict
"""
self.results = results
def solve(self,net):
"""
Solves power flow problem.
Parameters
----------
net : |Network|
"""
pass
def update_network(self, net):
"""
Updates network with results.
Parameters
----------
net : |Network|
"""
if self.results['network snapshot'] is not None:
net.copy_from_network(self.results['network snapshot'], merged=True)
net.update_properties()
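# Hedged sketch (not part of GRIDOPT): a minimal stub subclass illustrating how a concrete
# power flow method is expected to populate the results dictionary through the setters above.
# The "stub" solver below is a placeholder, not a real GRIDOPT solver.
class PFmethodStub(PFmethod):
    name = 'PFmethodStub'
    def __init__(self):
        PFmethod.__init__(self)
        self._parameters = {'quiet': True}
    def solve(self, net):
        import time
        t0 = time.time()
        problem = self.create_problem(net)   # a real method would build and solve this problem
        self.set_problem(problem)
        self.set_problem_time(time.time()-t0)
        self.set_solver_name('stub')
        self.set_solver_status('solved')
        self.set_solver_message('none')
        self.set_solver_iterations(0)
        self.set_solver_time(0.)
        self.set_network_snapshot(net)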
|
the-stack_106_17204
|
# -----------------------------------------------------------------------------
# lex_closure.py
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path:
sys.path.insert(0, "..")
import ply.lex as lex
tokens = (
'NAME', 'NUMBER',
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS',
'LPAREN', 'RPAREN',
)
def make_calc():
# Tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
def t_NUMBER(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %s" % t.value)
t.value = 0
return t
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lineno += t.value.count("\n")
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
# Build the lexer
return lex.lex()
make_calc()
lex.runmain(data="3+4")
|
the-stack_106_17205
|
"""
ProvidedMakeWorkflow
"""
from aws_lambda_builders.workflows.custom_make.validator import CustomMakeRuntimeValidator
from aws_lambda_builders.workflow import BaseWorkflow, Capability
from aws_lambda_builders.actions import CopySourceAction
from aws_lambda_builders.path_resolver import PathResolver
from .actions import CustomMakeAction
from .utils import OSUtils
from .make import SubProcessMake
from ...exceptions import WorkflowFailedError
class CustomMakeWorkflow(BaseWorkflow):
"""
A Lambda builder workflow for provided runtimes based on make.
"""
NAME = "CustomMakeBuilder"
CAPABILITY = Capability(language="provided", dependency_manager=None, application_framework=None)
EXCLUDED_FILES = (".aws-sam", ".git")
def __init__(self, source_dir, artifacts_dir, scratch_dir, manifest_path, runtime=None, osutils=None, **kwargs):
super(CustomMakeWorkflow, self).__init__(
source_dir, artifacts_dir, scratch_dir, manifest_path, runtime=runtime, **kwargs
)
self.os_utils = OSUtils()
# Find the logical id of the function to be built.
options = kwargs.get("options") or {}
build_logical_id = options.get("build_logical_id", None)
if not build_logical_id:
raise WorkflowFailedError(
workflow_name=self.NAME,
action_name=None,
reason="Build target {} is not found!".format(build_logical_id),
)
subprocess_make = SubProcessMake(make_exe=self.binaries["make"].binary_path, osutils=self.os_utils)
make_action = CustomMakeAction(
artifacts_dir,
scratch_dir,
manifest_path,
osutils=self.os_utils,
subprocess_make=subprocess_make,
build_logical_id=build_logical_id,
)
self.actions = [CopySourceAction(source_dir, scratch_dir, excludes=self.EXCLUDED_FILES), make_action]
def get_resolvers(self):
return [PathResolver(runtime="provided", binary="make", executable_search_paths=self.executable_search_paths)]
def get_validators(self):
return [CustomMakeRuntimeValidator(runtime=self.runtime, architecture=self.architecture)]
|
the-stack_106_17206
|
import argparse
import importlib
import os
import time
import numpy as np
import tensorflow as tf
import models
FLAGS = tf.flags.FLAGS
DEFAULT_MODEL = 'vmnet'
if __name__ == '__main__':
tf.flags.DEFINE_string('model', DEFAULT_MODEL, 'Name of the model.')
tf.flags.DEFINE_string('cuda_device', '-1', 'CUDA device index to be used in the validation. This parameter may be set to the environment variable \'CUDA_VISIBLE_DEVICES\'. Specify this to employ GPUs.')
tf.flags.DEFINE_string('restore_path', None, 'Checkpoint path to be restored. Specify this to resume the training or use pre-trained parameters.')
tf.flags.DEFINE_string('restore_target', None, 'Target of the restoration.')
tf.flags.DEFINE_integer('restore_global_step', 0, 'Global step of the restored model. Some models may require to specify this.')
tf.flags.DEFINE_string('scales', '4', 'Upscaling factors. Use the \',\' character to specify multiple scales (e.g., 2,3,4).')
tf.flags.DEFINE_string('input_path', 'LR', 'Base path of the input images.')
tf.flags.DEFINE_string('output_path', 'SR', 'Base path of the upscaled images to be saved.')
# parse model first and import them
pre_parser = argparse.ArgumentParser(add_help=False)
pre_parser.add_argument('--model', default=DEFAULT_MODEL)
pre_parsed = pre_parser.parse_known_args()[0]
if (pre_parsed.model is not None):
MODEL_MODULE = importlib.import_module('models.' + pre_parsed.model)
def main(unused_argv):
# initialize
FLAGS.vmnet_intermediate_outputs = True
os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.cuda_device
scale_list = list(map(lambda x: int(x), FLAGS.scales.split(',')))
tf.logging.set_verbosity(tf.logging.INFO)
# image reading session
tf_image_read_graph = tf.Graph()
with tf_image_read_graph.as_default():
tf_image_read_path = tf.placeholder(tf.string, [])
tf_image = tf.read_file(tf_image_read_path)
tf_image = tf.image.decode_png(tf_image, channels=3, dtype=tf.uint8)
tf_image_read = tf_image
tf_image_read_init = tf.global_variables_initializer()
tf_image_read_session = tf.Session(config=tf.ConfigProto(
device_count={'GPU': 0}
))
tf_image_read_session.run(tf_image_read_init)
# image saving session
tf_image_save_graph = tf.Graph()
with tf_image_save_graph.as_default():
tf_image_save_path = tf.placeholder(tf.string, [])
tf_image_save_image = tf.placeholder(tf.float32, [None, None, 3])
tf_image = tf_image_save_image
tf_image = tf.round(tf_image)
tf_image = tf.clip_by_value(tf_image, 0, 255)
tf_image = tf.cast(tf_image, tf.uint8)
tf_image_png = tf.image.encode_png(tf_image)
tf_image_save_op = tf.write_file(tf_image_save_path, tf_image_png)
tf_image_save_init = tf.global_variables_initializer()
tf_image_save_session = tf.Session(config=tf.ConfigProto(
device_count={'GPU': 0}
))
tf_image_save_session.run(tf_image_save_init)
# model
model = MODEL_MODULE.create_model()
model.prepare(is_training=False, global_step=FLAGS.restore_global_step)
# model > restore
model.restore(ckpt_path=FLAGS.restore_path, target=FLAGS.restore_target)
tf.logging.info('restored the model')
# get image path list
image_list = [f for f in os.listdir(FLAGS.input_path) if f.lower().endswith('.png')]
tf.logging.info('found %d images' % (len(image_list)))
# iterate
running_time_list = []
num_total_outputs = FLAGS.vmnet_recursions // FLAGS.vmnet_recursion_frequency
for scale in scale_list:
for image_name in image_list:
tf.logging.info('- x%d: %s' % (scale, image_name))
input_image_path = os.path.join(FLAGS.input_path, image_name)
input_image = tf_image_read_session.run(tf_image_read, feed_dict={tf_image_read_path: input_image_path})
t1 = time.perf_counter()
output_images = model.upscale(input_list=[input_image], scale=scale)
t2 = time.perf_counter()
running_time = (t2 - t1)
output_image_ensemble = np.zeros_like(output_images[0][0])
ensemble_factor_total = 0.0
for i in range(num_total_outputs):
num_recursions = (i + 1) * FLAGS.vmnet_recursion_frequency
output_image = output_images[i][0]
ensemble_factor = 1.0 / (2.0 ** (num_total_outputs-num_recursions))
output_image_ensemble = output_image_ensemble + (output_image * ensemble_factor)
ensemble_factor_total += ensemble_factor
output_image = output_image_ensemble / ensemble_factor_total
output_image_path = os.path.join(FLAGS.output_path, 'x%d' % (scale), os.path.splitext(image_name)[0]+'.png')
tf_image_save_session.run(tf_image_save_op, feed_dict={tf_image_save_path:output_image_path, tf_image_save_image:output_image})
running_time_list.append(running_time)
# finalize
tf.logging.info('finished')
tf.logging.info('%.6f sec' % (np.mean(running_time_list)))
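# Hedged illustration (not part of the original script): the ensemble loop above weights the
# output after k recursions by 1/2**(N-k) and then normalises, so later recursions dominate.
# This helper just reproduces those weights for inspection.
def _example_ensemble_weights(num_total_outputs=4):
    raw = [1.0 / (2.0 ** (num_total_outputs - k)) for k in range(1, num_total_outputs + 1)]
    total = sum(raw)
    return [w / total for w in raw]   # e.g. N=4 -> [0.0667, 0.1333, 0.2667, 0.5333]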
if __name__ == '__main__':
tf.app.run()
|
the-stack_106_17207
|
# Copyright 2011-2013 Luis Pedro Coelho <[email protected]>
# License: MIT
import numpy as np
from mahotas.thresholding import otsu, rc, bernsen, gbernsen
from mahotas.histogram import fullhistogram
def slow_otsu(img, ignore_zeros=False):
hist = fullhistogram(img)
hist = hist.astype(np.double)
Hsum = img.size - hist[0]
if ignore_zeros:
hist[0] = 0
if Hsum == 0:
return 0
Ng = len(hist)
nB = np.cumsum(hist)
nO = nB[-1]-nB
mu_B = 0
mu_O = np.dot(np.arange(Ng), hist)/ Hsum
best = nB[0]*nO[0]*(mu_B-mu_O)*(mu_B-mu_O)
bestT = 0
for T in range(1, Ng):
if nB[T] == 0: continue
if nO[T] == 0: break
mu_B = (mu_B*nB[T-1] + T*hist[T]) / nB[T]
mu_O = (mu_O*nO[T-1] - T*hist[T]) / nO[T]
sigma_between = nB[T]*nO[T]*(mu_B-mu_O)*(mu_B-mu_O)
if sigma_between > best:
best = sigma_between
bestT = T
return bestT
def test_otsu_fast():
np.random.seed(120)
for i in range(12):
A = 32*np.random.rand(128,128)
A = A.astype(np.uint8)
fast = otsu(A)
slow = slow_otsu(A)
assert fast == slow
def test_thresholding():
np.random.seed(123)
A = np.random.rand(128,128)
A[24:48,24:48] += 4 * np.random.rand(24,24)
A *= 255//A.max()
A = A.astype(np.uint8)
def tm(method):
T = method(A)
assert (A > T)[24:48,24:48].mean() > .5
assert (A > T)[:24,:24].mean() < .5
assert (A > T)[48:,:].mean() < .5
assert (A > T)[:,48:].mean() < .5
yield tm, otsu
yield tm, rc
def test_nozeros():
np.seterr(all='raise')
np.random.seed(22)
A = (np.random.rand(100,100)*50).astype(np.uint8)+201
assert rc(A) > 200
assert otsu(A) > 200
def test_ignore_zeros():
np.seterr(all='raise')
np.random.seed(22)
A = np.zeros((1024,24), np.uint8)
A[:24,:24] = np.random.random_integers(100, 200, size=(24,24))
assert rc(A) < 100
assert otsu(A) < 100
assert rc(A, ignore_zeros=1) > 100
assert otsu(A, ignore_zeros=1) > 100
def test_zero_image():
A = np.zeros((16,16), np.uint8)
def tm(method):
assert method(A, ignore_zeros=0) == 0
assert method(A, ignore_zeros=1) == 0
yield tm, rc
yield tm, otsu
def test_soft_threshold():
from mahotas.thresholding import soft_threshold
np.random.seed(223)
for i in range(4):
f = np.random.randint(-256,256, size=(128,128,4))
fo = f.copy()
t = soft_threshold(f, 16)
assert not np.all(fo == t)
assert np.all(t[np.abs(f) < 16] == 0)
assert t.max() == f.max()-16
assert t.min() == f.min()+16
assert np.all( (np.abs(f) <= 16) | (np.abs(f)-16 == np.abs(t)))
def test_bernsen():
np.random.seed(120)
for i in range(4):
f = 32*np.random.rand(40,68)
f = f.astype(np.uint8)
b = bernsen(f, 8, 15)
assert f.shape == b.shape
b = bernsen(f, 8, 15, 34)
assert f.shape == b.shape
def test_gbernsen():
np.random.seed(120)
for i in range(4):
f = 32*np.random.rand(64,96)
f = f.astype(np.uint8)
b = gbernsen(f, np.ones((3,3), bool), 15, 145)
assert f.shape == b.shape
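# Hedged usage sketch (not one of the tests): how the thresholding functions exercised above
# are typically applied to a grey-level image.
def _example_usage():
    np.random.seed(0)
    img = (255 * np.random.rand(64, 64)).astype(np.uint8)   # stand-in for a grey-level image
    T = otsu(img)                  # global Otsu threshold
    binary = img > T               # boolean foreground mask
    local = bernsen(img, 8, 15)    # local Bernsen thresholding (radius 8, contrast threshold 15)
    return T, binary, local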
|
the-stack_106_17208
|
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import datetime
import idc
import six
import idaapi
import idautils
import capa
logger = logging.getLogger("capa")
SUPPORTED_IDA_VERSIONS = [
"7.1",
"7.2",
"7.3",
"7.4",
"7.5",
]
# file type names as returned by idaapi.get_file_type_name()
SUPPORTED_FILE_TYPES = [
"Portable executable for 80386 (PE)",
"Portable executable for AMD64 (PE)",
"Binary file", # x86/AMD64 shellcode support
]
def inform_user_ida_ui(message):
idaapi.info("%s. Please refer to IDA Output window for more information." % message)
def is_supported_ida_version():
version = idaapi.get_kernel_version()
if version not in SUPPORTED_IDA_VERSIONS:
warning_msg = "This plugin does not support your IDA Pro version"
logger.warning(warning_msg)
logger.warning(
"Your IDA Pro version is: %s. Supported versions are: %s." % (version, ", ".join(SUPPORTED_IDA_VERSIONS))
)
return False
return True
def is_supported_file_type():
file_type = idaapi.get_file_type_name()
if file_type not in SUPPORTED_FILE_TYPES:
logger.error("-" * 80)
logger.error(" Input file does not appear to be a PE file.")
logger.error(" ")
logger.error(
" capa currently only supports analyzing PE files (or binary files containing x86/AMD64 shellcode) with IDA."
)
logger.error(" If you don't know the input file type, you can try using the `file` utility to guess it.")
logger.error("-" * 80)
return False
return True
def get_disasm_line(va):
""" """
return idc.generate_disasm_line(va, idc.GENDSM_FORCE_CODE)
def is_func_start(ea):
""" check if function stat exists at virtual address """
f = idaapi.get_func(ea)
return f and f.start_ea == ea
def get_func_start_ea(ea):
""" """
f = idaapi.get_func(ea)
return f if f is None else f.start_ea
def collect_metadata():
md5 = idautils.GetInputFileMD5()
if not isinstance(md5, six.string_types):
md5 = capa.features.bytes_to_str(md5)
sha256 = idaapi.retrieve_input_file_sha256()
if not isinstance(sha256, six.string_types):
sha256 = capa.features.bytes_to_str(sha256)
return {
"timestamp": datetime.datetime.now().isoformat(),
# "argv" is not relevant here
"sample": {
"md5": md5,
"sha1": "", # not easily accessible
"sha256": sha256,
"path": idaapi.get_input_file_path(),
},
"analysis": {
"format": idaapi.get_file_type_name(),
"extractor": "ida",
},
"version": capa.version.__version__,
}
|
the-stack_106_17212
|
import pandas as pd
from joblib import Parallel, delayed
from tqdm import tqdm
def calc_temp_overlap(start_1, end_1, start_2, end_2):
"""
Calculate the portion of the first time span that overlaps with the second
Parameters
----------
start_1: datetime
start of first time span
end_1: datetime
end of first time span
start_2: datetime
start of second time span
end_2: datetime
end of second time span
Returns
-------
float:
        The portion of the first time span that overlaps with the second, between 0 and 1
"""
# case 1: no overlap - 1 was before 2
if end_1 < start_2:
return 0
# case 2: no overlap - 1 comes after 2
elif end_2 < start_1:
return 0
# case 3: 2 fully in 1
if (start_1 <= start_2) and (end_1 >= end_2):
temp_overlap = end_2 - start_2
# case 4: 1 fully in 2
elif (start_2 <= start_1) and (end_2 >= end_1):
temp_overlap = end_1 - start_1
# case 5: 1 overlaps 2 from right
elif (start_2 <= start_1) and (end_2 <= end_1):
temp_overlap = end_2 - start_1
# case 6: 1 overlaps 2 from left
elif (start_1 <= start_2) and (end_1 <= end_2):
temp_overlap = end_1 - start_2
else:
raise Exception("wrong case")
temp_overlap = temp_overlap.total_seconds()
# no overlap at all
assert temp_overlap >= 0, "the overlap can not be lower than 0"
dur = end_1 - start_1
if dur.total_seconds() == 0:
return 0
else:
overlap_ratio = temp_overlap / dur.total_seconds()
return overlap_ratio
def applyParallel(dfGrouped, func, n_jobs, print_progress, **kwargs):
"""
    Function wrapper to parallelize functions after .groupby().
Parameters
----------
dfGrouped: pd.DataFrameGroupBy
The groupby object after calling df.groupby(COLUMN).
func: function
Function to apply to the dfGrouped object, i.e., dfGrouped.apply(func).
n_jobs: int
The maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given, no parallel
computing code is used at all, which is useful for debugging. See
https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation
for a detailed description
print_progress: boolean
If set to True print the progress of apply.
**kwargs:
Other arguments passed to func.
Returns
-------
pd.DataFrame:
The result of dfGrouped.apply(func)
"""
df_ls = Parallel(n_jobs=n_jobs)(
delayed(func)(group, **kwargs) for _, group in tqdm(dfGrouped, disable=not print_progress)
)
return pd.concat(df_ls)
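# Hedged usage sketch (not part of the module): a direct call to calc_temp_overlap on two
# overlapping intervals, and applyParallel on a toy DataFrame grouped by user.
def _example_usage():
    start_1, end_1 = pd.Timestamp("2021-01-01 10:00"), pd.Timestamp("2021-01-01 12:00")
    start_2, end_2 = pd.Timestamp("2021-01-01 11:00"), pd.Timestamp("2021-01-01 13:00")
    ratio = calc_temp_overlap(start_1, end_1, start_2, end_2)   # -> 0.5 (half of span 1 overlaps)
    df = pd.DataFrame({"user_id": [1, 1, 2, 2], "value": [1.0, 2.0, 3.0, 4.0]})
    sums = applyParallel(df.groupby("user_id"),
                         lambda group: group[["value"]].sum().to_frame().T,
                         n_jobs=1, print_progress=False)
    return ratio, sums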
|
the-stack_106_17213
|
# MongoDB and Flask Application
# Dependencies and Setup
from flask import Flask, render_template
from flask_pymongo import PyMongo
import scrape_mars
# Flask Setup
app = Flask(__name__)
# PyMongo Connection Setup
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
# Flask Routes
# Root Route to Query MongoDB & Pass Mars Data Into HTML Template: index.html to Display Data
@app.route("/")
def index():
mars = mongo.db.mars.find_one()
return render_template("index.html", mars=mars)
# Scrape Route to Import `scrape_mars.py` Script & Call `scrape` Function
@app.route("/scrape")
def scrapper():
mars = mongo.db.mars
mars_data = scrape_mars.scrape_all()
    mars.update_one({}, {"$set": mars_data}, upsert=True)
return "Successful"
# Define Main Behavior
if __name__ == "__main__":
app.run(debug=True)
|
the-stack_106_17214
|
# -*- coding: utf-8 -*-
"""
tracker.models
~~~~~~~~~~~~~~
tracker models file
:copyright: (c) 2014 by arruda.
"""
from django.db import models
from django.utils import timezone
class Realm(models.Model):
"""
A Wow realm
"""
name = models.CharField(u"Realm Name", max_length=350, blank=False, null=True)
class Meta:
app_label = 'tracker'
def __unicode__(self):
return self.name
class Character(models.Model):
"""
A wow character
"""
name = models.CharField(u"Character Name", max_length=350, blank=False, null=True)
realm = models.ForeignKey('tracker.Realm', related_name=u"characters", blank=False, null=True)
user = models.ForeignKey('users.User', related_name=u"characters", blank=True, null=True)
class Meta:
app_label = 'tracker'
def __unicode__(self):
return "%s / %s" % (self.realm, self.name)
class CharacterItem(models.Model):
"""
The connection of Character and Item
"""
item = models.ForeignKey('tracker.Item', related_name='character_items')
character = models.ForeignKey('tracker.Character', related_name='character_items')
class Meta:
app_label = 'tracker'
def __unicode__(self):
return "%s - %s" % (self.item, self.character)
class Item(models.Model):
"""
Represents an Item that is tracked
"""
blizzard_id = models.PositiveIntegerField(u"Blizzard Item Id", blank=False, null=True)
name = models.CharField(u"Item Name", max_length=350, blank=True, null=True)
characters = models.ManyToManyField(
Character,
through=CharacterItem,
related_name=u"items",
blank=True, null=True
)
class Meta:
app_label = 'tracker'
def __unicode__(self):
if self.name is None:
return "#" + str(self.b_id)
else:
return self.name + " (" + "#" + str(self.blizzard_id) + ")"
class RealmItemPriceOnDate(models.Model):
"""
Represent the informations of price for a given Item
in a Realm, on given a Date
"""
realm = models.ForeignKey(
'tracker.Realm',
related_name=u"realm_item_prices_on_date",
blank=False, null=True
)
item = models.ForeignKey(
'tracker.Item',
related_name=u"realm_item_prices_on_date",
blank=False, null=True
)
date = models.DateField(u"Date",
default=timezone.now,
blank=False, null=True
)
avg_price = models.DecimalField(
u"Average Price",
max_digits=10,
decimal_places=2,
default="0.0"
)
class Meta:
app_label = 'tracker'
ordering = ['-date', ]
def __unicode__(self):
return "%s %s %s" % (self.realm, self.item, self.date)
class CharacterItemPriceOnDate(models.Model):
"""
Represents the price of an Item belonging to a given character
on a given Date
"""
character = models.ForeignKey(
'tracker.Character',
related_name=u"character_item_prices_on_date",
blank=False, null=True
)
item = models.ForeignKey(
'tracker.Item',
related_name=u"character_item_prices_on_date",
blank=False, null=True
)
date = models.DateField(u"Date",
default=timezone.now,
blank=False, null=True
)
avg_price = models.DecimalField(
u"Average Price",
max_digits=10,
decimal_places=2,
default="0.0"
)
class Meta:
app_label = 'tracker'
ordering = ['-date', ]
def __unicode__(self):
return "%s %s %s" % (self.character, self.item, self.date)
|
the-stack_106_17222
|
def dump_by_quantiles(df, q_low=0.01, q_high=0.99):
    """Remove rows containing outliers, based on per-column quantile cut-offs.
    Inputs:
    df : a pandas DataFrame with numeric columns
    q_low: float, the lower quantile cut-off
    q_high: float, the upper quantile cut-off
    Returns:
    the DataFrame without the rows containing outliers"""
    # per-column cut-off values at the low and high quantiles
    quant_df = df.quantile([q_low, q_high])
    print('Row count before outlier removal', len(df))
    # filter against quant_df in two steps (doing both comparisons at once exceeded the memory limit)
    df = df[df >= quant_df.loc[q_low]].dropna()
    df = df[df <= quant_df.loc[q_high]].dropna()
    print('Row count after outlier removal', len(df))
    return df
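# Hedged usage sketch (not part of the original file): trimming a toy numeric DataFrame
# at the 1st and 99th percentiles.
def _example_usage():
    import numpy as np
    import pandas as pd
    rng = np.random.default_rng(0)
    df = pd.DataFrame({'x': rng.normal(size=1000), 'y': rng.normal(size=1000)})
    trimmed = dump_by_quantiles(df, q_low=0.01, q_high=0.99)
    return trimmed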
|
the-stack_106_17223
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# %%
import sys
import os
import multiprocessing
import numpy as np
import torch
from torch import nn, optim
from kbc.util import set_seed
from kbc.training.data import Data
from kbc.training.batcher import Batcher
from kbc.models import DistMult, ComplEx, TransE
import logging
logger = logging.getLogger(os.path.basename(sys.argv[0]))
torch.set_num_threads(multiprocessing.cpu_count())
# %%
def p_target_regularizer(entity_embedding, reg_param):
reg = torch.norm(entity_embedding-reg_param)
return reg
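# Hedged illustration (not part of the original script): in one dimension the L2 norm used
# above reduces to an absolute difference.
def _example_regularizer_value():
    # with a 1-D embedding of 2.0 and target p = 0.5 the penalty is |2.0 - 0.5| = 1.5
    return p_target_regularizer(torch.tensor([2.0]), torch.tensor([0.5]))   # -> tensor(1.5000)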
# %%
def main():
# args
train_path = TRAIN_DIR
dev_path = DEV_DIR
test_path = TEST_DIR
model_name = MODEL
optimizer_name = OPTIMIZER
embedding_size = EMBEDDING_SIZE
batch_size = BATCH_SIZE
nb_epochs = EPOCHS
learning_rate = LEARNING_RATE
meta_loss_type = META_LOSS_TYPE
input_type = INPUT_TYPE
is_quiet = QUIET
seed = SEED
set_seed(seed)
random_state = np.random.RandomState(seed)
device = torch.device('cpu')
# Load toy dataset
data = Data(train_path=train_path, dev_path=dev_path, test_path=test_path,
test_i_path=None, test_ii_path=None, input_type=input_type)
print(data.entity_to_idx) # {'A': 0, 'B': 1, 'C': 2, 'D': 3}
rank = embedding_size * 2 if model_name in {'complex'} else embedding_size
init_size = 1
ent_embs = torch.normal(0, 1, (data.nb_entities, rank)).detach()
pred_embs = torch.normal(0, 1, (data.nb_predicates, rank)).detach()
print(f"\nSTARTING entity embeddings:\n {ent_embs}")
print(f"STARTING predicate embeddings:\n {pred_embs}\n")
if embedding_size == 1:
reg_param_grid = np.array(np.linspace(-2.5, 2.5, 51))
outer_steps = reg_param_grid.shape[0]
elif embedding_size == 2:
reg_param_grid = np.array(np.meshgrid(np.linspace(-2.5, 2.5, 11), np.linspace(-2.5, 2.5, 11))).T.reshape(-1, 2)
outer_steps = reg_param_grid.shape[0]
# outer loop
mean_meta_losses = []
best_meta_loss = 999 # arbitrary large number to initialise
best_reg_term = 999 # arbitrary large number to initialise
for outer_step in range(outer_steps):
# nn.Embedding using to a lookup table of embeddings (i.e. you can index entity_embeddings to return given entities embedding)
# Nice explanation found in Escachator's answer here: https://stackoverflow.com/questions/50747947/embedding-in-pytorch
entity_embeddings = nn.Embedding(data.nb_entities, rank, sparse=False).to(device)
predicate_embeddings = nn.Embedding(data.nb_predicates, rank, sparse=False).to(device)
# Set embeddings values the same in every outer loop so that each
entity_embeddings.weight = nn.Parameter(ent_embs.detach().clone())
predicate_embeddings.weight = nn.Parameter(pred_embs.detach().clone())
# Downscale the randomly initialised embeddings (initialised with N(0,1))
entity_embeddings.weight.data *= init_size
predicate_embeddings.weight.data *= init_size
parameters_lst = nn.ModuleDict({
'entities': entity_embeddings,
'predicates': predicate_embeddings
}).to(device)
# When this dictionary is indexed by model name, the appropriate model class will be initialised
model_factory = {
'distmult': lambda: DistMult(entity_embeddings=entity_embeddings.weight,
predicate_embeddings=predicate_embeddings.weight),
'complex': lambda: ComplEx(entity_embeddings=entity_embeddings.weight,
predicate_embeddings=predicate_embeddings.weight),
'transe': lambda: TransE(entity_embeddings=entity_embeddings.weight,
predicate_embeddings=predicate_embeddings.weight)
}
# Initialise correct model
model = model_factory[model_name]().to(device)
# When this dictionary is indexed by optimizer name, the appropriate optimizer class will be initialised
optimizer_factory = {
'adagrad': lambda: optim.Adagrad(parameters_lst.parameters(), lr=learning_rate),
'adam': lambda: optim.Adam(parameters_lst.parameters(), lr=learning_rate),
'sgd': lambda: optim.SGD(parameters_lst.parameters(), lr=learning_rate)
}
assert optimizer_name in optimizer_factory
optimizer = optimizer_factory[optimizer_name]()
# Specify loss function (cross-entropy by default)
loss_function = nn.CrossEntropyLoss(reduction='mean')
# inner loop
mean_losses = []
# reg_param = 2 * range_values_regparam_grid * (torch.rand(rank) - 0.5)
reg_param = torch.tensor(reg_param_grid[outer_step])
print(f"Random reg param (p) value {outer_step}: {reg_param}")
reg_param.requires_grad = False
for epoch_no in range(1, nb_epochs + 1):
batcher = Batcher(data.Xs, data.Xp, data.Xo, batch_size, 1, random_state)
nb_batches = len(batcher.batches)
epoch_loss_values = [] # to store loss for each batch in the epoch
epoch_loss_nonreg_values = []
for batch_no, (batch_start, batch_end) in enumerate(batcher.batches, 1):
model.train() # model in training mode
# Size [B] numpy arrays containing indices of each subject_entity, predicate, and object_entity in the batch
xp_batch, xs_batch, xo_batch, xi_batch = batcher.get_batch(batch_start, batch_end)
xs_batch = torch.tensor(xs_batch, dtype=torch.long, device=device)
xp_batch = torch.tensor(xp_batch, dtype=torch.long, device=device)
xo_batch = torch.tensor(xo_batch, dtype=torch.long, device=device)
# Return embeddings for each s, p, o in the batch
# This returns tensors of shape (batch_size, rank)
xp_batch_emb = predicate_embeddings(xp_batch)
xs_batch_emb = entity_embeddings(xs_batch)
xo_batch_emb = entity_embeddings(xo_batch)
loss = 0.0
# "sp" corruption applied here (i.e. loss calculated based on model predications for subjects and objects)
# shape of po_scores is (batch_size, Nb_entities in entire dataset)
po_scores = model.forward(xp_batch_emb, None, xo_batch_emb)
non_c_idx = [i for i in range(po_scores.shape[1]) if i != data.entity_to_idx['C']]
xs_batch_c_removed = torch.where(xs_batch > data.entity_to_idx['C'], xs_batch-1, xs_batch)
loss += loss_function(po_scores[:, non_c_idx], xs_batch_c_removed) # train loss ignoring <A,r,C> terms
# shape of sp_scores is (batch_size, Nb_entities in entire dataset)
sp_scores = model.forward(xp_batch_emb, xs_batch_emb, None)
xo_batch_c_removed = torch.where(xo_batch > data.entity_to_idx['C'], xo_batch-1, xo_batch)
loss += loss_function(sp_scores[:, non_c_idx], xo_batch_c_removed) # train loss ignoring <A,r,C> terms
# store loss
loss_nonreg_value = loss.item()
epoch_loss_nonreg_values += [loss_nonreg_value]
# add on regularization term ||embedding(B)-reg_param||
reg_term = p_target_regularizer(entity_embeddings(torch.tensor(data.entity_to_idx['B'])), reg_param)
loss += reg_term
# compute gradient for inner-loop (training backprop)
loss.backward()
optimizer.step()
optimizer.zero_grad()
loss_value = loss.item()
epoch_loss_values += [loss_value]
if not is_quiet:
# logger.info(f'Epoch {epoch_no}/{nb_epochs}\tBatch {batch_no}/{nb_batches}\tLoss {loss_value:.6f} ({loss_nonreg_value:.6f})')
print(f'Epoch {epoch_no}/{nb_epochs}\tBatch {batch_no}/{nb_batches}\tLoss {loss_value:.6f} ({loss_nonreg_value:.6f})')
loss_mean, loss_std = np.mean(epoch_loss_values), np.std(epoch_loss_values)
mean_losses += [loss_mean]
with torch.no_grad():
if meta_loss_type == "cross-entropy":
model.eval()
dev_batcher = Batcher(data.dev_Xs, data.dev_Xp, data.dev_Xo, 1, 1, random_state)
batch_meta_loss_values = [] # to store meta loss for each triple
for batch_no, (batch_start, batch_end) in enumerate(dev_batcher.batches, 1):
# Size [B] numpy arrays containing indices of each subject_entity, predicate, and object_entity in the batch
dev_xp_batch, dev_xs_batch, dev_xo_batch, dev_xi_batch = dev_batcher.get_batch(batch_start, batch_end)
dev_xs_batch = torch.tensor(dev_xs_batch, dtype=torch.long, device=device)
dev_xp_batch = torch.tensor(dev_xp_batch, dtype=torch.long, device=device)
dev_xo_batch = torch.tensor(dev_xo_batch, dtype=torch.long, device=device)
# Return embeddings for each s, p, o in the batch
# This returns tensors of shape (batch_size, rank)
dev_xp_batch_emb = predicate_embeddings(dev_xp_batch)
dev_xs_batch_emb = entity_embeddings(dev_xs_batch)
dev_xo_batch_emb = entity_embeddings(dev_xo_batch)
meta_loss = 0.0
# "sp" corruption applied here (i.e. loss calculated based on model predications for subjects and objects)
# shape of po_scores is (batch_size, Nb_entities in entire dataset)
dev_po_scores = model.forward(dev_xp_batch_emb, None, dev_xo_batch_emb)
non_b_idx = [i for i in range(dev_po_scores.shape[1]) if i != data.entity_to_idx['B']]
dev_xs_batch_b_removed = torch.where(dev_xs_batch > data.entity_to_idx['B'], dev_xs_batch - 1, dev_xs_batch)
meta_loss += loss_function(dev_po_scores[:, non_b_idx], dev_xs_batch_b_removed)
# shape of sp_scores is (batch_size, Nb_entities in entire dataset)
dev_sp_scores = model.forward(dev_xp_batch_emb, dev_xs_batch_emb, None)
dev_xo_batch_b_removed = torch.where(dev_xo_batch > data.entity_to_idx['B'], dev_xo_batch - 1, dev_xo_batch)
meta_loss += loss_function(dev_sp_scores[:, non_b_idx], dev_xo_batch_b_removed)
# store loss
batch_meta_loss_values += [meta_loss.item()]
if reg_term < best_reg_term:
best_reg_term = reg_term
best_reg_term_params = reg_param.detach()
                        best_reg_term_entity_embeddings = entity_embeddings.weight.detach().clone()
                        best_reg_term_pred_embeddings = predicate_embeddings.weight.detach().clone()
best_reg_term_loss = mean_losses[-1]
meta_loss_mean, meta_loss_std = np.mean(batch_meta_loss_values), np.std(batch_meta_loss_values)
# print("\n")
# print(f"inner loop loss: {mean_losses[-1]}")
# print(f"meta loss: {meta_loss.item()}")
# # print(f"batch meta loss: {batch_meta_loss_values[-1]}")
# # print(f"meta loss mean: {meta_loss_mean}")
# print(f"reg param: {reg_param}")
# print(f"reg term: {reg_term}")
# print(f"entity embeddings: {entity_embeddings.weight}")
if meta_loss_mean < best_meta_loss:
best_meta_loss = meta_loss_mean
best_reg_param = reg_param.clone()
best_entity_embeddings = entity_embeddings.weight.detach().clone()
best_pred_embeddings = predicate_embeddings.weight.detach().clone()
mean_meta_losses += [meta_loss_mean]
if meta_loss_type == "||B-C||":
meta_loss = torch.norm(entity_embeddings(torch.tensor(data.entity_to_idx['B']))
- entity_embeddings(torch.tensor(data.entity_to_idx['C'])))
if reg_term < best_reg_term:
best_reg_term = reg_term
best_reg_term_params = reg_param.detach()
                        best_reg_term_entity_embeddings = entity_embeddings.weight.detach().clone()
                        best_reg_term_pred_embeddings = predicate_embeddings.weight.detach().clone()
best_reg_term_loss = mean_losses[-1]
if meta_loss < best_meta_loss:
best_meta_loss = meta_loss
best_reg_param = reg_param.clone()
best_entity_embeddings = entity_embeddings.weight.detach().clone()
best_pred_embeddings = predicate_embeddings.weight.detach().clone()
mean_meta_losses += [meta_loss]
# logger.info("Training finished")
print("\nTraining finished\n")
print(f"Best meta loss: {best_meta_loss}")
print(f"Corresponding reg param (p) value (based on meta-loss): {best_reg_param}")
print(f"Corresponding entity embeddings (based on meta-loss): {best_entity_embeddings}")
print(f"Corresponding predicate embeddings (based on meta-loss): {best_pred_embeddings}")
print(f"\nBest reg term value (||emb(B)-p||): {best_reg_term}")
print(f"Corresponding reg param (p) value (based on reg term value): {best_reg_term_params}")
print(f"Corresponding entity embeddings (based on reg term value): {best_reg_term_entity_embeddings}")
print(f"Corresponding predicate embeddings (based on meta-loss): {best_reg_term_pred_embeddings}")
if __name__ == '__main__':
# Specify experimental parameters
TRAIN_DIR = "../data/toy/train.tsv"
DEV_DIR = "../data/toy/dev.tsv"
TEST_DIR = None # "./data/toy/dev.tsv"
MODEL = "distmult"
EMBEDDING_SIZE = 1 # 1 or 2 will work
BATCH_SIZE = 2
EPOCHS = 25
LEARNING_RATE = 0.5
OPTIMIZER = "adagrad"
INPUT_TYPE = "standard"
META_LOSS_TYPE = "cross-entropy" # cross-entropy or ||B-C||
SEED = 6
QUIET = True
main()
|
the-stack_106_17227
|
"""
A Python Wrapper for WhenIWork.com
.. moduleauthor:: Alex Riviere <[email protected]>
"""
import requests
def raise_for_status_with_message(resp):
try:
resp.raise_for_status()
except requests.exceptions.HTTPError as error:
if resp.text:
raise requests.exceptions.HTTPError('{} \nError message: {}'.format(str(error), resp.text))
else:
raise error
class WhenIWork(object):
""".. py:class: WhenIWork([:param token:=None, :param options:=dict()])
:param token: The user WhenIWork API token
:param options: Allows you to set the `headers` and the `endpoint` from a dict.
Methods:
"""
# Library Version
__version = 0.1
# Private Variables
__api_token = None
__api_endpoint = "https://api.wheniwork.com/2"
__api_headers = {'user-agent': 'wheniwork-api-py/'+str(__version)}
__verify_ssl = False
__api_resp = None
def __init__(self, token=None, options=None):
"""
.. py:method:: init
Create a new instance.
:param token: The user WhenIWork API token
:param options: Allows you to set the `headers` and the `endpoint` from a dict.
"""
self.__api_token = token
if isinstance(options, dict):
if 'headers' in options:
self.__api_headers = options['headers']
if 'endpoint' in options:
self.__api_endpoint = options['endpoint']
@property
def token(self):
"""
Used to set or retrieve the user's api token::
from wheniwork import WhenIWork
a = WhenIWork()
a.token = "ilovemyboss"
print(a.token)
"""
return self.__api_token
@token.setter
def token(self, token):
"""
"""
self.__api_token = token
@property
def endpoint(self):
"""
Used to set or retrieve the api endpoint::
from wheniwork import WhenIWork
a = WhenIWork()
a.endpoint = "https://api.wheniwork.com/2"
print(a.endpoint)
"""
return self.__api_endpoint
@endpoint.setter
def endpoint(self, endpoint):
"""
:param endpoint:
:return:
"""
self.__api_endpoint = endpoint
@property
def headers(self):
"""
Used to set or retrieve the api endpoint::
from wheniwork import WhenIWork
a = WhenIWork()
            a.headers = {'W-Key': "iworksoharditsnotfunny"}
print(a.headers['W-Key'])
"""
return self.__api_headers
@headers.setter
def headers(self, headers):
"""
:param headers:
:return:
"""
self.__api_headers = headers
@property
def resp(self):
"""
Used to get the last API Response Data::
from wheniwork import WhenIWork
a = WhenIWork(token="iworksomuchitsnotfunny")
a.get("/locations")
print(a.resp)
Note: This is a read only variable.
"""
return self.__api_resp
def login(self, username, password, key):
"""
Sets the user API token, and returns a dictionary of user information.
:param username: The email for the user account.
:param password: The password for the user account.
:param key: the developer key given to you by WhenIWork.com
:return dict:
"""
url = self.endpoint+"/login"
params = {'username': username, 'password': password}
head = {'W-Key': key}
head.update(self.headers)
resp = requests.post(url, json=params, headers=head)
raise_for_status_with_message(resp)
self.__api_resp = resp.json()
data = self.resp
if 'login' in data and 'token' in data['login']:
self.token = data['login']['token']
return data
def get(self, method, params=None, headers=None):
"""
Send a get request to the WhenIWork api
:param method: The API method to call, e.g. '/users'
:param params: a dictionary of arguments to pass the method
:param headers: a dictionary of custom headers to be passed.
:return: a dictionary of the decoded json API response.
"""
if isinstance(method, str):
if self.token is not None:
url = self.endpoint+method
head = {'W-Token': self.token}
head.update(self.headers)
if headers:
head.update(headers)
resp = requests.get(url, params, headers=head)
raise_for_status_with_message(resp)
self.__api_resp = resp.json()
return self.resp
else:
return {'error': 'Token is not set!!'}
else:
return {'error': 'Method is not str!!'}
def post(self, method, params=None, headers=None):
"""
POST to the WhenIWork api
:param method: The API method to call, e.g. '/users'
:param params: a dictionary of arguments to pass the method
:param headers: a dictionary of custom headers to be passed.
:return: a dictionary of the decoded json API response.
"""
if isinstance(method, str):
if self.token is not None:
url = self.endpoint+method
head = {'W-Token': self.token}
head.update(self.headers)
if headers:
head.update(headers)
resp = requests.post(url, json=params, headers=head)
raise_for_status_with_message(resp)
self.__api_resp = resp.json()
return self.resp
else:
return {'error': 'Token is not set!!'}
else:
return {'error': 'Method is not str!!'}
def create(self, method, params=None, headers=None):
"""
Synonym of post
:param method:
:param params:
:param headers:
:return:
"""
return self.post(method, params=params, headers=headers)
def update(self, method, params=None, headers=None):
"""
Update an object on WhenIWork
:param method: The API method to call, e.g. '/users/1' MUST INCLUDE ID OF OBJECT.
:param params: a dictionary of arguments to pass the method
:param headers: a dictionary of custom headers to be passed.
:return: a dictionary of the decoded json API response.
"""
if isinstance(method, str):
if self.token is not None:
url = self.endpoint+method
head = {'W-Token': self.token}
head.update(self.headers)
if headers:
head.update(headers)
resp = requests.put(url, json=params, headers=head)
raise_for_status_with_message(resp)
self.__api_resp = resp.json()
return self.resp
else:
return {'error': 'Token is not set!!'}
else:
return {'error': 'Method is not str!!'}
def delete(self, method, headers=None):
"""
Delete an object on WhenIWork
:param method: The API method to call, e.g. '/users/1' MUST INCLUDE ID OF OBJECT.
:param headers: a dictionary of custom headers to be passed.
:return: a dictionary of the decoded json API response.
"""
if isinstance(method, str):
if self.token is not None:
url = self.endpoint + method
head = {'W-Token': self.token}
head.update(self.headers)
if headers:
head.update(headers)
resp = requests.delete(url, headers=head)
raise_for_status_with_message(resp)
self.__api_resp = resp.json()
return self.resp
else:
return {'error': 'Token is not set!!'}
else:
return {'error': 'Method is not str!!'}
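# Hedged usage sketch (not part of the wrapper): the credentials and developer key below are
# placeholders, and "/users" / "/locations" are just the endpoints mentioned in the docstrings.
def _example_usage():
    api = WhenIWork()
    api.login("[email protected]", "password", "developer-key")   # sets api.token on success
    users = api.get("/users")            # GET https://api.wheniwork.com/2/users
    locations = api.get("/locations")
    return users, locations, api.resp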
|
the-stack_106_17230
|
# Copyright (C) 2009 by Eric Talevich ([email protected])
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Base classes for Bio.Phylo objects.
All object representations for phylogenetic trees should derive from these base
classes in order to use the common methods defined on them.
"""
__docformat__ = "restructuredtext en"
from Bio._py3k import basestring, filter, unicode, zip
import collections
import copy
import itertools
import random
import re
from Bio import _utils
# NB: On Python 2, repr() and str() are specified to return byte strings, not
# unicode. On Python 3, it's the opposite. Horrible.
import sys
if sys.version_info[0] < 3:
def as_string(s):
if isinstance(s, unicode):
return s.encode('utf-8')
return str(s)
else:
as_string = str
# General tree-traversal algorithms
def _level_traverse(root, get_children):
"""Traverse a tree in breadth-first (level) order."""
Q = collections.deque([root])
while Q:
v = Q.popleft()
yield v
Q.extend(get_children(v))
def _preorder_traverse(root, get_children):
"""Traverse a tree in depth-first pre-order (parent before children)."""
def dfs(elem):
yield elem
for v in get_children(elem):
for u in dfs(v):
yield u
for elem in dfs(root):
yield elem
def _postorder_traverse(root, get_children):
"""Traverse a tree in depth-first post-order (children before parent)."""
def dfs(elem):
for v in get_children(elem):
for u in dfs(v):
yield u
yield elem
for elem in dfs(root):
yield elem
def _sorted_attrs(elem):
"""Get a flat list of elem's attributes, sorted for consistency."""
singles = []
lists = []
# Sort attributes for consistent results
for attrname, child in sorted(elem.__dict__.items(),
key=lambda kv: kv[0]):
if child is None:
continue
if isinstance(child, list):
lists.extend(child)
else:
singles.append(child)
return (x for x in singles + lists
if isinstance(x, TreeElement))
# Factory functions to generalize searching for clades/nodes
def _identity_matcher(target):
"""Match a node to the target object by identity."""
def match(node):
return (node is target)
return match
def _class_matcher(target_cls):
"""Match a node if it's an instance of the given class."""
def match(node):
return isinstance(node, target_cls)
return match
def _string_matcher(target):
def match(node):
if isinstance(node, (Clade, Tree)):
# Avoid triggering specialized or recursive magic methods
return node.name == target
return as_string(node) == target
return match
def _attribute_matcher(kwargs):
"""Match a node by specified attribute values.
``terminal`` is a special case: True restricts the search to external (leaf)
nodes, False restricts to internal nodes, and None allows all tree elements
to be searched, including phyloXML annotations.
Otherwise, for a tree element to match the specification (i.e. for the
function produced by `_attribute_matcher` to return True when given a tree
element), it must have each of the attributes specified by the keys and
match each of the corresponding values -- think 'and', not 'or', for
multiple keys.
"""
def match(node):
if 'terminal' in kwargs:
# Special case: restrict to internal/external/any nodes
kwa_copy = kwargs.copy()
pattern = kwa_copy.pop('terminal')
if (pattern is not None and
(not hasattr(node, 'is_terminal') or
node.is_terminal() != pattern)):
return False
else:
kwa_copy = kwargs
for key, pattern in kwa_copy.items():
# Nodes must match all other specified attributes
if not hasattr(node, key):
return False
target = getattr(node, key)
if isinstance(pattern, basestring):
return (isinstance(target, basestring) and
re.match(pattern + '$', target))
if isinstance(pattern, bool):
return (pattern == bool(target))
if isinstance(pattern, int):
return (pattern == target)
if pattern is None:
return (target is None)
raise TypeError('invalid query type: %s' % type(pattern))
return True
return match
def _function_matcher(matcher_func):
"""Safer attribute lookup -- returns False instead of raising an error."""
def match(node):
try:
return matcher_func(node)
except (LookupError, AttributeError, ValueError, TypeError):
return False
return match
def _object_matcher(obj):
"""Retrieve a matcher function by passing an arbitrary object.
i.e. passing a `TreeElement` such as a `Clade` or `Tree` instance returns an
identity matcher, passing a type such as the `PhyloXML.Taxonomy` class
returns a class matcher, and passing a dictionary returns an attribute
matcher.
The resulting 'match' function returns True when given an object matching
the specification (identity, type or attribute values), otherwise False.
This is useful for writing functions that search the tree, and probably
shouldn't be used directly by the end user.
"""
if isinstance(obj, TreeElement):
return _identity_matcher(obj)
if isinstance(obj, type):
return _class_matcher(obj)
if isinstance(obj, basestring):
return _string_matcher(obj)
if isinstance(obj, dict):
return _attribute_matcher(obj)
if callable(obj):
return _function_matcher(obj)
raise ValueError("%s (type %s) is not a valid type for comparison."
% (obj, type(obj)))
def _combine_matchers(target, kwargs, require_spec):
"""Merge target specifications with keyword arguments.
Dispatch the components to the various matcher functions, then merge into a
single boolean function.
"""
if not target:
if not kwargs:
if require_spec:
raise ValueError("you must specify a target object or keyword "
"arguments.")
return lambda x: True
return _attribute_matcher(kwargs)
match_obj = _object_matcher(target)
if not kwargs:
return match_obj
match_kwargs = _attribute_matcher(kwargs)
return (lambda x: match_obj(x) and match_kwargs(x))
def _combine_args(first, *rest):
"""Convert ``[targets]`` or ``*targets`` arguments to a single iterable.
This helps other functions work like the built-in functions `max` and
`min`.
"""
# Background: is_monophyletic takes a single list or iterable (like the
# same method in Bio.Nexus.Trees); root_with_outgroup and common_ancestor
# take separate arguments. This mismatch was in the initial release and I
# didn't notice the inconsistency until after Biopython 1.55. I can think
# of cases where either style is more convenient, so let's support both
# (for backward compatibility and consistency between methods).
if hasattr(first, '__iter__') and not (isinstance(first, TreeElement) or
isinstance(first, type) or
isinstance(first, basestring) or
isinstance(first, dict)):
# `terminals` is an iterable of targets
if rest:
raise ValueError("Arguments must be either a single list of "
"targets, or separately specified targets "
"(e.g. foo(t1, t2, t3)), but not both.")
return first
# `terminals` is a single target -- wrap in a container
return itertools.chain([first], rest)
# Class definitions
class TreeElement(object):
"""Base class for all Bio.Phylo classes."""
def __repr__(self):
"""Show this object's constructor with its primitive arguments."""
def pair_as_kwarg_string(key, val):
if isinstance(val, basestring):
return ("%s='%s'"
% (key, _utils.trim_str(as_string(val), 60, '...')))
return "%s=%s" % (key, val)
return ('%s(%s)'
% (self.__class__.__name__,
', '.join(pair_as_kwarg_string(key, val)
for key, val in sorted(self.__dict__.items())
if val is not None and
type(val) in (str, int, float, bool, unicode))))
__str__ = __repr__
class TreeMixin(object):
"""Methods for Tree- and Clade-based classes.
This lets `Tree` and `Clade` support the same traversal and searching
operations without requiring Clade to inherit from Tree, so Clade isn't
required to have all of Tree's attributes -- just ``root`` (a Clade
instance) and ``is_terminal``.
"""
# Traversal methods
def _filter_search(self, filter_func, order, follow_attrs):
"""Perform a BFS or DFS traversal through all elements in this tree.
:returns: generator of all elements for which `filter_func` is True.
"""
order_opts = {'preorder': _preorder_traverse,
'postorder': _postorder_traverse,
'level': _level_traverse}
try:
order_func = order_opts[order]
except KeyError:
raise ValueError("Invalid order '%s'; must be one of: %s"
% (order, tuple(order_opts)))
if follow_attrs:
get_children = _sorted_attrs
root = self
else:
get_children = lambda elem: elem.clades
root = self.root
return filter(filter_func, order_func(root, get_children))
def find_any(self, *args, **kwargs):
"""Return the first element found by find_elements(), or None.
This is also useful for checking whether any matching element exists in
the tree, and can be used in a conditional expression.
"""
hits = self.find_elements(*args, **kwargs)
try:
return next(hits)
except StopIteration:
return None
def find_elements(self, target=None, terminal=None, order='preorder',
**kwargs):
"""Find all tree elements matching the given attributes.
The arbitrary keyword arguments indicate the attribute name of the
sub-element and the value to match: string, integer or boolean. Strings
are evaluated as regular expression matches; integers are compared
directly for equality, and booleans evaluate the attribute's truth value
(True or False) before comparing. To handle nonzero floats, search with
a boolean argument, then filter the result manually.
If no keyword arguments are given, then just the class type is used for
matching.
The result is an iterable through all matching objects, by depth-first
search. (Not necessarily the same order as the elements appear in the
source file!)
:Parameters:
target : TreeElement instance, type, dict, or callable
Specifies the characteristics to search for. (The default,
TreeElement, matches any standard Bio.Phylo type.)
terminal : bool
A boolean value to select for or against terminal nodes (a.k.a.
leaf nodes). True searches for only terminal nodes, False
excludes terminal nodes, and the default, None, searches both
terminal and non-terminal nodes, as well as any tree elements
lacking the ``is_terminal`` method.
order : {'preorder', 'postorder', 'level'}
Tree traversal order: 'preorder' (default) is depth-first
search, 'postorder' is DFS with child nodes preceding parents,
and 'level' is breadth-first search.
Example
-------
        >>> from Bio.Phylo.IO import PhyloXMLIO
>>> phx = PhyloXMLIO.read('phyloxml_examples.xml')
>>> matches = phx.phylogenies[5].find_elements(code='OCTVU')
>>> next(matches)
Taxonomy(code='OCTVU', scientific_name='Octopus vulgaris')
"""
if terminal is not None:
kwargs['terminal'] = terminal
is_matching_elem = _combine_matchers(target, kwargs, False)
return self._filter_search(is_matching_elem, order, True)
def find_clades(self, target=None, terminal=None, order='preorder',
**kwargs):
"""Find each clade containing a matching element.
That is, find each element as with find_elements(), but return the
corresponding clade object. (This is usually what you want.)
:returns: an iterable through all matching objects, searching
depth-first (preorder) by default.
"""
def match_attrs(elem):
orig_clades = elem.__dict__.pop('clades')
found = elem.find_any(target, **kwargs)
elem.clades = orig_clades
return (found is not None)
if terminal is None:
is_matching_elem = match_attrs
else:
def is_matching_elem(elem):
return ((elem.is_terminal() == terminal) and
match_attrs(elem))
return self._filter_search(is_matching_elem, order, False)
def get_path(self, target=None, **kwargs):
"""List the clades directly between this root and the given target.
:returns: list of all clade objects along this path, ending with the
given target, but excluding the root clade.
"""
# Only one path will work -- ignore weights and visits
path = []
match = _combine_matchers(target, kwargs, True)
def check_in_path(v):
if match(v):
path.append(v)
return True
elif v.is_terminal():
return False
for child in v:
if check_in_path(child):
path.append(v)
return True
return False
if not check_in_path(self.root):
return None
return path[-2::-1]
def get_nonterminals(self, order='preorder'):
"""Get a list of all of this tree's nonterminal (internal) nodes."""
return list(self.find_clades(terminal=False, order=order))
def get_terminals(self, order='preorder'):
"""Get a list of all of this tree's terminal (leaf) nodes."""
return list(self.find_clades(terminal=True, order=order))
def trace(self, start, finish):
"""List of all clade object between two targets in this tree.
Excluding `start`, including `finish`.
"""
mrca = self.common_ancestor(start, finish)
fromstart = mrca.get_path(start)[-2::-1]
to = mrca.get_path(finish)
return fromstart + [mrca] + to
# Information methods
def common_ancestor(self, targets, *more_targets):
"""Most recent common ancestor (clade) of all the given targets.
Edge cases:
- If no target is given, returns self.root
- If 1 target is given, returns the target
- If any target is not found in this tree, raises a ValueError
"""
paths = [self.get_path(t)
for t in _combine_args(targets, *more_targets)]
# Validation -- otherwise izip throws a spooky error below
for p, t in zip(paths, targets):
if p is None:
raise ValueError("target %s is not in this tree" % repr(t))
mrca = self.root
for level in zip(*paths):
ref = level[0]
for other in level[1:]:
if ref is not other:
break
else:
mrca = ref
if ref is not mrca:
break
return mrca
def count_terminals(self):
"""Counts the number of terminal (leaf) nodes within this tree."""
return _utils.iterlen(self.find_clades(terminal=True))
def depths(self, unit_branch_lengths=False):
"""Create a mapping of tree clades to depths (by branch length).
:Parameters:
unit_branch_lengths : bool
If True, count only the number of branches (levels in the tree).
By default the distance is the cumulative branch length leading
to the clade.
:returns: dict of {clade: depth}, where keys are all of the Clade
instances in the tree, and values are the distance from the root to
each clade (including terminals).
"""
if unit_branch_lengths:
depth_of = lambda c: 1
else:
depth_of = lambda c: c.branch_length or 0
depths = {}
def update_depths(node, curr_depth):
depths[node] = curr_depth
for child in node.clades:
new_depth = curr_depth + depth_of(child)
update_depths(child, new_depth)
update_depths(self.root, self.root.branch_length or 0)
return depths
def distance(self, target1, target2=None):
"""Calculate the sum of the branch lengths between two targets.
If only one target is specified, the other is the root of this tree.
"""
if target2 is None:
return sum(n.branch_length for n in self.get_path(target1)
if n.branch_length is not None)
mrca = self.common_ancestor(target1, target2)
return mrca.distance(target1) + mrca.distance(target2)
def is_bifurcating(self):
"""Return True if tree downstream of node is strictly bifurcating.
I.e., all nodes have either 2 or 0 children (internal or external,
respectively). The root may have 3 descendents and still be considered
part of a bifurcating tree, because it has no ancestor.
"""
# Root can be trifurcating
if isinstance(self, Tree) and len(self.root) == 3:
return (self.root.clades[0].is_bifurcating() and
self.root.clades[1].is_bifurcating() and
self.root.clades[2].is_bifurcating())
if len(self.root) == 2:
return (self.root.clades[0].is_bifurcating() and
self.root.clades[1].is_bifurcating())
if len(self.root) == 0:
return True
return False
def is_monophyletic(self, terminals, *more_terminals):
"""MRCA of terminals if they comprise a complete subclade, or False.
I.e., there exists a clade such that its terminals are the same set as
the given targets.
The given targets must be terminals of the tree.
To match both `Bio.Nexus.Trees` and the other multi-target methods in
        Bio.Phylo, arguments to this method can be specified in either of two ways:
(i) as a single list of targets, or (ii) separately specified targets,
e.g. is_monophyletic(t1, t2, t3) -- but not both.
        For convenience, this method returns the common ancestor (MRCA) of the
targets if they are monophyletic (instead of the value True), and False
otherwise.
:returns: common ancestor if terminals are monophyletic, otherwise False.
"""
target_set = set(_combine_args(terminals, *more_terminals))
current = self.root
while True:
if set(current.get_terminals()) == target_set:
return current
# Try a narrower subclade
for subclade in current.clades:
if set(subclade.get_terminals()).issuperset(target_set):
current = subclade
break
else:
return False
def is_parent_of(self, target=None, **kwargs):
"""True if target is a descendent of this tree.
Not required to be a direct descendent.
To check only direct descendents of a clade, simply use list membership
testing: ``if subclade in clade: ...``
"""
return self.get_path(target, **kwargs) is not None
def is_preterminal(self):
"""True if all direct descendents are terminal."""
if self.root.is_terminal():
return False
for clade in self.root.clades:
if not clade.is_terminal():
return False
return True
def total_branch_length(self):
"""Calculate the sum of all the branch lengths in this tree."""
return sum(node.branch_length
for node in self.find_clades(branch_length=True))
# Tree manipulation methods
def collapse(self, target=None, **kwargs):
"""Deletes target from the tree, relinking its children to its parent.
:returns: the parent clade.
"""
path = self.get_path(target, **kwargs)
if not path:
raise ValueError("couldn't collapse %s in this tree"
% (target or kwargs))
if len(path) == 1:
parent = self.root
else:
parent = path[-2]
popped = parent.clades.pop(parent.clades.index(path[-1]))
extra_length = popped.branch_length or 0
for child in popped:
child.branch_length += extra_length
parent.clades.extend(popped.clades)
return parent
def collapse_all(self, target=None, **kwargs):
"""Collapse all the descendents of this tree, leaving only terminals.
Total branch lengths are preserved, i.e. the distance to each terminal
stays the same.
For example, this will safely collapse nodes with poor bootstrap
support:
>>> tree.collapse_all(lambda c: c.confidence is not None and
... c.confidence < 70)
This implementation avoids strange side-effects by using level-order
traversal and testing all clade properties (versus the target
specification) up front. In particular, if a clade meets the target
specification in the original tree, it will be collapsed. For example,
if the condition is:
>>> tree.collapse_all(lambda c: c.branch_length < 0.1)
Collapsing a clade's parent node adds the parent's branch length to the
child, so during the execution of collapse_all, a clade's branch_length
may increase. In this implementation, clades are collapsed according to
their properties in the original tree, not the properties when tree
traversal reaches the clade. (It's easier to debug.) If you want the
other behavior (incremental testing), modifying the source code of this
function is straightforward.
"""
# Read the iterable into a list to protect against in-place changes
matches = list(self.find_clades(target, False, 'level', **kwargs))
if not matches:
# No matching nodes to collapse
return
# Skip the root node -- it can't be collapsed
if matches[0] == self.root:
matches.pop(0)
for clade in matches:
self.collapse(clade)
def ladderize(self, reverse=False):
"""Sort clades in-place according to the number of terminal nodes.
Deepest clades are last by default. Use ``reverse=True`` to sort clades
deepest-to-shallowest.
"""
self.root.clades.sort(key=lambda c: c.count_terminals(),
reverse=reverse)
for subclade in self.root.clades:
subclade.ladderize(reverse=reverse)
def prune(self, target=None, **kwargs):
"""Prunes a terminal clade from the tree.
        If the taxon is from a bifurcation, the connecting node will be collapsed
        and its branch length added to the remaining terminal node. This might no
        longer be a meaningful value.
:returns: parent clade of the pruned target
"""
if 'terminal' in kwargs and kwargs['terminal']:
raise ValueError("target must be terminal")
path = self.get_path(target, terminal=True, **kwargs)
if not path:
raise ValueError("can't find a matching target below this root")
if len(path) == 1:
parent = self.root
else:
parent = path[-2]
parent.clades.remove(path[-1])
if len(parent) == 1:
# We deleted a branch from a bifurcation
if parent == self.root:
# If we're at the root, move the root upwards
# NB: This loses the length of the original branch
newroot = parent.clades[0]
newroot.branch_length = None
parent = self.root = newroot
else:
# If we're not at the root, collapse this parent
child = parent.clades[0]
if child.branch_length is not None:
child.branch_length += (parent.branch_length or 0.0)
if len(path) < 3:
grandparent = self.root
else:
grandparent = path[-3]
# Replace parent with child at the same place in grandparent
index = grandparent.clades.index(parent)
grandparent.clades.pop(index)
grandparent.clades.insert(index, child)
parent = grandparent
return parent
def split(self, n=2, branch_length=1.0):
"""Generate n (default 2) new descendants.
In a species tree, this is a speciation event.
New clades have the given branch_length and the same name as this
clade's root plus an integer suffix (counting from 0). For example,
splitting a clade named "A" produces sub-clades named "A0" and "A1".
If the clade has no name, the prefix "n" is used for child nodes, e.g.
"n0" and "n1".
"""
clade_cls = type(self.root)
base_name = self.root.name or 'n'
for i in range(n):
clade = clade_cls(name=base_name + str(i),
branch_length=branch_length)
self.root.clades.append(clade)
class Tree(TreeElement, TreeMixin):
"""A phylogenetic tree, containing global info for the phylogeny.
The structure and node-specific data is accessible through the 'root'
clade attached to the Tree instance.
:Parameters:
root : Clade
The starting node of the tree. If the tree is rooted, this will
usually be the root node.
rooted : bool
Whether or not the tree is rooted. By default, a tree is assumed to
be rooted.
id : str
The identifier of the tree, if there is one.
name : str
The name of the tree, in essence a label.
"""
def __init__(self, root=None, rooted=True, id=None, name=None):
self.root = root or Clade()
self.rooted = rooted
self.id = id
self.name = name
@classmethod
def from_clade(cls, clade, **kwargs):
"""Create a new Tree object given a clade.
Keyword arguments are the usual `Tree` constructor parameters.
"""
root = copy.deepcopy(clade)
return cls(root, **kwargs)
@classmethod
def randomized(cls, taxa, branch_length=1.0, branch_stdev=None):
"""Create a randomized bifurcating tree given a list of taxa.
:param taxa: Either an integer specifying the number of taxa to create
(automatically named taxon#), or an iterable of taxon names, as
strings.
:returns: a tree of the same type as this class.
"""
if isinstance(taxa, int):
taxa = ['taxon%s' % (i + 1) for i in range(taxa)]
elif hasattr(taxa, '__iter__'):
taxa = list(taxa)
else:
raise TypeError("taxa argument must be integer (# taxa) or "
"iterable of taxon names.")
rtree = cls()
terminals = [rtree.root]
while len(terminals) < len(taxa):
newsplit = random.choice(terminals)
newsplit.split(branch_length=branch_length)
newterms = newsplit.clades
if branch_stdev:
# Add some noise to the branch lengths
for nt in newterms:
nt.branch_length = max(0,
random.gauss(branch_length, branch_stdev))
terminals.remove(newsplit)
terminals.extend(newterms)
# Distribute taxon labels randomly
random.shuffle(taxa)
for node, name in zip(terminals, taxa):
node.name = name
return rtree
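    # Illustrative calls (hypothetical): Tree.randomized(5) builds a random
    # bifurcating tree whose terminals are named taxon1..taxon5, while
    # Tree.randomized(['A', 'B', 'C'], branch_stdev=0.1) draws each new branch
    # length from a Gaussian around the default length of 1.0.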
@property
def clade(self):
"""The first clade in this tree (not itself)."""
return self.root
def as_phyloxml(self, **kwargs):
"""Convert this tree to a PhyloXML-compatible Phylogeny.
This lets you use the additional annotation types PhyloXML defines, and
save this information when you write this tree as 'phyloxml'.
"""
from Bio.Phylo.PhyloXML import Phylogeny
return Phylogeny.from_tree(self, **kwargs)
# XXX Py3 Compatibility: In Python 3.0+, **kwargs can be replaced with the
# named keyword argument outgroup_branch_length=None
def root_with_outgroup(self, outgroup_targets, *more_targets, **kwargs):
"""Reroot this tree with the outgroup clade containing outgroup_targets.
Operates in-place.
Edge cases:
- If ``outgroup == self.root``, no change
- If outgroup is terminal, create new bifurcating root node with a
0-length branch to the outgroup
- If outgroup is internal, use the given outgroup node as the new
trifurcating root, keeping branches the same
- If the original root was bifurcating, drop it from the tree,
preserving total branch lengths
:param outgroup_branch_length: length of the branch leading to the
outgroup after rerooting. If not specified (None), then:
- If the outgroup is an internal node (not a single terminal taxon),
then use that node as the new root.
- Otherwise, create a new root node as the parent of the outgroup.
"""
# This raises a ValueError if any target is not in this tree
# Otherwise, common_ancestor guarantees outgroup is in this tree
outgroup = self.common_ancestor(outgroup_targets, *more_targets)
outgroup_path = self.get_path(outgroup)
if len(outgroup_path) == 0:
# Outgroup is the current root -- no change
return
prev_blen = outgroup.branch_length or 0.0
# Hideous kludge because Py2.x doesn't allow keyword args after *args
outgroup_branch_length = kwargs.get('outgroup_branch_length')
if outgroup_branch_length is not None:
assert 0 <= outgroup_branch_length <= prev_blen, \
"outgroup_branch_length must be between 0 and the " \
"original length of the branch leading to the outgroup."
if outgroup.is_terminal() or outgroup_branch_length is not None:
# Create a new root with a 0-length branch to the outgroup
outgroup.branch_length = outgroup_branch_length or 0.0
new_root = self.root.__class__(
branch_length=self.root.branch_length, clades=[outgroup])
# The first branch reversal (see the upcoming loop) is modified
if len(outgroup_path) == 1:
# No nodes between the original root and outgroup to rearrange.
# Most of the code below will be skipped, but we still need
# 'new_parent' pointing at the new root.
new_parent = new_root
else:
parent = outgroup_path.pop(-2)
# First iteration of reversing the path to the outgroup
parent.clades.pop(parent.clades.index(outgroup))
(prev_blen, parent.branch_length) = (parent.branch_length,
prev_blen - outgroup.branch_length)
new_root.clades.insert(0, parent)
new_parent = parent
else:
# Use the given outgroup node as the new (trifurcating) root
new_root = outgroup
new_root.branch_length = self.root.branch_length
new_parent = new_root
# Tracing the outgroup lineage backwards, reattach the subclades under a
# new root clade. Reverse the branches directly above the outgroup in
# the tree, but keep the descendants of those clades as they are.
for parent in outgroup_path[-2::-1]:
parent.clades.pop(parent.clades.index(new_parent))
prev_blen, parent.branch_length = parent.branch_length, prev_blen
new_parent.clades.insert(0, parent)
new_parent = parent
# Finally, handle the original root according to number of descendents
old_root = self.root
if outgroup in old_root.clades:
assert len(outgroup_path) == 1
old_root.clades.pop(old_root.clades.index(outgroup))
else:
old_root.clades.pop(old_root.clades.index(new_parent))
if len(old_root) == 1:
# Delete the old bifurcating root & add branch lengths
ingroup = old_root.clades[0]
if ingroup.branch_length:
ingroup.branch_length += prev_blen
else:
ingroup.branch_length = prev_blen
new_parent.clades.insert(0, ingroup)
# ENH: If annotations are attached to old_root, do... something.
else:
# Keep the old trifurcating/polytomous root as an internal node
old_root.branch_length = prev_blen
new_parent.clades.insert(0, old_root)
self.root = new_root
self.rooted = True
return
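    # Illustrative call forms (hypothetical taxon names):
    #   tree.root_with_outgroup('A')                      # single terminal outgroup
    #   tree.root_with_outgroup('A', 'B')                 # outgroup = MRCA of A and B
    #   tree.root_with_outgroup(['A', 'B'], outgroup_branch_length=0.0)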
def root_at_midpoint(self):
"""Root the tree at the midpoint of the two most distant taxa.
This operates in-place, leaving a bifurcating root. The topology of the
tree is otherwise retained, though no guarantees are made about the
stability of clade/node/taxon ordering.
"""
# Identify the largest pairwise distance
max_distance = 0.0
tips = self.get_terminals()
for tip in tips:
self.root_with_outgroup(tip)
new_max = max(self.depths().items(), key=lambda nd: nd[1])
if new_max[1] > max_distance:
tip1 = tip
tip2 = new_max[0]
max_distance = new_max[1]
self.root_with_outgroup(tip1)
# Depth to go from the ingroup tip toward the outgroup tip
root_remainder = 0.5 * (max_distance - (self.root.branch_length or 0))
assert root_remainder >= 0
# Identify the midpoint and reroot there.
# Trace the path to the outgroup tip until all of the root depth has
# been traveled/accounted for.
for node in self.get_path(tip2):
root_remainder -= node.branch_length
if root_remainder < 0:
outgroup_node = node
outgroup_branch_length = -root_remainder
break
else:
raise ValueError("Somehow, failed to find the midpoint!")
self.root_with_outgroup(outgroup_node,
outgroup_branch_length=outgroup_branch_length)
# Method assumed by TreeMixin
def is_terminal(self):
"""True if the root of this tree is terminal."""
return (not self.root.clades)
# Convention from SeqRecord and Alignment classes
def __format__(self, format_spec):
"""Serialize the tree as a string in the specified file format.
This method supports the ``format`` built-in function added in Python
2.6/3.0.
:param format_spec: a lower-case string supported by `Bio.Phylo.write`
as an output file format.
"""
if format_spec:
from Bio._py3k import StringIO
from Bio.Phylo import _io
handle = StringIO()
_io.write([self], handle, format_spec)
return handle.getvalue()
else:
# Follow python convention and default to using __str__
return str(self)
def format(self, format):
"""Serialize the tree as a string in the specified file format.
This duplicates the __format__ magic method for pre-2.6 Pythons.
"""
return self.__format__(format)
# Pretty-printer for the entire tree hierarchy
def __str__(self):
"""String representation of the entire tree.
Serializes each sub-clade recursively using ``repr`` to create a summary
of the object structure.
"""
TAB = ' '
textlines = []
def print_tree(obj, indent):
"""Recursively serialize sub-elements.
This closes over textlines and modifies it in-place.
"""
if isinstance(obj, (Tree, Clade)):
# Avoid infinite recursion or special formatting from str()
objstr = repr(obj)
else:
objstr = as_string(obj)
textlines.append(TAB * indent + objstr)
indent += 1
for attr in obj.__dict__:
child = getattr(obj, attr)
if isinstance(child, TreeElement):
print_tree(child, indent)
elif isinstance(child, list):
for elem in child:
if isinstance(elem, TreeElement):
print_tree(elem, indent)
print_tree(self, 0)
return '\n'.join(textlines)
class Clade(TreeElement, TreeMixin):
"""A recursively defined sub-tree.
:Parameters:
        branch_length : float
The length of the branch leading to the root node of this clade.
name : str
The clade's name (a label).
clades : list
Sub-trees rooted directly under this tree's root.
confidence : number
Support.
color : BranchColor
The display color of the branch and descendents.
width : number
The display width of the branch and descendents.
"""
def __init__(self, branch_length=None, name=None, clades=None,
confidence=None, color=None, width=None):
self.branch_length = branch_length
self.name = name
self.clades = clades or []
self.confidence = confidence
self.color = color
self.width = width
@property
def root(self):
"""Allow TreeMixin methods to traverse clades properly."""
return self
def is_terminal(self):
"""True if this is a terminal (leaf) node."""
return (not self.clades)
# Sequence-type behavior methods
def __getitem__(self, index):
"""Get clades by index (integer or slice)."""
if isinstance(index, int) or isinstance(index, slice):
return self.clades[index]
ref = self
for idx in index:
ref = ref[idx]
return ref
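    # Illustrative indexing (hypothetical clade): clade[0] is the first child,
    # clade[0:2] is a list of the first two children, and clade[0, 1] applies the
    # indices successively, i.e. the second child of the first child.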
def __iter__(self):
"""Iterate through this tree's direct descendent clades (sub-trees)."""
return iter(self.clades)
def __len__(self):
"""Number of clades directy under the root."""
return len(self.clades)
# Python 3:
def __bool__(self):
"""Boolean value of an instance of this class (True).
NB: If this method is not defined, but ``__len__`` is, then the object
is considered true if the result of ``__len__()`` is nonzero. We want
Clade instances to always be considered True.
"""
return True
# Python 2:
__nonzero__ = __bool__
def __str__(self):
if self.name:
return _utils.trim_str(self.name, 40, '...')
return self.__class__.__name__
# Syntax sugar for setting the branch color
def _get_color(self):
return self._color
def _set_color(self, arg):
if arg is None or isinstance(arg, BranchColor):
self._color = arg
elif isinstance(arg, basestring):
if arg in BranchColor.color_names:
# Known color name
self._color = BranchColor.from_name(arg)
elif arg.startswith('#') and len(arg) == 7:
# HTML-style hex string
self._color = BranchColor.from_hex(arg)
else:
raise ValueError("invalid color string %s" % arg)
elif hasattr(arg, '__iter__') and len(arg) == 3:
# RGB triplet
self._color = BranchColor(*arg)
else:
raise ValueError("invalid color value %s" % arg)
color = property(_get_color, _set_color, doc="Branch color.")
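    # Illustrative assignments accepted by the color property (hypothetical values):
    #   clade.color = 'salmon'          # known color name
    #   clade.color = '#FF8000'         # HTML-style hex string
    #   clade.color = (128, 0, 128)     # RGB triplet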
class BranchColor(object):
"""Indicates the color of a clade when rendered graphically.
The color should be interpreted by client code (e.g. visualization
programs) as applying to the whole clade, unless overwritten by the
color(s) of sub-clades.
Color values must be integers from 0 to 255.
"""
color_names = {
'red': (255, 0, 0),
'r': (255, 0, 0),
'yellow': (255, 255, 0),
'y': (255, 255, 0),
'green': ( 0, 128, 0),
'g': ( 0, 128, 0),
'cyan': ( 0, 255, 255),
'c': ( 0, 255, 255),
'blue': ( 0, 0, 255),
'b': ( 0, 0, 255),
'magenta': (255, 0, 255),
'm': (255, 0, 255),
'black': ( 0, 0, 0),
'k': ( 0, 0, 0),
'white': (255, 255, 255),
'w': (255, 255, 255),
# Names standardized in HTML/CSS spec
# http://w3schools.com/html/html_colornames.asp
'maroon': (128, 0, 0),
'olive': (128, 128, 0),
'lime': ( 0, 255, 0),
'aqua': ( 0, 255, 255),
'teal': ( 0, 128, 128),
'navy': ( 0, 0, 128),
'fuchsia': (255, 0, 255),
'purple': (128, 0, 128),
'silver': (192, 192, 192),
'gray': (128, 128, 128),
# More definitions from matplotlib/gcolor2
'grey': (128, 128, 128),
'pink': (255, 192, 203),
'salmon': (250, 128, 114),
'orange': (255, 165, 0),
'gold': (255, 215, 0),
'tan': (210, 180, 140),
'brown': (165, 42, 42),
}
def __init__(self, red, green, blue):
for color in (red, green, blue):
assert (isinstance(color, int) and
0 <= color <= 255
), "Color values must be integers between 0 and 255."
self.red = red
self.green = green
self.blue = blue
@classmethod
def from_hex(cls, hexstr):
"""Construct a BranchColor object from a hexadecimal string.
The string format is the same style used in HTML and CSS, such as
'#FF8000' for an RGB value of (255, 128, 0).
"""
assert (isinstance(hexstr, basestring) and
hexstr.startswith('#') and
len(hexstr) == 7
), "need a 24-bit hexadecimal string, e.g. #000000"
RGB = hexstr[1:3], hexstr[3:5], hexstr[5:]
return cls(*[int('0x' + cc, base=16) for cc in RGB])
@classmethod
def from_name(cls, colorname):
"""Construct a BranchColor object by the color's name."""
return cls(*cls.color_names[colorname])
def to_hex(self):
"""Return a 24-bit hexadecimal RGB representation of this color.
The returned string is suitable for use in HTML/CSS, as a color
parameter in matplotlib, and perhaps other situations.
Example:
>>> bc = BranchColor(12, 200, 100)
>>> bc.to_hex()
'#0cc864'
"""
return "#%02x%02x%02x" % (self.red, self.green, self.blue)
def to_rgb(self):
"""Return a tuple of RGB values (0 to 255) representing this color.
Example:
>>> bc = BranchColor(255, 165, 0)
>>> bc.to_rgb()
(255, 165, 0)
"""
return (self.red, self.green, self.blue)
def __repr__(self):
"""Preserve the standard RGB order when representing this object."""
return ('%s(red=%d, green=%d, blue=%d)'
% (self.__class__.__name__, self.red, self.green, self.blue))
def __str__(self):
"""Show the color's RGB values."""
return "(%d, %d, %d)" % (self.red, self.green, self.blue)
|
the-stack_106_17232
|
import os, inspect
import tensorflow as tf
import numpy as np
PACK_PATH = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))+"/.."
def training(sess, saver, neuralnet, dataset, epochs, batch_size, normalize=True):
print("\nTraining to %d epochs (%d of minibatch size)" %(epochs, batch_size))
summary_writer = tf.compat.v1.summary.FileWriter(PACK_PATH+'/Checkpoint', sess.graph)
iteration = 0
run_options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
run_metadata = tf.compat.v1.RunMetadata()
test_sq = 20
test_size = test_sq**2
for epoch in range(epochs):
while(True):
            x_tr, y_tr, terminator = dataset.next_train(batch_size)  # y_tr is not used in this project.
neuralnet.init_coeff_tr()
_, summaries = sess.run([neuralnet.optimizer, neuralnet.summaries], \
feed_dict={neuralnet.x:x_tr, neuralnet.y:y_tr, neuralnet.batch_size:x_tr.shape[0]}, \
options=run_options, run_metadata=run_metadata)
neuralnet.init_coeff_te()
loss, accuracy, correct_pred = sess.run([neuralnet.loss, neuralnet.accuracy, neuralnet.correct_pred], \
feed_dict={neuralnet.x:x_tr, neuralnet.y:y_tr, neuralnet.batch_size:x_tr.shape[0]})
summary_writer.add_summary(summaries, iteration)
iteration += 1
if(terminator): break
print("Epoch [%d / %d] (%d iteration) Loss:%.5f, Acc:%.5f" \
%(epoch, epochs, iteration, loss, accuracy))
saver.save(sess, PACK_PATH+"/Checkpoint/model_checker")
summary_writer.add_run_metadata(run_metadata, 'epoch-%d' % epoch)
def test(sess, saver, neuralnet, dataset, batch_size):
if(os.path.exists(PACK_PATH+"/Checkpoint/model_checker.index")):
print("\nRestoring parameters")
saver.restore(sess, PACK_PATH+"/Checkpoint/model_checker")
print("\nTest...")
confusion_matrix = np.zeros((dataset.num_class, dataset.num_class), np.int32)
while(True):
        x_te, y_te, terminator = dataset.next_test(1)  # y_te is not used in this project.
class_score = sess.run(neuralnet.score, \
feed_dict={neuralnet.x:x_te, neuralnet.batch_size:x_te.shape[0]})
label, logit = np.argmax(y_te[0]), np.argmax(class_score)
confusion_matrix[label, logit] += 1
if(terminator): break
print("\nConfusion Matrix")
print(confusion_matrix)
tot_precision, tot_recall, tot_f1score = 0, 0, 0
diagonal = 0
for idx_c in range(dataset.num_class):
precision = confusion_matrix[idx_c, idx_c] / np.sum(confusion_matrix[:, idx_c])
recall = confusion_matrix[idx_c, idx_c] / np.sum(confusion_matrix[idx_c, :])
        f1score = 2 * (precision * recall / (precision + recall))
        tot_precision += precision
        tot_recall += recall
        tot_f1score += f1score
        diagonal += confusion_matrix[idx_c, idx_c]
        print("Class-%d | Precision: %.5f, Recall: %.5f, F1-Score: %.5f" \
            %(idx_c, precision, recall, f1score))
accuracy = diagonal / np.sum(confusion_matrix)
print("\nTotal | Accuracy: %.5f, Precision: %.5f, Recall: %.5f, F1-Score: %.5f" \
%(accuracy, tot_precision/dataset.num_class, tot_recall/dataset.num_class, tot_f1score/dataset.num_class))
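
# Worked example of the per-class metrics computed above (hypothetical 2-class
# confusion matrix [[8, 2], [1, 9]], rows = true labels, columns = predictions):
#   class 0: precision = 8/9 ~= 0.889, recall = 8/10 = 0.800, F1 ~= 0.842
#   class 1: precision = 9/11 ~= 0.818, recall = 9/10 = 0.900, F1 ~= 0.857
#   overall accuracy = (8 + 9) / 20 = 0.850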
|
the-stack_106_17238
|
from __future__ import absolute_import, print_function
from .ast_tools import token, symbol, ast_to_string, match, atom_list
def slice_ast_to_dict(ast_seq):
sl_vars = {}
if isinstance(ast_seq, (list, tuple)):
for pattern in slice_patterns:
found,data = match(pattern,ast_seq)
if found:
sl_vars = {'begin':'_beg',
'end':'_end',
'step':'_stp',
'single_index':'_index'}
for key in data.keys():
data[key] = ast_to_string(data[key])
sl_vars.update(data)
                break
return sl_vars
def build_slice_atom(slice_vars, position):
# Note: This produces slices that are incorrect for Python
# evaluation because of slicing being exclusive in Python
# and inclusive for blitz on the top end of the range.
# This difference should really be handle in a blitz specific transform,
# but I've put it here for convenience. This doesn't cause any
# problems in code, its just a maintance hassle (I'll forget I did it here)
# and inelegant. *FIX ME*.
###########################################################################
# Handling negative indices.
#
# Range indices that begin with a negative sign, '-', are assumed to be
# negative. Blitz++ interprets negative indices differently than
# Python. To correct this, we subtract negative indices from the length
# of the array (at run-time). If indices do not start with a negative
# sign, they are assumed to be positive.
#
# This scheme doesn't work in the general case. For example, if you
# are calculating negative indices from a math expression that doesn't
# start with the negative sign, then it will be assumed positive and
# hence generate wrong results (and maybe a seg-fault).
#
    # I think this case might be remedied by calculating all ranges on
    # the fly, and then subtracting them from the length of the array in
    # that dimension if they are negative. This is major code bloat in the
    # functions and more work. Save till later...
###########################################################################
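    # Illustrative sketch (hypothetical expression, not from the original comments):
    # for a subscript like a[-2:5] at position 0, slice_vars arrives roughly as
    # {'var': 'a', 'begin': '-2', 'end': '5', 'step': '_stp'}, and this function
    # emits slice(Na0-2,5-1): the negative start is rebased on the run-time array
    # length and the positive stop is decremented for blitz's inclusive upper bound.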
    # I don't think the strip is necessary, but it ensures
# that '-' is the first sign for negative indices.
if slice_vars['single_index'] != '_index':
expr = '%(single_index)s' % slice_vars
else:
begin = slice_vars['begin'].strip()
if begin[0] == '-':
            slice_vars['begin'] = 'N' + slice_vars['var'] + repr(position) + begin
end = slice_vars['end'].strip()
if end != '_end' and end[0] != '-':
#compensate for blitz using inclusive indexing on top end
#of slice for positive indices.
slice_vars['end'] = end + '-1'
if end[0] == '-':
slice_vars['end'] = 'N%s[%d]%s-1' % (slice_vars['var'],position,end)
if slice_vars['step'] == '_stp':
# this if/then isn't strictly necessary, it'll
# just keep the output code a little cleaner
expr = 'slice(%(begin)s,%(end)s)' % slice_vars
else:
expr = 'slice(%(begin)s,%(end)s,%(step)s)' % slice_vars
val = atom_list(expr)
return val
def transform_subscript_list(subscript_dict):
# this is gonna edit the ast_list...
subscript_list = subscript_dict['subscript_list']
var = subscript_dict['var']
#skip the first entry (the subscript_list symbol)
slice_position = -1
for i in range(1,len(subscript_list)):
#skip commas...
if subscript_list[i][0] != token.COMMA:
slice_position += 1
slice_vars = slice_ast_to_dict(subscript_list[i])
slice_vars['var'] = var
# create a slice(b,e,s) atom and insert in
# place of the x:y:z atom in the tree.
subscript_list[i] = build_slice_atom(slice_vars, slice_position)
def harvest_subscript_dicts(ast_list):
""" Needs Tests!
"""
subscript_lists = []
if isinstance(ast_list, list):
found,data = match(indexed_array_pattern,ast_list)
# data is a dict with 'var' = variable name
# and 'subscript_list' = to the ast_seq for the subscript list
if found:
subscript_lists.append(data)
for item in ast_list:
if isinstance(item, list):
subscript_lists.extend(harvest_subscript_dicts(item))
return subscript_lists
def transform_slices(ast_list):
""" Walk through an ast_list converting all x:y:z subscripts
to slice(x,y,z) subscripts.
"""
all_dicts = harvest_subscript_dicts(ast_list)
for subscript_dict in all_dicts:
transform_subscript_list(subscript_dict)
slice_patterns = []
CLN = (token.COLON,':')
CLN2= (symbol.sliceop, (token.COLON, ':'))
CLN2_STEP = (symbol.sliceop, (token.COLON, ':'),['step'])
# [begin:end:step]
slice_patterns.append((symbol.subscript, ['begin'],CLN,['end'], CLN2_STEP ))
# [:end:step]
slice_patterns.append((symbol.subscript, CLN,['end'], CLN2_STEP ))
# [begin::step]
slice_patterns.append((symbol.subscript, ['begin'],CLN, CLN2_STEP ))
# [begin:end:]
slice_patterns.append((symbol.subscript, ['begin'],CLN,['end'], CLN2 ))
# [begin::]
slice_patterns.append((symbol.subscript, ['begin'],CLN, CLN2 ))
# [:end:]
slice_patterns.append((symbol.subscript, CLN,['end'], CLN2, ))
# [::step]
slice_patterns.append((symbol.subscript, CLN, CLN2_STEP ))
# [::]
slice_patterns.append((symbol.subscript, CLN, CLN2 ))
# begin:end variants
slice_patterns.append((symbol.subscript, ['begin'],CLN,['end']))
slice_patterns.append((symbol.subscript, CLN,['end']))
slice_patterns.append((symbol.subscript, ['begin'],CLN))
slice_patterns.append((symbol.subscript, CLN))
# a[0] variant -- can't believe I left this out...
slice_patterns.append((symbol.subscript,['single_index']))
indexed_array_pattern = \
(symbol.power,
(symbol.atom,(token.NAME, ['var'])),
(symbol.trailer,
(token.LSQB, '['),
['subscript_list'],
(token.RSQB, ']')
)
)
|
the-stack_106_17239
|
# -*- coding: utf-8 -*-
from os import path as os_path
import numpy as np
import logging
from ctypes import c_double
from multiprocessing.sharedctypes import RawArray
from .spectrum import hash_numpy_array, Spectrum
logger = logging.getLogger(__name__)
class SpectrumArray(object):
"""
An object representing an array of spectra.
:ivar np.ndarray wavelengths:
A 1D array listing the wavelengths at which this array of spectra are sampled.
:ivar np.ndarray values:
A 2D array listing the value measurements for each spectrum in this SpectrumArray.
:ivar np.ndarray value_errors:
A 2D array listing the standard errors in the value measurements for each spectrum in this SpectrumArray.
:ivar str raster_hash:
A string hash of the wavelength raster, used to quickly check whether spectra are sampled on a common raster.
:ivar list[dict] metadata_list:
A list of dictionaries of metadata about each of the spectra in this SpectrumArray
:ivar bool shared_memory:
Boolean flag indicating whether this SpectrumArray uses multiprocessing shared memory.
"""
def __init__(self, wavelengths, values, value_errors, metadata_list, shared_memory=False):
"""
Instantiate new SpectrumArray object.
:param wavelengths:
A 1D array listing the wavelengths at which this array of spectra are sampled.
:param values:
A 2D array listing the value measurements for each spectrum in this SpectrumArray.
:param value_errors:
A 2D array listing the standard errors in the value measurements for each spectrum in this SpectrumArray.
:param metadata_list:
A list of dictionaries of metadata about each of the spectra in this SpectrumArray
:param shared_memory:
Boolean flag indicating whether the data in this SpectrumArray is supplied in multiprocessing shared memory.
:type shared_memory:
bool
"""
# Sanity check inputs
assert wavelengths.shape[0] == values.shape[1], "Inconsistent number of wavelength samples."
assert wavelengths.shape[0] == value_errors.shape[1], "Inconsistent number of wavelength samples."
assert values.shape[0] == value_errors.shape[0], "Inconsistent number of spectra in SpectrumArray."
assert len(metadata_list) == values.shape[0], "Inconsistent number of spectra in SpectrumArray."
# Store inputs as instance variables
self.wavelengths = wavelengths
self.values = values
self.value_errors = value_errors
self.metadata_list = metadata_list
self.shared_memory = shared_memory
self._update_raster_hash()
def __len__(self):
"""
Return the number of spectra in this SpectrumArray.
:return:
Integer number of spectra in this SpectrumArray.
"""
return self.values.shape[0]
@staticmethod
def _allocate_memory(wavelengths, item_count, shared_memory):
# Allocate numpy array to store this SpectrumArray into
if not shared_memory:
# If we're not using shared memory (which the multiprocessing module can share between threads),
# we allocate a simple numpy array
values = np.empty([item_count, len(wavelengths)])
value_errors = np.empty([item_count, len(wavelengths)])
else:
            # If we need to share this array between threads (read only!), then we allocate the memory as a
# multiprocessing RawArray
wavelengths_shared_base = RawArray(c_double, wavelengths.size)
wavelengths_shared = np.frombuffer(wavelengths_shared_base)
wavelengths_shared[:] = wavelengths[:]
wavelengths = wavelengths_shared
values_shared_base = RawArray(c_double, wavelengths.size * item_count)
values = np.frombuffer(values_shared_base)
values = values.reshape([item_count, len(wavelengths)])
value_errors_shared_base = RawArray(c_double, wavelengths.size * item_count)
value_errors = np.frombuffer(value_errors_shared_base)
value_errors = value_errors.reshape([item_count, len(wavelengths)])
return wavelengths, values, value_errors
@classmethod
def from_spectra(cls, spectra, shared_memory=False):
"""
Instantiate new SpectrumArray object, using data in a list of existing Spectrum objects.
:param spectra:
List of Spectrum objects.
:param shared_memory:
Boolean flag indicating whether this SpectrumArray should use multiprocessing shared memory.
:type shared_memory:
bool
:return:
SpectrumArray object
"""
assert isinstance(spectra, (list, tuple)), "Argument <spectra> must be a list or tuple of spectra."
assert len(spectra) > 0, "Cannot open a SpectrumArray with no members: there is no wavelength raster"
for spectrum in spectra:
assert isinstance(spectrum, Spectrum), "Argument <spectra> must be a list or tuple of spectra. " \
"Got object of type <{}>".format(type(spectrum))
# Inspect first spectrum to work out what wavelength raster we're using
raster_hash = spectra[0].raster_hash
wavelengths = spectra[0].wavelengths
# Allocate numpy array to store this SpectrumArray into
wavelengths, values, value_errors = SpectrumArray._allocate_memory(wavelengths=wavelengths,
item_count=len(spectra),
shared_memory=shared_memory)
# Copy spectra into new array one by one
for i, item in enumerate(spectra):
assert item.raster_hash == raster_hash, \
"Item <{}> has a different wavelength raster from preceding spectra in SpectrumArray.".format(i)
values[i, :] = item.values
value_errors[i, :] = item.value_errors
# Instantiate a SpectrumArray object
return cls(wavelengths=wavelengths,
values=values,
value_errors=value_errors,
metadata_list=[i.metadata for i in spectra],
shared_memory=shared_memory)
@classmethod
def from_files(cls, filenames, metadata_list, path="", binary=True, shared_memory=False):
"""
Instantiate new SpectrumArray object, using data in a list of text files.
:param filenames:
List of the filenames of the text files from which to import spectra. Each file should have three columns:
wavelength, value, and error in value.
:type filenames:
List[str]
:param path:
The file path from which to load the list of spectrum files.
:type path:
str
:param metadata_list:
A list of dictionaries of metadata about each of the spectra in this SpectrumArray
:param binary:
Boolean specifying whether we store spectra on disk in binary format or plain text.
:type binary:
bool
:param shared_memory:
Boolean flag indicating whether this SpectrumArray should use multiprocessing shared memory.
:type shared_memory:
bool
:return:
SpectrumArray object
"""
assert isinstance(filenames, (list, tuple)), "Argument <filenames> must be a list or tuple of spectra."
assert len(filenames) > 0, "Cannot open a SpectrumArray with no members: there is no wavelength raster"
# Load first spectrum to work out what wavelength raster we're using
if not binary:
wavelengths, item_values, item_value_errors = np.loadtxt(str(os_path.join(path, filenames[0]))).T
else:
wavelengths, item_values, item_value_errors = np.load(str(os_path.join(path, filenames[0])))
raster_hash = hash_numpy_array(wavelengths)
# Allocate numpy array to store this SpectrumArray into
wavelengths, values, value_errors = SpectrumArray._allocate_memory(wavelengths=wavelengths,
item_count=len(filenames),
shared_memory=shared_memory)
# Load spectra one by one
for i, filename in enumerate(filenames):
filename = os_path.join(path, filename)
assert os_path.exists(filename), "File <{}> does not exist.".format(filename)
if not binary:
item_wavelengths, item_values, item_value_errors = np.loadtxt(str(filename)).T
else:
item_wavelengths, item_values, item_value_errors = np.load(str(filename))
assert hash_numpy_array(item_wavelengths) == raster_hash, \
"Item <{}> has a different wavelength raster from preceding spectra in SpectrumArray.".format(
filename)
values[i, :] = item_values
value_errors[i, :] = item_value_errors
# Instantiate a SpectrumArray object
return cls(wavelengths=wavelengths,
values=values,
value_errors=value_errors,
metadata_list=metadata_list,
shared_memory=shared_memory)
def __str__(self):
return "<{module}.{name} instance".format(module=self.__module__,
name=type(self).__name__)
def __repr__(self):
return "<{0}.{1} object at {2}>".format(self.__module__,
type(self).__name__, hex(id(self)))
def _update_raster_hash(self):
"""
Update the internal string hash of the wavelength raster that this spectrum array is sampled on.
This hash is used to quickly check whether two spectra are sampled on the same raster before doing arithmetic
operations on them.
:return:
None
"""
self.raster_hash = hash_numpy_array(self.wavelengths)
def get_metadata(self, index):
"""
Extract the metadata which we have on a single spectrum from a SpectrumArray.
:param index:
Index of the spectrum to extract
:type index:
int
:return:
dict
"""
return self.metadata_list[index]
def extract_item(self, index):
"""
Extract a single spectrum from a SpectrumArray. This creates a numpy view of the spectrum, without copying the
data.
:param index:
Index of the spectrum to extract
:type index:
int
:return:
Spectrum object
"""
# Check that requested index is within range
assert 0 <= index < len(self), "Index of SpectrumArray out of range."
index = int(index)
return Spectrum(wavelengths=self.wavelengths,
values=self.values[index, :],
value_errors=self.value_errors[index, :],
metadata=self.metadata_list[index])
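
# Illustrative usage sketch (hypothetical Spectrum objects sampled on one raster):
#     arr = SpectrumArray.from_spectra([spec_a, spec_b], shared_memory=True)
#     first = arr.extract_item(0)     # numpy view into the shared buffers, no copy
#     meta = arr.get_metadata(0)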
|
the-stack_106_17241
|
#!/usr/bin/env python3
# Foundations of Python Network Programming, Third Edition
# https://github.com/brandon-rhodes/fopnp/blob/m/py3/chapter18/xmlrpc_introspect.py
# XML-RPC client
import xmlrpc.client
def main():
proxy = xmlrpc.client.ServerProxy('http://127.0.0.1:7001')
print('Here are the functions supported by this server:')
for method_name in proxy.system.listMethods():
if method_name.startswith('system.'):
continue
signatures = proxy.system.methodSignature(method_name)
if isinstance(signatures, list) and signatures:
for signature in signatures:
print('%s(%s)' % (method_name, signature))
else:
print('%s(...)' % (method_name,))
method_help = proxy.system.methodHelp(method_name)
if method_help:
print(' ', method_help)
if __name__ == '__main__':
main()
|
the-stack_106_17242
|
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
import os
# Set chrome options for working with headless mode (no screen)
chrome_options = webdriver.ChromeOptions()
# chrome_options.add_argument("headless")
# chrome_options.add_argument("no-sandbox")
# chrome_options.add_argument("disable-dev-shm-usage")
# Update webdriver instance of chrome-driver with adding chrome options
driver = webdriver.Chrome(options=chrome_options)
# driver = webdriver.Chrome("/Users/home/Desktop/chromedriver")
# Connect to the application
APP_IP = os.environ['MASTER_PUBLIC_IP']
url = "http://"+APP_IP.strip()+":8080/"
# url = "http://localhost:8080"
print(url)
driver.get(url)
sleep(3)
owners_link = driver.find_element_by_link_text("OWNERS")
owners_link.click()
sleep(2)
all_link = driver.find_element_by_link_text("ALL")
all_link.click()
sleep(2)
# Verify that table loaded
sleep(1)
verify_table = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.TAG_NAME, "table")))
print("Table loaded")
driver.quit()
|
the-stack_106_17243
|
"""
MIT License
Copyright (c) 2019 Mingqi Yuan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""
Encoding = UTF-8
By Mingqi, Yuan, 2019/3/18
Usage: An encoder for json.dumps()
"""
import numpy as np
import json
class encoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(encoder, self).default(obj)
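

# Illustrative usage sketch (payload values are hypothetical): json.dumps accepts a
# custom JSONEncoder subclass via its `cls` argument, so numpy scalars and arrays in
# the payload are converted by encoder.default() before serialization.
if __name__ == '__main__':
    payload = {'scores': np.arange(3), 'count': np.int64(3), 'ratio': np.float32(0.5)}
    print(json.dumps(payload, cls=encoder))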
|
the-stack_106_17244
|
import math
import numpy as np
import tensorflow as tf
def identity_initializer(scale=1.0):
"""Identity initializer by Quoc V. Le et al.
This is also recommended by at least one paper to initialize
    the weights matrix in an RNN.
References:
Paper: Quoc V. Le et al., http://arxiv.org/abs/1504.00941
Parameters
----------
scale: float, optional
        The scale of the identity values.
Returns
----------
_initializer: function
Returns the init function.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None):
if len(shape) == 1:
return tf.constant(0., dtype=dtype, shape=shape)
elif len(shape) == 2 and shape[0] == shape[1]:
return tf.constant(scale*np.identity(shape[0], dtype))
elif len(shape) == 4 and shape[2] == shape[3]:
array = np.zeros(shape, dtype=float)
            cx, cy = shape[0] // 2, shape[1] // 2  # integer centre indices (avoids float indexing under Python 3)
for i in range(shape[2]):
array[cx, cy, i, i] = 1
return tf.constant(scale*array, dtype=dtype)
else:
raise ValueError("Invalid shape.")
return _initializer
def _orthogonal(shape):
flat_shape = (shape[0], np.prod(shape[1:]))
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
# pick the one with the correct shape
q = u if u.shape == flat_shape else v
return q.reshape(shape) #this needs to be corrected to float32
def orthogonal_initializer(scale=1.0):
"""Orthogonal initializer by Saxe et al.
This initialization is recommended for initializing the
    hidden weights in an RNN.
References:
From Lasagne and Keras.
Paper: Saxe et al., http://arxiv.org/abs/1312.6120
Parameters
----------
scale: float, optional
The scale of the orthogonal values.
Returns
----------
_initializer: function
Returns the init function.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None):
q = _orthogonal(shape)
return tf.constant(scale * q[:shape[0], :shape[1]], dtype=dtype)
return _initializer
def bn_lstm_identity_initializer(scale=1.0):
"""Special indentity initializer used for batch normalization in LSTMs.
References:
From: http://olavnymoen.com/2016/07/07/rnn-batch-normalization
Parameters
----------
scale: float, optional
The scale of the identity values.
Returns
----------
_initializer: function
Returns the init function.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None):
        """Ugly because the LSTM parameters are calculated in one matrix multiply."""
size = shape[0]
# gate (j) is identity
t = np.zeros(shape)
t[:, size:size * 2] = np.identity(size) * scale # j
t[:, :size] = _orthogonal([size, size]) # i
t[:, size * 2:size * 3] = _orthogonal([size, size]) # f
t[:, size * 3:] = _orthogonal([size, size]) # o
return tf.constant(t, dtype)
return _initializer
def bilinear_initializer():
"""Bilinear initializer, which is recommended for deconvolution when
    used for upscaling. This op is called conv2d_transpose() in TensorFlow.
References:
J. Long et al.
From: http://arxiv.org/abs/1411.4038
Returns
----------
_initializer: function
Returns the init function.
"""
def _initializer(shape, dtype=tf.float32, partition_info=None):
width = shape[0]
        height = shape[1]
        f = math.ceil(width/2.0)
        c = (2 * f - 1 - f % 2) / (2.0 * f)
        bilinear = np.zeros([shape[0], shape[1]])
        for x in range(width):
            for y in range(height):
value = (1 - abs(x / f - c)) * (1 - abs(y / f - c))
bilinear[x, y] = value
weights = np.zeros(shape)
for i in range(shape[2]):
weights[:, :, i, i] = bilinear
return tf.constant(weights, dtype)
return _initializer
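

# Minimal sanity-check sketch (assumption: calling the returned initializer directly,
# which works in eager TF2 and inside a TF1 graph). Each factory above returns a
# callable taking (shape, dtype, partition_info) and yielding a constant tensor.
if __name__ == '__main__':
    init_fn = orthogonal_initializer(scale=1.0)
    w_rec = init_fn([64, 64])   # e.g. the hidden-to-hidden weights of a toy RNN cell
    print(w_rec.shape)          # (64, 64)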
|
the-stack_106_17248
|
import os
from definitions.ir.dfg_node import *
class DFSSplitReader(DFGNode):
def __init__(self, inputs, outputs, com_name, com_category,
com_options = [], com_redirs = [], com_assignments=[]):
super().__init__(inputs, outputs, com_name, com_category,
com_options=com_options,
com_redirs=com_redirs,
com_assignments=com_assignments)
def set_server_address(self, addr): # ex addr: 127.0.0.1:50051
self.com_options.append((3, Arg(string_to_argument(f"--addr {addr}"))))
def make_dfs_split_reader_node(inputs, output, split_num, prefix):
split_reader_bin = os.path.join(config.PASH_TOP, config.config['runtime']['dfs_split_reader_binary'])
com_name = Arg(string_to_argument(split_reader_bin))
com_category = "pure"
options = []
options.append((1, Arg(string_to_argument(f"--prefix '{prefix}'"))))
options.append((2, Arg(string_to_argument(f"--split {split_num}"))))
return DFSSplitReader(inputs,
[output],
com_name,
com_category,
options)
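
# Illustrative call (hypothetical file-id arguments): given IR file ids `inp` and `out`,
#     node = make_dfs_split_reader_node([inp], out, split_num=0, prefix="batch")
#     node.set_server_address("127.0.0.1:50051")
# appends an "--addr 127.0.0.1:50051" option to the reader's command options.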
|
the-stack_106_17249
|
# Copyright 2018 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib import constants as lib_const
from neutron_lib import context
from oslo_utils import uuidutils
from neutron.agent.l3 import agent as l3_agent
from neutron.agent.l3.extensions import port_forwarding as pf
from neutron.agent.l3 import l3_agent_extension_api as l3_ext_api
from neutron.agent.l3 import router_info as l3router
from neutron.agent.linux import iptables_manager
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.objects import port_forwarding as pf_obj
from neutron.objects import router
from neutron.tests import base
from neutron.tests.unit.agent.l3 import test_agent
_uuid = uuidutils.generate_uuid
TEST_FIP = '10.100.2.45'
BINARY_NAME = iptables_manager.get_binary_name()
DEFAULT_RULE = ('PREROUTING', '-j %s-fip-pf' % BINARY_NAME)
DEFAULT_CHAIN = 'fip-pf'
HOSTNAME = 'testhost'
class PortForwardingExtensionBaseTestCase(
test_agent.BasicRouterOperationsFramework):
def setUp(self):
super(PortForwardingExtensionBaseTestCase, self).setUp()
self.fip_pf_ext = pf.PortForwardingAgentExtension()
self.context = context.get_admin_context()
self.connection = mock.Mock()
self.floatingip2 = router.FloatingIP(context=None, id=_uuid(),
floating_ip_address='172.24.6.12',
floating_network_id=_uuid(),
router_id=_uuid(),
status='ACTIVE')
self.portforwarding1 = pf_obj.PortForwarding(
context=None, id=_uuid(), floatingip_id=self.floatingip2.id,
external_port=1111, protocol='tcp', internal_port_id=_uuid(),
internal_ip_address='1.1.1.1', internal_port=11111,
floating_ip_address=self.floatingip2.floating_ip_address,
router_id=self.floatingip2.router_id)
self.agent = l3_agent.L3NATAgent(HOSTNAME, self.conf)
self.ex_gw_port = {'id': _uuid()}
self.fip = {'id': _uuid(),
'floating_ip_address': TEST_FIP,
'fixed_ip_address': '192.168.0.1',
'floating_network_id': _uuid(),
'port_id': _uuid(),
'host': HOSTNAME}
self.router = {'id': self.floatingip2.router_id,
'gw_port': self.ex_gw_port,
'ha': False,
'distributed': False,
lib_const.FLOATINGIP_KEY: [self.fip]}
self.router_info = l3router.RouterInfo(
self.agent, self.floatingip2.router_id, self.router,
**self.ri_kwargs)
self.centralized_port_forwarding_fip_set = set(
[str(self.floatingip2.floating_ip_address) + '/32'])
self.pf_managed_fips = [self.floatingip2.id]
self.router_info.ex_gw_port = self.ex_gw_port
self.router_info.fip_managed_by_port_forwardings = self.pf_managed_fips
self.agent.router_info[self.router['id']] = self.router_info
self.get_router_info = mock.patch(
'neutron.agent.l3.l3_agent_extension_api.'
'L3AgentExtensionAPI.get_router_info').start()
self.get_router_info.return_value = self.router_info
self.agent_api = l3_ext_api.L3AgentExtensionAPI(None, None)
self.fip_pf_ext.consume_api(self.agent_api)
self.port_forwardings = [self.portforwarding1]
class FipPortForwardingExtensionInitializeTestCase(
PortForwardingExtensionBaseTestCase):
@mock.patch.object(registry, 'register')
@mock.patch.object(resources_rpc, 'ResourcesPushRpcCallback')
def test_initialize_subscribed_to_rpc(self, rpc_mock, subscribe_mock):
call_to_patch = 'neutron_lib.rpc.Connection'
with mock.patch(call_to_patch,
return_value=self.connection) as create_connection:
self.fip_pf_ext.initialize(
self.connection, lib_const.L3_AGENT_MODE)
create_connection.assert_has_calls([mock.call()])
self.connection.create_consumer.assert_has_calls(
[mock.call(
resources_rpc.resource_type_versioned_topic(
resources.PORTFORWARDING),
[rpc_mock()],
fanout=True)]
)
subscribe_mock.assert_called_with(
mock.ANY, resources.PORTFORWARDING)
class FipPortForwardingExtensionTestCase(PortForwardingExtensionBaseTestCase):
def setUp(self):
super(FipPortForwardingExtensionTestCase, self).setUp()
self.fip_pf_ext.initialize(
self.connection, lib_const.L3_AGENT_MODE)
self._set_bulk_pull_mock()
def _set_bulk_pull_mock(self):
def _bulk_pull_mock(context, resource_type, filter_kwargs=None):
if 'floatingip_id' in filter_kwargs:
result = []
for pfobj in self.port_forwardings:
if pfobj.floatingip_id in filter_kwargs['floatingip_id']:
result.append(pfobj)
return result
return self.port_forwardings
self.bulk_pull = mock.patch(
'neutron.api.rpc.handlers.resources_rpc.'
'ResourcesPullRpcApi.bulk_pull').start()
self.bulk_pull.side_effect = _bulk_pull_mock
def _get_chainrule_tag_from_pf_obj(self, target_obj):
rule_tag = 'fip_portforwarding-' + target_obj.id
chain_name = (
'pf-' + target_obj.id)[:lib_const.MAX_IPTABLES_CHAIN_LEN_WRAP]
chain_rule = (chain_name,
'-d %s/32 -p %s -m %s --dport %s '
'-j DNAT --to-destination %s:%s' % (
target_obj.floating_ip_address,
target_obj.protocol,
target_obj.protocol,
target_obj.external_port,
target_obj.internal_ip_address,
target_obj.internal_port))
return chain_name, chain_rule, rule_tag
def _assert_called_iptables_process(self, mock_add_chain,
mock_add_rule, mock_add_fip,
mock_send_fip_status, target_obj=None):
if target_obj:
obj = target_obj
else:
obj = self.portforwarding1
(chain_name,
chain_rule, rule_tag) = self._get_chainrule_tag_from_pf_obj(obj)
mock_add_chain.assert_has_calls([mock.call('fip-pf'),
mock.call(chain_name)])
mock_add_rule.assert_has_calls(
[mock.call(DEFAULT_RULE[0], DEFAULT_RULE[1]),
mock.call(DEFAULT_CHAIN, ('-j %s-' % BINARY_NAME) + chain_name,
tag=rule_tag),
mock.call(chain_name, chain_rule[1], tag=rule_tag)])
mock_add_fip.assert_called_once_with(
{'floating_ip_address': str(obj.floating_ip_address)},
mock.ANY, mock.ANY)
fip_status = {
obj.floatingip_id:
lib_const.FLOATINGIP_STATUS_ACTIVE}
mock_send_fip_status.assert_called_once_with(mock.ANY, fip_status)
@mock.patch.object(pf.PortForwardingAgentExtension,
'_sending_port_forwarding_fip_status')
@mock.patch.object(iptables_manager.IptablesTable, 'add_rule')
@mock.patch.object(iptables_manager.IptablesTable, 'add_chain')
@mock.patch.object(l3router.RouterInfo, 'add_floating_ip')
def test_add_update_router(self, mock_add_fip,
mock_add_chain, mock_add_rule,
mock_send_fip_status):
# simulate the router add and already there is a port forwarding
# resource association.
mock_add_fip.return_value = lib_const.FLOATINGIP_STATUS_ACTIVE
self.fip_pf_ext.add_router(self.context, self.router)
self._assert_called_iptables_process(
mock_add_chain, mock_add_rule, mock_add_fip, mock_send_fip_status,
target_obj=self.portforwarding1)
# Then we create another port forwarding with the same fip
mock_add_fip.reset_mock()
mock_send_fip_status.reset_mock()
mock_add_chain.reset_mock()
mock_add_rule.reset_mock()
test_portforwarding = pf_obj.PortForwarding(
context=None, id=_uuid(), floatingip_id=self.floatingip2.id,
external_port=2222, protocol='tcp', internal_port_id=_uuid(),
internal_ip_address='2.2.2.2', internal_port=22222,
floating_ip_address=self.floatingip2.floating_ip_address,
router_id=self.floatingip2.router_id)
self.pf_managed_fips.append(self.floatingip2.id)
self.port_forwardings.append(test_portforwarding)
self.fip_pf_ext.update_router(self.context, self.router)
self._assert_called_iptables_process(
mock_add_chain, mock_add_rule, mock_add_fip, mock_send_fip_status,
target_obj=test_portforwarding)
@mock.patch.object(iptables_manager.IptablesTable, 'add_rule')
@mock.patch.object(iptables_manager.IptablesTable, 'add_chain')
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
@mock.patch.object(iptables_manager.IptablesTable, 'remove_chain')
def test_add_update_router_port_forwarding_change(
self, mock_remove_chain, mock_ip_device, mock_add_chain,
mock_add_rule):
self.fip_pf_ext.add_router(self.context, self.router)
update_portforwarding = pf_obj.PortForwarding(
context=None, id=self.portforwarding1.id,
floatingip_id=self.portforwarding1.floatingip_id,
external_port=2222, protocol='tcp', internal_port_id=_uuid(),
internal_ip_address='2.2.2.2', internal_port=22222,
floating_ip_address=self.portforwarding1.floating_ip_address,
router_id=self.portforwarding1.router_id)
self.port_forwardings = [update_portforwarding]
mock_delete = mock.Mock()
mock_ip_device.return_value = mock_delete
self.fip_pf_ext.update_router(self.context, self.router)
current_chain = ('pf-' + self.portforwarding1.id)[
:lib_const.MAX_IPTABLES_CHAIN_LEN_WRAP]
mock_remove_chain.assert_called_once_with(current_chain)
mock_delete.delete_socket_conntrack_state.assert_called_once_with(
str(self.portforwarding1.floating_ip_address),
self.portforwarding1.external_port,
protocol=self.portforwarding1.protocol)
(chain_name,
chain_rule, rule_tag) = self._get_chainrule_tag_from_pf_obj(
update_portforwarding)
mock_add_chain.assert_has_calls([mock.call('fip-pf'),
mock.call(chain_name)])
mock_add_rule.assert_has_calls(
[mock.call(DEFAULT_RULE[0], DEFAULT_RULE[1]),
mock.call(DEFAULT_CHAIN, ('-j %s-' % BINARY_NAME) + chain_name,
tag=rule_tag),
mock.call(chain_name, chain_rule[1], tag=rule_tag)])
@mock.patch.object(pf.PortForwardingAgentExtension,
'_sending_port_forwarding_fip_status')
@mock.patch('neutron.agent.linux.ip_lib.IPDevice')
@mock.patch.object(iptables_manager.IptablesTable, 'remove_chain')
def test_add_update_router_port_forwarding_remove(
self, mock_remove_chain, mock_ip_device,
mock_send_fip_status):
self.fip_pf_ext.add_router(self.context, self.router)
mock_send_fip_status.reset_mock()
self.port_forwardings = []
mock_device = mock.Mock()
mock_ip_device.return_value = mock_device
self.fip_pf_ext.update_router(self.context, self.router)
current_chain = ('pf-' + self.portforwarding1.id)[
:lib_const.MAX_IPTABLES_CHAIN_LEN_WRAP]
mock_remove_chain.assert_called_once_with(current_chain)
mock_device.delete_socket_conntrack_state.assert_called_once_with(
str(self.portforwarding1.floating_ip_address),
self.portforwarding1.external_port,
protocol=self.portforwarding1.protocol)
mock_device.delete_addr_and_conntrack_state.assert_called_once_with(
str(self.portforwarding1.floating_ip_address))
fip_status = {
self.portforwarding1.floatingip_id:
lib_const.FLOATINGIP_STATUS_DOWN}
mock_send_fip_status.assert_called_once_with(mock.ANY, fip_status)
def test_check_if_need_process_no_snat_ns(self):
ex_gw_port = {'id': _uuid()}
router_id = _uuid()
router = {'id': router_id,
'gw_port': ex_gw_port,
'ha': False,
'distributed': True}
router_info = l3router.RouterInfo(
self.agent, router_id, router,
**self.ri_kwargs)
router_info.agent_conf.agent_mode = lib_const.L3_AGENT_MODE_DVR_SNAT
router_info.fip_managed_by_port_forwardings = True
router_info.snat_namespace = mock.Mock()
router_info.snat_namespace.exists.return_value = False
self.assertFalse(self.fip_pf_ext._check_if_need_process(router_info))
class RouterFipPortForwardingMappingTestCase(base.BaseTestCase):
def setUp(self):
super(RouterFipPortForwardingMappingTestCase, self).setUp()
self.mapping = pf.RouterFipPortForwardingMapping()
self.router1 = _uuid()
self.router2 = _uuid()
self.floatingip1 = _uuid()
self.floatingip2 = _uuid()
self.floatingip3 = _uuid()
self.portforwarding1 = pf_obj.PortForwarding(
context=None, id=_uuid(), floatingip_id=self.floatingip1,
external_port=1111, protocol='tcp', internal_port_id=_uuid(),
internal_ip_address='1.1.1.1', internal_port=11111,
floating_ip_address='111.111.111.111',
router_id=self.router1)
self.portforwarding2 = pf_obj.PortForwarding(
context=None, id=_uuid(), floatingip_id=self.floatingip1,
external_port=1112, protocol='tcp', internal_port_id=_uuid(),
internal_ip_address='1.1.1.2', internal_port=11112,
floating_ip_address='111.111.111.111',
router_id=self.router1)
self.portforwarding3 = pf_obj.PortForwarding(
context=None, id=_uuid(), floatingip_id=self.floatingip2,
external_port=1113, protocol='tcp', internal_port_id=_uuid(),
internal_ip_address='1.1.1.3', internal_port=11113,
floating_ip_address='111.222.111.222',
router_id=self.router1)
self.portforwarding4 = pf_obj.PortForwarding(
context=None, id=_uuid(), floatingip_id=self.floatingip3,
external_port=2222, protocol='tcp', internal_port_id=_uuid(),
internal_ip_address='2.2.2.2', internal_port=22222,
floating_ip_address='222.222.222.222',
router_id=self.router2)
self.portforwardings_dict = {
self.portforwarding1.id: self.portforwarding1,
self.portforwarding2.id: self.portforwarding2,
self.portforwarding3.id: self.portforwarding3,
self.portforwarding4.id: self.portforwarding4}
def _set_pf(self):
self.mapping.set_port_forwardings(self.portforwardings_dict.values())
def test_set_port_forwardings(self):
self._set_pf()
pf_ids = self.portforwardings_dict.keys()
for pf_id, obj in self.mapping.managed_port_forwardings.items():
self.assertIn(pf_id, pf_ids)
self.assertEqual(obj, self.portforwardings_dict[pf_id])
self.assertEqual(
len(pf_ids), len(self.mapping.managed_port_forwardings.keys()))
fip_pf_set = {
self.floatingip1: set(
[self.portforwarding1.id, self.portforwarding2.id]),
self.floatingip2: set([self.portforwarding3.id]),
self.floatingip3: set([self.portforwarding4.id])
}
for fip_id, pf_set in self.mapping.fip_port_forwarding.items():
self.assertIn(
fip_id, [self.floatingip1, self.floatingip2, self.floatingip3])
self.assertEqual(0, len(pf_set - fip_pf_set[fip_id]))
self.assertEqual(
len([self.floatingip1, self.floatingip2, self.floatingip3]),
len(self.mapping.fip_port_forwarding))
router_fip = {
self.router1: set([self.floatingip1, self.floatingip2]),
self.router2: set([self.floatingip3])
}
for router_id, fip_set in self.mapping.router_fip_mapping.items():
self.assertIn(router_id, [self.router1, self.router2])
self.assertEqual(0, len(fip_set - router_fip[router_id]))
self.assertEqual(
len([self.router1, self.router2]),
len(self.mapping.router_fip_mapping.keys()))
def test_update_port_forwarding(self):
self._set_pf()
new_pf1 = pf_obj.PortForwarding(
context=None, id=self.portforwarding2.id,
floatingip_id=self.floatingip1,
external_port=11122, protocol='tcp',
internal_port_id=self.portforwarding2.internal_port_id,
internal_ip_address='1.1.1.22', internal_port=11122,
floating_ip_address='111.111.111.111',
router_id=self.router1)
self.mapping.update_port_forwardings([new_pf1])
self.assertEqual(
new_pf1,
self.mapping.managed_port_forwardings[self.portforwarding2.id])
def test_del_port_forwardings(self):
self._set_pf()
del_pfs = [self.portforwarding3, self.portforwarding2,
self.portforwarding4]
self.mapping.del_port_forwardings(del_pfs)
self.assertEqual(
[self.portforwarding1.id],
list(self.mapping.managed_port_forwardings.keys()))
self.assertEqual({self.floatingip1: set([self.portforwarding1.id])},
self.mapping.fip_port_forwarding)
self.assertEqual({self.router1: set([self.floatingip1])},
self.mapping.router_fip_mapping)
def test_clear_by_fip(self):
self._set_pf()
self.mapping.clear_by_fip(self.floatingip1, self.router1)
router_fip = {
self.router1: set([self.floatingip2]),
self.router2: set([self.floatingip3])
}
for router_id, fip_set in self.mapping.router_fip_mapping.items():
self.assertIn(router_id, [self.router1, self.router2])
self.assertEqual(0, len(fip_set - router_fip[router_id]))
fip_pf_set = {
self.floatingip2: set([self.portforwarding3.id]),
self.floatingip3: set([self.portforwarding4.id])
}
for fip_id, pf_set in self.mapping.fip_port_forwarding.items():
self.assertIn(
fip_id, [self.floatingip2, self.floatingip3])
self.assertEqual(0, len(pf_set - fip_pf_set[fip_id]))
self.assertEqual(
len([self.floatingip2, self.floatingip3]),
len(self.mapping.fip_port_forwarding))
pfs_dict = {self.portforwarding3.id: self.portforwarding3,
self.portforwarding4.id: self.portforwarding4}
for pf_id, obj in self.mapping.managed_port_forwardings.items():
self.assertIn(pf_id,
[self.portforwarding3.id, self.portforwarding4.id])
self.assertEqual(obj, pfs_dict[pf_id])
self.assertEqual(
len([self.portforwarding3.id, self.portforwarding4.id]),
len(self.mapping.managed_port_forwardings.keys()))
|
the-stack_106_17251
|
# Copyright 2021 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from datetime import datetime
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple
from feast import Entity
from feast.feature_view import FeatureView
from feast.infra.infra_object import InfraObject
from feast.protos.feast.core.Registry_pb2 import Registry as RegistryProto
from feast.protos.feast.types.EntityKey_pb2 import EntityKey as EntityKeyProto
from feast.protos.feast.types.Value_pb2 import Value as ValueProto
from feast.repo_config import RepoConfig
class OnlineStore(ABC):
"""
OnlineStore is an object used for all interaction between Feast and the service used for online storage of
features.
"""
@abstractmethod
def online_write_batch(
self,
config: RepoConfig,
table: FeatureView,
data: List[
Tuple[EntityKeyProto, Dict[str, ValueProto], datetime, Optional[datetime]]
],
progress: Optional[Callable[[int], Any]],
) -> None:
"""
Write a batch of feature rows to the online store. This is a low level interface, not
expected to be used by the users directly.
If a tz-naive timestamp is passed to this method, it should be assumed to be UTC by implementors.
Args:
config: The RepoConfig for the current FeatureStore.
table: Feast FeatureView
data: a list of quadruplets containing Feature data. Each quadruplet contains an Entity Key,
a dict containing feature values, an event timestamp for the row, and
the created timestamp for the row if it exists.
progress: Optional function to be called once every mini-batch of rows is written to
the online store. Can be used to display progress.
"""
...
@abstractmethod
def online_read(
self,
config: RepoConfig,
table: FeatureView,
entity_keys: List[EntityKeyProto],
requested_features: Optional[List[str]] = None,
) -> List[Tuple[Optional[datetime], Optional[Dict[str, ValueProto]]]]:
"""
Read feature values given an Entity Key. This is a low level interface, not
expected to be used by the users directly.
Args:
config: The RepoConfig for the current FeatureStore.
table: Feast FeatureView
entity_keys: a list of entity keys that should be read from the FeatureStore.
requested_features: (Optional) A subset of the features that should be read from the FeatureStore.
Returns:
            Data is returned as a list, one item per entity key, in the same order as the entity_keys argument.
Each item in the list is a tuple of event_ts for the row, and the feature data as a dict from feature names
to values. Values are returned as Value proto message.
"""
...
@abstractmethod
def update(
self,
config: RepoConfig,
tables_to_delete: Sequence[FeatureView],
tables_to_keep: Sequence[FeatureView],
entities_to_delete: Sequence[Entity],
entities_to_keep: Sequence[Entity],
partial: bool,
):
...
def plan(
self, config: RepoConfig, desired_registry_proto: RegistryProto
) -> List[InfraObject]:
"""
Returns the set of InfraObjects required to support the desired registry.
Args:
config: The RepoConfig for the current FeatureStore.
desired_registry_proto: The desired registry, in proto form.
"""
return []
@abstractmethod
def teardown(
self,
config: RepoConfig,
tables: Sequence[FeatureView],
entities: Sequence[Entity],
):
...
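

# A minimal, illustrative in-memory implementation of the interface above. It is a
# sketch for demonstration only and is not part of Feast itself; in particular,
# keying the store by EntityKeyProto.SerializeToString() is a simplification.
class InMemoryOnlineStore(OnlineStore):
    def __init__(self):
        # {table_name: {serialized_entity_key: (event_ts, {feature_name: ValueProto})}}
        self._data: Dict[str, Dict[bytes, Tuple[datetime, Dict[str, ValueProto]]]] = {}

    def online_write_batch(self, config, table, data, progress) -> None:
        # Store the latest values per entity key and report progress row by row.
        rows = self._data.setdefault(table.name, {})
        for entity_key, values, event_ts, _created_ts in data:
            rows[entity_key.SerializeToString()] = (event_ts, values)
            if progress:
                progress(1)

    def online_read(self, config, table, entity_keys, requested_features=None):
        # Return one (event_ts, features) tuple per requested entity key, in order.
        rows = self._data.get(table.name, {})
        result = []
        for entity_key in entity_keys:
            stored = rows.get(entity_key.SerializeToString())
            if stored is None:
                result.append((None, None))
                continue
            event_ts, values = stored
            if requested_features is not None:
                values = {k: v for k, v in values.items() if k in requested_features}
            result.append((event_ts, values))
        return result

    def update(self, config, tables_to_delete, tables_to_keep,
               entities_to_delete, entities_to_keep, partial):
        # Only deletions need any work for an in-memory store.
        for table in tables_to_delete:
            self._data.pop(table.name, None)

    def teardown(self, config, tables, entities):
        for table in tables:
            self._data.pop(table.name, None)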
|
the-stack_106_17252
|
from collections import defaultdict
from caseworker.users.services import get_gov_user
from core import client
from core.helpers import convert_value_to_query_param
from caseworker.cases.constants import CaseType, CaseStatusEnum
from lite_forms.components import Option
def get_denial_reasons(request, convert_to_options=False, group=False):
data = client.get(request, "/static/denial-reasons/").json()["denial_reasons"]
if convert_to_options:
options = [Option(denial_reason["id"], denial_reason["id"]) for denial_reason in data]
if group:
return_dict = defaultdict(list)
for item in options:
return_dict[item.key[0]].append(item)
return dict(return_dict)
return options
return data
def get_countries(request, convert_to_options=False, exclude: list = None):
"""
Returns a list of GOV.UK countries and territories
    :param exclude: Takes a list of country codes and excludes them
"""
data = client.get(request, "/static/countries/?" + convert_value_to_query_param("exclude", exclude))
if convert_to_options:
converted_units = []
for country in data.json().get("countries"):
converted_units.append(Option(country.get("id"), country.get("name")))
return converted_units
return data.json(), data.status_code
# CaseStatusEnum
def get_statuses(request, convert_to_options=False):
"""Get static list of case statuses."""
data = client.get(request, "/static/statuses/")
if convert_to_options:
return [Option(key=item["id"], value=item["value"]) for item in data.json().get("statuses")]
return data.json()["statuses"], data.status_code
def get_permissible_statuses(request, case):
"""Get a list of case statuses permissible for the user's role."""
user, _ = get_gov_user(request, str(request.session["lite_api_user_id"]))
user_permissible_statuses = user["user"]["role"]["statuses"]
statuses, _ = get_statuses(request)
case_sub_type = case["case_type"]["sub_type"]["key"]
case_type = case["case_type"]["type"]["key"]
if case_type == CaseType.APPLICATION.value:
case_type_applicable_statuses = [
status
for status in statuses
if status["key"]
not in [
CaseStatusEnum.APPLICANT_EDITING,
CaseStatusEnum.CLOSED,
CaseStatusEnum.FINALISED,
CaseStatusEnum.REGISTERED,
CaseStatusEnum.CLC,
CaseStatusEnum.PV,
CaseStatusEnum.SURRENDERED,
]
]
elif case_type == CaseType.QUERY.value:
if case_sub_type == CaseType.END_USER_ADVISORY.value:
case_type_applicable_statuses = [
status for status in statuses if status["key"] in CaseStatusEnum.base_query_statuses()
]
else:
# if the query is not an end user advisory, then check if CLC/PV statuses are required
goods_query_status_keys = CaseStatusEnum.base_query_statuses().copy()
if case.data["clc_responded"] is not None:
goods_query_status_keys.insert(1, CaseStatusEnum.CLC)
if case.data["pv_grading_responded"] is not None:
# add PV status into the correct location
if case.data["clc_responded"] is not None:
goods_query_status_keys.insert(2, CaseStatusEnum.PV)
else:
goods_query_status_keys.insert(1, CaseStatusEnum.PV)
case_type_applicable_statuses = [status for status in statuses if status["key"] in goods_query_status_keys]
elif case_type == CaseType.COMPLIANCE.value:
if case_sub_type == CaseType.COMPLIANCE_SITE.value:
case_type_applicable_statuses = [
status
for status in statuses
if status["key"]
in [
CaseStatusEnum.OPEN,
CaseStatusEnum.CLOSED,
]
]
elif case_sub_type == CaseType.COMPLIANCE_VISIT.value:
case_type_applicable_statuses = [
status
for status in statuses
if status["key"]
in [
CaseStatusEnum.OPEN,
CaseStatusEnum.UNDER_INTERNAL_REVIEW,
CaseStatusEnum.RETURN_TO_INSPECTOR,
CaseStatusEnum.AWAITING_EXPORTER_RESPONSE,
CaseStatusEnum.CLOSED,
]
]
elif case_type == CaseType.REGISTRATION.value:
case_type_applicable_statuses = [
status
for status in statuses
if status["key"]
in [
CaseStatusEnum.REGISTERED,
CaseStatusEnum.UNDER_ECJU_REVIEW,
CaseStatusEnum.REVOKED,
CaseStatusEnum.SUSPENDED,
CaseStatusEnum.SURRENDERED,
CaseStatusEnum.DEREGISTERED,
]
]
return [status for status in case_type_applicable_statuses if status in user_permissible_statuses]
def get_status_properties(request, status):
data = client.get(request, f"/static/statuses/properties/{status}")
return data.json(), data.status_code
# Permissions
def get_user_permissions(request, with_team=False):
user, _ = get_gov_user(request)
if with_team:
return user["user"]["role"]["permissions"], user["user"]["team"]
return user["user"]["role"]["permissions"]
# Control List Entries
def get_control_list_entries(request, convert_to_options=False, include_parent=False, clc_entries_cache=[]): # noqa
"""
Preliminary caching mechanism, requires service restart to repopulate control list entries
"""
if convert_to_options:
if clc_entries_cache:
return clc_entries_cache
else:
data = client.get(request, "/static/control-list-entries/")
for control_list_entry in data.json().get("control_list_entries"):
clc_entries_cache.append(
Option(
key=control_list_entry["rating"],
value=control_list_entry["rating"],
description=control_list_entry["text"],
)
)
return clc_entries_cache
if include_parent:
response = client.get(request, "/static/control-list-entries/?include_parent=True")
else:
response = client.get(request, "/static/control-list-entries/?group=True")
response.raise_for_status()
return response.json().get("control_list_entries")
def get_gov_pv_gradings(request, convert_to_options=False):
pv_gradings = client.get(request, "/static/private-venture-gradings/gov/").json().get("pv_gradings")
if convert_to_options:
converted_units = []
for pv_grading_entry in pv_gradings:
for key in pv_grading_entry:
converted_units.append(Option(key=key, value=pv_grading_entry[key]))
return converted_units
return pv_gradings
def get_pv_gradings(request, convert_to_options=False):
pv_gradings = client.get(request, "/static/private-venture-gradings/").json().get("pv_gradings")
if convert_to_options:
converted_units = []
for pv_grading_entry in pv_gradings:
for key in pv_grading_entry:
converted_units.append(Option(key=key, value=pv_grading_entry[key]))
return converted_units
return pv_gradings
def get_menu_notifications(request):
if not hasattr(request, "cached_get_menu_notifications"):
request.cached_get_menu_notifications = client.get(request, "/gov-users/notifications/")
response = request.cached_get_menu_notifications
return response.json()
|
the-stack_106_17253
|
from geometry_msgs.msg import Pose, Point
from erdos.op import Op
from erdos.data_stream import DataStream
from erdos.message import Message
class RaiseObjectOperator(Op):
"""
Raises the Sawyer arm while gripping the object.
"""
stream_name = "raise-object-stream"
def __init__(self, name):
"""
Initializes the destination coordinates of the arm.
"""
super(RaiseObjectOperator, self).__init__(name)
self.des_EE_xyz = None
self.orientation = None
@staticmethod
def setup_streams(input_streams, location_stream_name,
trigger_stream_name):
"""
Registers a callback to retrieve the location where the arm needs to
be moved and returns a single output stream which sends the Pose
commands to move the robot to the required location.
"""
input_streams.filter_name(location_stream_name)\
.add_callback(RaiseObjectOperator.save_destination)
input_streams.filter_name(trigger_stream_name)\
.add_callback(RaiseObjectOperator.generate_move_commands)
return [
DataStream(data_type=Pose, name=RaiseObjectOperator.stream_name)
]
def save_destination(self, msg):
"""
Saves the destination coordinates and orientation of the arm.
"""
self.des_EE_xyz = msg.data.des_EE_xyz_above
self.orientation = msg.data.des_orientation_EE
def generate_move_commands(self, msg):
"""
Creates the Pose object from the retrieved coordinates.
"""
raise_object_pose = Pose(
position=Point(
x=self.des_EE_xyz[0],
y=self.des_EE_xyz[1],
z=self.des_EE_xyz[2]),
orientation=self.orientation)
raise_object_msg = Message(raise_object_pose, msg.timestamp)
self.get_output_stream(RaiseObjectOperator.stream_name).\
send(raise_object_msg)
def execute(self):
self.spin()
|
the-stack_106_17255
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on Nov 7, 2015
Don't blink...
@author: Juan_Insuasti
'''
import sys
import datetime
import os.path
import json
from Shared import Logger
class DataLogger:
def __init__(self, initFile, storage, storageRoute, logPrefix = "", logs = True,logName='Data Logger'):
self.console = Logger.Logger(logName=logName, enabled=logs, printConsole=True)
self.console.log("Initialization...")
self.initFile = initFile
self.logInfo = {} #metadata of the existing logs
self.storage = storage
self.storageRoute = str(storageRoute)
self.logPrefix = logPrefix
self.logData = {} #Actual data of a log
self.openInitFile()
def openInitFile(self):
        # Open the init file; if it doesn't exist, create it
self.console.log("Opening init file")
logInfo = {}
logInfo['logs'] = []
logInfo['openLog'] = self.getLogFile()
self.downloadFromStorage(self.initFile)
if(not os.path.exists(self.getFilePath(self.initFile)) ):
self.console.log("Init file does not exist")
            self.console.log("Creating init file -> %s", self.getFilePath(self.initFile))
self.saveFile(self.initFile,logInfo)
self.logInfo = self.loadFile(self.initFile)
self.createNewLog(self.getLogFile())
self.saveLogToStorage(self.initFile)
self.console.log("Opening init file...")
self.logInfo = self.loadFile(self.initFile)
if(not os.path.exists(self.getFilePath(self.logInfo['openLog'])) ):
self.console.log("Open log file does not exist")
self.downloadFromStorage(self.logInfo['openLog'])
self.console.log("Opening log file...")
self.logData = self.loadFile(self.logInfo['openLog'])
self.saveLogToStorage(self.logInfo['openLog'])
def getFilePath(self, logFile):
return self.logPrefix + logFile + '.json'
def saveFile(self, file, data):
self.console.log("Saving data to local disk => %s", file)
filepath = self.getFilePath(file)
with open(filepath, 'w') as outfile:
json.dump(data, outfile)
def loadFile(self, file):
self.console.log("Loading data from local disk => %s", file)
filepath = self.getFilePath(file)
if(os.path.exists(filepath)):
with open(filepath) as data_file:
return json.load(data_file)
self.console.log("File does not exist")
#Saves historic data into cloud storage
def saveLogToStorage(self, file):
self.console.log("Uploading log file to storage.")
filepath = str(self.getFilePath(file))
path = self.storageRoute + filepath
self.console.log("Filepath = %s", path)
url = self.storage.saveFile(path,filepath)
return url
#gets data from storage
def downloadFromStorage(self, file):
self.console.log("Downloading log file from storage.")
filepath = str(self.getFilePath(file))
path = self.storageRoute + filepath
url = self.storage.downloadFile(path,filepath)
return url
def createNewLog(self, logFile):
self.console.log("Creating new log file %s", logFile)
logData = {}
logData['dataset'] = []
logData['datasetAvg'] = []
logData['datasetLabel'] = []
self.saveFile(logFile, logData)
self.logData = self.loadFile(logFile)
self.logInfo['openLog'] = logFile
url = self.saveLogToStorage(logFile)
self.logInfo['logs'].append({'date': logFile, 'url': url})
self.saveFile(self.initFile, self.logInfo)
self.saveLogToStorage(self.initFile)
def newLogEntry(self, data, dataAvg, label):
        self.console.log("Creating new log entry")
logFile = self.getLogFile()
self.checkLogOpen(logFile)
self.logData['dataset'].append(data)
self.logData['datasetAvg'].append(dataAvg)
self.logData['datasetLabel'].append(label)
self.saveFile(logFile, self.logData)
def getLogFile(self):
#The name of the logfile es automatically chosen
#using the current date. 1 log per day.
return datetime.datetime.now().strftime("%Y-%m-%d")
def checkLogOpen(self, logFile):
if (self.logInfo['openLog'] != logFile):
self.saveLogToStorage(self.logInfo['openLog'])
self.createNewLog(logFile)
if __name__ == '__main__':
    print('Starting Program')
    # DataLogger also requires a storage backend (exposing saveFile/downloadFile)
    # and a storage route, e.g.:
    # logger = DataLogger('device0', storage=my_storage, storageRoute='devices/')
    pass
|
the-stack_106_17256
|
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
# function to preprocess data and convert it into useful features
def raw_to_data(input):
columns = ['age', 'workclass', 'fnlwgt', 'education_level', 'education-num', 'marital-status', 'occupation', 'relationship', 'race',
'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'income']
data = pd.read_csv(input)
data.columns = columns
data.drop('fnlwgt', axis=1, inplace=True)
# apply logarithmic transformation on skewed data
skewed = ['capital-gain', 'capital-loss']
data[skewed] = data[skewed].apply(lambda x: np.log(x+1))
    # normalize data (some columns have larger ranges than others) using minmax scaler
# initialize scaler
scaler = MinMaxScaler()
# storing numerical cols to list
numerical_col = [col for col in data.columns if data[col].dtype != 'object']
# fit and transform numerical data
data[numerical_col] = scaler.fit_transform(data[numerical_col])
# convert categorical columns to numerical
# one hot encoding
X = data.drop('income', axis=1)
y = data['income']
X = pd.get_dummies(X)
y = y.replace({'<=50K': 0, '>50K' : 1})
data = pd.concat([X, y], axis=1)
return data
if __name__ == '__main__':
pass
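    # Hypothetical usage sketch -- the CSV path below is an assumption, not part of
    # the original script:
    # data = raw_to_data('census_income.csv')
    # X, y = data.drop('income', axis=1), data['income']
    # print(X.shape, y.value_counts())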
|
the-stack_106_17257
|
# -*- coding: utf-8 -*-
import logging
from functools import lru_cache
from typing import Tuple, Union
import numpy as np
Shape = Union[int, Tuple[int, int]]
logger = logging.getLogger(__name__)
@lru_cache(maxsize=8)
def filtergrid(
size: Shape, quadrant_shift: bool = True, normalize: bool = True
) -> Tuple[np.ndarray, np.ndarray]:
"""Generates grid for constructing frequency domain filters.
Parameters
----------
size : Size of the filter
quadrant_shift : Quadrant shift such that 0 values / frequencies are at the corners
normalize: Normalize the range to [-0.5,0.5]
Returns
-------
    Coordinate matrices of x and y values for a 2D array. The output can be quadrant shifted
and / or normalized. This is basically a wrapper around np.meshgrid.
Inspired by filtergrid.m found at https://www.peterkovesi.com/matlabfns/
"""
if type(size) is int:
rows = cols = size
else:
rows = size[0]
cols = size[1]
range_1 = np.linspace(-(cols // 2), np.floor((cols - 1) / 2), cols)
range_2 = np.linspace(-(rows // 2), np.floor((rows - 1) / 2), rows)
if normalize:
range_1 = range_1 / cols
range_2 = range_2 / rows
x, y = np.meshgrid(range_1, range_2)
# Quadrant shift so that filters are constructed with 0 frequency at the corners
if quadrant_shift:
x = np.fft.ifftshift(x)
y = np.fft.ifftshift(y)
return x.T, y.T
@lru_cache(maxsize=8)
def radius_filtergrid(
size: Shape, quadrant_shift: bool = True, normalize: bool = True
) -> np.ndarray:
"""
Parameters
----------
size : Size of the filter
quadrant_shift : Quadrant shift such that 0 values / frequencies are at the corners
normalize: Normalize radius to [0 ,0.5]
Returns
-------
A matrix containing the radius from the center. This radius is in range [0, 0.5] if normalized.
The result can be quadrant shifted such that the 0 values are in the corners.
"""
x, y = filtergrid(size, quadrant_shift, normalize)
radius = np.sqrt(x ** 2 + y ** 2)
return radius
@lru_cache(maxsize=8)
def theta_filtergrid(size: Shape, quadrant_shift: bool = True) -> np.ndarray:
"""
Parameters
----------
size : Size of the filter
quadrant_shift : Quadrant shift such that 0 values / frequencies are at the corners
Returns
-------
A matrix containing the polar angle in radian at the respective position for a circle centered in the matrix.
The result can be returned quadrant shifted. The angle is 0 for all points on the positive x-axis.
The angles are pi/2 (90°) and -pi/2 (-90°) on the positive and negative y-axis respectively. On the negative
x-axis the angle is pi (180°). If you need the angle to be in range [0, 2pi] instead of [-pi, pi], you can simply
add 2pi whenever the angle is negative.
"""
y, x = filtergrid(size, quadrant_shift)
# Matrix values contain polar angle.
# 0 angle starts on the horizontal line and runs counter clock-wise
theta = np.arctan2(-y, x)
return theta
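

# Illustrative usage sketch (not part of the original module): build a Gaussian
# low-pass transfer function from radius_filtergrid, and shift theta_filtergrid's
# output into [0, 2*pi] as described in its docstring. The size and cutoff values
# below are arbitrary assumptions for demonstration.
if __name__ == "__main__":
    radius = radius_filtergrid((128, 128), quadrant_shift=True, normalize=True)
    cutoff = 0.1  # normalized cutoff frequency
    lowpass = np.exp(-(radius ** 2) / (2 * cutoff ** 2))
    print("low-pass filter shape:", lowpass.shape, "max:", lowpass.max())

    theta = theta_filtergrid((128, 128), quadrant_shift=False)
    theta_2pi = np.where(theta < 0, theta + 2 * np.pi, theta)
    print("theta range:", theta_2pi.min(), theta_2pi.max())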
|
the-stack_106_17259
|
#!/usr/bin/env python
import rospy
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient
from moveit_msgs.msg import MoveGroupAction, MoveGroupGoal, Constraints, JointConstraint, MoveItErrorCodes
'''
Created on 10.10.2016
@author: Alberto Romay
'''
class JointStateToMoveit(EventState):
'''
State to send a joint state configuration to MoveIt to plan and move.
># config_name string Name of the joint configuration of interest.
># move_group string Name of the move group to be used for planning.
># action_topic string Topic on which MoveIt is listening for action calls.
># robot_name string Optional name of the robot to be used.
If left empty, the first one found will be used
(only required if multiple robots are specified in the same file).
># joint_names string[] Names of the target joints.
Same order as their corresponding names in joint_values.
># joint_values float[] Target configuration of the joints.
Same order as their corresponding names in joint_names.
<= reached Target joint configuration has been reached.
<= planning_failed Failed to find a plan to the given joint configuration.
<= control_failed Failed to move the arm along the planned trajectory.
'''
def __init__(self):
'''
Constructor
'''
super(JointStateToMoveit, self).__init__(input_keys=['config_name', 'move_group', 'robot_name', 'action_topic', 'joint_values', 'joint_names'],
outcomes=['reached', 'planning_failed', 'control_failed'],
output_keys=['config_name', 'move_group', 'robot_name', 'action_topic', 'joint_values', 'joint_names'])
def execute(self, userdata):
'''
Execute this state
'''
if self._planning_failed:
return 'planning_failed'
if self._control_failed:
return 'control_failed'
if self._success:
return 'reached'
if self._client.has_result(self._action_topic):
result = self._client.get_result(self._action_topic)
if result.error_code.val == MoveItErrorCodes.CONTROL_FAILED:
Logger.logwarn('Control failed for move action of group: %s (error code: %s)' % (self._move_group, str(result.error_code)))
self._control_failed = True
return 'control_failed'
elif result.error_code.val != MoveItErrorCodes.SUCCESS:
Logger.logwarn('Move action failed with result error code: %s' % str(result.error_code))
self._planning_failed = True
return 'planning_failed'
else:
self._success = True
return 'reached'
def on_enter(self, userdata):
self._planning_failed = False
self._control_failed = False
self._success = False
self._config_name = userdata.config_name # Currently not used
self._robot_name = userdata.robot_name # Currently not used
self._move_group = userdata.move_group
self._action_topic = userdata.action_topic
self._joint_config = userdata.joint_values
self._joint_names = userdata.joint_names
self._client = ProxyActionClient({self._action_topic: MoveGroupAction})
# Action Initialization
action_goal = MoveGroupGoal()
action_goal.request.group_name = self._move_group
action_goal.request.allowed_planning_time = 1.0
goal_constraints = Constraints()
for i in range(len(self._joint_names)):
goal_constraints.joint_constraints.append(JointConstraint(
joint_name=self._joint_names[i],
position=self._joint_config[i],
weight=1.0))
action_goal.request.goal_constraints.append(goal_constraints)
try:
self._client.send_goal(self._action_topic, action_goal)
userdata.action_topic = self._action_topic # Save action topic to output key
except Exception as e:
Logger.logwarn('Failed to send action goal for group: %s\n%s' % (self._move_group, str(e)))
self._planning_failed = True
def on_stop(self):
try:
if self._client.is_available(self._action_topic) \
and not self._client.has_result(self._action_topic):
self._client.cancel(self._action_topic)
except:
# client already closed
pass
def on_pause(self):
self._client.cancel(self._action_topic)
def on_resume(self, userdata):
self.on_enter(userdata)
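
# Illustrative sketch (not part of the original state): the userdata keys this state
# consumes, shown as a plain dict. The topic, group and joint values are assumptions.
# userdata = {
#     'config_name': 'home_pose',
#     'move_group': 'arm',
#     'robot_name': '',
#     'action_topic': '/move_group',
#     'joint_names': ['joint_1', 'joint_2', 'joint_3'],
#     'joint_values': [0.0, -1.57, 1.57],
# }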
|
the-stack_106_17261
|
import torch
import itertools
from .base_model import BaseModel
from .cycle_gan_model import CycleGANModel
import os
from collections import OrderedDict
from util.util import mkdirs
class CycleDualViewGANModel(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
return CycleGANModel.modify_commandline_options(parser, is_train)
def __init__(self, opt, rank):
"""
Initialize the CycleDualViewGANModel class.
Parameters:
            opt (Option class) -- experiment flags
"""
BaseModel.__init__(self, opt, rank)
self.rank = rank
self.view_net0 = CycleGANModel(opt, rank)
self.view_net0.save_dir = os.path.join(self.view_net0.save_dir, 'view0')
if rank == 0:
mkdirs(self.view_net0.save_dir)
self.view_net1 = CycleGANModel(opt,rank)
self.view_net1.save_dir = os.path.join(self.view_net1.save_dir, 'view1')
if rank == 0:
mkdirs(self.view_net1.save_dir)
if self.isTrain:
            # Specify the training losses that will be plotted and printed out in the Console
# when the training/test scripts will call <BaseModel.get_current_losses>
self.optimizer_G = torch.optim.Adam(itertools.chain(self.view_net0.netG_A.parameters(), self.view_net0.netG_B.parameters(), self.view_net1.netG_A.parameters(), self.view_net1.netG_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(itertools.chain(self.view_net0.netD_A.parameters(), self.view_net0.netD_B.parameters(),self.view_net1.netD_A.parameters(), self.view_net1.netD_B.parameters()), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
self.view_net0.set_input(input['view0'])
self.view_net1.set_input(input['view1'])
def forward(self):
self.view_net0.forward()
self.view_net1.forward()
def optimize_parameters(self):
self.forward() # compute fake images and reconstruction images.
# G_A and G_B
self.set_requires_grad([self.view_net0.netD_A, self.view_net0.netD_B, self.view_net1.netD_A, self.view_net1.netD_B], False) # Ds require no gradients when optimizing Gs
self.optimizer_G.zero_grad() # set G_A and G_B's gradients to zero
self.view_net0.set_loss_G()
self.view_net1.set_loss_G()
self.loss_G = self.view_net0.loss_G + self.view_net1.loss_G
self.loss_G.backward()
self.optimizer_G.step() # update G_A and G_B's weights
# D_A and D_B
self.set_requires_grad([self.view_net0.netD_A, self.view_net0.netD_B, self.view_net1.netD_A, self.view_net1.netD_B], True)
self.optimizer_D.zero_grad() # set D_A and D_B's gradients to zero
self.view_net0.backward_D_A() # calculate gradients for D_A
        self.view_net0.backward_D_B()      # calculate gradients for D_B
self.view_net1.backward_D_A() # calculate gradients for D_A
        self.view_net1.backward_D_B()      # calculate gradients for D_B
self.optimizer_D.step() # update D_A and D_B's weights
def save_networks(self, epoch):
self.view_net0.save_networks(epoch)
self.view_net1.save_networks(epoch)
def get_current_losses(self):
errors_ret = OrderedDict()
errors_ret["loss_G"] = self.loss_G
losses = self.view_net0.get_current_losses()
for label, image in losses.items():
errors_ret['view0_' + label] = losses[label]
losses = self.view_net1.get_current_losses()
for label, image in losses.items():
errors_ret['view1_' + label] = losses[label]
return errors_ret
    def get_current_visuals(self):
        """Return visualization images. train.py will display these images with visdom, and save the images to an HTML file"""
visual_ret = OrderedDict()
visuals = self.view_net0.get_current_visuals()
for label,image in visuals.items():
visual_ret['view0_'+label] = visuals[label]
visuals = self.view_net1.get_current_visuals()
for label,image in visuals.items():
visual_ret['view1_'+label] = visuals[label]
return visual_ret
def load_networks(self, epoch):
self.view_net0.load_networks(epoch)
self.view_net1.load_networks(epoch)
|
the-stack_106_17262
|
import json
import geojson
from flask import current_app
from server.models.dtos.project_dto import DraftProjectDTO, ProjectDTO, ProjectCommentsDTO
from server.models.postgis.project import Project, Task, ProjectStatus
from server.models.postgis.statuses import TaskCreationMode
from server.models.postgis.task import TaskHistory, TaskStatus, TaskAction
from server.models.postgis.utils import NotFound, InvalidData, InvalidGeoJson
from server.services.grid.grid_service import GridService
from server.services.license_service import LicenseService
from server.services.users.user_service import UserService
class ProjectAdminServiceError(Exception):
""" Custom Exception to notify callers an error occurred when validating a Project """
def __init__(self, message):
if current_app:
current_app.logger.error(message)
class ProjectStoreError(Exception):
""" Custom Exception to notify callers an error occurred with database CRUD operations """
def __init__(self, message):
if current_app:
current_app.logger.error(message)
class ProjectAdminService:
@staticmethod
def create_draft_project(draft_project_dto: DraftProjectDTO) -> int:
"""
Validates and then persists draft projects in the DB
:param draft_project_dto: Draft Project DTO with data from API
:raises InvalidGeoJson
:returns ID of new draft project
"""
        # If we're cloning we'll copy all the project details from the clone, otherwise create a brand new project
if draft_project_dto.cloneFromProjectId:
draft_project = Project.clone(draft_project_dto.cloneFromProjectId, draft_project_dto.user_id)
else:
draft_project = Project()
draft_project.create_draft_project(draft_project_dto)
draft_project.set_project_aoi(draft_project_dto)
# if arbitrary_tasks requested, create tasks from aoi otherwise use tasks in DTO
if draft_project_dto.has_arbitrary_tasks:
tasks = GridService.tasks_from_aoi_features(draft_project_dto.area_of_interest)
draft_project.task_creation_mode = TaskCreationMode.ARBITRARY.value
else:
tasks = draft_project_dto.tasks
ProjectAdminService._attach_tasks_to_project(draft_project, tasks)
if draft_project_dto.cloneFromProjectId:
draft_project.save() # Update the clone
else:
draft_project.create() # Create the new project
draft_project.set_default_changeset_comment()
return draft_project.id
@staticmethod
    def _set_default_changeset_comment(draft_project: Project):
        """ Sets the default changeset comment when the project is created """
default_comment = current_app.config['DEFAULT_CHANGESET_COMMENT']
draft_project.changeset_comment = f'{default_comment}-{draft_project.id}'
draft_project.save()
@staticmethod
def _get_project_by_id(project_id: int) -> Project:
project = Project.get(project_id)
if project is None:
raise NotFound()
return project
@staticmethod
def get_project_dto_for_admin(project_id: int) -> ProjectDTO:
""" Get the project as DTO for project managers """
project = ProjectAdminService._get_project_by_id(project_id)
return project.as_dto_for_admin(project_id)
@staticmethod
def update_project(project_dto: ProjectDTO):
project = ProjectAdminService._get_project_by_id(project_dto.project_id)
if project_dto.project_status == ProjectStatus.PUBLISHED.name:
ProjectAdminService._validate_default_locale(project_dto.default_locale, project_dto.project_info_locales)
if project_dto.license_id:
ProjectAdminService._validate_imagery_licence(project_dto.license_id)
if project_dto.private:
ProjectAdminService._validate_allowed_users(project_dto)
project.update(project_dto)
return project
@staticmethod
    def _validate_imagery_licence(license_id: int):
        """ Ensures that the supplied license Id actually exists """
try:
LicenseService.get_license_as_dto(license_id)
except NotFound:
raise ProjectAdminServiceError(f'LicenseId {license_id} not found')
@staticmethod
def _validate_allowed_users(project_dto: ProjectDTO):
""" Ensures that all usernames are known and returns their user ids """
if len(project_dto.allowed_usernames) == 0:
raise ProjectAdminServiceError('Must have at least one allowed user on a private project')
try:
allowed_users = []
for username in project_dto.allowed_usernames:
user = UserService.get_user_by_username(username)
allowed_users.append(user)
project_dto.allowed_users = allowed_users # Dynamically attach the user object to the DTO for more efficient persistence
except NotFound:
            raise ProjectAdminServiceError(f'allowedUsers contains an unknown username {username}')
@staticmethod
def delete_project(project_id: int):
""" Deletes project if it has no completed tasks """
project = ProjectAdminService._get_project_by_id(project_id)
if project.can_be_deleted():
project.delete()
else:
raise ProjectAdminServiceError('Project has mapped tasks, cannot be deleted')
@staticmethod
def reset_all_tasks(project_id: int, user_id: int):
""" Resets all tasks on project, preserving history"""
tasks_to_reset = Task.query.filter(Task.project_id == project_id).all()
for task in tasks_to_reset:
task.set_task_history(TaskAction.COMMENT, user_id, "Task reset", TaskStatus.READY)
task.reset_task(user_id)
# Reset project counters
project = ProjectAdminService._get_project_by_id(project_id)
project.tasks_mapped = 0
project.tasks_validated = 0
project.tasks_bad_imagery = 0
project.save()
@staticmethod
    def get_all_comments(project_id: int) -> ProjectCommentsDTO:
        """ Gets all comments that mappers and validators have added to tasks associated with the project """
comments = TaskHistory.get_all_comments(project_id)
if len(comments.comments) == 0:
raise NotFound('No comments found on project')
return comments
@staticmethod
def _attach_tasks_to_project(draft_project: Project, tasks_geojson):
"""
        Validates then iterates over the array of tasks and attaches them to the draft project
:param draft_project: Draft project in scope
:param tasks_geojson: GeoJSON feature collection of mapping tasks
:raises InvalidGeoJson, InvalidData
"""
tasks = geojson.loads(json.dumps(tasks_geojson))
if type(tasks) is not geojson.FeatureCollection:
raise InvalidGeoJson('Tasks: Invalid GeoJson must be FeatureCollection')
is_valid_geojson = geojson.is_valid(tasks)
if is_valid_geojson['valid'] == 'no':
raise InvalidGeoJson(f"Tasks: Invalid FeatureCollection - {is_valid_geojson['message']}")
task_count = 1
for feature in tasks['features']:
try:
task = Task.from_geojson_feature(task_count, feature)
except (InvalidData, InvalidGeoJson) as e:
raise e
draft_project.tasks.append(task)
task_count += 1
        task_count -= 1  # Remove last increment before falling out of the loop
draft_project.total_tasks = task_count
@staticmethod
def _validate_default_locale(default_locale, project_info_locales):
"""
Validates that all fields for the default project info locale have been completed
:param default_locale: Admin supplied default locale
:param project_info_locales: All locales supplied by admin
:raises ProjectAdminServiceError
:return: True if valid
"""
default_info = None
for info in project_info_locales:
if info.locale.lower() == default_locale.lower():
default_info = info
break
if default_info is None:
raise ProjectAdminServiceError('Project Info for Default Locale not provided')
for attr, value in default_info.items():
if attr == 'per_task_instructions':
continue # Not mandatory field
if not value:
raise (ProjectAdminServiceError(f'{attr} not provided for Default Locale'))
return True # Indicates valid default locale for unit testing
@staticmethod
def get_projects_for_admin(admin_id: int, preferred_locale: str):
""" Get all projects for provided admin """
return Project.get_projects_for_admin(admin_id, preferred_locale)
|
the-stack_106_17263
|
#! /usr/bin/env python
"""Genetic Programming in Python, with a scikit-learn inspired API"""
from setuptools import setup, find_packages
import gplearn
DESCRIPTION = __doc__
VERSION = gplearn.__version__
setup(name='gplearn',
version=VERSION,
description=DESCRIPTION,
long_description=open("README.rst").read(),
classifiers=['Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'],
author='Trevor Stephens',
author_email='[email protected]',
url='https://github.com/trevorstephens/gplearn',
license='new BSD',
packages=find_packages(exclude=['*.tests',
'*.tests.*']),
zip_safe=False,
package_data={'': ['LICENSE']},
install_requires=['scikit-learn>=0.22.1',
'joblib>=0.13.0'])
|
the-stack_106_17266
|
from unittest.mock import MagicMock, patch
from colorama import Fore
from doddle.boards import (
EmojiScoreboardPrinter,
HtmlScoreboardPrinter,
Keyboard,
KeyboardPrinter,
Scoreboard,
ScoreboardPrinter,
ScoreboardRow,
)
from doddle.words import Word
class TestScoreboardRow:
def test_row_repr(self) -> None:
# Arrange
soln = Word("SMOKE")
guess = Word("GUESS")
sut = ScoreboardRow(3, soln, guess, "00110", 123)
expected = "n=3, soln=SMOKE, guess=GUESS, score=00110, num_left=123"
# Act
actual = repr(sut)
# Assert
assert actual == expected
def test_row_to_dict(self) -> None:
# Arrange
soln = Word("SMOKE")
guess = Word("GUESS")
score = "00110"
num_left = 123
sut = ScoreboardRow(3, soln, guess, score, num_left)
expected = {
"n": 3,
"Soln": str(soln),
"Guess": str(guess),
"Score": score,
"Poss": num_left,
}
# Act
actual = sut.to_dict(False)
# Assert
assert actual == expected
class TestScoreboard:
def test_emoji_repr_single(self) -> None:
# Arrange
sut = Scoreboard()
sut.add_row(1, Word("ULTRA"), Word("RAISE"), "01000", 117)
sut.add_row(2, Word("ULTRA"), Word("URBAN"), "20010", 5)
sut.add_row(3, Word("ULTRA"), Word("ULTRA"), "22222", 1)
emojis = """
Doddle 3/6
⬜🟨⬜⬜⬜
🟩⬜⬜🟨⬜
🟩🟩🟩🟩🟩
"""
expected = emojis.replace(" ", "")[1:-1]
# Act
actual = sut.emoji()
# Assert
assert actual == expected
def test_repr(self) -> None:
# Arrange
sut = Scoreboard()
sut.add_row(1, Word("ULTRA"), Word("RAISE"), "01000", 117)
sut.add_row(2, Word("ULTRA"), Word("URBAN"), "20010", 5)
sut.add_row(3, Word("ULTRA"), Word("ULTRA"), "22222", 1)
expected = "Soln=ULTRA (3 guesses)"
# Act
actual = repr(sut)
# Assert
assert actual == expected
def test_emoji_repr_dordle(self) -> None:
# Arrange
sut = Scoreboard()
sut.add_row(1, Word("ULTRA"), Word("RAISE"), "01000", 117)
sut.add_row(1, Word("BLAST"), Word("URBAN"), "20010", 5)
sut.add_row(2, Word("ULTRA"), Word("BLAST"), "02101", 1)
sut.add_row(2, Word("BLAST"), Word("BLAST"), "22222", 1)
sut.add_row(3, Word("ULTRA"), Word("ULTRA"), "22222", 1)
keypad = "\ufe0f\u20e3"
emojis = f"""
Doddle 3/7
3{keypad}2{keypad}
⬜🟨⬜⬜⬜ 🟩⬜⬜🟨⬜
⬜🟩🟨⬜🟨 🟩🟩🟩🟩🟩
🟩🟩🟩🟩🟩 ⬛⬛⬛⬛⬛
"""
expected = emojis.replace(" ", "")[1:-1]
# Act
actual = sut.emoji()
# Assert
assert actual == expected
def test_html_repr(self) -> None:
# Arrange
sut = Scoreboard()
sut.add_row(1, Word("ULTRA"), Word("RAISE"), "01000", 117)
sut.add_row(1, Word("BLAST"), Word("URBAN"), "20010", 5)
sut.add_row(2, Word("ULTRA"), Word("BLAST"), "02101", 1)
sut.add_row(2, Word("BLAST"), Word("BLAST"), "22222", 1)
sut.add_row(3, Word("ULTRA"), Word("ULTRA"), "22222", 1)
expected = """
<table>
<thead>
<tr>
<th></th>
<th>Soln</th>
<th>Guess</th>
<th>Score</th>
<th>Poss</th>
</tr>
</thead>
<tbody>
<tr>
<th>1</th>
<td><tt>ULTRA</tt></td>
<td><tt>RAISE</tt></td>
<td>⬜🟨⬜⬜⬜</td>
<td>117</td>
</tr>
<tr>
<th>1</th>
<td><tt>BLAST</tt></td>
<td><tt>URBAN</tt></td>
<td>🟩⬜⬜🟨⬜</td>
<td>5</td>
</tr>
<tr>
<td colspan="5" class="divider"><hr /></td>
</tr>
<tr>
<th>2</th>
<td><tt>ULTRA</tt></td>
<td><tt>BLAST</tt></td>
<td>⬜🟩🟨⬜🟨</td>
<td>1</td>
</tr>
<tr>
<th>2</th>
<td><tt>BLAST</tt></td>
<td><tt>BLAST</tt></td>
<td>🟩🟩🟩🟩🟩</td>
<td></td>
</tr>
<tr>
<td colspan="5" class="divider"><hr /></td>
</tr>
<tr>
<th>3</th>
<td><tt>ULTRA</tt></td>
<td><tt>ULTRA</tt></td>
<td>🟩🟩🟩🟩🟩</td>
<td></td>
</tr>
</tbody>
</table>
"""
# Act
actual = sut._repr_html_()
actual_sanitised = actual.strip().replace(" ", "")
expected_sanitised = expected.strip().replace(" ", "")
# Assert
assert actual_sanitised == expected_sanitised
class TestScoreboardPrinter:
def test_build_string(self) -> None:
# Arrange
scoreboard = Scoreboard()
scoreboard.add_row(1, Word("ULTRA"), Word("RAISE"), "01000", 117)
scoreboard.add_row(2, Word("ULTRA"), Word("URBAN"), "20010", 5)
scoreboard.add_row(3, Word("ULTRA"), Word("ULTRA"), "22222", 1)
sut = ScoreboardPrinter(size=5)
emojis = f"""
| # | Soln. | Guess | Score | Poss. |
|---|-------|-------|-------|-------|
| 1 | ULTRA | R{Fore.YELLOW}A{Fore.RESET}ISE | 0{Fore.YELLOW}1{Fore.RESET}000 | 117 |
| 2 | ULTRA | {Fore.GREEN}U{Fore.RESET}RB{Fore.YELLOW}A{Fore.RESET}N | {Fore.GREEN}2{Fore.RESET}00{Fore.YELLOW}1{Fore.RESET}0 | 5 |
| 3 | ULTRA | {Fore.GREEN}ULTRA{Fore.RESET} | {Fore.GREEN}22222{Fore.RESET} | |
"""
expected = emojis.replace(" ", "")[:-1]
# Act
scoreboard_str = sut.build_string(scoreboard)
# Assert
assert scoreboard_str == expected
def test_print_last_round_if_empty(self) -> None:
# Arrange
sut = ScoreboardPrinter(size=5)
scoreboard = Scoreboard()
# Act
expected = sut.print_last_round(scoreboard)
# Assert
assert expected is None
class TestKeyboard:
def test_with_one_update(self) -> None:
# Arrange
sut = Keyboard()
expected = {
-1: "BCDFGHIJLMOPQRTUVWXYZ",
0: "KN",
1: "AE",
2: "S",
}
# Act
sut.update("SNAKE", "20101")
# Assert
for expected_digit, letters in expected.items():
for char in letters:
actual_digit = sut.digit_by_char[char]
assert expected_digit == actual_digit
def test_with_two_updates(self) -> None:
# Arrange
sut = Keyboard()
expected = {
-1: "BCDFGHIJLOPQRUVWXZ",
0: "KMNTY",
1: "A",
2: "ES",
}
# Act
sut.update("SNAKE", "20101")
sut.update("MEATY", "02100")
# Assert
for expected_digit, letters in expected.items():
for char in letters:
actual_digit = sut.digit_by_char[char]
assert expected_digit == actual_digit
class TestKeyboardPrinter:
def test_printer_string(self) -> None:
# Arrange
keyboard = Keyboard()
sut = KeyboardPrinter()
keyboard.update("SNAKE", "20101")
expected = f"""
Q W {Fore.YELLOW}E {Fore.RESET}R T Y U I O P
{Fore.YELLOW}A {Fore.GREEN}S {Fore.RESET}D F G H J {Fore.LIGHTBLACK_EX}K {Fore.RESET}L
Z X C V B {Fore.LIGHTBLACK_EX}N {Fore.RESET}M
"""
# Act
actual = sut.build_string(keyboard)
# Assert
assert expected == actual
class TestHtmlScoreboardPrinter:
@patch.object(HtmlScoreboardPrinter, "build_string")
def test_print(self, mock_build_string: MagicMock) -> None:
# Arrange
sut = HtmlScoreboardPrinter()
mocked_printout = "Mocked printout"
mock_build_string.return_value = mocked_printout
scoreboard = Scoreboard()
# Act
sut.print(scoreboard)
# Assert
mock_build_string.assert_called_once_with(scoreboard)
class TestEmojiScoreboardPrinter:
@patch.object(EmojiScoreboardPrinter, "build_string")
def test_print(self, mock_build_string: MagicMock) -> None:
# Arrange
sut = EmojiScoreboardPrinter()
mocked_printout = "Mocked printout"
mock_build_string.return_value = mocked_printout
scoreboard = Scoreboard()
# Act
sut.print(scoreboard)
# Assert
mock_build_string.assert_called_once_with(scoreboard)
def test_build_string(self) -> None:
# Arrange
sut = EmojiScoreboardPrinter()
scoreboard = Scoreboard()
expected = ""
# Act
actual = sut.build_string(scoreboard)
# Assert
assert actual == expected
|
the-stack_106_17269
|
from __future__ import division
import os, subprocess, logging, sys, argparse, inspect, csv, time, re, shutil, datetime, platform, multiprocessing, itertools, hashlib, math, types, gzip, operator, textwrap
from natsort import natsorted
from lib.interlap import InterLap
from collections import defaultdict
import warnings
from Bio import SeqIO
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from Bio import SearchIO
from Bio import BiopythonWarning
warnings.simplefilter('ignore', BiopythonWarning)
#get the working directory, so you can move back into DB folder to find the files you need
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
LIB = os.path.join(parentdir, 'lib')
UTIL = os.path.join(parentdir, 'util')
GeneMark2GFF = os.path.join(UTIL, 'genemark_gtf2gff3.pl')
pref_colors2=["#CF3C57","#65B23A","#6170DD","#D18738","#D542B5",
"#724A63","#60AABA","#5DB07C","#6C5824","#D74B2B","#6B97D6","#893B2E",
"#B68DB7","#564E91","#ACA13C","#3C6171","#436B33","#D84088",
"#D67A77","#9D55C4","#8B336E","#DA77B9","#D850E5","#B188DF"]
#crayola 24 pack colors
#['red', 'yellow', 'blue', 'brown', 'orange', 'green',
#'violet', 'black', 'carnation pink', 'yellow orange', 'blue green', 'red violet',
#'red orange', 'yellow green', 'blue violet', 'white', 'violet red', 'dandelion', #
#'cerulean', 'apricot', 'scarlet', 'green yellow', 'indigo', 'gray']
pref_colors=['#EE204D', '#FCE883', '#1F75FE', '#B4674D', '#FF7538', '#1CAC78',
'#926EAE', '#232323', '#FFAACC', '#FFB653', '#199EBD', '#C0448F',
'#FF5349', '#C5E384', '#7366BD', '#EDEDED', '#F75394', '#FDDB6D',
'#1DACD6', '#FDD9B5', '#FC2847', '#F0E891', '#5D76CB', '#95918C']
Nogs = {'NOG': 'All organisms (5.0GB)',
'aciNOG': 'Acidobacteria (125.3MB)',
'acidNOG': 'Acidobacteriia (75.4MB)',
'acoNOG': 'Aconoidasida (217.1MB)',
'actNOG': 'Actinobacteria (765.3MB)',
'agaNOG': 'Agaricales (211.1MB)',
'agarNOG': 'Agaricomycetes (236.5MB)',
'apiNOG': 'Apicomplexa (322.7MB)',
'aproNOG': 'Proteobacteria_alpha (638.4MB)',
'aquNOG': 'Aquificae (51.5MB)',
'arNOG': 'Archaea (256.9MB)',
'arcNOG': 'Archaeoglobi (21.8MB)',
'artNOG': 'Arthropoda (725.0MB)',
'arthNOG': 'Arthrodermataceae (111.2MB)',
'ascNOG': 'Ascomycota (1.1GB)',
'aveNOG': 'Aves (186.1MB)',
'bacNOG': 'Bacilli (362.6MB)',
'bactNOG': 'Bacteria (3.3GB)',
'bacteNOG': 'Bacteroidia (199.2MB)',
'basNOG': 'Basidiomycota (356.5MB)',
'bctoNOG': 'Bacteroidetes (508.9MB)',
'biNOG': 'Bilateria (1.7GB)',
'bproNOG': 'Proteobacteria_beta (481.0MB)',
'braNOG': 'Brassicales (275.4MB)',
'carNOG': 'Carnivora (293.5MB)',
'chaNOG': 'Chaetomiaceae (180.9MB)',
'chlNOG': 'Chlorobi (51.3MB)',
'chlaNOG': 'Chlamydiae (39.1MB)',
'chloNOG': 'Chloroflexi (136.8MB)',
'chlorNOG': 'Chloroflexi (75.8MB)',
'chloroNOG': 'Chlorophyta (146.8MB)',
'chorNOG': 'Chordata (1.1GB)',
'chrNOG': 'Chromadorea (392.6MB)',
'cloNOG': 'Clostridia (505.6MB)',
'cocNOG': 'Coccidia (137.4MB)',
'creNOG': 'Crenarchaeota (110.0MB)',
'cryNOG': 'Cryptosporidiidae (105.4MB)',
'cyaNOG': 'Cyanobacteria (254.8MB)',
'cytNOG': 'Cytophagia (164.6MB)',
'debNOG': 'Debaryomycetaceae (145.5MB)',
'defNOG': 'Deferribacteres (41.6MB)',
'dehNOG': 'Dehalococcoidetes (15.0MB)',
'deiNOG': 'Deinococcusthermus (75.4MB)',
'delNOG': 'delta/epsilon (471.4MB)',
'dipNOG': 'Diptera (397.7MB)',
'dotNOG': 'Dothideomycetes (298.2MB)',
'dproNOG': 'Proteobacteria_delta (424.6MB)',
'droNOG': 'Drosophilidae (314.1MB)',
'eproNOG': 'Proteobacteria_epsilon (104.8MB)',
'eryNOG': 'Erysipelotrichi (85.8MB)',
'euNOG': 'Eukaryotes (3.1GB)',
'eurNOG': 'Euryarchaeota (264.7MB)',
'euroNOG': 'Eurotiomycetes (507.2MB)',
'eurotNOG': 'Eurotiales (358.1MB)',
'fiNOG': 'Fishes (641.2MB)',
'firmNOG': 'Firmicutes (728.8MB)',
'flaNOG': 'Flavobacteriia (222.5MB)',
'fuNOG': 'Fungi (1.2GB)',
'fusoNOG': 'Fusobacteria (74.9MB)',
'gproNOG': 'Proteobacteria_gamma (735.0MB)',
'haeNOG': 'Haemosporida (197.1MB)',
'halNOG': 'Halobacteria (106.6MB)',
'homNOG': 'Hominidae (229.9MB)',
'hymNOG': 'Hymenoptera (199.5MB)',
'hypNOG': 'Hypocreales (353.3MB)',
'inNOG': 'Insects (688.9MB)',
'kinNOG': 'Kinetoplastida (259.8MB)',
'lepNOG': 'Lepidoptera (208.0MB)',
'lilNOG': 'Liliopsida (660.0MB)',
'maNOG': 'Mammals (855.5MB)',
'magNOG': 'Magnaporthales (161.3MB)',
'meNOG': 'Animals (1.8GB)',
'metNOG': 'Methanobacteria (38.4MB)',
'methNOG': 'Methanococci (24.5MB)',
'methaNOG': 'Methanomicrobia (99.4MB)',
'necNOG': 'Nectriaceae (200.3MB)',
'negNOG': 'Negativicutes (96.5MB)',
'nemNOG': 'Nematodes (430.0MB)',
'onyNOG': 'Onygenales (282.8MB)',
'opiNOG': 'Opisthokonts (2.8GB)',
'perNOG': 'Peronosporales (154.1MB)',
'plaNOG': 'Planctomycetes (149.3MB)',
'pleNOG': 'Pleosporales (223.4MB)',
'poaNOG': 'Poales (596.3MB)',
'prNOG': 'Primates (448.8MB)',
'proNOG': 'Proteobacteria (1.5GB)',
'rhaNOG': 'Rhabditida (334.5MB)',
'roNOG': 'Rodents (381.4MB)',
'sacNOG': 'Saccharomycetaceae (202.7MB)',
'saccNOG': 'Saccharomycetes (275.9MB)',
'sorNOG': 'Sordariales (296.1MB)',
'sordNOG': 'Sordariomycetes (714.1MB)',
'sphNOG': 'Sphingobacteriia (154.0MB)',
'spiNOG': 'Spirochaetes (121.2MB)',
'spriNOG': 'Supraprimates (635.6MB)',
'strNOG': 'Streptophyta (960.6MB)',
'synNOG': 'Synergistetes (59.5MB)',
'tenNOG': 'Tenericutes (29.9MB)',
'thaNOG': 'Thaumarchaeota (15.3MB)',
'theNOG': 'Thermoplasmata (26.9MB)',
'therNOG': 'Thermotogae (66.5MB)',
'thermNOG': 'Thermococci (31.4MB)',
'treNOG': 'Tremellales (79.9MB)',
'veNOG': 'Vertebrates (1.0GB)',
'verNOG': 'Verrucomicrobia (140.9MB)',
'verrNOG': 'Verrucomicrobiae (73.0MB)',
'virNOG': 'Viridiplantae (1.0GB)'}
COGS = {'J': '(J) Translation, ribosomal structure and biogenesis',
'A': '(A) RNA processing and modification',
'K': '(K) Transcription',
'L': '(L) Replication, recombination and repair',
'B': '(B) Chromatin structure and dynamics',
'D': '(D) Cell cycle control, cell division, chromosome partitioning',
'Y': '(Y) Nuclear structure',
'V': '(V) Defense mechanisms',
'T': '(T) Signal transduction mechanisms',
'M': '(M) Cell wall/membrane/envelope biogenesis',
'N': '(N) Cell motility',
'Z': '(Z) Cytoskeleton',
'W': '(W) Extracellular structures',
'U': '(U) Intracellular trafficking, secretion, and vesicular transport',
'O': '(O) Posttranslational modification, protein turnover, chaperones',
'C': '(C) Energy production and conversion',
'G': '(G) Carbohydrate transport and metabolism',
'E': '(E) Amino acid transport and metabolism',
'F': '(F) Nucleotide transport and metabolism',
'H': '(H) Coenzyme transport and metabolism',
'I': '(I) Lipid transport and metabolism',
'P': '(P) Inorganic ion transport and metabolism',
'Q': '(Q) Secondary metabolites biosynthesis, transport and catabolism',
'R': '(R) General function prediction only',
'S': '(S) Function unknown'}
DBURL = { 'uniprot': 'ftp://ftp.ebi.ac.uk/pub/databases/uniprot/current_release/knowledgebase/complete/uniprot_sprot.fasta.gz',
'uniprot-release': 'ftp://ftp.ebi.ac.uk/pub/databases/uniprot/current_release/knowledgebase/complete/reldate.txt',
'merops': 'ftp://ftp.ebi.ac.uk/pub/databases/merops/current_release/merops_scan.lib',
'dbCAN': 'http://cys.bios.niu.edu/dbCAN2/download/Databases/dbCAN-HMMdb-V7.txt',
'dbCAN-tsv': 'http://cys.bios.niu.edu/dbCAN2/download/Databases/CAZyDB.07312018.fam-activities.txt',
'dbCAN-log': 'http://cys.bios.niu.edu/dbCAN2/download/Databases/dbCAN-old@UGA/readme.txt',
'pfam': 'ftp://ftp.ebi.ac.uk/pub/databases/Pfam//current_release/Pfam-A.hmm.gz',
'pfam-tsv': 'ftp://ftp.ebi.ac.uk/pub/databases/Pfam//current_release/Pfam-A.clans.tsv.gz',
'pfam-log': 'ftp://ftp.ebi.ac.uk/pub/databases/Pfam//current_release/Pfam.version.gz',
'outgroups': 'https://osf.io/r9sne/download?version=1',
'repeats': 'https://osf.io/vp87c/download?version=1',
'go-obo': 'http://purl.obolibrary.org/obo/go.obo',
'mibig': 'https://mibig.secondarymetabolites.org/MIBiG_prot_seqs_1.4.fasta',
'interpro': 'ftp://ftp.ebi.ac.uk/pub/databases/interpro/interpro.xml.gz',
'gene2product': 'https://raw.githubusercontent.com/nextgenusfs/gene2product/master/ncbi_cleaned_gene_products.txt'}
buscoTree='eukaryota (303)\n\tmetazoa (978)\n\t\tnematoda (982)\n\t\tarthropoda (1066)\n\t\t\tinsecta (1658)\n\t\t\tendopterygota (2442)\n\t\t\thymenoptera (4415)\n\t\t\tdiptera (2799)\n\t\tvertebrata (2586)\n\t\t\tactinopterygii (4584)\n\t\t\ttetrapoda (3950)\n\t\t\taves (4915)\n\t\t\tmammalia (4104)\n\t\teuarchontoglires (6192)\n\t\t\tlaurasiatheria (6253)\n\tfungi (290)\n\t\tdikarya (1312)\n\t\t\tascomycota (1315)\n\t\t\t\tpezizomycotina (3156)\n\t\t\t\t\teurotiomycetes (4046)\n\t\t\t\t\tsordariomycetes (3725)\n\t\t\t\t\tsaccharomycetes (1759)\n\t\t\t\t\t\tsaccharomycetales (1711)\n\t\t\tbasidiomycota (1335)\n\t\tmicrosporidia (518)\n\tembryophyta (1440)\n\tprotists (215)\n\t\talveolata_stramenophiles (234)\n'
busco_links = {
'fungi': ('https://osf.io/xvzmu/download?version=1', 'fungi_odb9'),
'microsporidia': ('https://osf.io/r47nx/download?version=1', 'microsporidia_odb9'),
'dikarya': ('https://osf.io/av6f8/download?version=1', 'dikarya_odb9'),
'ascomycota': ('https://osf.io/z2736/download?version=1', 'ascomycota_odb9'),
'pezizomycotina' :('https://osf.io/bj3sm/download?version=1', 'pezizomycotina_odb9'),
'eurotiomycetes' : ('https://osf.io/nvt3z/download?version=1', 'eurotiomycetes_odb9'),
'sordariomycetes' : ('https://osf.io/r24kn/download?version=1', 'sordariomyceta_odb9'),
'saccharomycetes' : ('https://osf.io/mpu2k/download?version=1', 'saccharomyceta_odb9'),
'saccharomycetales' : ('https://osf.io/dhk47/download?version=1', 'saccharomycetales_odb9'),
'basidiomycota' : ('https://osf.io/2xnsj/download?version=1', 'basidiomycota_odb9'),
'eukaryota' : ('https://osf.io/psj2k/download?version=1', 'eukaryota_odb9'),
'protists' : ('https://osf.io/a4tsk/download?version=1', 'protists_ensembl'),
'alveolata_stramenophiles' : ('https://osf.io/waqpe/download?version=1', 'alveolata_stramenophiles_ensembl'),
'metazoa' : ('https://osf.io/5bvam/download?version=1', 'metazoa_odb9'),
'nematoda' : ('https://osf.io/u87d3/download?version=1', 'nematoda_odb9'),
'arthropoda' : ('https://osf.io/w26ez/download?version=1', 'arthropoda_odb9'),
'insecta' : ('https://osf.io/8qsa5/download?version=1', 'insecta_odb9'),
'endopterygota' : ('https://osf.io/pxdqg/download?version=1', 'endopterygota_odb9'),
'hymenoptera' : ('https://osf.io/q4ce6/download?version=1', 'hymenoptera_odb9'),
'diptera' : ('https://osf.io/e2n49/download?version=1', 'diptera_odb9'),
'vertebrata' : ('https://osf.io/w6kf8/download?version=1', 'vertebrata_odb9'),
'actinopterygii' : ('https://osf.io/dj2cw/download?version=1', 'actinopterygii_odb9'),
'tetrapoda' : ('https://osf.io/bp4cf/download?version=1', 'tetrapoda_odb9'),
'aves' : ('https://osf.io/e7qym/download?version=1', 'aves_odb9'),
'mammalia' : ('https://osf.io/dvy5m/download?version=1', 'mammalia_odb9'),
'euarchontoglires' : ('https://osf.io/p3nc7/download?version=1', 'euarchontoglires_odb9'),
'laurasiatheria' : ('https://osf.io/2v9hj/download?version=1', 'laurasiatheria_odb9'),
'embryophyta' : ('https://osf.io/m67p4/download?version=1', 'embryophyta_odb9')}
class suppress_stdout_stderr(object):
'''
A context manager for doing a "deep suppression" of stdout and stderr in
Python, i.e. will suppress all print, even if the print originates in a
compiled C/Fortran sub-function.
This will not suppress raised exceptions, since exceptions are printed
to stderr just before a script exits, and after the context manager has
exited (at least, I think that is why it lets exceptions through).
'''
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull,os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = (os.dup(1), os.dup(2))
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0],1)
os.dup2(self.null_fds[1],2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0],1)
os.dup2(self.save_fds[1],2)
# Close the null files
os.close(self.null_fds[0])
os.close(self.null_fds[1])
class colr:
GRN = '\033[92m'
END = '\033[0m'
WARN = '\033[93m'
class gzopen(object):
"""Generic opener that decompresses gzipped files
if needed. Encapsulates an open file or a GzipFile.
Use the same way you would use 'open()'.
"""
def __init__(self, fname):
f = open(fname)
# Read magic number (the first 2 bytes) and rewind.
magic_number = f.read(2)
f.seek(0)
# Encapsulated 'self.f' is a file or a GzipFile.
if magic_number == '\x1f\x8b':
self.f = gzip.GzipFile(fileobj=f)
else:
self.f = f
# Define '__enter__' and '__exit__' to use in
# 'with' blocks. Always close the file and the
# GzipFile if applicable.
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
try:
self.f.fileobj.close()
except AttributeError:
pass
finally:
self.f.close()
# Reproduce the interface of an open file
# by encapsulation.
def __getattr__(self, name):
return getattr(self.f, name)
def __iter__(self):
return iter(self.f)
def next(self):
return next(self.f)
def softwrap2(input):
return textwrap.fill(input, width=80)
def softwrap(string, every=80):
lines = []
for i in xrange(0, len(string), every):
lines.append(string[i:i+every])
return '\n'.join(lines)
def len_without_format(text):
try:
return len(remove_formatting(text))
except TypeError:
return len(str(text))
def remove_formatting(text):
return re.sub('\033.*?m', '', text)
def colour(text, text_colour):
bold_text = 'bold' in text_colour
text_colour = text_colour.replace('bold', '')
underline_text = 'underline' in text_colour
text_colour = text_colour.replace('underline', '')
text_colour = text_colour.replace('_', '')
text_colour = text_colour.replace(' ', '')
text_colour = text_colour.lower()
if 'red' in text_colour:
coloured_text = RED
elif 'green' in text_colour:
coloured_text = GREEN
elif 'yellow' in text_colour:
coloured_text = YELLOW
elif 'dim' in text_colour:
coloured_text = DIM
else:
coloured_text = ''
if bold_text:
coloured_text += BOLD
if underline_text:
coloured_text += UNDERLINE
if not coloured_text:
return text
coloured_text += text + END_FORMATTING
return coloured_text
END_FORMATTING = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
RED = '\033[31m'
GREEN = '\033[32m'
MAGENTA = '\033[35m'
YELLOW = '\033[93m'
DIM = '\033[2m'
def green(text):
return GREEN + text + END_FORMATTING
def bold_green(text):
return GREEN + BOLD + text + END_FORMATTING
def red(text):
return RED + text + END_FORMATTING
def magenta(text):
return MAGENTA + text + END_FORMATTING
def bold_red(text):
return RED + BOLD + text + END_FORMATTING
def bold(text):
return BOLD + text + END_FORMATTING
def bold_underline(text):
return BOLD + UNDERLINE + text + END_FORMATTING
def underline(text):
return UNDERLINE + text + END_FORMATTING
def dim(text):
return DIM + text + END_FORMATTING
def dim_underline(text):
return DIM + UNDERLINE + text + END_FORMATTING
def bold_yellow(text):
return YELLOW + BOLD + text + END_FORMATTING
def bold_yellow_underline(text):
return YELLOW + BOLD + UNDERLINE + text + END_FORMATTING
def bold_red_underline(text):
return RED + BOLD + UNDERLINE + text + END_FORMATTING
def print_table(table, alignments='', max_col_width=30, col_separation=3, indent=2,
row_colour=None, sub_colour=None, row_extra_text=None, leading_newline=False,
subsequent_indent='', return_str=False, header_format='underline',
hide_header=False, fixed_col_widths=None, left_align_header=True,
bottom_align_header=True, verbosity=1):
"""
Args:
table: a list of lists of strings (one row is one list, all rows should be the same length)
alignments: a string of L and R, indicating the alignment for each row
max_col_width: values longer than this will be wrapped
col_separation: the number of spaces between columns
indent: the number of spaces between the table and the left side of the terminal
row_colour: a dictionary of row indices and their colour names
sub_colour: a dictionary of values to colour names for which the text colour will be set
row_extra_text: a dictionary of row indices and extra text to display after the row
leading_newline: if True, the function will print a blank line above the table
subsequent_indent: this string will be added to the start of wrapped text lines
return_str: if True, this function will return a string of the table instead of printing it
header_format: the formatting (colour, underline, etc) of the header line
hide_header: if True, the header is not printed
fixed_col_widths: a list to specify exact column widths (automatic if not used)
left_align_header: if False, the header will follow the column alignments
bottom_align_header: if False, the header will align to the top, like other rows
verbosity: the table will only be logged if the logger verbosity is >= this value
"""
    #this function is adapted from Ryan Wick's table-printing code in Unicycler
    #modified here to drop most of the colour handling
column_count = len(table[0])
table = [x[:column_count] for x in table]
table = [x + [''] * (column_count - len(x)) for x in table]
if row_colour is None:
row_colour = {}
if sub_colour is None:
sub_colour = {}
if row_extra_text is None:
row_extra_text = {}
if leading_newline:
print('')
# Ensure the alignments string is the same length as the column count
alignments += 'L' * (column_count - len(alignments))
alignments = alignments[:column_count]
if fixed_col_widths is not None:
col_widths = fixed_col_widths
else:
col_widths = [0] * column_count
for row in table:
col_widths = [min(max(col_widths[i], len_without_format(x)), max_col_width)
for i, x in enumerate(row)]
separator = ' ' * col_separation
indenter = ' ' * indent
full_table_str = ''
for i, row in enumerate(table):
row = [str(x) for x in row]
if hide_header and i == 0:
continue
if fixed_col_widths is not None:
wrapped_row = []
for col, fixed_width in zip(row, fixed_col_widths):
wrapper = textwrap.TextWrapper(subsequent_indent=subsequent_indent,
width=fixed_width)
wrapped_row.append(wrapper.wrap(col))
else:
wrapper = textwrap.TextWrapper(subsequent_indent=subsequent_indent, width=max_col_width)
wrapped_row = [wrapper.wrap(x) for x in row]
row_rows = max(len(x) for x in wrapped_row)
if i == 0 and bottom_align_header:
wrapped_row = [[''] * (row_rows - len(x)) + x for x in wrapped_row]
for j in range(row_rows):
row_line = [x[j] if j < len(x) else '' for x in wrapped_row]
aligned_row = []
for value, col_width, alignment in zip(row_line, col_widths, alignments):
if alignment == 'L' or (i == 0 and left_align_header):
aligned_row.append(value.ljust(col_width))
elif alignment == 'C':
aligned_row.append(value.center(col_width))
else:
aligned_row.append(value.rjust(col_width))
row_str = separator.join(aligned_row)
if i in row_extra_text:
row_str += row_extra_text[i]
if i == 0 and header_format:
row_str = colour(row_str, header_format)
if i in row_colour:
row_str = colour(row_str, row_colour[i])
for text, colour_name in sub_colour.items():
row_str = row_str.replace(text, colour(text, colour_name))
if j < row_rows - 1 and UNDERLINE in row_str:
row_str = re.sub('\033\[4m', '', row_str)
if return_str:
full_table_str += indenter + row_str + '\n'
else:
print(indenter + row_str)
if return_str:
return full_table_str
def git_version():
def _minimal_ext_cmd(cmd):
        # run the command from the source directory, discarding stderr
out = subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr = open(os.devnull, 'w'), cwd=currentdir).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', '--short', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = False
return GIT_REVISION
def Funzip(input, output, cpus):
'''
    function to unzip as fast as it can, preferring pigz over gzip
'''
if which('pigz'):
cmd = ['pigz', '--decompress', '-c', '-p', str(cpus), input]
else:
cmd = ['gzip', '--decompress', '-c', input]
try:
runSubprocess2(cmd, '.', log, output)
except NameError:
with open(output, 'w') as outfile:
subprocess.call(cmd, stdout=outfile)
def Fzip(input, output, cpus):
'''
    function to zip as fast as it can, preferring pigz over gzip
'''
if which('pigz'):
cmd = ['pigz', '-c', '-p', str(cpus), input]
else:
cmd = ['gzip', '-c', input]
try:
runSubprocess2(cmd, '.', log, output)
except NameError:
with open(output, 'w') as outfile:
subprocess.call(cmd, stdout=outfile)
def Fzip_inplace(input, cpus):
'''
    function to zip the file in place as fast as it can, preferring pigz over gzip
'''
if which('pigz'):
cmd = ['pigz', '-f', '-p', str(cpus), input]
else:
cmd = ['gzip', '-f', input]
try:
runSubprocess(cmd, '.', log)
except NameError:
subprocess.call(cmd)
####RNA seq mediated modules
def concatenateReads(input, output):
'''
    Since comma-separated file lists do not work reliably with the subprocess module, simply
    concatenate the FASTQ files in order and use a single file; input should be a list of FASTQ
    files. System cat is used so that gzipped files are concatenated correctly.
'''
cmd = ['cat']
cmd = cmd + input
runSubprocess2(cmd, '.', log, output)
def removeAntiSense(input, readTuple, output):
'''
    Map reads to the input transcripts, determine strandedness, and then filter out transcripts
    that were assembled in antisense orientation. The idea is that antisense transcripts, while
    potentially valid, are not going to help update the gene models and could hurt the
    annotation effort.
'''
log.info("Running anti-sense filtering of Trinity transcripts")
    bamthreads = (args.cpus + 1) // 2 #use roughly half the threads for bam compression
aligner = choose_aligner()
if aligner == 'hisat2':
bowtie2bam = os.path.join(tmpdir, 'hisat2.transcripts.coordSorted.bam')
if not os.path.isfile(bowtie2bam):
log.info("Building Hisat2 index of "+"{0:,}".format(countfasta(input))+" trinity transcripts")
cmd = ['hisat2-build', input, os.path.join(tmpdir, 'hisat2.transcripts')]
runSubprocess4(cmd, '.', log)
#now launch the aligner
log.info("Aligning reads to trinity transcripts with Hisat2")
hisat2cmd = ['hisat2', '-p', str(args.cpus), '-k', '50', '--max-intronlen', str(args.max_intronlen), '-x', os.path.join(tmpdir, 'hisat2.transcripts')]
if readTuple[2]:
hisat2cmd = hisat2cmd + ['-U', readTuple[2]]
if readTuple[0] and readTuple[1]:
hisat2cmd = hisat2cmd + ['-1', readTuple[0], '-2', readTuple[1]]
cmd = [os.path.join(parentdir, 'util', 'sam2bam.sh'), " ".join(hisat2cmd), str(bamthreads), bowtie2bam]
runSubprocess4(cmd, '.', log)
elif aligner == 'bowtie2':
#using bowtie2
bowtie2bam = os.path.join(tmpdir, 'bowtie2.transcripts.coordSorted.bam')
if not os.path.isfile(bowtie2bam):
log.info("Building Bowtie2 index of "+"{0:,}".format(countfasta(input))+" trinity transcripts")
cmd = ['bowtie2-build', input, os.path.join(tmpdir, 'bowtie2.transcripts')]
runSubprocess4(cmd, '.', log)
#now launch the subprocess commands in order
log.info("Aligning reads to trinity transcripts with Bowtie2")
bowtie2cmd = ['bowtie2', '-p', str(args.cpus), '-k', '50', '--local', '--no-unal', '-x', os.path.join(tmpdir, 'bowtie2.transcripts')]
if readTuple[2]:
bowtie2cmd = bowtie2cmd + ['-U', readTuple[2]]
if readTuple[0] and readTuple[1]:
bowtie2cmd = bowtie2cmd + ['-1', readTuple[0], '-2', readTuple[1]]
cmd = [os.path.join(parentdir, 'util', 'sam2bam.sh'), " ".join(bowtie2cmd), str(bamthreads), bowtie2bam]
runSubprocess4(cmd, '.', log)
elif aligner == 'rapmap':
        #using rapmap
bowtie2bam = os.path.join(tmpdir, 'rapmap.transcripts.coordSorted.bam')
if not os.path.isfile(bowtie2bam):
log.info("Building RapMap index of "+"{0:,}".format(countfasta(input))+" trinity transcripts")
cmd = ['rapmap', 'quasiindex', '-t', input, '-i', os.path.join(tmpdir, 'rapmap_index')]
runSubprocess4(cmd, '.', log)
#now launch the subprocess commands in order
log.info("Aligning reads to trinity transcripts with RapMap")
rapmapcmd = ['rapmap', 'quasimap', '-t', str(args.cpus), '-i', os.path.join(tmpdir, 'rapmap_index'), '-1', readTuple[0], '-2', readTuple[1]]
cmd = [os.path.join(parentdir, 'util', 'sam2bam.sh'), " ".join(rapmapcmd), str(bamthreads), bowtie2bam]
runSubprocess(cmd, '.', log)
#now run Trinity examine strandeness tool
log.info("Examining strand specificity")
cmd = [os.path.join(TRINITY, 'util', 'misc', 'examine_strand_specificity.pl'), bowtie2bam, os.path.join(tmpdir, 'strand_specific')]
runSubprocess(cmd, '.', log)
#parse output dat file and get list of transcripts to remove
removeList = []
with open(os.path.join(tmpdir, 'strand_specific.dat'), 'rU') as infile:
for line in infile:
line = line.replace('\n', '')
if line.startswith('#'):
continue
cols = line.split('\t')
if args.stranded == 'RF': #then we want to keep negative ratios in cols[4]
if not cols[4].startswith('-'):
removeList.append(cols[0])
elif args.stranded == 'FR': #keep + values
if cols[4].startswith('-'):
removeList.append(cols[0])
#now parse the input fasta file removing records in list
with open(output, 'w') as outfile:
with open(input, 'rU') as infile:
for record in SeqIO.parse(infile, 'fasta'):
if not record.id in removeList:
outfile.write(">%s\n%s\n" % (record.description, str(record.seq)))
log.info("Removing %i antisense transcripts" % (len(removeList)))
def CheckFASTQandFix(forward, reverse):
from Bio.SeqIO.QualityIO import FastqGeneralIterator
from itertools import izip, izip_longest
#open and check first header, if okay exit, if not fix
file1 = FastqGeneralIterator(gzopen(forward))
file2 = FastqGeneralIterator(gzopen(reverse))
check = True
for read1, read2 in izip(file1, file2):
#see if index is valid
if ' ' in read1[0] and ' ' in read2[0]:
if read1[0].split(' ')[1].startswith('1') and read2[0].split(' ')[1].startswith('2'): #std illumina, exit
break
elif read1[0].endswith('/1') and read2[0].endswith('/2'): #also acceptable
break
else: #it is not okay missing paired information
check = False
break
file1.close()
file2.close()
if not check: #now need to fix these reads
log.info("PE reads do not conform to Trinity naming convention (need either /1 /2 or std illumina), fixing...")
#work on forward reads first
if forward.endswith('.gz'):
Funzip(forward, forward+'.bak', multiprocessing.cpu_count())
SafeRemove(forward)
else:
os.rename(forward, forward+'.bak')
#now add ending to reads
with open(forward+'.fix', 'w') as forwardfix:
for title, seq, qual in FastqGeneralIterator(open(forward+'.bak')):
title = title+'/1'
forwardfix.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
Fzip(forward+'.fix', forward, multiprocessing.cpu_count())
SafeRemove(forward+'.bak')
SafeRemove(forward+'.fix')
#now work on reverse reads
if reverse.endswith('.gz'):
Funzip(reverse, reverse+'.bak', multiprocessing.cpu_count())
else:
os.rename(reverse, reverse+'.bak')
with open(reverse+'.fix', 'w') as reversefix:
for title, seq, qual in FastqGeneralIterator(open(reverse+'.bak')):
title = title+'/2'
reversefix.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
#zip back up to original file
Fzip(reverse+'.fix', reverse, multiprocessing.cpu_count())
SafeRemove(reverse+'.bak')
SafeRemove(reverse+'.fix')
return
def SafeRemove(input):
if os.path.isdir(input):
shutil.rmtree(input)
elif os.path.isfile(input):
os.remove(input)
else:
return
def runSubprocess(cmd, dir, logfile):
logfile.debug(' '.join(cmd))
proc = subprocess.Popen(cmd, cwd=dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if stdout:
logfile.debug(stdout)
if stderr:
logfile.debug(stderr)
def runSubprocess2(cmd, dir, logfile, output):
    #function where output of cmd is STDOUT, capture STDERR in logfile
    logfile.debug(' '.join(cmd))
    with open(output, 'w') as out:
        proc = subprocess.Popen(cmd, cwd=dir, stdout=out, stderr=subprocess.PIPE)
        stderr = proc.communicate()[1]
        if stderr:
            logfile.debug(stderr)
def runSubprocess3(cmd, dir, logfile):
    #function where STDOUT pipes to FNULL, capture STDERR in logfile
    FNULL = open(os.devnull, 'w')
    logfile.debug(' '.join(cmd))
    proc = subprocess.Popen(cmd, cwd=dir, stdout=FNULL, stderr=subprocess.PIPE)
    stderr = proc.communicate()[1]
    if stderr:
        logfile.debug(stderr)
def runSubprocess4(cmd, dir, logfile):
#function where STDOUT and STDERR pipes to FNULL
FNULL = open(os.devnull, 'w')
logfile.debug(' '.join(cmd))
proc = subprocess.Popen(cmd, cwd=dir, stdout=FNULL, stderr=FNULL)
proc.communicate()
def runSubprocess5(cmd, dir, logfile, input, output):
    #function where STDOUT to file, STDIN as input, STDERR pipes to logfile
    logfile.debug(' '.join(cmd))
    with open(input) as infile:
        with open(output, 'w') as out:
            proc = subprocess.Popen(cmd, cwd=dir, stdin=infile, stdout=out, stderr=subprocess.PIPE)
            stderr = proc.communicate()[1]
            if stderr:
                logfile.debug(stderr)
def runSubprocess6(cmd, dir, logfile, logfile2):
#function where cmd captured in logfile, but both stdout and stdin piped to additional logfile
logfile.debug(' '.join(cmd))
with open(logfile2, 'w') as logout:
proc = subprocess.Popen(cmd, cwd=dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if stdout:
logout.write(stdout)
if stderr:
logout.write(stderr)
def runSubprocess7(cmd, dir, logfile, output):
    #function where output of cmd is appended to a file, capture STDERR in logfile
    logfile.debug(' '.join(cmd))
    with open(output, 'a') as out:
        proc = subprocess.Popen(cmd, cwd=dir, stdout=out, stderr=subprocess.PIPE)
        stderr = proc.communicate()[1]
        if stderr:
            logfile.debug(stderr)
def runSubprocess8(cmd, dir, logfile, output):
    #function where output of cmd is STDOUT, STDERR discarded to FNULL
    FNULL = open(os.devnull, 'w')
    logfile.debug(' '.join(cmd))
    with open(output, 'w') as out:
        proc = subprocess.Popen(cmd, cwd=dir, stdout=out, stderr=FNULL)
        proc.communicate()
def evmGFFvalidate(input, evmpath, logfile):
Validator = os.path.join(evmpath, 'EvmUtils', 'gff3_gene_prediction_file_validator.pl')
cmd = ['perl', Validator, os.path.realpath(input)]
logfile.debug(' '.join(cmd))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if not stderr:
return True
else:
logfile.debug(stderr)
        return False
def hashfile(afile, hasher, blocksize=65536):
buf = afile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(blocksize)
return hasher.digest()
def sha256_check(file1, file2):
files = [file1, file2]
output = [(fname, hashfile(open(fname, 'rb'), hashlib.sha256())) for fname in files]
if output[0][1] == output[1][1]:
return True
else:
return False
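# Illustrative usage of sha256_check (file names are assumptions), kept as comments so the
# library stays import-safe:
#   sha256_check('run1/proteins.fa', 'run2/proteins.fa')  # -> True only if the file contents are identical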
def readBlocks(source, pattern):
buffer = []
for line in source:
if line.startswith(pattern):
if buffer: yield buffer
buffer = [ line ]
else:
buffer.append( line )
yield buffer
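# Illustrative sketch of readBlocks (the '###' pattern mirrors how GMAP GFF3 alignments are
# grouped later in this file); any lines before the first match come back as the first block:
#   with open('alignments.gff3') as fh:          # file name is an assumption
#       for block in readBlocks(fh, '###'):
#           pass  # block is a list of lines starting at a '###' line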
def readBlocks2(source, startpattern, endpattern):
buffer = []
for line in source:
if line.startswith(startpattern) or line.endswith(endpattern):
if buffer: yield buffer
buffer = [ line ]
else:
buffer.append( line )
yield buffer
def empty_line_sep(line):
return line=='\n'
def get_parent_dir(directory):
return os.path.dirname(directory)
def getSize(filename):
st = os.stat(filename)
return st.st_size
def checkinputs(filename):
if not os.path.isfile(filename):
log.error("%s is not a valid file, exiting" % filename)
sys.exit(1)
size = getSize(filename)
if size < 2: #this is 1 character...
log.error("%s appears to be empty, exiting" % filename)
sys.exit(1)
def make_tarfile(output_filename, source_dir):
import tarfile
with tarfile.open(output_filename, "w:gz") as tar:
tar.add(source_dir, arcname=os.path.basename(source_dir))
def multipleReplace(text, wordDict):
for key in wordDict:
text = text.replace(key, wordDict[key])
return text
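# Illustrative usage of multipleReplace (input string is an assumption):
#   multipleReplace('gene id;1', {' ': '_', ';': '_'})  # -> 'gene_id_1'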
def which_path(file_name):
for path in os.environ["PATH"].split(os.pathsep):
full_path = os.path.join(path, file_name)
if os.path.exists(full_path) and os.access(full_path, os.X_OK):
return full_path
return None
def which(name):
try:
with open(os.devnull) as devnull:
diff = ['tbl2asn', 'dustmasker', 'mafft', 'signalp', 'proteinortho5.pl', 'ete3', 'phyml', 'phobius.pl']
if not any(name in x for x in diff):
subprocess.Popen([name], stdout=devnull, stderr=devnull).communicate()
else:
if name == 'signalp':
subprocess.Popen([name, '-V'], stdout=devnull, stderr=devnull).communicate()
elif name == 'dustmasker':
subprocess.Popen([name, '-version-full'], stdout=devnull, stderr=devnull).communicate()
elif name == 'tbl2asn':
subprocess.Popen([name, '--help'], stdout=devnull, stderr=devnull).communicate()
elif name == 'raxmlHPC-PTHREADS':
subprocess.Popen([name, '-version'], stdout=devnull, stderr=devnull).communicate()
elif name == 'ete3':
subprocess.Popen([name, 'version'], stdout=devnull, stderr=devnull).communicate()
elif name == 'phobius.pl':
subprocess.Popen([name, '-h'], stdout=devnull, stderr=devnull).communicate()
else:
subprocess.Popen([name, '--version'], stdout=devnull, stderr=devnull).communicate()
except OSError as e:
if e.errno == os.errno.ENOENT:
return False
return True
def vers_tblastn():
p1 = subprocess.Popen(['tblastn', '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
vers = p1.communicate()[0].split('+')[0]
vers = vers.split(' ')[-1]
return vers
def CheckDependencies(input):
missing = []
for p in input:
if which(p) == False:
missing.append(p)
if missing != []:
error = ", ".join(missing)
log.error("Missing Dependencies: %s. Please install missing dependencies and re-run script" % (error))
sys.exit(1)
def checkannotations(input):
if os.path.isfile(input):
filesize = getSize(input)
if int(filesize) < 1:
return False
else:
return True
elif os.path.islink(input):
return True
else:
return False
def line_count(fname):
with open(fname) as f:
i = -1
for i, l in enumerate(f):
pass
return i + 1
def countfasta(input):
count = 0
with open(input, 'rU') as f:
for line in f:
if line.startswith (">"):
count += 1
return count
def getGeneBasename(fastafile):
bases = []
with open(fastafile, 'rU') as input:
for line in input:
line = line.replace('\n', '')
if line.startswith('>'):
line = line.replace('>', '')
Base = line.split('_')[0]+'_'
if not Base in bases:
bases.append(Base)
return bases
def get_version():
cmd = [os.path.join(parentdir, 'funannotate'), 'version']
version = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()[0].rstrip()
return version
def ver_tuple(z):
return tuple([int(x) for x in z.split('.') if x.isdigit()])
def ver_cmp(a, b):
return cmp(ver_tuple(a), ver_tuple(b))
def versionCheck(a, b):
if ver_cmp(a,b) == -1:
return False
else:
return True
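# Illustrative usage of versionCheck (returns True when version a >= version b):
#   versionCheck('1.2.3', '1.2.0')  # -> True
#   versionCheck('1.2', '1.10')     # -> False, fields are compared numerically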
def checkAugustusFunc(base):
'''
    function to test that the Augustus installation is working; note that a segmentation fault still results in a pass
'''
brakerpass = 0
buscopass = 0
version = subprocess.Popen(['augustus', '--version'], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()[0].rstrip()
version = version.split(' is ')[0]
bam2hints = which(os.path.join(base, 'bin', 'bam2hints'))
filterBam = which(os.path.join(base, 'bin', 'filterBam'))
if bam2hints and filterBam:
brakerpass = 1
model = os.path.join(parentdir, 'lib', 'EOG092C0B3U.prfl')
if not os.path.isfile(model):
log.error("Testing Augustus Error: installation seems wrong, can't find prfl model")
sys.exit(1)
profile = '--proteinprofile='+model
proteinprofile = subprocess.Popen(['augustus', '--species=anidulans', profile, os.path.join(parentdir, 'lib', 'busco_test.fa')], stderr=subprocess.STDOUT, stdout=subprocess.PIPE).communicate()[0].rstrip()
    proteinprofile = proteinprofile.strip()
if proteinprofile == '':
buscopass = 0
elif not 'augustus: ERROR' in proteinprofile:
buscopass = 1
return (version, brakerpass, buscopass)
def flatten(l):
flatList = []
for elem in l:
# if an element of a list is a list
# iterate over this list and add elements to flatList
if type(elem) == list:
for e in elem:
flatList.append(e)
else:
flatList.append(elem)
return flatList
def fmtcols(mylist, cols):
justify = []
for i in range(0,cols):
length = max(map(lambda x: len(x), mylist[i::cols]))
length += 2
ljust = map(lambda x: x.ljust(length), mylist[i::cols])
justify.append(ljust)
justify = flatten(justify)
num_lines = len(mylist) / cols
lines = (' '.join(justify[i::num_lines])
for i in range(0,num_lines))
return "\n".join(lines)
def list_columns(obj, cols=4, columnwise=True, gap=4):
"""
Print the given list in evenly-spaced columns.
Parameters
----------
obj : list
The list to be printed.
cols : int
The number of columns in which the list should be printed.
columnwise : bool, default=True
If True, the items in the list will be printed column-wise.
If False the items in the list will be printed row-wise.
gap : int
The number of spaces that should separate the longest column
item/s from the next column. This is the effective spacing
between columns based on the maximum len() of the list items.
"""
sobj = [str(item) for item in obj]
if cols > len(sobj): cols = len(sobj)
max_len = max([len(item) for item in sobj])
if columnwise: cols = int(math.ceil(float(len(sobj)) / float(cols)))
plist = [sobj[i: i+cols] for i in range(0, len(sobj), cols)]
if columnwise:
if not len(plist[-1]) == cols:
plist[-1].extend(['']*(len(sobj) - len(plist[-1])))
plist = zip(*plist)
printer = '\n'.join([
''.join([c.ljust(max_len + gap) for c in p])
for p in plist])
return printer
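# Illustrative usage of list_columns (values are assumptions); the returned string arranges the
# items column-wise, padding each column to the longest item plus 'gap' spaces:
#   print(list_columns(['aspergillus', 'botrytis', 'candida', 'fusarium'], cols=2, gap=4))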
def roundup(x):
return x if x % 100 == 0 else x + 100 - x % 100
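# roundup rounds an integer up to the next multiple of 100:
#   roundup(250)  # -> 300
#   roundup(300)  # -> 300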
def maxabs(a, axis=None):
"""Return slice of a, keeping only those values that are furthest away
from 0 along axis"""
maxa = a.max(axis=axis)
mina = a.min(axis=axis)
p = abs(maxa) > abs(mina) # bool, or indices where +ve values win
n = abs(mina) > abs(maxa) # bool, or indices where -ve values win
if axis == None:
if p: return maxa
else: return mina
shape = list(a.shape)
shape.pop(axis)
out = np.zeros(shape, dtype=a.dtype)
out[p] = maxa[p]
out[n] = mina[n]
return out
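# Illustrative usage of maxabs (relies on the module-level numpy import used as np above):
#   maxabs(np.array([-5, 3]))                        # -> -5
#   maxabs(np.array([[1, -4], [-2, 3]]), axis=0)     # -> array([-2, -4])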
def setupLogging(LOGNAME):
global log
if 'darwin' in sys.platform:
stdoutformat = logging.Formatter(colr.GRN+'%(asctime)s'+colr.END+': %(message)s', datefmt='[%b %d %I:%M %p]')
else:
stdoutformat = logging.Formatter('%(asctime)s: %(message)s', datefmt='[%I:%M %p]')
fileformat = logging.Formatter('%(asctime)s: %(message)s', datefmt='[%x %H:%M:%S]')
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
sth = logging.StreamHandler()
sth.setLevel(logging.INFO)
sth.setFormatter(stdoutformat)
log.addHandler(sth)
fhnd = logging.FileHandler(LOGNAME)
fhnd.setLevel(logging.DEBUG)
fhnd.setFormatter(fileformat)
log.addHandler(fhnd)
def renameGFF(input, newname, output):
with open(output, 'w') as outfile:
with open(input, 'rU') as infile:
for line in infile:
if line.startswith('>'): #remove any fasta sequences
continue
if line.startswith('#'):
outfile.write(line)
else:
cols = line.split('\t')
#make sure it has correct columns to be GFF
if len(cols) == 9:
outfile.write('%s\t%s\t%s' % (cols[0], newname, '\t'.join(cols[2:])))
def countGFFgenes(input):
count = 0
with open(input, 'rU') as f:
for line in f:
if "\tgene\t" in line:
count += 1
return count
def countEVMpredictions(input):
Counts = {'total': 0, 'augustus': 0, 'genemark': 0, 'pasa': 0, 'hiq': 0}
with open(input, 'rU') as f:
for line in f:
if line.startswith('\n') or line.startswith('#'):
continue
line = line.strip()
contig, source, feature, start, end, blank, strand, score, info = line.split('\t')
if feature == 'gene':
Counts['total'] += 1
if source == 'Augustus':
Counts['augustus'] += 1
elif source == 'GeneMark':
Counts['genemark'] += 1
elif source == 'pasa_pred':
Counts['pasa'] += 1
elif source == 'HiQ':
Counts['hiq'] += 1
elif source not in Counts:
Counts[source] = 1
else:
Counts[source] += 1
return Counts
def countGMAPtranscripts(input):
count = 0
with open(input, 'rU') as f:
for line in f:
if line.startswith('###'):
count += 1
return count
def runMultiProgress(function, inputList, cpus):
#setup pool
p = multiprocessing.Pool(cpus)
#setup results and split over cpus
tasks = len(inputList)
results = []
for i in inputList:
results.append(p.apply_async(function, [i]))
#refresh pbar every 5 seconds
while True:
incomplete_count = sum(1 for x in results if not x.ready())
if incomplete_count == 0:
break
sys.stdout.write(" Progress: %.2f%% \r" % (float(tasks - incomplete_count) / tasks * 100))
sys.stdout.flush()
time.sleep(1)
p.close()
p.join()
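# Illustrative usage of runMultiProgress (the worker function and input list are assumptions);
# it applies 'function' to every item of 'inputList' across 'cpus' workers and prints progress:
#   runMultiProgress(run_hmmscan_on_chunk, protein_chunks, 8)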
def runMultiNoProgress(function, inputList, cpus):
#setup pool
p = multiprocessing.Pool(cpus)
#setup results and split over cpus
tasks = len(inputList)
results = []
for i in inputList:
results.append(p.apply_async(function, [i]))
p.close()
p.join()
def cleanProteins(inputList, output):
#expecting a list of protein fasta files for combining/cleaning headers
#make sure you aren't duplicated sequences names
#dropping proteins less than 50 amino acids
seen = set()
with open(output, 'w') as out:
for x in inputList:
with open(x, 'rU') as input:
for rec in SeqIO.parse(input, 'fasta'):
if len(rec.seq) < 50:
continue
#explicitly check for swissprot and jgi
if rec.id.startswith('sp|') or rec.id.startswith('jgi|'):
ID = rec.id.split('|')[-1]
else:
ID = rec.id
                    #now strip problematic characters from the ID
                    badchars = [':', ';', '/', '\\', '.', ',', '%']
                    for i in badchars:
if i in ID:
ID = ID.replace(i, '_')
if not ID in seen:
seen.add(ID)
else:
#means that ID has already been used, so add a number to it, auto increment
counter = 1
while ID in seen:
oldnum = counter-1
ID = ID.replace('_'+str(oldnum), '') + '_'+str(counter)
counter += 1
seen.add(ID)
out.write('>%s\n%s\n' % (ID, rec.seq))
def gb2output(input, output1, output2, output3):
with open(output1, 'w') as proteins:
with open(output2, 'w') as transcripts:
with open(output3, 'w') as scaffolds:
with open(input, 'rU') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
scaffolds.write(">%s\n%s\n" % (record.id, record.seq))
for f in record.features:
if f.type == "CDS":
proteins.write(">%s\n%s\n" % (f.qualifiers['locus_tag'][0], softwrap(f.qualifiers['translation'][0].rstrip('*'))))
if f.type == "mRNA":
feature_seq = f.extract(record.seq)
transcripts.write(">%s\n%s\n" % (f.qualifiers['locus_tag'][0], softwrap(feature_seq)))
def sortGFF(input, output, order):
cmd = ['bedtools', 'sort', '-header', '-faidx', order, '-i', input]
with open(output, 'w') as out:
proc = subprocess.Popen(cmd, stdout=out, stderr=subprocess.PIPE)
stderr = proc.communicate()
if stderr:
if stderr[0] == None:
if stderr[1] != '':
log.error("Sort GFF failed, unreferenced scaffold present in gene predictions, check logfile")
sys.exit(1)
def checkGenBank(input):
count = 0
with open(input, 'rU') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
for f in record.features:
if f.type == 'CDS':
count += 1
if count == 0:
return False
else:
return True
def countGenBank(input):
cds = 0
trna = 0
dnas = 0
with open(input, 'rU') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
dnas += 1
for f in record.features:
if f.type == 'CDS':
cds += 1
elif f.type == 'tRNA':
trna += 1
return dnas, cds, trna
def checkFastaHeaders(input, limit):
length = 0
names = []
with open(input, 'rU') as fasta:
for line in fasta:
if line.startswith('>'):
line = line.replace('\n', '')
ID = line.replace('>', '').strip()
names.append(ID)
                headlen = len(line) - 1 #subtract one character for the '>' fasta header symbol
if headlen > length:
length = headlen
if length > int(limit):
return (False, names)
else:
return (True, names)
def BamHeaderTest(genome, mapping):
import pybam
#get list of fasta headers from genome
genome_headers = []
with open(genome, 'rU') as input:
for rec in SeqIO.parse(input, 'fasta'):
if rec.id not in genome_headers:
genome_headers.append(rec.id)
#get list of fasta headers from BAM
bam_headers = []
with open(mapping, 'rb') as bamin:
bam = pybam.read(bamin)
bam_headers = bam.file_chromosomes
#make sure the bam headers is a list
if not type(bam_headers) is list:
log.error("PyBam parsing failed, printing results, funannotate is expecting a list, not this....")
print(bam_headers)
sys.exit(1)
#now compare lists, basically if BAM headers not in genome headers, then output bad names to logfile and return FALSE
genome_headers = set(genome_headers)
diffs = [x for x in bam_headers if x not in genome_headers]
if len(diffs) > 0:
log.debug("ERROR: These BAM headers not found in genome FASTA headers\n%s" % ','.join(diffs))
return False
else:
return True
def mapCount(input, location_dict, output):
import pybam
#parse with pybam and count coverage (pileup)
Counts = {}
for aln in pybam.read(os.path.realpath(input)):
if not aln.sam_rname in Counts:
Counts[aln.sam_rname] = 1
else:
Counts[aln.sam_rname] += 1
with open(output, 'w') as outfile:
outfile.write("#mRNA-ID\tgene-ID\tLocation\tTPM\n")
for k,v in natsorted(location_dict.items()):
if k in Counts:
tpm = Counts.get(k)
else:
tpm = 0
geneID = v[0]
location = v[1]
outfile.write('{:}\t{:}\t{:}\t{:.2f}\n'.format(k, geneID, location, float(tpm)))
def tokenizeString(aString, separators):
#separators is an array of strings that are being used to split the the string.
#sort separators in order of descending length
separators.sort(key=len)
listToReturn = []
i = 0
while i < len(aString):
theSeparator = ""
for current in separators:
if current == aString[i:i+len(current)]:
theSeparator = current
if theSeparator != "":
listToReturn += [theSeparator]
i = i + len(theSeparator)
else:
if listToReturn == []:
listToReturn = [""]
if(listToReturn[-1] in separators):
listToReturn += [""]
listToReturn[-1] += aString[i]
i += 1
return listToReturn
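# Illustrative usage of tokenizeString on a minimap2-style cs tag (value is an assumption):
#   tokenizeString(':10*ac+gg:5', [':', '*', '+', '-', '~'])
#   # -> [':', '10', '*', 'ac', '+', 'gg', ':', '5']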
def bam2gff3(input, output):
import pybam
with open(output, 'w') as gffout:
gffout.write('##gff-version 3\n')
for aln in pybam.read(os.path.realpath(input)):
if aln.sam_flag == 0:
strand = '+'
elif aln.sam_flag == 16:
strand = '-'
else:
continue
cs = None
nm = None
tags = aln.sam_tags_string.split('\t')
for x in tags:
if x.startswith('cs:'):
cs = x.replace('cs:Z:', '')
if x.startswith('NM:'):
nm = int(x.split(':')[-1])
#print(aln.sam_qname, nm, cs)
if nm is None or cs is None:
continue
matches = 0
ProperSplice = True
splitter = []
exons = [int(aln.sam_pos1)]
position = int(aln.sam_pos1)
query = [1]
querypos = 0
num_exons = 1
gaps = 0
splitter = tokenizeString(cs, [':','*','+', '-', '~'])
for i,x in enumerate(splitter):
if x == ':':
matches += int(splitter[i+1])
position += int(splitter[i+1])
querypos += int(splitter[i+1])
elif x == '-':
gaps += 1
elif x == '+':
gaps += 1
querypos += len(splitter[i+1])
elif x == '~':
if aln.sam_flag == 0:
if splitter[i+1].startswith('gt') and splitter[i+1].endswith('ag'):
ProperSplice = True
elif splitter[i+1].startswith('at') and splitter[i+1].endswith('ac'):
ProperSplice = True
else:
ProperSplice = False
elif aln.sam_flag == 16:
if splitter[i+1].startswith('ct') and splitter[i+1].endswith('ac'):
ProperSplice = True
elif splitter[i+1].startswith('gt') and splitter[i+1].endswith('at'):
ProperSplice = True
else:
ProperSplice = False
num_exons += 1
exons.append(position)
query.append(querypos)
query.append(querypos+1)
intronLen = int(splitter[i+1][2:-2])
position += intronLen
exons.append(position)
#add last Position
exons.append(position)
query.append(aln.sam_l_seq)
#convert exon list into list of exon tuples
exons = zip(exons[0::2], exons[1::2])
queries = zip(query[0::2], query[1::2])
if ProperSplice:
mismatches = nm - gaps
                pident = 100 * (float(matches) / (matches + mismatches)) #force float division
if pident < 80:
continue
for i,exon in enumerate(exons):
start = exon[0]
end = exon[1]-1
if strand == '+':
qstart = queries[i][0]
qend = queries[i][1]
else:
qstart = aln.sam_l_seq - queries[i][1] + 1
qend = aln.sam_l_seq - queries[i][0] + 1
gffout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:.2f}\t{:}\t{:}\tID={:};Target={:} {:} {:}\n'.format(aln.sam_rname,'genome','cDNA_match',start,end,pident,strand,'.',aln.sam_qname,aln.sam_qname,qstart,qend))
def bam2ExonsHints(input, gff3, hints):
import pybam
count = 0
with open(gff3, 'w') as gffout:
gffout.write('##gff-version 3\n')
with open(hints, 'w') as hintsout:
for num,aln in enumerate(pybam.read(os.path.realpath(input))):
if aln.sam_flag == 0:
strand = '+'
elif aln.sam_flag == 16:
strand = '-'
else:
continue
cs = None
nm = None
tags = aln.sam_tags_string.split('\t')
for x in tags:
if x.startswith('cs:'):
cs = x.replace('cs:Z:', '')
if x.startswith('NM:'):
nm = int(x.split(':')[-1])
if nm is None or cs is None:
continue
matches = 0
ProperSplice = True
splitter = []
exons = [int(aln.sam_pos1)]
position = int(aln.sam_pos1)
query = [1]
querypos = 0
num_exons = 1
gaps = 0
splitter = tokenizeString(cs, [':','*','+', '-', '~'])
for i,x in enumerate(splitter):
if x == ':':
matches += int(splitter[i+1])
position += int(splitter[i+1])
querypos += int(splitter[i+1])
elif x == '-':
gaps += 1
elif x == '+':
gaps += 1
querypos += len(splitter[i+1])
elif x == '~':
if aln.sam_flag == 0:
if splitter[i+1].startswith('gt') and splitter[i+1].endswith('ag'):
ProperSplice = True
elif splitter[i+1].startswith('at') and splitter[i+1].endswith('ac'):
ProperSplice = True
else:
ProperSplice = False
break
elif aln.sam_flag == 16:
if splitter[i+1].startswith('ct') and splitter[i+1].endswith('ac'):
ProperSplice = True
elif splitter[i+1].startswith('gt') and splitter[i+1].endswith('at'):
ProperSplice = True
else:
ProperSplice = False
break
num_exons += 1
exons.append(position)
query.append(querypos)
query.append(querypos+1)
intronLen = int(splitter[i+1][2:-2])
position += intronLen
exons.append(position)
#add last Position
exons.append(position)
query.append(aln.sam_l_seq)
#convert exon list into list of exon tuples
exons = zip(exons[0::2], exons[1::2])
queries = zip(query[0::2], query[1::2])
introns = []
if len(exons) > 1:
for x,y in enumerate(exons):
try:
introns.append((y[1], exons[x+1][0]-1))
except IndexError:
pass
if ProperSplice:
mismatches = nm - gaps
                    pident = 100 * (float(matches) / (matches + mismatches)) #force float division
if pident < 80:
continue
feature = 'EST_match'
if pident > 95:
feature = 'cDNA_match'
count += 1
for i,exon in enumerate(exons):
start = exon[0]
end = exon[1]-1
qstart = queries[i][0]
qend = queries[i][1]
if i == 0 or i == len(exons)-1:
gffout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:.2f}\t{:}\t{:}\tID=minimap2_{:};Target={:} {:} {:} {:}\n'.format(aln.sam_rname, 'genome', feature, start, end, pident, strand, '.', num+1, aln.sam_qname, qstart, qend, strand))
hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp=minimap2_{:};pri=4;src=E\n'.format(aln.sam_rname, 'b2h', 'ep', start, end, 0, strand, '.', num+1, aln.sam_qname))
else:
gffout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:.2f}\t{:}\t{:}\tID=minimap2_{:};Target={:} {:} {:} {:}\n'.format(aln.sam_rname, 'genome', feature, start, end, pident, strand, '.', num+1, aln.sam_qname, qstart, qend, strand))
hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp=minimap2_{:};pri=4;src=E\n'.format(aln.sam_rname, 'b2h', 'exon', start, end, 0, strand, '.', num+1, aln.sam_qname))
if len(introns) > 0:
for z in introns:
hintsout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tgrp=minimap2_{:};pri=4;src=E\n'.format(aln.sam_rname, 'b2h', 'intron', z[0], z[1], 1, strand, '.', num+1, aln.sam_qname))
return count
def combineTranscripts(minimap, gmap, output):
'''
function to combine minimap GFF3 and gmap GFF3 files
    GMAP alignments are renamed while looping through, because the GFF3 output from gmap is poorly formed.
'''
with open(output, 'w') as out:
if minimap:
with open(minimap, 'rU') as mini:
for line in mini:
out.write(line)
else:
out.write('##gff-version 3\n')
with open(gmap, 'rU') as gmap_in:
for i,aln in enumerate(readBlocks(gmap_in, '###')):
for x in aln:
if not x.startswith('#'):
contig, source, feature, start, end, score, strand, phase, attributes = x.split('\t')
info = attributes.split(';')
for y in info:
if y.startswith('Target='):
Target = y
out.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID=gmap_{:};{:}\n'.format(contig,source,feature,start,end,score,strand,phase,i+1,Target))
def translate(cDNA, strand, phase):
'''
    translate cDNA into a protein sequence; a simple codon table is used here to avoid the
    overhead of going through Biopython
'''
def _RevComp(s):
rev_comp_lib = {'A':'T','C':'G','G':'C','T':'A','U':'A','M':'K','R':'Y','W':'W','S':'S','Y':'R','K':'M','V':'B','H':'D','D':'H','B':'V','X':'X','N':'N'}
cseq = ''
n = len(s)
s = s.upper()
for i in range(0,n):
c = s[n-i-1]
cseq += rev_comp_lib[c]
return cseq
def _split(str, num):
return [str[start:start+num] for start in range(0, len(str), num)]
codon_table = {'TTT': 'F', 'TTC': 'F', 'TTA': 'L', 'TTG': 'L', 'TCT': 'S',
'TCC': 'S', 'TCA': 'S', 'TCG': 'S', 'TAT': 'Y', 'TAC': 'Y',
'TGT': 'C', 'TGC': 'C', 'TGG': 'W', 'CTT': 'L', 'CTC': 'L',
'CTA': 'L', 'CTG': 'L', 'CCT': 'P', 'CCC': 'P', 'CCA': 'P',
'CCG': 'P', 'CAT': 'H', 'CAC': 'H', 'CAA': 'Q', 'CAG': 'Q',
'CGT': 'R', 'CGC': 'R', 'CGA': 'R', 'CGG': 'R', 'ATT': 'I',
'ATC': 'I', 'ATA': 'I', 'ATG': 'M', 'ACT': 'T', 'ACC': 'T',
'ACA': 'T', 'ACG': 'T', 'AAT': 'N', 'AAC': 'N', 'AAA': 'K',
'AAG': 'K', 'AGT': 'S', 'AGC': 'S', 'AGA': 'R', 'AGG': 'R',
'GTT': 'V', 'GTC': 'V', 'GTA': 'V', 'GTG': 'V', 'GCT': 'A',
'GCC': 'A', 'GCA': 'A', 'GCG': 'A', 'GAT': 'D', 'GAC': 'D',
'GAA': 'E', 'GAG': 'E', 'GGT': 'G', 'GGC': 'G', 'GGA': 'G',
'GGG': 'G', 'TAA': '*', 'TAG': '*', 'TGA': '*'}
if strand == '-' or strand == -1:
seq = _RevComp(cDNA)
else:
seq = cDNA
seq = seq[phase:]
#map seq to proteins
protSeq = []
for i in _split(seq, 3):
if len(i) == 3:
iSeq = i.upper()
if iSeq in codon_table:
aa = codon_table[iSeq]
protSeq.append(aa)
else:
protSeq.append('X')
return ''.join(protSeq)
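# Illustrative usage of translate (sequences are assumptions):
#   translate('ATGGCC', '+', 0)  # -> 'MA'
#   translate('GGCCAT', '-', 0)  # -> 'MA' (reverse-complemented before translation)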
def extend2stop(seqDict, header, coordinates, strand, phase, protLen):
'''
try to extend a CDS lacking a stop to find a stop codon
it will extend a CDS up to 20 codons (60 bp) from the current
frame to find a stop codon, if none is found it will return
the original coordinates
'''
sorted_coordinates = sorted(coordinates, key=lambda tup: tup[0])
if strand == '+':
newStop = sorted_coordinates[-1][1]+60
if newStop > len(seqDict[header]):
newStop = len(seqDict[header])
lastTup = (sorted_coordinates[-1][0], newStop)
if len(sorted_coordinates) > 1:
newCoords = sorted_coordinates[:-1]
newCoords.append(lastTup)
else:
newCoords = [lastTup]
updateCDS = getSeqRegions(seqDict, header, newCoords)
updateProt = translate(updateCDS, strand, phase)
if '*' in updateProt:
num = (updateProt.find('*') - protLen + 1) * 3
finalTup = (sorted_coordinates[-1][0], sorted_coordinates[-1][1]+num)
if len(sorted_coordinates) > 1:
finalCoords = sorted_coordinates[:-1]
finalCoords.append(finalTup)
else:
finalCoords = [finalTup]
return True, finalCoords
else:
return False, coordinates
else:
newStop = sorted_coordinates[0][0]-60
if newStop < 1:
newStop = 1
lastTup = (newStop, sorted_coordinates[0][1])
newCoords = [lastTup]
if len(sorted_coordinates) > 1:
newCoords += sorted_coordinates[1:]
updateCDS = getSeqRegions(seqDict, header, newCoords)
updateProt = translate(updateCDS, strand, phase)
if '*' in updateProt:
num = (updateProt.find('*') - protLen + 1) * 3
finalTup = (sorted_coordinates[0][0]-num, sorted_coordinates[0][1])
finalCoords = [finalTup]
if len(sorted_coordinates) > 1:
finalCoords += sorted_coordinates[1:]
finalSort = sorted(finalCoords, key=lambda tup: tup[0], reverse=True)
return True, finalSort
else:
return False, coordinates
def getSeqRegions(SeqRecordDict, header, coordinates):
#takes SeqRecord dictionary or Index, returns sequence string
#coordinates is a list of tuples [(1,10), (20,30)]
result = ''
sorted_coordinates = sorted(coordinates, key=lambda tup: tup[0])
for x in sorted_coordinates:
partial = SeqRecordDict[header][x[0]-1:x[1]]
result += str(partial.seq)
return result
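# Illustrative usage of getSeqRegions (file name and header are assumptions); coordinates are
# 1-based and inclusive, and the regions are returned concatenated in sorted order:
#   seq_index = SeqIO.index('genome.fa', 'fasta')
#   getSeqRegions(seq_index, 'contig1', [(1, 10), (20, 30)])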
def convertgff2tbl(gff, prefix, fasta, prots, trans, tblout):
    '''
    function to convert a GFF3 file directly to NCBI tbl format
    '''
    from collections import OrderedDict
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
#load GFF annotations into funannotate dictionary
Genes = {}
Genes = gff2dict(gff, fasta, Genes)
#get scaffold names/lengths
scaffLen = {}
with open(fasta, 'rU') as seqin:
for record in SeqIO.parse(seqin, 'fasta'):
if not record.id in scaffLen:
scaffLen[record.id] = len(record.seq)
#get partialStart/stop info and load scaffold dictionary with coordinates of Genes
sGenes = sorted(Genes.iteritems(), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
renamedGenes = {}
scaff2genes = {}
counter = 1
for k,v in sortedGenes.items():
if not prefix:
locusTag = k
else:
locusTag = prefix+'_'+str(counter).zfill(6)
if not locusTag in renamedGenes:
renamedGenes[locusTag] = v
if not v['contig'] in scaff2genes:
scaff2genes[v['contig']] = [locusTag]
else:
scaff2genes[v['contig']].append(locusTag)
counter += 1
dicts2tbl(renamedGenes, scaff2genes, scaffLen, 'CFMR', '12345', [], tblout)
#write to protein and transcripts
with open(prots, 'w') as protout:
with open(trans, 'w') as tranout:
for k,v in natsorted(Genes.items()):
for i,x in enumerate(v['ids']):
Transcript = str(v['transcript'][i])
tranout.write('>%s %s\n%s\n' % (x, k, Transcript))
if v['type'] == 'mRNA':
Prot = v['protein'][i]
protout.write('>%s %s\n%s\n' % (x, k, Prot))
return len(Genes)
def tblfilter(input, remove, output):
'''
function to take an NCBI tbl file and drop gene models present in remove file
'''
#get items to remove list
removeModels = []
with open(remove, 'rU') as file:
for line in file:
if line.startswith('#') or line.startswith('\n'):
continue
line = line.strip()
if not line in removeModels:
removeModels.append(line)
#now loop through tbl file and get line positions of gene models
found = []
with open(output, 'w') as outfile:
with open(input, 'rU') as infile:
for gene in readBlocks2(infile, '>Feature', '\tgene\n'):
if gene[0].startswith('>Feature'):
outfile.write(''.join(gene))
else:
locusTag = None
for x in gene:
if x.startswith('\t\t\tlocus_tag\t'):
locusTag = x.split('\t')[-1].rstrip()
if locusTag and not locusTag in removeModels:
outfile.write(''.join(gene))
else:
if not locusTag:
log.debug('LocusTag not found parsing NCBI Tbl file (this should not happen)')
print(gene)
else:
found.append(locusTag)
log.debug("Removed %i out of %i gene models from annotation" % (len(found), len(removeModels)))
s = set(found)
diff = [x for x in removeModels if x not in s]
if len(diff) > 0:
log.debug('Could not find %i gene models:\n %s' % (len(diff), ','.join(diff)))
def annotations2dict(input):
Annotations = {}
with open(input, 'rU') as all_annots:
for line in all_annots:
line = line.replace('\n', '')
ID, refDB, description = line.split('\t')
if description == '': #there is nothing here, so skip
continue
if refDB == 'name' or refDB == 'product':
if '-T' in ID:
geneID = ID.split('-T')[0]
else:
geneID = ID
else:
geneID = ID
if not geneID in Annotations:
Annotations[geneID] = {refDB: [description]}
else:
if not refDB in Annotations[geneID]:
Annotations[geneID][refDB] = [description]
else:
Annotations[geneID][refDB].append(description)
return Annotations
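# Illustrative input/output for annotations2dict (IDs and values are assumptions); the input is a
# three-column TSV of transcript ID, annotation source, and description:
#   the line 'FUN_000001-T1<tab>product<tab>hypothetical protein' yields
#   {'FUN_000001': {'product': ['hypothetical protein']}}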
def updateTBL(input, annotDict, output):
'''
general function to parse ncbi tbl format and add functional annotation
'''
log.debug('Parsing tbl file: {:}'.format(os.path.abspath(input)))
with open(input, 'rU') as infile:
with open(output, 'w') as outfile:
for gene in readBlocks2(infile, '>Feature', '\tgene\n'):
transcriptsSeen = []
#transcriptNum = 0
if gene[0].startswith('>Feature'):
outfile.write(''.join(gene))
else:
                    locusTag,locusTagIndex,locusType,geneAnnot,transcriptAnnot = (None,)*5
for i,x in enumerate(gene):
if x.startswith('\t\t\tlocus_tag\t'):
locusTag = x.split('\t')[-1].rstrip()
locusTagIndex = i
try:
locusType = gene[locusTagIndex+1].split('\t')[-1].rstrip()
except IndexError:
print(gene)
if locusType == 'tRNA':
outfile.write(''.join(gene))
elif locusType == 'mRNA':
if locusTag in annotDict:
geneAnnot = annotDict.get(locusTag)
else:
geneAnnot = {}
for line in gene:
if line.startswith('\t\t\tlocus_tag\t'):
if 'name' in geneAnnot:
outfile.write('\t\t\tgene\t%s\n' % geneAnnot['name'][0])
outfile.write(line)
elif line.startswith('\t\t\tproduct\t'):
if not 'product' in geneAnnot:
outfile.write(line)
elif line.startswith('\t\t\ttranscript_id\t'):
ID = line.split('|')[-1]
ID = ID.split('_mrna')[0]
if not ID in transcriptsSeen:
transcriptsSeen.append(ID)
transcriptNum = len(transcriptsSeen)
if ID in annotDict:
transcriptAnnot = annotDict.get(ID)
if 'product' in geneAnnot:
Description = geneAnnot['product'][0]
if transcriptNum > 1:
Description = Description + ', variant {:}'.format(transcriptNum)
outfile.write('\t\t\tproduct\t%s\n' % Description)
outfile.write(line)
elif line.startswith('\t\t\tcodon_start\t'):
outfile.write(line)
if transcriptAnnot:
for item in transcriptAnnot:
if item == 'name' or item == 'product':
continue
for x in transcriptAnnot[item]:
outfile.write('\t\t\t%s\t%s\n' % (item, x))
else:
outfile.write(line)
def bed2gff3(input, output):
'''
convert repeats bed file into GFF3 format
Contig245 36 69 Repeat_1
Contig245 265 288 Repeat_2
Contig245 477 493 Repeat_3
Contig245 780 797 Repeat_4
Contig245 997 1016 Repeat_5
'''
with open(output, 'w') as outfile:
outfile.write("##gff-version 3\n")
with open(input, 'rU') as bedfile:
for line in bedfile:
line = line.strip()
if line.startswith('\n'):
continue
contig, start, end, name = line.split('\t')
start = int(start) + 1 #bed is 0-based, gff 1-based
outfile.write('{:}\tRepeatMasker\tdispersed_repeat\t{:}\t{:}\t.\t+\t.\tID={:}\n'.format(contig, start, end, name))
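# Illustrative conversion by bed2gff3: the BED line 'Contig245<tab>36<tab>69<tab>Repeat_1'
# becomes 'Contig245<tab>RepeatMasker<tab>dispersed_repeat<tab>37<tab>69<tab>.<tab>+<tab>.<tab>ID=Repeat_1'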
def dicts2tbl(genesDict, scaff2genes, scaffLen, SeqCenter, SeqRefNum, skipList, output):
'''
function to take funannotate annotation dictionaries and convert to NCBI tbl output
'''
duplicates = 0
pseudo = 0
nocds = 0
with open(output, 'w') as tbl:
for k,v in natsorted(scaff2genes.items()):
tbl.write('>Feature %s\n' % k)
tbl.write('1\t%s\tREFERENCE\n' % scaffLen.get(k))
tbl.write('\t\t\t%s\t%s\n' % (SeqCenter, SeqRefNum))
for genes in v: #now loop through each gene on the scaffold
if genes in skipList:
continue
geneInfo = genesDict.get(genes) #single funannotate standard dictionary
if not geneInfo:
continue
if 'pseudo' in geneInfo:
if geneInfo['pseudo']:
log.debug('{:} is pseudo, skipping'.format(genes))
pseudo += 1
continue
if geneInfo['type'] == 'mRNA' and not geneInfo['CDS']:
log.debug('Skipping {:} because no CDS found.'.format(genes))
pseudo += 1
continue
if geneInfo['type'] == 'mRNA' and not len(geneInfo['ids']) == len(geneInfo['mRNA']) == len(geneInfo['CDS']):
log.debug('Incompatible annotation found: {:}\n{:}'.format(genes, geneInfo))
duplicates += 1
continue
if geneInfo['type'] == 'mRNA' and len(geneInfo['CDS']) == 0:
nocds += 1
continue
if geneInfo['type'] == None:
continue
#check for partial models
if True in geneInfo['partialStart']:
ps = '<'
else:
ps = ''
if True in geneInfo['partialStop']:
pss = '>'
else:
pss = ''
#if geneInfo['type'] == 'rRNA' or geneInfo['type'] == 'tRNA':
#ps = '<'
#pss = '>'
#now write gene model
if geneInfo['strand'] == '+':
tbl.write('%s%i\t%s%i\tgene\n' % (ps, geneInfo['location'][0], pss, geneInfo['location'][1]))
tbl.write('\t\t\tlocus_tag\t%s\n' % genes)
else:
tbl.write('%s%i\t%s%i\tgene\n' % (ps, geneInfo['location'][1], pss, geneInfo['location'][0]))
tbl.write('\t\t\tlocus_tag\t%s\n' % genes)
#now will output the gene models with -T1, -T2, -T3 annotations based on expression values
#means need to get the order
order = []
if len(geneInfo['ids']) > 1: #multiple transcripts, so get order of highest TPM
tpms = []
for num,tpm in enumerate(geneInfo['note']):
for item in tpm:
if item.startswith('TPM:'):
value = float(item.split(':')[-1])
tpms.append((value,num))
if len(tpms) > 0:
for x in sorted(tpms, reverse=True):
order.append(x[1])
else:
order = range(0,len(geneInfo['ids']))
else:
order.append(0)
for num,i in enumerate(order): #now write mRNA and CDS features
if geneInfo['ids'][i].startswith('evm.model'): #if from predict, rename to match locus_tag
protein_id = genes+'-T'+str(num+1)
else:
protein_id = geneInfo['ids'][i]
if geneInfo['type'] == 'mRNA':
if geneInfo['partialStart'][i] == False:
ps = ''
else:
ps = '<'
if geneInfo['partialStop'][i] == False:
pss = ''
else:
pss = '>'
if geneInfo['strand'] == '+':
for num, exon in enumerate(geneInfo['mRNA'][i]):
                                if num == 0 and num == len(geneInfo['mRNA'][i]) - 1: #single exon, so slightly different method
tbl.write('%s%s\t%s%s\tmRNA\n' % (ps, exon[0], pss, exon[1]))
elif num == 0:
tbl.write('%s%s\t%s\tmRNA\n' % (ps, exon[0], exon[1]))
elif num == len(geneInfo['mRNA'][i]) - 1: #this is last one
tbl.write('%s\t%s%s\n' % (exon[0], pss, exon[1]))
else:
tbl.write('%s\t%s\n' % (exon[0], exon[1]))
tbl.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' % (protein_id))
for num, cds in enumerate(geneInfo['CDS'][i]):
                                if num == 0 and num == len(geneInfo['CDS'][i]) - 1: #single exon, so slightly different method
tbl.write('%s%s\t%s%s\tCDS\n' % (ps, cds[0], pss, cds[1]))
elif num == 0:
tbl.write('%s%s\t%s\tCDS\n' % (ps, cds[0], cds[1]))
elif num == len(geneInfo['CDS'][i]) - 1: #this is last one
tbl.write('%s\t%s%s\n' % (cds[0], pss, cds[1]))
else:
tbl.write('%s\t%s\n' % (cds[0], cds[1]))
tbl.write('\t\t\tcodon_start\t%i\n' % geneInfo['codon_start'][i])
tbl.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' % (protein_id))
else: #means this is on crick strand
for num, exon in enumerate(geneInfo['mRNA'][i]):
                                if num == 0 and num == len(geneInfo['mRNA'][i]) - 1: #single exon, so slightly different method
tbl.write('%s%s\t%s%s\tmRNA\n' % (ps, exon[1], pss, exon[0]))
elif num == 0:
tbl.write('%s%s\t%s\tmRNA\n' % (ps, exon[1], exon[0]))
elif num == len(geneInfo['mRNA'][i]) - 1: #this is last one
tbl.write('%s\t%s%s\n' % (exon[1], pss, exon[0]))
else:
tbl.write('%s\t%s\n' % (exon[1], exon[0]))
tbl.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' % (protein_id))
for num, cds in enumerate(geneInfo['CDS'][i]):
                                if num == 0 and num == len(geneInfo['CDS'][i]) - 1: #single exon, so slightly different method
tbl.write('%s%s\t%s%s\tCDS\n' % (ps, cds[1], pss, cds[0]))
elif num == 0:
tbl.write('%s%s\t%s\tCDS\n' % (ps, cds[1], cds[0]))
elif num == (len(geneInfo['CDS'][i]) - 1): #this is last one
tbl.write('%s\t%s%s\n' % (cds[1], pss, cds[0]))
else:
tbl.write('%s\t%s\n' % (cds[1], cds[0]))
tbl.write('\t\t\tcodon_start\t%i\n' % geneInfo['codon_start'][i])
tbl.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
tbl.write('\t\t\ttranscript_id\tgnl|ncbi|%s_mrna\n' % (protein_id))
tbl.write('\t\t\tprotein_id\tgnl|ncbi|%s\n' % (protein_id))
elif geneInfo['type'] == 'tRNA':
if geneInfo['strand'] == '+':
for num, exon in enumerate(geneInfo['mRNA'][i]):
if num == 0:
#tbl.write('<%s\t>%s\t%s\n' % (exon[0], exon[1], geneInfo['type']))
tbl.write('%s\t%s\t%s\n' % (exon[0], exon[1], geneInfo['type']))
else:
tbl.write('%s\t%s\n' % (exon[0], exon[1]))
tbl.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
                            if geneInfo['product'][i] == 'tRNA-Xxx':
tbl.write('\t\t\tpseudo\n')
else:
for num, exon in enumerate(geneInfo['mRNA'][i]):
if num == 0:
#tbl.write('<%s\t>%s\t%s\n' % (exon[1], exon[0], geneInfo['type']))
tbl.write('%s\t%s\t%s\n' % (exon[1], exon[0], geneInfo['type']))
else:
tbl.write('%s\t%s\n' % (exon[1], exon[0]))
tbl.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
                            if geneInfo['product'][i] == 'tRNA-Xxx':
tbl.write('\t\t\tpseudo\n')
elif geneInfo['type'] == 'rRNA':
if geneInfo['strand'] == '+':
#tbl.write('<%s\t>%s\t%s\n' % (geneInfo['location'][0],geneInfo['location'][1], geneInfo['type']))
tbl.write('%s\t%s\t%s\n' % (geneInfo['location'][0],geneInfo['location'][1], geneInfo['type']))
tbl.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
else:
#tbl.write('<%s\t>%s\t%s\n' % (geneInfo['location'][1],geneInfo['location'][0], geneInfo['type']))
tbl.write('%s\t%s\t%s\n' % (geneInfo['location'][1],geneInfo['location'][0], geneInfo['type']))
tbl.write('\t\t\tproduct\t%s\n' % geneInfo['product'][i])
if any(i > 0 for i in [duplicates,pseudo,nocds]):
log.info('Skipped {:,} annotations: {:,} pseudo genes; {:,} no CDS; {:,} duplicated features'.format(sum([pseudo,nocds,duplicates]),pseudo,nocds,duplicates))
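#A minimal usage sketch for dicts2tbl; the file names, sequencing center, and reference
#number below are hypothetical placeholders for illustration:
#   Genes = {}
#   Genes = gff2dict('annotation.gff3', 'genome.fasta', Genes)
#   scaffLen = {rec.id: len(rec.seq) for rec in SeqIO.parse('genome.fasta', 'fasta')}
#   scaff2genes = {}
#   for locus, info in sorted(Genes.items(), key=lambda x: (x[1]['contig'], x[1]['location'][0])):
#       scaff2genes.setdefault(info['contig'], []).append(locus)
#   dicts2tbl(Genes, scaff2genes, scaffLen, 'CFMR', '12345', [], 'genome.tbl')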
def GFF2tbl(evm, trnascan, fasta, scaffLen, prefix, Numbering, SeqCenter, SeqRefNum, tblout):
    '''
    function to take EVM protein models and tRNA scan GFF and produce a GBK tbl file.
    The function will also rename locus_id if a prefix is passed.
    '''
    from collections import OrderedDict
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][1])
#load GFF into dictionary
Genes = {}
Genes = gff2dict(evm, fasta, Genes)
Genes = gff2dict(trnascan, fasta, Genes)
#now sort dictionary by contig and location, rename using prefix, translate to protein space to get proper start/stop info
sGenes = sorted(Genes.iteritems(), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
renamedGenes = {}
scaff2genes = {}
count = Numbering
for k,v in sortedGenes.items():
if prefix:
locusTag = prefix+'_'+str(count).zfill(6)
else:
locusTag = k
renamedGenes[locusTag] = v
if not v['contig'] in scaff2genes:
scaff2genes[v['contig']] = [locusTag]
else:
scaff2genes[v['contig']].append(locusTag)
count += 1
    #write tbl output file
dicts2tbl(renamedGenes, scaff2genes, scaffLen, SeqCenter, SeqRefNum, [], tblout)
def checkRefSeq(input):
refseq = False
with open(input, 'rU') as infile:
for record in SeqIO.parse(infile, 'genbank'):
if 'RefSeq' in record.annotations['keywords']:
refseq = True
break
return refseq
def getGBKinfo(input):
accession = None
organism = None
strain = None
isolate = None
gb_gi = None
WGS_accession = None
version = None
with open(input, 'rU') as infile:
for record in SeqIO.parse(infile, 'genbank'):
try:
WGS_accession = 'WGS:'+record.annotations['contig'].split(':')[0].replace('join(', '')[:4]
except KeyError:
pass
try:
accession = record.annotations['accessions'][0]
except KeyError:
pass
try:
organism = record.annotations['organism'].replace('Unclassified.', '').rstrip()
except KeyError:
pass
try:
gb_gi = record.annotations['gi']
except KeyError:
pass
try:
version = record.annotations['sequence_version']
except KeyError:
pass
for f in record.features:
if f.type == "source":
isolate = f.qualifiers.get("isolate", [None])[0]
strain = f.qualifiers.get("strain", [None])[0]
break
return organism, strain, isolate, accession, WGS_accession, gb_gi, version
def getGBKLocusTag(input):
LocusTags = []
with open(input, 'rU') as infile:
for record in SeqIO.parse(infile, 'genbank'):
for f in record.features:
if f.type == 'gene':
ID = f.qualifiers['locus_tag'][0]
if not ID in LocusTags:
LocusTags.append(ID)
lastTag = natsorted(LocusTags)[-1]
if not '_' in lastTag:
print('ERROR: underscore "_" not found in locus_tag, exiting.')
sys.exit(1)
tag, count = lastTag.rsplit('_', 1)
justify = len(count)
return tag, count, justify
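#Example of continuing locus numbering from an existing GenBank file using the values
#returned above (the input file name is a hypothetical placeholder):
#   tag, count, justify = getGBKLocusTag('existing.gbk')
#   next_locus = '{:}_{:}'.format(tag, str(int(count) + 1).zfill(justify))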
def gb2dna(input, output):
with open(output, 'w') as outfile:
with open(input, 'rU') as infile:
for record in SeqIO.parse(infile, 'genbank'):
outfile.write(">%s\n%s\n" % (record.id, softwrap(str(record.seq))))
def getID(input, type):
#function to get ID from genbank record.features
locusTag = None
ID = None
Parent = None
if type == 'gene':
try:
locusTag = input.qualifiers['locus_tag'][0]
except KeyError:
pass
if not locusTag:
try:
locusTag = input.qualifiers['gene'][0]
except KeyError:
pass
else:
try:
ID = input.qualifiers['gene'][0]
except KeyError:
pass
return locusTag, ID, locusTag
elif type == 'mRNA' or type == 'tRNA' or type == 'ncRNA' or type == 'rRNA' or type == 'exon':
try:
locusTag = input.qualifiers['locus_tag'][0]
Parent = locusTag
except KeyError:
pass
if not locusTag:
try:
locusTag = input.qualifiers['gene'][0]
except KeyError:
pass
if locusTag:
Parent = locusTag
try:
ID = input.qualifiers['transcript_id'][0]
except KeyError:
pass
else:
try:
locusTag = input.qualifiers['transcript_id'][0]
Parent = locusTag
except KeyError:
pass
else:
try:
ID = input.qualifiers['transcript_id'][0]
except KeyError:
pass
if ID:
if ':' in ID:
ID = ID.split(':')[-1]
else:
try:
ID = input.qualifiers['standard_name'][0]
except KeyError:
pass
return locusTag, ID, Parent
elif type == 'CDS':
try:
locusTag = input.qualifiers['locus_tag'][0]
Parent = locusTag
except KeyError:
pass
if not locusTag:
try:
locusTag = input.qualifiers['gene'][0]
except KeyError:
pass
if locusTag:
Parent = locusTag
try:
ID = input.qualifiers['protein_id'][0]
except KeyError:
pass
else:
try:
locusTag = input.qualifiers['protein_id'][0]
Parent = locusTag
except KeyError:
pass
else:
try:
ID = input.qualifiers['protein_id'][0]
except KeyError:
ID = locusTag
if ID:
if ':' in ID:
ID = ID.split(':')[-1]
else:
try:
ID = input.qualifiers['standard_name'][0]
except KeyError:
pass
return locusTag, ID, Parent
def gb2nucleotides(input, prots, trans, dna):
'''
    function to generate proteins, transcripts, and contigs from a genbank file
'''
genes = {}
with open(dna, 'w') as dnaout:
with open(input, 'rU') as filein:
for record in SeqIO.parse(filein, 'genbank'):
dnaout.write(">%s\n%s\n" % (record.id, softwrap(str(record.seq))))
for f in record.features:
gb_feature_add2dict(f, record, genes)
#write to protein and transcripts
dict2nucleotides(genes, prots, trans)
return len(genes)
def dict2nucleotides(input, prots, trans):
'''
function to generate protein and transcripts from dictionary
'''
#write to protein and transcripts
with open(prots, 'w') as protout:
with open(trans, 'w') as tranout:
for k,v in natsorted(input.items()):
if 'pseudo' in v:
if v['pseudo']:
continue
if v['type'] == 'mRNA' and not v['CDS']:
continue
if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
continue
for i,x in enumerate(v['ids']):
try:
Transcript = str(v['transcript'][i])
tranout.write('>{:} {:}\n{:}\n'.format(x, k, softwrap(Transcript)))
except IndexError:
pass
if v['type'] == 'mRNA':
Prot = v['protein'][i]
protout.write('>{:} {:}\n{:}\n'.format(x, k, softwrap(Prot)))
def gb2gffnuc(input, gff, prots, trans, dna):
'''
    function to generate proteins, transcripts, and contigs from a genbank file
'''
genes = {}
with open(dna, 'w') as dnaout:
with open(input, 'rU') as filein:
for record in SeqIO.parse(filein, 'genbank'):
dnaout.write(">{:}\n{:}\n".format(record.id, softwrap(str(record.seq))))
for f in record.features:
gb_feature_add2dict(f, record, genes)
#write gff3 output
dict2gff3(genes, gff)
#write to protein and transcripts
dict2nucleotides(genes, prots, trans)
return len(genes)
def gb2parts(input, tbl, gff, prots, trans, dna):
'''
    function to parse a genbank file and write tbl, GFF3, protein, transcript, and contig outputs;
    it can handle multiple transcripts per locus/gene and returns the number of gene models parsed
'''
genes = {}
scaff2genes = {}
scaffLen = {}
with open(dna, 'w') as dnaout:
with open(input, 'rU') as filein:
for record in SeqIO.parse(filein, 'genbank'):
dnaout.write(">{:}\n{:}\n".format(record.id, softwrap(str(record.seq))))
Contig = record.id
if not Contig in scaffLen:
scaffLen[Contig] = len(record.seq)
for f in record.features:
if f.type == 'gene':
locusTag, ID, Parent = getID(f, f.type)
if not Contig in scaff2genes:
scaff2genes[Contig] = [locusTag]
else:
scaff2genes[Contig].append(locusTag)
gb_feature_add2dict(f, record, genes)
#write tbl output
dicts2tbl(genes, scaff2genes, scaffLen, 'CFMR', '12345', [], tbl)
#write gff3 output
dict2gff3(genes, gff)
#write to protein and transcripts
dict2nucleotides(genes, prots, trans)
return len(genes)
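#Usage sketch for gb2parts; the output file names are illustrative placeholders:
#   num_models = gb2parts('input.gbk', 'genome.tbl', 'genome.gff3',
#                         'proteins.fa', 'transcripts.fa', 'scaffolds.fa')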
def gb_feature_add2dict(f, record, genes):
'''
general function to take a genbank feature from flat file and add to funannotate standardized dictionary
locustag: {
'contig': contigName
'type': mRNA/rRNA/tRNA/ncRNA
'location': (start, end) #integer tuple
'strand': +/-
'ids': [transcript/protein IDs] #list
'mRNA':[[(ex1,ex1),(ex2,ex2)]] #list of lists of tuples (start, end)
'CDS':[[(cds1,cds1),(cds2,cds2)]] #list of lists of tuples (start, end)
        'transcript': [seq1, seq2] #list of mRNA transcripts
        'cds_transcript': [seq1, seq2] #list of CDS transcripts (no UTRs)
'protein': [protseq1,protseq2] #list of CDS translations
'protein_id': [id,id] #from NCBI
'codon_start': [1,1] #codon start for translations
'note': [[first note, second note], [first, second, etc]] #list of lists
'name': genename
'product': [hypothetical protein, velvet complex] #list of product definitions
'go_terms': [[GO:0000001,GO:0000002]] #list of lists
'db_xref': [[InterPro:IPR0001,PFAM:004384]] #list of lists
'partialStart': True/False
'partialStop': True/False
'source': annotation source
'pseudo': True/False
}
'''
    #get info from features; if there is no locusTag then skip the feature
if f.type == 'gene' or f.type == 'mRNA' or f.type == 'CDS' or f.type == 'tRNA' or f.type == 'rRNA' or f.type == 'ncRNA' or f.type == 'exon':
locusTag, ID, Parent = getID(f, f.type)
if not locusTag:
return genes
else:
return genes
#check for mismatching funannotate ID locus tag basename
if ID and '-T' in ID: #then this is from funannotate, okay to modify - this is to capture apparent tbl2asn local error
if ID.split('-T')[0] != locusTag: #there is a problem, update locusTag with basename of ID
locusTag = ID.split('-T')[0]
#standard information from every feature
strand = f.location.strand
if strand == 1:
strand = '+'
elif strand == -1:
strand = '-'
start = f.location.nofuzzy_start + 1
end = f.location.nofuzzy_end
chr = record.id
num_parts = len(f.location.parts)
name,Product = (None,)*2
Fivepartial,Threepartial = (False,)*2
DBxref = []
Note = []
GO = []
pseudo = False
if 'pseudo' in f.qualifiers:
pseudo = True
#parse each type somewhat differently
if f.type == 'gene':
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': None, 'transcript': [], 'cds_transcript': [], 'protein': [], 'source': 'GenBank',
'codon_start': [], 'ids': [], 'CDS': [], 'mRNA': [], 'strand': strand,
'location': (int(start), int(end)), 'contig': chr, 'product': [],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [], 'partialStop': [], 'protein_id': [], 'pseudo': pseudo}
else:
genes[locusTag]['location'] = (int(start), int(end))
genes[locusTag]['strand'] = strand
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
elif f.type == 'tRNA' or f.type == 'rRNA' or f.type == 'ncRNA':
feature_seq = f.extract(record.seq)
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
try:
Product = f.qualifiers['product'][0]
if Product == 'tRNA-OTHER':
Product = 'tRNA-Xxx'
except KeyError:
Product = None
exonTuples = []
if num_parts < 2: #only single exon
exonTuples.append((int(start),int(end)))
else: #more than 1 exon, so loop through
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
exonTuples.append((int(ex_start),int(ex_end)))
        #sort the exon coordinates by strand and flag partial (fuzzy) ends
if strand == '+':
sortedExons = sorted(exonTuples, key=lambda tup: tup[0])
if unicode(f.location.start).startswith('<'):
Fivepartial = True
if unicode(f.location.end).startswith('>'):
Threepartial = True
else:
sortedExons = sorted(exonTuples, key=lambda tup: tup[0], reverse=True)
if unicode(f.location.start).startswith('<'):
Threepartial = True
if unicode(f.location.end).startswith('>'):
Fivepartial = True
#update positions
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': f.type, 'transcript': [feature_seq], 'cds_transcript': [], 'protein': [], 'source': 'GenBank',
'codon_start': [], 'ids': [locusTag+'-T1'], 'CDS': [], 'mRNA': [sortedExons], 'strand': strand,
'location': (int(start), int(end)), 'contig': chr, 'product': [Product], 'protein_id': [], 'pseudo': pseudo,
'db_xref': [DBxref], 'go_terms': [GO], 'note': [Note], 'partialStart': [Fivepartial], 'partialStop': [Threepartial]}
else:
genes[locusTag]['mRNA'].append(sortedExons)
genes[locusTag]['type'] = f.type
genes[locusTag]['transcript'].append(feature_seq)
genes[locusTag]['ids'].append(locusTag+'-T'+str(len(genes[locusTag]['ids'])+1))
genes[locusTag]['db_xref'].append(DBxref)
genes[locusTag]['note'].append(Note)
genes[locusTag]['go_terms'].append(GO)
genes[locusTag]['product'].append(Product)
genes[locusTag]['partialStart'].append(Fivepartial)
genes[locusTag]['partialStop'].append(Threepartial)
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
elif f.type == 'mRNA':
feature_seq = f.extract(record.seq)
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
exonTuples = []
if num_parts < 2: #only single exon
exonTuples.append((int(start),int(end)))
else: #more than 1 exon, so loop through
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
exonTuples.append((int(ex_start),int(ex_end)))
        #sort the exon coordinates by strand and flag partial (fuzzy) ends
if strand == '+':
sortedExons = sorted(exonTuples, key=lambda tup: tup[0])
if unicode(f.location.start).startswith('<'):
Fivepartial = True
if unicode(f.location.end).startswith('>'):
Threepartial = True
else:
sortedExons = sorted(exonTuples, key=lambda tup: tup[0], reverse=True)
if unicode(f.location.start).startswith('<'):
Threepartial = True
if unicode(f.location.end).startswith('>'):
Fivepartial = True
#update positions
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': f.type, 'transcript': [feature_seq], 'cds_transcript': [], 'protein': [], 'source': 'GenBank',
'codon_start': [], 'ids': [], 'CDS': [], 'mRNA': [sortedExons], 'strand': strand,
'location': (int(start), int(end)), 'contig': chr, 'product': [], 'protein_id': [], 'pseudo': pseudo,
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [Fivepartial], 'partialStop': [Threepartial]}
else:
genes[locusTag]['mRNA'].append(sortedExons)
genes[locusTag]['type'] = f.type
genes[locusTag]['transcript'].append(feature_seq)
genes[locusTag]['partialStart'].append(Fivepartial)
genes[locusTag]['partialStop'].append(Threepartial)
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
elif f.type == 'exon': #assuming need to overwrite mRNA feature then?
genes[locusTag]['mRNA'] = []
genes[locusTag]['transcript'] = []
feature_seq = f.extract(record.seq)
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
exonTuples = []
if num_parts < 2: #only single exon
exonTuples.append((int(start),int(end)))
else: #more than 1 exon, so loop through
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
exonTuples.append((int(ex_start),int(ex_end)))
        #sort the exon coordinates by strand and flag partial (fuzzy) ends
if strand == '+':
sortedExons = sorted(exonTuples, key=lambda tup: tup[0])
if unicode(f.location.start).startswith('<'):
Fivepartial = True
if unicode(f.location.end).startswith('>'):
Threepartial = True
else:
sortedExons = sorted(exonTuples, key=lambda tup: tup[0], reverse=True)
if unicode(f.location.start).startswith('<'):
Threepartial = True
if unicode(f.location.end).startswith('>'):
Fivepartial = True
#update positions
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': f.type, 'transcript': [feature_seq], 'cds_transcript': [], 'protein': [], 'source': 'GenBank',
'codon_start': [], 'ids': [], 'CDS': [], 'mRNA': [sortedExons], 'strand': strand,
'location': (int(start), int(end)), 'contig': chr, 'product': [], 'protein_id': [],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [Fivepartial], 'partialStop': [Threepartial], 'pseudo': pseudo}
else:
genes[locusTag]['mRNA'].append(sortedExons)
genes[locusTag]['transcript'].append(feature_seq)
genes[locusTag]['partialStart'].append(Fivepartial)
genes[locusTag]['partialStop'].append(Threepartial)
elif f.type == 'CDS' and 'codon_start' in f.qualifiers:
feature_seq = f.extract(record.seq)
if not ID:
try:
log.info("putative transcript from %s has no ID\n(%s %s %s)" % (locusTag, locusTag, ID, Parent))
except NameError:
print("putative transcript from %s has no ID\n(%s %s %s)" % (locusTag, locusTag, ID, Parent))
return genes
try:
protSeq = f.qualifiers['translation'][0]
except KeyError:
try:
log.debug("%s has no translation" % ID)
except NameError:
print("%s has no translation" % ID)
protSeq = ''
cdsTuples = []
phase = int(f.qualifiers['codon_start'][0])
if num_parts < 2: #only single CDS
cdsTuples.append((int(start),int(end)))
else:
for i in range(0, num_parts):
ex_start = f.location.parts[i].nofuzzy_start + 1
ex_end = f.location.parts[i].nofuzzy_end
cdsTuples.append((int(ex_start),int(ex_end)))
if strand == '+':
sortedCDS = sorted(cdsTuples, key=lambda tup: tup[0])
else:
sortedCDS = sorted(cdsTuples, key=lambda tup: tup[0], reverse=True)
#check for annotations
try:
Product = f.qualifiers['product'][0]
except KeyError:
Product = 'hypothetical protein'
try:
name = f.qualifiers['gene'][0]
except KeyError:
pass
#note and dbxref are in a dictionary
for key,value in f.qualifiers.items():
if key == 'note':
notes = value[0].split('; ')
for n in notes:
if n.startswith('GO'):
GO.append(n)
else:
Note.append(n)
elif key == 'db_xref':
for ref in value:
DBxref.append(ref)
#update dictionary
if not locusTag in genes:
genes[locusTag] = {'name': name, 'type': 'mRNA', 'transcript': [], 'cds_transcript': [feature_seq], 'protein': [], 'source': 'GenBank',
'codon_start': [phase], 'ids': [locusTag+'-T1'], 'CDS': [sortedCDS], 'mRNA': [], 'strand': strand,
'location': (int(start), int(end)), 'contig': chr, 'product': [Product], 'protein_id': [ID],
'db_xref': [DBxref], 'go_terms': [GO], 'note': [Note], 'partialStart': [], 'partialStop': [], 'pseudo': pseudo}
else:
genes[locusTag]['protein_id'].append(ID)
genes[locusTag]['ids'].append(locusTag+'-T'+str(len(genes[locusTag]['ids'])+1))
genes[locusTag]['CDS'].append(sortedCDS)
genes[locusTag]['product'].append(Product)
genes[locusTag]['protein'].append(protSeq)
genes[locusTag]['cds_transcript'].append(feature_seq)
genes[locusTag]['codon_start'].append(phase)
genes[locusTag]['db_xref'].append(DBxref)
genes[locusTag]['note'].append(Note)
genes[locusTag]['go_terms'].append(GO)
if not genes[locusTag]['type']:
genes[locusTag]['type'] = 'mRNA'
if not genes[locusTag]['name']:
genes[locusTag]['name'] = name
return genes
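#Sketch of building the funannotate dictionary straight from a GenBank flat file with
#gb_feature_add2dict (the file name is a hypothetical placeholder):
#   Genes = {}
#   with open('input.gbk', 'rU') as infile:
#       for record in SeqIO.parse(infile, 'genbank'):
#           for f in record.features:
#               Genes = gb_feature_add2dict(f, record, Genes)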
def gff2interlap(input, fasta):
'''
function to parse GFF3 file, construct scaffold/gene interlap dictionary and funannotate standard annotation dictionary
'''
inter = defaultdict(InterLap)
Genes = {}
Genes = gff2dict(input, fasta, Genes)
for k,v in natsorted(Genes.items()):
inter[v['contig']].add((v['location'][0],v['location'][1],k))
return inter, Genes
def gff2interlapDict(input, fasta, inter, Dict):
'''
function to parse GFF3 file, construct scaffold/gene interlap dictionary and funannotate standard annotation dictionary
'''
Genes = {}
Genes = gff2dict(input, fasta, Genes)
for k,v in natsorted(Genes.items()):
inter[v['contig']].add((v['location'][0], v['location'][1], v['strand'], k))
#merge dictionary and return
Dict = merge_dicts(Dict, Genes)
return inter, Dict
def gff2interlapDictOLD(file, inter, Dict):
'''
function to return a scaffold level interlap object, i.e. gene coordinates on each contig
as well as a dictionary containing funannotate standardized dictionary
'''
#the interlap default dict must be created already as well as Genes dictionary
Genes = {}
idParent = {}
with open(file, 'rU') as input:
for line in input:
if line.startswith('\n') or line.startswith('#'):
continue
line = line.rstrip()
contig, source, feature, start, end, score, strand, phase, attributes = line.split('\t')
start = int(start)
end = int(end)
ID,Parent,Name,Product,GeneFeature = (None,)*5
            Note, DBxref, GO = [], [], []  #use separate list objects to avoid aliasing
info = attributes.split(';')
for x in info:
if x.startswith('ID='):
ID = x.replace('ID=', '')
elif x.startswith('Parent='):
Parent = x.replace('Parent=', '')
elif x.startswith('Name='):
Name = x.replace('Name=', '')
elif x.startswith('Note=') or x.startswith('note='):
Note = x.split('ote=')[-1]
if ',' in Note:
Note = Note.split(',')
else:
Note = [Note]
elif x.startswith('DBxref='):
DBxref = x.replace('DBxref=', '')
if ',' in DBxref:
DBxref = DBxref.split(',')
else:
DBxref = [DBxref]
elif x.startswith('Ontology_term='):
GO = x.replace('Ontology_term=', '')
if ',' in GO:
GO = GO.split(',')
else:
GO = [GO]
elif x.startswith('Product=') or x.startswith('product='):
Product = x.split('roduct=')[-1]
if feature == 'gene':
if not ID in Genes:
Genes[ID] = {'name': Name, 'type': None, 'transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [], 'CDS': [[]], 'mRNA': [[]], 'strand': strand,
'location': (start, end), 'contig': contig, 'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [], 'partialStop': []}
else:
if not ID or not Parent:
print("Error, can't find ID or Parent. Malformed GFF file.")
print(line)
sys.exit(1)
if feature == 'mRNA' or feature == 'tRNA' or feature == 'rRNA':
if not Product:
if feature == 'mRNA':
Product = 'hypothetical protein'
if not Parent in Genes:
Genes[Parent] = {'name': Name, 'type': feature, 'transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [ID], 'CDS': [[]], 'mRNA': [[]], 'strand': strand,
'location': (start, end), 'contig': contig, 'product': [Product], 'source': source,'phase': [[]],
'db_xref': [DBxref], 'go_terms': [GO], 'note': [Note], 'partialStart': [False], 'partialStop': [False]}
else:
Genes[Parent]['ids'].append(ID)
Genes[Parent]['partialStart'].append(False)
Genes[Parent]['partialStop'].append(False)
Genes[Parent]['product'].append(Product)
Genes[Parent]['db_xref'].append(DBxref)
Genes[Parent]['go_terms'].append(GO)
Genes[Parent]['note'].append(Note)
Genes[Parent]['type'] = feature
if not ID in idParent:
idParent[ID] = Parent
elif feature == 'exon':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None, 'transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [p], 'CDS': [[]], 'mRNA': [[(start, end)]], 'strand': strand,
'location': None, 'contig': contig, 'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [], 'partialStop': []}
else:
                                #determine which transcript this is; get its index from the ID
i = Genes[GeneFeature]['ids'].index(p)
try:
Genes[GeneFeature]['mRNA'][i].append((start,end))
except IndexError: #means first exon, so create item
Genes[GeneFeature]['mRNA'].append([(start,end)])
elif feature == 'CDS':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None, 'transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [], 'ids': [p], 'CDS': [[(start, end)]], 'mRNA': [[]], 'strand': strand,
'location': None, 'contig': contig, 'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [], 'partialStop': []}
else:
                                #determine which transcript this is; get its index from the ID
i = Genes[GeneFeature]['ids'].index(p)
try:
Genes[GeneFeature]['CDS'][i].append((start,end))
except IndexError: #means first exon, so create item
Genes[GeneFeature]['CDS'].append([(start,end)])
#add phase
try:
phase = int(phase)
except ValueError:
phase = 0
try:
Genes[GeneFeature]['phase'][i].append(phase)
except IndexError: #means first exon, so create item
Genes[GeneFeature]['phase'].append([phase])
elif feature == 'five_prime_UTR':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None, 'transcript': [], 'protein': [], '5UTR': [[(start, end)]], '3UTR': [[]],
'codon_start': [], 'ids': [p], 'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
'location': None, 'contig': contig, 'product': [], 'source': source, 'phase': [],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [], 'partialStop': []}
else:
                                #determine which transcript this is; get its index from the ID
i = Genes[GeneFeature]['ids'].index(p)
try:
Genes[GeneFeature]['5UTR'][i].append((start,end))
except IndexError:
Genes[GeneFeature]['5UTR'].append([(start,end)])
elif feature == 'three_prime_UTR':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None, 'transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[(start, end)]],
'codon_start': [], 'ids': [p], 'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
'location': None, 'contig': contig, 'product': [], 'source': source, 'phase': [],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [], 'partialStop': []}
else:
                                #determine which transcript this is; get its index from the ID
i = Genes[GeneFeature]['ids'].index(p)
try:
Genes[GeneFeature]['3UTR'][i].append((start,end))
except IndexError:
Genes[GeneFeature]['3UTR'].append([(start,end)])
#loop through and make sure CDS and exons are properly sorted and codon_start is correct
#also double check that gene location encompasses entire mRNA
#then add to interlap object
for k,v in Genes.items():
for i in range(0,len(v['ids'])):
if v['type'] == 'mRNA' or v['type'] == 'tRNA':
if v['strand'] == '+':
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
if sortedExons[0][0] < v['location'][0]:
Genes[k]['location'] = (sortedExons[0][0], v['location'][1])
if sortedExons[-1][1] > v['location'][1]:
Genes[k]['location'] = (v['location'][0], sortedExons[-1][1])
else:
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
if sortedExons[-1][0] < v['location'][0]:
Genes[k]['location'] = (sortedExons[-1][0], v['location'][1])
if sortedExons[0][1] > v['location'][1]:
Genes[k]['location'] = (v['location'][0], sortedExons[0][1])
Genes[k]['mRNA'][i] = sortedExons
if v['type'] == 'mRNA':
if v['strand'] == '+':
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
else:
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0], reverse=True)
#get the codon_start by getting first CDS phase + 1
indexStart = [x for x, y in enumerate(v['CDS'][i]) if y[0] == sortedCDS[0][0]]
codon_start = int(v['phase'][i][indexStart[0]]) + 1
Genes[k]['codon_start'].append(codon_start)
Genes[k]['CDS'][i] = sortedCDS
#add to interlap object
inter[v['contig']].add((v['location'][0], v['location'][1], v['strand'], k))
#merge dictionary and return
Dict = merge_dicts(Dict, Genes)
return inter, Dict
def merge_dicts(x, y):
"""Given two dicts, merge them into a new dict as a shallow copy."""
z = x.copy()
z.update(y)
return z
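#For example, merge_dicts({'a': 1}, {'b': 2}) returns {'a': 1, 'b': 2}; keys present in
#both arguments take their value from the second dictionary.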
def exonerate2hints(file, outfile):
#mimic exonerate2hints from GFF3 exonerate file
#CDSpart +/- 15 bp to each match
#intron as is
'''
#gff3 via EVM
scaffold_20 exonerate nucleotide_to_protein_match 225035 225823 82.13 + . ID=match.11677.2;Target=VC83_07547 1 96
scaffold_20 exonerate nucleotide_to_protein_match 53957 54342 92.93 + . ID=match.11677.3;Target=VC83_02595 1 129
scaffold_20 exonerate nucleotide_to_protein_match 54397 54904 92.93 + . ID=match.11677.3;Target=VC83_02595 130 299
scaffold_107 exonerate nucleotide_to_protein_match 77634 78119 89.95 - . ID=match.11677.5;Target=VC83_08471 1 163
scaffold_107 exonerate nucleotide_to_protein_match 77501 77546 89.95 - . ID=match.11677.5;Target=VC83_08471 163 178
scaffold_107 exonerate nucleotide_to_protein_match 77385 77422 89.95 - . ID=match.11677.5;Target=VC83_08471 179 191
#corresponding exonerate2hints
scaffold_20 xnt2h CDSpart 225050 225808 . + . src=XNT;grp=VC83_07547;pri=4
scaffold_20 xnt2h CDSpart 53972 54327 . + . src=XNT;grp=VC83_02595;pri=4
scaffold_20 xnt2h intron 54343 54396 . + . src=XNT;grp=VC83_02595;pri=4
scaffold_20 xnt2h CDSpart 54412 54889 . + . src=XNT;grp=VC83_02595;pri=4
scaffold_107 xnt2h CDSpart 77649 78104 . - . src=XNT;grp=VC83_08471;pri=4
scaffold_107 xnt2h intron 77547 77633 . - . src=XNT;grp=VC83_08471;pri=4
scaffold_107 xnt2h CDSpart 77516 77531 . - . src=XNT;grp=VC83_08471;pri=4
scaffold_107 xnt2h intron 77423 77500 . - . src=XNT;grp=VC83_08471;pri=4
scaffold_107 xnt2h CDSpart 77400 77407 . - . src=XNT;grp=VC83_08471;pri=4
'''
Genes = {}
with open(file, 'rU') as input:
for line in input:
if line.startswith('\n') or line.startswith('#'):
continue
line = line.rstrip()
contig, source, feature, start, end, score, strand, phase, attributes = line.split('\t')
start = int(start)
end = int(end)
ID,Target = (None,)*2
info = attributes.split(';')
for x in info:
if x.startswith('ID='):
ID = x.replace('ID=', '')
elif x.startswith('Target='):
Target = x.replace('Target=', '').split(' ')[0]
if not ID in Genes:
Genes[ID] = {'id' : ID, 'target': Target, 'loc': [(start, end)], 'strand': strand, 'contig': contig}
else:
Genes[ID]['loc'].append((start,end))
    #now sort through and write the hints file
with open(outfile, 'w') as output:
for k,v in natsorted(Genes.items()):
if v['strand'] == '+':
sortedCDS = sorted(v['loc'], key=lambda tup: tup[0])
for i,x in enumerate(sortedCDS): #loop through tuples
                    output.write('{:}\txnt2h\tCDSpart\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(v['contig'],x[0]+15,x[1]-15,v['strand'], v['target'])) #trim 15 bp from each end, matching the example above
if len(sortedCDS) > 1:
try:
output.write('{:}\txnt2h\tintron\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(v['contig'],x[1]+1,sortedCDS[i+1][0]-1,v['strand'], v['target']))
except IndexError:
pass
else:
sortedCDS = sorted(v['loc'], key=lambda tup: tup[0], reverse=True)
for i,x in enumerate(sortedCDS): #loop through tuples
output.write('{:}\txnt2h\tCDSpart\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(v['contig'],x[0]+15,x[1]-15,v['strand'], v['target']))
if len(sortedCDS) > 1:
try:
output.write('{:}\txnt2h\tintron\t{:}\t{:}\t.\t{:}\t.\tsrc=XNT;grp={:};pri=4\n'.format(v['contig'],sortedCDS[i+1][1]+1,x[0]-1,v['strand'], v['target']))
except IndexError:
pass
def gff2dict(file, fasta, Genes):
'''
general function to take a GFF3 file and return a funannotate standardized dictionary
locustag: {
'contig': contigName
'type': mRNA/rRNA/tRNA/ncRNA
'location': (start, end) #integer tuple
'strand': +/-
'ids': [transcript/protein IDs] #list
'mRNA':[[(ex1,ex1),(ex2,ex2)]] #list of lists of tuples (start, end)
'CDS':[[(cds1,cds1),(cds2,cds2)]] #list of lists of tuples (start, end)
        'transcript': [seq1, seq2] #list of mRNA transcripts
        'cds_transcript': [seq1, seq2] #list of CDS transcripts (no UTRs)
'protein': [protseq1,protseq2] #list of CDS translations
'codon_start': [1,1] #codon start for translations
'note': [[first note, second note], [first, second, etc]] #list of lists
'name': genename
'product': [hypothetical protein, velvet complex] #list of product definitions
'go_terms': [[GO:0000001,GO:0000002]] #list of lists
'db_xref': [[InterPro:IPR0001,PFAM:004384]] #list of lists
'partialStart': True/False
'partialStop': True/False
'source': annotation source
'phase': [[0,2,1]] list of lists
'5UTR': [[(),()]] #list of lists of tuples (start, end)
'3UTR': [[(),()]] #list of lists of tuples (start, end)
}
'''
idParent = {}
with open(file, 'rU') as input:
for line in input:
if line.startswith('\n') or line.startswith('#'):
continue
line = line.rstrip()
contig, source, feature, start, end, score, strand, phase, attributes = line.split('\t')
start = int(start)
end = int(end)
ID,Parent,Name,Product,GeneFeature = (None,)*5
            Note, DBxref, GO = [], [], []  #use separate list objects to avoid aliasing
info = attributes.split(';')
for x in info:
if x.startswith('ID='):
ID = x.replace('ID=', '')
elif x.startswith('Parent='):
Parent = x.replace('Parent=', '')
elif x.startswith('Name='):
Name = x.replace('Name=', '')
elif x.startswith('Note=') or x.startswith('note='):
Note = x.split('ote=')[-1]
if ',' in Note:
Note = Note.split(',')
else:
Note = [Note]
elif x.startswith('DBxref='):
DBxref = x.replace('DBxref=', '')
if ',' in DBxref:
DBxref = DBxref.split(',')
else:
DBxref = [DBxref]
elif x.startswith('Ontology_term='):
GO = x.replace('Ontology_term=', '')
if ',' in GO:
GO = GO.split(',')
else:
GO = [GO]
elif x.startswith('Product=') or x.startswith('product='):
Product = x.split('roduct=')[-1]
elif x.startswith('description='):
Product = x.replace('description=', '')
if feature == 'gene':
if not ID in Genes:
Genes[ID] = {'name': Name, 'type': None, 'transcript': [], 'cds_transcript': [], 'protein': [], '5UTR': [], '3UTR': [],
'codon_start': [], 'ids': [], 'CDS': [], 'mRNA': [], 'strand': strand,
'location': (start, end), 'contig': contig, 'product': [], 'source': source, 'phase': [],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [], 'partialStop': [], 'pseudo': False}
else:
if start < Genes[ID]['location'][0]:
Genes[ID]['location'] = (start,Genes[ID]['location'][1])
if end > Genes[ID]['location'][1]:
Genes[ID]['location'] = (Genes[ID]['location'][0],end)
else:
if not ID or not Parent:
print("Error, can't find ID or Parent. Malformed GFF file.")
print(line)
sys.exit(1)
if feature == 'mRNA' or feature == 'tRNA' or feature == 'rRNA':
if not Product:
if feature == 'mRNA':
Product = 'hypothetical protein'
if not Parent in Genes:
Genes[Parent] = {'name': Name, 'type': feature, 'transcript': [], 'cds_transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [[]], 'ids': [ID], 'CDS': [[]], 'mRNA': [[]], 'strand': strand,
'location': (start, end), 'contig': contig, 'product': [Product], 'source': source, 'phase': [[]],
'db_xref': [DBxref], 'go_terms': [GO], 'note': [Note], 'partialStart': [False], 'partialStop': [False], 'pseudo': False}
else:
Genes[Parent]['ids'].append(ID)
Genes[Parent]['mRNA'].append([])
Genes[Parent]['CDS'].append([])
Genes[Parent]['phase'].append([])
Genes[Parent]['5UTR'].append([])
Genes[Parent]['3UTR'].append([])
Genes[Parent]['codon_start'].append([])
Genes[Parent]['partialStart'].append(False)
Genes[Parent]['partialStop'].append(False)
Genes[Parent]['product'].append(Product)
Genes[Parent]['db_xref'].append(DBxref)
Genes[Parent]['go_terms'].append(GO)
Genes[Parent]['note'].append(Note)
Genes[Parent]['type'] = feature
#double check mRNA features are contained in gene coordinates
if start < Genes[Parent]['location'][0]:
#print('{:} update start: {:} to {:}'.format(Parent, Genes[Parent]['location'][0],start))
Genes[Parent]['location'] = (start,Genes[Parent]['location'][1])
if end > Genes[Parent]['location'][1]:
#print('{:} update stop: {:} to {:}'.format(Parent, Genes[Parent]['location'][1],end))
Genes[Parent]['location'] = (Genes[Parent]['location'][0],end)
if not ID in idParent:
idParent[ID] = Parent
elif feature == 'exon':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None, 'transcript': [], 'cds_transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [[]], 'ids': [p], 'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
'location': None, 'contig': contig, 'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [False], 'partialStop': [False], 'pseudo': False}
else:
                                #determine which transcript this is; get its index from the ID
i = Genes[GeneFeature]['ids'].index(p)
Genes[GeneFeature]['mRNA'][i].append((start,end))
elif feature == 'CDS':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None, 'transcript': [], 'cds_transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[]],
'codon_start': [[]], 'ids': [p], 'CDS': [[(start, end)]], 'mRNA': [], 'strand': strand,
'location': None, 'contig': contig, 'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [False], 'partialStop': [False], 'pseudo': False}
else:
                                #determine which transcript this is; get its index from the ID
i = Genes[GeneFeature]['ids'].index(p)
Genes[GeneFeature]['CDS'][i].append((start,end))
#add phase
Genes[GeneFeature]['phase'][i].append(int(phase))
elif feature == 'five_prime_UTR' or feature == 'five_prime_utr':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None, 'transcript': [], 'cds_transcript': [], 'protein': [], '5UTR': [[(start, end)]], '3UTR': [[]],
'codon_start': [[]], 'ids': [p], 'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
'location': None, 'contig': contig, 'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [False], 'partialStop': [False], 'pseudo': False}
else:
                                #determine which transcript this is; get its index from the ID
i = Genes[GeneFeature]['ids'].index(p)
Genes[GeneFeature]['5UTR'][i].append((start,end))
elif feature == 'three_prime_UTR' or feature == 'three_prime_utr':
if ',' in Parent:
parents = Parent.split(',')
else:
parents = [Parent]
for p in parents:
if p in idParent:
GeneFeature = idParent.get(p)
if GeneFeature:
if not GeneFeature in Genes:
Genes[GeneFeature] = {'name': Name, 'type': None, 'transcript': [], 'cds_transcript': [], 'protein': [], '5UTR': [[]], '3UTR': [[(start, end)]],
'codon_start': [[]], 'ids': [p], 'CDS': [], 'mRNA': [[(start, end)]], 'strand': strand,
'location': None, 'contig': contig, 'product': [], 'source': source, 'phase': [[]],
'db_xref': [], 'go_terms': [], 'note': [], 'partialStart': [False], 'partialStop': [False], 'pseudo': False}
else:
                                #determine which transcript this is; get its index from the ID
i = Genes[GeneFeature]['ids'].index(p)
Genes[GeneFeature]['3UTR'][i].append((start,end))
#loop through and make sure CDS and exons are properly sorted and codon_start is correct, translate to protein space
SeqRecords = SeqIO.to_dict(SeqIO.parse(fasta, 'fasta'))
for k,v in Genes.items():
for i in range(0,len(v['ids'])):
if v['type'] == 'mRNA' or v['type'] == 'tRNA':
if v['strand'] == '+':
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
else:
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
Genes[k]['mRNA'][i] = sortedExons
mrnaSeq = getSeqRegions(SeqRecords, v['contig'], sortedExons)
v['transcript'].append(mrnaSeq)
if v['type'] == 'mRNA':
if v['strand'] == '+':
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
else:
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0], reverse=True)
#get the codon_start by getting first CDS phase + 1
indexStart = [x for x, y in enumerate(v['CDS'][i]) if y[0] == sortedCDS[0][0]]
codon_start = int(v['phase'][i][indexStart[0]]) + 1
Genes[k]['codon_start'][i] = codon_start
Genes[k]['CDS'][i] = sortedCDS
#translate and get protein sequence
protSeq = None
cdsSeq = getSeqRegions(SeqRecords, v['contig'], v['CDS'][i])
v['cds_transcript'].append(cdsSeq)
protSeq = translate(cdsSeq, v['strand'], v['codon_start'][i]-1)
v['protein'].append(protSeq)
if protSeq:
if protSeq.endswith('*'):
v['partialStop'][i] = False
else:
v['partialStop'][i] = True
if v['codon_start'][i] == 1 and v['protein'][i].startswith('M'):
v['partialStart'][i] = False
else:
v['partialStart'][i] = True
return Genes
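#Typical round trip using the dictionary produced by gff2dict; file names are
#hypothetical placeholders:
#   Genes = {}
#   Genes = gff2dict('annotation.gff3', 'genome.fasta', Genes)
#   dict2gff3(Genes, 'cleaned.gff3')                          #re-write as GFF3
#   dict2nucleotides(Genes, 'proteins.fa', 'transcripts.fa')  #dump sequences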
def dict2gff3(input, output):
    '''
    function to convert funannotate gene dictionary to gff3 output
    '''
    from collections import OrderedDict
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
#sort the annotations by contig and start location
sGenes = sorted(input.iteritems(), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
#then loop through and write GFF3 format
with open(output, 'w') as gffout:
gffout.write("##gff-version 3\n")
for k,v in sortedGenes.items():
if 'pseudo' in v:
if v['pseudo']:
continue
if v['type'] == 'mRNA' and not v['CDS']:
continue
if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
continue
if v['type'] == 'mRNA' and len(v['CDS']) == 0:
continue
if v['type'] == None:
continue
if v['name']:
gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Name={:};\n".format(v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k, v['name']))
else:
gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
for i in range(0,len(v['ids'])):
#build extra annotations for each transcript if applicable
extraAnnotations = ''
if len(v['go_terms'][i]) > 0:
extraAnnotations = extraAnnotations + 'Ontology_term={:};'.format(','.join(v['go_terms'][i]))
if len(v['db_xref'][i]) > 0:
extraAnnotations = extraAnnotations + 'DBxref={:};'.format(','.join(v['db_xref'][i]))
if len(v['note'][i]) > 0:
extraAnnotations = extraAnnotations + 'note={:};'.format(','.join(v['note'][i]))
#now write mRNA feature
gffout.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};{:}\n".format(v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['product'][i], extraAnnotations))
if v['type'] == 'mRNA' or v['type'] == 'tRNA':
if '5UTR' in v:
#if 5'UTR then write those first
num_5utrs = len(v['5UTR'][i])
if num_5utrs > 0:
for z in range(0,num_5utrs):
u_num = z + 1
gffout.write("{:}\t{:}\tfive_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr5p{:};Parent={:};\n".format(v['contig'], v['source'], v['5UTR'][i][z][0], v['5UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
#write the exons
num_exons = len(v['mRNA'][i])
for x in range(0,num_exons):
ex_num = x + 1
gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
#if 3'UTR then write
if '3UTR' in v:
num_3utrs = len(v['3UTR'][i])
if num_3utrs > 0:
for z in range(0,num_3utrs):
u_num = z + 1
gffout.write("{:}\t{:}\tthree_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr3p{:};Parent={:};\n".format(v['contig'], v['source'], v['3UTR'][i][z][0], v['3UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
if v['type'] == 'mRNA':
num_cds = len(v['CDS'][i])
current_phase = v['codon_start'][i] - 1 #GFF3 phase is 1 less than flat file
for y in range(0,num_cds):
gffout.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
current_phase = (current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
if current_phase == 3:
current_phase = 0
def dict2gff3noUTRs(input, output):
    '''
    function to convert funannotate gene dictionary to gff3 output, no UTRs!
    '''
    from collections import OrderedDict
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
#sort the annotations by contig and start location
sGenes = sorted(input.iteritems(), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
#then loop through and write GFF3 format
with open(output, 'w') as gffout:
gffout.write("##gff-version 3\n")
for k,v in sortedGenes.items():
if v['name']:
gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};Name={:};\n".format(v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k, v['name']))
else:
gffout.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
for i in range(0,len(v['ids'])):
#build extra annotations for each transcript if applicable
extraAnnotations = ''
if len(v['go_terms'][i]) > 0:
extraAnnotations = extraAnnotations + 'Ontology_term={:};'.format(','.join(v['go_terms'][i]))
if len(v['db_xref'][i]) > 0:
extraAnnotations = extraAnnotations + 'DBxref={:};'.format(','.join(v['db_xref'][i]))
if len(v['note'][i]) > 0:
extraAnnotations = extraAnnotations + 'note={:};'.format(','.join(v['note'][i]))
#now write mRNA feature
gffout.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};{:}\n".format(v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['product'][i], extraAnnotations))
if v['type'] == 'tRNA':
#write the exons and CDS features
num_exons = len(v['mRNA'][i])
for x in range(0,num_exons):
ex_num = x + 1
gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
elif v['type'] == 'mRNA':
num_cds = len(v['CDS'][i])
current_phase = v['codon_start'][i] - 1 #GFF3 phase is 1 less than flat file
for y in range(0,num_cds):
ex_num = y + 1
gffout.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
gffout.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
current_phase = (current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
if current_phase == 3:
current_phase = 0
def gtf2dict(input):
Genes = {}
with open(input,'rU') as inFile:
for line in inFile:
if line.startswith('\n') or line.startswith('#'):
continue
line = line.rstrip()
#CM002242 StringTie transcript 4198460 4199001 1000 + . gene_id "STRG.18087"; transcript_id "STRG.18087.2"; cov "5.905163"; FPKM "3.279455"; TPM "9.789504";
#CM002242 StringTie exon 4198460 4198609 1000 + . gene_id "STRG.18087"; transcript_id "STRG.18087.2"; exon_number "1"; cov "6.999466";
contig, source, feature, start, end, score, strand, phase, attributes = line.split('\t')
start = int(start)
end = int(end)
ID,transcriptID,exonNum,TPM = (None,)*4
info = attributes.split(';')
for x in info:
x = x.strip()
x = x.replace('"','')
if x.startswith('gene_id '):
ID = x.replace('gene_id ', '')
elif x.startswith('transcript_id '):
transcriptID = x.replace('transcript_id ', '')
elif x.startswith('exon_number '):
exonNum = x.replace('exon_number ', '')
elif x.startswith('TPM '):
TPM = x.replace('TPM ', '')
if feature == 'transcript':
if not ID in Genes:
Genes[ID] = {'type': 'mRNA', 'codon_start': [1], 'ids': [transcriptID], 'CDS': [[]], 'mRNA': [[]], 'strand': strand,
'location': (start, end), 'contig': contig, 'source': source, 'tpm': [TPM]}
else:
if start < Genes[ID]['location'][0]:
Genes[ID]['location'] = (start,Genes[ID]['location'][1])
if end > Genes[ID]['location'][1]:
Genes[ID]['location'] = (Genes[ID]['location'][0],end)
Genes[ID]['ids'].append(transcriptID)
Genes[ID]['mRNA'].append([])
Genes[ID]['CDS'].append([])
Genes[ID]['codon_start'].append(1)
Genes[ID]['tpm'].append(TPM)
else:
if not ID or not transcriptID:
print("Error, can't find geneID or transcriptID. Malformed GTF file.")
print(line)
sys.exit(1)
if feature == 'exon':
if not ID in Genes:
Genes[ID] = {'type': 'mRNA', 'codon_start': [1], 'ids': [transcriptID], 'CDS': [[(start,end)]], 'mRNA': [[(start,end)]], 'strand': strand,
'location': (start, end), 'contig': contig, 'source': source, 'tpm': []}
else:
if transcriptID in Genes[ID]['ids']: #then add exon
i = Genes[ID]['ids'].index(transcriptID)
Genes[ID]['mRNA'][i].append((start,end))
Genes[ID]['CDS'][i].append((start,end))
    #loop through dictionary and make sure exons are properly sorted
for k,v in Genes.items():
for i in range(0,len(v['ids'])):
if v['strand'] == '+':
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0])
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0])
else:
sortedExons = sorted(v['mRNA'][i], key=lambda tup: tup[0], reverse=True)
sortedCDS = sorted(v['CDS'][i], key=lambda tup: tup[0], reverse=True)
Genes[k]['mRNA'][i] = sortedExons
Genes[k]['CDS'][i] = sortedCDS
return Genes
def Stringtie_dict2gff3(input, output):
    '''
    function to convert the StringTie gene dictionary (from gtf2dict) to gff3 output
    '''
    from collections import OrderedDict
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
#sort the annotations by contig and start location
sGenes = sorted(input.iteritems(), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
#then loop through and write GFF3 format
with open(output, 'w') as outfile:
outfile.write("##gff-version 3\n")
for k,v in sortedGenes.items():
outfile.write("{:}\t{:}\tgene\t{:}\t{:}\t.\t{:}\t.\tID={:};\n".format(v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], k))
for i in range(0,len(v['ids'])):
#build extra annotations for each transcript if applicable
extraAnnotations = ''
#now write mRNA feature
outfile.write("{:}\t{:}\t{:}\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};TPM={:}\n".format(v['contig'], v['source'], v['type'], v['location'][0], v['location'][1], v['strand'], v['ids'][i], k, v['tpm'][i]))
if v['type'] == 'mRNA':
if '5UTR' in v:
#if 5'UTR then write those first
num_5utrs = len(v['5UTR'][i])
if num_5utrs > 0:
for z in range(0,num_5utrs):
u_num = z + 1
outfile.write("{:}\t{:}\tfive_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr5p{:};Parent={:};\n".format(v['contig'], v['source'], v['5UTR'][i][z][0], v['5UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
#write the exons
num_exons = len(v['mRNA'][i])
for x in range(0,num_exons):
ex_num = x + 1
outfile.write("{:}\t{:}\texon\t{:}\t{:}\t.\t{:}\t.\tID={:}.exon{:};Parent={:};\n".format(v['contig'], v['source'], v['mRNA'][i][x][0], v['mRNA'][i][x][1], v['strand'], v['ids'][i], ex_num, v['ids'][i]))
#if 3'UTR then write
if '3UTR' in v:
num_3utrs = len(v['3UTR'][i])
if num_3utrs > 0:
for z in range(0,num_3utrs):
u_num = z + 1
outfile.write("{:}\t{:}\tthree_prime_UTR\t{:}\t{:}\t.\t{:}\t.\tID={:}.utr3p{:};Parent={:};\n".format(v['contig'], v['source'], v['3UTR'][i][z][0], v['3UTR'][i][z][1], v['strand'], v['ids'][i], u_num, v['ids'][i]))
if v['type'] == 'mRNA':
num_cds = len(v['CDS'][i])
current_phase = v['codon_start'][i] - 1 #GFF3 phase is 1 less than flat file
for y in range(0,num_cds):
outfile.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t{:}\tID={:}.cds;Parent={:};\n".format(v['contig'], v['source'], v['CDS'][i][y][0], v['CDS'][i][y][1], v['strand'], current_phase, v['ids'][i], v['ids'][i]))
current_phase = (current_phase - (int(v['CDS'][i][y][1]) - int(v['CDS'][i][y][0]) + 1)) % 3
if current_phase == 3:
current_phase = 0
def Quarry2GFF3(input, output):
with open(output, 'w') as outfile:
outfile.write(("##gff-version 3\n"))
exonCounts = {}
GeneCount = 1
geneRef = {}
with open(input, 'rU') as infile:
for line in infile:
line = line.strip()
contig, source, feature, start, end, score, strand, phase, attributes = line.split('\t')
source = 'CodingQuarry'
ID,Parent,Name = (None,)*3
info = attributes.split(';')
for x in info:
if x.startswith('ID='):
ID = x.replace('ID=', '')
elif x.startswith('Parent='):
Parent = x.replace('Parent=', '')
elif x.startswith('Name='):
Name = x.replace('Name=', '')
if ID and ' ' in ID:
ID = ID.split(' ')[0]
if Parent and ' ' in Parent:
Parent = Parent.split(' ')[0]
if feature == 'gene':
geneID = 'gene_'+str(GeneCount)
transID = 'transcript_'+str(GeneCount)+'-T1'
#if not ID in geneRef:
# geneRef[ID] = (geneID, transID)
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Name={:};Alias={:};\n'.format(contig,source,feature, start, end, score, strand, phase, geneID, geneID, ID))
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:};Parent={:};Alias={:};\n'.format(contig,source,'mRNA', start, end, '.', strand, '.',transID, geneID, ID))
GeneCount += 1
elif feature == 'CDS':
trimID = ID.replace('CDS:', '')
#if trimID in geneRef:
# geneID,transID = geneRef.get(trimID)
if not transID in exonCounts:
exonCounts[transID] = 1
else:
exonCounts[transID] += 1
num = exonCounts.get(transID)
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:}.exon{:};Parent={:};\n'.format(contig,source,'exon', start, end, '.', strand, '.',transID, num, transID))
outfile.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\tID={:}.cds;Parent={:};\n'.format(contig,source,feature, start, end, score, strand, phase, transID, transID))
def runStringtie(bamfile, cpus, output):
'''
Function to run stringtie from bamfile
    Note that when given only a bamfile there is no way to determine strandedness, so it will run unstranded
'''
cmd = ['stringtie', '-p', str(cpus), os.path.realpath(bamfile)]
runSubprocess2(cmd, '.', log, os.path.abspath(output))
def runCodingQuarry(genome, stringtie, cpus, output):
'''
run CodingQuarry from stringtie GTF input file
'''
    #first get the base directory, as we need to create a tmp CodingQuarry dir
basedir = os.path.dirname(genome)
tmpdir = os.path.join(basedir, 'CodingQuarry')
if os.path.isdir(tmpdir):
SafeRemove(tmpdir)
os.makedirs(tmpdir)
    #check for the QUARRY_PATH environment variable; copy QuarryFiles locally if it is not set
try:
QUARRY_PATH = os.environ["QUARRY_PATH"]
except KeyError:
shutil.copytree(os.path.join(os.path.dirname(which_path('CodingQuarry')), 'QuarryFiles'), os.path.join(tmpdir,'QuarryFiles'))
#convert GTF to GFF3 file
stringtieGFF3 = os.path.join(basedir, 'stringtie.gff3')
Genes = gtf2dict(stringtie)
Stringtie_dict2gff3(Genes, stringtieGFF3)
#now setup command and run from tmpdir folder
cmd = ['CodingQuarry', '-p', str(cpus), '-f', os.path.realpath(genome), '-t', os.path.realpath(stringtieGFF3)]
runSubprocess(cmd, tmpdir, log)
#capture results and reformat to proper GFF3
result = os.path.join(tmpdir, 'out', 'PredictedPass.gff3')
if not checkannotations(result):
log.error('CodingQuarry failed, moving on without result, check logfile')
return False
else:
Quarry2GFF3(result, output)
return True
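#Sketch of chaining the two wrappers above; paths and cpu count are illustrative:
#   runStringtie('alignments.bam', 8, 'stringtie.gtf')
#   success = runCodingQuarry('genome.fasta', 'stringtie.gtf', 8, 'codingquarry.gff3')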
def dict2gtf(input, output):
from collections import OrderedDict
def _sortDict(d):
return (d[1]['contig'], d[1]['location'][0])
#sort the annotations by contig and start location
sGenes = sorted(input.iteritems(), key=_sortDict)
sortedGenes = OrderedDict(sGenes)
with open(output, 'w') as gtfout:
for k,v in sortedGenes.items():
if v['type'] != 'mRNA':
continue
if 'pseudo' in v:
if v['pseudo']:
continue
if v['type'] == 'mRNA' and not v['CDS']:
continue
if v['type'] == 'mRNA' and not len(v['ids']) == len(v['mRNA']) == len(v['CDS']):
continue
for i in range(0,len(v['ids'])):
#create attributes string
attributes = 'gene_id "{:}"; transcript_id "{:}";'.format(k,v['ids'][i])
#if v['name']:
# attributes = attributes + ' Name "{:}";'.format(v['name'])
if len(v['5UTR'][i]) > 0:
for utr in v['5UTR'][i]:
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(v['contig'], v['source'], '5UTR', utr[0], utr[1], 0, v['strand'], 0, attributes))
if not v['partialStart'][i]:
if v['strand'] == '+':
startCodon = (v['CDS'][i][0][0], v['CDS'][i][0][0]+2)
else:
startCodon = (v['CDS'][i][0][1]-2, v['CDS'][i][0][1])
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(v['contig'], v['source'], 'start_codon', startCodon[0], startCodon[1], 0, v['strand'], 0, attributes))
for x,cds in enumerate(v['CDS'][i]):
if v['partialStop'][i]: #then just write the whole CDS as no reason to move codon back
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(v['contig'], v['source'], 'CDS', cds[0], cds[1], 0, v['strand'], v['phase'][i][x], attributes))
else:
if v['strand'] == '+':
if x == len(v['CDS'][i])-1: #this is last one
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(v['contig'], v['source'], 'CDS', cds[0], cds[1]-3, 0, v['strand'], v['phase'][i][x], attributes))
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(v['contig'], v['source'], 'stop_codon', cds[1]-2, cds[1], 0, v['strand'], 0, attributes))
else:
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(v['contig'], v['source'], 'CDS', cds[0], cds[1], 0, v['strand'], v['phase'][i][x], attributes))
else:
if x == len(v['CDS'][i])-1: #this is last one
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(v['contig'], v['source'], 'CDS', cds[0]+3, cds[1], 0, v['strand'], v['phase'][i][x], attributes))
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(v['contig'], v['source'], 'stop_codon', cds[0], cds[0]+2, 0, v['strand'], 0, attributes))
else:
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(v['contig'], v['source'], 'CDS', cds[0], cds[1], 0, v['strand'], v['phase'][i][x], attributes))
if len(v['3UTR'][i]) > 0:
for utr in v['3UTR'][i]:
gtfout.write('{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\t{:}\n'.format(v['contig'], v['source'], '3UTR', utr[0], utr[1], 0, v['strand'], 0, attributes))
gtfout.write('\n')
def gff3_to_gtf(input, genome, output):
Genes = {}
Genes = gff2dict(input, genome, Genes)
dict2gtf(Genes, output)
def gb2allout(input, GFF, Proteins, Transcripts, DNA):
'''
function to split GBK file into parts, need to be able to deal with multiple transcripts and get naming correct
assumption is that the mRNA and CDS features from multiple transcripts are in order, i.e. the first mRNA feature
you see corresponds to first CDS feature, etc. **hopefully this is an okay assumption**
'''
#idea is to populate the dictionary first, then write GFF, proteins, transcripts, can write DNA on first pass
genes = {}
with open(DNA, 'w') as scaffolds:
with open(input, 'rU') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
scaffolds.write(">{:}\n{:}\n".format(record.id, softwrap(str(record.seq))))
for f in record.features:
gb_feature_add2dict(f, record, genes)
#write GFF
dict2gff3(genes, GFF)
#write to protein and transcripts
dict2nucleotides(genes, Proteins, Transcripts)
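#Example usage of gb2allout (hypothetical file names; splits one annotated GenBank file
#into GFF3, protein FASTA, transcript FASTA, and scaffold FASTA outputs):
#   gb2allout('MyGenome.gbk', 'MyGenome.gff3', 'MyGenome.proteins.fa',
#             'MyGenome.transcripts.fa', 'MyGenome.scaffolds.fa')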
def minimap2Align(transcripts, genome, cpus, intron, output):
'''
function to align transcripts to genome using minimap2
huge speed increase over gmap + blat
'''
bamthreads = int(round(int(cpus) / 2))
if bamthreads > 4:
bamthreads = 4
minimap2_cmd = ['minimap2', '-ax', 'splice', '-t', str(cpus), '--cs', '-u', 'b', '-G', str(intron), genome, transcripts]
cmd = [os.path.join(parentdir, 'util', 'sam2bam.sh'), " ".join(minimap2_cmd),str(bamthreads), output]
runSubprocess(cmd, '.', log)
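#Example usage of minimap2Align (hypothetical inputs; assumes minimap2 and samtools are
#available because the sam2bam.sh helper pipes the SAM output into a sorted BAM):
#   minimap2Align('trinity.transcripts.fa', 'genome.softmasked.fa', 8, 3000, 'transcripts.minimap2.bam')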
def iso_seq_minimap2(transcripts, genome, cpus, intron, output):
'''
function to align PB iso-seq data
'''
bamthreads = int(round(int(cpus) / 2))
if bamthreads > 4:
bamthreads = 4
minimap2_cmd = ['minimap2', '-ax', 'splice', '-t', str(cpus), '--cs', '-uf', '-C5', '-G', str(intron), genome, transcripts]
cmd = [os.path.join(parentdir, 'util', 'sam2bam.sh'), " ".join(minimap2_cmd), str(bamthreads), output]
runSubprocess(cmd, '.', log)
def nanopore_cDNA_minimap2(transcripts, genome, cpus, intron, output):
'''
    function to align nanopore 2D cDNA reads to the genome using minimap2
'''
bamthreads = int(round(int(cpus) / 2))
if bamthreads > 4:
bamthreads = 4
minimap2_cmd = ['minimap2', '-ax', 'splice', '-t', str(cpus), '--cs', '-G', str(intron), genome, transcripts]
cmd = [os.path.join(parentdir, 'util', 'sam2bam.sh'), " ".join(minimap2_cmd), str(bamthreads), output]
runSubprocess(cmd, '.', log)
def nanopore_mRNA_minimap2(transcripts, genome, cpus, intron, output):
'''
    function to align nanopore direct mRNA reads to the genome using minimap2
'''
bamthreads = int(round(int(cpus) / 2))
if bamthreads > 4:
bamthreads = 4
minimap2_cmd = ['minimap2', '-ax', 'splice', '-t', str(cpus), '--cs', '-uf', '-k14', '-G', str(intron), genome, transcripts]
cmd = [os.path.join(parentdir, 'util', 'sam2bam.sh'), " ".join(minimap2_cmd), str(bamthreads), output]
runSubprocess(cmd, '.', log)
def mergeBAMs(*args, **kwargs):
cmd = ['samtools', 'merge', '-@', str(kwargs['cpus']), kwargs['output']]
cmd = cmd + list(args)
runSubprocess(cmd, '.', log)
def catFiles(*args, **kwargs):
cmd = ['cat']
cmd = cmd + list(args)
runSubprocess2(cmd, '.', log, kwargs['output'])
def runGMAP(transcripts, genome, cpus, intron, tmpdir, output):
#first build genome database
build_log = os.path.join(tmpdir, 'gmap-build.log')
with open(build_log, 'w') as logfile:
subprocess.call(['gmap_build', '-D', tmpdir, '-d', 'genome', '-k', '13', genome], stdout = logfile, stderr = logfile)
#now map transcripts
map_log = os.path.join(tmpdir, 'gmap-map.log')
with open(map_log, 'w') as logfile:
with open(output, 'w') as out:
subprocess.call(['gmap', '--cross-species', '-f', '3', '-K', str(intron), '-n', '1', '-t', str(cpus), '-B', '5', '-D', tmpdir, '-d', 'genome', transcripts], stdout = out, stderr = logfile)
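#Example usage of runGMAP (hypothetical paths; gmap_build writes its index into tmpdir and
#the transcript alignments are written to the output file):
#   runGMAP('transcripts.fa', 'genome.fa', 8, 3000, 'gmap_tmp', 'gmap.alignments.gff3')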
def runBUSCO(input, Database, cpus, tmpdir, output):
#run busco in protein mapping mode
BUSCO = os.path.join(UTIL, 'funannotate-BUSCO2.py')
cmd = [BUSCO, '-i', input, '-m', 'proteins', '-l', Database, '-o', 'busco', '-c', str(cpus), '-f']
runSubprocess(cmd, tmpdir, log)
#now parse output and write to annotation file
with open(output, 'w') as out:
with open(os.path.join(tmpdir, 'run_busco', 'full_table_busco.tsv'), 'rU') as busco:
for line in busco:
if line.startswith('#'):
continue
col = line.split('\t')
if col[1] == 'Complete' or col[1] == 'Duplicated': #if diploid these should show up, but problematic for drawing trees....
out.write("%s\tnote\tBUSCO:%s\n" % (col[2], col[0]))
def dupBUSCO2gff(ID, base_folder, locationID):
hmmerfolder = os.path.join(base_folder, 'hmmer_output')
geneID = ''
AugFile = ''
GFFfile = os.path.join(base_folder, 'augustus_output', 'gffs', ID+'.gff')
if geneID == '':
for file in os.listdir(hmmerfolder):
if file.startswith(ID):
with open(os.path.join(hmmerfolder, file), 'rU') as hmmer:
for line in hmmer:
if not line.startswith('#'):
longID = line.split()[0]
longID = longID.replace(']', '')
partsID = longID.split('[')
if locationID == partsID[1]:
geneID = partsID[0]
AugFile = os.path.join(base_folder, 'augustus_output', 'predicted_genes', file)
break
#so now should have gene name, get the GFF from augustus
with open(GFFfile, 'w') as gffout:
with open(AugFile, 'rU') as augustus:
for pred in readBlocks(augustus, '# start gene'):
if pred[0].startswith('# This output'):
continue
if pred[0].startswith('##gff-version 3'):
continue
if pred[0].startswith('# Please cite'):
continue
if geneID in pred[0]:
for x in pred:
if not x.startswith('#'):
gffout.write(x)
def parseBUSCO2genome(input, ploidy, ContigSizes, output):
#input is BUSCO output, ploidy is integer, ContigSizes is dictionary, output is a bedfile, function returns dictionary
busco_complete = {}
hits = {}
with open(output, 'w') as bedfile:
with open(input, 'rU') as buscoinput:
for line in buscoinput:
line = line.replace('\n', '')
if line.startswith('#'):
continue
cols = line.split('\t')
if cols[1] == 'Complete' or cols[1] == 'Duplicated':
contig = cols[2]
start = cols[3]
end = cols[4]
score = cols[5]
length = cols[6]
ID = contig+':'+start+'-'+end
if cols[1] == 'Complete':
if not cols[0] in hits:
hits[cols[0]] = (ID,score,contig,start,end,length)
if ploidy > 1:
if cols[1] == 'Duplicated':
if not cols[0] in hits:
hits[cols[0]] = (ID,score,contig,start,end,length)
dupBUSCO2gff(cols[0], os.path.dirname(input), ID)
else:
oldscore = float(hits.get(cols[0])[1])
if float(score) > oldscore:
hits[cols[0]] = (ID,score,contig,start,end,length)
dupBUSCO2gff(cols[0], os.path.dirname(input), ID)
for k,v in natsorted(hits.items()):
#validate locations for bedfile, move 100 bp in each direction for bedfile
            contig = v[2] #pull contig name from the stored hit tuple, not from the parsing loop above
            start = int(v[3]) - 100
if start < 1: #negative no good
start = 1
end = int(v[4]) + 100
if end > ContigSizes.get(contig): #check it doesn't go past contig length
end = ContigSizes.get(contig)
bedfile.write('%s\t%i\t%i\t%s\n' % (contig,start,end,k))
busco_complete[k] = v[0]
return busco_complete
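#Example usage of parseBUSCO2genome (hypothetical inputs; ContigSizes is a dictionary of
#contig lengths such as the one returned by checkMask below):
#   complete = parseBUSCO2genome('full_table_busco.tsv', 1, ContigSizes, 'busco.bed')
#   log.info('%i complete BUSCOs written to busco.bed' % len(complete))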
def RepeatBlast(input, cpus, evalue, DataBase, tmpdir, output, diamond=True):
#run blastp against repeats
blast_tmp = os.path.join(tmpdir, 'repeats.xml')
if diamond:
blastdb = os.path.join(DataBase,'repeats.dmnd')
cmd = ['diamond', 'blastp', '--sensitive', '--query', input, '--threads', str(cpus), '--out', blast_tmp, '--db', blastdb, '--evalue', str(evalue), '--max-target-seqs', '1', '--outfmt', '5']
else:
blastdb = os.path.join(DataBase,'REPEATS')
cmd = ['blastp', '-db', blastdb, '-outfmt', '5', '-out', blast_tmp, '-num_threads', str(cpus), '-max_target_seqs', '1', '-evalue', str(evalue), '-query', input]
runSubprocess(cmd, '.', log)
#parse results
with open(output, 'w') as out:
with open(blast_tmp, 'rU') as results:
for qresult in SearchIO.parse(results, "blast-xml"):
hits = qresult.hits
qlen = qresult.seq_len
ID = qresult.id
num_hits = len(hits)
if num_hits > 0:
length = 0
for i in range(0,len(hits[0].hsps)):
length += hits[0].hsps[i].aln_span
pident = hits[0].hsps[0].ident_num / float(length)
out.write("%s\t%s\t%f\t%s\n" % (ID, hits[0].id, pident, hits[0].hsps[0].evalue))
def eggnog2dict(annotations):
#load in annotation dictionary
EggNog = {}
with open(annotations, 'rU') as input:
reader = csv.reader(input, delimiter='\t')
for line in reader:
EggNog[line[1]] = line[5]
return EggNog
def number_present(s):
return any(i.isdigit() for i in s)
def capfirst(x):
return x[0].upper() + x[1:]
def item2index(inputList, item):
#return the index of an item in the input list
item_index = None
for x in inputList:
if item in x:
item_index = inputList.index(x)
return item_index
def getEggNogHeaders(input):
IDi, DBi, OGi, Genei, COGi, Desci = (None,)*6
with open(input, 'rU') as infile:
for line in infile:
line = line.replace('\n', '')
if line.startswith('#query_name'): #this is HEADER
headerCols = line.split('\t')
IDi = item2index(headerCols, 'query_name')
Genei = item2index(headerCols, 'predicted_gene_name')
DBi = item2index(headerCols, 'Annotation_tax_scope')
OGi = item2index(headerCols, 'OGs')
COGi = item2index(headerCols, 'COG cat')
Desci = item2index(headerCols, 'eggNOG annot')
break
return IDi, DBi, OGi, Genei, COGi, Desci
def parseEggNoggMapper(input, output):
Definitions = {}
#indexes from header file
IDi, DBi, OGi, Genei, COGi, Desci = getEggNogHeaders(input)
#take annotations file from eggnog-mapper and create annotations
with open(output, 'w') as out:
with open(input, 'rU') as infile:
for line in infile:
line = line.replace('\n', '')
if line.startswith('#'):
continue
cols = line.split('\t')
ID = cols[IDi]
DB = cols[DBi].split('[')[0]
OGs = cols[OGi].split(',')
NOG = ''
for x in OGs:
if DB in x:
NOG = 'ENOG41'+ x.split('@')[0]
Gene = ''
if cols[Genei] != '':
if not '_' in cols[Genei] and not '.' in cols[Genei] and number_present(cols[Genei]):
Gene = cols[Genei]
Description = cols[Desci]
if NOG == '':
continue
if not NOG in Definitions:
Definitions[NOG] = Description
out.write("%s\tnote\tEggNog:%s\n" % (ID, NOG))
if cols[COGi] != '':
out.write("%s\tnote\tCOG:%s\n" % (ID, cols[COGi].replace(' ','')))
if Gene != '':
product = Gene.lower()+'p'
product = capfirst(product)
out.write("%s\tname\t%s\n" % (ID.split('-T')[0], Gene))
out.write("%s\tproduct\t%s\n" % (ID, product))
if Description != '':
out.write("%s\tnote\t%s\n" % (ID, Description))
return Definitions
def batch_iterator(iterator, batch_size):
entry = True #Make sure we loop once
while entry :
batch = []
while len(batch) < batch_size :
try :
entry = iterator.next()
except StopIteration :
entry = None
if entry is None :
#End of file
break
batch.append(entry)
if batch :
yield batch
def fasta2chunks(input, chunks, tmpdir, output):
#split the input fasta file into 20 chunks to process
with open(input, 'rU') as seqs:
SeqCount = countfasta(input)
SeqRecords = SeqIO.parse(seqs, 'fasta')
chunks = SeqCount / int(chunks)
#divide into chunks, store in tmp file
folder = os.path.join(tmpdir, output)
if not os.path.exists(folder):
os.makedirs(folder)
else:
shutil.rmtree(folder)
os.makedirs(folder)
for i, batch in enumerate(batch_iterator(SeqRecords, chunks)) :
filename = "chunk_%i.fa" % (i+1)
tmpout = os.path.join(folder, filename)
handle = open(tmpout, "w")
count = SeqIO.write(batch, handle, "fasta")
handle.close()
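#Example usage of fasta2chunks (hypothetical names; writes roughly 20 chunk_*.fa files
#into tmpdir/split_prots for downstream parallel processing):
#   fasta2chunks('proteins.fa', 20, 'annotate_misc', 'split_prots')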
def signalP(input, tmpdir, output):
    #split input file into 40 chunks so each chunk holds a manageable number of proteins
fasta2chunks(input, 40, tmpdir, 'signalp_tmp')
for file in os.listdir(os.path.join(tmpdir, 'signalp_tmp')):
if file.startswith('chunk'):
file = os.path.join(tmpdir, 'signalp_tmp', file)
tmp_out = file.replace('.fa', '.signalp.out')
cmd = ['signalp', '-t', 'euk', '-f', 'short', file]
runSubprocess2(cmd, '.', log, tmp_out)
#now concatenate all outputs
if os.path.isfile(output):
os.remove(output)
with open(output, 'a') as finalout:
for file in os.listdir(os.path.join(tmpdir, 'signalp_tmp')):
if file.endswith('.signalp.out'):
file = os.path.join(tmpdir, 'signalp_tmp', file)
with open(file) as infile:
finalout.write(infile.read())
#cleanup tmp directory
shutil.rmtree(os.path.join(tmpdir, 'signalp_tmp'))
def parseSignalP(sigP, secretome_annot):
sigpDict = {}
with open(sigP, 'rU') as results:
for line in results:
line = line.replace('\n', '')
if line.startswith('#'):
continue
col = line.split(' ') #not tab delimited
col = filter(None, col) #clean up empty spaces
if col[9] == 'Y': #then there is signal peptide
ID = col[0]
end = int(col[2]) - 1
sigpDict[ID] = end
with open(secretome_annot, 'w') as secout:
for k,v in natsorted(sigpDict.items()):
secout.write("%s\tnote\tSECRETED:SignalP(1-%s)\n" % (k, v))
def parsePhobiusSignalP(phobius, sigP, membrane_annot, secretome_annot):
#give directory of annotate_misc, first get phobius results
'''
This is what phobius results look like
ID TM SP Prediction
VE00_00001 0 0 o
VE00_00002 2 0 i198-219o283-301i
VE00_00003 0 0 o
VE00_00004 0 Y n8-18c23/24o
VE00_00005 12 0 i49-69o89-107i119-138o144-167i179-200o212-234i280-299o319-341i348-366o378-398i410-430o442-465i
'''
pSecDict = {}
pTMDict = {}
sigpDict = {}
#parsing short format phobius
with open(phobius, 'rU') as input1:
for line in input1:
line = line.replace('\n', '')
if line.startswith('ID\t'):
continue
cols = line.split('\t')
geneID = cols[0]
if int(cols[1]) > 0: #then found TM domain
annot = cols[3]
if '/' in annot:
annotation = annot.split('/')[-1]
if not geneID in pTMDict:
pTMDict[geneID] = 'TransMembrane:'+cols[1]+' ('+annot+')'
if cols[2] == 'Y': #then sig pep discovered
location = cols[3].split('/')[0]
clevage = location.split('c')[-1]
if not geneID in pSecDict:
pSecDict[geneID] = clevage
if sigP: #will be passed FALSE if signalP data missing
#parse signalp output and turn into annotation file
with open(sigP, 'rU') as results:
for line in results:
line = line.replace('\n', '')
if line.startswith('#'):
continue
col = line.split(' ') #not tab delimited
col = filter(None, col) #clean up empty spaces
if col[9] == 'Y': #then there is signal peptide
ID = col[0]
end = int(col[2]) - 1
#save as secreted only if also found in phobius
if ID in pSecDict:
sigpDict[ID] = end
else:
sigpDict = pSecDict
#write annotation files
with open(membrane_annot, 'w') as memout:
for k,v in natsorted(pTMDict.items()):
memout.write("%s\tnote\t%s\n" % (k, v))
with open(secretome_annot, 'w') as secout:
for k,v in natsorted(sigpDict.items()):
secout.write("%s\tnote\tSECRETED:SignalP(1-%s)\n" % (k, v))
def RepeatModelMask(input, cpus, tmpdir, output, repeatlib, debug):
log.info("Loading sequences and soft-masking genome")
outdir = os.path.join(tmpdir, 'RepeatModeler')
input = os.path.abspath(input)
output = os.path.abspath(output)
#lets run RepeatModeler here to get repeat library
if os.path.exists(outdir):
shutil.rmtree(outdir)
os.makedirs(outdir)
log.info("Soft-masking: building RepeatModeler database")
with open(debug, 'a') as debug_log:
subprocess.call(['BuildDatabase', '-name', 'Repeats', input], cwd=outdir, stdout = debug_log, stderr=debug_log)
log.info("Soft-masking: generating repeat library using RepeatModeler")
with open(debug, 'a') as debug_log:
subprocess.call(['RepeatModeler', '-e', 'ncbi', '-database', 'Repeats', '-pa', str(cpus)], cwd=outdir, stdout = debug_log, stderr=debug_log)
#find name of folder
for i in os.listdir(outdir):
if i.startswith('RM_'):
RP_folder = i
library = os.path.abspath(repeatlib)
if checkannotations(os.path.join(outdir, RP_folder, 'consensi.fa.classified')):
shutil.copyfile(os.path.join(outdir, RP_folder, 'consensi.fa.classified'), library)
#now soft-mask the genome for gene predictors
outdir2 = os.path.join(tmpdir, 'RepeatMasker')
if os.path.isdir(outdir2):
shutil.rmtree(outdir2)
os.makedirs(outdir2)
if not os.path.isfile(library):
log.info("Soft-masking: running RepeatMasker with default library (RepeatModeler found 0 models)")
with open(debug, 'a') as debug_log:
subprocess.call(['RepeatMasker', '-e', 'ncbi', '-gff','-species', 'fungi','-pa', str(cpus), '-xsmall', '-dir','.', input], cwd=outdir2, stdout=debug_log, stderr = debug_log)
else:
log.info("Soft-masking: running RepeatMasker with custom library")
with open(debug, 'a') as debug_log:
subprocess.call(['RepeatMasker', '-e', 'ncbi', '-gff','-lib', library, '-pa', str(cpus), '-xsmall', '-dir', '.', input], cwd=outdir2, stdout=debug_log, stderr = debug_log)
for file in os.listdir(outdir2):
if file.endswith('.masked'):
shutil.copyfile(os.path.join(outdir2, file), output)
def RepeatMask(input, library, cpus, tmpdir, output, debug):
FNULL = open(os.devnull, 'w')
input = os.path.abspath(input)
output = os.path.abspath(output)
outdir = os.path.join(tmpdir, 'RepeatMasker')
#now soft-mask the genome for gene predictors
log.info("Soft-masking: running RepeatMasker with custom library")
if not os.path.isdir(outdir):
os.makedirs(outdir)
with open(debug, 'a') as debug_log:
subprocess.call(['RepeatMasker', '-e', 'ncbi', '-lib', os.path.abspath(library), '-pa', str(cpus), '-xsmall', '-dir', 'RepeatMasker', input], stderr = debug_log, stdout=debug_log, cwd = tmpdir)
for file in os.listdir(outdir):
if file.endswith('.masked'):
os.rename(os.path.join(outdir, file), output)
def RepeatMaskSpecies(input, species, cpus, tmpdir, output, debug):
FNULL = open(os.devnull, 'w')
input = os.path.abspath(input)
output = os.path.abspath(output)
outdir = os.path.join(tmpdir, 'RepeatMasker')
#now soft-mask the genome for gene predictors
log.info("Soft-masking: running RepeatMasker using %s species" % species)
if not os.path.isdir(outdir):
os.makedirs(outdir)
with open(debug, 'a') as debug_log:
subprocess.call(['RepeatMasker', '-e', 'ncbi', '-species', species, '-pa', str(cpus), '-xsmall', '-dir', 'RepeatMasker', input], stderr = debug_log, stdout=debug_log, cwd = tmpdir)
for file in os.listdir(outdir):
if file.endswith('.masked'):
os.rename(os.path.join(outdir, file), output)
def n_lower_chars(string):
return sum(1 for c in string if c.islower())
def CheckAugustusSpecies(input):
#get the possible species from augustus
augustus_list = []
for i in os.listdir(os.path.join(os.environ["AUGUSTUS_CONFIG_PATH"], 'species')):
if not i.startswith('.'):
augustus_list.append(i)
augustus_list = set(augustus_list)
if input in augustus_list:
return True
else:
return False
def SortRenameHeaders(input, output):
#sort records and write temp file
with open(output, 'w') as out:
with open(input, 'rU') as input:
records = list(SeqIO.parse(input, 'fasta'))
records.sort(cmp=lambda x,y: cmp(len(y),len(x)))
counter = 1
for rec in records:
rec.name = ''
rec.description = ''
rec.id = 'scaffold_' + str(counter)
counter +=1
SeqIO.write(records, out, 'fasta')
#via https://stackoverflow.com/questions/2154249/identify-groups-of-continuous-numbers-in-a-list
def list2groups(L):
if len(L) < 1:
return
first = last = L[0]
for n in L[1:]:
if n - 1 == last: # Part of the group, bump the end
last = n
else: # Not part of the group, yield current group and start a new
yield first, last
first = last = n
yield first, last # Yield the last group
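#Example: list(list2groups([1, 2, 3, 7, 8, 10])) returns [(1, 3), (7, 8), (10, 10)],
#i.e. runs of consecutive masked positions are collapsed into (start, end) tuples.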
def checkMask(genome, bedfile):
from Bio.SeqIO.FastaIO import SimpleFastaParser
#load contig names and sizes into dictionary, get masked repeat stats
GenomeLength = 0
maskedSize = 0
masked = {}
ContigSizes = {}
with open(genome, 'rU') as input:
for header, Seq in SimpleFastaParser(input):
if ' ' in header:
ID = header.split(' ')[0]
else:
ID = header
if not ID in masked:
masked[ID] = []
if not ID in ContigSizes:
ContigSizes[ID] = len(Seq)
GenomeLength += len(Seq)
maskedSize += n_lower_chars(Seq)
for i,c in enumerate(Seq):
if c.islower():
masked[ID].append(i) #0 based
    if maskedSize == 0: #genome is not softmasked, write an empty bedfile and return zero masked stats
with open(bedfile, 'w') as bedout:
bedout.write('')
return ContigSizes, GenomeLength, maskedSize, 0.0
else:
counter = 1
with open(bedfile, 'w') as bedout:
for k,v in natsorted(masked.items()):
repeats = list(list2groups(v))
for item in repeats:
if len(item) == 2:
bedout.write('{:}\t{:}\t{:}\tRepeat_{:}\n'.format(k,item[0], item[1], counter))
counter += 1
percentMask = maskedSize / float(GenomeLength)
return ContigSizes, GenomeLength, maskedSize, percentMask
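#Example usage of checkMask (hypothetical file names; the genome is expected to be
#softmasked, i.e. repeats in lowercase):
#   ContigSizes, GenomeLength, maskedSize, percentMask = checkMask('genome.softmasked.fa', 'repeatmasker.bed')
#   log.info('{0:.2%} of the genome is masked'.format(percentMask))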
def maskingstats2bed(input, counter, alock):
from Bio.SeqIO.FastaIO import SimpleFastaParser
masked = []
maskedSize = 0
bedfilename = input.replace('.fasta', '.bed')
with open(input, 'rU') as infile:
for header, Seq in SimpleFastaParser(infile):
if ' ' in header:
ID = header.split(' ')[0]
else:
ID = header
for i,c in enumerate(Seq):
if c.islower():
masked.append(i) #0 based
maskedSize += 1
    if maskedSize > 0: #only write a bedfile if this contig actually contains softmasked bases
with open(bedfilename, 'w') as bedout:
repeats = list(list2groups(masked))
for item in repeats:
if len(item) == 2:
bedout.write('{:}\t{:}\t{:}\tRepeat_\n'.format(ID, item[0], item[1]))
with alock:
counter.value += maskedSize
def mask_safe_run(*args, **kwargs):
"""Call run(), catch exceptions."""
try: maskingstats2bed(*args, **kwargs)
except Exception as e:
print("error: %s run(*%r, **%r)" % (e, args, kwargs))
def checkMasklowMem(genome, bedfile, cpus):
from Bio.SeqIO.FastaIO import SimpleFastaParser
#load contig names and sizes into dictionary, get masked repeat stats
maskedSize = 0
masked = {}
ContigSizes = {}
tmpdir = os.path.join(os.path.dirname(genome), 'mask_'+str(os.getpid()))
os.makedirs(tmpdir)
file_list = []
with open(genome, 'rU') as input:
for header, Seq in SimpleFastaParser(input):
if ' ' in header:
ID = header.split(' ')[0]
else:
ID = header
if not ID in ContigSizes:
ContigSizes[ID] = len(Seq)
with open(os.path.join(tmpdir, ID+'.fasta'), 'w') as fastaout:
fastaout.write('>{:}\n{:}\n'.format(ID, Seq))
file_list.append(os.path.join(tmpdir, ID+'.fasta'))
#num = 1
p = multiprocessing.Pool(processes=cpus)
TotalMask = multiprocessing.Manager().Value('i', 0)
lock = multiprocessing.Manager().Lock()
result = []
for i in file_list:
result.append(p.apply_async(mask_safe_run, [i, TotalMask, lock]))
p.close()
p.join()
repeatNum = 1
with open(bedfile, 'w') as bedout:
for file in natsorted(os.listdir(tmpdir)):
if file.endswith('.bed'):
with open(os.path.join(tmpdir, file), 'rU') as infile:
for line in infile:
line = line.replace('Repeat_', 'Repeat_'+str(repeatNum))
bedout.write(line)
repeatNum += 1
SafeRemove(tmpdir)
GenomeLength = sum(ContigSizes.values())
percentMask = TotalMask.value / float(GenomeLength)
return ContigSizes, GenomeLength, TotalMask.value, percentMask
def RunGeneMarkES(command, input, ini, maxintron, softmask, cpus, tmpdir, output, fungus):
#make directory to run script from
outdir = os.path.join(tmpdir, 'genemark')
if not os.path.isdir(outdir):
os.makedirs(outdir)
contigs = os.path.abspath(input)
log.info("Running GeneMark-ES on assembly")
cmd = [command, '--ES', '--max_intron', str(maxintron), '--soft_mask', str(softmask), '--cores', str(cpus), '--sequence', contigs]
if fungus == 'fungus':
cmd = cmd + ['--fungus']
if ini:
cmd = cmd + ['--ini_mod', os.path.abspath(ini)]
runSubprocess3(cmd, outdir, log)
#rename results and grab mod file
try:
os.rename(os.path.join(outdir,'output','gmhmm.mod'), os.path.join(tmpdir, 'gmhmm.mod'))
except OSError:
log.error("GeneMark-ES failed: {:} file missing, please check logfiles.".format(os.path.join(outdir,'output','gmhmm.mod')))
#convert genemark gtf to gff3 so GAG can interpret it
gm_gtf = os.path.join(outdir, 'genemark.gtf')
if checkannotations(gm_gtf):
log.info("Converting GeneMark GTF file to GFF3")
with open(output, 'w') as out:
subprocess.call([GeneMark2GFF, gm_gtf], stdout = out)
def RunGeneMarkET(command, input, ini, evidence, maxintron, softmask, cpus, tmpdir, output, fungus):
#make directory to run script from
outdir = os.path.join(tmpdir, 'genemark')
if not os.path.isdir(outdir):
os.makedirs(outdir)
contigs = os.path.abspath(input)
#get only intron information from evidence
hintsfile = os.path.join(tmpdir, 'genemark.intron-hints.gff')
with open(hintsfile, 'w') as hints:
with open(evidence, 'rU') as evid:
for line in evid:
if '\tintron\t' in line and '\tb2h\t' in line:
hints.write(line)
log.info("Running GeneMark-ET on assembly")
cmd = [command, '--ET', os.path.abspath(hintsfile), '--max_intron', str(maxintron), '--soft_mask', str(softmask), '--cores', str(cpus), '--sequence', contigs]
if fungus == 'fungus':
cmd = cmd + ['--fungus']
if ini:
cmd = cmd + ['--ini_mod', os.path.abspath(ini)]
runSubprocess3(cmd, outdir, log)
#rename results and grab mod file
try:
os.rename(os.path.join(outdir,'output','gmhmm.mod'), os.path.join(tmpdir, 'gmhmm.mod'))
except OSError:
log.error("GeneMark-ET failed: {:} file missing, please check logfiles.".format(os.path.join(outdir,'output','gmhmm.mod')))
#convert genemark gtf to gff3 so GAG can interpret it
gm_gtf = os.path.join(outdir, 'genemark.gtf')
if checkannotations(gm_gtf):
log.info("Converting GeneMark GTF file to GFF3")
with open(output, 'w') as out:
subprocess.call([GeneMark2GFF, gm_gtf], stdout = out)
def MemoryCheck():
import psutil
mem = psutil.virtual_memory()
RAM = int(mem.total)
return round(RAM / 1024000000)
def systemOS():
if sys.platform == 'darwin':
system_os = 'MacOSX '+ platform.mac_ver()[0]
elif sys.platform == 'linux':
linux_version = platform.linux_distribution()
system_os = linux_version[0]+ ' '+linux_version[1]
else:
system_os = sys.platform
return system_os
def SystemInfo():
system_os = systemOS()
python_vers = str(sys.version_info[0])+'.'+str(sys.version_info[1])+'.'+str(sys.version_info[2])
log.info("OS: %s, %i cores, ~ %i GB RAM. Python: %s" % (system_os, multiprocessing.cpu_count(), MemoryCheck(), python_vers))
def runtRNAscan(input, tmpdir, output):
tRNAout = os.path.join(tmpdir, 'tRNAscan.out')
tRNAlenOut = os.path.join(tmpdir, 'tRNAscan.len-filtered.out')
if os.path.isfile(tRNAout): #tRNAscan can't overwrite file, so check first
os.remove(tRNAout)
cmd = ['tRNAscan-SE', '-o', tRNAout, input]
runSubprocess(cmd, '.', log)
#enforce NCBI length rules
with open(tRNAlenOut, 'w') as lenOut:
with open(tRNAout, 'rU') as infile:
for line in infile:
if line.startswith('Sequence') or line.startswith('Name') or line.startswith('--------'):
lenOut.write('%s' % line)
else:
cols = line.split('\t')
start = cols[2]
end = cols[3]
if int(start) < int(end):
length = abs(int(end) - int(start))
else:
length = abs(int(start) - int(end))
if length < 50 or length > 150:
continue
else:
lenOut.write('%s' % line)
#now convert to GFF3
trna2gff = os.path.join(UTIL, 'trnascan2gff3.pl')
with open(output, 'w') as out:
subprocess.call(['perl', trna2gff, '--input', tRNAlenOut], stdout = out)
log.info('Found {0:,}'.format(countGFFgenes(output)) +' tRNA gene models')
def list_slice(S, step):
return [S[i::step] for i in range(step)]
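#Example: list_slice([9, 8, 7, 6, 5], 2) returns [[9, 7, 5], [8, 6]]; dealing the
#size-sorted records out round-robin keeps the resulting chunks roughly balanced.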
def split_tbl2asn(folder):
'''
function to chunk the genome and annotation files into parts if > 10,000 contigs to
conform to NCBI recommendations and avoid the 2GB threshold of sequin files
'''
from Bio.SeqIO.FastaIO import SimpleFastaParser
numSeqs = 0
genomeSize = 0
with open(os.path.join(folder, 'genome.fsa'), 'rU') as fastain:
for Header, Seq in SimpleFastaParser(fastain):
numSeqs += 1
genomeSize += len(Seq)
#if less than 10,000 contigs and less than 100 MB, then don't split and just run it
if numSeqs < 10000 and genomeSize < int(100e6):
#move to subfolder for multiprocessing to work correctly
if os.path.isdir(os.path.join(folder, '1')):
SafeRemove(os.path.join(folder, '1'))
os.makedirs(os.path.join(folder, '1'))
shutil.copyfile(os.path.join(folder, 'genome.fsa'), os.path.join(folder, '1', 'genome.fsa'))
shutil.copyfile(os.path.join(folder, 'genome.tbl'), os.path.join(folder, '1', 'genome.tbl'))
else:
# rounded_up = -(-numerator // denominator) #nice trick to round up
if genomeSize > int(100e6):
chunks = -(-genomeSize // int(100e6)) #split into 100 MB chunks
else:
chunks = -(-numSeqs // 10000)
Records = []
with open(os.path.join(folder, 'genome.fsa'), 'rU') as fastain:
for tup in SimpleFastaParser(fastain):
Records.append(tup)
#sort the fasta tuples by size
Records.sort(cmp=lambda x,y: cmp(len(y),len(x)))
#shuffle them into lists like dealing playing cards then all chunks have similar sizes
sliced_records = list_slice(Records, chunks)
#loop through and add headers to dictionary for tbl splitting lookup
headers = {}
for i,x in enumerate(sliced_records):
if os.path.isdir(os.path.join(folder, str(i+1))):
SafeRemove(os.path.join(folder, str(i+1)))
os.makedirs(os.path.join(folder, str(i+1)))
with open(os.path.join(folder, str(i+1), 'genome'+str(i+1)+'.fsa'), 'w') as outfile:
for seq in x:
outfile.write('>{:}\n{:}\n'.format(seq[0], seq[1]))
headers[seq[0]] = i+1
#now parse tbl file and split in same way as fasta files
with open(os.path.join(folder, 'genome.tbl'), 'rU') as tblin:
for contig in readBlocks(tblin, '>Feature'):
ID = contig[0].split(' ')[-1].rstrip()
filenum = None
if ID in headers:
filenum = headers.get(ID)
if filenum:
with open(os.path.join(folder, str(filenum), 'genome'+str(filenum)+'.tbl'), 'a') as tblout:
tblout.write(''.join(contig))
def tbl2asn_safe_run(*args, **kwargs):
"""Call run(), catch exceptions."""
try: tbl2asn_runner(*args, **kwargs)
except Exception as e:
print("error: %s run(*%r, **%r)" % (e, args, kwargs))
def tbl2asn_runner(cmd, dir):
cmd = cmd + ['-Z', os.path.join(dir, 'discrepency.report.txt'), '-p', dir]
runSubprocess(cmd, '.', log)
def runtbl2asn_parallel(folder, template, discrepency, organism, isolate, strain, parameters, version, cpus):
'''
function to run NCBI tbl2asn
'''
    #make sure output files that will be appended to are not already there
for file in [os.path.join(folder, 'genome.val'), os.path.join(folder, 'errorsummary.val'), os.path.join(folder, 'genome.gbf'), discrepency]:
SafeRemove(file)
#get funannotate version
fun_version = get_version()
#input should be a folder
if not os.path.isdir(folder):
log.error("tbl2asn error: %s is not a directory, exiting" % folder)
sys.exit(1)
#based on organism, isolate, strain, construct meta info for -j flag
if not organism:
log.error("tbl2asn error: organism not specified")
sys.exit(1)
meta = "[organism=" + organism + "]"
if isolate:
isolate_meta = "[isolate=" + isolate + "]"
meta = meta + " " + isolate_meta
if strain:
strain_meta = "[strain=" + strain + "]"
meta = meta + " " + strain_meta
cmd = ['tbl2asn', '-y', '"Annotated using '+fun_version+'"', '-N', str(version), '-t', template, '-M', 'n', '-j', '"'+meta+'"', '-V', 'b', '-c', 'fx', '-T', '-a', 'r10u']
#check for custom parameters
if parameters:
params = parameters.split(' ')
cmd = cmd + params
#check for folders in the input folder, if present, run tbl2asn on each folder and then combine
multiple = []
for file in os.listdir(folder):
if os.path.isdir(os.path.join(folder, file)):
multiple.append(os.path.join(folder, file))
if len(multiple) == 0:
multiple.append(folder)
p = multiprocessing.Pool(cpus)
results = []
for i in multiple:
results.append(p.apply_async(tbl2asn_safe_run, (cmd,i)))
p.close()
p.join()
#now collect the results make in main folder
#first delete any of the outputs you might be appending to
with open(os.path.join(folder, 'genome.val'), 'a') as validation:
with open(discrepency, 'a') as discrep:
with open(os.path.join(folder, 'errorsummary.val'), 'a') as summary:
with open(os.path.join(folder, 'genome.gbf'), 'a') as genbank:
for dirName, subdirList, fileList in os.walk(folder, topdown=False):
if len(subdirList) > 0:
continue
for f in fileList:
if f == 'errorsummary.val':
with open(os.path.join(dirName, f)) as infile:
summary.write(infile.read())
elif f.endswith('.val'):
with open(os.path.join(dirName, f)) as infile:
validation.write(infile.read())
elif f.endswith('.gbf'):
with open(os.path.join(dirName, f)) as infile:
genbank.write(infile.read())
elif f.endswith('.tbl'):
shutil.copyfile(os.path.join(dirName, f), os.path.join(folder, f))
elif f.endswith('.sqn'):
shutil.copyfile(os.path.join(dirName, f), os.path.join(folder, f))
elif f == 'discrepency.report.txt':
with open(os.path.join(dirName, f)) as infile:
discrep.write(infile.read())
def runtbl2asn(folder, template, discrepency, organism, isolate, strain, parameters, version):
'''
function to run NCBI tbl2asn
'''
#get funannotate version
fun_version = get_version()
#input should be a folder
if not os.path.isdir(folder):
log.error("tbl2asn error: %s is not a directory, exiting" % folder)
sys.exit(1)
#based on organism, isolate, strain, construct meta info for -j flag
if not organism:
log.error("tbl2asn error: organism not specified")
sys.exit(1)
meta = "[organism=" + organism + "]"
if isolate:
isolate_meta = "[isolate=" + isolate + "]"
meta = meta + " " + isolate_meta
if strain:
strain_meta = "[strain=" + strain + "]"
meta = meta + " " + strain_meta
cmd = ['tbl2asn', '-y', '"Annotated using '+fun_version+'"', '-N', str(version), '-p', folder, '-t', template, '-M', 'n', '-Z', discrepency, '-j', '"'+meta+'"', '-V', 'b', '-c', 'fx', '-T', '-a', 'r10u']
#check for custom parameters
if parameters:
params = parameters.split(' ')
cmd = cmd + params
runSubprocess(cmd, '.', log)
return ' '.join(cmd)
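#Example usage of runtbl2asn (hypothetical values; folder must already contain the
#genome.fsa/genome.tbl pair and template is an NCBI .sbt submission template):
#   runtbl2asn('gag_output', 'template.sbt', 'discrepency.report.txt',
#              'Aspergillus nidulans', None, 'FGSC A4', None, 1)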
def gb2smurf(input, prot_out, smurf_out):
with open(smurf_out, 'w') as smurf:
with open(prot_out, 'w') as proteins:
with open(input, 'rU') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
name = re.sub('[^0-9]','', record.name)
if f.type == "CDS":
proteins.write(">%s\n%s\n" % (f.qualifiers['locus_tag'][0], softwrap(f.qualifiers['translation'][0].rstrip('*'))))
locus_tag = f.qualifiers.get("locus_tag", ["No ID"])[0]
product_name = f.qualifiers.get("product", ["No Description"])[0]
mystart = f.location.start
myend = f.location.end
strand = f.location.strand
if strand == 1:
smurf.write("%s\t%s\t%s\t%s\t%s\n" % (locus_tag, name.lstrip("0"), int(mystart), int(myend), product_name))
else:
smurf.write("%s\t%s\t%s\t%s\t%s\n" % (locus_tag, name.lstrip("0"), int(myend), int(mystart), product_name))
def GAGprotClean(input, output):
'''
gag.py v1 had headers like:
>>evm.model.Contig100.1 protein
gag.py v2 has headers like:
>protein|evm.model.scaffold_1.169 ID=evm.model.scaffold_1.169|Parent=evm.TU.scaffold_1.169|Name=EVM%20prediction%20scaffold_1.169
'''
with open(output, 'w') as outfile:
        with open(input, 'rU') as infile:
for rec in SeqIO.parse(infile, 'fasta'):
if rec.id.startswith('protein|'):
ID = rec.id.replace('protein|', '').split(' ')[0]
else:
ID = rec.id.split(' ')[0]
rec.id = ID
rec.name = ''
rec.description = ''
SeqIO.write(rec, outfile, 'fasta')
def OldRemoveBadModels(proteins, gff, length, repeats, BlastResults, tmpdir, output):
#first run bedtools to intersect models where 90% of gene overlaps with repeatmasker region
repeat_temp = os.path.join(tmpdir, 'genome.repeats.to.remove.gff')
cmd = ['bedtools', 'intersect', '-f', '0.9', '-a', gff, '-b', repeats]
runSubprocess2(cmd, '.', log, repeat_temp)
    #now remove those proteins that do not have valid starts, are less than a certain length, or have internal stops
remove = []
reason = {}
#parse the results from bedtools and add to remove list
with open(repeat_temp, 'rU') as input:
for line in input:
if "\tgene\t" in line:
ninth = line.split('ID=')[-1]
ID = ninth.split(";")[0]
remove.append(ID)
if not ID in reason:
reason[ID] = 'remove_reason=repeat_overlap;'
#parse the results from BlastP search of transposons
with open(BlastResults, 'rU') as input:
for line in input:
col = line.split('\t')
remove.append(col[0])
if not col[0] in reason:
ID = col[0].replace('evm.model.', 'evm.TU.')
reason[ID] = 'remove_reason=repeat_match;'
else:
ID = col[0].replace('evm.model.', 'evm.TU.')
                reason[ID] = 'remove_reason=repeat_overlap|repeat_match;'
#I'm only seeing these models with GAG protein translations, so maybe that is a problem? skip enforcing start with M
with open(proteins, 'rU') as input:
SeqRecords = SeqIO.parse(input, 'fasta')
for rec in SeqRecords:
Seq = str(rec.seq)[:-1]
ID = rec.id.replace('evm.model.', 'evm.TU.')
if len(Seq) < int(length):
remove.append(ID)
if not ID in reason:
reason[ID] = 'remove_reason=seq_too_short;'
if 'XX' in Seq:
remove.append(ID)
                if not ID in reason:
reason[ID] = 'remove_reason=model_span_gap;'
remove = [w.replace('evm.TU.','') for w in remove]
remove = [w.replace('evm.model.','') for w in remove]
remove = set(remove)
if len(remove) > 0:
remove_match = re.compile(r'\b\evm.(.*?:%s)[\.;]\b' % '|'.join(remove))
with open(output, 'w') as out:
with open(os.path.join(tmpdir, 'bad_models.gff'), 'w') as out2:
with open(gff, 'rU') as GFF:
for line in GFF:
if '\tstart_codon\t' in line:
continue
if '\tstop_codon\t' in line:
continue
matchLine = remove_match.search(line)
if not matchLine:
line = re.sub(';Name=.*$', ';', line) #remove the Name attribute as it sticks around in GBK file
out.write(line)
else:
#print matchLine.group()
#print line
if "\tgene\t" in line:
bad_ninth = line.split('ID=')[-1]
bad_ID = bad_ninth.split(";")[0]
bad_reason = reason.get(bad_ID)
if bad_reason:
line = line.replace('\n', ';'+bad_reason+'\n')
#print bad_reason
else:
log.debug("%s was removed in removeBadModels function for unknown reason, please check manually" % bad_ID)
line = line.replace('\n', ';remove_reason=unknown;\n')
#print 'uknown'
out2.write(line)
else: #if nothing to remove, just print out GFF
with open(output, 'w') as out:
with open(gff, 'rU') as GFF:
for line in GFF:
if '\tstart_codon\t' in line:
continue
if '\tstop_codon\t' in line:
continue
line = re.sub(';Name=.*$', ';', line) #remove the Name attribute as it sticks around in GBK file
out.write(line)
def RemoveBadModels(proteins, gff, length, repeats, BlastResults, tmpdir, methods, output):
reason = {}
tooShort = 0
repeat = 0
gapspan = 0
if 'overlap' in methods:
#first run bedtools to intersect models where 90% of gene overlaps with repeatmasker region
repeat_temp = os.path.join(tmpdir, 'genome.repeats.to.remove.gff')
cmd = ['bedtools', 'intersect', '-f', '0.9', '-a', gff, '-b', repeats]
runSubprocess2(cmd, '.', log, repeat_temp)
#parse the results from bedtools and add to remove list
with open(repeat_temp, 'rU') as input:
for line in input:
if "\tgene\t" in line:
ninth = line.split('ID=')[-1]
ID = ninth.split(";")[0]
if not ID in reason:
reason[ID] = 'remove_reason=repeat_overlap;'
repeat += 1
if 'blast' in methods:
#parse the results from BlastP search of transposons
with open(BlastResults, 'rU') as input:
for line in input:
col = line.split('\t')
if not col[0] in reason:
ID = col[0].replace('evm.model.', 'evm.TU.')
reason[ID] = 'remove_reason=repeat_match;'
repeat += 1
else:
ID = col[0].replace('evm.model.', 'evm.TU.')
reason[ID] = 'remove_reason=repeat_overlap|repeat_match;'
#always do these checks
#Look for models that are too short
with open(proteins, 'rU') as input:
SeqRecords = SeqIO.parse(input, 'fasta')
for rec in SeqRecords:
Seq = str(rec.seq)[:-1]
ID = rec.id.replace('evm.model.', 'evm.TU.')
if len(Seq) < int(length):
if not ID in reason:
reason[ID] = 'remove_reason=seq_too_short;'
tooShort += 1
if 'XX' in Seq:
                if not ID in reason:
reason[ID] = 'remove_reason=model_span_gap;'
gapspan += 1
#now read the EVM gene models in Blocks so you can parse gene ID
numTotal = len(reason)
if numTotal > 0:
log.info("Found {:,} gene models to remove: {:,} too short; {:,} span gaps; {:,} transposable elements".format(numTotal,tooShort,gapspan,repeat))
with open(output, 'w') as out:
with open(os.path.join(tmpdir, 'bad_models.gff'), 'w') as out2:
with open(gff, 'rU') as GFF:
for gene_model in readBlocks(GFF, '\n'):
if len(gene_model) > 1:
if gene_model[0].startswith('\n'):
ID = gene_model[1].split('ID=')[-1].split(';')[0]
else:
ID = gene_model[0].split('ID=')[-1].split(';')[0]
if ID in reason:
out2.write('#%s removed; %s\n' % (ID, reason.get(ID)))
for line in gene_model:
if not line.startswith('\n'):
out2.write('%s' % (line))
else:
for line in gene_model:
line = re.sub(';Name=.*$', ';', line) #remove the Name attribute as it sticks around in GBK file
out.write('%s' % (line))
def CleantRNAtbl(GFF, TBL, output):
#clean up genbank tbl file from gag output
#try to read through GFF file, make dictionary of tRNA genes and products
TRNA = {}
matches = []
with open(GFF, 'rU') as gff:
for line in gff:
if line.startswith('#'):
continue
line = line.replace('\n', '')
scaffold, source, feature, start, end, score, orientation, phase, info = line.split('\t')
if feature == 'tRNA':
ID = info.split(';')[0].replace('ID=', '')
ID = ID.replace('-T1', '')
product = info.split('product=')[-1]
TRNA[ID] = product
matches.append(product)
matches = set(matches)
tRNAmatch = re.compile(r'\t\t\tproduct\t%s\n' % '|'.join(matches))
with open(output, 'w') as out:
with open(TBL, 'rU') as input:
for line in input:
if line.startswith('\t\t\tlocus_tag\t'):
out.write(line)
geneID = line.split('locus_tag\t')[-1].replace('\n', '')
if geneID in TRNA:
CurrentProduct = TRNA.get(geneID)
if 'tRNA-Xxx' == CurrentProduct:
out.write("\t\t\tpseudo\n")
elif line.startswith("\t\t\tproduct\ttRNA-Xxx"):
out.write(line)
out.write("\t\t\tpseudo\n")
input.next()
input.next()
elif tRNAmatch.search(line):
out.write(line)
input.next()
input.next()
else: #otherwise just write line
out.write(line)
def getFailedProductNames(input, GeneDict):
#input is NCBI tbl2asn discrepency report, parse to get suspect product names
failed = {}
with open(input, 'rU') as discrep:
for block in readBlocks(discrep, 'DiscRep_'):
if 'DiscRep_SUB:SUSPECT_PRODUCT_NAMES::' in block[0]:
reason = []
for item in block:
if item.startswith('DiscRep_SUB:'):
bad = item.split('::')[-1].rstrip()
if 'features' in bad.lower():
bad = bad.split('features ')[-1]
reason.append(bad)
elif item.startswith('genome:'):
gene = item.split('\t')[-1].strip()
if gene.startswith('DiscRep'):
continue
if gene in GeneDict:
hit = GeneDict.get(gene)
if not hit[0] in failed:
failed[hit[0]] = (hit[1], gene, reason)
return failed
def ParseErrorReport(input, Errsummary, val, Discrep, output, keep_stops):
errors = []
gapErrors = []
remove = []
with open(Errsummary) as summary:
for line in summary:
if 'ERROR' in line:
if 'SEQ_DESCR.OrganismIsUndefinedSpecies' in line or 'SEQ_DESCR.BadOrgMod' in line or 'SEQ_FEAT.MissingTrnaAA' in line or 'SEQ_INST.TerminalNs' in line: #there are probably other errors you are unaware of....
pass
elif 'SEQ_FEAT.NoStop' in line:
if keep_stops:
pass
else:
err = line.split(" ")[-1].rstrip()
errors.append(err)
elif 'SEQ_FEAT.FeatureBeginsOrEndsInGap' in line:
err = line.split(" ")[-1].rstrip()
gapErrors.append(err)
else:
err = line.split(" ")[-1].rstrip()
errors.append(err)
#parse the discrepency report and look for overlapping genes, so far, all have been tRNA's in introns, so just get those for now.
with open(Discrep, 'rU') as discrep:
#process discrepency report into blocks, then look for block headers where overlapping genes are, remove only tRNA models right now
for block in readBlocks(discrep, 'DiscRep_'):
if 'DiscRep_ALL:OVERLAPPING_GENES::' in block[0] or 'DiscRep_SUB:RNA_CDS_OVERLAP::' in block[0]:
for item in block:
if item.startswith('genome:tRNA'):
gene = item.split('\t')[-1].replace('\n', '')
if gene.startswith('DiscRep'):
continue
tRNA = gene + '_tRNA'
exon = gene + '_exon'
remove.append(gene)
remove.append(tRNA)
remove.append(exon)
if 'DiscRep_ALL:FIND_OVERLAPPED_GENES::' in block[0]:
for item in block:
gene = item.split('\t')[-1].replace('\n', '')
if gene.startswith('DiscRep'):
continue
tRNA = gene + '_tRNA'
exon = gene + '_exon'
remove.append(gene)
remove.append(tRNA)
remove.append(exon)
if len(errors) < 1 and len(remove) < 1: #there are no errors, then just remove stop/start codons and move on
with open(output, 'w') as out:
with open(input, 'rU') as GFF:
for line in GFF:
if '\tstart_codon\t' in line:
continue
if '\tstop_codon\t' in line:
continue
out.write(line)
else:
with open(val) as validate:
for line in validate:
if any(x in line for x in errors):
mRNA = line.split("ncbi|")[-1].replace(']', '').rstrip()
gene = mRNA.replace('evm.model', 'evm.TU')
exon = mRNA + '.exon'
mRNA = mRNA + ';'
remove.append(mRNA)
remove.append(gene)
remove.append(exon)
#this is only picking up tRNAs right now, which "probably" is all that it needs to.....but u never know
if any(x in line for x in gapErrors):
cols = line.split(' ')
if 'Gene:' in cols:
gene = line.split('Gene: ')[-1]
gene = gene.split(' ')[0]
tRNA = gene + '_tRNA'
exon = gene + '_exon'
remove.append(gene)
remove.append(tRNA)
remove.append(exon)
#make sure no empty strings
remove = list(filter(None, remove))
remove = set(remove)
remove_match = re.compile(r'\b(?:%s)+\b' % '|'.join(remove))
with open(output, 'w') as out:
with open(input, 'rU') as GFF:
for line in GFF:
if '\tstart_codon\t' in line:
continue
if '\tstop_codon\t' in line:
continue
if not remove_match.search(line):
if '\tgene\t' in line:
line = line.replace('Name=;', '')
out.write(line)
def ParseAntiSmash(input, tmpdir, output, annotations):
log.info("Now parsing antiSMASH results, finding SM clusters")
global bbDomains, bbSubType, BackBone
BackBone = {}; SMCOGs = {}; bbSubType = {}; bbDomains = {}; smProducts = {}
backboneCount = 0; clusterCount = 0; cogCount = 0
#parse antismash genbank to get clusters in bed format and slice the record for each cluster prediction
with open(output, 'w') as antibed:
with open(input, 'rU') as input:
SeqRecords = SeqIO.parse(input, 'genbank')
for record in SeqRecords:
for f in record.features:
locusTag, ID, Parent = (None,)*3
if f.type == "source":
record_start = f.location.start
record_end = f.location.end
if f.type == "cluster":
clusterCount += 1
chr = record.id
start = f.location.start
end = f.location.end
clusternum = f.qualifiers.get("note")[0].replace("Cluster number: ", "")
antibed.write("%s\t%s\t%s\tCluster_%s\t0\t+\n" % (chr, start, end, clusternum))
Domains = []
if f.type == "CDS":
locusTag, ID, Parent = getID(f, f.type)
if not ID:
continue
ID = ID.replace('ncbi_', '')
if f.qualifiers.get('sec_met'):
for k, v in f.qualifiers.items():
if k == 'sec_met':
for i in v:
if i.startswith('Type:'):
type = i.replace('Type: ', '')
backboneCount += 1
BackBone[ID] = type
if i.startswith('NRPS/PKS subtype:'):
subtype = i.replace('NRPS/PKS subtype: ', '')
bbSubType[ID] = subtype
if i.startswith('NRPS/PKS Domain:'):
doms = i.replace('NRPS/PKS Domain: ', '')
doms = doms.split('. ')[0]
Domains.append(doms)
bbDomains[ID] = Domains
for k,v in f.qualifiers.items():
if k == 'note':
for i in v:
if i.startswith('smCOG:'):
COG = i.replace('smCOG: ', '')
COG = COG.split(' (')[0]
SMCOGs[ID] = COG
cogCount += 1
elif not i.startswith('smCOG tree'):
notes = i
smProducts[ID] = notes
log.info("Found %i clusters, %i biosynthetic enyzmes, and %i smCOGs predicted by antiSMASH" % (clusterCount, backboneCount, cogCount))
#now generate the annotations to add to genome
with open(annotations, 'w') as out:
#add product annotations - use bbSubType --> BackBone
for k, v in natsorted(BackBone.items()):
ID = k
if k in bbSubType:
hit = bbSubType.get(k)
if hit == 'NRPS':
hit = 'Nonribosomal Peptide Synthase (NRPS)'
if hit == 'Type I Iterative PKS':
hit = 'Type I Iterative Polyketide synthase (PKS)'
else:
hit = v
if hit == 'terpene':
hit = 'terpene cyclase'
elif hit == 'other':
hit = 'putative secondary metabolism biosynthetic enzyme'
elif hit == 'indole':
hit = 'aromatic prenyltransferase (DMATS family)'
elif hit == 'alkaloid' or hit == 'lignan' or hit == 'saccharide' or hit == 'polyketide':
hit = 'putative ' + hit + ' biosynthetic cluster'
elif hit == 'putative':
hit = 'putative uncategorized biosynthetic cluster'
elif '-' in hit:
hit = 'putative '+ hit + ' biosynthetic cluster'
if hit != 'none':
out.write("%s\tproduct\t%s\n" % (ID, hit))
#add annots from smProducts
for k, v in smProducts.items():
ID = k
if v != 'none' and not 'BLAST' in v:
                out.write("%s\tproduct\t%s\n" % (ID, v))
#add smCOGs into note section
for k, v in SMCOGs.items():
ID = k
if v != 'none':
out.write("%s\tnote\t%s\n" % (ID, v))
def GetClusterGenes(input, GFF, output, annotations):
global dictClusters
#pull out genes in clusters from GFF3, load into dictionary
cmd = ['bedtools', 'intersect','-wo', '-a', input, '-b', GFF]
runSubprocess2(cmd, '.', log, output)
dictClusters = {}
with open(output, 'rU') as input:
for line in input:
cols = line.split('\t')
if cols[8] != 'mRNA':
continue
gene = cols[14].split(';')[0]
gene = gene.replace('ID=', '')
ID = cols[3]
if ID not in dictClusters:
dictClusters[ID] = [gene]
else:
dictClusters[ID].append(gene)
with open(annotations, 'w') as annotout:
for k, v in dictClusters.items():
for i in v:
annotout.write("%s\tnote\tantiSMASH:%s\n" % (i, k))
def splitFASTA(input, outputdir):
if not os.path.isdir(outputdir):
os.makedirs(outputdir)
with open(input, 'rU') as InputFasta:
SeqRecords = SeqIO.parse(InputFasta, 'fasta')
for record in SeqRecords:
name = str(record.id)
outputfile = os.path.join(outputdir, name+'.fa')
with open(outputfile, 'w') as output:
SeqIO.write(record, output, 'fasta')
def genomeStats(input):
from Bio.SeqUtils import GC
lengths = []
GeeCee = []
Genes = 0
tRNA = 0
Prots = 0
locus_tag = ''
organism = None
isolate = None
strain = None
uniqueIso = None
with open(input, 'rU') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
lengths.append(len(record.seq))
GeeCee.append(str(record.seq))
organism = record.annotations['organism'].replace(' Unclassified.', '')
for f in record.features:
if f.type == "source":
isolate = f.qualifiers.get("isolate", [None])[0]
strain = f.qualifiers.get("strain", [None])[0]
if f.type == "CDS":
Prots += 1
if f.type == "gene":
Genes += 1
if Genes == 1:
locus_tag = f.qualifiers.get("locus_tag")[0].split('_')[0]
if f.type == "tRNA":
tRNA += 1
if strain:
log.info("working on %s %s" % (organism, strain))
uniqueIso = strain.replace(' ', '')
elif isolate:
log.info("working on %s %s" % (organism, isolate))
uniqueIso = isolate.replace(' ', '')
else:
log.info("working on %s" % organism)
GenomeSize = sum(lengths)
LargestContig = max(lengths)
ContigNum = len(lengths)
AvgContig = int(round(GenomeSize / ContigNum))
pctGC = round(GC("".join(GeeCee)), 2)
#now get N50
lengths.sort()
nlist = []
for x in lengths:
nlist += [x]*x
if len(nlist) % 2 == 0:
medianpos = int(len(nlist) / 2)
N50 = int((nlist[medianpos] + nlist[medianpos-1]) / 2)
else:
medianpos = int(len(nlist) / 2)
N50 = int(nlist[medianpos])
#return values in a list
return [organism, uniqueIso, locus_tag, "{0:,}".format(GenomeSize)+' bp', "{0:,}".format(LargestContig)+' bp', "{0:,}".format(AvgContig)+' bp', "{0:,}".format(ContigNum), "{0:,}".format(N50)+' bp', "{:.2f}".format(pctGC)+'%', "{0:,}".format(Genes), "{0:,}".format(Prots), "{0:,}".format(tRNA)]
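#Example usage of genomeStats (hypothetical file; the returned list holds formatted strings:
#organism, isolate/strain, locus_tag prefix, genome size, largest contig, average contig,
#contig count, N50, %GC, gene count, protein count, and tRNA count):
#   stats = genomeStats('MyGenome.gbk')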
def MEROPS2dict(input):
dict = {}
with open(input, 'rU') as fasta:
for line in fasta:
if line.startswith('>'):
cols = line.split(' ')
ID = cols[0].replace('>', '')
family = cols[1].replace('\n', '')
dict[ID] = family
return dict
def getEggNogfromNote(input):
dict = {}
with open(input, 'rU') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
for k,v in f.qualifiers.items():
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('EggNog:'):
hit = i.replace('EggNog:', '')
if not ID in dict:
dict[ID] = hit
return dict
def getStatsfromNote(input, word, Database):
dict = {}
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
with open(input, 'rU') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
for k,v in f.qualifiers.items():
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith(word+':'):
hit = i.replace(word+':', '')
if hit.startswith('MER'): #change to family name
hit = meropsDict.get(hit)
if not hit in dict:
dict[hit] = [ID]
else:
dict[hit].append(ID)
return dict
def getSMBackbones(input):
dict = {'NRPS': 0, 'PKS': 0, 'Hybrid': 0}
with open(input, 'rU') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
for f in record.features:
if f.type == 'CDS':
product = f.qualifiers['product'][0]
if not product == 'hypothetical protein':
ID = f.qualifiers['locus_tag'][0]
if product == "Hybrid PKS-NRPS":
dict['Hybrid'] += 1
if product == "Nonribosomal Peptide Synthase (NRPS)":
dict['NRPS'] += 1
if 'Polyketide synthase (PKS)' in product:
dict['PKS'] += 1
return dict
def parseGOterms(input, folder, genome):
with open(os.path.join(folder, 'associations.txt'), 'a') as assoc:
with open(os.path.join(folder, genome+'.txt'), 'w') as terms:
with open(input, 'rU') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
GOS = []
for k,v in f.qualifiers.items():
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('GO'):
go_term = i.split(' ')[1]
GOS.append(go_term)
if GOS:
assoc.write("%s\t%s\n" % (ID, ";".join(GOS)))
terms.write("%s\n" % ID)
def getStatsfromDbxref(input, word):
dict = {}
with open(input, 'rU') as gbk:
SeqRecords = SeqIO.parse(gbk, 'genbank')
for record in SeqRecords:
for f in record.features:
if f.type == 'CDS':
try:
ID = f.qualifiers['locus_tag'][0]
except KeyError:
log.debug("%s has no locus_tag, skipping")
continue
for k,v in f.qualifiers.items():
if k == 'db_xref':
for i in v:
if i.startswith(word+':'):
hit = i.replace(word+':', '')
if not hit in dict:
dict[hit] = [ID]
else:
dict[hit].append(ID)
return dict
def getGBKannotation(input, Database):
'''
Function will loop through GBK file pulling out funannotate functional annotation
and returning a list of dictionaries for each annotation class
'''
#convert merops on the fly, need database
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
SMs = {'NRPS': 0, 'PKS': 0, 'Hybrid': 0}
pfams = {}
iprs = {}
nogs = {}
cogs = {}
merops = {}
cazys = {}
secreted = {}
membrane = {}
buscos = {}
secmet = {}
with open(input, 'rU') as infile:
for record in SeqIO.parse(infile, 'genbank'):
for f in record.features:
locusTag,ID,Parent = (None,)*3
if f.type == 'CDS':
locusTag,ID,Parent = getID(f, f.type)
if not ID:
continue
product = f.qualifiers['product'][0]
if product == "Hybrid PKS-NRPS":
SMs['Hybrid'] += 1
if product == "Nonribosomal Peptide Synthase (NRPS)":
SMs['NRPS'] += 1
if 'Polyketide synthase (PKS)' in product:
SMs['PKS'] += 1
for k,v in f.qualifiers.items():
if k == 'db_xref':
for i in v:
if i.startswith('PFAM:'):
hit = i.replace('PFAM:', '')
if not hit in pfams:
pfams[hit] = [ID]
else:
pfams[hit].append(ID)
elif i.startswith('InterPro:'):
hit = i.replace('InterPro:', '')
if not hit in iprs:
iprs[hit] = [ID]
else:
iprs[hit].append(ID)
if k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('EggNog:'):
hit = i.replace('EggNog:', '')
if not ID in nogs:
nogs[ID] = hit
elif i.startswith('BUSCO:'):
hit = i.replace('BUSCO:', '')
if not hit in buscos:
buscos[hit] = [ID]
else:
buscos[hit].append(ID)
elif i.startswith('MEROPS:'): #change to family name
hit = i.replace('MEROPS:', '')
hit = meropsDict.get(hit)
if not hit in merops:
merops[hit] = [ID]
else:
merops[hit].append(ID)
elif i.startswith('CAZy:'):
hit = i.replace('CAZy:', '')
if not hit in cazys:
cazys[hit] = [ID]
else:
cazys[hit].append(ID)
elif i.startswith('COG:'):
hit = i.replace('COG:', '')
hits = hit.split(',')
for x in hits:
if not x in cogs:
cogs[x] = [ID]
else:
cogs[x].append(ID)
elif i.startswith('SECRETED:'):
hit = i.replace('SECRETED:', '')
if not hit in secreted:
secreted[hit] = [ID]
else:
secreted[hit].append(ID)
elif i.startswith('TransMembrane:'):
hit = i.replace('TransMembrane:', '')
if not hit in membrane:
membrane[hit] = [ID]
else:
membrane[hit].append(ID)
elif i.startswith('antiSMASH:'):
hit = i.replace('antiSMASH:', '')
if not hit in secmet:
secmet[hit] = [ID]
else:
secmet[hit].append(ID)
return [pfams, iprs, nogs, buscos, merops, cazys, cogs, secreted, membrane, secmet, SMs]
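#Example of unpacking the list returned above (editor's sketch; 'genome.gbk' and FUNDB
#are placeholder names for the annotated GenBank file and the funannotate DB path):
#  (pfams, iprs, nogs, buscos, merops, cazys, cogs,
#   secreted, membrane, secmet, sm_backbones) = getGBKannotation('genome.gbk', FUNDB)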
def annotationtable(input, Database, output):
'''
Function will create a tsv annotation table from GenBank file
trying to capture all annotation in a parsable tsv file or
something that could be imported into excel
'''
#convert merops on the fly, need database
meropsDict = MEROPS2dict(os.path.join(Database, 'merops.formatted.fa'))
#input should be fully annotation GBK file from funannotate
with open(output, 'w') as outfile:
header = ['GeneID','Feature','Contig','Start','Stop','Strand','Name','Product','BUSCO','PFAM','InterPro','EggNog','COG','GO Terms','Secreted','Membrane','Protease','CAZyme', 'Notes', 'Translation']
outfile.write('%s\n' % '\t'.join(header))
for record in SeqIO.parse(input, 'genbank'):
Contig = record.id
for f in record.features:
if f.type == 'tRNA':
ID = f.qualifiers['locus_tag'][0]
Start = f.location.nofuzzy_start
End = f.location.nofuzzy_end
strand = f.location.strand
if strand == 1:
Strand = '+'
elif strand == -1:
Strand = '-'
try:
Product = f.qualifiers['product'][0]
except KeyError:
Product = "None"
result = [ID,'tRNA',Contig,str(Start),str(End),Strand,'',Product,'','','','','','','','','','','','']
outfile.write('%s\n' % '\t'.join(result))
if f.type == 'CDS':
ID = f.qualifiers['locus_tag'][0]
Start = f.location.nofuzzy_start
End = f.location.nofuzzy_end
strand = f.location.strand
if strand == 1:
Strand = '+'
elif strand == -1:
Strand = '-'
try:
Product = f.qualifiers['product'][0]
except KeyError:
Product = 'hypothetical protein'
try:
Name = f.qualifiers['gene'][0]
except KeyError:
Name = ''
try:
Translation = f.qualifiers['translation'][0]
except KeyError:
Translation = ''
pfams = []
iprs = []
GOS = []
nogs = []
cogs = []
merops = []
cazys = []
secreted = []
membrane = []
therest = []
buscos = []
for k,v in f.qualifiers.items():
if k == 'db_xref':
for i in v:
if i.startswith('PFAM:'):
hit = i.replace('PFAM:', '')
pfams.append(hit)
elif i.startswith('InterPro:'):
hit = i.replace('InterPro:', '')
iprs.append(hit)
elif k == 'note':
notes = v[0].split('; ')
for i in notes:
if i.startswith('GO'):
go_term = i.split(' ')[1]
GOS.append(go_term)
elif i.startswith('EggNog:'):
hit = i.replace('EggNog:', '')
nogs.append(hit)
elif i.startswith('BUSCO:'):
hit = i.replace('BUSCO:', '')
buscos.append(hit)
elif i.startswith('MEROPS:'): #change to family name
hit = i.replace('MEROPS:', '')
if hit in meropsDict:
hit = meropsDict.get(hit)
merops.append(hit)
else:
log.error("MEROPS database inconsistency: %s not found" % hit)
elif i.startswith('CAZy:'):
hit = i.replace('CAZy:', '')
cazys.append(hit)
elif i.startswith('COG:'):
hit = i.replace('COG:', '')
hits = hit.split(',')
for x in hits:
desc = x + ':'+ COGS.get(x)
cogs.append(desc)
elif i.startswith('SECRETED:'):
hit = i.replace('SECRETED:', '')
secreted.append(hit)
elif i.startswith('TransMembrane:'):
hit = i.replace('TransMembrane:', '')
membrane.append(hit)
else: #capture everything else
hit = i
therest.append(hit)
result = [ID, 'CDS', Contig, str(Start), str(End), Strand, Name, Product, ';'.join(buscos), ';'.join(pfams), ';'.join(iprs), ';'.join(nogs), ';'.join(cogs), ';'.join(GOS), ';'.join(secreted), ';'.join(membrane), ';'.join(merops), ';'.join(cazys), ';'.join(therest), Translation]
outfile.write('%s\n' % '\t'.join(result))
def ncbiCheckErrors(error, validation, genename, fixOut):
ncbi_error = 0
actual_error = 0
with open(error, 'rU') as errors:
for line in errors:
line = line.strip()
if 'ERROR' in line:
num = line.split(' ')[0]
ncbi_error += int(num)
#if errors in summary, then parse validation report, only get errors with gene names
if ncbi_error > 0:
#see if we can get the gene models that need to be fixed
needFixing = {}
with open(validation, 'rU') as validationFile:
for line in validationFile:
line = line.strip()
if line.startswith('ERROR') and genename in line:
actual_error += 1
parts = line.split(' ')
for x in parts:
if genename in x:
ID = x.split('|')[-1]
if '-' in ID:
ID = ID.split('-')[0]
reason = line.split(' FEATURE:')[0]
reason = reason.split('] ')[-1]
if not ID in needFixing:
needFixing[ID] = reason
if actual_error > 0:
log.info("There are %i gene models that need to be fixed." % actual_error)
print('-------------------------------------------------------')
with open(fixOut, 'w') as fix:
fix.write('#GeneID\tError Message\n')
for k,v in natsorted(needFixing.items()):
fix.write('%s\t%s\n' % (k,v))
print('%s\t%s' % (k,v))
return actual_error
def convert2counts(input):
import pandas as pd
Counts = []
for i in range(0,len(input)):
dict = {}
for k,v in input[i].items():
dict[k] = len(v)
Counts.append(dict)
df = pd.DataFrame(Counts)
df.fillna(0, inplace=True) #fill in zeros for missing data
return df
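#Editor's sketch of how the per-genome annotation dictionaries are turned into a count
#matrix; the variable names below are illustrative only:
#  df = convert2counts([pfam_genome1, pfam_genome2])
#  # -> DataFrame with one row per input dict and one column per annotation term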
def gb2proteinortho(input, folder, name):
gffOut = os.path.join(folder, name+'.gff')
FastaOut = os.path.join(folder, name+'.faa')
Transcripts = os.path.join(folder, name+'.transcripts.fa')
genes = {}
with open(input, 'rU') as gbk:
for record in SeqIO.parse(gbk, 'genbank'):
for f in record.features:
gb_feature_add2dict(f, record, genes)
#now output the files you need
with open(gffOut, 'w') as gff:
with open(FastaOut, 'w') as fasta:
with open(Transcripts, 'w') as transcripts:
for k,v in natsorted(genes.items()):
if v['type'] == 'mRNA':
for i,item in enumerate(v['ids']):
transcripts.write(">{:} {:} codon_start={:} strand={:}\n{:}\n".format(
item, k, v['codon_start'][i], v['strand'], v['cds_transcript'][i]))
fasta.write(">%s %s\n%s\n" % (item, k, v['protein'][i]))
gff.write("{:}\t{:}\tCDS\t{:}\t{:}\t.\t{:}\t.\tID={:};Parent={:};product={:};\n".format(v['contig'], v['source'], v['location'][0], v['location'][1], v['strand'], item, k, v['product'][i]))
def drawStackedBar(panda, type, labels, ymax, output, colors=False):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
import pandas as pd
import numpy as np
from stackedBarGraph import StackedBarGrapher as StackedBarGrapher
#stackedbargraph from summary data
SBG = StackedBarGrapher()
#labels
d_labels = panda.index.values
#y-ticks
ticks = np.linspace(0,ymax,6)
ticks = list(ticks)
nums = [ int(x) for x in ticks ]
vals = [ str(x) for x in nums ]
yticks = [nums,vals]
#colors
if not colors:
color_palette = sns.hls_palette(len(panda.columns), l=.4, s=.8).as_hex()
color_palette = [ str(x).upper() for x in color_palette ]
else:
color_palette = colors
#set up plot
sns.set_style('darkgrid')
sns.set_context('paper')
fig = plt.figure()
ax = fig.add_subplot(111)
YLabel = "Number of "+type
SBG.stackedBarPlot(ax,panda,color_palette,xLabels=panda.index.values,endGaps=True,gap=0.25,xlabel="Genomes",ylabel=YLabel,yTicks=yticks)
plt.title(type+" summary")
#get the legend
legends = []
i = 0
for column in panda.columns:
legends.append(mpatches.Patch(color=color_palette[i], label=panda.columns.values[i]+ ": " + labels.get(panda.columns.values[i])))
i+=1
lgd = ax.legend(handles=legends, fontsize=6, loc='upper left', bbox_to_anchor=(1.02, 1), borderaxespad=0)
plt.ylim([0,ymax])
        #set the font size; ideally this would scale with the number of genomes, but a fixed reasonable size is used
for item in ax.get_xticklabels():
item.set_fontsize(8)
#setup the plot
fig.subplots_adjust(bottom=0.4)
fig.savefig(output, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def drawHeatmap(df, color, output, labelsize, annotate):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
#get size of table
width = len(df.columns) / 2
height = len(df.index) / 4
fig, ax = plt.subplots(figsize=(width,height))
if annotate:
sns.heatmap(df,linewidths=0.5, cmap=color, ax=ax, fmt="d", annot_kws={"size": 4}, annot=True)
else:
sns.heatmap(df,linewidths=0.5, cmap=color, ax=ax, annot=False)
plt.yticks(rotation=0)
plt.xticks(rotation=90)
for item in ax.get_xticklabels():
item.set_fontsize(8)
for item in ax.get_yticklabels():
item.set_fontsize(int(labelsize))
fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
plt.close(fig)
def donutplot(df, LongName, output, colors=False):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import seaborn as sns
# create data
longnames=[]
for x in df.columns.tolist():
if x in LongName:
longnames.append(LongName.get(x))
else:
longnames.append(x)
names = df.columns.tolist()
data = df.values.tolist()
species = df.index.values
#get size of table
categories = len(df.columns)
total = len(df.index)
Rows = total // 2
Rows += total % 2
Position = range(1,total+1)
#get colors figured out
if not colors:
color_palette = pref_colors
else:
color_palette = colors
#draw figure
if len(species) < 3:
fig = plt.figure(1,figsize=(8,4))
else:
fig = plt.figure(1,figsize=(8,8))
for k in range(total):
ax = fig.add_subplot(Rows,2,Position[k])
# Create a circle for the center of the plot
my_circle=plt.Circle( (0,0), 0.7, color='white')
            plt.pie(data[k], labels=names, colors=color_palette)
p=plt.gcf()
p.gca().add_artist(my_circle)
plt.title(species[k])
patches = [ mpatches.Patch(color=color_palette[i], label="{:s}".format(longnames[i]) ) for i in range(len(longnames)) ]
plt.legend(handles=patches, bbox_to_anchor=(1,0.5), bbox_transform=fig.transFigure, loc="center left", ncol=1)
fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
plt.close(fig)
def drawbarplot(df, output):
with warnings.catch_warnings():
warnings.simplefilter('ignore')
import matplotlib.pyplot as plt
import seaborn as sns
#num = len(df.columns) + 1
sns.set(style="darkgrid")
fig = plt.figure()
#colors
if len(df) > len(pref_colors):
colorplot = sns.husl_palette(len(df), l=.5).as_hex()
colorplot = [ str(x).upper() for x in colorplot ]
else:
colorplot = pref_colors[:len(df)]
ax = sns.barplot(data=df, palette=colorplot)
plt.xlabel('Genomes')
plt.ylabel('Secreted Proteins')
plt.xticks(rotation=90)
fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
plt.close(fig)
def distance2mds(df, distance, type, output):
import numpy as np
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.manifold import MDS
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
#run distance metric on matrix and then plot using NMDS
num = len(df.index)
data = np.array(df).astype(int)
bc_dm = pairwise_distances(data, metric=distance)
mds = MDS(n_components=2, metric=False, max_iter=999, dissimilarity='precomputed', n_init=10, verbose=0)
result = mds.fit(bc_dm)
coords = result.embedding_
stress = 'stress=' + '{0:.4f}'.format(result.stress_)
#get axis information and make square plus some padding
xcoords = abs(maxabs(coords[:,0])) + 0.1
ycoords = abs(maxabs(coords[:,1])) + 0.1
#setup plot
fig = plt.figure()
#colors
if len(df) > len(pref_colors):
colorplot = sns.husl_palette(len(df), l=.5).as_hex()
colorplot = [ str(x).upper() for x in colorplot ]
else:
colorplot = pref_colors[:len(df)]
for i in range(0,num):
plt.plot(coords[i,0], coords[i,1], 'o', markersize=9, color=colorplot[i], label=df.index.values[i])
plt.xlabel('NMDS axis 1')
plt.ylabel('NMDS axis 2')
plt.ylim(-ycoords,ycoords)
plt.xlim(-xcoords,xcoords)
        #if num < 13: #if number too large, don't plot
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.title('NMDS analysis of '+type+' domains')
plt.annotate(stress, xy=(1,0), xycoords='axes fraction', fontsize=12, ha='right', va='bottom')
fig.savefig(output, format='pdf', dpi=1000, bbox_inches='tight')
plt.close(fig)
def ReciprocalBlast(filelist, protortho, cpus):
'''
function to run reciprocal diamond blast for generating proteinortho input
'''
#generate dmnd databases for each input
for x in filelist:
base = os.path.basename(x)
cmd = ['diamond', 'makedb', '--in', x, '--db', base+'.dmnd']
if not checkannotations(os.path.join(protortho, base+'.dmnd')):
runSubprocess(cmd, protortho, log)
for p in itertools.permutations(filelist, 2):
query = p[0]
target = p[1]
db = os.path.basename(target)+'.dmnd'
outname = target+'.vs.'+query+'.bla'
cmd = ['diamond', 'blastp', '--query', query, '--db', db, '--outfmt', '6', '--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
if not checkannotations(os.path.join(protortho, outname)):
runSubprocess(cmd, protortho, log)
db = os.path.basename(query)+'.dmnd'
outname = query+'.vs.'+target+'.bla'
cmd = ['diamond', 'blastp', '--query', target, '--db', db, '--outfmt', '6', '--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
if not checkannotations(os.path.join(protortho, outname)):
runSubprocess(cmd, protortho, log)
db = os.path.basename(target)+'.dmnd'
outname = target+'.vs.'+target+'.bla'
cmd = ['diamond', 'blastp', '--query', target, '--db', db, '--outfmt', '6', '--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
if not checkannotations(os.path.join(protortho, outname)):
runSubprocess(cmd, protortho, log)
db = os.path.basename(query)+'.dmnd'
outname = query+'.vs.'+query+'.bla'
cmd = ['diamond', 'blastp', '--query', query, '--db', db, '--outfmt', '6', '--out', outname, '--evalue', '1e-5', '--more-sensitive', '--threads', str(cpus)]
if not checkannotations(os.path.join(protortho, outname)):
runSubprocess(cmd, protortho, log)
def singletons(poff, name):
with open(poff, 'rU') as input:
count = 0
for line in input:
line = line.replace('\n', '')
if line.startswith('#'):
header = line
species = header.split('\t')[3:]
i = species.index(name.replace(' ', '_')) + 3
continue
col = line.split('\t')
if col[0] == '1' and col[i] != '*':
count += 1
return count
def orthologs(poff, name):
with open(poff, 'rU') as input:
count = 0
for line in input:
line = line.replace('\n', '')
if line.startswith('#'):
header = line
species = header.split('\t')[3:]
i = species.index(name.replace(' ', '_')) + 3
continue
col = line.split('\t')
if col[0] != '1' and col[i] != '*':
count += 1
return count
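#Illustrative calls (editor's sketch; the .poff file name and species name are placeholders):
#  n_single = singletons('funannotate.poff', 'Genus species')
#  n_ortho = orthologs('funannotate.poff', 'Genus species')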
def iprxml2dict(xmlfile, terms):
import xml.etree.cElementTree as cElementTree
iprDict = {}
for event, elem in cElementTree.iterparse(xmlfile):
if elem.tag == 'interpro':
ID = elem.attrib['id']
if ID in terms:
for x in elem.getchildren():
if x.tag == 'name':
description = x.text
iprDict[ID] = description
elem.clear()
else:
elem.clear()
return iprDict
def pfam2dict(file):
pfamDict = {}
with open(file, 'rU') as input:
for line in input:
if line.startswith('PF'): #just check to be sure
line = line.replace('\n', '')
cols = line.split('\t')
ID = cols[0]
desc = cols[4]
pfamDict[ID] = desc
return pfamDict
def flipKeyValues(input):
flipped = {}
for k,v in input.items():
for y in v:
if not y in flipped:
flipped[y] = k
return flipped
def dictFlip(input):
#flip the list of dictionaries
outDict = {}
for x in input:
for k,v in natsorted(x.iteritems()):
for i in v:
if i in outDict:
outDict[i].append(k)
else:
outDict[i] = [k]
return outDict
def busco_dictFlip(input):
#flip the list of dictionaries
output = []
for x in input:
outDict = {}
for k,v in natsorted(x.iteritems()):
for i in v:
if i in outDict:
outDict[i].append(k)
else:
outDict[i] = [k]
output.append(outDict)
return output
def dictFlipLookup(input, lookup):
outDict = {}
for x in input:
for k,v in natsorted(x.iteritems()):
#lookup description in another dictionary
if not lookup.get(k) is None:
result = k+': '+lookup.get(k)
else:
result = k+': No description'
for i in v:
if i in outDict:
outDict[i].append(str(result))
else:
outDict[i] = [str(result)]
return outDict
def copyDirectory(src, dest):
import shutil
try:
shutil.copytree(src, dest)
# Directories are the same
except shutil.Error as e:
print('Directory not copied. Error: %s' % e)
# Any error saying that the directory doesn't exist
except OSError as e:
print('Directory not copied. Error: %s' % e)
def download_buscos(name, Database):
if name in busco_links:
log.info("Downloading %s busco models" % name)
address = busco_links.get(name)
filename = address.split('/')[-1]
if name == 'fungiv1':
foldername = 'fungi'
else:
foldername = filename.split('.')[0]
cmd = ['wget', '-c', '--tries=0', '--read-timeout=20', address]
runSubprocess(cmd, '.', log)
cmd = ['tar', '-zxf', filename]
runSubprocess(cmd, '.', log)
copyDirectory(os.path.abspath(foldername), os.path.join(Database, name))
shutil.rmtree(foldername)
os.remove(filename)
else:
log.error("%s not a valid BUSCO database" % name)
validBusco = list(busco_links.keys())
log.error("Valid BUSCO DBs: %s" % (', '.join(validBusco)))
sys.exit(1)
def fasta2dict(Fasta):
answer = dict()
with open(Fasta, 'rU') as gbk:
SeqRecords = SeqIO.parse(gbk, 'fasta')
for record in SeqRecords:
if record.id in answer:
print("WARNING - duplicate key!")
else:
answer[record.id] = str(record.seq)
return answer
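#Illustrative usage (editor's sketch; the FASTA path is a placeholder):
#  seqs = fasta2dict('proteins.faa')   # {record_id: sequence_string}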
def ortho2phylogeny(folder, df, num, dict, cpus, bootstrap, tmpdir, outgroup, sp_file, name, sc_buscos, ml_method):
import random, pylab
from Bio import Phylo
from Bio.Phylo.Consensus import get_support
if outgroup:
#load species fasta ids into dictionary
OutGroup = {}
with open(sp_file, 'rU') as sp:
for rec in SeqIO.parse(sp, 'fasta'):
OutGroup[rec.id] = rec.seq
#single copy orthologs are in a dataframe, count and then randomly select
num_species = len(df.columns)
species = df.columns.values
if len(df) == 0:
log.error("0 single copy BUSCO orthologs found, skipping phylogeny")
return
if len(df) < int(num):
number = len(df)
log.info("Found %i single copy BUSCO orthologs, will use all to infer phylogeny" % (len(df)))
subsampled = df
else:
number = int(num)
log.info("Found %i single copy BUSCO orthologs, will randomly select %i to infer phylogeny" % (len(df), number))
subsampled = df.sample(n=number)
if outgroup: #passed a list to extract from parent script
busco_list = sc_buscos
#since you checked for BUSCO id across all previously, loop through first set and print BUSCOs to file
with open(os.path.join(tmpdir, 'phylogeny.buscos.used.txt'), 'w') as busco_out:
with open(os.path.join(tmpdir, 'phylogeny.concat.fa'), 'w') as proteinout:
if outgroup:
proteinout.write(">%s\n" % name)
for y in busco_list:
proteinout.write("%s" % (OutGroup.get(y)))
proteinout.write('\n')
for i in range(0,num_species):
proteinout.write(">%s\n" % species[i])
proteins = fasta2dict(os.path.join(folder, species[i]+'.faa'))
for row in subsampled[species[i]].iteritems():
proteinout.write("%s" % proteins.get(row[1]))
busco_out.write("%s\t%s\n" % (dict[i].get(row[1]), row[1]))
proteinout.write('\n')
cmd = ['mafft', '--quiet', os.path.join(tmpdir,'phylogeny.concat.fa')]
runSubprocess2(cmd, '.', log, os.path.join(tmpdir,'phylogeny.mafft.fa'))
cmd = ['trimal', '-in', os.path.join(tmpdir,'phylogeny.mafft.fa'), '-out', os.path.join(tmpdir, 'phylogeny.trimal.phylip'), '-automated1', '-phylip']
runSubprocess(cmd, '.', log)
if ml_method == 'raxml':
cmd = ['raxmlHPC-PTHREADS', '-T', str(cpus), '-f', 'a', '-m', 'PROTGAMMAAUTO', '-p', '12345', '-x', '12345', '-#', str(bootstrap), '-s', 'phylogeny.trimal.phylip', '-n', 'nwk']
if outgroup:
cmd = cmd + ['-o', name]
treefile = os.path.join(tmpdir, 'RAxML_bootstrap.nwk')
runSubprocess(cmd, tmpdir, log)
#parse with biopython and draw
trees = list(Phylo.parse(treefile, 'newick'))
best = Phylo.read(os.path.join(tmpdir,'RAxML_bestTree.nwk'), 'newick')
support_tree = get_support(best, trees)
Phylo.draw(support_tree, do_show=False)
pylab.axis('off')
pylab.savefig(os.path.join(tmpdir, 'ML.phylogeny.pdf'), format='pdf', bbox_inches='tight', dpi=1000)
else: #run iqtree as faster and better than raxml in initial testing
cmd = ['iqtree', '-s', 'phylogeny.trimal.phylip', '-nt', 'AUTO', '-ntmax', str(cpus), '-seed', '12345', '-bb', '1000']
if outgroup:
cmd = cmd + ['-o', name]
runSubprocess(cmd, tmpdir, log)
treefile = os.path.join(tmpdir, 'phylogeny.trimal.phylip.treefile')
best = Phylo.read(treefile, 'newick')
Phylo.draw(best, do_show=False)
pylab.axis('off')
pylab.savefig(os.path.join(tmpdir, 'ML.phylogeny.pdf'), format='pdf', bbox_inches='tight', dpi=1000)
def getTrainResults(input):
with open(input, 'rU') as train:
for line in train:
if line.startswith('nucleotide level'):
line = line.replace(' ', '')
values1 = line.split('|') #get [1] and [2]
if line.startswith('exon level'):
line = line.replace(' ', '') #get [6] and [7]
values2 = line.split('|')
if line.startswith('gene level'):
line = line.replace(' ', '')
values3 = line.split('|') #get [6] and [7]
return (float(values1[1]), float(values1[2]), float(values2[6]), float(values2[7]), float(values3[6]), float(values3[7]))
def count_multi_CDS_genes(input, filterlist):
#take funannotate annotation dictionary and return number of genes with more than one CDS
counter = 0
counter_inList = 0
for k,v in natsorted(input.items()):
if len(v['CDS'][0]) > 1:
counter += 1
if k in filterlist:
counter_inList += 1
return len(input), counter, len(filterlist), counter_inList
def selectTrainingModels(input, fasta, genemark_gtf, output):
from collections import OrderedDict
'''
    function to take a GFF3 file and filter the gene models so they are non-overlapping
also sort the models by number of exons, the more the better.
'''
def _sortDict(d):
return (len(d[1]['CDS'][0]))
#load gene models into funannotate structured dictionary
gene_inter = defaultdict(InterLap)
Genes = {}
Genes = gff2dict(input, fasta, Genes)
#add to InterLap output proteins
proteins = 'augustus.training.proteins.fa'
ignoreList = []
keeperList = getGenesGTF(genemark_gtf)
#check number of multi-cds genes
countGenes, countGenesCDS, countKeeper, countKeeperCDS = count_multi_CDS_genes(Genes, keeperList)
log.debug('{:,} PASA genes; {:,} have multi-CDS; {:,} from filterGeneMark; {:,} have multi-CDS'.format(countGenes, countGenesCDS, countKeeper, countKeeperCDS))
multiCDScheck, keeperCheck = (False,)*2
if countKeeper >= 200:
keeperCheck = True
if keeperCheck:
if countKeeperCDS >= 200:
multiCDScheck = True
else:
if countGenesCDS >= 200:
multiCDScheck = True
log.debug('filterGeneMark GTF filter set to {:}; require genes with multiple CDS set to {:}'.format(keeperCheck,multiCDScheck))
with open(proteins, 'w') as protout:
for k,v in natsorted(Genes.items()):
if keeperCheck and not k in keeperList:
ignoreList.append(k)
continue
if multiCDScheck and len(v['CDS'][0]) < 2:
ignoreList.append(k)
continue
#add to interlap object and write protein out
gene_inter[v['contig']].add((v['location'][0], v['location'][1], v['strand'], k, len(v['CDS'][0])))
protout.write('>%s___%i\n%s\n' % (k, len(v['CDS'][0]), v['protein'][0]))
#make sure gene models are unique, so do pairwise diamond search @ 80% identity
cmd = ['diamond', 'makedb', '--in', 'augustus.training.proteins.fa', '--db', 'aug_training.dmnd']
runSubprocess4(cmd, '.', log)
cmd = ['diamond', 'blastp', '--query', 'augustus.training.proteins.fa', '--db', 'aug_training.dmnd', '--more-sensitive', '-o', 'aug.blast.txt', '-f', '6', 'qseqid', 'sseqid', 'pident', '--query-cover', '80', '--subject-cover', '80', '--id', '80', '--no-self-hits']
runSubprocess4(cmd, '.', log)
blast_results = []
with open('aug.blast.txt', 'rU') as blast:
for line in blast:
line = line.rstrip()
line = line.replace('___', '\t')
blast_results.append(line.split('\t'))
sortedBlast = natsorted(blast_results, key=lambda x: int(x[1]), reverse=True)
blastignore = []
for hit in sortedBlast:
if hit[0] in blastignore or hit[2] in blastignore:
continue
if int(hit[1]) >= int(hit[3]):
if not hit[2] in blastignore:
blastignore.append(hit[2])
else:
if not hit[0] in blastignore:
blastignore.append(hit[0])
    log.debug('{:,} models fail blast identity threshold'.format(len(blastignore)))
SafeRemove('augustus.training.proteins.fa')
SafeRemove('aug_training.dmnd')
SafeRemove('aug.blast.txt')
#now return cleaned genemark GTF file
finalIgnoreList = []
for x in ignoreList:
if not x in finalIgnoreList:
finalIgnoreList.append(x)
for y in blastignore:
if not y in finalIgnoreList:
finalIgnoreList.append(y)
log.debug('{:,} models will be ignored for training Augustus'.format(len(finalIgnoreList)))
GenesPass = {}
for k,v in natsorted(Genes.items()):
if not k in finalIgnoreList and not k in GenesPass:
loc = sorted([v['location'][0],v['location'][1]])
if loc in gene_inter[v['contig']]:
hits = list(gene_inter[v['contig']].find(loc))
sortedHits = sorted(hits, key=lambda x: int(x[4]), reverse=True)
validHits = []
for y in sortedHits:
if not y[3] in finalIgnoreList and y[3] != k:
validHits.append(y)
if len(validHits) > 0:
if not validHits[0][3] in GenesPass:
GenesPass[validHits[0][3]] = Genes.get(validHits[0][3])
else:
GenesPass[k] = v
#now sort dictionary number of exons
sGenes = sorted(GenesPass.iteritems(), key=_sortDict, reverse=True)
sortedGenes = OrderedDict(sGenes)
log.info("{:,} of {:,} models pass training parameters".format(len(sortedGenes), len(Genes)))
#x = dict(itertools.islice(sortedGenes.items(), 0, 2500))
final = {}
for i, (k,v) in enumerate(natsorted(sortedGenes.items())):
v['ids'] = ['g_'+str(i+1)+'-T1']
final['g_'+str(i+1)] = v
dict2gff3noUTRs(final, output)
return len(final)
def getGenesGTF(input):
genes = []
with open(input, 'rU') as infile:
for line in infile:
            if not line.startswith('\n') and not line.startswith('#'):
line = line.rstrip()
info = line.split('\t')[-1]
attributes = info.split(';')
ID = None
for x in attributes:
if x.startswith('gene_id'):
tmp = x.replace('gene_id ', '')
ID = tmp.replace('"', '')
if ID:
if not ID in genes:
genes.append(ID)
return genes
def trainAugustus(AUGUSTUS_BASE, train_species, trainingset, genome, outdir, cpus, num_training, optimize):
RANDOMSPLIT = os.path.join(AUGUSTUS_BASE, 'scripts', 'randomSplit.pl')
OPTIMIZE = os.path.join(AUGUSTUS_BASE, 'scripts', 'optimize_augustus.pl')
NEW_SPECIES = os.path.join(AUGUSTUS_BASE, 'scripts', 'new_species.pl')
aug_cpus = '--cpus='+str(cpus)
species = '--species='+train_species
aug_log = os.path.join(outdir, 'logfiles', 'augustus_training.log')
TrainSet = os.path.abspath(trainingset)
onlytrain = '--onlytrain='+TrainSet+'.train'
testtrain = TrainSet+'.test'
trainingdir = os.path.join(outdir, 'predict_misc', 'tmp_opt_'+train_species)
with open(aug_log, 'w') as logfile:
if not CheckAugustusSpecies(train_species):
subprocess.call(['perl', NEW_SPECIES, species], stdout = logfile, stderr = logfile)
#run etraining again to only use best models from EVM for training
subprocess.call(['etraining', species, TrainSet], cwd = os.path.join(outdir, 'predict_misc'), stderr = logfile, stdout = logfile)
subprocess.call(['perl', RANDOMSPLIT, TrainSet, str(num_training)], cwd = os.path.join(outdir, 'predict_misc')) #split off num_training models for testing purposes
if os.path.isfile(os.path.join(outdir, 'predict_misc', TrainSet+'.train')):
with open(os.path.join(outdir, 'predict_misc', 'augustus.initial.training.txt'), 'w') as initialtraining:
subprocess.call(['augustus', species, TrainSet+'.test'], stdout=initialtraining, cwd = os.path.join(outdir, 'predict_misc'))
train_results = getTrainResults(os.path.join(outdir, 'predict_misc', 'augustus.initial.training.txt'))
log.info('Augustus initial training results (specificity/sensitivity):\nnucleotides ({:.1%}/{:.1%}); exons ({:.1%}/{:.1%}); genes ({:.1%}/{:.1%}).'.format(train_results[0],train_results[1],train_results[2],train_results[3],train_results[4],train_results[5]))
if optimize:
#now run optimization
subprocess.call(['perl', OPTIMIZE, species, aug_cpus, onlytrain, testtrain], cwd = os.path.join(outdir, 'predict_misc'), stderr = logfile, stdout = logfile)
#run etraining again
subprocess.call(['etraining', species, TrainSet], cwd = os.path.join(outdir, 'predict_misc'), stderr = logfile, stdout = logfile)
with open(os.path.join(outdir, 'predict_misc', 'augustus.final.training.txt'), 'w') as finaltraining:
subprocess.call(['augustus', species, TrainSet+'.test'], stdout=finaltraining, cwd = os.path.join(outdir, 'predict_misc'))
train_results = getTrainResults(os.path.join(outdir, 'predict_misc', 'augustus.final.training.txt'))
                log.info('Augustus optimized training results (specificity/sensitivity):\nnucleotides ({:.1%}/{:.1%}); exons ({:.1%}/{:.1%}); genes ({:.1%}/{:.1%}).'.format(train_results[0],train_results[1],train_results[2],train_results[3],train_results[4],train_results[5]))
#clean up tmp folder
shutil.rmtree(trainingdir)
else:
if train_results[4] < 0.50:
log.info("Accuracy seems low, you can try to improve by passing the --optimize_augustus option.")
else:
log.error("AUGUSTUS training failed, check logfiles")
sys.exit(1)
def sortList(input, col):
return natsorted(input, key=operator.itemgetter(col))
def sortHints(input, output):
data = []
with open(input, 'rU') as infile:
for line in infile:
line = line.rstrip()
data.append(line.split('\t'))
#replicate this: sort -n -k 4,4 | sort -s -n -k 5,5 | sort -s -n -k 3,3 | sort -s -k 1,1
sort1 = sortList(data, 3)
sort2 = sortList(sort1, 4)
sort3 = sortList(sort2, 2)
sort4 = sortList(sort3, 0)
with open(output, 'w') as sort_out:
for line in sort4:
sort_out.write('%s\n' % '\t'.join(line))
def checkgoatools(input):
with open(input, 'rU') as goatools:
count = -1
result = False
headercount = 0
for line in goatools:
count += 1
if line.startswith('GO\tNS'):
header = line.replace('\n', '')
headercount = count
if line.startswith('GO:'):
result = True
return (result, headercount)
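#Editor's sketch (assumed downstream use): the boolean says whether any GO rows were found
#and headercount gives the index of the header line, e.g. for loading with pandas:
#  has_hits, skip = checkgoatools('go_enrichment.txt')
#  if has_hits:
#      df = pd.read_csv('go_enrichment.txt', sep='\t', skiprows=skip)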
def translatemRNA(input, output):
from Bio.SeqIO.FastaIO import SimpleFastaParser
with open(output, 'w') as outfile:
with open(input, 'rU') as fasta:
for header, seq in SimpleFastaParser(fasta):
codon_start = 1
for x in header.split(' '):
if x.startswith('codon_start='):
codon_start = int(x.replace('codon_start=', '').rstrip())
protSeq = translate(seq, '+', codon_start-1) #transcripts should already be in proper orientation
outfile.write('>{:}\n{:}\n'.format(header, protSeq))
def alignMAFFT(input, output):
FNULL = open(os.devnull, 'w')
with open(output, 'w') as outfile:
subprocess.call(['mafft', '--quiet', input], stderr = FNULL, stdout = outfile)
def align2Codon(alignment, transcripts, output):
FNULL = open(os.devnull, 'w')
with open(output, 'w') as outfile:
subprocess.call(['perl', os.path.join(UTIL,'pal2nal.pl'), alignment, transcripts, '-output', 'fasta'], stderr=FNULL, stdout = outfile)
if getSize(output) < 1:
os.remove(output)
log.debug('dNdS Error: pal2nal failed for %s' % alignment)
def counttaxa(input):
ct = 0
with open(input, 'rU') as tree:
line = tree.readline()
ct = line.count(',')+1
return ct
def getMatchFileName(pattern, directory):
result = None
for f in os.listdir(directory):
if pattern in f:
result = os.path.join(directory, f)
return result
def drawPhyMLtree(fasta, tree):
FNULL = open(os.devnull, 'w')
fc = countfasta(fasta)
#need to convert to phylip format
base = os.path.basename(fasta).split('.')[0]
dir = os.path.dirname(fasta)
tmp1 = os.path.join(dir, base+'.draw2tree.phylip')
subprocess.call(['trimal', '-in', fasta, '-out', tmp1, '-phylip'])
#draw tree
subprocess.call(['phyml', '-i', tmp1], stdout = FNULL, stderr = FNULL)
tmp2 = getMatchFileName(base+'.draw2tree.phylip_phyml_tree', dir)
#check that num taxa in tree = input
tc = counttaxa(tmp2)
if tc != fc: #something failed...
log.debug('dNdS Error: phyml tree failed for %s' % fasta)
#retry
subprocess.call(['trimal', '-in', fasta, '-out', tmp1, '-phylip'])
subprocess.call(['phyml', '-i', tmp1], stdout = FNULL, stderr = FNULL)
#rename and clean
os.rename(tmp2, tree)
SafeRemove(tmp1)
stats = getMatchFileName(base+'.draw2tree.phylip_phyml_stats', dir)
SafeRemove(stats)
def simplestTreeEver(fasta, tree):
with open(tree, 'w') as outfile:
with open(fasta, 'rU') as input:
ids = []
for rec in SeqIO.parse(input, 'fasta'):
ids.append(rec.id)
outfile.write('(%s,%s);' % (ids[0], ids[1]))
def rundNdSexhaustive(folder):
FNULL = open(os.devnull, 'w')
#setup intermediate files
tmpdir = os.path.dirname(folder)
name = os.path.basename(folder)
transcripts = os.path.join(tmpdir, name+'.transcripts.fa')
prots = os.path.join(tmpdir, name+'.proteins.fa')
aln = os.path.join(tmpdir, name+'.aln')
codon = os.path.join(tmpdir, name+'.codon.aln')
tree = os.path.join(tmpdir, name+'.tree')
log = os.path.join(tmpdir, name+'.log')
finallog = os.path.join(tmpdir, name, name+'.log')
if not checkannotations(finallog):
num_seqs = countfasta(transcripts)
#Translate to protein space
translatemRNA(transcripts, prots)
#align protein sequences
alignMAFFT(prots, aln)
#convert to codon alignment
align2Codon(aln, transcripts, codon)
if checkannotations(codon):
if num_seqs > 2:
#now generate a tree using phyml
drawPhyMLtree(codon, tree)
else:
simplestTreeEver(transcripts, tree)
#now run codeml through ete3
etecmd = ['ete3', 'evol', '--alg', os.path.abspath(codon), '-t', os.path.abspath(tree), '--models', 'M0', 'M1', 'M2', 'M7', 'M8', '-o', name, '--clear_all', '--codeml_param', 'cleandata,1']
with open(log, 'w') as logfile:
logfile.write('\n%s\n' % ' '.join(etecmd))
subprocess.call(etecmd, cwd = tmpdir, stdout = logfile, stderr = logfile)
#clean up
for file in os.listdir(tmpdir):
if file.startswith(name+'.'):
os.rename(os.path.join(tmpdir, file), os.path.join(tmpdir, name, file))
def rundNdSestimate(folder):
FNULL = open(os.devnull, 'w')
#setup intermediate files
tmpdir = os.path.dirname(folder)
name = os.path.basename(folder)
transcripts = os.path.join(tmpdir, name+'.transcripts.fa')
prots = os.path.join(tmpdir, name+'.proteins.fa')
aln = os.path.join(tmpdir, name+'.aln')
codon = os.path.join(tmpdir, name+'.codon.aln')
tree = os.path.join(tmpdir, name+'.tree')
log = os.path.join(tmpdir, name+'.log')
finallog = os.path.join(tmpdir, name, name+'.log')
if not checkannotations(finallog):
num_seqs = countfasta(transcripts)
#Translate to protein space
translatemRNA(transcripts, prots)
#align protein sequences
alignMAFFT(prots, aln)
#convert to codon alignment
align2Codon(aln, transcripts, codon)
if checkannotations(codon):
if num_seqs > 2:
#now generate a tree using phyml
drawPhyMLtree(codon, tree)
else:
simplestTreeEver(transcripts, tree)
#now run codeml through ete3
etecmd = ['ete3', 'evol', '--alg', os.path.abspath(codon), '-t', os.path.abspath(tree), '--models', 'M0', '-o', name, '--clear_all', '--codeml_param', 'cleandata,1']
with open(log, 'w') as logfile:
logfile.write('\n%s\n' % ' '.join(etecmd))
subprocess.call(etecmd, cwd = tmpdir, stdout = logfile, stderr = logfile)
#clean up
for file in os.listdir(tmpdir):
if file.startswith(name+'.'):
os.rename(os.path.join(tmpdir, file), os.path.join(tmpdir, name, file))
def get_subdirs(a_dir):
return [os.path.join(a_dir, name) for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def get_subdirs2(a_dir):
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def parsedNdS(folder):
results = {}
hits = get_subdirs2(folder)
for x in hits:
finallog = os.path.join(folder, x, x+'.log')
#parse logfile to get omega
dnds = 'NA'
m1m2p = 'NA'
m7m8p = 'NA'
if os.path.isfile(finallog):
with open(finallog, 'rU') as input:
for line in input:
line = line.strip()
if 'M7' in line and 'M8' in line and '|' in line:
m7m8p = line.split('|')[-1].strip()
m7m8p = m7m8p.replace('*','')
m7m8p = '{0:.5f}'.format(float(m7m8p))
elif 'M1' in line and 'M2' in line and '|' in line:
m1m2p = line.split('|')[-1].lstrip()
m1m2p = m1m2p.replace('*','')
m1m2p = '{0:.5f}'.format(float(m1m2p))
elif line.startswith('- Model M0'):
nextline = next(input)
dnds = nextline.split('tree: ')[1].rstrip()
results[x] = (dnds, m1m2p, m7m8p)
return results
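#Editor's sketch of the structure returned by parsedNdS (keys and values are illustrative):
#  {'orthogroup_0001': ('0.1234', '0.04001', '0.00120'), ...}
#  i.e. {cluster: (dN/dS from model M0, M1-vs-M2 p-value, M7-vs-M8 p-value)}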
def chunkIt(seq, num):
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
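#Worked example (editor's addition):
#  chunkIt(list(range(7)), 3) -> [[0, 1], [2, 3], [4, 5, 6]]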
def getBlastDBinfo(input):
'''
function to return a tuple of info using blastdbcmd
tuple: (name, date, #sequences)
'''
cmd = ['blastdbcmd', '-info', '-db', input]
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if stderr:
print(stderr.split('\n')[0])
results = stdout.split('\n\n')
results = [x for x in results if x]
#parse results which are now in list, look for starts with Database and then Date
Name, Date, NumSeqs = (None,)*3
for x in results:
if x.startswith('Database:'):
hit = x.split('\n\t')
Name = hit[0].replace('Database: ', '')
NumSeqs = hit[1].split(' sequences;')[0].replace(',', '')
if x.startswith('Date:'):
Date = x.split('\t')[0].replace('Date: ', '')
return (Name, Date, NumSeqs)
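#Illustrative usage (editor's sketch; the database path is a placeholder):
#  name, date, nseqs = getBlastDBinfo('/path/to/blast_db')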
HEADER = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
<meta name="funannotate comparative genomics output" content="">
<meta name="Jonathan Palmer" content="">
<title>Funannotate</title>
<!-- Bootstrap core CSS -->
<link href="css/bootstrap.min.css" rel="stylesheet">
<!-- Custom styles for this template -->
<link href="css/starter-template.css" rel="stylesheet">
<script src="js/ie-emulation-modes-warning.js"></script>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
</button>
<a class="navbar-brand" href="index.html">Funannotate</a>
</div>
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li><a href="stats.html">Stats</a></li>
<li><a href="phylogeny.html">Phylogeny</a></li>
<li><a href="orthologs.html">Orthologs</a></li>
<li><a href="interpro.html">InterPro</a></li>
<li><a href="pfam.html">PFAM</a></li>
<li><a href="merops.html">Merops</a></li>
<li><a href="cazy.html">CAZymes</a></li>
<li><a href="cogs.html">COGs</a></li>
<li><a href="signalp.html">SignalP</a></li>
<li><a href="tf.html">TFs</a></li>
<li><a href="secmet.html">SecMet</a></li>
<li><a href="go.html">GO</a></li>
<li><a href="citation.html">Cite</a></li>
</ul>
</div><!--/.nav-collapse -->
</div>
</nav>
'''
ORTHOLOGS = '''
<div class="container">
<div class="table">
<h2 class="sub-header">Orthologous protein groups</h2>
<div class="table-responsive">
'''
INDEX = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Funannotate Results</h2>
<br>
<p><a href='stats.html'>Genome Summary Stats</a></p>
<p><a href='phylogeny.html'>Maximum likelihood Phylogeny (RAxML)</a></p>
<p><a href='merops.html'>MEROPS Protease Stats</a></p>
<p><a href='cazy.html'>CAZyme carbohydrate activating enzyme Stats</a></p>
<p><a href='cogs.html'>COGs Stats</a></p>
<p><a href='signalp.html'>Secreted proteins (SignalP)</a></p>
<p><a href='interpro.html'>InterProScan Domain Stats</a></p>
<p><a href='tf.html'>Transcription Factor Summary</a></p>
<p><a href='secmet.html'>Secondary Metabolism Cluster Summary</a></p>
<p><a href='pfam.html'>PFAM Domain Stats</a></p>
<p><a href='go.html'>Gene Ontology Enrichment Analysis</a></p>
<p><a href='orthologs.html'>Orthologous proteins</a></p>
<br>
'''
SUMMARY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Genome Summary Stats</h2>
<div class="table-responsive">
'''
PHYLOGENY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">RAxML Maximum Likelihood Phylogeny</h2>
<a href='phylogeny/ML.phylogeny.pdf'><img src="phylogeny/ML.phylogeny.pdf" height="500" /></a></div>
'''
NOPHYLOGENY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Number of species too low to generate phylogeny</h2>
'''
MEROPS = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">MEROPS Protease Families per Genome Results</h2>
<div class='row'>
<div class="col-sm-7"><a href='merops/MEROPS.graph.pdf'><img src="merops/MEROPS.graph.pdf" height="350" /></a></div>
<div class="col-sm-5"><a href='merops/MEROPS.heatmap.pdf'><img src="merops/MEROPS.heatmap.pdf" height="500" /></a></div>
</div>
<div class="table-responsive">
'''
INTERPRO = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">InterProScan Domains per Genome Results</h2>
<div class='row'>
<a href='interpro/InterProScan.nmds.pdf'><img src="interpro/InterProScan.nmds.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
PFAM = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">PFAM Domains per Genome Results</h2>
<div class='row'>
<a href='pfam/PFAM.nmds.pdf'><img src="pfam/PFAM.nmds.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
SIGNALP = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Secreted Proteins per Genome Results</h2>
<div class='row'>
<a href='signalp/signalp.pdf'><img src="signalp/signalp.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
TF = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Fungal Transcription Factors per Genome Results</h2>
<div class='row'>
<a href='tfs/TF.heatmap.pdf'><img src="tfs/TF.heatmap.pdf" height="800" /></a></div>
<div class="table-responsive">
'''
SECMET = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Secondary Metabolism Clusters per Genome Results</h2>
<div class='row'>
<a href='secmet/SM.graph.pdf'><img src="secmet/SM.graph.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
CAZY = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">CAZyme Families per Genome Results</h2>
<div class='row'>
<div class="col-sm-7"><a href='cazy/CAZy.graph.pdf'><img src="cazy/CAZy.graph.pdf" height="350" /></a></div>
<div class="col-sm-5"><a href='cazy/CAZy.heatmap.pdf'><img src="cazy/CAZy.heatmap.pdf" height="600" /></a></div>
</div>
<div class="table-responsive">
'''
COG = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">Clusters of Orthologous Groups (COGs) per Genome Results</h2>
<div class='row'>
<a href='cogs/COGS.graph.pdf'><img src="cogs/COGS.graph.pdf" height="500" /></a></div>
<div class="table-responsive">
'''
GO = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">GO ontology enrichment Results</h2>
<div class='row'>
'''
MISSING = '''
<div class="container">
<div class="starter-template">
<h2 class="sub-header">These data are missing from annotation.</h2>
'''
CITATION = '''
<div class="container">
<div class="starter-template">
<h3 class="sub-header">If you found Funannotate useful please cite:</h3>
<p>Palmer JM. 2016. Funannotate: a fungal genome annotation and comparative genomics pipeline. <a href="https://github.com/nextgenusfs/funannotate">https://github.com/nextgenusfs/funannotate</a>.</p>
'''
FOOTER = '''
</div>
</div>
</div><!-- /.container -->
<!-- Bootstrap core JavaScript
================================================== -->
<!-- Placed at the end of the document so the pages load faster -->
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script>window.jQuery || document.write('<script src="js/jquery.min.js"><\/script>')</script>
<script src="js/bootstrap.min.js"></script>
<!-- IE10 viewport hack for Surface/desktop Windows 8 bug -->
<script src="js/ie10-viewport-bug-workaround.js"></script>
</body>
</html>
'''
HEADER2 = '''
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="funannotate comparative genomics output" content="">
<meta name="Jonathan Palmer" content="">
<title>Funannotate</title>
<link href="css/bootstrap.min.css" rel="stylesheet">
<link href="css/starter-template.css" rel="stylesheet">
<script src="js/ie-emulation-modes-warning.js"></script>
<link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/t/bs/dt-1.10.11/datatables.min.css"/>
<script type="text/javascript" src="https://cdn.datatables.net/t/bs/dt-1.10.11/datatables.min.js"></script>
</head>
<body>
<nav class="navbar navbar-inverse navbar-fixed-top">
<div class="container-fluid">
<div class="navbar-header">
<span class="sr-only">Toggle navigation</span>
<a class="navbar-brand" href="index.html">Funannotate</a>
</div>
<div class="navbar-header">
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
<li class="active"><a href="stats.html">Stats</a></li>
<li><a href="orthologs.html">Orthologs</a></li>
<li><a href="interpro.html">InterProScan</a></li>
<li><a href="pfam.html">PFAM</a></li>
<li><a href="merops.html">Merops</a></li>
<li><a href="cazy.html">CAZymes</a></li>
<li><a href="signalp.html">SignalP</a></li>
<li><a href="go.html">GO ontology</a></li>
<li><a href="citation.html">Citation</a></li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-haspopup="true" aria-expanded="false">Genomes <span class="caret"></span></a>
<ul class="dropdown-menu">
'''
|
the-stack_106_17271
|
"""
This is a hacky little attempt using the tools from the trigger creation script to identify a
good set of label strings. The idea is to train a linear classifier over the predict token and
then look at the most similar tokens.
"""
import argparse
import json
import logging
from pathlib import Path
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from transformers import (
AutoConfig, AutoModelWithLMHead, AutoTokenizer, BertForMaskedLM, RobertaForMaskedLM
)
from tqdm import tqdm
import autoprompt.utils as utils
import autoprompt.create_trigger as ct
logger = logging.getLogger(__name__)
def load_pretrained(model_name):
"""
Loads pretrained HuggingFace config/model/tokenizer, as well as performs required
initialization steps to facilitate working with triggers.
"""
    config = AutoConfig.from_pretrained(model_name)
    model = AutoModelWithLMHead.from_pretrained(model_name, config=config)
    model.eval()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
utils.add_task_specific_tokens(tokenizer)
return config, model, tokenizer
def get_final_embeddings(model):
if isinstance(model, BertForMaskedLM):
return model.cls.predictions.transform
elif isinstance(model, RobertaForMaskedLM):
return model.lm_head.layer_norm
else:
raise NotImplementedError(f'{model} not currently supported')
def get_word_embeddings(model):
if isinstance(model, BertForMaskedLM):
return model.cls.predictions.decoder.weight
elif isinstance(model, RobertaForMaskedLM):
return model.lm_head.decoder.weight
else:
raise NotImplementedError(f'{model} not currently supported')
def main(args):
ct.set_seed(args.seed)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info('Loading model, tokenizer, etc.')
config, model, tokenizer = load_pretrained(args.model_name)
model.to(device)
final_embeddings = get_final_embeddings(model)
embedding_storage = utils.OutputStorage(final_embeddings)
word_embeddings = get_word_embeddings(model)
label_map = json.loads(args.label_map)
reverse_label_map = {y: x for x, y in label_map.items()}
templatizer = utils.TriggerTemplatizer(
args.template, None,
tokenizer,
label_map=label_map,
label_field=args.label_field,
add_special_tokens=False, use_ctx=False
)
# The weights of this projection will help identify the best label words.
projection = torch.nn.Linear(config.hidden_size, len(label_map))
projection.to(device)
# Obtain the initial trigger tokens and label mapping
if args.initial_trigger:
trigger_ids = tokenizer.encode(
args.initial_trigger,
add_special_tokens=False,
add_prefix_space=True
)
assert len(trigger_ids) == templatizer.num_trigger_tokens
else:
trigger_ids = [tokenizer.mask_token_id] * templatizer.num_trigger_tokens
trigger_ids = torch.tensor(trigger_ids, device=device).unsqueeze(0)
logger.info('Loading datasets')
collator = utils.Collator(pad_token_id=tokenizer.pad_token_id)
train_dataset = utils.load_trigger_dataset(args.train, templatizer, use_ctx=False)
train_loader = DataLoader(train_dataset, batch_size=args.bsz, shuffle=True, collate_fn=collator)
optimizer = torch.optim.Adam(projection.parameters(), lr=args.lr)
scores = torch.matmul(projection.weight, word_embeddings.transpose(0, 1))
scores = F.softmax(scores, dim=0)
for i, row in enumerate(scores):
_, top = row.topk(args.k)
decoded = tokenizer.convert_ids_to_tokens(top)
logger.info(f"Top k for class {reverse_label_map[i]}: {', '.join(decoded)}")
logger.info('Training')
for i in range(args.iters):
pbar = tqdm(train_loader)
for model_inputs, labels in pbar:
optimizer.zero_grad()
model_inputs = {k: v.to(device) for k, v in model_inputs.items()}
labels = labels.to(device)
trigger_mask = model_inputs.pop('trigger_mask')
predict_mask = model_inputs.pop('predict_mask')
model_inputs = ct.replace_trigger_tokens(model_inputs, trigger_ids, trigger_mask)
with torch.no_grad():
model(**model_inputs)
embeddings = embedding_storage.get()
predict_embeddings = embeddings.masked_select(predict_mask.unsqueeze(-1)).view(embeddings.size(0), -1)
logits = projection(predict_embeddings)
loss = F.cross_entropy(logits, labels.squeeze(-1))
loss.backward()
optimizer.step()
pbar.set_description(f'loss: {loss : 0.4f}')
scores = torch.matmul(projection.weight, word_embeddings.transpose(0, 1))
scores = F.softmax(scores, dim=0)
for i, row in enumerate(scores):
_, top = row.topk(args.k)
decoded = tokenizer.convert_ids_to_tokens(top)
logger.info(f"Top k for class {reverse_label_map[i]}: {', '.join(decoded)}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--train', type=Path, required=True, help='Train data path')
parser.add_argument('--template', type=str, help='Template string')
parser.add_argument('--label-map', type=str, help='JSON object defining label map')
parser.add_argument('--initial-trigger', type=str, default=None, help='Manual prompt')
parser.add_argument('--label-field', type=str, default='label',
help='Name of the label field')
parser.add_argument('--lr', type=float, default=3e-4, help='Learning rate')
parser.add_argument('--k', type=int, default=50, help='Number of label tokens to print')
parser.add_argument('--bsz', type=int, default=32, help='Batch size')
parser.add_argument('--iters', type=int, default=10,
help='Number of iterations to run label search')
parser.add_argument('--model-name', type=str, default='bert-base-cased',
help='Model name passed to HuggingFace AutoX classes.')
parser.add_argument('--seed', type=int, default=0)
parser.add_argument('--debug', action='store_true')
args = parser.parse_args()
if args.debug:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level)
main(args)
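#Example invocation (editor's sketch; the script name, file paths, template and label map
#are placeholders -- the label-map values are assumed to be integer class indices, based on
#how reverse_label_map is indexed above):
#  python label_search.py \
#      --train train.jsonl \
#      --template '<s> {sentence} [T] [T] [T] [P] . </s>' \
#      --label-map '{"negative": 0, "positive": 1}' \
#      --model-name roberta-large --k 50 --iters 10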
|
the-stack_106_17272
|
import json
import shutil
import sys
from pathlib import Path
import os
import subprocess
import argparse
SCRIPT_DIR = Path(__file__).parent.absolute()
ROOT_DIR = SCRIPT_DIR.parent.parent.absolute()
FRONTEND_DIR = ROOT_DIR / "src" / "frontend"
WEB_DIR = ROOT_DIR / "src"
def build_new_static(env):
shutil.rmtree("dist", ignore_errors=True)
if env == "prod":
output = subprocess.run("npm run-script build_gcp_prod", shell=True, capture_output=True)
else:
output = subprocess.run("npm run-script build_gcp_staging", shell=True, capture_output=True)
print(output.stdout.decode())
print(output.stderr.decode())
def deploy_static(env):
if "PYCHARM_HOSTED" in os.environ:
del os.environ["PYCHARM_HOSTED"]
output = subprocess.run(f"firebase deploy --only hosting:{env}", shell=True, capture_output=True)
print(output.stdout.decode())
print(output.stderr.decode())
def parse_arguments():
parser = argparse.ArgumentParser()
parser.add_argument(
"env",
choices=["prod", "staging"],
help="Allowed values: prod, staging.",
)
parser.add_argument(
"part",
choices=["frontend", "api", "all"],
help="Allowed values: frontend, api, all.",
)
return parser.parse_args()
def read_deployment_config(env):
with open(SCRIPT_DIR / f"{env}" / f"config.json") as f:
return json.loads(f.read())
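# Editor's sketch of the keys this script expects in <env>/config.json, inferred from
# docker_build_and_push() and deploy_to_cloud_run() below; all values are placeholders:
# {
#   "project": "my-gcp-project",
#   "ar_registry": "europe-docker.pkg.dev",
#   "ar_repository": "my-repo",
#   "image_name": "api-image",
#   "service_name": "api-service",
#   "region": "europe-west1",
#   "concurrency": 80,
#   "env_variables": {"SOME_VAR": "value"}
# }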
def run_and_output(command):
output = subprocess.run(command, shell=True, capture_output=True)
print(output.stdout.decode())
print(output.stderr.decode())
def docker_build_and_push(deploy_config):
image_name = deploy_config["image_name"]
ar_registry = deploy_config["ar_registry"]
ar_prefix = deploy_config["project"] + "/" + deploy_config["ar_repository"]
if "PYCHARM_HOSTED" in os.environ:
del os.environ["PYCHARM_HOSTED"]
run_and_output(f"gcloud auth configure-docker {ar_registry} --quiet")
run_and_output(f"docker build -f Dockerfile -t {ar_registry}/{ar_prefix}/{image_name} .")
run_and_output(f"docker push {ar_registry}/{ar_prefix}/{image_name}")
def deploy_to_cloud_run(deploy_config):
service_name = deploy_config["service_name"]
region = deploy_config["region"]
project = deploy_config["project"]
concurrency = deploy_config["concurrency"]
image = "/".join(
[deploy_config["ar_registry"], project, deploy_config["ar_repository"], deploy_config["image_name"]])
env_vars = ",".join([f"{k}=\"{v}\"" for k, v in deploy_config["env_variables"].items()])
command = f"gcloud run deploy {service_name} --region={region} --project={project} --image={image} " \
f"--allow-unauthenticated --concurrency={concurrency} --set-env-vars={env_vars}"
run_and_output(command)
if __name__ == "__main__":
ARGS = parse_arguments()
if ARGS.env == "prod":
print("Are you sure you want to deploy to prod?")
x = input()
if x != "yes":
sys.exit()
DEPLOY_CONFIG = read_deployment_config(ARGS.env)
if ARGS.part in ["frontend", "all"]:
os.chdir(FRONTEND_DIR)
build_new_static(ARGS.env)
os.chdir(WEB_DIR)
deploy_static(ARGS.env)
if ARGS.part in ["api", "all"]:
os.chdir(WEB_DIR)
docker_build_and_push(DEPLOY_CONFIG)
deploy_to_cloud_run(DEPLOY_CONFIG)
|
the-stack_106_17273
|
"""Configs for building the Mousavi model.
"""
class Config:
"""A class used for mousavi model configs.
"""
def __init__(self):
"""
Parameters
----------
signal_len: int
The length of the input ECG signal(Time in secs * Sampling rate).
input_channels: int
The number of input channels of an ECG signal.
beat_len: int
The length of the segmented ECG beat(Time in secs * Sampling rate).
kernel_size: int
The kernel size of the 1D-convolution kernel.
num_blocks_list: List[int]
The number of residual blocks in the model.
lstm_units: int
The number of units in the LSTM layer.
start_filters: int
The number of filters in at the start of the 1D-convolution layer.
classes: int
The number of classes in the output layer.
"""
self.signal_len = 1000
self.input_channels = 12
self.beat_len = 50
self.kernel_size = 8
self.num_blocks_list = [2, 2, 2]
self.lstm_units = 64
self.start_filters = 32
self.classes = 5
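# Illustrative usage (editor's sketch):
#   cfg = Config()
#   input_shape = (cfg.signal_len, cfg.input_channels)   # (1000, 12)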
|
the-stack_106_17277
|
"""Command to show diagnosis information about mpf and mc."""
import sys
from serial.tools import list_ports
from mpf._version import version as mpf_version
class Command:
"""Runs the mpf game."""
def __init__(self, mpf_path, machine_path, args):
"""Run mpf diagnosis."""
del args
print("MPF version: {}".format(mpf_version))
print("MPF install location: {}".format(mpf_path))
print("Machine folder detected: {}".format(machine_path))
try:
from mpfmc._version import version as mc_version
print("MPF-MC version: {}".format(mc_version))
except ImportError:
print("MPF-MC not found")
print("\nSerial ports found:")
iterator = list_ports.comports()
        for port, desc, hwid in iterator:
sys.stdout.write("{:20}\n".format(port))
sys.stdout.write(" desc: {}\n".format(desc))
sys.stdout.write(" hwid: {}\n".format(hwid))
sys.exit()
|
the-stack_106_17279
|
import time
import cv2
import requests
import numpy as np
from PIL import ImageGrab, Image
username = "jarde"
password = "invisy"
url_with_auth = f"http://{username}:{password}@140.193.201.45:8080/shot.jpg"
url = f"http://140.193.201.45:8080/shot.jpg"
class WebFeed:
@staticmethod
def get_image():
img_response = requests.get(url)
img_array = np.array(bytearray(img_response.content), dtype=np.uint8)
if img_array is not None:
img = cv2.imdecode(img_array, -1)
return img
@staticmethod
    def get_feed():
        # Stream frames from the IP camera and display them until ESC is pressed.
        while True:
            img_response = requests.get(url)
            img_array = np.array(bytearray(img_response.content), dtype=np.uint8)
            if img_array is not None:
                img = cv2.imdecode(img_array, -1)
                cv2.imshow("Invisy", img)
            if cv2.waitKey(1) == 27:
                break
        cv2.destroyAllWindows()
@staticmethod
    def get_feed_single_image():
        img_response = requests.get(url)
        img_array = np.array(bytearray(img_response.content), dtype=np.uint8)
        img = cv2.imdecode(img_array, -1)
        # OpenCV decodes to BGR; convert to RGB before wrapping in a PIL Image.
        return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
@staticmethod
def get_screen_capture():
last_time = time.time()
        screen = ImageGrab.grab(bbox=(50, 50, 800, 640))
        return screen
# print('Loop took {} seconds', format(time.time() - last_time))
# cv2.imshow("test", np.array(screen))
# last_time = time.time()
# if cv2.waitKey(25) & 0xFF == ord('q'):
# cv2.destroyAllWindows()
# break
# WebFeed.get_feed_single_image()
# WebFeed.get_feed_constant()
# WebFeed.get_feed()
|
the-stack_106_17280
|
from pyne.material import Material as pymat
import copy
from collections import Counter
class Materialflow(pymat):
""" Class contains information about burnable material flow. Based on PyNE
Material.
"""
def __init__(
self,
comp=None,
mass=-1.0,
density=1.0,
atoms_per_molecule=-1.0,
metadata=None,
vol=1.0,
temp=900,
mass_flowrate=0.0,
void_frac=0.0,
burnup=0.0):
""" Initializes the Materialflow object.
Parameters
----------
PyNE.Material : class
PyNE Material parent class containing nuclide vector, density,
mass, atoms_per_molecule, metadata
temp : float
temperature of the material flow (K)
mass_flowrate : float
mass flow rate of the material flow (g/s)
void_frac : float
void fraction in the material (%)
burnup : float
material burnup at the end of depletion step [MWd/kgU]
"""
# initialize parent class attributes
# super().__init__()
# initialize all object attributes
self.vol = vol
self.temp = temp
self.mass_flowrate = mass_flowrate
self.void_frac = void_frac
self.burnup = burnup
def get_mass(self):
"""Returns total mass of the material descibed in Materialflow object.
Returns
-------
float
The mass of the object.
"""
return self.mass
def print_attr(self):
"""Prints various attributes of Materialflow object.
"""
print("Volume %f cm3" % self.vol)
print("Mass %f g" % self.mass)
print("Density %f g/cm3" % self.density)
print("Atoms per molecule %f " % self.atoms_per_molecule)
print("Meta %s " % self.metadata)
print("Mass flowrate %f g/s" % self.mass_flowrate)
print("Temperature %f K" % self.temp)
print("Void fraction %f " % self.void_frac)
print("Burnup %f MWd/kgU" % self.burnup)
print("U-235 mass %f g" % self[922350000])
def scale_matflow(self, f=1.0):
"""Returns nuclide vector dictionary, obtained from object attrs and
then scaled by factor.
Parameters
----------
f : float
Scaling factor.
Returns
-------
dict
            Materialflow nuclide component dictionary of scaled masses.
            The keys of `new_mat_comp` are preserved from PyNE Material
            (integers representing nuclides in id-form). The values are floats
            giving each nuclide's mass (mass fraction times total mass), multiplied by factor f.
"""
old_dict = dict(self.comp)
new_mat_comp = {}
for key, value in old_dict.items():
new_mat_comp[key] = f * self.mass * value
return new_mat_comp
def copy_pymat_attrs(self, src):
"""Copies PyNE attributites from source object (`src`) to target
object.
Parameters
----------
src : obj
Materialflow object to copy attributes from.
"""
setattr(self, 'density', copy.deepcopy(src.density))
setattr(self,
'atoms_per_molecule',
copy.deepcopy(src.atoms_per_molecule))
self.metadata = src.metadata
def __deepcopy__(self, memo):
"""Return a deep copy of compound object `self`.
Parameters
----------
self : obj
Compound object.
memo : dict, optional
Id-to-object correspondence to control for recursion.
Returns
-------
obj
New compound object copied from `self`.
"""
        # Initiate a new object by copying the class from self
cls = self.__class__
result = cls.__new__(cls)
# Copy nuclide vector from self
result = Materialflow(self.scale_matflow())
# Copy Materialflow density and atoms_per_molecule
result.copy_pymat_attrs(self)
# Copy other object attributes such as volume, burnup, etc
for k, v in self.__dict__.items():
if 'comp' not in k:
setattr(result, k, copy.deepcopy(v))
return result
def __eq__(self, other):
"""Overrides Python ``=`` operation to compare two Materialflow
objects. Compares objects total mass, density, atoms_per_molecule,
temperature, mass flowrate, and masses of important isotopes:
uranium-235 and uranium-238.
Parameters
----------
other : obj
Materialflow object to compare with.
Returns
-------
bool
Are the objects equal?
"""
if not isinstance(other, Materialflow):
# don't attempt to compare against unrelated types
return NotImplemented
return self.mass == other.mass and self.vol == other.vol \
and self.density == other.density \
and self.atoms_per_molecule == other.atoms_per_molecule \
and self.temp == other.temp \
and self.mass_flowrate == other.mass_flowrate \
and self[922350000] == other[922350000] \
and self[922380000] == other[922380000]
#
# Materialflow math operation Overloads
#
def __add__(x, y):
"""Overrides Python adding operation for Materialflow objects.
Parameters
----------
x : obj
Materialflow object #1.
y : obj
Materialflow object #2.
Returns
-------
obj
Materialflow which is a sum of isotope masses from `x` and `y`.
"""
cls = x.__class__
result = cls.__new__(cls)
result.mass = x.mass + y.mass
x_comp = Counter(x)
y_comp = Counter(y)
x_comp.update(y_comp)
result.comp = dict(x_comp)
result.norm_comp()
result.mass_flowrate = x.mass_flowrate + y.mass_flowrate
# result.temp = (x.temp*x.mass + y.temp*y.mass)/result.mass # averaged
result.temp = x.temp
        # Burnup is simply averaged but should be renormalized by heavy metal
result.burnup = (x.burnup*x.mass + y.burnup*y.mass)/result.mass
# result.density = result.mass/result.vol
result.density = x.density
result.vol = result.mass/result.density
result.void_frac = (x.void_frac*x.vol + y.void_frac*y.vol)/result.vol
return result
def __rmul__(self, scaling_factor):
"""Overrides Python multiplication operation for Materialflow objects.
Parameters
----------
scaling_factor : float or int
Scaling factor.
Returns
-------
obj
Materialflow object which has mass of each isotope and
mass_flowrate scaled by `other`.
"""
if isinstance(scaling_factor, (int, float)):
result = copy.deepcopy(self)
result.mass = scaling_factor * self.mass
result.norm_comp()
result.vol = scaling_factor * self.vol
result.mass_flowrate = scaling_factor * self.mass_flowrate
# result.temp = (x.temp*x.mass + y.temp*y.mass)/result.mass
return result
        else:
            return NotImplemented
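# Minimal usage sketch (assumes a working PyNE installation; the isotope IDs,
# masses and rates below are illustrative only):
#
#   fuel = Materialflow({922350000: 0.04, 922380000: 0.96},
#                       mass_flowrate=5.0, temp=900.0)
#   fuel.mass = 1.0e3                 # g
#   doubled = 2 * fuel                # __rmul__ scales mass, volume, flowrate
#   combined = fuel + doubled         # __add__ sums isotope masses
#   combined.print_attr()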
|
the-stack_106_17284
|
"""Strptime-related classes and functions.
CLASSES:
LocaleTime -- Discovers and stores locale-specific time information
TimeRE -- Creates regexes for pattern matching a string of text containing
time information
FUNCTIONS:
_getlang -- Figure out what language is being used for the locale
strptime -- Calculates the time struct represented by the passed-in string
"""
import time
import locale
import calendar
from re import compile as re_compile
from re import IGNORECASE
from re import escape as re_escape
from datetime import date as datetime_date, timedelta as datetime_timedelta, timezone as datetime_timezone
try:
from _thread import allocate_lock as _thread_allocate_lock
except ImportError:
from _dummy_thread import allocate_lock as _thread_allocate_lock
__all__ = []
def _getlang():
return locale.getlocale(locale.LC_TIME)
class LocaleTime(object):
"""Stores and handles locale-specific information related to time.
ATTRIBUTES:
f_weekday -- full weekday names (7-item list)
a_weekday -- abbreviated weekday names (7-item list)
f_month -- full month names (13-item list; dummy value in [0], which
is added by code)
a_month -- abbreviated month names (13-item list, dummy value in
[0], which is added by code)
am_pm -- AM/PM representation (2-item list)
LC_date_time -- format string for date/time representation (string)
LC_date -- format string for date representation (string)
LC_time -- format string for time representation (string)
timezone -- daylight- and non-daylight-savings timezone representation
(2-item list of sets)
lang -- Language used by instance (2-item tuple)
"""
def __init__(self):
"""Set all attributes.
Order of methods called matters for dependency reasons.
The locale language is set at the offset and then checked again before
exiting. This is to make sure that the attributes were not set with a
mix of information from more than one locale. This would most likely
happen when using threads where one thread calls a locale-dependent
function while another thread changes the locale while the function in
the other thread is still running. Proper coding would call for
locks to prevent changing the locale while locale-dependent code is
running. The check here is done in case someone does not think about
doing this.
Only other possible issue is if someone changed the timezone and did
not call tz.tzset . That is an issue for the programmer, though,
since changing the timezone is worthless without that call.
"""
self.lang = _getlang()
self.__calc_weekday()
self.__calc_month()
self.__calc_am_pm()
self.__calc_timezone()
self.__calc_date_time()
if _getlang() != self.lang:
raise ValueError('locale changed during initialization')
if time.tzname != self.tzname or time.daylight != self.daylight:
raise ValueError('timezone changed during initialization')
def __pad(self, seq, front):
seq = list(seq)
if front:
seq.insert(0, '')
else:
seq.append('')
return seq
def __calc_weekday(self):
a_weekday = [calendar.day_abbr[i].lower() for i in range(7)]
f_weekday = [calendar.day_name[i].lower() for i in range(7)]
self.a_weekday = a_weekday
self.f_weekday = f_weekday
def __calc_month(self):
a_month = [calendar.month_abbr[i].lower() for i in range(13)]
f_month = [calendar.month_name[i].lower() for i in range(13)]
self.a_month = a_month
self.f_month = f_month
def __calc_am_pm(self):
am_pm = []
for hour in (1, 22):
            time_tuple = time.struct_time((1999, 3, 17, hour, 44, 55, 2, 76, 0))
am_pm.append(time.strftime('%p', time_tuple).lower())
self.am_pm = am_pm
def __calc_date_time(self):
time_tuple = time.struct_time((1999, 3, 17, 22, 44, 55, 2, 76, 0))
date_time = [None, None, None]
date_time[0] = time.strftime('%c', time_tuple).lower()
date_time[1] = time.strftime('%x', time_tuple).lower()
date_time[2] = time.strftime('%X', time_tuple).lower()
replacement_pairs = [('%', '%%'), (self.f_weekday[2], '%A'), (self.
f_month[3], '%B'), (self.a_weekday[2], '%a'), (self.a_month[3],
'%b'), (self.am_pm[1], '%p'), ('1999', '%Y'), ('99', '%y'), (
'22', '%H'), ('44', '%M'), ('55', '%S'), ('76', '%j'), ('17',
'%d'), ('03', '%m'), ('3', '%m'), ('2', '%w'), ('10', '%I')]
replacement_pairs.extend([(tz, '%Z') for tz_values in self.timezone for
tz in tz_values])
for offset, directive in ((0, '%c'), (1, '%x'), (2, '%X')):
current_format = date_time[offset]
for old, new in replacement_pairs:
if old:
current_format = current_format.replace(old, new)
time_tuple = time.struct_time((1999, 1, 3, 1, 1, 1, 6, 3, 0))
if '00' in time.strftime(directive, time_tuple):
U_W = '%W'
else:
U_W = '%U'
date_time[offset] = current_format.replace('11', U_W)
self.LC_date_time = date_time[0]
self.LC_date = date_time[1]
self.LC_time = date_time[2]
def __calc_timezone(self):
try:
time.tzset()
except AttributeError:
pass
self.tzname = time.tzname
self.daylight = time.daylight
no_saving = frozenset({'utc', 'gmt', self.tzname[0].lower()})
if self.daylight:
has_saving = frozenset({self.tzname[1].lower()})
else:
has_saving = frozenset()
self.timezone = no_saving, has_saving
class TimeRE(dict):
"""Handle conversion from format directives to regexes."""
def __init__(self, locale_time=None):
"""Create keys/values.
Order of execution is important for dependency reasons.
"""
if locale_time:
self.locale_time = locale_time
else:
self.locale_time = LocaleTime()
base = super()
base.__init__({'d': '(?P<d>3[0-1]|[1-2]\\d|0[1-9]|[1-9]| [1-9])',
'f': '(?P<f>[0-9]{1,6})', 'H': '(?P<H>2[0-3]|[0-1]\\d|\\d)',
'I': '(?P<I>1[0-2]|0[1-9]|[1-9])', 'G': '(?P<G>\\d\\d\\d\\d)',
'j':
'(?P<j>36[0-6]|3[0-5]\\d|[1-2]\\d\\d|0[1-9]\\d|00[1-9]|[1-9]\\d|0[1-9]|[1-9])'
, 'm': '(?P<m>1[0-2]|0[1-9]|[1-9])', 'M': '(?P<M>[0-5]\\d|\\d)',
'S': '(?P<S>6[0-1]|[0-5]\\d|\\d)', 'U':
'(?P<U>5[0-3]|[0-4]\\d|\\d)', 'w': '(?P<w>[0-6])', 'u':
'(?P<u>[1-7])', 'V': '(?P<V>5[0-3]|0[1-9]|[1-4]\\d|\\d)', 'y':
'(?P<y>\\d\\d)', 'Y': '(?P<Y>\\d\\d\\d\\d)', 'z':
'(?P<z>[+-]\\d\\d[0-5]\\d)', 'A': self.__seqToRE(self.
locale_time.f_weekday, 'A'), 'a': self.__seqToRE(self.
locale_time.a_weekday, 'a'), 'B': self.__seqToRE(self.
locale_time.f_month[1:], 'B'), 'b': self.__seqToRE(self.
locale_time.a_month[1:], 'b'), 'p': self.__seqToRE(self.
locale_time.am_pm, 'p'), 'Z': self.__seqToRE((tz for tz_names in
self.locale_time.timezone for tz in tz_names), 'Z'), '%': '%'})
base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
base.__setitem__('x', self.pattern(self.locale_time.LC_date))
base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
"""Convert a list to a regex string for matching a directive.
Want possible matching values to be from longest to shortest. This
prevents the possibility of a match occurring for a value that also
a substring of a larger value that should have matched (e.g., 'abc'
matching when 'abcdef' should have been the match).
"""
to_convert = sorted(to_convert, key=len, reverse=True)
for value in to_convert:
if value != '':
break
else:
return ''
regex = '|'.join(re_escape(stuff) for stuff in to_convert)
regex = '(?P<%s>%s' % (directive, regex)
return '%s)' % regex
def pattern(self, format):
"""Return regex pattern for the format string.
Need to make sure that any characters that might be interpreted as
regex syntax are escaped.
"""
processed_format = ''
regex_chars = re_compile('([\\\\.^$*+?\\(\\){}\\[\\]|])')
format = regex_chars.sub('\\\\\\1', format)
whitespace_replacement = re_compile('\\s+')
format = whitespace_replacement.sub('\\\\s+', format)
while '%' in format:
directive_index = format.index('%') + 1
processed_format = '%s%s%s' % (processed_format, format[:
directive_index - 1], self[format[directive_index]])
format = format[directive_index + 1:]
return '%s%s' % (processed_format, format)
def compile(self, format):
"""Return a compiled re object for the format string."""
return re_compile(self.pattern(format), IGNORECASE)
_cache_lock = _thread_allocate_lock()
_TimeRE_cache = TimeRE()
_CACHE_MAX_SIZE = 5
_regex_cache = {}
def _calc_julian_from_U_or_W(year, week_of_year, day_of_week, week_starts_Mon):
"""Calculate the Julian day based on the year, week of the year, and day of
the week, with week_start_day representing whether the week of the year
assumes the week starts on Sunday or Monday (6 or 0)."""
first_weekday = datetime_date(year, 1, 1).weekday()
if not week_starts_Mon:
first_weekday = (first_weekday + 1) % 7
day_of_week = (day_of_week + 1) % 7
week_0_length = (7 - first_weekday) % 7
if week_of_year == 0:
return 1 + day_of_week - first_weekday
else:
days_to_week = week_0_length + 7 * (week_of_year - 1)
return 1 + days_to_week + day_of_week
def _calc_julian_from_V(iso_year, iso_week, iso_weekday):
"""Calculate the Julian day based on the ISO 8601 year, week, and weekday.
ISO weeks start on Mondays, with week 01 being the week containing 4 Jan.
ISO week days range from 1 (Monday) to 7 (Sunday).
"""
correction = datetime_date(iso_year, 1, 4).isoweekday() + 3
ordinal = iso_week * 7 + iso_weekday - correction
if ordinal < 1:
ordinal += datetime_date(iso_year, 1, 1).toordinal()
iso_year -= 1
ordinal -= datetime_date(iso_year, 1, 1).toordinal()
return iso_year, ordinal
def _strptime(data_string, format='%a %b %d %H:%M:%S %Y'):
"""Return a 2-tuple consisting of a time struct and an int containing
the number of microseconds based on the input string and the
format string."""
for index, arg in enumerate([data_string, format]):
if not isinstance(arg, str):
msg = 'strptime() argument {} must be str, not {}'
raise TypeError(msg.format(index, type(arg)))
global _TimeRE_cache, _regex_cache
with _cache_lock:
locale_time = _TimeRE_cache.locale_time
if (_getlang() != locale_time.lang or time.tzname != locale_time.
tzname or time.daylight != locale_time.daylight):
_TimeRE_cache = TimeRE()
_regex_cache.clear()
locale_time = _TimeRE_cache.locale_time
if len(_regex_cache) > _CACHE_MAX_SIZE:
_regex_cache.clear()
format_regex = _regex_cache.get(format)
if not format_regex:
try:
format_regex = _TimeRE_cache.compile(format)
except KeyError as err:
bad_directive = err.args[0]
if bad_directive == '\\':
bad_directive = '%'
del err
raise ValueError("'%s' is a bad directive in format '%s'" %
(bad_directive, format)) from None
except IndexError:
raise ValueError("stray %% in format '%s'" % format) from None
_regex_cache[format] = format_regex
found = format_regex.match(data_string)
if not found:
raise ValueError('time data %r does not match format %r' % (
data_string, format))
if len(data_string) != found.end():
raise ValueError('unconverted data remains: %s' % data_string[found
.end():])
iso_year = year = None
month = day = 1
hour = minute = second = fraction = 0
tz = -1
tzoffset = None
iso_week = week_of_year = None
week_of_year_start = None
weekday = julian = None
found_dict = found.groupdict()
for group_key in found_dict.keys():
if group_key == 'y':
year = int(found_dict['y'])
if year <= 68:
year += 2000
else:
year += 1900
elif group_key == 'Y':
year = int(found_dict['Y'])
elif group_key == 'G':
iso_year = int(found_dict['G'])
elif group_key == 'm':
month = int(found_dict['m'])
elif group_key == 'B':
month = locale_time.f_month.index(found_dict['B'].lower())
elif group_key == 'b':
month = locale_time.a_month.index(found_dict['b'].lower())
elif group_key == 'd':
day = int(found_dict['d'])
elif group_key == 'H':
hour = int(found_dict['H'])
elif group_key == 'I':
hour = int(found_dict['I'])
ampm = found_dict.get('p', '').lower()
if ampm in ('', locale_time.am_pm[0]):
if hour == 12:
hour = 0
elif ampm == locale_time.am_pm[1]:
if hour != 12:
hour += 12
elif group_key == 'M':
minute = int(found_dict['M'])
elif group_key == 'S':
second = int(found_dict['S'])
elif group_key == 'f':
s = found_dict['f']
s += '0' * (6 - len(s))
fraction = int(s)
elif group_key == 'A':
weekday = locale_time.f_weekday.index(found_dict['A'].lower())
elif group_key == 'a':
weekday = locale_time.a_weekday.index(found_dict['a'].lower())
elif group_key == 'w':
weekday = int(found_dict['w'])
if weekday == 0:
weekday = 6
else:
weekday -= 1
elif group_key == 'u':
weekday = int(found_dict['u'])
weekday -= 1
elif group_key == 'j':
julian = int(found_dict['j'])
elif group_key in ('U', 'W'):
week_of_year = int(found_dict[group_key])
if group_key == 'U':
week_of_year_start = 6
else:
week_of_year_start = 0
elif group_key == 'V':
iso_week = int(found_dict['V'])
elif group_key == 'z':
z = found_dict['z']
tzoffset = int(z[1:3]) * 60 + int(z[3:5])
if z.startswith('-'):
tzoffset = -tzoffset
elif group_key == 'Z':
found_zone = found_dict['Z'].lower()
for value, tz_values in enumerate(locale_time.timezone):
if found_zone in tz_values:
if time.tzname[0] == time.tzname[1
] and time.daylight and found_zone not in ('utc', 'gmt'
):
break
else:
tz = value
break
if year is None and iso_year is not None:
if iso_week is None or weekday is None:
raise ValueError(
"ISO year directive '%G' must be used with the ISO week directive '%V' and a weekday directive ('%A', '%a', '%w', or '%u')."
)
if julian is not None:
raise ValueError(
"Day of the year directive '%j' is not compatible with ISO year directive '%G'. Use '%Y' instead."
)
elif week_of_year is None and iso_week is not None:
if weekday is None:
raise ValueError(
"ISO week directive '%V' must be used with the ISO year directive '%G' and a weekday directive ('%A', '%a', '%w', or '%u')."
)
else:
raise ValueError(
"ISO week directive '%V' is incompatible with the year directive '%Y'. Use the ISO year '%G' instead."
)
leap_year_fix = False
if year is None and month == 2 and day == 29:
year = 1904
leap_year_fix = True
elif year is None:
year = 1900
if julian is None and weekday is not None:
if week_of_year is not None:
week_starts_Mon = True if week_of_year_start == 0 else False
julian = _calc_julian_from_U_or_W(year, week_of_year, weekday,
week_starts_Mon)
elif iso_year is not None and iso_week is not None:
year, julian = _calc_julian_from_V(iso_year, iso_week, weekday + 1)
if julian is not None and julian <= 0:
year -= 1
yday = 366 if calendar.isleap(year) else 365
julian += yday
if julian is None:
julian = datetime_date(year, month, day).toordinal() - datetime_date(
year, 1, 1).toordinal() + 1
else:
datetime_result = datetime_date.fromordinal(julian - 1 +
datetime_date(year, 1, 1).toordinal())
year = datetime_result.year
month = datetime_result.month
day = datetime_result.day
if weekday is None:
weekday = datetime_date(year, month, day).weekday()
tzname = found_dict.get('Z')
if tzoffset is not None:
gmtoff = tzoffset * 60
else:
gmtoff = None
if leap_year_fix:
year = 1900
return (year, month, day, hour, minute, second, weekday, julian, tz,
tzname, gmtoff), fraction
def _strptime_time(data_string, format='%a %b %d %H:%M:%S %Y'):
"""Return a time struct based on the input string and the
format string."""
tt = _strptime(data_string, format)[0]
return time.struct_time(tt[:time._STRUCT_TM_ITEMS])
def _strptime_datetime(cls, data_string, format='%a %b %d %H:%M:%S %Y'):
"""Return a class cls instance based on the input string and the
format string."""
tt, fraction = _strptime(data_string, format)
tzname, gmtoff = tt[-2:]
args = tt[:6] + (fraction,)
if gmtoff is not None:
tzdelta = datetime_timedelta(seconds=gmtoff)
if tzname:
tz = datetime_timezone(tzdelta, tzname)
else:
tz = datetime_timezone(tzdelta)
args += tz,
return cls(*args)
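# Minimal usage sketch of the module-level helpers (values are illustrative and
# assume an English LC_TIME locale):
if __name__ == "__main__":
    tt = _strptime_time("Wed Mar 17 22:44:55 1999")               # default format
    print(tt.tm_year, tt.tm_yday)                                  # 1999 76
    st, fraction = _strptime("1999-03-17 22:44:55.250", "%Y-%m-%d %H:%M:%S.%f")
    print(fraction)                                                # 250000 microseconds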
|
the-stack_106_17286
|
import os
import tensorflow as tf
import datetime
from source.loss_manager import LossManager
from source.data_loader import DataLoader
from source.settings_reader import SettingsReader
from source.model import Model
main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
single_view_path = os.path.abspath(os.path.join(main_path, "SingleViewReconstruction"))
import sys
sys.path.append(main_path)
sys.path.append(single_view_path)
from src.utils import StopWatch
settings_file_path = os.path.join(os.path.dirname(__file__), "settings", "settings_file.yml")
settings = SettingsReader(settings_file_path)
data_loader = DataLoader(settings)
# Logging
time_str = str(datetime.datetime.now())
time_str = time_str.replace(' ', '_').replace('-', '_').replace('.', '_').replace(':', '_')
log_dir = os.path.join("logs", time_str)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
validation_size = int(settings.validation_ratio * settings.max_dataset_size)
validation_steps = int(validation_size // settings.batch_size)
train_steps = int((settings.max_dataset_size - validation_size) // settings.batch_size)
# Dataset iterators
trn_op, val_op = data_loader.load_default_iterator()
x_iter, y_iter = trn_op.get_next()
x_iter_val, y_iter_val = val_op.get_next()
val_bool = tf.placeholder(dtype=bool, shape=())
data = tf.cond(val_bool, lambda: x_iter, lambda: x_iter_val)
ground_truth = tf.cond(val_bool, lambda: y_iter, lambda: y_iter_val)
tf.summary.image('ground truth', (ground_truth + 1.) / 2. * 255.)
tf.summary.image('color', data * 255.)
# create the model
model = Model()
model_result = model.create(data)
# LossManager
last_layer, _, _, _ = model.get_results()
loss_manager = LossManager(ground_truth, last_layer)
loss = loss_manager.cosine_similarity()
op, cost = model.compile(settings.learning_rate, loss)
# Timers
model_timer = StopWatch()
train_sum_timer = StopWatch()
val_sum_timer = StopWatch()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
sess.run([trn_op.initializer, val_op.initializer])
# Writers
train_writer = tf.summary.FileWriter(os.path.join(log_dir, 'train'), sess.graph, flush_secs=10)
test_writer = tf.summary.FileWriter(os.path.join(log_dir, 'val'), sess.graph, flush_secs=10)
tf.io.write_graph(sess.graph, log_dir, 'graph.pbtxt')
# operations
merged = tf.summary.merge_all()
training_ops = [cost, op]
training_ops_plus_summary = [merged, cost, op]
# Saver
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
min_for_test = 15
global_step = 0
try:
for epoch_i in range(9999999):
for i in range(train_steps):
print("Current step: {}".format(global_step))
if train_sum_timer.elapsed_time_val > min_for_test * 60:
train_sum_timer.reset()
trainings_output_res = sess.run(training_ops_plus_summary, feed_dict={val_bool: True})
train_writer.add_summary(trainings_output_res[0], global_step)
else:
trainings_output_res = sess.run(training_ops, feed_dict={val_bool: True})
if val_sum_timer.elapsed_time_val > min_for_test * 60.:
val_sum_timer.reset()
summary, _ = sess.run([merged, cost], feed_dict={val_bool: False})
test_writer.add_summary(summary, global_step)
if model_timer.elapsed_time_val > 3*60*60:
model_timer.reset()
saver.save(sess, os.path.join(log_dir, 'model.ckpt'))
global_step += 1
except tf.errors.ResourceExhaustedError:
print("Batch size too big: " + str(settings.batch_size))
exit(1)
|
the-stack_106_17289
|
# Copyright (C) 2021 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
# See: https://spdx.org/licenses/
from abc import ABC, abstractmethod
import typing as ty
import numpy as np
from lava.lib.dnf.utils.convenience import num_neurons
from lava.lib.dnf.operations.shape_handlers import (
AbstractShapeHandler,
KeepShapeHandler,
ReduceDimsHandler,
ExpandDimsHandler,
ReorderHandler)
from lava.lib.dnf.operations.enums import ReduceMethod, BorderType
from lava.lib.dnf.kernels.kernels import Kernel
from lava.lib.dnf.utils.convenience import num_dims
from lava.lib.dnf.utils.math import is_odd
class AbstractOperation(ABC):
"""
Abstract Operation, subclasses of which can be used to parameterize the
connect() function.
Parameters
----------
shape_handler : AbstractShapeHandler
handles, configures, and validates the input and output shape of the
operation
"""
def __init__(self, shape_handler: AbstractShapeHandler) -> None:
self._shape_handler = shape_handler
@property
def output_shape(self) -> ty.Tuple[int, ...]:
"""Return the output shape of the operation"""
return self._shape_handler.output_shape
@property
def input_shape(self) -> ty.Tuple[int, ...]:
"""Return the output shape of the operation"""
return self._shape_handler.input_shape
def compute_weights(self) -> np.ndarray:
"""
Computes the connectivity weight matrix of the operation.
This public method only validates the configuration of the
operation. The actual weights are computed in the
abstract method _compute_weights().
Returns
-------
connectivity weight matrix : numpy.ndarray
"""
# Assert that the input and output shape is configured
self._shape_handler.assert_configured()
return self._compute_weights()
def configure(self,
input_shape: ty.Tuple[int, ...]) -> None:
"""
Configures an operation by setting its input and output shape.
Parameters
----------
input_shape : tuple(int)
input shape of the operation
"""
self._validate_args_with_input_shape(input_shape)
self._shape_handler.configure(input_shape)
@abstractmethod
def _compute_weights(self) -> np.ndarray:
"""
Does the actual work of computing the weights and returns them as a
numpy array.
Returns
-------
weights : numpy.ndarray
"""
pass
def _validate_args_with_input_shape(
self,
input_shape: ty.Tuple[int, ...]
) -> None:
"""Validates any input arguments that the operation may receive, and
that do not get passed on to the ShapeHandler, against the input
shape."""
pass
class Weights(AbstractOperation):
"""
Operation that generates one-to-one connectivity with given weights for
every synapse.
Parameters
----------
weight : float
weight used for every connection
"""
def __init__(self, weight: float) -> None:
super().__init__(KeepShapeHandler())
self.weight = weight
def _compute_weights(self) -> np.ndarray:
return np.eye(num_neurons(self.output_shape),
num_neurons(self.input_shape),
dtype=np.int32) * self.weight
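# Minimal usage sketch (the shape is illustrative; assumes KeepShapeHandler
# keeps the output shape equal to the configured input shape):
#
#   op = Weights(weight=2.5)
#   op.configure(input_shape=(4, 3))
#   w = op.compute_weights()          # (12, 12) identity scaled by 2.5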
class ReduceDims(AbstractOperation):
"""
Operation that reduces the dimensionality of the input by projecting
a specified subset of dimensions onto the remaining dimensions.
Parameters
----------
reduce_dims : int or tuple(int)
indices of dimension that will be reduced/removed
reduce_method : ReduceMethod
method by which the dimensions will be reduced (SUM or MEAN)
"""
def __init__(self,
reduce_dims: ty.Union[int, ty.Tuple[int, ...]],
reduce_method: ty.Optional[ReduceMethod] = ReduceMethod.SUM
) -> None:
super().__init__(ReduceDimsHandler(reduce_dims))
ReduceMethod.validate(reduce_method)
self.reduce_method = reduce_method
def _compute_weights(self) -> np.ndarray:
# Indices of the input dimensions in the weight matrix
# that will not be removed
in_axes_all = np.arange(num_dims(self.input_shape))
sh = ty.cast(ReduceDimsHandler, self._shape_handler)
in_axes_kept = tuple(np.delete(in_axes_all, sh.reduce_dims))
# Generate the weight matrix
weights = _project_dims(self.input_shape,
self.output_shape,
in_axes_kept=in_axes_kept)
if self.reduce_method == ReduceMethod.MEAN:
# Set the weights such that they compute the mean
weights = weights / num_neurons(self.input_shape)
return weights
class ExpandDims(AbstractOperation):
"""
Operation that expands the dimensionality of the input by projecting
the dimensions of the input to the newly added dimensions.
"""
def __init__(self,
new_dims_shape: ty.Union[int, ty.Tuple[int, ...]]) -> None:
super().__init__(ExpandDimsHandler(new_dims_shape))
def _compute_weights(self) -> np.ndarray:
# Indices of the output dimensions in the weight matrix that will
# be kept from the input
out_axes_kept = tuple(np.arange(num_dims(self.input_shape)))
# Generate the weight matrix
weights = _project_dims(self.input_shape,
self.output_shape,
out_axes_kept=out_axes_kept)
return weights
class Reorder(AbstractOperation):
"""
Operation that reorders the dimensions in the input to a specified new
order.
Parameters
----------
order : tuple(int)
new order of the dimensions (see ReorderHandler)
"""
def __init__(self, order: ty.Tuple[int, ...]) -> None:
super().__init__(ReorderHandler(order))
def _compute_weights(self) -> np.ndarray:
sh = ty.cast(ReorderHandler,
self._shape_handler)
weights = _project_dims(self.input_shape,
self.output_shape,
out_axes_kept=sh.order)
return weights
def _project_dims(
input_shape: ty.Tuple[int, ...],
output_shape: ty.Tuple[int, ...],
out_axes_kept: ty.Optional[ty.Tuple[int, ...]] = None,
in_axes_kept: ty.Optional[ty.Tuple[int, ...]] = None
) -> np.ndarray:
"""Projection function that is used both by the ReduceDims and ExpandDims
Operation
Parameters
----------
input_shape : tuple(int)
input shape of the operation
output_shape : tuple(int)
output shape of the operation
out_axes_kept : tuple(int)
indices of the output dimensions in the weight matrix that will
be kept from the input
in_axes_kept : tuple(int)
indices of the input dimensions in the weight matrix that will
be kept for the output
Returns
-------
connectivity weight matrix : numpy.ndarray
"""
num_neurons_in = num_neurons(input_shape)
num_neurons_out = num_neurons(output_shape)
num_dims_in = num_dims(input_shape)
num_dims_out = num_dims(output_shape)
smaller_num_dims = min(num_dims_in, num_dims_out)
if smaller_num_dims == 0:
# If the target is a 0D population, the connectivity is from
# all neurons in the source population to that one neuron
weights = np.ones((num_neurons_out, num_neurons_in))
else:
# Create a dense connectivity matrix, where dimensions of the
# source and target are not yet flattened
shape = output_shape + input_shape
weights = np.zeros(shape)
###
# The following lines create a view on the connectivity matrix,
# in which the axes are moved such that the first dimensions are all
# output dimensions that will be kept, followed by all input
# dimensions that will be kept, followed by all remaining dimensions.
if in_axes_kept is None:
in_axes_kept = np.arange(num_dims_in)
in_axes_kept = tuple(np.asarray(in_axes_kept) + num_dims_out)
if out_axes_kept is None:
out_axes_kept = np.arange(num_dims_out)
out_axes_kept = tuple(out_axes_kept)
# New indices of the kept output dimensions after moving the axes
new_axes_out = tuple(np.arange(len(out_axes_kept)))
# New indices of the kept input dimensions after moving the axes
new_axes_in = tuple(np.arange(len(in_axes_kept)) + len(new_axes_out))
# Create the view by moving the axes
conn = np.moveaxis(weights,
out_axes_kept + in_axes_kept,
new_axes_out + new_axes_in)
#
###
# For each source-target dimension pair, set connections to 1 for
# every pair of neurons along that dimension, as well as to all
# neurons in all remaining dimensions
if smaller_num_dims == 1:
for a in range(np.size(conn, axis=0)):
conn[a, a, ...] = 1
elif smaller_num_dims == 2:
for a in range(np.size(conn, axis=0)):
for b in range(np.size(conn, axis=1)):
conn[a, b, a, b, ...] = 1
elif smaller_num_dims == 3:
for a in range(np.size(conn, axis=0)):
for b in range(np.size(conn, axis=1)):
for c in range(np.size(conn, axis=2)):
conn[a, b, c, a, b, c, ...] = 1
else:
raise NotImplementedError("projection is not implemented for "
"dimensionality > 3")
# Flatten the source and target dimensions of the connectivity
# matrix to get a two-dimensional dense connectivity matrix
weights = weights.reshape((num_neurons_out, num_neurons_in))
return weights
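# Sketch of what _project_dims produces (shapes are illustrative): projecting a
# 1D input of size 3 onto a 2D output of shape (3, 2) while keeping output
# dimension 0 yields a (6, 3) zero/one matrix in which every input neuron is
# connected to both output neurons that share its index:
#
#   w = _project_dims((3,), (3, 2), out_axes_kept=(0,))
#   w.shape                           # (6, 3)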
class Convolution(AbstractOperation):
"""
Creates connectivity that resembles a convolution with a kernel.
Perhaps contrary to other implementations of the convolution, this
operation always leaves the shape of the input intact. That is, a
Convolution operation applied, for instance, to the output of a
population of neurons of shape (42, 42) will also yield an output of
shape (42, 42).
Parameters
----------
kernel : Kernel
kernel of weights that the input will be convolved with; must be of the
same dimensionality as the input
border_types : BorderType or list(BorderType)
determines how the Convolution operation treats borders; valid values
are (1) PADDED, in which case the borders will be padded with a value
that can be specified in the Kernel or (2) CIRCULAR, in which case
the values from the other side of the input will be used as 'padding'
(this is sometimes also called "wrapped")
"""
def __init__(
self,
kernel: ty.Union[Kernel, np.ndarray],
border_types: ty.Optional[ty.Union[BorderType,
ty.List[BorderType]]]
= BorderType.PADDED
) -> None:
super().__init__(KeepShapeHandler())
self._kernel = self._validate_kernel(kernel)
self._border_types = self._validate_border_types(border_types)
@property
def kernel(self) -> Kernel:
"""Returns the kernel"""
return self._kernel
@property
def border_types(self) -> ty.List[BorderType]:
"""Returns the list of border types"""
return self._border_types
@staticmethod
def _validate_kernel(
kernel: ty.Union[Kernel, np.ndarray]
) -> Kernel:
"""Validate the <kernel> argument"""
if isinstance(kernel, np.ndarray):
kernel = Kernel(weights=kernel)
return kernel
@staticmethod
def _validate_border_types(
border_types: ty.Union[BorderType, ty.List[BorderType]]
) -> ty.List[BorderType]:
"""Validates the <border_types> argument"""
if isinstance(border_types, BorderType):
border_types = [border_types]
if not isinstance(border_types, list):
raise TypeError("<border_types> must be of type BorderType or"
"list(BorderType)")
for bt in border_types:
BorderType.validate(bt)
return border_types
def _validate_args_with_input_shape(self,
input_shape: ty.Tuple[int, ...]
) -> None:
# treating 0D cases like 1D cases here
input_dim = len(input_shape)
if len(self._border_types) == 1:
self._border_types *= input_dim
if len(self._border_types) != input_dim:
raise ValueError("number of entries in <border_type> does not"
"match dimensionality of population")
def _compute_weights(self) -> np.ndarray:
# Input shape equals output shape
shape = self.input_shape
# Do not use num_dims() here to treat 0D like 1D
num_dims = len(shape)
_num_neurons = num_neurons(shape)
# Generate a dense connectivity matrix
connectivity_matrix = np.zeros((_num_neurons, _num_neurons))
# Copy the weights of the kernel
kernel_weights = np.copy(self.kernel.weights)
for i in range(num_dims):
# Compute the size difference between the population and the
# kernel in the current dimension
size_diff = shape[i] - np.size(kernel_weights, axis=i)
if size_diff != 0:
pad_width = np.zeros((num_dims, 2), dtype=int)
pad_width[i, :] = int(np.floor(np.abs(size_diff) / 2.0))
# If the padding cannot be distributed evenly...
if is_odd(size_diff):
if is_odd(np.size(kernel_weights, axis=i)):
# ...add one in front if the kernel size is odd...
pad_width[i, 0] += 1
else:
# ...or add one in the back if the kernel size
# is even
pad_width[i, 1] += 1
if size_diff > 0:
# Pad the kernel with its padding value
kernel_weights = \
np.pad(kernel_weights,
pad_width=pad_width,
constant_values=self.kernel.padding_value)
elif size_diff < 0 \
and self.border_types[i] == BorderType.CIRCULAR:
delete_front = pad_width[i, 1]
delete_back = pad_width[i, 0]
kernel_weights = np.delete(kernel_weights,
range(delete_front),
axis=i)
kernel_weights = np.delete(kernel_weights,
range(-delete_back, 0),
axis=i)
# Compute the center of the kernel
kernel_center = np.floor(np.array(kernel_weights.shape) / 2.0)
# Iterate over the shape of the input population
for index, _ in np.ndenumerate(np.zeros(shape)):
# Compute how much the kernel must be shifted to bring its
# center to the correct position
shift = kernel_center.astype(int) - np.array(index,
dtype=int)
conn_weights = kernel_weights
# Shift the weights depending on the border method
for i in range(num_dims):
if self.border_types[i] == BorderType.CIRCULAR:
conn_weights = np.roll(conn_weights, -shift[i], axis=i)
elif self.border_types[i] == BorderType.PADDED:
conn_weights = \
self._shift_fill(conn_weights,
-shift[i],
axis=i,
fill_value=self.kernel.padding_value)
# If the connection weight matrix is too large for the
# population...
size_diff = shape[i] - np.size(conn_weights, axis=i)
if size_diff < 0:
# ...delete the overflowing elements
conn_weights = np.delete(conn_weights,
range(-np.abs(size_diff), 0),
axis=i)
# Flatten kernel matrix
if num_dims > 1:
conn_weights = np.ravel(conn_weights)
# Fill the connectivity matrix
flat_index = np.ravel_multi_index(index, shape)
connectivity_matrix[flat_index, :] = conn_weights
return connectivity_matrix
@staticmethod
def _shift_fill(array: np.ndarray,
shift: int,
axis: int = 0,
fill_value: float = 0) -> np.ndarray:
"""
Shift an array along a given axis, filling the empty elements.
Parameters
----------
array : numpy.ndarray
the array to be shifted
shift : int
number of elements to shift
axis : int
axis along which the array is shifted
fill_value: float
value that will fill up empty elements in the shifted array
Returns
-------
shifted array : numpy.ndarray
"""
if shift != 0:
if axis > array.ndim - 1:
raise IndexError(f"axis {axis} does not exist for array of "
f"shape {array.shape}")
array = np.swapaxes(array, 0, axis)
shifted_array = np.empty_like(array)
if shift < 0:
shifted_array[shift:, ...] = fill_value
shifted_array[:shift, ...] = array[-shift:, ...]
elif shift > 0:
shifted_array[:shift, ...] = fill_value
shifted_array[shift:, ...] = array[:-shift, ...]
shifted_array = np.swapaxes(shifted_array, axis, 0)
return shifted_array
else:
return array
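# Minimal usage sketch of the border-handling helper (values are illustrative):
#
#   arr = np.array([1., 2., 3., 4.])
#   Convolution._shift_fill(arr, shift=1, fill_value=0.)   # -> [0., 1., 2., 3.]
#   Convolution._shift_fill(arr, shift=-1, fill_value=0.)  # -> [2., 3., 4., 0.]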
|
the-stack_106_17290
|
"""
Configure application
---------------------
This module implements parsers and data structures
needed to configure the application. It supports
richer settings than those that can be easily
represented on the command line by leveraging file formats
such as YAML and JSON that are widely used to configure
applications.
.. autoclass:: Config
:members:
.. autoclass:: FileGroup
:members:
.. autofunction:: load_config
.. autofunction:: from_files
"""
import os
import string
import yaml
from forest.export import export
__all__ = []
def combine_variables(os_environ, args_variables):
"""Utility function to update environment with user-specified variables
.. note: When there is a key clash the user-specified args take precedence
:param os_environ: os.environ dict
:param args_variables: variables parsed from command line
:returns: merged dict
"""
variables = dict(os_environ)
if args_variables is not None:
variables.update(dict(args_variables))
return variables
class Config(object):
"""Configuration data structure
This high-level object represents the application configuration.
It is file format agnostic but has helper methods to initialise
itself from disk or memory.
.. note:: This class is intended to provide the top-level
configuration with low-level details implemented
by specialist classes, e.g. :class:`FileGroup`
which contains meta-data for files
:param data: native Python data structure representing application
settings
"""
def __init__(self, data):
self.data = data
def __repr__(self):
return "{}({})".format(
self.__class__.__name__,
self.data)
@property
def patterns(self):
if "files" in self.data:
return [(f["label"], f["pattern"])
for f in self.data["files"]]
return []
@classmethod
def load(cls, path, variables=None):
"""Parse settings from either YAML or JSON file on disk
The configuration can be controlled elegantly
through a text file. Groups of files can
be specified in a list.
.. note:: Relative or absolute directories are
declared through the use of a leading /
.. code-block:: yaml
files:
- label: Trial
pattern: "${TRIAL_DIR}/*.nc"
- label: Control
pattern: "${CONTROL_DIR}/*.nc"
- label: RDT
pattern: "${RDT_DIR}/*.json"
file_type: rdt
:param path: JSON/YAML file to load
:param variables: dict of key/value pairs used by :py:class:`string.Template`
:returns: instance of :class:`Config`
"""
with open(path) as stream:
text = stream.read()
if variables is not None:
template = string.Template(text)
text = template.substitute(**variables)
try:
# PyYaml 5.1 onwards
data = yaml.safe_load(text)
except AttributeError:
data = yaml.load(text)
return cls(data)
@classmethod
def from_files(cls, files, file_type="unified_model"):
"""Configure using list of file names and a file type
:param files: list of file names
:param file_type: keyword to apply to all files
:returns: instance of :class:`Config`
"""
return cls({
"files": [dict(pattern=f, label=f, file_type=file_type)
for f in files]})
@property
def file_groups(self):
return [FileGroup(**data)
for data in self.data["files"]]
class FileGroup(object):
"""Meta-data needed to describe group of files
To describe a collection of related files extra
meta-data is needed. For example, the type of data
contained within the files or how data is catalogued
and searched.
    .. note:: This class violates the interface segregation principle (ISP)
all driver settings are included in the same constructor
    :param label: description used by buttons and tooltips
:param pattern: wildcard pattern used by either SQL or glob
:param locator: keyword describing search method (default: 'file_system')
:param file_type: keyword describing file contents (default: 'unified_model')
:param directory: leaf/absolute directory where file(s) are stored (default: None)
"""
def __init__(self,
label,
pattern,
locator="file_system",
file_type="unified_model",
directory=None,
database_path=None):
self.label = label
self.pattern = pattern
self.locator = locator
self.file_type = file_type
self.directory = directory
self.database_path = database_path
@property
def full_pattern(self):
if self.directory is None:
return self.pattern
return os.path.join(self.directory, self.pattern)
def __eq__(self, other):
if not isinstance(other, self.__class__):
raise Exception("Can not compare")
attrs = ("label", "pattern", "locator", "file_type", "directory")
return all(
getattr(self, attr) == getattr(other, attr)
for attr in attrs)
def __repr__(self):
arg_attrs = [
"label",
"pattern"]
args = [self._str(getattr(self, attr))
for attr in arg_attrs]
kwarg_attrs = [
"locator",
"file_type",
"directory",
"database_path"]
kwargs = [
"{}={}".format(attr, self._str(getattr(self, attr)))
for attr in kwarg_attrs]
return "{}({})".format(
self.__class__.__name__,
", ".join(args + kwargs))
@staticmethod
def _str(value):
if isinstance(value, str):
return "'{}'".format(value)
else:
return str(value)
@export
def load_config(path):
"""Load configuration from a file"""
return Config.load(path)
@export
def from_files(files, file_type):
"""Define configuration with a list of files"""
return Config.from_files(files, file_type)
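# Minimal usage sketch (file names are illustrative):
if __name__ == "__main__":
    _config = from_files(["a.nc", "b.nc"], file_type="unified_model")
    for _group in _config.file_groups:
        print(_group.label, _group.full_pattern)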
|
the-stack_106_17291
|
from typing import Union
import torch
from numpy import ndarray
from torch import Tensor
from torchvision.io import read_video
from .video_data import VideoData
class VideoReader:
"""VideoReader for reading video file"""
@staticmethod
def of_array(video: Union[Tensor, ndarray], video_fps: float, audio: Union[Tensor, ndarray] = None,
audio_fps: float = None, channel_last: bool = False
) -> VideoData:
"""
Read video tensor from Numpy array or PyTorch Tensor directly.
Args:
video (:class:`~torch.Tensor` | :class:`~numpy.ndarray`): Video input array
video_fps (``float``): Video fps
            audio (:class:`~torch.Tensor` | :class:`~numpy.ndarray`, optional): Audio input array
            audio_fps (``float``, optional): Audio fps
            channel_last (``bool``, optional): True if the input video is (T, H, W, C), False if it is already (T, C, H, W)
Returns:
:class:`~tensorneko.io.video.video_data.VideoData`:
                A VideoData object containing a float video tensor (T, C, H, W) with values in [0, 1],
                an audio tensor of (T, C) and a :class:`~tensorneko.io.video.video_data.VideoInfo` with fps info.
"""
if video.max() > 1:
video = video / 255
if channel_last:
video = video.permute(0, 3, 1, 2)
        if audio is None:
            audio = torch.tensor([]).reshape(1, 0)
info = {
"video_fps": video_fps,
"audio_fps": audio_fps
}
return VideoData(video, audio, info)
@staticmethod
def of_path(path: str) -> VideoData:
"""
Read video tensor from given file.
Args:
path (``str``): Path to the video file.
Returns:
:class:`~tensorneko.io.video.video_data.VideoData`:
                A VideoData object containing a float video tensor (T, C, H, W) with values in [0, 1],
                an audio tensor of (T, C) and a :class:`~tensorneko.io.video.video_data.VideoInfo` with fps info.
"""
video, audio, info = read_video(path)
video = video.permute(0, 3, 1, 2) / 255
audio = audio.permute(1, 0)
return VideoData(video, audio, info)
of = of_path
def __new__(cls, path: str) -> VideoData:
"""Alias of :meth:`~VideoReader.of_path`"""
return cls.of_path(path)
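# Minimal usage sketch (the file name is illustrative; assumes VideoData exposes
# the tensors as `video` and `audio` attributes):
#
#   data = VideoReader("clip.mp4")        # alias of VideoReader.of_path
#   frames, waveform = data.video, data.audio
#   print(frames.shape)                   # (T, C, H, W), values in [0, 1]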
|
the-stack_106_17296
|
"""
This example demonstrates the ability to link the axes of views together
Views can be linked manually using the context menu, but only if they are given
names.
"""
import numpy as np
import pyqtgraph as pg
app = pg.mkQApp("Linked Views Example")
#mw = QtWidgets.QMainWindow()
#mw.resize(800,800)
x = np.linspace(-50, 50, 1000)
y = np.sin(x) / x
win = pg.GraphicsLayoutWidget(show=True, title="pyqtgraph example: Linked Views")
win.resize(800,600)
win.addLabel("Linked Views", colspan=2)
win.nextRow()
p1 = win.addPlot(x=x, y=y, name="Plot1", title="Plot1")
p2 = win.addPlot(x=x, y=y, name="Plot2", title="Plot2: Y linked with Plot1")
p2.setLabel('bottom', "Label to test offset")
p2.setYLink('Plot1') ## test linking by name
## create plots 3 and 4 out of order
p4 = win.addPlot(x=x, y=y, name="Plot4", title="Plot4: X -> Plot3 (deferred), Y -> Plot1", row=2, col=1)
p4.setXLink('Plot3') ## Plot3 has not been created yet, but this should still work anyway.
p4.setYLink(p1)
p3 = win.addPlot(x=x, y=y, name="Plot3", title="Plot3: X linked with Plot1", row=2, col=0)
p3.setXLink(p1)
p3.setLabel('left', "Label to test offset")
#QtWidgets.QApplication.processEvents()
if __name__ == '__main__':
pg.exec()
|
the-stack_106_17297
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.12 (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
# these strings are filled in when 'setup.py versioneer' creates _version.py
tag_prefix = ""
parentdir_prefix = "PyiUpdater-"
versionfile_source = "pyi_updater/_version.py"
import os, sys, re, subprocess, errno
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): # pragma: no cover
assert isinstance(commands, list)
p = None
for c in commands:
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose=False): # pragma: no cover
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
def git_get_keywords(versionfile_abs): # pragma: no cover
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs,"r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
def git_versions_from_keywords(keywords, tag_prefix, verbose=False): # pragma: no cover
if not keywords:
return {} # keyword-finding function failed to find keywords
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": keywords["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": keywords["full"].strip(),
"full": keywords["full"].strip() }
def git_versions_from_vcs(tag_prefix, root, verbose=False): # pragma: no cover
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
stdout = run_command(GITS, ["describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def get_versions(default={"version": "unknown", "full": ""}, verbose=False): # pragma: no cover
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
keywords = { "refnames": git_refnames, "full": git_full }
ver = git_versions_from_keywords(keywords, tag_prefix, verbose)
if ver:
return ver
try:
root = os.path.abspath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in range(len(versionfile_source.split(os.sep))):
root = os.path.dirname(root)
except NameError:
return default
return (git_versions_from_vcs(tag_prefix, root, verbose)
or versions_from_parentdir(parentdir_prefix, root, verbose)
or default)
|
the-stack_106_17299
|
# dataset settings
dataset_type = 'HSIGANDataset'
data_root = 'data/HSI'
img_norm_cfg = dict(
mean=[128]*32, std=[16]*32, to_rgb=False)
crop_size = (256, 256)
train_pipeline = [
dict(type='LoadENVIHyperSpectralImageFromFile',channel_select=range(4,36),median_blur=False),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(320, 256), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
# dict(type='PhotoMetricDistortion'),
# dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadENVIHyperSpectralImageFromFile',channel_select=range(4,36)),
dict(
type='MultiScaleFlipAug',
img_scale=(320, 256),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
# dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=8,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='hdrx_dir',
ann_dir='annx_dir',
gan_img_dir='crn_dir',
gan_ann_dir='annx_dir',
gan_split='splitx_dir/split_{}_train.txt',
gan_suffix='.npy',
split='splitx_dir/split_{}_train.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='hdrx_dir',
ann_dir='annx_dir',
split='splitx_dir/split_{}_test.txt',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='hdrx_dir',
ann_dir='annx_dir',
split='splitx_dir/split_{}_test.txt',
pipeline=test_pipeline))
|
the-stack_106_17300
|
"""
Given some input calculate how many are increasing line by line
"""
current = (int(l) for l in open('input'))
next(current) # there is no previous value for the first row
previous = (int(l) for l in open('input'))
print(sum(c > p for c, p in zip(current, previous)))
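# An equivalent sketch (not part of the original solution) that reads the file once and
# compares neighbouring values; itertools.pairwise needs Python 3.10+:
#
#   from itertools import pairwise
#   values = [int(line) for line in open('input')]
#   print(sum(b > a for a, b in pairwise(values)))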
|
the-stack_106_17301
|
#!/usr/bin/env python3
import operator
from functools import reduce
from typing import Optional, Tuple
import torch
from torch import Tensor
from .. import settings
from ..utils.broadcasting import _matmul_broadcast_shape, _mul_broadcast_shape
from ..utils.memoize import cached
from .diag_lazy_tensor import ConstantDiagLazyTensor, DiagLazyTensor
from .lazy_tensor import LazyTensor
from .non_lazy_tensor import lazify
from .triangular_lazy_tensor import TriangularLazyTensor
def _kron_diag(*lts) -> Tensor:
"""Compute diagonal of a KroneckerProductLazyTensor from the diagonals of the constituiting tensors"""
lead_diag = lts[0].diag()
if len(lts) == 1: # base case:
return lead_diag
trail_diag = _kron_diag(*lts[1:])
diag = lead_diag.unsqueeze(-2) * trail_diag.unsqueeze(-1)
return diag.transpose(-1, -2).reshape(*diag.shape[:-2], -1)
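# Sanity-check sketch (not part of the original module) of the identity this helper relies
# on, diag(A ⊗ B) = diag(A) ⊗ diag(B), written with plain torch:
#
#   A, B = torch.randn(2, 2), torch.randn(3, 3)
#   assert torch.allclose(torch.kron(A, B).diagonal(),
#                         torch.kron(A.diagonal(), B.diagonal()))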
def _prod(iterable):
return reduce(operator.mul, iterable, 1)
def _matmul(lazy_tensors, kp_shape, rhs):
output_shape = _matmul_broadcast_shape(kp_shape, rhs.shape)
output_batch_shape = output_shape[:-2]
res = rhs.contiguous().expand(*output_batch_shape, *rhs.shape[-2:])
num_cols = rhs.size(-1)
for lazy_tensor in lazy_tensors:
res = res.view(*output_batch_shape, lazy_tensor.size(-1), -1)
factor = lazy_tensor._matmul(res)
factor = factor.view(*output_batch_shape, lazy_tensor.size(-2), -1, num_cols).transpose(-3, -2)
res = factor.reshape(*output_batch_shape, -1, num_cols)
return res
def _t_matmul(lazy_tensors, kp_shape, rhs):
kp_t_shape = (*kp_shape[:-2], kp_shape[-1], kp_shape[-2])
output_shape = _matmul_broadcast_shape(kp_t_shape, rhs.shape)
output_batch_shape = torch.Size(output_shape[:-2])
res = rhs.contiguous().expand(*output_batch_shape, *rhs.shape[-2:])
num_cols = rhs.size(-1)
for lazy_tensor in lazy_tensors:
res = res.view(*output_batch_shape, lazy_tensor.size(-2), -1)
factor = lazy_tensor._t_matmul(res)
factor = factor.view(*output_batch_shape, lazy_tensor.size(-1), -1, num_cols).transpose(-3, -2)
res = factor.reshape(*output_batch_shape, -1, num_cols)
return res
class KroneckerProductLazyTensor(LazyTensor):
r"""
Returns the Kronecker product of the given lazy tensors
Args:
:`lazy_tensors`: List of lazy tensors
"""
def __init__(self, *lazy_tensors):
try:
lazy_tensors = tuple(lazify(lazy_tensor) for lazy_tensor in lazy_tensors)
except TypeError:
raise RuntimeError("KroneckerProductLazyTensor is intended to wrap lazy tensors.")
for prev_lazy_tensor, curr_lazy_tensor in zip(lazy_tensors[:-1], lazy_tensors[1:]):
if prev_lazy_tensor.batch_shape != curr_lazy_tensor.batch_shape:
raise RuntimeError(
"KroneckerProductLazyTensor expects lazy tensors with the "
"same batch shapes. Got {}.".format([lv.batch_shape for lv in lazy_tensors])
)
super().__init__(*lazy_tensors)
self.lazy_tensors = lazy_tensors
def __add__(self, other):
if isinstance(other, DiagLazyTensor):
return self.add_diag(other.diag())
else:
return super().__add__(other)
def add_diag(self, diag):
r"""
Adds a diagonal to a KroneckerProductLazyTensor
"""
from .kronecker_product_added_diag_lazy_tensor import KroneckerProductAddedDiagLazyTensor
if not self.is_square:
raise RuntimeError("add_diag only defined for square matrices")
diag_shape = diag.shape
if len(diag_shape) == 0 or diag_shape[-1] == 1:
# interpret scalar tensor or single-trailing element as constant diag
diag_tensor = ConstantDiagLazyTensor(diag, diag_shape=self.shape[-1])
else:
try:
expanded_diag = diag.expand(self.shape[:-1])
except RuntimeError:
raise RuntimeError(
"add_diag for LazyTensor of size {} received invalid diagonal of size {}.".format(
self.shape, diag_shape
)
)
diag_tensor = DiagLazyTensor(expanded_diag)
return KroneckerProductAddedDiagLazyTensor(self, diag_tensor)
def diag(self):
r"""
As :func:`torch.diag`, returns the diagonal of the matrix :math:`K` this LazyTensor represents as a vector.
:rtype: torch.tensor
:return: The diagonal of :math:`K`. If :math:`K` is :math:`n \times n`, this will be a length
n vector. If this LazyTensor represents a batch (e.g., is :math:`b \times n \times n`), this will be a
:math:`b \times n` matrix of diagonals, one for each matrix in the batch.
"""
if settings.debug.on():
if not self.is_square:
raise RuntimeError("Diag works on square matrices (or batches)")
return _kron_diag(*self.lazy_tensors)
@cached
def inverse(self):
# here we use that (A \kron B)^-1 = A^-1 \kron B^-1
        # TODO: Investigate under what conditions computing individual inverses makes sense
inverses = [lt.inverse() for lt in self.lazy_tensors]
return self.__class__(*inverses)
def inv_matmul(self, right_tensor, left_tensor=None):
        # TODO: Investigate under what conditions computing individual inverses makes sense
# For now, retain existing behavior
return super().inv_matmul(right_tensor=right_tensor, left_tensor=left_tensor)
@cached(name="cholesky")
def _cholesky(self, upper=False):
chol_factors = [lt.cholesky(upper=upper) for lt in self.lazy_tensors]
return KroneckerProductTriangularLazyTensor(*chol_factors, upper=upper)
def _expand_batch(self, batch_shape):
return self.__class__(*[lazy_tensor._expand_batch(batch_shape) for lazy_tensor in self.lazy_tensors])
def _get_indices(self, row_index, col_index, *batch_indices):
row_factor = self.size(-2)
col_factor = self.size(-1)
res = None
for lazy_tensor in self.lazy_tensors:
sub_row_size = lazy_tensor.size(-2)
sub_col_size = lazy_tensor.size(-1)
row_factor //= sub_row_size
col_factor //= sub_col_size
sub_res = lazy_tensor._get_indices(
(row_index // row_factor).fmod(sub_row_size),
(col_index // col_factor).fmod(sub_col_size),
*batch_indices,
)
res = sub_res if res is None else (sub_res * res)
return res
def _inv_matmul(self, right_tensor, left_tensor=None):
# Computes inv_matmul by exploiting the identity (A \kron B)^-1 = A^-1 \kron B^-1
tsr_shapes = [q.size(-1) for q in self.lazy_tensors]
n_rows = right_tensor.size(-2)
batch_shape = _mul_broadcast_shape(self.shape[:-2], right_tensor.shape[:-2])
perm_batch = tuple(range(len(batch_shape)))
y = right_tensor.clone().expand(*batch_shape, *right_tensor.shape[-2:])
for n, q in zip(tsr_shapes, self.lazy_tensors):
# for KroneckerProductTriangularLazyTensor this inv_matmul is very cheap
y = q.inv_matmul(y.reshape(*batch_shape, n, -1))
y = y.reshape(*batch_shape, n, n_rows // n, -1).permute(*perm_batch, -2, -3, -1)
res = y.reshape(*batch_shape, n_rows, -1)
if left_tensor is not None:
res = left_tensor @ res
return res
def _matmul(self, rhs):
is_vec = rhs.ndimension() == 1
if is_vec:
rhs = rhs.unsqueeze(-1)
res = _matmul(self.lazy_tensors, self.shape, rhs.contiguous())
if is_vec:
res = res.squeeze(-1)
return res
@cached(name="root_decomposition")
def root_decomposition(self, method: Optional[str] = None):
from gpytorch.lazy import RootLazyTensor
if method == "symeig" or method is None:
evals, evecs = self._symeig(eigenvectors=True, return_evals_as_lazy=True)
# TODO: only use non-zero evals (req. dealing w/ batches...)
f_list = [
evec * eval.diag().clamp(0.0).sqrt().unsqueeze(-2)
for eval, evec in zip(evals.lazy_tensors, evecs.lazy_tensors)
]
F = KroneckerProductLazyTensor(*f_list)
return RootLazyTensor(F)
else:
return super().root_decomposition(method=method)
@cached(name="size")
def _size(self):
left_size = _prod(lazy_tensor.size(-2) for lazy_tensor in self.lazy_tensors)
right_size = _prod(lazy_tensor.size(-1) for lazy_tensor in self.lazy_tensors)
return torch.Size((*self.lazy_tensors[0].batch_shape, left_size, right_size))
@cached(name="svd")
def _svd(self) -> Tuple[LazyTensor, Tensor, LazyTensor]:
U, S, V = [], [], []
for lt in self.lazy_tensors:
U_, S_, V_ = lt.svd()
U.append(U_)
S.append(S_)
V.append(V_)
S = KroneckerProductLazyTensor(*[DiagLazyTensor(S_) for S_ in S]).diag()
U = KroneckerProductLazyTensor(*U)
V = KroneckerProductLazyTensor(*V)
return U, S, V
def _symeig(
self, eigenvectors: bool = False, return_evals_as_lazy: bool = False
) -> Tuple[Tensor, Optional[LazyTensor]]:
# return_evals_as_lazy is a flag to return the eigenvalues as a lazy tensor
# which is useful for root decompositions here (see the root_decomposition
# method above)
evals, evecs = [], []
for lt in self.lazy_tensors:
evals_, evecs_ = lt.symeig(eigenvectors=eigenvectors)
evals.append(evals_)
evecs.append(evecs_)
evals = KroneckerProductLazyTensor(*[DiagLazyTensor(evals_) for evals_ in evals])
if not return_evals_as_lazy:
evals = evals.diag()
if eigenvectors:
evecs = KroneckerProductLazyTensor(*evecs)
else:
evecs = None
return evals, evecs
def _t_matmul(self, rhs):
is_vec = rhs.ndimension() == 1
if is_vec:
rhs = rhs.unsqueeze(-1)
res = _t_matmul(self.lazy_tensors, self.shape, rhs.contiguous())
if is_vec:
res = res.squeeze(-1)
return res
def _transpose_nonbatch(self):
return self.__class__(*(lazy_tensor._transpose_nonbatch() for lazy_tensor in self.lazy_tensors), **self._kwargs)
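# Minimal usage sketch (not part of the original file), assuming only the imports above:
#
#   a = torch.randn(2, 2); a = a @ a.t() + 2 * torch.eye(2)   # small SPD factor
#   b = torch.randn(3, 3); b = b @ b.t() + 3 * torch.eye(3)
#   kp = KroneckerProductLazyTensor(lazify(a), lazify(b))      # lazily represents a ⊗ b (6 x 6)
#   dense = kp.evaluate()                                       # materializes torch.kron(a, b)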
class KroneckerProductTriangularLazyTensor(KroneckerProductLazyTensor):
def __init__(self, *lazy_tensors, upper=False):
if not all(isinstance(lt, TriangularLazyTensor) for lt in lazy_tensors):
raise RuntimeError("Components of KroneckerProductTriangularLazyTensor must be TriangularLazyTensor.")
super().__init__(*lazy_tensors)
self.upper = upper
@cached
def inverse(self):
# here we use that (A \kron B)^-1 = A^-1 \kron B^-1
inverses = [lt.inverse() for lt in self.lazy_tensors]
return self.__class__(*inverses, upper=self.upper)
def inv_matmul(self, right_tensor, left_tensor=None):
        # For triangular components, using triangular-triangular substitution should generally be good
return self._inv_matmul(right_tensor=right_tensor, left_tensor=left_tensor)
@cached(name="cholesky")
def _cholesky(self, upper=False):
raise NotImplementedError("_cholesky not applicable to triangular lazy tensors")
def _cholesky_solve(self, rhs, upper=False):
if upper:
# res = (U.T @ U)^-1 @ v = U^-1 @ U^-T @ v
w = self._transpose_nonbatch().inv_matmul(rhs)
res = self.inv_matmul(w)
else:
# res = (L @ L.T)^-1 @ v = L^-T @ L^-1 @ v
w = self.inv_matmul(rhs)
res = self._transpose_nonbatch().inv_matmul(w)
return res
def _symeig(self, eigenvectors: bool = False) -> Tuple[Tensor, Optional[LazyTensor]]:
raise NotImplementedError("_symeig not applicable to triangular lazy tensors")
|
the-stack_106_17303
|
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime
import uuid
class ExtractRecord(object):
"""The extract base class."""
creation_date = None
"""datetime.datetime: The date and time of the extract creation."""
electronic_signature = None
"""unicode or None: Digital signature for the extract."""
concerned_theme = None
"""list of pyramid_oereb.core.records.theme.ThemeRecord: List of concerned themes."""
not_concerned_theme = None
"""list of pyramid_oereb.core.records.theme.ThemeRecord: List of not concerned themes."""
theme_without_data = None
"""list of pyramid_oereb.core.records.theme.ThemeRecord: List of themes without data."""
extract_identifier = None
"""unicode: The extract identifier (UUID)."""
qr_code = None
"""binary or None: QR code for the extract as binary string."""
def __init__(self, real_estate, logo_plr_cadastre, federal_logo, cantonal_logo, municipality_logo,
plr_cadastre_authority, update_date_os, disclaimers=None, glossaries=None,
concerned_theme=None, not_concerned_theme=None, theme_without_data=None,
general_information=None):
"""
Args:
real_estate (pyramid_oereb.lib.records.real_estate.RealEstateRecord): The real estate in its
record representation.
logo_plr_cadastre (pyramid_oereb.lib.records.logo.LogoRecord): Image file of the PLR logo.
            federal_logo (pyramid_oereb.lib.records.image.ImageRecord): Image file of the federal logo.
cantonal_logo (pyramid_oereb.lib.records.image.ImageRecord): Image file of the cantonal logo.
municipality_logo (pyramid_oereb.lib.records.image.ImageRecord): Image file of the municipality
logo.
plr_cadastre_authority (pyramid_oereb.lib.records.office.OfficeRecord): The authority which is
responsible for the PLR cadastre.
update_date_os (datetime): Last update of the official survey used as base map in the extract.
            disclaimers (list of
pyramid_oereb.core.records.disclaimer.DisclaimerRecord or None): Disclaimers for the extract.
glossaries (list of pyramid_oereb.lib.records.glossary.GlossaryRecord): Glossaries for the
extract.
concerned_theme (list of pyramid_oereb.lib.records.theme.ThemeRecord or None): Concerned themes.
not_concerned_theme (list of pyramid_oereb.lib.records.theme.ThemeRecord or None): Not concerned
themes.
theme_without_data (list of pyramid_oereb.lib.records.theme.ThemeRecord or None): Themes without
data.
general_information (list of dict): General information for the static extract as multilingual
text.
"""
if not isinstance(update_date_os, datetime):
warnings.warn('Type of "update_date_os" should be "datetime.datetime"')
if general_information and not isinstance(general_information, list):
warnings.warn('Type of "general_information" should be "list"')
self.update_date_os = update_date_os
self.general_information = general_information
self.extract_identifier = str(uuid.uuid4())
self.real_estate = real_estate
if concerned_theme:
self.concerned_theme = concerned_theme
else:
self.concerned_theme = []
if not_concerned_theme:
self.not_concerned_theme = not_concerned_theme
else:
self.not_concerned_theme = []
if theme_without_data:
self.theme_without_data = theme_without_data
else:
self.theme_without_data = []
self.creation_date = datetime.now()
self.logo_plr_cadastre = logo_plr_cadastre
self.federal_logo = federal_logo
self.cantonal_logo = cantonal_logo
self.municipality_logo = municipality_logo
self.plr_cadastre_authority = plr_cadastre_authority
if disclaimers:
self.disclaimers = disclaimers
else:
self.disclaimers = []
if glossaries:
self.glossaries = glossaries
else:
self.glossaries = []
|
the-stack_106_17304
|
#!/usr/bin/env python3
import os
import sys
import time
import json
import getopt
import subprocess as sp
# Nightmode Downmixing settings.
SUR_CHANNEL_VOL = 0.60 # Volume level to set the non-center channels to.
LFE_CHANNEL_VOL = 0.60 # Volume to set the LFE channel to.
# Globals
MAXDB = '-0.5'
def main():
codec = None
fileIn = None
try:
options, args = getopt.getopt(sys.argv[1:], "hc:i:",
["help", "codec=", "input="])
for name, value in options:
if name in ('-h', '--help'):
Usage()
sys.exit()
if name in ('-c', '--codec'):
codec = value
if name in ('-i', '--input'):
fileIn = value
except getopt.GetoptError as err:
print(str(err))
Usage()
sys.exit(1)
if codec is None:
Usage()
sys.exit(1)
if fileIn is None:
Usage()
sys.exit(1)
if len(args) > 0:
print("Error: Trailing arguments")
sys.exit(1)
ext = fileIn.split('.')[-1]
createNightmodeTracks(codec, ext, fileIn)
def Usage():
print("Usage:", sys.argv[0],
"-c,--codec <flac,aac> -i,--input <input file>")
# from: https://github.com/Tatsh/ffmpeg-progress/blob/master/ffmpeg_progress.py
def ffprobe(in_file):
"""ffprobe font-end."""
return dict(
json.loads(
sp.check_output(('ffprobe', '-v', 'quiet', '-print_format', 'json',
'-show_format', '-show_streams', in_file),
encoding='utf-8')))
def getSamplerate(inFile):
return ffprobe(inFile)['streams'][0]['sample_rate']
def getMaxdB(inFile):
cmd = [
'ffmpeg', '-i', inFile, '-acodec', 'pcm_s16le', '-af', 'volumedetect',
'-f', 'null', 'null'
]
p = sp.Popen(cmd,
stdout=sp.PIPE,
stderr=sp.STDOUT,
universal_newlines=True)
for line in p.stdout:
line = line.rstrip()
if 'max_volume' in line:
temp = line
print()
return temp[temp.index(':') + 2:-3]
def ffmpegAudio(cmd, inFile, trackid):
print("Total Duration : ", end='')
if trackid is not None:
tags = ffprobe(inFile)['streams'][int(trackid)]['tags']
else:
tags = ffprobe(inFile)['streams'][0]
if 'duration' in tags:
durationSec = int(tags['duration'].split('.')[0])
durationMili = tags['duration'].split('.')[1]
duration = time.strftime('%H:%M:%S', time.gmtime(durationSec))
duration += '.' + durationMili
print(duration)
else:
print(tags['DURATION-eng'])
for x in cmd:
print(x, end=' ')
print()
p = sp.Popen(cmd,
stderr=sp.STDOUT,
stdout=sp.PIPE,
universal_newlines=True)
for line in p.stdout:
line = line.rstrip()
if 'size=' in line:
print(f'{line}\r', end='')
print()
def flacToM4a(outFile):
print("Converting flac to m4a")
m4aFile = outFile.split('.flac')[0] + '.m4a'
cmd = [
'ffmpeg', '-i', outFile, '-acodec', 'aac', '-b:a', '256K', '-movflags',
'faststart', '-y', m4aFile
]
ffmpegAudio(cmd, outFile, None)
os.remove(outFile)
def getffFilter(surVol: float, lfeVol: float):
surVol = "{}".format(surVol)
lfeVol = "{}".format(lfeVol)
ffPanFilterL = 'FL=FC+{s}*FL+{s}*FLC+{s}*BL+{s}*SL+{l}*LFE'.format(
s=surVol, l=lfeVol)
ffPanFilterR = 'FR=FC+{s}*FR+{s}*FRC+{s}*BR+{s}*SR+{l}*LFE'.format(
s=surVol, l=lfeVol)
return 'pan=stereo|{}|{}'.format(ffPanFilterL, ffPanFilterR)
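# Illustrative note (not part of the original script): with the default volumes above,
# getffFilter(0.60, 0.60) builds an ffmpeg pan filter roughly like
#
#   pan=stereo|FL=FC+0.6*FL+0.6*FLC+0.6*BL+0.6*SL+0.6*LFE|FR=FC+0.6*FR+0.6*FRC+0.6*BR+0.6*SR+0.6*LFE
#
# i.e. the centre channel is kept at full level while the other channels and the LFE are
# folded into the stereo pair at the configured volumes.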
def normAudio(inFile, outFile, codec, maxdB):
maxVolume = getMaxdB(inFile)
if maxVolume != '0.0':
volumeAdj = float(maxdB) - float(maxVolume)
else:
print("Already Normalized")
return False
print("Adjusting Volume by:", volumeAdj)
cmd = [
'ffmpeg', '-y', '-i', inFile, '-acodec', 'flac', '-compression_level',
'8', '-af', 'volume=' + str(volumeAdj) + 'dB', outFile
]
ffmpegAudio(cmd, inFile, None)
verifyVol = getMaxdB(outFile)
if verifyVol == maxdB:
print("Normalize Complete")
if codec == 'aac':
flacToM4a(outFile)
return True
else:
print("Volume doesn't match desired result.")
exit()
def nightmodeTrack(inFile, outFile, codec, withDRC, maxdB):
normfile = 'prenorm.flac'
ffFilter = getffFilter(SUR_CHANNEL_VOL, LFE_CHANNEL_VOL)
if withDRC:
ffFilter += ',acompressor=ratio=4,loudnorm'
else:
ffFilter += ',loudnorm'
samplerate = getSamplerate(inFile)
cmd = [
'ffmpeg', '-i', inFile, '-acodec', 'flac', '-compression_level', '8',
'-af', ffFilter, '-ar', samplerate, '-y', normfile
]
ffmpegAudio(cmd, inFile, None)
normalized = normAudio(normfile, outFile, codec, maxdB)
if normalized:
os.remove(normfile)
def createNightmodeTracks(codec, ext, inFile):
print('Creating nightmode tracks for:', inFile)
extension = ext
inFile = inFile
loudnormFile = inFile.split('.' +
extension)[0] + '-nightmode-loudnorm.flac'
DRCFile = inFile.split('.' + extension)[0] + '-nightmode-drc.flac'
print("Creating 'Loudnorm' track.")
nightmodeTrack(inFile, loudnormFile, codec, False, MAXDB)
print("Creating 'DRC+Loudnorm' track.")
nightmodeTrack(inFile, DRCFile, codec, True, MAXDB)
if __name__ == "__main__":
main()
|
the-stack_106_17305
|
import ast
import os
import sys
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.sdist import sdist
try:
from Cython.Build import cythonize
except ImportError:
cythonize = None
PYPY = hasattr(sys, "pypy_version_info")
author = author_email = version = None
with open("safe_radix32/__init__.py", encoding="utf-8") as f:
for line in f:
if line.startswith("__author__ = "):
author = ast.literal_eval(line[len("__author__ = ") :])
elif line.startswith("__author_email__ = "):
author_email = ast.literal_eval(line[len("__author_email__ = ") :])
elif line.startswith("__version__ = "):
version = ast.literal_eval(line[len("__version__ = ") :])
description = "Radix32 with safe alphabet."
with open("README.md", encoding="utf-8") as f:
long_description = f.read()
class BuildExt(build_ext):
def build_extension(self, ext):
for source in ext.sources:
pyx = os.path.splitext(source)[0] + ".pyx"
if not os.path.exists(pyx):
if not cythonize:
print("WARNING")
print("Cython is required for building extension.")
print("Cython can be installed from PyPI.")
print("Falling back to pure Python implementation.")
return
cythonize(pyx)
elif os.path.exists(pyx) and os.stat(source).st_mtime < os.stat(pyx).st_mtime and cythonize:
cythonize(pyx)
try:
return build_ext.build_extension(self, ext)
except Exception as e:
print("WARNING: Failed to compile extension modules.")
print("Falling back to pure Python implementation.")
print(e)
class Sdist(sdist):
    def __init__(self, *args, **kwargs):
        if cythonize is None:
            raise RuntimeError("Cython is required to build an sdist of safe_radix32.")
        cythonize("safe_radix32/_cython.pyx")
        sdist.__init__(self, *args, **kwargs)
ext_modules = []
if not PYPY:
ext_modules.append(Extension("safe_radix32._cython", sources=["safe_radix32/_cython.c"]))
setup(
name="safe_radix32",
version=version,
description=description,
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/Traktormaster/safe-radix32",
author=author,
author_email=author_email,
license="Apache 2.0",
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
],
packages=["safe_radix32"],
cmdclass={"build_ext": BuildExt, "sdist": Sdist},
ext_modules=ext_modules,
)
|
the-stack_106_17307
|
# File: G (Python 2.4)
from pandac.PandaModules import *
from direct.showbase.DirectObject import *
from direct.interval.IntervalGlobal import *
from direct.actor import Actor
from pirates.piratesbase import PiratesGlobals
from pirates.effects import PolyTrail
from PooledEffect import PooledEffect
from EffectController import EffectController
from pirates.effects.RoundshotProjectile import RoundshotProjectile
import random
class GrapeshotEffect(PooledEffect, EffectController):
def __init__(self):
PooledEffect.__init__(self)
EffectController.__init__(self)
self.placeHolder = self.attachNewNode('grapeShotDummy')
self.roundshots = []
self.time = 2.0
def spawnShot(self, time, targetPos, motion_color = None, startOffset = Vec3(0, 0, 0)):
effect = RoundshotProjectile()
if effect:
self.roundshots.append(effect)
effect.reparentTo(self)
effect.setPos(self, startOffset)
effect.play(time, targetPos, motion_color)
def createTrack(self, rate = 1):
        if base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsHigh:
            num_grapeshot_tracers = 10
        elif base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsMedium:
            num_grapeshot_tracers = 7
        elif base.options.getSpecialEffectsSetting() >= base.options.SpecialEffectsLow:
            num_grapeshot_tracers = 4
self.track = Sequence()
for i in range(num_grapeshot_tracers):
self.placeHolder.setPos(self, random.uniform(-30, 30), random.uniform(400, 500), random.uniform(-20, 20))
targetPos = self.placeHolder.getPos(self)
self.track.append(Func(self.spawnShot, self.time + random.uniform(-0.25, 0.25), targetPos))
self.track.append(Wait(4.0))
self.track.append(Func(self.cleanUpEffect))
def play(self, rate = 1):
self.createTrack()
self.track.start()
def cleanUpEffect(self):
self.detachNode()
for roundshot in self.roundshots:
roundshot.destroy()
self.roundshots = []
self.checkInEffect(self)
def destroy(self):
self.stop()
for roundshot in self.roundshots:
roundshot.destroy()
self.roundshots = []
self.placeHolder.removeNode()
EffectController.destroy(self)
PooledEffect.destroy(self)
|
the-stack_106_17308
|
error_msg = """Jittor only supports Ubuntu>=16.04 currently.
For other OS, use Jittor may be risky.
We strongly recommend the docker installation:
# CPU only
>>> docker run -it --network host jittor/jittor
# CPU and CUDA
>>> docker run -it --network host jittor/jittor-cuda
Reference:
1. Install Jittor via Docker on Windows/Mac/Linux: https://cg.cs.tsinghua.edu.cn/jittor/tutorial/2020-5-15-00-00-docker/
"""
from warnings import warn
try:
with open("/etc/os-release", "r", encoding='utf8') as f:
s = f.read().splitlines()
m = {}
for line in s:
a = line.split('=')
m[a[0]] = a[1].replace("\"", "")
assert m["NAME"] == "Ubuntu" and float(m["VERSION_ID"])>16, error_msg
except Exception as e:
print(e)
warn(error_msg)
import setuptools
from setuptools import setup, find_packages
import os
path = os.path.dirname(__file__)
with open(os.path.join(path, "README.md"), "r", encoding='utf8') as fh:
long_description = fh.read()
with open(os.path.join(path, "python/jittor/__init__.py"), "r", encoding='utf8') as fh:
for line in fh:
if line.startswith('__version__'):
version = line.split("'")[1]
break
else:
raise RuntimeError("Unable to find version string.")
setuptools.setup(
name='jittor',
version=version,
# scripts=[],
author="Jittor Group",
author_email="[email protected]",
description="a Just-in-time(JIT) deep learning framework",
long_description=long_description,
long_description_content_type="text/markdown",
url="http://jittor.org",
# packages=setuptools.find_packages(),
python_requires='>=3.7',
packages=["jittor", "jittor.test", "jittor.models", "jittor.utils", "jittor_utils"],
package_dir={'': os.path.join(path, 'python')},
package_data={'': ['*', '*/*', '*/*/*','*/*/*/*','*/*/*/*/*','*/*/*/*/*/*']},
# include_package_data=True,
install_requires=[
"pybind11",
"numpy",
"tqdm",
"pillow",
"astunparse",
],
)
# upload to pip:
# rm -rf dist && python3.7 ./setup.py sdist && python3.7 -m twine upload dist/*
|
the-stack_106_17309
|
'''
Management of cron, the Unix command scheduler.
===============================================
The cron state module allows for user crontabs to be cleanly managed.
Cron declarations require a number of parameters. The timing parameters need
to be declared: minute, hour, daymonth, month and dayweek. The user whose
crontab is to be edited also needs to be defined.
By default the timing arguments are all ``*`` and the user is root. When
making changes to an existing cron job the name declaration is the unique
factor, so if an existing cron that looks like this:
.. code-block:: yaml
date > /tmp/crontest:
cron.present:
- user: root
- minute: 5
Is changed to this:
.. code-block:: yaml
date > /tmp/crontest:
cron.present:
- user: root
- minute: 7
- hour: 2
Then the existing cron will be updated, but if the cron command is changed,
then a new cron job will be added to the user's crontab.
'''
# Import python libs
import os
from salt.utils import mkstemp
def _check_cron(cmd, user, minute, hour, dom, month, dow):
'''
Return the changes
'''
lst = __salt__['cron.list_tab'](user)
for cron in lst['crons']:
if cmd == cron['cmd']:
if not str(minute) == cron['min'] or \
not str(hour) == cron['hour'] or \
not str(dom) == cron['daymonth'] or \
not str(month) == cron['month'] or \
not str(dow) == cron['dayweek']:
return 'update'
return 'present'
return 'absent'
def _get_cron_info():
'''
Returns the proper group owner and path to the cron directory
'''
owner = 'root'
if __grains__['os'] == 'FreeBSD':
group = 'wheel'
crontab_dir = '/var/cron/tabs'
elif __grains__['os'] == 'OpenBSD':
group = 'crontab'
crontab_dir = '/var/cron/tabs'
elif __grains__['os'] == 'Solaris':
group = 'root'
crontab_dir = '/var/spool/cron/crontabs'
else:
group = 'root'
crontab_dir = '/var/spool/cron'
return owner, group, crontab_dir
def present(name,
user='root',
minute='*',
hour='*',
daymonth='*',
month='*',
dayweek='*',
):
'''
Verifies that the specified cron job is present for the specified user.
For more advanced information about what exactly can be set in the cron
timing parameters check your cron system's documentation. Most Unix-like
systems' cron documentation can be found via the crontab man page:
``man 5 crontab``.
name
The command that should be executed by the cron job.
user
        The name of the user whose crontab needs to be modified, defaults to
the root user
minute
        The information to be set into the minute section. This can be any
        string supported by your cron system's minute field. Default is
``*``
hour
The information to be set in the hour section. Default is ``*``
daymonth
The information to be set in the day of month section. Default is ``*``
month
The information to be set in the month section. Default is ``*``
dayweek
        The information to be set in the day of week section. Default is
``*``
'''
name = ' '.join(name.strip().split())
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if __opts__['test']:
status = _check_cron(name,
user,
minute,
hour,
daymonth,
month,
dayweek)
ret['result'] = None
if status == 'absent':
ret['comment'] = 'Cron {0} is set to be added'.format(name)
elif status == 'present':
ret['result'] = True
ret['comment'] = 'Cron {0} already present'.format(name)
elif status == 'update':
ret['comment'] = 'Cron {0} is set to be updated'.format(name)
return ret
data = __salt__['cron.set_job'](dom=daymonth,
dow=dayweek,
hour=hour,
minute=minute,
month=month,
cmd=name,
user=user
)
if data == 'present':
ret['comment'] = 'Cron {0} already present'.format(name)
return ret
if data == 'new':
ret['comment'] = 'Cron {0} added to {1}\'s crontab'.format(name, user)
ret['changes'] = {user: name}
return ret
if data == 'updated':
        ret['comment'] = 'Cron {0} updated'.format(name)
ret['changes'] = {user: name}
return ret
ret['comment'] = ('Cron {0} for user {1} failed to commit with error \n{2}'
.format(name, user, data))
ret['result'] = False
return ret
def absent(name,
user='root',
minute='*',
hour='*',
daymonth='*',
month='*',
dayweek='*'):
'''
    Verifies that the specified cron job is absent for the specified user; only
the name is matched when removing a cron job.
name
The command that should be absent in the user crontab.
user
        The name of the user whose crontab needs to be modified, defaults to
the root user
minute
        The information to be set into the minute section. This can be any
        string supported by your cron system's minute field. Default is
``*``
hour
The information to be set in the hour section. Default is ``*``
daymonth
The information to be set in the day of month section. Default is ``*``
month
The information to be set in the month section. Default is ``*``
dayweek
        The information to be set in the day of week section. Default is
``*``
'''
name = ' '.join(name.strip().split())
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
if __opts__['test']:
status = _check_cron(name,
user,
minute,
hour,
daymonth,
month,
dayweek)
ret['result'] = None
if status == 'absent':
ret['result'] = True
ret['comment'] = 'Cron {0} is absent'.format(name)
elif status == 'present' or status == 'update':
ret['comment'] = 'Cron {0} is set to be removed'.format(name)
return ret
data = __salt__['cron.rm_job'](user,
minute,
hour,
daymonth,
month,
dayweek,
name,
)
if data == 'absent':
ret['comment'] = "Cron {0} already absent".format(name)
return ret
if data == 'removed':
ret['comment'] = ("Cron {0} removed from {1}'s crontab"
.format(name, user))
ret['changes'] = {user: name}
return ret
ret['comment'] = ("Cron {0} for user {1} failed to commit with error {2}"
.format(name, user, data))
ret['result'] = False
return ret
def file(name,
source_hash='',
user='root',
template=None,
context=None,
replace=True,
defaults=None,
env=None,
backup='',
**kwargs):
'''
Provides file.managed-like functionality (templating, etc.) for a pre-made
crontab file, to be assigned to a given user.
name
The source file to be used as the crontab. This source file can be
hosted on either the salt master server, or on an http or ftp server.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs.
If the file is hosted on a http or ftp server then the source_hash
argument is also required
source_hash
This can be either a file which contains a source hash string for
the source, or a source hash string. The source hash string is the
hash algorithm followed by the hash of the file:
md5=e138491e9d5b97023cea823fe17bac22
user
        The user to whom the crontab should be assigned. This defaults to
root.
template
If this setting is applied then the named templating engine will be
used to render the downloaded file. Currently, jinja and mako are
supported.
context
Overrides default context variables passed to the template.
replace
        Whether the crontab should be replaced. If False, this command will
        be ignored if a crontab already exists for the specified user. Default is True.
defaults
Default context passed to the template.
backup
Overrides the default backup mode for the user's crontab.
'''
# Initial set up
mode = __salt__['config.manage_mode'](600)
owner, group, crontab_dir = _get_cron_info()
cron_path = mkstemp()
with open(cron_path, 'w+') as fp_:
fp_.write(__salt__['cron.raw_cron'](user))
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
    # Avoid variable naming confusion in the module calls below, since the ID
    # declaration for this state will be a source URI.
source = name
if env is None:
env = kwargs.get('__env__', 'base')
if not replace and os.stat(cron_path).st_size > 0:
ret['comment'] = 'User {0} already has a crontab. No changes ' \
'made'.format(user)
os.unlink(cron_path)
return ret
if __opts__['test']:
r = __salt__['file.check_managed'](cron_path,
source,
source_hash,
owner,
group,
mode,
template,
False, # makedirs = False
context,
defaults,
env,
**kwargs
)
ret['result'], ret['comment'] = r
os.unlink(cron_path)
return ret
# If the source is a list then find which file exists
source, source_hash = __salt__['file.source_list'](source,
source_hash,
env)
# Gather the source file from the server
sfn, source_sum, comment = __salt__['file.get_managed'](cron_path,
template,
source,
source_hash,
owner,
group,
mode,
env,
context,
defaults,
**kwargs
)
if comment:
ret['comment'] = comment
ret['result'] = False
os.unlink(cron_path)
return ret
ret = __salt__['file.manage_file'](cron_path,
sfn,
ret,
source,
source_sum,
owner,
group,
mode,
env,
backup)
if not __salt__['cron.write_cron_file'](user, cron_path):
ret['comment'] = 'Crontab file updated, but was unable to ' \
'update cron daemon'
ret['result'] = False
os.unlink(cron_path)
return ret
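# Illustrative usage sketch (not part of the original module): assigning a templated
# crontab to a user from an sls file; the source path below is a hypothetical example.
#
#   salt://crontabs/www:
#     cron.file:
#       - user: www-data
#       - template: jinja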
|
the-stack_106_17310
|
import asyncio
import json
import logging
import queue
import re
from collections import defaultdict
from typing import Sequence
from mmpy_bot.driver import Driver
from mmpy_bot.plugins import Plugin
from mmpy_bot.settings import Settings
from mmpy_bot.webhook_server import NoResponse
from mmpy_bot.wrappers import Message, WebHookEvent
class EventHandler(object):
def __init__(
self,
driver: Driver,
settings: Settings,
plugins: Sequence[Plugin],
ignore_own_messages=True,
):
"""The EventHandler class takes care of the connection to mattermost and calling
the appropriate response function to each event."""
self.driver = driver
self.settings = settings
self.ignore_own_messages = ignore_own_messages
self.plugins = plugins
self._name_matcher = re.compile(rf"^@?{self.driver.username}\:?\s?")
# Collect the listeners from all plugins
self.message_listeners = defaultdict(list)
self.webhook_listeners = defaultdict(list)
for plugin in self.plugins:
for matcher, functions in plugin.message_listeners.items():
self.message_listeners[matcher].extend(functions)
for matcher, functions in plugin.webhook_listeners.items():
self.webhook_listeners[matcher].extend(functions)
def start(self):
# This is blocking, will loop forever
self.driver.init_websocket(self._handle_event)
def _should_ignore(self, message: Message):
        # Ignore messages from senders specified in settings, and possibly from ourselves
return (
True
if message.sender_name.lower()
in (name.lower() for name in self.settings.IGNORE_USERS)
else False
) or (self.ignore_own_messages and message.sender_name == self.driver.username)
async def _check_queue_loop(self, webhook_queue: queue.Queue):
logging.info("EventHandlerWebHook queue listener started.")
while True:
try:
event = webhook_queue.get_nowait()
await self._handle_webhook(event)
except queue.Empty:
pass
await asyncio.sleep(0.0001)
async def _handle_event(self, data):
post = json.loads(data)
event_action = post.get("event")
if event_action == "posted":
await self._handle_post(post)
async def _handle_post(self, post):
# For some reason these are JSON strings, so need to parse them first
for item in ["post", "mentions"]:
if post.get("data", {}).get(item):
post["data"][item] = json.loads(post["data"][item])
# If the post starts with a mention of this bot, strip off that part.
post["data"]["post"]["message"] = self._name_matcher.sub(
"", post["data"]["post"]["message"]
)
message = Message(post)
if self._should_ignore(message):
return
# Find all the listeners that match this message, and have their plugins handle
# the rest.
tasks = []
for matcher, functions in self.message_listeners.items():
match = matcher.match(message.text)
if match:
groups = list([group for group in match.groups() if group != ""])
for function in functions:
# Create an asyncio task to handle this callback
tasks.append(
asyncio.create_task(
function.plugin.call_function(
function, message, groups=groups
)
)
)
# Execute the callbacks in parallel
asyncio.gather(*tasks)
async def _handle_webhook(self, event: WebHookEvent):
# Find all the listeners that match this webhook id, and have their plugins
# handle the rest.
tasks = []
for matcher, functions in self.webhook_listeners.items():
match = matcher.match(event.webhook_id)
if match:
for function in functions:
# Create an asyncio task to handle this callback
tasks.append(
asyncio.create_task(
function.plugin.call_function(function, event)
)
)
# If this webhook doesn't correspond to any listeners, signal the WebHookServer
# to not wait for any response
if len(tasks) == 0:
self.driver.respond_to_web(event, NoResponse)
# If it does, execute the callbacks in parallel
else:
asyncio.gather(*tasks)
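# Illustrative sketch (not part of the original module): for a bot named "mybot" the
# _name_matcher built in __init__ strips a leading mention before matching listeners:
#
#   matcher = re.compile(r"^@?mybot\:?\s?")
#   matcher.sub("", "@mybot: hello there")   # -> "hello there"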
|
the-stack_106_17314
|
from __future__ import annotations
from typing import Optional, List, TYPE_CHECKING
if TYPE_CHECKING: # pragma: no cover
import numpy as np
from pyNastran.gui.dev.gui2.gui2 import MainWindow2
from pyNastran.gui.qt_files.colors import BLACK_FLOAT
import vtk
from pyNastran.gui.qt_files.QVTKRenderWindowInteractor import QVTKRenderWindowInteractor
from pyNastran.gui.styles.trackball_style_camera import TrackballStyleCamera
class ScalarBar:
def __init__(self):
pass
def Modified(self):
pass
def VisibilityOn(self):
pass
def VisibilityOff(self):
pass
class VtkInterface:
def __init__(self, gui: MainWindow2, parent):
self.gui = gui
self.scalar_bar_actor = ScalarBar()
self.vtk_interactor = QVTKRenderWindowInteractor(parent=parent)
self.set_style_as_trackball()
self.rend = vtk.vtkRenderer()
#self.vtk_interactor.GetRenderWindow().AddRenderer(self.rend)
fill_render_window(self.vtk_interactor, self.rend, nframes=1)
camera = self.rend.GetActiveCamera()
if self.settings.use_parallel_projection:
camera.ParallelProjectionOn()
#else:
#camera.ParallelProjectionOff()
@property
def settings(self) -> Settings:
return self.gui.settings
@property
def log(self):
return self.gui.log
def set_style_as_trackball(self):
"""sets the default rotation style"""
#self._simulate_key_press('t') # change mouse style to trackball
self.style = TrackballStyleCamera(self.vtk_interactor, self)
self.vtk_interactor.SetInteractorStyle(self.style)
def set_quad_grid(self, box_name: str,
nodes: np.ndarray, elements: np.ndarray,
color: Optional[List[float]]=None,
line_width: float=1, opacity: float=1.) -> None:
if color is None:
color = BLACK_FLOAT
self.log.warning('set_quad_grid')
def create_global_axes(self, dim_max: float) -> None:
self.log.warning('create_global_axes')
def fill_render_window(vtk_interactor,
rend: vtk.vtkRenderer,
nframes: int=1) -> List[vtk.vtkRenderer]:
assert nframes in [1, 2, 4], nframes
render_window = vtk_interactor.GetRenderWindow()
if nframes == 1:
render_window.AddRenderer(rend)
return [rend]
if nframes == 2:
# +-----+-----+
# | | |
# | A | B |
# | | |
# +-----+-----+
        # viewports are (xmin, ymin, xmax, ymax) in normalized window coordinates
frame1 = [0.0, 0.0, 0.5, 1.0]
frame2 = [0.5, 0.0, 1.0, 1.0]
elif nframes == 4:
# +-----+-----+
# | | |
# | C | D |
# | | |
# +-----+-----+
# | | |
# | A | B |
# | | |
# +-----+-----+
frame1 = [0.0, 0.0, 0.5, 0.5]
frame2 = [0.5, 0.0, 1.0, 0.5]
        frame3 = [0.0, 0.5, 0.5, 1.0]
frame4 = [0.5, 0.5, 1.0, 1.0]
else:
raise ValueError(nframes)
if nframes > 1:
rend.SetViewport(*frame1)
render_window.AddRenderer(rend)
if nframes == 2:
rend2 = vtk.vtkRenderer()
        rend2.SetViewport(*frame2)
render_window.AddRenderer(rend2)
return [rend, rend2]
elif nframes == 4:
rend2 = vtk.vtkRenderer()
rend3 = vtk.vtkRenderer()
rend4 = vtk.vtkRenderer()
rend2.SetViewport(*frame2)
rend3.SetViewport(*frame3)
rend4.SetViewport(*frame4)
render_window.AddRenderer(rend2)
render_window.AddRenderer(rend3)
render_window.AddRenderer(rend4)
return [rend, rend2, rend3, rend4]
raise ValueError(nframes)
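# Illustrative sketch (not part of the original module): vtkRenderer.SetViewport takes
# normalized window coordinates (xmin, ymin, xmax, ymax), so a manual two-pane split is
#
#   left = vtk.vtkRenderer();  left.SetViewport(0.0, 0.0, 0.5, 1.0)
#   right = vtk.vtkRenderer(); right.SetViewport(0.5, 0.0, 1.0, 1.0)
#   render_window.AddRenderer(left)
#   render_window.AddRenderer(right)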
|
the-stack_106_17315
|
#!/usr/bin/env python
#-*- coding:utf-8 _*-
"""
@author: Selonsy
@file: cli.py
@time: 2019-01-23
Handles user interaction.
"""
import re
import sys
import getopt
from utils import glovar
from utils import echo
from utils.customlog import CustomLog
logger = CustomLog(__name__).getLogger()
def set_opts(args):
'''
根据命令行输入的参数修改全局变量
:param args: 命令行参数列表
:return:
'''
try:
opts, others = getopt.getopt(args, 'vhmk:s:c:o:x:',
['verbose', 'help', 'merge', 'nomerge',
'keyword=', 'source=', 'count=', 'outdir=', 'proxy='])
except getopt.GetoptError as e:
logger.error('命令解析失败')
logger.error(e)
echo.usage()
sys.exit(2)
for o, a in opts:
if o in ('-h', '--help'):
echo.usage()
sys.exit(2)
elif o in ('-v', '--verbose'):
glovar.set_option('verbose', True)
elif o in ('-k', '--keyword'):
glovar.set_option('keyword', a)
elif o in ('-s', '--source'):
glovar.set_option('source', a)
elif o in ('-c', '--count'):
c = int(a) if int(a) < 51 else 50
glovar.set_option('count', c)
elif o in ('-o', '--outdir'):
glovar.set_option('outdir', a)
elif o in ('-x', '--proxy'):
proxies = { 'http': a, 'https': a}
glovar.set_option('proxies', proxies)
elif o in ('-m', '--merge'):
glovar.set_option('merge', True)
elif o in ('--nomerge'):
glovar.set_option('merge', False)
else:
assert False, 'unhandled option'
def get_music_select(comment='请输入下载序号,多个序号用空格隔开,输入N跳过下载:\n > ') -> list:
    ''' Return the list of indices selected by the user. '''
selected = []
choices = input(comment)
if choices.lower() == 'n':
print('')
return selected
if not re.match(r'^((\d+\-\d+)|(\d+)|\s+)+$', choices):
return get_music_select('%s仅支持形如 0 3-5 8 的格式,输入N跳过下载:\n > ' % echo.colorize('输入有误!', 'red'))
for choice in choices.split():
start, _, end = choice.partition('-')
if end:
selected += range(int(start), int(end)+1)
else:
selected.append(start)
return selected
def set_music_keyword(comment='请输入要搜索的歌曲,或Ctrl+C退出:\n > '):
keyword = input(comment)
glovar.set_option('keyword', keyword)
|
the-stack_106_17317
|
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensornetwork.backends.tensorflow.tensordot2"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensornetwork.backends.tensorflow import tensordot2
import pytest
tf.compat.v1.enable_v2_behavior()
_MAXDIM = 5
class TensordotTest(tf.compat.v1.test.TestCase):
def test_invalid_shape(self):
a = [[1, 2], [3, 4]]
b = [[1, 2], [3, 4], [5, 6]]
a_axes = [1]
b_axes = [0]
# Invalid static shapes.
with self.assertRaises(tf.errors.InvalidArgumentError):
tensordot2.tensordot(tf, a, b, (a_axes, b_axes))
# Invalid dynamic shapes.
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
with self.cached_session() as sess:
with self.assertRaisesRegexp(tf.errors.InvalidArgumentError,
"Matrix size-incompatible"):
a_ph = tf.compat.v1.placeholder(tf.float32)
b_ph = tf.compat.v1.placeholder(tf.float32)
axes_ph = tf.compat.v1.placeholder(tf.int32)
output = tensordot2.tensordot(tf, a_ph, b_ph, axes_ph)
_ = sess.run([output],
feed_dict={
a_ph: a,
b_ph: b,
axes_ph: (a_axes, b_axes)
})
def test_invalid_axes(self):
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
a = [[1, 2], [3, 4]]
b = [[1, 2], [3, 4]]
# Invalid static axes.
for axes_value in -1, 3, [1], [[1]], [[1], [0, 1]]:
with self.assertRaises(ValueError):
tensordot2.tensordot(tf, a, b, axes_value)
with self.assertRaises(IndexError):
tensordot2.tensordot(tf, a, b, [[0], [7]])
# Invalid dynamic axes.
a_ph = tf.compat.v1.placeholder(tf.float32)
b_ph = tf.compat.v1.placeholder(tf.float32)
axes_ph = tf.compat.v1.placeholder(tf.int32)
output = tensordot2.tensordot(tf, a_ph, b_ph, axes_ph)
# Note: We don't support scalar Tensor values for axes.
for axes_value in 1, [1], [0, 1], [[1]], [[0, 1]], [[0], [7]]:
with self.cached_session() as sess:
with self.assertRaises(tf.errors.InvalidArgumentError):
_ = sess.run([output],
feed_dict={
a_ph: a,
b_ph: b,
axes_ph: axes_value
})
# Test case for 11950
def test_valid_axis(self):
for axes_value in [1, 2], [[1], [2]], [[], []], 0:
with self.cached_session():
np_a = np.ones((3, 3))
np_b = np.array([2, 3, 1])[None, None]
np_ans = np.tensordot(np_a, np_b, axes_value)
tf_a = tf.ones((3, 3), dtype=tf.float32)
tf_b = tf.constant([2, 3, 1], dtype=tf.float32)[None, None]
tf_ans = tensordot2.tensordot(tf, tf_a, tf_b, axes_value)
self.assertAllEqual(tf_ans.shape, np_ans.shape)
self.assertAllEqual(tf_ans, np_ans)
def test_partial_shape_inference(self):
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
for axes in ([1], [0]), 1:
a = tf.compat.v1.placeholder(tf.float32)
b = tf.compat.v1.placeholder(tf.float32)
output = tensordot2.tensordot(tf, a, b, axes)
self.assertEqual(output.get_shape().ndims, None)
a.set_shape([None, 2])
b.set_shape([2, 3])
output = tensordot2.tensordot(tf, a, b, axes)
output_shape = output.get_shape()
self.assertEqual(output_shape.ndims, 2)
output_shape = output_shape.as_list()
self.assertEqual(output_shape[0], None)
self.assertEqual(output_shape[1], 3)
a = tf.compat.v1.placeholder(tf.float32)
b = tf.compat.v1.placeholder(tf.float32)
a.set_shape([2, 2])
b.set_shape([2, None])
output = tensordot2.tensordot(tf, a, b, axes)
output_shape = output.get_shape()
self.assertEqual(output_shape.ndims, 2)
output_shape = output_shape.as_list()
self.assertEqual(output_shape[0], 2)
self.assertEqual(output_shape[1], None)
# Select a random subset of size m from [0, 1, ..., n-1].
def _random_subset(m, n):
assert m <= n
return (np.random.permutation(n)[:m]).astype(np.int32)
def _generate_random_tensors_and_dims(dtype_, rank_a_, rank_b_, num_dims_):
a_shape = np.random.randint(1, _MAXDIM + 1, rank_a_)
b_shape = np.random.randint(1, _MAXDIM + 1, rank_b_)
shared_shape = np.random.randint(1, _MAXDIM + 1, num_dims_)
a_dims = _random_subset(num_dims_, rank_a_)
b_dims = _random_subset(num_dims_, rank_b_)
for i in range(num_dims_):
a_shape[a_dims[i]] = shared_shape[i]
b_shape[b_dims[i]] = shared_shape[i]
a = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(a_shape)).reshape(a_shape).astype(dtype_)
b = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(b_shape)).reshape(b_shape).astype(dtype_)
return a, b, a_dims, b_dims
@pytest.mark.parametrize("dtype_", [np.float32, np.complex64])
@pytest.mark.parametrize("rank_a_", [1, 2, 3])
@pytest.mark.parametrize("rank_b_", [1, 2, 3])
@pytest.mark.parametrize("num_dims_", [1, 2, 3])
def test_tensordot_scalar_axes(dtype_, rank_a_, rank_b_, num_dims_):
if not num_dims_ <= min(rank_a_, rank_b_):
pytest.skip("Not a test")
if dtype_ == np.float16:
tol = 0.05
elif dtype_ in (np.float32, np.complex64):
tol = 1e-5
else:
tol = 1e-12
shape = [5] * num_dims_
a_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype_)
b_np = np.random.uniform(
low=-1.0, high=1.0, size=np.prod(shape)).reshape(shape).astype(dtype_)
all_axes = [0, 1]
if a_np.ndim > 2:
all_axes.append(a_np.ndim - 1)
for axes in all_axes:
np_ans = np.tensordot(a_np, b_np, axes=axes)
tf_ans = tensordot2.tensordot(tf, a_np, b_np, axes=axes)
np.testing.assert_allclose(tf_ans, np_ans, rtol=tol, atol=tol)
assert tf_ans.shape == np_ans.shape
@pytest.mark.parametrize("dtype_", [np.float32, np.complex64])
@pytest.mark.parametrize("rank_a_", [1, 2, 3])
@pytest.mark.parametrize("rank_b_", [1, 2, 3])
@pytest.mark.parametrize("num_dims_", [0, 1, 2, 3])
def test_tensordot(dtype_, rank_a_, rank_b_, num_dims_):
if not num_dims_ <= min(rank_a_, rank_b_):
pytest.skip("Not a test")
num_trials = min(30, num_dims_ * num_dims_)
if dtype_ == np.float16:
tol = 0.05
elif dtype_ in (np.float32, np.complex64):
tol = 1e-5
else:
tol = 1e-12
for _ in range(num_trials):
a_np, b_np, a_dims_np, b_dims_np = _generate_random_tensors_and_dims(
dtype_, rank_a_, rank_b_, num_dims_)
np_ans = np.tensordot(a_np, b_np, axes=(a_dims_np, b_dims_np))
tf_ans = tensordot2.tensordot(tf, a_np, b_np, (a_dims_np, b_dims_np))
np.testing.assert_allclose(tf_ans, np_ans, rtol=tol, atol=tol)
assert tf_ans.shape == np_ans.shape
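# Illustrative sketch (not part of the original tests): axes=([1], [0]) contracts the second
# axis of `a` against the first axis of `b`, i.e. an ordinary matrix product:
#
#   a = np.ones((2, 3)); b = np.ones((3, 4))
#   np.tensordot(a, b, axes=([1], [0])).shape   # -> (2, 4), same as (a @ b).shape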
|
the-stack_106_17318
|
"""
This module was written by Olivier Brebant in August 2011.
It may be used freely under the MIT license.
"""
from tkinter import Tk, Canvas, N, E, RIDGE, LEFT, BOTH, YES, NE, LAST
from math import floor
from .couleurs import rgb, rgb2hex
# A short message inviting the user to read the documentation
print("""
Merci d'utiliser la librairie repere du module pydiderotlibs.\n
N'hésitez pas à consulter la documentation en ligne:\n
https://pydiderotlibs.rtfd.io/librairies/repere.html
""")
# Define the global variable _fenetre
_fenetre = None
class _FenetreGraphique(Canvas):
" Un canvas redimensionnable, avec repère usuel, zoomable... "
# les coordonnées en pixels sont à l'intérieur du canvas (sans le bord)
# R1 : repère lié aux pixels, B1 sa base associée
# R2 : repère lié aux unités, B2 sa base associée
def __init__(self, boss=None, largeur=400, hauteur=300,
xmin=-10, ymin=-10, xmax=10, ymax=10,
background='white', axes=True):
Canvas.__init__(self, boss)
self.configure(width=largeur, height=hauteur,
bg=background, border=5, relief=RIDGE)
self.x1 = 0
self.y2 = 0
self.axes = axes
self.xmin, self.ymin, self.xmax, self.ymax = xmin, ymin, xmax, ymax
self.largeur = largeur # int(self.cget('width'))
self.hauteur = hauteur # int(self.cget('height'))
self.o2, self.p, self.inv_p = self._initialize_matrix(
xmin, ymin, xmax, ymax, largeur, hauteur)
self.pasU = self._reglage_pas()
        self.flag = 0 # no object dragging at start
        border_w = self.winfo_reqwidth() - self.largeur # inner width
        border_h = self.winfo_reqheight() - self.hauteur # inner height
        self.border = (border_w / 2, border_h / 2) # size of the border
        # where the various drawn objects are stored for
        # redrawing:
self.objets = []
# [ tag, nature, xmin, ymin, xmax, ymax, couleur, epaisseur, remplissage ]
self.redraw()
self.bind('<Configure>', self._config)
self.bind('<Button-1>', self.mouseDown)
boss.bind('<Double-Button-1>', self.dblclic)
self.bind('<Button1-Motion>', self.mouseMove)
self.bind('<Button1-ButtonRelease>', self.mouseUp)
        self.bind('<Button-4>', self.wheel) # mouse wheel on linux
        self.bind('<Button-5>', self.wheel) # mouse wheel on linux
        boss.bind('<MouseWheel>', self.wheel) # for windows
self.master.protocol("WM_DELETE_WINDOW", self.fermeture)
def _initialize_matrix(self, xmin, ymin, xmax, ymax, larg, haut):
" initialise les matrices O2, P et inv_P "
# coordonnées de O2 dans R1
o2 = [xmin * larg / (xmin - xmax), ymax * haut / (ymax - ymin)]
p = [[larg / (xmax - xmin), 0], [0, haut / (ymin - ymax)]
             ] # change-of-basis matrix from B1 to B2
        # change-of-basis matrix from B2 to B1
inv_p = [[(xmax - xmin) / larg, 0], [0, (ymin - ymax) / haut]]
return o2, p, inv_p
def _reglage_pas(self, pix=50, mult=2):
# 50 pour environ tous les 50 pix, et 5 pour tous les k*5 unités
cx = mult * floor(pix / mult / self.p[0][0] + 0.5)
cx = max(1, cx)
cy = mult * floor(pix / mult / abs(self.p[1][1]) + 0.5)
cy = max(1, cy)
return (cx, cy)
def inside(self, pt, zone):
" décide si pt (2-tuple) est dans zone (4-tuple) "
xmin, xmax = min(zone[0], zone[2]), max(zone[0], zone[2])
ymin, ymax = min(zone[1], zone[3]), max(zone[1], zone[3])
if pt[0] > xmin and pt[0] < xmax and pt[1] > ymin and pt[1] < ymax:
return 1
return 0
def mouseDown(self, event):
"Op. à effectuer quand le bouton gauche de la souris est enfoncé"
# event.x et event.y contiennent les coordonnées du clic effectué (avec
# le bord !):
# on enlève le bord...
self.x1, self.y1 = event.x - self.border[0], event.y - self.border[1]
zone_axeX = (0, self.o2[1] - 20, self.largeur, self.o2[1] + 20)
zone_axeY = (self.o2[0] - 20, 0, self.o2[0] + 20, self.hauteur)
if self.axes:
if self.inside((self.x1, self.y1), zone_axeX):
                self.flag = 1 # flag: dragging the X axis
            elif self.inside((self.x1, self.y1), zone_axeY):
                self.flag = 2 # flag: dragging the Y axis
            else:
                self.flag = 3 # dragging the whole coordinate system
else:
self.flag = 3
def dblclic(self, event):
"Rend le repère orthonormé en se basant sur l'axe des X"
self.p[1][1] = -self.p[0][0]
self.inv_p[1][1] = 1 / self.p[1][1]
self._rafraichir()
self.pasU = self._reglage_pas()
self.redraw()
def mouseMove(self, event):
"Op. à effectuer quand la souris se déplace, bouton gauche enfoncé"
x2, y2 = event.x - self.border[0], event.y - self.border[1]
dx, dy = x2 - self.x1, y2 - self.y1
if self.flag == 1:
self.translate(dx, 0)
elif self.flag == 2:
self.translate(0, dy)
elif self.flag == 3:
self.translate(dx, dy)
self.x1, self.y1 = x2, y2
self.redraw()
def mouseUp(self, event):
"Op. à effectuer quand le bouton gauche de la souris est relâché"
self.flag = 0
def wheel(self, event):
        # note: this only works on Linux
        " Handle zooming when the mouse wheel is used "
        # event.num == 4: wheel up
        # event.num == 5: wheel down
        # x and y are the coordinates of the click inside the canvas
x, y = event.x - self.border[0], event.y - self.border[1]
if event.num == 4 or event.delta > 0: # Zoom de +-10%
zoom = 0.9
else:
zoom = 1.1
zx = zy = zoom
if event.state == 20: # si on appuie sur "Control" avec la roulette
x, y = self.o2[0], self.o2[1]
elif self.axes:
if self.inside((x, y), (self.o2[0] - 10, self.o2[1] - 10,
self.o2[0] + 10, self.o2[1] + 10)): # roulette proche du centre
x, y = self.o2[0], self.o2[1]
            # wheel over the X axis
elif self.inside((x, y), (0, self.o2[1] - 20, self.largeur, self.o2[1] + 20)):
zy = 1 # on bloque sur Y
y = self.o2[1] # centrage sur l'ordonnée
            # wheel over the Y axis
elif self.inside((x, y), (self.o2[0] - 20, 0, self.o2[0] + 20, self.hauteur)):
zx = 1 # on bloque X
x = self.o2[0] # centrage sur l'ordonnée
        # apply the scaling of factor (zx, zy) centred at (x, y)
self.zoom(zx, zy, x, y)
self.redraw()
def translate(self, dx, dy):
self.o2[0] += dx
self.o2[1] += dy
self._rafraichir()
def zoom(self, zx, zy, x, y):
self.o2[0] = zx * (self.o2[0] - x) + x
self.o2[1] = zy * (self.o2[1] - y) + y
self.p[0][0] = self.p[0][0] * zx
self.p[1][1] = self.p[1][1] * zy
self.inv_p[0][0] = 1 / self.p[0][0]
self.inv_p[1][1] = 1 / self.p[1][1]
self._rafraichir()
self.pasU = self._reglage_pas()
def pix2coord(self, xp, yp):
xu = self.inv_p[0][0] * (xp - self.o2[0])
yu = self.inv_p[1][1] * (yp - self.o2[1])
return (xu, yu)
def coord2pix(self, xu, yu):
xp = self.p[0][0] * xu + self.o2[0]
yp = self.p[1][1] * yu + self.o2[1]
return (xp, yp)
    # add the border and an optional offset (delta) to the pixel coordinates
def _conv(self, x, y, delta=(0, 0)):
tmp = self.coord2pix(x, y)
return (tmp[0] + self.border[0] + delta[0],
tmp[1] + self.border[1] + delta[1])
def _rafraichir(self):
self.xmax, self .ymax = self.pix2coord(self.largeur, 0)
self.xmin, self. ymin = self.pix2coord(0, self.hauteur)
    def _config(self, event): # event contains the size of the canvas, border included
self.largeur = event.width - self.border[0] * 2
self.hauteur = event.height - self.border[1] * 2
self._rafraichir()
        # update the size variables
self.configure(width=self.largeur, height=self.hauteur)
self.redraw() # dessine_axes()
def dessine_axes(self):
# axe X
self.delete('axeX')
self.create_line(
self._conv(
self.xmin, 0, (10, 0)), self._conv(
self.xmax, 0, (-10, 0)), tag='axeX', arrow=LAST)
self.create_text(
self._conv(
self.xmax,
0),
tag='axeX',
text='x',
anchor=NE)
# Affichage des graduations 'intelligentes'
# recherche des valeurs de l'axe qui doivent être affichées dans la
# fenêtre
i1 = int(-floor(-self.xmin - 20 * self.inv_p[0][0]) / self.pasU[0])
i2 = int(floor(self.xmax - 20 * self.inv_p[0][0]) / self.pasU[0]) + 1
for i in range(i1, i2):
p = self._conv(i * self.pasU[0], 0)
self.create_line(p[0], p[1] - 4, p[0], p[1] + 4, tag='axeX')
if i == 0:
self.create_text(p[0] - 4, p[1] + 4, text=str(int(i * self.pasU[0])),
font=("Helvetica", "8"), anchor=NE, tag='axeX')
else:
self.create_text(p[0], p[1] + 4, text=str(int(i * self.pasU[0])),
font=("Helvetica", "8"), anchor=N, tag='axeX')
# axe Y
self.delete('axeY')
self.create_line(self._conv(0, self.ymin, (0, -10)),
self._conv(0, self.ymax, (0, 10)), tag='axeY', arrow=LAST)
self.create_text(
self._conv(
0,
self.ymax),
tag='axeY',
text='y',
anchor=NE)
# Affichage des graduations 'intelligentes'
# recherche des valeurs de l'axe qui doivent être affichées dans la
# fenêtre
i1 = int(floor(self.ymin + 20 * self.inv_p[1][1]) / self.pasU[1]) + 1
i2 = int(floor(self.ymax - 20 * self.inv_p[1][1]) / self.pasU[1])
for i in range(i1, i2):
p = self._conv(0, i * self.pasU[1])
self.create_line(p[0] - 4, p[1], p[0] + 4, p[1], tag='axeY')
if i != 0:
self.create_text(p[0] - 4, p[1], text=str(int(i * self.pasU[1])),
font=("Helvetica", "8"), anchor=E, tag='axeY')
def redraw(self):
if self.axes:
self.dessine_axes()
for i in range(len(self.objets)):
self.delete(self.objets[i][0])
if self.objets[i][1] == 'segment':
self.objets[i][0] = self.create_line(self._conv(self.objets[i][2], self.objets[i][3]),
self._conv(
self.objets[i][4], self.objets[i][5]),
fill=self.objets[i][6], width=self.objets[i][7])
elif self.objets[i][1] == 'rectangle':
self.objets[i][0] = self.create_rectangle(self._conv(self.objets[i][2], self.objets[i][3]),
self._conv(
self.objets[i][4], self.objets[i][5]),
outline=self.objets[i][6], width=self.objets[i][7], fill=self.objets[i][8])
elif self.objets[i][1] == 'point':
coef = 2
pt = self._conv(self.objets[i][2], self.objets[i][3])
taille = self.objets[i][5]
if self.objets[i][6] == 'rond':
self.objets[i][0] = self.create_oval(pt[0] - coef * taille, pt[1] - coef * taille,
pt[0] + coef *
taille, pt[1] +
coef * taille,
outline='', fill=self.objets[i][4])
elif self.objets[i][6] == 'croix':
self.delete(self.objets[i][7])
self.objets[i][0] = self.create_line(pt[0] - coef * taille, pt[1] - coef * taille,
pt[0] + coef *
taille, pt[1] +
coef * taille,
fill=self.objets[i][4])
self.objets[i][7] = self.create_line(pt[0] - coef * taille, pt[1] + coef * taille,
pt[0] + coef *
taille, pt[1] -
coef * taille,
fill=self.objets[i][4])
elif self.objets[i][1] == 'texte':
self.objets[i][0] = self.create_text(self._conv(self.objets[i][2], self.objets[i][3]),
text=self.objets[i][4], fill=self.objets[i][5])
# pour fermer proprement l'application avec la croix...
def fermeture(self):
# self.master.quit()
self.master.destroy()
def loop(self):
self.master.mainloop()
################################## Fin de la classe _FenetreGraphique ###
def trace_point(x, y, couleur='noir', taille=1, forme='rond'):
point(x, y, couleur, taille, forme)
def point(x, y, couleur='noir', taille=1, forme='rond'):
"""Ajoute un point dans la fenetre graphique aux coordonees ``(x, y)``.
Alias disponible: ``trace_point()``
Arguments:
x (float): abscisse du point
y (float): ordonnée du point
couleur (`couleur <#couleurs>`_, optionnel): couleur du point (``noir`` par défaut)
taille (int, optionnel): taille du point (``1`` par défaut)
forme (str, optionnel): forme du point: rond/croix (``'rond'`` par défaut)
"""
couleur = rgb2hex(rgb(couleur))
global _fenetre
coef = 2
pt = _fenetre._conv(x, y)
tab = [0] * 8
if forme == 'rond':
tab[0] = _fenetre.create_oval(pt[0] - coef * taille, pt[1] - coef * taille,
pt[0] + coef * taille, pt[1] +
coef * taille,
outline='', fill=couleur)
elif forme == 'croix':
tab[0] = _fenetre.create_line(pt[0] - coef * taille, pt[1] - coef * taille,
pt[0] + coef * taille, pt[1] +
coef * taille,
fill=couleur)
tab[7] = _fenetre.create_line(pt[0] - coef * taille, pt[1] + coef * taille,
pt[0] + coef * taille, pt[1] -
coef * taille,
fill=couleur)
tab[1] = 'point'
tab[2] = x
tab[3] = y
tab[4] = couleur
tab[5] = taille
tab[6] = forme
_fenetre.objets.append(tab)
def cercle(x, y, couleur='noir', taille=5):
"""Trace un disque dont le centre a pour coordonnées ``(x, y)`` et dont le rayon est 2 fois ``taille`` (en pixels).
Alias: ``trace_cercle()``, ``circle()``
Arguments:
x,y (float): Coordonnées du centre.
couleur (`couleur <#couleurs>`_, optionnel): Couleur du disque (``noir`` par défaut).
        taille (int, optionnel): demi-rayon (``5`` par défaut).
"""
point(x, y, couleur, taille, 'rond')
def circle(x, y, couleur='noir', taille=5):
point(x, y, couleur, taille, 'rond')
def trace_cercle(x, y, couleur='noir', taille=5):
point(x, y, couleur, taille, 'rond')
def trace_texte(x, y, message, couleur='noir'):
texte(x, y, message, couleur)
def text(x, y, message, couleur='noir'):
texte(x, y, message, couleur)
def texte(x, y, message, couleur='noir'):
"""Trace un texte dans la fenêtre graphique au coordonnées ``x, y``.
Alias : 'trace_texte()' et 'text()'
Arguments:
x (float): abscisse du point
y (float): ordonnée du point
message (str): Texte à placer dans la fenêtre graphique
couleur (`couleur <#couleurs>`_, optionnel): Couleur du texte (``noir`` par défaut)
"""
couleur = rgb2hex(rgb(couleur))
global _fenetre
tab = [0] * 6
    tab[0] = _fenetre.create_text(_fenetre._conv(x, y), text=message, fill=couleur)
tab[1] = 'texte'
tab[2] = x
tab[3] = y
tab[4] = message
tab[5] = couleur
_fenetre.objets.append(tab)
def trace_segment(x1, y1, x2, y2, couleur='noir', taille=2):
segment(x1, y1, x2, y2, couleur, taille)
def segment(x1, y1, x2, y2, couleur='noir', taille=2):
"""Trace un segment entre les points de coordonnées ``(x1, y1)`` et ``(x2, y2)``.
Alias: ``trace_segment()``
Arguments:
x1,y1,x2,y2 (float): Coordonnées des extrémités du segment.
couleur (`couleur <#couleurs>`_, optionnel): Couleur du segment (``noir`` par défaut).
        taille (int, optionnel): Épaisseur du segment (``2`` par défaut).
"""
couleur = rgb2hex(rgb(couleur))
global _fenetre
tab = [0] * 8
tab[0] = _fenetre.create_line(
_fenetre._conv(
x1, y1), _fenetre._conv(
x2, y2), width=taille, fill=couleur)
tab[1] = 'segment'
tab[2] = x1
tab[3] = y1
tab[4] = x2
tab[5] = y2
tab[6] = couleur
tab[7] = taille
_fenetre.objets.append(tab)
def trace_rectangle(x1, y1, largeur, hauteur, couleur='noir',
taille=2, remplissage='jaune'):
rectangle(x1, y1, largeur, hauteur, couleur, taille, remplissage)
def rectangle(x1, y1, largeur, hauteur, couleur='noir',
taille=2, remplissage='jaune'):
"""Trace un rectangle dont le sommet en bas à gauche a pour coordonnées ``(x1, y1)``.
Alias: ``trace_rectangle()``
Arguments:
x1,y1 (float): Coordonnées du sommet en bas à gauche du rectangle.
largeur,hauteur (float): Largeur et hauteur du rectangle
        couleur (`couleur <#couleurs>`_, optionnel): Couleur des côtés du rectangle (``noir`` par défaut).
        taille (int, optionnel): Épaisseur des côtés du rectangle (``2`` par défaut).
        remplissage (`couleur <#couleurs>`_, optionnel): Couleur de l'intérieur du rectangle (``jaune`` par défaut)
"""
couleur = rgb2hex(rgb(couleur))
remplissage = rgb2hex(rgb(remplissage))
global _fenetre
tab = [0] * 9
tab[0] = _fenetre.create_rectangle(_fenetre._conv(x1, y1), _fenetre._conv(x1 + largeur, y1 + hauteur),
outline=couleur, width=taille, fill=remplissage)
tab[1] = 'rectangle'
tab[2] = x1
tab[3] = y1
tab[4] = x1 + largeur
tab[5] = y1 + hauteur
tab[6] = couleur
tab[7] = taille
tab[8] = remplissage
_fenetre.objets.append(tab)
### Fonction principale pour demarrer ####
def creer_fenetre(xmin=-10, xmax=10, ymin=-10, ymax=10,
fond='blanc', titre="Repère mathematique", axes=True):
fenetre(xmin, xmax, ymin, ymax, fond, titre, axes)
def window(xmin=-10, xmax=10, ymin=-10, ymax=10, fond='blanc',
titre="Repère mathematique", axes=True):
fenetre(xmin, xmax, ymin, ymax, fond, titre, axes)
def fenetre(xmin=-10, xmax=10, ymin=-10, ymax=10, fond='blanc',
titre="Repère mathematique", axes=True):
"""Initialise l'object fenetre graphique sans l'afficher.
Alias: ``creer_fenetre()`` et ``window()``
Arguments:
titre (str): Titre de la fenêtre. La valeur par défaut est ``Repère mathematique``.
xmin,xmax,ymin,ymax (float) : Dimensions du repère. Les valeurs par défaut sont -10, 10, -10, 10
fond (`couleur <#couleurs>`_, optionnel): Couleur de fond de la fenêtre (``blanc`` par défaut).
axes (bool, optionnel): Affiche les axes du repère si ``True`` (``True`` par défaut).
"""
fond = rgb2hex(rgb(fond))
root = Tk() # on crée la fenêtre
root.title(titre) # on lui donne un titre
graph = _FenetreGraphique(root, xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax,
background=fond, axes=axes)
graph.pack(side=LEFT, fill=BOTH, expand=YES)
global _fenetre
_fenetre = graph
return graph
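# ---------------------------------------------------------------------------
# Esquisse d'utilisation / illustrative usage sketch -- not part of the
# original module. It only calls helpers defined above (fenetre, point,
# segment, rectangle, texte) and the loop() method of the _FenetreGraphique
# object returned by fenetre(). The colour names used here are the module
# defaults ('noir', 'jaune'); other names depend on the rgb() helper defined
# earlier in this file.
if __name__ == "__main__":
    g = fenetre(xmin=-5, xmax=5, ymin=-5, ymax=5, titre="Demo", axes=True)
    point(1, 1, couleur='noir', taille=3, forme='croix')    # une croix en (1, 1)
    segment(-4, -4, 4, 4, couleur='noir', taille=2)         # une diagonale
    rectangle(-2, -3, 3, 1, remplissage='jaune')            # rectangle 3 x 1
    texte(0, 4, "Bonjour")                                  # un texte en (0, 4)
    g.loop()                                                # boucle Tk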
|
the-stack_106_17319
|
from pathlib import Path
import cv2
import numpy as np
import torch
from custom_models.model import GrayscaleModel, GaussianBlur
def export():
output_dir = Path(__file__).parent / 'out'
output_dir.mkdir(parents=True, exist_ok=True)
export_onnx(output_dir=output_dir)
print('Done.')
def export_onnx(output_dir):
"""
Exports the model to an ONNX file.
"""
# Channels Last
shape = (1, 300, 300, 3)
model = GaussianBlur(shape=shape, dtype=torch.float)
X = torch.ones(shape, dtype=torch.uint8)
torch.onnx.export(
model,
X,
f'{output_dir.as_posix()}/model.onnx',
opset_version=9,
do_constant_folding=True
)
def preview():
capture = cv2.VideoCapture(0)
model = None
while True:
_, color = capture.read()
if not model:
shape = (1, ) + color.shape
model = GaussianBlur(shape=shape, dtype=torch.uint8).eval()
# Numpy -> torch.Tensor
cv2.imshow('Original', color)
color = torch.from_numpy(color)
color = torch.unsqueeze(color, dim=0)
out = model(color)[0].detach().numpy()
# Channels Last
out = out.astype(np.uint8)
cv2.imshow('Gray', out)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
capture.release()
cv2.destroyAllWindows()
if __name__ == "__main__":
export()
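# ---------------------------------------------------------------------------
# Optional sanity check (illustrative sketch, not part of the original
# script): after export() has written out/model.onnx, the graph can be loaded
# with onnxruntime and fed an input shaped like the dummy tensor used for
# tracing. The `onnxruntime` dependency is an assumption -- it is not
# imported above.
#
#   import onnxruntime as ort
#
#   sess = ort.InferenceSession('out/model.onnx')
#   input_name = sess.get_inputs()[0].name
#   dummy = np.ones((1, 300, 300, 3), dtype=np.uint8)  # matches the export shape
#   outputs = sess.run(None, {input_name: dummy})
#   print(outputs[0].shape)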
|
the-stack_106_17321
|
# Copyright 2019 The Texar Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import torch
from BIT_DL.pytorch.modules.encoders.transformer_encoder import TransformerEncoder
from BIT_DL.pytorch.modules.pretrained.t5_utils import \
T5LayerNorm, MultiheadRPRAttention
from BIT_DL.pytorch.modules.networks.networks import FeedForwardNetwork
from BIT_DL.pytorch.modules.encoders.transformer_encoder import \
default_transformer_poswise_net_hparams
from BIT_DL.pytorch.utils import sequence_mask, transformer_attentions as attn
class T5Encoder(TransformerEncoder):
r"""Transformer based encoder that applies multi-head self attention with
relative positional representations for encoding sequences for T5.
This module basically stacks
:class:`~BIT_DL.pytorch.modules.pretrained.t5_utils.MultiheadRPRAttention`,
:class:`~BIT_DL.pytorch.modules.FeedForwardNetwork` and residual connections.
This module supports the standard T5 architecture proposed in
`(Raffel et al.) "Exploring the Limits of Transfer Learning with a Unified
Text-to-Text Transformer"`.
Args:
hparams (dict or HParams, optional): Hyperparameters. Missing
hyperparameters will be set to default values. See
:meth:`default_hparams` for the hyperparameter structure and
default values.
.. document private functions
"""
def __init__(self, hparams=None):
super().__init__(hparams=hparams)
self.final_layer_norm = T5LayerNorm(self._input_size,
eps=self._hparams.eps)
def initialize_blocks(self):
r"""Helper function to initialize blocks.
"""
for i in range(self._hparams.num_blocks):
mh_attn = MultiheadRPRAttention(
self._input_size,
self._hparams.multihead_attention,
stores_relative_position=bool(i == 0)
)
self.self_attns.append(mh_attn)
self.self_attn_layer_norm.append(
T5LayerNorm(self._input_size, eps=self._hparams.eps))
if self._hparams.dim != mh_attn.hparams.output_dim:
raise ValueError(
'The "dim" in the hparams of '
'"multihead_attention" should be equal to the '
'"dim" of T5Encoder')
pw_net = FeedForwardNetwork(
hparams=self._hparams['poswise_feedforward'])
final_dim = pw_net.hparams.layers[-1]['kwargs']['out_features']
if self._hparams.dim != final_dim:
raise ValueError(
                    'The output dimension of '
'"poswise_feedforward" should be equal '
'to the "dim" of T5Encoder.')
self.poswise_networks.append(pw_net)
self.poswise_layer_norm.append(
T5LayerNorm(self._input_size, eps=self._hparams.eps))
@staticmethod
def default_hparams():
r"""Returns a dictionary of hyperparameters with default values.
.. code-block:: python
{
"num_blocks": 6,
"dim": 512,
"embedding_dropout": 0.1,
"residual_dropout": 0.1,
"use_bert_config: False,
"poswise_feedforward": default_transformer_poswise_net_hparams,
'multihead_attention': {
'name': 'multihead_rpr_attention',
'num_units': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'output_dim': 512,
'use_bias': False,
'is_decoder': False,
'relative_attention_num_buckets': 32
},
"initializer": None,
"eps": 1e-6,
"name": "t5_encoder"
}
Here:
`"num_blocks"`: int
Number of stacked blocks.
`"dim"`: int
Hidden dimension of the encoders.
`"embedding_dropout"`: float
Dropout rate of the input embedding.
`"residual_dropout"`: float
Dropout rate of the residual connections.
"eps"`: float
Epsilon values for layer norm layers.
`"poswise_feedforward"`: dict
Hyperparameters for a feed-forward network used in residual
connections.
Make sure the dimension of the output tensor is equal to ``"dim"``.
See
:func:`~BIT_DL.pytorch.modules.default_transformer_poswise_net_hparams`
for details.
`"multihead_attention"`: dict
Hyperparameters for the multi-head attention strategy.
Make sure the ``"output_dim"`` in this module is equal to ``"dim"``.
See :class:`~BIT_DL.pytorch.modules.MultiheadRPRAttention` for
details.
`"initializer"`: dict, optional
Hyperparameters of the default initializer that initializes
variables created in this module.
See :func:`~BIT_DL.pytorch.core.get_initializer` for details.
`"name"`: str
Name of the module.
"""
dim = 512
return {
'num_blocks': 6,
'dim': dim,
'embedding_dropout': 0.1,
'residual_dropout': 0.1,
'use_bert_config': False,
'poswise_feedforward': default_transformer_poswise_net_hparams(dim),
'multihead_attention': {
'name': 'multihead_rpr_attention',
'num_units': 512,
'num_heads': 8,
'dropout_rate': 0.1,
'output_dim': 512,
'use_bias': False,
'is_decoder': False,
'relative_attention_num_buckets': 32
},
'initializer': None,
'eps': 1e-6,
'name': 't5_encoder',
}
def forward(self, # type: ignore
inputs: torch.Tensor,
sequence_length: torch.LongTensor) -> torch.Tensor:
r"""Encodes the inputs.
Args:
inputs: A 3D Tensor of shape ``[batch_size, max_time, dim]``,
containing the embedding of input sequences. Note that
the embedding dimension `dim` must equal `"dim"` in
:attr:`hparams`. The input embedding is typically an
aggregation of word embedding and position embedding.
sequence_length: A 1D :tensor:`LongTensor` of shape
``[batch_size]``. Input tokens beyond respective sequence
lengths are masked out automatically.
Returns:
A Tensor of shape ``[batch_size, max_time, dim]`` containing the
encoded vectors.
"""
        # Build the self-attention bias that masks out padding positions
        # beyond each sequence length.
inputs_padding = 1 - sequence_mask(
sequence_length, inputs.size()[1]).float()
ignore_padding = attn.attention_bias_ignore_padding(inputs_padding)
encoder_self_attention_bias = ignore_padding
x = self.embed_dropout(inputs)
position_bias = None
for i in range(self._hparams.num_blocks):
_queries_input = self.self_attn_layer_norm[i](x)
attention_output, position_bias = self.self_attns[i](
queries=_queries_input,
memory=_queries_input,
memory_attention_bias=encoder_self_attention_bias,
position_bias=position_bias
)
attention_output = self.residual_dropout(attention_output)
x = x + attention_output
poswise_network = self.poswise_networks[i]
poswise_normalizer = self.poswise_layer_norm[i]
y = poswise_normalizer(x)
original_shape = y.size()
y = y.view(-1, self._hparams.dim)
layer_output = poswise_network(y)
sub_output = self.residual_dropout(layer_output)
sub_output = sub_output.view(original_shape)
x = x + sub_output
x = self.final_layer_norm(x)
return x
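# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). With the
# default hyperparameters the encoder expects input embeddings of size
# dim=512:
#
#   encoder = T5Encoder()                  # default_hparams(), dim=512
#   inputs = torch.rand(2, 10, 512)        # [batch_size, max_time, dim]
#   lengths = torch.tensor([10, 7])        # [batch_size]
#   outputs = encoder(inputs, lengths)     # -> [2, 10, 512]
#
# Positions beyond each sequence length are masked out through the attention
# bias built in forward(), so the output keeps the shape of the input.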
|
the-stack_106_17324
|
#!/usr/bin/python -Wall
# ================================================================
# Please see LICENSE.txt in the same directory as this file.
# John Kerl
# [email protected]
# 2007-05-31
# ================================================================
# Group module for the metacyclic group parameterized by p, q, and t.
import metacyc_tm
def get_elements_str(params_string):
[p, q, t] = metacyc_tm.params_from_string(params_string)
pq = p * q
elts = []
for i in range(0, p):
for j in range(0, q):
elts.append(metacyc_tm.metacyc_t(i, j, p, q, t))
return elts
|
the-stack_106_17325
|
import scrapy
import time
class Spider(scrapy.Spider):
name = 'ip'
allowed_domains = []
def start_requests(self):
url = 'http://ip.chinaz.com/getip.aspx'
# url = 'http://httpbin.org/get'
for i in range(4):
print("++++++++++++++++++++++++++++++++++++++++++++",i)
time.sleep(0.1)
yield scrapy.Request(url=url, meta={"download_timeout":2}, callback=self.parse, dont_filter=True)
def parse(self,response):
print("-------------------------------------------")
print(response.text)
|
the-stack_106_17331
|
import torch
import pytest
from onnx2pytorch.operations import Reshape
@pytest.fixture
def inp():
return torch.rand(35, 1, 200)
@pytest.fixture
def pruned_inp():
return torch.rand(35, 1, 160)
@pytest.mark.parametrize("enable_pruning", [True, False])
def test_reshape(inp, pruned_inp, enable_pruning):
"""Pass shape in forward."""
op = Reshape(enable_pruning=True)
shape = torch.Size((35, 2, 100))
out = op(inp, shape)
assert out.shape == shape
# with the same input, the output shape should not change
out = op(inp, shape)
assert out.shape == shape
# if input changes due to pruning, reshape should work
# and output shape should change accordingly
expected_shape = torch.Size((35, 2, 80))
out = op(pruned_inp, shape)
assert out.shape == expected_shape
@pytest.mark.parametrize("enable_pruning", [True, False])
def test_reshape_2(inp, pruned_inp, enable_pruning):
"""Pass shape in init."""
shape = torch.Size((35, 2, 100))
op = Reshape(enable_pruning=True, shape=shape)
out = op(inp)
assert out.shape == shape
# input changes due to pruning, reshape should work
expected_shape = torch.Size((35, 2, 80))
out = op(pruned_inp)
assert out.shape == expected_shape
|
the-stack_106_17332
|
from tapiriik.settings import WEB_ROOT, ENDOMONDO_CLIENT_KEY, ENDOMONDO_CLIENT_SECRET, SECRET_KEY
from tapiriik.services.service_base import ServiceAuthenticationType, ServiceBase
from tapiriik.services.interchange import UploadedActivity, ActivityType, ActivityStatistic, ActivityStatisticUnit, Waypoint, WaypointType, Location, Lap
from tapiriik.services.api import APIException, APIExcludeActivity, UserException, UserExceptionType
from tapiriik.database import redis
from django.core.urlresolvers import reverse
from datetime import timedelta, datetime
import dateutil.parser
from requests_oauthlib import OAuth1Session
import logging
import pytz
import json
import os
import hashlib
logger = logging.getLogger(__name__)
class EndomondoService(ServiceBase):
ID = "endomondo"
DisplayName = "Endomondo"
DisplayAbbreviation = "EN"
AuthenticationType = ServiceAuthenticationType.OAuth
UserProfileURL = "https://www.endomondo.com/profile/{0}"
UserActivityURL = "https://www.endomondo.com/users/{0}/workouts/{1}"
PartialSyncRequiresTrigger = True
AuthenticationNoFrame = True
ConfigurationDefaults = {
"DeviceRegistered": False,
}
# The complete list:
# running,cycling transportation,cycling sport,mountain biking,skating,roller skiing,skiing cross country,skiing downhill,snowboarding,kayaking,kite surfing,rowing,sailing,windsurfing,fitness walking,golfing,hiking,orienteering,walking,riding,swimming,spinning,other,aerobics,badminton,baseball,basketball,boxing,stair climbing,cricket,cross training,dancing,fencing,american football,rugby,soccer,handball,hockey,pilates,polo,scuba diving,squash,table tennis,tennis,beach volley,volleyball,weight training,yoga,martial arts,gymnastics,step counter,crossfit,treadmill running,skateboarding,surfing,snowshoeing,wheelchair,climbing,treadmill walking,kick scooter,standup paddling,running trail,rowing indoor,floorball,ice skating,skiing touring,rope jumping,stretching,running canicross,paddle tennis,paragliding
_activityMappings = {
"running": ActivityType.Running,
"cycling transportation": ActivityType.Cycling,
"cycling sport": ActivityType.Cycling,
"mountain biking": ActivityType.MountainBiking,
"skating": ActivityType.Skating,
"skiing cross country": ActivityType.CrossCountrySkiing,
"skiing downhill": ActivityType.DownhillSkiing,
"snowboarding": ActivityType.Snowboarding,
"rowing": ActivityType.Rowing,
"fitness walking": ActivityType.Walking,
"hiking": ActivityType.Hiking,
"orienteering": ActivityType.Running,
"walking": ActivityType.Walking,
"swimming": ActivityType.Swimming,
"spinning": ActivityType.Cycling, # indoor cycling
"other": ActivityType.Other,
"cross training": ActivityType.Elliptical, # elliptical training
"weight training": ActivityType.StrengthTraining,
"treadmill running": ActivityType.Running,
"snowshoeing": ActivityType.Walking,
"wheelchair": ActivityType.Wheelchair,
"climbing": ActivityType.Climbing,
"roller skiing": ActivityType.RollerSkiing,
"treadmill walking": ActivityType.Walking,
"running trail": ActivityType.Running,
"rowing indoor": ActivityType.Rowing,
"running canicross": ActivityType.Running,
"stand up paddling": ActivityType.StandUpPaddling,
}
_reverseActivityMappings = {
"running": ActivityType.Running,
"cycling sport": ActivityType.Cycling,
"mountain biking": ActivityType.MountainBiking,
"skating": ActivityType.Skating,
"skiing cross country": ActivityType.CrossCountrySkiing,
"skiing downhill": ActivityType.DownhillSkiing,
"snowboarding": ActivityType.Snowboarding,
"rowing": ActivityType.Rowing,
"walking": ActivityType.Walking,
"hiking": ActivityType.Hiking,
"swimming": ActivityType.Swimming,
"other": ActivityType.Other,
"wheelchair": ActivityType.Wheelchair,
"climbing" : ActivityType.Climbing,
"roller skiing": ActivityType.RollerSkiing,
"stand up paddling": ActivityType.StandUpPaddling,
}
_activitiesThatDontRoundTrip = {
ActivityType.Cycling,
ActivityType.Running,
ActivityType.Walking
}
SupportedActivities = list(_activityMappings.values())
ReceivesNonGPSActivitiesWithOtherSensorData = False
def WebInit(self):
self.UserAuthorizationURL = reverse("oauth_redirect", kwargs={"service": "endomondo"})
def _rateLimitBailout(self, response):
if response.status_code == 503 and "user_refused" in response.text:
raise APIException("Endomondo user token rate limit reached", user_exception=UserException(UserExceptionType.RateLimited))
def _oauthSession(self, connection=None, **params):
if connection:
params["resource_owner_key"] = connection.Authorization["Token"]
params["resource_owner_secret"] = connection.Authorization["Secret"]
return OAuth1Session(ENDOMONDO_CLIENT_KEY, client_secret=ENDOMONDO_CLIENT_SECRET, **params)
def GenerateUserAuthorizationURL(self, session, level=None):
oauthSession = self._oauthSession(callback_uri=WEB_ROOT + reverse("oauth_return", kwargs={"service": "endomondo"}))
tokens = oauthSession.fetch_request_token("https://api.endomondo.com/oauth/request_token")
redis_token_key = 'endomondo:oauth:%s' % tokens["oauth_token"]
redis.setex(redis_token_key, tokens["oauth_token_secret"], timedelta(hours=24))
return oauthSession.authorization_url("https://www.endomondo.com/oauth/authorize")
def RetrieveAuthorizationToken(self, req, level):
redis_token_key = "endomondo:oauth:%s" % req.GET["oauth_token"]
secret = redis.get(redis_token_key)
assert secret
redis.delete(redis_token_key)
oauthSession = self._oauthSession(resource_owner_secret=secret)
oauthSession.parse_authorization_response(req.get_full_path())
tokens = oauthSession.fetch_access_token("https://api.endomondo.com/oauth/access_token")
userInfo = oauthSession.get("https://api.endomondo.com/api/1/user")
userInfo = userInfo.json()
return (userInfo["id"], {"Token": tokens["oauth_token"], "Secret": tokens["oauth_token_secret"]})
def RevokeAuthorization(self, serviceRecord):
pass
def _parseDate(self, date):
return datetime.strptime(date, "%Y-%m-%d %H:%M:%S UTC").replace(tzinfo=pytz.utc)
def _formatDate(self, date):
return datetime.strftime(date.astimezone(pytz.utc), "%Y-%m-%d %H:%M:%S UTC")
def DownloadActivityList(self, serviceRecord, exhaustive=False):
oauthSession = self._oauthSession(serviceRecord)
activities = []
exclusions = []
page_url = "https://api.endomondo.com/api/1/workouts"
while True:
resp = oauthSession.get(page_url)
try:
respList = resp.json()["data"]
except ValueError:
self._rateLimitBailout(resp)
raise APIException("Error decoding activity list resp %s %s" % (resp.status_code, resp.text))
for actInfo in respList:
activity = UploadedActivity()
activity.StartTime = self._parseDate(actInfo["start_time"])
logger.debug("Activity s/t %s" % activity.StartTime)
if "is_tracking" in actInfo and actInfo["is_tracking"]:
exclusions.append(APIExcludeActivity("Not complete", activity_id=actInfo["id"], permanent=False, user_exception=UserException(UserExceptionType.LiveTracking)))
continue
if "end_time" in actInfo:
activity.EndTime = self._parseDate(actInfo["end_time"])
if actInfo["sport"] in self._activityMappings:
activity.Type = self._activityMappings[actInfo["sport"]]
# "duration" is timer time
if "duration_total" in actInfo:
activity.Stats.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, value=float(actInfo["duration_total"]))
if "distance_total" in actInfo:
activity.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Kilometers, value=float(actInfo["distance_total"]))
if "calories_total" in actInfo:
activity.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilocalories, value=float(actInfo["calories_total"]))
activity.Stats.Elevation = ActivityStatistic(ActivityStatisticUnit.Meters)
if "altitude_max" in actInfo:
activity.Stats.Elevation.Max = float(actInfo["altitude_max"])
if "altitude_min" in actInfo:
activity.Stats.Elevation.Min = float(actInfo["altitude_min"])
if "total_ascent" in actInfo:
activity.Stats.Elevation.Gain = float(actInfo["total_ascent"])
if "total_descent" in actInfo:
activity.Stats.Elevation.Loss = float(actInfo["total_descent"])
activity.Stats.Speed = ActivityStatistic(ActivityStatisticUnit.KilometersPerHour)
if "speed_max" in actInfo:
activity.Stats.Speed.Max = float(actInfo["speed_max"])
if "heart_rate_avg" in actInfo:
activity.Stats.HR = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=float(actInfo["heart_rate_avg"]))
if "heart_rate_max" in actInfo:
activity.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, max=float(actInfo["heart_rate_max"])))
if "cadence_avg" in actInfo:
activity.Stats.Cadence = ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, avg=int(actInfo["cadence_avg"]))
if "cadence_max" in actInfo:
activity.Stats.Cadence.update(ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, max=int(actInfo["cadence_max"])))
if "power_avg" in actInfo:
activity.Stats.Power = ActivityStatistic(ActivityStatisticUnit.Watts, avg=int(actInfo["power_avg"]))
if "power_max" in actInfo:
activity.Stats.Power.update(ActivityStatistic(ActivityStatisticUnit.Watts, max=int(actInfo["power_max"])))
if "title" in actInfo:
activity.Name = actInfo["title"]
activity.ServiceData = {"WorkoutID": int(actInfo["id"]), "Sport": actInfo["sport"]}
activity.CalculateUID()
activities.append(activity)
paging = resp.json()["paging"]
if "next" not in paging or not paging["next"] or not exhaustive:
break
else:
page_url = paging["next"]
return activities, exclusions
def SubscribeToPartialSyncTrigger(self, serviceRecord):
resp = self._oauthSession(serviceRecord).put("https://api.endomondo.com/api/1/subscriptions/workout/%s" % serviceRecord.ExternalID)
try:
assert resp.status_code in [200, 201] # Created, or already existed
except:
raise APIException("Could not unsubscribe - received unknown result %s - %s" % (resp.status_code, resp.text))
serviceRecord.SetPartialSyncTriggerSubscriptionState(True)
def UnsubscribeFromPartialSyncTrigger(self, serviceRecord):
resp = self._oauthSession(serviceRecord).delete("https://api.endomondo.com/api/1/subscriptions/workout/%s" % serviceRecord.ExternalID)
try:
assert resp.status_code in [204, 500] # Docs say otherwise, but no-subscription-found is 500
except:
raise APIException("Could not unsubscribe - received unknown result %s - %s" % (resp.status_code, resp.text))
serviceRecord.SetPartialSyncTriggerSubscriptionState(False)
def ExternalIDsForPartialSyncTrigger(self, req):
data = json.loads(req.body.decode("UTF-8"))
delta_external_ids = [(int(x["id"]), None) for x in data["data"]]
return delta_external_ids
def DownloadActivity(self, serviceRecord, activity):
resp = self._oauthSession(serviceRecord).get("https://api.endomondo.com/api/1/workouts/%d" % activity.ServiceData["WorkoutID"], params={"fields": "points"})
try:
resp = resp.json()
except ValueError:
self._rateLimitBailout(resp)
            res_txt = resp.text
            raise APIException("Parse failure in Endomondo activity download: %s %s" % (resp.status_code, res_txt))
lap = Lap(stats=activity.Stats, startTime=activity.StartTime, endTime=activity.EndTime)
activity.Laps = [lap]
activity.GPS = False
old_location = None
in_pause = False
for pt in resp["points"]:
wp = Waypoint()
if "time" not in pt:
# Manually-entered activities with a course attached to them have date-less waypoints
# It'd be nice to transfer those courses, but it's a concept few other sites support AFAIK
# So, ignore the points entirely
continue
wp.Timestamp = self._parseDate(pt["time"])
if ("lat" in pt and "lng" in pt) or "alt" in pt:
wp.Location = Location()
if "lat" in pt and "lng" in pt:
wp.Location.Latitude = pt["lat"]
wp.Location.Longitude = pt["lng"]
activity.GPS = True
if "alt" in pt:
wp.Location.Altitude = pt["alt"]
if wp.Location == old_location:
# We have seen the point with the same coordinates
# before. This causes other services (e.g Strava) to
# interpret this as if we were standing for a while,
# which causes us having wrong activity time when
# importing. We mark the point as paused in hopes this
# fixes the issue.
in_pause = True
wp.Type = WaypointType.Pause
elif in_pause:
in_pause = False
wp.Type = WaypointType.Resume
old_location = wp.Location
if "hr" in pt:
wp.HR = pt["hr"]
if "cad" in pt:
wp.Cadence = pt["cad"]
if "pow" in pt:
wp.Power = pt["pow"]
lap.Waypoints.append(wp)
activity.Stationary = len(lap.Waypoints) == 0
return activity
def _deviceId(self, serviceRecord):
csp = hashlib.new("md5")
csp.update(str(serviceRecord.ExternalID).encode("utf-8"))
csp.update(SECRET_KEY.encode("utf-8"))
return "tap-" + csp.hexdigest()
def _getSport(self, activity):
# This is an activity type that doesn't round trip
if (activity.Type in self._activitiesThatDontRoundTrip and
# We have the original sport
"Sport" in activity.ServiceData and
# We know what this sport is
activity.ServiceData["Sport"] in self._activityMappings and
# The type didn't change (if we changed from Walking to Cycling, we'd want to let the new value through)
activity.Type == self._activityMappings[activity.ServiceData["Sport"]]):
return activity.ServiceData["Sport"]
else:
return [k for k,v in self._reverseActivityMappings.items() if v == activity.Type][0]
def UploadActivity(self, serviceRecord, activity):
session = self._oauthSession(serviceRecord)
device_id = self._deviceId(serviceRecord)
if not serviceRecord.GetConfiguration()["DeviceRegistered"]:
device_info = {
"name": "tapiriik",
"vendor": "tapiriik",
"model": "tapiriik",
"os": "tapiriik",
"os_version": "1",
"app_variant": "tapiriik",
"app_version": "1"
}
device_add_resp = session.post("https://api.endomondo.com/api/1/device/%s" % device_id, data=json.dumps(device_info))
if device_add_resp.status_code != 200:
self._rateLimitBailout(device_add_resp)
raise APIException("Could not add device %s %s" % (device_add_resp.status_code, device_add_resp.text))
serviceRecord.SetConfiguration({"DeviceRegistered": True})
activity_id = "tap-" + activity.UID + "-" + str(os.getpid())
sport = self._getSport(activity)
upload_data = {
"device_id": device_id,
"sport": sport,
"start_time": self._formatDate(activity.StartTime),
"end_time": self._formatDate(activity.EndTime),
"points": []
}
if activity.Name:
upload_data["title"] = activity.Name
if activity.Notes:
upload_data["notes"] = activity.Notes
if activity.Stats.Distance.Value is not None:
upload_data["distance_total"] = activity.Stats.Distance.asUnits(ActivityStatisticUnit.Kilometers).Value
if activity.Stats.TimerTime.Value is not None:
upload_data["duration_total"] = activity.Stats.TimerTime.asUnits(ActivityStatisticUnit.Seconds).Value
elif activity.Stats.MovingTime.Value is not None:
upload_data["duration_total"] = activity.Stats.MovingTime.asUnits(ActivityStatisticUnit.Seconds).Value
else:
upload_data["duration_total"] = (activity.EndTime - activity.StartTime).total_seconds()
if activity.Stats.Energy.Value is not None:
upload_data["calories_total"] = activity.Stats.Energy.asUnits(ActivityStatisticUnit.Kilocalories).Value
elev_stats = activity.Stats.Elevation.asUnits(ActivityStatisticUnit.Meters)
if elev_stats.Max is not None:
upload_data["altitude_max"] = elev_stats.Max
if elev_stats.Min is not None:
upload_data["altitude_min"] = elev_stats.Min
if elev_stats.Gain is not None:
upload_data["total_ascent"] = elev_stats.Gain
if elev_stats.Loss is not None:
upload_data["total_descent"] = elev_stats.Loss
speed_stats = activity.Stats.Speed.asUnits(ActivityStatisticUnit.KilometersPerHour)
if speed_stats.Max is not None:
upload_data["speed_max"] = speed_stats.Max
hr_stats = activity.Stats.HR.asUnits(ActivityStatisticUnit.BeatsPerMinute)
if hr_stats.Average is not None:
upload_data["heart_rate_avg"] = hr_stats.Average
if hr_stats.Max is not None:
upload_data["heart_rate_max"] = hr_stats.Max
if activity.Stats.Cadence.Average is not None:
upload_data["cadence_avg"] = activity.Stats.Cadence.asUnits(ActivityStatisticUnit.RevolutionsPerMinute).Average
elif activity.Stats.RunCadence.Average is not None:
upload_data["cadence_avg"] = activity.Stats.RunCadence.asUnits(ActivityStatisticUnit.StepsPerMinute).Average
if activity.Stats.Cadence.Max is not None:
upload_data["cadence_max"] = activity.Stats.Cadence.asUnits(ActivityStatisticUnit.RevolutionsPerMinute).Max
elif activity.Stats.RunCadence.Max is not None:
upload_data["cadence_max"] = activity.Stats.RunCadence.asUnits(ActivityStatisticUnit.StepsPerMinute).Max
if activity.Stats.Power.Average is not None:
upload_data["power_avg"] = activity.Stats.Power.asUnits(ActivityStatisticUnit.Watts).Average
if activity.Stats.Power.Max is not None:
upload_data["power_max"] = activity.Stats.Power.asUnits(ActivityStatisticUnit.Watts).Max
for wp in activity.GetFlatWaypoints():
pt = {
"time": self._formatDate(wp.Timestamp),
}
if wp.Location:
if wp.Location.Latitude is not None and wp.Location.Longitude is not None:
pt["lat"] = wp.Location.Latitude
pt["lng"] = wp.Location.Longitude
if wp.Location.Altitude is not None:
pt["alt"] = wp.Location.Altitude
if wp.HR is not None:
pt["hr"] = round(wp.HR)
if wp.Cadence is not None:
pt["cad"] = round(wp.Cadence)
elif wp.RunCadence is not None:
pt["cad"] = round(wp.RunCadence)
if wp.Power is not None:
pt["pow"] = round(wp.Power)
if wp.Type == WaypointType.Pause:
pt["inst"] = "pause"
elif wp.Type == WaypointType.Resume:
pt["inst"] = "resume"
upload_data["points"].append(pt)
if len(upload_data["points"]):
upload_data["points"][0]["inst"] = "start"
upload_data["points"][-1]["inst"] = "stop"
upload_resp = session.post("https://api.endomondo.com/api/1/workouts/%s" % activity_id, data=json.dumps(upload_data))
if upload_resp.status_code != 200:
self._rateLimitBailout(upload_resp)
raise APIException("Could not upload activity %s %s" % (upload_resp.status_code, upload_resp.text))
return upload_resp.json()["id"]
def DeleteCachedData(self, serviceRecord):
pass
def DeleteActivity(self, serviceRecord, uploadId):
session = self._oauthSession(serviceRecord)
del_res = session.delete("https://api.endomondo.com/api/1/workouts/%s" % uploadId)
del_res.raise_for_status()
|
the-stack_106_17334
|
import os
import re
import pdfkit
import pickle
import requests
import pytesseract
import urllib.request
from PIL import Image
from bs4 import BeautifulSoup
from config import URL,HEADER
def connect():
if not os.path.isdir('.temp'):
os.mkdir('.temp')
with requests.Session() as request:
form_data={}
try:
response = request.get(URL,headers=HEADER)
soup = BeautifulSoup(response.text,'html.parser')
#Bypassing Captcha
#-----------------
link = soup.find('img' , {'id': 'imgCaptcha'})
captcha = link.get('src')
captchaLink = URL.split('Combine_GradeCard.aspx')[0]+captcha
urllib.request.urlretrieve(captchaLink,'.temp/captcha.jpg')
captchaText = pytesseract.image_to_string(Image.open('.temp/captcha.jpg'))
#-----------------
viewstate = soup.select("#__VIEWSTATE")[0]['value']
eventValidation = soup.select("#__EVENTVALIDATION")[0]['value']
viewstateGenerator = soup.select('#__VIEWSTATEGENERATOR')[0]['value']
form_data={
'__EVENTTARGET':'',
'__EVENTARGUMENT':'',
'__VIEWSTATE': viewstate,
'__VIEWSTATEGENERATOR': viewstateGenerator,
'__EVENTVALIDATION': eventValidation,
'captcha':"rb_captcha_image",
'txtcaptcha': captchaText,
'btnsearch': 'Print+Score+Card'
}
except:
return form_data
return form_data
def fetchGradeCard(clgCode,rollno,dd,mm,yyyy,generatePDF=True):
form_data = connect()
if len(form_data)==0:
return 0
form_data['ddlcollege']=str(clgCode)
form_data['txtrollno']=str(rollno)
form_data['ddlDD']=str(dd)
form_data['ddlMM']=str(mm)
form_data['ddlYYYY']=str(yyyy)
try:
result = requests.post(URL, data=form_data,headers=HEADER)
result = BeautifulSoup(result.text,'html.parser')
while len(result.body.findAll(text=re.compile('^Sorry! Invalid captch code.$')))!=0:
form_data = connect()
            form_data['ddlcollege']=str(clgCode)
            form_data['txtrollno']=str(rollno)
            form_data['ddlDD']=str(dd)
            form_data['ddlMM']=str(mm)
            form_data['ddlYYYY']=str(yyyy)
result = requests.post(URL, data=form_data,headers=HEADER)
result = BeautifulSoup(result.text,'html.parser')
if len(result.body.findAll(text=re.compile('^Sorry! no record found.$')))!=0:
return 1
for img in result.findAll('img'):
img.decompose()
if generatePDF and not os.path.isdir('Downloads'):
os.mkdir('Downloads')
filepath = 'Downloads/ScoreCard_'+rollno+'.pdf'
with open(f'.temp/{rollno}.html','w',encoding='utf-8') as f:
f.write(str(result))
options = {
'quiet': '',
'encoding': "UTF-8",
'print-media-type': '',
'page-size': 'A4',
'margin-top': '5mm',
'margin-bottom': '5mm',
'margin-left': '5mm',
'margin-right': '5mm',
'zoom': '1.5'
}
if generatePDF:
pdfkit.from_file(f'.temp/{rollno}.html',filepath,options=options)
except:
return 0
return filepath
def isResultOut(courseName, sem):
try:
if courseName=='' and sem == '':
return True
response = requests.get(URL.split('Combine_GradeCard.aspx')[0]+'List_Of_Declared_Results.aspx',headers=HEADER)
soup = BeautifulSoup(response.text,'html.parser')
cells = soup.find('table',attrs={'id':"gvshow_Reg"}).findAll('td')[2:]
courseName = ''.join([s for s in courseName if s.isalnum()])
course = []
semester = []
for i in range(1,len(cells),6):
course.append(''.join([s for s in cells[i].text.lower() if s.isalnum()]))
semester.append(cells[i+2].text)
course_sem_dict = dict(zip(course,semester))
flag=0
for course,semester in course_sem_dict.items():
if (courseName.lower() in course) and (sem.lower() == semester.lower()) :
flag=1
if flag==1:
return True
else:
return False
except:
#print('Error occurred in fetching result. Retrying...')
pass
return False
def getCoursesNames():
try:
response = requests.get(URL.split('Combine_GradeCard.aspx')[0]+'List_Of_Declared_Results.aspx',headers=HEADER)
soup = BeautifulSoup(response.text,'html.parser')
cells = soup.find('table',attrs={'id':"gvshow_Reg"}).findAll('td')[2:]
courses = []
for i in range(1,len(cells),6):
courses.append(''.join([s for s in cells[i].text]))
courses = sorted(set(courses))
with open('Resources/CoursesNames.txt','w') as f:
for i,name in enumerate(courses):
f.write(f'{i+1}) {name}\n')
except:
#print('Error in fetching Course names.')
return False
def fetchAndSaveClgCodes():
try:
response = requests.get(URL,headers=HEADER)
soup = BeautifulSoup(response.text,'html.parser')
s=soup.find('select',{'id':'ddlcollege'})
items=s.find_all('option')[1:]
clgCode = [item.get('value') for item in items]
clgName = [item.text for item in items]
clgCodeDict = dict(zip(clgName,clgCode))
        with open('Resources/collegeCodes','wb') as f:
            pickle.dump(clgCodeDict,f)
except:
#print('Error in fetching!')
return {}
return clgCodeDict
def printClgCodes():
with open('Resources/collegeCodes','rb') as g:
clgCodeDict = pickle.load(g)
print('\n{0:65} {1:^5}'.format('College Name','College Code'))
print('{0:65} {1:^5}'.format('------------','------------\n'))
for clg,code in clgCodeDict.items():
print('{0:-<65} {1:^5}'.format(clg,code))
def printCourseNames():
with open('Resources/CoursesNames.txt','r',encoding='utf-8') as f:
courses = f.read()
print()
[print(course) for course in courses.split('\n')]
def getClgCodes():
with open('Resources/collegeCodes','rb') as g:
clgCodeList = pickle.load(g)
return list(clgCodeList.values())
|
the-stack_106_17335
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import numpy
from transformers.testing_utils import TestCasePlus
from transformers.utils.versions import (
importlib_metadata,
require_version,
require_version_core,
require_version_examples,
)
numpy_ver = numpy.__version__
python_ver = ".".join([str(x) for x in sys.version_info[:3]])
class DependencyVersionCheckTest(TestCasePlus):
def test_core(self):
# lt + different version strings
require_version_core("numpy<1000.4.5")
require_version_core("numpy<1000.4")
require_version_core("numpy<1000")
# le
require_version_core("numpy<=1000.4.5")
require_version_core(f"numpy<={numpy_ver}")
# eq
require_version_core(f"numpy=={numpy_ver}")
# ne
require_version_core("numpy!=1000.4.5")
# ge
require_version_core("numpy>=1.0")
require_version_core("numpy>=1.0.0")
require_version_core(f"numpy>={numpy_ver}")
# gt
require_version_core("numpy>1.0.0")
# requirement w/o version
require_version_core("numpy")
# unmet requirements due to version conflict
for req in ["numpy==1.0.0", "numpy>=1000.0.0", f"numpy<{numpy_ver}"]:
try:
require_version_core(req)
except ImportError as e:
self.assertIn(f"{req} is required", str(e))
self.assertIn("but found", str(e))
# unmet requirements due to missing module
for req in ["numpipypie>1", "numpipypie2"]:
try:
require_version_core(req)
except importlib_metadata.PackageNotFoundError as e:
self.assertIn(f"The '{req}' distribution was not found and is required by this application", str(e))
self.assertIn("Try: pip install transformers -U", str(e))
# bogus requirements formats:
# 1. whole thing
for req in ["numpy??1.0.0", "numpy1.0.0"]:
try:
require_version_core(req)
except ValueError as e:
self.assertIn("requirement needs to be in the pip package format", str(e))
# 2. only operators
for req in ["numpy=1.0.0", "numpy == 1.00", "numpy<>1.0.0", "numpy><1.00", "numpy>>1.0.0"]:
try:
require_version_core(req)
except ValueError as e:
self.assertIn("need one of ", str(e))
def test_examples(self):
# the main functionality is tested in `test_core`, this is just the hint check
try:
require_version_examples("numpy>1000.4.5")
except ImportError as e:
self.assertIn("is required", str(e))
self.assertIn("pip install -r examples/requirements.txt", str(e))
def test_python(self):
# matching requirement
require_version("python>=3.6.0")
# not matching requirements
for req in ["python>9.9.9", "python<3.0.0"]:
try:
require_version_core(req)
except ImportError as e:
self.assertIn(f"{req} is required", str(e))
self.assertIn(f"but found python=={python_ver}", str(e))
|