| column | dtype | values / range |
| --- | --- | --- |
| hexsha | string | lengths 40..40 |
| size | int64 | 1..1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 3..239 |
| max_stars_repo_name | string | lengths 5..130 |
| max_stars_repo_head_hexsha | string | lengths 40..78 |
| max_stars_repo_licenses | sequence | lengths 1..10 |
| max_stars_count | int64 | 1..191k ⌀ |
| max_stars_repo_stars_event_min_datetime | string | lengths 24..24 ⌀ |
| max_stars_repo_stars_event_max_datetime | string | lengths 24..24 ⌀ |
| max_issues_repo_path | string | lengths 3..239 |
| max_issues_repo_name | string | lengths 5..130 |
| max_issues_repo_head_hexsha | string | lengths 40..78 |
| max_issues_repo_licenses | sequence | lengths 1..10 |
| max_issues_count | int64 | 1..67k ⌀ |
| max_issues_repo_issues_event_min_datetime | string | lengths 24..24 ⌀ |
| max_issues_repo_issues_event_max_datetime | string | lengths 24..24 ⌀ |
| max_forks_repo_path | string | lengths 3..239 |
| max_forks_repo_name | string | lengths 5..130 |
| max_forks_repo_head_hexsha | string | lengths 40..78 |
| max_forks_repo_licenses | sequence | lengths 1..10 |
| max_forks_count | int64 | 1..105k ⌀ |
| max_forks_repo_forks_event_min_datetime | string | lengths 24..24 ⌀ |
| max_forks_repo_forks_event_max_datetime | string | lengths 24..24 ⌀ |
| content | string | lengths 1..1.03M |
| avg_line_length | float64 | 1..958k |
| max_line_length | int64 | 1..1.03M |
| alphanum_fraction | float64 | 0..1 |
hexsha: 7947ccf32fb9b43a6a088063d161ba3ceb96ad8d | size: 5,481 | ext: py | lang: Python
max_stars_repo: search/query.py | FmasterofU/OISISI_HTMLSE | head fe893dcae93cec93163d04242c08adc8cc7ecbe8 | licenses ["MIT"] | count: null | events: null .. null
max_issues_repo: search/query.py | FmasterofU/OISISI_HTMLSE | head fe893dcae93cec93163d04242c08adc8cc7ecbe8 | licenses ["MIT"] | count: null | events: null .. null
max_forks_repo: search/query.py | FmasterofU/OISISI_HTMLSE | head fe893dcae93cec93163d04242c08adc8cc7ecbe8 | licenses ["MIT"] | count: null | events: null .. null
content:
from structures.set import Set
def validate_query(query: str):
"""
Check whether a normal (non-advanced) search query is valid.
:param query: input query for normal (non-advanced) search
:return: True if the query is valid, otherwise False
"""
query = get_correct_query(query)
if query == 'and' or query == 'not' or query == 'or':
return False
elif ' ' not in query:
return True
else:
parts = query.split(' ')
if 'and' not in parts and 'or' not in parts and 'not' not in parts:
return True
if len(parts) != 3:
return False
elif parts[0] == 'and' or parts[0] == 'not' or parts[0] == 'or' or parts[2] == 'and' or parts[2] == 'not' or \
parts[2] == 'or':
return False
elif parts[1] != 'and' and parts[1] != 'not' and parts[1] != 'or':
return False
return True
def execute_query(query, trie):
"""
Execute a normal search query and return the resulting data structures.
:param query: input string
:param trie: populated trie
:return: positive_query: string with the searched words (excluding words after the NOT operator)
hard_result_set: dict keyed by file paths that satisfy the query constraints, with the number of
appearances of every searched word in positive_query as values
broad_positive_res_set: dict keyed by file paths, with the number of appearances of every searched word
in positive_query as values (sites already present in hard_result_set are not included)
"""
query = get_correct_query(query)
flag = None
words = []
ret_string = ""
broad_search = {}
hard_search = {}
if ' ' not in query:
paths = trie.word_exists(query)
ret_string = query
if paths is not False:
result_set = Set()
for p in paths.keys():
result_set.add(p)
broad_search[p] = []
broad_search[p].append(paths[p])
""" hard and broad search are same for 1 word """
return ret_string, broad_search, broad_search
print("'" + query + "' doesn't exist in trie")
return ret_string, hard_search, broad_search
elif ' and ' not in query and ' or ' not in query and ' not ' not in query:
flag = 'or'
words = query.split(' ')
else:
parts = query.split(' ')
words.append(parts[0])
words.append(parts[2])
if parts[1] == 'and':
flag = 'and'
elif parts[1] == 'not':
flag = 'not'
elif parts[1] == 'or':
flag = 'or'
if flag is not None:
if flag == 'and' or flag == 'or':
for i in range(0, len(words)):
ret_string += words[i] + " "
else:
ret_string += words[0]
ret_string = ret_string.strip()
if flag == 'and' or flag == 'not':
first = Set()
second = Set()
paths = trie.word_exists(words[0])
if paths is not False:
for p in paths.keys():
first.add(p)
broad_search[p] = []
broad_search[p].append(paths[p])
if flag != 'not':
broad_search[p].append(0)
paths = trie.word_exists(words[1])
if paths is not False:
for p in paths.keys():
second.add(p)
if flag != 'not' and p not in broad_search.keys():
broad_search[p] = []
broad_search[p].append(0)
broad_search[p].append(paths[p])
elif flag != 'not' and p in broad_search.keys():
broad_search[p][1] = paths[p]
if flag == 'and':
result_set = first & second
elif flag == 'not':
result_set = first - second
for i in result_set.get_list():
hard_search[i] = broad_search[i]
return ret_string, hard_search, broad_search
elif flag == 'or':
sets = []
for i in range(len(words)):
new_set = Set()
paths = trie.word_exists(words[i])
if paths is not False:
for p in paths:
new_set.add(p)
if p not in broad_search.keys():
broad_search[p] = [0] * len(words)
broad_search[p][i] = paths[p]
elif p in broad_search.keys():
broad_search[p][i] = paths[p]
sets.append(new_set)
result_set = sets[0]
for i in range(1, len(words)):
result_set = result_set | sets[i]
for i in result_set.get_list():
hard_search[i] = broad_search[i]
return ret_string, hard_search, broad_search
def get_correct_query(input_query: str):
"""
Collapse repeated whitespace in the input string.
:param input_query: string
:return: the same query with a single space between words
"""
correct_words = []
words = input_query.split(' ')
for w in words:
w = w.strip()
if w != '':
correct_words.append(w)
ret = ""
for w in correct_words:
ret += w + " "
return ret.strip()
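# --- Illustrative usage sketch added by the editor; not part of the original file. ---
# The queries below are hypothetical and only show how get_correct_query() and
# validate_query() above are expected to behave, given their definitions.
if __name__ == "__main__":
    print(get_correct_query("  python   and    search "))  # "python and search"
    print(validate_query("python and search"))      # True: <word> <operator> <word>
    print(validate_query("plain multi word query"))  # True: no operators at all
    print(validate_query("and"))                     # False: a lone operator
    print(validate_query("python and or search"))    # False: malformed operator use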
avg_line_length: 37.033784 | max_line_length: 118 | alphanum_fraction: 0.509031

hexsha: 7947cd61409b65c09b5c3b08469a198ca232fe2d | size: 22,692 | ext: py | lang: Python
max_stars_repo: vol2/vol2-python-examples/examples/capstone_titanic/titanic_milestone1.py | Sun-Joong/aifh | head 1b6363d26f54b77348020ce88ced0670568ed736 | licenses ["Apache-2.0"] | count: 777 | events: 2015-01-17T22:48:26.000Z .. 2022-03-31T01:10:07.000Z
max_issues_repo: vol2/vol2-python-examples/examples/capstone_titanic/titanic_milestone1.py | Sun-Joong/aifh | head 1b6363d26f54b77348020ce88ced0670568ed736 | licenses ["Apache-2.0"] | count: 17 | events: 2015-01-02T14:41:24.000Z .. 2017-09-02T02:57:09.000Z
max_forks_repo: vol2/vol2-python-examples/examples/capstone_titanic/titanic_milestone1.py | Sun-Joong/aifh | head 1b6363d26f54b77348020ce88ced0670568ed736 | licenses ["Apache-2.0"] | count: 445 | events: 2015-01-26T17:01:49.000Z .. 2022-03-24T07:16:58.000Z
content:
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 2: Nature-Inspired Algorithms
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2014 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
import csv
class TitanicConfig:
"""
Configuration data for the Titanic project.
"""
# The name of the training data. (that we are to train on)
TrainingFilename = "train.csv"
# The name of the test data. (that Kaggle evaluates us on)
TestFilename = "test.csv"
# Dump the normalized data to this file. This file is not actually used, but rather can be viewed to see
# the normalization.
NormDumpFilename = "normalized_dump.csv"
# The number of input features used.
InputFeatureCount = 13
# The low range of the normalization.
InputNormalizeLow = -1
# The high range of the normalization.
InputNormalizeHigh = 1
# The value used for a prediction of survival.
PredictSurvive = 1
# The value used for a prediction of perish.
PredictPerish = 0
# The number of folds to use.
FoldCount = 5
# The number of particles to use.
ParticleCount = 30
# The number of RBF functions to use in each network.
RBF_COUNT = 5
# The number of iterations to allow with no improvement.
AllowNoImprovement = 100
class CalcHistogram:
def __init__(self):
self.histogram = {}
def update(self, key):
# See if we already have an entry
if key in self.histogram:
count = self.histogram[key]
self.histogram[key] = count + 1
else:
self.histogram[key] = 1
def get_max(self):
max_count = 0
result = None
for key in self.histogram.keys():
count = self.histogram[key]
if result == None or max_count < count or (max_count == count and result < key):
result = key
max_count = count
return result
def get_min(self):
min_count = 0
result = None
for key in self.histogram.keys():
count = self.histogram[key]
if result == None or min_count > count or (min_count == count and result < key):
result = key
min_count = count
return result
class CalcMean:
def __init__(self):
# How many values have we encountered so far.
self.count = 0
# What is the sum of values.
self.sum = 0
def update(self, d):
"""
Update mean for a new value.
@param d The next value.
"""
self.sum = self.sum + d
self.count = self.count + 1
def calculate(self):
"""
@return The calculated mean.
"""
return self.sum / self.count
class CalcSurvival:
def __init__(self):
# The count of males.
self.count_male = 0
# The count of females.
self.count_female = 0
# The count of male survivors.
self.male_survive = 0
# The count of female survivors.
self.female_survive = 0
def update(self, male, survived):
"""
Update for a passenger.
@param male True, if passenger was male.
@param survived True, if passenger survived.
"""
if male:
self.count_male = self.count_male + 1
else:
self.count_female = self.count_female + 1
if survived:
if male:
self.male_survive = self.male_survive + 1
else:
self.female_survive = self.female_survive + 1
def __str__(self):
count = self.count_male + self.count_female
result = "(Count: "
result = result + str(count)
if count > 0:
pct = (self.female_survive + self.male_survive) / float(count)
result = result + ", survived: "
result = result + str(pct)
if self.count_male > 0:
pct = self.male_survive / float(self.count_male)
result = result + ", male.survived: "
result = result + str(pct)
if self.count_female > 0:
pct = self.female_survive / float(self.count_female)
result = result + ", female.survived: "
result = result + str(pct)
result = result + ")"
return result
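# --- Illustrative usage sketch added by the editor; not part of the original file. ---
# Shows how the three helper classes above (CalcMean, CalcHistogram, CalcSurvival)
# are meant to be fed and queried. The passenger values below are made up.
def _helper_classes_example():
    mean_age = CalcMean()
    for age in (22.0, 38.0, 26.0):
        mean_age.update(age)
    print(mean_age.calculate())          # arithmetic mean of the updates

    embarked = CalcHistogram()
    for port in ("S", "S", "C", "Q"):
        embarked.update(port)
    print(embarked.get_max())            # most frequent key, here "S"

    survival = CalcSurvival()
    survival.update(True, False)         # one male passenger, did not survive
    survival.update(False, True)         # one female passenger, survived
    print(survival)                      # "(Count: 2, survived: 0.5, ...)" summary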
class TitanicStats:
def __init__(self):
# Passengers with the title "master", mean age.
self.mean_master = CalcMean()
# Passengers with the title "mr", mean age.
self.mean_mr = CalcMean()
# Passengers with the title "miss", mean age.
self.mean_miss = CalcMean()
# Passengers with the title "mrs", mean age.
self.mean_mrs = CalcMean()
# Passengers with a military title, mean age.
self.mean_military = CalcMean()
# Passengers with a nobility title, mean age.
self.mean_nobility = CalcMean()
# Passengers with the title "dr".
self.mean_dr = CalcMean()
# Passengers with the title "rev".
self.mean_clergy = CalcMean()
# Total passengers.
self.mean_total = CalcMean()
# Total male passengers.
self.mean_male = CalcMean()
# Total female passengers.
self.mean_female = CalcMean()
# Passengers in 1st class, average fare.
self.mean_fare1 = CalcMean()
# Passengers in 2nd class, average fare.
self.mean_fare2 = CalcMean()
# Passengers in 3rd class, average fare.
self.mean_fare3 = CalcMean()
# Survival stats for passengers with a title of "master".
self.survival_master = CalcSurvival()
# Survival stats for passengers with a title of "mr".
self.survival_mr = CalcSurvival()
# Survival stats for passengers with a title of "miss".
self.survival_miss = CalcSurvival()
# Survival stats for passengers with a title of "mrs".
self.survival_mrs = CalcSurvival()
# Survival stats for passengers with a military title.
self.survival_military = CalcSurvival()
# Survival stats for passengers with a nobility title.
self.survival_nobility = CalcSurvival()
# Survival stats for passengers with a title of "dr".
self.survival_dr = CalcSurvival()
# Survival stats for passengers with a title of "rev".
self.survival_clergy = CalcSurvival()
# Survival stats for all passengers.
self.survival_total = CalcSurvival()
# Survival stats for passengers that embarked from Southampton, England.
self.embarked_s = CalcSurvival()
# Survival stats for passengers that embarked from Cherbourg, France.
self.embarked_c = CalcSurvival()
# Survival stats for passengers that embarked from Queenstown, Ireland.
self.embarked_q = CalcSurvival()
# Histogram of embark locations.
self.embarked_histo = CalcHistogram()
def dump(self):
"""
Dump all stats to stdout.
"""
print("Mean Master: Mean Age: " + str(self.mean_master.calculate()) + " " + str(self.survival_master))
print("Mr.: Mean Age: " + str(self.mean_mr.calculate()) + " " + str(self.survival_mr))
print("Miss.: Mean Age: " + str(self.mean_miss.calculate()) + " " + str(self.survival_miss))
print("Mrs.: Mean Age: " + str(self.mean_mrs.calculate()) + " " + str(self.survival_mrs))
print("Military: Mean Age: " + str(self.mean_mrs.calculate()) + " " + str(self.survival_military))
print("Clergy: Mean Age: " + str(self.mean_clergy.calculate()) + " " + str(self.survival_clergy))
print("Nobility: Mean Age: " + str(self.mean_nobility.calculate()) + " " + str(self.survival_nobility))
print("Dr: Mean Age: " + str(self.mean_dr.calculate()) + " " + str(self.survival_dr))
print("Total known survival: Mean Age: " + str(self.mean_total.calculate()) + " " + str(self.survival_total))
print("")
print("Embarked Queenstown: Mean Age: " + str(self.embarked_q))
print("Embarked Southampton: Mean Age: " + str(self.embarked_s))
print("Embarked Cherbourg: Mean Age: " + str(self.embarked_c))
print("Most common embarked: " + str(self.embarked_histo.get_max()))
print("")
print("Mean Age Male: " + str(self.mean_male.calculate()))
print("Mean Age Female: " + str(self.mean_female.calculate()))
print("")
print("Mean Fair 1st Class: " + str(self.mean_fare1.calculate()))
print("Mean Fair 2st Class: " + str(self.mean_fare2.calculate()))
print("Mean Fair 3st Class: " + str(self.mean_fare3.calculate()))
class NormalizeTitanic:
def analyze(self, stats, filename):
"""
Analyze and generate stats for titanic data.
@param stats The stats for titanic.
@param filename The file to analyze.
@return The passenger count.
@throws IOException Errors reading file.
"""
count = 0
headerMap = {}
with open(filename, 'rb') as f:
reader = csv.reader(f)
header_map = {}
header = reader.next()
for i in range(0, len(header)):
header_map[header[i].lower()] = i
age_index = header_map["age"]
name_index = header_map["name"]
sex_index = header_map["sex"]
index_embarked = header_map["embarked"]
index_fare = header_map["fare"]
index_pclass = header_map["pclass"]
survived_index = -1
# test data does not have survived
if "survived" in header_map:
survived_index = header_map["survived"]
for next_line in reader:
count = count + 1
name = next_line[name_index]
age_str = next_line[age_index]
sex_str = next_line[sex_index]
embarked_str = next_line[index_embarked]
# test data does not have survived, do not use survived boolean if using test data!
survived = False
if survived_index != -1:
survived_str = next_line[survived_index]
survived = (survived_str == "1")
if index_embarked != -1:
embarked_str = next_line[index_embarked]
# calculate average fare per class
str_fare = next_line[index_fare]
if len(str_fare) > 0:
fare = float(str_fare)
pclass = next_line[index_pclass]
if pclass == "1":
stats.mean_fare1.update(fare)
elif pclass == "2":
stats.mean_fare2.update(fare)
elif pclass == "3":
stats.mean_fare3.update(fare)
is_male = (sex_str == "male")
# Only compute survival stats on training data
if survived_index != -1:
if embarked_str == "Q":
stats.embarked_q.update(is_male, survived)
elif embarked_str == "S":
stats.embarked_s.update(is_male, survived)
elif embarked_str == "C":
stats.embarked_c.update(is_male, survived)
stats.embarked_histo.update(embarked_str)
# Only compute survival stats on training data.
if survived_index != -1:
stats.survival_total.update(is_male, survived)
if survived_index != -1:
if "Master." in name:
stats.survival_master.update(is_male, survived)
elif "Mr." in name:
stats.survival_mr.update(is_male, survived)
elif "Miss." in name or "Mlle." in name:
stats.survival_miss.update(is_male, survived)
elif "Mrs." in name or "Mme." in name:
stats.survival_mrs.update(is_male, survived)
elif "Col." in name or "Capt." in name or "Major." in name:
stats.survival_military.update(is_male, survived)
elif "Countess." in name or "Lady." in name or "Sir." in name or "Don." in name or "Dona." in name or "Jonkheer." in name:
stats.survival_nobility.update(is_male, survived)
elif "Dr." in name:
stats.survival_dr.update(is_male, survived)
elif "Rev." in name:
stats.survival_clergy.update(is_male, survived)
if len(age_str) > 0:
age = float(age_str)
# Update general mean age for male/female
if is_male:
stats.mean_male.update(age)
else:
stats.mean_female.update(age)
# Update the total average age
stats.mean_total.update(age)
if "Master." in name:
stats.mean_master.update(age)
# Only compute survival stats on training data.
if survived_index != -1:
stats.survival_master.update(is_male, survived)
elif "Mr." in name:
stats.mean_mr.update(age)
# Only compute survival stats on training data.
if survived_index != -1:
stats.survival_mr.update(is_male, survived)
elif "Miss." in name or "Mlle." in name:
stats.mean_miss.update(age)
# Only compute survival stats on training data.
if survived_index != -1:
stats.survival_miss.update(is_male, survived)
elif "Mrs." in name or "Mme." in name:
stats.mean_mrs.update(age)
# Only compute survival stats on training data.
if survived_index != -1:
stats.survival_mrs.update(is_male, survived)
elif "Col." in name or "Capt." in name or "Major." in name:
stats.mean_military.update(age)
# Only compute survival stats on training data.
if survived_index != -1:
stats.survival_military.update(is_male, survived)
elif "Countess." in name or "Lady." in name or "Sir." in name or "Don." in name or "Dona." in name or "Jonkheer." in name:
stats.mean_nobility.update(age)
# Only compute survival stats on training data.
if survived_index != -1:
stats.survival_nobility.update(is_male, survived)
elif "Dr." in name:
stats.mean_dr.update(age)
# Only compute survival stats on training data.
if survived_index != -1:
stats.survival_dr.update(is_male, survived)
elif "Rev." in name:
stats.mean_clergy.update(age)
# Only compute survival stats on training data.
if survived_index != -1:
stats.survival_clergy.update(is_male, survived)
return count
def range_normalize(self, x, data_low, data_high, normalized_low, normalized_high):
"""
Normalize to a range.
@param x The value to normalize.
@param dataLow The low end of the range of the data.
@param dataHigh The high end of the range of the data.
@param normalizedLow The normalized low end of the range of data.
@param normalizedHigh The normalized high end of the range of data.
@return The normalized value.
"""
return ((x - data_low)
/ (data_high - data_low)) \
* (normalized_high - normalized_low) + normalized_low
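# Editor's note (added): a quick worked example of the rescaling above, with
# made-up numbers. Mapping x in [0, 100] onto [-1, 1]:
#   range_normalize(50, 0, 100, -1, 1) -> ((50 - 0) / (100 - 0)) * (1 - (-1)) + (-1) = 0.0
#   range_normalize(25, 0, 100, -1, 1) -> ((25 - 0) / (100 - 0)) * (1 - (-1)) + (-1) = -0.5
# i.e. the midpoint of the data range lands on the midpoint of the target range.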
def normalize(self, stats, filename, ids, input_low, input_high, predict_survive, predict_perish):
self.result_input = []
self.result_ideal = []
headerMap = {}
with open(filename, 'rb') as f:
reader = csv.reader(f)
header_map = {}
header = reader.next()
for i in range(0, len(header)):
header_map[header[i].lower()] = i
age_index = header_map["age"]
name_index = header_map["name"]
sex_index = header_map["sex"]
index_embarked = header_map["embarked"]
index_pclass = header_map["pclass"]
index_sibsp = header_map["sibsp"]
index_parch = header_map["parch"]
index_fare = header_map["fare"]
index_id = header_map["passengerid"]
survived_index = -1
# test data does not have survived
if "survived" in header_map:
survived_index = header_map["survived"]
for next_line in reader:
name = next_line[name_index]
sex = next_line[sex_index]
embarked = next_line[index_embarked]
id = next_line[index_id]
# Add record the passenger id, if requested
if ids != None:
ids.append(id)
is_male = (sex.lower() == "male")
# do we have an age for this person?
if len(next_line[age_index]) == 0:
# age is missing, interpolate using name
if "Master." in name:
age = stats.mean_master.calculate()
elif "Mr." in name:
age = stats.mean_mr.calculate()
elif "Miss." in name or "Mlle." in name:
age = stats.mean_miss.calculate()
elif "Mrs." in name or "Mme." in name:
age = stats.mean_mrs.calculate()
elif "Col." in name or "Capt." in name or "Major." in name:
age = stats.mean_military.calculate()
elif "Countess." in name or "Lady." in name or "Sir." in name or "Don." in name or "Dona." in name or "Jonkheer." in name:
age = stats.mean_nobility.calculate()
elif "Dr." in name:
age = stats.mean_dr.calculate()
elif "Rev." in name:
age = stats.mean_clergy.calculate()
else:
if is_male:
age = stats.mean_male.calculate()
else:
age = stats.mean_female.calculate()
else:
age = float(next_line[age_index])
input = [0] * TitanicConfig.InputFeatureCount
input[0] = self.range_normalize(age, 0, 100, input_low, input_high)
# sex-male
input[1] = input_high if is_male else input_low
# pclass
pclass = float(next_line[index_pclass])
input[2] = self.range_normalize(pclass, 1, 3, input_low, input_high)
# sibsp
sibsp = float(next_line[index_sibsp])
input[3] = self.range_normalize(sibsp, 0, 10, input_low, input_high)
# parch
parch = float(next_line[index_parch])
input[4] = self.range_normalize(parch, 0, 10, input_low, input_high)
# fare
str_fare = next_line[index_fare]
if len(str_fare) == 0:
if int(pclass) == 1:
fare = stats.mean_fare1.calculate()
elif int(pclass) == 2:
fare = stats.mean_fare2.calculate()
elif int(pclass) == 3:
fare = stats.mean_fare3.calculate()
else:
# should not happen, we would have a class other than 1,2,3.
# however, if that DID happen, use the median class (2).
fare = stats.mean_fare2.calculate()
else:
fare = float(next_line[index_fare])
input[5] = self.range_normalize(fare, 0, 500, input_low, input_high)
# embarked-c
input[6] = input_high if embarked.strip() == "c" else input_low
# embarked-q
input[7] = input_high if embarked.strip() == "q" else input_low
# embarked-s
input[8] = input_high if embarked.strip() == "s" else input_low
# name-mil
input[9] = input_high if ("Col." in name or "Capt." in name or "Major." in name) else input_low
# name-nobility
input[10] = input_high if (
"Countess." in name or "Lady." in name or "Sir." in name or "Don." in name or "Dona." in name or "Jonkheer." in name) else input_low
# name-dr
input[11] = input_high if ("Dr." in name) else input_low
# name-clergy
input[12] = input_high if ("Rev." in name) else input_low
# add the new row
self.result_input.append(input)
# add survived, if it exists
if survived_index != -1:
survived = int(next_line[survived_index])
ideal = [predict_survive if survived == 1 else predict_perish]
self.result_ideal.append(ideal)
avg_line_length: 37.139116 | max_line_length: 142 | alphanum_fraction: 0.545082

hexsha: 7947cd7d7fbf4b1e274716ab8443429cd36ae33f | size: 14,634 | ext: py | lang: Python
max_stars_repo: flask/lib/python2.7/site-packages/sqlalchemy/orm/base.py | ccellis/WHACK2016 | head 5ef4ddadaa60ef8ca07702a0a82df8a9776b9741 | licenses ["BSD-3-Clause"] | count: 1 | events: 2018-04-09T07:37:54.000Z .. 2018-04-09T07:37:54.000Z
max_issues_repo: flask/lib/python2.7/site-packages/sqlalchemy/orm/base.py | ccellis/WHACK2016 | head 5ef4ddadaa60ef8ca07702a0a82df8a9776b9741 | licenses ["BSD-3-Clause"] | count: 1 | events: 2016-05-25T15:38:50.000Z .. 2016-05-25T15:38:50.000Z
max_forks_repo: flask/lib/python2.7/site-packages/sqlalchemy/orm/base.py | ccellis/WHACK2016 | head 5ef4ddadaa60ef8ca07702a0a82df8a9776b9741 | licenses ["BSD-3-Clause"] | count: null | events: null .. null
content:
# orm/base.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Constants and rudimental functions used throughout the ORM.
"""
from .. import util, inspection, exc as sa_exc
from ..sql import expression
from . import exc
import operator
PASSIVE_NO_RESULT = util.symbol(
'PASSIVE_NO_RESULT',
"""Symbol returned by a loader callable or other attribute/history
retrieval operation when a value could not be determined, based
on loader callable flags.
"""
)
ATTR_WAS_SET = util.symbol(
'ATTR_WAS_SET',
"""Symbol returned by a loader callable to indicate the
retrieved value, or values, were assigned to their attributes
on the target object.
"""
)
ATTR_EMPTY = util.symbol(
'ATTR_EMPTY',
"""Symbol used internally to indicate an attribute had no callable."""
)
NO_VALUE = util.symbol(
'NO_VALUE',
"""Symbol which may be placed as the 'previous' value of an attribute,
indicating no value was loaded for an attribute when it was modified,
and flags indicated we were not to load it.
"""
)
NEVER_SET = util.symbol(
'NEVER_SET',
"""Symbol which may be placed as the 'previous' value of an attribute
indicating that the attribute had not been assigned to previously.
"""
)
NO_CHANGE = util.symbol(
"NO_CHANGE",
"""No callables or SQL should be emitted on attribute access
and no state should change
""", canonical=0
)
CALLABLES_OK = util.symbol(
"CALLABLES_OK",
"""Loader callables can be fired off if a value
is not present.
""", canonical=1
)
SQL_OK = util.symbol(
"SQL_OK",
"""Loader callables can emit SQL at least on scalar value attributes.""",
canonical=2
)
RELATED_OBJECT_OK = util.symbol(
"RELATED_OBJECT_OK",
"""Callables can use SQL to load related objects as well
as scalar value attributes.
""", canonical=4
)
INIT_OK = util.symbol(
"INIT_OK",
"""Attributes should be initialized with a blank
value (None or an empty collection) upon get, if no other
value can be obtained.
""", canonical=8
)
NON_PERSISTENT_OK = util.symbol(
"NON_PERSISTENT_OK",
"""Callables can be emitted if the parent is not persistent.""",
canonical=16
)
LOAD_AGAINST_COMMITTED = util.symbol(
"LOAD_AGAINST_COMMITTED",
"""Callables should use committed values as primary/foreign keys during a
load.
""", canonical=32
)
NO_AUTOFLUSH = util.symbol(
"NO_AUTOFLUSH",
"""Loader callables should disable autoflush.""",
canonical=64
)
# pre-packaged sets of flags used as inputs
PASSIVE_OFF = util.symbol(
"PASSIVE_OFF",
"Callables can be emitted in all cases.",
canonical=(RELATED_OBJECT_OK | NON_PERSISTENT_OK |
INIT_OK | CALLABLES_OK | SQL_OK)
)
PASSIVE_RETURN_NEVER_SET = util.symbol(
"PASSIVE_RETURN_NEVER_SET",
"""PASSIVE_OFF ^ INIT_OK""",
canonical=PASSIVE_OFF ^ INIT_OK
)
PASSIVE_NO_INITIALIZE = util.symbol(
"PASSIVE_NO_INITIALIZE",
"PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK",
canonical=PASSIVE_RETURN_NEVER_SET ^ CALLABLES_OK
)
PASSIVE_NO_FETCH = util.symbol(
"PASSIVE_NO_FETCH",
"PASSIVE_OFF ^ SQL_OK",
canonical=PASSIVE_OFF ^ SQL_OK
)
PASSIVE_NO_FETCH_RELATED = util.symbol(
"PASSIVE_NO_FETCH_RELATED",
"PASSIVE_OFF ^ RELATED_OBJECT_OK",
canonical=PASSIVE_OFF ^ RELATED_OBJECT_OK
)
PASSIVE_ONLY_PERSISTENT = util.symbol(
"PASSIVE_ONLY_PERSISTENT",
"PASSIVE_OFF ^ NON_PERSISTENT_OK",
canonical=PASSIVE_OFF ^ NON_PERSISTENT_OK
)
DEFAULT_MANAGER_ATTR = '_sa_class_manager'
DEFAULT_STATE_ATTR = '_sa_instance_state'
_INSTRUMENTOR = ('mapper', 'instrumentor')
EXT_CONTINUE = util.symbol('EXT_CONTINUE')
EXT_STOP = util.symbol('EXT_STOP')
ONETOMANY = util.symbol(
'ONETOMANY',
"""Indicates the one-to-many direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""")
MANYTOONE = util.symbol(
'MANYTOONE',
"""Indicates the many-to-one direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""")
MANYTOMANY = util.symbol(
'MANYTOMANY',
"""Indicates the many-to-many direction for a :func:`.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
""")
NOT_EXTENSION = util.symbol(
'NOT_EXTENSION',
"""Symbol indicating an :class:`InspectionAttr` that's
not part of sqlalchemy.ext.
Is assigned to the :attr:`.InspectionAttr.extension_type`
attibute.
""")
_never_set = frozenset([NEVER_SET])
_none_set = frozenset([None, NEVER_SET, PASSIVE_NO_RESULT])
_SET_DEFERRED_EXPIRED = util.symbol("SET_DEFERRED_EXPIRED")
_DEFER_FOR_STATE = util.symbol("DEFER_FOR_STATE")
def _generative(*assertions):
"""Mark a method as generative, e.g. method-chained."""
@util.decorator
def generate(fn, *args, **kw):
self = args[0]._clone()
for assertion in assertions:
assertion(self, fn.__name__)
fn(self, *args[1:], **kw)
return self
return generate
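# --- Illustrative sketch added by the editor; not part of the original module. ---
# Shows the "generative" (method-chaining) pattern that _generative() enables:
# each decorated call works on a clone, so the original object is never mutated.
# The _Options class below is hypothetical and exists only for this illustration.
class _Options(object):
    def __init__(self):
        self.flags = ()

    def _clone(self):
        copy = _Options()
        copy.flags = self.flags
        return copy

    @_generative()
    def with_flag(self, flag):
        self.flags = self.flags + (flag,)

# chained = _Options().with_flag("eager").with_flag("deferred")
# -> chained.flags == ("eager", "deferred"), while the original object and each
#    intermediate clone are left untouched.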
# these can be replaced by sqlalchemy.ext.instrumentation
# if augmented class instrumentation is enabled.
def manager_of_class(cls):
return cls.__dict__.get(DEFAULT_MANAGER_ATTR, None)
instance_state = operator.attrgetter(DEFAULT_STATE_ATTR)
instance_dict = operator.attrgetter('__dict__')
def instance_str(instance):
"""Return a string describing an instance."""
return state_str(instance_state(instance))
def state_str(state):
"""Return a string describing an instance via its InstanceState."""
if state is None:
return "None"
else:
return '<%s at 0x%x>' % (state.class_.__name__, id(state.obj()))
def state_class_str(state):
"""Return a string describing an instance's class via its
InstanceState.
"""
if state is None:
return "None"
else:
return '<%s>' % (state.class_.__name__, )
def attribute_str(instance, attribute):
return instance_str(instance) + "." + attribute
def state_attribute_str(state, attribute):
return state_str(state) + "." + attribute
def object_mapper(instance):
"""Given an object, return the primary Mapper associated with the object
instance.
Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
if no mapping is configured.
This function is available via the inspection system as::
inspect(instance).mapper
Using the inspection system will raise
:class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is
not part of a mapping.
"""
return object_state(instance).mapper
def object_state(instance):
"""Given an object, return the :class:`.InstanceState`
associated with the object.
Raises :class:`sqlalchemy.orm.exc.UnmappedInstanceError`
if no mapping is configured.
Equivalent functionality is available via the :func:`.inspect`
function as::
inspect(instance)
Using the inspection system will raise
:class:`sqlalchemy.exc.NoInspectionAvailable` if the instance is
not part of a mapping.
"""
state = _inspect_mapped_object(instance)
if state is None:
raise exc.UnmappedInstanceError(instance)
else:
return state
@inspection._inspects(object)
def _inspect_mapped_object(instance):
try:
return instance_state(instance)
# TODO: whats the py-2/3 syntax to catch two
# different kinds of exceptions at once ?
except exc.UnmappedClassError:
return None
except exc.NO_STATE:
return None
def _class_to_mapper(class_or_mapper):
insp = inspection.inspect(class_or_mapper, False)
if insp is not None:
return insp.mapper
else:
raise exc.UnmappedClassError(class_or_mapper)
def _mapper_or_none(entity):
"""Return the :class:`.Mapper` for the given class or None if the
class is not mapped.
"""
insp = inspection.inspect(entity, False)
if insp is not None:
return insp.mapper
else:
return None
def _is_mapped_class(entity):
"""Return True if the given object is a mapped class,
:class:`.Mapper`, or :class:`.AliasedClass`.
"""
insp = inspection.inspect(entity, False)
return insp is not None and \
not insp.is_clause_element and \
(
insp.is_mapper or insp.is_aliased_class
)
def _attr_as_key(attr):
if hasattr(attr, 'key'):
return attr.key
else:
return expression._column_as_key(attr)
def _orm_columns(entity):
insp = inspection.inspect(entity, False)
if hasattr(insp, 'selectable'):
return [c for c in insp.selectable.c]
else:
return [entity]
def _is_aliased_class(entity):
insp = inspection.inspect(entity, False)
return insp is not None and \
getattr(insp, "is_aliased_class", False)
def _entity_descriptor(entity, key):
"""Return a class attribute given an entity and string name.
May return :class:`.InstrumentedAttribute` or user-defined
attribute.
"""
insp = inspection.inspect(entity)
if insp.is_selectable:
description = entity
entity = insp.c
elif insp.is_aliased_class:
entity = insp.entity
description = entity
elif hasattr(insp, "mapper"):
description = entity = insp.mapper.class_
else:
description = entity
try:
return getattr(entity, key)
except AttributeError:
raise sa_exc.InvalidRequestError(
"Entity '%s' has no property '%s'" %
(description, key)
)
_state_mapper = util.dottedgetter('manager.mapper')
@inspection._inspects(type)
def _inspect_mapped_class(class_, configure=False):
try:
class_manager = manager_of_class(class_)
if not class_manager.is_mapped:
return None
mapper = class_manager.mapper
except exc.NO_STATE:
return None
else:
if configure and mapper._new_mappers:
mapper._configure_all()
return mapper
def class_mapper(class_, configure=True):
"""Given a class, return the primary :class:`.Mapper` associated
with the key.
Raises :exc:`.UnmappedClassError` if no mapping is configured
on the given class, or :exc:`.ArgumentError` if a non-class
object is passed.
Equivalent functionality is available via the :func:`.inspect`
function as::
inspect(some_mapped_class)
Using the inspection system will raise
:class:`sqlalchemy.exc.NoInspectionAvailable` if the class is not mapped.
"""
mapper = _inspect_mapped_class(class_, configure=configure)
if mapper is None:
if not isinstance(class_, type):
raise sa_exc.ArgumentError(
"Class object expected, got '%r'." % (class_, ))
raise exc.UnmappedClassError(class_)
else:
return mapper
class InspectionAttr(object):
"""A base class applied to all ORM objects that can be returned
by the :func:`.inspect` function.
The attributes defined here allow the usage of simple boolean
checks to test basic facts about the object returned.
While the boolean checks here are basically the same as using
the Python isinstance() function, the flags here can be used without
the need to import all of these classes, and also such that
the SQLAlchemy class system can change while leaving the flags
here intact for forwards-compatibility.
"""
__slots__ = ()
is_selectable = False
"""Return True if this object is an instance of :class:`.Selectable`."""
is_aliased_class = False
"""True if this object is an instance of :class:`.AliasedClass`."""
is_instance = False
"""True if this object is an instance of :class:`.InstanceState`."""
is_mapper = False
"""True if this object is an instance of :class:`.Mapper`."""
is_property = False
"""True if this object is an instance of :class:`.MapperProperty`."""
is_attribute = False
"""True if this object is a Python :term:`descriptor`.
This can refer to one of many types. Usually a
:class:`.QueryableAttribute` which handles attributes events on behalf
of a :class:`.MapperProperty`. But can also be an extension type
such as :class:`.AssociationProxy` or :class:`.hybrid_property`.
The :attr:`.InspectionAttr.extension_type` will refer to a constant
identifying the specific subtype.
.. seealso::
:attr:`.Mapper.all_orm_descriptors`
"""
is_clause_element = False
"""True if this object is an instance of :class:`.ClauseElement`."""
extension_type = NOT_EXTENSION
"""The extension type, if any.
Defaults to :data:`.interfaces.NOT_EXTENSION`
.. versionadded:: 0.8.0
.. seealso::
:data:`.HYBRID_METHOD`
:data:`.HYBRID_PROPERTY`
:data:`.ASSOCIATION_PROXY`
"""
class InspectionAttrInfo(InspectionAttr):
"""Adds the ``.info`` attribute to :class:`.InspectionAttr`.
The rationale for :class:`.InspectionAttr` vs. :class:`.InspectionAttrInfo`
is that the former is compatible as a mixin for classes that specify
``__slots__``; this is essentially an implementation artifact.
"""
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
data to be associated with this :class:`.InspectionAttr`.
The dictionary is generated when first accessed. Alternatively,
it can be specified as a constructor argument to the
:func:`.column_property`, :func:`.relationship`, or :func:`.composite`
functions.
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
.. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
available on extension types via the
:attr:`.InspectionAttrInfo.info` attribute, so that it can apply
to a wider variety of ORM and extension constructs.
.. seealso::
:attr:`.QueryableAttribute.info`
:attr:`.SchemaItem.info`
"""
return {}
class _MappedAttribute(object):
"""Mixin for attributes which should be replaced by mapper-assigned
attributes.
"""
__slots__ = ()
avg_line_length: 27.049908 | max_line_length: 79 | alphanum_fraction: 0.681768

hexsha: 7947cdd41e5180be90c1ac87e55af355f5a99242 | size: 7,976 | ext: py | lang: Python
max_stars_repo: IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/names/common.py | timkrentz/SunTracker | head 9a189cc38f45e5fbc4e4c700d7295a871d022795 | licenses ["MIT"] | count: 4 | events: 2016-03-30T14:31:52.000Z .. 2019-02-02T05:01:32.000Z
max_issues_repo: IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/names/common.py | timkrentz/SunTracker | head 9a189cc38f45e5fbc4e4c700d7295a871d022795 | licenses ["MIT"] | count: 1 | events: 2020-03-06T04:49:42.000Z .. 2020-03-06T04:49:42.000Z
max_forks_repo: IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/names/common.py | timkrentz/SunTracker | head 9a189cc38f45e5fbc4e4c700d7295a871d022795 | licenses ["MIT"] | count: 2 | events: 2019-08-30T23:36:13.000Z .. 2019-11-08T16:52:01.000Z
content:
# -*- test-case-name: twisted.names.test -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Base functionality useful to various parts of Twisted Names.
"""
from __future__ import division, absolute_import
import socket
from zope.interface import implementer
from twisted.names import dns
from twisted.names.error import DNSFormatError, DNSServerError, DNSNameError
from twisted.names.error import DNSNotImplementedError, DNSQueryRefusedError
from twisted.names.error import DNSUnknownError
from twisted.internet import defer, error, interfaces
from twisted.python import failure
# Helpers for indexing the three-tuples that get thrown around by this code a
# lot.
_ANS, _AUTH, _ADD = range(3)
EMPTY_RESULT = (), (), ()
@implementer(interfaces.IResolver)
class ResolverBase:
"""
L{ResolverBase} is a base class for implementations of
L{interfaces.IResolver} which deals with a lot
of the boilerplate of implementing all of the lookup methods.
@cvar _errormap: A C{dict} mapping DNS protocol failure response codes
to exception classes which will be used to represent those failures.
"""
_errormap = {
dns.EFORMAT: DNSFormatError,
dns.ESERVER: DNSServerError,
dns.ENAME: DNSNameError,
dns.ENOTIMP: DNSNotImplementedError,
dns.EREFUSED: DNSQueryRefusedError}
typeToMethod = None
def __init__(self):
self.typeToMethod = {}
for (k, v) in typeToMethod.items():
self.typeToMethod[k] = getattr(self, v)
def exceptionForCode(self, responseCode):
"""
Convert a response code (one of the possible values of
L{dns.Message.rCode} to an exception instance representing it.
@since: 10.0
"""
return self._errormap.get(responseCode, DNSUnknownError)
def query(self, query, timeout=None):
try:
method = self.typeToMethod[query.type]
except KeyError:
return defer.fail(failure.Failure(NotImplementedError(
str(self.__class__) + " " + str(query.type))))
else:
return defer.maybeDeferred(method, query.name.name, timeout)
def _lookup(self, name, cls, type, timeout):
return defer.fail(NotImplementedError("ResolverBase._lookup"))
def lookupAddress(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.A, timeout)
def lookupIPV6Address(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.AAAA, timeout)
def lookupAddress6(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.A6, timeout)
def lookupMailExchange(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.MX, timeout)
def lookupNameservers(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.NS, timeout)
def lookupCanonicalName(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.CNAME, timeout)
def lookupMailBox(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.MB, timeout)
def lookupMailGroup(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.MG, timeout)
def lookupMailRename(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.MR, timeout)
def lookupPointer(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.PTR, timeout)
def lookupAuthority(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.SOA, timeout)
def lookupNull(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.NULL, timeout)
def lookupWellKnownServices(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.WKS, timeout)
def lookupService(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.SRV, timeout)
def lookupHostInfo(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.HINFO, timeout)
def lookupMailboxInfo(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.MINFO, timeout)
def lookupText(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.TXT, timeout)
def lookupSenderPolicy(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.SPF, timeout)
def lookupResponsibility(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.RP, timeout)
def lookupAFSDatabase(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.AFSDB, timeout)
def lookupZone(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.AXFR, timeout)
def lookupNamingAuthorityPointer(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.NAPTR, timeout)
def lookupAllRecords(self, name, timeout=None):
return self._lookup(name, dns.IN, dns.ALL_RECORDS, timeout)
# IResolverSimple
def getHostByName(self, name, timeout=None, effort=10):
# XXX - respect timeout
return self.lookupAllRecords(name, timeout
).addCallback(self._cbRecords, name, effort
)
def _cbRecords(self, records, name, effort):
(ans, auth, add) = records
result = extractRecord(self, dns.Name(name), ans + auth + add, effort)
if not result:
raise error.DNSLookupError(name)
return result
def extractRecord(resolver, name, answers, level=10):
if not level:
return None
if hasattr(socket, 'inet_ntop'):
for r in answers:
if r.name == name and r.type == dns.A6:
return socket.inet_ntop(socket.AF_INET6, r.payload.address)
for r in answers:
if r.name == name and r.type == dns.AAAA:
return socket.inet_ntop(socket.AF_INET6, r.payload.address)
for r in answers:
if r.name == name and r.type == dns.A:
return socket.inet_ntop(socket.AF_INET, r.payload.address)
for r in answers:
if r.name == name and r.type == dns.CNAME:
result = extractRecord(
resolver, r.payload.name, answers, level - 1)
if not result:
return resolver.getHostByName(
str(r.payload.name), effort=level - 1)
return result
# No answers, but maybe there's a hint at who we should be asking about
# this
for r in answers:
if r.type == dns.NS:
from twisted.names import client
r = client.Resolver(servers=[(str(r.payload.name), dns.PORT)])
return r.lookupAddress(str(name)
).addCallback(
lambda records: extractRecord(
r, name,
records[_ANS] + records[_AUTH] + records[_ADD],
level - 1))
typeToMethod = {
dns.A: 'lookupAddress',
dns.AAAA: 'lookupIPV6Address',
dns.A6: 'lookupAddress6',
dns.NS: 'lookupNameservers',
dns.CNAME: 'lookupCanonicalName',
dns.SOA: 'lookupAuthority',
dns.MB: 'lookupMailBox',
dns.MG: 'lookupMailGroup',
dns.MR: 'lookupMailRename',
dns.NULL: 'lookupNull',
dns.WKS: 'lookupWellKnownServices',
dns.PTR: 'lookupPointer',
dns.HINFO: 'lookupHostInfo',
dns.MINFO: 'lookupMailboxInfo',
dns.MX: 'lookupMailExchange',
dns.TXT: 'lookupText',
dns.SPF: 'lookupSenderPolicy',
dns.RP: 'lookupResponsibility',
dns.AFSDB: 'lookupAFSDatabase',
dns.SRV: 'lookupService',
dns.NAPTR: 'lookupNamingAuthorityPointer',
dns.AXFR: 'lookupZone',
dns.ALL_RECORDS: 'lookupAllRecords',
}
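# --- Illustrative sketch added by the editor; not part of the original module. ---
# A subclass of ResolverBase only needs to supply _lookup(); every
# lookupAddress()/lookupMailExchange()/... method above funnels into it via the
# typeToMethod table. The _StaticResolver below is hypothetical, for illustration.
class _StaticResolver(ResolverBase):
    def _lookup(self, name, cls, type, timeout):
        # Answer every query with a single static A record.
        answer = dns.RRHeader(
            name=name, type=dns.A, cls=cls,
            payload=dns.Record_A(address='127.0.0.1'))
        return defer.succeed(([answer], [], []))

# _StaticResolver().lookupAddress(b'example.invalid') fires with the familiar
# (answers, authority, additional) three-tuple used throughout this module.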
avg_line_length: 31.776892 | max_line_length: 79 | alphanum_fraction: 0.62663

hexsha: 7947ce474893a4f869777763e46bca23b70bacea | size: 18,142 | ext: py | lang: Python
max_stars_repo: src/pytezos/cli/cli.py | konchunas/pytezos | head 65576d18bdf1956fae8ea21241b6c43a38921b83 | licenses ["MIT"] | count: null | events: null .. null
max_issues_repo: src/pytezos/cli/cli.py | konchunas/pytezos | head 65576d18bdf1956fae8ea21241b6c43a38921b83 | licenses ["MIT"] | count: null | events: null .. null
max_forks_repo: src/pytezos/cli/cli.py | konchunas/pytezos | head 65576d18bdf1956fae8ea21241b6c43a38921b83 | licenses ["MIT"] | count: null | events: null .. null
content:
import io
import sys
import tarfile
import time
from glob import glob
from os.path import abspath, dirname, exists, join, split
from pprint import pformat
from typing import List, Optional
import click
import docker # type: ignore
from pytezos import ContractInterface, __version__, pytezos
from pytezos.cli.github import create_deployment, create_deployment_status
from pytezos.context.mixin import default_network # type: ignore
from pytezos.logging import logger
from pytezos.michelson.types.base import generate_pydoc
from pytezos.operation.result import OperationResult
from pytezos.rpc.errors import RpcError
from pytezos.sandbox.node import SandboxedNodeTestCase
from pytezos.sandbox.parameters import EDO, FLORENCE
kernel_js_path = join(dirname(dirname(__file__)), 'assets', 'kernel.js')
kernel_json = {
"argv": ['pytezos', 'kernel', 'run', "-file", "{connection_file}"],
"display_name": "Michelson",
"language": "michelson",
"codemirror_mode": "michelson",
}
SMARTPY_CLI_IMAGE = 'bakingbad/smartpy-cli'
def make_bcd_link(network, address):
return f'https://better-call.dev/{network}/{address}'
def get_local_contract_path(path, extension='tz'):
if path is None:
files = glob(f'*.{extension}')
if len(files) != 1:
raise Exception('No contracts found in working directory, specify --path implicitly')
path = abspath(files[0])
if exists(path):
return path
return False
def get_contract(path):
    local_path = get_local_contract_path(path)
    if local_path:
        contract = ContractInterface.from_file(local_path)
    else:
        # not a local file: treat the original argument as "<network>:<KT-address>"
        network, address = path.split(':')
        contract = pytezos.using(shell=network).contract(address)
    return contract
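# Editor's note (added): a hedged sketch of the two input forms get_contract()
# above accepts; the file name and contract address are hypothetical placeholders.
#
#   get_contract("my_contract.tz")    # a local .tz file, loaded with ContractInterface.from_file()
#   get_contract("mainnet:KT1...")    # "<network>:<KT-address>", fetched from that network over RPC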
def get_docker_client():
return docker.from_env()
@click.group()
@click.version_option(__version__)
@click.pass_context
def cli(*_args, **_kwargs):
pass
@cli.command(help='Manage contract storage')
@click.option('--action', '-a', type=str, help='One of `schema`, `default`.')
@click.option('--path', '-p', type=str, default=None, help='Path to the .tz file, or the following uri: <network>:<KT-address>')
@click.pass_context
def storage(_ctx, action: str, path: Optional[str]) -> None:
contract = get_contract(path)
if action == 'schema':
logger.info(generate_pydoc(type(contract.storage.data), title='storage'))
elif action == 'default':
logger.info(pformat(contract.storage.dummy()))
else:
raise Exception('Action must be either `schema` or `default`')
@cli.command(help='Manage contract storage')
@click.option('--action', '-a', type=str, default='schema', help='One of `schema`')
@click.option('--path', '-p', type=str, default=None, help='Path to the .tz file, or the following uri: <network>:<KT-address>')
@click.pass_context
def parameter(_ctx, action: str, path: Optional[str]) -> None:
contract = get_contract(path)
if action == 'schema':
logger.info(contract.parameter.__doc__)
else:
raise Exception('Action must be `schema`')
@cli.command(help='Activate and reveal key from the faucet file')
@click.option('--path', '-p', type=str, help='Path to the .json file downloaded from https://faucet.tzalpha.net/')
@click.option('--network', '-n', type=str, default=default_network, help='Default is edo2net')
@click.pass_context
def activate(_ctx, path: str, network: str) -> None:
ptz = pytezos.using(key=path, shell=network)
logger.info(
'Activating %s in the %s',
ptz.key.public_key_hash(),
network,
)
if ptz.balance() == 0:
try:
opg = ptz.reveal().autofill().sign()
logger.info('Injecting reveal operation:')
logger.info(pformat(opg.json_payload()))
opg.inject(_async=False)
except RpcError as e:
logger.critical(pformat(e))
sys.exit(-1)
else:
logger.info('Activation succeeded! Claimed balance: %s ꜩ', ptz.balance())
else:
logger.info('Already activated')
try:
opg = ptz.reveal().autofill().sign()
logger.info('Injecting reveal operation:')
logger.info(pformat(opg.json_payload()))
opg.inject(_async=False)
except RpcError as e:
logger.critical(pformat(e))
sys.exit(-1)
else:
logger.info('Your key %s is now active and revealed', ptz.key.public_key_hash())
@cli.command(help='Deploy contract to the specified network')
@click.option('--path', '-p', type=str, help='Path to the .tz file')
@click.option('--storage', type=str, default=None, help='Storage in JSON format (not Micheline)')
@click.option('--network', '-n', type=str, default=default_network, help='Default is edo2net')
@click.option('--key', type=str, default=None)
@click.option('--github-repo-slug', type=str, default=None)
@click.option('--github-oauth-token', type=str, default=None)
@click.option('--dry-run', type=bool, default=False, help='Set this flag if you just want to see what would happen')
@click.pass_context
def deploy(
_ctx,
path: str,
storage: Optional[str], # pylint: disable=redefined-outer-name
network: str,
key: Optional[str],
github_repo_slug: Optional[str],
github_oauth_token: Optional[str],
dry_run: bool,
):
ptz = pytezos.using(shell=network, key=key)
logger.info('Deploying contract using %s in the %s', ptz.key.public_key_hash(), network)
contract = get_contract(path)
try:
opg = ptz.origination(script=contract.script(initial_storage=storage)).autofill().sign()
logger.info('Injecting origination operation:')
logger.info(pformat(opg.json_payload()))
if dry_run:
logger.info(pformat(opg.preapply()))
sys.exit(0)
else:
opg = opg.inject(_async=False)
except RpcError as e:
logger.critical(pformat(e))
sys.exit(-1)
else:
originated_contracts = OperationResult.originated_contracts(opg)
if len(originated_contracts) != 1:
raise Exception('Operation group must has exactly one originated contract')
bcd_link = make_bcd_link(network, originated_contracts[0])
logger.info('Contract was successfully deployed: %s', bcd_link)
if github_repo_slug:
deployment = create_deployment(
github_repo_slug,
github_oauth_token,
environment=network,
)
logger.info(pformat(deployment))
status = create_deployment_status(
github_repo_slug,
github_oauth_token,
deployment_id=deployment['id'],
state='success',
environment=network,
environment_url=bcd_link,
)
logger.info(status)
@cli.command(help='Update containerized SmartPy CLI')
@click.option('--tag', '-t', type=str, help='Version or tag to pull', default='latest')
@click.pass_context
def update_smartpy(ctx, tag):
client = get_docker_client()
logger.info('Will now pull latest SmartPy image, please stay put.')
for line in client.api.pull(f'{SMARTPY_CLI_IMAGE}:{tag}', stream=True, decode=True):
logger.info(line)
logger.info('Pulled SmartPy CLI image successfully!')
def run_smartpy_container(
tag: str = 'latest',
command: str = '',
files_to_add: List[str] = [],
mounts: List[docker.types.Mount] = [],
):
try:
client = get_docker_client()
container = client.containers.create(
image=f'{SMARTPY_CLI_IMAGE}:{tag}',
command=command,
detach=True,
mounts=mounts,
)
buffer = io.BytesIO()
with tarfile.open(fileobj=buffer, mode='w:gz') as archive:
for filename in files_to_add:
with open(filename, 'rb') as current_file:
current_file_data = current_file.read()
current_file_buffer = io.BytesIO(initial_bytes=current_file_data)
_, short_filename = split(filename)
archive.add(filename, arcname=short_filename)
buffer.seek(0)
container.put_archive(
'/root/smartpy-cli/',
buffer,
)
container.start()
return container
except docker.errors.ImageNotFound:
logger.error('SmartPy compiler not found. Please run update-smartpy first.')
@cli.command(help='Run SmartPy CLI command "test"')
@click.option('--script', '-s', type=str, help='Path to script', default='script.py')
@click.option('--output-directory', '-o', type=str, help='Output directory', default='./smartpy-output')
@click.option('--protocol', type=click.Choice(['delphi', 'edo', 'florence', 'proto10']), help='Protocol to use', default='edo')
@click.option('--detach', '-d', type=bool, help='Run container in detached mode', default=False)
@click.option('--tag', '-t', type=str, help='Version or tag of SmartPy to use', default='latest')
@click.pass_context
def smartpy_test(
_ctx,
script: str,
output_directory: str,
detach: bool,
protocol: str,
tag: str,
):
client = get_docker_client()
path = get_local_contract_path(script, extension='py')
if path:
_, script_name = split(path)
container = run_smartpy_container(
tag=tag,
command=f'test /root/smartpy-cli/{script_name} /root/output --protocol {protocol}',
files_to_add=[path, ],
mounts=[
docker.types.Mount(
target='/root/output',
source=output_directory,
type='bind'
)
]
)
if container is None:
raise Exception('Could not create container. Try running update-smartpy.')
if not detach:
for line in container.logs(stream=True):
print(line.decode('utf-8').rstrip())
else:
logger.error('No local script found. Please ensure a valid script is present or specify path.')
@cli.command(help='Run SmartPy CLI command "compile"')
@click.option('--script', '-s', type=str, help='Path to script', default='script.py')
@click.option('--output-directory', '-o', type=str, help='Output directory', default='./smartpy-output')
@click.option('--detach', '-d', type=bool, help='Run container in detached mode', default=False)
@click.option('--protocol', type=click.Choice(['delphi', 'edo', 'florence', 'proto10']), help='Protocol to use', default='edo')
@click.option('--tag', '-t', type=str, help='Version or tag of SmartPy to use', default='latest')
@click.pass_context
def smartpy_compile(
_ctx,
script: str,
output_directory: str,
detach: bool,
protocol: str,
tag: str,
):
client = get_docker_client()
path = get_local_contract_path(script, extension='py')
if path:
_, script_name = split(path)
container = run_smartpy_container(
tag=tag,
command=f'compile /root/smartpy-cli/{script_name} /root/output --protocol {protocol}',
files_to_add=[path,],
mounts=[
docker.types.Mount(
target='/root/output',
source=output_directory,
type='bind'
)
]
)
if container is None:
raise Exception('Could not create container. Try running update-smartpy.')
if not detach:
for line in container.logs(stream=True):
print(line.decode('utf-8').rstrip())
else:
logger.error('No local script found. Please ensure a valid script is present or specify path.')
@cli.command(help='Run containerized sandbox node')
@click.option('--image', type=str, help='Docker image to use', default=SandboxedNodeTestCase.IMAGE)
@click.option('--protocol', type=click.Choice(['florence', 'edo']), help='Protocol to use', default='florence')
@click.option('--port', '-p', type=int, help='Port to expose', default=8732)
@click.option('--interval', '-i', type=float, help='Interval between baked blocks (in seconds)', default=1.0)
@click.option('--blocks', '-b', type=int, help='Number of blocks to bake before exit')
@click.pass_context
def sandbox(
_ctx,
image: str,
protocol: str,
port: int,
interval: float,
blocks: int,
):
protocol = {
'edo': EDO,
'florence': FLORENCE,
}[protocol]
SandboxedNodeTestCase.PROTOCOL = protocol
SandboxedNodeTestCase.IMAGE = image
SandboxedNodeTestCase.PORT = port
SandboxedNodeTestCase.setUpClass()
blocks_baked = 0
while True:
try:
logger.info('Baking block %s...', blocks_baked)
block_hash = SandboxedNodeTestCase.get_client().using(key='bootstrap1').bake_block().fill().work().sign().inject()
logger.info('Baked block: %s', block_hash)
blocks_baked += 1
if blocks and blocks_baked == blocks:
break
time.sleep(interval)
except KeyboardInterrupt:
break
@cli.command(help='Update Ligo compiler (docker pull ligolang/ligo)')
@click.option('--tag', '-t', type=str, help='Version or tag to pull', default='0.13.0')
@click.pass_context
def update_ligo(
_ctx,
tag: str,
):
client = get_docker_client()
logger.info(f'Pulling ligolang/ligo{(":" + tag) if tag else ""}, please stay put.')
for line in client.api.pull('ligolang/ligo', tag=tag, stream=True, decode=True):
logger.info(line)
logger.info('Pulled Ligo compiler image successfully!')
def run_ligo_container(
tag: str = '0.13.0',
command: str = '',
files_to_add: List[str] = [],
):
try:
client = get_docker_client()
container = client.containers.create(
image=f'ligolang/ligo:{tag}',
command=command,
detach=True,
)
buffer = io.BytesIO()
with tarfile.open(fileobj=buffer, mode='w:gz') as archive:
for filename in files_to_add:
with open(filename, 'rb') as current_file:
current_file_data = current_file.read()
current_file_buffer = io.BytesIO(initial_bytes=current_file_data)
_, short_filename = split(filename)
archive.add(filename, arcname=short_filename)
buffer.seek(0)
container.put_archive(
'/root/',
buffer,
)
container.start()
return container
except docker.errors.ImageNotFound:
logger.error('Ligo compiler not found. Please run update-ligo first.')
@cli.command(help='Compile contract using Ligo compiler.')
@click.option('--tag', '-t', type=str, help='Version or tag of Ligo compiler', default='0.13.0')
@click.option('--path', '-p', type=str, help='Path to contract')
@click.option('--entry-point', '-ep', type=str, help='Entrypoint for the invocation')
@click.option('--detach', '-d', type=bool, help='Run container in detached mode', default=False)
@click.pass_context
def ligo_compile_contract(
_ctx,
tag: str,
path: str,
entry_point: str,
detach: bool,
):
path = get_local_contract_path(path, extension='ligo')
if path:
_, contract_name = split(path)
container = run_ligo_container(
tag=tag,
command=f'compile-contract {contract_name} "{entry_point}"',
files_to_add=[path,]
        )
        if container is None:
            # run_ligo_container returns None when the image is missing; avoid crashing on container.logs()
            return
        if not detach:
for line in container.logs(stream=True):
print(line.decode('utf-8').rstrip())
else:
logger.error('No local contract found. Please ensure a valid contract is present or specify path.')
@cli.command(help='Define initial storage using Ligo compiler.')
@click.option('--tag', '-t', type=str, help='Version or tag of Ligo compiler', default='0.13.0')
@click.option('--path', '-p', type=str, help='Path to contract')
@click.option('--entry-point', '-ep', type=str, help='Entrypoint for the storage', default='')
@click.option('--expression', '-ex', type=str, help='Expression for the storage', default='')
@click.option('--detach', '-d', type=bool, help='Run container in detached mode', default=False)
@click.pass_context
def ligo_compile_storage(
_ctx,
tag: str,
path: str,
entry_point: str,
expression: str,
detach: bool,
):
path = get_local_contract_path(path, extension='ligo')
if path:
container = run_ligo_container(
tag=tag,
command=f'compile-storage {path} "{entry_point}" "{expression}"',
files_to_add=[path,],
        )
        if container is None:
            # run_ligo_container returns None when the image is missing; avoid crashing on container.logs()
            return
        if not detach:
for line in container.logs(stream=True):
print(line.decode('utf-8').rstrip())
else:
logger.error('No local contract found. Please ensure a valid contract is present or specify path.')
@cli.command(help='Invoke a contract with a parameter using Ligo compiler.')
@click.option('--tag', '-t', type=str, help='Version or tag of Ligo compiler', default='0.13.0')
@click.option('--path', '-p', type=str, help='Path to contract')
@click.option('--entry-point', '-ep', type=str, help='Entrypoint for the invocation')
@click.option('--expression', '-ex', type=str, help='Expression for the invocation')
@click.option('--detach', '-d', type=bool, help='Run container in detached mode', default=False)
@click.pass_context
def ligo_invoke_contract(
_ctx,
tag: str,
path: str,
entry_point: str,
expression: str,
detach: bool,
):
path = get_local_contract_path(path, extension='ligo')
if path:
container = run_ligo_container(
tag=tag,
command=f'compile-parameter {path} "{entry_point}" "{expression}"',
files_to_add=[path,],
        )
        if container is None:
            # run_ligo_container returns None when the image is missing; avoid crashing on container.logs()
            return
        if not detach:
for line in container.logs(stream=True):
print(line.decode('utf-8').rstrip())
else:
logger.error('No local contract found. Please ensure a valid contract is present or specify path.')
if __name__ == '__main__':
cli(prog_name='pytezos')
| 36.949084 | 128 | 0.634439 |
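A minimal sketch of exercising the CLI commands above programmatically through click's test runner; the import path for `cli` and the contract file name are assumptions, and Docker plus a pulled ligolang/ligo image must be available for the command to do anything useful.

# Hedged usage sketch -- the module path `pytezos.cli.cli` and ./contract.ligo are assumptions.
from click.testing import CliRunner
from pytezos.cli.cli import cli  # adjust to wherever the `cli` group above actually lives

runner = CliRunner()
# Depending on the click version the command name may be 'ligo-compile-contract'
# or 'ligo_compile_contract'.
result = runner.invoke(cli, [
    'ligo-compile-contract',
    '--path', './contract.ligo',
    '--entry-point', 'main',
])
print(result.exit_code)
print(result.output)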
7947ceb66d8381b8c8164586bcef95fe302b7bdb | 7,633 | py | Python | pepe/analysis/ForceBalance.py | Jfeatherstone/pepe | 4d28cab830ff2a94d3cfc06c680bde05d92b2cdb | [
"MIT"
] | null | null | null | pepe/analysis/ForceBalance.py | Jfeatherstone/pepe | 4d28cab830ff2a94d3cfc06c680bde05d92b2cdb | [
"MIT"
] | null | null | null | pepe/analysis/ForceBalance.py | Jfeatherstone/pepe | 4d28cab830ff2a94d3cfc06c680bde05d92b2cdb | [
"MIT"
] | null | null | null | """
Methods to gauge how well force balance is satisfied for an ensemble,
and to convert between polar and cartesian systems.
"""
import numpy as np
import numba
def polarToCartesian(force, alpha, beta, collapse=True):
"""
Convert a set of forces defined in polar coordinates (f, a, b),
to cartesian coordinates (f_y, f_x).
Parameters
----------
force : float or np.ndarray[F] or list[F]
The force magnitude, or an array/list of F force magnitudes.
alpha : float or np.ndarray[F] or list[F]
The alpha angle, or an array/list of F alpha angles.
beta : float or np.ndarray[F] or list[F]
The beta angle, or an array/list of F beta angles.
collapse : bool
Whether to collapse the force index dimension in the case that
only a single force is provided.
Returns
-------
forceArr : np.ndarray[F,2]
An array of the cartesian components (y,x) of the forces.
If only a single force is provided (ie. `force`, `alpha` and `beta` are all
floats) the first dimension will be omitted, leaving just `[f_y, f_x]`. See
`collapse` for more information.
"""
# Check to see if we were given multiple forces, or just a single one
if hasattr(force, '__iter__'):
forceArr = np.array(force)
alphaArr = np.array(alpha)
betaArr = np.array(beta)
singleForce = False
else:
forceArr = np.array([force])
alphaArr = np.array([alpha])
betaArr = np.array([beta])
singleForce = True
cartesianForceArr = np.zeros((forceArr.shape[0], 2))
for i in range(cartesianForceArr.shape[0]):
# Note that this expression is not exactly the same as in K. E. Daniels et al.
# Rev. Sci. Inst. 88 (2017). There is an extra negative on the alphas, since mine
# appear to be defined backwards.
# F_y
cartesianForceArr[i,0] = forceArr[i] * np.cos(-alphaArr[i] + betaArr[i]) #(np.cos(betaArr[i,j]) * np.cos(alphaArr[i,j]) + np.sin(betaArr[i,j]) * np.sin(alphaArr[i,j]))
# F_x
cartesianForceArr[i,1] = -forceArr[i] * np.sin(-alphaArr[i] + betaArr[i]) #(-np.sin(betaArr[i,j]) * np.cos(alphaArr[i,j]) + np.cos(betaArr[i,j]) * np.sin(alphaArr[i,j]))
# If we only have a single force, we should collapse that first dimension
if singleForce and collapse:
return cartesianForceArr[0]
return cartesianForceArr
def testForceBalance(forceArr, alphaArr, betaArr, collapse=True):
"""
Sum each of the cartesian force components to see how
well an ensemble of forces satisfies force balance.
Parameters
----------
forceArr : np.ndarray[F] or np.ndarray[T,F]
An array/list of F force magnitudes, possibly for T timesteps.
alphaArr : np.ndarray[F] or np.ndarray[T,F]
An array/list of F alpha angles, possibly for T timesteps.
betaArr : np.ndarray[F] or np.ndarray[T,F]
An array/list of F beta angles, possibly for T timesteps.
collapse : bool
Whether to collapse the timestep dimension in the case that
only a single timestep is provided.
Returns
-------
forceSumArr : np.ndarray[T,2]
An array of the sum of each cartesian component (y,x) of the forces at each timestep.
If only a single timestep is provided (ie. `forceArr`, `alphaArr` and `betaArr` are all
1D arrays) the first dimension will be omitted, leaving just `[sum_f_y, sum_f_x]`. See
`collapse` for more information.
"""
# Check if we were given a single timestep, or multiple
if len(np.shape(forceArr)) == 2:
singleTimestep = False
multiForceArr = np.array(forceArr)
multiAlphaArr = np.array(alphaArr)
multiBetaArr = np.array(betaArr)
else:
singleTimestep = True
# TODO: Might need a transpose here
multiForceArr = np.array([forceArr])
multiAlphaArr = np.array([alphaArr])
multiBetaArr = np.array([betaArr])
forceSumArr = np.zeros((multiForceArr.shape[1], 2))
# Sum up forces for each timestep
for i in range(multiForceArr.shape[1]):
cartForces = polarToCartesian(multiForceArr[:,i], multiAlphaArr[:,i], multiBetaArr[:,i], collapse=False)
# sum_y
forceSumArr[i,0] = np.sum(cartForces[:,0])
# sum_x
forceSumArr[i,1] = np.sum(cartForces[:,1])
if singleTimestep and collapse:
return forceSumArr[0]
return forceSumArr
@numba.jit(nopython=True)
def singleParticleForceBalance(forceArr, alphaArr, betaArr):
"""
**Does not currently work! Any calls to this function will just return the original
arrays**
Takes a set of forces acting on a single particle and ensures they obey
force balance.
The majority of this method is transpiled directly from Jonathan Kollmer's
implementation:
https://github.com/jekollmer/PEGS
Parameters
----------
forceArr : np.ndarray[N]
Array of force magnitudes at each contact point.
alphaArr : np.ndarray[N]
Array of angles that define the direction of force at each contact point
betaArr : np.ndarray[N]
Array of angles that define the contact point of the forces, and therefore are
not adjusted in the force balancing process
Returns
-------
np.ndarray[N] : Magnitude of balanced forces
np.ndarray[N] : Balanced contact angles alpha
"""
# TODO: Get this function working
print("Warning: force balance is not yet implemented, do not call the singleParticleForceBalance function!")
return forceArr, alphaArr
# Number of contacts (coordination number, often denoted by z)
numContacts = len(forceArr)
if numContacts < 2:
# Can't do anything with only a single force
return forceArr, alphaArr
elif numContacts == 2:
# For 2 forces, there is a unique process
# The two force magnitudes must be equal
balancedForceArr = np.array([forceArr[0], forceArr[0]])
balancedAlphaArr = np.zeros(2)
dBeta = (betaArr[0] - betaArr[1]) / 2
balancedAlphaArr[0] = np.arccos(np.sin(dBeta))
if balancedAlphaArr[0] > np.pi/2:
balancedAlphaArr[0] = np.arccos(np.sin(-dBeta))
# And the other angle must be the opposite
balancedAlphaArr[1] = - balancedAlphaArr[0]
return balancedForceArr, balancedAlphaArr
elif numContacts > 2:
# We solve any z>2 contacts the same way
balancedForceArr = np.zeros_like(forceArr)
balancedAlphaArr = np.zeros_like(alphaArr)
# To calculate the new force magnitudes, we add up vertical and
# horizontal components of the other forces
for i in range(numContacts):
# These initializations are to not count the case where j = i
sum1 = -forceArr[i] * np.sin(alphaArr[i])
sum2 = -forceArr[i] * np.cos(alphaArr[i])
for j in range(numContacts):
sum1 += forceArr[j] * np.sin(alphaArr[j] + betaArr[j] - betaArr[i])
sum2 += forceArr[j] * np.cos(alphaArr[j] + betaArr[j] - betaArr[i])
balancedForceArr[i] = np.sqrt(sum1**2 + sum2**2)
        # To calculate new alpha values, we require the tangential (sine) components to sum to zero
for i in range(numContacts):
sum3 = -balancedForceArr[i] * np.sin(alphaArr[i])
for j in range(numContacts):
sum3 += balancedForceArr[j] * np.sin(alphaArr[j])
balancedAlphaArr[i] = np.arcsin(-sum3/balancedForceArr[i])
return balancedForceArr, balancedAlphaArr
| 34.538462 | 177 | 0.639198 |
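A short usage sketch for the helpers above, assuming `pepe` is importable; the force magnitudes and contact angles are made-up values.

import numpy as np
from pepe.analysis.ForceBalance import polarToCartesian, testForceBalance

forces = [1.0, 1.0]
alphas = [0.0, 0.0]
betas = [0.0, np.pi]  # two purely normal forces on opposite sides of the particle

cartesian = polarToCartesian(forces, alphas, betas)  # rows are (f_y, f_x)
print(cartesian.sum(axis=0))                         # ~[0, 0]: the two forces cancel

# For several timesteps the docstring expects a [T, F] layout.
sums = testForceBalance(np.array([forces, forces]),
                        np.array([alphas, alphas]),
                        np.array([betas, betas]))
print(sums)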
7947cf5e1892b79ae46d07d2a0126e6c1b20dbd8 | 4,592 | py | Python | pretrain_AE.py | bigaidream-projects/citylearn-2020-pikapika | 8c9389eb4b4e979faf269b8c0ce87b499af97ac1 | [
"Apache-2.0"
] | 3 | 2021-12-20T03:40:55.000Z | 2022-02-02T04:26:33.000Z | pretrain_AE.py | bigaidream-projects/citylearn-2020-pikapika | 8c9389eb4b4e979faf269b8c0ce87b499af97ac1 | [
"Apache-2.0"
] | null | null | null | pretrain_AE.py | bigaidream-projects/citylearn-2020-pikapika | 8c9389eb4b4e979faf269b8c0ce87b499af97ac1 | [
"Apache-2.0"
] | 4 | 2022-02-11T20:30:51.000Z | 2022-02-27T01:17:34.000Z | from torch.optim import Adam
from torch.nn.functional import l1_loss
from torch.distributions import kl_divergence, Normal
from pathlib import Path
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from citylearn import CityLearn
from utils.standardization import normalize_AE_state_with_pred
from utils.io import get_output_folder
from model.Encoder import AE
from utils.util import USE_CUDA
import os
import argparse
log_per_step = 1000
# Instantiating the Tensorboard writers
PATH_base = 'datas/new/'
PATH_base = get_output_folder(PATH_base, 'scalar_pretrain_encoder')
PATH_to_log_dir1 = PATH_base + '/pred'
pred_writer = SummaryWriter(PATH_to_log_dir1)
PATH_to_log_dir2 = PATH_base + '/unpred'
unpred_writer = SummaryWriter(PATH_to_log_dir2)
# load data
parser = argparse.ArgumentParser()
# RL Hyper-parameters
parser.add_argument('--climate_zone', type=int, default=1)
args = parser.parse_args()
data_path = Path("../data/Climate_Zone_" + str(args.climate_zone))
building_attributes = data_path / 'building_attributes.json'
weather_file = data_path / 'weather_data.csv'
solar_profile = data_path / 'solar_generation_1kW.csv'
building_state_actions = 'buildings_state_action_space.json'
building_ids = ["Building_1", "Building_2", "Building_3", "Building_4", "Building_5", "Building_6", "Building_7",
"Building_8", "Building_9"]
objective_function = ['ramping', '1-load_factor', 'average_daily_peak', 'peak_demand',
'net_electricity_consumption', 'total']
# Instantiating the env
env = CityLearn(data_path, building_attributes, weather_file, solar_profile, building_ids,
buildings_states_actions=building_state_actions, cost_function=objective_function)
observations_spaces, actions_spaces = env.get_state_action_spaces()
# test_sample = torch.zeros((100, 37))
# dataloader = [test_sample]
state = env.reset()
norm_state = normalize_AE_state_with_pred(state, noSOC=True)
dataloader = [norm_state]
done = False
while not done:
action = np.zeros((9, 2))
next_state, reward, done, _ = env.step(action)
norm_state = normalize_AE_state_with_pred(next_state, noSOC=True)
dataloader.append(norm_state)
state = next_state
model = AE(31, 128, [128, 128], {})
if USE_CUDA:
model = model.cuda()
opt = Adam(model.parameters(), lr=0.001)
max_epoch = 100
MIN_loss = 9999999
model_path = './Models_one_AE_128dim_zone' + str(args.climate_zone)
if not os.path.isdir(model_path):
os.mkdir(model_path)
# model.load_state_dict(torch.load('{}/AE.pt'.format(model_path)))
# print("load model successfully")
def print_grad(net):
for name, parms in net.named_parameters():
if parms.grad is None:
continue
print('-->name:', name, '-->grad_requires:', parms.requires_grad,
' -->grad_value:', torch.max(parms.grad), torch.min(parms.grad))
STEP_PER_EPOCH = 10000
BATCH_SIZE = 100
DROPOUT = 0.2
for e in range(max_epoch):
cum_loss = 0.
for idx in range(STEP_PER_EPOCH):
batch_idx = np.random.randint(low=0, high=8760, size=BATCH_SIZE)
s = torch.FloatTensor(np.array(dataloader)[batch_idx]).reshape(BATCH_SIZE * 9, -1)
if USE_CUDA:
s = s.cuda()
        # =========== training the autoencoder on the (predictable) state variables =========
hidden_state = model(s)
# GaussianDist = Normal(torch.zeros_like(dist.mean), torch.ones_like(dist.stddev)) # Gaussian(0, 1)
# TODO Check gradient flow through kl_divergence
recon_s = model.decode(hidden_state)
# <input - output> pair-wise dropout
mask = torch.ones_like(s)
        mask = torch.nn.Dropout(DROPOUT)(mask)  # use the module-level DROPOUT constant defined above
mask[mask != 0] = 1.
recon_s = recon_s * mask
s = s * mask
ReconstructionLoss = l1_loss(recon_s, s, reduction='mean')
loss = ReconstructionLoss
opt.zero_grad()
loss.backward()
opt.step()
cum_loss += loss.detach().cpu()
if (e * STEP_PER_EPOCH + idx) % log_per_step == 0:
# print(recon_s, pred_s)
print("loss {} at step {}".format(loss, e * STEP_PER_EPOCH + idx))
print_grad(model)
pred_writer.add_scalar('pred_loss_step', loss, e * STEP_PER_EPOCH + idx)
print("cum loss {} at epoch {}".format(cum_loss, e))
if cum_loss < MIN_loss:
MIN_loss = cum_loss
if e > 0:
torch.save(model.state_dict(), '{}/AE.pt'.format(model_path))
print("save pred model in epoch {}".format(e))
pred_writer.add_scalar('loss_epoch', cum_loss, e)
| 32.338028 | 113 | 0.690984 |
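A self-contained sketch of the input/output pair-wise dropout used in the reconstruction loss above; tensor shapes and values are illustrative only.

import torch
from torch.nn.functional import l1_loss

s = torch.randn(4, 31)        # stand-in for a batch of normalized states
recon_s = torch.randn(4, 31)  # stand-in for the decoder output

# Dropout zeroes ~20% of the mask entries (and rescales the survivors),
# so the surviving entries are reset to exactly 1 before masking both tensors.
mask = torch.ones_like(s)
mask = torch.nn.Dropout(0.2)(mask)
mask[mask != 0] = 1.0

loss = l1_loss(recon_s * mask, s * mask, reduction='mean')
print(loss.item())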
7947d033e8d393f86da4736436a387d28b8a58ad | 7,799 | py | Python | mmdet/core/bbox/bbox_target.py | arthur801031/3d-multi-resolution-rcnn | 8e5454a72f8daa174bf3eabfa5964152f04ab287 | [
"Apache-2.0"
] | 16 | 2021-03-02T07:41:01.000Z | 2022-03-14T08:55:45.000Z | mmdet/core/bbox/bbox_target.py | arthur801031/3d-multi-resolution-rcnn | 8e5454a72f8daa174bf3eabfa5964152f04ab287 | [
"Apache-2.0"
] | 2 | 2022-01-06T20:54:13.000Z | 2022-02-24T03:50:51.000Z | mmdet/core/bbox/bbox_target.py | arthur801031/3d-multi-resolution-rcnn | 8e5454a72f8daa174bf3eabfa5964152f04ab287 | [
"Apache-2.0"
] | 2 | 2021-05-26T19:23:35.000Z | 2022-01-06T20:30:24.000Z | import torch
from .transforms import bbox2delta, bbox2delta3d
from ..utils import multi_apply
def bbox_target(pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
concat=True):
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
bbox_target_single,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=cfg,
reg_classes=reg_classes,
target_means=target_means,
target_stds=target_stds)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_3d(pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
concat=True):
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
bbox_target_single_3d,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=cfg,
reg_classes=reg_classes,
target_means=target_means,
target_stds=target_stds)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_3d_parcel(pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
pos_gt_bregions_list,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
concat=True):
labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights = multi_apply(
bbox_target_single_3d_parcel,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
pos_gt_bregions_list,
cfg=cfg,
reg_classes=reg_classes,
target_means=target_means,
target_stds=target_stds)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
bregions = torch.cat(bregions, 0)
bregion_weights = torch.cat(bregion_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights
def bbox_target_single(pos_bboxes,
neg_bboxes,
pos_gt_bboxes,
pos_gt_labels,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]):
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
pos_bbox_targets = bbox2delta(pos_bboxes, pos_gt_bboxes, target_means,
target_stds)
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_single_3d(pos_bboxes,
neg_bboxes,
pos_gt_bboxes,
pos_gt_labels,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]):
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 6)
bbox_weights = pos_bboxes.new_zeros(num_samples, 6)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
pos_bbox_targets = bbox2delta3d(pos_bboxes, pos_gt_bboxes, target_means,
target_stds)
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
# if torch.isnan(bbox_targets).any().item() == 1:
# breakpoint()
return labels, label_weights, bbox_targets, bbox_weights
def bbox_target_single_3d_parcel(pos_bboxes,
neg_bboxes,
pos_gt_bboxes,
pos_gt_labels,
pos_gt_bregions,
cfg,
reg_classes=1,
target_means=[.0, .0, .0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]):
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
labels = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
bregions = pos_bboxes.new_zeros(num_samples, dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bregion_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 6)
bbox_weights = pos_bboxes.new_zeros(num_samples, 6)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
bregions[:num_pos] = pos_gt_bregions
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
bregion_weights[:num_pos] = pos_weight
pos_bbox_targets = bbox2delta3d(pos_bboxes, pos_gt_bboxes, target_means,
target_stds)
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
bregion_weights[-num_neg:] = 1.0
# if torch.isnan(bbox_targets).any().item() == 1:
# breakpoint()
return labels, label_weights, bbox_targets, bbox_weights, bregions, bregion_weights
def expand_target(bbox_targets, bbox_weights, labels, num_classes):
bbox_targets_expand = bbox_targets.new_zeros((bbox_targets.size(0),
4 * num_classes))
bbox_weights_expand = bbox_weights.new_zeros((bbox_weights.size(0),
4 * num_classes))
for i in torch.nonzero(labels > 0).squeeze(-1):
start, end = labels[i] * 4, (labels[i] + 1) * 4
bbox_targets_expand[i, start:end] = bbox_targets[i, :]
bbox_weights_expand[i, start:end] = bbox_weights[i, :]
return bbox_targets_expand, bbox_weights_expand
| 39.790816 | 95 | 0.594307 |
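A dependency-light sketch of what expand_target produces for a single positive sample; the tensors are made up, and the real helpers above additionally rely on mmdet's bbox2delta/bbox2delta3d and a training cfg.

import torch

num_classes = 3
bbox_targets = torch.tensor([[0.1, 0.2, 0.3, 0.4]])
bbox_weights = torch.ones(1, 4)
labels = torch.tensor([2])

expanded_t = bbox_targets.new_zeros((1, 4 * num_classes))
expanded_w = bbox_weights.new_zeros((1, 4 * num_classes))
for i in torch.nonzero(labels > 0).squeeze(-1):
    start, end = labels[i] * 4, (labels[i] + 1) * 4
    expanded_t[i, start:end] = bbox_targets[i, :]
    expanded_w[i, start:end] = bbox_weights[i, :]
print(expanded_t)  # only the 4-value slot for class 2 (columns 8-11) is filled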
7947d0b73cbc54028f11cf6382ee720dc2d6bf13 | 6,636 | py | Python | Lib/site-packages/qt_py_convert/_modules/from_imports/process.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 61 | 2018-04-17T18:09:32.000Z | 2022-03-04T03:33:50.000Z | Lib/site-packages/qt_py_convert/_modules/from_imports/process.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | Lib/site-packages/qt_py_convert/_modules/from_imports/process.py | fochoao/cpython | 3dc84b260e5bced65ebc2c45c40c8fa65f9b5aa9 | [
"bzip2-1.0.6",
"0BSD"
] | 5 | 2018-04-18T07:36:21.000Z | 2019-07-01T01:41:55.000Z | # Copyright 2018 Digital Domain 3.0
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
"""
The from_imports module is designed to fix the from import statements.
"""
from qt_py_convert._modules.expand_stars import process as stars_process
from qt_py_convert.general import __supported_bindings__, ALIAS_DICT, change, \
supported_binding
from qt_py_convert.log import get_logger
FROM_IMPORTS_LOG = get_logger("from_imports")
IGNORED_IMPORT_TARGETS = ("right_parenthesis", "left_parenthesis")
class Processes(object):
"""Processes class for from_imports"""
@staticmethod
def _get_import_parts(node, binding):
return node.dumps().replace(binding, "").lstrip(".").split(".")
@staticmethod
def _no_second_level_module(node, _parts, skip_lineno=False):
text = "from Qt import {key}".format(
key=", ".join([target.value for target in node.targets])
)
change(
logger=FROM_IMPORTS_LOG,
node=node,
replacement=text,
skip_lineno=skip_lineno
)
node.replace(text)
@classmethod
def _process_import(cls, red, objects, skip_lineno=False):
"""
        _process_import replaces "from <binding> import ..." statements with
        equivalent "from Qt import ..." statements and records the name mappings.
        :param red: The redbaron ast. Only used here when a star import must be expanded.
        :type red: redbaron.RedBaron
:param objects: List of redbaron nodes that matched for this proc.
:type objects: list
:param skip_lineno: Global "skip_lineno" flag.
:type skip_lineno: bool
"""
binding_aliases = ALIAS_DICT
mappings = {}
# Replace each node
for node, binding in objects:
from_import_parts = cls._get_import_parts(node, binding)
if len(from_import_parts) and from_import_parts[0]:
second_level_module = from_import_parts[0]
else:
cls._no_second_level_module(
node.parent,
from_import_parts,
skip_lineno=skip_lineno
)
binding_aliases["bindings"].add(binding)
for target in node.parent.targets:
binding_aliases["root_aliases"].add(target.value)
continue
for _from_as_name in node.parent.targets:
if _from_as_name.type in IGNORED_IMPORT_TARGETS:
continue
if _from_as_name.type == "star":
# TODO: Make this a flag and make use the expand module.
_, star_mappings = stars_process(
red
)
mappings.update(star_mappings)
else:
key = _from_as_name.target or _from_as_name.value
value = ".".join(from_import_parts)+"."+_from_as_name.value
mappings[key] = value
replacement = "from Qt import {key}".format(
key=second_level_module
)
change(
logger=FROM_IMPORTS_LOG,
node=node.parent,
replacement=replacement,
skip_lineno=skip_lineno
)
node.parent.replace(replacement)
binding_aliases["bindings"].add(binding)
for target in node.parent.targets:
binding_aliases["root_aliases"].add(target.value)
if binding not in binding_aliases:
binding_aliases[binding] = set()
binding_aliases[binding] = binding_aliases[binding].union(
set([target.value for target in node.parent.targets])
)
return binding_aliases, mappings
FROM_IMPORT_STR = "FROM_IMPORT"
FROM_IMPORT = _process_import
def import_process(store):
"""
import_process is one of the more complex handlers for the _modules.
:param store: Store is the issues dict defined in "process"
:type store: dict
:return: The filter_function callable.
:rtype: callable
"""
def filter_function(value):
"""
        filter_function takes the module value of a FromImportNode and stores the
        node if the import comes from one of the supported Qt bindings.
"""
_raw_module = value.dumps()
# See if that import is in our __supported_bindings__
matched_binding = supported_binding(_raw_module)
if matched_binding:
store[Processes.FROM_IMPORT_STR].add(
(value, matched_binding)
)
return True
return filter_function
def process(red, skip_lineno=False, **kwargs):
"""
process is the main function for the import process.
:param red: Redbaron ast.
:type red: redbaron.redbaron
:param skip_lineno: An optional performance flag. By default, when the
script replaces something, it will tell you which line it is
replacing on. This can be useful for tracking the places that
changes occurred. When you turn this flag on however, it will not
show the line numbers. This can give great performance increases
because redbaron has trouble calculating the line number sometimes.
:type skip_lineno: bool
:param kwargs: Any other kwargs will be ignored.
:type kwargs: dict
"""
issues = {
Processes.FROM_IMPORT_STR: set(),
}
red.find_all("FromImportNode", value=import_process(issues))
key = Processes.FROM_IMPORT_STR
if issues[key]:
return getattr(Processes, key)(red, issues[key], skip_lineno=skip_lineno)
else:
return ALIAS_DICT, {}
| 37.280899 | 81 | 0.639994 |
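A hedged sketch of running this pass over a small snippet; it requires redbaron and qt_py_convert to be installed, and the exact output depends on their versions.

import redbaron
from qt_py_convert._modules.from_imports import process as from_imports

red = redbaron.RedBaron("from PySide2.QtWidgets import QWidget as Widget\n")
aliases, mappings = from_imports.process(red)
print(red.dumps())  # expected to become: from Qt import QtWidgets
print(mappings)     # expected to map 'Widget' -> 'QtWidgets.QWidget'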
7947d0c383359141ce9cb03d6cd951c21f2fa75f | 10,025 | py | Python | src/transformers/tokenization_t5_fast.py | Liang813/transformers | 08f534d2da47875a4b7eb1c125cfa7f0f3b79642 | [
"Apache-2.0"
] | null | null | null | src/transformers/tokenization_t5_fast.py | Liang813/transformers | 08f534d2da47875a4b7eb1c125cfa7f0f3b79642 | [
"Apache-2.0"
] | null | null | null | src/transformers/tokenization_t5_fast.py | Liang813/transformers | 08f534d2da47875a4b7eb1c125cfa7f0f3b79642 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model T5."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from .file_utils import add_start_docstrings, is_sentencepiece_available
from .tokenization_utils import BatchEncoding
from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING
from .tokenization_utils_fast import PreTrainedTokenizerFast
from .utils import logging
if is_sentencepiece_available():
from .tokenization_t5 import T5Tokenizer
else:
T5Tokenizer = None
logger = logging.get_logger(__name__)
####################################################
# Mapping from the keyword arguments names of Tokenizer `__init__`
# to file names for serializing Tokenizer instances
####################################################
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
####################################################
# Mapping from the keyword arguments names of Tokenizer `__init__`
# to pretrained vocabulary URL for all the model shortcut names.
####################################################
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-spiece.model",
"t5-base": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-spiece.model",
"t5-large": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-spiece.model",
"t5-3b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-spiece.model",
"t5-11b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-spiece.model",
},
"tokenizer_file": {
"t5-small": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-tokenizer.json",
"t5-base": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-tokenizer.json",
"t5-large": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-tokenizer.json",
"t5-3b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-tokenizer.json",
"t5-11b": "https://s3.amazonaws.com/models.huggingface.co/bert/t5-tokenizer.json",
},
}
####################################################
# Mapping from model shortcut names to max length of inputs
####################################################
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" T5 tokenizer (backed by HuggingFace's `tokenizers` library). Based on `SentencePiece
<https://github.com/google/sentencepiece>`__ .
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (:obj:`int`, `optional`, defaults to 100):
Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
indexed from the end of the vocabulary up to beginnning ("<extra_id_0>" is the last token in the vocabulary
like in T5 preprocessing see `here
<https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117>`__).
additional_special_tokens (:obj:`List[str]`, `optional`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["attention_mask"]
slow_tokenizer_class = T5Tokenizer
prefix_tokens: List[int] = []
def __init__(
self,
vocab_file,
tokenizer_file=None,
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
extra_ids=100,
additional_special_tokens=None,
**kwargs
):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = ["<extra_id_{}>".format(i) for i in range(extra_ids)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
extra_tokens = len(set(filter(lambda x: bool("extra_id_" in x), additional_special_tokens)))
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to T5Tokenizer. "
"In this case the additional_special_tokens must include the extra_ids tokens"
)
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
extra_ids=extra_ids,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.vocab_file = vocab_file
self._extra_ids = extra_ids
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: ``X </s>``
- pair of sequences: ``A </s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
token_ids_0 = token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0
else:
token_ids_1 = token_ids_1 + [self.eos_token_id]
return self.prefix_tokens + token_ids_0 + token_ids_1
@add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
def prepare_seq2seq_batch(
self,
src_texts: List[str],
tgt_texts: Optional[List[str]] = None,
max_length: Optional[int] = None,
max_target_length: Optional[int] = None,
padding: str = "longest",
return_tensors: str = None,
truncation: bool = True,
**kwargs,
) -> BatchEncoding:
if max_length is None:
max_length = self.max_len
self.prefix_tokens = []
model_inputs = self(
src_texts,
add_special_tokens=True,
return_tensors=return_tensors,
max_length=max_length,
padding=padding,
truncation=truncation,
**kwargs,
)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
max_target_length = max_length
# set prefix_tokens for target text
self.prefix_tokens = [self.pad_token_id]
labels_and_decoder_mask = self(
tgt_texts,
add_special_tokens=True,
return_tensors=return_tensors,
padding=padding,
max_length=max_target_length,
truncation=truncation,
**kwargs,
)
model_inputs["labels"] = labels_and_decoder_mask["input_ids"]
self.prefix_tokens = []
return model_inputs
| 42.299578 | 164 | 0.639302 |
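A short usage sketch for the tokenizer above (downloads the t5-small files on first run; prepare_seq2seq_batch is the API of this transformers version and was later deprecated in favour of calling the tokenizer directly).

from transformers import T5TokenizerFast

tokenizer = T5TokenizerFast.from_pretrained("t5-small")
batch = tokenizer.prepare_seq2seq_batch(
    src_texts=["translate English to German: I love Python."],
    tgt_texts=["Ich liebe Python."],
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)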
7947d0e06d1b2056e68ba1afac0bed9598f27f4e | 194 | py | Python | manage.py | atten/mongo-log-watcher | 7269356acc30c44ae6ed857d491758ef4865f8a4 | [
"MIT"
] | null | null | null | manage.py | atten/mongo-log-watcher | 7269356acc30c44ae6ed857d491758ef4865f8a4 | [
"MIT"
] | null | null | null | manage.py | atten/mongo-log-watcher | 7269356acc30c44ae6ed857d491758ef4865f8a4 | [
"MIT"
] | null | null | null | from app import app, init_app, manager
@manager.command
def runserver():
app.run(host='localhost', port=8211)
if __name__ == '__main__':
init_app('local_settings')
manager.run()
| 16.166667 | 40 | 0.690722 |
7947d1981ddc9d96cd3518cb130a97c890c1b721 | 1,113 | py | Python | module3/modules04.py | zubrik13/stepic_python | 72def2a2c2d45d8ff47a94a6ba6bc4936459046d | [
"MIT"
] | null | null | null | module3/modules04.py | zubrik13/stepic_python | 72def2a2c2d45d8ff47a94a6ba6bc4936459046d | [
"MIT"
] | null | null | null | module3/modules04.py | zubrik13/stepic_python | 72def2a2c2d45d8ff47a94a6ba6bc4936459046d | [
"MIT"
] | null | null | null | """
Имеется набор файлов, каждый из которых, кроме последнего, содержит имя следующего файла.
Первое слово в тексте последнего файла: "We".
Скачайте предложенный файл. В нём содержится ссылка на первый файл из этого набора.
Все файлы располагаются в каталоге по адресу:
https://stepic.org/media/attachments/course67/3.6.3/
Загрузите содержимое последнего файла из набора, как ответ на это задание.
"""
import requests
with open("dataset05.txt") as file:
for line in file:
url = line.strip()
link = "https://stepic.org/media/attachments/course67/3.6.3/"
r = requests.get(url)
filename = r.text.split("/")[-1]
# print(filename)
counter = 0
while filename:
# print(filename)
r = requests.get(link+filename)
if r.text.startswith('We'):
filename = None
else:
filename = r.text
counter += 1
print(counter)
with open("out04.txt", "w") as out:
out.write(r.text)
# beauty
# import requests
# url, name = 'https://stepic.org/media/attachments/course67/3.6.3/', '699991.txt'
# while name[:2] != 'We':
# name = requests.get(url + name).text
# print(name)
| 24.733333 | 89 | 0.683738 |
7947d19c532dad134debf70d773a709f348ef1bd | 1,648 | py | Python | exercise_code/data/base_dataset.py | Rylie-W/I2DL_21WS | d0c6517695b71a491f7f88ed1031366de209c4a0 | [
"Apache-2.0"
] | null | null | null | exercise_code/data/base_dataset.py | Rylie-W/I2DL_21WS | d0c6517695b71a491f7f88ed1031366de209c4a0 | [
"Apache-2.0"
] | null | null | null | exercise_code/data/base_dataset.py | Rylie-W/I2DL_21WS | d0c6517695b71a491f7f88ed1031366de209c4a0 | [
"Apache-2.0"
] | null | null | null | """Dataset Base Class"""
from abc import ABC, abstractmethod
from .download_utils import download_dataset
class Dataset(ABC):
"""
Abstract Dataset Base Class
All subclasses must define __getitem__() and __len__()
"""
def __init__(self, root, download_url=None, force_download=False, verbose=False):
self.root_path = root
# The actual archive name should be all the text of the url after the
# last '/'.
if download_url is not None:
dataset_zip_name = download_url[download_url.rfind('/')+1:]
self.dataset_zip_name = dataset_zip_name
download_dataset(
url=download_url,
data_dir=root,
dataset_zip_name=dataset_zip_name,
force_download=force_download,
verbose=verbose,
)
@abstractmethod
def __getitem__(self, index):
"""Return data sample at given index"""
@abstractmethod
def __len__(self):
"""Return size of the dataset"""
class DummyDataset(Dataset):
"""
Simple dummy dataset
    Contains all integers from 1 to a given limit that are divisible by a given divisor
"""
def __init__(self, divisor, limit, **kwargs):
"""
:param divisor: common divisor of all integers in the dataset
:param limit: upper limit of integers in the dataset
"""
super().__init__(**kwargs)
self.data = [i for i in range(1, limit + 1) if i % divisor == 0]
def __len__(self):
return len(self.data)
def __getitem__(self, index):
return {"data": self.data[index]}
| 29.428571 | 89 | 0.617112 |
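A quick usage sketch for DummyDataset; the import path follows the repository layout above, and the root directory is arbitrary since nothing is downloaded when download_url is None.

from exercise_code.data.base_dataset import DummyDataset

dataset = DummyDataset(divisor=3, limit=10, root="./dummy_data")
print(len(dataset))  # 3  -> the integers 3, 6, 9
print(dataset[0])    # {'data': 3}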
7947d19ce0ccb5dc7be7a4370b51e1c11ff969ed | 1,269 | py | Python | addic7ed/logger.py | spfeifer222/addic7ed | f606d72d88eb131a4252dd863fbee5c36ce059b7 | [
"MIT"
] | 13 | 2015-12-22T14:23:23.000Z | 2018-11-18T21:01:29.000Z | addic7ed/logger.py | spfeifer222/addic7ed | f606d72d88eb131a4252dd863fbee5c36ce059b7 | [
"MIT"
] | 5 | 2016-01-23T06:34:27.000Z | 2017-03-20T09:48:13.000Z | addic7ed/logger.py | spfeifer222/addic7ed | f606d72d88eb131a4252dd863fbee5c36ce059b7 | [
"MIT"
] | 4 | 2016-02-15T14:02:46.000Z | 2017-03-17T08:28:13.000Z | from os import makedirs
from os.path import expanduser, exists
from logging import getLogger, Formatter, StreamHandler, DEBUG, WARN
from logging.handlers import RotatingFileHandler
from termcolor import colored
LOG_COLORS = {
"DEBUG": "grey",
"INFO": "cyan",
"WARNING": "yellow",
"ERROR": "magenta",
"CRITICAL": "red"
}
def init_logger():
logger = getLogger("addic7ed")
logger.setLevel(DEBUG)
directory = "%s/.config/addic7ed/" % expanduser("~")
if not exists(directory):
makedirs(directory)
fh = RotatingFileHandler("%s%s" % (directory, "addic7ed.log"))
fh.setLevel(DEBUG)
sh = StreamHandler()
sh.setLevel(WARN)
fcolor = "%s - %s" % (colored("%(asctime)s", "green"),
"%(levelname)7s - %(name)s - %(message)s")
formatter_color = ColoredFormatter(fcolor)
formatter = Formatter(("%(asctime)s - %(levelname)7s - "
"%(name)s - %(message)s"))
fh.setFormatter(formatter)
sh.setFormatter(formatter_color)
logger.addHandler(fh)
logger.addHandler(sh)
class ColoredFormatter(Formatter):
def format(self, record):
record.msg = colored(record.msg, LOG_COLORS[record.levelname])
return super().format(record)
| 26.4375 | 70 | 0.634358 |
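A minimal sketch of wiring the logger above into application code; it writes to ~/.config/addic7ed/addic7ed.log and only WARNING and above reach the colored stream handler.

from logging import getLogger
from addic7ed.logger import init_logger

init_logger()
log = getLogger("addic7ed.example")  # child loggers propagate to the 'addic7ed' handlers
log.warning("Subtitle not found, falling back to another release")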
7947d1f977b1d1e3f29ceb0a5dae7b9d6701b2ec | 1,107 | py | Python | test/publish_async_stddev.py | Kettenhoax/quickplot | e6624dbcefef5382b2727c93286699193ae60b1c | [
"Apache-2.0"
] | null | null | null | test/publish_async_stddev.py | Kettenhoax/quickplot | e6624dbcefef5382b2727c93286699193ae60b1c | [
"Apache-2.0"
] | null | null | null | test/publish_async_stddev.py | Kettenhoax/quickplot | e6624dbcefef5382b2727c93286699193ae60b1c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import sys
import rclpy
import math
import random
from rclpy.node import Node
from rclpy.time import CONVERSION_CONSTANT, Duration
from geometry_msgs.msg import Vector3Stamped
class PublishAsyncStddev(Node):
def __init__(self):
super().__init__('publish_async_stddev')
self._pub_value = self.create_publisher(Vector3Stamped, 'value', 1)
self._pub_stddev = self.create_publisher(Vector3Stamped, 'stddev', 1)
self._timer = self.create_timer(0.1, self._on_timer)
def _on_timer(self):
msg = Vector3Stamped()
t = self.get_clock().now()
        t += Duration(nanoseconds=random.randint(0, CONVERSION_CONSTANT // 1000))  # integral bound required by randint
msg.header.stamp = t.to_msg()
msg.vector.x = math.sin(t.nanoseconds / CONVERSION_CONSTANT)
self._pub_value.publish(msg)
msg.vector.x = 1.0
if bool(random.getrandbits(3)):
print('publishing')
self._pub_stddev.publish(msg)
def main(args=sys.argv):
rclpy.init(args=args)
rclpy.spin(PublishAsyncStddev())
if __name__ == '__main__':
main()
| 27 | 79 | 0.676603 |
7947d2e788b5bc6f296b37452dd2b5276ff23200 | 1,699 | py | Python | tests/test_list_notation.py | lbolanos/aws-sfn-builder | 6323963786388990ba2ffc1349a9b488bee338a7 | [
"MIT"
] | 4 | 2018-10-14T23:15:57.000Z | 2020-11-03T04:05:14.000Z | tests/test_list_notation.py | lbolanos/aws-sfn-builder | 6323963786388990ba2ffc1349a9b488bee338a7 | [
"MIT"
] | null | null | null | tests/test_list_notation.py | lbolanos/aws-sfn-builder | 6323963786388990ba2ffc1349a9b488bee338a7 | [
"MIT"
] | 2 | 2020-11-03T04:06:26.000Z | 2021-05-12T00:37:53.000Z | from aws_sfn_builder import Machine, Parallel, State, States
def test_empty_machine():
m = Machine.parse([])
assert m.start_at is None
assert not m.states
assert m.dry_run() == []
def test_simple_sequence():
s = Machine.parse(["a", "b"])
assert len(s.states) == 2
assert s.start_at == "a"
assert s.dry_run() == ["a", "b"]
def test_simple_parallel():
source = [["a"], ["b"]]
s = Machine.parse(source)
assert len(s.states) == 1
assert isinstance(s.states[s.start_at], Parallel)
assert s.dry_run() == source
c = s.compile()
assert c["States"][c["StartAt"]]["Type"] == "Parallel"
def test_parallel_inside_sequence():
source = [
"a",
[
["b11", "b12"],
["b21", "b22"],
],
"c",
]
s = Machine.parse(source)
assert len(s.states) == 3
assert s.start_at == "a"
assert s.dry_run() == source
c = s.compile()
assert c["States"][c["States"]["a"]["Next"]]["Type"] == "Parallel"
def test_parallel_inside_parallel():
source = [
[
"a",
],
[
[
[
"b11",
],
[
"b21",
],
],
"b3",
]
]
s = Machine.parse(source)
assert s.dry_run() == source
c = s.compile()
assert c["States"][c["StartAt"]]["Type"] == "Parallel"
def test_dictionary_with_no_type_defaults_to_task():
state = State.parse({
"InputPath": "$.first_input",
"ResultPath": "$.first_output",
"Resource": "MultiplierByTwo",
})
assert state.type == States.Task
| 21.506329 | 70 | 0.496174 |
7947d2fb901ee78b07c7746fba2b505013cc6e13 | 5,533 | py | Python | LogicPy/shift_registers.py | Sunillad08/Digital_logic | 18fb08b5223f57ec89ca24d8ed62a7891e657c1c | [
"MIT"
] | 6 | 2021-05-04T11:35:46.000Z | 2022-03-11T18:41:33.000Z | LogicPy/shift_registers.py | Sunillad08/Digital_logic | 18fb08b5223f57ec89ca24d8ed62a7891e657c1c | [
"MIT"
] | 9 | 2021-05-05T15:52:44.000Z | 2021-06-13T14:53:14.000Z | LogicPy/shift_registers.py | Sunillad08/Digital_logic | 18fb08b5223f57ec89ca24d8ed62a7891e657c1c | [
"MIT"
] | 1 | 2021-05-04T18:10:37.000Z | 2021-05-04T18:10:37.000Z | '''
shift registers
type:class\n
name-format: shift_register_[name]\n
SIPO\n
PISO\n
SISO\n
PIPO
'''
'''SIPO'''
class shift_register_SIPO():
def __init__(self,level,inputno = None):
self.level = level
self.inputno = inputno
def sr_set(self,inputno):
#list input
if (isinstance(inputno, list)):
if(len(inputno) == self.level):
for bin_in in inputno:
if bin_in not in [0,1]:
raise ValueError("Invalid value for input")
else:
raise ValueError("Number of input bits is not equal to the number of flip flops")
else:
raise ValueError("Input must be in a list format")
self.inputno = inputno
def sr_get(self,clock):
if(isinstance(clock,int)):
if (clock < 0):
raise ValueError("Clock pulses are not negative")
elif (clock >= self.level):
clock = self.level - 1
else:
raise ValueError("The Nth clock pulse should be an integer")
input_cp = self.inputno.copy()
og_list = []
for i in range(clock + 1):
#start from the least significant bit
og_list.insert(0,input_cp[-1])
input_cp.pop()
if(len(og_list) < self.level):
for val in range(self.level - len(og_list)):
og_list.append(0)
return(og_list)
'''PISO'''
class shift_register_PISO():
def __init__(self,level,inputno = None):
self.level = level
self.inputno = inputno
def sr_set(self,inputno):
#list input
if (isinstance(inputno, list)):
if(len(inputno) == self.level):
for bin_in in inputno:
if bin_in not in [0,1]:
raise ValueError("Invalid value for input")
else:
raise ValueError("Number of input bits is not equal to the number of flip flops")
else:
raise ValueError("Input must be in a list format")
self.inputno = inputno
def sr_get(self,clock):
if(isinstance(clock,int)):
if (clock < 0):
raise ValueError("Clock pulses are not negative")
elif (clock >= self.level):
clock = self.level - 1
else:
raise ValueError("The Nth clock pulse should be an integer")
input_cp = self.inputno.copy()
og_list = []
for _ in range(clock + 1):
#start from the least significant bit
og_list.insert(0,input_cp[-1])
input_cp.pop()
if(len(og_list) < self.level):
for _ in range(self.level - len(og_list)):
og_list.append(0)
return(og_list)
'''SISO'''
class shift_register_SISO():
def __init__(self,level,inputno = None):
self.level = level
self.inputno = inputno
def sr_set(self,inputno):
#list input
if (isinstance(inputno, list)):
if(len(inputno) == self.level):
for bin_in in inputno:
if bin_in not in [0,1]:
raise ValueError("Invalid value for input")
else:
raise ValueError("Number of input bits is not equal to the number of flip flops")
else:
raise ValueError("Input must be in a list format")
self.inputno = inputno
def sr_get(self,clock):
if(isinstance(clock,int)):
if (clock < 0):
raise ValueError("Clock pulses are not negative")
elif (clock >= self.level):
clock = self.level - 1
else:
raise ValueError("The Nth clock pulse should be an integer")
input_cp = self.inputno.copy()
og_list = []
for i in range(clock + 1):
#start from the least significant bit
og_list.insert(0,input_cp[-1])
input_cp.pop()
if(len(og_list) < self.level):
for val in range(self.level - len(og_list)):
og_list.append(0)
return(og_list)
'''PIPO'''
class shift_register_PIPO():
def __init__(self,level,inputno = None):
self.level = level
self.inputno = inputno
def sr_set(self,inputno):
#list input
if (isinstance(inputno, list)):
if(len(inputno) == self.level):
for bin_in in inputno:
if bin_in not in [0,1]:
raise ValueError("Invalid value for input")
else:
raise ValueError("Number of input bits is not equal to the number of flip flops")
else:
raise ValueError("Input must be in a list format")
self.inputno = inputno
def sr_get(self,clock):
if(isinstance(clock,int)):
if (clock < 0):
raise ValueError("Clock pulses are not negative")
else:
return(self.inputno.copy())
else:
raise ValueError("The Nth clock pulse should be an integer")
| 29.430851 | 97 | 0.50009 |
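A short usage sketch for the 4-bit SIPO register above; the import path follows the repository layout and the input bits are arbitrary.

from LogicPy.shift_registers import shift_register_SIPO

sipo = shift_register_SIPO(level=4)
sipo.sr_set([1, 0, 1, 1])    # most-significant bit first
print(sipo.sr_get(clock=0))  # [1, 0, 0, 0] -> only the last input bit has been shifted in
print(sipo.sr_get(clock=3))  # [1, 0, 1, 1] -> fully loaded after four pulses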
7947d507a8683e1a58ed9dea8fd0fdab2cff341d | 2,855 | py | Python | test/unit/test_utils_process.py | persanix-llc/endrpi-server | 0bc69bfde977e558e7097175d1207be4da388065 | [
"Apache-2.0"
] | 2 | 2021-04-30T18:12:14.000Z | 2021-10-30T02:53:48.000Z | test/unit/test_utils_process.py | persanix-llc/endrpi-server | 0bc69bfde977e558e7097175d1207be4da388065 | [
"Apache-2.0"
] | 1 | 2021-08-29T16:18:15.000Z | 2021-08-29T16:18:15.000Z | test/unit/test_utils_process.py | persanix-llc/endrpi-server | 0bc69bfde977e558e7097175d1207be4da388065 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 - 2021 Persanix LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import unittest
from unittest import TestCase
from unittest.mock import patch
from endrpi.utils.process import process_output
class TestProcessUtils(TestCase):
def setUp(self):
logging.disable(logging.CRITICAL)
@patch('endrpi.utils.process.subprocess.Popen')
def test_process_output(self, mocked_popen_constructor):
# Instantiate a mocked popen object
mocked_popen = mocked_popen_constructor.return_value
# Ensure errors in stderr propagate
mocked_popen.communicate.return_value = (b'Value', b'Error')
output = process_output(['example', 'command'])
self.assertIsNone(output)
mocked_popen.communicate.return_value = (b'', b'E')
output = process_output(['example', 'command'])
self.assertIsNone(output)
# Ensure errors caught while running the command propagate
mocked_popen.communicate.side_effect = OSError('An error occurred')
output = process_output(['example', 'command'])
self.assertIsNone(output)
mocked_popen.communicate.side_effect = None
mocked_popen.communicate.side_effect = ValueError('An error occurred')
output = process_output(['example', 'command'])
self.assertIsNone(output)
mocked_popen.communicate.side_effect = None
mocked_popen.communicate.side_effect = OSError('An error occurred')
output = process_output(['example', 'command'])
self.assertIsNone(output)
mocked_popen.communicate.side_effect = None
# Ensure valid inputs return their expected results
mocked_popen.communicate.return_value = (b'Value', None)
output = process_output(['example', 'command'])
self.assertIsNotNone(output)
self.assertEqual(output, 'Value')
mocked_popen.communicate.return_value = (b'', None)
output = process_output(['example', 'command'])
self.assertIsNotNone(output)
self.assertEqual(output, '')
mocked_popen.communicate.return_value = (b'', b'')
output = process_output(['example', 'command'])
self.assertIsNotNone(output)
self.assertEqual(output, '')
if __name__ == '__main__':
unittest.main()
| 36.602564 | 78 | 0.695972 |
7947d6bdd3ab72cc5c532d7daf87d3634956dc50 | 546 | py | Python | build/usb_cam/catkin_generated/pkg.installspace.context.pc.py | madalynlmillen/MadalynMillenCapstone | a1585ba419d4ab4854908b4ba88e4c8ca330b5cd | [
"MIT",
"Unlicense"
] | null | null | null | build/usb_cam/catkin_generated/pkg.installspace.context.pc.py | madalynlmillen/MadalynMillenCapstone | a1585ba419d4ab4854908b4ba88e4c8ca330b5cd | [
"MIT",
"Unlicense"
] | null | null | null | build/usb_cam/catkin_generated/pkg.installspace.context.pc.py | madalynlmillen/MadalynMillenCapstone | a1585ba419d4ab4854908b4ba88e4c8ca330b5cd | [
"MIT",
"Unlicense"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/kinova/MillenCapstone/MadalynMillenCapstone/install/include".split(';') if "/home/kinova/MillenCapstone/MadalynMillenCapstone/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lusb_cam".split(';') if "-lusb_cam" != "" else []
PROJECT_NAME = "usb_cam"
PROJECT_SPACE_DIR = "/home/kinova/MillenCapstone/MadalynMillenCapstone/install"
PROJECT_VERSION = "0.3.6"
| 60.666667 | 197 | 0.771062 |
7947d6f31a003375bb2110cc75c25710b9b5ee80 | 5,088 | py | Python | capreolus/reranker/ptparade.py | nimasadri11/capreolus | 27b081ec1a37d2af6afa6b61eb1cb7cc4ec9db1c | [
"Apache-2.0"
] | 77 | 2019-12-01T20:48:14.000Z | 2022-03-11T14:45:54.000Z | capreolus/reranker/ptparade.py | nimasadri11/capreolus | 27b081ec1a37d2af6afa6b61eb1cb7cc4ec9db1c | [
"Apache-2.0"
] | 106 | 2020-01-30T18:01:16.000Z | 2022-02-11T18:05:16.000Z | capreolus/reranker/ptparade.py | nimasadri11/capreolus | 27b081ec1a37d2af6afa6b61eb1cb7cc4ec9db1c | [
"Apache-2.0"
] | 30 | 2020-01-31T08:50:40.000Z | 2022-01-25T05:51:11.000Z | import torch
from torch import nn
from transformers import BertModel, ElectraModel
from transformers.models.bert.modeling_bert import BertLayer
from capreolus import ConfigOption, Dependency
from capreolus.reranker import Reranker
class PTParade_Class(nn.Module):
def __init__(self, extractor, config, *args, **kwargs):
super().__init__(*args, **kwargs)
self.extractor = extractor
self.config = config
if config["pretrained"] == "electra-base-msmarco":
self.bert = ElectraModel.from_pretrained("Capreolus/electra-base-msmarco")
elif config["pretrained"] == "bert-base-msmarco":
self.bert = BertModel.from_pretrained("Capreolus/bert-base-msmarco")
elif config["pretrained"] == "bert-base-uncased":
self.bert = BertModel.from_pretrained("bert-base-uncased")
else:
raise ValueError(
f"unsupported model: {config['pretrained']}; need to ensure correct tokenizers will be used before arbitrary hgf models are supported"
)
self.transformer_layer_1 = BertLayer(self.bert.config)
self.transformer_layer_2 = BertLayer(self.bert.config)
self.num_passages = extractor.config["numpassages"]
self.maxseqlen = extractor.config["maxseqlen"]
self.linear = nn.Linear(self.bert.config.hidden_size, 1)
if config["aggregation"] == "max":
raise NotImplementedError()
elif config["aggregation"] == "avg":
raise NotImplementedError()
elif config["aggregation"] == "attn":
raise NotImplementedError()
elif config["aggregation"] == "transformer":
self.aggregation = self.aggregate_using_transformer
input_embeddings = self.bert.get_input_embeddings()
# TODO hardcoded CLS token id
cls_token_id = torch.tensor([[101]])
self.initial_cls_embedding = input_embeddings(cls_token_id).view(1, self.bert.config.hidden_size)
self.full_position_embeddings = torch.zeros(
(1, self.num_passages + 1, self.bert.config.hidden_size), requires_grad=True, dtype=torch.float
)
torch.nn.init.normal_(self.full_position_embeddings, mean=0.0, std=0.02)
self.initial_cls_embedding = nn.Parameter(self.initial_cls_embedding, requires_grad=True)
self.full_position_embeddings = nn.Parameter(self.full_position_embeddings, requires_grad=True)
else:
raise ValueError(f"unknown aggregation type: {self.config['aggregation']}")
def aggregate_using_transformer(self, cls):
expanded_cls = cls.view(-1, self.num_passages, self.bert.config.hidden_size)
# TODO make sure batch size here is correct
batch_size = expanded_cls.shape[0]
tiled_initial_cls = self.initial_cls_embedding.repeat(batch_size, 1)
merged_cls = torch.cat((tiled_initial_cls.view(batch_size, 1, self.bert.config.hidden_size), expanded_cls), dim=1)
merged_cls = merged_cls + self.full_position_embeddings
(transformer_out_1,) = self.transformer_layer_1(merged_cls, None, None, None)
(transformer_out_2,) = self.transformer_layer_2(transformer_out_1, None, None, None)
aggregated = transformer_out_2[:, 0, :]
return aggregated
def forward(self, doc_input, doc_mask, doc_seg):
batch_size = doc_input.shape[0]
doc_input = doc_input.view((batch_size * self.num_passages, self.maxseqlen))
doc_mask = doc_mask.view((batch_size * self.num_passages, self.maxseqlen))
doc_seg = doc_seg.view((batch_size * self.num_passages, self.maxseqlen))
cls = self.bert(doc_input, attention_mask=doc_mask, token_type_ids=doc_seg)[0][:, 0, :]
aggregated = self.aggregation(cls)
return self.linear(aggregated)
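    # Shape walk-through (added note, inferred from the code above, not from the
    # original source): doc_input arrives as (batch, num_passages, maxseqlen),
    # is flattened to (batch * num_passages, maxseqlen) for BERT, the per-passage
    # [CLS] vectors of shape (batch * num_passages, hidden) are aggregated back
    # to (batch, hidden), and the linear head returns one score per document.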
@Reranker.register
class PTParade(Reranker):
"""
PyTorch implementation of PARADE.
PARADE: Passage Representation Aggregation for Document Reranking.
Canjia Li, Andrew Yates, Sean MacAvaney, Ben He, and Yingfei Sun. arXiv 2020.
https://arxiv.org/pdf/2008.09093.pdf
"""
module_name = "ptparade"
dependencies = [
Dependency(key="extractor", module="extractor", name="pooledbertpassage"),
Dependency(key="trainer", module="trainer", name="pytorch"),
]
config_spec = [
ConfigOption(
"pretrained", "bert-base-uncased", "Pretrained model: bert-base-uncased, bert-base-msmarco, or electra-base-msmarco"
),
ConfigOption("aggregation", "transformer"),
]
def build_model(self):
if not hasattr(self, "model"):
self.model = PTParade_Class(self.extractor, self.config)
return self.model
def score(self, d):
return [
self.model(d["pos_bert_input"], d["pos_mask"], d["pos_seg"]).view(-1),
self.model(d["neg_bert_input"], d["neg_mask"], d["neg_seg"]).view(-1),
]
def test(self, d):
return self.model(d["pos_bert_input"], d["pos_mask"], d["pos_seg"]).view(-1)
| 43.487179 | 150 | 0.666863 |
7947d7e87955580e6d48bd07c5feb64785c360ee | 565 | py | Python | test_example.py | IvankaK/Testing | 2ce0ee645b2172b37f3c8c8ce4461b60e2180321 | [
"Apache-2.0"
] | null | null | null | test_example.py | IvankaK/Testing | 2ce0ee645b2172b37f3c8c8ce4461b60e2180321 | [
"Apache-2.0"
] | null | null | null | test_example.py | IvankaK/Testing | 2ce0ee645b2172b37f3c8c8ce4461b60e2180321 | [
"Apache-2.0"
] | 1 | 2021-12-08T17:10:05.000Z | 2021-12-08T17:10:05.000Z | import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@pytest.fixture
def driver(request):
wd = webdriver.Chrome()
print(wd.capabilities)
request.addfinalizer(wd.quit)
return wd
def test_example(driver):
driver.get("http://www.google.com/")
driver.find_element_by_name("q").send_keys("webdriver")
driver.find_element_by_name("btnG").click()
WebDriverWait(driver, 10).until(EC.title_is("webdriver - Поиск в Google")) | 29.736842 | 78 | 0.757522 |
7947d92b271bd5c2c437556c4ad924618395b6c2 | 2,765 | py | Python | rlscope/parser/overlap_result.py | UofT-EcoSystem/rlscope | cdd9bbdc2a3a832be24f20105b8c9fe28149cb63 | [
"Apache-2.0"
] | 35 | 2021-01-26T22:34:17.000Z | 2022-03-02T01:25:11.000Z | rlscope/parser/overlap_result.py | UofT-EcoSystem/rlscope | cdd9bbdc2a3a832be24f20105b8c9fe28149cb63 | [
"Apache-2.0"
] | 1 | 2022-03-15T01:40:03.000Z | 2022-03-15T01:40:03.000Z | rlscope/parser/overlap_result.py | UofT-EcoSystem/rlscope | cdd9bbdc2a3a832be24f20105b8c9fe28149cb63 | [
"Apache-2.0"
] | 1 | 2021-03-17T08:49:07.000Z | 2021-03-17T08:49:07.000Z | """
Reading overlap results from ``rls-analyze``.
"""
from rlscope.parser.common import *
class CategoryKey:
def __init__(self):
self.procs = frozenset()
self.ops = frozenset()
self.non_ops = frozenset()
@staticmethod
def from_js(obj):
self = CategoryKey()
assert obj['typename'] == 'CategoryKey'
self.procs = frozenset(obj['procs'])
self.ops = frozenset(obj['ops'])
self.non_ops = frozenset(obj['non_ops'])
return self
def __eq__(self, rhs):
lhs = self
return lhs.procs == rhs.procs and \
lhs.ops == rhs.ops and \
lhs.non_ops == rhs.non_ops
def __hash__(self):
return hash((self.procs, self.ops, self.non_ops))
def __str__(self):
bldr = ToStringBuilder(obj=self)
bldr.add_param('procs', self.procs)
bldr.add_param('ops', self.ops)
bldr.add_param('non_ops', self.non_ops)
return bldr.to_string()
def __repr__(self):
return str(self)
# class OverlapResult:
# def __init__(self):
# self.procs = frozenset()
# self.ops = frozenset()
# self.non_ops = frozenset()
#
# @staticmethod
# def from_js(obj):
# self = OverlapResult()
# self.overlap_map = dict()
# assert obj['typename'] == 'CategoryKey'
# self.procs = frozenset(obj['procs'])
# self.ops = frozenset(obj['ops'])
# self.non_ops = frozenset(obj['non_ops'])
# return self
def from_js(obj, mutable=True):
if type(obj) == dict and 'typename' in obj:
if obj['typename'] == 'dict':
return dict_from_js(obj, mutable=mutable)
elif obj['typename'] in JS_TYPENAME_TO_KLASS:
Klass = JS_TYPENAME_TO_KLASS[obj['typename']]
parsed = Klass.from_js(obj)
return parsed
else:
raise NotImplementedError("Not sure how to parse js object with typename={typename}".format(typename=obj['typename']))
elif type(obj) == list:
if mutable:
return [from_js(x, mutable=mutable) for x in obj]
else:
return tuple(from_js(x, mutable=mutable) for x in obj)
else:
return obj
def dict_from_js(obj, mutable=True):
assert obj['typename'] == 'dict'
d = dict()
for key, value in obj['key_value_pairs']:
parsed_key = from_js(key, mutable=False)
# if type(parsed_key) == list:
# parsed_key = tuple(parsed_key)
# elif type(parsed_key) == set:
# parsed_key = frozenset(parsed_key)
d[parsed_key] = from_js(value, mutable=mutable)
return d
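# Illustrative sketch (added, not part of the original module): a hand-written
# js-style dict in the format consumed by from_js/dict_from_js above and its
# parsed form; the key/value content is an assumption for the example only.
def _example_from_js():
    obj = {'typename': 'dict',
           'key_value_pairs': [[['ops'], 42]]}
    # List keys are parsed immutably, so the result is {('ops',): 42}.
    return from_js(obj)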
JS_TYPENAME_TO_KLASS = {
'CategoryKey': CategoryKey,
# 'OverlapResult': OverlapResult,
}
| 30.722222 | 130 | 0.588065 |
7947daffed4d4782a8432d2be0e58e95c3f9e42f | 17,407 | py | Python | sfepy/discrete/common/dof_info.py | antonykamp/sfepy | 8213d3c8cc2825602b41dc65eb543b575856ca8c | [
"BSD-3-Clause"
] | null | null | null | sfepy/discrete/common/dof_info.py | antonykamp/sfepy | 8213d3c8cc2825602b41dc65eb543b575856ca8c | [
"BSD-3-Clause"
] | null | null | null | sfepy/discrete/common/dof_info.py | antonykamp/sfepy | 8213d3c8cc2825602b41dc65eb543b575856ca8c | [
"BSD-3-Clause"
] | null | null | null | """
Classes holding information on global DOFs and mapping of all DOFs -
equations (active DOFs).
Helper functions for the equation mapping.
"""
import numpy as nm
import scipy.sparse as sp
from sfepy.base.base import assert_, Struct, basestr
from sfepy.discrete.functions import Function
from sfepy.discrete.conditions import get_condition_value, EssentialBC, \
PeriodicBC, DGPeriodicBC, DGEssentialBC
def expand_nodes_to_dofs(nods, n_dof_per_node):
"""
Expand DOF node indices into DOFs given a constant number of DOFs
per node.
"""
dofs = nm.repeat(nods, n_dof_per_node)
dofs.shape = (nods.shape[0], n_dof_per_node)
idof = nm.arange(n_dof_per_node, dtype=nm.int32)
dofs = n_dof_per_node * dofs + idof
return dofs
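# Illustrative sketch (added, not part of the original module): nodes [2, 5]
# with three DOFs per node expand to [[6, 7, 8], [15, 16, 17]].
def _example_expand_nodes_to_dofs():
    nods = nm.array([2, 5], dtype=nm.int32)
    return expand_nodes_to_dofs(nods, 3)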
def expand_nodes_to_equations(nods, dof_names, all_dof_names):
"""
Expand vector of node indices to equations (DOF indices) based on
the DOF-per-node count.
DOF names must be already canonized.
Returns
-------
eq : array
The equations/DOF indices in the node-by-node order.
"""
dpn = len(all_dof_names)
nc = len(dof_names)
eq = nm.empty(len(nods) * nc, dtype=nm.int32)
for ii, dof in enumerate(dof_names):
idof = all_dof_names.index(dof)
eq[ii::nc] = dpn * nods + idof
return eq
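# Illustrative sketch (added, not part of the original module): with nodes
# [2, 5], the DOFs ('u.0', 'u.2') of a variable whose full DOF list is
# ('u.0', 'u.1', 'u.2') map to the global equations [6, 8, 15, 17].
def _example_expand_nodes_to_equations():
    nods = nm.array([2, 5], dtype=nm.int32)
    return expand_nodes_to_equations(nods, ['u.0', 'u.2'],
                                     ['u.0', 'u.1', 'u.2'])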
def resolve_chains(master_slave, chains):
"""
Resolve EPBC chains - e.g. in corner nodes.
"""
for chain in chains:
slave = chain[-1]
master_slave[chain[:-1]] = slave + 1
master_slave[slave] = - chain[0] - 1 # Any of masters...
def group_chains(chain_list):
"""
Group EPBC chains.
"""
chains = []
while len(chain_list):
chain = set(chain_list.pop(0))
## print ':', chain
ii = 0
while ii < len(chain_list):
c1 = sorted(chain_list[ii])
## print '--', ii, c1, chain
is0 = c1[0] in chain
is1 = c1[1] in chain
if is0 and is1:
chain_list.pop(ii)
elif is0 or is1:
chain.update(c1)
chain_list.pop(ii)
ii = 0
else:
ii += 1
## print ii, chain, chain_list
## print '->', chain
## print chain_list
chains.append(list(chain))
## print 'EPBC chain groups:', chains
aux = {}
for chain in chains:
aux.setdefault(len(chain), [0])[0] += 1
## print 'EPBC chain counts:', aux
return chains
class DofInfo(Struct):
"""
Global DOF information, i.e. ordering of DOFs of the state (unknown)
variables in the global state vector.
"""
def __init__(self, name):
Struct.__init__(self, name=name)
self.n_var = 0
self.var_names = []
self.n_dof = {}
self.ptr = [0]
self.indx = {}
self.details = {}
def _update_after_append(self, name):
self.ptr.append(self.ptr[-1] + self.n_dof[name])
ii = self.n_var
self.indx[name] = slice(int(self.ptr[ii]), int(self.ptr[ii+1]))
self.n_var += 1
def append_variable(self, var, active=False):
"""
Append DOFs of the given variable.
Parameters
----------
var : Variable instance
The variable to append.
active : bool, optional
When True, only active (non-constrained) DOFs are considered.
"""
name = var.name
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = var.get_dof_info(active=active)
self._update_after_append(name)
def append_raw(self, name, n_dof):
"""
Append raw DOFs.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if name in self.var_names:
raise ValueError('variable %s already present!' % name)
self.var_names.append(name)
self.n_dof[name], self.details[name] = n_dof, None
self._update_after_append(name)
def update(self, name, n_dof):
"""
Set the number of DOFs of the given variable.
Parameters
----------
name : str
The name of variable the DOFs correspond to.
n_dof : int
The number of DOFs.
"""
if not name in self.var_names:
raise ValueError('variable %s is not present!' % name)
ii = self.var_names.index(name)
delta = n_dof - self.n_dof[name]
self.n_dof[name] = n_dof
for iv, nn in enumerate(self.var_names[ii:]):
self.ptr[ii+iv+1] += delta
self.indx[nn] = slice(self.ptr[ii+iv], self.ptr[ii+iv+1])
def get_info(self, var_name):
"""
Return information on DOFs of the given variable.
Parameters
----------
var_name : str
The name of the variable.
"""
return Struct(name='%s_dof_info' % var_name,
var_name=var_name,
n_dof=self.n_dof[var_name],
indx=self.indx[var_name],
details=self.details[var_name])
def get_subset_info(self, var_names):
"""
Return global DOF information for selected variables
only. Silently ignores non-existing variable names.
Parameters
----------
var_names : list
The names of the selected variables.
"""
di = DofInfo(self.name + ':subset')
for var_name in var_names:
if var_name not in self.var_names:
continue
di.append_raw(var_name, self.n_dof[var_name])
return di
def get_n_dof_total(self):
"""
Return the total number of DOFs of all state variables.
"""
return self.ptr[-1]
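# Illustrative sketch (added, not part of the original module): two raw
# variables of sizes 6 and 3 occupy slices [0, 6) and [6, 9) of the global
# state vector, for a total of 9 DOFs; the names are assumptions.
def _example_dof_info():
    di = DofInfo('example')
    di.append_raw('u', 6)
    di.append_raw('p', 3)
    return di.indx['u'], di.indx['p'], di.get_n_dof_total()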
def is_active_bc(bc, ts=None, functions=None):
"""
Check whether the given boundary condition is active in the current
time.
Returns
-------
active : bool
True if the condition `bc` is active.
"""
if (bc.times is None) or (ts is None):
active = True
elif isinstance(bc.times, list):
for tt in bc.times:
if tt[0] <= ts.time < tt[1]:
active = True
break
else:
active = False
else:
if isinstance(bc.times, basestr):
if functions is not None:
fun = functions[bc.times]
else:
raise ValueError('no functions given for bc %s!' % bc.name)
elif isinstance(bc.times, Function):
fun = bc.times
else:
raise ValueError('unknown times type! (%s)'
% type(bc.times))
active = fun(ts)
return active
class EquationMap(Struct):
"""
Map all DOFs to equations for active DOFs.
"""
def __init__(self, name, dof_names, var_di):
Struct.__init__(self, name=name, dof_names=dof_names, var_di=var_di)
self.dpn = len(self.dof_names)
self.eq = nm.arange(var_di.n_dof, dtype=nm.int32)
self.n_dg_ebc = 0
self.dg_ebc_names = {}
self.dg_ebc = {}
self.dg_ebc_val = {}
self.n_dg_epbc = 0
self.dg_epbc_names = []
self.dg_epbc = []
def _init_empty(self, field):
self.val_ebc = nm.empty((0,), dtype=field.dtype)
if field.get('unused_dofs') is None:
self.eqi = nm.arange(self.var_di.n_dof, dtype=nm.int32)
else:
self._mark_unused(field)
self.eqi = nm.compress(self.eq >= 0, self.eq)
self.eq[self.eqi] = nm.arange(self.eqi.shape[0], dtype=nm.int32)
self.eq_ebc = nm.empty((0,), dtype=nm.int32)
self.master = nm.empty((0,), dtype=nm.int32)
self.slave = nm.empty((0,), dtype=nm.int32)
self.n_eq = self.eqi.shape[0]
self.n_ebc = self.eq_ebc.shape[0]
self.n_epbc = self.master.shape[0]
def _mark_unused(self, field):
unused_dofs = field.get('unused_dofs')
if unused_dofs is not None:
unused = expand_nodes_to_equations(field.unused_dofs,
self.dof_names, self.dof_names)
self.eq[unused] = -3
def map_equations(self, bcs, field, ts, functions, problem=None,
warn=False):
"""
Create the mapping of active DOFs from/to all DOFs.
Parameters
----------
bcs : Conditions instance
The Dirichlet or periodic boundary conditions (single
condition instances). The dof names in the conditions must
already be canonized.
field : Field instance
The field of the variable holding the DOFs.
ts : TimeStepper instance
The time stepper.
functions : Functions instance
The registered functions.
problem : Problem instance, optional
The problem that can be passed to user functions as a context.
warn : bool, optional
If True, warn about BC on non-existent nodes.
Returns
-------
active_bcs : set
The set of boundary conditions active in the current time.
Notes
-----
- Periodic bc: master and slave DOFs must belong to the same
field (variables can differ, though).
"""
if bcs is None:
self._init_empty(field)
return set()
eq_ebc = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
val_ebc = nm.zeros((self.var_di.n_dof,), dtype=field.dtype)
master_slave = nm.zeros((self.var_di.n_dof,), dtype=nm.int32)
chains = []
active_bcs = set()
for bc in bcs:
# Skip conditions that are not active in the current time.
if not is_active_bc(bc, ts=ts, functions=functions):
continue
active_bcs.add(bc.key)
if isinstance(bc, DGEssentialBC):
ntype = "DGEBC"
region = bc.region
elif isinstance(bc, DGPeriodicBC):
ntype = "DGEPBC"
region = bc.regions[0]
elif isinstance(bc, EssentialBC):
ntype = 'EBC'
region = bc.region
elif isinstance(bc, PeriodicBC):
ntype = 'EPBC'
region = bc.regions[0]
if warn:
clean_msg = ('warning: ignoring nonexistent %s node (%s) in '
% (ntype, self.var_di.var_name))
else:
clean_msg = None
# Get master region nodes.
master_nod_list = field.get_dofs_in_region(region)
if len(master_nod_list) == 0:
continue
if ntype == 'EBC': # EBC.
dofs, val = bc.dofs
##
# Evaluate EBC values.
fun = get_condition_value(val, functions, 'EBC', bc.name)
if isinstance(fun, Function):
aux = fun
fun = lambda coors: aux(ts, coors,
bc=bc, problem=problem)
nods, vv = field.set_dofs(fun, region, len(dofs), clean_msg)
eq = expand_nodes_to_equations(nods, dofs, self.dof_names)
# Duplicates removed here...
eq_ebc[eq] = 1
if vv is not None: val_ebc[eq] = nm.ravel(vv)
elif ntype == "DGEBC":
dofs, val = bc.dofs
##
# Evaluate EBC values.
fun = get_condition_value(val, functions, 'EBC', bc.name)
if isinstance(fun, Function):
aux = fun
fun = lambda coors: aux(ts, coors,
bc=bc, problem=problem)
values = field.get_bc_facet_values(fun, region, diff=bc.diff)
bc2bfi = field.get_bc_facet_idx(region)
self.dg_ebc_val.setdefault(bc.diff, []).append(values)
self.dg_ebc.setdefault(bc.diff, []).append(bc2bfi)
self.n_dg_ebc += 1
elif ntype == "DGEPBC":
# ensure matching boundaries?
master_bc2bfi = field.get_bc_facet_idx(region)
slave_bc2bfi = field.get_bc_facet_idx(bc.regions[1])
self.dg_epbc.append((master_bc2bfi, slave_bc2bfi))
self.n_dg_epbc += 1
else: # EPBC.
region = bc.regions[1]
slave_nod_list = field.get_dofs_in_region(region)
nmaster = nm.unique(master_nod_list)
# Treat fields not covering the whole domain.
if nmaster[0] == -1:
nmaster = nmaster[1:]
nslave = nm.unique(slave_nod_list)
# Treat fields not covering the whole domain.
if nslave[0] == -1:
nslave = nslave[1:]
## print nmaster + 1
## print nslave + 1
if nmaster.shape != nslave.shape:
msg = 'EPBC list lengths do not match!\n(%s,\n %s)' %\
(nmaster, nslave)
raise ValueError(msg)
if (nmaster.shape[0] == 0) and (nslave.shape[0] == 0):
continue
mcoor = field.get_coor(nmaster)
scoor = field.get_coor(nslave)
fun = get_condition_value(bc.match, functions, 'EPBC', bc.name)
if isinstance(fun, Function):
i1, i2 = fun(mcoor, scoor)
else:
i1, i2 = fun
## print nm.c_[mcoor[i1], scoor[i2]]
## print nm.c_[nmaster[i1], nslave[i2]] + 1
meq = expand_nodes_to_equations(nmaster[i1], bc.dofs[0],
self.dof_names)
seq = expand_nodes_to_equations(nslave[i2], bc.dofs[1],
self.dof_names)
m_assigned = nm.where(master_slave[meq] != 0)[0]
s_assigned = nm.where(master_slave[seq] != 0)[0]
if m_assigned.size or s_assigned.size: # Chain EPBC.
aux = master_slave[meq[m_assigned]]
sgn = nm.sign(aux)
om_chain = zip(meq[m_assigned], (aux - sgn) * sgn)
chains.extend(om_chain)
aux = master_slave[seq[s_assigned]]
sgn = nm.sign(aux)
os_chain = zip(seq[s_assigned], (aux - sgn) * sgn)
chains.extend(os_chain)
m_chain = zip(meq[m_assigned], seq[m_assigned])
chains.extend(m_chain)
msd = nm.setdiff1d(s_assigned, m_assigned)
s_chain = zip(meq[msd], seq[msd])
chains.extend(s_chain)
msa = nm.union1d(m_assigned, s_assigned)
ii = nm.setdiff1d(nm.arange(meq.size), msa)
master_slave[meq[ii]] = seq[ii] + 1
master_slave[seq[ii]] = - meq[ii] - 1
else:
master_slave[meq] = seq + 1
master_slave[seq] = - meq - 1
chains = group_chains(chains)
resolve_chains(master_slave, chains)
ii = nm.argwhere(eq_ebc == 1)
self.eq_ebc = nm.atleast_1d(ii.squeeze())
self.val_ebc = nm.atleast_1d(val_ebc[ii].squeeze())
# add axis in case we squeezed too hard
self.master = nm.atleast_1d(nm.argwhere(master_slave > 0).squeeze())
self.slave = master_slave[self.master] - 1
assert_((self.eq_ebc.shape == self.val_ebc.shape))
self.eq[self.eq_ebc] = -2
self.eq[self.master] = -1
self._mark_unused(field)
self.eqi = nm.compress(self.eq >= 0, self.eq)
self.eq[self.eqi] = nm.arange(self.eqi.shape[0], dtype=nm.int32)
self.eq[self.master] = self.eq[self.slave]
self.n_eq = self.eqi.shape[0]
self.n_ebc = self.eq_ebc.shape[0]
self.n_epbc = self.master.shape[0]
return active_bcs
def get_operator(self):
"""
Get the matrix operator :math:`R` corresponding to the equation
mapping, such that the restricted matrix :math:`A_r` can be
obtained from the full matrix :math:`A` by :math:`A_r = R^T A
R`. All the matrices are w.r.t. a single variables that uses
this mapping.
Returns
-------
mtx : coo_matrix
The matrix :math:`R`.
"""
# EBC.
rows = self.eqi
cols = nm.arange(self.n_eq, dtype=nm.int32)
# EPBC.
ic = self.eq[self.slave]
ii = ic >= 0
rows = nm.r_[rows, self.master[ii]]
cols = nm.r_[cols, ic[ii]]
ones = nm.ones(rows.shape[0], dtype=nm.float64)
mtx = sp.coo_matrix((ones, (rows, cols)),
shape=(self.eq.shape[0], self.n_eq))
return mtx
| 31.70674 | 79 | 0.5295 |
7947dbf338ac521eca55c8670d4c75ada14c51be | 2,263 | py | Python | GUIScripts/Hotel Management System/Code/recipt.py | Gaurav1401/Awesome_Python_Scripts | e98044cc42a975e81d880b27546fadcdead17a42 | [
"MIT"
] | 2 | 2021-07-12T10:12:56.000Z | 2021-07-12T10:13:10.000Z | GUIScripts/Hotel Management System/Code/recipt.py | Gaurav1401/Awesome_Python_Scripts | e98044cc42a975e81d880b27546fadcdead17a42 | [
"MIT"
] | null | null | null | GUIScripts/Hotel Management System/Code/recipt.py | Gaurav1401/Awesome_Python_Scripts | e98044cc42a975e81d880b27546fadcdead17a42 | [
"MIT"
] | 2 | 2021-10-03T16:22:08.000Z | 2021-10-03T17:35:14.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# GUI module generated by PAGE version 4.17
# In conjunction with Tcl version 8.6
# Oct 07, 2018 01:57:36 PM IST platform: Windows NT
from __main__ import *
import sys
try:
from Tkinter import *
except ImportError:
from tkinter import *
try:
import ttk
py3 = False
except ImportError:
import tkinter.ttk as ttk
py3 = True
fo1=open("recipt.txt","r")
list1=fo1.readlines()
del list1[1]
del list1[2]
del list1[3]
del list1[4]
del list1[5]
list1[0]=list1[0][:-1]
list1[1]=list1[1][:-1]
list1[2]=list1[2][:-1]
list1[3]=list1[3][:-1]
list1[4]=list1[4][:-1]
p='''
@@@@@@@@@@@ PROJECTWORLDS HOTEL AND RESORTS @@@@@@@@@@@@@
@@@@@@@@@@@@ BHILAI CHHATTISGARH@@@@@@@@@@@@@@
@@@@@@@@@@ SERVING GUEST SINCE @@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@ ###2000### @@@@@@@@@@@@@@@@@
@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
NAME-%s
ADDRESS-%s
MOBILE NO.-%s
YOUR TOTAL BILL IS Rs.-%s
YOUR ROOM NUMBER IS %s
'''%(list1[0],list1[1],list1[2],list1[4],list1[3])
class recipt:
def __init__(self):
root=Tk()
'''This class configures and populates the toplevel window.
top is the toplevel containing window.'''
_bgcolor = '#d9d9d9' # X11 color: 'gray85'
_fgcolor = '#000000' # X11 color: 'black'
_compcolor = '#d9d9d9' # X11 color: 'gray85'
_ana1color = '#d9d9d9' # X11 color: 'gray85'
_ana2color = '#d9d9d9' # X11 color: 'gray85'
root.geometry("800x800")
root.title("recipt")
root.configure(background="#d9d9d9")
self.Label1 = Label(root)
self.Label1.configure(background="#d9d9d9")
self.Label1.place(relx=0, rely=0, height=800, width=800)
self.Label1.configure(disabledforeground="#a3a3a3")
self.Label1.configure(foreground="#000000")
self.Label1.configure(text=p)
self.Label1.configure(anchor=N)
self.Label1.configure(wraplength=1000)
self.Label1.configure(justify =LEFT)
self.Label1.configure(width=582)
root.mainloop()
if __name__ == '__main__':
recipt1=recipt()
| 24.074468 | 68 | 0.551038 |
7947dc0a4e71a3106d21b1d4e16e816652b8e7e7 | 1,321 | py | Python | line_follower_skeleton.py | MartinCoderDojo/bit-bot-line-follower | 72e5b557a70f66c2790d05ae5fd3ccc1634b950f | [
"MIT"
] | null | null | null | line_follower_skeleton.py | MartinCoderDojo/bit-bot-line-follower | 72e5b557a70f66c2790d05ae5fd3ccc1634b950f | [
"MIT"
] | null | null | null | line_follower_skeleton.py | MartinCoderDojo/bit-bot-line-follower | 72e5b557a70f66c2790d05ae5fd3ccc1634b950f | [
"MIT"
] | null | null | null | from microbit import *
leftLine = pin11
rightLine = pin5
# pin 0 = left speed
# pin 8 = left direction
# pin 1 = right speed
# pin 12 = right direction
# Direction = 0 for forward, 1 for backward
def moveRobot(pin0Val, pin8Val, pin1Val, pin12Val):
pin0.write_analog(pin0Val)
pin8.write_digital(pin8Val)
pin1.write_analog(pin1Val)
pin12.write_digital(pin12Val)
sleep(100)
def convert(speed):
if speed > 0:
return (speed * 1023) / 100.0
return 1023 - ((abs(speed) * 1023) / 100.0)
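# Illustrative sketch (added, not part of the original exercise): convert()
# maps a signed percentage onto the 0-1023 analog range used by the motor
# driver; the direction pin, not the analog value, encodes the sign.
def _example_convert_values():
    return convert(100), convert(50), convert(-50)  # -> 1023.0, 511.5, 511.5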
def forward(speed):
moveRobot(convert(speed), 0, convert(speed), 0)
def turnRight(speed):
moveRobot(convert(speed), 0, 0, 0)
def turnLeft(speed):
moveRobot(0, 0, convert(speed), 0)
def stop():
moveRobot(0, 0, 0, 0)
def reverse(speed):
moveRobot(convert(-speed), 1, convert(-speed), 1)
# Left sensor and right sensor both 1: on black line
# Left sensor 1 and right sensor 0: left sensor is on the line
# Left sensor 0 and right sensor 1: right sensor is on the line
# Left sensor and right sensor both 0: lost the line
while True:
lline = leftLine.read_digital()
rline = rightLine.read_digital()
if ((lline == 1) and (rline == 1)):
forward(100)
elif ((lline == 1) and (rline == 0)):
        # left sensor on the line: robot drifted right, steer back to the left
        # (the turn/reverse speeds below are assumptions for this sketch)
        turnLeft(60)
    elif ((lline == 0) and (rline == 1)):
        # right sensor on the line: robot drifted left, steer back to the right
        turnRight(60)
    else:
        # lost the line: back up slowly until a sensor finds it again
        reverse(50)
| 20.640625 | 63 | 0.637396 |
7947dc776d4a209ea6dcc970e5d48c72006dc88a | 184 | py | Python | webApi/books_api/book/urls.py | FreeN1ckname/web_api | 50b6ffc03f918e25d36ff11caa1cf5d83628646b | [
"MIT"
] | null | null | null | webApi/books_api/book/urls.py | FreeN1ckname/web_api | 50b6ffc03f918e25d36ff11caa1cf5d83628646b | [
"MIT"
] | null | null | null | webApi/books_api/book/urls.py | FreeN1ckname/web_api | 50b6ffc03f918e25d36ff11caa1cf5d83628646b | [
"MIT"
] | null | null | null | from django.urls import path
from .views import BookView
app_name = "books"
urlpatterns = [
path('books/', BookView.as_view()),
path('books/<int:pk>', BookView.as_view())
]
| 16.727273 | 46 | 0.673913 |
7947dcd96d795289937fadd931a974395bcc5d6d | 1,938 | py | Python | apps/Training/models.py | MarkyMark1000/AWS---PYTHON---COPY---MYWEBSITE | e6a6a76376d122b224d4744314e687f660aad770 | [
"MIT"
] | 1 | 2022-01-30T07:30:06.000Z | 2022-01-30T07:30:06.000Z | apps/Training/models.py | MarkyMark1000/AWS---PYTHON---COPY---MYWEBSITE | e6a6a76376d122b224d4744314e687f660aad770 | [
"MIT"
] | 5 | 2020-03-12T19:22:55.000Z | 2022-02-10T14:19:21.000Z | apps/Training/models.py | MarkyMark1000/AWS---PYTHON---COPY---MYWEBSITE | e6a6a76376d122b224d4744314e687f660aad770 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
import datetime
from django.urls import reverse
# Create your models here.
class TrainingGroup(models.Model):
title = models.CharField(max_length=15)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return self.title
def get_absolute_url(self):
# Useful for sitemap (training_list from urls.py)
return reverse('training_list', args=[str(self.id)])
class Meta:
ordering = ['id']
class TrainingCourse(models.Model):
title = models.CharField(max_length=30)
img = models.CharField(max_length=50)
date = models.DateField("Date", default=datetime.date.today)
link_text = models.CharField(max_length=10, default="")
link_href = models.CharField(max_length=250, default="")
code_text = models.CharField(max_length=20, default="", blank=True)
code_href = models.CharField(max_length=250, default="", blank=True)
short_text = models.CharField(max_length=50, default="")
main_text = models.TextField(
default="main training course description ...",
null=True,
blank=True)
group = models.ForeignKey(TrainingGroup, on_delete=models.CASCADE)
updated_at = models.DateTimeField(auto_now=True)
# Please note, I was unsure whether to use auto_now based upon the
# following articles:
# https://stackoverflow.com/questions/3429878/automatic-creation-date-for-
# django-model-form-objects
# https://stackoverflow.com/questions/1737017/django-auto-now-and-auto-now-
# add/1737078#1737078
def __str__(self):
return self.title
def get_absolute_url(self):
# Useful for sitemap (training_detail from urls.py)
return reverse('training_detail', args=[str(self.id)])
class Meta:
ordering = ['-date', 'id']
| 34.607143 | 79 | 0.674407 |
7947dde487cadbe8b98283a35268c10a35ca2c24 | 12,501 | py | Python | akshare/stock_feature/stock_board_concept_ths.py | euyuil/akshare | 5205796b53a29259831c11413004e405f8a16368 | [
"MIT"
] | null | null | null | akshare/stock_feature/stock_board_concept_ths.py | euyuil/akshare | 5205796b53a29259831c11413004e405f8a16368 | [
"MIT"
] | null | null | null | akshare/stock_feature/stock_board_concept_ths.py | euyuil/akshare | 5205796b53a29259831c11413004e405f8a16368 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/10/30 17:10
Desc: Tonghuashun (THS) - boards - concept boards
http://q.10jqka.com.cn/gn/detail/code/301558/
"""
import os
from datetime import datetime
import pandas as pd
import requests
from bs4 import BeautifulSoup
from py_mini_racer import py_mini_racer
from tqdm import tqdm
from akshare.utils import demjson
def _get_js_path_ths(name: str = None, module_file: str = None) -> str:
"""
    Get the path of a JS file (looked up relative to the module's directory)
    :param name: file name
    :type name: str
    :param module_file: module path
    :type module_file: str
    :return: path
    :rtype: str
"""
module_folder = os.path.abspath(os.path.dirname(os.path.dirname(module_file)))
module_json_path = os.path.join(module_folder, "stock_feature", name)
return module_json_path
def _get_file_content_ths(file_name: str = "ase.min.js") -> str:
"""
    Get the content of a JS file
    :param file_name: JS file name
    :type file_name: str
    :return: file content
    :rtype: str
"""
setting_file_name = file_name
setting_file_path = _get_js_path_ths(setting_file_name, __file__)
with open(setting_file_path) as f:
file_data = f.read()
return file_data
def __stock_board_concept_name_ths() -> pd.DataFrame:
"""
    THS - boards - concept boards - concept overview page
    http://q.10jqka.com.cn/gn/detail/code/301558/
    :return: names and links of all concept boards
    :rtype: pandas.DataFrame
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36'
}
url = 'http://q.10jqka.com.cn/gn/'
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
html_list = soup.find('div', attrs={'class': 'boxShadow'}).find_all('a', attrs={'target': '_blank'})
name_list = [item.text for item in html_list]
url_list = [item['href'] for item in html_list]
temp_df = pd.DataFrame([name_list, url_list], index=['name', 'url']).T
return temp_df
def stock_board_concept_name_ths() -> pd.DataFrame:
"""
    THS - boards - concept boards - concepts
    http://q.10jqka.com.cn/gn/detail/code/301558/
    :return: names and links of all concept boards
    :rtype: pandas.DataFrame
"""
url = "http://q.10jqka.com.cn/gn/index/field/addtime/order/desc/page/1/ajax/1/"
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call('v')
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'Cookie': f'v={v_code}'
}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
total_page = soup.find('span', attrs={'class': 'page_info'}).text.split('/')[1]
big_df = pd.DataFrame()
for page in tqdm(range(1, int(total_page)+1), leave=False):
url = f"http://q.10jqka.com.cn/gn/index/field/addtime/order/desc/page/{page}/ajax/1/"
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call('v')
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'Cookie': f'v={v_code}'
}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
soup.find('table', attrs={'class': 'm-table m-pager-table'}).find('tbody')
url_list = []
for item in soup.find('table', attrs={'class': 'm-table m-pager-table'}).find('tbody').find_all('tr'):
inner_url = item.find_all("td")[1].find('a')['href']
url_list.append(inner_url)
temp_df = pd.read_html(r.text)[0]
temp_df['代码'] = url_list
big_df = big_df.append(temp_df, ignore_index=True)
big_df = big_df[[
'日期',
'概念名称',
'成分股数量',
'代码'
]]
big_df['日期'] = pd.to_datetime(big_df['日期']).dt.date
big_df['成分股数量'] = pd.to_numeric(big_df['成分股数量'])
return big_df
def _stock_board_concept_code_ths() -> pd.DataFrame:
"""
    THS - boards - concept boards - concepts
    http://q.10jqka.com.cn/gn/detail/code/301558/
    :return: names and links of all concept boards
    :rtype: pandas.DataFrame
"""
_stock_board_concept_name_ths_df = stock_board_concept_name_ths()
name_list = _stock_board_concept_name_ths_df['概念名称'].tolist()
url_list = [item.split('/')[-2] for item in _stock_board_concept_name_ths_df['代码'].tolist()]
temp_map = dict(zip(name_list, url_list))
return temp_map
def stock_board_concept_cons_ths(symbol: str = "阿里巴巴概念") -> pd.DataFrame:
"""
    THS - boards - concept boards - constituent stocks
    http://q.10jqka.com.cn/gn/detail/code/301558/
    :param symbol: board name
    :type symbol: str
    :return: constituent stocks
    :rtype: pandas.DataFrame
"""
stock_board_ths_map_df = stock_board_concept_name_ths()
symbol = stock_board_ths_map_df[stock_board_ths_map_df['概念名称'] == symbol]['代码'].values[0].split('/')[-2]
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call('v')
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'Cookie': f'v={v_code}'
}
url = f'http://q.10jqka.com.cn/gn/detail/field/264648/order/desc/page/1/ajax/1/code/{symbol}'
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
page_num = int(soup.find_all('a', attrs={'class': 'changePage'})[-1]['page'])
except IndexError as e:
page_num = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num+1), leave=False):
v_code = js_code.call('v')
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'Cookie': f'v={v_code}'
}
url = f'http://q.10jqka.com.cn/gn/detail/field/264648/order/desc/page/{page}/ajax/1/code/{symbol}'
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.rename({"涨跌幅(%)": "涨跌幅",
"涨速(%)": "涨速",
"换手(%)": "换手",
"振幅(%)": "振幅",
}, inplace=True, axis=1)
del big_df['加自选']
big_df['代码'] = big_df['代码'].astype(str).str.zfill(6)
return big_df
def stock_board_concept_info_ths(symbol: str = "阿里巴巴概念") -> pd.DataFrame:
"""
    THS - boards - concept boards - board profile
    http://q.10jqka.com.cn/gn/detail/code/301558/
    :param symbol: board name
    :type symbol: str
    :return: board profile
    :rtype: pandas.DataFrame
"""
stock_board_ths_map_df = stock_board_concept_name_ths()
symbol_code = stock_board_ths_map_df[stock_board_ths_map_df['概念名称'] == symbol]['代码'].values[0].split('/')[-2]
url = f'http://q.10jqka.com.cn/gn/detail/code/{symbol_code}/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
}
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
name_list = [item.text for item in soup.find('div', attrs={'class': 'board-infos'}).find_all('dt')]
value_list = [item.text.strip().replace('\n', '/') for item in soup.find('div', attrs={'class': 'board-infos'}).find_all('dd')]
temp_df = pd.DataFrame([name_list, value_list]).T
temp_df.columns = ['项目', "值"]
return temp_df
def stock_board_concept_hist_ths(start_year: str = '2000', symbol: str = "安防") -> pd.DataFrame:
"""
    THS - boards - concept boards - index data
    http://q.10jqka.com.cn/gn/detail/code/301558/
    :param start_year: start year; e.g., 2019
    :type start_year: str
    :param symbol: board name
    :type symbol: str
    :return: index data for the board
    :rtype: pandas.DataFrame
"""
code_map = _stock_board_concept_code_ths()
symbol_url = f'http://q.10jqka.com.cn/gn/detail/code/{code_map[symbol]}/'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
}
r = requests.get(symbol_url, headers=headers)
soup = BeautifulSoup(r.text, 'lxml')
symbol_code = soup.find('div', attrs={'class': 'board-hq'}).find('span').text
big_df = pd.DataFrame()
current_year = datetime.now().year
for year in tqdm(range(int(start_year), current_year+1), leave=False):
url = f'http://d.10jqka.com.cn/v4/line/bk_{symbol_code}/01/{year}.js'
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'Referer': 'http://q.10jqka.com.cn',
'Host': 'd.10jqka.com.cn'
}
r = requests.get(url, headers=headers)
data_text = r.text
try:
demjson.decode(data_text[data_text.find('{'):-1])
except:
continue
temp_df = demjson.decode(data_text[data_text.find('{'):-1])
temp_df = pd.DataFrame(temp_df['data'].split(';'))
temp_df = temp_df.iloc[:, 0].str.split(',', expand=True)
big_df = big_df.append(temp_df, ignore_index=True)
big_df.columns = [
'日期',
'开盘价',
'最高价',
'最低价',
'收盘价',
'成交量',
'成交额',
'_',
'_',
'_',
'_',
]
big_df = big_df[[
'日期',
'开盘价',
'最高价',
'最低价',
'收盘价',
'成交量',
'成交额',
]]
big_df['日期'] = pd.to_datetime(big_df['日期']).dt.date
big_df['开盘价'] = pd.to_numeric(big_df['开盘价'])
big_df['最高价'] = pd.to_numeric(big_df['最高价'])
big_df['最低价'] = pd.to_numeric(big_df['最低价'])
big_df['收盘价'] = pd.to_numeric(big_df['收盘价'])
big_df['成交量'] = pd.to_numeric(big_df['成交量'])
big_df['成交额'] = pd.to_numeric(big_df['成交额'])
return big_df
def stock_board_cons_ths(symbol: str = "885611") -> pd.DataFrame:
"""
    Constituent stocks of an industry board or a concept board
    http://q.10jqka.com.cn/thshy/detail/code/881121/
    http://q.10jqka.com.cn/gn/detail/code/301558/
    :param symbol: code of the industry board or concept board
    :type symbol: str
    :return: constituent stocks of the industry board or concept board
    :rtype: pandas.DataFrame
"""
js_code = py_mini_racer.MiniRacer()
js_content = _get_file_content_ths("ths.js")
js_code.eval(js_content)
v_code = js_code.call('v')
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'Cookie': f'v={v_code}'
}
url = f'http://q.10jqka.com.cn/thshy/detail/field/199112/order/desc/page/1/ajax/1/code/{symbol}'
r = requests.get(url, headers=headers)
soup = BeautifulSoup(r.text, "lxml")
try:
page_num = int(soup.find_all('a', attrs={'class': 'changePage'})[-1]['page'])
except IndexError as e:
page_num = 1
big_df = pd.DataFrame()
for page in tqdm(range(1, page_num+1), leave=False):
v_code = js_code.call('v')
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.90 Safari/537.36',
'Cookie': f'v={v_code}'
}
url = f'http://q.10jqka.com.cn/thshy/detail/field/199112/order/desc/page/{page}/ajax/1/code/{symbol}'
r = requests.get(url, headers=headers)
temp_df = pd.read_html(r.text)[0]
big_df = big_df.append(temp_df, ignore_index=True)
big_df.rename({"涨跌幅(%)": "涨跌幅",
"涨速(%)": "涨速",
"换手(%)": "换手",
"振幅(%)": "振幅",
}, inplace=True, axis=1)
del big_df['加自选']
big_df['代码'] = big_df['代码'].astype(str).str.zfill(6)
return big_df
if __name__ == '__main__':
stock_board_concept_name_ths_df = stock_board_concept_name_ths()
print(stock_board_concept_name_ths_df)
stock_board_concept_cons_ths_df = stock_board_concept_cons_ths(symbol="PVDF概念")
print(stock_board_concept_cons_ths_df)
stock_board_concept_info_ths_df = stock_board_concept_info_ths(symbol="PVDF概念")
print(stock_board_concept_info_ths_df)
stock_board_concept_hist_ths_df = stock_board_concept_hist_ths(start_year='2021', symbol="PVDF概念")
print(stock_board_concept_hist_ths_df)
stock_board_cons_ths_df = stock_board_cons_ths(symbol="885611")
print(stock_board_cons_ths_df)
| 37.094955 | 143 | 0.62179 |
7947de5ed2b12060285f429af18853ad1924864d | 5,025 | py | Python | bin/ADFRsuite/CCSBpckgs/MolKit2/PDBresidueNames.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | null | null | null | bin/ADFRsuite/CCSBpckgs/MolKit2/PDBresidueNames.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | null | null | null | bin/ADFRsuite/CCSBpckgs/MolKit2/PDBresidueNames.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | 1 | 2021-11-04T21:48:14.000Z | 2021-11-04T21:48:14.000Z | ################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. Michel F. Sanner and TSRI 2016
##
################################################################################
#############################################################################
#
# Author: Michel F. SANNER
#
# Copyright: M. Sanner TSRI 2010
#
#
#############################################################################
#
# $Header: /mnt/raid/services/cvs/MolKit2/PDBresidueNames.py,v 1.1.1.1.4.1 2017/07/26 22:03:40 annao Exp $
#
# $Id: PDBresidueNames.py,v 1.1.1.1.4.1 2017/07/26 22:03:40 annao Exp $
#
##
## This file provides residues names used in PDB for various type of entities
##
DNAnames = {
'DC':'D', 'DG':'G', 'DA':'A', 'DT':'T', 'T':'T', 'DI':'I', 'N':'N',
}
RNAnames = {
'C':'C', 'G':'G', 'A':'A', 'U':'U', 'I':'I', 'N':'N',
}
Nucleotides = DNAnames.copy()
Nucleotides.update(RNAnames)
AAnames = {
'ALA':'A', 'CYS':'C', 'ASP':'D', 'GLU':'E', 'PHE':'F', 'GLY':'G',
'HIS':'H', 'ILE':'I', 'LYS':'K', 'LEU':'L', 'MET':'M', 'ASN':'N',
'PRO':'P', 'GLN':'Q', 'ARG':'R', 'SER':'S', 'THR':'T', 'VAL':'V',
'TRP':'W', 'TYR':'Y',
    # the following would be added automatically if the
    # MODRES record is present in the pdb file but we put
    # them here anyway
'HID':'?', 'HSP':'?', 'HIE':'?', 'HIP':'?', 'CYX':'?',
'CSS':'?', 'ACE':'?', 'MSE':'?', '5HP':'?', 'SOC':'?',
}
##
## list of resames for ions taken from
## http://decogroup.org/ion_list.txt
##
ionNames = {
'1CU':'?',
'2HP':'?',
'2MO':'?',
'2OF':'?',
'3CO':'?',
'3MT':'?',
'3NI':'?',
'4MO':'?',
'543':'?',
'6MO':'?',
'ACT':'?',
'AG':'?',
'AL':'?',
'ALF':'?',
'ATH':'?',
'AU':'?',
'AU3':'?',
'AUC':'?',
'AZI':'?',
'BA':'?',
'BCT':'?',
'BEF':'?',
'BF4':'?',
'BO4':'?',
'BR':'?',
'CA':'?',
'CAC':'?',
'CD':'?',
'CD1':'?',
'CD3':'?',
'CD5':'?',
'CE':'?',
'CHT':'?',
'CL':'?',
'CO':'?',
'CO3':'?',
'CO5':'?',
'CON':'?',
'CR':'?',
'CS':'?',
'CU':'?',
'CU1':'?',
'CUA':'?',
'CUZ':'?',
'CYN':'?',
'DMI':'?',
'E4N':'?',
'EMC':'?',
'EU':'?',
'EU3':'?',
'F':'?',
'FE':'?',
'FE2':'?',
'FPO':'?',
'GA':'?',
'GD3':'?',
'HAI':'?',
'HG':'?',
'HGC':'?',
'HO':'?',
'IN':'?',
'IOD':'?',
'IR':'?',
'IR3':'?',
'IRI':'?',
'IUM':'?',
'K':'?',
'KO4':'?',
'LA':'?',
'LCO':'?',
'LCP':'?',
'LI':'?',
'LU':'?',
'MAC':'?',
'MG':'?',
'MH2':'?',
'MH3':'?',
'MLI':'?',
'MLT':'?',
'MMC':'?',
'MN':'?',
'MN3':'?',
'MN5':'?',
'MO1':'?',
'MO2':'?',
'MO3':'?',
'MO4':'?',
'MO5':'?',
'MO6':'?',
'MOO':'?',
'MOS':'?',
'MW1':'?',
'MW2':'?',
'MW3':'?',
'NA':'?',
'NA2':'?',
'NA5':'?',
'NA6':'?',
'NAO':'?',
'NAW':'?',
'NC':'?',
'NET':'?',
'NH4':'?',
'NI':'?',
'NI1':'?',
'NI2':'?',
'NI3':'?',
'NO2':'?',
'NO3':'?',
'O4M':'?',
'OAA':'?',
'OC1':'?',
'OC2':'?',
'OC3':'?',
'OC4':'?',
'OC5':'?',
'OC6':'?',
'OC7':'?',
'OCL':'?',
'OCM':'?',
'OCN':'?',
'OCO':'?',
'OF1':'?',
'OF2':'?',
'OF3':'?',
'OH':'?',
'OS':'?',
'OXL':'?',
'PB':'?',
'PBM':'?',
'PD':'?',
'PER':'?',
'PI':'?',
'PO3':'?',
'PO4':'?',
'PR':'?',
'PT':'?',
'PT4':'?',
'PTN':'?',
'RB':'?',
'RHD':'?',
'RU':'?',
'SB':'?',
'SCN':'?',
'SE4':'?',
'SM':'?',
'SMO':'?',
'SO3':'?',
'SO4':'?',
'SOH':'?',
'SR':'?',
'TB':'?',
'TCN':'?',
'TEA':'?',
'THE':'?',
'TL':'?',
'TMA':'?',
'TRA':'?',
'UNX':'?',
'V':'?',
'VO4':'?',
'W':'?',
'WO5':'?',
'Y1':'?',
'YB':'?',
'YT3':'?',
'ZN':'?',
'ZN2':'?',
'ZN3':'?',
'ZNO':'?',
'ZO3':'?',
}
waterNames = {'HOH':'?', 'WAT':'?'}
allResidueNames = {}
allResidueNames.update(waterNames)
allResidueNames.update(RNAnames)
allResidueNames.update(AAnames)
allResidueNames.update(DNAnames)
allResidueNames.update(ionNames)
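# Illustrative sketch (added, not part of the original module): typical
# membership and lookup usage of the residue-name tables defined above.
def _example_residue_lookup():
    # 'ALA' is a standard amino acid, 'HOH' is water, 'MG' is an ion.
    return ('ALA' in AAnames, 'HOH' in waterNames, 'MG' in ionNames,
            AAnames['ALA'])  # -> (True, True, True, 'A')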
| 20.1 | 106 | 0.363184 |
7947de8a85f74c935a72f7f328d86563af6e55cb | 65,087 | py | Python | tensorflow_model_analysis/api/model_eval_lib.py | rtg0795/model-analysis | 0f73989a2dfe1e56548f1ccd0001d98846f89e05 | [
"Apache-2.0"
] | null | null | null | tensorflow_model_analysis/api/model_eval_lib.py | rtg0795/model-analysis | 0f73989a2dfe1e56548f1ccd0001d98846f89e05 | [
"Apache-2.0"
] | null | null | null | tensorflow_model_analysis/api/model_eval_lib.py | rtg0795/model-analysis | 0f73989a2dfe1e56548f1ccd0001d98846f89e05 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for Tensorflow Model Analysis."""
# TODO(b/149126671): Put ValidationResultsWriter in a separate file.
import os
import tempfile
from typing import Any, Dict, Iterable, Iterator, List, Optional, Set, Union
from absl import logging
import apache_beam as beam
import pandas as pd
import pyarrow as pa
import tensorflow as tf
from tensorflow_model_analysis import constants
from tensorflow_model_analysis import types
from tensorflow_model_analysis.eval_saved_model import constants as eval_constants
from tensorflow_model_analysis.evaluators import evaluator
from tensorflow_model_analysis.evaluators import legacy_metrics_and_plots_evaluator
from tensorflow_model_analysis.evaluators import metrics_plots_and_validations_evaluator
from tensorflow_model_analysis.extractors import example_weights_extractor
from tensorflow_model_analysis.extractors import extractor
from tensorflow_model_analysis.extractors import features_extractor
from tensorflow_model_analysis.extractors import labels_extractor
from tensorflow_model_analysis.extractors import legacy_predict_extractor
from tensorflow_model_analysis.extractors import predictions_extractor
from tensorflow_model_analysis.extractors import slice_key_extractor
from tensorflow_model_analysis.extractors import sql_slice_key_extractor
from tensorflow_model_analysis.extractors import tfjs_predict_extractor
from tensorflow_model_analysis.extractors import tflite_predict_extractor
from tensorflow_model_analysis.extractors import transformed_features_extractor
from tensorflow_model_analysis.extractors import unbatch_extractor
from tensorflow_model_analysis.post_export_metrics import post_export_metrics
from tensorflow_model_analysis.proto import config_pb2
from tensorflow_model_analysis.proto import metrics_for_slice_pb2
from tensorflow_model_analysis.proto import validation_result_pb2
from tensorflow_model_analysis.slicer import slicer_lib as slicer
from tensorflow_model_analysis.utils import config_util
from tensorflow_model_analysis.utils import model_util
from tensorflow_model_analysis.validators import validator
from tensorflow_model_analysis.view import util as view_util
from tensorflow_model_analysis.view import view_types
from tensorflow_model_analysis.writers import eval_config_writer
from tensorflow_model_analysis.writers import metrics_plots_and_validations_writer
from tensorflow_model_analysis.writers import writer
from tfx_bsl.arrow import table_util
from tfx_bsl.tfxio import raw_tf_record
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import tf_example_record
from tensorflow_metadata.proto.v0 import schema_pb2
def _assert_tensorflow_version():
"""Check that we're using a compatible TF version."""
# Fail with a clear error in case we are not using a compatible TF version.
major, minor, _ = tf.version.VERSION.split('.')
if (int(major) not in (1, 2)) or (int(major) == 1 and int(minor) < 15):
raise RuntimeError(
'Tensorflow version >= 1.15, < 3 is required. Found (%s). Please '
'install the latest 1.x or 2.x version from '
'https://github.com/tensorflow/tensorflow. ' % tf.version.VERSION)
if int(major) == 2:
logging.warning(
'Tensorflow version (%s) found. Note that TFMA support for TF 2.0 '
'is currently in beta', tf.version.VERSION)
def _is_legacy_eval(
config_version: Optional[int],
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels],
eval_config: Optional[config_pb2.EvalConfig]):
"""Returns True if legacy evaluation is being used.
  A legacy evaluation is an evaluation that uses only a single EvalSharedModel,
  has no tags (or uses "eval" as its tag), and does not specify an eval_config.
The legacy evaluation is based on using add_metrics_callbacks to create a
modified version of the graph saved with an EvalSavedModel. The newer version
of evaluation supports both add_metrics_callbacks as well as metrics defined
in MetricsSpecs inside of EvalConfig. The newer version works with both "eval"
and serving models and also supports multi-model evaluation. This function is
used by code to support backwards compatibility for callers that have not
updated to use the new EvalConfig.
Args:
config_version: Optionally, An explicit version of the config determined
elsewhere. This is used to handle cases where the provided eval_config was
generated internally, and thus not a reliable indicator of user intent.
eval_shared_model: Optionally, the model to be evaluated.
eval_config: Optionally, an EvalConfig specifying v2 config.
Returns:
Whether the user inputs should trigger a legacy evaluation.
"""
return ((config_version is not None and config_version == 1) or
(eval_shared_model and not isinstance(eval_shared_model, dict) and
not isinstance(eval_shared_model, list) and
(not eval_shared_model.model_loader.tags or eval_constants.EVAL_TAG
in eval_shared_model.model_loader.tags) and not eval_config))
def _default_eval_config(eval_shared_models: List[types.EvalSharedModel],
slice_spec: Optional[List[slicer.SingleSliceSpec]],
write_config: Optional[bool],
compute_confidence_intervals: Optional[bool],
min_slice_size: int):
"""Creates default EvalConfig (for use in legacy evaluations)."""
model_specs = []
for shared_model in eval_shared_models:
example_weight_key = shared_model.example_weight_key
example_weight_keys = {}
if example_weight_key and isinstance(example_weight_key, dict):
example_weight_keys = example_weight_key
example_weight_key = ''
model_specs.append(
config_pb2.ModelSpec(
name=shared_model.model_name,
example_weight_key=example_weight_key,
example_weight_keys=example_weight_keys))
slicing_specs = None
if slice_spec:
slicing_specs = [s.to_proto() for s in slice_spec]
options = config_pb2.Options()
options.compute_confidence_intervals.value = compute_confidence_intervals
options.min_slice_size.value = min_slice_size
if not write_config:
options.disabled_outputs.values.append(eval_config_writer.EVAL_CONFIG_FILE)
return config_pb2.EvalConfig(
model_specs=model_specs, slicing_specs=slicing_specs, options=options)
def _model_types(
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels]
) -> Optional[Set[str]]:
"""Returns model types associated with given EvalSharedModels."""
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
if not eval_shared_models:
return None
else:
return set([m.model_type for m in eval_shared_models])
def _update_eval_config_with_defaults(
eval_config: config_pb2.EvalConfig,
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels]
) -> config_pb2.EvalConfig:
"""Returns updated eval config with default values."""
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
has_baseline = eval_shared_models and len(eval_shared_models) == 2
return config_util.update_eval_config_with_defaults(
eval_config=eval_config,
has_baseline=has_baseline,
rubber_stamp=model_util.has_rubber_stamp(eval_shared_models))
def _get_extract_num_bytes(extract: types.Extracts) -> int:
"""Returns the number of bytes in the input."""
if constants.ARROW_RECORD_BATCH_KEY in extract:
return extract[constants.ARROW_RECORD_BATCH_KEY].nbytes
if constants.INPUT_KEY in extract:
if isinstance(extract[constants.INPUT_KEY], bytes):
return len(extract[constants.INPUT_KEY])
logging.warning('Failed to extract number of input bytes.')
return 0
def _increment_counter(counter_name: str, value: int) -> int:
"""Increments the specified counter by the value."""
counter = beam.metrics.Metrics.counter(constants.METRICS_NAMESPACE,
counter_name)
counter.inc(value)
return value
@beam.ptransform_fn
def _TrackBytesProcessed( # pylint: disable=invalid-name
dataset: beam.PCollection[types.Extracts]) -> beam.pvalue.PCollection[int]:
"""Gathers telemetry on input Extracts."""
return (dataset
| 'GetExtractSize' >> beam.Map(_get_extract_num_bytes)
| 'SumTotalBytes' >> beam.CombineGlobally(sum)
| 'IncrementCounter' >>
beam.Map(lambda x: _increment_counter('extract_input_bytes', x)))
MetricsForSlice = metrics_for_slice_pb2.MetricsForSlice
def load_metrics(
output_path: str,
output_file_format: str = 'tfrecord') -> Iterator[MetricsForSlice]:
"""Read and deserialize the MetricsForSlice records."""
for m in metrics_plots_and_validations_writer.load_and_deserialize_metrics(
output_path, output_file_format):
yield m
PlotsForSlice = metrics_for_slice_pb2.PlotsForSlice
def load_plots(output_path: str,
output_file_format: str = 'tfrecord') -> Iterator[PlotsForSlice]:
"""Read and deserialize the PlotsForSlice records."""
for p in metrics_plots_and_validations_writer.load_and_deserialize_plots(
output_path, output_file_format):
yield p
AttributionsForSlice = metrics_for_slice_pb2.AttributionsForSlice
def load_attributions(
output_path: str,
output_file_format: str = 'tfrecord') -> Iterator[AttributionsForSlice]:
"""Read and deserialize the AttributionsForSlice records."""
for a in (
metrics_plots_and_validations_writer.load_and_deserialize_attributions(
output_path, output_file_format)):
yield a
# Define types here to avoid type errors between OSS and internal code.
ValidationResult = validation_result_pb2.ValidationResult
def load_validation_result(output_path: str,
output_file_format: str = '') -> ValidationResult:
"""Read and deserialize the ValidationResult."""
return metrics_plots_and_validations_writer.load_and_deserialize_validation_result(
output_path, output_file_format)
def make_eval_results(results: List[view_types.EvalResult],
mode: str) -> view_types.EvalResults:
"""Run model analysis for a single model on multiple data sets.
Args:
results: A list of TFMA evaluation results.
mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
tfma.MODEL_CENTRIC_MODE are supported.
Returns:
An `tfma.view.EvalResults` object containing all evaluation results. This
can be used to construct a time series view.
"""
return view_types.EvalResults(results, mode)
def load_eval_results(
output_paths: Union[str, List[str]],
output_file_format: Optional[str] = 'tfrecord',
mode: str = constants.MODEL_CENTRIC_MODE,
model_name: Optional[str] = None) -> view_types.EvalResults:
"""Loads results for multiple models or multiple data sets.
Args:
output_paths: A single path or list of output paths of completed tfma runs.
output_file_format: Optional file extension to filter files by.
mode: The mode of the evaluation. Currently, tfma.DATA_CENTRIC_MODE and
tfma.MODEL_CENTRIC_MODE are supported.
model_name: Filters to only return results for given model. If unset all
models are returned.
Returns:
An EvalResults containing the evaluation results serialized at output_paths.
This can be used to construct a time series view.
"""
results = []
if not isinstance(output_paths, list):
output_paths = [output_paths]
for output_path in output_paths:
if model_name is None:
_, _, _, model_locations = eval_config_writer.load_eval_run(output_path)
model_names = list(model_locations.keys())
else:
model_names = [model_name]
for model_name in model_names:
results.append(
load_eval_result(
output_path, output_file_format, model_name=model_name))
return make_eval_results(results, mode)
def load_eval_result(output_path: str,
output_file_format: Optional[str] = 'tfrecord',
model_name: Optional[str] = None) -> view_types.EvalResult:
"""Loads EvalResult object for use with the visualization functions.
Args:
output_path: Output directory containing config, metrics, plots, etc.
output_file_format: Optional file extension to filter files by.
model_name: Optional model name. Required if multi-model evaluation was run.
Returns:
EvalResult object for use with the visualization functions.
"""
# Config, metrics, and plots files should all exist under the given output
# directory, but fairness plugin has a use-case where only the metrics are
# provided so we support all files as being optional (the EvalResult will have
# corresponding None values for files that are not present).
eval_config, data_location, file_format, model_locations = (
eval_config_writer.load_eval_run(output_path))
metrics_list = []
for p in metrics_plots_and_validations_writer.load_and_deserialize_metrics(
output_path, output_file_format):
metrics = view_util.convert_metrics_proto_to_dict(p, model_name=model_name)
if metrics is not None:
metrics_list.append(metrics)
plots_list = []
for p in metrics_plots_and_validations_writer.load_and_deserialize_plots(
output_path, output_file_format):
plots = view_util.convert_plots_proto_to_dict(p, model_name=model_name)
if plots is not None:
plots_list.append(plots)
attributions_list = []
for a in metrics_plots_and_validations_writer.load_and_deserialize_attributions(
output_path, output_file_format):
attributions = view_util.convert_attributions_proto_to_dict(
a, model_name=model_name)
if attributions is not None:
attributions_list.append(attributions)
if not model_locations:
model_location = ''
elif model_name is None:
model_location = list(model_locations.values())[0]
else:
model_location = model_locations[model_name]
return view_types.EvalResult(
slicing_metrics=metrics_list,
plots=plots_list,
attributions=attributions_list,
config=eval_config,
data_location=data_location,
file_format=file_format,
model_location=model_location)
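# Illustrative usage sketch (added, not part of the original module); the
# default output path below is an assumption for the example only.
def _example_load_and_print_metrics(output_path: str = '/tmp/tfma_eval_output'):
  """Loads a single EvalResult and prints one entry per slice."""
  result = load_eval_result(output_path)
  for entry in result.slicing_metrics or []:
    print(entry)
  return result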
def default_eval_shared_model(
eval_saved_model_path: str,
add_metrics_callbacks: Optional[List[types.AddMetricsCallbackType]] = None,
include_default_metrics: Optional[bool] = True,
example_weight_key: Optional[Union[str, Dict[str, str]]] = None,
additional_fetches: Optional[List[str]] = None,
blacklist_feature_fetches: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
model_name: str = '',
eval_config: Optional[config_pb2.EvalConfig] = None,
custom_model_loader: Optional[types.ModelLoader] = None,
rubber_stamp: Optional[bool] = False) -> types.EvalSharedModel:
"""Returns default EvalSharedModel.
Args:
eval_saved_model_path: Path to EvalSavedModel.
add_metrics_callbacks: Optional list of callbacks for adding additional
metrics to the graph (see EvalSharedModel for more information on how to
configure additional metrics). Metrics for example count and example
weights will be added automatically. Only used if EvalSavedModel used.
include_default_metrics: DEPRECATED. Use
eval_config.options.include_default_metrics.
example_weight_key: DEPRECATED. Use
eval_config.model_specs.example_weight_key or
eval_config.model_specs.example_weight_keys.
additional_fetches: Optional prefixes of additional tensors stored in
signature_def.inputs that should be fetched at prediction time. The
"features" and "labels" tensors are handled automatically and should not
be included. Only used if EvalSavedModel used.
blacklist_feature_fetches: Optional list of tensor names in the features
dictionary which should be excluded from the fetches request. This is
useful in scenarios where features are large (e.g. images) and can lead to
excessive memory use if stored. Only used if EvalSavedModel used.
tags: Optional model tags (e.g. 'serve' for serving or 'eval' for
EvalSavedModel).
model_name: Optional name of the model being created (should match
ModelSpecs.name). The name should only be provided if multiple models are
being evaluated.
eval_config: Eval config.
custom_model_loader: Optional custom model loader for non-TF models.
    rubber_stamp: True when this run is a first run without a baseline model
      while a baseline is configured; in that case the diff thresholds will be
      ignored.
"""
if not eval_config:
is_baseline = False
model_type = constants.TF_ESTIMATOR
if tags is None:
tags = [eval_constants.EVAL_TAG]
else:
model_spec = model_util.get_model_spec(eval_config, model_name)
if not model_spec:
raise ValueError('ModelSpec for model name {} not found in EvalConfig: '
'config={}'.format(model_name, eval_config))
is_baseline = model_spec.is_baseline
model_type = model_util.get_model_type(model_spec, eval_saved_model_path,
tags)
if tags is None:
# Default to serving unless estimator is used.
if model_type == constants.TF_ESTIMATOR:
tags = [eval_constants.EVAL_TAG]
else:
tags = [tf.saved_model.SERVING]
if model_spec.example_weight_key or model_spec.example_weight_keys:
example_weight_key = (
model_spec.example_weight_key or model_spec.example_weight_keys)
if eval_config.options.HasField('include_default_metrics'):
include_default_metrics = (
eval_config.options.include_default_metrics.value)
# Backwards compatibility for legacy add_metrics_callbacks implementation.
if model_type == constants.TF_ESTIMATOR and eval_constants.EVAL_TAG in tags:
# PyType doesn't know about the magic exports we do in post_export_metrics.
# Additionally, the lines seem to get reordered in compilation, so we can't
# just put the disable-attr on the add_metrics_callbacks lines.
# pytype: disable=module-attr
if not add_metrics_callbacks:
add_metrics_callbacks = []
if include_default_metrics:
# Always compute example weight and example count if default metrics are
# enabled.
example_count_callback = post_export_metrics.example_count()
add_metrics_callbacks.append(example_count_callback)
if example_weight_key:
if isinstance(example_weight_key, dict):
for output_name, key in example_weight_key.items():
example_weight_callback = post_export_metrics.example_weight(
key, metric_tag=output_name)
add_metrics_callbacks.append(example_weight_callback)
else:
example_weight_callback = post_export_metrics.example_weight(
example_weight_key)
add_metrics_callbacks.append(example_weight_callback)
# pytype: enable=module-attr
model_loader = custom_model_loader
if not model_loader and model_type in constants.VALID_TF_MODEL_TYPES:
model_loader = types.ModelLoader(
construct_fn=model_util.model_construct_fn(
eval_saved_model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks,
include_default_metrics=include_default_metrics,
additional_fetches=additional_fetches,
blacklist_feature_fetches=blacklist_feature_fetches,
model_type=model_type,
tags=tags),
tags=tags)
return types.EvalSharedModel(
model_name=model_name,
model_type=model_type,
model_path=eval_saved_model_path,
add_metrics_callbacks=add_metrics_callbacks,
include_default_metrics=include_default_metrics,
example_weight_key=example_weight_key,
additional_fetches=additional_fetches,
model_loader=model_loader,
rubber_stamp=rubber_stamp,
is_baseline=is_baseline)
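# Illustrative sketch (not part of the original module): building a shared
# model from an EvalConfig. The model path and the model name 'candidate' are
# placeholders and should match a ModelSpec in the config.
#
#   eval_config = config_pb2.EvalConfig(
#       model_specs=[config_pb2.ModelSpec(name='candidate')])
#   shared_model = default_eval_shared_model(
#       eval_saved_model_path='/tmp/exported_model',
#       eval_config=eval_config,
#       model_name='candidate')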
def _has_sql_slices(eval_config: Optional[config_pb2.EvalConfig]) -> bool:
if eval_config:
for spec in eval_config.slicing_specs:
if spec.slice_keys_sql:
return True
return False
def default_extractors( # pylint: disable=invalid-name
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config_pb2.EvalConfig] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
materialize: Optional[bool] = None,
tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None,
custom_predict_extractor: Optional[extractor.Extractor] = None,
config_version: Optional[int] = None) -> List[extractor.Extractor]:
"""Returns the default extractors for use in ExtractAndEvaluate.
Args:
eval_shared_model: Shared model (single-model evaluation) or list of shared
models (multi-model evaluation). Required unless the predictions are
provided alongside of the features (i.e. model-agnostic evaluations).
eval_config: Eval config.
slice_spec: Deprecated (use EvalConfig).
materialize: True to have extractors create materialized output.
tensor_adapter_config: Tensor adapter config which specifies how to obtain
tensors from the Arrow RecordBatch. If None, an attempt will be made to
create the tensors using default TensorRepresentations.
custom_predict_extractor: Optional custom predict extractor for non-TF
models.
config_version: Optional config version for this evaluation. This should not
be explicitly set by users. It is only intended to be used in cases where
the provided eval_config was generated internally, and thus not a reliable
indicator of user intent.
Raises:
NotImplementedError: If eval_config contains mixed serving and eval models.
"""
if materialize is None:
# TODO(b/172969312): Once analysis table is supported, remove defaulting
# to false unless 'analysis' is in disabled_outputs.
materialize = False
if slice_spec and eval_config:
raise ValueError('slice_spec is deprecated, only use eval_config')
if eval_config is not None:
eval_config = _update_eval_config_with_defaults(eval_config,
eval_shared_model)
tensor_representations = None
if tensor_adapter_config:
tensor_representations = tensor_adapter_config.tensor_representations
if _is_legacy_eval(config_version, eval_shared_model, eval_config):
# Backwards compatibility for previous add_metrics_callbacks implementation.
if not eval_config and slice_spec:
eval_config = config_pb2.EvalConfig(
slicing_specs=[s.to_proto() for s in slice_spec])
return [
custom_predict_extractor or legacy_predict_extractor.PredictExtractor(
eval_shared_model, materialize=materialize),
slice_key_extractor.SliceKeyExtractor(
eval_config=eval_config, materialize=materialize)
]
slicing_extractors = []
if _has_sql_slices(eval_config):
slicing_extractors.append(
sql_slice_key_extractor.SqlSliceKeyExtractor(eval_config))
slicing_extractors.extend([
unbatch_extractor.UnbatchExtractor(),
slice_key_extractor.SliceKeyExtractor(
eval_config=eval_config, materialize=materialize)
])
if eval_shared_model:
model_types = _model_types(eval_shared_model)
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
if (not model_types.issubset(constants.VALID_TF_MODEL_TYPES) and
not custom_predict_extractor):
raise NotImplementedError(
'either a custom_predict_extractor must be used or model type must '
'be one of: {}. evalconfig={}'.format(
str(constants.VALID_TF_MODEL_TYPES), eval_config))
if model_types == set([constants.TF_LITE]):
# TODO(b/163889779): Convert TFLite extractor to operate on batched
# extracts. Then we can remove the input extractor.
return [
features_extractor.FeaturesExtractor(
eval_config=eval_config,
tensor_representations=tensor_representations),
transformed_features_extractor.TransformedFeaturesExtractor(
eval_config=eval_config, eval_shared_model=eval_shared_model),
labels_extractor.LabelsExtractor(eval_config=eval_config),
example_weights_extractor.ExampleWeightsExtractor(
eval_config=eval_config),
(custom_predict_extractor or
tflite_predict_extractor.TFLitePredictExtractor(
eval_config=eval_config, eval_shared_model=eval_shared_model))
] + slicing_extractors
elif constants.TF_LITE in model_types:
raise NotImplementedError(
'support for mixing tf_lite and non-tf_lite models is not '
'implemented: eval_config={}'.format(eval_config))
if model_types == set([constants.TF_JS]):
return [
features_extractor.FeaturesExtractor(
eval_config=eval_config,
tensor_representations=tensor_representations),
labels_extractor.LabelsExtractor(eval_config=eval_config),
example_weights_extractor.ExampleWeightsExtractor(
eval_config=eval_config),
(custom_predict_extractor or
tfjs_predict_extractor.TFJSPredictExtractor(
eval_config=eval_config, eval_shared_model=eval_shared_model))
] + slicing_extractors
elif constants.TF_JS in model_types:
raise NotImplementedError(
'support for mixing tf_js and non-tf_js models is not '
'implemented: eval_config={}'.format(eval_config))
elif (eval_config and model_types == set([constants.TF_ESTIMATOR]) and
all(eval_constants.EVAL_TAG in m.model_loader.tags
for m in eval_shared_models)):
return [
custom_predict_extractor or legacy_predict_extractor.PredictExtractor(
eval_shared_model,
materialize=materialize,
eval_config=eval_config)
] + slicing_extractors
elif (eval_config and constants.TF_ESTIMATOR in model_types and
any(eval_constants.EVAL_TAG in m.model_loader.tags
for m in eval_shared_models)):
raise NotImplementedError(
'support for mixing eval and non-eval estimator models is not '
'implemented: eval_config={}'.format(eval_config))
else:
extractors = [
features_extractor.FeaturesExtractor(
eval_config=eval_config,
tensor_representations=tensor_representations)
]
if not custom_predict_extractor:
extractors.append(
transformed_features_extractor.TransformedFeaturesExtractor(
eval_config=eval_config, eval_shared_model=eval_shared_model))
extractors.extend([
labels_extractor.LabelsExtractor(eval_config=eval_config),
example_weights_extractor.ExampleWeightsExtractor(
eval_config=eval_config),
(custom_predict_extractor or
predictions_extractor.PredictionsExtractor(
eval_config=eval_config, eval_shared_model=eval_shared_model)),
])
extractors.extend(slicing_extractors)
return extractors
else:
return [
features_extractor.FeaturesExtractor(
eval_config=eval_config,
tensor_representations=tensor_representations),
labels_extractor.LabelsExtractor(eval_config=eval_config),
example_weights_extractor.ExampleWeightsExtractor(
eval_config=eval_config),
predictions_extractor.PredictionsExtractor(eval_config=eval_config)
] + slicing_extractors
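# Illustrative sketch (not part of the original module): the typical pairing of
# config, shared model and extractors. The names reuse the placeholder objects
# from the sketches above.
#
#   extractors = default_extractors(
#       eval_shared_model=shared_model, eval_config=eval_config)
#   # The returned list ends with the slicing extractors and can be passed
#   # directly to ExtractAndEvaluate below.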
def default_evaluators( # pylint: disable=invalid-name
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config_pb2.EvalConfig] = None,
schema: Optional[schema_pb2.Schema] = None,
compute_confidence_intervals: Optional[bool] = False,
min_slice_size: int = 1,
serialize: bool = False,
random_seed_for_testing: Optional[int] = None,
config_version: Optional[int] = None) -> List[evaluator.Evaluator]:
"""Returns the default evaluators for use in ExtractAndEvaluate.
Args:
eval_shared_model: Optional shared model (single-model evaluation) or list
of shared models (multi-model evaluation). Only required if there are
metrics to be computed in-graph using the model.
eval_config: Eval config.
schema: A schema to use for customizing default evaluators.
compute_confidence_intervals: Deprecated (use eval_config).
min_slice_size: Deprecated (use eval_config).
serialize: Deprecated.
random_seed_for_testing: Provide for deterministic tests only.
config_version: Optional config version for this evaluation. This should not
be explicitly set by users. It is only intended to be used in cases where
the provided eval_config was generated internally, and thus not a reliable
indicator of user intent.
"""
disabled_outputs = []
if eval_config:
eval_config = _update_eval_config_with_defaults(eval_config,
eval_shared_model)
disabled_outputs = eval_config.options.disabled_outputs.values
if (_model_types(eval_shared_model) == set([constants.TF_LITE]) or
_model_types(eval_shared_model) == set([constants.TF_JS])):
# no in-graph metrics present when tflite or tfjs is used.
if eval_shared_model:
if isinstance(eval_shared_model, dict):
eval_shared_model = {
k: v._replace(include_default_metrics=False)
for k, v in eval_shared_model.items()
}
elif isinstance(eval_shared_model, list):
eval_shared_model = [
v._replace(include_default_metrics=False)
for v in eval_shared_model
]
else:
eval_shared_model = eval_shared_model._replace(
include_default_metrics=False)
if (constants.METRICS_KEY in disabled_outputs and
constants.PLOTS_KEY in disabled_outputs and
constants.ATTRIBUTIONS_KEY in disabled_outputs):
return []
if _is_legacy_eval(config_version, eval_shared_model, eval_config):
# Backwards compatibility for previous add_metrics_callbacks implementation.
if eval_config is not None:
if eval_config.options.HasField('compute_confidence_intervals'):
compute_confidence_intervals = (
eval_config.options.compute_confidence_intervals.value)
if eval_config.options.HasField('min_slice_size'):
min_slice_size = eval_config.options.min_slice_size.value
return [
legacy_metrics_and_plots_evaluator.MetricsAndPlotsEvaluator(
eval_shared_model,
compute_confidence_intervals=compute_confidence_intervals,
min_slice_size=min_slice_size,
serialize=serialize,
random_seed_for_testing=random_seed_for_testing)
]
else:
return [
metrics_plots_and_validations_evaluator
.MetricsPlotsAndValidationsEvaluator(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
schema=schema,
random_seed_for_testing=random_seed_for_testing)
]
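# Illustrative sketch (not part of the original module): evaluators are usually
# created from the same placeholder config/shared model used for the extractors.
#
#   evaluators = default_evaluators(
#       eval_shared_model=shared_model, eval_config=eval_config)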
def default_writers(
output_path: Optional[str],
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config_pb2.EvalConfig] = None,
display_only_data_location: Optional[str] = None,
display_only_data_file_format: Optional[str] = None,
output_file_format: str = 'tfrecord',
add_metric_callbacks: Optional[List[types.AddMetricsCallbackType]] = None
) -> List[writer.Writer]: # pylint: disable=invalid-name
"""Returns the default writers for use in WriteResults.
Note, sharding will be enabled by default if an output_file_format is
provided. Filenames will be <output_path>-SSSSS-of-NNNNN.<output_file_format>
where SSSSS is the shard number and NNNNN is the number of shards.
Args:
output_path: Output path.
eval_shared_model: Optional shared model (single-model evaluation) or list
of shared models (multi-model evaluation). Required unless the predictions
are provided alongside of the features (i.e. model-agnostic evaluations).
    eval_config: Eval config for writing out config along with results. Also
      used to check for missing slices.
display_only_data_location: Optional path indicating where the examples were
read from. This is used only for display purposes - data will not actually
be read from this path.
display_only_data_file_format: Optional format of the input examples. This
is used only for display purposes.
output_file_format: File format to use when saving files. Currently only
'tfrecord' is supported.
add_metric_callbacks: Optional list of metric callbacks (if used).
"""
writers = []
if not add_metric_callbacks:
add_metric_callbacks = []
# The add_metric_callbacks are used in the metrics and plots serialization
# code to post process the metric data by calling populate_stats_and_pop.
# While both the legacy (V1) and new (V2) evaluation implementations support
# EvalSavedModels using add_metric_callbacks, this particular code is only
# required for the legacy evaluation based on the MetricsAndPlotsEvaluator.
# The V2 MetricsAndPlotsEvaluator output requires no additional processing.
# Since the V1 code only supports a single EvalSharedModel, we only set the
# add_metrics_callbacks if a dict is not passed.
if (eval_shared_model and not isinstance(eval_shared_model, dict) and
not isinstance(eval_shared_model, list)):
add_metric_callbacks = eval_shared_model.add_metrics_callbacks
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
if eval_config:
model_locations = {}
for v in (eval_shared_models or [None]):
k = '' if v is None else v.model_name
model_locations[k] = ('<unknown>' if v is None or v.model_path is None
else v.model_path)
writers.append(
eval_config_writer.EvalConfigWriter(
output_path,
eval_config=eval_config,
data_location=display_only_data_location,
data_file_format=display_only_data_file_format,
model_locations=model_locations))
output_paths = {
constants.METRICS_KEY:
os.path.join(output_path, constants.METRICS_KEY),
constants.PLOTS_KEY:
os.path.join(output_path, constants.PLOTS_KEY),
constants.ATTRIBUTIONS_KEY:
os.path.join(output_path, constants.ATTRIBUTIONS_KEY),
constants.VALIDATIONS_KEY:
os.path.join(output_path, constants.VALIDATIONS_KEY)
}
writers.append(
metrics_plots_and_validations_writer.MetricsPlotsAndValidationsWriter(
output_paths=output_paths,
# Empty EvalConfig supported for backwards compatibility.
eval_config=eval_config or config_pb2.EvalConfig(),
add_metrics_callbacks=add_metric_callbacks,
output_file_format=output_file_format,
rubber_stamp=model_util.has_rubber_stamp(eval_shared_models)))
return writers
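# Illustrative sketch (not part of the original module): writers for a run.
# The output path is a placeholder directory; shared_model and eval_config
# reuse the placeholder objects from the sketches above.
#
#   writers = default_writers(
#       output_path='/tmp/tfma/run_01',
#       eval_shared_model=shared_model,
#       eval_config=eval_config)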
@beam.ptransform_fn
# TODO(b/156538355): Find out why str is also required instead of just bytes
# after adding types.Extracts.
@beam.typehints.with_input_types(Union[bytes, str, types.Extracts])
@beam.typehints.with_output_types(types.Extracts)
def InputsToExtracts( # pylint: disable=invalid-name
inputs: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
"""Converts serialized inputs (e.g. examples) to Extracts if not already."""
def to_extracts(x: Union[bytes, str, types.Extracts]) -> types.Extracts:
result = {}
if isinstance(x, dict):
result.update(x)
else:
result[constants.INPUT_KEY] = x
return result
return inputs | 'AddInputKey' >> beam.Map(to_extracts)
@beam.ptransform_fn
@beam.typehints.with_input_types(Union[bytes, pa.RecordBatch, types.Extracts])
@beam.typehints.with_output_types(types.Extracts)
def BatchedInputsToExtracts( # pylint: disable=invalid-name
batched_inputs: beam.pvalue.PCollection) -> beam.pvalue.PCollection:
"""Converts Arrow RecordBatch inputs to Extracts."""
def to_extracts(
x: Union[bytes, types.Extracts, pa.RecordBatch]) -> types.Extracts:
result = {}
if isinstance(x, dict):
result.update(x)
else:
result[constants.ARROW_RECORD_BATCH_KEY] = x
return result
return batched_inputs | 'AddArrowRecordBatchKey' >> beam.Map(to_extracts)
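# Illustrative sketch (not part of the original module): converting raw inputs
# to Extracts inside a Beam pipeline. `serialized_examples` and
# `record_batches` are placeholder PCollections.
#
#   extracts = serialized_examples | 'ToExtracts' >> InputsToExtracts()
#   batched_extracts = (
#       record_batches | 'ToBatchedExtracts' >> BatchedInputsToExtracts())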
@beam.ptransform_fn
@beam.typehints.with_input_types(types.Extracts)
@beam.typehints.with_output_types(Any)
def ExtractAndEvaluate( # pylint: disable=invalid-name
extracts: beam.pvalue.PCollection, extractors: List[extractor.Extractor],
evaluators: List[evaluator.Evaluator]) -> evaluator.Evaluation:
"""Performs Extractions and Evaluations in provided order."""
# evaluation[k] = list of values for k
evaluation = {}
def update(evaluation: Dict[str, Any], new_evaluation: Dict[str, Any]):
for k, v in new_evaluation.items():
if k not in evaluation:
evaluation[k] = []
evaluation[k].append(v)
return evaluation
_ = extracts | 'TrackInputBytes' >> _TrackBytesProcessed() # pylint: disable=no-value-for-parameter
# Run evaluators that run before extraction (i.e. that only require
# the incoming input extract added by ReadInputs)
for v in evaluators:
if not v.run_after:
update(evaluation, extracts | v.stage_name >> v.ptransform)
for x in extractors:
extracts = (extracts | x.stage_name >> x.ptransform)
for v in evaluators:
if v.run_after == x.stage_name:
update(evaluation, extracts | v.stage_name >> v.ptransform)
for v in evaluators:
if v.run_after == extractor.LAST_EXTRACTOR_STAGE_NAME:
update(evaluation, extracts | v.stage_name >> v.ptransform)
# Merge multi-valued keys if necessary.
result = {}
for k, v in evaluation.items():
if len(v) == 1:
result[k] = v[0]
continue
# Note that we assume that if a key is multivalued, its values are
# dictionaries with disjoint keys. The combined value will simply be the
# disjoint union of all the dictionaries.
result[k] = (
v
| 'FlattenEvaluationOutput(%s)' % k >> beam.Flatten()
| 'CombineEvaluationOutput(%s)' % k >> beam.CombinePerKey(
_CombineEvaluationDictionariesFn()))
return result
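# Illustrative sketch (not part of the original module): manually chaining
# extraction, evaluation and writing. All names are placeholders built with the
# default_* helpers and the conversion transforms sketched above.
#
#   _ = (batched_extracts
#        | 'ExtractAndEvaluate' >> ExtractAndEvaluate(
#            extractors=extractors, evaluators=evaluators)
#        | 'WriteResults' >> WriteResults(writers=writers))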
class _CombineEvaluationDictionariesFn(beam.CombineFn):
"""CombineFn to combine dictionaries generated by different evaluators."""
def create_accumulator(self) -> Dict[str, Any]:
return {}
def _merge(self, accumulator: Dict[str, Any], output_dict: Dict[str,
Any]) -> None:
intersection = set(accumulator) & set(output_dict)
if intersection:
raise ValueError(
'Dictionaries generated by different evaluators should have '
'different keys, but keys %s appeared in the output of multiple '
'evaluators' % intersection)
accumulator.update(output_dict)
def add_input(self, accumulator: Dict[str, Any],
output_dict: Dict[str, Any]) -> Dict[str, Any]:
if not isinstance(output_dict, dict):
raise TypeError(
'for outputs written to by multiple evaluators, the outputs must all '
'be dictionaries, but got output of type %s, value %s' %
(type(output_dict), str(output_dict)))
self._merge(accumulator, output_dict)
return accumulator
def merge_accumulators(
self, accumulators: Iterable[Dict[str, Any]]) -> Dict[str, Any]:
accumulators = iter(accumulators)
result = next(accumulators)
for acc in accumulators:
self._merge(result, acc)
return result
def extract_output(self, accumulator: Dict[str, Any]) -> Dict[str, Any]:
return accumulator
@beam.ptransform_fn
# TODO(b/157600974): Add input typehint.
@beam.typehints.with_output_types(beam.pvalue.PDone)
def WriteResults( # pylint: disable=invalid-name
evaluation_or_validation: Union[evaluator.Evaluation, validator.Validation],
writers: List[writer.Writer]) -> beam.pvalue.PDone:
"""Writes Evaluation or Validation results using given writers.
Args:
evaluation_or_validation: Evaluation or Validation output.
writers: Writes to use for writing out output.
Raises:
ValueError: If Evaluation or Validation is empty.
Returns:
beam.pvalue.PDone.
"""
if not evaluation_or_validation:
raise ValueError('Evaluations and Validations cannot be empty')
for w in writers:
_ = evaluation_or_validation | w.stage_name >> w.ptransform
return beam.pvalue.PDone(list(evaluation_or_validation.values())[0].pipeline)
def is_legacy_estimator(
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None
) -> bool:
"""Returns true if there is a legacy estimator.
Args:
eval_shared_model: Shared model (single-model evaluation) or list of shared
models (multi-model evaluation). Required unless the predictions are
provided alongside of the features (i.e. model-agnostic evaluations).
Returns:
A boolean indicating if legacy predict extractor will be used.
"""
model_types = _model_types(eval_shared_model)
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
return (model_types == set([constants.TF_ESTIMATOR]) and
all(eval_constants.EVAL_TAG in m.model_loader.tags
for m in eval_shared_models))
def is_batched_input(eval_shared_model: Optional[
types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config_pb2.EvalConfig] = None,
config_version: Optional[int] = None) -> bool:
"""Returns true if batched input should be used.
We will keep supporting the legacy unbatched V1 PredictExtractor as it parses
the features and labels, and is the only solution currently that allows for
slicing on transformed features. Eventually we should have support for
transformed features via keras preprocessing layers.
Args:
eval_shared_model: Shared model (single-model evaluation) or list of shared
models (multi-model evaluation). Required unless the predictions are
provided alongside of the features (i.e. model-agnostic evaluations).
eval_config: Eval config.
config_version: Optional config version for this evaluation. This should not
be explicitly set by users. It is only intended to be used in cases where
the provided eval_config was generated internally, and thus not a reliable
indicator of user intent.
Returns:
A boolean indicating if batched extractors should be used.
"""
return not _is_legacy_eval(config_version, eval_shared_model, eval_config)
@beam.ptransform_fn
@beam.typehints.with_input_types(Any)
@beam.typehints.with_output_types(beam.pvalue.PDone)
def ExtractEvaluateAndWriteResults( # pylint: disable=invalid-name
examples: beam.pvalue.PCollection,
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config_pb2.EvalConfig] = None,
extractors: Optional[List[extractor.Extractor]] = None,
evaluators: Optional[List[evaluator.Evaluator]] = None,
writers: Optional[List[writer.Writer]] = None,
output_path: Optional[str] = None,
display_only_data_location: Optional[str] = None,
display_only_file_format: Optional[str] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
write_config: Optional[bool] = True,
compute_confidence_intervals: Optional[bool] = False,
min_slice_size: int = 1,
random_seed_for_testing: Optional[int] = None,
tensor_adapter_config: Optional[tensor_adapter.TensorAdapterConfig] = None,
schema: Optional[schema_pb2.Schema] = None,
config_version: Optional[int] = None) -> beam.pvalue.PDone:
"""PTransform for performing extraction, evaluation, and writing results.
Users who want to construct their own Beam pipelines instead of using the
lightweight run_model_analysis functions should use this PTransform.
Example usage:
```python
eval_config = tfma.EvalConfig(model_specs=[...], metrics_specs=[...],
slicing_specs=[...])
eval_shared_model = tfma.default_eval_shared_model(
eval_saved_model_path=model_location, eval_config=eval_config)
tfx_io = tf_example_record.TFExampleRecord(
file_pattern=data_location,
raw_record_column_name=tfma.ARROW_INPUT_COLUMN)
with beam.Pipeline(runner=...) as p:
_ = (p
| 'ReadData' >> tfx_io.BeamSource()
| 'ExtractEvaluateAndWriteResults' >>
tfma.ExtractEvaluateAndWriteResults(
eval_shared_model=eval_shared_model,
eval_config=eval_config,
...))
result = tfma.load_eval_result(output_path=output_path)
tfma.view.render_slicing_metrics(result)
  NOTE: If running with an EvalSavedModel (i.e. the ModelSpec has signature_name
  "eval"), then instead of using the tfxio.BeamSource() code above, read the
  input with:
    beam.io.ReadFromTFRecord(data_location)
```
Note that the exact serialization format is an internal implementation detail
and subject to change. Users should only use the TFMA functions to write and
read the results.
Args:
examples: PCollection of input examples or Arrow Record batches. Examples
can be any format the model accepts (e.g. string containing CSV row,
TensorFlow.Example, etc). If the examples are in the form of a dict it
will be assumed that input is already in the form of tfma.Extracts with
examples stored under tfma.INPUT_KEY (any other keys will be passed along
unchanged to downstream extractors and evaluators).
eval_shared_model: Optional shared model (single-model evaluation) or list
of shared models (multi-model evaluation). Only required if needed by
default extractors, evaluators, or writers and for display purposes of the
model path.
eval_config: Eval config.
extractors: Optional list of Extractors to apply to Extracts. Typically
these will be added by calling the default_extractors function. If no
extractors are provided, default_extractors (non-materialized) will be
used.
evaluators: Optional list of Evaluators for evaluating Extracts. Typically
these will be added by calling the default_evaluators function. If no
evaluators are provided, default_evaluators will be used.
writers: Optional list of Writers for writing Evaluation output. Typically
these will be added by calling the default_writers function. If no writers
are provided, default_writers will be used.
output_path: Path to output results to (config file, metrics, plots, etc).
display_only_data_location: Optional path indicating where the examples were
read from. This is used only for display purposes - data will not actually
be read from this path.
display_only_file_format: Optional format of the examples. This is used only
for display purposes.
slice_spec: Deprecated (use EvalConfig).
write_config: Deprecated (use EvalConfig).
compute_confidence_intervals: Deprecated (use EvalConfig).
min_slice_size: Deprecated (use EvalConfig).
random_seed_for_testing: Provide for deterministic tests only.
tensor_adapter_config: Tensor adapter config which specifies how to obtain
tensors from the Arrow RecordBatch. If None, an attempt will be made to
create the tensors using default TensorRepresentations.
schema: A schema to use for customizing evaluators.
config_version: Optional config version for this evaluation. This should not
be explicitly set by users. It is only intended to be used in cases where
the provided eval_config was generated internally, and thus not a reliable
indicator of user intent.
Raises:
ValueError: If EvalConfig invalid or matching Extractor not found for an
Evaluator.
Returns:
PDone.
"""
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
if eval_config is None:
config_version = 1 if config_version is None else config_version
eval_config = _default_eval_config(eval_shared_models, slice_spec,
write_config,
compute_confidence_intervals,
min_slice_size)
else:
config_version = 2 if config_version is None else config_version
eval_config = _update_eval_config_with_defaults(eval_config,
eval_shared_model)
config_util.verify_eval_config(eval_config)
if not extractors:
extractors = default_extractors(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
tensor_adapter_config=tensor_adapter_config,
config_version=config_version)
if not evaluators:
evaluators = default_evaluators(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
random_seed_for_testing=random_seed_for_testing,
schema=schema,
config_version=config_version)
for v in evaluators:
evaluator.verify_evaluator(v, extractors)
if not writers:
writers = default_writers(
output_path=output_path,
eval_shared_model=eval_shared_model,
eval_config=eval_config,
display_only_data_location=display_only_data_location,
display_only_data_file_format=display_only_file_format)
# pylint: disable=no-value-for-parameter
if is_batched_input(eval_shared_model, eval_config, config_version):
extracts = (
examples
| 'BatchedInputsToExtracts' >> BatchedInputsToExtracts())
else:
extracts = (examples | 'InputsToExtracts' >> InputsToExtracts())
_ = (
extracts
| 'ExtractAndEvaluate' >> ExtractAndEvaluate(
extractors=extractors, evaluators=evaluators)
| 'WriteResults' >> WriteResults(writers=writers))
return beam.pvalue.PDone(examples.pipeline)
def run_model_analysis(
eval_shared_model: Optional[types.MaybeMultipleEvalSharedModels] = None,
eval_config: Optional[config_pb2.EvalConfig] = None,
data_location: str = '',
file_format: str = 'tfrecords',
output_path: Optional[str] = None,
extractors: Optional[List[extractor.Extractor]] = None,
evaluators: Optional[List[evaluator.Evaluator]] = None,
writers: Optional[List[writer.Writer]] = None,
pipeline_options: Optional[Any] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None,
write_config: Optional[bool] = True,
compute_confidence_intervals: Optional[bool] = False,
min_slice_size: int = 1,
random_seed_for_testing: Optional[int] = None,
schema: Optional[schema_pb2.Schema] = None,
) -> Union[view_types.EvalResult, view_types.EvalResults]:
"""Runs TensorFlow model analysis.
  It runs a Beam pipeline to compute the slicing metrics exported in the
  TensorFlow Eval SavedModel and returns the results.
This is a simplified API for users who want to quickly get something running
locally. Users who wish to create their own Beam pipelines can use the
Evaluate PTransform instead.
Args:
eval_shared_model: Optional shared model (single-model evaluation) or list
of shared models (multi-model evaluation). Only required if needed by
default extractors, evaluators, or writers.
eval_config: Eval config.
data_location: The location of the data files.
file_format: The file format of the data, can be either 'text' or
'tfrecords' for now. By default, 'tfrecords' will be used.
output_path: The directory to output metrics and results to. If None, we use
a temporary directory.
extractors: Optional list of Extractors to apply to Extracts. Typically
these will be added by calling the default_extractors function. If no
extractors are provided, default_extractors (non-materialized) will be
used.
evaluators: Optional list of Evaluators for evaluating Extracts. Typically
these will be added by calling the default_evaluators function. If no
evaluators are provided, default_evaluators will be used.
writers: Optional list of Writers for writing Evaluation output. Typically
these will be added by calling the default_writers function. If no writers
are provided, default_writers will be used.
pipeline_options: Optional arguments to run the Pipeline, for instance
whether to run directly.
slice_spec: Deprecated (use EvalConfig).
write_config: Deprecated (use EvalConfig).
compute_confidence_intervals: Deprecated (use EvalConfig).
min_slice_size: Deprecated (use EvalConfig).
random_seed_for_testing: Provide for deterministic tests only.
schema: Optional tf.Metadata schema of the input data.
Returns:
An EvalResult that can be used with the TFMA visualization functions.
Raises:
ValueError: If the file_format is unknown to us.
"""
_assert_tensorflow_version()
if output_path is None:
output_path = tempfile.mkdtemp()
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
if eval_config is None:
config_version = 1
eval_shared_models = model_util.verify_and_update_eval_shared_models(
eval_shared_model)
eval_config = _default_eval_config(eval_shared_models, slice_spec,
write_config,
compute_confidence_intervals,
min_slice_size)
else:
config_version = 2
eval_config = _update_eval_config_with_defaults(eval_config,
eval_shared_model)
tensor_adapter_config = None
with beam.Pipeline(options=pipeline_options) as p:
if file_format == 'tfrecords':
if is_batched_input(eval_shared_model, eval_config, config_version):
if is_legacy_estimator(eval_shared_model):
tfxio = raw_tf_record.RawTfRecordTFXIO(
file_pattern=data_location,
raw_record_column_name=constants.ARROW_INPUT_COLUMN,
telemetry_descriptors=['StandaloneTFMA'])
else:
tfxio = tf_example_record.TFExampleRecord(
file_pattern=data_location,
schema=schema,
raw_record_column_name=constants.ARROW_INPUT_COLUMN,
telemetry_descriptors=['StandaloneTFMA'])
if schema is not None:
tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
arrow_schema=tfxio.ArrowSchema(),
tensor_representations=tfxio.TensorRepresentations())
data = p | 'ReadFromTFRecordToArrow' >> tfxio.BeamSource()
else:
data = p | 'ReadFromTFRecord' >> beam.io.ReadFromTFRecord(
file_pattern=data_location,
compression_type=beam.io.filesystem.CompressionTypes.AUTO)
elif file_format == 'text':
tfxio = raw_tf_record.RawBeamRecordTFXIO(
physical_format='csv',
raw_record_column_name=constants.ARROW_INPUT_COLUMN,
telemetry_descriptors=['StandaloneTFMA'])
data = (
p
| 'ReadFromText' >> beam.io.textio.ReadFromText(
data_location, coder=beam.coders.BytesCoder())
| 'ConvertToArrow' >> tfxio.BeamSource())
else:
raise ValueError('unknown file_format: {}'.format(file_format))
# pylint: disable=no-value-for-parameter
_ = (
data
| 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults(
eval_config=eval_config,
eval_shared_model=eval_shared_model,
display_only_data_location=data_location,
display_only_file_format=file_format,
output_path=output_path,
extractors=extractors,
evaluators=evaluators,
writers=writers,
random_seed_for_testing=random_seed_for_testing,
tensor_adapter_config=tensor_adapter_config,
schema=schema,
config_version=config_version))
# pylint: enable=no-value-for-parameter
if len(eval_config.model_specs) <= 1:
return load_eval_result(output_path)
else:
results = []
for spec in eval_config.model_specs:
results.append(load_eval_result(output_path, model_name=spec.name))
return view_types.EvalResults(results, constants.MODEL_CENTRIC_MODE)
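# Illustrative sketch (not part of the original module): the simplest
# single-process invocation. Paths are placeholders; eval_config and
# shared_model reuse the placeholder objects from the sketches above.
#
#   eval_result = run_model_analysis(
#       eval_config=eval_config,
#       eval_shared_model=shared_model,
#       data_location='/tmp/data/*.tfrecord',
#       output_path='/tmp/tfma/run_01')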
def single_model_analysis(
model_location: str,
data_location: str,
output_path: Optional[str] = None,
eval_config: Optional[config_pb2.EvalConfig] = None,
slice_spec: Optional[List[slicer.SingleSliceSpec]] = None
) -> view_types.EvalResult:
"""Run model analysis for a single model on a single data set.
This is a convenience wrapper around run_model_analysis for a single model
with a single data set. For more complex use cases, use
tfma.run_model_analysis.
Args:
model_location: Path to the export eval saved model.
data_location: The location of the data files.
output_path: The directory to output metrics and results to. If None, we use
a temporary directory.
eval_config: Eval config.
slice_spec: Deprecated (use EvalConfig).
Returns:
An EvalResult that can be used with the TFMA visualization functions.
"""
# Get working_dir ready.
if output_path is None:
output_path = tempfile.mkdtemp()
if not tf.io.gfile.exists(output_path):
tf.io.gfile.makedirs(output_path)
if slice_spec and eval_config:
raise ValueError('slice_spec is deprecated, only use eval_config')
if slice_spec:
eval_config = config_pb2.EvalConfig(
slicing_specs=[s.to_proto() for s in slice_spec])
return run_model_analysis(
eval_config=eval_config,
eval_shared_model=default_eval_shared_model(
eval_saved_model_path=model_location),
data_location=data_location,
output_path=output_path) # pytype: disable=bad-return-type
def multiple_model_analysis(model_locations: List[str], data_location: str,
**kwargs) -> view_types.EvalResults:
"""Run model analysis for multiple models on the same data set.
Args:
model_locations: A list of paths to the export eval saved model.
data_location: The location of the data files.
**kwargs: The args used for evaluation. See tfma.single_model_analysis() for
details.
Returns:
A tfma.EvalResults containing all the evaluation results with the same order
as model_locations.
"""
results = []
for m in model_locations:
results.append(single_model_analysis(m, data_location, **kwargs))
return view_types.EvalResults(results, constants.MODEL_CENTRIC_MODE)
def multiple_data_analysis(model_location: str, data_locations: List[str],
**kwargs) -> view_types.EvalResults:
"""Run model analysis for a single model on multiple data sets.
Args:
model_location: The location of the exported eval saved model.
data_locations: A list of data set locations.
**kwargs: The args used for evaluation. See tfma.run_model_analysis() for
details.
Returns:
A tfma.EvalResults containing all the evaluation results with the same order
as data_locations.
"""
results = []
for d in data_locations:
results.append(single_model_analysis(model_location, d, **kwargs))
return view_types.EvalResults(results, constants.DATA_CENTRIC_MODE)
def analyze_raw_data(
data: pd.DataFrame,
eval_config: Optional[config_pb2.EvalConfig] = None,
output_path: Optional[str] = None,
add_metric_callbacks: Optional[List[types.AddMetricsCallbackType]] = None
) -> view_types.EvalResult:
"""Runs TensorFlow model analysis on a pandas.DataFrame.
  This function allows you to use TFMA with Pandas DataFrames. The dataframe
  must include a 'prediction' column for the predicted label and a 'label'
  column for the actual label (these column names can be overridden via the
  ModelSpecs in eval_config).
In addition to a DataFrame, this function requires an eval_config, a
`tfma.EvalConfig` object containing various configuration parameters (see
[config.proto](https://github.com/tensorflow/model-analysis/blob/master/tensorflow_model_analysis/proto/config.proto)
  for a comprehensive list), including:
* the metrics to compute
* the slices to compute metrics on
* the DataFrame's column names for example labels and predictions ('label'
and 'prediction' by default)
* confidence interval options
This function returns a `tfma.EvalResult`, which contains TFMA's computed
metrics and can be used to generate plots with
`tfma.view.render_slicing_metrics`.
Example usage:
```python
model_specs = [
tfma.ModelSpec(
prediction_key='prediction',
label_key='label')
]
metrics_specs = [
tfma.MetricsSpec(metrics=[
tfma.MetricConfig(class_name='Accuracy'),
tfma.MetricConfig(class_name='ExampleCount')
])
]
slicing_specs = [
tfma.SlicingSpec(), # the empty slice represents overall dataset
tfma.SlicingSpec(feature_keys=['language'])
]
eval_config = tfma.EvalConfig(
model_specs=model_specs,
metrics_specs=metrics_specs,
slicing_specs=slicing_specs)
result = tfma.analyze_raw_data(df, eval_config)
tfma.view.render_slicing_metrics(result)
# Example with Fairness Indicators
from tensorflow_model_analysis.addons.fairness.post_export_metrics import
fairness_indicators
from tensorflow_model_analysis.addons.fairness.view import widget_view
add_metrics_callbacks = [
tfma.post_export_metrics.fairness_indicators(thresholds=[0.25, 0.5, 0.75])
]
  result = tfma.analyze_raw_data(
      data=df,
      eval_config=eval_config,
      add_metric_callbacks=add_metrics_callbacks
  )
widget_view.render_fairness_indicator(result)
```
Args:
data: A pandas.DataFrame, where rows correspond to examples and columns
correspond to features. One column must indicate a row's predicted label,
and one column must indicate a row's actual label.
eval_config: A `tfma.EvalConfig`, which contains various configuration
parameters including metrics, slices, and label/prediction column names.
output_path: Path to write EvalResult to.
add_metric_callbacks: Optional list of metric callbacks (if used).
Returns:
A tfma.EvalResult to extract metrics or generate visualizations from.
Raises:
KeyError: If the prediction or label columns are not found within the
DataFrame.
"""
for model_spec in eval_config.model_specs: # pytype: disable=attribute-error
model_spec.prediction_key = model_spec.prediction_key or 'prediction'
model_spec.label_key = model_spec.label_key or 'label'
if model_spec.prediction_key not in data.columns:
raise KeyError(
'The prediction_key column was not found. Looked for %s but found: %s'
% (model_spec.prediction_key, list(data.columns)))
if model_spec.label_key not in data.columns:
raise KeyError(
'The label_key column was not found. Looked for %s but found: %s' %
(model_spec.label_key, list(data.columns)))
# TODO(b/153570803): Validity check / assertions for dataframe structure
if eval_config.slicing_specs is None: # pytype: disable=attribute-error
eval_config.slicing_specs = [config_pb2.SlicingSpec(feature_keys=[''])]
if output_path is None:
output_path = tempfile.mkdtemp()
arrow_data = table_util.CanonicalizeRecordBatch(
table_util.DataFrameToRecordBatch(data))
beam_data = beam.Create([arrow_data])
writers = default_writers(
output_path,
eval_config=eval_config,
add_metric_callbacks=add_metric_callbacks)
with beam.Pipeline() as p:
_ = (
p
| beam_data
| 'ExtractEvaluateAndWriteResults' >> ExtractEvaluateAndWriteResults( # pylint: disable=no-value-for-parameter
writers=writers,
eval_config=eval_config,
output_path=output_path))
return load_eval_result(output_path)
| 42.876812 | 119 | 0.732788 |
7947e1139a93608b2a4f3812af5f89bae8eb8579 | 40,278 | py | Python | utils.py | jzheng84/singleshotpose | 5d23bcd348dce8c0eeb6c90d1cdc1a0a4d4c32cb | [
"MIT"
] | null | null | null | utils.py | jzheng84/singleshotpose | 5d23bcd348dce8c0eeb6c90d1cdc1a0a4d4c32cb | [
"MIT"
] | null | null | null | utils.py | jzheng84/singleshotpose | 5d23bcd348dce8c0eeb6c90d1cdc1a0a4d4c32cb | [
"MIT"
] | null | null | null | import sys
import os
import time
import math
import torch
import numpy as np
from PIL import Image, ImageDraw, ImageFont
from torch.autograd import Variable
import torch.nn.functional as F
import cv2
from scipy import spatial
import struct
import imghdr
def get_all_files(directory):
files = []
for f in os.listdir(directory):
if os.path.isfile(os.path.join(directory, f)):
files.append(os.path.join(directory, f))
else:
files.extend(get_all_files(os.path.join(directory, f)))
return files
def calcAngularDistance(gt_rot, pr_rot):
rotDiff = np.dot(gt_rot, np.transpose(pr_rot))
trace = np.trace(rotDiff)
return np.rad2deg(np.arccos((trace-1.0)/2.0))
def get_camera_intrinsic():
# TODO: Change to autorally intrinsics. Should load from param file.
K = np.zeros((3, 3), dtype='float64')
#K[0, 0], K[0, 2] = 572.4114, 325.2611
#K[1, 1], K[1, 2] = 573.5704, 242.0489
#K[2, 2] = 1.
# Alpha
#K[0, 0], K[0, 2] = 814.5068358423324, 641.3206511250478
#K[1, 1], K[1, 2] = 818.5252047479286, 503.98399653885133
K[0, 0], K[0, 2] = 852.0367654236857, 626.9338016782257
K[1, 1], K[1, 2] = 852.0736515102927, 500.2665457442116
K[2, 2] = 1.
return K
def compute_projection(points_3D, transformation, internal_calibration):
projections_2d = np.zeros((2, points_3D.shape[1]), dtype='float32')
camera_projection = (internal_calibration.dot(transformation)).dot(points_3D)
projections_2d[0, :] = camera_projection[0, :]/camera_projection[2, :]
projections_2d[1, :] = camera_projection[1, :]/camera_projection[2, :]
return projections_2d
def compute_transformation(points_3D, transformation):
return transformation.dot(points_3D)
def calc_pts_diameter(pts):
diameter = -1
for pt_id in range(pts.shape[0]):
pt_dup = np.tile(np.array([pts[pt_id, :]]), [pts.shape[0] - pt_id, 1])
pts_diff = pt_dup - pts[pt_id:, :]
max_dist = math.sqrt((pts_diff * pts_diff).sum(axis=1).max())
if max_dist > diameter:
diameter = max_dist
return diameter
def adi(pts_est, pts_gt):
nn_index = spatial.cKDTree(pts_est)
nn_dists, _ = nn_index.query(pts_gt, k=1)
e = nn_dists.mean()
return e
def get_3D_corners(vertices):
min_x = np.min(vertices[0,:])
max_x = np.max(vertices[0,:])
min_y = np.min(vertices[1,:])
max_y = np.max(vertices[1,:])
min_z = np.min(vertices[2,:])
max_z = np.max(vertices[2,:])
corners = np.array([[min_x, min_y, min_z],
[min_x, min_y, max_z],
[min_x, max_y, min_z],
[min_x, max_y, max_z],
[max_x, min_y, min_z],
[max_x, min_y, max_z],
[max_x, max_y, min_z],
[max_x, max_y, max_z]])
corners = np.concatenate((np.transpose(corners), np.ones((1,8)) ), axis=0)
return corners
def pnp(points_3D, points_2D, cameraMatrix):
try:
distCoeffs = pnp.distCoeffs
except:
distCoeffs = np.zeros((8, 1), dtype='float32')
    assert points_3D.shape[0] == points_2D.shape[0], 'points 3D and points 2D must have same number of vertices'
_, R_exp, t = cv2.solvePnP(points_3D,
# points_2D,
np.ascontiguousarray(points_2D[:,:2]).reshape((-1,1,2)),
cameraMatrix,
distCoeffs)
# , None, None, False, cv2.SOLVEPNP_UPNP)
# R_exp, t, _ = cv2.solvePnPRansac(points_3D,
# points_2D,
# cameraMatrix,
# distCoeffs,
# reprojectionError=12.0)
#
R, _ = cv2.Rodrigues(R_exp)
# Rt = np.c_[R, t]
return R, t
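# Illustrative sketch (not part of the original module): recovering a pose from
# predicted 2D corner locations and reprojecting the 3D box. `vertices` is a
# placeholder 3xN array of model points and `predicted_corners2D` a placeholder
# 9x2 float32 array (centroid followed by the 8 box corners).
#
#   K = np.array(get_camera_intrinsic(), dtype='float32')
#   corners3D = get_3D_corners(vertices)                     # 4 x 8 homogeneous
#   points_3D = np.array(np.transpose(np.concatenate(
#       (np.zeros((3, 1)), corners3D[:3, :]), axis=1)), dtype='float32')  # 9 x 3
#   R, t = pnp(points_3D, predicted_corners2D, K)
#   Rt = np.concatenate((R, t), axis=1)                      # 3 x 4 transform
#   proj_2d = compute_projection(corners3D, Rt, K)           # 2 x 8 pixel coords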
def get_2d_bb(box, size):
x = box[0]
y = box[1]
min_x = np.min(np.reshape(box, [9,2])[:,0])
max_x = np.max(np.reshape(box, [9,2])[:,0])
min_y = np.min(np.reshape(box, [9,2])[:,1])
max_y = np.max(np.reshape(box, [9,2])[:,1])
w = max_x - min_x
h = max_y - min_y
new_box = [x*size, y*size, w*size, h*size]
return new_box
def compute_2d_bb(pts):
min_x = np.min(pts[0,:])
max_x = np.max(pts[0,:])
min_y = np.min(pts[1,:])
max_y = np.max(pts[1,:])
w = max_x - min_x
h = max_y - min_y
cx = (max_x + min_x) / 2.0
cy = (max_y + min_y) / 2.0
new_box = [cx, cy, w, h]
return new_box
def compute_2d_bb_from_orig_pix(pts, size):
min_x = np.min(pts[0,:]) / 640.0
max_x = np.max(pts[0,:]) / 640.0
min_y = np.min(pts[1,:]) / 480.0
max_y = np.max(pts[1,:]) / 480.0
w = max_x - min_x
h = max_y - min_y
cx = (max_x + min_x) / 2.0
cy = (max_y + min_y) / 2.0
new_box = [cx*size, cy*size, w*size, h*size]
return new_box
def bbox_iou(box1, box2, x1y1x2y2=False):
if x1y1x2y2:
mx = min(box1[0], box2[0])
Mx = max(box1[2], box2[2])
my = min(box1[1], box2[1])
My = max(box1[3], box2[3])
w1 = box1[2] - box1[0]
h1 = box1[3] - box1[1]
w2 = box2[2] - box2[0]
h2 = box2[3] - box2[1]
else:
mx = min(box1[0]-box1[2]/2.0, box2[0]-box2[2]/2.0)
Mx = max(box1[0]+box1[2]/2.0, box2[0]+box2[2]/2.0)
my = min(box1[1]-box1[3]/2.0, box2[1]-box2[3]/2.0)
My = max(box1[1]+box1[3]/2.0, box2[1]+box2[3]/2.0)
w1 = box1[2]
h1 = box1[3]
w2 = box2[2]
h2 = box2[3]
uw = Mx - mx
uh = My - my
cw = w1 + w2 - uw
ch = h1 + h2 - uh
carea = 0
if cw <= 0 or ch <= 0:
return 0.0
area1 = w1 * h1
area2 = w2 * h2
carea = cw * ch
uarea = area1 + area2 - carea
return carea/uarea
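# Illustrative sketch (not part of the original module): IoU of two boxes given
# in (center_x, center_y, width, height) form; the values are arbitrary.
#
#   iou = bbox_iou([0.5, 0.5, 0.2, 0.3], [0.55, 0.5, 0.2, 0.3], x1y1x2y2=False)
#   # Identical boxes give 1.0, disjoint boxes give 0.0.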
def corner_confidences(gt_corners, pr_corners, th=30, sharpness=2, im_width=640, im_height=480):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
    return      : a torch.FloatTensor of shape (nA,) with the confidence for each anchor, averaged over the 8 corners
'''
shape = gt_corners.size()
nA = shape[1]
dist = gt_corners - pr_corners
dist = dist.t().contiguous().view(nA, 8, 2)
dist[:, :, 0] = dist[:, :, 0] * im_width
dist[:, :, 1] = dist[:, :, 1] * im_height
eps = 1e-5
distthresh = torch.FloatTensor([th]).repeat(nA, 8)
dist = torch.sqrt(torch.sum((dist)**2, dim=2)).squeeze() # nA x 8
mask = (dist < distthresh).type(torch.FloatTensor)
conf = torch.exp(sharpness*(1 - dist/distthresh))-1 # mask * (torch.exp(math.log(2) * (1.0 - dist/rrt)) - 1)
conf0 = torch.exp(sharpness*(1 - torch.zeros(conf.size(0),1))) - 1
conf = conf / conf0.repeat(1, 8)
# conf = 1 - dist/distthresh
conf = mask * conf # nA x 8
mean_conf = torch.mean(conf, dim=1)
return mean_conf
def corner_confidence(gt_corners, pr_corners, th=30, sharpness=2, im_width=640, im_height=480):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16,) type: list
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16,), type: list
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
    return      : a scalar tensor with the mean confidence over the 8 corners
'''
dist = torch.FloatTensor(gt_corners) - pr_corners
dist = dist.view(8, 2)
dist[:, 0] = dist[:, 0] * im_width
dist[:, 1] = dist[:, 1] * im_height
eps = 1e-5
dist = torch.sqrt(torch.sum((dist)**2, dim=1))
mask = (dist < th).type(torch.FloatTensor)
conf = torch.exp(sharpness * (1.0 - dist/th)) - 1
conf0 = torch.exp(torch.FloatTensor([sharpness])) - 1 + eps
conf = conf / conf0.repeat(8, 1)
# conf = 1.0 - dist/th
conf = mask * conf
return torch.mean(conf)
def corner_confidences9(gt_corners, pr_corners, th=80, sharpness=2, im_width=640, im_height=480):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (16 x nA), type: torch.FloatTensor
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
    return      : a torch.FloatTensor of shape (nA,) with the confidence for each anchor, averaged over the 9 corners
'''
shape = gt_corners.size()
nA = shape[1]
dist = gt_corners - pr_corners
dist = dist.t().contiguous().view(nA, 9, 2)
dist[:, :, 0] = dist[:, :, 0] * im_width
dist[:, :, 1] = dist[:, :, 1] * im_height
eps = 1e-5
distthresh = torch.FloatTensor([th]).repeat(nA, 9)
dist = torch.sqrt(torch.sum((dist)**2, dim=2)).squeeze() # nA x 9
mask = (dist < distthresh).type(torch.FloatTensor)
conf = torch.exp(sharpness*(1 - dist/distthresh))-1 # mask * (torch.exp(math.log(2) * (1.0 - dist/rrt)) - 1)
conf0 = torch.exp(sharpness*(1 - torch.zeros(conf.size(0),1))) - 1
conf = conf / conf0.repeat(1, 9)
# conf = 1 - dist/distthresh
conf = mask * conf # nA x 9
mean_conf = torch.mean(conf, dim=1)
return mean_conf
def corner_confidence9(gt_corners, pr_corners, th=80, sharpness=2, im_width=640, im_height=480):
''' gt_corners: Ground-truth 2D projections of the 3D bounding box corners, shape: (18,) type: list
pr_corners: Prediction for the 2D projections of the 3D bounding box corners, shape: (18,), type: list
th : distance threshold, type: int
sharpness : sharpness of the exponential that assigns a confidence value to the distance
-----------
    return      : a scalar tensor with the mean confidence over the 9 corners
'''
dist = torch.FloatTensor(gt_corners) - pr_corners
dist = dist.view(9, 2)
dist[:, 0] = dist[:, 0] * im_width
dist[:, 1] = dist[:, 1] * im_height
eps = 1e-5
dist = torch.sqrt(torch.sum((dist)**2, dim=1))
mask = (dist < th).type(torch.FloatTensor)
conf = torch.exp(sharpness * (1.0 - dist/th)) - 1
conf0 = torch.exp(torch.FloatTensor([sharpness])) - 1 + eps
conf = conf / conf0.repeat(9, 1)
# conf = 1.0 - dist/th
conf = mask * conf
return torch.mean(conf)
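# Illustrative sketch (not part of the original module): scoring a single
# 9-point prediction against ground truth. Both inputs hold 18 normalized
# (x, y) coordinates; the values below are placeholders.
#
#   gt = [0.5, 0.5] * 9                    # list of length 18
#   pr = torch.FloatTensor(gt) + 0.01      # slightly perturbed prediction
#   conf = corner_confidence9(gt, pr)      # averaged confidence (scalar tensor)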
def sigmoid(x):
return 1.0/(math.exp(-x)+1.)
def softmax(x):
x = torch.exp(x - torch.max(x))
x = x/x.sum()
return x
def nms(boxes, nms_thresh):
if len(boxes) == 0:
return boxes
det_confs = torch.zeros(len(boxes))
for i in range(len(boxes)):
det_confs[i] = 1-boxes[i][4]
_,sortIds = torch.sort(det_confs)
out_boxes = []
for i in range(len(boxes)):
box_i = boxes[sortIds[i]]
if box_i[4] > 0:
out_boxes.append(box_i)
for j in range(i+1, len(boxes)):
box_j = boxes[sortIds[j]]
if bbox_iou(box_i, box_j, x1y1x2y2=False) > nms_thresh:
#print(box_i, box_j, bbox_iou(box_i, box_j, x1y1x2y2=False))
box_j[4] = 0
return out_boxes
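# Illustrative sketch (not part of the original module): suppressing overlapping
# detections. Each box is read here as [cx, cy, w, h, det_conf]; the values are
# placeholders.
#
#   boxes = [[0.5, 0.5, 0.2, 0.2, 0.9], [0.51, 0.5, 0.2, 0.2, 0.4]]
#   kept = nms(boxes, 0.4)   # the lower-confidence duplicate is dropped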
def fix_corner_order(corners2D_gt):
corners2D_gt_corrected = np.zeros((9, 2), dtype='float32')
corners2D_gt_corrected[0, :] = corners2D_gt[0, :]
corners2D_gt_corrected[1, :] = corners2D_gt[1, :]
corners2D_gt_corrected[2, :] = corners2D_gt[3, :]
corners2D_gt_corrected[3, :] = corners2D_gt[5, :]
corners2D_gt_corrected[4, :] = corners2D_gt[7, :]
corners2D_gt_corrected[5, :] = corners2D_gt[2, :]
corners2D_gt_corrected[6, :] = corners2D_gt[4, :]
corners2D_gt_corrected[7, :] = corners2D_gt[6, :]
corners2D_gt_corrected[8, :] = corners2D_gt[8, :]
return corners2D_gt_corrected
def convert2cpu(gpu_matrix):
return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)
def convert2cpu_long(gpu_matrix):
return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)
def get_region_boxes(output, conf_thresh, num_classes, only_objectness=1, validation=False):
# Parameters
anchor_dim = 1
if output.dim() == 3:
output = output.unsqueeze(0)
batch = output.size(0)
assert(output.size(1) == (19+num_classes)*anchor_dim)
h = output.size(2)
w = output.size(3)
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
output = output.view(batch*anchor_dim, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*anchor_dim*h*w)
grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*anchor_dim, 1, 1).view(batch*anchor_dim*h*w).cuda()
grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*anchor_dim, 1, 1).view(batch*anchor_dim*h*w).cuda()
xs0 = torch.sigmoid(output[0]) + grid_x
ys0 = torch.sigmoid(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = torch.sigmoid(output[18])
cls_confs = torch.nn.Softmax()(Variable(output[19:19+num_classes].transpose(0,1))).data
cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
cls_max_confs = cls_max_confs.view(-1)
cls_max_ids = cls_max_ids.view(-1)
t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*anchor_dim
det_confs = convert2cpu(det_confs)
cls_max_confs = convert2cpu(cls_max_confs)
cls_max_ids = convert2cpu_long(cls_max_ids)
xs0 = convert2cpu(xs0)
ys0 = convert2cpu(ys0)
xs1 = convert2cpu(xs1)
ys1 = convert2cpu(ys1)
xs2 = convert2cpu(xs2)
ys2 = convert2cpu(ys2)
xs3 = convert2cpu(xs3)
ys3 = convert2cpu(ys3)
xs4 = convert2cpu(xs4)
ys4 = convert2cpu(ys4)
xs5 = convert2cpu(xs5)
ys5 = convert2cpu(ys5)
xs6 = convert2cpu(xs6)
ys6 = convert2cpu(ys6)
xs7 = convert2cpu(xs7)
ys7 = convert2cpu(ys7)
xs8 = convert2cpu(xs8)
ys8 = convert2cpu(ys8)
if validation:
cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(anchor_dim):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if conf > max_conf:
max_conf = conf
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
if len(boxes) == 0:
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = cls_max_confs[max_ind]
cls_max_id = cls_max_ids[max_ind]
det_conf = det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
boxes.append(box)
                all_boxes.append(boxes)
            else:
                all_boxes.append(boxes)
t3 = time.time()
if False:
print('---------------------------------')
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
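# Minimal usage sketch (assumed shapes, not from the original file): `output` is the raw
# network prediction of shape (batch, 19 + num_classes, H, W) and must live on the GPU,
# because the grid offsets above are built with .cuda().
#
#     all_boxes = get_region_boxes(output, conf_thresh=0.1, num_classes=13)
#     boxes = all_boxes[0]   # candidates for the first image in the batch
#     # each box is [x0, y0, ..., x8, y8, det_conf, cls_max_conf, cls_max_id], with the
#     # nine corner coordinates divided by the grid width/height (relative image coords).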
def get_corresponding_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, correspondingclass, only_objectness=1, validation=False):
# Parameters
anchor_step = len(anchors)/num_anchors
if output.dim() == 3:
output = output.unsqueeze(0)
batch = output.size(0)
assert(output.size(1) == (19+num_classes)*num_anchors)
h = output.size(2)
w = output.size(3)
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
max_cls_conf = -100000
output = output.view(batch*num_anchors, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*num_anchors*h*w)
grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
xs0 = torch.sigmoid(output[0]) + grid_x
ys0 = torch.sigmoid(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = torch.sigmoid(output[18])
cls_confs = torch.nn.Softmax()(Variable(output[19:19+num_classes].transpose(0,1))).data
cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
cls_max_confs = cls_max_confs.view(-1)
cls_max_ids = cls_max_ids.view(-1)
t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*num_anchors
det_confs = convert2cpu(det_confs)
cls_max_confs = convert2cpu(cls_max_confs)
cls_max_ids = convert2cpu_long(cls_max_ids)
xs0 = convert2cpu(xs0)
ys0 = convert2cpu(ys0)
xs1 = convert2cpu(xs1)
ys1 = convert2cpu(ys1)
xs2 = convert2cpu(xs2)
ys2 = convert2cpu(ys2)
xs3 = convert2cpu(xs3)
ys3 = convert2cpu(ys3)
xs4 = convert2cpu(xs4)
ys4 = convert2cpu(ys4)
xs5 = convert2cpu(xs5)
ys5 = convert2cpu(ys5)
xs6 = convert2cpu(xs6)
ys6 = convert2cpu(ys6)
xs7 = convert2cpu(xs7)
ys7 = convert2cpu(ys7)
xs8 = convert2cpu(xs8)
ys8 = convert2cpu(ys8)
if validation:
cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(num_anchors):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if (det_confs[ind] > max_conf) and (cls_confs[ind, correspondingclass] > max_cls_conf):
max_conf = det_confs[ind]
max_cls_conf = cls_confs[ind, correspondingclass]
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
boxesnp = np.array(boxes)
if (len(boxes) == 0) or (not (correspondingclass in boxesnp[:,20])):
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = max_cls_conf # cls_max_confs[max_ind]
cls_max_id = correspondingclass # cls_max_ids[max_ind]
det_conf = max_conf # det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
boxes.append(box)
# print(boxes)
all_boxes.append(boxes)
else:
all_boxes.append(boxes)
t3 = time.time()
if False:
print('---------------------------------')
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
def get_boxes(output, conf_thresh, num_classes, anchors, num_anchors, correspondingclass, only_objectness=1, validation=False):
# Parameters
anchor_step = len(anchors)/num_anchors
if output.dim() == 3:
output = output.unsqueeze(0)
batch = output.size(0)
assert(output.size(1) == (19+num_classes)*num_anchors)
h = output.size(2)
w = output.size(3)
# Activation
t0 = time.time()
all_boxes = []
max_conf = -100000
max_cls_conf = -100000
output = output.view(batch*num_anchors, 19+num_classes, h*w).transpose(0,1).contiguous().view(19+num_classes, batch*num_anchors*h*w)
grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).cuda()
xs0 = torch.sigmoid(output[0]) + grid_x
ys0 = torch.sigmoid(output[1]) + grid_y
xs1 = output[2] + grid_x
ys1 = output[3] + grid_y
xs2 = output[4] + grid_x
ys2 = output[5] + grid_y
xs3 = output[6] + grid_x
ys3 = output[7] + grid_y
xs4 = output[8] + grid_x
ys4 = output[9] + grid_y
xs5 = output[10] + grid_x
ys5 = output[11] + grid_y
xs6 = output[12] + grid_x
ys6 = output[13] + grid_y
xs7 = output[14] + grid_x
ys7 = output[15] + grid_y
xs8 = output[16] + grid_x
ys8 = output[17] + grid_y
det_confs = torch.sigmoid(output[18])
cls_confs = torch.nn.Softmax()(Variable(output[19:19+num_classes].transpose(0,1))).data
cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
cls_max_confs = cls_max_confs.view(-1)
cls_max_ids = cls_max_ids.view(-1)
t1 = time.time()
# GPU to CPU
sz_hw = h*w
sz_hwa = sz_hw*num_anchors
det_confs = convert2cpu(det_confs)
cls_max_confs = convert2cpu(cls_max_confs)
cls_max_ids = convert2cpu_long(cls_max_ids)
xs0 = convert2cpu(xs0)
ys0 = convert2cpu(ys0)
xs1 = convert2cpu(xs1)
ys1 = convert2cpu(ys1)
xs2 = convert2cpu(xs2)
ys2 = convert2cpu(ys2)
xs3 = convert2cpu(xs3)
ys3 = convert2cpu(ys3)
xs4 = convert2cpu(xs4)
ys4 = convert2cpu(ys4)
xs5 = convert2cpu(xs5)
ys5 = convert2cpu(ys5)
xs6 = convert2cpu(xs6)
ys6 = convert2cpu(ys6)
xs7 = convert2cpu(xs7)
ys7 = convert2cpu(ys7)
xs8 = convert2cpu(xs8)
ys8 = convert2cpu(ys8)
if validation:
cls_confs = convert2cpu(cls_confs.view(-1, num_classes))
t2 = time.time()
# Boxes filter
for b in range(batch):
boxes = []
max_conf = -1
for cy in range(h):
for cx in range(w):
for i in range(num_anchors):
ind = b*sz_hwa + i*sz_hw + cy*w + cx
det_conf = det_confs[ind]
if only_objectness:
conf = det_confs[ind]
else:
conf = det_confs[ind] * cls_max_confs[ind]
if (conf > max_conf) and (cls_confs[ind, correspondingclass] > max_cls_conf):
max_conf = conf
max_cls_conf = cls_confs[ind, correspondingclass]
max_ind = ind
if conf > conf_thresh:
bcx0 = xs0[ind]
bcy0 = ys0[ind]
bcx1 = xs1[ind]
bcy1 = ys1[ind]
bcx2 = xs2[ind]
bcy2 = ys2[ind]
bcx3 = xs3[ind]
bcy3 = ys3[ind]
bcx4 = xs4[ind]
bcy4 = ys4[ind]
bcx5 = xs5[ind]
bcy5 = ys5[ind]
bcx6 = xs6[ind]
bcy6 = ys6[ind]
bcx7 = xs7[ind]
bcy7 = ys7[ind]
bcx8 = xs8[ind]
bcy8 = ys8[ind]
cls_max_conf = cls_max_confs[ind]
cls_max_id = cls_max_ids[ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
if (not only_objectness) and validation:
for c in range(num_classes):
tmp_conf = cls_confs[ind][c]
if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
box.append(tmp_conf)
box.append(c)
boxes.append(box)
boxesnp = np.array(boxes)
if (len(boxes) == 0) or (not (correspondingclass in boxesnp[:,20])):
bcx0 = xs0[max_ind]
bcy0 = ys0[max_ind]
bcx1 = xs1[max_ind]
bcy1 = ys1[max_ind]
bcx2 = xs2[max_ind]
bcy2 = ys2[max_ind]
bcx3 = xs3[max_ind]
bcy3 = ys3[max_ind]
bcx4 = xs4[max_ind]
bcy4 = ys4[max_ind]
bcx5 = xs5[max_ind]
bcy5 = ys5[max_ind]
bcx6 = xs6[max_ind]
bcy6 = ys6[max_ind]
bcx7 = xs7[max_ind]
bcy7 = ys7[max_ind]
bcx8 = xs8[max_ind]
bcy8 = ys8[max_ind]
cls_max_conf = max_cls_conf # cls_max_confs[max_ind]
cls_max_id = correspondingclass # cls_max_ids[max_ind]
det_conf = det_confs[max_ind]
box = [bcx0/w, bcy0/h, bcx1/w, bcy1/h, bcx2/w, bcy2/h, bcx3/w, bcy3/h, bcx4/w, bcy4/h, bcx5/w, bcy5/h, bcx6/w, bcy6/h, bcx7/w, bcy7/h, bcx8/w, bcy8/h, det_conf, cls_max_conf, cls_max_id]
boxes.append(box)
# print(boxes)
all_boxes.append(boxes)
else:
all_boxes.append(boxes)
t3 = time.time()
if False:
print('---------------------------------')
print('matrix computation : %f' % (t1-t0))
print(' gpu to cpu : %f' % (t2-t1))
print(' boxes filter : %f' % (t3-t2))
print('---------------------------------')
return all_boxes
def plot_boxes_cv2(img, boxes, savename=None, class_names=None, color=None):
import cv2
colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]]);
def get_color(c, x, max_val):
ratio = float(x)/max_val * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio = ratio - i
r = (1-ratio) * colors[i][c] + ratio*colors[j][c]
return int(r*255)
width = img.shape[1]
height = img.shape[0]
for i in range(len(boxes)):
box = boxes[i]
x1 = int(round((box[0] - box[2]/2.0) * width))
y1 = int(round((box[1] - box[3]/2.0) * height))
x2 = int(round((box[0] + box[2]/2.0) * width))
y2 = int(round((box[1] + box[3]/2.0) * height))
if color:
rgb = color
else:
rgb = (255, 0, 0)
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
print('%s: %f' % (class_names[cls_id], cls_conf))
classes = len(class_names)
offset = cls_id * 123457 % classes
red = get_color(2, offset, classes)
green = get_color(1, offset, classes)
blue = get_color(0, offset, classes)
if color is None:
rgb = (red, green, blue)
img = cv2.putText(img, class_names[cls_id], (x1,y1), cv2.FONT_HERSHEY_SIMPLEX, 1.2, rgb, 1)
img = cv2.rectangle(img, (x1,y1), (x2,y2), rgb, 1)
if savename:
print("save plot results to %s" % savename)
cv2.imwrite(savename, img)
return img
def plot_boxes(img, boxes, savename=None, class_names=None):
colors = torch.FloatTensor([[1,0,1],[0,0,1],[0,1,1],[0,1,0],[1,1,0],[1,0,0]]);
def get_color(c, x, max_val):
ratio = float(x)/max_val * 5
i = int(math.floor(ratio))
j = int(math.ceil(ratio))
ratio = ratio - i
r = (1-ratio) * colors[i][c] + ratio*colors[j][c]
return int(r*255)
width = img.width
height = img.height
draw = ImageDraw.Draw(img)
for i in range(len(boxes)):
box = boxes[i]
x1 = (box[0] - box[2]/2.0) * width
y1 = (box[1] - box[3]/2.0) * height
x2 = (box[0] + box[2]/2.0) * width
y2 = (box[1] + box[3]/2.0) * height
rgb = (255, 0, 0)
if len(box) >= 7 and class_names:
cls_conf = box[5]
cls_id = box[6]
print('%s: %f' % (class_names[cls_id], cls_conf))
classes = len(class_names)
offset = cls_id * 123457 % classes
red = get_color(2, offset, classes)
green = get_color(1, offset, classes)
blue = get_color(0, offset, classes)
rgb = (red, green, blue)
draw.text((x1, y1), class_names[cls_id], fill=rgb)
draw.rectangle([x1, y1, x2, y2], outline = rgb)
if savename:
print("save plot results to %s" % savename)
img.save(savename)
return img
def read_truths(lab_path):
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
truths = truths.reshape(truths.size/21, 21) # to avoid single truth problem
return truths
else:
return np.array([])
def read_truths_args(lab_path, min_box_scale):
truths = read_truths(lab_path)
new_truths = []
for i in range(truths.shape[0]):
new_truths.append([truths[i][0], truths[i][1], truths[i][2], truths[i][3], truths[i][4],
truths[i][5], truths[i][6], truths[i][7], truths[i][8], truths[i][9], truths[i][10],
truths[i][11], truths[i][12], truths[i][13], truths[i][14], truths[i][15], truths[i][16], truths[i][17], truths[i][18]])
return np.array(new_truths)
def read_pose(lab_path):
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
# truths = truths.reshape(truths.size/21, 21) # to avoid single truth problem
return truths
else:
return np.array([])
def load_class_names(namesfile):
class_names = []
with open(namesfile, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.rstrip()
class_names.append(line)
return class_names
def image2torch(img):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
return img
def do_detect(model, img, conf_thresh, nms_thresh, use_cuda=1):
model.eval()
t0 = time.time()
if isinstance(img, Image.Image):
width = img.width
height = img.height
img = torch.ByteTensor(torch.ByteStorage.from_buffer(img.tobytes()))
img = img.view(height, width, 3).transpose(0,1).transpose(0,2).contiguous()
img = img.view(1, 3, height, width)
img = img.float().div(255.0)
elif type(img) == np.ndarray: # cv2 image
img = torch.from_numpy(img.transpose(2,0,1)).float().div(255.0).unsqueeze(0)
else:
print("unknow image type")
exit(-1)
t1 = time.time()
if use_cuda:
img = img.cuda()
img = torch.autograd.Variable(img)
t2 = time.time()
output = model(img)
output = output.data
#for j in range(100):
# sys.stdout.write('%f ' % (output.storage()[j]))
#print('')
t3 = time.time()
    # NOTE: get_region_boxes() above takes no anchor arguments in this version of the file.
    boxes = get_region_boxes(output, conf_thresh, model.num_classes)[0]
#for j in range(len(boxes)):
# print(boxes[j])
t4 = time.time()
boxes = nms(boxes, nms_thresh)
t5 = time.time()
if False:
print('-----------------------------------')
print(' image to tensor : %f' % (t1 - t0))
print(' tensor to cuda : %f' % (t2 - t1))
print(' predict : %f' % (t3 - t2))
print('get_region_boxes : %f' % (t4 - t3))
print(' nms : %f' % (t5 - t4))
print(' total : %f' % (t5 - t0))
print('-----------------------------------')
return boxes
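# Usage sketch (hypothetical file names; requires a trained Darknet-style model object
# exposing .num_classes, .anchors and .num_anchors, as assumed by the call above):
#
#     from PIL import Image
#     img = Image.open('example.jpg').resize((416, 416))   # network input size assumed
#     boxes = do_detect(model, img, conf_thresh=0.1, nms_thresh=0.4, use_cuda=1)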
def read_data_cfg(datacfg):
options = dict()
options['gpus'] = '0,1,2,3'
options['num_workers'] = '10'
with open(datacfg, 'r') as fp:
lines = fp.readlines()
for line in lines:
line = line.strip()
if line == '':
continue
key,value = line.split('=')
key = key.strip()
value = value.strip()
options[key] = value
return options
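# Example .data file accepted by read_data_cfg() (a sketch; any `key = value` lines work):
#
#     train  = cfg/train.txt
#     valid  = cfg/test.txt
#     backup = backup/
#
# read_data_cfg('cfg/example.data') then returns those pairs as a dict, on top of the
# defaults gpus='0,1,2,3' and num_workers='10' set above.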
def scale_bboxes(bboxes, width, height):
import copy
dets = copy.deepcopy(bboxes)
for i in range(len(dets)):
dets[i][0] = dets[i][0] * width
dets[i][1] = dets[i][1] * height
dets[i][2] = dets[i][2] * width
dets[i][3] = dets[i][3] * height
return dets
def file_lines(thefilepath):
count = 0
thefile = open(thefilepath, 'rb')
while True:
buffer = thefile.read(8192*1024)
if not buffer:
break
count += buffer.count('\n')
thefile.close( )
return count
def get_image_size(fname):
    '''Determine the image type of fname and return its size.
    from draco'''
with open(fname, 'rb') as fhandle:
head = fhandle.read(24)
if len(head) != 24:
return
if imghdr.what(fname) == 'png':
check = struct.unpack('>i', head[4:8])[0]
if check != 0x0d0a1a0a:
return
width, height = struct.unpack('>ii', head[16:24])
elif imghdr.what(fname) == 'gif':
width, height = struct.unpack('<HH', head[6:10])
elif imghdr.what(fname) == 'jpeg' or imghdr.what(fname) == 'jpg':
try:
fhandle.seek(0) # Read 0xff next
size = 2
ftype = 0
while not 0xc0 <= ftype <= 0xcf:
fhandle.seek(size, 1)
byte = fhandle.read(1)
while ord(byte) == 0xff:
byte = fhandle.read(1)
ftype = ord(byte)
size = struct.unpack('>H', fhandle.read(2))[0] - 2
# We are at a SOFn block
fhandle.seek(1, 1) # Skip `precision' byte.
height, width = struct.unpack('>HH', fhandle.read(4))
except Exception: #IGNORE:W0703
return
else:
return
return width, height
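# e.g. get_image_size('example.png') returns a (width, height) tuple for PNG, GIF and JPEG
# files and None when the header cannot be parsed (the file name here is only illustrative).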
def logging(message):
print('%s %s' % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()), message))
def read_pose(lab_path):
if os.path.getsize(lab_path):
truths = np.loadtxt(lab_path)
return truths
else:
return np.array([])
| 37.748828 | 210 | 0.531407 |
7947e1e1043167789147fc53296da23eb207a314 | 1,481 | py | Python | Required Codes/Python Scripts/scikit-feature-master/skfeature/example/test_FCBF.py | shuvoprime/A-Supervised-Machine-Learning-Approach-to-Predict-Vulnerability-to-Drug-Addiction | 8bd08557f134b5347b23fd5fb4af935d0d28d6b9 | ["MIT"] | null | null | null | Required Codes/Python Scripts/scikit-feature-master/skfeature/example/test_FCBF.py | shuvoprime/A-Supervised-Machine-Learning-Approach-to-Predict-Vulnerability-to-Drug-Addiction | 8bd08557f134b5347b23fd5fb4af935d0d28d6b9 | ["MIT"] | null | null | null | Required Codes/Python Scripts/scikit-feature-master/skfeature/example/test_FCBF.py | shuvoprime/A-Supervised-Machine-Learning-Approach-to-Predict-Vulnerability-to-Drug-Addiction | 8bd08557f134b5347b23fd5fb4af935d0d28d6b9 | ["MIT"] | null | null | null |
import scipy.io
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
from sklearn import svm
from skfeature.function.information_theoretical_based import FCBF
def main():
# load data
mat = scipy.io.loadmat('../data/colon.mat')
X = mat['X'] # data
X = X.astype(float)
y = mat['Y'] # label
y = y[:, 0]
n_samples, n_features = X.shape # number of samples and number of features
# split data into 10 folds
ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)
# perform evaluation on classification task
num_fea = 10 # number of selected features
clf = svm.LinearSVC() # linear SVM
correct = 0
for train, test in ss:
# obtain the index of each feature on the training set
idx = FCBF.fcbf(X[train], y[train], n_selected_features=num_fea)
# obtain the dataset on the selected features
features = X[:, idx[0:num_fea]]
# train a classification model with the selected features on the training dataset
clf.fit(features[train], y[train])
# predict the class labels of test data
y_predict = clf.predict(features[test])
# obtain the classification accuracy on the test data
acc = accuracy_score(y[test], y_predict)
correct = correct + acc
# output the average classification accuracy over all 10 folds
print 'Accuracy:', float(correct)/10
if __name__ == '__main__':
main()
| 31.510638 | 89 | 0.667792 |
7947e2e2cb8981ffc39146ed7a0893bfac8304b7 | 5,022 | py | Python | src/icemac/ab/calendar/calendar.py | icemac/icemac.ab.calendar | c0cdedd3a8fdd39520156c2ea7cf83aca742e3d9 | ["BSD-2-Clause"] | 1 | 2020-04-21T19:34:04.000Z | 2020-04-21T19:34:04.000Z | src/icemac/ab/calendar/calendar.py | icemac/icemac.ab.calendar | c0cdedd3a8fdd39520156c2ea7cf83aca742e3d9 | ["BSD-2-Clause"] | null | null | null | src/icemac/ab/calendar/calendar.py | icemac/icemac.ab.calendar | c0cdedd3a8fdd39520156c2ea7cf83aca742e3d9 | ["BSD-2-Clause"] | null | null | null |
# -*- coding: utf-8 -*-
from .interfaces import DATE_INDEX
from .interfaces import IEvent
from .interfaces import IRecurringEvent
from datetime import datetime, time
from icemac.addressbook.interfaces import ISchemaName
import gocept.reference
import grokcore.annotation as grok
import icemac.ab.calendar.interfaces
import icemac.ab.calendar.property
import icemac.addressbook.interfaces
import itertools
import pytz
import zope.catalog.interfaces
import zope.component
import zope.container.btree
import zope.interface
@zope.interface.implementer(icemac.ab.calendar.interfaces.ICalendar)
class Calendar(zope.container.btree.BTreeContainer):
"""Calendar containing dates."""
def get_events_for_month(self, month, timezone=None):
"""Get all events which belong to `month`."""
timezone = self._timezone_name_to_timezone(timezone)
midnight = time(0, 0, 0)
start = timezone.localize(
datetime.combine(month.firstOfMonth(), midnight))
end = timezone.localize(
datetime.combine((month + 1).firstOfMonth(), midnight))
return self._get_events(start, end, timezone, categories=[])
def get_events(self, start, end, timezone=None, categories=[]):
"""Get all events between `start` and `end` with one of `categories`.
`start` and `end` have to be datetime objects.
`categories` is a list of category titles.
`start` is part of the interval, but `end` is not.
"""
timezone = self._timezone_name_to_timezone(timezone)
return self._get_events(start, end, timezone, categories)
def _get_events(self, start, end, timezone, categories):
"""Get all events between `start` and `end`.
`start` is part of the interval, but `end` is not.
`categories` is a list of category titles.
Only return events of the given `categories`.
If `categories` is an empty list, do not restrict by category.
"""
recurring_events = zope.component.getUtility(
icemac.ab.calendar.interfaces.IRecurringEvents).get_events(
categories)
recurred_events = [x.get_events(start, end, timezone)
for x in recurring_events]
events_map = {(x.category, x.in_timezone(timezone)): x
for x in itertools.chain(*recurred_events)}
single_events = self.query_single_events(
start, end, categories=categories)
# Sort deleted events first. This way a recurred event can be deleted
# and later on replaced by a new event of the same category.
sorted_single_events = sorted(
single_events, key=lambda x: int(x.deleted), reverse=True)
# A single_event with the same category and datetime overwrites the
# recurred event as it is its customization:
single_events_map = {(x.category, x.in_timezone(timezone)): x
for x in sorted_single_events}
events_map.update(single_events_map)
# Filter out deleted recurred events and sort:
return sorted(
(x for x in events_map.values() if not x.deleted),
key=lambda x: (x.in_timezone(timezone),
icemac.addressbook.interfaces.ITitle(
x.category, None)))
def _timezone_name_to_timezone(self, name):
"""Return a timezone object. If `name` is None, return UTC."""
if name is None:
timezone = pytz.utc
else:
timezone = pytz.timezone(name)
return timezone
def query_single_events(self, start, end, categories=[]):
catalog = zope.component.getUtility(zope.catalog.interfaces.ICatalog)
query = {
DATE_INDEX: {'between': (start, end, False, True)},
'schema_name': {'any_of': [
ISchemaName(IEvent).schema_name,
ISchemaName(IRecurringEvent).schema_name,
]},
}
if categories:
query['keywords'] = {'any_of': categories}
# The values for the index are: min, max, min_exclude, max_exclude
return catalog.searchResults(**query)
class CalendarDisplaySettings(grok.Annotation):
"""Store calendar display settings in annotations."""
grok.context(icemac.ab.calendar.interfaces.ICalendar)
grok.implements(icemac.ab.calendar.interfaces.ICalendarDisplaySettings)
person_keyword = gocept.reference.Reference(
'person_keyword', ensure_integrity=True)
event_additional_fields = icemac.ab.calendar.property.AddressBookField(
'_event_additional_fields', multiple=True)
def __init__(self, *args, **kw):
super(CalendarDisplaySettings, self).__init__(*args, **kw)
self.person_keyword = None
@grok.adapter(icemac.addressbook.interfaces.IAddressBook)
@grok.implementer(icemac.ab.calendar.interfaces.ICalendar)
def calendar(address_book):
"""Adapt the event to its calendar."""
return address_book.calendar
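# Rough usage sketch for the query API above (hypothetical objects; `cal` would be obtained
# by adapting an address book, as in the adapter right above):
#
#     import datetime, pytz
#     tz = pytz.timezone('Europe/Berlin')
#     start = tz.localize(datetime.datetime(2021, 1, 1))
#     end = tz.localize(datetime.datetime(2021, 2, 1))
#     events = cal.get_events(start, end, timezone='Europe/Berlin', categories=['birthday'])
#
# `start` is included in the interval, `end` is not, matching the docstrings above.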
| 41.163934 | 77 | 0.665671 |
7947e35a0811b35e7438682407f8f6cfd40002c5 | 47 | py | Python | code/exampleStrats/alwaysCooperate.py | robo-monk/PrisonersDilemmaTournament | 84f323f46233d3c6b4ce4380e04e981520912423 | ["MIT"] | null | null | null | code/exampleStrats/alwaysCooperate.py | robo-monk/PrisonersDilemmaTournament | 84f323f46233d3c6b4ce4380e04e981520912423 | ["MIT"] | null | null | null | code/exampleStrats/alwaysCooperate.py | robo-monk/PrisonersDilemmaTournament | 84f323f46233d3c6b4ce4380e04e981520912423 | ["MIT"] | null | null | null |
def strategy(history, memory):
return 1, None
| 15.666667 | 30 | 0.744681 |
7947e404e4ee514e4c2feee5b184f0770d8a8cab | 1,319 | py | Python | pokershell/config.py | fblaha/pokershell | 36a3bfff6ead7fef175e430dfdb88ac6f6a31d1f | ["Apache-2.0"] | 6 | 2016-05-13T07:39:37.000Z | 2022-03-05T07:23:46.000Z | pokershell/config.py | fblaha/pokershell | 36a3bfff6ead7fef175e430dfdb88ac6f6a31d1f | ["Apache-2.0"] | 1 | 2017-12-18T09:08:28.000Z | 2017-12-31T01:48:32.000Z | pokershell/config.py | fblaha/pokershell | 36a3bfff6ead7fef175e430dfdb88ac6f6a31d1f | ["Apache-2.0"] | 5 | 2016-10-11T23:54:35.000Z | 2022-03-05T07:23:47.000Z |
import pokershell.utils as utils
class ConfigOption(utils.CommonReprMixin):
def __init__(self, name, type, value, short, description):
super().__init__()
self.name = name
self.type = type
self._value = value
self.short = short
self.description = description
@property
def value(self):
return self._value
@property
def python_name(self):
return self.name.replace('-', '_')
@property
def long(self):
return '--' + self.name
@value.setter
def value(self, val):
if type(val) != self.type:
self._value = self.type(val)
else:
self._value = val
options = {}
def register_option(name, type, value, short, description):
option = ConfigOption(name, type, value, short, description)
assert name not in options
options[name] = option
return option
player_num = register_option(name='player-num', value=2, type=int, short='-p',
description='default player number used when actual player '
                                         'number is not specified in hand simulation')
hand_stats = register_option(name='hand-stats', value=3, type=int, short='-x',
description='length of hand statistics table')
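# Small demonstration of the option registry above (editor sketch; only meaningful in the
# context of this module, since ConfigOption mixes in pokershell.utils.CommonReprMixin):
if __name__ == '__main__':
    # values assigned through the setter are coerced to the declared type
    options['player-num'].value = '4'
    assert options['player-num'].value == 4
    # CLI-style names are derived from the registered option name
    assert options['hand-stats'].long == '--hand-stats'
    assert options['hand-stats'].short == '-x'
    assert options['player-num'].python_name == 'player_num'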
| 27.479167 | 89 | 0.595906 |
7947e41914c569736fcfe8a66e3c0df8f2cbd9c1 | 16,340 | py | Python | var/spack/repos/builtin/packages/rocm-openmp-extras/package.py | HigherOrderMethods/spack | 87ed3fcc59fc25ce250042338d082925e3a3610b | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 1 | 2019-03-29T10:13:11.000Z | 2019-03-29T10:13:11.000Z | var/spack/repos/builtin/packages/rocm-openmp-extras/package.py | HigherOrderMethods/spack | 87ed3fcc59fc25ce250042338d082925e3a3610b | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 11 | 2021-05-12T06:29:51.000Z | 2022-03-31T04:13:35.000Z | var/spack/repos/builtin/packages/rocm-openmp-extras/package.py | HigherOrderMethods/spack | 87ed3fcc59fc25ce250042338d082925e3a3610b | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
import re
tools_url = 'https://github.com/ROCm-Developer-Tools'
compute_url = 'https://github.com/RadeonOpenCompute'
# Arrays of hashes are in order of the versions array below
# For example array[0] = 3.9.0, array[1] = 3.10.0, etc.
aomp = [
"377ab59b685a73b3f95fba95f5e028678ec5aafabc4177b7f0ffb78da095d679",
"808fca9bdefb109d5bcbbc9f5b59c564a6d422488869e986516f2a7233eda235",
"aa75455cf1d333419e5310117678e5789c5222f7cb05b05e3dfacef855c55d84",
"9e6ed2c7bdc3b4af069751b5d3e92913fd5ac318ae844f68bd78c5def990a8f7",
"c368d39ba9c1bc8b0edbe66edaa3f2a4ff5649c2bd16f499ac19dfd1591dec5a"
]
devlib = [
"c99f45dacf5967aef9a31e3731011b9c142446d4a12bac69774998976f2576d7",
"bca9291385d6bdc91a8b39a46f0fd816157d38abb1725ff5222e6a0daa0834cc",
"d0aa495f9b63f6d8cf8ac668f4dc61831d996e9ae3f15280052a37b9d7670d2a",
"f5f5aa6bfbd83ff80a968fa332f80220256447c4ccb71c36f1fbd2b4a8e9fc1b",
"34a2ac39b9bb7cfa8175cbab05d30e7f3c06aaffce99eed5f79c616d0f910f5f"
]
llvm = [
"1ff14b56d10c2c44d36c3c412b190d3d8cd1bb12cfc7cd58af004c16fd9987d1",
"8262aff88c1ff6c4deb4da5a4f8cda1bf90668950e2b911f93f73edaee53b370",
"aa1f80f429fded465e86bcfaef72255da1af1c5c52d58a4c979bc2f6c2da5a69",
"244e38d824fa7dfa8d0edf3c036b3c84e9c17a16791828e4b745a8d31eb374ae",
"751eca1d18595b565cfafa01c3cb43efb9107874865a60c80d6760ba83edb661"
]
flang = [
"5d113f44fb173bd0d5704b282c5cebbb2aa642c7c29f188764bfa1daa58374c9",
"3990d39ff1c908b150f464f0653a123d94be30802f9cad6af18fbb560c4b412e",
"f3e19699ce4ac404f41ffe08ef4546e31e2e741d8deb403b5477659e054275d5",
"f41f661425534b5cfb20e2c0efd9d0800609dc3876ee9c3f76f026d36abbfa35",
"d6c3f3aaa289251a433d99d1cffe432812093089ae876a6863295a15066c1eaf"
]
extras = [
"830a37cf1c6700f81fc00749206a37e7cda4d2867bbdf489e9e2d81f52d06b3d",
"5d98d34aff97416d8b5b9e16e7cf474580f8de8a73bd0e549c4440a3c5df4ef5",
"51cc8a7c5943e1d9bc657fc9b9797f45e3ce6a4e544d3d3a967c7cd0185a0510",
"91fdfadb94aa6afc1942124d0953ddc80c297fa75de1897fb42ac8e7dea51ab9",
"31bbe70b51c259a54370d021ae63528a1740b5477a22412685afd14150fff6f4"
]
versions = ['3.9.0', '3.10.0', '4.0.0', '4.1.0', '4.2.0']
versions_dict = dict()
components = ['aomp', 'devlib', 'llvm', 'flang', 'extras']
component_hashes = [aomp, devlib, llvm, flang, extras]
# Loop through versions and create necessary dictionaries of components
for outer_index, item in enumerate(versions):
for inner_index, component in enumerate(component_hashes):
versions_dict.setdefault(item, {})[components[inner_index]] = \
component_hashes[inner_index][outer_index]
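# e.g. versions_dict['4.2.0']['llvm'] now holds the matching llvm tarball sha256
# '751eca1d18595b565cfafa01c3cb43efb9107874865a60c80d6760ba83edb661'
# ('4.2.0' is the last entry of `versions`, llvm[4] the corresponding hash above).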
class RocmOpenmpExtras(Package):
"""OpenMP support for ROCm LLVM."""
homepage = tools_url + "/aomp"
url = tools_url + "/aomp/archive/rocm-4.2.0.tar.gz"
maintainers = ['srekolam', 'arjun-raj-kuppala', 'estewart08']
version('4.2.0', sha256=versions_dict['4.2.0']['aomp'])
version('4.1.0', sha256=versions_dict['4.1.0']['aomp'])
version('4.0.0', sha256=versions_dict['4.0.0']['aomp'])
version('3.10.0', sha256=versions_dict['3.10.0']['aomp'])
version('3.9.0', sha256=versions_dict['3.9.0']['aomp'])
depends_on('cmake@3:', type='build')
depends_on('[email protected]:', type=('build', 'link'))
depends_on('py-setuptools', type='build')
depends_on('python@3:', type='build')
depends_on('perl-data-dumper', type='build')
depends_on('awk', type='build')
depends_on('elfutils', type=('build', 'link'))
depends_on('libffi', type=('build', 'link'))
for ver in ['3.9.0', '3.10.0', '4.0.0', '4.1.0', '4.2.0']:
depends_on('hsakmt-roct@' + ver, when='@' + ver)
depends_on('comgr@' + ver, when='@' + ver)
depends_on('hsa-rocr-dev@' + ver, when='@' + ver)
# standalone rocm-device-libs
depends_on('rocm-device-libs@' + ver, when='@' + ver)
depends_on('llvm-amdgpu@{0} ~rocm-device-libs ~openmp'.format(ver),
when='@' + ver)
# tag changed to 'rocm-' in 4.0.0
if ver == '3.9.0' or ver == '3.10.0':
tag = 'rocm-uc-'
else:
tag = 'rocm-'
resource(
name='rocm-device-libs',
url=compute_url +
'/ROCm-Device-Libs/archive/' + tag + ver + '.tar.gz',
sha256=versions_dict[ver]['devlib'],
expand=True,
destination='rocm-openmp-extras',
placement='rocm-device-libs',
when='@' + ver)
resource(
name='flang',
url=tools_url + '/flang/archive/' + tag + ver + '.tar.gz',
sha256=versions_dict[ver]['flang'],
expand=True,
destination='rocm-openmp-extras',
placement='flang',
when='@' + ver)
resource(
name='aomp-extras',
url=tools_url + '/aomp-extras/archive/' + tag + ver + '.tar.gz',
sha256=versions_dict[ver]['extras'],
expand=True,
destination='rocm-openmp-extras',
placement='aomp-extras',
when='@' + ver)
resource(
name='llvm-project',
url=compute_url + '/llvm-project/archive/rocm-' + ver + '.tar.gz',
sha256=versions_dict[ver]['llvm'],
expand=True,
destination='rocm-openmp-extras',
placement='llvm-project',
when='@' + ver)
def setup_run_environment(self, env):
devlibs_prefix = self.spec['rocm-device-libs'].prefix
openmp_extras_prefix = self.spec['rocm-openmp-extras'].prefix
llvm_prefix = self.spec['llvm-amdgpu'].prefix
env.set('AOMP', '{0}'.format(llvm_prefix))
env.set('HIP_DEVICE_LIB_PATH',
'{0}/amdgcn/bitcode'.format(devlibs_prefix))
env.prepend_path('CPATH',
'{0}/include'.format(openmp_extras_prefix))
env.prepend_path('LIBRARY_PATH',
'{0}/lib'.format(openmp_extras_prefix))
if self.spec.version < Version('4.1.0'):
env.set('AOMP_GPU',
'`{0}/rocm-bin/mygpu`'.format(openmp_extras_prefix))
else:
env.set('AOMP_GPU',
'`{0}/bin/mygpu`'.format(openmp_extras_prefix))
def setup_build_environment(self, env):
openmp_extras_prefix = self.spec['rocm-openmp-extras'].prefix
llvm_prefix = self.spec['llvm-amdgpu'].prefix
env.set('AOMP', '{0}'.format(llvm_prefix))
env.set('FC', '{0}/bin/flang'.format(openmp_extras_prefix))
env.set(
'GFXLIST',
'gfx700 gfx701 gfx801 gfx803 gfx900 gfx902 gfx906 gfx908')
def patch(self):
src = self.stage.source_path
flang_warning = '-Wno-incompatible-pointer-types-discards-qualifiers)'
aomp_extras = '{0}/rocm-openmp-extras/aomp-extras/aomp-device-libs'
libomptarget = \
'{0}/rocm-openmp-extras/llvm-project/openmp/libomptarget'
flang = '{0}/rocm-openmp-extras/flang/'
if self.spec.version < Version('4.1.0'):
plugin = '/plugins/hsa/CMakeLists.txt'
else:
# Spack thinks some warnings from the flang build are errors.
# Disable those warnings.
filter_file('PRIVATE -fPIC)',
'PRIVATE -fPIC PRIVATE ' + flang_warning,
flang.format(src) + 'runtime/flang/CMakeLists.txt',
string=True)
plugin = '/plugins/amdgpu/CMakeLists.txt'
filter_file(
'{ROCM_DIR}/amdgcn/bitcode', '{DEVICE_LIBS_DIR}',
aomp_extras.format(src) + '/aompextras/CMakeLists.txt',
aomp_extras.format(src) + '/libm/CMakeLists.txt',
libomptarget.format(src) + '/deviceRTLs/amdgcn/CMakeLists.txt')
# Openmp adjustments
filter_file(
'-nogpulib', '-nogpulib -nogpuinc',
libomptarget.format(src) + '/deviceRTLs/amdgcn/CMakeLists.txt')
filter_file(
'-x hip', '-x hip -nogpulib -nogpuinc',
libomptarget.format(src) + '/deviceRTLs/amdgcn/CMakeLists.txt')
filter_file(
'-c ', '-c -nogpulib -nogpuinc -I{LIMIT}',
libomptarget.format(src) + '/hostrpc/CMakeLists.txt')
filter_file(
r'${ROCM_DIR}/hsa/include ${ROCM_DIR}/hsa/include/hsa',
'${HSA_INCLUDE}/hsa/include ${HSA_INCLUDE}/hsa/include/hsa',
libomptarget.format(src) + plugin,
string=True)
filter_file(
'{ROCM_DIR}/hsa/lib', '{HSA_LIB}',
libomptarget.format(src) + plugin)
filter_file(
r'{ROCM_DIR}/lib\)',
'{HSAKMT_LIB})\nset(HSAKMT_LIB64 ${HSAKMT_LIB64})',
libomptarget.format(src) + plugin)
filter_file(
r'-L${LIBOMPTARGET_DEP_LIBHSAKMT_LIBRARIES_DIRS}',
'-L${LIBOMPTARGET_DEP_LIBHSAKMT_LIBRARIES_DIRS} -L${HSAKMT_LIB64}',
libomptarget.format(src) + plugin,
string=True)
filter_file(
r'-rpath,${LIBOMPTARGET_DEP_LIBHSAKMT_LIBRARIES_DIRS}',
'-rpath,${LIBOMPTARGET_DEP_LIBHSAKMT_LIBRARIES_DIRS}' +
',-rpath,${HSAKMT_LIB64}',
libomptarget.format(src) + plugin,
string=True)
filter_file(
'{ROCM_DIR}/include', '{COMGR_INCLUDE}',
libomptarget.format(src) + plugin)
filter_file(
r'-L${LLVM_LIBDIR}${OPENMP_LIBDIR_SUFFIX}',
'-L${LLVM_LIBDIR}${OPENMP_LIBDIR_SUFFIX} -L${COMGR_LIB}',
libomptarget.format(src) + plugin,
string=True)
filter_file(
r'rpath,${LLVM_LIBDIR}${OPENMP_LIBDIR_SUFFIX}',
'rpath,${LLVM_LIBDIR}${OPENMP_LIBDIR_SUFFIX}' +
'-Wl,-rpath,${COMGR_LIB}',
libomptarget.format(src) + plugin,
string=True)
filter_file(
'ADDITIONAL_VERSIONS 2.7', 'ADDITIONAL_VERSIONS 3',
flang.format(src) + 'CMakeLists.txt')
def install(self, spec, prefix):
src = self.stage.source_path
gfx_list = "gfx700;gfx701;gfx801;gfx803;gfx900;gfx902;gfx906;gfx908"
openmp_extras_prefix = self.spec['rocm-openmp-extras'].prefix
devlibs_prefix = self.spec['rocm-device-libs'].prefix
devlibs_src = '{0}/rocm-openmp-extras/rocm-device-libs'.format(src)
hsa_prefix = self.spec['hsa-rocr-dev'].prefix
hsakmt_prefix = self.spec['hsakmt-roct'].prefix
comgr_prefix = self.spec['comgr'].prefix
llvm_inc = '/rocm-openmp-extras/llvm-project/llvm/include'
llvm_prefix = self.spec['llvm-amdgpu'].prefix
omp_bin_dir = '{0}/bin'.format(openmp_extras_prefix)
omp_lib_dir = '{0}/lib'.format(openmp_extras_prefix)
bin_dir = '{0}/bin'.format(llvm_prefix)
lib_dir = '{0}/lib'.format(llvm_prefix)
# flang1 and flang2 symlink needed for build of flang-runtime
# libdevice symlink to rocm-openmp-extras for runtime
# libdebug symlink to rocm-openmp-extras for runtime
if not (os.path.islink((os.path.join(bin_dir, 'flang1')))):
os.symlink(os.path.join(omp_bin_dir, 'flang1'),
os.path.join(bin_dir, 'flang1'))
if not (os.path.islink((os.path.join(bin_dir, 'flang2')))):
os.symlink(os.path.join(omp_bin_dir, 'flang2'),
os.path.join(bin_dir, 'flang2'))
if not (os.path.islink((os.path.join(lib_dir, 'libdevice')))):
os.symlink(os.path.join(omp_lib_dir, 'libdevice'),
os.path.join(lib_dir, 'libdevice'))
if not (os.path.islink((os.path.join(llvm_prefix, 'lib-debug')))):
os.symlink(os.path.join(openmp_extras_prefix, 'lib-debug'),
os.path.join(llvm_prefix, 'lib-debug'))
# Set cmake args
components = dict()
components['aomp-extras'] = [
'../rocm-openmp-extras/aomp-extras',
'-DLLVM_DIR={0}'.format(llvm_prefix),
'-DDEVICE_LIBS_DIR={0}/amdgcn/bitcode'.format(devlibs_prefix),
'-DAOMP_STANDALONE_BUILD=0',
'-DDEVICELIBS_ROOT={0}'.format(devlibs_src),
'-DNEW_BC_PATH=1',
'-DAOMP={0}'.format(llvm_prefix)
]
# Shared cmake configuration for openmp, openmp-debug
openmp_common_args = [
'-DROCM_DIR={0}'.format(hsa_prefix),
'-DDEVICE_LIBS_DIR={0}/amdgcn/bitcode'.format(devlibs_prefix),
'-DAOMP_STANDALONE_BUILD=0',
'-DDEVICELIBS_ROOT={0}'.format(devlibs_src),
'-DOPENMP_TEST_C_COMPILER={0}/clang'.format(bin_dir),
'-DOPENMP_TEST_CXX_COMPILER={0}/clang++'.format(bin_dir),
'-DLIBOMPTARGET_AMDGCN_GFXLIST={0}'.format(gfx_list),
'-DLIBOMP_COPY_EXPORTS=OFF',
'-DHSA_LIB={0}/lib'.format(hsa_prefix),
'-DHSAKMT_LIB={0}/lib'.format(hsakmt_prefix),
'-DHSAKMT_LIB64={0}/lib64'.format(hsakmt_prefix),
'-DCOMGR_INCLUDE={0}/include'.format(comgr_prefix),
'-DCOMGR_LIB={0}/lib'.format(comgr_prefix),
'-DOPENMP_ENABLE_LIBOMPTARGET=1',
'-DOPENMP_ENABLE_LIBOMPTARGET_HSA=1',
'-DLLVM_MAIN_INCLUDE_DIR={0}{1}'.format(src, llvm_inc),
'-DLLVM_INSTALL_PREFIX={0}'.format(llvm_prefix)
]
if self.spec.version < Version('4.1.0'):
openmp_common_args += [
'-DHSA_INCLUDE={0}'.format(hsa_prefix)
]
else:
openmp_common_args += [
'-DHSA_INCLUDE={0}/include/hsa'.format(hsa_prefix)
]
components['openmp'] = ['../rocm-openmp-extras/llvm-project/openmp']
components['openmp'] += openmp_common_args
components['openmp-debug'] = [
'../rocm-openmp-extras/llvm-project/openmp',
'-DLIBOMPTARGET_NVPTX_DEBUG=ON',
'-DCMAKE_CXX_FLAGS=-g',
'-DCMAKE_C_FLAGS=-g'
]
components['openmp-debug'] += openmp_common_args
# Shared cmake configuration for pgmath, flang, flang-runtime
flang_common_args = [
'-DLLVM_ENABLE_ASSERTIONS=ON',
'-DLLVM_CONFIG={0}/llvm-config'.format(bin_dir),
'-DCMAKE_CXX_COMPILER={0}/clang++'.format(bin_dir),
'-DCMAKE_C_COMPILER={0}/clang'.format(bin_dir),
'-DCMAKE_Fortran_COMPILER={0}/flang'.format(bin_dir),
'-DLLVM_TARGETS_TO_BUILD=AMDGPU;x86'
]
components['pgmath'] = [
'../rocm-openmp-extras/flang/runtime/libpgmath'
]
components['pgmath'] += flang_common_args
components['flang'] = [
'../rocm-openmp-extras/flang',
'-DFLANG_OPENMP_GPU_AMD=ON',
'-DFLANG_OPENMP_GPU_NVIDIA=ON'
]
components['flang'] += flang_common_args
components['flang-runtime'] = [
'../rocm-openmp-extras/flang',
'-DLLVM_INSTALL_RUNTIME=ON',
'-DFLANG_BUILD_RUNTIME=ON',
'-DOPENMP_BUILD_DIR={0}/spack-build-openmp/runtime/src'.format(src)
]
components['flang-runtime'] += flang_common_args
build_order = [
"aomp-extras", "openmp", "openmp-debug", "pgmath",
"flang", "flang-runtime"
]
# Override standard CMAKE_BUILD_TYPE
for arg in std_cmake_args:
found = re.search("CMAKE_BUILD_TYPE", arg)
if found:
std_cmake_args.remove(arg)
for component in build_order:
with working_dir('spack-build-{0}'.format(component), create=True):
cmake_args = components[component]
cmake_args.extend(std_cmake_args)
# OpenMP build needs to be run twice(Release, Debug)
if component == "openmp-debug":
cmake_args.append("-DCMAKE_BUILD_TYPE=Debug")
else:
cmake_args.append("-DCMAKE_BUILD_TYPE=Release")
cmake(*cmake_args)
make()
make("install")
| 41.262626 | 79 | 0.611567 |
7947e43e9efdc64b556da3863796fa7716e34ac9 | 9,160 | py | Python | public/Python27/Lib/test/test_string.py | NingrumFadillah/cekmutasi | 1fccb6cafb874c2a80ece9b71d7c682fd44dbd48 | ["MIT"] | 27 | 2020-11-12T19:24:54.000Z | 2022-03-27T23:10:45.000Z | public/Python27/Lib/test/test_string.py | NingrumFadillah/cekmutasi | 1fccb6cafb874c2a80ece9b71d7c682fd44dbd48 | ["MIT"] | 2 | 2020-11-02T06:30:39.000Z | 2022-02-23T18:39:55.000Z | public/Python27/Lib/test/test_string.py | NingrumFadillah/cekmutasi | 1fccb6cafb874c2a80ece9b71d7c682fd44dbd48 | ["MIT"] | 3 | 2017-04-07T12:02:22.000Z | 2020-03-23T12:11:55.000Z |
import unittest, string
from test import test_support, string_tests
from UserList import UserList
class StringTest(
string_tests.CommonTest,
string_tests.MixinStrStringUserStringTest
):
type2test = str
def checkequal(self, result, object, methodname, *args):
realresult = getattr(string, methodname)(object, *args)
self.assertEqual(
result,
realresult
)
def checkraises(self, exc, object, methodname, *args):
self.assertRaises(
exc,
getattr(string, methodname),
object,
*args
)
def checkcall(self, object, methodname, *args):
getattr(string, methodname)(object, *args)
def test_join(self):
# These are the same checks as in string_test.ObjectTest.test_join
# but the argument order ist different
self.checkequal('a b c d', ['a', 'b', 'c', 'd'], 'join', ' ')
self.checkequal('abcd', ('a', 'b', 'c', 'd'), 'join', '')
self.checkequal('w x y z', string_tests.Sequence(), 'join', ' ')
self.checkequal('abc', ('abc',), 'join', 'a')
self.checkequal('z', UserList(['z']), 'join', 'a')
if test_support.have_unicode:
self.checkequal(unicode('a.b.c'), ['a', 'b', 'c'], 'join', unicode('.'))
self.checkequal(unicode('a.b.c'), [unicode('a'), 'b', 'c'], 'join', '.')
self.checkequal(unicode('a.b.c'), ['a', unicode('b'), 'c'], 'join', '.')
self.checkequal(unicode('a.b.c'), ['a', 'b', unicode('c')], 'join', '.')
self.checkraises(TypeError, ['a', unicode('b'), 3], 'join', '.')
for i in [5, 25, 125]:
self.checkequal(
((('a' * i) + '-') * i)[:-1],
['a' * i] * i, 'join', '-')
self.checkequal(
((('a' * i) + '-') * i)[:-1],
('a' * i,) * i, 'join', '-')
self.checkraises(TypeError, string_tests.BadSeq1(), 'join', ' ')
self.checkequal('a b c', string_tests.BadSeq2(), 'join', ' ')
try:
def f():
yield 4 + ""
self.fixtype(' ').join(f())
except TypeError, e:
if '+' not in str(e):
self.fail('join() ate exception message')
else:
self.fail('exception not raised')
class ModuleTest(unittest.TestCase):
def test_attrs(self):
string.whitespace
string.lowercase
string.uppercase
string.letters
string.digits
string.hexdigits
string.octdigits
string.punctuation
string.printable
def test_atoi(self):
self.assertEqual(string.atoi(" 1 "), 1)
self.assertRaises(ValueError, string.atoi, " 1x")
self.assertRaises(ValueError, string.atoi, " x1 ")
def test_atol(self):
self.assertEqual(string.atol(" 1 "), 1L)
self.assertRaises(ValueError, string.atol, " 1x ")
self.assertRaises(ValueError, string.atol, " x1 ")
def test_atof(self):
self.assertAlmostEqual(string.atof(" 1 "), 1.0)
self.assertRaises(ValueError, string.atof, " 1x ")
self.assertRaises(ValueError, string.atof, " x1 ")
def test_maketrans(self):
transtable = '\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
self.assertEqual(string.maketrans('abc', 'xyz'), transtable)
self.assertRaises(ValueError, string.maketrans, 'abc', 'xyzq')
def test_capwords(self):
self.assertEqual(string.capwords('abc def ghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('abc\tdef\nghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('abc\t def \nghi'), 'Abc Def Ghi')
self.assertEqual(string.capwords('ABC DEF GHI'), 'Abc Def Ghi')
self.assertEqual(string.capwords('ABC-DEF-GHI', '-'), 'Abc-Def-Ghi')
self.assertEqual(string.capwords('ABC-def DEF-ghi GHI'), 'Abc-def Def-ghi Ghi')
self.assertEqual(string.capwords(' aBc DeF '), 'Abc Def')
self.assertEqual(string.capwords('\taBc\tDeF\t'), 'Abc Def')
self.assertEqual(string.capwords('\taBc\tDeF\t', '\t'), '\tAbc\tDef\t')
def test_formatter(self):
fmt = string.Formatter()
self.assertEqual(fmt.format("foo"), "foo")
self.assertEqual(fmt.format("foo{0}", "bar"), "foobar")
self.assertEqual(fmt.format("foo{1}{0}-{1}", "bar", 6), "foo6bar-6")
self.assertEqual(fmt.format("-{arg!r}-", arg='test'), "-'test'-")
# override get_value ############################################
class NamespaceFormatter(string.Formatter):
def __init__(self, namespace={}):
string.Formatter.__init__(self)
self.namespace = namespace
def get_value(self, key, args, kwds):
if isinstance(key, str):
try:
# Check explicitly passed arguments first
return kwds[key]
except KeyError:
return self.namespace[key]
else:
string.Formatter.get_value(key, args, kwds)
fmt = NamespaceFormatter({'greeting':'hello'})
self.assertEqual(fmt.format("{greeting}, world!"), 'hello, world!')
# override format_field #########################################
class CallFormatter(string.Formatter):
def format_field(self, value, format_spec):
return format(value(), format_spec)
fmt = CallFormatter()
self.assertEqual(fmt.format('*{0}*', lambda : 'result'), '*result*')
# override convert_field ########################################
class XFormatter(string.Formatter):
def convert_field(self, value, conversion):
if conversion == 'x':
return None
return super(XFormatter, self).convert_field(value, conversion)
fmt = XFormatter()
self.assertEqual(fmt.format("{0!r}:{0!x}", 'foo', 'foo'), "'foo':None")
# override parse ################################################
class BarFormatter(string.Formatter):
# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
def parse(self, format_string):
for field in format_string.split('|'):
if field[0] == '+':
# it's markup
field_name, _, format_spec = field[1:].partition(':')
yield '', field_name, format_spec, None
else:
yield field, None, None, None
fmt = BarFormatter()
self.assertEqual(fmt.format('*|+0:^10s|*', 'foo'), '* foo *')
# test all parameters used
class CheckAllUsedFormatter(string.Formatter):
def check_unused_args(self, used_args, args, kwargs):
                # Track which arguments actually got used
unused_args = set(kwargs.keys())
unused_args.update(range(0, len(args)))
for arg in used_args:
unused_args.remove(arg)
if unused_args:
raise ValueError("unused arguments")
fmt = CheckAllUsedFormatter()
self.assertEqual(fmt.format("{0}", 10), "10")
self.assertEqual(fmt.format("{0}{i}", 10, i=100), "10100")
self.assertEqual(fmt.format("{0}{i}{1}", 10, 20, i=100), "1010020")
self.assertRaises(ValueError, fmt.format, "{0}{i}{1}", 10, 20, i=100, j=0)
self.assertRaises(ValueError, fmt.format, "{0}", 10, 20)
self.assertRaises(ValueError, fmt.format, "{0}", 10, 20, i=100)
self.assertRaises(ValueError, fmt.format, "{i}", 10, 20, i=100)
# Alternate formatting is not supported
self.assertRaises(ValueError, format, '', '#')
self.assertRaises(ValueError, format, '', '#20')
class BytesAliasTest(unittest.TestCase):
def test_builtin(self):
self.assertTrue(str is bytes)
def test_syntax(self):
self.assertEqual(b"spam", "spam")
self.assertEqual(br"egg\foo", "egg\\foo")
self.assertTrue(type(b""), str)
self.assertTrue(type(br""), str)
def test_main():
test_support.run_unittest(StringTest, ModuleTest, BytesAliasTest)
if __name__ == "__main__":
test_main()
| 42.018349 | 764 | 0.559607 |
7947e48d11b8f33f814f4fb6352f15d6daf16777 | 685 | py | Python | app/core/migrations/0003_ingredient.py | abdulsagheer/recipe-api | 6c5f8408705c8ebf7fb1f4c916b898f8bab6ff43 | ["MIT"] | null | null | null | app/core/migrations/0003_ingredient.py | abdulsagheer/recipe-api | 6c5f8408705c8ebf7fb1f4c916b898f8bab6ff43 | ["MIT"] | null | null | null | app/core/migrations/0003_ingredient.py | abdulsagheer/recipe-api | 6c5f8408705c8ebf7fb1f4c916b898f8bab6ff43 | ["MIT"] | null | null | null |
# Generated by Django 3.1.7 on 2021-03-15 20:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.541667 | 118 | 0.617518 |
7947e49f6f85fe7d238de36258dfb0d9e3db3823 | 15,545 | py | Python | hydropandas/extensions/plots.py | boyandomhof/hydropandas | 0307d8dfa1262b832d559c80971884b53af0f675 | ["MIT"] | 11 | 2021-01-04T13:01:24.000Z | 2022-03-17T15:43:37.000Z | hydropandas/extensions/plots.py | boyandomhof/hydropandas | 0307d8dfa1262b832d559c80971884b53af0f675 | ["MIT"] | 21 | 2020-05-18T12:47:20.000Z | 2022-03-29T11:13:06.000Z | hydropandas/extensions/plots.py | boyandomhof/hydropandas | 0307d8dfa1262b832d559c80971884b53af0f675 | ["MIT"] | 2 | 2020-07-06T06:36:10.000Z | 2021-12-24T12:41:43.000Z |
import os
import numpy as np
from . import accessor
import logging
logger = logging.getLogger(__name__)
@accessor.register_obscollection_accessor("plots")
class CollectionPlots:
def __init__(self, oc_obj):
"""Object containing plotting methods for ObsCollections.
Parameters
----------
oc : ObsCollection
ObsCollection instance
"""
self._obj = oc_obj
def interactive_plots(self, savedir,
tmin=None, tmax=None,
per_location=True,
**kwargs):
"""Create interactive plots of the observations using bokeh.
Parameters
----------
savedir : str
directory used for the folium map and bokeh plots
tmin : dt.datetime, optional
start date for timeseries plot
tmax : dt.datetime, optional
end date for timeseries plot
per_location : bool, optional
if True plot multiple filters on the same location in one figure
**kwargs :
will be passed to the Obs.to_interactive_plot method, options
include:
- plot_columns : list of str
- hoover_names : list of str
- plot_freq : list of str
- plot_legend_names : list of str
- markers : list of str
- hoover_names : list of str
- plot_colors : list of str
- ylabel : str
- add_filter_to_legend : boolean
"""
_color_cycle = (
'blue',
'olive',
'lime',
'red',
'orange',
'yellow',
'purple',
'silver',
'powderblue',
'salmon',
'tan')
if per_location:
plot_names = self._obj.groupby('locatie').count().index
else:
plot_names = self._obj.index
for name in plot_names:
if per_location:
oc = self._obj.loc[self._obj.locatie
== name, 'obs'].sort_index()
else:
oc = self._obj.loc[[name], 'obs']
p = None
for i, o in enumerate(oc.values):
if i == 10:
raise NotImplementedError(
'cannot add more than 10 lines to a single plot')
try:
p = o.plots.interactive_plot(savedir=savedir,
p=p,
tmin=tmin, tmax=tmax,
plot_colors=[_color_cycle[i + 1]],
return_filename=False,
**kwargs)
logger.info(f'created iplot -> {o.name}')
except ValueError:
logger.error(f'{o.name} has no data between {tmin} and {tmax}')
o.iplot_fname = None
def interactive_map(self, plot_dir, m=None,
tiles='OpenStreetMap',
fname=None,
per_location=True,
color='blue',
legend_name=None,
add_legend=True,
map_label='',
map_label_size=20,
col_name_lat='lat',
col_name_lon='lon',
zoom_start=13,
create_interactive_plots=True,
**kwargs):
"""Create an interactive map with interactive plots using folium and
bokeh.
Notes
-----
Some notes on this method:
- if you want to have multiple obs collections on one folium map, only
the last one should have add_legend = True to create a correct legend
- the color of the observation point on the map is now the same color
as the line of the observation measurements. Also a built-in color
cycle is used for different measurements on the same location.
Parameters
----------
plot_dir : str
directory used for the folium map and bokeh plots
m : folium.Map, str, optional
current map to add observations too, if None a new map is created
tiles : str, optional
background tiles, default is openstreetmap
fname : str, optional
name of the folium map
per_location : bool, optional
if True plot multiple filters on the same location in one figure
color : str, optional
color of the observation points on the map
legend_name : str, optional
the name of the observation points shown in the map legend
add_legend : boolean, optional
add a legend to a plot
map_label : str, optional
add a label to the obs locations on the map, this label is
picked from the meta attribute of the obs points.
map_label_size : int, optional
label size of the map_label in pt.
col_name_lat : str, optional
name of the column in the obs_collection dic with the lat values
of the observation points
col_name_lon : str, optional
see col_name_lat
zoom_start : int, optional
            start zoom level of the folium map
create_interactive_plots : boolean, optional
if True interactive plots will be created, if False the iplot_fname
attribute of the observations is used.
**kwargs :
will be passed to the to_interactive_plots method options are:
- plot_columns : list of str
- hoover_names : list of str
- plot_legend_names : list of str
- plot_freq : list of str
- markers : list of str
- hoover_names : list of str
- plot_colors : list of str
- ylabel : str
- add_filter_to_legend : boolean
- tmin : dt.datetime
- tmax : dt.datetime
Returns
-------
m : folium.Map
the folium map
"""
import branca
import folium
from folium.features import DivIcon
# create interactive bokeh plots
if create_interactive_plots:
self._obj.plots.interactive_plots(savedir=plot_dir,
per_location=per_location,
**kwargs)
# check if observation collection has lat and lon values
        if (col_name_lat not in self._obj.columns) or (col_name_lon not in self._obj.columns):
self._obj.geo.set_lat_lon()
# determine start location of map
northing = np.mean(
(self._obj[col_name_lat].min(), self._obj[col_name_lat].max()))
easting = np.mean(
(self._obj[col_name_lon].min(), self._obj[col_name_lon].max()))
# create map if no map is given
if m is None:
m = folium.Map([northing, easting], zoom_start=zoom_start)
# add the point observations with plots to the map
group_name = '<span style=\\"color: {};\\">{}</span>'.format(
color, legend_name)
group = folium.FeatureGroup(name=group_name)
if per_location:
plot_names = self._obj.groupby('locatie').count().index
else:
plot_names = self._obj.index
for name in plot_names:
if per_location:
oc = self._obj.loc[self._obj.locatie
== name, 'obs'].sort_index()
o = oc.iloc[-1]
name = o.name
else:
o = self._obj.loc[name, 'obs']
if o.iplot_fname is not None:
with open(o.iplot_fname, 'r') as f:
bokeh_html = f.read()
iframe = branca.element.IFrame(
html=bokeh_html, width=620, height=420)
popup = folium.Popup(iframe, max_width=620)
folium.CircleMarker([self._obj.loc[o.name, col_name_lat],
self._obj.loc[o.name, col_name_lon]],
icon=folium.Icon(icon='signal'), fill=True,
color=color,
popup=popup).add_to(group)
if map_label != '':
folium.map.Marker(
[self._obj.loc[name, col_name_lat], self._obj.loc[name, col_name_lon]], icon=DivIcon(
icon_size=(
150, 36), icon_anchor=(
0, 0), html='<div style="font-size: %ipt">%s</div>' %
(map_label_size, o.meta[map_label]))).add_to(group)
else:
logger.info(f'no iplot available for {o.name}')
group.add_to(m)
# add legend
if add_legend:
folium.map.LayerControl('topright', collapsed=False).add_to(m)
# save map
#filename and path
if fname is not None:
if not fname.endswith('.html'):
fname = fname + '.html'
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
m.save(os.path.join(plot_dir, fname))
return m
@accessor.register_obs_accessor("plots")
class ObsPlots:
def __init__(self, obs):
self._obj = obs
def interactive_plot(self,
savedir=None,
plot_columns=('stand_m_tov_nap',),
markers=('line',),
p=None,
plot_legend_names=('',),
plot_freq=(None,),
tmin=None,
tmax=None,
hoover_names=('Peil',),
hoover_date_format="%Y-%m-%d",
ylabel='m NAP',
plot_colors=('blue',),
add_filter_to_legend=False,
return_filename=False):
"""Create an interactive plot of the observations using bokeh.
Todo:
- add options for hoovers, markers, linestyle
Parameters
----------
savedir : str, optional
directory used for the folium map and bokeh plots
plot_columns : list of str, optional
name of the column in the obs df that will be plotted with bokeh
markers : list of str, optional
type of markers that can be used for plot, 'line' and 'circle' are
supported
p : bokeh.plotting.figure, optional
reference to existing figure, if p is None a new figure is created
plot_legend_names : list of str, optional
legend in bokeh plot
plot_freq : list of str, optional
bokeh plot is resampled with this frequency to reduce the size
tmin : dt.datetime, optional
start date for timeseries plot
tmax : dt.datetime, optional
end date for timeseries plot
hoover_names : list of str, optional
names will be displayed together with the plot_column value
when hoovering over plot
hoover_date_format : str, optional
date format to use when hoovering over a plot
ylabel : str, optional
label on the y-axis
plot_colors : list of str, optional
plot_colors used for the plots
add_filter_to_legend : boolean, optional
if True the attributes bovenkant_filter and onderkant_filter
are added to the legend name
return_filename : boolean, optional
if True filename will be returned
Returns
-------
fname_plot : str or bokeh plot
filename of the bokeh plot or reference to bokeh plot
"""
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.plotting import save
from bokeh.resources import CDN
# create plot dataframe
plot_df = self._obj[tmin:tmax].copy()
plot_df['date'] = plot_df.index.strftime(hoover_date_format)
if plot_df.empty or plot_df[list(plot_columns)].isna().all().all():
raise ValueError(
'{} has no data between {} and {}'.format(self._obj.name, tmin, tmax))
# create plot
if p is None:
p = figure(plot_width=600, plot_height=400, x_axis_type='datetime',
title='')
p.yaxis.axis_label = ylabel
# get x axis
xcol = self._obj.index.name
if xcol is None:
xcol = 'index'
# get color
if len(plot_colors) < len(plot_columns):
plot_colors = list(plot_colors) * len(plot_columns)
# get base for hoover tooltips
plots = []
tooltips = []
tooltips.append(('date', "@date"))
# plot multiple columns
for i, column in enumerate(plot_columns):
# legend name
if add_filter_to_legend:
lname = '{} {} (NAP {:.2f} - {:.2f})'.format(plot_legend_names[i], self._obj.name,
self._obj.onderkant_filter,
self._obj.bovenkant_filter)
else:
lname = '{} {}'.format(plot_legend_names[i], self._obj.name)
# resample data
if plot_freq[i] is None:
source = ColumnDataSource(plot_df[[column, 'date']])
else:
source = ColumnDataSource(
plot_df[[column, 'date']].resample(plot_freq[i]).first())
# plot data
if markers[i] in ['line', 'l']:
plots.append(p.line(xcol, column, source=source, color=plot_colors[i],
legend_label=lname,
alpha=0.8, muted_alpha=0.2))
elif markers[i] in ['circle','c']:
plots.append(p.circle(xcol, column, source=source, color=plot_colors[i],
legend_label=lname,
alpha=0.8, muted_alpha=0.2))
else:
                raise NotImplementedError("marker '{}' invalid. Only line and "
"circle are currently available".format(markers[i]))
# add columns to hoover tooltips
tooltips_p = tooltips.copy()
tooltips_p.append((hoover_names[i], "@{}".format(column)))
hover = HoverTool(renderers=[plots[i]], tooltips=tooltips_p, mode='vline')
p.add_tools(hover)
p.legend.location = "top_left"
p.legend.click_policy = "mute"
# save plot
if savedir is not None:
if not os.path.isdir(savedir):
os.makedirs(savedir)
self._obj.iplot_fname = os.path.join(
savedir, self._obj.name + '.html')
save(p, self._obj.iplot_fname, resources=CDN, title=self._obj.name)
if return_filename:
return self._obj.iplot_fname
else:
return p
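# ---------------------------------------------------------------------------
# Illustrative sketch added for clarity; it is not part of the original module.
# It reproduces, on synthetic data, the core bokeh pattern used by
# interactive_plot() above: a ColumnDataSource-backed line glyph, a
# per-renderer HoverTool in 'vline' mode and a legend with click_policy set to
# 'mute'. The output file name and the random series are assumptions made only
# for this example.
def _example_interactive_plot(fname='example_plot.html'):
    import numpy as np
    import pandas as pd
    from bokeh.models import ColumnDataSource, HoverTool
    from bokeh.plotting import figure, save
    from bokeh.resources import CDN

    index = pd.date_range('2020-01-01', periods=100, freq='D')
    df = pd.DataFrame({'level': np.random.randn(100).cumsum()}, index=index)
    df['date'] = df.index.strftime('%Y-%m-%d')
    source = ColumnDataSource(df)

    p = figure(plot_width=600, plot_height=400, x_axis_type='datetime')
    renderer = p.line('index', 'level', source=source, legend_label='example',
                      alpha=0.8, muted_alpha=0.2)
    hover = HoverTool(renderers=[renderer],
                      tooltips=[('date', '@date'), ('level', '@level')],
                      mode='vline')
    p.add_tools(hover)
    p.legend.click_policy = 'mute'
    save(p, fname, resources=CDN, title='example')
    return p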
| 37.639225 | 109 | 0.5156 |
7947e4a9f21b9336faa7f5564e48701bce7d2762 | 6,843 | py | Python | sp_api/api/products/products.py | coderjiang/python-amazon-sp-api | ecf64d468975b63839ee99b888dc8c72c32dcebd | [
"MIT"
] | 1 | 2022-01-10T01:45:07.000Z | 2022-01-10T01:45:07.000Z | sp_api/api/products/products.py | coderjiang/python-amazon-sp-api | ecf64d468975b63839ee99b888dc8c72c32dcebd | [
"MIT"
] | 30 | 2022-02-23T21:41:48.000Z | 2022-03-31T21:36:16.000Z | sp_api/api/products/products.py | pharm-it-de/python-amazon-sp-api | 389cca687a48da2387dac45d4352718dc60aaeec | [
"MIT"
] | null | null | null | import urllib.parse
from sp_api.base import ApiResponse, Client, fill_query_params, sp_endpoint
class Products(Client):
"""
:link: https://github.com/amzn/selling-partner-api-docs/blob/main/references/product-pricing-api/productPricingV0.md
"""
@sp_endpoint('/products/pricing/v0/price', method='GET')
def get_product_pricing_for_skus(self, seller_sku_list: [str], item_condition=None, **kwargs) -> ApiResponse:
"""
get_product_pricing_for_skus(self, seller_sku_list: [str], item_condition: str = None, **kwargs) -> ApiResponse
Returns pricing information for a seller's offer listings based on SKU.
**Usage Plan:**
====================================== ==============
Rate (requests per second) Burst
====================================== ==============
1 1
====================================== ==============
Args:
seller_sku_list: [str]
item_condition: str ("New", "Used", "Collectible", "Refurbished", "Club")
**kwargs:
Returns:
ApiResponse:
"""
if item_condition is not None:
kwargs['ItemCondition'] = item_condition
return self._create_get_pricing_request(seller_sku_list, 'Sku', **kwargs)
@sp_endpoint('/products/pricing/v0/price', method='GET')
def get_product_pricing_for_asins(self, asin_list: [str], item_condition=None, **kwargs) -> ApiResponse:
"""
get_product_pricing_for_asins(self, asin_list: [str], item_condition=None, **kwargs) -> ApiResponse
Returns pricing information for a seller's offer listings based on ASIN.
**Usage Plan:**
====================================== ==============
Rate (requests per second) Burst
====================================== ==============
1 1
====================================== ==============
:param asin_list: [str]
:param item_condition: str ("New", "Used", "Collectible", "Refurbished", "Club")
Filters the offer listings based on item condition. Possible values: New, Used, Collectible, Refurbished, Club.
Available values : New, Used, Collectible, Refurbished, Club
:param kwargs:
:return: ApiResponse
"""
if item_condition is not None:
kwargs['ItemCondition'] = item_condition
return self._create_get_pricing_request(asin_list, 'Asin', **kwargs)
@sp_endpoint('/products/pricing/v0/competitivePrice', method='GET')
def get_competitive_pricing_for_skus(self, seller_sku_list: [str], **kwargs) -> ApiResponse:
"""
get_competitive_pricing_for_skus(self, seller_sku_list, **kwargs) -> ApiResponse
Returns competitive pricing information for a seller's offer listings based on Seller Sku
**Usage Plan:**
====================================== ==============
Rate (requests per second) Burst
====================================== ==============
1 1
====================================== ==============
:param seller_sku_list: [str]
:param kwargs:
:return: ApiResponse
"""
return self._create_get_pricing_request(seller_sku_list, 'Sku', **kwargs)
@sp_endpoint('/products/pricing/v0/competitivePrice', method='GET')
def get_competitive_pricing_for_asins(self, asin_list: [str], **kwargs) -> ApiResponse:
"""
get_competitive_pricing_for_asins(self, asin_list, **kwargs) -> ApiResponse
Returns competitive pricing information for a seller's offer listings based on ASIN
**Usage Plan:**
====================================== ==============
Rate (requests per second) Burst
====================================== ==============
1 1
====================================== ==============
:param asin_list: [str]
:param kwargs:
        :return: ApiResponse
"""
return self._create_get_pricing_request(asin_list, 'Asin', **kwargs)
@sp_endpoint('/products/pricing/v0/listings/{}/offers', method='GET')
def get_listings_offer(self, seller_sku: str, **kwargs) -> ApiResponse:
"""
get_listings_offer(self, seller_sku: str, **kwargs) -> ApiResponse
Returns the lowest priced offers for a single SKU listing
**Usage Plan:**
====================================== ==============
Rate (requests per second) Burst
====================================== ==============
1 1
====================================== ==============
Args:
:param seller_sku: str
key ItemCondition: str | Possible values: New, Used, Collectible, Refurbished, Club.
key MarketplaceId: str
Returns:
GetOffersResponse:
"""
return self._request(fill_query_params(kwargs.pop('path'), seller_sku), params={**kwargs})
@sp_endpoint('/products/pricing/v0/items/{}/offers', method='GET')
def get_item_offers(self, asin: str, **kwargs) -> ApiResponse:
"""
get_item_offers(self, asin: str, **kwargs) -> ApiResponse
Returns the lowest priced offers for a single item based on ASIN
**Usage Plan:**
====================================== ==============
Rate (requests per second) Burst
====================================== ==============
5 10
====================================== ==============
Args:
            :param asin: str
key ItemCondition: str | Possible values: New, Used, Collectible, Refurbished, Club.
key MarketplaceId: str
Returns:
GetOffersResponse:
"""
return self._request(fill_query_params(kwargs.pop('path'), asin), params={**kwargs})
def _create_get_pricing_request(self, item_list, item_type, **kwargs):
return self._request(kwargs.pop('path'),
params={**{f"{item_type}s": ','.join(
[urllib.parse.quote_plus(s) for s in item_list])},
'ItemType': item_type,
**({'ItemCondition': kwargs.pop(
'ItemCondition')} if 'ItemCondition' in kwargs else {}),
'MarketplaceId': kwargs.get('MarketplaceId', self.marketplace_id)})
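# Hedged usage sketch added for illustration; it is not part of the original
# file. It exercises the endpoints defined above. Credential handling is left
# to the library's normal configuration (credentials file or environment
# variables), so the marketplace choice and the example ASIN below are
# assumptions made only for this example.
def _example_pricing_lookup():
    from sp_api.base import Marketplaces

    client = Products(marketplace=Marketplaces.US)
    response = client.get_product_pricing_for_asins(
        ['B01N5IB20Q'], item_condition='New')
    # ApiResponse exposes the decoded response body on .payload
    for item in response.payload or []:
        print(item.get('ASIN'), item.get('status'))
    return response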
| 41.222892 | 122 | 0.486483 |
7947e4b75e780761f5d74c2cf47683f445cd6ee8 | 1,124 | py | Python | mindspore/ops/_op_impl/cpu/bias_add.py | ATestGroup233/mindspore | 5d81221b5896cf7d7c6adb44daef28d92cb43352 | [
"Apache-2.0"
] | 1 | 2021-06-01T12:34:37.000Z | 2021-06-01T12:34:37.000Z | mindspore/ops/_op_impl/cpu/bias_add.py | ATestGroup233/mindspore | 5d81221b5896cf7d7c6adb44daef28d92cb43352 | [
"Apache-2.0"
] | null | null | null | mindspore/ops/_op_impl/cpu/bias_add.py | ATestGroup233/mindspore | 5d81221b5896cf7d7c6adb44daef28d92cb43352 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""BiasAdd op"""
from mindspore.ops.op_info_register import op_info_register, CpuRegOp, DataType
bias_add_op_info = CpuRegOp("BiasAdd") \
.input(0, "x", "required") \
.input(1, "bias", "required") \
.output(0, "y", "required") \
.dtype_format(DataType.F32_ChannelLast, DataType.F32_Default, DataType.F32_ChannelLast) \
.get_op_info()
@op_info_register(bias_add_op_info)
def _bias_add_cpu():
"""BiasAdd cpu register"""
return
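# Hedged usage sketch added for illustration; it is not part of the original
# file. It shows how the BiasAdd primitive registered above is typically
# invoked from Python on the CPU backend; the shapes and values are
# assumptions made only for this example.
def _example_bias_add():
    import numpy as np
    from mindspore import Tensor, context
    from mindspore.ops import operations as P

    context.set_context(device_target='CPU')
    x = Tensor(np.ones((2, 3), dtype=np.float32))
    bias = Tensor(np.array([0.1, 0.2, 0.3], dtype=np.float32))
    return P.BiasAdd()(x, bias)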
| 36.258065 | 93 | 0.685943 |
7947e54a17416b38a5dd639dd1a919f91909328c | 1,949 | py | Python | forecaster/Utilities.py | malin1993ml/QueryBot5000 | 58908dcd6d542b935dd8aa0f62b2dfe78430f61e | [
"Apache-2.0"
] | 82 | 2018-04-20T19:59:42.000Z | 2022-03-29T05:13:44.000Z | forecaster/Utilities.py | pentium3/QueryBot5000 | 7aace45fc9e13019931f73f837c8feb10a3cd142 | [
"Apache-2.0"
] | 4 | 2018-12-04T09:42:55.000Z | 2021-04-01T13:18:58.000Z | forecaster/Utilities.py | pentium3/QueryBot5000 | 7aace45fc9e13019931f73f837c8feb10a3cd142 | [
"Apache-2.0"
] | 28 | 2018-05-03T14:13:36.000Z | 2021-12-28T01:20:40.000Z | import numpy as np
from torch.autograd import Variable
import math
import torch
import matplotlib.pyplot as plt
import os
def onehot(X, dim):
Xind = np.zeros(dim)
Xind[X, np.arange(dim[1])] = 1
return Xind
def flat_prod(X,Y):
XY = np.zeros((X.shape[0]*Y.shape[0], X.shape[1]))
for i in range(X.shape[1]):
XY[:,i] = np.kron(X[:,i], Y[:,i].T).reshape(X.shape[0]*Y.shape[0])
return XY
def repackage_hidden(h):
"""Wraps hidden states in new Variables, to detach them from their history."""
if isinstance(h, tuple) or isinstance(h, list):
return tuple(repackage_hidden(v) for v in h)
else:
return h.detach()
def get_batch(source, i, bptt, evaluation=False):
seq_len = min(bptt, source.shape[0] - 1 - i)
data = source[i:i+seq_len]
target = source[i+1:i+1+seq_len]
return data, target
def get_batch(source, i, bptt, evaluation=False, horizon=1):
seq_len = min(bptt, source.shape[0] - horizon - i)
data = source[i:i+seq_len]
target = source[i+horizon:i+horizon+seq_len]
return data, target
def prettyPrint(description, loss):
print('=' * 89)
print('|| ',description, ' || loss {:5.3f}'.format(loss))
print('=' * 89)
def my_plot(x_tst, y, i_plt,j_plt):
plt.plot(x_tst[:,i_plt,j_plt])
plt.plot(y[:,i_plt,j_plt])
plt.show()
def save_plot(x_tst, y, i_plt):
x_tst = x_tst.transpose(1, 0, 2)
y = y.transpose(1, 0, 2)
plt.figure(figsize = (120, 2.5))
plt.plot(x_tst[:, :, i_plt].flatten(), linewidth = 0.5)
plt.plot(y[:, :, i_plt].flatten(), linewidth = 0.5)
#plt.ylim([0, 8000])
plot_dir = "../plot/regressed-admission-psrnn-lr1-log"
#plot_dir = "../plot/regressed-admission-rnn-lr1-log"
if not os.path.exists(plot_dir):
os.makedirs(plot_dir)
plt.savefig("%s/%d.pdf" % (plot_dir, i_plt))
plt.close()
def plot_weights(W):
plt.set_cmap('jet')
plt.imshow(W)
plt.show()
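# Minimal self-check added for illustration; it is not part of the original
# file. It exercises onehot() and get_batch() on small random arrays; the
# shapes are assumptions chosen only to keep the example readable.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    labels = rng.randint(0, 5, size=8)            # 8 samples, 5 classes
    encoded = onehot(labels, (5, 8))              # one column per sample
    print(encoded.sum(axis=0))                    # every column sums to 1
    series = rng.rand(100, 4, 3)                  # (time, batch, features)
    x, y = get_batch(series, i=0, bptt=20, horizon=1)
    print(x.shape, y.shape)                       # (20, 4, 3) twice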
| 28.246377 | 82 | 0.623397 |
7947e584bf2b506b6695887c8a0e54809396cb59 | 6,611 | py | Python | test/functional/feature_maxuploadtarget.py | dogxteam/dogxwallet-master | 346189354bdec9a80c20bdc429ddec15c3b17b73 | [
"MIT"
] | 5 | 2019-03-18T02:14:20.000Z | 2019-03-21T17:08:27.000Z | test/functional/feature_maxuploadtarget.py | dogxteam/dogxwallet-master | 346189354bdec9a80c20bdc429ddec15c3b17b73 | [
"MIT"
] | null | null | null | test/functional/feature_maxuploadtarget.py | dogxteam/dogxwallet-master | 346189354bdec9a80c20bdc429ddec15c3b17b73 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The dogxcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, msg_getdata
from test_framework.mininode import P2PInterface
from test_framework.test_framework import dogxcoinTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.block_receive_map = defaultdict(int)
def on_inv(self, message):
pass
def on_block(self, message):
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(dogxcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-maxuploadtarget=800"]]
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# p2p_conns[0] will only request old blocks
# p2p_conns[1] will only request new blocks
# p2p_conns[2] will test resetting the counters
p2p_conns = []
for _ in range(3):
p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
# Now mine a big block
mine_large_block(self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
mine_large_block(self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
big_new_block = int(big_new_block, 16)
        # p2p_conns[0] will test what happens if we just keep requesting
        # the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
for i in range(success_count):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].sync_with_ping()
assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in range(3):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
self.log.info("Peer 0 disconnected after downloading old block too many times")
# Requesting the current block on p2p_conns[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(800):
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].sync_with_ping()
assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
self.log.info("Peer 1 able to repeatedly download new block")
# But if p2p_conns[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
self.log.info("Peer 1 disconnected after trying to download old block")
self.log.info("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
# and p2p_conns[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
p2p_conns[2].sync_with_ping()
p2p_conns[2].send_message(getdata_request)
p2p_conns[2].sync_with_ping()
assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
self.log.info("Peer 2 able to download old block")
self.nodes[0].disconnect_p2ps()
#stop and start node 0 with 1MB maxuploadtarget, whitelist 127.0.0.1
self.log.info("Restarting nodes with -whitelist=127.0.0.1")
self.stop_node(0)
self.start_node(0, ["-whitelist=127.0.0.1", "-maxuploadtarget=1"])
# Reconnect to self.nodes[0]
self.nodes[0].add_p2p_connection(TestP2PConn())
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in range(20):
self.nodes[0].p2p.send_message(getdata_request)
self.nodes[0].p2p.sync_with_ping()
assert_equal(self.nodes[0].p2p.block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
self.nodes[0].p2p.send_and_ping(getdata_request)
assert_equal(len(self.nodes[0].getpeerinfo()), 1) #node is still connected because of the whitelist
self.log.info("Peer still connected after trying to download old block (whitelisted)")
if __name__ == '__main__':
MaxUploadTest().main()
| 40.066667 | 107 | 0.678264 |
7947e5b7dabab313f33e5e1f63fa2898c6f11020 | 8,112 | py | Python | nagios/libexec/check_mod_status.py | Oneiroi/sysadmin | 701c8db0667eff683377fd119490308c503bf464 | [
"Apache-2.0"
] | 29 | 2015-01-11T06:14:19.000Z | 2020-01-16T04:27:25.000Z | nagios/libexec/check_mod_status.py | Oneiroi/sysadmin | 701c8db0667eff683377fd119490308c503bf464 | [
"Apache-2.0"
] | 1 | 2015-04-07T12:20:07.000Z | 2015-04-07T12:20:07.000Z | nagios/libexec/check_mod_status.py | Oneiroi/sysadmin | 701c8db0667eff683377fd119490308c503bf464 | [
"Apache-2.0"
] | 18 | 2015-01-26T05:19:52.000Z | 2021-04-29T12:18:46.000Z | #!/usr/local/bin/python
#
# check_mod_status.py
#
# Created by David Busby on 30/03/2009.
#
"""
__author__="David Busby"
__copyright__="Psycle Interactive Ltd & David Busby"
__license__="GNU v3 + part 5d section 7: Redistribution/Reuse of this code is permitted under the GNU v3 license, as an additional term ALL code must carry the original Author(s) credit in comment form."
"""
#imports
import sys, getopt, httplib, string, urllib2, re
#global vars
TAG="HTTPD_STATUS"
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "hs:c:w:d:x:", ["help", "output="])
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
srv = "" #server to read status from
cpuc = 0 #cpu critical threshold
cpuw = 0 #cpu warning threshold
freec = 0 #free slot warning threshold
freew = 0 #free slot warning threshold
for o, a in opts:
if o in ("-h", "--help"):
usage()
elif o == "-s":
srv = a
elif o == "-c":
            cpuc = float(a)
elif o == "-w":
            cpuw = float(a)
elif o == "-d":
freec = int(a)
elif o == "-x":
freew = int(a)
else:
assert False, "unhandled option"
if len(srv) > 0 and cpuc > 0 and cpuw > 0 and freec > 0 and freew > 0:
srv = "%s%s%s" % ("http://",srv,"/server-status?auto")
req = urllib2.Request(srv)
try:
res = urllib2.urlopen(req)
headers = res.info().headers
data = res.read()
except IOError, e:
if hasattr(e, 'reason'):
critical(e.reason)
elif hasattr(e, 'code'):
critical(e.code)
if len(data) > 0:
#data = data.split("\n")
#the following does assume however that the auto data provides the following order
#
#
# Total Accesses: 39186
# Total kBytes: 2168752
# CPULoad: 1.16224
# Uptime: 34923
# ReqPerSec: 1.12207
# BytesPerSec: 63591.4
# BytesPerReq: 56673.4
# BusyWorkers: 1
# IdleWorkers: 19
# Scoreboard:
#total accesses
#adata = {
# "ta": data[0].split(":")[1].lstrip(),
# "tk": data[1].split(":")[1].lstrip(),
# "cpu": float(data[2].split(":")[1].lstrip()),
# "up": data[3].split(":")[1].lstrip(),
# "rps": data[4].split(":")[1].lstrip(),
# "bps": data[5].split(":")[1].lstrip(),
# "bpr": data[6].split(":")[1].lstrip(),
# "bw": data[7].split(":")[1].lstrip(),
# "iw": data[8].split(":")[1].lstrip(),
# "sb": data[9].split(":")[1].lstrip()
#}
#Regex Data cap
adata = {
"ta": 0 if re.search('Total\sAccesses:\s+([0-9]+)',data) == None else re.search('Total\sAccesses:\s+([0-9]+)',data).group(1),
"tk": 0 if re.search('Total\skBytes:\s+([0-9]+)',data) == None else re.search('Total\skBytes:\s+([0-9]+)',data).group(1),
"cpu": float(0 if re.search('CPULoad:\s+([0-9]+\.?[0-9]+)',data) == None else re.search('CPULoad:\s+([0-9]+\.?[0-9]+)',data).group(1)),
"up": 0 if re.search('Uptime:\s+([0-9]+)',data) == None else re.search('Uptime:\s+([0-9]+)',data).group(1),
"rps": 0 if re.search('ReqPerSec:\s+([0-9]+)',data) == None else re.search('ReqPerSec:\s+([0-9]+)',data).group(1),
"bps": 0 if re.search('BytesPerSec:\s+([0-9]+\.?[0-9]+)',data) == None else re.search('BytesPerSec:\s+([0-9]+\.?[0-9]+)',data).group(1),
"bpr": 0 if re.search('BytesPerReq:\s+([0-9]+\.?[0-9]+)',data) == None else re.search('BytesPerReq:\s+([0-9]+\.?[0-9]+)',data).group(1),
"bw": 0 if re.search('BusyWorkers:\s+([0-9]+)',data) == None else re.search('BusyWorkers:\s+([0-9]+)',data).group(1),
"iw": 0 if re.search('IdleWorkers:\s+([0-9]+)',data) == None else re.search('IdleWorkers:\s+([0-9]+)',data).group(1),
"sb": '' if re.search('Scoreboard:\s+(.*)',data) == None else re.search('Scoreboard:\s+(.*)',data).group(1)
}
#parse the score board
asb = sb_parse(adata["sb"])
#generate perfdata
stat ="| cpu_load=%s;0; max=%s;0; waiting=%s;0; starting=%s;0;" % (adata["cpu"], asb["max"], asb["wc"], asb["su"])
stat = stat +" reading=%s;0; sending=%s;0; keepalive=%s;0; lookup=%s;0;" % (asb["rr"], asb["sr"], asb["ka"], asb["dns"])
stat = stat +" closing=%s;0; logging=%s;0; finishing=%s;0; idle=%s;0;" % (asb["cc"], asb["lo"], asb["gf"], asb["id"])
stat = stat + " open=%s;0; bytes_per_sec=%s;0; Uptime=%s;0; total_accesses=%s;0;" % (asb["op"], adata["bps"], adata["up"], adata["ta"])
#check cpu load
if adata["cpu"] >= cpuc:
critical("CPULoad Percentage: %s exceeds critical threshold (%s)%s" % (adata["cpu"],cpuc,stat))
elif adata["cpu"] >= cpuw:
warn("CPULoad Percentage: %s exceeds warning threshold (%s)%s" % (adata["cpu"],cpuw,stat))
#free slot check
perfree = (1.0*asb["op"]/asb["max"])*100
if perfree <= freec:
critical("Free Slots Percentage: %s less than critical threshold (%s)%s" % (perfree,freec,stat))
elif perfree <= freew:
warn("Free Slots Percentage: %s less than warning threshold (%s)%s" % (perfree,freew,stat))
#no of the checks have caused an exit so status is ok!
ok("CPU: %s FREE: %s %s" % (adata["cpu"],perfree,stat))
else:
stat = "No Data"
critical(stat)
else:
usage()
def sb_parse(sb):
#setup struct / assoc array
asb = {
"wc": 0, #"_" Waiting for Connection
"su": 0, #"S" Starting up
"rr": 0, #"R" Reading Request
"sr": 0, #"W" Sending Reply
"ka": 0, #"K" Keepalive (read)
"dns": 0, #"D" DNS Lookup,
"cc": 0, #"C" Closing connection
"lo": 0, #"L" Logging
"gf": 0, #"G" Gracefully finishing
"id": 0, #"I" Idle cleanup of worker
"op": 0, #"." Open slot with no current process
"max": 0 #max slots
}
sblen = len(sb)
asb["max"] = sblen
for i in range(0,sblen):
if sb[i] == "_":
asb["wc"] += 1
elif sb[i] == "S":
asb["su"] += 1
elif sb[i] == "R":
asb["rr"] += 1
elif sb[i] == "W":
asb["sr"] += 1
elif sb[i] == "K":
asb["ka"] += 1
elif sb[i] == "D":
asb["dns"] += 1
elif sb[i] == "C":
asb["cc"] += 1
elif sb[i] == "L":
asb["lo"] += 1
elif sb[i] == "G":
asb["gf"] += 1
elif sb[i] == "I":
asb["id"] += 1
elif sb[i] == ".":
asb["op"] += 1
return asb
def usage():
print "Usage: ",sys.argv[0]," [-h][-s][-c][-w][-d][-x]"
print "-s server_ip or name"
print "-c critical CPU max percentage"
print "-w warning CPU max percentage"
print "-d critical free slot min percentage"
print "-x warning free slot min percentage"
print "NOTE: DO NOT include the http:// or /server-status?auto in the server address."
sys.exit(0)
def ok(stat):
print TAG,"OK -",stat
sys.exit(0)
def warn(stat):
print TAG,"WARN -",stat
sys.exit(1)
def critical(stat):
print TAG,"CRITICAL -",stat
sys.exit(2);
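# Illustrative helper added for clarity; it is not part of the original plugin.
# It shows what sb_parse() extracts from an Apache scoreboard string: for the
# ten-slot scoreboard below it reports 6 waiting workers, 2 sending replies,
# 2 open slots and a capacity of 10. The scoreboard string is an assumption
# made only for this example.
def _example_sb_parse():
    counts = sb_parse("______WW..")
    return counts["wc"], counts["sr"], counts["op"], counts["max"]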
if __name__ == "__main__":
main()
| 39.960591 | 203 | 0.464127 |
7947e75c144919fa8d6f90aa6d06febedec241ef | 174 | py | Python | aws-cdk-cis/app.py | severel/aws-cdk-cis | 7008a65966240708da235eb7e52d75d21489dd9e | [
"MIT"
] | 2 | 2021-03-08T01:24:42.000Z | 2021-04-03T14:50:33.000Z | aws-cdk-cis/app.py | severel/aws-cdk-cis | 7008a65966240708da235eb7e52d75d21489dd9e | [
"MIT"
] | null | null | null | aws-cdk-cis/app.py | severel/aws-cdk-cis | 7008a65966240708da235eb7e52d75d21489dd9e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from aws_cdk import core
from aws_cdk_cis.aws_cdk_cis_stack import AwsCdkCisStack
app = core.App()
AwsCdkCisStack(app, "aws-cdk-cis")
app.synth()
| 14.5 | 56 | 0.764368 |
7947e857d5687cf517a0c3dd276e5e6f000b17e7 | 464 | py | Python | data/scripts/templates/object/building/poi/shared_yavin4_hutt_scavenger_camp_small1.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/building/poi/shared_yavin4_hutt_scavenger_camp_small1.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/building/poi/shared_yavin4_hutt_scavenger_camp_small1.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Building()
result.template = "object/building/poi/shared_yavin4_hutt_scavenger_camp_small1.iff"
result.attribute_template_id = -1
result.stfName("poi_n","base_poi_building")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | 27.294118 | 85 | 0.737069 |
7947ea7425f1c0daf4d66cf8aac431401496318e | 3,250 | py | Python | common/database/main.py | dnguyen0304/python-common | 7fdcc584d223c277e3cdb0336cc923a4f9adcece | [
"MIT"
] | null | null | null | common/database/main.py | dnguyen0304/python-common | 7fdcc584d223c277e3cdb0336cc923a4f9adcece | [
"MIT"
] | null | null | null | common/database/main.py | dnguyen0304/python-common | 7fdcc584d223c277e3cdb0336cc923a4f9adcece | [
"MIT"
] | 1 | 2018-09-19T00:40:10.000Z | 2018-09-19T00:40:10.000Z | # -*- coding: utf-8 -*-
import datetime
import sqlalchemy
from sqlalchemy import orm
class DBContext:
def __init__(self, session):
"""
Decorator class that manages persistence operations for
ORM-mapped objects.
Parameters
----------
session : sqlalchemy.orm.session.Session
Session instance.
See Also
--------
sqlalchemy.orm.session.Session
"""
# Composition must be used instead of inheritance because
# SQLAlchemy Sessions are always accessed through a factory.
self._session = session
def add(self, entity, by=None):
"""
Decorator method.
        Extends the SQLAlchemy Session's `add()` to require the `by` user
        identifier whenever an entity is created or updated. The
        appropriate `created_at` or `updated_at` field
is set to the current UTC date and time.
Parameters
----------
entity : models.Base subclass
Domain model instance.
by : int
Unique identifier for the user who created or updated the
entity.
"""
entity_state = sqlalchemy.inspect(entity)
self._validate_metadata(entity=entity,
entity_state=entity_state,
by=by)
if not entity_state.persistent or entity in self._session.dirty:
self._session.add(entity)
@staticmethod
def _validate_metadata(entity, entity_state, by):
message = 'add() missing 1 required positional argument: "by"'
if entity_state.transient:
if by is None:
raise TypeError(message)
else:
entity.created_at = datetime.datetime.utcnow()
entity.created_by = by
elif entity_state.persistent:
if by is None:
raise TypeError(message)
else:
entity.updated_at = datetime.datetime.utcnow()
entity.updated_by = by
def __getattr__(self, name):
return getattr(self._session, name)
class DBContextFactory:
def __init__(self, connection_string):
"""
Factory class for producing DBContexts.
Parameters
----------
connection_string : str
Formatted string containing host and authentication
information.
"""
engine = sqlalchemy.create_engine(connection_string)
SessionFactory = orm.sessionmaker()
SessionFactory.configure(bind=engine)
self._SessionFactory = orm.scoped_session(SessionFactory)
def create(self):
"""
Produce an object configured as specified.
See the Stack Overflow answer for more details [1].
Returns
-------
database.DBContext
References
----------
.. [1] zzzeek, "SQLAlchemy: Creating vs. Reusing a Session",
http://stackoverflow.com/a/12223711.
"""
# Should this dispose the engine, close the connection, and / or
# close the session?
session = self._SessionFactory()
return DBContext(session=session)
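# Illustrative usage sketch added for clarity; it is not part of the original
# module. The "Widget" model below stands in for the project's real ORM models
# and only carries the audit columns that DBContext.add() touches; the
# connection string, table and column names are assumptions made only for this
# example.
if __name__ == '__main__':
    from sqlalchemy import Column, DateTime, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Widget(Base):
        __tablename__ = 'widgets'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        created_at = Column(DateTime)
        created_by = Column(Integer)
        updated_at = Column(DateTime)
        updated_by = Column(Integer)

    factory = DBContextFactory('sqlite://')      # in-memory SQLite database
    context = factory.create()
    Base.metadata.create_all(context.bind)       # attribute access is proxied
    context.add(Widget(name='example'), by=1)    # sets created_at / created_by
    context.commit()
    print(context.query(Widget).count())         # 1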
| 26.639344 | 72 | 0.585231 |
7947ea9ba83ddb09570586326518f67e03a4a24e | 868 | py | Python | mms/tests/unit_tests/test_utils/dummy_class_model_service.py | dhanainme/multi-model-server | cd5a693032b1bec4c46b0f7a9844df496a62c1a8 | [
"Apache-2.0"
] | 527 | 2017-12-04T20:58:19.000Z | 2019-11-14T03:15:39.000Z | mms/tests/unit_tests/test_utils/dummy_class_model_service.py | DrSnowbird/mxnet-model-server | a0bfd712350545dceb21c8e0b0b21dfa0c9918a7 | [
"Apache-2.0"
] | 303 | 2017-12-05T06:14:08.000Z | 2019-11-16T01:35:15.000Z | mms/tests/unit_tests/test_utils/dummy_class_model_service.py | DrSnowbird/mxnet-model-server | a0bfd712350545dceb21c8e0b0b21dfa0c9918a7 | [
"Apache-2.0"
] | 144 | 2017-12-05T19:27:39.000Z | 2019-11-15T22:15:50.000Z | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Dummy custom service which is class based
"""
# noinspection PyUnusedLocal
class CustomService(object):
def initialize(self, context):
pass
# noinspection PyMethodMayBeStatic
def handle(self, data, context):
from mms.context import Context
return ["OK"]
| 33.384615 | 75 | 0.732719 |
7947eb1402324f98d876f6bdeed5867603d64cfd | 2,005 | py | Python | datary/operations/clean.py | Datary/python-sdk | 2790a50e1ad262cbe3210665dc34f497625e923d | [
"MIT"
] | null | null | null | datary/operations/clean.py | Datary/python-sdk | 2790a50e1ad262cbe3210665dc34f497625e923d | [
"MIT"
] | null | null | null | datary/operations/clean.py | Datary/python-sdk | 2790a50e1ad262cbe3210665dc34f497625e923d | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Datary sdk clean Operations File
"""
from datary.repos import DataryRepos
from datary.workdirs import DataryWorkdirs
from datary.operations.remove import DataryRemoveOperation
from datary.operations.limits import DataryOperationLimits
from scrapbag import flatten
import structlog
logger = structlog.getLogger(__name__)
class DataryCleanOperation(DataryRemoveOperation, DataryWorkdirs,
DataryOperationLimits):
"""
Datary clean operation class
"""
def clean_repo(self, repo_uuid, **kwargs):
"""
Clean repo data from datary & algolia.
================ ============= ====================================
Parameter Type Description
================ ============= ====================================
repo_uuid str repository uuid
================ ============= ====================================
"""
repo = DataryRepos.get_describerepo(repo_uuid=repo_uuid, **kwargs)
if repo:
wdir_uuid = repo.get('workdir', {}).get('uuid')
# clear changes
self.clear_index(wdir_uuid)
# get workdir
workdir = self.get_wdir_filetree(wdir_uuid)
# flatten workdir to list
flatten_filetree = flatten(workdir, sep='/')
filetree_keys = [
x for x in flatten_filetree.keys() if '__self' not in x]
# Delete files
for path in filetree_keys:
element_data = {
'path': "/".join(path.split('/')[:-1]),
'basename': path.split('/')[-1]
}
self.delete_file(wdir_uuid, element_data)
logger.info(
'cleaning remove of {}'.format(path),
element_data=element_data)
else:
            logger.error('Failed to clean_repo, repo not found in datary.')
| 31.328125 | 78 | 0.505237 |
7947ebe6d82af84e96e565ed75358755411f84bb | 237 | py | Python | pattern_classes/Engineer.py | Sunuba/PythonStrategyPattern | b0490cf63ecc87562d5fc3ef1f2a152fb97b0a78 | [
"MIT"
] | null | null | null | pattern_classes/Engineer.py | Sunuba/PythonStrategyPattern | b0490cf63ecc87562d5fc3ef1f2a152fb97b0a78 | [
"MIT"
] | null | null | null | pattern_classes/Engineer.py | Sunuba/PythonStrategyPattern | b0490cf63ecc87562d5fc3ef1f2a152fb97b0a78 | [
"MIT"
] | null | null | null | class Engineer:
def __init__(self, height, width, method):
self.method = method
self.height = height
self.width = width
def calculate(self):
return self.method().calculate(self.width, self.height) | 29.625 | 63 | 0.637131 |
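# Illustrative usage added for clarity; it is not part of the original file.
# "AreaCalculator" and "PerimeterCalculator" are hypothetical strategies: any
# class whose instances expose calculate(width, height) can be injected into
# the Engineer class above.
class AreaCalculator:
    def calculate(self, width, height):
        return width * height


class PerimeterCalculator:
    def calculate(self, width, height):
        return 2 * (width + height)


if __name__ == "__main__":
    print(Engineer(height=3, width=4, method=AreaCalculator).calculate())       # 12
    print(Engineer(height=3, width=4, method=PerimeterCalculator).calculate())  # 14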
7947ed6af9e6a790dfcadb11eb9ecd5474560cf0 | 4,812 | py | Python | setup.py | sjfleming/pyro | c8dc40a75cc4ff1f43c6ff9178d91c08155d7973 | [
"Apache-2.0"
] | null | null | null | setup.py | sjfleming/pyro | c8dc40a75cc4ff1f43c6ff9178d91c08155d7973 | [
"Apache-2.0"
] | null | null | null | setup.py | sjfleming/pyro | c8dc40a75cc4ff1f43c6ff9178d91c08155d7973 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import os
import subprocess
import sys
from setuptools import find_packages, setup
PROJECT_PATH = os.path.dirname(os.path.abspath(__file__))
VERSION = """
# This file is auto-generated with the version information during setup.py installation.
__version__ = '{}'
"""
# Find pyro version.
for line in open(os.path.join(PROJECT_PATH, "pyro", "__init__.py")):
if line.startswith("version_prefix = "):
version = line.strip().split()[2][1:-1]
# Append current commit sha to version
commit_sha = ""
try:
current_tag = (
subprocess.check_output(["git", "tag", "--points-at", "HEAD"], cwd=PROJECT_PATH)
.decode("ascii")
.strip()
)
# only add sha if HEAD does not point to the release tag
if not current_tag == version:
commit_sha = (
subprocess.check_output(
["git", "rev-parse", "--short", "HEAD"], cwd=PROJECT_PATH
)
.decode("ascii")
.strip()
)
# catch all exception to be safe
except Exception:
pass # probably not a git repo
# Write version to _version.py
if commit_sha:
version += "+{}".format(commit_sha)
with open(os.path.join(PROJECT_PATH, "pyro", "_version.py"), "w") as f:
f.write(VERSION.format(version))
# READ README.md for long description on PyPi.
# This requires uploading via twine, e.g.:
# $ python setup.py sdist bdist_wheel
# $ twine upload --repository-url https://test.pypi.org/legacy/ dist/* # test version
# $ twine upload dist/*
try:
long_description = open("README.md", encoding="utf-8").read()
except Exception as e:
sys.stderr.write("Failed to read README.md: {}\n".format(e))
sys.stderr.flush()
long_description = ""
# Remove badges since they will always be obsolete.
# This assumes the first 12 lines contain badge info.
long_description = "\n".join([str(line) for line in long_description.split("\n")[12:]])
# examples/tutorials
EXTRAS_REQUIRE = [
"jupyter>=1.0.0",
"graphviz>=0.8",
"matplotlib>=1.3",
"torchvision>=0.10.0",
"visdom>=0.1.4",
"pandas",
"pillow==8.2.0", # https://github.com/pytorch/pytorch/issues/61125
"scikit-learn",
"seaborn",
"wget",
"lap",
# 'biopython>=1.54', # Requires Python 3.6
# 'scanpy>=1.4', # Requires HDF5
# 'scvi>=0.6', # Requires loopy and other fragile packages
]
setup(
name="pyro-ppl",
version=version,
description="A Python library for probabilistic modeling and inference",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(include=["pyro", "pyro.*"]),
package_data={"pyro.distributions": ["*.cpp"]},
author="Uber AI Labs",
url="http://pyro.ai",
install_requires=[
# if you add any additional libraries, please also
# add them to `docs/requirements.txt`
# numpy is necessary for some functionality of PyTorch
"numpy>=1.7",
"opt_einsum>=2.3.2",
"pyro-api>=0.1.1",
"torch>=1.9.0",
"tqdm>=4.36",
],
extras_require={
"extras": EXTRAS_REQUIRE,
"test": EXTRAS_REQUIRE
+ [
"black>=21.4b0",
"flake8",
"nbval",
"pytest>=5.0",
"pytest-cov",
"scipy>=1.1",
],
"profile": ["prettytable", "pytest-benchmark", "snakeviz"],
"dev": EXTRAS_REQUIRE
+ [
"black>=21.4b0",
"flake8",
"isort>=5.0",
"mypy>=0.812",
"nbformat",
"nbsphinx>=0.3.2",
"nbstripout",
"nbval",
"ninja",
"pypandoc",
"pytest>=5.0",
"pytest-xdist",
"scipy>=1.1",
"sphinx",
"sphinx_rtd_theme",
"yapf",
],
"horovod": ["horovod[pytorch]>=0.19"],
"funsor": [
# This must be a released version when Pyro is released.
# "funsor[torch] @ git+git://github.com/pyro-ppl/funsor.git@383e7a6d05c9d5de9646d23698891e10c4cba927",
"funsor[torch]==0.4.1",
],
},
python_requires=">=3.6",
keywords="machine learning statistics probabilistic programming bayesian modeling pytorch",
license="Apache 2.0",
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
# yapf
)
| 30.649682 | 114 | 0.584996 |
7947eebcc1a5a7b8a1b7eb267e5490b41b8cfb57 | 446 | py | Python | venv/Scripts/pasteurize-script.py | TRGG3R/Visual_FCT_Explorer | 3fc2cf00109afa5f407de6c0d6e3de6cb7285a78 | [
"MIT"
] | null | null | null | venv/Scripts/pasteurize-script.py | TRGG3R/Visual_FCT_Explorer | 3fc2cf00109afa5f407de6c0d6e3de6cb7285a78 | [
"MIT"
] | null | null | null | venv/Scripts/pasteurize-script.py | TRGG3R/Visual_FCT_Explorer | 3fc2cf00109afa5f407de6c0d6e3de6cb7285a78 | [
"MIT"
] | null | null | null | #!C:\Users\thepo\PycharmProjects\Visual_FCT_Explorer\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','pasteurize'
__requires__ = 'future==0.18.2'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.18.2', 'console_scripts', 'pasteurize')()
)
| 34.307692 | 77 | 0.695067 |
7947ef79e3867a3dd00242e8dc2abce727d565e5 | 353 | py | Python | Aulas/Mundo 3/104.py | JoaoEmanuell/Meus-Estudos-Python | f6f6eeb6016919e594613785ffe7136d74241ada | [
"MIT"
] | 2 | 2021-07-29T18:58:02.000Z | 2021-10-29T21:11:22.000Z | Aulas/Mundo 3/104.py | JoaoEmanuell/Meus-Estudos-Python | f6f6eeb6016919e594613785ffe7136d74241ada | [
"MIT"
] | null | null | null | Aulas/Mundo 3/104.py | JoaoEmanuell/Meus-Estudos-Python | f6f6eeb6016919e594613785ffe7136d74241ada | [
"MIT"
] | null | null | null | def leia(msg):
ok = False
valor = 0
while True:
n = str(input(msg))
if n.isnumeric():
valor = int(n)
ok = True
else:
            print('Please enter a valid number')
if ok:
break
return n
n = leia('Enter a number ')
print(f'You typed {n}')
| 19.611111 | 54 | 0.456091 |
7947ef9fd9180a2f6b9d4777a2890c14fba66dc7 | 1,089 | py | Python | examples/extra_examples/drum_hits.py | aejb/blinkt | e93217143ac645ed2a1365a312908283c54ac2e0 | [
"MIT"
] | 1 | 2021-06-05T03:12:37.000Z | 2021-06-05T03:12:37.000Z | examples/extra_examples/drum_hits.py | aejb/blinkt | e93217143ac645ed2a1365a312908283c54ac2e0 | [
"MIT"
] | null | null | null | examples/extra_examples/drum_hits.py | aejb/blinkt | e93217143ac645ed2a1365a312908283c54ac2e0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import glob
import os
import time
from sys import exit
try:
import drumhat
except ImportError:
exit("This script requires the drumhat module\nInstall with: sudo pip install drumhat")
try:
import pygame
except ImportError:
exit("This script requires the pygame module\nInstall with: sudo pip install pygame")
import blinkt
DRUM_FOLDER = "drums2"
BANK = os.path.join(os.path.dirname(__file__), DRUM_FOLDER)
pygame.mixer.init(44100, -16, 1, 512)
pygame.mixer.set_num_channels(16)
files = glob.glob(os.path.join(BANK, "*.wav"))
files.sort()
samples = [pygame.mixer.Sound(f) for f in files]
def show_all(state):
for i in range(8):
val = state * 255
blinkt.set_pixel(i, val, val, val)
blinkt.show()
def handle_hit(event):
samples[event.channel].play(loops=0)
show_all(1)
print("You hit pad {}, playing: {}".format(event.pad, files[event.channel]))
def handle_release():
show_all(0)
drumhat.on_hit(drumhat.PADS, handle_hit)
drumhat.on_release(drumhat.PADS, handle_release)
while True:
time.sleep(1)
| 20.942308 | 91 | 0.708907 |
7947f117a59843822c68958a98ab745bc2d61baf | 813 | py | Python | models.py | eyvonne/TwittOff | cfb1226a351aa89ab9ae183e38042caa0eef2db9 | [
"MIT"
] | null | null | null | models.py | eyvonne/TwittOff | cfb1226a351aa89ab9ae183e38042caa0eef2db9 | [
"MIT"
] | null | null | null | models.py | eyvonne/TwittOff | cfb1226a351aa89ab9ae183e38042caa0eef2db9 | [
"MIT"
] | null | null | null | ''' SQLAlchecmy models for twittoff'''
from flask_sqlalchemy import SQLAlchemy
DB = SQLAlchemy()
class User(DB.Model):
"""Twitter users that we pull and analyze"""
id = DB.Column(DB.BigInteger, primary_key=True)
name = DB.Column(DB.String(15), nullable=False)
#newest_tweet_id = DB.Column(DB.BigInteger)
def __repr__(self):
return '<User {}>'.format(self.name)
class Tweet(DB.Model):
"""Tweets"""
id = DB.Column(DB.BigInteger, primary_key=True)
text = DB.Column(DB.Unicode(300))
#embedding = DB.Column(DB.PickleType, nullable=False)
user_id = DB.Column(DB.BigInteger, DB.ForeignKey('user.id'), nullable=False)
user = DB.relationship('User', backref=DB.backref('tweets', lazy=True))
def __repr__(self):
return '<Tweet {}>'.format(self.text)
| 28.034483 | 80 | 0.671587 |
7947f1748767b84cff2e04ce6a706eaf08d2eeaf | 10,852 | py | Python | mayan/apps/documents/settings.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 4 | 2021-09-02T00:16:30.000Z | 2021-09-09T22:25:15.000Z | mayan/apps/documents/settings.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 86 | 2021-09-01T23:53:02.000Z | 2021-09-20T02:25:10.000Z | mayan/apps/documents/settings.py | nattangwiwat/Mayan-EDMS-recitation | fcf16afb56eae812fb99144d65ae1ae6749de0b7 | [
"Apache-2.0"
] | 70 | 2021-09-01T12:54:51.000Z | 2022-02-16T00:53:18.000Z | from django.utils.translation import ugettext_lazy as _
from mayan.apps.smart_settings.classes import SettingNamespace
from .literals import (
DEFAULT_DOCUMENTS_DISPLAY_HEIGHT, DEFAULT_DOCUMENTS_DISPLAY_WIDTH,
DEFAULT_DOCUMENTS_FAVORITE_COUNT,
DEFAULT_DOCUMENTS_FILE_PAGE_IMAGE_CACHE_STORAGE_BACKEND,
DEFAULT_DOCUMENTS_FILE_PAGE_IMAGE_CACHE_STORAGE_BACKEND_ARGUMENTS,
DEFAULT_DOCUMENTS_FILE_PAGE_IMAGE_CACHE_TIME,
DEFAULT_DOCUMENTS_FILE_PAGE_IMAGE_CACHE_MAXIMUM_SIZE,
DEFAULT_DOCUMENTS_FILE_STORAGE_BACKEND,
DEFAULT_DOCUMENTS_FILE_STORAGE_BACKEND_ARGUMENTS,
DEFAULT_DOCUMENTS_HASH_BLOCK_SIZE, DEFAULT_DOCUMENTS_LIST_THUMBNAIL_WIDTH,
DEFAULT_DOCUMENTS_PREVIEW_HEIGHT, DEFAULT_DOCUMENTS_PREVIEW_WIDTH,
DEFAULT_DOCUMENTS_PRINT_HEIGHT, DEFAULT_DOCUMENTS_PRINT_WIDTH,
DEFAULT_DOCUMENTS_RECENTLY_ACCESSED_COUNT,
DEFAULT_DOCUMENTS_RECENTLY_CREATED_COUNT, DEFAULT_DOCUMENTS_ROTATION_STEP,
DEFAULT_DOCUMENTS_THUMBNAIL_HEIGHT, DEFAULT_DOCUMENTS_THUMBNAIL_WIDTH,
DEFAULT_DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_MAXIMUM_SIZE,
DEFAULT_DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_TIME,
DEFAULT_DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_STORAGE_BACKEND,
DEFAULT_DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_STORAGE_BACKEND_ARGUMENTS,
DEFAULT_DOCUMENTS_ZOOM_MAX_LEVEL, DEFAULT_DOCUMENTS_ZOOM_MIN_LEVEL,
DEFAULT_DOCUMENTS_ZOOM_PERCENT_STEP, DEFAULT_LANGUAGE,
DEFAULT_LANGUAGE_CODES, DEFAULT_STUB_EXPIRATION_INTERVAL,
DEFAULT_TASK_GENERATE_DOCUMENT_FILE_PAGE_IMAGE_RETRY_DELAY,
DEFAULT_TASK_GENERATE_DOCUMENT_VERSION_PAGE_IMAGE_RETRY_DELAY
)
from .setting_callbacks import (
callback_update_document_file_page_image_cache_size,
callback_update_document_version_page_image_cache_size
)
from .setting_migrations import DocumentsSettingMigration
namespace = SettingNamespace(
label=_('Documents'), migration_class=DocumentsSettingMigration,
name='documents', version='0004'
)
setting_display_height = namespace.add_setting(
default=DEFAULT_DOCUMENTS_DISPLAY_HEIGHT,
global_name='DOCUMENTS_DISPLAY_HEIGHT'
)
setting_display_width = namespace.add_setting(
default=DEFAULT_DOCUMENTS_DISPLAY_WIDTH,
global_name='DOCUMENTS_DISPLAY_WIDTH'
)
setting_document_file_page_image_cache_maximum_size = namespace.add_setting(
default=DEFAULT_DOCUMENTS_FILE_PAGE_IMAGE_CACHE_MAXIMUM_SIZE,
global_name='DOCUMENTS_FILE_PAGE_IMAGE_CACHE_MAXIMUM_SIZE',
help_text=_(
'The threshold at which the DOCUMENTS_FILE_PAGE_IMAGE_CACHE_STORAGE_BACKEND will start '
'deleting the oldest document file page image cache files. Specify '
'the size in bytes.'
), post_edit_function=callback_update_document_file_page_image_cache_size
)
setting_document_file_page_image_cache_time = namespace.add_setting(
default=DEFAULT_DOCUMENTS_FILE_PAGE_IMAGE_CACHE_TIME,
global_name='DOCUMENTS_FILE_PAGE_IMAGE_CACHE_TIME', help_text=_(
'Time in seconds that the browser should cache the supplied document '
'file page images. The default of 31559626 seconds correspond to '
'1 year.'
)
)
setting_document_file_storage_backend = namespace.add_setting(
default=DEFAULT_DOCUMENTS_FILE_STORAGE_BACKEND,
global_name='DOCUMENTS_FILE_STORAGE_BACKEND', help_text=_(
'Path to the Storage subclass to use when storing document '
'files.'
)
)
setting_document_file_storage_backend_arguments = namespace.add_setting(
default=DEFAULT_DOCUMENTS_FILE_STORAGE_BACKEND_ARGUMENTS,
global_name='DOCUMENTS_FILE_STORAGE_BACKEND_ARGUMENTS', help_text=_(
'Arguments to pass to the DOCUMENT_FILE_STORAGE_BACKEND.'
)
)
setting_document_file_page_image_cache_storage_backend = namespace.add_setting(
default=DEFAULT_DOCUMENTS_FILE_PAGE_IMAGE_CACHE_STORAGE_BACKEND,
global_name='DOCUMENTS_FILE_PAGE_IMAGE_CACHE_STORAGE_BACKEND', help_text=_(
'Path to the Storage subclass to use when storing the cached '
'document file page image files.'
)
)
setting_document_file_page_image_cache_storage_backend_arguments = namespace.add_setting(
default=DEFAULT_DOCUMENTS_FILE_PAGE_IMAGE_CACHE_STORAGE_BACKEND_ARGUMENTS,
global_name='DOCUMENTS_FILE_PAGE_IMAGE_CACHE_STORAGE_BACKEND_ARGUMENTS',
help_text=_(
'Arguments to pass to the DOCUMENTS_FILE_PAGE_IMAGE_CACHE_STORAGE_BACKEND.'
),
)
setting_favorite_count = namespace.add_setting(
default=DEFAULT_DOCUMENTS_FAVORITE_COUNT,
global_name='DOCUMENTS_FAVORITE_COUNT', help_text=_(
'Maximum number of favorite documents to remember per user.'
)
)
setting_hash_block_size = namespace.add_setting(
default=DEFAULT_DOCUMENTS_HASH_BLOCK_SIZE,
global_name='DOCUMENTS_HASH_BLOCK_SIZE', help_text=_(
'Size of blocks to use when calculating the document file\'s '
'checksum. A value of 0 disables the block calculation and the entire '
'file will be loaded into memory.'
)
)
setting_language = namespace.add_setting(
default=DEFAULT_LANGUAGE, global_name='DOCUMENTS_LANGUAGE',
help_text=_('Default documents language (in ISO639-3 format).')
)
setting_language_codes = namespace.add_setting(
default=DEFAULT_LANGUAGE_CODES, global_name='DOCUMENTS_LANGUAGE_CODES',
help_text=_('List of supported document languages. In ISO639-3 format.')
)
setting_document_version_page_image_cache_maximum_size = namespace.add_setting(
default=DEFAULT_DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_MAXIMUM_SIZE,
global_name='DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_MAXIMUM_SIZE',
help_text=_(
'The threshold at which the DOCUMENT_VERSION_PAGE_IMAGE_CACHE_STORAGE_BACKEND will start '
'deleting the oldest document version page image cache versions. Specify '
'the size in bytes.'
), post_edit_function=callback_update_document_version_page_image_cache_size
)
setting_document_version_page_image_cache_time = namespace.add_setting(
default=DEFAULT_DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_TIME,
global_name='DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_TIME', help_text=_(
'Time in seconds that the browser should cache the supplied document '
'version page images. The default of 31559626 seconds correspond '
'to 1 year.'
)
)
setting_document_version_page_image_cache_storage_backend = namespace.add_setting(
default=DEFAULT_DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_STORAGE_BACKEND,
global_name='DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_STORAGE_BACKEND',
help_text=_(
'Path to the Storage subclass to use when storing the cached '
'document version page image versions.'
)
)
setting_document_version_page_image_cache_storage_backend_arguments = namespace.add_setting(
default=DEFAULT_DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_STORAGE_BACKEND_ARGUMENTS,
global_name='DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_STORAGE_BACKEND_ARGUMENTS',
help_text=_(
'Arguments to pass to the DOCUMENTS_VERSION_PAGE_IMAGE_CACHE_STORAGE_BACKEND.'
),
)
setting_preview_height = namespace.add_setting(
default=DEFAULT_DOCUMENTS_PREVIEW_HEIGHT,
global_name='DOCUMENTS_PREVIEW_HEIGHT'
)
setting_preview_width = namespace.add_setting(
default=DEFAULT_DOCUMENTS_PREVIEW_WIDTH,
global_name='DOCUMENTS_PREVIEW_WIDTH'
)
setting_print_height = namespace.add_setting(
default=DEFAULT_DOCUMENTS_PRINT_HEIGHT,
global_name='DOCUMENTS_PRINT_HEIGHT'
)
setting_print_width = namespace.add_setting(
default=DEFAULT_DOCUMENTS_PRINT_WIDTH,
global_name='DOCUMENTS_PRINT_WIDTH'
)
setting_recently_accessed_document_count = namespace.add_setting(
default=DEFAULT_DOCUMENTS_RECENTLY_ACCESSED_COUNT,
global_name='DOCUMENTS_RECENTLY_ACCESSED_COUNT', help_text=_(
'Maximum number of recently accessed documents (created, edited, '
'viewed) to remember per user.'
)
)
setting_recently_created_document_count = namespace.add_setting(
default=DEFAULT_DOCUMENTS_RECENTLY_CREATED_COUNT,
global_name='DOCUMENTS_RECENTLY_CREATED_COUNT', help_text=_(
'Maximum number of recently created documents to show.'
)
)
setting_rotation_step = namespace.add_setting(
default=DEFAULT_DOCUMENTS_ROTATION_STEP,
global_name='DOCUMENTS_ROTATION_STEP', help_text=_(
'Amount in degrees to rotate a document page per user interaction.'
)
)
setting_stub_expiration_interval = namespace.add_setting(
default=DEFAULT_STUB_EXPIRATION_INTERVAL,
global_name='DOCUMENTS_STUB_EXPIRATION_INTERVAL', help_text=_(
'Time after which a document stub will be considered invalid and '
'deleted.'
)
)
setting_task_document_file_page_image_generate_retry_delay = namespace.add_setting(
default=DEFAULT_TASK_GENERATE_DOCUMENT_FILE_PAGE_IMAGE_RETRY_DELAY,
global_name='DOCUMENT_TASK_GENERATE_DOCUMENT_FILE_PAGE_IMAGE_RETRY_DELAY',
help_text=_(
'Amount of time in seconds, a failed document file page image task '
'will wait before retrying.'
)
)
setting_task_document_version_page_image_generate_retry_delay = namespace.add_setting(
default=DEFAULT_TASK_GENERATE_DOCUMENT_VERSION_PAGE_IMAGE_RETRY_DELAY,
global_name='DOCUMENT_TASK_GENERATE_DOCUMENT_VERSION_PAGE_IMAGE_RETRY_DELAY',
help_text=_(
'Amount of time in seconds, a failed document version page image '
'task will wait before retrying.'
)
)
setting_thumbnail_height = namespace.add_setting(
default=DEFAULT_DOCUMENTS_THUMBNAIL_HEIGHT,
global_name='DOCUMENTS_THUMBNAIL_HEIGHT', help_text=_(
'Height in pixels of the document thumbnail image.'
)
)
setting_thumbnail_width = namespace.add_setting(
default=DEFAULT_DOCUMENTS_THUMBNAIL_WIDTH,
global_name='DOCUMENTS_THUMBNAIL_WIDTH', help_text=(
'Width in pixels of the document thumbnail image.'
)
)
setting_thumbnail_list_width = namespace.add_setting(
default=DEFAULT_DOCUMENTS_LIST_THUMBNAIL_WIDTH,
global_name='DOCUMENTS_LIST_THUMBNAIL_WIDTH', help_text=(
'Width in pixels of the document thumbnail image when shown in list '
'view mode.'
)
)
setting_zoom_max_level = namespace.add_setting(
default=DEFAULT_DOCUMENTS_ZOOM_MAX_LEVEL,
global_name='DOCUMENTS_ZOOM_MAX_LEVEL', help_text=_(
'Maximum amount in percent (%) to allow user to zoom in a document '
'page interactively.'
)
)
setting_zoom_min_level = namespace.add_setting(
default=DEFAULT_DOCUMENTS_ZOOM_MIN_LEVEL,
global_name='DOCUMENTS_ZOOM_MIN_LEVEL', help_text=_(
'Minimum amount in percent (%) to allow user to zoom out a document '
'page interactively.'
)
)
setting_zoom_percent_step = namespace.add_setting(
default=DEFAULT_DOCUMENTS_ZOOM_PERCENT_STEP,
global_name='DOCUMENTS_ZOOM_PERCENT_STEP', help_text=_(
'Amount in percent zoom in or out a document page per user '
'interaction.'
)
)
| 44.113821 | 98 | 0.803078 |
7947f27800b8632d645a717db73609d4f9adbc4e | 762 | py | Python | users/forms.py | zamuzakki/gis-portfolio | b628c3854db992dbd8435a655bfb32c7f5a075a7 | [
"MIT"
] | null | null | null | users/forms.py | zamuzakki/gis-portfolio | b628c3854db992dbd8435a655bfb32c7f5a075a7 | [
"MIT"
] | 9 | 2020-06-06T01:35:08.000Z | 2022-03-12T00:19:55.000Z | users/forms.py | zamuzakki/gis-portfolio | b628c3854db992dbd8435a655bfb32c7f5a075a7 | [
"MIT"
] | null | null | null | from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import CustomUser
class CustomUserCreationForm(UserCreationForm):
"""
Form that will be used to create the user in CustomUserAdmin
"""
class Meta(UserCreationForm.Meta):
model = CustomUser
fields = ('email', 'username',)
class CustomUserChangeForm(UserChangeForm):
"""
Form that will be used to change the user in CustomUserAdmin
"""
class Meta:
model = CustomUser
fields = ('email', 'username',)
class RegistrationForm(UserCreationForm):
"""
Form that will be used in signup process
"""
class Meta:
model = CustomUser
fields = ('email', 'username', 'first_name', 'last_name') | 26.275862 | 70 | 0.666667 |
7947f2847d3466790ef15ec7e94bef5e06a88ff1 | 2,811 | py | Python | pretrainings/pretrainings_tmc.py | expertailab/ISAAQ | 133e25adbf5c219aceef6e7f38135de248371cb1 | [
"MIT"
] | 7 | 2020-10-06T03:51:13.000Z | 2021-11-30T04:05:10.000Z | pretrainings/pretrainings_tmc.py | expertailab/ISAAQ | 133e25adbf5c219aceef6e7f38135de248371cb1 | [
"MIT"
] | 2 | 2020-10-12T02:10:47.000Z | 2021-01-05T06:15:54.000Z | pretrainings/pretrainings_tmc.py | expertailab/ISAAQ | 133e25adbf5c219aceef6e7f38135de248371cb1 | [
"MIT"
] | 4 | 2020-10-08T05:04:27.000Z | 2021-01-07T01:31:22.000Z | from transformers import AdamW, RobertaForMultipleChoice, RobertaTokenizer
from transformers import get_linear_schedule_with_warmup
import numpy as np
import random
import torch
import sys
import argparse
from aux_methods import get_data_pretrainings, process_data_ndq, training_ndq
def main(argv):
parser = argparse.ArgumentParser(description='')
parser.add_argument('-d', '--device', default='gpu', choices=['gpu', 'cpu'], help='device to train the model with. Options: cpu or gpu. Default: gpu')
parser.add_argument('-p', '--pretrainings', default='../checkpoints/RACE_e1.pth', help='path to the pretrainings model. Default: ../checkpoints/RACE_e1.pth')
parser.add_argument('-b', '--batchsize', default= 1, type=int, help='size of the batches. Default: 1')
parser.add_argument('-x', '--maxlen', default= 256, type=int, help='max sequence length. Default: 256')
parser.add_argument('-l', '--lr', default= 1e-5, type=float, help='learning rate. Default: 1e-5')
parser.add_argument('-e', '--epochs', default= 4, type=int, help='number of epochs. Default: 4')
parser.add_argument('-s', '--save', default=False, help='save model at the end of the training', action='store_true')
args = parser.parse_args()
print(args)
if args.pretrainings == "":
model = RobertaForMultipleChoice.from_pretrained("roberta-large")
else:
model = torch.load(args.pretrainings)
tokenizer = RobertaTokenizer.from_pretrained('roberta-large')
if args.device=="gpu":
device = torch.device("cuda")
model.cuda()
if args.device=="cpu":
device = torch.device("cpu")
model.cpu()
model.zero_grad()
batch_size = args.batchsize
max_len = args.maxlen
dataset_name = "pretrainings"
lr = args.lr
epochs = args.epochs
save_model = args.save
raw_data_train = get_data_pretrainings(dataset_name, "train", tokenizer, max_len)
raw_data_val = get_data_pretrainings(dataset_name, "val", tokenizer, max_len)
train_dataloader = process_data_ndq(raw_data_train, batch_size, "train")
val_dataloader = process_data_ndq(raw_data_val, batch_size, "val")
optimizer = AdamW(model.parameters(), lr = lr, eps = 1e-8)
total_steps = len(train_dataloader) * epochs
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps = 0, num_training_steps = total_steps)
training_ndq(model, train_dataloader, val_dataloader, optimizer, scheduler, epochs, device, save_model, dataset_name)
if __name__ == "__main__":
# Set the seed value all over the place to make this reproducible.
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
main(sys.argv[1:]) | 43.921875 | 161 | 0.704731 |
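# Illustrative invocation (not part of the original script). The checkpoint path follows the
# default shown in the argparse setup above; adjust the values to your own environment.
#   python pretrainings_tmc.py -d gpu -p ../checkpoints/RACE_e1.pth -b 1 -x 256 -l 1e-5 -e 4 -s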
7947f2d84e8441a05199b8816964ce493113c9ac | 6,918 | py | Python | bindings/python/ensmallen_graph/datasets/string/lachnospiraceaebacteriumfe2018.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/lachnospiraceaebacteriumfe2018.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | bindings/python/ensmallen_graph/datasets/string/lachnospiraceaebacteriumfe2018.py | caufieldjh/ensmallen_graph | 14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a | [
"MIT"
] | null | null | null | """
This file offers the methods to automatically retrieve the graph Lachnospiraceae bacterium FE2018.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:42:43.955245
The undirected graph Lachnospiraceae bacterium FE2018 has 2687 nodes and
185520 weighted edges, of which none are self-loops. The graph is dense
as it has a density of 0.05141 and has 9 connected components, where the
component with most nodes has 2670 nodes and the component with the least
nodes has 2 nodes. The graph median node degree is 101, the mean node degree
is 138.09, and the node degree mode is 1. The top 5 most central nodes
are 1410624.JNKK01000005_gene258 (degree 1099), 1410624.JNKK01000011_gene2220
(degree 993), 1410624.JNKK01000012_gene2308 (degree 919), 1410624.JNKK01000034_gene2571
(degree 870) and 1410624.JNKK01000068_gene1149 (degree 824).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import LachnospiraceaeBacteriumFe2018
# Then load the graph
graph = LachnospiraceaeBacteriumFe2018()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def LachnospiraceaeBacteriumFe2018(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Lachnospiraceae bacterium FE2018 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
    cache_path: str = "graphs/string",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Lachnospiraceae bacterium FE2018 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-03 22:42:43.955245
The undirected graph Lachnospiraceae bacterium FE2018 has 2687 nodes and
185520 weighted edges, of which none are self-loops. The graph is dense
as it has a density of 0.05141 and has 9 connected components, where the
component with most nodes has 2670 nodes and the component with the least
nodes has 2 nodes. The graph median node degree is 101, the mean node degree
is 138.09, and the node degree mode is 1. The top 5 most central nodes
are 1410624.JNKK01000005_gene258 (degree 1099), 1410624.JNKK01000011_gene2220
(degree 993), 1410624.JNKK01000012_gene2308 (degree 919), 1410624.JNKK01000034_gene2571
(degree 870) and 1410624.JNKK01000068_gene1149 (degree 824).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import LachnospiraceaeBacteriumFe2018
# Then load the graph
graph = LachnospiraceaeBacteriumFe2018()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="LachnospiraceaeBacteriumFe2018",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 36.219895 | 223 | 0.712778 |
7947f32ca0629cfad404922aa3cc80ba408baca2 | 3,054 | py | Python | tensorflow_graphics/notebooks/mesh_viewer.py | jackd/graphics | 736b99a3306e302674a9b7599e3e2857b85fdb74 | [
"Apache-2.0"
] | null | null | null | tensorflow_graphics/notebooks/mesh_viewer.py | jackd/graphics | 736b99a3306e302674a9b7599e3e2857b85fdb74 | [
"Apache-2.0"
] | null | null | null | tensorflow_graphics/notebooks/mesh_viewer.py | jackd/graphics | 736b99a3306e302674a9b7599e3e2857b85fdb74 | [
"Apache-2.0"
] | 1 | 2020-04-11T10:37:36.000Z | 2020-04-11T10:37:36.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helper class for viewing 3D meshes in Colab demos.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow_graphics.notebooks import threejs_visualization
SEGMENTATION_COLORMAP = np.array(
((165, 242, 12), (89, 12, 89), (165, 89, 165), (242, 242, 165),
(242, 165, 12), (89, 12, 12), (165, 12, 12), (165, 89, 242), (12, 12, 165),
(165, 12, 89), (12, 89, 89), (165, 165, 89), (89, 242, 12), (12, 89, 165),
(242, 242, 89), (165, 165, 165)),
dtype=np.float32) / 255.0
class Viewer(object):
"""A ThreeJS based viewer class for viewing 3D meshes."""
def _mesh_from_data(self, data):
"""Creates a dictionary of ThreeJS mesh objects from numpy data."""
if 'vertices' not in data or 'faces' not in data:
raise ValueError('Mesh Data must contain vertices and faces')
vertices = np.asarray(data['vertices'])
faces = np.asarray(data['faces'])
material = self.context.THREE.MeshLambertMaterial.new_object({
'color': 0xfffacd,
'vertexColors': self.context.THREE.NoColors,
'side': self.context.THREE.DoubleSide,
})
mesh = {'vertices': vertices, 'faces': faces}
if 'vertex_colors' in data:
mesh['vertex_colors'] = np.asarray(data['vertex_colors'])
material = self.context.THREE.MeshLambertMaterial.new_object({
'color': 0xfffacd,
'vertexColors': self.context.THREE.VertexColors,
'side': self.context.THREE.DoubleSide,
})
mesh['material'] = material
return mesh
def __init__(self, source_mesh_data):
context = threejs_visualization.build_context()
self.context = context
light1 = context.THREE.PointLight.new_object(0x808080)
light1.position.set(10., 10., 10.)
light2 = context.THREE.AmbientLight.new_object(0x808080)
lights = (light1, light2)
camera = threejs_visualization.build_perspective_camera(
field_of_view=30, position=(0.0, 0.0, 4.0))
mesh = self._mesh_from_data(source_mesh_data)
geometries = threejs_visualization.triangular_mesh_renderer([mesh],
lights=lights,
camera=camera,
width=400,
height=400)
self.geometries = geometries
| 40.184211 | 80 | 0.638179 |
7947f45ac8bc384fbcbb39197a6a9122e89ec90d | 880 | py | Python | spider/data_collector.py | glstr/python_learning | 243908d6f358764386f2e58dfbfde10a406d803c | [
"Apache-2.0"
] | 2 | 2018-09-20T06:08:00.000Z | 2018-09-26T13:57:20.000Z | spider/data_collector.py | glstr/python_learning | 243908d6f358764386f2e58dfbfde10a406d803c | [
"Apache-2.0"
] | null | null | null | spider/data_collector.py | glstr/python_learning | 243908d6f358764386f2e58dfbfde10a406d803c | [
"Apache-2.0"
] | 1 | 2019-03-25T05:53:32.000Z | 2019-03-25T05:53:32.000Z | #!/usr/bin/python
# coding=utf-8
import json
import requests
class DataCollector(object):
def __init__(self):
self.name = "default data collector"
return
def grab(self, option):
url = option["url"]
r = requests.get(url)
        # parse and return the JSON body so it can be serialized later in output()
        return r.json()
def gather(self, options):
res = []
for option in options:
r = self.grab(option)
res.append(r)
return res
def output(self, options):
f = open("output.txt", "w")
contents = self.gather(options)
for content in contents:
cstr = json.dumps(content)
f.write(cstr + '\n')
f.close()
return
def load_config(self, config_file):
with open(config_file) as f:
self.options = json.load(f)
if __name__ == '__main__':
print "hello world"
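# Illustrative usage of DataCollector (not part of the original module): 'config.json' is a
# hypothetical file containing a JSON list of option dicts, e.g. [{"url": "http://example.com/api"}].
# collector = DataCollector()
# collector.load_config('config.json')
# collector.output(collector.options)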
| 20.465116 | 44 | 0.542045 |
7947f60c020e68066e55c9bf1360bcba8e1517d2 | 3,158 | py | Python | mindinsight/mindconverter/graph_based_converter/sub_graph_searcher/known_module_name.py | fapbatista/mindinsight | db5769eb80cbd13a2a9af7682c11f5667d8bf141 | [
"Apache-2.0"
] | null | null | null | mindinsight/mindconverter/graph_based_converter/sub_graph_searcher/known_module_name.py | fapbatista/mindinsight | db5769eb80cbd13a2a9af7682c11f5667d8bf141 | [
"Apache-2.0"
] | null | null | null | mindinsight/mindconverter/graph_based_converter/sub_graph_searcher/known_module_name.py | fapbatista/mindinsight | db5769eb80cbd13a2a9af7682c11f5667d8bf141 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Huawei Technologies Co., Ltd.All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Introduce some standard pattern name into MindConverter."""
__all__ = ["register_module_name", "is_built_in_module_name", "BUILT_IN_MODULE_NAME"]
from mindinsight.mindconverter.graph_based_converter.sub_graph_searcher.pattern import Pattern
PLACEHOLDER = "PLC"
BUILT_IN_MODULE_NAME = dict()
def is_built_in_module_name(module_name: str):
"""
Whether the module name was built-in.
Args:
module_name (str): Module name.
Returns:
bool, true or false.
"""
return module_name.split("_")[0] in BUILT_IN_MODULE_NAME
def register_module_name(md_name: str, in_degree: int, out_degree: int):
"""
Register pattern to MindConverter.
Args:
out_degree (int): Out degree of pattern.
in_degree (int): In degree of pattern.
md_name (str): Module name.
"""
def _reg(pattern):
result = pattern()
if not result:
return
BUILT_IN_MODULE_NAME[Pattern("->".join(result), len(result),
in_degree, out_degree,
ptn_items=result)] = md_name
return _reg
@register_module_name("Bottleneck", 1, 2)
def _resnet_block_0():
"""Add ResNet feature extraction block pattern."""
return ["Conv", "BatchNormalization", "Relu",
"Conv", "BatchNormalization", "Relu",
"Conv", "BatchNormalization", "Add", "Relu"]
@register_module_name("Bottleneck", 1, 2)
def _resnet_block_1():
"""Add ResNet feature extraction block pattern."""
return [PLACEHOLDER, PLACEHOLDER, "Conv", "BatchNormalization", "Add", "Relu"]
@register_module_name("Bottleneck", 1, 2)
def _resnet_block_2():
"""Add ResNet feature extraction block pattern."""
return [PLACEHOLDER, PLACEHOLDER, PLACEHOLDER, "Add", "Relu"]
@register_module_name("BasicConvBlock", 1, 1)
def _basic_conv_block_0():
"""Add basic conv block."""
return ["Conv", "BatchNormalization", "Relu"]
@register_module_name("ConvBN", 1, 1)
def _conv_bn():
"""Add basic conv block."""
return ["Conv", "BatchNormalization"]
@register_module_name("UnSample", 1, 1)
def _up_sampling_in_op12():
return [
"Shape", "Slice", "Gather", "Cast", "Slice", "Mul", "Cast", "Concat", "Resize"
]
@register_module_name("UnSample", 1, 1)
def _up_sampling_in_op10():
return [
"Shape", "Gather", "Cast", "Slice", "Mul", "Slice", "Cast", "Cast", "Div", "Concat", "Resize"
]
| 30.365385 | 101 | 0.649145 |
7947f7cccae483ca053f2dcb8c10607944745386 | 6,815 | py | Python | tests/components/hassio/test_http.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | tests/components/hassio/test_http.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/hassio/test_http.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """The tests for the hassio component."""
import asyncio
from http import HTTPStatus
from aiohttp import StreamReader
import pytest
from homeassistant.components.hassio.http import _need_auth
async def test_forward_request(hassio_client, aioclient_mock):
"""Test fetching normal path."""
aioclient_mock.post("http://127.0.0.1/beer", text="response")
resp = await hassio_client.post("/api/hassio/beer")
# Check we got right response
assert resp.status == HTTPStatus.OK
body = await resp.text()
assert body == "response"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
@pytest.mark.parametrize(
"build_type", ["supervisor/info", "homeassistant/update", "host/info"]
)
async def test_auth_required_forward_request(hassio_noauth_client, build_type):
"""Test auth required for normal request."""
resp = await hassio_noauth_client.post(f"/api/hassio/{build_type}")
# Check we got right response
assert resp.status == HTTPStatus.UNAUTHORIZED
@pytest.mark.parametrize(
"build_type",
[
"app/index.html",
"app/hassio-app.html",
"app/index.html",
"app/hassio-app.html",
"app/some-chunk.js",
"app/app.js",
],
)
async def test_forward_request_no_auth_for_panel(
hassio_client, build_type, aioclient_mock
):
    """Test no auth needed for panel assets."""
aioclient_mock.get(f"http://127.0.0.1/{build_type}", text="response")
resp = await hassio_client.get(f"/api/hassio/{build_type}")
# Check we got right response
assert resp.status == HTTPStatus.OK
body = await resp.text()
assert body == "response"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
async def test_forward_request_no_auth_for_logo(hassio_client, aioclient_mock):
"""Test no auth needed for logo."""
aioclient_mock.get("http://127.0.0.1/addons/bl_b392/logo", text="response")
resp = await hassio_client.get("/api/hassio/addons/bl_b392/logo")
# Check we got right response
assert resp.status == HTTPStatus.OK
body = await resp.text()
assert body == "response"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
async def test_forward_request_no_auth_for_icon(hassio_client, aioclient_mock):
"""Test no auth needed for icon."""
aioclient_mock.get("http://127.0.0.1/addons/bl_b392/icon", text="response")
resp = await hassio_client.get("/api/hassio/addons/bl_b392/icon")
# Check we got right response
assert resp.status == HTTPStatus.OK
body = await resp.text()
assert body == "response"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
async def test_forward_log_request(hassio_client, aioclient_mock):
"""Test fetching normal log path doesn't remove ANSI color escape codes."""
aioclient_mock.get("http://127.0.0.1/beer/logs", text="\033[32mresponse\033[0m")
resp = await hassio_client.get("/api/hassio/beer/logs")
# Check we got right response
assert resp.status == HTTPStatus.OK
body = await resp.text()
assert body == "\033[32mresponse\033[0m"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
async def test_bad_gateway_when_cannot_find_supervisor(hassio_client, aioclient_mock):
"""Test we get a bad gateway error if we can't find supervisor."""
aioclient_mock.get("http://127.0.0.1/addons/test/info", exc=asyncio.TimeoutError)
resp = await hassio_client.get("/api/hassio/addons/test/info")
assert resp.status == HTTPStatus.BAD_GATEWAY
async def test_forwarding_user_info(hassio_client, hass_admin_user, aioclient_mock):
"""Test that we forward user info correctly."""
aioclient_mock.get("http://127.0.0.1/hello")
resp = await hassio_client.get("/api/hassio/hello")
# Check we got right response
assert resp.status == HTTPStatus.OK
assert len(aioclient_mock.mock_calls) == 1
req_headers = aioclient_mock.mock_calls[0][-1]
assert req_headers["X-Hass-User-ID"] == hass_admin_user.id
assert req_headers["X-Hass-Is-Admin"] == "1"
async def test_backup_upload_headers(hassio_client, aioclient_mock, caplog):
"""Test that we forward the full header for backup upload."""
content_type = "multipart/form-data; boundary='--webkit'"
aioclient_mock.get("http://127.0.0.1/backups/new/upload")
resp = await hassio_client.get(
"/api/hassio/backups/new/upload", headers={"Content-Type": content_type}
)
# Check we got right response
assert resp.status == HTTPStatus.OK
assert len(aioclient_mock.mock_calls) == 1
req_headers = aioclient_mock.mock_calls[0][-1]
assert req_headers["Content-Type"] == content_type
async def test_backup_download_headers(hassio_client, aioclient_mock):
"""Test that we forward the full header for backup download."""
content_disposition = "attachment; filename=test.tar"
aioclient_mock.get(
"http://127.0.0.1/backups/slug/download",
headers={
"Content-Length": "50000000",
"Content-Disposition": content_disposition,
},
)
resp = await hassio_client.get("/api/hassio/backups/slug/download")
# Check we got right response
assert resp.status == HTTPStatus.OK
assert len(aioclient_mock.mock_calls) == 1
assert resp.headers["Content-Disposition"] == content_disposition
def test_need_auth(hass):
"""Test if the requested path needs authentication."""
assert not _need_auth(hass, "addons/test/logo")
assert _need_auth(hass, "backups/new/upload")
assert _need_auth(hass, "supervisor/logs")
hass.data["onboarding"] = False
assert not _need_auth(hass, "backups/new/upload")
assert not _need_auth(hass, "supervisor/logs")
async def test_stream(hassio_client, aioclient_mock):
"""Verify that the request is a stream."""
aioclient_mock.get("http://127.0.0.1/test")
await hassio_client.get("/api/hassio/test", data="test")
assert isinstance(aioclient_mock.mock_calls[-1][2], StreamReader)
async def test_entrypoint_cache_control(hassio_client, aioclient_mock):
"""Test that we return cache control for requests to the entrypoint only."""
aioclient_mock.get("http://127.0.0.1/app/entrypoint.js")
aioclient_mock.get("http://127.0.0.1/app/entrypoint.fdhkusd8y43r.js")
resp1 = await hassio_client.get("/api/hassio/app/entrypoint.js")
resp2 = await hassio_client.get("/api/hassio/app/entrypoint.fdhkusd8y43r.js")
# Check we got right response
assert resp1.status == HTTPStatus.OK
assert resp2.status == HTTPStatus.OK
assert len(aioclient_mock.mock_calls) == 2
assert resp1.headers["Cache-Control"] == "no-store, max-age=0"
assert "Cache-Control" not in resp2.headers
| 32.922705 | 86 | 0.704916 |
7947f851f8f04b9ce2515f2ec19e08711c27ffee | 28,686 | py | Python | mskit/ms_pred/pdeep.py | gureann/MSKit | 8b360d38288100476740ad808e11b6c1b454dc2c | [
"MIT"
] | null | null | null | mskit/ms_pred/pdeep.py | gureann/MSKit | 8b360d38288100476740ad808e11b6c1b454dc2c | [
"MIT"
] | null | null | null | mskit/ms_pred/pdeep.py | gureann/MSKit | 8b360d38288100476740ad808e11b6c1b454dc2c | [
"MIT"
] | null | null | null | import os
import re
from collections import defaultdict
import pandas as pd
from mskit import rapid_kit
from mskit.post_analysis.spectronaut import SpectronautLibrary
from ._pdeep_constant import BasicpDeepInfo
from ._pdeep_constant import MOD
def intprec_to_pdeep_test(intprec_list):
"""
    Convert intprec entries into a pDeep2 test input DataFrame.
    intprec example: DKEAIQA4SESLMTSAPK.2
    The pDeep2 test input format is:
peptide modification charge
FRTPSFLK 3,Phospho[T];5,Phospho[S]; 2
...
"""
title = ['peptide', 'modification', 'charge']
pdeep_test_data_list = []
for each_intprec in intprec_list:
intprec_result = intprec_to_pdeep(each_intprec)
if intprec_result is not None:
stripped_pep, mod_info, charge = intprec_result
else:
continue
pdeep_test_data_list.append([stripped_pep, mod_info, charge])
pdeep_test_df = pd.DataFrame(pdeep_test_data_list, columns=title)
return pdeep_test_df
def intprec_to_pdeep(intprec: str):
int_to_pdeep2_mod = {
'C': 'Carbamidomethyl[C]',
'1': 'Oxidation[M]',
'2': 'Phospho[S]',
'3': 'Phospho[T]',
'4': 'Phospho[Y]',
}
intseq, charge = intprec.split('.')
if intseq.startswith('@'):
intseq = intseq[1:]
elif intseq.startswith('*'):
return None
else:
pass
stripped_pep = intseq.replace('1', 'M').replace('2', 'S').replace('3', 'T').replace('4', 'Y')
mod_info = ''
for _ in re.finditer('[C1234]', intseq):
site = _.end()
mod_char = _.group()
mod = int_to_pdeep2_mod[mod_char]
mod_info += f'{site},{mod};'
return stripped_pep, mod_info, charge
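# Illustrative example (not part of the original module), traced from intprec_to_pdeep above:
# the digit '4' encodes a phospho-Y at that residue, so
# >>> intprec_to_pdeep('DKEAIQA4SESLMTSAPK.2')
# ('DKEAIQAYSESLMTSAPK', '8,Phospho[Y];', '2')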
def mod_extraction_for_pdeep(mod_pep):
"""
    Extract a pDeep-style modification string (e.g. '1,Carbamidomethyl[C];') from a Spectronaut
    modified peptide; returns 'Unsupport' for terminal modifications.
"""
mod_pep = mod_pep.replace('_', '')
if '[' not in mod_pep:
return ''
else:
modinfo = ''
mod_start = [left_bracket.start() for left_bracket in re.finditer('\[', mod_pep)]
mod_end = [right_bracket.start() for right_bracket in re.finditer(']', mod_pep)]
mod_len = 0
for mod_site in zip(mod_start, mod_end):
if mod_site[0] == 0: # or mod_site[1] == len(mod_pep) - 1:
return 'Unsupport'
else:
mod_residu = mod_pep[mod_site[0] - 1]
mod_type = mod_pep[mod_site[0] + 1: mod_site[1]].replace(' ', '')
mod_type = re.sub(r'\(.+?\)', f'[{mod_residu}]', mod_type)
modinfo += '{mod_site},{mod_type};'.format(mod_site=mod_site[0] - mod_len, mod_type=mod_type)
mod_len += (mod_site[1] - mod_site[0] + 1)
return modinfo
def inten_dict_to_plabel(inten_dict: dict):
"""
:param inten_dict: The input dict should have the k[v] pairs as 'Prec': {'Frag_1': Inten_1, 'Frag_2': Inten_2, ...}
"""
plabel_rows = []
for prec, ion_inten_dict in inten_dict.items():
intprec_trans = intprec_to_pdeep(prec)
if intprec_trans is None:
continue
stripped_pep, mod_info, charge = intprec_trans
spec = f'Unknown.{charge}.0.0'
plabel_ion_str = plabel_one_ion_row(ion_inten_dict, return_type='str')
plabel_rows.append(f'{spec}\t{stripped_pep}\t{mod_info}\t{plabel_ion_str}')
return plabel_rows
def write_plabel_with_inten_dict(inten_dict: dict, output_path: str):
plabel_rows = inten_dict_to_plabel(inten_dict)
with open(output_path, 'w') as f:
f.write('spec\tpeptide\tmodinfo\tb\tb-NH3\tb-H2O\tb-ModLoss\ty\ty-NH3\ty-H2O\ty-ModLoss\n')
for row in plabel_rows:
f.write(row + '\n')
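# Illustrative usage (not part of the original module). Precursor keys follow the intprec
# convention used above ('4' encodes phospho-Y) and fragment keys follow the
# '<ion><num>+<charge>-<loss>' pattern parsed by plabel_one_ion_row; values are intensities.
# The output path is hypothetical.
# example_inten = {
#     'DKEAIQA4SESLMTSAPK.2': {
#         'b2+1-Noloss': 35.2,
#         'y5+1-Noloss': 100.0,
#         'y9+1-1,H3PO4': 12.7,
#     }
# }
# write_plabel_with_inten_dict(example_inten, 'example.plabel')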
def plabel_to_pred_input(plabel_path):
plabel_df = pd.read_csv(plabel_path, sep='\t', low_memory=False)
plabel_df['charge'] = plabel_df['spec'].apply(lambda x: x.split('.')[-3])
plabel_df = plabel_df[['peptide', 'modinfo', 'charge']]
plabel_df.columns = ['peptide', 'modification', 'charge']
return plabel_df
def plabel_one_ion_row(ion_inten_dict: dict,
ion_type=('b', 'b-NH3', 'b-H2O', 'b-ModLoss', 'y', 'y-NH3', 'y-H2O', 'y-ModLoss'),
return_type='str'):
ion_dict = defaultdict(list)
ion_dict.fromkeys(ion_type)
loss_trans = {'1,H3PO4': 'ModLoss',
'1,H2O': 'H2O',
'1,NH3': 'NH3'}
for frag, inten in ion_inten_dict.items():
frag_type, frag_num, frag_charge, frag_loss = re.findall(r'([abcxyz])(\d+)\+(\d)-(.+)', frag)[0]
if frag_loss == 'Noloss':
ion_name = f'{frag_type}'
elif frag_loss in ['1,H2O', '1,NH3', '1,H3PO4']:
ion_name = f'{frag_type}-{loss_trans[frag_loss]}'
else:
continue
ion_dict[ion_name].append((f'{frag_type}{frag_num}{ion_name[1:]}+{frag_charge},{inten};',
int(frag_num),
int(frag_charge)))
if return_type == 'dict':
return ion_dict
elif return_type == 'str':
ion_info = []
for each_ion_type in ion_type:
if each_ion_type[0] in ['a', 'b', 'c']:
sorted_ions = sorted(ion_dict[each_ion_type], key=lambda x: (x[2], x[1]), reverse=False)
elif each_ion_type[0] in ['x', 'y', 'z']:
sorted_ions = sorted(ion_dict[each_ion_type], key=lambda x: (-x[2], x[1]), reverse=True)
else:
raise
ions = [_[0] for _ in sorted_ions]
ion_info.append(''.join(ions))
return '\t'.join(ion_info)
def plabel_ion_info(one_psm_df, return_type):
ion_info = {'b': '', 'b-NH3': '', 'b-H2O': '', 'b-ModLoss': '', 'y': [], 'y-NH3': [], 'y-H2O': [], 'y-ModLoss': []}
for row_index, each_row in one_psm_df.iterrows():
fragment_type = each_row['FragmentType']
fragment_num = each_row['FragmentNumber']
fragment_charge = each_row['FragmentCharge']
fragment_relative_intensity = each_row['RelativeIntensity']
fragment_losstype = each_row['FragmentLossType']
if fragment_type == 'b':
if fragment_losstype == 'noloss':
ion_info['b'] += 'b{num}+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'NH3':
ion_info['b-NH3'] += 'b{num}-NH3+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'H2O':
ion_info['b-H2O'] += 'b{num}-H2O+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'H3PO4':
ion_info['b-ModLoss'] += 'b{num}-ModLoss+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge,
relative_intensity=fragment_relative_intensity)
else:
continue
elif fragment_type == 'y':
if fragment_losstype == 'noloss':
ion_info['y'].append('y{num}+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity))
elif fragment_losstype == 'NH3':
ion_info['y-NH3'].append(
'y{num}-NH3+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity))
elif fragment_losstype == 'H2O':
ion_info['y-H2O'].append(
'y{num}-H2O+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity))
elif fragment_losstype == 'H3PO4':
ion_info['y-ModLoss'].append(
'y{num}-ModLoss+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity))
else:
continue
if return_type == 'dict':
return ion_info
elif return_type == 'str':
str_ion_info = ''
b_ion_order = ['b', 'b-NH3', 'b-H2O', 'b-ModLoss']
# ion_order = ['b', 'y']
for ion_losstype in b_ion_order:
str_ion_info += ion_info[ion_losstype]
str_ion_info += '\t'
y_ion_order = ['y', 'y-NH3', 'y-H2O', 'y-ModLoss']
for ion_losstype in y_ion_order:
str_ion_info += ''.join(ion_info[ion_losstype][::-1])
if ion_losstype != 'y-ModLoss':
str_ion_info += '\t'
# if ion_losstype != 'y':
# str_ion_info += '\t'
return str_ion_info
def sn_lib_to_plabel(lib, plabel_output):
if isinstance(lib, pd.DataFrame):
lib_df = lib
else:
if os.path.exists(lib):
lib_df = pd.read_csv(lib, sep='\t', low_memory=False)
else:
raise FileNotFoundError
lib_df['Prec'] = lib_df['ModifiedPeptide'] + '.' + lib_df['PrecursorCharge'].astype(str)
with open(plabel_output, 'w') as plabel_handle:
plabel_handle.write('spec\tpeptide\tmodinfo\tb\tb-NH3\tb-H2O\tb-ModLoss\ty\ty-NH3\ty-H2O\ty-ModLoss\n')
# handle_plabel.write('spec\tpeptide\tmodinfo\tb\ty\n')
for psm_index, (each_prec, each_psm_df) in enumerate(lib_df.groupby('Prec')):
first_row = each_psm_df.iloc[0]
spec = '{title}.{charge}.0.0'.format(title=first_row['ReferenceRun'], charge=first_row['PrecursorCharge'])
# spec = '{charge}.0.0'.format(charge=first_fragment[1])
stripped_pep = first_row['StrippedPeptide']
mod_pep = first_row['ModifiedPeptide']
modinfo = mod_extraction_for_pdeep(mod_pep)
if modinfo == 'Unsupport':
continue
ion_info = plabel_ion_info(each_psm_df, 'str')
plabel_handle.write('{spec}\t{pep}\t{mod}\t{ioninfo}\n'.format(
spec=spec, pep=stripped_pep, mod=modinfo, ioninfo=ion_info))
def sn_lib_to_pdeep_test(test_lib, test_set_output):
if isinstance(test_lib, pd.DataFrame):
lib_df = test_lib
else:
if os.path.exists(test_lib):
lib_df = pd.read_csv(test_lib, sep='\t', low_memory=False)
else:
raise FileNotFoundError
lib_df['Prec'] = lib_df['ModifiedPeptide'] + '.' + lib_df['PrecursorCharge'].astype(str)
lib_df = lib_df.drop_duplicates('Prec')
with open(test_set_output, 'w') as test_handle:
test_handle.write('peptide\tmodification\tcharge\n')
for row_index, each_lib_row in lib_df.iterrows():
mod_pep = each_lib_row['ModifiedPeptide']
charge = str(each_lib_row['PrecursorCharge'])
stripped_pep = each_lib_row['StrippedPeptide']
mod = mod_extraction_for_pdeep(mod_pep)
if mod == 'Unsupport':
continue
test_handle.write('{}\t{}\t{}\n'.format(stripped_pep, mod, charge))
def extract_pdeep_mod(mod_pep, mod_ident='bracket', mod_trans=True):
"""
input: '_C[Carbamidomethyl (C)]DM[Oxidation (M)]EDER_'
output: 'CDMEDER', '1,Carbamidomethyl[C];3,Oxidation[M];'
"""
stripped_pep, mod = rapid_kit.split_mod(modpep=mod_pep, mod_ident=mod_ident)
if mod_trans:
mod = trans_sn_mod(mod)
return stripped_pep, mod
def trans_sn_mod(mod):
for sn_mod, pdeep_mod in MOD.items():
mod = mod.replace(sn_mod, pdeep_mod)
if '(' not in mod:
break
if '(' in mod:
return None
return mod
def restore_pdeep_mod_site(stripped_pep, mod_content, mod_processor):
"""
This will restore the modification to stripped peptide.
EXAMPLE: restore_pdeep_mod_site('MPALAIMGLSLAAFLELGMGASLCLSQQFK', '24,Carbamidomethyl[C];')
-> 'MPALAIMGLSLAAFLELGMGASLC[Carbamidomethyl (C)]LSQQFK'
"""
return rapid_kit.add_mod(stripped_pep, mod_content, mod_processor)
def pdeep_input(output_path, prec_list):
with open(output_path, 'w') as out_file:
pred_title = ['peptide', 'modification', 'charge']
out_file.write('\t'.join(pred_title) + '\n')
for _prec in prec_list:
modpep, charge = rapid_kit.split_prec(_prec)
strip_pep, mod = extract_pdeep_mod(modpep)
out_file.write(f'{strip_pep}\t{mod}\t{charge}\n')
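# Illustrative usage (not part of the original module). The precursor string follows the
# '_<modified peptide>_.<charge>' convention shown in extract_pdeep_mod's docstring above,
# assuming rapid_kit.split_prec splits on the final '.'; the output path is hypothetical.
# pdeep_input('pdeep_pred_input.txt', ['_C[Carbamidomethyl (C)]DM[Oxidation (M)]EDER_.2'])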
def pdeep_trainset(output_path, prec_inten_dict):
with open(output_path, 'w') as out_file:
plabel_title_list = BasicpDeepInfo.pDeepTrainsetTitle
plabel_title = '\t'.join(plabel_title_list)
out_file.write(plabel_title + '\n')
for _prec, inten_dict in prec_inten_dict.items():
plabel_row_dict = plabel_one_row_dict(_prec, inten_dict)
if not plabel_row_dict:
continue
one_row_list = [plabel_row_dict[_] for _ in plabel_title_list]
out_file.write('\t'.join(one_row_list) + '\n')
def plabel_one_row_dict(prec, inten_dict: dict):
plabel_row_dict = defaultdict(str)
modpep, charge = rapid_kit.split_prec(prec)
strip_pep, mod = extract_pdeep_mod(modpep, mod_ident='bracket', mod_trans=True)
if not mod:
return None
plabel_row_dict['spec'] = f'{charge}.0.0'
plabel_row_dict['peptide'] = strip_pep
plabel_row_dict['modinfo'] = mod
for frag, inten in inten_dict.items():
frag_type, frag_num, frag_charge, frag_loss = rapid_kit.split_fragment_name(frag)
if frag_loss == 'noloss':
plabel_type = frag_type
plabel_frag = f'{frag_type}{frag_num}+{frag_charge}'
elif frag_loss == 'NH3' or frag_loss == 'H2O':
plabel_type = f'{frag_type}-{frag_loss}'
plabel_frag = f'{frag_type}{frag_num}-{frag_loss}+{frag_charge}'
else:
plabel_type = f'{frag_type}-ModLoss'
plabel_frag = f'{frag_type}{frag_num}-ModLoss+{frag_charge}'
plabel_row_dict[plabel_type] += f'{plabel_frag},{inten};'
return plabel_row_dict
def read_pdeep_result(pdeep_result, modloss_name='H3PO4',
require_mz=True, min_inten_ratio=0.01, min_frag_num=3,
exclude_frag_num=(1, 2), exclude_modloss=False):
mod_dict = {'Carbamidomethyl[C]': '[Carbamidomethyl (C)]',
'Oxidation[M]': '[Oxidation (M)]',
'Phospho[S]': '[Phospho (STY)]',
'Phospho[T]': '[Phospho (STY)]',
'Phospho[Y]': '[Phospho (STY)]',
}
with open(os.path.abspath(pdeep_result), 'r') as pdeep_handle:
predicted_fragment_data = dict()
for each_line in pdeep_handle:
each_line = each_line.strip('\n')
if each_line == 'BEGIN IONS':
fragment_dict = dict()
elif each_line == 'END IONS':
if len(fragment_dict) >= min_frag_num:
predicted_fragment_data[prec] = fragment_dict
else:
pass
else:
if each_line.startswith('TITLE'):
split_pep_title = each_line.replace('TITLE=', '').split('|')
stripped_pep = split_pep_title[0]
mod = split_pep_title[1].strip(';')
charge = split_pep_title[2]
if not mod:
prec = '_{}_.{}'.format(stripped_pep, charge)
else:
mod_pep = ''
previous_mod_site = 0
for each_mod in mod.split(';'):
each_mod_info = each_mod.split(',')
mod_site = int(each_mod_info[0])
mod_type = mod_dict[each_mod_info[1]]
mod_pep += stripped_pep[previous_mod_site: mod_site] + mod_type
previous_mod_site = mod_site
mod_pep += stripped_pep[previous_mod_site:]
prec = '_{}_.{}'.format(mod_pep, charge)
elif each_line[0].isdigit():
split_frag_inten_line = each_line.split(' ')
frag_inten = round(float(split_frag_inten_line[1]), 5) * 100
if frag_inten < min_inten_ratio:
continue
frag_mz = split_frag_inten_line[0]
if float(frag_mz) < 10:
continue
frag_name = split_frag_inten_line[2]
frag_type, frag_num, loss_type, frag_c = re.findall('([by])(\d+)-?(.+)?\+(\d)', frag_name)[0]
if int(frag_num) in exclude_frag_num:
continue
if exclude_modloss and loss_type == 'ModLoss':
continue
new_frag_name = f'{frag_type}{frag_num}+{frag_c}'
if not loss_type:
new_frag_name += '-noloss'
else:
new_frag_name += f'-{loss_type}' if loss_type != 'ModLoss' else f'-{modloss_name}'
if require_mz:
fragment_dict[new_frag_name] = (frag_mz, frag_inten)
else:
fragment_dict[new_frag_name] = frag_inten
else:
continue
return predicted_fragment_data
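# Illustrative usage (not part of the original module); the file path is hypothetical.
# Keys are Spectronaut-style precursors ('_<modpep>_.<charge>') and, with require_mz=True,
# each fragment name such as 'y5+1-noloss' maps to an (m/z, intensity) pair.
# pred = read_pdeep_result('pdeep2_output.mgf', require_mz=True)
# for prec, frag_dict in list(pred.items())[:1]:
#     print(prec, frag_dict)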
def trans_pdeep2_result_to_df(result: dict, frag_trans=None, pep_trans=None, pep_trans_col='IntPep') -> pd.DataFrame:
    df_rows = []
    # NOTE: the original body never collected or returned the rows; the completion below does,
    # and the column names are assumptions — adjust them to downstream expectations if needed.
    columns = ['Prec', 'IntenDict']
    if pep_trans is not None:
        columns.append(pep_trans_col)
    for prec, inten_dict in result.items():
        if frag_trans is not None:
            inten_dict = {frag_trans[frag]: inten for frag, inten in inten_dict.items()}
        one_row = [prec, inten_dict]
        if pep_trans is not None:
            modpep, charge = prec.split('.')
            transed_pep = pep_trans(modpep)
            one_row.append(transed_pep)
        df_rows.append(one_row)
    return pd.DataFrame(df_rows, columns=columns)
def read_inten_from_plabel(_plabel_file):
ion_type_list = ['b', 'b-NH3', 'b-H2O', 'b-ModLoss', 'y', 'y-NH3', 'y-H2O', 'y-ModLoss']
_p_df = pd.read_csv(_plabel_file, sep='\t')
_p_df = _p_df.fillna('')
_p_df['prec'] = _p_df.apply(lambda x: '|'.join([x['peptide'], x['modinfo'], x['spec'].split('.')[-3]]), axis=1)
_p_inten_dict = dict()
def _merge_plabel_inten(x):
_one_prec = x['prec']
_one_inten_info = ''.join(x[ion_type_list].tolist()).split(';')[:-1]
_p_inten_dict[_one_prec] = dict([(_o_f.split(',')[0], float(_o_f.split(',')[1])) for _o_f in _one_inten_info])
    # progress_apply requires tqdm's pandas integration; register it here so the call works standalone
    from tqdm import tqdm
    tqdm.pandas()
    _p_df.progress_apply(_merge_plabel_inten, axis=1)
return _p_inten_dict
class pDeepSpectronaut(SpectronautLibrary):
def __init__(self, spectronaut_version=12):
super(pDeepSpectronaut, self).__init__(spectronaut_version)
self.plabel_title_list = BasicpDeepInfo.pDeepTrainsetTitle
def prec_ion_info(self, one_psm_df: pd.DataFrame, spectronaut_run_name=True):
"""
For pDeep trainset preparation.
        This will receive a dataframe of one PSM block and assemble a pd.Series as one row of the plabel dataframe.
:param one_psm_df: This must contain columns after ['PrecursorCharge', 'StrippedPeptide', 'ModifiedPeptide',
'FragmentType', 'FragmentNumber', 'FragmentCharge', 'RelativeIntensity', 'FragmentLossType']
        :param spectronaut_run_name: This can be set to True or False and doesn't affect the result; it only makes the plabel file carry more information
:return: A series as one plabel dataframe row
"""
first_row = one_psm_df.iloc[0]
prec_charge = first_row['PrecursorCharge']
if spectronaut_run_name:
run_title = first_row['ReferenceRun']
spec = '{title}.{charge}.0.0'.format(title=run_title, charge=prec_charge)
else:
spec = '{charge}.0.0'.format(charge=prec_charge)
stripped_pep = first_row['StrippedPeptide']
mod_pep = first_row['ModifiedPeptide']
stripped_pep, modinfo = extract_pdeep_mod(mod_pep)
if modinfo == 'Unsupport':
return 'Unsupport'
current_prec_info = pd.Series(data=[spec, stripped_pep, modinfo] + [''] * 8, index=self.plabel_title_list)
for row_index in one_psm_df.index:
line_series = one_psm_df.loc[row_index]
fragment_type = line_series['FragmentType']
fragment_num = line_series['FragmentNumber']
fragment_charge = line_series['FragmentCharge']
fragment_relative_intensity = line_series['RelativeIntensity']
fragment_losstype = line_series['FragmentLossType']
if fragment_type == 'b':
if fragment_losstype == 'noloss':
current_prec_info['b'] += 'b{num}+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge,
relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'NH3':
current_prec_info['b-NH3'] += 'b{num}-NH3+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge,
relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'H2O':
current_prec_info['b-H2O'] += 'b{num}-H2O+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge,
relative_intensity=fragment_relative_intensity)
else:
current_prec_info['b-ModLoss'] += 'b{num}-ModLoss+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge,
relative_intensity=fragment_relative_intensity)
elif fragment_type == 'y':
if fragment_losstype == 'noloss':
current_prec_info['y'] += 'y{num}+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge,
relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'NH3':
current_prec_info['y-NH3'] += 'y{num}-NH3+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge,
relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'H2O':
current_prec_info['y-H2O'] += 'y{num}-H2O+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge,
relative_intensity=fragment_relative_intensity)
else:
current_prec_info['y-ModLoss'] += 'y{num}-ModLoss+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge,
relative_intensity=fragment_relative_intensity)
return current_prec_info
def plabel_trainset(self, output_path, spectronaut_run_name=True):
"""
        Write a pDeep trainset file by calling prec_ion_info to process the library dataframe
"""
trainset_df = pd.DataFrame(columns=self.plabel_title_list)
for each_psm_index in self.get_psm_block_index(self._lib_df):
current_prec_info = self.prec_ion_info(self._lib_df.loc[each_psm_index[0]: each_psm_index[1]], spectronaut_run_name)
            # prec_ion_info returns a pd.Series on success and the string 'Unsupport' otherwise
            if not isinstance(current_prec_info, pd.Series):
continue
trainset_df = trainset_df.append(current_prec_info, ignore_index=True)
trainset_df.to_csv(output_path, sep='\t', index=False)
def extract_bracket(str_with_bracket):
bracket_start = [left_bracket.start() for left_bracket in re.finditer('\(', str_with_bracket)]
bracket_end = [right_bracket.start() for right_bracket in re.finditer('\)', str_with_bracket)]
return bracket_start, bracket_end
mod_dict = {'M(ox)': 'Oxidation[M]',
'Y(ph)': "Phospho[Y]",
'S(ph)': "Phospho[S]",
'T(ph)': "Phospho[T]",
}
def _plabel_from_mq(x):
def pdeep_mod_extraction(mod_pep):
mod_pep = mod_pep.replace('_', '')
modinfo = ''
mod_start, mod_end = extract_bracket(mod_pep)
mod_len = 0
for mod_site in zip(mod_start, mod_end):
mod_type = mod_pep[mod_site[0] - 1: mod_site[1] + 1].replace(' ', '')
mod_type = mod_dict[mod_type]
modinfo += '{mod_site},{mod_type};'.format(mod_site=mod_site[0] - mod_len, mod_type=mod_type)
mod_len += (mod_site[1] - mod_site[0] + 1)
return modinfo
ion_type_list = ['b', 'b-NH3', 'b-H2O', 'b-ModLoss', 'y', 'y-NH3', 'y-H2O', 'y-ModLoss']
plabel_title = ['spec', 'peptide', 'modinfo', *ion_type_list]
spec_name = '{}.{}.{}.{}.0.dta'.format(x['Raw file'], x['Scan number'], x['Scan number'], x['Charge'])
pep = x['Sequence']
mod_pep = x['Modified sequence']
mod_info = pdeep_mod_extraction(mod_pep)
ions = x['Matches']
intens = x['Intensities']
inten_dict = dict(zip(ion_type_list, [''] * 8))
ion_intens_list = list(zip(ions.split(';'), intens.split(';')))
b_ion_info = [_ for _ in ion_intens_list if _[0].startswith('b')]
y_ion_info = [_ for _ in ion_intens_list if _[0].startswith('y')]
for diff_ion_info in [b_ion_info, y_ion_info]:
current_num = 0
_mod_start = False
_second_mod_start = False
for ion, inten in diff_ion_info:
if '*' in ion:
if not _mod_start:
current_num = 0
_mod_start = True
if '-' in ion:
if _mod_start:
continue
ion_type, ion_num = re.findall('([by])(\d+)', ion)[0]
ion_num = int(ion_num)
re_charge = re.findall('\((\d)\+\)', ion)
if re_charge:
ion_charge = re_charge[0]
else:
ion_charge = '1'
if ion_num <= current_num and '*' in ion:
_second_mod_start = True
continue
if '*' in ion and _second_mod_start:
continue
current_num = ion_num
tag = ion_type
if '*' in ion:
tag += '-ModLoss'
elif '-' in ion:
tag += '-{}'.format(re.findall('-(.+)', ion)[0])
inten_dict[tag] += '{}{}{}+{},{};'.format(ion_type,
ion_num,
'-' + tag.split('-')[1] if '-' in tag else '',
ion_charge,
inten
)
one_psm_data = [spec_name, pep, mod_info, *[inten_dict[_] for _ in ion_type_list]]
return one_psm_data
""" NOTICE This one is for MQ > 1.6, in which the modifications added in the peptide sequence was set as Phospho (STY) but not (ph) in 1.5
def extract_bracket(str_with_bracket):
bracket_start = [left_bracket.start() for left_bracket in re.finditer('\(', str_with_bracket)][::2]
bracket_end = [right_bracket.start() for right_bracket in re.finditer('\)', str_with_bracket)][1::2]
return bracket_start, bracket_end
mod_dict2 = {'M(Oxidation (M))': 'Oxidation[M]',
'Y(Phospho (STY))' : "Phospho[Y]",
'S(Phospho (STY))' : "Phospho[S]",
'T(Phospho (STY))' : "Phospho[T]",}
def pdeep_mod_extraction(mod_pep):
mod_pep = mod_pep.replace('_', '')
modinfo = ''
mod_start, mod_end = extract_bracket(mod_pep)
mod_len = 0
for mod_site in zip(mod_start, mod_end):
mod_type = mod_pep[mod_site[0] - 1: mod_site[1] + 1]# .replace(' ', '')
mod_type = mod_dict2[mod_type]
modinfo += '{mod_site},{mod_type};'.format(mod_site=mod_site[0] - mod_len, mod_type=mod_type)
mod_len += (mod_site[1] - mod_site[0] + 1)
return modinfo
"""
| 46.193237 | 177 | 0.579237 |
7947f8839eecc7dabc5f3ccfb8db7265e4c4b26b | 1,363 | py | Python | threaded_stream/reader.py | cipher982/birb-watch | bdba5455f3b994b143e96b41afbf17d698610454 | [
"Apache-2.0"
] | null | null | null | threaded_stream/reader.py | cipher982/birb-watch | bdba5455f3b994b143e96b41afbf17d698610454 | [
"Apache-2.0"
] | null | null | null | threaded_stream/reader.py | cipher982/birb-watch | bdba5455f3b994b143e96b41afbf17d698610454 | [
"Apache-2.0"
] | null | null | null | # import the necessary packages
import os
from threading import Thread
import cv2
from dotenv import load_dotenv
load_dotenv()
RTSP_USER = os.getenv("RTSP_USER")
RTSP_PW = os.getenv("RTSP_PW")
RTSP_IP = os.getenv("RTSP_IP")
def get_amcrest_rtsp_url(user, pw, ip):
url = (
f"rtsp://{user}:{pw}@{ip}:554/"
"cam/realmonitor?channel=1&subtype=0&authbasic=64"
)
return url
def get_reolink_rtsp_url(user, pw, ip):
url = f"rtsp://{user}:{pw}@{ip}:554//h264Preview_01_main"
return url
class RTSPStream:
def __init__(self, src=0):
rtsp_url = get_reolink_rtsp_url(RTSP_USER, RTSP_PW, RTSP_IP)
print(f"Received RTSP URL: {rtsp_url}")
self.stream = cv2.VideoCapture(rtsp_url)
(self.grabbed, self.frame) = self.stream.read()
self.stopped = False
def start(self):
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
return self.frame
def stop(self):
self.stopped = True
| 24.339286 | 70 | 0.630227 |
7947f8b4cbeb5d156ac757518624a8901ace033c | 7,548 | py | Python | lesson11/sunzhaohui/reboot/users/group/__init__.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson11/sunzhaohui/reboot/users/group/__init__.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | lesson11/sunzhaohui/reboot/users/group/__init__.py | herrywen-nanj/51reboot | 1130c79a360e1b548a6eaad176eb60f8bed22f40 | [
"Apache-2.0"
] | null | null | null | # _*_ encoding:utf-8 _*_
__author__ = 'sunzhaohui'
__date__ = '2019-08-05 17:20'
from django.shortcuts import render
from django.http import HttpResponse,QueryDict,HttpResponseRedirect,JsonResponse,Http404
from django.urls import reverse
from django.conf import settings
from users.models import UserProfile
from django.contrib.auth.models import Group
from django.db.models import Q
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from users.forms import RoleProfileForm
from django.contrib.auth.hashers import make_password
from django.views.generic import View,DetailView,ListView
from django.contrib.auth import authenticate, login, logout
# Create your views here.
# Imports for user authentication and permission management
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from pure_pagination.mixins import PaginationMixin
# class RoleListView(LoginRequiredMixin,View):
#     login_url = '/login/'  # URL to redirect to when the user is not logged in or lacks permission; defaults to settings.LOGIN_URL.
#     # Set this to None to redirect unauthenticated users to a non-login page without a "next page" parameter, so it is removed from the URL.
# redirect_field_name = 'redirect_to'
# #@method_decorator(login_required(login_url='/login/'))
# def get(self,request):
# rolelist = Group.objects.all()
# print(rolelist)
# return render(request, 'users/rolelist.html', {'rolelist': rolelist})
class RoleListView(LoginRequiredMixin,PermissionRequiredMixin,PaginationMixin,ListView):
model = Group
template_name = "users/rolelist.html"
context_object_name = "rolelist"
    login_url = '/login/'  # URL to redirect to when the user is not logged in or lacks permission; defaults to settings.LOGIN_URL.
    # Set this to None to redirect unauthenticated users to a non-login page without a "next page" parameter, so it is removed from the URL.
redirect_field_name = 'redirect_to'
permission_required = ('users.view_group','users.delete_group','users.add_group','users.change_group')
#@method_decorator(login_required(login_url='/login/'))
paginate_by = 2
keyword = ''
    # Search
def get_queryset(self):
queryset = super(RoleListView, self).get_queryset()
self.keyword = self.request.GET.get('keyword','').strip()
print(self.keyword)
if self.keyword:
queryset = queryset.filter(Q(name__icontains=self.keyword)| Q(name__icontains=self.keyword) )
return queryset
    # Display the search keyword
def get_context_data(self, **kwargs):
context = super(RoleListView,self).get_context_data(**kwargs)
context['keyword'] = self.keyword
context['user'] = self.request.user.username
#rolelist = list(context["object_list"])
rolelist = []
for role in context["object_list"]:
role_info = {}
# role_name = role.name
# role_username = role.user_set.all()
role_info['id'] = role.id
role_info['name'] = role.name
role_info['member'] = role.user_set.all()
role_info['permissions'] = role.permissions.all()
rolelist.append(role_info)
context['rolelist'] = rolelist
print(context)
return context
    # Add a role, e.g. Group.objects.create(name='qa')
def post(self, request):
print('####### roleadd')
_roleForm = RoleProfileForm(request.POST)
if _roleForm.is_valid():
try:
data = _roleForm.cleaned_data
print(data)
self.model.objects.create(**data)
res = {'code': 0, 'result': '添加角色成功'}
except:
# logger.error("create user error: %s" % traceback.format_exc())
res = {'code': 1, 'errmsg': '添加角色失败'}
else:
            # Two common ways to get the custom form errors
print(_roleForm.errors)
# <ul class="errorlist">
# <li>phone<ul class="errorlist"><li>手机号码非法</li></ul></li>
# <li>username<ul class="errorlist"><li>已存在一位使用该名字的用户。</li></ul></li>
# </ul>
print(_roleForm.errors.as_json())
# {"phone": [{"message": "\u624b\u673a\u53f7\u7801\u975e\u6cd5", "code": "invalid"}],
# "username": [{"message": "\u5df2\u5b4f7f\u7528\u8be5\u540d\u5b57\u7684\u7528\u6237\u3002",
# "code": "unique"}]}
            # print(_roleForm.errors['phone'][0])  # e.g. "invalid phone number"
            print(_roleForm.errors['name'][0])  # e.g. "a user with that name already exists"
res = {'code': 1, 'errmsg': _roleForm.errors.as_json()}
return JsonResponse(res, safe=True)
def delete(self,request,**kwargs):
print(kwargs)
data = QueryDict(request.body).dict()
id = data['id']
print(id)
try:
self.model.objects.get(id=id).delete()
res = {'code': 0, 'result': '删除角色成功'}
except:
# print(id)
res = {'code': 1, 'errmsg': '删除角色失败'}
return JsonResponse(res, safe=True)
class RolePowerView(LoginRequiredMixin,PermissionRequiredMixin, DetailView):
    login_url = '/login/'  # URL to redirect to when the user is not logged in or lacks permission; defaults to settings.LOGIN_URL.
    # Set this to None to redirect unauthenticated users to a non-login page without a "next page" parameter, so it is removed from the URL.
redirect_field_name = 'redirect_to'
permission_required = ('users.view_group','users.delete_group','users.add_group','users.change_group')
"""
    Update a role and its permissions
"""
template_name = 'users/role_power.html'
model = Group
context_object_name = 'role'
    # Return all users and permissions, marking those the current role already has
def get_context_data(self, **kwargs):
context = super(RolePowerView, self).get_context_data(**kwargs)
context['role_has_users'],context['role_has_permissions'] = self._get_role_power()
context['role_not_users'],context['role_not_permissions'] = self._get_role_not_power()
return context
    # Get all users and permissions of the current role, returned as lists
def _get_role_power(self):
pk = self.kwargs.get(self.pk_url_kwarg)
try:
role = self.model.objects.get(pk=pk)
users = role.user_set.all()
return users,role.permissions.all()
except self.model.DoesNotExist:
raise Http404
    # Get the users and permissions the current role does not have, returned as lists
def _get_role_not_power(self):
pk = self.kwargs.get(self.pk_url_kwarg)
try:
role = self.model.objects.get(pk=pk)
all_user = UserProfile.objects.all()
users = [user for user in all_user if user not in role.user_set.all()]
all_perms = Permission.objects.all()
perms = [perm for perm in all_perms if perm not in role.permissions.all()]
return users,perms
except:
return JsonResponse([], safe=False)
    # Modify a role (update its users and permissions)
def post(self, request, **kwargs):
#ops.user_set.set([2])
print(request.POST)
print(request.POST.getlist('users', []))
user_id_list = request.POST.getlist('users_selected', [])
permission_id_list = request.POST.getlist('perms_selected', [])
pk = kwargs.get("pk")
try:
role = self.model.objects.get(pk=pk)
# user.groups.set(group_id_list)
print(user_id_list)
role.user_set.set(user_id_list)
role.permissions.set(permission_id_list)
res = {'code': 0, 'next_url': reverse("users:role_list"), 'result': '角色权限更新成功'}
except:
res = {'code': 1, 'next_url': reverse("users:role_list"), 'errmsg': '角色权限更新失败'}
#logger.error("edit user group pwoer error: %s" % traceback.format_exc())
return render(request, settings.JUMP_PAGE, res) | 38.907216 | 106 | 0.638182 |
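# Illustrative URL wiring for the views above (not part of the original module). The route
# names match the reverse('users:role_list') calls above; the URL paths are assumptions.
# from django.urls import path
# urlpatterns = [
#     path('role/list/', RoleListView.as_view(), name='role_list'),
#     path('role/<int:pk>/power/', RolePowerView.as_view(), name='role_power'),
# ]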
7947fa4f1cf325abcbf605aed1f3df3be36ad692 | 188 | py | Python | contrib/tests/tests/test_perl518.py | rockstack/rock | 1d010d942c5b1c8fd198223ac1f4a3dd5d690edb | [
"MIT"
] | 1 | 2015-03-13T06:01:06.000Z | 2015-03-13T06:01:06.000Z | contrib/tests/tests/test_perl518.py | rockstack/rock | 1d010d942c5b1c8fd198223ac1f4a3dd5d690edb | [
"MIT"
] | null | null | null | contrib/tests/tests/test_perl518.py | rockstack/rock | 1d010d942c5b1c8fd198223ac1f4a3dd5d690edb | [
"MIT"
] | null | null | null | import helper
class RuntimeTestCase(helper.RuntimeTests):
name = 'perl518'
init_files = ['cpanfile']
init_directories = ['t']
if __name__ == '__main__':
helper.main()
| 14.461538 | 43 | 0.659574 |
7947fad0417dc385353cf42672bb3f4f5e8d8531 | 4,898 | py | Python | source/models/gans/VQGAN/utils.py | Adamkomar95/gans-clip-pw | 14694abd3a793b3e0fdfed76e2e12908e91ea484 | [
"MIT"
] | null | null | null | source/models/gans/VQGAN/utils.py | Adamkomar95/gans-clip-pw | 14694abd3a793b3e0fdfed76e2e12908e91ea484 | [
"MIT"
] | null | null | null | source/models/gans/VQGAN/utils.py | Adamkomar95/gans-clip-pw | 14694abd3a793b3e0fdfed76e2e12908e91ea484 | [
"MIT"
] | null | null | null |
import os
from base64 import b64encode
from imageio import imread, imsave
import numpy as np
import matplotlib.pyplot as plt
import moviepy.editor
import yaml
from omegaconf import OmegaConf
import torch
import torch.nn.functional as F
# NOTE: load_vqgan() below also needs a VQModel class in scope (e.g. from the taming-transformers package).
def plot_text(txt, size=224):
fig = plt.figure(figsize=(1,1), dpi=size)
fontsize = size//len(txt) if len(txt) < 15 else 8
plt.text(0.5, 0.5, txt, fontsize=fontsize, ha='center', va='center', wrap=True)
plt.axis('off')
fig.tight_layout(pad=0)
fig.canvas.draw()
img = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
img = img.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return img
def txt_clean(txt):
return txt.translate(str.maketrans(dict.fromkeys(list("\n',.—|!?/:;\\"), ""))).replace(' ', '_').replace('"', '')
def basename(file):
return os.path.splitext(os.path.basename(file))[0]
def file_list(path, ext=None, subdir=None):
if subdir is True:
files = [os.path.join(dp, f) for dp, dn, fn in os.walk(path) for f in fn]
else:
files = [os.path.join(path, f) for f in os.listdir(path)]
if ext is not None:
if isinstance(ext, list):
files = [f for f in files if os.path.splitext(f.lower())[1][1:] in ext]
elif isinstance(ext, str):
files = [f for f in files if f.endswith(ext)]
else:
print(' Unknown extension/type for file list!')
return sorted([f for f in files if os.path.isfile(f)])
def img_list(path, subdir=None):
if subdir is True:
files = [os.path.join(dp, f) for dp, dn, fn in os.walk(path) for f in fn]
else:
files = [os.path.join(path, f) for f in os.listdir(path)]
files = [f for f in files if os.path.splitext(f.lower())[1][1:] in ['jpg', 'jpeg', 'png', 'ppm', 'tif']]
return sorted([f for f in files if os.path.isfile(f)])
def img_read(path):
img = imread(path)
    # single-channel (grayscale) to 3-channel
if (img.ndim == 2) or (img.shape[2] == 1):
img = np.dstack((img,img,img))
# rgba to rgb
if img.shape[2] == 4:
img = img[:,:,:3]
return img
def img_save(path, img, norm=True):
if norm == True and not np.issubdtype(img.dtype.kind, np.integer):
img = (img*255).astype(np.uint8)
imsave(path, img)
def minmax(x, torch=True):
    # The `torch` flag shadows the torch module inside this function, so use the
    # tensor's own min()/max() methods instead of torch.min/torch.max.
    if torch:
        mn = x.min().detach().cpu().numpy()
        mx = x.max().detach().cpu().numpy()
    else:
        mn = np.min(x.detach().cpu().numpy())
        mx = np.max(x.detach().cpu().numpy())
    return (mn, mx)
# Tiles an array around two points, allowing for pad lengths greater than the input length
# NB: if symm=True, every second tile is mirrored = messed up in GAN
# adapted from https://discuss.pytorch.org/t/symmetric-padding/19866/3
def tile_pad(xt, padding, symm=False):
h, w = xt.shape[-2:]
left, right, top, bottom = padding
def tile(x, minx, maxx):
rng = maxx - minx
if symm is True: # triangular reflection
double_rng = 2*rng
mod = np.fmod(x - minx, double_rng)
normed_mod = np.where(mod < 0, mod+double_rng, mod)
out = np.where(normed_mod >= rng, double_rng - normed_mod, normed_mod) + minx
else: # repeating tiles
mod = np.remainder(x - minx, rng)
out = mod + minx
return np.array(out, dtype=x.dtype)
x_idx = np.arange(-left, w+right)
y_idx = np.arange(-top, h+bottom)
x_pad = tile(x_idx, -0.5, w-0.5)
y_pad = tile(y_idx, -0.5, h-0.5)
xx, yy = np.meshgrid(x_pad, y_pad)
return xt[..., yy, xx]
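# Minimal usage sketch of tile_pad (relies only on the imports above): pad a
# 1x1x2x2 tensor by one pixel on every side using repeating tiles.
def _tile_pad_example():
    x = torch.arange(4.).reshape(1, 1, 2, 2)
    y = tile_pad(x, [1, 1, 1, 1])   # padding order: left, right, top, bottom
    assert y.shape == (1, 1, 4, 4)  # the 2x2 grid is repeated around the centre
    return y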
def pad_up_to(x, size, type='centr'):
sh = x.shape[2:][::-1]
if list(x.shape[2:]) == list(size): return x
padding = []
for i, s in enumerate(size[::-1]):
if 'side' in type.lower():
padding = padding + [0, s-sh[i]]
else: # centr
p0 = (s-sh[i]) // 2
p1 = s-sh[i] - p0
padding = padding + [p0,p1]
y = tile_pad(x, padding, symm = ('symm' in type.lower()))
return y
def load_config(config_path, display=False):
config = OmegaConf.load(config_path)
if display:
print(yaml.dump(OmegaConf.to_container(config)))
return config
def load_vqgan(config, ckpt_path=None):
model = VQModel(**config.model.params)
if ckpt_path is not None:
sd = torch.load(ckpt_path, map_location="cpu")["state_dict"]
missing, unexpected = model.load_state_dict(sd, strict=False)
return model.eval()
def vqgan_image(model, z):
x = model.decode(z)
x = (x+1.)/2.
return x
def makevid(seq_dir, size=None):
out_video = seq_dir + '.mp4'
moviepy.editor.ImageSequenceClip(img_list(seq_dir), fps=25).write_videofile(out_video, verbose=False)
data_url = "data:video/mp4;base64," + b64encode(open(out_video,'rb').read()).decode()
wh = '' if size is None else 'width=%d height=%d' % (size, size)
return """<video %s controls><source src="%s" type="video/mp4"></video>""" % (wh, data_url) | 35.751825 | 119 | 0.603103 |
7947fb0d104b8aace7c25ae3592a76a3eb9dd5c5 | 898 | py | Python | apps/financeDashboard.py | austinbyersking/sp500Dashboard-main | 0e2ee26207c51592ea925e8fbf9303f45b2e3c31 | [
"MIT"
] | null | null | null | apps/financeDashboard.py | austinbyersking/sp500Dashboard-main | 0e2ee26207c51592ea925e8fbf9303f45b2e3c31 | [
"MIT"
] | null | null | null | apps/financeDashboard.py | austinbyersking/sp500Dashboard-main | 0e2ee26207c51592ea925e8fbf9303f45b2e3c31 | [
"MIT"
] | null | null | null | import streamlit as st
import yfinance as yf
import pandas as pd
def app():
st.title('Finance Dashboard')
#Tickers
snp500 = pd.read_csv("Datasets/SP500.csv")
tickers = snp500['Symbol'].sort_values().tolist()
#Dropdown menu
dropdown = st.multiselect('Pick your assets',
tickers)
#Get dates
start = st.date_input('Start', value = pd.to_datetime('2021-01-01'))
end = st.date_input('End', value = pd.to_datetime('today'))
#Returns
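    # Cumulative relative return: chain daily pct_change via cumprod. For example,
    # prices 100 -> 110 -> 99 give daily returns 0.10 and -0.10, and cumulative
    # returns 0.10 and -0.01 relative to the first day.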
def relativeret(df):
rel = df.pct_change()
cumret = (1+rel).cumprod() - 1
cumret = cumret.fillna(0)
return cumret
#Get data
if len(dropdown) > 0:
# df = yf.download(dropdown,start,end)['Adj Close']
df = relativeret(yf.download(dropdown,start,end)['Adj Close'])
st.header('Returns of {}'.format(dropdown))
st.line_chart(df) | 27.212121 | 72 | 0.605791 |
7947fb213795fccdf10fe28c061f252c70e52237 | 1,515 | py | Python | frontoxy/blocks/reader.py | fabienvauchelles/frontoxy | 320a4b5592c507ac88955d727408b58ff35902b8 | [
"MIT"
] | 8 | 2016-10-20T15:52:09.000Z | 2019-03-27T19:16:40.000Z | frontoxy/blocks/reader.py | fabienvauchelles/frontoxy | 320a4b5592c507ac88955d727408b58ff35902b8 | [
"MIT"
] | 1 | 2018-01-19T12:31:06.000Z | 2018-01-24T09:38:12.000Z | frontoxy/blocks/reader.py | fabienvauchelles/frontoxy | 320a4b5592c507ac88955d727408b58ff35902b8 | [
"MIT"
] | 4 | 2017-08-17T11:40:41.000Z | 2021-01-21T07:07:48.000Z | # -*- coding: utf-8 -*-
from scrapy import Request
from scrapy.http import HtmlResponse
import json
import os
import zipfile
class BlocksReader(object):
INFO_FORMAT = u'{0}.desc'
BODY_FORMAT = u'{0}.dat'
def read(self, source):
source_filename = os.path.basename(source)
with zipfile.ZipFile(source) as zf:
filenames = sorted(set([zipinfo.filename[:10] for zipinfo in zf.infolist()]))
for filename in filenames:
source_path = u'{0}/{1}'.format(source_filename, filename)
# Read info
desc = zf.read(self.INFO_FORMAT.format(filename))
info = json.loads(desc)
url = info['url'].encode('utf8')
info.pop('url', None)
headers = info['headers']
info.pop('headers', None)
status = info['status']
info.pop('status', None)
info_meta = info['meta']
info_meta['source_path'] = source_path
# Read content
content = zf.read(self.BODY_FORMAT.format(filename))
request = Request(
url=url,
meta=info_meta
)
response = HtmlResponse(
url=url,
headers=headers,
status=status,
body=content,
request=request,
)
yield response
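# Minimal usage sketch (the archive path below is an assumption, not part of the
# library): replay the responses stored in a crawl archive and print each URL.
if __name__ == '__main__':
    reader = BlocksReader()
    for response in reader.read('blocks.zip'):
        print(response.url, response.meta['source_path'])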
| 27.053571 | 89 | 0.491749 |
7947fc13e0fd41c017b3d3c251557c2b34ccd944 | 139 | py | Python | exercicios-python/curso-python/ex011.py | PabloLanza/curso-python3 | 34cf44a2467fa239ba4019e085833002ad9b76a1 | [
"MIT"
] | null | null | null | exercicios-python/curso-python/ex011.py | PabloLanza/curso-python3 | 34cf44a2467fa239ba4019e085833002ad9b76a1 | [
"MIT"
] | null | null | null | exercicios-python/curso-python/ex011.py | PabloLanza/curso-python3 | 34cf44a2467fa239ba4019e085833002ad9b76a1 | [
"MIT"
] | null | null | null | sal = float(input('Qual o seu salário? '))
aum = sal * 0.15
novosal = sal + aum
print('O seu novo salário é de R${:.2f}.' .format(novosal)) | 34.75 | 59 | 0.640288 |
7947fcbdbe209ef363cc968339ee4c047a1a073e | 1,474 | py | Python | python_scripts/010_Concatenate_cap_catAGN.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | python_scripts/010_Concatenate_cap_catAGN.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | python_scripts/010_Concatenate_cap_catAGN.py | SoumyaShreeram/Locating_AGN_in_DM_halos | 1cfbee69b2c000faee4ecb199d65c3235afbed42 | [
"MIT"
] | null | null | null | """
010. Concatenates the cluster files with affected Lx due to AGN
Script written by: Soumya Shreeram
Project supervised by: Johan Comparat
Date: 1st July 2021
"""
# astropy modules
import astropy.units as u
import astropy.io.fits as fits
from astropy.table import Table, Column, join
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM, z_at_value
import numpy as np
# system imports
import os
import sys
import importlib as ib
import glob
# plotting imports
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from scipy import interpolate
sys.path.append('../imported_files/')
import Exploring_DM_Halos as edh
import Agn_incidence_from_Major_Mergers as aimm
import Comparison_simulation_with_literature_data as cswl
import Scaling_relations as sr
import plotting_sr_agn_clu as pt
import All_sky as sky
# look back into redshifts until...
redshift_limit = 2
# fraction of close pair agns added to the cat_AGN_all
frac_cp_agn = 0.03
model_name = 'Model_A3'
using_cp_catAGN = False
hd_clu_params_all = sky.makeClusterFile(redshift_limit=redshift_limit,\
model_name=model_name, using_cp_catAGN=using_cp_catAGN)
if using_cp_catAGN:
fname = '../Data/pairs_z%.1f/CLU_with_scaled_Lx_all_sky_%s.fit'%(redshift_limit, model_name)
else:
fname = '../Data/pairs_z%.1f/CLU_with_scaled_Lx_all_sky_ModelNone.fit'%(redshift_limit)
hd_clu_params_all.write(fname, format='fits')
| 24.566667 | 96 | 0.807327 |
7947fd129400a285b41b3ab1ed3bf731fc382cae | 4,811 | py | Python | mymcadmin/cli/commands/start.py | durandj/mymcadmin | 6c9ebfa2a5dfcea1f5fb5c5cf1c0256b05b98172 | [
"MIT"
] | null | null | null | mymcadmin/cli/commands/start.py | durandj/mymcadmin | 6c9ebfa2a5dfcea1f5fb5c5cf1c0256b05b98172 | [
"MIT"
] | null | null | null | mymcadmin/cli/commands/start.py | durandj/mymcadmin | 6c9ebfa2a5dfcea1f5fb5c5cf1c0256b05b98172 | [
"MIT"
] | null | null | null | """
Start commands
"""
import multiprocessing
import grp
import os
import os.path
import pwd
import click
import daemon
import daemon.pidfile
from .. import params
from ..base import mymcadmin, cli_command, rpc_command, error, success
from ... import (
errors,
manager,
rpc,
utils,
)
@mymcadmin.command()
@click.argument('server_id')
@cli_command
@rpc_command
def start(rpc_conn, server_id):
"""
Start a Minecraft server
"""
click.echo('Starting {}...'.format(server_id), nl = False)
with rpc.RpcClient(*rpc_conn) as rpc_client:
rpc_client.server_start(server_id)
success('Success')
@mymcadmin.command()
@cli_command
@rpc_command
def start_all(rpc_conn):
"""
Start all Minecraft servers
"""
click.echo('Attempting to start all servers...')
with rpc.RpcClient(*rpc_conn) as rpc_client:
result = rpc_client.server_start_all()
successful = result['success']
failure = result['failure']
for server_id in successful:
success('{} successfully started'.format(server_id))
for server_id in failure:
error('{} did not start properly'.format(server_id))
@mymcadmin.command()
@click.option('--host', default = None, help = 'The host to listen on')
@click.option(
'--port',
type = click.INT,
default = None,
help = 'The port to listen on')
@click.option(
'--user',
type = params.User(),
default = None,
help = 'The user to run as')
@click.option(
'--group',
type = params.Group(),
default = None,
help = 'The group to run as')
@click.option(
'--root',
type = click.Path(file_okay = False),
default = None,
help = 'The location where instances are stored')
@click.option(
'--pid',
type = click.Path(dir_okay = False),
default = None,
help = 'The location of the PID file')
@click.option(
'--log',
type = click.Path(dir_okay = False),
default = None,
help = 'The log file to write to')
@cli_command
@click.pass_context
def start_daemon(ctx, **kwargs):
"""
Start management daemon
"""
daemon_config = ctx.obj['config'].daemon or {}
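    # Option resolution order: an explicit CLI flag wins, then the value from the
    # daemon section of the config file, and finally the built-in default.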
def _get_option(name, default, convert = None):
if kwargs[name] is not None:
return kwargs[name]
value = daemon_config.get(name, default)
if convert:
try:
return convert(value)
except Exception:
raise click.ClickException(
'Configuration value is not valid. {}: {}'.format(name, value)
)
return value
def _convert_user(user):
if isinstance(user, int):
pwd.getpwuid(user)
return user
else:
return pwd.getpwnam(user).pw_uid
def _convert_group(group):
if isinstance(group, int):
grp.getgrgid(group)
return group
else:
return grp.getgrnam(group).gr_gid
host = _get_option('host', 'localhost')
port = _get_option('port', 2323)
user = _get_option('user', os.getuid(), convert = _convert_user)
group = _get_option('group', os.getgid(), convert = _convert_group)
root = _get_option(
'root',
os.path.join(utils.get_user_home(user), 'mymcadmin'),
)
pid = _get_option('pid', os.path.join(root, 'daemon.pid'))
log = _get_option('log', os.path.join(root, 'mymcadmin.log'))
click.echo(
'Starting daemon as {} {} on {}:{}...'.format(
user,
group,
host,
port,
),
nl = False,
)
if os.path.exists(pid):
raise errors.ManagerError('Management daemon is already started')
proc = multiprocessing.Process(
target = start_management_daemon,
kwargs = {
'host': host,
'port': port,
'user': user,
'group': group,
'root': root,
'pid': pid,
'log': log,
},
)
proc.start()
proc.join()
success('Success')
def start_management_daemon(**kwargs):
"""
Start the management daemon
"""
daemon_log = open(kwargs['log'], 'a')
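    # DaemonContext detaches from the terminal, switches to the given uid/gid and
    # working directory, writes a PID lock file, and redirects stdout/stderr to the log.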
with daemon.DaemonContext(
detach_process = True,
gid = kwargs['group'],
pidfile = daemon.pidfile.PIDLockFile(kwargs['pid']),
stdout = daemon_log,
stderr = daemon_log,
uid = kwargs['user'],
working_directory = kwargs['root'],
):
utils.setup_logging()
proc = manager.Manager(
kwargs['host'],
kwargs['port'],
kwargs['root'],
)
proc.run()
daemon_log.close()
| 23.468293 | 82 | 0.563916 |
7947fed0eaa684eb6f80641ee94fbbc46ba016df | 7,314 | py | Python | graph/undirected_graph_vector.py | rburing/gcaops | 3866e11584d42354c65643c70cd2b6982866c129 | [
"MIT"
] | null | null | null | graph/undirected_graph_vector.py | rburing/gcaops | 3866e11584d42354c65643c70cd2b6982866c129 | [
"MIT"
] | null | null | null | graph/undirected_graph_vector.py | rburing/gcaops | 3866e11584d42354c65643c70cd2b6982866c129 | [
"MIT"
] | null | null | null | from itertools import product
from .graph_vector import GraphVector, GraphModule
from .graph_vector_dict import GraphVector_dict, GraphModule_dict
from .graph_vector_vector import GraphVector_vector, GraphModule_vector
from .undirected_graph_basis import UndirectedGraphBasis
class UndirectedGraphVector(GraphVector):
"""
Vector representing a linear combination of undirected graphs.
"""
pass
class UndirectedGraphModule(GraphModule):
"""
Module spanned by undirected graphs.
"""
pass
class UndirectedGraphVector_dict(UndirectedGraphVector, GraphVector_dict):
"""
Vector representing a linear combination of undirected graphs (stored as a dictionary).
"""
def __init__(self, parent, vector):
"""
Initialize this undirected graph vector.
INPUT:
- ``parent`` -- an UndirectedGraphModule
- ``vector`` -- a dictionary, representing a sparse vector of coefficients with respect to the basis of ``parent``
"""
if not isinstance(parent, UndirectedGraphModule_dict):
raise ValueError("parent must be a UndirectedGraphModule_dict")
super().__init__(parent, vector)
def nvertices(self):
"""
Return the number of vertices in each graph in this graph vector.
ASSUMPTIONS:
Assumes all graphs in this graph vector have the same number of vertices.
"""
for key in self._vector:
v, e = key[:2]
if not self._vector[key].is_zero():
return v
def nedges(self):
"""
Return the number of edges in each graph in this graph vector.
ASSUMPTIONS:
Assumes all graphs in this graph vector have the same number of edges.
"""
for key in self._vector:
v, e = key[:2]
if not self._vector[key].is_zero():
return e
def insertion(self, position, other, **kwargs):
"""
Return the insertion of ``other`` into this graph vector at the vertex ``position``.
"""
# TODO: cache when self and other are in normal form. when not, use symmetric group action + operad axioms to deduce result.
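        # Bilinear expansion: for every pair of basis graphs with nonzero
        # coefficients, insert the second graph into the first at `position`
        # and collect the resulting graphs weighted by the product of coefficients.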
terms = []
for user_key in self._vector:
user_coeff = self._vector[user_key]
if user_coeff.is_zero():
continue
for victim_key in other._vector:
victim_coeff = other._vector[victim_key]
if victim_coeff.is_zero():
continue
product_coeff = user_coeff * victim_coeff
if product_coeff.is_zero():
continue
user, user_sign = self._parent._graph_basis.key_to_graph(user_key)
user_coeff *= user_sign
victim, victim_sign = other._parent._graph_basis.key_to_graph(victim_key)
victim_coeff *= victim_sign
for g in user._insertion_graphs(position, victim, **kwargs):
terms.append([product_coeff, g])
return self._parent(terms)
class UndirectedGraphModule_dict(UndirectedGraphModule, GraphModule_dict):
"""
Module spanned by undirected graphs (with elements stored as dictionaries).
"""
def __init__(self, base_ring, graph_basis):
"""
Initialize this undirected graph module.
INPUT:
- ``base_ring`` -- a ring, to be used as the ring of coefficients
- ``graph_basis`` -- an UndirectedGraphBasis
"""
if not isinstance(graph_basis, UndirectedGraphBasis):
raise ValueError('graph_basis must be an UndirectedGraphBasis')
super().__init__(base_ring, graph_basis)
self.element_class = UndirectedGraphVector_dict
class UndirectedGraphVector_vector(UndirectedGraphVector, GraphVector_vector):
"""
Vector representing a linear combination of undirected graphs (stored as a dictionary of vectors).
"""
def __init__(self, parent, vectors):
"""
Initialize this graph vector.
INPUT:
- ``parent`` -- an UndirectedGraphModule
- ``vectors`` -- a dictionary, mapping bi-gradings to sparse vectors of coefficients with respect to the basis of ``parent``
"""
if not isinstance(parent, UndirectedGraphModule_vector):
raise ValueError("parent must be a UndirectedGraphModule_vector")
super().__init__(parent, vectors)
def nvertices(self):
"""
Return the number of vertices in each graph in this graph vector.
ASSUMPTIONS:
Assumes all graphs in this graph vector have the same number of vertices.
"""
for bi_grading in self._vectors:
if not self._vectors[bi_grading].is_zero():
return bi_grading[0]
def nedges(self):
"""
Return the number of edges in each graph in this graph vector.
ASSUMPTIONS:
Assumes all graphs in this graph vector have the same number of edges.
"""
for bi_grading in self._vectors:
if not self._vectors[bi_grading].is_zero():
return bi_grading[1]
def insertion(self, position, other, **kwargs):
"""
Return the insertion of ``other`` into this graph vector at the vertex ``position``.
"""
terms = []
for (user_bigrading, user_vector) in self._vectors.items():
for (user_idx, user_coeff) in user_vector.items():
user_key = user_bigrading + (user_idx,)
user, user_sign = self._parent._graph_basis.key_to_graph(user_key)
user_coeff *= user_sign
for (victim_bigrading, victim_vector) in other._vectors.items():
for (victim_idx, victim_coeff) in victim_vector.items():
victim_key = victim_bigrading + (victim_idx,)
victim, victim_sign = other._parent._graph_basis.key_to_graph(victim_key)
victim_coeff *= victim_sign
product_coeff = user_coeff * victim_coeff
if product_coeff.is_zero():
continue
for g in user._insertion_graphs(position, victim, **kwargs):
terms.append([product_coeff, g])
return self._parent(terms)
class UndirectedGraphModule_vector(UndirectedGraphModule, GraphModule_vector):
"""
Module spanned by undirected graphs (with elements stored as dictionaries of vectors).
"""
def __init__(self, base_ring, graph_basis, vector_constructor, matrix_constructor):
"""
Initialize this undirected graph module.
INPUT:
- ``base_ring`` -- a ring, to be used as the ring of coefficients
- ``graph_basis`` -- an UndirectedGraphBasis
- ``vector_constructor`` -- constructor of (sparse) vectors
- ``matrix_constructor`` -- constructor of (sparse) matrices
"""
if not isinstance(graph_basis, UndirectedGraphBasis):
raise ValueError('graph_basis must be an UndirectedGraphBasis')
super().__init__(base_ring, graph_basis, vector_constructor, matrix_constructor)
self.element_class = UndirectedGraphVector_vector
| 37.896373 | 132 | 0.628521 |
7947ffbd8d5704dc68f6e3d1299c9380098fc835 | 1,302 | py | Python | pylsd/bindings/lsd_ctypes.py | AndranikSargsyan/pylsd-nova | 762a8c587a7b8bf142495d367880dbb33df121ba | [
"BSD-2-Clause"
] | 23 | 2020-08-13T01:37:54.000Z | 2022-03-31T09:39:50.000Z | pylsd/bindings/lsd_ctypes.py | AndranikSargsyan/pylsd-nova | 762a8c587a7b8bf142495d367880dbb33df121ba | [
"BSD-2-Clause"
] | 2 | 2020-08-15T15:24:26.000Z | 2021-07-20T23:05:51.000Z | pylsd/bindings/lsd_ctypes.py | AndranikSargsyan/pylsd-nova | 762a8c587a7b8bf142495d367880dbb33df121ba | [
"BSD-2-Clause"
] | 3 | 2020-09-01T17:17:45.000Z | 2022-03-09T09:58:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import ctypes
import os
import sys
def load_lsd_library():
root_dir = os.path.abspath(os.path.dirname(__file__))
libnames = ['linux/liblsd.so']
libdir = 'lib'
if sys.platform == 'win32':
if sys.maxsize > 2 ** 32:
libnames = ['win32/x64/lsd.dll', 'win32/x64/liblsd.dll']
else:
libnames = ['win32/x86/lsd.dll', 'win32/x86/liblsd.dll']
elif sys.platform == 'darwin':
libnames = ['darwin/liblsd.dylib']
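    # Walk up from this file's directory towards the filesystem root, trying to
    # load lib/<platform>/<libname> at each level before falling back to a bare
    # library name on the system search path.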
while root_dir is not None:
for libname in libnames:
try:
lsdlib = ctypes.cdll[os.path.join(root_dir, libdir, libname)]
return lsdlib
except Exception as e:
pass
tmp = os.path.dirname(root_dir)
if tmp == root_dir:
root_dir = None
else:
root_dir = tmp
# if we didn't find the library so far, try loading without
# a full path as a last resort
for libname in libnames:
try:
lsdlib = ctypes.cdll[libname]
return lsdlib
except Exception as e:
pass
return None
lsdlib = load_lsd_library()
if lsdlib is None:
raise ImportError('Cannot load dynamic library. Did you compile LSD?')
| 25.038462 | 77 | 0.572197 |
7948001291cd96ce098a5bb558bae227822ceaa0 | 728 | py | Python | server_part/config.py | Hubert51/Empty-study-room-detector | 9cbd18a4bf5bc02b8aebac42c15161258015ed5c | [
"MIT"
] | 2 | 2017-04-30T00:46:59.000Z | 2019-04-20T03:39:31.000Z | server_part/config.py | Hubert51/Empty-study-room-detector | 9cbd18a4bf5bc02b8aebac42c15161258015ed5c | [
"MIT"
] | 2 | 2017-04-30T16:25:08.000Z | 2017-05-04T01:47:31.000Z | server_part/config.py | Hubert51/Empty-study-room-detector | 9cbd18a4bf5bc02b8aebac42c15161258015ed5c | [
"MIT"
] | 3 | 2017-02-16T00:22:02.000Z | 2019-04-14T00:03:13.000Z | #encoding:utf-8
import os
WTF_CSRF_ENABLED = True
SECRET_KEY = 'you-will-never-guess'
OPENID_PROVIDERS = [
{'name': 'Google', 'url': 'https://www.google.com/accounts/o8/id'},
{'name': 'Yahoo', 'url': 'https://me.yahoo.com'},
{'name': 'AOL', 'url': 'http://openid.aol.com/<username>'},
{'name': 'Flickr', 'url': 'http://www.flickr.com/<username>'},
{'name': 'MyOpenID', 'url': 'https://www.myopenid.com'}]
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI='mysql://root:gengruijie@localhost:3306/test_roomr' # Connects as the root user; fill in your own password. MySQL's default port is 3306; use the name of the database created earlier
SQLALCHEMY_TRACK_MODIFICATIONS = True
SQLALCHEMY_COMMIT_ON_TEARDOWN=True # With this enabled, changes to the database are committed automatically at the end of each request
# db = SQLAlchemy(app)
| 30.333333 | 131 | 0.717033 |
7948008a047381e0a522f628007fc2bc95a7dea4 | 1,694 | py | Python | UVA-OJ/108 - Maximum Sum.py | MhmdRyhn/Programming-Sloution | be189cbf81b14ac7c10d387e259aa23992ba1016 | [
"MIT"
] | 1 | 2019-07-29T04:05:34.000Z | 2019-07-29T04:05:34.000Z | UVA-OJ/108 - Maximum Sum.py | MhmdRyhn/Programming-Sloution | be189cbf81b14ac7c10d387e259aa23992ba1016 | [
"MIT"
] | null | null | null | UVA-OJ/108 - Maximum Sum.py | MhmdRyhn/Programming-Sloution | be189cbf81b14ac7c10d387e259aa23992ba1016 | [
"MIT"
] | null | null | null | def max_sum_subarray_1D(arr, sz):
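    # Kadane's algorithm: track the best sum of a subarray ending at the current
    # index and the best sum seen anywhere so far.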
cur_max_sum = global_max_sum = arr[0]
for i in range(1, sz):
cur_max_sum = max(arr[i], cur_max_sum+arr[i])
if cur_max_sum > global_max_sum:
global_max_sum = cur_max_sum
return global_max_sum
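# 2-D maximum-sum submatrix: build prefix sums along each row, then for every
# pair of column boundaries (l, r) collapse the rows into a 1-D array of row
# sums and run Kadane's algorithm on it. Overall complexity is O(n^2 * m).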
def max_sum_subarray_2D(arr_2d, m, n):
for i in range(m):
for j in range(1, n):
arr_2d[i][j] += arr_2d[i][j-1]
cur_sum, max_sum = 0, 0
for l in range(n):
for r in range(l, n):
arr = []
for k in range(m):
if l != 0:
arr.append(arr_2d[k][r] - arr_2d[k][l-1])
else:
arr.append(arr_2d[k][r])
cur_sum = max_sum_subarray_1D(arr, m)
if cur_sum > max_sum:
max_sum = cur_sum
return max_sum
if __name__ == '__main__':
while True:
try:
n = int(input())
except EOFError:
break
arr = [[0 for i in range(n)] for j in range(n)]
i, j = 0, 0
while True:
a = None
try:
a = list(map(int, input().split()))
except EOFError:
break
sz = len(a)
for k in range(sz):
if j < n:
arr[i][j] = a[k]
j += 1
else:
j = 0
i += 1
arr[i][j] = a[k]
j += 1
if i > (n-1) and j > (n-1):
break
if i == (n - 1) and j > (n - 1):
break
ans = max_sum_subarray_2D(arr, n, n)
print(ans)
| 23.527778 | 61 | 0.40732 |
794800951ac21be2ddf4a72f53436150a681f5d0 | 6,768 | py | Python | sdk/python/pulumi_azure_native/network/v20200701/get_virtual_network_gateway_vpnclient_ipsec_parameters.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20200701/get_virtual_network_gateway_vpnclient_ipsec_parameters.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20200701/get_virtual_network_gateway_vpnclient_ipsec_parameters.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetVirtualNetworkGatewayVpnclientIpsecParametersResult',
'AwaitableGetVirtualNetworkGatewayVpnclientIpsecParametersResult',
'get_virtual_network_gateway_vpnclient_ipsec_parameters',
]
@pulumi.output_type
class GetVirtualNetworkGatewayVpnclientIpsecParametersResult:
"""
An IPSec parameters for a virtual network gateway P2S connection.
"""
def __init__(__self__, dh_group=None, ike_encryption=None, ike_integrity=None, ipsec_encryption=None, ipsec_integrity=None, pfs_group=None, sa_data_size_kilobytes=None, sa_life_time_seconds=None):
if dh_group and not isinstance(dh_group, str):
raise TypeError("Expected argument 'dh_group' to be a str")
pulumi.set(__self__, "dh_group", dh_group)
if ike_encryption and not isinstance(ike_encryption, str):
raise TypeError("Expected argument 'ike_encryption' to be a str")
pulumi.set(__self__, "ike_encryption", ike_encryption)
if ike_integrity and not isinstance(ike_integrity, str):
raise TypeError("Expected argument 'ike_integrity' to be a str")
pulumi.set(__self__, "ike_integrity", ike_integrity)
if ipsec_encryption and not isinstance(ipsec_encryption, str):
raise TypeError("Expected argument 'ipsec_encryption' to be a str")
pulumi.set(__self__, "ipsec_encryption", ipsec_encryption)
if ipsec_integrity and not isinstance(ipsec_integrity, str):
raise TypeError("Expected argument 'ipsec_integrity' to be a str")
pulumi.set(__self__, "ipsec_integrity", ipsec_integrity)
if pfs_group and not isinstance(pfs_group, str):
raise TypeError("Expected argument 'pfs_group' to be a str")
pulumi.set(__self__, "pfs_group", pfs_group)
if sa_data_size_kilobytes and not isinstance(sa_data_size_kilobytes, int):
raise TypeError("Expected argument 'sa_data_size_kilobytes' to be a int")
pulumi.set(__self__, "sa_data_size_kilobytes", sa_data_size_kilobytes)
if sa_life_time_seconds and not isinstance(sa_life_time_seconds, int):
raise TypeError("Expected argument 'sa_life_time_seconds' to be a int")
pulumi.set(__self__, "sa_life_time_seconds", sa_life_time_seconds)
@property
@pulumi.getter(name="dhGroup")
def dh_group(self) -> str:
"""
The DH Group used in IKE Phase 1 for initial SA.
"""
return pulumi.get(self, "dh_group")
@property
@pulumi.getter(name="ikeEncryption")
def ike_encryption(self) -> str:
"""
The IKE encryption algorithm (IKE phase 2).
"""
return pulumi.get(self, "ike_encryption")
@property
@pulumi.getter(name="ikeIntegrity")
def ike_integrity(self) -> str:
"""
The IKE integrity algorithm (IKE phase 2).
"""
return pulumi.get(self, "ike_integrity")
@property
@pulumi.getter(name="ipsecEncryption")
def ipsec_encryption(self) -> str:
"""
The IPSec encryption algorithm (IKE phase 1).
"""
return pulumi.get(self, "ipsec_encryption")
@property
@pulumi.getter(name="ipsecIntegrity")
def ipsec_integrity(self) -> str:
"""
The IPSec integrity algorithm (IKE phase 1).
"""
return pulumi.get(self, "ipsec_integrity")
@property
@pulumi.getter(name="pfsGroup")
def pfs_group(self) -> str:
"""
The Pfs Group used in IKE Phase 2 for new child SA.
"""
return pulumi.get(self, "pfs_group")
@property
@pulumi.getter(name="saDataSizeKilobytes")
def sa_data_size_kilobytes(self) -> int:
"""
The IPSec Security Association (also called Quick Mode or Phase 2 SA) payload size in KB for P2S client..
"""
return pulumi.get(self, "sa_data_size_kilobytes")
@property
@pulumi.getter(name="saLifeTimeSeconds")
def sa_life_time_seconds(self) -> int:
"""
The IPSec Security Association (also called Quick Mode or Phase 2 SA) lifetime in seconds for P2S client.
"""
return pulumi.get(self, "sa_life_time_seconds")
class AwaitableGetVirtualNetworkGatewayVpnclientIpsecParametersResult(GetVirtualNetworkGatewayVpnclientIpsecParametersResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualNetworkGatewayVpnclientIpsecParametersResult(
dh_group=self.dh_group,
ike_encryption=self.ike_encryption,
ike_integrity=self.ike_integrity,
ipsec_encryption=self.ipsec_encryption,
ipsec_integrity=self.ipsec_integrity,
pfs_group=self.pfs_group,
sa_data_size_kilobytes=self.sa_data_size_kilobytes,
sa_life_time_seconds=self.sa_life_time_seconds)
def get_virtual_network_gateway_vpnclient_ipsec_parameters(resource_group_name: Optional[str] = None,
virtual_network_gateway_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualNetworkGatewayVpnclientIpsecParametersResult:
"""
An IPSec parameters for a virtual network gateway P2S connection.
:param str resource_group_name: The name of the resource group.
:param str virtual_network_gateway_name: The virtual network gateway name.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualNetworkGatewayName'] = virtual_network_gateway_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200701:getVirtualNetworkGatewayVpnclientIpsecParameters', __args__, opts=opts, typ=GetVirtualNetworkGatewayVpnclientIpsecParametersResult).value
return AwaitableGetVirtualNetworkGatewayVpnclientIpsecParametersResult(
dh_group=__ret__.dh_group,
ike_encryption=__ret__.ike_encryption,
ike_integrity=__ret__.ike_integrity,
ipsec_encryption=__ret__.ipsec_encryption,
ipsec_integrity=__ret__.ipsec_integrity,
pfs_group=__ret__.pfs_group,
sa_data_size_kilobytes=__ret__.sa_data_size_kilobytes,
sa_life_time_seconds=__ret__.sa_life_time_seconds)
| 43.10828 | 205 | 0.695035 |
794800983929868e97c60247f8df44c27ac6fc3d | 5,506 | py | Python | tests/test_image_to_ascii.py | emberati/ascii-art | 955360cea395f224d17dbb3f48ee49738a4f4014 | [
"MIT"
] | 25 | 2020-11-18T16:30:19.000Z | 2022-03-12T03:54:05.000Z | tests/test_image_to_ascii.py | emberati/ascii-art | 955360cea395f224d17dbb3f48ee49738a4f4014 | [
"MIT"
] | 3 | 2020-08-28T09:59:43.000Z | 2020-12-20T05:58:38.000Z | tests/test_image_to_ascii.py | emberati/ascii-art | 955360cea395f224d17dbb3f48ee49738a4f4014 | [
"MIT"
] | 9 | 2020-10-15T17:38:06.000Z | 2021-12-25T22:52:53.000Z | from pathlib import Path
from PIL import Image
from ascii_art import image_to_ascii
einstein = f"""??*??**???**?????????????;?;;;;?*#??**%#####@@###@@@@@@@@@##@#######SS****S###??*?.
?**??**??*?????????????;??;;,?*S#?;*%####@@@@@@@@@@@@@@@@@@@@@@######%#SS*S####*?*;.
***?***???????????????;;??;??*S%??*S%#@@@@@@@@@@@@@@@@@@@@@@@@@@######%##SS%####***.
********???????????;;;;;;;??***??**S##@@@@@@@@@@@@@@@@@@@@@@@@##@@#######%S%####*?S?:
********??????????;;,,;?????*??;?**S##@@@@@@@@@@@@@@@@@@@@@@@@@@#########%SS#####*???,.
******??*????????;;;;;?*****??;?**S##@@@@@@@@@@@@@@@@@@@@@@@@@##@#########%S%#####*?.,,:
****?**????????;;;;;?*%*??????;?*%%%#@@@@@@@@@@@@@@@@@@@@@@@@@@@###########%######**:. :,
***??**???????;;;;,;*S?????;?*,?**%S*##@@@@@@@@@@@@@@@@@@@@@@@@@@############@@###**? :,
*****?*????????;;;;?*;???*?;**,?*;:.,?*#@@@@@@@@@@@@#****##%##@@@#####@#######@#%*S*?,. ,,
******?*??????;;,,;*;,??**;?**:??.,;?S##@@@@@@@@@@@@###*??***S@@@################***?;?,..:.
#********??????;,;?*;,??*?;?S*.??*S#%##@@@@@@@@@@@@@@@@###@@@@@@######@#####@@@##***?;,;..:,.
********???????;;;??;;***?;*#*:?S########@@@@@@@@@@@@@@@@@@@@@@@#####@@#####@@@#S****?;,,. ..
?#*?*?***?????;;;;,;,;***;;?*?:*#@##S%####@@@@@@@@@@@@@@@@@@@@@@#############@###S**?*??;,
*S***?**????;;;;,,,::;*??;;?*;;S@#%****##**#@@@@@@@@@##%***S#@@@####@@@#####@@@####*??**??,
#******?????;;;,::..:;*?;;;?*:;%#*?;?**###?*@@@@@@@@###**S#**#@@@#S#@@@@#####@@@@##S******?.
#**?**?????;;,:......,*;;,;*?.?S*;?##??**#*;#@@@@@###****?**S*#@@#%##########@@@####*****??*,
*#**??????;;,.... ...;*,;;;S?:?**?*@,.,*#**;#@@@@###SS@#;,.?*%#@############@@@@##**%*?***?;?;
?***????;;,,:... ...,?*,;;;*;,?*%??*?,?%*%?*#@@@##@#%##%:..S*%###@###############%S********?,.;.
*#?????;;,,:... ..:;:??.,,,?,;?*????**#%#S;S@@@#####SS*******####@@############*SS********?*?..,
*#?;;??;;;,:......::.??.:::;:?*?*S***###*,;#@@###########%%#%##%#@@###########S***SS*S*?**??*; ..
**;;;,,;;;,:.. ,....,?;..::..?*?*SS##SS#*:?#######@@@############@@#####%####SS#*??*SSS**S*;??. .
**..:,,,,;,... :. .:?:,:... .?***#######;;S#######@@@@@##@##########S##%S###S***#*;??**S**S*;?: ..
*? .:,,.:... ....;::,..;...?########@*;*#####S###@@@@@@@@@@#@@####%##SSSSSSS**#%**;?**%S#***?. .
*? ..,:.... ..:;:.:...;...*#@####@@%?*######*S#@@@@@@@@@@@@@@@@##%SSSS*?*S**##S**??**###S***,.
*, ...... .,,,......,...*#@@#####*?##@@@@#S*S#@@@@@@@@@@@@@@@##SS*S**?**##@@##?S**%####*%%;,.
,. .....,:.. ...,,...*#@#####@??#@@@@@@#S%*#@@@@@@@@@@@@@@##***S*??*#@@@@#####?*#@##**#?,..
:. .... ........:..?#####@@%;;#@@@@@###@#S#@@@@@@@@@@@@@##***S****#@@@@##@#@**@@@#**#*;..
?,. ............,..?%%S#@@@?,,?#@@#####@@*#@#@@@@@@@@@@@##**SS****%##@#SS@@#%*@@@#**#*?:,
?;. .:...,,;;..?S*S#@@#%*;:?*S%S####%%@@@#@@@@@@@@@###**S*****#@##S###@#SS@@##*?S*;,.
*;.... .,;;??;,..***#@@#####**####@@@##@@@@##@@@@@@@@@##**S***?#@@##*#####*#@@##??S*;,.
?,..?S,.:. :. . ..:..**?######@@@@@@##@@##@@######@@@@@@@@@#**S****#@@*****###%#@##%?*S?:, .
?; ,,.:. . ,,,..*S?SS#####@#@@@#@@@@@@@#######@@@#@@@##**S****@##*?******###@S**S?:....
?; . .::. .?%?***###@@##@@@@@@#####@@####@@@@@@@#%**S#S?#@##*?;;S**%#*?#S*?,.. ..
, .?:,. . ..... ?*******%###@##################@@@@@@#S**##**?S#?:...?****,;*?: .....
. ..:. ...... ,**S#??;?********#***S*S########@@@@@#%**##**,.. .,,. .?, ........
. ....... :***##*;,,,????;???***?*?**####@@@@@@#%?##%*?, ...........
.........?S?#@@S?;?***S***S%S**;:,**###@##@@@#**##*?: ..........;
...........;S**@@#S;*SS##S######*,,*S##S#@##@@@#*##S;. . ...... .;?
............?#*#@@#:*###%S%%*##*??*##@#%@###@@@#*##? ..... :;??
.............:*S*@@#:S@##%%SS*##?***#@@##@##@@@@*##; . ... .. .,;???
................;#*#@#:*####S**S@*?**%@@@@####@@@#S#, . .. .,;????
.....................*#S@@,?###%S**%#??*S@##@@@@S#@@#**. . . .;;???;;
.:,;,,,:................:#*@@?;#@#SS**#*?*S###@@@@#S@@#?, .,??????;;
.,;?;;;;,:.................?*#@?.*##*#**#?*###@@@@@@##@#? . .,;???????;
.;???;;;;;;,,..........:::....?*##.;##SS*#*?###@@@@@@#S@#? . .,;???;????;
.;??;;;;;???;;,:........:::,:...:;*#*.*#S%#*?#@@@@@@@@@##%, . .;?????;??;;:
.;????;????????;,,......:::,,:,:...:,*#;,*S#S?#@@@@@@@@@#*? .. .;????????;;:.
,;??????????????;;;:....:::,,,,::.....:*#,:???#@@@@@@@@@#?. ...... :;;???????;,,..
??????????????????;,::::::,,,,,:::.....:**??*#@@@@@@@@#*: .. ......... ,;????;????;,...
??????????????????;:.::::,,,,,:::::.....,*###@@#@@@@@*, . .. ......... :;????;;???;;. .,
""" # noqa: F541,W291
def test_image_to_ascii():
image = Image.open("examples/images/einstein.jpg")
params = {
"width": 100,
"height": 50,
"font": str(Path(__file__).parent / "Menlo.ttc"),
"normalize": True,
"invert": True,
}
ascii = image_to_ascii(image, **params)
assert ascii == einstein
| 77.549296 | 115 | 0.081184 |
794800c65532dcfea40ee497a3fbd17d8a818d2d | 33,498 | py | Python | sdk/lusid/models/resource_list_of_get_index_convention_response.py | fossabot/lusid-sdk-python-preview | 2c95d870489d93dee921593877256d3869c090e6 | [
"MIT"
] | null | null | null | sdk/lusid/models/resource_list_of_get_index_convention_response.py | fossabot/lusid-sdk-python-preview | 2c95d870489d93dee921593877256d3869c090e6 | [
"MIT"
] | null | null | null | sdk/lusid/models/resource_list_of_get_index_convention_response.py | fossabot/lusid-sdk-python-preview | 2c95d870489d93dee921593877256d3869c090e6 | [
"MIT"
] | 1 | 2020-10-29T08:35:32.000Z | 2020-10-29T08:35:32.000Z | # coding: utf-8
"""
LUSID API
# Introduction This page documents the [LUSID APIs](https://www.lusid.com/api/swagger), which allows authorised clients to query and update their data within the LUSID platform. SDKs to interact with the LUSID APIs are available in the following languages : * [C#](https://github.com/finbourne/lusid-sdk-csharp) * [Java](https://github.com/finbourne/lusid-sdk-java) * [JavaScript](https://github.com/finbourne/lusid-sdk-js) * [Python](https://github.com/finbourne/lusid-sdk-python) # Data Model The LUSID API has a relatively lightweight but extremely powerful data model. One of the goals of LUSID was not to enforce on clients a single rigid data model but rather to provide a flexible foundation onto which clients can map their own data models. The core entities in LUSID provide a minimal structure and set of relationships, and the data model can be extended using Properties. The LUSID data model is exposed through the LUSID APIs. The APIs provide access to both business objects and the meta data used to configure the systems behaviours. The key business entities are: - * **Portfolios** A portfolio is a container for transactions and holdings (a **Transaction Portfolio**) or constituents (a **Reference Portfolio**). * **Derived Portfolios**. Derived Portfolios allow Portfolios to be created based on other Portfolios, by overriding or adding specific items. * **Holdings** A Holding is a quantity of an Instrument or a balance of cash within a Portfolio. Holdings can only be adjusted via Transactions. * **Transactions** A Transaction is an economic event that occurs in a Portfolio, causing its holdings to change. * **Corporate Actions** A corporate action is a market event which occurs to an Instrument and thus applies to all portfolios which holding the instrument. Examples are stock splits or mergers. * **Constituents** A constituent is a record in a Reference Portfolio containing an Instrument and an associated weight. * **Instruments** An instrument represents a currency, tradable instrument or OTC contract that is attached to a transaction and a holding. * **Properties** All major entities allow additional user defined properties to be associated with them. For example, a Portfolio manager may be associated with a portfolio. Meta data includes: - * **Transaction Types** Transactions are booked with a specific transaction type. The types are client defined and are used to map the Transaction to a series of movements which update the portfolio holdings. * **Properties Types** Types of user defined properties used within the system. ## Scope All data in LUSID is segregated at the client level. Entities in LUSID are identifiable by a unique code. Every entity lives within a logical data partition known as a Scope. Scope is an identity namespace allowing two entities with the same unique code to co-exist within individual address spaces. For example, prices for equities from different vendors may be uploaded into different scopes such as `client/vendor1` and `client/vendor2`. A portfolio may then be valued using either of the price sources by referencing the appropriate scope. LUSID Clients cannot access scopes of other clients. ## Instruments LUSID has its own built-in instrument master which you can use to master your own instrument universe. Every instrument must be created with one or more unique market identifiers, such as [FIGI](https://openfigi.com/). For any non-listed instruments (eg OTCs), you can upload an instrument against a custom ID of your choosing. 
In addition, LUSID will allocate each instrument a unique 'LUSID instrument identifier'. The LUSID instrument identifier is what is used when uploading transactions, holdings, prices, etc. The API exposes an `instrument/lookup` endpoint which can be used to lookup these LUSID identifiers using their market identifiers. Cash can be referenced using the ISO currency code prefixed with \"`CCY_`\" e.g. `CCY_GBP` ## Instrument Data Instrument data can be uploaded to the system using the [Instrument Properties](#tag/InstrumentProperties) endpoint. | Field|Type|Description | | ---|---|--- | | Key|propertykey|The key of the property. This takes the format {domain}/{scope}/{code} e.g. 'Instrument/system/Name' or 'Transaction/strategy/quantsignal'. | | Value|string|The value of the property. | | EffectiveFrom|datetimeoffset|The effective datetime from which the property is valid. | ## Transaction Portfolios Portfolios are the top-level entity containers within LUSID, containing transactions, corporate actions and holdings. The transactions build up the portfolio holdings on which valuations, analytics profit & loss and risk can be calculated. Properties can be associated with Portfolios to add in additional data. Portfolio properties can be changed over time, for example to allow a Portfolio Manager to be linked with a Portfolio. Additionally, portfolios can be securitised and held by other portfolios, allowing LUSID to perform \"drill-through\" into underlying fund holdings ### Derived Portfolios LUSID also allows for a portfolio to be composed of another portfolio via derived portfolios. A derived portfolio can contain its own transactions and also inherits any transactions from its parent portfolio. Any changes made to the parent portfolio are automatically reflected in derived portfolio. Derived portfolios in conjunction with scopes are a powerful construct. For example, to do pre-trade what-if analysis, a derived portfolio could be created a new namespace linked to the underlying live (parent) portfolio. Analysis can then be undertaken on the derived portfolio without affecting the live portfolio. ### Transactions A transaction represents an economic activity against a Portfolio. Transactions are processed according to a configuration. This will tell the LUSID engine how to interpret the transaction and correctly update the holdings. LUSID comes with a set of transaction types you can use out of the box, or you can configure your own set(s) of transactions. For more details see the [LUSID Getting Started Guide for transaction configuration.](https://support.lusid.com/configuring-transaction-types) | Field|Type|Description | | ---|---|--- | | TransactionId|string|The unique identifier for the transaction. | | Type|string|The type of the transaction e.g. 'Buy', 'Sell'. The transaction type should have been pre-configured via the System Configuration API endpoint. If it hasn't been pre-configured the transaction will still be updated or inserted however you will be unable to generate the resultant holdings for the portfolio that contains this transaction as LUSID does not know how to process it. | | InstrumentIdentifiers|map|A set of instrument identifiers to use to resolve the transaction to a unique instrument. | | TransactionDate|dateorcutlabel|The date of the transaction. | | SettlementDate|dateorcutlabel|The settlement date of the transaction. | | Units|decimal|The number of units transacted in the associated instrument. 
| | TransactionPrice|transactionprice|The price for each unit of the transacted instrument in the transaction currency. | | TotalConsideration|currencyandamount|The total value of the transaction in the settlement currency. | | ExchangeRate|decimal|The exchange rate between the transaction and settlement currency. For example if the transaction currency is in USD and the settlement currency is in GBP this this the USD/GBP rate. | | TransactionCurrency|currency|The transaction currency. | | Properties|map|Set of unique transaction properties and associated values to store with the transaction. Each property must be from the 'Transaction' domain. | | CounterpartyId|string|The identifier for the counterparty of the transaction. | | Source|string|The source of the transaction. This is used to look up the appropriate transaction group set in the transaction type configuration. | From these fields, the following values can be calculated * **Transaction value in Transaction currency**: TotalConsideration / ExchangeRate * **Transaction value in Portfolio currency**: Transaction value in Transaction currency * TradeToPortfolioRate #### Example Transactions ##### A Common Purchase Example Three example transactions are shown in the table below. They represent a purchase of USD denominated IBM shares within a Sterling denominated portfolio. * The first two transactions are for separate buy and fx trades * Buying 500 IBM shares for $71,480.00 * A spot foreign exchange conversion to fund the IBM purchase. (Buy $71,480.00 for £54,846.60) * The third transaction is an alternate version of the above trades. Buying 500 IBM shares and settling directly in Sterling. | Column | Buy Trade | Fx Trade | Buy Trade with foreign Settlement | | ----- | ----- | ----- | ----- | | TransactionId | FBN00001 | FBN00002 | FBN00003 | | Type | Buy | FxBuy | Buy | | InstrumentIdentifiers | { \"figi\", \"BBG000BLNNH6\" } | { \"CCY\", \"CCY_USD\" } | { \"figi\", \"BBG000BLNNH6\" } | | TransactionDate | 2018-08-02 | 2018-08-02 | 2018-08-02 | | SettlementDate | 2018-08-06 | 2018-08-06 | 2018-08-06 | | Units | 500 | 71480 | 500 | | TransactionPrice | 142.96 | 1 | 142.96 | | TradeCurrency | USD | USD | USD | | ExchangeRate | 1 | 0.7673 | 0.7673 | | TotalConsideration.Amount | 71480.00 | 54846.60 | 54846.60 | | TotalConsideration.Currency | USD | GBP | GBP | | Trade/default/TradeToPortfolioRate* | 0.7673 | 0.7673 | 0.7673 | [* This is a property field] ##### A Forward FX Example LUSID has a flexible transaction modelling system, meaning there are a number of different ways of modelling forward fx trades. The default LUSID transaction types are FwdFxBuy and FwdFxSell. Using these transaction types, LUSID will generate two holdings for each Forward FX trade, one for each currency in the trade. 
An example Forward Fx trade to sell GBP for USD in a JPY-denominated portfolio is shown below: | Column | Forward 'Sell' Trade | Notes | | ----- | ----- | ---- | | TransactionId | FBN00004 | | | Type | FwdFxSell | | | InstrumentIdentifiers | { \"Instrument/default/Currency\", \"GBP\" } | | | TransactionDate | 2018-08-02 | | | SettlementDate | 2019-02-06 | Six month forward | | Units | 10000.00 | Units of GBP | | TransactionPrice | 1 | | | TradeCurrency | GBP | Currency being sold | | ExchangeRate | 1.3142 | Agreed rate between GBP and USD | | TotalConsideration.Amount | 13142.00 | Amount in the settlement currency, USD | | TotalConsideration.Currency | USD | Settlement currency | | Trade/default/TradeToPortfolioRate | 142.88 | Rate between trade currency, GBP and portfolio base currency, JPY | Please note that exactly the same economic behaviour could be modelled using the FwdFxBuy Transaction Type with the amounts and rates reversed. ### Holdings A holding represents a position in an instrument or cash on a given date. | Field|Type|Description | | ---|---|--- | | InstrumentUid|string|The unqiue Lusid Instrument Id (LUID) of the instrument that the holding is in. | | SubHoldingKeys|map|The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. | | Properties|map|The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' or 'Holding' domain. | | HoldingType|string|The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. | | Units|decimal|The total number of units of the holding. | | SettledUnits|decimal|The total number of settled units of the holding. | | Cost|currencyandamount|The total cost of the holding in the transaction currency. | | CostPortfolioCcy|currencyandamount|The total cost of the holding in the portfolio currency. | | Transaction|transaction|The transaction associated with an unsettled holding. | ## Corporate Actions Corporate actions are represented within LUSID in terms of a set of instrument-specific 'transitions'. These transitions are used to specify the participants of the corporate action, and the effect that the corporate action will have on holdings in those participants. ### Corporate Action | Field|Type|Description | | ---|---|--- | | CorporateActionCode|code|The unique identifier of this corporate action | | Description|string| | | AnnouncementDate|datetimeoffset|The announcement date of the corporate action | | ExDate|datetimeoffset|The ex date of the corporate action | | RecordDate|datetimeoffset|The record date of the corporate action | | PaymentDate|datetimeoffset|The payment date of the corporate action | | Transitions|corporateactiontransition[]|The transitions that result from this corporate action | ### Transition | Field|Type|Description | | ---|---|--- | | InputTransition|corporateactiontransitioncomponent|Indicating the basis of the corporate action - which security and how many units | | OutputTransitions|corporateactiontransitioncomponent[]|What will be generated relative to the input transition | ### Example Corporate Action Transitions #### A Dividend Action Transition In this example, for each share of IBM, 0.20 units (or 20 pence) of GBP are generated. 
| Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"ccy\" : \"CCY_GBP\" } | | Units Factor | 1 | 0.20 | | Cost Factor | 1 | 0 | #### A Split Action Transition In this example, for each share of IBM, we end up with 2 units (2 shares) of IBM, with total value unchanged. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | | Units Factor | 1 | 2 | | Cost Factor | 1 | 1 | #### A Spinoff Action Transition In this example, for each share of IBM, we end up with 1 unit (1 share) of IBM and 3 units (3 shares) of Celestica, with 85% of the value remaining on the IBM share, and 5% in each Celestica share (15% total). | Column | Input Transition | Output Transition 1 | Output Transition 2 | | ----- | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000HBGRF3\" } | | Units Factor | 1 | 1 | 3 | | Cost Factor | 1 | 0.85 | 0.15 | ## Reference Portfolios Reference portfolios are portfolios that contain constituents with weights. They are designed to represent entities such as indices and benchmarks. ### Constituents | Field|Type|Description | | ---|---|--- | | InstrumentIdentifiers|map|Unique instrument identifiers | | InstrumentUid|string|LUSID's internal unique instrument identifier, resolved from the instrument identifiers | | Currency|decimal| | | Weight|decimal| | | FloatingWeight|decimal| | ## Portfolio Groups Portfolio groups allow the construction of a hierarchy from portfolios and groups. Portfolio operations on the group are executed on an aggregated set of portfolios in the hierarchy. For example: * Global Portfolios _(group)_ * APAC _(group)_ * Hong Kong _(portfolio)_ * Japan _(portfolio)_ * Europe _(group)_ * France _(portfolio)_ * Germany _(portfolio)_ * UK _(portfolio)_ In this example **Global Portfolios** is a group that consists of an aggregate of **Hong Kong**, **Japan**, **France**, **Germany** and **UK** portfolios. ## Properties Properties are key-value pairs that can be applied to any entity within a domain (where a domain is `trade`, `portfolio`, `security` etc). Properties must be defined before use with a `PropertyDefinition` and can then subsequently be added to entities. ## Schema A detailed description of the entities used by the API and parameters for endpoints which take a JSON document can be retrieved via the `schema` endpoint. ## Meta data The following headers are returned on all responses from LUSID | Name | Purpose | | --- | --- | | lusid-meta-duration | Duration of the request | | lusid-meta-success | Whether or not LUSID considered the request to be successful | | lusid-meta-requestId | The unique identifier for the request | | lusid-schema-url | Url of the schema for the data being returned | | lusid-property-schema-url | Url of the schema for any properties | # Error Codes | Code|Name|Description | | ---|---|--- | | <a name=\"-10\">-10</a>|Server Configuration Error| | | <a name=\"-1\">-1</a>|Unknown error|An unexpected error was encountered on our side. 
| | <a name=\"102\">102</a>|Version Not Found| | | <a name=\"103\">103</a>|Api Rate Limit Violation| | | <a name=\"104\">104</a>|Instrument Not Found| | | <a name=\"105\">105</a>|Property Not Found| | | <a name=\"106\">106</a>|Portfolio Recursion Depth| | | <a name=\"108\">108</a>|Group Not Found| | | <a name=\"109\">109</a>|Portfolio Not Found| | | <a name=\"110\">110</a>|Property Schema Not Found| | | <a name=\"111\">111</a>|Portfolio Ancestry Not Found| | | <a name=\"112\">112</a>|Portfolio With Id Already Exists| | | <a name=\"113\">113</a>|Orphaned Portfolio| | | <a name=\"119\">119</a>|Missing Base Claims| | | <a name=\"121\">121</a>|Property Not Defined| | | <a name=\"122\">122</a>|Cannot Delete System Property| | | <a name=\"123\">123</a>|Cannot Modify Immutable Property Field| | | <a name=\"124\">124</a>|Property Already Exists| | | <a name=\"125\">125</a>|Invalid Property Life Time| | | <a name=\"126\">126</a>|Property Constraint Style Excludes Properties| | | <a name=\"127\">127</a>|Cannot Modify Default Data Type| | | <a name=\"128\">128</a>|Group Already Exists| | | <a name=\"129\">129</a>|No Such Data Type| | | <a name=\"130\">130</a>|Undefined Value For Data Type| | | <a name=\"131\">131</a>|Unsupported Value Type Defined On Data Type| | | <a name=\"132\">132</a>|Validation Error| | | <a name=\"133\">133</a>|Loop Detected In Group Hierarchy| | | <a name=\"134\">134</a>|Undefined Acceptable Values| | | <a name=\"135\">135</a>|Sub Group Already Exists| | | <a name=\"138\">138</a>|Price Source Not Found| | | <a name=\"139\">139</a>|Analytic Store Not Found| | | <a name=\"141\">141</a>|Analytic Store Already Exists| | | <a name=\"143\">143</a>|Client Instrument Already Exists| | | <a name=\"144\">144</a>|Duplicate In Parameter Set| | | <a name=\"147\">147</a>|Results Not Found| | | <a name=\"148\">148</a>|Order Field Not In Result Set| | | <a name=\"149\">149</a>|Operation Failed| | | <a name=\"150\">150</a>|Elastic Search Error| | | <a name=\"151\">151</a>|Invalid Parameter Value| | | <a name=\"153\">153</a>|Command Processing Failure| | | <a name=\"154\">154</a>|Entity State Construction Failure| | | <a name=\"155\">155</a>|Entity Timeline Does Not Exist| | | <a name=\"156\">156</a>|Concurrency Conflict Failure| | | <a name=\"157\">157</a>|Invalid Request| | | <a name=\"158\">158</a>|Event Publish Unknown| | | <a name=\"159\">159</a>|Event Query Failure| | | <a name=\"160\">160</a>|Blob Did Not Exist| | | <a name=\"162\">162</a>|Sub System Request Failure| | | <a name=\"163\">163</a>|Sub System Configuration Failure| | | <a name=\"165\">165</a>|Failed To Delete| | | <a name=\"166\">166</a>|Upsert Client Instrument Failure| | | <a name=\"167\">167</a>|Illegal As At Interval| | | <a name=\"168\">168</a>|Illegal Bitemporal Query| | | <a name=\"169\">169</a>|Invalid Alternate Id| | | <a name=\"170\">170</a>|Cannot Add Source Portfolio Property Explicitly| | | <a name=\"171\">171</a>|Entity Already Exists In Group| | | <a name=\"173\">173</a>|Entity With Id Already Exists| | | <a name=\"174\">174</a>|Derived Portfolio Details Do Not Exist| | | <a name=\"176\">176</a>|Portfolio With Name Already Exists| | | <a name=\"177\">177</a>|Invalid Transactions| | | <a name=\"178\">178</a>|Reference Portfolio Not Found| | | <a name=\"179\">179</a>|Duplicate Id| | | <a name=\"180\">180</a>|Command Retrieval Failure| | | <a name=\"181\">181</a>|Data Filter Application Failure| | | <a name=\"182\">182</a>|Search Failed| | | <a name=\"183\">183</a>|Movements Engine Configuration Key Failure| | | 
<a name=\"184\">184</a>|Fx Rate Source Not Found| | | <a name=\"185\">185</a>|Accrual Source Not Found| | | <a name=\"186\">186</a>|Access Denied| | | <a name=\"187\">187</a>|Invalid Identity Token| | | <a name=\"188\">188</a>|Invalid Request Headers| | | <a name=\"189\">189</a>|Price Not Found| | | <a name=\"190\">190</a>|Invalid Sub Holding Keys Provided| | | <a name=\"191\">191</a>|Duplicate Sub Holding Keys Provided| | | <a name=\"192\">192</a>|Cut Definition Not Found| | | <a name=\"193\">193</a>|Cut Definition Invalid| | | <a name=\"194\">194</a>|Time Variant Property Deletion Date Unspecified| | | <a name=\"195\">195</a>|Perpetual Property Deletion Date Specified| | | <a name=\"196\">196</a>|Time Variant Property Upsert Date Unspecified| | | <a name=\"197\">197</a>|Perpetual Property Upsert Date Specified| | | <a name=\"200\">200</a>|Invalid Unit For Data Type| | | <a name=\"201\">201</a>|Invalid Type For Data Type| | | <a name=\"202\">202</a>|Invalid Value For Data Type| | | <a name=\"203\">203</a>|Unit Not Defined For Data Type| | | <a name=\"204\">204</a>|Units Not Supported On Data Type| | | <a name=\"205\">205</a>|Cannot Specify Units On Data Type| | | <a name=\"206\">206</a>|Unit Schema Inconsistent With Data Type| | | <a name=\"207\">207</a>|Unit Definition Not Specified| | | <a name=\"208\">208</a>|Duplicate Unit Definitions Specified| | | <a name=\"209\">209</a>|Invalid Units Definition| | | <a name=\"210\">210</a>|Invalid Instrument Identifier Unit| | | <a name=\"211\">211</a>|Holdings Adjustment Does Not Exist| | | <a name=\"212\">212</a>|Could Not Build Excel Url| | | <a name=\"213\">213</a>|Could Not Get Excel Version| | | <a name=\"214\">214</a>|Instrument By Code Not Found| | | <a name=\"215\">215</a>|Entity Schema Does Not Exist| | | <a name=\"216\">216</a>|Feature Not Supported On Portfolio Type| | | <a name=\"217\">217</a>|Quote Not Found| | | <a name=\"218\">218</a>|Invalid Quote Identifier| | | <a name=\"219\">219</a>|Invalid Metric For Data Type| | | <a name=\"220\">220</a>|Invalid Instrument Definition| | | <a name=\"221\">221</a>|Instrument Upsert Failure| | | <a name=\"222\">222</a>|Reference Portfolio Request Not Supported| | | <a name=\"223\">223</a>|Transaction Portfolio Request Not Supported| | | <a name=\"224\">224</a>|Invalid Property Value Assignment| | | <a name=\"230\">230</a>|Transaction Type Not Found| | | <a name=\"231\">231</a>|Transaction Type Duplication| | | <a name=\"232\">232</a>|Portfolio Does Not Exist At Given Date| | | <a name=\"233\">233</a>|Query Parser Failure| | | <a name=\"234\">234</a>|Duplicate Constituent| | | <a name=\"235\">235</a>|Unresolved Instrument Constituent| | | <a name=\"236\">236</a>|Unresolved Instrument In Transition| | | <a name=\"237\">237</a>|Missing Side Definitions| | | <a name=\"299\">299</a>|Invalid Recipe| | | <a name=\"300\">300</a>|Missing Recipe| | | <a name=\"301\">301</a>|Dependencies| | | <a name=\"304\">304</a>|Portfolio Preprocess Failure| | | <a name=\"310\">310</a>|Valuation Engine Failure| | | <a name=\"311\">311</a>|Task Factory Failure| | | <a name=\"312\">312</a>|Task Evaluation Failure| | | <a name=\"313\">313</a>|Task Generation Failure| | | <a name=\"314\">314</a>|Engine Configuration Failure| | | <a name=\"315\">315</a>|Model Specification Failure| | | <a name=\"320\">320</a>|Market Data Key Failure| | | <a name=\"321\">321</a>|Market Resolver Failure| | | <a name=\"322\">322</a>|Market Data Failure| | | <a name=\"330\">330</a>|Curve Failure| | | <a name=\"331\">331</a>|Volatility Surface 
Failure| | | <a name=\"332\">332</a>|Volatility Cube Failure| | | <a name=\"350\">350</a>|Instrument Failure| | | <a name=\"351\">351</a>|Cash Flows Failure| | | <a name=\"352\">352</a>|Reference Data Failure| | | <a name=\"360\">360</a>|Aggregation Failure| | | <a name=\"361\">361</a>|Aggregation Measure Failure| | | <a name=\"370\">370</a>|Result Retrieval Failure| | | <a name=\"371\">371</a>|Result Processing Failure| | | <a name=\"372\">372</a>|Vendor Result Processing Failure| | | <a name=\"373\">373</a>|Vendor Result Mapping Failure| | | <a name=\"374\">374</a>|Vendor Library Unauthorised| | | <a name=\"375\">375</a>|Vendor Connectivity Error| | | <a name=\"376\">376</a>|Vendor Interface Error| | | <a name=\"377\">377</a>|Vendor Pricing Failure| | | <a name=\"378\">378</a>|Vendor Translation Failure| | | <a name=\"379\">379</a>|Vendor Key Mapping Failure| | | <a name=\"380\">380</a>|Vendor Reflection Failure| | | <a name=\"390\">390</a>|Attempt To Upsert Duplicate Quotes| | | <a name=\"391\">391</a>|Corporate Action Source Does Not Exist| | | <a name=\"392\">392</a>|Corporate Action Source Already Exists| | | <a name=\"393\">393</a>|Instrument Identifier Already In Use| | | <a name=\"394\">394</a>|Properties Not Found| | | <a name=\"395\">395</a>|Batch Operation Aborted| | | <a name=\"400\">400</a>|Invalid Iso4217 Currency Code| | | <a name=\"401\">401</a>|Cannot Assign Instrument Identifier To Currency| | | <a name=\"402\">402</a>|Cannot Assign Currency Identifier To Non Currency| | | <a name=\"403\">403</a>|Currency Instrument Cannot Be Deleted| | | <a name=\"404\">404</a>|Currency Instrument Cannot Have Economic Definition| | | <a name=\"405\">405</a>|Currency Instrument Cannot Have Lookthrough Portfolio| | | <a name=\"406\">406</a>|Cannot Create Currency Instrument With Multiple Identifiers| | | <a name=\"407\">407</a>|Specified Currency Is Undefined| | | <a name=\"410\">410</a>|Index Does Not Exist| | | <a name=\"411\">411</a>|Sort Field Does Not Exist| | | <a name=\"413\">413</a>|Negative Pagination Parameters| | | <a name=\"414\">414</a>|Invalid Search Syntax| | | <a name=\"415\">415</a>|Filter Execution Timeout| | | <a name=\"420\">420</a>|Side Definition Inconsistent| | | <a name=\"450\">450</a>|Invalid Quote Access Metadata Rule| | | <a name=\"451\">451</a>|Access Metadata Not Found| | | <a name=\"452\">452</a>|Invalid Access Metadata Identifier| | | <a name=\"460\">460</a>|Standard Resource Not Found| | | <a name=\"461\">461</a>|Standard Resource Conflict| | | <a name=\"462\">462</a>|Calendar Not Found| | | <a name=\"463\">463</a>|Date In A Calendar Not Found| | | <a name=\"464\">464</a>|Invalid Date Source Data| | | <a name=\"465\">465</a>|Invalid Timezone| | | <a name=\"601\">601</a>|Person Identifier Already In Use| | | <a name=\"602\">602</a>|Person Not Found| | | <a name=\"603\">603</a>|Cannot Set Identifier| | | <a name=\"617\">617</a>|Invalid Recipe Specification In Request| | | <a name=\"618\">618</a>|Inline Recipe Deserialisation Failure| | | <a name=\"619\">619</a>|Identifier Types Not Set For Entity| | | <a name=\"620\">620</a>|Cannot Delete All Client Defined Identifiers| | | <a name=\"650\">650</a>|The Order requested was not found.| | | <a name=\"654\">654</a>|The Allocation requested was not found.| | | <a name=\"655\">655</a>|Cannot build the fx forward target with the given holdings.| | | <a name=\"656\">656</a>|Group does not contain expected entities.| | | <a name=\"667\">667</a>|Relation definition already exists| | | <a name=\"673\">673</a>|Missing 
entitlements for entities in Group| | | <a name=\"674\">674</a>|Next Best Action not found| | | <a name=\"676\">676</a>|Relation definition not defined| | | <a name=\"677\">677</a>|Invalid entity identifier for relation| | | <a name=\"681\">681</a>|Sorting by specified field not supported|One or more of the provided fields to order by were either invalid or not supported. | | <a name=\"682\">682</a>|Too many fields to sort by|The number of fields to sort the data by exceeds the number allowed by the endpoint | | <a name=\"684\">684</a>|Sequence Not Found| | | <a name=\"685\">685</a>|Sequence Already Exists| | | <a name=\"686\">686</a>|Non-cycling sequence has been exhausted| | | <a name=\"687\">687</a>|Legal Entity Identifier Already In Use| | | <a name=\"688\">688</a>|Legal Entity Not Found| | | <a name=\"689\">689</a>|The supplied pagination token is invalid| | | <a name=\"690\">690</a>|Property Type Is Not Supported| | | <a name=\"691\">691</a>|Multiple Tax-lots For Currency Type Is Not Supported| | # noqa: E501
The version of the OpenAPI document: 0.11.2220
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ResourceListOfGetIndexConventionResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'values': 'list[GetIndexConventionResponse]',
'href': 'str',
'links': 'list[Link]'
}
attribute_map = {
'values': 'values',
'href': 'href',
'links': 'links'
}
required_map = {
'values': 'required',
'href': 'optional',
'links': 'optional'
}
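    # Illustrative usage (an added sketch, not produced by the generator):
    # instances are normally built from deserialised API responses, e.g.
    #
    #   page = ResourceListOfGetIndexConventionResponse(values=[])
    #   page.to_dict()   # -> {'values': [], 'href': None, 'links': None}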
def __init__(self, values=None, href=None, links=None): # noqa: E501
"""
ResourceListOfGetIndexConventionResponse - a model defined in OpenAPI
:param values: (required)
:type values: list[lusid.GetIndexConventionResponse]
:param href:
:type href: str
:param links:
:type links: list[lusid.Link]
""" # noqa: E501
self._values = None
self._href = None
self._links = None
self.discriminator = None
self.values = values
self.href = href
self.links = links
@property
def values(self):
"""Gets the values of this ResourceListOfGetIndexConventionResponse. # noqa: E501
:return: The values of this ResourceListOfGetIndexConventionResponse. # noqa: E501
:rtype: list[GetIndexConventionResponse]
"""
return self._values
@values.setter
def values(self, values):
"""Sets the values of this ResourceListOfGetIndexConventionResponse.
:param values: The values of this ResourceListOfGetIndexConventionResponse. # noqa: E501
:type: list[GetIndexConventionResponse]
"""
if values is None:
raise ValueError("Invalid value for `values`, must not be `None`") # noqa: E501
self._values = values
@property
def href(self):
"""Gets the href of this ResourceListOfGetIndexConventionResponse. # noqa: E501
:return: The href of this ResourceListOfGetIndexConventionResponse. # noqa: E501
:rtype: str
"""
return self._href
@href.setter
def href(self, href):
"""Sets the href of this ResourceListOfGetIndexConventionResponse.
:param href: The href of this ResourceListOfGetIndexConventionResponse. # noqa: E501
:type: str
"""
self._href = href
@property
def links(self):
"""Gets the links of this ResourceListOfGetIndexConventionResponse. # noqa: E501
:return: The links of this ResourceListOfGetIndexConventionResponse. # noqa: E501
:rtype: list[Link]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ResourceListOfGetIndexConventionResponse.
:param links: The links of this ResourceListOfGetIndexConventionResponse. # noqa: E501
:type: list[Link]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResourceListOfGetIndexConventionResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 184.054945 | 28,439 | 0.685832 |
794801319b582d2eaef8d7df978ce1b0d46a51f9 | 79 | py | Python | nbsphinx_link/_version.py | madsmpedersen/nbsphinx-link | 9a55a92a6956c716cf341530cd3de1f9f389bf18 | [
"BSD-3-Clause"
] | null | null | null | nbsphinx_link/_version.py | madsmpedersen/nbsphinx-link | 9a55a92a6956c716cf341530cd3de1f9f389bf18 | [
"BSD-3-Clause"
] | null | null | null | nbsphinx_link/_version.py | madsmpedersen/nbsphinx-link | 9a55a92a6956c716cf341530cd3de1f9f389bf18 | [
"BSD-3-Clause"
] | null | null | null | version_info = (1, 1, 2, 'dev')
__version__ = ".".join(map(str, version_info))
| 26.333333 | 46 | 0.64557 |
7948016ccca9a31000f6563153725864c13c1188 | 214 | py | Python | algo_trading/signal_detector/urls.py | qz-fordham/algo-trading-microservice | 8778daeb90250f7c5c0e772c24d4912326850a37 | [
"MIT"
] | 1 | 2022-02-12T08:10:27.000Z | 2022-02-12T08:10:27.000Z | algo_trading/signal_detector/urls.py | qz-fordham/algo-trading-microservice | 8778daeb90250f7c5c0e772c24d4912326850a37 | [
"MIT"
] | null | null | null | algo_trading/signal_detector/urls.py | qz-fordham/algo-trading-microservice | 8778daeb90250f7c5c0e772c24d4912326850a37 | [
"MIT"
] | 1 | 2022-02-11T03:43:41.000Z | 2022-02-11T03:43:41.000Z | from django.urls import path
from . import views
# Routing
urlpatterns = [
# path('<str:ticker>/<int:span>/', views.sma_view, name='sma'),
path('', views.SignalDetectorView.as_view(), name='detector'),
]
| 21.4 | 67 | 0.668224 |
794801bd56fd3b515dcd5bf5498a7876f87f9fd3 | 26,203 | py | Python | venv/Lib/site-packages/sqlalchemy/sql/crud.py | YunJaePark3908/BaseAPIServer | 17ab922917541406a3c2d75b428614ce97152a16 | [
"Apache-2.0"
] | 1 | 2021-03-26T10:07:00.000Z | 2021-03-26T10:07:00.000Z | venv/Lib/site-packages/sqlalchemy/sql/crud.py | YunJaePark3908/BaseAPIServer | 17ab922917541406a3c2d75b428614ce97152a16 | [
"Apache-2.0"
] | 1 | 2021-09-28T04:53:41.000Z | 2021-09-28T04:53:41.000Z | venv/Lib/site-packages/sqlalchemy/sql/crud.py | YunJaePark3908/BaseAPIServer | 17ab922917541406a3c2d75b428614ce97152a16 | [
"Apache-2.0"
] | 3 | 2021-11-30T11:10:26.000Z | 2021-12-08T05:59:31.000Z | # sql/crud.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Functions used by compiler.py to determine the parameters rendered
within INSERT and UPDATE statements.
"""
import operator
from . import dml
from . import elements
from .. import exc
from .. import util
REQUIRED = util.symbol(
"REQUIRED",
"""
Placeholder for the value within a :class:`.BindParameter`
which is required to be present when the statement is passed
to :meth:`_engine.Connection.execute`.
This symbol is typically used when a :func:`_expression.insert`
or :func:`_expression.update` statement is compiled without parameter
values present.
""",
)
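# Illustrative note (added; not part of the original module): REQUIRED marks the
# binds created when e.g. ``table.insert()`` is compiled without values -- the
# rendered SQL still contains one placeholder per column, and the actual values
# must be supplied when the statement is executed (a missing value then raises
# an error along the lines of "A value is required for bind parameter ...").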
ISINSERT = util.symbol("ISINSERT")
ISUPDATE = util.symbol("ISUPDATE")
ISDELETE = util.symbol("ISDELETE")
def _setup_crud_params(compiler, stmt, local_stmt_type, **kw):
restore_isinsert = compiler.isinsert
restore_isupdate = compiler.isupdate
restore_isdelete = compiler.isdelete
should_restore = (
(restore_isinsert or restore_isupdate or restore_isdelete)
or len(compiler.stack) > 1
or "visiting_cte" in kw
)
if local_stmt_type is ISINSERT:
compiler.isupdate = False
compiler.isinsert = True
elif local_stmt_type is ISUPDATE:
compiler.isupdate = True
compiler.isinsert = False
elif local_stmt_type is ISDELETE:
if not should_restore:
compiler.isdelete = True
else:
assert False, "ISINSERT, ISUPDATE, or ISDELETE expected"
try:
if local_stmt_type in (ISINSERT, ISUPDATE):
return _get_crud_params(compiler, stmt, **kw)
finally:
if should_restore:
compiler.isinsert = restore_isinsert
compiler.isupdate = restore_isupdate
compiler.isdelete = restore_isdelete
def _get_crud_params(compiler, stmt, **kw):
"""create a set of tuples representing column/string pairs for use
in an INSERT or UPDATE statement.
Also generates the Compiled object's postfetch, prefetch, and
returning column collections, used for default handling and ultimately
populating the ResultProxy's prefetch_cols() and postfetch_cols()
collections.
"""
compiler.postfetch = []
compiler.insert_prefetch = []
compiler.update_prefetch = []
compiler.returning = []
# no parameters in the statement, no parameters in the
# compiled params - return binds for all columns
if compiler.column_keys is None and stmt.parameters is None:
return [
(c, _create_bind_param(compiler, c, None, required=True))
for c in stmt.table.columns
]
if stmt._has_multi_parameters:
stmt_parameters = stmt.parameters[0]
else:
stmt_parameters = stmt.parameters
# getters - these are normally just column.key,
# but in the case of mysql multi-table update, the rules for
# .key must conditionally take tablename into account
(
_column_as_key,
_getattr_col_key,
_col_bind_name,
) = _key_getters_for_crud_column(compiler, stmt)
# if we have statement parameters - set defaults in the
# compiled params
if compiler.column_keys is None:
parameters = {}
else:
parameters = dict(
(_column_as_key(key), REQUIRED)
for key in compiler.column_keys
if not stmt_parameters or key not in stmt_parameters
)
# create a list of column assignment clauses as tuples
values = []
if stmt_parameters is not None:
_get_stmt_parameters_params(
compiler, parameters, stmt_parameters, _column_as_key, values, kw
)
check_columns = {}
# special logic that only occurs for multi-table UPDATE
# statements
if compiler.isupdate and stmt._extra_froms and stmt_parameters:
_get_multitable_params(
compiler,
stmt,
stmt_parameters,
check_columns,
_col_bind_name,
_getattr_col_key,
values,
kw,
)
if compiler.isinsert and stmt.select_names:
_scan_insert_from_select_cols(
compiler,
stmt,
parameters,
_getattr_col_key,
_column_as_key,
_col_bind_name,
check_columns,
values,
kw,
)
else:
_scan_cols(
compiler,
stmt,
parameters,
_getattr_col_key,
_column_as_key,
_col_bind_name,
check_columns,
values,
kw,
)
if parameters and stmt_parameters:
check = (
set(parameters)
.intersection(_column_as_key(k) for k in stmt_parameters)
.difference(check_columns)
)
if check:
raise exc.CompileError(
"Unconsumed column names: %s"
% (", ".join("%s" % c for c in check))
)
if stmt._has_multi_parameters:
values = _extend_values_for_multiparams(compiler, stmt, values, kw)
return values
def _create_bind_param(
compiler, col, value, process=True, required=False, name=None, **kw
):
if name is None:
name = col.key
bindparam = elements.BindParameter(
name, value, type_=col.type, required=required
)
bindparam._is_crud = True
if process:
bindparam = bindparam._compiler_dispatch(compiler, **kw)
return bindparam
def _key_getters_for_crud_column(compiler, stmt):
if compiler.isupdate and stmt._extra_froms:
# when extra tables are present, refer to the columns
# in those extra tables as table-qualified, including in
# dictionaries and when rendering bind param names.
# the "main" table of the statement remains unqualified,
# allowing the most compatibility with a non-multi-table
# statement.
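        # Illustrative example (table/column names hypothetical): a column
        # "name" of an extra table "other" is keyed as ("other", "name") and
        # bound as "other_name", while columns of the statement's main table
        # keep their plain key "name".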
_et = set(stmt._extra_froms)
def _column_as_key(key):
str_key = elements._column_as_key(key)
if hasattr(key, "table") and key.table in _et:
return (key.table.name, str_key)
else:
return str_key
def _getattr_col_key(col):
if col.table in _et:
return (col.table.name, col.key)
else:
return col.key
def _col_bind_name(col):
if col.table in _et:
return "%s_%s" % (col.table.name, col.key)
else:
return col.key
else:
_column_as_key = elements._column_as_key
_getattr_col_key = _col_bind_name = operator.attrgetter("key")
return _column_as_key, _getattr_col_key, _col_bind_name
def _scan_insert_from_select_cols(
compiler,
stmt,
parameters,
_getattr_col_key,
_column_as_key,
_col_bind_name,
check_columns,
values,
kw,
):
(
need_pks,
implicit_returning,
implicit_return_defaults,
postfetch_lastrowid,
) = _get_returning_modifiers(compiler, stmt)
cols = [stmt.table.c[_column_as_key(name)] for name in stmt.select_names]
compiler._insert_from_select = stmt.select
add_select_cols = []
if stmt.include_insert_from_select_defaults:
col_set = set(cols)
for col in stmt.table.columns:
if col not in col_set and col.default:
cols.append(col)
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
parameters.pop(col_key)
values.append((c, None))
else:
_append_param_insert_select_hasdefault(
compiler, stmt, c, add_select_cols, kw
)
if add_select_cols:
values.extend(add_select_cols)
compiler._insert_from_select = compiler._insert_from_select._generate()
compiler._insert_from_select._raw_columns = tuple(
compiler._insert_from_select._raw_columns
) + tuple(expr for col, expr in add_select_cols)
def _scan_cols(
compiler,
stmt,
parameters,
_getattr_col_key,
_column_as_key,
_col_bind_name,
check_columns,
values,
kw,
):
(
need_pks,
implicit_returning,
implicit_return_defaults,
postfetch_lastrowid,
) = _get_returning_modifiers(compiler, stmt)
if stmt._parameter_ordering:
parameter_ordering = [
_column_as_key(key) for key in stmt._parameter_ordering
]
ordered_keys = set(parameter_ordering)
cols = [stmt.table.c[key] for key in parameter_ordering] + [
c for c in stmt.table.c if c.key not in ordered_keys
]
else:
cols = stmt.table.columns
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
_append_param_parameter(
compiler,
stmt,
c,
col_key,
parameters,
_col_bind_name,
implicit_returning,
implicit_return_defaults,
values,
kw,
)
elif compiler.isinsert:
if (
c.primary_key
and need_pks
and (
implicit_returning
or not postfetch_lastrowid
or c is not stmt.table._autoincrement_column
)
):
if implicit_returning:
_append_param_insert_pk_returning(
compiler, stmt, c, values, kw
)
else:
_append_param_insert_pk(compiler, stmt, c, values, kw)
elif c.default is not None:
_append_param_insert_hasdefault(
compiler, stmt, c, implicit_return_defaults, values, kw
)
elif c.server_default is not None:
if implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
compiler.postfetch.append(c)
elif implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
elif (
c.primary_key
and c is not stmt.table._autoincrement_column
and not c.nullable
):
_warn_pk_with_no_anticipated_value(c)
elif compiler.isupdate:
_append_param_update(
compiler, stmt, c, implicit_return_defaults, values, kw
)
def _append_param_parameter(
compiler,
stmt,
c,
col_key,
parameters,
_col_bind_name,
implicit_returning,
implicit_return_defaults,
values,
kw,
):
value = parameters.pop(col_key)
if elements._is_literal(value):
value = _create_bind_param(
compiler,
c,
value,
required=value is REQUIRED,
name=_col_bind_name(c)
if not stmt._has_multi_parameters
else "%s_m0" % _col_bind_name(c),
**kw
)
else:
if isinstance(value, elements.BindParameter) and value.type._isnull:
value = value._clone()
value.type = c.type
if c.primary_key and implicit_returning:
compiler.returning.append(c)
value = compiler.process(value.self_group(), **kw)
elif implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
value = compiler.process(value.self_group(), **kw)
else:
# postfetch specifically means, "we can SELECT the row we just
# inserted by primary key to get back the server generated
# defaults". so by definition this can't be used to get the primary
# key value back, because we need to have it ahead of time.
if not c.primary_key:
compiler.postfetch.append(c)
value = compiler.process(value.self_group(), **kw)
values.append((c, value))
def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
"""Create a primary key expression in the INSERT statement and
possibly a RETURNING clause for it.
If the column has a Python-side default, we will create a bound
parameter for it and "pre-execute" the Python function. If
the column has a SQL expression default, or is a sequence,
we will add it directly into the INSERT statement and add a
RETURNING element to get the new value. If the column has a
server side default or is marked as the "autoincrement" column,
    we will add a RETURNING element to get at the value.
If all the above tests fail, that indicates a primary key column with no
noted default generation capabilities that has no parameter passed;
raise an exception.
"""
if c.default is not None:
if c.default.is_sequence:
if compiler.dialect.supports_sequences and (
not c.default.optional
or not compiler.dialect.sequences_optional
):
proc = compiler.process(c.default, **kw)
values.append((c, proc))
compiler.returning.append(c)
elif c.default.is_clause_element:
values.append(
(c, compiler.process(c.default.arg.self_group(), **kw))
)
compiler.returning.append(c)
else:
values.append((c, _create_insert_prefetch_bind_param(compiler, c)))
elif c is stmt.table._autoincrement_column or c.server_default is not None:
compiler.returning.append(c)
elif not c.nullable:
# no .default, no .server_default, not autoincrement, we have
# no indication this primary key column will have any value
_warn_pk_with_no_anticipated_value(c)
def _create_insert_prefetch_bind_param(compiler, c, process=True, name=None):
param = _create_bind_param(compiler, c, None, process=process, name=name)
compiler.insert_prefetch.append(c)
return param
def _create_update_prefetch_bind_param(compiler, c, process=True, name=None):
param = _create_bind_param(compiler, c, None, process=process, name=name)
compiler.update_prefetch.append(c)
return param
class _multiparam_column(elements.ColumnElement):
_is_multiparam_column = True
def __init__(self, original, index):
self.index = index
self.key = "%s_m%d" % (original.key, index + 1)
self.original = original
self.default = original.default
self.type = original.type
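    # Illustrative note (added): for a column keyed "amount", the additional
    # parameter sets receive synthetic keys "amount_m1", "amount_m2", ...;
    # the first parameter set keeps the original column key.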
def __eq__(self, other):
return (
isinstance(other, _multiparam_column)
and other.key == self.key
and other.original == self.original
)
def _process_multiparam_default_bind(compiler, stmt, c, index, kw):
if not c.default:
raise exc.CompileError(
"INSERT value for column %s is explicitly rendered as a bound"
"parameter in the VALUES clause; "
"a Python-side value or SQL expression is required" % c
)
elif c.default.is_clause_element:
return compiler.process(c.default.arg.self_group(), **kw)
else:
col = _multiparam_column(c, index)
if isinstance(stmt, dml.Insert):
return _create_insert_prefetch_bind_param(compiler, col)
else:
return _create_update_prefetch_bind_param(compiler, col)
def _append_param_insert_pk(compiler, stmt, c, values, kw):
"""Create a bound parameter in the INSERT statement to receive a
'prefetched' default value.
The 'prefetched' value indicates that we are to invoke a Python-side
    default function or explicit SQL expression before the INSERT statement
proceeds, so that we have a primary key value available.
    If the column has no noted default generation capabilities and no
    value is passed in either, raise an exception.
"""
if (
# column has a Python-side default
c.default is not None
and (
# and it won't be a Sequence
not c.default.is_sequence
or compiler.dialect.supports_sequences
)
) or (
# column is the "autoincrement column"
c is stmt.table._autoincrement_column
and (
# and it's either a "sequence" or a
# pre-executable "autoincrement" sequence
compiler.dialect.supports_sequences
or compiler.dialect.preexecute_autoincrement_sequences
)
):
values.append((c, _create_insert_prefetch_bind_param(compiler, c)))
elif c.default is None and c.server_default is None and not c.nullable:
# no .default, no .server_default, not autoincrement, we have
# no indication this primary key column will have any value
_warn_pk_with_no_anticipated_value(c)
def _append_param_insert_hasdefault(
compiler, stmt, c, implicit_return_defaults, values, kw
):
if c.default.is_sequence:
if compiler.dialect.supports_sequences and (
not c.default.optional or not compiler.dialect.sequences_optional
):
proc = compiler.process(c.default, **kw)
values.append((c, proc))
if implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
compiler.postfetch.append(c)
elif c.default.is_clause_element:
proc = compiler.process(c.default.arg.self_group(), **kw)
values.append((c, proc))
if implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
# don't add primary key column to postfetch
compiler.postfetch.append(c)
else:
values.append((c, _create_insert_prefetch_bind_param(compiler, c)))
def _append_param_insert_select_hasdefault(compiler, stmt, c, values, kw):
if c.default.is_sequence:
if compiler.dialect.supports_sequences and (
not c.default.optional or not compiler.dialect.sequences_optional
):
proc = c.default
values.append((c, proc.next_value()))
elif c.default.is_clause_element:
proc = c.default.arg.self_group()
values.append((c, proc))
else:
values.append(
(c, _create_insert_prefetch_bind_param(compiler, c, process=False))
)
def _append_param_update(
compiler, stmt, c, implicit_return_defaults, values, kw
):
if c.onupdate is not None and not c.onupdate.is_sequence:
if c.onupdate.is_clause_element:
values.append(
(c, compiler.process(c.onupdate.arg.self_group(), **kw))
)
if implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
else:
compiler.postfetch.append(c)
else:
values.append((c, _create_update_prefetch_bind_param(compiler, c)))
elif c.server_onupdate is not None:
if implicit_return_defaults and c in implicit_return_defaults:
compiler.returning.append(c)
else:
compiler.postfetch.append(c)
elif (
implicit_return_defaults
and stmt._return_defaults is not True
and c in implicit_return_defaults
):
compiler.returning.append(c)
def _get_multitable_params(
compiler,
stmt,
stmt_parameters,
check_columns,
_col_bind_name,
_getattr_col_key,
values,
kw,
):
normalized_params = dict(
(elements._clause_element_as_expr(c), param)
for c, param in stmt_parameters.items()
)
affected_tables = set()
for t in stmt._extra_froms:
for c in t.c:
if c in normalized_params:
affected_tables.add(t)
check_columns[_getattr_col_key(c)] = c
value = normalized_params[c]
if elements._is_literal(value):
value = _create_bind_param(
compiler,
c,
value,
required=value is REQUIRED,
name=_col_bind_name(c),
)
else:
compiler.postfetch.append(c)
value = compiler.process(value.self_group(), **kw)
values.append((c, value))
# determine tables which are actually to be updated - process onupdate
# and server_onupdate for these
for t in affected_tables:
for c in t.c:
if c in normalized_params:
continue
elif c.onupdate is not None and not c.onupdate.is_sequence:
if c.onupdate.is_clause_element:
values.append(
(
c,
compiler.process(
c.onupdate.arg.self_group(), **kw
),
)
)
compiler.postfetch.append(c)
else:
values.append(
(
c,
_create_update_prefetch_bind_param(
compiler, c, name=_col_bind_name(c)
),
)
)
elif c.server_onupdate is not None:
compiler.postfetch.append(c)
def _extend_values_for_multiparams(compiler, stmt, values, kw):
values_0 = values
values = [values]
for i, row in enumerate(stmt.parameters[1:]):
extension = []
for (col, param) in values_0:
if col in row or col.key in row:
key = col if col in row else col.key
if elements._is_literal(row[key]):
new_param = _create_bind_param(
compiler,
col,
row[key],
name="%s_m%d" % (col.key, i + 1),
**kw
)
else:
new_param = compiler.process(row[key].self_group(), **kw)
else:
new_param = _process_multiparam_default_bind(
compiler, stmt, col, i, kw
)
extension.append((col, new_param))
values.append(extension)
return values
def _get_stmt_parameters_params(
compiler, parameters, stmt_parameters, _column_as_key, values, kw
):
for k, v in stmt_parameters.items():
colkey = _column_as_key(k)
if colkey is not None:
parameters.setdefault(colkey, v)
else:
# a non-Column expression on the left side;
# add it to values() in an "as-is" state,
# coercing right side to bound param
if elements._is_literal(v):
v = compiler.process(
elements.BindParameter(None, v, type_=k.type), **kw
)
else:
if v._is_bind_parameter and v.type._isnull:
# either unique parameter, or other bound parameters that
# were passed in directly
# set type to that of the column unconditionally
v = v._with_binary_element_type(k.type)
v = compiler.process(v.self_group(), **kw)
values.append((k, v))
def _get_returning_modifiers(compiler, stmt):
need_pks = (
compiler.isinsert
and not compiler.inline
and not stmt._returning
and not stmt._has_multi_parameters
)
implicit_returning = (
need_pks
and compiler.dialect.implicit_returning
and stmt.table.implicit_returning
)
if compiler.isinsert:
implicit_return_defaults = implicit_returning and stmt._return_defaults
elif compiler.isupdate:
implicit_return_defaults = (
compiler.dialect.implicit_returning
and stmt.table.implicit_returning
and stmt._return_defaults
)
else:
# this line is unused, currently we are always
# isinsert or isupdate
implicit_return_defaults = False # pragma: no cover
if implicit_return_defaults:
if stmt._return_defaults is True:
implicit_return_defaults = set(stmt.table.c)
else:
implicit_return_defaults = set(stmt._return_defaults)
postfetch_lastrowid = need_pks and compiler.dialect.postfetch_lastrowid
return (
need_pks,
implicit_returning,
implicit_return_defaults,
postfetch_lastrowid,
)
def _warn_pk_with_no_anticipated_value(c):
msg = (
"Column '%s.%s' is marked as a member of the "
"primary key for table '%s', "
"but has no Python-side or server-side default generator indicated, "
"nor does it indicate 'autoincrement=True' or 'nullable=True', "
"and no explicit value is passed. "
"Primary key columns typically may not store NULL."
% (c.table.fullname, c.name, c.table.fullname)
)
if len(c.table.primary_key) > 1:
msg += (
" Note that as of SQLAlchemy 1.1, 'autoincrement=True' must be "
"indicated explicitly for composite (e.g. multicolumn) primary "
"keys if AUTO_INCREMENT/SERIAL/IDENTITY "
"behavior is expected for one of the columns in the primary key. "
"CREATE TABLE statements are impacted by this change as well on "
"most backends."
)
util.warn(msg)
| 32.429455 | 79 | 0.602984 |
794802ec9a91fdcbe89ef923fa50ee9bd8bb5209 | 2,393 | py | Python | dzTraficoBackend/dzTrafico/BusinessEntities/Flow.py | DZAymen/dz-Trafico | 74ff9caf9e3845d8af977c46b04a2d3421a0661b | [
"MIT"
] | null | null | null | dzTraficoBackend/dzTrafico/BusinessEntities/Flow.py | DZAymen/dz-Trafico | 74ff9caf9e3845d8af977c46b04a2d3421a0661b | [
"MIT"
] | null | null | null | dzTraficoBackend/dzTrafico/BusinessEntities/Flow.py | DZAymen/dz-Trafico | 74ff9caf9e3845d8af977c46b04a2d3421a0661b | [
"MIT"
] | null | null | null | from rest_framework import serializers
from Location import Location, LocationSerializer
class Flow(object):
end_depart_time = 10000
# via_edges = "26322664#2"
via_edges = ""
def __init__(self, start, end, depart_time, flow_value):
self.start_edge = start
self.end_edge = end
self.depart_time = depart_time
self.vehicles_per_hour = flow_value
class InFlowPoint(object):
id = 0
def __init__(self, lon, lat, departTime, flow, order):
self.id = InFlowPoint.id
InFlowPoint.id += 1
self.lon = lon
self.lat = lat
self.position = Location(lon, lat)
self.departTime = departTime
self.flow = flow
self.left_flow = flow
self.order = order
def get_left_flow(self, percentage):
flow = percentage * self.left_flow / 100
self.left_flow -= flow
return flow
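    # Illustrative note (added): repeated calls drain left_flow, e.g. with
    # flow=100 and percentage=50 successive calls return 50.0, 25.0, 12.5, ...
    # until reset_flow_value() restores the original flow value.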
def reset_flow_value(self):
self.left_flow = self.flow
class OutSerializer(serializers.Serializer):
outIndex = serializers.IntegerField()
class InFlowPointSerializer(serializers.Serializer):
id = serializers.CharField(required=False)
position = LocationSerializer()
departTime = serializers.FloatField()
flow = serializers.FloatField()
order = serializers.IntegerField()
def create(self, validated_data):
return InFlowPoint(
validated_data["position"]["lng"],
validated_data["position"]["lat"],
validated_data["departTime"],
validated_data["flow"],
validated_data["order"]
)
class OutFlowPoint(object):
id = 0
def __init__(self, lon, lat, percentage, order):
self.id = OutFlowPoint.id
OutFlowPoint.id += 1
self.lon = lon
self.lat = lat
self.position = Location(lon, lat)
self.percentage = percentage
self.order = order
class OutFlowPointSerializer(serializers.Serializer):
id = serializers.IntegerField(required=False)
position = LocationSerializer()
percentage = serializers.FloatField(required=False)
order = serializers.IntegerField()
def create(self, validated_data):
return OutFlowPoint(
validated_data["position"]["lng"],
validated_data["position"]["lat"],
validated_data["percentage"],
validated_data["order"]
) | 29.9125 | 60 | 0.642708 |
794802fe975600038aaffaee86a5b27b383e674e | 6,806 | py | Python | test/models/test_loaders.py | GHzytp/atomai | 30eab2e5b9cb508247341b1dea8215123b4bf995 | [
"MIT"
] | 69 | 2020-09-04T06:45:13.000Z | 2022-03-28T12:55:20.000Z | test/models/test_loaders.py | GHzytp/atomai | 30eab2e5b9cb508247341b1dea8215123b4bf995 | [
"MIT"
] | 24 | 2020-12-08T23:15:19.000Z | 2022-01-20T19:20:20.000Z | test/models/test_loaders.py | GHzytp/atomai | 30eab2e5b9cb508247341b1dea8215123b4bf995 | [
"MIT"
] | 13 | 2020-09-10T19:45:42.000Z | 2022-03-15T03:49:28.000Z | import sys
import numpy as np
import pytest
from numpy.testing import assert_, assert_array_equal, assert_equal
sys.path.append("../../../")
from atomai.models import (VAE, ImSpec, Segmentor, jrVAE, jVAE, load_ensemble,
load_model, load_pretrained_model, rVAE)
from atomai.trainers import EnsembleTrainer
def gen_image_data():
"""
Dummy images with random pixels
"""
X = np.random.random(size=(5, 1, 8, 8))
X_ = np.random.random(size=(5, 1, 8, 8))
return X, X_
def gen_image_labels():
"""
Dummy labels for dummy images
"""
y = np.random.randint(0, 3, size=(5, 8, 8))
y_ = np.random.randint(0, 3, size=(5, 8, 8))
return y, y_
def gen_spectra():
"""
Dummy 1D signal with random points
"""
X = np.random.random(size=(5, 1, 16))
X_ = np.random.random(size=(5, 1, 16))
return X, X_
def compare_optimizers(opt1, opt2):
for group_param1, group_param2 in zip(opt1.param_groups, opt2.param_groups):
for param1, param2 in zip(group_param1["params"], group_param1["params"]):
for p1, p2 in zip(param1, param2):
assert_array_equal(p1.detach().cpu().numpy(), p2.detach().cpu().numpy())
@pytest.mark.parametrize("model", ["Unet", "dilnet", "SegResNet", "ResHedNet"])
def test_io_segmentor(model):
X, X_test = gen_image_data()
y, y_test = gen_image_labels()
segmodel = Segmentor(model, nb_classes=3)
segmodel.fit(X, y, X_test, y_test, training_cycles=4, batch_size=2)
loaded_model = load_model("model_metadict_final.tar")
for p1, p2 in zip(loaded_model.net.parameters(), segmodel.net.parameters()):
assert_array_equal(p1.detach().cpu().numpy(), p2.detach().cpu().numpy())
@pytest.mark.parametrize("model", ["Unet", "dilnet", "SegResNet", "ResHedNet"])
def test_saved_optimizer_segmentor(model):
X, X_test = gen_image_data()
y, y_test = gen_image_labels()
segmodel = Segmentor(model, nb_classes=3)
segmodel.fit(X, y, X_test, y_test, training_cycles=4, batch_size=2,
filename="segmodel")
opt1 = segmodel.optimizer
loaded_model = load_model("segmodel_metadict_final.tar")
opt2 = loaded_model.optimizer
compare_optimizers(opt1, opt2)
def test_io_imspec():
X, X_test = gen_image_data()
y, y_test = gen_spectra()
i2s_model = ImSpec((8, 8), (16,))
i2s_model.fit(X, y, X_test, y_test, training_cycles=4, batch_size=2)
loaded_model = load_model("model_metadict_final.tar")
for p1, p2 in zip(loaded_model.net.parameters(), i2s_model.net.parameters()):
assert_array_equal(p1.detach().cpu().numpy(), p2.detach().cpu().numpy())
def test_saved_optimizer_imspec():
X, X_test = gen_image_data()
y, y_test = gen_spectra()
i2s_model = ImSpec((8, 8), (16,))
i2s_model.fit(X, y, X_test, y_test, training_cycles=4, batch_size=2)
opt1 = i2s_model.optimizer
loaded_model = load_model("model_metadict_final.tar")
opt2 = loaded_model.optimizer
compare_optimizers(opt1, opt2)
@pytest.mark.parametrize("model", [VAE, rVAE, jVAE, jrVAE])
def test_io_VAE(model):
X, _ = gen_image_data()
X = X[:, 0, ...]
vae_model = model((8, 8))
vae_model.fit(X, training_cycles=4, batch_size=2, filename="vae_metadict")
loaded_model = load_model("vae_metadict.tar")
for p1, p2 in zip(loaded_model.encoder_net.parameters(),
vae_model.encoder_net.parameters()):
assert_array_equal(p1.detach().cpu().numpy(), p2.detach().cpu().numpy())
for p1, p2 in zip(loaded_model.decoder_net.parameters(),
vae_model.decoder_net.parameters()):
assert_array_equal(p1.detach().cpu().numpy(), p2.detach().cpu().numpy())
@pytest.mark.parametrize("model", [VAE, rVAE, jVAE, jrVAE])
def test_saved_optimizer_VAE(model):
X, _ = gen_image_data()
X = X[:, 0, ...]
vae_model = model((8, 8))
vae_model.fit(X, training_cycles=4, batch_size=2, filename="vae_metadict")
opt1 = vae_model.optim
loaded_model = load_model("vae_metadict.tar")
opt2 = loaded_model.optim
compare_optimizers(opt1, opt2)
@pytest.mark.parametrize("model", [jVAE, jrVAE])
def test_saved_iter_jVAE(model):
X, _ = gen_image_data()
X = X[:, 0, ...]
vae_model = model((8, 8))
vae_model.fit(X, training_cycles=4, batch_size=2, filename="jvae_metadict")
num_iter = vae_model.kdict_["num_iter"]
loaded_model = load_model("jvae_metadict.tar")
assert_equal(num_iter, loaded_model.kdict_["num_iter"])
@pytest.mark.parametrize("model", [VAE, rVAE, jVAE, jrVAE])
def test_resume_training(model):
X, _ = gen_image_data()
X = X[:, 0, ...]
vae_model = model((8, 8))
vae_model.fit(X, training_cycles=4, batch_size=2, filename="vae_metadict")
loss0 = abs(vae_model.loss_history["train_loss"][0])
loaded_model = load_model("vae_metadict.tar")
loaded_model.fit(X, training_cycles=4, batch_size=2, filename="vae_metadict")
loss1 = abs(loaded_model.loss_history["train_loss"][0])
assert_(not np.isnan(loss1))
assert_(loss1 < loss0)
@pytest.mark.parametrize("model", ["Unet", "dilnet", "SegResNet", "ResHedNet"])
def test_io_ensemble_seg(model):
X, X_test = gen_image_data()
y, y_test = gen_image_labels()
etrainer = EnsembleTrainer(model, nb_classes=3)
etrainer.compile_ensemble_trainer(training_cycles=4, batch_size=2)
smodel, ensemble = etrainer.train_ensemble_from_scratch(
X, y, X_test, y_test, n_models=3)
smodel_, ensemble_ = load_ensemble("model_ensemble_metadict.tar")
for i in ensemble.keys():
m1 = ensemble[i]
m2 = ensemble_[i]
for p1, p2 in zip(m1.values(), m2.values()):
assert_array_equal(
p1.detach().cpu().numpy(),
p2.detach().cpu().numpy())
def test_io_ensemble_imspec():
X, X_test = gen_image_data()
y, y_test = gen_spectra()
etrainer = EnsembleTrainer(
"imspec", in_dim=(8, 8), out_dim=(16,), latent_dim=2)
etrainer.compile_ensemble_trainer(training_cycles=4, batch_size=2)
smodel, ensemble = etrainer.train_ensemble_from_scratch(
X, y, X_test, y_test, n_models=3)
smodel_, ensemble_ = load_ensemble("model_ensemble_metadict.tar")
for i in ensemble.keys():
m1 = ensemble[i]
m2 = ensemble_[i]
for p1, p2 in zip(m1.values(), m2.values()):
assert_array_equal(
p1.detach().cpu().numpy(),
p2.detach().cpu().numpy())
@pytest.mark.parametrize("model_name", ["G_MD", "BFO"])
def test_load_pretrained(model_name):
model = load_pretrained_model(model_name)
assert_(hasattr(model, "fit"))
assert_(hasattr(model, "predict"))
assert_(hasattr(model, "net"))
assert_(hasattr(model.net, "state_dict"))
| 36.395722 | 88 | 0.662651 |
79480372cae4a1aaac1a6aca825ed02d66c097a9 | 303 | py | Python | src/test_pset.py | CTimmerman/PyPico8 | a68c83ae5a9dc53221ab39d6e55bb68bb5a1e479 | [
"MIT"
] | null | null | null | src/test_pset.py | CTimmerman/PyPico8 | a68c83ae5a9dc53221ab39d6e55bb68bb5a1e479 | [
"MIT"
] | null | null | null | src/test_pset.py | CTimmerman/PyPico8 | a68c83ae5a9dc53221ab39d6e55bb68bb5a1e479 | [
"MIT"
] | null | null | null | from pypico8 import *
def _init():
fillp(1)
for y in range(129):
for x in range(129):
pset(x, y, 1 + 2 * 16)
rectfill(0, 0, 10, 10, 3 + 4 * 16)
circfill(64, 64, 10, 5 + 6 * 16)
ovalfill(80, 80, 90, 90, 7 + 8 * 16)
line(126, 0, 0, 126, 0 + 9*16)
run(_init) | 20.2 | 40 | 0.491749 |
794803b67170e3277ad5209c3c585223aa6a7aa5 | 399,183 | py | Python | modules/s3db/dvr.py | Mkgdukoo/aidiq | 840b97651d79352878d5a777067a915985617378 | [
"MIT"
] | 1 | 2018-06-06T12:11:25.000Z | 2018-06-06T12:11:25.000Z | modules/s3db/dvr.py | Mkgdukoo/aidiq | 840b97651d79352878d5a777067a915985617378 | [
"MIT"
] | null | null | null | modules/s3db/dvr.py | Mkgdukoo/aidiq | 840b97651d79352878d5a777067a915985617378 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
""" Sahana Eden Disaster Victim Registration Model
@copyright: 2012-2021 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("DVRCaseModel",
"DVRCaseFlagModel",
"DVRCaseActivityModel",
"DVRCaseAllowanceModel",
"DVRCaseAppointmentModel",
"DVRHouseholdModel",
"DVRHouseholdMembersModel",
"DVRCaseEconomyInformationModel",
"DVRLegalStatusModel",
"DVRCaseEffortModel",
"DVRCaseEventModel",
"DVRCaseEvaluationModel",
"DVRActivityFundingModel",
"DVRNeedsModel",
"DVRNotesModel",
"DVRReferralModel",
"DVRResponseModel",
"DVRServiceContactModel",
"DVRSiteActivityModel",
"DVRVulnerabilityModel",
"dvr_ActivityRepresent",
"dvr_CaseActivityRepresent",
"dvr_DocEntityRepresent",
"dvr_ResponseActionThemeRepresent",
"dvr_ResponseThemeRepresent",
"dvr_AssignMethod",
"dvr_case_default_status",
"dvr_case_activity_default_status",
"dvr_case_status_filter_opts",
"dvr_set_response_action_defaults",
"dvr_response_default_type",
"dvr_response_default_status",
"dvr_response_status_colors",
"dvr_case_household_size",
"dvr_due_followups",
"dvr_get_flag_instructions",
"dvr_get_household_size",
"dvr_rheader",
"dvr_update_last_seen",
)
import datetime
from collections import OrderedDict
from gluon import *
from gluon.storage import Storage
from ..s3 import *
from s3compat import basestring
from s3layouts import S3PopupLink
# =============================================================================
class DVRCaseModel(S3Model):
"""
Model for DVR Cases
Allow an individual or household to register to receive
compensation and/or distributions of relief items
"""
names = ("dvr_case",
"dvr_case_id",
"dvr_case_language",
"dvr_case_details",
"dvr_case_status",
"dvr_case_status_id",
"dvr_case_type",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
crud_strings = current.response.s3.crud_strings
NONE = current.messages["NONE"]
configure = self.configure
define_table = self.define_table
person_id = self.pr_person_id
beneficiary = settings.get_dvr_label() # If we add more options in future then == "Beneficiary"
manage_transferability = settings.get_dvr_manage_transferability()
# ---------------------------------------------------------------------
# Case Types
#
tablename = "dvr_case_type"
define_table(tablename,
Field("name",
label = T("Type"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
# Enable in template if/when org-specific
# case types are required:
self.org_organisation_id(readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_CASE_TYPE = T("Create Case Type")
crud_strings[tablename] = Storage(
label_create = ADD_CASE_TYPE,
title_display = T("Case Type"),
title_list = T("Case Types"),
title_update = T("Edit Case Type"),
label_list_button = T("List Case Types"),
label_delete_button = T("Delete Case Type"),
msg_record_created = T("Case Type added"),
msg_record_modified = T("Case Type updated"),
msg_record_deleted = T("Case Type deleted"),
msg_list_empty = T("No Case Types currently registered")
)
# Represent for reference
case_type_represent = S3Represent(lookup = "dvr_case_type",
translate = True,
)
# ---------------------------------------------------------------------
# Case Statuses
#
tablename = "dvr_case_status"
define_table(tablename,
Field("workflow_position", "integer",
default = 1,
label = T("Workflow Position"),
requires = IS_INT_IN_RANGE(1, None),
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Workflow Position"),
T("Rank when ordering cases by status"),
),
),
),
Field("code", length=64, notnull=True, unique=True,
label = T("Status Code"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64, minsize=1),
IS_NOT_ONE_OF(db,
"%s.code" % tablename,
),
],
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Status Code"),
T("A unique code to identify the status"),
),
),
),
Field("name",
label = T("Status"),
# Removed to allow single column imports of Cases
#requires = IS_NOT_EMPTY(),
),
Field("is_default", "boolean",
default = False,
label = T("Default Status"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Default Status"),
T("This status applies for new cases unless specified otherwise"),
),
),
),
Field("is_closed", "boolean",
default = False,
label = T("Case Closed"),
represent = s3_yes_no_represent,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Case Closed"),
T("Cases with this status are closed"),
),
),
),
Field("is_not_transferable", "boolean",
default = False,
label = T("Not Transferable"),
represent = s3_yes_no_represent,
readable = manage_transferability,
writable = manage_transferability,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Not Transferable"),
T("Cases with this status are not transferable"),
),
),
),
s3_comments(
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Comments"),
T("Describe the meaning, reasons and potential consequences of this status"),
),
),
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Case Status"),
title_display = T("Case Status"),
title_list = T("Case Statuses"),
title_update = T("Edit Case Status"),
label_list_button = T("List Case Statuses"),
label_delete_button = T("Delete Case Status"),
msg_record_created = T("Case Status added"),
msg_record_modified = T("Case Status updated"),
msg_record_deleted = T("Case Status deleted"),
msg_list_empty = T("No Case Statuses currently registered")
)
# Table configuration
configure(tablename,
# Allow imports to change the status code:
deduplicate = S3Duplicate(primary = ("name",),
ignore_deleted = True,
),
onaccept = self.case_status_onaccept,
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
status_id = S3ReusableField("status_id", "reference %s" % tablename,
label = T("Status"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "dvr_case_status.id",
represent,
orderby = "dvr_case_status.workflow_position",
sort = False,
)),
sortby = "workflow_position",
)
# ---------------------------------------------------------------------
# Cases
#
# Case priority options
# => tuple list to enforce widget order
# => numeric key so it can be sorted by
case_priority_opts = ((3, T("High")),
(2, T("Medium")),
(1, T("Low")),
)
# Consent flag options
consent_opts = {"N/A": T("n/a"),
"Y": T("yes"),
"N": T("no"),
}
SITE = settings.get_org_site_label()
site_represent = self.org_SiteRepresent(show_link=False)
# Defaults for case assignment
default_organisation = settings.get_org_default_organisation()
default_site = settings.get_org_default_site()
permitted_facilities = current.auth.permitted_facilities(redirect_on_error=False)
# Household size tracking
household_size = settings.get_dvr_household_size()
household_size_writable = household_size and household_size != "auto"
# Transfer origin/destination tracking
track_transfer_sites = settings.get_dvr_track_transfer_sites()
transfer_site_types = settings.get_dvr_transfer_site_types()
transfer_site_requires = IS_EMPTY_OR(
IS_ONE_OF(db, "org_site.site_id",
site_represent,
sort = True,
filterby = "instance_type",
filter_opts = transfer_site_types,
not_filterby = "obsolete",
not_filter_opts = (True,),
))
transfer_site_id = S3ReusableField("transfer_site_id", "reference org_site",
ondelete = "RESTRICT",
requires = transfer_site_requires,
represent = site_represent,
# Enable in template if required
readable = track_transfer_sites,
writable = track_transfer_sites,
)
tablename = "dvr_case"
define_table(tablename,
self.super_link("doc_id", "doc_entity"),
# The primary case beneficiary
person_id(represent = self.pr_PersonRepresent(show_link=True),
widget = S3AddPersonWidget(controller="dvr"),
empty = False,
),
# Case type and reference number
FieldS3("case_type_id", "reference dvr_case_type",
label = T("Case Type"),
represent = case_type_represent,
requires = IS_EMPTY_OR(IS_ONE_OF(
db, "dvr_case_type.id",
case_type_represent,
)),
sortby = "name",
comment = S3PopupLink(c = "dvr",
f = "case_type",
title = ADD_CASE_TYPE,
tooltip = T("Choose the case type from the drop-down, or click the link to create a new type"),
# Always look up options from dvr/case
# (required if inline in person form):
vars = {"parent": "case",
},
),
),
# @todo: rename into "code"?
# @ToDo: Option to autogenerate these, like Waybills, et al
# @ToDo: Deprecate: We use pe_label as primary ID and Tags for any additional IDs to cross-reference to 3rd-party systems
Field("reference",
label = T("Case Number"),
),
# Case priority and status
status_id(),
Field("priority", "integer",
default = 2,
label = T("Priority"),
represent = S3Represent(options=dict(case_priority_opts)),
requires = IS_IN_SET(case_priority_opts,
sort = False,
zero = None,
),
),
Field("disclosure_consent", "string", length=8,
label = T("Consenting to Data Disclosure"),
requires = IS_EMPTY_OR(IS_IN_SET(consent_opts)),
represent = S3Represent(options=consent_opts),
readable = False,
writable = False,
),
Field("archived", "boolean",
default = False,
label = T("Archived"),
represent = s3_yes_no_represent,
# Enabled in controller:
readable = False,
writable = False,
),
# Case assignment
self.org_organisation_id(
default = default_organisation,
readable = not default_organisation,
writable = not default_organisation,
),
self.project_project_id(
ondelete = "SET NULL",
# Enable in template as required:
readable = False,
writable = False,
),
self.super_link("site_id", "org_site",
default = default_site,
filterby = "site_id",
filter_opts = permitted_facilities,
label = SITE,
readable = not default_site,
writable = not default_site,
represent = site_represent,
updateable = True,
),
self.hrm_human_resource_id(
label = T("Assigned to"),
readable = False,
writable = False,
),
# Basic date fields
s3_date(label = T("Registration Date"),
default = "now",
empty = False,
),
s3_date("closed_on",
label = T("Case closed on"),
# Automatically set onaccept
writable = False,
),
# Extended date fields
s3_date("valid_until",
label = T("Valid until"),
# Enable in template if required
readable = False,
writable = False,
),
s3_date("stay_permit_until",
label = T("Stay Permit until"),
# Enable in template if required
readable = False,
writable = False,
),
s3_datetime("last_seen_on",
label = T("Last seen on"),
# Enable in template if required
readable = False,
writable = False,
),
# Household size tracking
Field("household_size", "integer",
default = 1,
label = T("Household Size"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(1, None)),
readable = household_size,
writable = household_size_writable,
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Household Size"),
T("Number of persons belonging to the same household"),
),
),
),
# Case transfer management
transfer_site_id("origin_site_id",
label = T("Admission from"),
),
transfer_site_id("destination_site_id",
label = T("Transfer to"),
),
# "transferable" indicates whether this case is
# ready for transfer (=workflow is complete)
Field("transferable", "boolean",
default = False,
label = T("Transferable"),
represent = s3_yes_no_represent,
readable = manage_transferability,
writable = manage_transferability,
),
# "household transferable" indicates whether all
# open cases in the case group are ready for transfer
Field("household_transferable", "boolean",
default = False,
label = T("Household Transferable"),
represent = s3_yes_no_represent,
readable = manage_transferability,
writable = manage_transferability,
),
# Standard comments and meta fields
s3_comments(),
*s3_meta_fields())
# CRUD Strings
if beneficiary:
label = T("Beneficiary")
crud_strings[tablename] = Storage(
label_create = T("Create Beneficiary"),
title_display = T("Beneficiary Details"),
title_list = T("Beneficiaries"),
title_update = T("Edit Beneficiary"),
label_list_button = T("List Beneficiaries"),
label_delete_button = T("Delete Beneficiary"),
msg_record_created = T("Beneficiary added"),
msg_record_modified = T("Beneficiary updated"),
msg_record_deleted = T("Beneficiary deleted"),
msg_list_empty = T("No Beneficiaries found"),
)
else:
label = T("Case")
crud_strings[tablename] = Storage(
label_create = T("Create Case"),
title_display = T("Case Details"),
title_list = T("Cases"),
title_update = T("Edit Case"),
label_list_button = T("List Cases"),
label_delete_button = T("Delete Case"),
msg_record_created = T("Case added"),
msg_record_modified = T("Case updated"),
msg_record_deleted = T("Case deleted"),
msg_list_empty = T("No Cases found"),
)
# Components
self.add_components(tablename,
dvr_case_activity = "case_id",
dvr_case_details = {"joinby": "case_id",
"multiple": False,
},
dvr_case_event = "case_id",
dvr_economy = {"joinby": "case_id",
"multiple": False,
},
dvr_evaluation = {"joinby": "case_id",
"multiple": False,
},
dvr_need = {"link": "dvr_case_need",
"joinby": "case_id",
"key": "need_id",
},
)
# Report options FIXME
#axes = ["organisation_id",
# "case_need.need_id",
# ]
#levels = current.gis.get_relevant_hierarchy_levels()
#for level in levels:
# axes.append("current_address.location_id$%s" % level)
#highest_lx = "current_address.location_id$%s" % levels[0]
#
#facts = [(T("Number of Cases"), "count(id)"),
# ]
#
#report_options = {"rows": axes,
# "cols": axes,
# "fact": facts,
# "defaults": {"rows": "case_need.need_id",
# "cols": highest_lx,
# "fact": facts[0],
# "totals": True,
# },
# }
# Table configuration
configure(tablename,
#report_options = report_options,
onvalidation = self.case_onvalidation,
create_onaccept = self.case_create_onaccept,
update_onaccept = self.case_onaccept,
super_entity = ("doc_entity",),
)
# Reusable field
represent = S3Represent(lookup=tablename, fields=("reference",))
case_id = S3ReusableField("case_id", "reference %s" % tablename,
label = label,
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "dvr_case.id",
represent)),
)
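        # Illustrative usage (component table name hypothetical): other case
        # component tables would typically include this reusable field as
        #   case_id(empty = False, ondelete = "CASCADE"),
        # as done for dvr_case_details further below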
# ---------------------------------------------------------------------
# Case Language: languages that can be used to communicate with
# a case beneficiary
#
# Quality/Mode of communication:
lang_quality_opts = (("N", T("native")),
("F", T("fluent")),
("S", T("simplified/slow")),
("W", T("written-only")),
("I", T("interpreter required")),
)
tablename = "dvr_case_language"
define_table(tablename,
person_id(empty = False,
ondelete = "CASCADE",
),
s3_language(select = None),
Field("quality",
default = "N",
label = T("Quality/Mode"),
represent = S3Represent(options=dict(lang_quality_opts)),
requires = IS_IN_SET(lang_quality_opts,
sort = False,
zero = None,
),
),
s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Case Details: extended attributes for DVR cases
#
tablename = "dvr_case_details"
define_table(tablename,
case_id(empty = False,
ondelete = "CASCADE",
),
person_id(empty = False,
ondelete = "CASCADE",
),
Field("registered", "boolean",
default = True,
label = T("Officially Registered"),
represent = s3_yes_no_represent,
),
Field("enrolled_in_school", "boolean",
default = False,
label = T("Enrolled in Public School"),
represent = s3_yes_no_represent,
),
s3_date("arrival_date",
label = T("Arrival Date"),
),
Field("lodging", length=128,
label = T("Lodging"),
represent = lambda v: v if v else NONE,
requires = IS_LENGTH(128),
),
s3_date("on_site_from",
label = T("On-site from"),
),
s3_date("on_site_until",
label = T("On-site until"),
),
Field("referred_by", length=128,
label = T("Referred by"),
represent = lambda v: v if v else NONE,
requires = IS_LENGTH(128),
),
Field("referred_to", length=128,
label = T("Referred to"),
represent = lambda v: v if v else NONE,
requires = IS_LENGTH(128),
),
self.dvr_referral_type_id(),
self.dvr_referral_type_id(
"activity_referral_type_id",
label = T("Referred to Group Activities by"),
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"dvr_case_id": case_id,
"dvr_case_status_id": status_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False,
)
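        # The lambdas mimic the call signature of the real reusable fields,
        # so that other models can still reference dvr_case_id and
        # dvr_case_status_id when the DVR module is disabled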
return {"dvr_case_id": lambda name="case_id", **attr: \
dummy(name, **attr),
"dvr_case_status_id": lambda name="status_id", **attr: \
dummy(name, **attr),
}
# -------------------------------------------------------------------------
@staticmethod
def case_status_onaccept(form):
"""
Onaccept routine for case statuses:
- only one status can be the default
@param form: the FORM
"""
form_vars = form.vars
try:
record_id = form_vars.id
except AttributeError:
record_id = None
if not record_id:
return
# If this status is the default, then set is_default-flag
# for all other statuses to False:
if "is_default" in form_vars and form_vars.is_default:
table = current.s3db.dvr_case_status
db = current.db
db(table.id != record_id).update(is_default = False)
# -------------------------------------------------------------------------
@staticmethod
def case_onvalidation(form):
"""
Case onvalidation:
- make sure case numbers are unique within the organisation
@param form: the FORM
"""
db = current.db
s3db = current.s3db
# Read form data
form_vars = form.vars
if "id" in form_vars:
# Inline subtable update
record_id = form_vars.id
elif hasattr(form, "record_id"):
# Regular update form
record_id = form.record_id
else:
# New record
record_id = None
try:
reference = form_vars.reference
except AttributeError:
reference = None
if reference:
# Make sure the case reference is unique within the organisation
ctable = s3db.dvr_case
otable = s3db.org_organisation
# Get the organisation_id
if "organisation_id" not in form_vars:
if not record_id:
# Create form with hidden organisation_id
# => use default
organisation_id = ctable.organisation_id.default
else:
# Reload the record to get the organisation_id
query = (ctable.id == record_id)
row = db(query).select(ctable.organisation_id,
limitby = (0, 1)).first()
if not row:
return
organisation_id = row.organisation_id
else:
# Use the organisation_id in the form
organisation_id = form_vars.organisation_id
# Case duplicate query
dquery = (ctable.reference == reference) & \
(ctable.deleted != True)
if record_id:
dquery &= (ctable.id != record_id)
msg = current.T("This Case Number is already in use")
# Add organisation query to duplicate query
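            # (with branch organisations, the case number must be unique
            #  across the whole organisation tree, i.e. within the same
            #  root organisation; otherwise only within the organisation)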
if current.deployment_settings.get_org_branches():
# Get the root organisation
query = (otable.id == organisation_id)
row = db(query).select(otable.root_organisation,
limitby = (0, 1)).first()
root_organisation = row.root_organisation \
if row else organisation_id
dquery &= (otable.root_organisation == root_organisation)
left = otable.on(otable.id == ctable.organisation_id)
else:
dquery &= (ctable.organisation_id == organisation_id)
left = None
# Is there a record with the same reference?
row = db(dquery).select(ctable.id,
left = left,
limitby = (0, 1)).first()
if row:
form.errors["reference"] = msg
# -------------------------------------------------------------------------
@classmethod
def case_create_onaccept(cls, form):
"""
Wrapper for case_onaccept when called during create
rather than update
@param form: the FORM
"""
cls.case_onaccept(form, create=True)
# -------------------------------------------------------------------------
@staticmethod
def case_onaccept(form, create=False):
"""
Case onaccept routine:
- auto-create active appointments
- count household size for new cases
@param form: the FORM
@param create: perform additional actions for new cases
"""
db = current.db
s3db = current.s3db
# Read form data
form_vars = form.vars
if "id" in form_vars:
record_id = form_vars.id
elif hasattr(form, "record_id"):
record_id = form.record_id
else:
return
# Get the case
ctable = s3db.dvr_case
stable = s3db.dvr_case_status
left = stable.on(stable.id == ctable.status_id)
query = (ctable.id == record_id)
row = db(query).select(ctable.id,
ctable.person_id,
ctable.closed_on,
stable.is_closed,
left = left,
limitby = (0, 1),
).first()
if not row:
return
# Update closed_on date
case = row.dvr_case
if row.dvr_case_status.is_closed:
if not case.closed_on:
case.update_record(closed_on = current.request.utcnow.date())
elif case.closed_on:
case.update_record(closed_on = None)
# Get the person ID
person_id = case.person_id
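        # Auto-create appointments: find all active appointment types for
        # which the person has no appointment yet (left join + NULL check),
        # then create one appointment per missing type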
atable = s3db.dvr_case_appointment
ttable = s3db.dvr_case_appointment_type
left = atable.on((atable.type_id == ttable.id) &
(atable.person_id == person_id) &
(atable.deleted != True))
query = (atable.id == None) & \
(ttable.active == True) & \
(ttable.deleted != True)
rows = db(query).select(ttable.id, left=left)
for row in rows:
atable.insert(case_id = record_id,
person_id = person_id,
type_id = row.id,
)
if create and \
current.deployment_settings.get_dvr_household_size() == "auto":
# Count household size for newly created cases, in order
# to catch pre-existing case group memberships
gtable = s3db.pr_group
mtable = s3db.pr_group_membership
query = ((mtable.person_id == person_id) & \
(mtable.deleted != True) & \
(gtable.id == mtable.group_id) & \
(gtable.group_type == 7))
rows = db(query).select(gtable.id)
for row in rows:
dvr_case_household_size(row.id)
# =============================================================================
class DVRCaseFlagModel(S3Model):
""" Model for Case Flags """
names = ("dvr_case_flag",
"dvr_case_flag_case",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
crud_strings = current.response.s3.crud_strings
configure = self.configure
define_table = self.define_table
manage_transferability = settings.get_dvr_manage_transferability()
# ---------------------------------------------------------------------
# Case Flags
#
tablename = "dvr_case_flag"
define_table(tablename,
Field("name",
label = T("Name"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
Field("advise_at_check_in", "boolean",
default = False,
label = T("Advice at Check-in"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Advice at Check-in"),
T("Show handling instructions at check-in"),
),
),
),
Field("advise_at_check_out", "boolean",
default = False,
label = T("Advice at Check-out"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Advice at Check-out"),
T("Show handling instructions at check-out"),
),
),
),
Field("advise_at_id_check", "boolean",
default = False,
label = T("Advice at ID Check"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Advice at ID Check"),
T("Show handling instructions at ID checks (e.g. for event registration, payments)"),
),
),
),
Field("instructions", "text",
label = T("Instructions"),
represent = s3_text_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Instructions"),
T("Instructions for handling of the case"),
),
),
),
Field("deny_check_in", "boolean",
default = False,
label = T("Deny Check-in"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Deny Check-in"),
T("Deny the person to check-in when this flag is set"),
),
),
),
Field("deny_check_out", "boolean",
default = False,
label = T("Deny Check-out"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Deny Check-out"),
T("Deny the person to check-out when this flag is set"),
),
),
),
Field("allowance_suspended", "boolean",
default = False,
label = T("Allowance Suspended"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Allowance Suspended"),
T("Person shall not receive allowance payments when this flag is set"),
),
),
),
Field("is_not_transferable", "boolean",
default = False,
label = T("Not Transferable"),
represent = s3_yes_no_represent,
readable = manage_transferability,
writable = manage_transferability,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Not Transferable"),
T("Cases with this flag are not transferable"),
),
),
),
Field("is_external", "boolean",
default = False,
label = T("External"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("External"),
T("This flag indicates that the person is currently accommodated/being held externally (e.g. in Hospital or with Police)"),
),
),
),
Field("nostats", "boolean",
default = False,
label = T("Exclude from Reports"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Exclude from Reports"),
T("Exclude cases with this flag from certain reports"),
),
),
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_FLAG = T("Create Case Flag")
crud_strings[tablename] = Storage(
label_create = ADD_FLAG,
title_display = T("Case Flag Details"),
title_list = T("Case Flags"),
title_update = T("Edit Case Flag"),
label_list_button = T("List Case Flags"),
label_delete_button = T("Delete Case Flag"),
msg_record_created = T("Case Flag added"),
msg_record_modified = T("Case Flag updated"),
msg_record_deleted = T("Case Flag deleted"),
msg_list_empty = T("No Case Flags found"),
)
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(ignore_deleted = True,
),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
flag_id = S3ReusableField("flag_id", "reference %s" % tablename,
label = T("Case Flag"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "dvr_case_flag.id",
represent)),
comment=S3PopupLink(c = "dvr",
f = "case_flag",
title = ADD_FLAG,
tooltip = T("Choose the flag from the drop-down, or click the link to create a new flag"),
),
)
# ---------------------------------------------------------------------
# Link table Case <=> Flag
#
tablename = "dvr_case_flag_case"
define_table(tablename,
self.pr_person_id(empty = False,
ondelete = "CASCADE",
),
flag_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(primary = ("person_id",
"flag_id",
),
),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"dvr_case_flag_id": flag_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False,
)
return {"dvr_case_flag_id": lambda name="flag_id", **attr: \
dummy(name, **attr),
}
# =============================================================================
class DVRNeedsModel(S3Model):
""" Model for Needs """
names = ("dvr_need",
"dvr_need_id",
"dvr_case_need",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
configure = self.configure
service_type = settings.get_dvr_needs_use_service_type()
service_id = self.org_service_id
hierarchical_needs = settings.get_dvr_needs_hierarchical()
# ---------------------------------------------------------------------
# Needs
#
tablename = "dvr_need"
define_table(tablename,
Field("name",
label = T("Name"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
# This form of hierarchy may not work on all Databases:
Field("parent", "reference dvr_need",
label = T("Subtype of"),
ondelete = "RESTRICT",
readable = hierarchical_needs,
writable = hierarchical_needs,
),
service_id(label = T("Service Type"),
ondelete = "SET NULL",
readable = service_type,
writable = service_type,
),
# Activate in template as needed:
self.org_organisation_id(readable = False,
writable = False,
),
Field("protection", "boolean",
default = False,
label = T("Protection Need"),
represent = s3_yes_no_represent,
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# Hierarchy
if hierarchical_needs:
hierarchy = "parent"
widget = S3HierarchyWidget(multiple = False,
leafonly = False,
)
else:
hierarchy = None
widget = None
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("parent",
"organisation_id",
),
),
hierarchy = hierarchy,
)
# CRUD Strings
ADD_NEED = T("Create Need Type")
crud_strings[tablename] = Storage(
label_create = ADD_NEED,
title_display = T("Need Type Details"),
title_list = T("Need Types"),
title_update = T("Edit Need Type"),
label_list_button = T("List Need Types"),
label_delete_button = T("Delete Need Type"),
msg_record_created = T("Need Type added"),
msg_record_modified = T("Need Type updated"),
msg_record_deleted = T("Need Type deleted"),
msg_list_empty = T("No Need Types found"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
need_id = S3ReusableField("need_id", "reference %s" % tablename,
label = T("Need Type"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "dvr_need.id",
represent,
)),
comment = S3PopupLink(c = "dvr",
f = "need",
title = ADD_NEED,
tooltip = T("Choose the need type from the drop-down, or click the link to create a new type"),
),
widget = widget
)
# ---------------------------------------------------------------------
# Link table Case <=> Need
#
tablename = "dvr_case_need"
define_table(tablename,
self.dvr_case_id(empty = False,
ondelete = "CASCADE",
),
need_id(empty = False,
ondelete = "CASCADE",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"dvr_need_id": need_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False,
)
return {"dvr_need_id": lambda name="need_id", **attr: \
dummy(name, **attr),
}
# =============================================================================
class DVRNotesModel(S3Model):
"""
Model for Notes
"""
names = ("dvr_note_type",
"dvr_note",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# Note Types
#
tablename = "dvr_note_type"
define_table(tablename,
Field("name", length=128, unique=True,
label = T("Name"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128, minsize=1),
IS_NOT_ONE_OF(db,
"dvr_note_type.name",
),
],
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Note Type"),
title_display = T("Note Type Details"),
title_list = T("Note Types"),
title_update = T("Edit Note Type"),
label_list_button = T("List Note Types"),
label_delete_button = T("Delete Note Type"),
msg_record_created = T("Note Type added"),
msg_record_modified = T("Note Type updated"),
msg_record_deleted = T("Note Type deleted"),
msg_list_empty = T("No Note Types found"),
)
# Table configuration
#self.configure(tablename,
# # Not needed as unique=True
# deduplicate = S3Duplicate(),
# )
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
note_type_id = S3ReusableField("note_type_id", "reference %s" % tablename,
label = T("Note Type"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "dvr_note_type.id",
represent)),
)
# ---------------------------------------------------------------------
# Notes
#
tablename = "dvr_note"
define_table(tablename,
# Uncomment if needed for the Case perspective
#self.dvr_case_id(empty = False,
# ondelete = "CASCADE",
# ),
self.pr_person_id(empty = False,
ondelete = "CASCADE",
),
note_type_id(empty = False),
s3_date(default = "now",
),
s3_comments("note",
label = T("Note"),
comment = None,
),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Note"),
title_display = T("Note Details"),
title_list = T("Notes"),
title_update = T("Edit Note"),
label_list_button = T("List Notes"),
label_delete_button = T("Delete Note"),
msg_record_created = T("Note added"),
msg_record_modified = T("Note updated"),
msg_record_deleted = T("Note deleted"),
msg_list_empty = T("No Notes found"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class DVRReferralModel(S3Model):
"""
Data model for case referrals (both incoming and outgoing)
"""
names = ("dvr_referral_type",
"dvr_referral_type_id",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
# ---------------------------------------------------------------------
# Referral Types (how cases are admitted)
#
tablename = "dvr_referral_type"
self.define_table(tablename,
Field("name",
label = T("Name"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# Table configuration
self.configure(tablename,
deduplicate = S3Duplicate(),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Referral Type"),
title_display = T("Referral Type Details"),
title_list = T("Referral Types"),
title_update = T("Edit Referral Type"),
label_list_button = T("List Referral Types"),
label_delete_button = T("Delete Referral Type"),
msg_record_created = T("Referral Type added"),
msg_record_modified = T("Referral Type updated"),
msg_record_deleted = T("Referral Type deleted"),
msg_list_empty = T("No Referral Types found"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
referral_type_id = S3ReusableField("referral_type_id",
"reference %s" % tablename,
label = T("Type of Referral"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"%s.id" % tablename,
represent,
)),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"dvr_referral_type_id": referral_type_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False,
)
return {"dvr_referral_type_id": lambda name="referral_type_id", **attr: \
dummy(name, **attr),
}
# =============================================================================
class DVRResponseModel(S3Model):
""" Model representing responses to case needs """
names = ("dvr_response_action",
"dvr_response_action_theme",
"dvr_response_status",
"dvr_response_theme",
"dvr_response_type",
"dvr_response_type_case_activity",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
crud_strings = s3.crud_strings
define_table = self.define_table
configure = self.configure
hierarchical_response_types = settings.get_dvr_response_types_hierarchical()
themes_sectors = settings.get_dvr_response_themes_sectors()
themes_needs = settings.get_dvr_response_themes_needs()
case_activity_id = self.dvr_case_activity_id
NONE = current.messages["NONE"]
# ---------------------------------------------------------------------
# Response Themes
#
tablename = "dvr_response_theme"
define_table(tablename,
self.org_organisation_id(),
Field("name",
label = T("Theme"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
self.dvr_need_id(readable = themes_needs,
writable = themes_needs,
),
self.org_sector_id(readable = themes_sectors,
writable = themes_sectors,
),
s3_comments(),
*s3_meta_fields())
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("organisation_id",),
),
ondelete_cascade = self.response_theme_ondelete_cascade,
)
# CRUD strings
crud_strings[tablename] = Storage(
label_create = T("Create Response Theme"),
title_display = T("Response Theme Details"),
title_list = T("Response Themes"),
title_update = T("Edit Response Theme"),
label_list_button = T("List Response Themes"),
label_delete_button = T("Delete Response Theme"),
msg_record_created = T("Response Theme created"),
msg_record_modified = T("Response Theme updated"),
msg_record_deleted = T("Response Theme deleted"),
msg_list_empty = T("No Response Themes currently defined"),
)
# Reusable field
themes_represent = dvr_ResponseThemeRepresent(multiple = True,
translate = True,
)
requires = IS_ONE_OF(db, "%s.id" % tablename,
themes_represent,
multiple = True,
)
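        # Restrict selectable themes to the user's root organisation
        # when themes are configured as organisation-specific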
if settings.get_dvr_response_themes_org_specific():
root_org = current.auth.root_org()
if root_org:
requires.set_filter(filterby = "organisation_id",
filter_opts = (root_org,),
)
response_theme_ids = S3ReusableField(
"response_theme_ids",
"list:reference %s" % tablename,
label = T("Themes"),
ondelete = "RESTRICT",
represent = themes_represent,
requires = IS_EMPTY_OR(requires),
sortby = "name",
widget = S3MultiSelectWidget(header = False,
),
)
# ---------------------------------------------------------------------
# Response Types
#
tablename = "dvr_response_type"
define_table(tablename,
Field("name",
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
# This form of hierarchy may not work on all databases:
Field("parent", "reference dvr_response_type",
label = T("Subtype of"),
ondelete = "RESTRICT",
represent = S3Represent(lookup = tablename,
translate = True,
hierarchy = True,
),
readable = hierarchical_response_types,
writable = hierarchical_response_types,
),
Field("is_default", "boolean",
label = T("Default?"),
default = False,
represent = s3_yes_no_represent,
),
Field("is_consultation", "boolean",
label = T("Consultation"),
default = False,
represent = s3_yes_no_represent,
),
s3_comments(),
*s3_meta_fields())
# Hierarchy
if hierarchical_response_types:
hierarchy = "parent"
widget = S3HierarchyWidget(multiple = False,
leafonly = True,
)
else:
hierarchy = None
widget = None
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("parent",),
),
hierarchy = hierarchy,
onaccept = self.response_type_onaccept,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Response Type"),
title_display = T("Response Type Details"),
title_list = T("Response Types"),
title_update = T("Edit Response Type"),
label_list_button = T("List Response Types"),
label_delete_button = T("Delete Response Type"),
msg_record_created = T("Response Type created"),
msg_record_modified = T("Response Type updated"),
msg_record_deleted = T("Response Type deleted"),
msg_list_empty = T("No Response Types currently defined"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
response_type_id = S3ReusableField(
"response_type_id",
"reference %s" % tablename,
label = T("Response Type"),
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
represent,
)),
sortby = "name",
widget = widget,
)
# ---------------------------------------------------------------------
# Response action status
#
tablename = "dvr_response_status"
define_table(tablename,
Field("name",
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
Field("workflow_position", "integer",
label = T("Workflow Position"),
requires = IS_INT_IN_RANGE(0, None),
),
Field("is_default", "boolean",
default = False,
label = T("Default Initial Status"),
),
Field("is_closed", "boolean",
default = False,
label = T("Closes Response Action"),
),
Field("is_default_closure", "boolean",
default = False,
label = T("Default Closure Status"),
),
Field("color",
requires = IS_HTML_COLOUR(),
widget = S3ColorPickerWidget(),
),
s3_comments(),
*s3_meta_fields())
# Table Configuration
configure(tablename,
deduplicate = S3Duplicate(),
onaccept = self.response_status_onaccept,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Response Status"),
title_display = T("Response Status Details"),
title_list = T("Response Statuses"),
title_update = T("Edit Response Status"),
label_list_button = T("List Response Statuses"),
label_delete_button = T("Delete Response Status"),
msg_record_created = T("Response Status created"),
msg_record_modified = T("Response Status updated"),
msg_record_deleted = T("Response Status deleted"),
msg_list_empty = T("No Response Statuses currently defined"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
response_status_id = S3ReusableField(
"status_id",
"reference %s" % tablename,
label = T("Status"),
represent = represent,
requires = IS_ONE_OF(db, "%s.id" % tablename,
represent,
orderby = "workflow_position",
sort = False,
zero = None,
),
sortby = "workflow_position",
)
# ---------------------------------------------------------------------
# Responses
#
case_label = settings.get_dvr_label()
        if case_label: # currently only "Beneficiary"; check the actual value if more label options are added in future
CASE = T("Beneficiary")
else:
CASE = T("Case")
use_response_types = settings.get_dvr_response_types()
use_response_themes = settings.get_dvr_response_themes()
response_themes_details = settings.get_dvr_response_themes_details()
use_due_date = settings.get_dvr_response_due_date()
DATE = T("Date Actioned") if use_due_date else T("Date")
use_time = settings.get_dvr_response_use_time()
tablename = "dvr_response_action"
define_table(tablename,
# Beneficiary
self.pr_person_id(
label = CASE,
widget = S3PersonAutocompleteWidget(controller="dvr"),
empty = False,
),
case_activity_id(
empty = False,
label = T("Activity"),
ondelete = "CASCADE",
writable = False,
),
response_theme_ids(
ondelete = "RESTRICT",
readable = use_response_themes,
writable = use_response_themes,
),
response_type_id(
empty = not use_response_types,
label = T("Action Type"),
ondelete = "RESTRICT",
readable = use_response_types,
writable = use_response_types,
),
s3_date("date_due",
label = T("Date Due"),
readable = use_due_date,
writable = use_due_date,
),
# For backwards-compatibility:
s3_date(label = DATE,
default = None if use_due_date else "now",
readable = False,
writable = False,
),
s3_datetime("start_date",
label = DATE,
default = None if use_due_date else "now",
widget = None if use_time else "date",
),
s3_datetime("end_date",
label = T("End"),
widget = None if use_time else "date",
readable = False,
writable = False,
),
self.hrm_human_resource_id(),
response_status_id(),
Field("hours", "double",
label = T("Effort (Hours)"),
requires = IS_EMPTY_OR(
IS_FLOAT_IN_RANGE(0.0, None)),
represent = lambda hours: "%.2f" % hours if hours else NONE,
widget = S3HoursWidget(precision = 2,
),
),
s3_comments(label = T("Details"),
comment = None,
represent = lambda v: s3_text_represent(v, lines=8),
),
*s3_meta_fields())
# List_fields
list_fields = ["case_activity_id",
"comments",
"human_resource_id",
#"date_due",
"start_date",
"hours",
"status_id",
]
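        # Insert optional columns at their intended positions
        # (slice assignment inserts without replacing existing entries)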
if use_due_date:
list_fields[3:3] = ["date_due"]
if use_response_types:
list_fields[1:1] = ["response_type_id"]
if use_response_themes:
if response_themes_details:
list_fields[1:1] = ["response_action_theme.theme_id"]
else:
list_fields[1:1] = ["response_theme_ids", "comments"]
else:
list_fields[1:1] = ["comments"]
# Filter widgets
if use_response_types:
if hierarchical_response_types:
response_type_filter = S3HierarchyFilter(
"response_type_id",
lookup = "dvr_response_type",
hidden = True,
)
else:
response_type_filter = S3OptionsFilter(
"response_type_id",
options = lambda: \
s3_get_filter_opts("dvr_response_type"),
hidden = True,
)
else:
response_type_filter = None
if use_due_date:
due_filter = S3DateFilter("date_due")
else:
due_filter = None
filter_widgets = [S3TextFilter(["case_activity_id$person_id$pe_label",
"case_activity_id$person_id$first_name",
"case_activity_id$person_id$middle_name",
"case_activity_id$person_id$last_name",
"comments",
],
label = T("Search"),
),
S3OptionsFilter("status_id",
options = lambda: \
s3_get_filter_opts("dvr_response_status"),
cols = 3,
translate = True,
),
due_filter,
response_type_filter,
]
# CRUD Form
type_field = "response_type_id" if use_response_types else None
details_field = "comments"
if use_response_themes:
if response_themes_details:
theme_field = S3SQLInlineComponent("response_action_theme",
fields = ["theme_id",
"comments",
],
label = T("Themes"),
)
details_field = None
else:
theme_field = "response_theme_ids"
else:
theme_field = None
due_field = "date_due" if use_due_date else None
crud_form = S3SQLCustomForm("person_id",
"case_activity_id",
type_field,
theme_field,
details_field,
"human_resource_id",
due_field,
"start_date",
"status_id",
"hours",
)
# Table Configuration
configure(tablename,
crud_form = crud_form,
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.response_action_onaccept,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Action"),
title_display = T("Action Details"),
title_list = T("Actions"),
title_update = T("Edit Action"),
label_list_button = T("List Actions"),
label_delete_button = T("Delete Action"),
msg_record_created = T("Action created"),
msg_record_modified = T("Action updated"),
msg_record_deleted = T("Action deleted"),
msg_list_empty = T("No Actions currently registered"),
)
# Components
self.add_components(tablename,
dvr_response_action_theme = "action_id",
)
# ---------------------------------------------------------------------
# Response Action <=> Theme link table
# - for filtering/reporting by extended theme attributes
# - not exposed directly, populated onaccept from response_theme_ids
#
theme_represent = S3Represent(lookup = "dvr_response_theme",
translate = True,
)
action_represent = dvr_ResponseActionRepresent()
tablename = "dvr_response_action_theme"
define_table(tablename,
Field("action_id", "reference dvr_response_action",
label = T("Action"),
ondelete = "CASCADE",
represent = action_represent,
requires = IS_ONE_OF(db, "dvr_response_action.id",
action_represent,
),
),
Field("theme_id", "reference dvr_response_theme",
ondelete = "RESTRICT",
label = T("Theme"),
represent = theme_represent,
requires = IS_ONE_OF(db, "dvr_response_theme.id",
theme_represent,
),
),
case_activity_id(ondelete = "SET NULL",
readable = False,
writable = False,
),
s3_comments(label = T("Details"),
comment = None,
represent = lambda v: s3_text_represent(v, lines=8),
),
*s3_meta_fields())
configure(tablename,
onaccept = self.response_action_theme_onaccept,
ondelete = self.response_action_theme_ondelete,
)
# ---------------------------------------------------------------------
# Response Types <=> Case Activities link table
# @todo: drop/replace by dvr_response_action? (currently still used in STL)
#
tablename = "dvr_response_type_case_activity"
define_table(tablename,
self.dvr_case_activity_id(
empty = False,
ondelete = "CASCADE",
),
response_type_id(
empty = False,
ondelete = "RESTRICT",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
#dummy = S3ReusableField("dummy_id", "integer",
#readable = False,
#writable = False,
#)
return {}
# -------------------------------------------------------------------------
@staticmethod
def response_type_onaccept(form):
"""
Onaccept routine for response types:
- only one type can be the default
@param form: the FORM
"""
form_vars = form.vars
try:
record_id = form_vars.id
except AttributeError:
record_id = None
if not record_id:
return
table = current.s3db.dvr_response_type
# If this status is the default, then set is_default-flag
# for all other types to False:
if form_vars.get("is_default"):
query = (table.is_default == True) & \
(table.id != record_id)
current.db(query).update(is_default = False)
# -------------------------------------------------------------------------
@staticmethod
def response_status_onaccept(form):
"""
Onaccept routine for response statuses:
- only one status can be the default
@param form: the FORM
"""
form_vars = form.vars
try:
record_id = form_vars.id
except AttributeError:
record_id = None
if not record_id:
return
table = current.s3db.dvr_response_status
db = current.db
# If this status is the default, then set is_default-flag
# for all other statuses to False:
if form_vars.get("is_default"):
query = (table.is_default == True) & \
(table.id != record_id)
db(query).update(is_default = False)
# If this status is the default closure, then enforce is_closed,
# and set is_default_closure for all other statuses to False
if form_vars.get("is_default_closure"):
db(table.id == record_id).update(is_closed = True)
query = (table.is_default_closure == True) & \
(table.id != record_id)
db(query).update(is_default_closure = False)
# -------------------------------------------------------------------------
@staticmethod
def response_theme_ondelete_cascade(row):
"""
Explicit deletion cascade for response theme list:references
(which are not caught by standard cascade), action depending
on "ondelete" setting of response_theme_ids:
- RESTRICT => block deletion cascade
- otherwise => clean up the list:reference
@param row: the dvr_response_theme Row to be deleted
"""
db = current.db
theme_id = row.id
# Table with list:reference dvr_response_theme
atable = current.s3db.dvr_response_action
reference = atable.response_theme_ids
# Referencing rows
query = (reference.contains(theme_id)) & \
(atable.deleted == False)
if reference.ondelete == "RESTRICT":
referenced_by = db(query).select(atable.id, limitby=(0, 1)).first()
if referenced_by:
# Raise to stop deletion cascade
raise RuntimeError("Attempt to delete a theme that is referenced by a response")
else:
referenced_by = db(query).select(atable.id, reference)
for rrow in referenced_by:
# Clean up reference list
theme_ids = rrow[reference]
rrow.update_record(response_theme_ids = \
[tid for tid in theme_ids if tid != theme_id])
# -------------------------------------------------------------------------
@staticmethod
def get_case_activity_by_need(person_id, need_id, hr_id=None):
"""
DRY helper to find or create a case activity matching a need_id
@param person_id: the beneficiary person ID
@param need_id: the need ID (or a list of need IDs)
            @param hr_id: the responsible HR record ID
@returns: a dvr_case_activity record ID
"""
if not person_id:
return None
s3db = current.s3db
table = s3db.dvr_case_activity
# Look up a matching case activity for this beneficiary
query = (table.person_id == person_id)
if isinstance(need_id, (list, tuple)):
need = need_id[0] if len(need_id) == 1 else None
query &= (table.need_id.belongs(need_id))
else:
need = need_id
query &= (table.need_id == need_id)
query &= (table.deleted == False)
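        # Use the most recently started matching activity; a new activity
        # is only auto-created when the need is unambiguous (single need ID)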
activity = current.db(query).select(table.id,
orderby = ~table.start_date,
limitby = (0, 1),
).first()
if activity:
activity_id = activity.id
elif need is not None:
# Create an activity for the case
activity_id = table.insert(person_id = person_id,
need_id = need,
start_date = current.request.utcnow,
human_resource_id = hr_id,
)
s3db.update_super(table, {"id": activity_id})
else:
activity_id = None
return activity_id
# -------------------------------------------------------------------------
@classmethod
def response_action_onaccept(cls, form):
"""
            Onaccept routine for response actions:
- update theme links from inline response_theme_ids
- link to case activity if required
"""
form_vars = form.vars
try:
record_id = form_vars.id
except AttributeError:
record_id = None
if not record_id:
return
db = current.db
s3db = current.s3db
# Get the record
atable = s3db.dvr_response_action
query = (atable.id == record_id)
record = db(query).select(atable.id,
atable.person_id,
atable.case_activity_id,
atable.response_theme_ids,
atable.human_resource_id,
atable.start_date,
atable.end_date,
atable.hours,
limitby = (0, 1),
).first()
if not record:
return
settings = current.deployment_settings
themes_details = settings.get_dvr_response_themes_details()
theme_ids = record.response_theme_ids
if not theme_ids:
theme_ids = []
if not record.person_id:
# Inherit the person_id (beneficiary) from the case activity
case_activity_id = record.case_activity_id
if case_activity_id:
catable = s3db.dvr_case_activity
query = (catable.id == case_activity_id)
case_activity = db(query).select(catable.person_id,
limitby = (0, 1),
).first()
if case_activity:
record.update_record(person_id = case_activity.person_id)
elif settings.get_dvr_response_activity_autolink() and \
not themes_details:
# Automatically link the response action to a case activity
# (using matching needs)
# Get all needs of the response
ttable = s3db.dvr_response_theme
if theme_ids:
query = ttable.id.belongs(theme_ids)
themes = db(query).select(ttable.need_id,
groupby = ttable.need_id,
)
need_ids = set(theme.need_id for theme in themes)
else:
need_ids = None
if not need_ids:
# Response is not linked to any needs
# => Remove activity link
activity_id = None
else:
catable = s3db.dvr_case_activity
activity_id = record.case_activity_id
if activity_id:
# Verify that the case activity's need matches person+theme
query = (catable.id == activity_id) & \
(catable.person_id == record.person_id) & \
(catable.deleted == False)
activity = db(query).select(catable.need_id,
limitby = (0, 1),
).first()
if not activity or activity.need_id not in need_ids:
activity_id = None
if not activity_id:
# Find or create a matching case activity
activity_id = cls.get_case_activity_by_need(
record.person_id,
need_ids,
hr_id = record.human_resource_id,
)
# Update the activity link
record.update_record(case_activity_id = activity_id)
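        # Synchronise the action<=>theme link table with the inline
        # response_theme_ids selection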
if not themes_details:
# Get all selected themes
selected = set(theme_ids)
# Get all linked themes
ltable = s3db.dvr_response_action_theme
query = (ltable.action_id == record_id) & \
(ltable.deleted == False)
links = db(query).select(ltable.theme_id)
linked = set(link.theme_id for link in links)
# Remove obsolete theme links
obsolete = linked - selected
if obsolete:
query &= ltable.theme_id.belongs(obsolete)
db(query).delete()
# Add links for newly selected themes
added = selected - linked
for theme_id in added:
ltable.insert(action_id = record_id,
theme_id = theme_id,
)
# Calculate end_date
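        # - applies only when end_date is not set manually in the form
        # - end_date = start_date + effort (hours), defaulting to 30 minutes
        # - an established duration is preserved when only the start date
        #   shifts, unless the effort hours have changed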
start_date = record.start_date
end_date = record.end_date
if start_date:
if "end_date" not in form_vars:
new_end_date = None
hours = record.hours
if hours:
duration = datetime.timedelta(hours=hours)
else:
duration = datetime.timedelta(hours=0.5)
orig_start_date = None
if hasattr(form, "record"):
try:
orig_start_date = form.record.start_date
except AttributeError:
pass
if not end_date or not orig_start_date:
new_end_date = start_date + duration
else:
delta = end_date - orig_start_date
if hours and delta != duration:
delta = duration
duration_changed = True
else:
duration_changed = False
if start_date != orig_start_date or duration_changed:
new_end_date = start_date + delta
if new_end_date:
record.update_record(end_date = new_end_date)
elif end_date:
record.update_record(end_date = None)
# -------------------------------------------------------------------------
@classmethod
def response_action_theme_onaccept(cls, form):
"""
Onaccept routine for response action theme links
- update response_theme_ids in response action record
- link to case activity if required
"""
form_vars = form.vars
try:
record_id = form_vars.id
except AttributeError:
record_id = None
if not record_id:
return
db = current.db
s3db = current.s3db
# Look up the record
table = s3db.dvr_response_action_theme
query = (table.id == record_id)
record = db(query).select(table.id,
table.action_id,
table.theme_id,
table.comments,
limitby = (0, 1),
).first()
if not record:
return
settings = current.deployment_settings
if settings.get_dvr_response_themes_details():
# Look up the response action
action_id = record.action_id
if action_id:
atable = s3db.dvr_response_action
query = (atable.id == action_id)
action = db(query).select(atable.id,
atable.person_id,
atable.human_resource_id,
limitby = (0, 1),
).first()
else:
action = None
if action:
theme_id = record.theme_id
if theme_id:
# Merge duplicate action<=>theme links
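                    # (concatenate the duplicates' comments into this
                    #  record, then delete the duplicate links)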
query = (table.id != record.id) & \
(table.action_id == action_id) & \
(table.theme_id == record.theme_id) & \
current.auth.s3_accessible_query("delete", table) & \
(table.deleted == False)
rows = db(query).select(table.id,
table.comments,
orderby = table.created_on,
)
duplicates = []
details = []
for row in rows:
if row.comments:
details.append(row.comments.strip())
duplicates.append(row.id)
if record.comments:
details.append(record.comments.strip())
record.update_record(comments="\n\n".join(c for c in details if c))
s3db.resource("dvr_response_action_theme", id=duplicates).delete()
# Update response_theme_ids in response action
query = (table.action_id == action_id) & \
(table.deleted == False)
rows = db(query).select(table.theme_id)
theme_ids = [row.theme_id for row in rows if row.theme_id]
action.update_record(response_theme_ids=theme_ids)
# Auto-link to case activity
if settings.get_dvr_response_themes_needs() and \
settings.get_dvr_response_activity_autolink():
# Look up the theme's need_id
ttable = s3db.dvr_response_theme
query = (ttable.id == record.theme_id)
theme = db(query).select(ttable.need_id,
limitby = (0, 1),
).first()
if theme:
activity_id = cls.get_case_activity_by_need(
action.person_id,
theme.need_id,
hr_id = action.human_resource_id,
)
record.update_record(case_activity_id=activity_id)
# -------------------------------------------------------------------------
@staticmethod
def response_action_theme_ondelete(row):
"""
On-delete actions for response_action_theme links
- update response_theme_ids in action record
"""
db = current.db
s3db = current.s3db
action_id = row.action_id
if action_id:
atable = s3db.dvr_response_action
query = (atable.id == action_id) & \
(atable.deleted == False)
action = db(query).select(atable.id,
atable.person_id,
atable.human_resource_id,
limitby = (0, 1),
).first()
else:
action = None
if action:
# Update response theme ids in response action
table = s3db.dvr_response_action_theme
query = (table.action_id == action_id) & \
(table.deleted == False)
rows = db(query).select(table.theme_id)
theme_ids = [row.theme_id for row in rows if row.theme_id]
action.update_record(response_theme_ids = theme_ids)
# =============================================================================
class DVRCaseActivityModel(S3Model):
""" Model for Case Activities """
names = ("dvr_activity",
"dvr_activity_id",
"dvr_activity_age_group",
"dvr_activity_focus",
"dvr_activity_group_type",
"dvr_case_activity",
"dvr_case_activity_id",
"dvr_case_activity_need",
"dvr_case_activity_status",
"dvr_case_activity_update",
"dvr_case_activity_update_type",
"dvr_provider_type",
"dvr_termination_type",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
crud_strings = current.response.s3.crud_strings
configure = self.configure
define_table = self.define_table
service_type = settings.get_dvr_activity_use_service_type()
activity_sectors = settings.get_dvr_activity_sectors()
service_id = self.org_service_id
project_id = self.project_project_id
organisation_id = self.org_organisation_id
human_resource_id = self.hrm_human_resource_id
# ---------------------------------------------------------------------
# Provider Type
#
tablename = "dvr_provider_type"
define_table(tablename,
Field("name", notnull=True,
label = T("Type"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Provider Type"),
title_display = T("Provider Type Details"),
title_list = T("Provider Types"),
title_update = T("Edit Provider Type"),
label_list_button = T("List Provider Types"),
label_delete_button = T("Delete Provider Type"),
msg_record_created = T("Provider Type added"),
msg_record_modified = T("Provider Type updated"),
msg_record_deleted = T("Provider Type deleted"),
msg_list_empty = T("No Provider Types currently defined"),
)
# Reusable Field
represent = S3Represent(lookup=tablename)
provider_type_id = S3ReusableField("provider_type_id", "reference %s" % tablename,
label = T("Provider Type"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
represent,
sort = True,
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Activity Group Type
#
tablename = "dvr_activity_group_type"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Type"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128, minsize=1),
IS_NOT_ONE_OF(db,
"%s.name" % tablename,
),
],
),
s3_comments(),
*s3_meta_fields())
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Group Type"),
title_display = T("Group Type Details"),
title_list = T("Group Types"),
title_update = T("Edit Group Type"),
label_list_button = T("List Group Types"),
label_delete_button = T("Delete Group Type"),
msg_record_created = T("Group Type added"),
msg_record_modified = T("Group Type updated"),
msg_record_deleted = T("Group Type deleted"),
msg_list_empty = T("No Group Types currently defined"),
)
# Reusable Field
represent = S3Represent(lookup=tablename)
group_type_id = S3ReusableField("group_type_id", "reference %s" % tablename,
label = T("Group Type"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
represent,
sort = True,
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Activity Age Group
#
tablename = "dvr_activity_age_group"
define_table(tablename,
Field("name", length=128, notnull=True, unique=True,
label = T("Age Group"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(128, minsize=1),
IS_NOT_ONE_OF(db,
"%s.name" % tablename,
),
],
),
s3_comments(),
*s3_meta_fields())
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Age Group"),
title_display = T("Age Group Details"),
title_list = T("Age Groups"),
title_update = T("Edit Age Group"),
label_list_button = T("List Age Groups"),
label_delete_button = T("Delete Age Group"),
msg_record_created = T("Age Group added"),
msg_record_modified = T("Age Group updated"),
msg_record_deleted = T("Age Group deleted"),
msg_list_empty = T("No Age Groups currently defined"),
)
# Reusable Field
represent = S3Represent(lookup=tablename)
age_group_id = S3ReusableField("age_group_id", "reference %s" % tablename,
label = T("Age Group"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
represent,
sort = True,
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Activity Focus
#
tablename = "dvr_activity_focus"
define_table(tablename,
Field("name", notnull=True,
label = T("Name"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Activity Focus"),
title_display = T("Activity Focus Details"),
title_list = T("Activity Focuses"),
title_update = T("Edit Activity Focus"),
label_list_button = T("List Activity Focuses"),
label_delete_button = T("Delete Activity Focus"),
msg_record_created = T("Activity Focus added"),
msg_record_modified = T("Activity Focus updated"),
msg_record_deleted = T("Activity Focus deleted"),
msg_list_empty = T("No Activity Focuses currently defined"),
)
# Reusable Field
represent = S3Represent(lookup=tablename)
focus_id = S3ReusableField("focus_id", "reference %s" % tablename,
label = T("Activity Focus"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
represent,
sort = True,
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Activity (not case-specific)
#
site_represent = self.org_SiteRepresent(show_link=False)
permitted_facilities = current.auth.permitted_facilities(redirect_on_error=False)
# Simplified periodicity options
# @todo: make boolean and use free text interval description?
period_opts = {"R": T("regular"),
"O": T("occasional"),
}
# Modality options
modality_opts = {"E": T("Event"),
"O": T("Outreach"),
}
# Target gender type options
# (Tuple list to enforce this order in drop-down)
gender_opts = [("M", T("Male")),
("F", T("Female")),
("A", T("Mixed")),
]
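        # Insert "Other" before "Mixed" unless the third gender is hidden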
if not settings.get_pr_hide_third_gender():
gender_opts.insert(-1, ("X", T("Other")))
tablename = "dvr_activity"
define_table(tablename,
self.super_link("doc_id", "doc_entity"),
service_id(label = T("Service Type"),
ondelete = "RESTRICT",
readable = service_type,
writable = service_type,
),
# Expose in template as needed:
organisation_id(readable = False,
writable = False,
),
project_id(ondelete = "SET NULL",
readable = False,
writable = False,
),
Field("name",
label = T("Title"),
),
s3_date("start_date",
label = T("Start Date"),
),
s3_date("end_date",
label = T("End Date"),
),
Field("period", length=4,
represent = S3Represent(options=period_opts),
requires = IS_EMPTY_OR(IS_IN_SET(period_opts)),
),
Field("modality", length=4,
label = T("Modality"),
default = "E",
represent = S3Represent(options=dict(modality_opts)),
requires = IS_IN_SET(modality_opts, zero=None),
readable = False,
writable = False,
),
self.super_link("site_id", "org_site",
filterby = "site_id",
filter_opts = permitted_facilities,
label = T("Place"),
readable = True,
writable = True,
represent = site_represent,
updateable = True,
),
self.org_room_id(),
self.gis_location_id(
label = T("Target Area"),
widget = S3LocationSelector(points = False,
polygons = True,
#show_address = False,
),
readable = False,
writable = False,
),
# @todo: have alternative lookup field (hrm)
Field("facilitator",
label = T("Facilitator"),
),
Field("gender", length=4,
label = T("Gender"),
represent = S3Represent(options=dict(gender_opts)),
requires = IS_EMPTY_OR(IS_IN_SET(gender_opts,
sort = False,
)),
readable = False,
writable = False,
),
age_group_id(ondelete="SET NULL"),
group_type_id(ondelete="SET NULL"),
focus_id(ondelete = "SET NULL",
readable = False,
writable = False,
),
# Certificates for Participants:
# - expose in template if required:
Field("certificate", "boolean",
default = False,
label = T("Certificate issued"),
represent = s3_yes_no_represent,
readable = False,
writable = False,
),
Field("certificate_details", "text",
label = T("Certificate Details"),
represent = s3_text_represent,
readable = False,
writable = False,
widget = s3_comments_widget,
),
s3_comments(),
*s3_meta_fields())
# Table Options
configure(tablename,
super_entity = "doc_entity",
)
# Components
self.add_components(tablename,
dvr_case_activity = "activity_id",
supply_distribution = {"link": "supply_distribution_case_activity",
"joinby": "activity_id",
"key": "distribution_id",
},
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Activity"),
title_display = T("Activity Details"),
title_list = T("Activities"),
title_update = T("Edit Activity"),
label_list_button = T("List Activities"),
label_delete_button = T("Delete Activity"),
msg_record_created = T("Activity added"),
msg_record_modified = T("Activity updated"),
msg_record_deleted = T("Activity deleted"),
msg_list_empty = T("No Activities currently registered"),
)
# Reusable Field
represent = dvr_ActivityRepresent(show_link=False)
activity_id = S3ReusableField("activity_id", "reference %s" % tablename,
label = T("Activity"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
represent,
sort = True,
)),
sortby = "service_id",
)
# ---------------------------------------------------------------------
# Termination Types (=how a case activity ended)
#
tablename = "dvr_termination_type"
define_table(tablename,
service_id(label = T("Service Type"),
ondelete = "CASCADE",
readable = service_type,
writable = service_type,
),
Field("name", notnull=True,
label = T("Name"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("service_id",),
),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Termination Type"),
title_display = T("Termination Type Details"),
title_list = T("Termination Types"),
title_update = T("Edit Termination Type"),
label_list_button = T("List Termination Types"),
label_delete_button = T("Delete Termination Type"),
msg_record_created = T("Termination Type added"),
msg_record_modified = T("Termination Type updated"),
msg_record_deleted = T("Termination Type deleted"),
msg_list_empty = T("No Termination Types currently defined"),
)
# Reusable Field
represent = S3Represent(lookup=tablename)
termination_type_id = S3ReusableField("termination_type_id", "reference %s" % tablename,
label = T("Termination Type"),
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
represent,
sort = True,
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Case Activity Status
#
tablename = "dvr_case_activity_status"
define_table(tablename,
Field("name",
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
Field("workflow_position", "integer",
label = T("Workflow Position"),
requires = IS_INT_IN_RANGE(0, None),
),
Field("is_default", "boolean",
default = False,
label = T("Default Status"),
),
Field("is_closed", "boolean",
default = False,
label = T("Closes Activity"),
),
s3_comments(),
*s3_meta_fields())
# Table Configuration
configure(tablename,
deduplicate = S3Duplicate(),
onaccept = self.case_activity_status_onaccept,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Activity Status"),
title_display = T("Activity Status Details"),
title_list = T("Activity Statuses"),
title_update = T("Edit Activity Status"),
label_list_button = T("List Activity Statuses"),
label_delete_button = T("Delete Activity Status"),
msg_record_created = T("Activity Status created"),
msg_record_modified = T("Activity Status updated"),
msg_record_deleted = T("Activity Status deleted"),
msg_list_empty = T("No Activity Statuses currently defined"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
activity_status_id = S3ReusableField("status_id",
"reference %s" % tablename,
label = T("Status"),
represent = represent,
requires = IS_ONE_OF(db, "%s.id" % tablename,
represent,
orderby = "workflow_position",
sort = False,
zero = None,
),
sortby = "workflow_position",
)
# ---------------------------------------------------------------------
# Case Activity (case-specific)
#
twoweeks = current.request.utcnow + datetime.timedelta(days=14)
multiple_needs = settings.get_dvr_case_activity_needs_multiple()
use_status = settings.get_dvr_case_activity_use_status()
follow_up = settings.get_dvr_case_activity_follow_up()
# Priority options
priority_opts = [#(0, T("Urgent")),
(1, T("High")),
(2, T("Normal")),
(3, T("Low")),
]
# Achievement options
achievement_opts = [("INCR", T("Increased in severity")),
("SAME", T("At same level")),
("DECR", T("Decreased in severity")),
("RSLV", T("Completely resolved")),
]
tablename = "dvr_case_activity"
define_table(tablename,
self.super_link("doc_id", "doc_entity"),
self.dvr_case_id(comment = None,
empty = False,
label = T("Case Number"),
ondelete = "CASCADE",
writable = False,
),
# Beneficiary (component link)
# @todo: populate from case and hide in case perspective
self.pr_person_id(comment = None,
empty = False,
ondelete = "CASCADE",
writable = False,
),
# Subject and Details
Field("subject",
label = T("Subject / Occasion"),
readable = False,
writable = False,
),
Field("need_details", "text",
label = T("Need Details"),
represent = s3_text_represent,
widget = s3_comments_widget,
),
# Need type (if single)
self.dvr_need_id(readable = not multiple_needs,
writable = not multiple_needs,
),
# Dates
s3_date("start_date",
label = T("Registered on"),
default = "now",
set_min = "#dvr_case_activity_end_date",
),
s3_date("end_date",
label = T("Completed on"),
readable = False,
writable = False,
set_max = "#dvr_case_activity_start_date",
),
# Priority
Field("emergency", "boolean",
default = False,
label = T("Emergency"),
represent = s3_yes_no_represent,
),
Field("priority", "integer",
label = T("Priority"),
represent = S3Represent(options=dict(priority_opts)),
requires = IS_IN_SET(priority_opts, sort=False),
default = 2, # normal
readable = False,
writable = False,
),
# Responsibilities (activate in template as needed)
organisation_id(label = T("Referral Agency"),
readable = False,
writable = False,
),
human_resource_id(label = T("Assigned to"),
readable = False,
writable = False,
),
# Categories (activate in template as needed)
self.org_sector_id(readable = activity_sectors,
writable = activity_sectors,
),
service_id(label = T("Service Type"),
ondelete = "RESTRICT",
readable = service_type,
writable = service_type,
),
project_id(ondelete = "SET NULL",
readable = False,
writable = False,
),
# Actions performed (activate in template as needed)
activity_id(readable=False,
writable=False,
),
Field("activity_details", "text",
label = T("Support provided"),
represent = s3_text_represent,
widget = s3_comments_widget,
),
provider_type_id(label = T("Referred to"),
ondelete = "RESTRICT",
readable = False,
writable = False,
),
# Support received by the beneficiary independently
# of the managed activity:
Field("outside_support", "text",
label = T("Outside Support"),
represent = s3_text_represent,
widget = s3_comments_widget,
readable = False,
writable = False,
),
# Details about referrals made under this activity
# @deprecate: should use activity_details instead
# @todo: remove once templates have been migrated?
Field("referral_details", "text",
label = T("Support provided"),
represent = s3_text_represent,
readable = False,
writable = False,
),
# Follow-up
Field("followup", "boolean",
default = True if follow_up else None,
label = T("Follow up"),
represent = s3_yes_no_represent,
readable = follow_up,
writable = follow_up,
),
s3_date("followup_date",
default = twoweeks if follow_up else None,
label = T("Date for Follow-up"),
readable = follow_up,
writable = follow_up,
),
# Status, type of exit
Field("completed", "boolean",
default = False,
label = T("Completed"),
represent = s3_yes_no_represent,
readable = not use_status,
writable = not use_status,
),
activity_status_id(readable = use_status,
writable = use_status,
),
termination_type_id(ondelete = "RESTRICT",
readable = False,
writable = False,
),
# Outcomes
Field("outcome", "text",
label = T("Outcome"),
represent = s3_text_represent,
widget = s3_comments_widget,
),
Field("achievement",
label = T("Change achieved"),
comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Change achieved"),
T("What change in the severity of the problem has so far been achieved by this activity?"),
),
),
represent = S3Represent(
options=dict(achievement_opts),
),
requires = IS_EMPTY_OR(
IS_IN_SET(achievement_opts,
sort = False,
)),
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# Components
self.add_components(tablename,
dvr_activity_funding = {
"joinby": "case_activity_id",
"multiple": False,
},
dvr_case_effort = "case_activity_id",
dvr_case_activity_need = "case_activity_id",
dvr_need = {
"link": "dvr_case_activity_need",
"joinby": "case_activity_id",
"key": "need_id",
},
dvr_response_action = "case_activity_id",
dvr_response_action_theme = "case_activity_id",
dvr_response_type = {
"link": "dvr_response_type_case_activity",
"joinby": "case_activity_id",
"key": "response_type_id",
},
dvr_case_activity_update = "case_activity_id",
dvr_vulnerability_type = (
{"name": "vulnerability_type",
"link": "dvr_vulnerability_type_case_activity",
"joinby": "case_activity_id",
"key": "vulnerability_type_id",
},
{"name": "diagnosis",
"link": "dvr_diagnosis_case_activity",
"joinby": "case_activity_id",
"key": "vulnerability_type_id",
},
),
supply_distribution = {
"link": "supply_distribution_case_activity",
"joinby": "case_activity_id",
"key": "distribution_id",
},
)
# List fields
if multiple_needs:
need_field = "case_activity_need.need_id"
else:
need_field = "need_id"
list_fields = ["start_date",
need_field,
"need_details",
"emergency",
"activity_details",
"completed",
]
if follow_up:
list_fields[-1:-1] = ["followup", "followup_date"]
# Filter widgets
filter_widgets = [S3TextFilter(["person_id$pe_label",
"person_id$first_name",
"person_id$last_name",
"case_id$reference",
"need_details",
"activity_details",
],
label = T("Search"),
),
S3OptionsFilter("emergency",
options = {True: T("Yes"),
False: T("No"),
},
cols = 2,
),
S3OptionsFilter(need_field,
options = lambda: s3_get_filter_opts("dvr_need",
translate = True,
),
),
S3OptionsFilter("completed",
default = False,
options = {True: T("Yes"),
False: T("No"),
},
cols = 2,
),
]
if follow_up:
filter_widgets.extend([S3OptionsFilter("followup",
label = T("Follow-up required"),
options = {True: T("Yes"),
False: T("No"),
},
cols = 2,
hidden = True,
),
S3DateFilter("followup_date",
cols = 2,
hidden = True,
),
])
if service_type:
filter_widgets.insert(3, S3OptionsFilter("service_id"))
# Report options
axes = [need_field,
(T("Case Status"), "case_id$status_id"),
"emergency",
"completed",
]
if follow_up:
axes.insert(-1, "followup")
if service_type:
axes.insert(2, "service_id")
facts = [(T("Number of Activities"), "count(id)"),
(T("Number of Cases"), "count(case_id)"),
]
report_options = {"rows": axes,
"cols": axes,
"fact": facts,
"defaults": {"rows": need_field,
"cols": "completed",
"fact": facts[0],
"totals": True,
"chart": "barchart:rows",
},
}
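        # NB: these report defaults produce a bar chart of the number of
        #     activities per need type, broken down by completion status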
# Table configuration
configure(tablename,
filter_widgets = filter_widgets,
list_fields = list_fields,
onaccept = self.case_activity_onaccept,
onvalidation = self.case_activity_onvalidation,
orderby = "dvr_case_activity.start_date desc",
report_options = report_options,
super_entity = "doc_entity",
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Activity"),
title_display = T("Activity Details"),
title_list = T("Activities"),
title_update = T("Edit Activity"),
label_list_button = T("List Activities"),
label_delete_button = T("Delete Activity"),
msg_record_created = T("Activity added"),
msg_record_modified = T("Activity updated"),
msg_record_deleted = T("Activity deleted"),
msg_list_empty = T("No Activities currently registered"),
)
# Reusable field
represent = dvr_CaseActivityRepresent(show_link=True)
case_activity_id = S3ReusableField("case_activity_id",
"reference %s" % tablename,
ondelete = "CASCADE",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
represent,
)),
)
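        # NB: case_activity_id is reused by the dvr_case_activity_need and
        #     dvr_case_activity_update link tables below, and is returned
        #     to global scope as dvr_case_activity_id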
# ---------------------------------------------------------------------
# Case Activity <=> Needs
#
# - use this when there is a need to link Case Activities to
# multiple Needs (e.g. STL, DRKCM)
#
tablename = "dvr_case_activity_need"
define_table(tablename,
case_activity_id(empty = False,
# default
#ondelete = "CASCADE",
),
s3_date(label = T("Established on"),
default = "now",
),
human_resource_id(
label = T("Established by"),
),
self.dvr_need_id(empty = False,
),
s3_comments(),
*s3_meta_fields())
# Table configuration
configure(tablename,
orderby = "%s.date" % tablename,
)
# ---------------------------------------------------------------------
# Case Activity Update Types
#
tablename = "dvr_case_activity_update_type"
define_table(tablename,
Field("name",
label = T("Name"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Update Type"),
title_display = T("Update Type Details"),
title_list = T("Update Types"),
title_update = T("Edit Update Type"),
label_list_button = T("List Update Types"),
label_delete_button = T("Delete Update Type"),
msg_record_created = T("Update Type added"),
msg_record_modified = T("Update Type updated"),
msg_record_deleted = T("Update Type deleted"),
msg_list_empty = T("No Update Types currently defined"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
update_type_id = S3ReusableField("update_type_id",
"reference %s" % tablename,
label = T("Update Type"),
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
represent,
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Case Activity Updates
#
tablename = "dvr_case_activity_update"
define_table(tablename,
case_activity_id(),
s3_date(default = "now",
),
update_type_id(),
human_resource_id(),
s3_comments(),
*s3_meta_fields())
# Table configuration
configure(tablename,
orderby = "%s.date" % tablename,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"dvr_activity_id": activity_id,
"dvr_case_activity_id": case_activity_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False,
)
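        # The lambdas mimic the call signature of the real reusable fields,
        # so callers can use dvr_activity_id()/dvr_case_activity_id()
        # unchanged while the module is disabled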
return {"dvr_activity_id": lambda name="activity_id", **attr: \
dummy(name, **attr),
"dvr_case_activity_id": lambda name="case_activity_id", **attr: \
dummy(name, **attr),
}
# -------------------------------------------------------------------------
@staticmethod
def case_activity_status_onaccept(form):
"""
Onaccept routine for case activity statuses:
- only one status can be the default
@param form: the FORM
"""
form_vars = form.vars
try:
record_id = form_vars.id
except AttributeError:
record_id = None
if not record_id:
return
# If this status is the default, then set is_default-flag
# for all other statuses to False:
if "is_default" in form_vars and form_vars.is_default:
table = current.s3db.dvr_case_activity_status
db = current.db
db(table.id != record_id).update(is_default = False)
# -------------------------------------------------------------------------
@staticmethod
def case_activity_onvalidation(form):
"""
Validate case activity form:
- end date must be after start date
"""
T = current.T
form_vars = form.vars
try:
start = form_vars.start_date
end = form_vars.end_date
except AttributeError:
return
if start and end and end < start:
form.errors["end_date"] = T("End date must be after start date")
# -------------------------------------------------------------------------
@staticmethod
def case_activity_close_responses(case_activity_id):
"""
Close all open response actions in a case activity
@param case_activity_id: the case activity record ID
"""
db = current.db
s3db = current.s3db
rtable = s3db.dvr_response_action
stable = s3db.dvr_response_status
# Get all response actions for this case activity
# that have an open-status (or no status at all):
left = stable.on((stable.id == rtable.status_id) & \
(stable.deleted == False))
query = (rtable.case_activity_id == case_activity_id) & \
(rtable.deleted == False) & \
((stable.is_closed == False) | (stable.id == None))
rows = db(query).select(rtable.id, left=left)
if rows:
# Get the default closure status,
# (usually something like "obsolete")
query = (stable.is_default_closure == True) & \
(stable.deleted == False)
closure_status = db(query).select(stable.id,
limitby = (0, 1),
).first()
# Update all open response actions for this
# case activity to the default closure status:
if closure_status:
response_ids = set(row.id for row in rows)
query = rtable.id.belongs(response_ids)
db(query).update(status_id = closure_status.id)
# -------------------------------------------------------------------------
@classmethod
def case_activity_onaccept(cls, form):
"""
            Onaccept-callback for case activities:
- set end date when marked as completed
- close any open response actions when marked as completed
"""
db = current.db
s3db = current.s3db
settings = current.deployment_settings
# Read form data
form_vars = form.vars
if "id" in form_vars:
record_id = form_vars.id
elif hasattr(form, "record_id"):
record_id = form.record_id
else:
return
# Get current status and end_date of the record
atable = s3db.dvr_case_activity
query = (atable.id == record_id)
activity = None
is_closed = False
if settings.get_dvr_case_activity_use_status():
# Use status_id
stable = s3db.dvr_case_activity_status
left = stable.on(atable.status_id == stable.id)
row = db(query).select(atable.id,
atable.end_date,
stable.is_closed,
left = left,
limitby = (0, 1),
).first()
if row:
activity = row.dvr_case_activity
is_closed = row.dvr_case_activity_status.is_closed
else:
# Use completed-flag
row = db(query).select(atable.id,
atable.end_date,
atable.completed,
limitby = (0, 1),
).first()
if row:
activity = row
is_closed = row.completed
if not activity:
return
if is_closed:
# Cancel follow-ups for closed activities
data = {"followup": False,
"followup_date": None,
}
# Set end-date if not already set
if not activity.end_date:
data["end_date"] = current.request.utcnow.date()
activity.update_record(**data)
# Close any open response actions in this activity:
if settings.get_dvr_manage_response_actions():
cls.case_activity_close_responses(activity.id)
elif activity.end_date:
# Remove end-date if present
activity.update_record(end_date = None)
# =============================================================================
class DVRCaseEffortModel(S3Model):
""" Effort Log for Case / Case Activities """
names = ("dvr_case_effort",
)
def model(self):
T = current.T
s3 = current.response.s3
define_table = self.define_table
crud_strings = s3.crud_strings
# ---------------------------------------------------------------------
# Effort log
#
tablename = "dvr_case_effort"
define_table(tablename,
self.pr_person_id(
ondelete = "CASCADE",
),
self.dvr_case_activity_id(
ondelete = "SET NULL",
readable = False,
writable = False,
),
s3_datetime(
default = "now"
),
Field("name",
label = T("Short Description"),
),
self.hrm_human_resource_id(
comment = None,
),
Field("hours", "double",
represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v,
precision = 2,
),
requires = IS_FLOAT_AMOUNT(minimum=0.0),
widget = S3HoursWidget(precision = 2,
),
),
s3_comments(),
*s3_meta_fields())
# Table Configuration
self.configure(tablename,
onaccept = self.case_effort_onaccept,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Effort"),
title_display = T("Effort Details Details"),
title_list = T("Efforts"),
title_update = T("Edit Effort"),
label_list_button = T("List Efforts"),
label_delete_button = T("Delete Effort"),
msg_record_created = T("Effort added"),
msg_record_modified = T("Effort updated"),
msg_record_deleted = T("Effort deleted"),
msg_list_empty = T("No Efforts currently registered"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
#dummy = S3ReusableField("dummy_id", "integer",
# readable = False,
# writable = False,
# )
return {}
# -------------------------------------------------------------------------
@staticmethod
def case_effort_onaccept(form):
"""
Onaccept-callback for dvr_case_effort:
            - inherit person_id from the case activity, unless specified
              in the form or by a field default
@param form: the FORM
"""
# Read form data
formvars = form.vars
# Get the record ID
if "id" in formvars:
record_id = formvars.id
elif hasattr(form, "record_id"):
record_id = form.record_id
else:
record_id = None
if not record_id:
return
s3db = current.s3db
etable = s3db.dvr_case_effort
field = etable.person_id
if "person_id" not in formvars and not field.default:
# Inherit person_id from the case activity
atable = s3db.dvr_case_activity
query = (etable.id == record_id) & \
(atable.id == etable.case_activity_id)
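            # NB: inner join => if the effort has no linked case activity,
            #     no row is found and person_id remains unchanged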
row = current.db(query).select(etable.id,
etable.person_id,
atable.person_id,
limitby = (0, 1),
).first()
if row:
effort = row.dvr_case_effort
activity = row.dvr_case_activity
if not effort.person_id:
effort.update_record(person_id = activity.person_id)
# =============================================================================
class DVRCaseAppointmentModel(S3Model):
""" Model for Case Appointments """
names = ("dvr_case_appointment",
"dvr_case_appointment_type",
"dvr_appointment_type_id",
)
def model(self):
T = current.T
db = current.db
settings = current.deployment_settings
crud_strings = current.response.s3.crud_strings
configure = self.configure
define_table = self.define_table
mandatory_appointments = settings.get_dvr_mandatory_appointments()
update_case_status = settings.get_dvr_appointments_update_case_status()
update_last_seen_on = settings.get_dvr_appointments_update_last_seen_on()
# ---------------------------------------------------------------------
# Case Appointment Type
#
mandatory_comment = DIV(_class="tooltip",
_title="%s|%s" % (T("Mandatory Appointment"),
T("This appointment is mandatory before transfer"),
),
)
tablename = "dvr_case_appointment_type"
define_table(tablename,
Field("name", length=64, notnull=True, unique=True,
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64, minsize=1),
IS_NOT_ONE_OF(db,
"%s.name" % tablename,
),
],
),
Field("active", "boolean",
default = True,
label = T("Active"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Active Appointment"),
T("Automatically create this appointment for new cases"),
),
),
),
Field("mandatory_children", "boolean",
default = False,
label = T("Mandatory for Children"),
represent = s3_yes_no_represent,
readable = mandatory_appointments,
writable = mandatory_appointments,
comment = mandatory_comment,
),
Field("mandatory_adolescents", "boolean",
default = False,
label = T("Mandatory for Adolescents"),
represent = s3_yes_no_represent,
readable = mandatory_appointments,
writable = mandatory_appointments,
comment = mandatory_comment,
),
Field("mandatory_adults", "boolean",
default = False,
label = T("Mandatory for Adults"),
represent = s3_yes_no_represent,
readable = mandatory_appointments,
writable = mandatory_appointments,
comment = mandatory_comment,
),
Field("presence_required", "boolean",
default = True,
label = T("Presence required"),
represent = s3_yes_no_represent,
readable = update_last_seen_on,
writable = update_last_seen_on,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Presence required"),
T("This appointment requires the presence of the person concerned"),
),
),
),
self.dvr_case_status_id(
label = T("Case Status upon Completion"),
readable = update_case_status,
writable = update_case_status,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Appointment Type"),
title_display = T("Appointment Type Details"),
title_list = T("Appointment Types"),
title_update = T("Edit Appointment Type"),
label_list_button = T("List Appointment Types"),
label_delete_button = T("Delete Appointment Type"),
msg_record_created = T("Appointment Type added"),
msg_record_modified = T("Appointment Type updated"),
msg_record_deleted = T("Appointment Type deleted"),
msg_list_empty = T("No Appointment Types currently registered"),
)
# Reusable Field
represent = S3Represent(lookup=tablename, translate=True)
appointment_type_id = S3ReusableField("type_id", "reference %s" % tablename,
label = T("Appointment Type"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "dvr_case_appointment_type.id",
represent,
)),
)
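        # The reusable field attaches the FK to other tables, e.g.
        # appointment_type_id(empty = False) in dvr_case_appointment below,
        # and is returned to global scope as dvr_appointment_type_id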
# ---------------------------------------------------------------------
# Case Appointments
#
appointment_status_opts = {1: T("Planning"),
2: T("Planned"),
3: T("In Progress"),
4: T("Completed"),
5: T("Missed"),
6: T("Cancelled"),
7: T("Not Required"),
}
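        # NB: status 4 ("Completed") is what case_appointment_onaccept
        #     checks for (by string comparison) to update the case status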
tablename = "dvr_case_appointment"
define_table(tablename,
self.dvr_case_id(comment = None,
# @ToDo: Populate this onaccept from imports
#empty = False,
label = T("Case Number"),
ondelete = "CASCADE",
writable = False,
),
# Beneficiary (component link):
# @todo: populate from case and hide in case perspective
self.pr_person_id(comment = None,
empty = False,
ondelete = "CASCADE",
writable = False,
),
appointment_type_id(empty = False,
),
s3_date(label = T("Planned on"),
),
# Activate in template as needed:
self.hrm_human_resource_id(readable=False,
writable=False,
),
Field("status", "integer",
default = 1, # Planning
requires = IS_IN_SET(appointment_status_opts,
zero = None,
),
represent = S3Represent(options = appointment_status_opts,
),
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Appointment"),
title_display = T("Appointment Details"),
title_list = T("Appointments"),
title_update = T("Edit Appointment"),
label_list_button = T("List Appointments"),
label_delete_button = T("Delete Appointment"),
msg_record_created = T("Appointment added"),
msg_record_modified = T("Appointment updated"),
msg_record_deleted = T("Appointment deleted"),
msg_list_empty = T("No Appointments currently registered"),
)
# Custom methods
self.set_method("dvr", "case_appointment",
method = "manage",
action = DVRManageAppointments,
)
configure(tablename,
deduplicate = S3Duplicate(primary = ("person_id",
"type_id",
),
),
onaccept = self.case_appointment_onaccept,
ondelete = self.case_appointment_ondelete,
onvalidation = self.case_appointment_onvalidation,
)
# @todo: onaccept to change status "planning" to "planned" if a date
# has been entered, and vice versa
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"dvr_appointment_status_opts": appointment_status_opts,
"dvr_appointment_type_id": appointment_type_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False,
)
return {"dvr_appointment_status_opts": {},
"dvr_appointment_type_id": lambda name="type_id", **attr: \
dummy(name, **attr),
}
# -------------------------------------------------------------------------
@staticmethod
def case_appointment_onvalidation(form):
"""
Validate appointment form
- Future appointments can not be set to completed
- Undated appointments can not be set to completed
@param form: the FORM
"""
formvars = form.vars
date = formvars.get("date")
status = formvars.get("status")
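        # Status 4 = "Completed" (see appointment_status_opts in model)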
if str(status) == "4":
if date is None:
form.errors["date"] = current.T("Date is required when marking the appointment as completed")
elif date > current.request.utcnow.date():
form.errors["status"] = current.T("Appointments with future dates can not be marked as completed")
# -------------------------------------------------------------------------
@staticmethod
def case_appointment_onaccept(form):
"""
Actions after creating/updating appointments
- Update last_seen_on in the corresponding case(s)
- Update the case status if configured to do so
@param form: the FORM
"""
# Read form data
formvars = form.vars
if "id" in formvars:
record_id = formvars.id
elif hasattr(form, "record_id"):
record_id = form.record_id
else:
record_id = None
if not record_id:
return
db = current.db
s3db = current.s3db
settings = current.deployment_settings
table = s3db.dvr_case_appointment
person_id = formvars.get("person_id")
case_id = formvars.get("case_id")
if not person_id or not case_id:
row = db(table.id == record_id).select(table.case_id,
table.person_id,
limitby = (0, 1),
).first()
if row:
person_id = row.person_id
case_id = row.case_id
if settings.get_dvr_appointments_update_last_seen_on() and person_id:
# Update last_seen_on
dvr_update_last_seen(person_id)
# Update the case status if appointment is completed
# NB appointment status "completed" must be set by this form
if settings.get_dvr_appointments_update_case_status() and \
s3_str(formvars.get("status")) == "4":
# Get the case status to be set when appointment is completed
ttable = s3db.dvr_case_appointment_type
query = (table.id == record_id) & \
(table.deleted != True) & \
(ttable.id == table.type_id) & \
(ttable.status_id != None)
row = db(query).select(table.date,
ttable.status_id,
limitby = (0, 1),
).first()
if row:
# Check whether there is a later appointment that
# would have set a different case status (we don't
# want to override this when closing appointments
                # retrospectively):
date = row.dvr_case_appointment.date
if not date:
# Assume today if no date given
date = current.request.utcnow.date()
status_id = row.dvr_case_appointment_type.status_id
query = (table.person_id == person_id)
if case_id:
query &= (table.case_id == case_id)
query &= (table.date != None) & \
(table.status == 4) & \
(table.date > date) & \
(table.deleted != True) & \
(ttable.id == table.type_id) & \
(ttable.status_id != None) & \
(ttable.status_id != status_id)
later = db(query).select(table.id, limitby = (0, 1)).first()
if later:
status_id = None
else:
status_id = None
if status_id:
# Update the corresponding case(s)
# NB appointments without case_id update all cases for the person
ctable = s3db.dvr_case
stable = s3db.dvr_case_status
query = (ctable.person_id == person_id) & \
(ctable.archived != True) & \
(ctable.deleted != True) & \
(stable.id == ctable.status_id) & \
(stable.is_closed != True)
if case_id:
query &= (ctable.id == case_id)
cases = db(query).select(ctable.id,
ctable.person_id,
ctable.archived,
)
has_permission = current.auth.s3_has_permission
for case in cases:
if has_permission("update", ctable, record_id=case.id):
# Customise case resource
r = S3Request("dvr", "case",
current.request,
args = [],
get_vars = {},
)
r.customise_resource("dvr_case")
# Update case status + run onaccept
case.update_record(status_id = status_id)
s3db.onaccept(ctable, case, method="update")
# -------------------------------------------------------------------------
@staticmethod
def case_appointment_ondelete(row):
"""
Actions after deleting appointments
- Update last_seen_on in the corresponding case(s)
@param row: the deleted Row
"""
if current.deployment_settings.get_dvr_appointments_update_last_seen_on():
# Get the deleted keys
table = current.s3db.dvr_case_appointment
row = current.db(table.id == row.id).select(table.deleted_fk,
limitby = (0, 1),
).first()
if row and row.deleted_fk:
# Get the person ID
try:
deleted_fk = json.loads(row.deleted_fk)
except (ValueError, TypeError):
person_id = None
else:
person_id = deleted_fk.get("person_id")
# Update last_seen_on
if person_id:
dvr_update_last_seen(person_id)
# =============================================================================
class DVRHouseholdModel(S3Model):
"""
Model to document the household situation of a case
        - used by STL (DRK use pr_group_membership, SCPHIMS use DVRHouseholdMembersModel)
"""
names = ("dvr_household",
"dvr_beneficiary_type",
"dvr_beneficiary_data",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
configure = self.configure
define_table = self.define_table
# ---------------------------------------------------------------------
tablename = "dvr_household"
define_table(tablename,
# Main Beneficiary (component link):
# @todo: populate from case and hide in case perspective
self.pr_person_id(empty = False,
ondelete = "CASCADE",
),
Field("hoh_name",
label = T("Head of Household Name"),
),
self.pr_gender("hoh_gender",
label = T("Head of Household Gender"),
),
s3_date("hoh_date_of_birth",
label = T("Head of Household Date of Birth"),
future = 0,
past = 1320,
),
Field("hoh_relationship",
label = T("Head of Household Relationship"),
),
s3_comments(),
*s3_meta_fields())
# Components
self.add_components(tablename,
dvr_beneficiary_data = "household_id",
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Household Details"),
title_display = T("Household Details"),
title_list = T("Household Details"),
title_update = T("Edit Household Details"),
label_list_button = T("List Household Details"),
label_delete_button = T("Delete Household Details"),
msg_record_created = T("Household Details added"),
msg_record_modified = T("Household Details updated"),
msg_record_deleted = T("Household Details deleted"),
msg_list_empty = T("No Household Details currently registered"),
)
# Reusable field
household_id = S3ReusableField("household_id", "reference %s" % tablename,
ondelete = "CASCADE",
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
)),
)
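        # NB: household_id links the beneficiary data records below to
        #     this household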
# ---------------------------------------------------------------------
# Beneficiary Types (e.g. Age Groups)
#
tablename = "dvr_beneficiary_type"
define_table(tablename,
Field("name",
label = T("Type"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_BENEFICIARY_TYPE = T("Create Beneficiary Type")
crud_strings[tablename] = Storage(
label_create = ADD_BENEFICIARY_TYPE,
title_display = T("Beneficiary Type"),
title_list = T("Beneficiary Types"),
title_update = T("Edit Beneficiary Type"),
label_list_button = T("List Beneficiary Types"),
label_delete_button = T("Delete Beneficiary Type"),
msg_record_created = T("Beneficiary Type added"),
msg_record_modified = T("Beneficiary Type updated"),
msg_record_deleted = T("Beneficiary Type deleted"),
msg_list_empty = T("No Beneficiary Types currently registered")
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
beneficiary_type_id = S3ReusableField("beneficiary_type_id", "reference %s" % tablename,
label = T("Beneficiary Type"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "dvr_beneficiary_type.id",
represent)),
)
# ---------------------------------------------------------------------
# Beneficiary data
#
show_third_gender = not current.deployment_settings.get_pr_hide_third_gender()
int_represent = lambda v: str(v) if v is not None else "-"
tablename = "dvr_beneficiary_data"
define_table(tablename,
household_id(),
beneficiary_type_id(),
Field("total", "integer",
label = T("Number of Beneficiaries"),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
represent = int_represent,
# Expose in templates when not using per-gender fields
readable = False,
writable = False,
),
Field("female", "integer",
label = T("Number Female"),
represent = int_represent,
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
),
Field("male", "integer",
label = T("Number Male"),
represent = int_represent,
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
),
Field("other", "integer",
label = T("Number Other Gender"),
represent = int_represent,
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
readable = show_third_gender,
writable = show_third_gender,
),
Field("in_school", "integer",
label = T("Number in School"),
represent = int_represent,
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
),
Field("out_of_school", "integer",
label = T("Number out of School"),
represent = int_represent,
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
),
Field("employed", "integer",
label = T("Number Employed"),
represent = int_represent,
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Add Beneficiary Data"),
title_display = T("Beneficiary Data"),
title_list = T("Beneficiary Data"),
title_update = T("Edit Beneficiary Data"),
label_list_button = T("List Beneficiary Data"),
label_delete_button = T("Delete Beneficiary Data"),
msg_record_created = T("Beneficiary Data added"),
msg_record_modified = T("Beneficiary Data updated"),
msg_record_deleted = T("Beneficiary Data deleted"),
msg_list_empty = T("No Beneficiary Data currently registered"),
)
# List fields
list_fields = ["beneficiary_type_id",
"female",
"male",
"in_school",
"employed",
"comments",
]
if show_third_gender:
list_fields.insert(3, "other")
# Table configuration
configure(tablename,
list_fields = list_fields,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"dvr_beneficiary_type_id": beneficiary_type_id,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
dummy = S3ReusableField("dummy_id", "integer",
readable = False,
writable = False,
)
return {"dvr_beneficiary_type_id": lambda name="beneficiary_type_id", **attr: \
dummy(name, **attr),
}
# =============================================================================
class DVRHouseholdMembersModel(S3Model):
"""
Model to document the household situation of a case
- used by SCPHIMS (DRK use pr_group_membership, STL use DVRHouseholdModel)
"""
names = ("dvr_household_member",
)
def model(self):
T = current.T
# ---------------------------------------------------------------------
tablename = "dvr_household_member"
self.define_table(tablename,
self.pr_person_id(empty = False,
label = T("Head of Household"),
ondelete = "CASCADE",
),
Field("age", "integer",
label = T("Age"),
requires = IS_INT_IN_RANGE(0, 150),
),
self.pr_gender("gender",
#label = T("Gender"),
),
Field("disabled", "boolean",
label = T("Disabled"),
represent = s3_yes_no_represent,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Household Member"),
title_display = T("Household Member"),
title_list = T("Household Members"),
title_update = T("Edit Household Member"),
label_list_button = T("List Household Members"),
label_delete_button = T("Delete Household Member"),
msg_record_created = T("Household Member added"),
msg_record_modified = T("Household Member updated"),
msg_record_deleted = T("Household Member deleted"),
msg_list_empty = T("No Household Members currently registered"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class DVRCaseEconomyInformationModel(S3Model):
""" Model for Household Economy Information """
names = ("dvr_economy",
"dvr_income_source",
"dvr_income_source_economy",
"dvr_housing_type",
)
def model(self):
T = current.T
db = current.db
crud_strings = current.response.s3.crud_strings
configure = self.configure
define_table = self.define_table
float_represent = lambda v: \
IS_FLOAT_AMOUNT.represent(v, precision=2)
# ---------------------------------------------------------------------
# Housing Types
#
tablename = "dvr_housing_type"
define_table(tablename,
Field("name",
label = T("Type"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_HOUSING_TYPE = T("Create Housing Type")
crud_strings[tablename] = Storage(
label_create = ADD_HOUSING_TYPE,
title_display = T("Housing Type"),
title_list = T("Housing Types"),
title_update = T("Edit Housing Type"),
label_list_button = T("List Housing Types"),
label_delete_button = T("Delete Housing Type"),
msg_record_created = T("Housing Type added"),
msg_record_modified = T("Housing Type updated"),
msg_record_deleted = T("Housing Type deleted"),
msg_list_empty = T("No Housing Types currently defined")
)
# Represent for reference
housing_type_represent = S3Represent(lookup = "dvr_housing_type",
translate = True,
)
# ---------------------------------------------------------------------
# Income sources
#
tablename = "dvr_income_source"
define_table(tablename,
Field("name",
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
ADD_INCOME_SOURCE = T("Create Income Source")
crud_strings[tablename] = Storage(
label_create = ADD_INCOME_SOURCE,
title_display = T("Income Source"),
title_list = T("Income Sources"),
title_update = T("Edit Income Source"),
label_list_button = T("List Income Sources"),
label_delete_button = T("Delete Income Source"),
msg_record_created = T("Income Source added"),
msg_record_modified = T("Income Source updated"),
msg_record_deleted = T("Income Source deleted"),
msg_list_empty = T("No Income Sources currently defined")
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
income_source_id = S3ReusableField("income_source_id", "reference %s" % tablename,
label = T("Income Source"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"dvr_income_source.id",
represent,
)),
)
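        # NB: income_source_id is used by the dvr_income_source_economy
        #     link table defined further below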
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(),
)
# ---------------------------------------------------------------------
# Household Economy Information
#
tablename = "dvr_economy"
define_table(tablename,
# Beneficiary (component link):
# @todo: populate from case and hide in case perspective
self.pr_person_id(empty = False,
ondelete = "CASCADE",
),
self.dvr_case_id(empty = False,
label = T("Case Number"),
ondelete = "CASCADE",
),
FieldS3("housing_type_id", "reference dvr_housing_type",
label = T("Housing Type"),
represent = housing_type_represent,
requires = IS_EMPTY_OR(IS_ONE_OF(
db, "dvr_housing_type.id",
housing_type_represent,
)),
sortby = "name",
comment = S3PopupLink(c = "dvr",
f = "housing_type",
title = ADD_HOUSING_TYPE,
tooltip = T("Choose the housing type from the drop-down, or click the link to create a new type"),
),
),
Field("monthly_costs", "double",
label = T("Monthly Costs"),
represent = float_represent,
requires = IS_EMPTY_OR(IS_FLOAT_AMOUNT(minimum=0.0)),
),
Field("average_weekly_income", "double",
label = T("Average Weekly Income"),
represent = float_represent,
requires = IS_EMPTY_OR(IS_FLOAT_AMOUNT(minimum=0.0)),
),
Field("monthly_income", "double",
label = T("Average Monthly Income"),
represent = float_represent,
requires = IS_EMPTY_OR(IS_FLOAT_AMOUNT(minimum=0.0)),
),
s3_currency(),
s3_comments(),
*s3_meta_fields())
# Components
self.add_components(tablename,
dvr_income_source = {"link": "dvr_income_source_economy",
"joinby": "economy_id",
"key": "income_source_id",
"actuate": "link",
"autodelete": False,
},
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Economy Information"),
title_display = T("Economy Information"),
title_list = T("Economy Information"),
title_update = T("Edit Economy Information"),
label_list_button = T("List Economy Information"),
label_delete_button = T("Delete Economy Information"),
msg_record_created = T("Economy Information added"),
msg_record_modified = T("Economy Information updated"),
msg_record_deleted = T("Economy Information deleted"),
msg_list_empty = T("No Economy Information currently registered"),
)
# CRUD Form
crud_form = S3SQLCustomForm("housing_type_id",
"monthly_costs",
#"average_weekly_income",
"monthly_income",
"currency",
S3SQLInlineLink("income_source",
field = "income_source_id",
label = T("Income Sources"),
cols = 3,
),
"comments",
)
# List fields
list_fields = ["housing_type_id",
"monthly_costs",
"income_source_economy.income_source_id",
#"average_weekly_income",
"monthly_income",
"comments",
]
# Table configuration
configure(tablename,
crud_form = crud_form,
list_fields = list_fields,
)
# ---------------------------------------------------------------------
# Link table Economy Information <=> Income Sources
#
tablename = "dvr_income_source_economy"
define_table(tablename,
Field("economy_id", "reference dvr_economy",
ondelete = "CASCADE",
requires = IS_ONE_OF(db, "dvr_economy.id"),
),
income_source_id(),
s3_comments(),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
return {}
# =============================================================================
class DVRLegalStatusModel(S3Model):
""" Models to document the legal status of a beneficiary """
names = ("dvr_residence_status_type",
"dvr_residence_permit_type",
"dvr_residence_status",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
define_table = self.define_table
crud_strings = s3.crud_strings
# ---------------------------------------------------------------------
# Residence Status Types
#
tablename = "dvr_residence_status_type"
define_table(tablename,
Field("name",
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# Table Configuration
self.configure(tablename,
deduplicate = S3Duplicate(),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Residence Status Type"),
title_display = T("Residence Status Type Details"),
title_list = T("Residence Status Types"),
title_update = T("Edit Residence Status Type"),
label_list_button = T("List Residence Status Types"),
label_delete_button = T("Delete Residence Status Type"),
msg_record_created = T("Residence Status Type created"),
msg_record_modified = T("Residence Status Type updated"),
msg_record_deleted = T("Residence Status Type deleted"),
msg_list_empty = T("No Residence Status Types currently defined"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
status_type_id = S3ReusableField("status_type_id",
"reference %s" % tablename,
label = T("Residence Status"),
represent = represent,
requires = IS_EMPTY_OR(IS_ONE_OF(
db, "%s.id" % tablename,
represent,
)),
sortby = "name",
comment = S3PopupLink(
c="dvr",
f="residence_status_type",
tooltip=T("Create a new status type"),
),
)
# ---------------------------------------------------------------------
# Residence Permit Types
#
tablename = "dvr_residence_permit_type"
define_table(tablename,
Field("name",
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# Table Configuration
self.configure(tablename,
deduplicate = S3Duplicate(),
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Residence Permit Type"),
title_display = T("Residence Permit Type Details"),
title_list = T("Residence Permit Types"),
title_update = T("Edit Residence Permit Type"),
label_list_button = T("List Residence Permit Types"),
label_delete_button = T("Delete Residence Permit Type"),
msg_record_created = T("Residence Permit Type created"),
msg_record_modified = T("Residence Permit Type updated"),
msg_record_deleted = T("Residence Permit Type deleted"),
msg_list_empty = T("No Residence Permit Types currently defined"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
permit_type_id = S3ReusableField("permit_type_id",
"reference %s" % tablename,
label = T("Residence Permit Type"),
represent = represent,
requires = IS_EMPTY_OR(IS_ONE_OF(
db, "%s.id" % tablename,
represent,
)),
sortby = "name",
comment = S3PopupLink(
c="dvr",
f="residence_permit_type",
tooltip=T("Create a new permit type"),
),
)
# ---------------------------------------------------------------------
# Residence Status
#
tablename = "dvr_residence_status"
define_table(tablename,
self.pr_person_id(),
status_type_id(),
permit_type_id(),
Field("reference",
label = T("ID/Ref.No."),
),
s3_date("valid_from",
label = T("Valid From"),
),
s3_date("valid_until",
label = T("Valid Until"),
),
#Field("obsolete", "boolean",
# default = False,
# ),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Residence Status"),
title_display = T("Residence Status Details"),
title_list = T("Residence Statuses"),
title_update = T("Edit Residence Status"),
label_list_button = T("List Residence Statuses"),
label_delete_button = T("Delete Residence Status"),
msg_record_created = T("Residence Status created"),
msg_record_modified = T("Residence Status updated"),
msg_record_deleted = T("Residence Status deleted"),
msg_list_empty = T("No Residence Statuses currently defined"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
#dummy = S3ReusableField("dummy_id", "integer",
# readable = False,
# writable = False,
# )
return {}
# =============================================================================
class DVRCaseAllowanceModel(S3Model):
""" Model for Allowance Management """
names = ("dvr_allowance",
)
def model(self):
T = current.T
crud_strings = current.response.s3.crud_strings
configure = self.configure
define_table = self.define_table
set_method = self.set_method
# ---------------------------------------------------------------------
# Allowance Information
#
allowance_status_opts = {1: T("pending"),
2: T("paid"),
3: T("refused"),
4: T("missed"),
}
amount_represent = lambda v: IS_FLOAT_AMOUNT.represent(v,
precision = 2,
fixed = True,
)
tablename = "dvr_allowance"
define_table(tablename,
# Beneficiary (component link):
# @todo: populate from case and hide in case perspective
self.pr_person_id(comment = None,
empty = False,
ondelete = "CASCADE",
),
self.dvr_case_id(# @ToDo: Populate this onaccept from imports
#empty = False,
label = T("Case Number"),
ondelete = "CASCADE",
),
s3_date("entitlement_period",
label = T("Entitlement Period"),
),
s3_date(default="now",
label = T("Planned on"),
),
s3_datetime("paid_on",
label = T("Paid on"),
future = 0,
),
Field("amount", "double",
label = T("Amount"),
requires = IS_EMPTY_OR(IS_FLOAT_AMOUNT(minimum=0.0)),
represent = amount_represent,
),
s3_currency(),
Field("status", "integer",
default = 1, # pending
requires = IS_IN_SET(allowance_status_opts,
zero = None,
),
represent = S3Represent(options=allowance_status_opts,
),
widget = S3GroupedOptionsWidget(cols = 4,
multiple = False,
),
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Allowance Information"),
title_display = T("Allowance Information"),
title_list = T("Allowance Information"),
title_update = T("Edit Allowance Information"),
label_list_button = T("List Allowance Information"),
label_delete_button = T("Delete Allowance Information"),
msg_record_created = T("Allowance Information added"),
msg_record_modified = T("Allowance Information updated"),
msg_record_deleted = T("Allowance Information deleted"),
msg_list_empty = T("No Allowance Information currently registered"),
)
# Custom list fields
list_fields = ["person_id",
"entitlement_period",
"date",
"currency",
"amount",
"status",
"paid_on",
"comments",
]
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(primary = ("person_id",
"entitlement_period",
),
),
list_fields = list_fields,
onaccept = self.allowance_onaccept,
ondelete = self.allowance_ondelete,
onvalidation = self.allowance_onvalidation,
)
set_method("dvr", "allowance",
method = "register",
action = DVRRegisterPayment,
)
set_method("dvr", "allowance",
method = "manage",
action = DVRManageAllowance,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {"dvr_allowance_status_opts": allowance_status_opts,
}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
return {"dvr_allowance_status_opts": {},
}
# -------------------------------------------------------------------------
@staticmethod
def allowance_onvalidation(form):
"""
Validate allowance form
- Status paid requires paid_on date
@param form: the FORM
"""
formvars = form.vars
date = formvars.get("paid_on")
status = formvars.get("status")
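        # Status 2 = "paid" (see allowance_status_opts in model)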
if str(status) == "2" and not date:
form.errors["paid_on"] = current.T("Date of payment required")
# -------------------------------------------------------------------------
@staticmethod
def allowance_onaccept(form):
"""
Actions after creating/updating allowance information
- update last_seen_on
"""
if current.deployment_settings.get_dvr_payments_update_last_seen_on():
# Read form data
form_vars = form.vars
if "id" in form_vars:
record_id = form_vars.id
elif hasattr(form, "record_id"):
record_id = form.record_id
else:
record_id = None
if not record_id:
return
if current.response.s3.bulk and "status" not in form_vars:
# Import without status change won't affect last_seen_on,
# so we can skip this check for better performance
return
# Get the person ID
table = current.s3db.dvr_allowance
row = current.db(table.id == record_id).select(table.person_id,
limitby = (0, 1),
).first()
# Update last_seen_on
if row:
dvr_update_last_seen(row.person_id)
# -------------------------------------------------------------------------
@staticmethod
def allowance_ondelete(row):
"""
Actions after deleting allowance information
- Update last_seen_on in the corresponding case(s)
@param row: the deleted Row
"""
if current.deployment_settings.get_dvr_payments_update_last_seen_on():
# Get the deleted keys
table = current.s3db.dvr_allowance
row = current.db(table.id == row.id).select(table.deleted_fk,
limitby = (0, 1),
).first()
if row and row.deleted_fk:
# Get the person ID
try:
deleted_fk = json.loads(row.deleted_fk)
except (ValueError, TypeError):
person_id = None
else:
person_id = deleted_fk.get("person_id")
# Update last_seen_on
if person_id:
dvr_update_last_seen(person_id)
# =============================================================================
class DVRCaseEventModel(S3Model):
""" Model representing monitoring events for cases """
names = ("dvr_case_event_type",
"dvr_case_event",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
crud_strings = s3.crud_strings
configure = self.configure
define_table = self.define_table
# ---------------------------------------------------------------------
# Case Event Types
#
role_table = str(current.auth.settings.table_group)
role_represent = S3Represent(lookup=role_table, fields=("role",))
close_appointments = settings.get_dvr_case_events_close_appointments()
tablename = "dvr_case_event_type"
define_table(tablename,
Field("code", notnull=True, length=64, unique=True,
label = T("Code"),
requires = [IS_NOT_EMPTY(),
IS_LENGTH(64, minsize=1),
IS_NOT_ONE_OF(db,
"dvr_case_event_type.code",
),
],
),
Field("name",
label = T("Name"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
Field("is_inactive", "boolean",
default = False,
label = T("Inactive"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Inactive"),
T("This event type can not currently be registered"),
),
),
),
Field("is_default", "boolean",
default = False,
label = T("Default Event Type"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Default Event Type"),
T("Assume this event type if no type was specified for an event"),
),
),
),
Field("role_required", "reference %s" % role_table,
label = T("User Role Required"),
ondelete = "SET NULL",
represent = role_represent,
requires = IS_EMPTY_OR(IS_ONE_OF(db,
"%s.id" % role_table,
role_represent,
)),
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("User Role Required"),
T("User role required to register events of this type"),
),
),
),
self.dvr_appointment_type_id(
"appointment_type_id",
label = T("Appointment Type"),
readable = close_appointments,
writable = close_appointments,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Appointment Type"),
T("The type of appointments which are completed with this type of event"),
),
),
),
Field("min_interval", "double",
label = T("Minimum Interval (Hours)"),
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Minimum Interval (Hours)"),
T("Minimum interval between two consecutive registrations of this event type for the same person"),
),
),
requires = IS_EMPTY_OR(IS_FLOAT_IN_RANGE(0.0, None)),
),
Field("max_per_day", "integer",
label = T("Maximum Number per Day"),
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Maximum Number per Day"),
T("Maximum number of occurences of this event type for the same person on the same day"),
),
),
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
),
Field("presence_required", "boolean",
default = True,
label = T("Presence required"),
represent = s3_yes_no_represent,
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Presence required"),
T("This event type requires the presence of the person concerned"),
),
),
),
s3_comments(),
*s3_meta_fields())
# Components
self.add_components(tablename,
dvr_case_event = {"name": "excluded_by",
"link": "dvr_case_event_exclusion",
"joinby": "type_id",
"key": "excluded_by_id",
},
)
# Table Configuration
configure(tablename,
onaccept = self.case_event_type_onaccept,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Event Type"),
title_display = T("Event Type Details"),
title_list = T("Event Types"),
title_update = T("Edit Event Type"),
label_list_button = T("List Event Types"),
label_delete_button = T("Delete Event Type"),
msg_record_created = T("Event Type created"),
msg_record_modified = T("Event Type updated"),
msg_record_deleted = T("Event Type deleted"),
msg_list_empty = T("No Event Types currently defined"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
event_type_id = S3ReusableField("type_id", "reference %s" % tablename,
label = T("Event Type"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_ONE_OF(db, "%s.id" % tablename,
represent,
),
sortby = "name",
comment = S3PopupLink(c = "dvr",
f = "case_event_type",
tooltip = T("Create a new event type"),
),
)
# ---------------------------------------------------------------------
# Case Event Types, Impermissible Combinations
#
tablename = "dvr_case_event_exclusion"
define_table(tablename,
event_type_id(comment = None,
ondelete = "CASCADE",
),
event_type_id("excluded_by_id",
comment = None,
label = T("Not Combinable With"),
ondelete = "CASCADE",
),
*s3_meta_fields())
# Table Configuration
configure(tablename,
deduplicate = S3Duplicate(primary = ("type_id",
"excluded_by_id",
),
),
)
# ---------------------------------------------------------------------
# Case Events
#
tablename = "dvr_case_event"
define_table(tablename,
self.dvr_case_id(comment = None,
empty = False,
label = T("Case Number"),
ondelete = "CASCADE",
readable = False,
writable = False,
),
# Beneficiary (component link):
# @todo: populate from case and hide in case perspective
self.pr_person_id(comment = None,
empty = False,
ondelete = "CASCADE",
writable = False,
),
event_type_id(comment = None,
ondelete = "CASCADE",
# Not user-writable as this is for automatic
# event registration, override in template if
# required:
writable = False,
),
s3_datetime(label = T("Date/Time"),
default = "now",
empty = False,
future = 0,
writable = False,
),
# Field for quantitative recording of case events
# for statistical purposes (without linking them to
# individual cases)
Field("quantity", "integer",
label = T("Quantity"),
default = 1,
requires = IS_EMPTY_OR(IS_INT_IN_RANGE(0, None)),
# activate in template as required
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Event"),
title_display = T("Event Details"),
title_list = T("Events"),
title_update = T("Edit Event"),
label_list_button = T("List Events"),
label_delete_button = T("Delete Event"),
msg_record_created = T("Event added"),
msg_record_modified = T("Event updated"),
msg_record_deleted = T("Event deleted"),
msg_list_empty = T("No Events currently registered"),
)
# Filter Widgets
filter_widgets = [S3TextFilter(["person_id$pe_label",
"person_id$first_name",
"person_id$middle_name",
"person_id$last_name",
"created_by$email",
"comments",
],
label = T("Search"),
),
S3OptionsFilter("type_id",
options = lambda: s3_get_filter_opts("dvr_case_event_type",
translate = True,
),
),
S3DateFilter("date"),
]
# Table Configuration
configure(tablename,
create_onaccept = self.case_event_create_onaccept,
deduplicate = S3Duplicate(primary = ("person_id",
"type_id",
),
),
filter_widgets = filter_widgets,
# Not user-insertable as this is for automatic
# event registration, override in template if
# required:
insertable = False,
list_fields = ["person_id",
"date",
"type_id",
(T("Registered by"), "created_by"),
"comments",
],
ondelete = self.case_event_ondelete,
orderby = "%s.date desc" % tablename,
)
# Custom method for event registration
self.set_method("dvr", "case_event",
method = "register",
action = DVRRegisterCaseEvent,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
return {}
# -------------------------------------------------------------------------
@staticmethod
def case_event_type_onaccept(form):
"""
Onaccept routine for case event types:
- only one type can be the default
@param form: the FORM
"""
form_vars = form.vars
try:
record_id = form_vars.id
except AttributeError:
record_id = None
if not record_id:
return
# If this type is the default, then set is_default-flag
# for all other types to False:
if "is_default" in form_vars and form_vars.is_default:
table = current.s3db.dvr_case_event_type
db = current.db
db(table.id != record_id).update(is_default = False)
# -------------------------------------------------------------------------
@staticmethod
def case_event_create_onaccept(form):
"""
Actions after creation of a case event:
- update last_seen_on in the corresponding cases
- close appointments if configured to do so
@param form: the FORM
"""
formvars = form.vars
try:
record_id = formvars.id
except AttributeError:
record_id = None
if not record_id:
return
db = current.db
s3db = current.s3db
close_appointments = current.deployment_settings \
.get_dvr_case_events_close_appointments()
case_id = formvars.get("case_id")
person_id = formvars.get("person_id")
type_id = formvars.get("type_id")
        if not person_id or not type_id or \
           (close_appointments and not case_id):
# Reload the record
table = s3db.dvr_case_event
row = db(table.id == record_id).select(table.case_id,
table.person_id,
table.type_id,
limitby = (0, 1),
).first()
if not row:
return
case_id = row.case_id
person_id = row.person_id
type_id = row.type_id
if not person_id:
return
# Get the event type
ttable = s3db.dvr_case_event_type
query = (ttable.id == type_id) & \
(ttable.deleted == False)
event_type = db(query).select(ttable.presence_required,
ttable.appointment_type_id,
limitby = (0, 1),
).first()
if not event_type:
return
# Update last_seen (if event type requires personal presence)
if event_type.presence_required:
dvr_update_last_seen(person_id)
# Close appointments
appointment_type_id = event_type.appointment_type_id
if close_appointments and appointment_type_id:
today = current.request.utcnow.date()
atable = s3db.dvr_case_appointment
query = (atable.type_id == appointment_type_id) & \
(atable.person_id == person_id) & \
((atable.date == None) | (atable.date <= today)) & \
(atable.deleted == False)
if case_id:
query &= (atable.case_id == case_id) | \
(atable.case_id == None)
rows = db(query).select(atable.id,
atable.date,
atable.status,
orderby = ~atable.date,
)
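            # An event of this type completes the appointment (status 4):
            # if no appointment of this type exists yet, a new closed
            # appointment is created; otherwise prefer updating an
            # appointment that is still open today, falling back to an
            # undated or the most recent still-open appointment; if today's
            # appointment is already closed, nothing is changed.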
data = {"date": today, "status": 4}
if not rows:
# No appointment of this type yet
# => create a new closed appointment
data["type_id"] = appointment_type_id
data["person_id"] = person_id
data["case_id"] = case_id
aresource = s3db.resource("dvr_case_appointment")
try:
record_id = aresource.insert(**data)
except S3PermissionError:
current.log.error("Event Registration: %s" % sys.exc_info()[1])
else:
update = None
# Find key dates
undated = open_today = closed_today = previous = None
for row in rows:
if row.date is None:
if not undated:
# An appointment without date
undated = row
elif row.date == today:
if row.status != 4:
# An open or cancelled appointment today
open_today = row
else:
# A closed appointment today
closed_today = row
elif previous is None:
# The last appointment before today
previous = row
if open_today:
# If we have an open appointment for today, update it
update = open_today
elif closed_today:
# If we already have a closed appointment for today,
# do nothing
update = None
elif previous:
if previous.status not in (1, 2, 3):
# Last appointment before today is closed
# => create a new one unless there is an undated one
if undated:
update = undated
else:
# Last appointment before today is still open
# => update it
update = previous
else:
update = undated
if update:
# Update the appointment
permitted = current.auth.s3_has_permission("update",
atable,
record_id=update.id,
)
if permitted:
# Customise appointment resource
r = S3Request("dvr", "case_appointment",
current.request,
args = [],
get_vars = {},
)
r.customise_resource("dvr_case_appointment")
# Update appointment
success = update.update_record(**data)
if success:
data["id"] = update.id
s3db.onaccept(atable, data, method="update")
else:
current.log.error("Event Registration: could not update appointment %s" % update.id)
else:
current.log.error("Event registration: not permitted to update appointment %s" % update.id)
# -------------------------------------------------------------------------
@staticmethod
def case_event_ondelete(row):
"""
Actions after deleting a case event:
- update last_seen_on in the corresponding cases
@param row: the deleted Row
"""
# Get the deleted keys
table = current.s3db.dvr_case_event
row = current.db(table.id == row.id).select(table.deleted_fk,
limitby = (0, 1),
).first()
if row and row.deleted_fk:
# Get the person ID
try:
deleted_fk = json.loads(row.deleted_fk)
except (ValueError, TypeError):
person_id = None
else:
person_id = deleted_fk.get("person_id")
# Update last_seen_on
if person_id:
dvr_update_last_seen(person_id)
# =============================================================================
class DVRCaseEvaluationModel(S3Model):
"""
Evaluation of Cases
- Flexible Questions (Dynamic Data Model)
"""
names = ("dvr_evaluation_question",
"dvr_evaluation",
"dvr_evaluation_data",
)
def model(self):
T = current.T
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# Questions
#
tablename = "dvr_evaluation_question"
define_table(tablename,
Field("section",
label = T("Section"),
),
#Field("header",
# label = T("Header"),
# ),
Field("number", "integer",
label = T("Number"),
),
Field("name",
label = T("Question"),
),
*s3_meta_fields()
)
crud_strings[tablename] = Storage(
label_create = T("Create Question"),
title_display = T("Question Details"),
title_list = T("Questions"),
title_update = T("Edit Question"),
label_list_button = T("List Questions"),
label_delete_button = T("Delete Question"),
msg_record_created = T("Question added"),
msg_record_modified = T("Question updated"),
msg_record_deleted = T("Question removed"),
msg_list_empty = T("No Questions currently registered"))
# ---------------------------------------------------------------------
# Case Evaluations
#
tablename = "dvr_evaluation"
define_table(tablename,
# Beneficiary (component link):
# @todo: populate from case and hide in case perspective
self.pr_person_id(empty = False,
ondelete = "CASCADE",
),
self.dvr_case_id(empty = False,
label = T("Case Number"),
ondelete = "CASCADE",
),
#s3_date(future=0),
s3_comments(),
*s3_meta_fields()
)
crud_strings[tablename] = Storage(
label_create = T("Create Evaluation"),
title_display = T("Evaluation Details"),
title_list = T("Evaluations"),
title_update = T("Edit Evaluation"),
label_list_button = T("List Evaluations"),
label_delete_button = T("Delete Evaluation"),
msg_record_created = T("Evaluation added"),
msg_record_modified = T("Evaluation updated"),
msg_record_deleted = T("Evaluation removed"),
msg_list_empty = T("No Evaluations currently registered"))
# Components
self.add_components(tablename,
dvr_evaluation_data = {"name": "data",
"joinby": "evaluation_id",
},
)
# ---------------------------------------------------------------------
# Case Evaluation Data
#
tablename = "dvr_evaluation_data"
define_table(tablename,
Field("evaluation_id", "reference dvr_evaluation",
readable = False,
writable = False,
),
Field("question_id", "reference dvr_evaluation_question",
represent = S3Represent(lookup="dvr_evaluation_question",
fields=["number", "name"],
field_sep=". "),
writable = False,
),
Field("answer", "boolean",
label = T("Answer"),
represent = s3_yes_no_represent,
),
*s3_meta_fields()
)
# Custom Report Method
#self.set_method("org", "capacity_assessment_data",
# method = "custom_report",
# action = org_CapacityReport())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
return {}
# =============================================================================
class DVRVulnerabilityModel(S3Model):
""" Targeted vulnerabilities for activities """
names = ("dvr_vulnerability_type",
"dvr_vulnerability_type_case_activity",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
settings = current.deployment_settings
define_table = self.define_table
crud_strings = s3.crud_strings
hierarchical_vulnerability_types = settings.get_dvr_vulnerability_types_hierarchical()
# ---------------------------------------------------------------------
# Types of vulnerability
#
tablename = "dvr_vulnerability_type"
define_table(tablename,
Field("name",
label = T("Type of Vulnerability"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
# This form of hierarchy may not work on all Databases:
Field("parent", "reference dvr_vulnerability_type",
label = T("Subtype of"),
ondelete = "RESTRICT",
represent = S3Represent(lookup = tablename,
translate = True,
hierarchy = True,
),
readable = hierarchical_vulnerability_types,
writable = hierarchical_vulnerability_types,
),
Field("required", "boolean",
default = False,
label = T("Required Category"),
represent = s3_yes_no_represent,
readable = False,
writable = False,
),
s3_comments(),
*s3_meta_fields())
# Hierarchy
if hierarchical_vulnerability_types:
hierarchy = "parent"
widget = S3HierarchyWidget(multiple = False,
leafonly = True,
)
else:
hierarchy = None
widget = None
# Table configuration
self.configure(tablename,
deduplicate = S3Duplicate(primary = ("name",),
secondary = ("parent",),
),
hierarchy = hierarchy,
)
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Vulnerability Type"),
title_display = T("Vulnerability Type"),
title_list = T("Vulnerability Types"),
title_update = T("Edit Vulnerability Type"),
label_list_button = T("List Vulnerability Types"),
label_delete_button = T("Delete Vulnerability Type"),
msg_record_created = T("Vulnerability Type created"),
msg_record_modified = T("Vulnerability Type updated"),
msg_record_deleted = T("Vulnerability Type deleted"),
msg_list_empty = T("No Vulnerability Types currently defined"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
vulnerability_type_id = S3ReusableField("vulnerability_type_id",
"reference %s" % tablename,
label = T("Type of Vulnerability"),
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db,
"%s.id" % tablename,
represent,
)),
sortby = "name",
comment = S3PopupLink(c="dvr",
f="vulnerability_type",
tooltip=T("Create a new vulnerability type"),
),
widget = widget,
)
# ---------------------------------------------------------------------
# Link tables vulnerability type <=> case activity
# - in the context of psycho-social support, this could be
# diagnoses => when differentiating into suspected / confirmed
# diagnoses, we use the diagnosis-link for the confirmed ones
#
tablename = "dvr_vulnerability_type_case_activity"
define_table(tablename,
self.dvr_case_activity_id(
empty = False,
ondelete = "CASCADE",
),
vulnerability_type_id(
empty = False,
ondelete = "RESTRICT",
),
*s3_meta_fields())
tablename = "dvr_diagnosis_case_activity"
define_table(tablename,
self.dvr_case_activity_id(
empty = False,
ondelete = "CASCADE",
),
vulnerability_type_id(
empty = False,
ondelete = "RESTRICT",
),
*s3_meta_fields())
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class DVRActivityFundingModel(S3Model):
""" Model to manage funding needs for cases """
names = ("dvr_activity_funding",
)
def model(self):
T = current.T
s3 = current.response.s3
define_table = self.define_table
crud_strings = s3.crud_strings
# ---------------------------------------------------------------------
# Case activity funding
#
tablename = "dvr_activity_funding"
define_table(tablename,
self.dvr_case_activity_id(),
Field("funding_required", "boolean",
default = False,
label = T("Funding Required"),
represent = s3_yes_no_represent,
),
Field("reason", "text",
label = T("Reason"),
represent = s3_text_represent,
widget = s3_comments_widget,
),
Field("proposal", "text",
label = T("Proposed Assistance"),
),
Field("approved", "boolean",
label = T("Approved"),
represent = s3_yes_no_represent,
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Funding Proposal"),
title_display = T("Funding Proposal"),
title_list = T("Funding Proposals"),
title_update = T("Edit Funding Proposal"),
label_list_button = T("List Funding Proposals"),
label_delete_button = T("Delete Funding Proposal"),
msg_record_created = T("Funding Proposal created"),
msg_record_modified = T("Funding Proposal updated"),
msg_record_deleted = T("Funding Proposal deleted"),
msg_list_empty = T("No Funding Proposals currently registered"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# =============================================================================
class DVRServiceContactModel(S3Model):
""" Model to track external service contacts of beneficiaries """
names = ("dvr_service_contact",
"dvr_service_contact_type",
)
def model(self):
T = current.T
db = current.db
s3 = current.response.s3
crud_strings = s3.crud_strings
define_table = self.define_table
configure = self.configure
# ---------------------------------------------------------------------
# Service Contact Types
#
tablename = "dvr_service_contact_type"
define_table(tablename,
Field("name",
label = T("Name"),
requires = [IS_NOT_EMPTY(), IS_LENGTH(512, minsize=1)],
),
s3_comments(),
*s3_meta_fields())
# Table configuration
configure(tablename,
deduplicate = S3Duplicate(),
)
# CRUD Strings
ADD_TYPE = T("Create Service Contact Type")
crud_strings[tablename] = Storage(
label_create = ADD_TYPE,
title_display = T("Service Contact Type"),
title_list = T("Service Contact Types"),
title_update = T("Edit Service Contact Types"),
label_list_button = T("List Service Contact Types"),
label_delete_button = T("Delete Service Contact Type"),
msg_record_created = T("Service Contact Type added"),
msg_record_modified = T("Service Contact Type updated"),
msg_record_deleted = T("Service Contact Type deleted"),
msg_list_empty = T("No Service Contact Types currently defined"),
)
# Reusable field
represent = S3Represent(lookup=tablename, translate=True)
type_id = S3ReusableField("type_id", "reference %s" % tablename,
label = T("Service Contact Type"),
ondelete = "RESTRICT",
represent = represent,
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "%s.id" % tablename,
represent,
)),
sortby = "name",
)
# ---------------------------------------------------------------------
# Service Contacts of Beneficiaries
#
AGENCY = T("Providing Agency")
tablename = "dvr_service_contact"
define_table(tablename,
# Beneficiary (component link):
self.pr_person_id(empty = False,
ondelete = "CASCADE",
),
type_id(),
#self.dvr_need_id(),
self.org_organisation_id(label = AGENCY,
),
# Alternative free-text field:
Field("organisation",
label = AGENCY,
readable = False,
writable = False,
),
Field("reference",
label = T("Ref.No."),
comment = DIV(_class = "tooltip",
_title = "%s|%s" % (T("Ref.No."),
T("Customer number, file reference or other reference number"),
),
),
),
# Enable in template as needed:
Field("contact",
label = T("Contact Person"),
),
Field("phone",
label = T("Phone"),
),
Field("email",
label = T("Email"),
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Service Contact"),
title_display = T("Service Contact Details"),
title_list = T("Service Contacts"),
title_update = T("Edit Service Contacts"),
label_list_button = T("List Service Contacts"),
label_delete_button = T("Delete Service Contact"),
msg_record_created = T("Service Contact added"),
msg_record_modified = T("Service Contact updated"),
msg_record_deleted = T("Service Contact deleted"),
msg_list_empty = T("No Service Contacts currently registered"),
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
#dummy = S3ReusableField("dummy_id", "integer",
# readable = False,
# writable = False,
# )
return {}
# =============================================================================
class DVRSiteActivityModel(S3Model):
""" Model to record the activity of a site over time """
names = ("dvr_site_activity",
)
def model(self):
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
crud_strings = s3.crud_strings
configure = self.configure
define_table = self.define_table
SITE = settings.get_org_site_label()
site_represent = self.org_SiteRepresent(show_link=False)
default_site = settings.get_org_default_site()
permitted_facilities = current.auth.permitted_facilities(redirect_on_error=False)
# ---------------------------------------------------------------------
# Site Activity
#
tablename = "dvr_site_activity"
define_table(tablename,
self.super_link("site_id", "org_site",
default = default_site,
filterby = "site_id",
filter_opts = permitted_facilities,
label = SITE,
readable = not default_site,
writable = not default_site,
represent = site_represent,
updateable = True,
),
s3_date(future=0),
Field("old_total", "integer",
default = 0,
label = T("Previous Total"),
requires = IS_INT_IN_RANGE(0, None),
),
Field("cases_new", "integer",
default = 0,
label = T("Admissions"),
requires = IS_INT_IN_RANGE(0, None),
),
Field("cases_closed", "integer",
default = 0,
label = T("Departures"),
requires = IS_INT_IN_RANGE(0, None),
),
Field("new_total", "integer",
default = 0,
label = T("Current Total"),
requires = IS_INT_IN_RANGE(0, None),
),
Field("report", "upload",
autodelete = True,
label = T("Report"),
length = current.MAX_FILENAME_LENGTH,
represent = self.report_represent,
uploadfolder = os.path.join(current.request.folder,
"uploads",
"dvr",
),
),
s3_comments(),
*s3_meta_fields())
# CRUD Strings
crud_strings[tablename] = Storage(
label_create = T("Create Activity Report"),
title_display = T("Activity Report"),
title_list = T("Activity Reports"),
title_update = T("Edit Activity Report"),
label_list_button = T("List Activity Reports"),
label_delete_button = T("Delete Activity Report"),
msg_record_created = T("Activity Report created"),
msg_record_modified = T("Activity Report updated"),
msg_record_deleted = T("Activity Report deleted"),
msg_list_empty = T("No Activity Reports found"),
)
# Filter widgets
date_filter = S3DateFilter("date")
date_filter.operator = ["eq"]
filter_widgets = [date_filter]
if not default_site:
site_filter = S3OptionsFilter("site_id",
label = SITE,
)
filter_widgets.insert(0, site_filter)
# Table configuration
configure(tablename,
filter_widgets = filter_widgets,
)
# ---------------------------------------------------------------------
# Pass names back to global scope (s3.*)
#
return {}
# -------------------------------------------------------------------------
@staticmethod
def defaults():
""" Safe defaults for names in case the module is disabled """
return {}
# -------------------------------------------------------------------------
@staticmethod
def report_represent(value):
""" File representation """
if value:
try:
# Read the filename from the file
filename = current.db.dvr_site_activity.report.retrieve(value)[0]
except IOError:
return current.T("File not found")
else:
return A(filename,
_href=URL(c="default", f="download", args=[value]))
else:
return current.messages["NONE"]
# =============================================================================
def dvr_case_default_status():
"""
Helper to get/set the default status for case records
@return: the default status_id
"""
s3db = current.s3db
ctable = s3db.dvr_case
field = ctable.status_id
default = field.default
if default:
# Already set
return default
# Look up the default status
stable = s3db.dvr_case_status
query = (stable.is_default == True) & \
(stable.deleted != True)
row = current.db(query).select(stable.id, limitby=(0, 1)).first()
if row:
# Set as field default in case table
default = field.default = row.id
return default
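# A minimal usage sketch (illustrative only, person_id assumed to exist):
# the helper is typically used to prefill the status when creating case
# records programmatically, e.g.:
#
#     case_id = current.s3db.resource("dvr_case").insert(
#                   person_id = person_id,
#                   status_id = dvr_case_default_status(),
#                   )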
# =============================================================================
def dvr_case_status_filter_opts(closed=None):
"""
Get filter options for case status, ordered by workflow position
@return: OrderedDict of options
@note: set sort=False for filter widget to retain this order
"""
table = current.s3db.dvr_case_status
query = (table.deleted != True)
if closed is not None:
if closed:
query &= (table.is_closed == True)
else:
query &= ((table.is_closed == False) | (table.is_closed == None))
rows = current.db(query).select(table.id,
table.name,
orderby = "workflow_position",
)
if not rows:
return {}
T = current.T
return OrderedDict((row.id, T(row.name)) for row in rows)
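# Illustrative sketch of how these options are typically consumed (the
# selector "status_id" is an assumption for the example):
#
#     S3OptionsFilter("status_id",
#                     options = lambda: dvr_case_status_filter_opts(closed=False),
#                     sort = False,  # retain workflow order (see note above)
#                     )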
# =============================================================================
def dvr_case_activity_default_status():
"""
Helper to get/set the default status for case activities
@return: the default status_id
"""
s3db = current.s3db
rtable = s3db.dvr_case_activity
field = rtable.status_id
default = field.default
if not default:
# Look up the default status
stable = s3db.dvr_case_activity_status
query = (stable.is_default == True) & \
(stable.deleted != True)
row = current.db(query).select(stable.id, limitby=(0, 1)).first()
if row:
# Set as field default in case activity table
default = field.default = row.id
return default
# =============================================================================
def dvr_set_response_action_defaults():
"""
DRY Helper to set defaults for response actions
"""
if current.deployment_settings.get_dvr_response_types():
dvr_response_default_type()
dvr_response_default_status()
# =============================================================================
def dvr_response_default_type():
"""
Helper to get/set the default type for response records
@return: the default response_type_id
"""
s3db = current.s3db
rtable = s3db.dvr_response_action
field = rtable.response_type_id
default = field.default
if not default:
# Look up the default status
ttable = s3db.dvr_response_type
query = (ttable.is_default == True) & \
(ttable.deleted != True)
row = current.db(query).select(ttable.id,
cache = s3db.cache,
limitby = (0, 1),
).first()
if row:
# Set as field default in responses table
default = field.default = row.id
return default
# =============================================================================
def dvr_response_default_status():
"""
Helper to get/set the default status for response records
@return: the default status_id
"""
s3db = current.s3db
rtable = s3db.dvr_response_action
field = rtable.status_id
default = field.default
if not default:
stable = s3db.dvr_response_status
if current.deployment_settings.get_dvr_response_planning():
# Actions are planned ahead, so initial status by default
query = (stable.is_default == True)
else:
# Actions are documented in hindsight, so closed by default
query = (stable.is_default_closure == True)
# Look up the default status
query = query & (stable.deleted != True)
row = current.db(query).select(stable.id,
cache = s3db.cache,
limitby = (0, 1),
).first()
if row:
# Set as field default in responses table
default = field.default = row.id
return default
# =============================================================================
def dvr_response_status_colors(resource, selector):
"""
Get colors for response statuses
@param resource: the S3Resource the caller is looking at
@param selector: the Field selector (usually "status_id")
@returns: a dict with colors {field_value: "#RRGGBB", ...}
"""
table = current.s3db.dvr_response_status
query = (table.color != None)
rows = current.db(query).select(table.id,
table.color,
)
return {row.id: ("#%s" % row.color) for row in rows if row.color}
# =============================================================================
def dvr_case_household_size(group_id):
"""
Update the household_size for all cases in the given case group,
taking into account that the same person could belong to multiple
        case groups. To be called from the onaccept of pr_group_membership
        when automatic household size is enabled.
@param group_id: the group_id of the case group (group_type == 7)
"""
db = current.db
s3db = current.s3db
ptable = s3db.pr_person
gtable = s3db.pr_group
mtable = s3db.pr_group_membership
# Get all persons related to this group_id, make sure this is a case group
join = [mtable.on((mtable.group_id == gtable.id) &
(mtable.deleted != True)),
ptable.on(ptable.id == mtable.person_id)
]
query = (gtable.id == group_id) & \
(gtable.group_type == 7) & \
(gtable.deleted != True)
rows = db(query).select(ptable.id, join=join)
person_ids = {row.id for row in rows}
if person_ids:
# Get case group members for each of these person_ids
ctable = s3db.dvr_case
rtable = ctable.with_alias("member_cases")
otable = mtable.with_alias("case_members")
join = ctable.on(ctable.person_id == mtable.person_id)
left = [otable.on((otable.group_id == mtable.group_id) &
(otable.deleted != True)),
rtable.on(rtable.person_id == otable.person_id),
]
query = (mtable.person_id.belongs(person_ids)) & \
(mtable.deleted != True) & \
(rtable.id != None)
rows = db(query).select(ctable.id,
otable.person_id,
join = join,
left = left,
)
# Count heads
CASE = str(ctable.id)
MEMBER = str(otable.person_id)
groups = {}
for row in rows:
member_id = row[MEMBER]
case_id = row[CASE]
if case_id not in groups:
groups[case_id] = {member_id}
else:
groups[case_id].add(member_id)
# Update the related cases
for case_id, members in groups.items():
number_of_members = len(members)
db(ctable.id == case_id).update(household_size = number_of_members)
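# Illustrative sketch only: with automatic household size enabled, this
# helper would typically be wired into an onaccept of pr_group_membership
# (the hook name and form structure are assumptions):
#
#     def group_membership_onaccept(form):
#         group_id = form.vars.get("group_id")
#         if group_id:
#             dvr_case_household_size(group_id)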
# =============================================================================
def dvr_due_followups(human_resource_id=None):
"""
Number of activities due for follow-up
@param human_resource_id: count only activities assigned to this HR
"""
# Generate a request for case activities and customise it
r = S3Request("dvr", "case_activity",
args = ["count_due_followups"],
get_vars = {},
)
r.customise_resource()
resource = r.resource
# Filter to exclude closed case activities
if current.deployment_settings.get_dvr_case_activity_use_status():
status_filter = (FS("status_id$is_closed") == False)
else:
status_filter = (FS("completed") == False)
# Filter for due follow-ups
query = (FS("followup") == True) & \
(FS("followup_date") <= datetime.datetime.utcnow().date()) & \
status_filter & \
(FS("person_id$dvr_case.archived") == False)
if human_resource_id:
query &= (FS("human_resource_id") == human_resource_id)
resource.add_filter(query)
return resource.count()
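# Illustrative usage sketch, e.g. for a dashboard counter (the auth helper
# call and message wording are assumptions):
#
#     hr_id = current.auth.s3_logged_in_human_resource()
#     due = dvr_due_followups(human_resource_id=hr_id)
#     msg = current.T("%(number)s activities due for follow-up") % {"number": due}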
# =============================================================================
class dvr_ActivityRepresent(S3Represent):
""" Representation of activity IDs """
def __init__(self, show_link=False):
"""
Constructor
@param show_link: show representation as clickable link
"""
super(dvr_ActivityRepresent, self).__init__(lookup = "dvr_activity",
show_link = show_link,
)
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
table = current.s3db.dvr_activity
count = len(values)
if count == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = current.db(query).select(table.id,
table.name,
table.start_date,
table.end_date,
table.service_id,
table.facilitator,
limitby = (0, count),
)
self.queries += 1
services = set()
for row in rows:
service_id = row.service_id
if service_id:
services.add(service_id)
if services:
represent = table.service_id.represent
represent.bulk(list(services))
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
if row.name:
title = row.name
else:
table = current.s3db.dvr_activity
title = table.service_id.represent(row.service_id)
template = "%(title)s"
data = {"title": title,
"start": "-",
"end": "-",
}
start_date = row.start_date
end_date = row.end_date
if start_date or end_date:
date_represent = S3DateTime.date_represent
if start_date:
data["start"] = date_represent(start_date)
if end_date:
data["end"] = date_represent(end_date)
template = "%(title)s (%(start)s - %(end)s)"
facilitator = row.facilitator
if facilitator:
template = "%s (%%(facilitator)s)" % template
data["facilitator"] = facilitator
return template % data
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link
@param k: the key (dvr_activity.id)
@param v: the representation of the key
@param row: the row with this key (unused here)
"""
url = URL(c="dvr", f="activity", args=[k], extension="")
return A(v, _href = url)
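# Illustrative sketch (field and table names are assumptions): the
# representation would typically be attached to an activity reference
# field, e.g.:
#
#     table = current.s3db.dvr_case_activity
#     table.activity_id.represent = dvr_ActivityRepresent(show_link=True)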
# =============================================================================
class dvr_ResponseActionRepresent(S3Represent):
""" Representation of response actions """
def __init__(self, show_hr=True, show_link=True):
"""
Constructor
            @param show_hr: include the staff member name
            @param show_link: show representation as clickable link
"""
super(dvr_ResponseActionRepresent, self).__init__(
lookup = "dvr_response_action",
show_link = show_link,
)
self.show_hr = show_hr
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: list of fields to look up (unused)
"""
show_hr = self.show_hr
count = len(values)
if count == 1:
query = (key == values[0])
else:
query = key.belongs(values)
table = self.table
fields = [table.id, table.start_date, table.person_id]
if show_hr:
fields.append(table.human_resource_id)
rows = current.db(query).select(limitby=(0, count), *fields)
self.queries += 1
# Bulk-represent human_resource_ids
if show_hr:
hr_ids = [row.human_resource_id for row in rows]
table.human_resource_id.represent.bulk(hr_ids)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
table = self.table
date = table.start_date.represent(row.start_date)
if self.show_hr:
hr = table.human_resource_id.represent(row.human_resource_id,
show_link = False,
)
reprstr = "[%s] %s" % (date, hr)
else:
reprstr = date
return reprstr
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link
@param k: the key (dvr_case_activity.id)
@param v: the representation of the key
@param row: the row with this key
"""
try:
person_id = row.person_id
except AttributeError:
return v
url = URL(c = "dvr",
f = "person",
args = [person_id, "response_action", k],
extension = "",
)
return A(v, _href = url)
# =============================================================================
class dvr_ResponseActionThemeRepresent(S3Represent):
""" Representation of response action theme links """
def __init__(self, paragraph=False, details=False):
"""
Constructor
@param paragraph: render as HTML paragraph
@param details: include details in paragraph
"""
super(dvr_ResponseActionThemeRepresent, self).__init__(
lookup = "dvr_response_action_theme",
)
self.paragraph = paragraph
self.details = details
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: list of fields to look up (unused)
"""
count = len(values)
if count == 1:
query = (key == values[0])
else:
query = key.belongs(values)
table = self.table
fields = [table.id, table.action_id, table.theme_id]
if self.details:
fields.append(table.comments)
rows = current.db(query).select(limitby=(0, count), *fields)
self.queries += 1
# Bulk-represent themes
theme_ids = [row.theme_id for row in rows]
table.theme_id.represent.bulk(theme_ids)
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
table = self.table
theme = table.theme_id.represent(row.theme_id)
if self.paragraph:
# CSS class to allow styling
css = "dvr-response-action-theme"
if self.details:
comments = table.comments.represent(row.comments)
reprstr = DIV(H6(theme), comments, _class=css)
else:
reprstr = P(theme, _class=css)
else:
reprstr = theme
return reprstr
# -------------------------------------------------------------------------
def render_list(self, value, labels, show_link=True):
"""
Render list-type representations from bulk()-results.
@param value: the list
@param labels: the labels as returned from bulk()
@param show_link: render references as links, should
be the same as used with bulk()
"""
if self.paragraph:
reprstr = TAG[""]([labels[v] if v in labels else self.default
for v in value
])
else:
reprstr = super(dvr_ResponseActionThemeRepresent, self) \
.render_list(value, labels, show_link=show_link)
return reprstr
# =============================================================================
class dvr_ResponseThemeRepresent(S3Represent):
""" Representation of response themes """
def __init__(self, multiple=False, translate=True, show_need=False):
super(dvr_ResponseThemeRepresent, self).__init__(
lookup = "dvr_response_theme",
multiple = multiple,
translate = translate,
)
self.show_need = show_need
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
table = self.table
count = len(values)
if count == 1:
query = (key == values[0])
else:
query = key.belongs(values)
if self.show_need:
ntable = current.s3db.dvr_need
left = ntable.on(ntable.id == table.need_id)
rows = current.db(query).select(table.id,
table.name,
ntable.id,
ntable.name,
left = left,
limitby = (0, count),
)
else:
rows = current.db(query).select(table.id,
table.name,
limitby = (0, count),
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
T = current.T
translate = self.translate
if self.show_need:
theme = row.dvr_response_theme.name
if theme:
theme = T(theme) if translate else theme
else:
theme = self.none
need = row.dvr_need.name
if need:
need = T(need) if translate else need
if need:
reprstr = "%s: %s" % (need, theme)
else:
reprstr = theme
else:
theme = row.name
if theme:
reprstr = T(theme) if translate else theme
else:
reprstr = self.none
return reprstr
# =============================================================================
class dvr_CaseActivityRepresent(S3Represent):
""" Representation of case activity IDs """
def __init__(self, show_as=None, fmt=None, show_link=False, linkto=None):
"""
Constructor
@param show_as: alternative representations:
"beneficiary"|"need"|"subject"
@param show_link: show representation as clickable link
@param fmt: string format template for person record
"""
super(dvr_CaseActivityRepresent, self).__init__(
lookup = "dvr_case_activity",
show_link = show_link,
linkto = linkto,
)
if show_as is None:
self.show_as = "beneficiary"
else:
self.show_as = show_as
if fmt:
self.fmt = fmt
else:
self.fmt = "%(first_name)s %(last_name)s"
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
table = self.table
count = len(values)
if count == 1:
query = (key == values[0])
else:
query = key.belongs(values)
ptable = current.s3db.pr_person
left = [ptable.on(ptable.id == table.person_id)]
show_as = self.show_as
if show_as == "beneficiary":
rows = current.db(query).select(table.id,
ptable.id,
ptable.pe_label,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
left = left,
limitby = (0, count),
)
elif show_as == "need":
ntable = current.s3db.dvr_need
left.append(ntable.on(ntable.id == table.need_id))
rows = current.db(query).select(table.id,
ptable.id,
ntable.name,
left = left,
limitby = (0, count),
)
else:
rows = current.db(query).select(table.id,
table.subject,
ptable.id,
left = left,
limitby = (0, count),
)
self.queries += 1
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
show_as = self.show_as
if show_as == "beneficiary":
beneficiary = dict(row.pr_person)
# Do not show "None" for no label
if beneficiary.get("pe_label") is None:
beneficiary["pe_label"] = ""
return self.fmt % beneficiary
elif show_as == "need":
need = row.dvr_need.name
if self.translate:
need = current.T(need) if need else self.none
return need
else:
return row.dvr_case_activity.subject
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link
@param k: the key (dvr_case_activity.id)
@param v: the representation of the key
@param row: the row with this key
"""
try:
beneficiary = row.pr_person
except AttributeError:
return v
url = URL(c = "dvr",
f = "person",
args = [beneficiary.id, "case_activity", k],
extension = "",
)
return A(v, _href = url)
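# Illustrative sketch (the case_activity_id field on dvr_response_action is
# an assumption): representing the link by the need of the activity rather
# than the beneficiary:
#
#     rtable = current.s3db.dvr_response_action
#     rtable.case_activity_id.represent = dvr_CaseActivityRepresent(show_as="need")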
# =============================================================================
class dvr_DocEntityRepresent(S3Represent):
""" Module context-specific representation of doc-entities """
def __init__(self,
case_label=None,
case_group_label=None,
activity_label=None,
use_sector=True,
use_need=False,
show_link=False,
):
"""
Constructor
@param case_label: label for cases (default: "Case")
@param case_group_label: label for case groups (default: "Case Group")
@param activity_label: label for case activities
(default: "Activity")
@param use_need: use need if available instead of subject
@param use_sector: use sector if available instead of
activity label
@param show_link: show representation as clickable link
"""
super(dvr_DocEntityRepresent, self).__init__(lookup = "doc_entity",
show_link = show_link,
)
T = current.T
if case_label:
self.case_label = case_label
else:
self.case_label = T("Case")
if case_group_label:
self.case_group_label = case_group_label
else:
self.case_group_label = T("Case Group")
if activity_label:
self.activity_label = activity_label
else:
self.activity_label = T("Activity")
self.use_need = use_need
self.use_sector = use_sector
# -------------------------------------------------------------------------
def lookup_rows(self, key, values, fields=None):
"""
Custom rows lookup
@param key: the key Field
@param values: the values
@param fields: unused (retained for API compatibility)
"""
db = current.db
s3db = current.s3db
table = self.table
ptable = s3db.pr_person
count = len(values)
if count == 1:
query = (key == values[0])
else:
query = key.belongs(values)
rows = db(query).select(table.doc_id,
table.instance_type,
limitby = (0, count),
orderby = table.instance_type,
)
self.queries += 1
# Sort by instance type
doc_ids = {}
for row in rows:
doc_id = row.doc_id
instance_type = row.instance_type
if instance_type not in doc_ids:
doc_ids[instance_type] = {doc_id: row}
else:
doc_ids[instance_type][doc_id] = row
need_ids = set()
sector_ids = set()
for instance_type in ("dvr_case", "dvr_case_activity", "pr_group"):
doc_entities = doc_ids.get(instance_type)
if not doc_entities:
continue
# The instance table
itable = s3db[instance_type]
# Look up person and instance data
query = itable.doc_id.belongs(set(doc_entities.keys()))
if instance_type == "pr_group":
mtable = s3db.pr_group_membership
left = [mtable.on((mtable.group_id == itable.id) & \
(mtable.deleted == False)),
ptable.on(ptable.id == mtable.person_id),
]
else:
left = ptable.on(ptable.id == itable.person_id)
fields = [itable.id,
itable.doc_id,
ptable.id,
ptable.first_name,
ptable.middle_name,
ptable.last_name,
]
if instance_type == "dvr_case_activity":
fields.extend((itable.sector_id,
itable.subject,
itable.need_id,
))
if instance_type == "pr_group":
fields.extend((itable.name,
itable.group_type,
))
irows = db(query).select(left=left, *fields)
self.queries += 1
# Add the person+instance data to the entity rows
for irow in irows:
instance = irow[instance_type]
entity = doc_entities[instance.doc_id]
if hasattr(instance, "sector_id"):
sector_ids.add(instance.sector_id)
if hasattr(instance, "need_id"):
need_ids.add(instance.need_id)
entity[instance_type] = instance
entity.pr_person = irow.pr_person
# Bulk represent any sector ids
if sector_ids and "sector_id" in itable.fields:
represent = itable.sector_id.represent
if represent and hasattr(represent, "bulk"):
represent.bulk(list(sector_ids))
# Bulk represent any need ids
if need_ids and "need_id" in itable.fields:
represent = itable.need_id.represent
if represent and hasattr(represent, "bulk"):
represent.bulk(list(need_ids))
return rows
# -------------------------------------------------------------------------
def represent_row(self, row):
"""
Represent a row
@param row: the Row
"""
reprstr = self.default
instance_type = row.instance_type
if hasattr(row, "pr_person"):
if instance_type == "dvr_case":
person = row.pr_person
title = s3_fullname(person)
label = self.case_label
elif instance_type == "dvr_case_activity":
table = current.s3db.dvr_case_activity
activity = row.dvr_case_activity
title = activity.subject
if self.use_need:
need_id = activity.need_id
if need_id:
represent = table.need_id.represent
title = represent(need_id)
label = self.activity_label
if self.use_sector:
sector_id = activity.sector_id
if sector_id:
represent = table.sector_id.represent
label = represent(sector_id)
elif instance_type == "pr_group":
group = row.pr_group
if group.group_type == 7:
label = self.case_group_label
if group.name:
title = group.name
else:
person = row.pr_person
title = s3_fullname(person)
else:
label = current.T("Group")
title = group.name or self.default
else:
title = None
label = None
if title:
reprstr = "%s (%s)" % (s3_str(title), s3_str(label))
return reprstr
# -------------------------------------------------------------------------
def link(self, k, v, row=None):
"""
Represent a (key, value) as hypertext link
@param k: the key (doc_entity.doc_id)
@param v: the representation of the key
@param row: the row with this key
"""
link = v
if row:
if row.instance_type == "dvr_case_activity":
try:
person_id = row.pr_person.id
case_activity_id = row.dvr_case_activity.id
except AttributeError:
pass
else:
url = URL(c = "dvr",
f = "person",
args = [person_id,
"case_activity",
case_activity_id,
],
extension="",
)
link = A(v, _href=url)
return link
# =============================================================================
class DVRManageAppointments(S3Method):
""" Custom method to bulk-manage appointments """
    def apply_method(self, r, **attr):
        """
            Main entry point for REST interface
            @param r: the S3Request instance
            @param attr: controller parameters
        """
T = current.T
s3db = current.s3db
get_vars = r.get_vars
response = current.response
if not self._permitted("update"):
r.unauthorised()
if r.http == "POST" and r.representation != "aadata":
count = 0
base_query = (FS("person_id$case.archived") == None) | \
(FS("person_id$case.archived") == False)
post_vars = r.post_vars
if "selected" in post_vars and "mode" in post_vars and \
any([n in post_vars for n in ("completed", "cancelled")]):
selected = post_vars.selected
if selected:
selected = selected.split(",")
else:
selected = []
db = current.db
atable = s3db.dvr_case_appointment
# Handle exclusion filter
if post_vars.mode == "Exclusive":
if "filterURL" in post_vars:
filters = S3URLQuery.parse_url(post_vars.filterURL)
else:
filters = None
query = ~(FS("id").belongs(selected)) & base_query
aresource = s3db.resource("dvr_case_appointment",
filter = query,
vars = filters,
)
rows = aresource.select(["id"], as_rows=True)
selected = [str(row.id) for row in rows]
if selected:
query = (atable.id.belongs(selected)) & \
(atable.deleted != True)
if "completed" in post_vars:
count = db(query).update(status=4) # Completed
elif "cancelled" in post_vars:
count = db(query).update(status=6) # Cancelled
current.session.confirmation = T("%(count)s Appointments updated") % \
{"count": count}
redirect(URL(f="case_appointment", args=["manage"], vars={}))
elif r.http == "GET" or r.representation == "aadata":
resource = r.resource
# Filter widgets
filter_widgets = resource.get_config("filter_widgets")
# List fields
list_fields = ["id",
(T("ID"), "person_id$pe_label"),
"person_id",
"type_id",
"date",
"status",
]
# Data table
totalrows = resource.count()
if "pageLength" in get_vars:
display_length = get_vars["pageLength"]
if display_length == "None":
display_length = None
else:
display_length = int(display_length)
else:
display_length = 25
if display_length:
limit = 4 * display_length
else:
limit = None
# Sorting by person_id requires introspection => use datatable_filter
if r.representation != "aadata":
get_vars = dict(get_vars)
dt_sorting = {"iSortingCols": "1",
"bSortable_0": "false",
"iSortCol_0": "1",
"sSortDir_0": "asc",
}
get_vars.update(dt_sorting)
dtfilter, orderby, left = resource.datatable_filter(list_fields,
get_vars,
)
resource.add_filter(dtfilter)
data = resource.select(list_fields,
start = 0,
limit = limit,
orderby = orderby,
left = left,
count = True,
represent = True,
)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"], orderby=orderby)
dt_id = "datatable"
# Bulk actions
dt_bulk_actions = [(T("Completed"), "completed"),
(T("Cancelled"), "cancelled"),
]
if r.representation == "html":
# Page load
resource.configure(deletable = False)
dt.defaultActionButtons(resource)
response.s3.no_formats = True
# Data table (items)
items = dt.html(totalrows,
filteredrows,
dt_id,
dt_pageLength = display_length,
dt_ajax_url = URL(c = "dvr",
f = "case_appointment",
args = ["manage"],
vars = {},
extension = "aadata",
),
dt_searching = "false",
dt_pagination = "true",
dt_bulk_actions = dt_bulk_actions,
)
# Filter form
if filter_widgets:
# Where to retrieve filtered data from:
_vars = resource.crud._remove_filters(r.get_vars)
filter_submit_url = r.url(vars=_vars)
# Where to retrieve updated filter options from:
filter_ajax_url = URL(f = "case_appointment",
args = ["filter.options"],
vars = {},
)
get_config = resource.get_config
filter_clear = get_config("filter_clear", True)
filter_formstyle = get_config("filter_formstyle", None)
filter_submit = get_config("filter_submit", True)
filter_form = S3FilterForm(filter_widgets,
clear = filter_clear,
formstyle = filter_formstyle,
submit = filter_submit,
ajax = True,
url = filter_submit_url,
ajaxurl = filter_ajax_url,
_class = "filter-form",
_id = "datatable-filter-form",
)
fresource = current.s3db.resource(resource.tablename)
alias = resource.alias if r.component else None
ff = filter_form.html(fresource,
r.get_vars,
target = "datatable",
alias = alias,
)
else:
ff = ""
output = dict(items = items,
title = T("Manage Appointments"),
list_filter_form = ff,
)
response.view = "list_filter.html"
return output
elif r.representation == "aadata":
# Ajax refresh
if "draw" in get_vars:
echo = int(get_vars["draw"])
else:
echo = None
items = dt.json(totalrows,
filteredrows,
dt_id,
echo,
dt_bulk_actions = dt_bulk_actions,
)
response.headers["Content-Type"] = "application/json"
return items
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
# =============================================================================
class DVRManageAllowance(S3Method):
""" Method handler to bulk-update allowance payments status """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Main entry point for REST interface.
@param r: the S3Request instance
@param attr: controller parameters
"""
# User must be permitted to update allowance information
permitted = self._permitted("update")
if not permitted:
r.unauthorised()
if r.representation in ("html", "iframe"):
if r.http in ("GET", "POST"):
output = self.bulk_update_status(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def bulk_update_status(self, r, **attr):
"""
Method to bulk-update status of allowance payments
@param r: the S3Request instance
@param attr: controller parameters
"""
T = current.T
s3db = current.s3db
settings = current.deployment_settings
response = current.response
output = {"title": T("Update Allowance Status"),
}
status_opts = dict(s3db.dvr_allowance_status_opts)
        # Cannot bulk-update from or to status "paid"
del status_opts[2]
# Form fields
formfields = [s3_date("from_date",
label = T("Planned From"),
set_min = "#allowance_to_date",
),
s3_date("to_date",
default = "now",
label = T("Planned Until"),
set_max = "#allowance_from_date",
empty = False,
),
Field("current_status", "integer",
default = 1, # pending
label = T("Current Status"),
requires = IS_IN_SET(status_opts),
),
Field("new_status", "integer",
default = 4, # missed
label = T("New Status"),
requires = IS_IN_SET(status_opts),
),
]
# Form buttons
submit_btn = INPUT(_class = "tiny primary button",
_name = "submit",
_type = "submit",
_value = T("Update"),
)
cancel_btn = A(T("Cancel"),
_href = r.url(id=None, method=""),
_class = "action-lnk",
)
buttons = [submit_btn, cancel_btn]
# Generate the form and add it to the output
resourcename = r.resource.name
formstyle = settings.get_ui_formstyle()
form = SQLFORM.factory(record = None,
showid = False,
formstyle = formstyle,
table_name = resourcename,
buttons = buttons,
*formfields)
output["form"] = form
# Process the form
formname = "%s/manage" % resourcename
if form.accepts(r.post_vars,
current.session,
formname = formname,
onvalidation = self.validate,
keepvalues = False,
hideerror = False,
):
formvars = form.vars
current_status = formvars.current_status
new_status = formvars.new_status
table = s3db.dvr_allowance
query = current.auth.s3_accessible_query("update", table) & \
(table.status == current_status) & \
(table.deleted != True)
from_date = formvars.from_date
if from_date:
query &= table.date >= from_date
to_date = formvars.to_date
if to_date:
query &= table.date <= to_date
result = current.db(query).update(status=int(new_status))
if result:
response.confirmation = T("%(number)s records updated") % \
{"number": result}
else:
response.warning = T("No records found")
response.view = self._view(r, "update.html")
return output
# -------------------------------------------------------------------------
@staticmethod
def validate(form):
"""
Update form validation
@param form: the FORM
"""
T = current.T
formvars = form.vars
errors = form.errors
# Must not update from status "paid"
if str(formvars.current_status) == "2":
errors.current_status = T("Bulk update from this status not allowed")
# Must not update to status "paid"
if str(formvars.new_status) == "2":
errors.new_status = T("Bulk update to this status not allowed")
# To-date must be after from-date
from_date = formvars.from_date
to_date = formvars.to_date
if from_date and to_date and from_date > to_date:
errors.to_date = T("Date until must be after date from")
# =============================================================================
def dvr_get_household_size(person_id, dob=False, formatted=True):
"""
Helper function to calculate the household size
(counting only members with active cases)
@param person_id: the person record ID
@param dob: the date of birth of that person (if known)
@param formatted: return household size info as string
@return: household size info as string if formatted=True,
otherwise tuple (number_of_adults, number_of_children)
"""
db = current.db
s3db = current.s3db
ptable = s3db.pr_person
gtable = s3db.pr_group
mtable = s3db.pr_group_membership
ctable = s3db.dvr_case
stable = s3db.dvr_case_status
from dateutil.relativedelta import relativedelta
now = current.request.utcnow.date()
# Default result
adults, children, children_u1 = 1, 0, 0
# Count the person in question
if dob is False:
query = (ptable.id == person_id)
row = db(query).select(ptable.date_of_birth,
limitby = (0, 1),
).first()
if row:
dob = row.date_of_birth
if dob:
age = relativedelta(now, dob).years
if age < 18:
adults, children = 0, 1
if age < 1:
children_u1 = 1
# Household members which have already been counted
members = {person_id}
counted = members.add
# Get all case groups this person belongs to
query = ((mtable.person_id == person_id) & \
(mtable.deleted != True) & \
(gtable.id == mtable.group_id) & \
(gtable.group_type == 7))
rows = db(query).select(gtable.id)
group_ids = set(row.id for row in rows)
if group_ids:
join = [ptable.on(ptable.id == mtable.person_id),
ctable.on((ctable.person_id == ptable.id) & \
(ctable.archived != True) & \
(ctable.deleted != True)),
]
left = [stable.on(stable.id == ctable.status_id),
]
query = (mtable.group_id.belongs(group_ids)) & \
(mtable.deleted != True) & \
(stable.is_closed != True)
rows = db(query).select(ptable.id,
ptable.date_of_birth,
join = join,
left = left,
)
for row in rows:
person, dob = row.id, row.date_of_birth
if person not in members:
age = relativedelta(now, dob).years if dob else None
if age is not None and age < 18:
children += 1
if age < 1:
children_u1 += 1
else:
adults += 1
counted(person)
if not formatted:
return adults, children, children_u1
T = current.T
template = "%(number)s %(label)s"
details = []
if adults:
label = T("Adults") if adults != 1 else T("Adult")
details.append(template % {"number": adults,
"label": label,
})
if children:
label = T("Children") if children != 1 else T("Child")
details.append(template % {"number": children,
"label": label,
})
details = ", ".join(details)
if children_u1:
if children_u1 == 1:
label = T("Child under 1 year")
else:
label = T("Children under 1 year")
details = "%s (%s)" % (details,
template % {"number": children_u1,
"label": label,
},
)
return details
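# Illustrative example (hypothetical household): for two adults, a 5-year-old
# and an infant in the same case group, this returns the tuple (2, 2, 1) when
# formatted=False, or the string "2 Adults, 2 Children (1 Child under 1 year)"
# when formatted=True.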
# =============================================================================
class DVRRegisterCaseEvent(S3Method):
""" Method handler to register case events """
# Action to check flag restrictions for
ACTION = "id-check"
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Main entry point for REST interface.
@param r: the S3Request instance
@param attr: controller parameters
"""
if not self.permitted():
current.auth.permission.fail()
output = {}
representation = r.representation
if representation == "html":
if r.http in ("GET", "POST"):
output = self.registration_form(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
elif representation == "json":
if r.http == "POST":
output = self.registration_ajax(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def registration_form(self, r, **attr):
"""
Render and process the registration form
@param r: the S3Request instance
@param attr: controller parameters
"""
T = current.T
response = current.response
settings = current.deployment_settings
output = {}
http = r.http
request_vars = r.get_vars
check = True
label = None
if http == "POST":
# Form submission
request_vars = r.post_vars
if "check" in request_vars:
# Only check ID label, don't register an event
label = request_vars.get("label")
else:
# Form has been submitted with "Register"
check = False
else:
# Coming from external scan app (e.g. Zxing), or from a link
label = request_vars.get("label")
scanner = request_vars.get("scanner")
person = None
pe_label = None
if label is not None:
# Identify the person
person = self.get_person(label)
if person is None:
if http == "GET":
response.error = T("No person found with this ID number")
else:
pe_label = person.pe_label
request_vars["label"] = pe_label
# Get person details, waiting intervals, flag and permission info
flags = []
intervals = {}
if person:
# Person details
person_details = self.person_details(person)
profile_picture = self.profile_picture(person)
# Blocking periods for events
event_types = self.get_event_types()
blocked = self.get_blocked_events(person.id)
for type_id, info in blocked.items():
event_type = event_types.get(type_id)
if not event_type:
continue
code = event_type.code
msg, dt = info
intervals[code] = (s3_str(msg),
"%sZ" % s3_encode_iso_datetime(dt),
)
# Flag info
flag_info = dvr_get_flag_instructions(person.id,
action = self.ACTION,
)
permitted = flag_info["permitted"]
if check:
info = flag_info["info"]
for flagname, instructions in info:
flags.append({"n": s3_str(T(flagname)),
"i": s3_str(T(instructions)),
})
else:
person_details = ""
profile_picture = None
permitted = False
# Identify the event type
event_code = request_vars.get("event")
event_type = self.get_event_type(event_code)
if not event_type:
# Fall back to default event type
event_type = self.get_event_type()
event_code = event_type.code if event_type else None
# Whether the event registration is actionable
actionable = event_code is not None
# Standard form fields and data
formfields = [Field("label",
label = T("ID"),
requires = [IS_NOT_EMPTY(error_message=T("Enter or scan an ID")),
IS_LENGTH(512, minsize=1),
],
),
Field("person",
label = "",
writable = False,
default = "",
),
Field("flaginfo",
label = "",
writable = False,
),
]
data = {"id": "",
"label": pe_label,
"person": person_details,
"flaginfo": "",
}
# Hidden fields to store event type, scanner, flag info and permission
hidden = {"event": event_code,
"scanner": scanner,
"actionable": json.dumps(actionable),
"permitted": json.dumps(permitted),
"flags": json.dumps(flags),
"intervals": json.dumps(intervals),
"image": profile_picture,
}
# Additional form data
widget_id, submit = self.get_form_data(person,
formfields,
data,
hidden,
permitted = permitted,
)
# Form buttons
check_btn = INPUT(_class = "tiny secondary button check-btn",
_name = "check",
_type = "submit",
_value = T("Check ID"),
)
submit_btn = INPUT(_class = "tiny primary button submit-btn",
_name = "submit",
_type = "submit",
_value = submit,
)
# Toggle buttons (active button first, otherwise pressing Enter
# hits the disabled button so requiring an extra tab step)
actionable = hidden.get("actionable") == "true"
if person and actionable and permitted:
check_btn["_disabled"] = "disabled"
check_btn.add_class("hide")
buttons = [submit_btn, check_btn]
else:
submit_btn["_disabled"] = "disabled"
submit_btn.add_class("hide")
buttons = [check_btn, submit_btn]
# Add the cancel-action
buttons.append(A(T("Cancel"), _class = "cancel-action action-lnk"))
resourcename = r.resource.name
# Generate the form and add it to the output
formstyle = settings.get_ui_formstyle()
form = SQLFORM.factory(record = data if check else None,
showid = False,
formstyle = formstyle,
table_name = resourcename,
buttons = buttons,
hidden = hidden,
_id = widget_id,
*formfields)
output["form"] = form
# Process the form
formname = "%s/registration" % resourcename
if form.accepts(r.post_vars,
current.session,
onvalidation = self.validate,
formname = formname,
keepvalues = False,
hideerror = False,
):
if not check:
self.accept(r, form, event_type=event_type)
header = self.get_header(event_type)
output.update(header)
# ZXing Barcode Scanner Launch Button
output["zxing"] = self.get_zxing_launch_button(event_code)
# Custom view
response.view = self._view(r, "dvr/register_case_event.html")
# Show profile picture by default or only on demand?
show_picture = settings.get_dvr_event_registration_show_picture()
# Inject JS
options = {"tablename": resourcename,
"ajaxURL": r.url(None,
method = "register",
representation = "json",
),
"showPicture": show_picture,
"showPictureText": s3_str(T("Show Picture")),
"hidePictureText": s3_str(T("Hide Picture")),
}
self.inject_js(widget_id, options)
return output
# -------------------------------------------------------------------------
# Configuration
# -------------------------------------------------------------------------
def permitted(self):
"""
Helper function to check permissions
@return: True if permitted to use this method, else False
"""
# User must be permitted to create case events
return self._permitted("create")
# -------------------------------------------------------------------------
def get_event_type(self, code=None):
"""
Get a case event type for an event code
@param code: the type code (using default event type if None)
@return: the dvr_case_event_type Row, or None if not found
"""
event_types = self.get_event_types()
event_type = None
if code is None:
event_type = event_types.get("_default")
else:
code = s3_str(code)
for value in event_types.values():
if value.code == code:
event_type = value
break
return event_type
# -------------------------------------------------------------------------
def validate(self, form):
"""
Validate the event registration form
@param form: the FORM
"""
T = current.T
formvars = form.vars
pe_label = formvars.get("label").strip()
person = self.get_person(pe_label)
if person is None:
form.errors["label"] = T("No person found with this ID number")
permitted = False
else:
person_id = person.id
formvars.person_id = person_id
flag_info = dvr_get_flag_instructions(person_id,
action = self.ACTION,
)
permitted = flag_info["permitted"]
formvars.permitted = permitted
# Validate the event type (if not default)
type_id = None
try:
request_vars = form.request_vars
except AttributeError:
event_code = None
else:
event_code = request_vars.get("event")
if event_code:
event_type = self.get_event_type(event_code)
if not event_type:
form.errors["event"] = \
current.response.error = T("Invalid event code")
else:
type_id = event_type.id
formvars.type_id = type_id
# Check whether event type is blocked for this person
if person and type_id:
blocked = self.get_blocked_events(person.id,
type_id = type_id,
)
if type_id in blocked:
msg = blocked[type_id][0]
form.errors["event"] = current.response.error = msg
# -------------------------------------------------------------------------
def accept(self, r, form, event_type=None):
"""
Helper function to process the form
@param r: the S3Request
@param form: the FORM
@param event_type: the event_type (Row)
"""
T = current.T
response = current.response
formvars = form.vars
person_id = formvars.person_id
success = False
if not formvars.get("permitted"):
response.error = T("Event registration not permitted")
elif person_id:
event_type_id = event_type.id if event_type else None
success = self.register_event(person_id, event_type_id)
if success:
success = True
response.confirmation = T("Event registered")
else:
response.error = T("Could not register event")
else:
response.error = T("Person not found")
return success
# -------------------------------------------------------------------------
def registration_ajax(self, r, **attr):
"""
Ajax response method, expects a JSON input like:
{l: the PE label (from the input field),
c: boolean to indicate whether to just check
                    the PE label or to register an event
t: the event type code
}
@param r: the S3Request instance
@param attr: controller parameters
@return: JSON response, structure:
{l: the actual PE label (to update the input field),
p: the person details,
d: the family details,
f: [{n: the flag name
i: the flag instructions
},
...],
b: profile picture URL,
i: {<event_code>: [<msg>, <blocked_until_datetime>]},
s: whether the action is permitted or not
e: form error (for label field)
a: error message
w: warning message
m: success message
}
"""
T = current.T
# Load JSON data from request body
s = r.body
s.seek(0)
try:
data = json.load(s)
except (ValueError, TypeError):
r.error(400, current.ERROR.BAD_REQUEST)
# Initialize processing variables
output = {}
error = None
alert = None
message = None
warning = None
permitted = False
flags = []
# Identify the person
pe_label = data.get("l")
person = self.get_person(pe_label)
if person is None:
error = s3_str(T("No person found with this ID number"))
else:
# Get flag info
flag_info = dvr_get_flag_instructions(person.id,
action = "id-check",
)
permitted = flag_info["permitted"]
check = data.get("c")
if check:
# Person details
person_details = self.person_details(person)
profile_picture = self.profile_picture(person)
output["p"] = s3_str(person_details)
output["l"] = person.pe_label
output["b"] = profile_picture
# Family details
details = dvr_get_household_size(person.id,
dob = person.date_of_birth,
)
if details:
output["d"] = {"d": details}
# Flag Info
info = flag_info["info"]
for flagname, instructions in info:
flags.append({"n": s3_str(T(flagname)),
"i": s3_str(T(instructions)),
})
# Blocking periods for events
event_types = self.get_event_types()
blocked = self.get_blocked_events(person.id)
intervals = {}
for type_id, info in blocked.items():
event_type = event_types.get(type_id)
if not event_type:
continue
code = event_type.code
msg, dt = info
intervals[code] = (s3_str(msg),
"%sZ" % s3_encode_iso_datetime(dt),
)
output["i"] = intervals
else:
# Check event code and permission
type_id = None
event_code = data.get("t")
if not event_code:
alert = T("No event type specified")
elif not permitted:
alert = T("Event registration not permitted")
else:
event_type = self.get_event_type(event_code)
if not event_type:
alert = T("Invalid event type: %s") % event_code
else:
type_id = event_type.id
if type_id:
# Check whether event type is blocked for this person
person_id = person.id
blocked = self.get_blocked_events(person_id,
type_id = type_id,
)
if type_id in blocked:
# Event type is currently blocked for this person
alert = blocked[type_id][0]
else:
# Ok - register the event
success = self.register_event(person.id, type_id)
if success:
message = T("Event registered")
else:
alert = T("Could not register event")
# Add messages to output
if alert:
output["a"] = s3_str(alert)
if error:
output["e"] = s3_str(error)
if message:
output["m"] = s3_str(message)
if warning:
output["w"] = s3_str(warning)
# Add flag info to output
output["s"] = permitted
output["f"] = flags
current.response.headers["Content-Type"] = "application/json"
return json.dumps(output)
# -------------------------------------------------------------------------
@staticmethod
def get_form_data(person, formfields, data, hidden, permitted=False):
"""
Helper function to extend the form
@param person: the person (Row)
@param formfields: list of form fields (Field)
@param data: the form data (dict)
@param hidden: hidden form fields (dict)
@param permitted: whether the action is permitted
@return: tuple (widget_id, submit_label)
"""
T = current.T
# Extend form with household size info
if person:
details = dvr_get_household_size(person.id,
dob = person.date_of_birth,
)
else:
details = ""
formfields.extend([Field("details",
label = T("Family"),
writable = False,
),
])
data["details"] = details
widget_id = "case-event-form"
submit = current.T("Register")
return widget_id, submit
# -------------------------------------------------------------------------
def get_header(self, event_type=None):
"""
Helper function to construct the event type header
@param event_type: the event type (Row)
@returns: dict of view items
"""
T = current.T
output = {}
# Event type header
if event_type:
event_type_name = T(event_type.name)
name_class = "event-type-name"
else:
event_type_name = T("Please select an event type")
name_class = "event-type-name placeholder"
event_type_header = DIV(H4(SPAN(T(event_type_name),
_class = name_class,
),
SPAN(ICON("settings"),
_class = "event-type-setting",
),
_class = "event-type-toggle",
_id = "event-type-toggle",
),
_class = "event-type-header",
)
output["event_type"] = event_type_header
# Event type selector
event_types = self.get_event_types()
buttons = []
for k, v in event_types.items():
if k != "_default":
button = LI(A(T(v.name),
_class = "secondary button event-type-selector",
data = {"code": s3_str(v.code),
"name": s3_str(T(v.name)),
},
),
)
buttons.append(button)
output["event_type_selector"] = UL(buttons,
_class="button-group stack hide event-type-selector",
_id="event-type-selector",
)
return output
# -------------------------------------------------------------------------
# Class-specific functions
# -------------------------------------------------------------------------
@staticmethod
def register_event(person_id, type_id):
"""
Register a case event
@param person_id: the person record ID
            @param type_id: the event type record ID
"""
s3db = current.s3db
ctable = s3db.dvr_case
etable = s3db.dvr_case_event
# Get the case ID for the person_id
query = (ctable.person_id == person_id) & \
(ctable.deleted != True)
case = current.db(query).select(ctable.id,
limitby=(0, 1),
).first()
if case:
case_id = case.id
else:
case_id = None
# Customise event resource
r = S3Request("dvr", "case_event",
current.request,
args = [],
get_vars = {},
)
r.customise_resource("dvr_case_event")
data = {"person_id": person_id,
"case_id": case_id,
"type_id": type_id,
"date": current.request.utcnow,
}
record_id = etable.insert(**data)
if record_id:
# Set record owner
auth = current.auth
auth.s3_set_record_owner(etable, record_id)
auth.s3_make_session_owner(etable, record_id)
# Execute onaccept
data["id"] = record_id
s3db.onaccept(etable, data, method="create")
return record_id
# -------------------------------------------------------------------------
def get_event_types(self):
"""
Lazy getter for case event types
@return: a dict {id: Row} for dvr_case_event_type, with an
additional key "_default" for the default event type
"""
if not hasattr(self, "event_types"):
event_types = {}
table = current.s3db.dvr_case_event_type
# Active event types
query = (table.is_inactive == False) & \
(table.deleted == False)
# Excluded event codes
excluded = current.deployment_settings \
.get_dvr_event_registration_exclude_codes()
if excluded:
for code in excluded:
if "*" in code:
query &= (~(table.code.like(code.replace("*", "%"))))
else:
query &= (table.code != code)
# Roles required
sr = current.auth.get_system_roles()
roles = current.session.s3.roles
if sr.ADMIN not in roles:
query &= (table.role_required == None) | \
(table.role_required.belongs(roles))
rows = current.db(query).select(table.id,
table.code,
table.name,
table.is_default,
table.min_interval,
table.max_per_day,
table.comments,
)
for row in rows:
event_types[row.id] = row
if row.is_default:
event_types["_default"] = row
self.event_types = event_types
return self.event_types
# -------------------------------------------------------------------------
def check_intervals(self, person_id, type_id=None):
"""
Check minimum intervals between consecutive registrations
of the same event type
@param person_id: the person record ID
@param type_id: check only this event type (rather than all types)
@return: a dict with blocked event types
{type_id: (error_message, blocked_until_datetime)}
"""
T = current.T
db = current.db
s3db = current.s3db
now = current.request.utcnow
day_start = now.replace(hour=0,
minute=0,
second=0,
microsecond=0,
)
next_day = day_start + datetime.timedelta(days=1)
output = {}
table = s3db.dvr_case_event
event_type_id = table.type_id
# Get event types to check
event_types = self.get_event_types()
# Check for impermissible combinations
etable = s3db.dvr_case_event_exclusion
query = (table.person_id == person_id) & \
(table.date >= day_start) & \
(table.deleted == False) & \
(etable.excluded_by_id == table.type_id) & \
(etable.deleted == False)
if type_id and event_types.get(type_id):
query &= etable.type_id == type_id
rows = db(query).select(etable.type_id,
etable.excluded_by_id,
)
excluded = {}
for row in rows:
tid = row.type_id
if tid in excluded:
excluded[tid].append(row.excluded_by_id)
else:
excluded[tid] = [row.excluded_by_id]
for tid, excluded_by_ids in excluded.items():
event_type = event_types.get(tid)
if not event_type:
continue
excluded_by_names = []
seen = set()
for excluded_by_id in excluded_by_ids:
if excluded_by_id in seen:
continue
else:
seen.add(excluded_by_id)
excluded_by_type = event_types.get(excluded_by_id)
if not excluded_by_type:
continue
excluded_by_names.append(s3_str(T(excluded_by_type.name)))
if excluded_by_names:
msg = T("%(event)s already registered today, not combinable") % \
{"event": ", ".join(excluded_by_names)
}
output[tid] = (msg, next_day)
# Helper function to build event type sub-query
def type_query(items):
if len(items) == 1:
return (event_type_id == items[0])
elif items:
return (event_type_id.belongs(set(items)))
else:
return None
# Check maximum occurences per day
q = None
if type_id:
event_type = event_types.get(type_id)
if event_type and \
event_type.max_per_day and \
type_id not in output:
q = type_query((type_id,))
else:
check = [tid for tid, row in event_types.items()
if row.max_per_day and \
tid != "_default" and tid not in output
]
q = type_query(check)
if q is not None:
# Get number of events per type for this person today
cnt = table.id.count()
query = (table.person_id == person_id) & q & \
(table.date >= day_start) & \
(table.deleted != True)
rows = db(query).select(event_type_id,
cnt,
groupby = event_type_id,
)
# Check limit
for row in rows:
number = row[cnt]
tid = row[event_type_id]
event_type = event_types[tid]
limit = event_type.max_per_day
if number >= limit:
if number > 1:
msg = T("%(event)s already registered %(number)s times today") % \
{"event": T(event_type.name),
"number": number,
}
else:
msg = T("%(event)s already registered today") % \
{"event": T(event_type.name),
}
output[tid] = (msg, next_day)
# Check minimum intervals
q = None
if type_id:
event_type = event_types.get(type_id)
if event_type and \
event_type.min_interval and \
type_id not in output:
q = type_query((type_id,))
else:
check = [tid for tid, row in event_types.items()
if row.min_interval and \
tid != "_default" and tid not in output
]
q = type_query(check)
if q is not None:
# Get the last events for these types for this person
query = (table.person_id == person_id) & q & \
(table.deleted != True)
timestamp = table.date.max()
rows = db(query).select(event_type_id,
timestamp,
groupby = event_type_id,
)
# Check intervals
represent = table.date.represent
for row in rows:
latest = row[timestamp]
tid = row[event_type_id]
event_type = event_types[tid]
interval = event_type.min_interval
if latest:
earliest = latest + datetime.timedelta(hours=interval)
if earliest > now:
msg = T("%(event)s already registered on %(timestamp)s") % \
{"event": T(event_type.name),
"timestamp": represent(latest),
}
output[tid] = (msg, earliest)
return output
# -------------------------------------------------------------------------
# Common methods
# -------------------------------------------------------------------------
@classmethod
def get_person(cls, pe_label):
"""
Get the person record for a PE Label (or ID code), search only
for persons with an open DVR case.
@param pe_label: the PE label (or a scanned ID code as string)
"""
s3db = current.s3db
person = None
# Fields to extract
fields = ["id",
"pe_id",
"pe_label",
"first_name",
"middle_name",
"last_name",
"date_of_birth",
"gender",
]
data = cls.parse_code(pe_label)
def person_(label):
""" Helper function to find a person by pe_label """
            query = (FS("pe_label") == label) & \
(FS("dvr_case.id") != None) & \
(FS("dvr_case.archived") != True) & \
(FS("dvr_case.status_id$is_closed") != True)
presource = s3db.resource("pr_person",
components = ["dvr_case"],
filter = query,
)
rows = presource.select(fields,
start = 0,
limit = 1,
as_rows = True,
)
return rows[0] if rows else None
pe_label = data["label"].strip()
if pe_label:
person = person_(pe_label)
if person:
data_match = True
else:
family = data.get("family")
if family:
# Get the head of family
person = person_(family)
data_match = False
if person:
first_name, last_name = None, None
if "first_name" in data:
first_name = s3_unicode(data["first_name"]).lower()
if s3_unicode(person.first_name).lower() != first_name:
data_match = False
if "last_name" in data:
last_name = s3_unicode(data["last_name"]).lower()
if s3_unicode(person.last_name).lower() != last_name:
data_match = False
if not data_match:
# Family member? => search by names/DoB
ptable = s3db.pr_person
query = current.auth.s3_accessible_query("read", ptable)
gtable = s3db.pr_group
mtable = s3db.pr_group_membership
otable = mtable.with_alias("family")
ctable = s3db.dvr_case
stable = s3db.dvr_case_status
left = [gtable.on((gtable.id == mtable.group_id) & \
(gtable.group_type == 7)),
otable.on((otable.group_id == gtable.id) & \
(otable.person_id != mtable.person_id) & \
(otable.deleted != True)),
ptable.on((ptable.id == otable.person_id) & \
(ptable.pe_label != None)),
ctable.on((ctable.person_id == otable.person_id) & \
(ctable.archived != True)),
stable.on((stable.id == ctable.status_id)),
]
query &= (mtable.person_id == person.id) & \
(ctable.id != None) & \
(stable.is_closed != True) & \
(mtable.deleted != True) & \
(ptable.deleted != True)
if first_name:
query &= (ptable.first_name.lower() == first_name)
if last_name:
query &= (ptable.last_name.lower() == last_name)
if "date_of_birth" in data:
# Include date of birth
dob, error = IS_UTC_DATE()(data["date_of_birth"])
if not error and dob:
query &= (ptable.date_of_birth == dob)
fields_ = [ptable[fn] for fn in fields]
rows = current.db(query).select(left=left,
limitby = (0, 2),
*fields_)
if len(rows) == 1:
person = rows[0]
elif "first_name" in data and "last_name" in data:
first_name = s3_unicode(data["first_name"]).lower()
last_name = s3_unicode(data["last_name"]).lower()
# Search by names
query = (FS("pe_label") != None)
if first_name:
query &= (FS("first_name").lower() == first_name)
if last_name:
query &= (FS("last_name").lower() == last_name)
if "date_of_birth" in data:
# Include date of birth
dob, error = IS_UTC_DATE()(data["date_of_birth"])
if not error and dob:
query &= (FS("date_of_birth") == dob)
# Find only open cases
query &= (FS("dvr_case.id") != None) & \
(FS("dvr_case.archived") != True) & \
(FS("dvr_case.status_id$is_closed") != True)
presource = s3db.resource("pr_person",
components = ["dvr_case"],
filter = query,
)
rows = presource.select(fields,
start = 0,
limit = 2,
as_rows = True,
)
if len(rows) == 1:
person = rows[0]
return person
# -------------------------------------------------------------------------
@staticmethod
def person_details(person):
"""
Format the person details
@param person: the person record (Row)
"""
T = current.T
settings = current.deployment_settings
name = s3_fullname(person)
dob = person.date_of_birth
if dob:
dob = S3DateTime.date_represent(dob)
details = "%s (%s %s)" % (name, T("Date of Birth"), dob)
else:
details = name
output = SPAN(details,
_class = "person-details",
)
if settings.get_dvr_event_registration_checkin_warning():
table = current.s3db.cr_shelter_registration
if table:
# Person counts as checked-out when checked-out
# somewhere and not checked-in somewhere else
query = (table.person_id == person.id) & \
(table.deleted != True)
cnt = table.id.count()
status = table.registration_status
rows = current.db(query).select(status,
cnt,
groupby = status,
)
checked_in = checked_out = 0
for row in rows:
s = row[status]
if s == 2:
checked_in = row[cnt]
elif s == 3:
checked_out = row[cnt]
if checked_out and not checked_in:
output = TAG[""](output,
SPAN(ICON("hint"),
T("not checked-in!"),
_class = "check-in-warning",
),
)
return output
# -------------------------------------------------------------------------
@staticmethod
def profile_picture(person):
"""
Get the profile picture URL for a person
@param person: the person record (Row)
@return: the profile picture URL (relative URL), or None if
no profile picture is available for that person
"""
try:
pe_id = person.pe_id
except AttributeError:
return None
table = current.s3db.pr_image
query = (table.pe_id == pe_id) & \
(table.profile == True) & \
(table.deleted != True)
row = current.db(query).select(table.image, limitby=(0, 1)).first()
if row:
return URL(c="default", f="download", args=row.image)
else:
return None
# -------------------------------------------------------------------------
def get_blocked_events(self, person_id, type_id=None):
"""
Check minimum intervals for event registration and return
all currently blocked events
@param person_id: the person record ID
@param type_id: check only this event type (rather than all)
@return: a dict of blocked event types:
{type_id: (reason, blocked_until)}
"""
check_intervals = self.check_intervals
if check_intervals and callable(check_intervals):
blocked = check_intervals(person_id, type_id=type_id)
else:
blocked = {}
return blocked
# -------------------------------------------------------------------------
@staticmethod
def parse_code(code):
"""
Parse a scanned ID code (QR Code)
@param code: the scanned ID code (string)
@return: a dict {"label": the PE label,
"first_name": optional first name,
"last_name": optional last name,
"date_of_birth": optional date of birth,
}
"""
data = {"label": code}
pattern = current.deployment_settings.get_dvr_id_code_pattern()
if pattern and code:
import re
pattern = re.compile(pattern)
m = pattern.match(code)
if m:
data.update(m.groupdict())
return data
# -------------------------------------------------------------------------
@staticmethod
def get_zxing_launch_button(event_code):
"""
Renders the button to launch the Zxing barcode scanner app
@param event_code: the current event code
@return: the Zxing launch button
"""
T = current.T
# URL template
template = "zxing://scan/?ret=%s&SCAN_FORMATS=Code 128,UPC_A,EAN_13"
# Query variables for return URL
scan_vars = {"label": "{CODE}",
"scanner": "zxing",
"event": "{EVENT}",
}
# Return URL template
tmp = URL(args = ["register"],
vars = scan_vars,
host = True,
)
tmp = str(tmp).replace("&", "%26")
# Current return URL
if event_code:
# must double-escape ampersands:
scan_vars["event"] = event_code.replace("&", "%2526")
ret = URL(args = ["register"],
vars = scan_vars,
host = True,
)
ret = str(ret).replace("&", "%26")
# Construct button
return A(T("Scan with Zxing"),
_href = template % ret,
_class = "tiny primary button zxing-button",
data = {"tmp": template % tmp,
},
)
# -------------------------------------------------------------------------
@staticmethod
def inject_js(widget_id, options):
"""
Helper function to inject static JS and instantiate
the eventRegistration widget
@param widget_id: the node ID where to instantiate the widget
@param options: dict of widget options (JSON-serializable)
"""
s3 = current.response.s3
appname = current.request.application
# Static JS
scripts = s3.scripts
if s3.debug:
script = "/%s/static/scripts/S3/s3.dvr.js" % appname
else:
script = "/%s/static/scripts/S3/s3.dvr.min.js" % appname
scripts.append(script)
# Instantiate widget
scripts = s3.jquery_ready
script = '''$('#%(id)s').eventRegistration(%(options)s)''' % \
{"id": widget_id, "options": json.dumps(options)}
if script not in scripts:
scripts.append(script)
# =============================================================================
class DVRRegisterPayment(DVRRegisterCaseEvent):
""" Method handler to register case events """
# Action to check flag restrictions for
ACTION = "payment"
# Do not check minimum intervals for consecutive registrations
check_intervals = False
# -------------------------------------------------------------------------
# Configuration
# -------------------------------------------------------------------------
def permitted(self):
"""
Helper function to check permissions
@return: True if permitted to use this method, else False
"""
# User must be permitted to update allowance records
return self._permitted("update")
# -------------------------------------------------------------------------
def get_event_type(self, code=None):
"""
Get a case event type for an event code
@param code: the type code (using default event type if None)
@return: the dvr_case_event_type Row, or None if not found
"""
# Only one type of event
return Storage(id=None, code="PAYMENT")
# -------------------------------------------------------------------------
def accept(self, r, form, event_type=None):
"""
Helper function to process the form
@param r: the S3Request
@param form: the FORM
@param event_type: the event_type (Row)
"""
T = current.T
response = current.response
formvars = form.vars
person_id = formvars.person_id
success = False
if not formvars.get("permitted"):
response.error = T("Payment registration not permitted")
elif person_id:
# Get payment data from hidden input
payments = r.post_vars.get("actions")
if payments:
# @todo: read date from formvars (utcnow as fallback)
date = r.utcnow
comments = formvars.get("comments")
updated, failed = self.register_payments(person_id,
payments,
date = date,
comments = comments,
)
response.confirmation = T("%(number)s payment(s) registered") % \
{"number": updated}
if failed:
response.warning = T("%(number)s payment(s) not found") % \
{"number": failed}
else:
response.error = T("No payments specified")
else:
response.error = T("Person not found")
return success
# -------------------------------------------------------------------------
def registration_ajax(self, r, **attr):
"""
Ajax response method, expects a JSON input like:
{l: the PE label (from the input field),
c: boolean to indicate whether to just check
the PE label or to register payments
d: the payment data (raw data, which payments to update)
}
@param r: the S3Request instance
@param attr: controller parameters
@return: JSON response, structure:
{l: the actual PE label (to update the input field),
p: the person details,
f: [{n: the flag name
i: the flag instructions
},
...],
u: whether there are any actionable data
s: whether the action is permitted or not
d: {t: time stamp
h: payment details (raw data)
d: payment details (HTML)
}
e: form error (for label field)
a: error message
w: warning message
m: success message
}
"""
T = current.T
# Load JSON data from request body
s = r.body
s.seek(0)
try:
data = json.load(s)
except (ValueError, TypeError):
r.error(400, current.ERROR.BAD_REQUEST)
# Initialize processing variables
output = {}
alert = None
error = None
warning = None
message = None
permitted = False
flags = []
# Identify the person
pe_label = data.get("l")
person = self.get_person(pe_label)
if person is None:
error = s3_str(T("No person found with this ID number"))
else:
# Get flag info
flag_info = dvr_get_flag_instructions(person.id,
action = self.ACTION,
)
permitted = flag_info["permitted"]
check = data.get("c")
if check:
# Person details
person_details = self.person_details(person)
profile_picture = self.profile_picture(person)
output["p"] = s3_str(person_details)
output["l"] = person.pe_label
output["b"] = profile_picture
info = flag_info["info"]
for flagname, instructions in info:
flags.append({"n": s3_str(T(flagname)),
"i": s3_str(T(instructions)),
})
if permitted:
payments = self.get_payment_data(person.id)
else:
payments = []
date = S3DateTime.datetime_represent(current.request.utcnow,
utc = True,
)
output["d"] = {"d": s3_str(self.payment_data_represent(payments)),
"t": s3_str(date),
"h": payments,
}
output["u"] = True if payments else False
else:
if not permitted:
alert = T("Payment registration not permitted")
else:
# Get payment data from JSON
payments = data.get("d")
if payments:
# @todo: read date from JSON data (utcnow as fallback)
date = r.utcnow
comments = data.get("c")
updated, failed = self.register_payments(
person.id,
payments,
date = date,
comments = comments,
)
message = T("%(number)s payment(s) registered") % \
{"number": updated}
if failed:
warning = T("%(number)s payment(s) not found") % \
{"number": failed}
else:
alert = T("No payments specified")
# Add messages to output
if alert:
output["a"] = s3_str(alert)
if error:
output["e"] = s3_str(error)
if message:
output["m"] = s3_str(message)
if warning:
output["w"] = s3_str(warning)
# Add flag info to output
output["s"] = permitted
output["f"] = flags
current.response.headers["Content-Type"] = "application/json"
return json.dumps(output)
# -------------------------------------------------------------------------
def get_form_data(self, person, formfields, data, hidden, permitted=False):
"""
Helper function to extend the form
@param person: the person (Row)
@param formfields: list of form fields (Field)
@param data: the form data (dict)
@param hidden: hidden form fields (dict)
@param permitted: whether the action is permitted
@return: tuple (widget_id, submit_label)
"""
T = current.T
if person and permitted:
payments = self.get_payment_data(person.id)
else:
payments = []
date = S3DateTime.datetime_represent(current.request.utcnow,
utc = True,
)
# Additional form fields for payments
formfields.extend([Field("details",
label = T("Pending Payments"),
writable = False,
represent = self.payment_data_represent,
),
Field("date",
label = T("Payment Date"),
writable = False,
default = date,
),
Field("comments",
label = T("Comments"),
widget = s3_comments_widget,
),
])
# Additional data for payments
data["date"] = s3_str(date)
data["details"] = payments
data["comments"] = ""
# Add payments JSON to hidden form fields, update actionable info
hidden["actions"] = json.dumps(payments)
if not payments:
hidden["actionable"] = "false"
widget_id = "payment-form"
submit = current.T("Register")
return widget_id, submit
# -------------------------------------------------------------------------
def get_header(self, event_type=None):
"""
Helper function to construct the event type header
@param event_type: the event type (Row)
@returns: dict of view items
"""
# Simple title, no selector/toggle
event_type_header = DIV(H4(SPAN(current.T("Allowance Payment"),
_class = "event-type-name",
),
),
_class = "event-type-header",
)
output = {"event_type": event_type_header,
"event_type_selector": "",
}
return output
# -------------------------------------------------------------------------
# Class-specific functions
# -------------------------------------------------------------------------
@staticmethod
def get_payment_data(person_id):
"""
Helper function to extract currently pending allowance
payments for the person_id.
@param person_id: the person record ID
            @return: a list of dicts [{r: record_id,
d: date,
c: currency,
a: amount,
}, ...]
"""
query = (FS("person_id") == person_id) & \
(FS("status") == 1) & \
(FS("date") <= current.request.utcnow.date())
resource = current.s3db.resource("dvr_allowance",
filter = query,
)
data = resource.select(["id",
"date",
"currency",
"amount",
],
orderby = "dvr_allowance.date",
represent = True,
)
payments = []
append = payments.append
for row in data.rows:
payment_details = {"r": row["dvr_allowance.id"],
"d": row["dvr_allowance.date"],
"c": row["dvr_allowance.currency"],
"a": row["dvr_allowance.amount"],
}
append(payment_details)
return payments
# -------------------------------------------------------------------------
@staticmethod
def register_payments(person_id, payments, date=None, comments=None):
"""
Helper function to register payments
@param person_id: the person record ID
@param payments: the payments as sent from form
@param date: the payment date (default utcnow)
@param comments: comments for the payments
@return: tuple (updated, failed), number of records
"""
if isinstance(payments, basestring):
try:
payments = json.loads(payments)
except (ValueError, TypeError):
payments = []
if not date:
date = current.request.utcnow
# Data to write
data = {"status": 2,
"paid_on": date,
}
if comments:
data["comments"] = comments
atable = current.s3db.dvr_allowance
updated = 0
failed = 0
# Customise allowance resource
r = S3Request("dvr", "allowance",
current.request,
args = [],
get_vars = {},
)
r.customise_resource("dvr_allowance")
onaccept = current.s3db.onaccept
db = current.db
accessible = current.auth.s3_accessible_query("update", atable)
for payment in payments:
record_id = payment.get("r")
query = accessible & \
(atable.id == record_id) & \
(atable.person_id == person_id) & \
(atable.status != 2) & \
(atable.deleted != True)
success = db(query).update(**data)
if success:
record = {"id": record_id, "person_id": person_id}
record.update(data)
onaccept(atable, record, method="update")
updated += 1
else:
failed += 1
return updated, failed
# -------------------------------------------------------------------------
@staticmethod
def payment_data_represent(data):
"""
Representation method for the payment details field
@param data: the payment data (from get_payment_data)
"""
if data:
output = TABLE(_class="payment-details")
for payment in data:
details = TR(TD(payment["d"], _class="payment-date"),
TD(payment["c"], _class="payment-currency"),
TD(payment["a"], _class="payment-amount"),
)
output.append(details)
else:
output = current.T("No pending payments")
return output
# =============================================================================
class dvr_AssignMethod(S3Method):
"""
Custom Method to allow beneficiaries (cases) to be assigned to something
e.g. Project, Activity, Distribution
"""
def __init__(self, component, next_tab="case", types=None):
"""
            @param component: the Component in which to create records
            @param next_tab: the component/method to redirect to after assigning
            @param types: a list of types to pick from: Staff, Volunteers, Deployables
"""
self.component = component
self.next_tab = next_tab
self.types = types
def apply_method(self, r, **attr):
"""
Apply method.
@param r: the S3Request
@param attr: controller options for this request
"""
try:
component = r.resource.components[self.component]
except KeyError:
current.log.error("Invalid Component!")
raise
if component.link:
component = component.link
tablename = component.tablename
# Requires permission to create component
authorised = current.auth.s3_has_permission("create", tablename)
if not authorised:
r.unauthorised()
T = current.T
db = current.db
s3db = current.s3db
#settings = current.deployment_settings
table = s3db[tablename]
fkey = component.fkey
record = r.record
if fkey in record:
# SuperKey
record_id = record[fkey]
else:
record_id = r.id
get_vars = r.get_vars
response = current.response
if r.http == "POST":
added = 0
post_vars = r.post_vars
if all([n in post_vars for n in ("assign", "selected", "mode")]):
selected = post_vars.selected
if selected:
selected = selected.split(",")
else:
selected = []
# Handle exclusion filter
if post_vars.mode == "Exclusive":
if "filterURL" in post_vars:
filters = S3URLQuery.parse_url(post_vars.filterURL)
else:
filters = None
query = ~(FS("id").belongs(selected))
dresource = s3db.resource("dvr_case",
alias = self.component,
filter=query, vars=filters)
rows = dresource.select(["id"], as_rows=True)
selected = [str(row.id) for row in rows]
# Prevent multiple entries in the link table
query = (table.case_id.belongs(selected)) & \
(table[fkey] == record_id) & \
(table.deleted != True)
rows = db(query).select(table.id)
rows = dict((row.id, row) for row in rows)
onaccept = component.get_config("create_onaccept",
component.get_config("onaccept", None))
for case_id in selected:
try:
cid = int(case_id.strip())
except ValueError:
continue
if cid not in rows:
link = Storage(case_id = case_id)
link[fkey] = record_id
_id = table.insert(**link)
if onaccept:
link["id"] = _id
form = Storage(vars=link)
onaccept(form)
added += 1
current.session.confirmation = T("%(number)s assigned") % \
dict(number=added)
if added > 0:
redirect(URL(args=[r.id, self.next_tab], vars={}))
else:
redirect(URL(args=r.args, vars={}))
elif r.http == "GET":
# Filter widgets
filter_widgets = s3db.get_config("dvr_case", "filter_widgets")
# List fields
list_fields = ["id",
"person_id",
]
# Data table
resource = s3db.resource("dvr_case",
alias=r.component.alias if r.component else None,
vars=get_vars)
totalrows = resource.count()
if "pageLength" in get_vars:
display_length = get_vars["pageLength"]
if display_length == "None":
display_length = None
else:
display_length = int(display_length)
else:
display_length = 25
if display_length:
limit = 4 * display_length
else:
limit = None
dtfilter, orderby, left = resource.datatable_filter(list_fields,
get_vars,
)
resource.add_filter(dtfilter)
# Hide people already in the link table
query = (table[fkey] == record_id) & \
(table.deleted != True)
rows = db(query).select(table.case_id)
already = [row.case_id for row in rows]
resource.add_filter((~db.dvr_case.id.belongs(already)))
dt_id = "datatable"
# Bulk actions
dt_bulk_actions = [(T("Assign"), "assign")]
if r.representation == "html":
# Page load
resource.configure(deletable = False)
profile_url = URL(c = "dvr",
f = "case",
args = ["[id]", "profile"])
S3CRUD.action_buttons(r,
deletable = False,
read_url = profile_url,
update_url = profile_url)
response.s3.no_formats = True
# Filter form
if filter_widgets:
# Where to retrieve filtered data from:
_vars = resource.crud._remove_filters(r.get_vars)
filter_submit_url = r.url(vars=_vars)
# Default Filters (before selecting data!)
resource.configure(filter_widgets=filter_widgets)
S3FilterForm.apply_filter_defaults(r, resource)
# Where to retrieve updated filter options from:
filter_ajax_url = URL(f="case",
args=["filter.options"],
vars={})
get_config = resource.get_config
filter_clear = get_config("filter_clear", True)
filter_formstyle = get_config("filter_formstyle", None)
filter_submit = get_config("filter_submit", True)
filter_form = S3FilterForm(filter_widgets,
clear=filter_clear,
formstyle=filter_formstyle,
submit=filter_submit,
ajax=True,
url=filter_submit_url,
ajaxurl=filter_ajax_url,
_class="filter-form",
_id="datatable-filter-form",
)
fresource = current.s3db.resource(resource.tablename)
alias = r.component.alias if r.component else None
ff = filter_form.html(fresource,
r.get_vars,
target="datatable",
alias=alias)
else:
ff = ""
# Data table (items)
data = resource.select(list_fields,
start=0,
limit=limit,
orderby=orderby,
left=left,
count=True,
represent=True)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"])
items = dt.html(totalrows,
filteredrows,
dt_id,
dt_ajax_url=r.url(representation="aadata"),
dt_bulk_actions=dt_bulk_actions,
dt_pageLength=display_length,
dt_pagination="true",
dt_searching="false",
)
# @ToDO: dvr_case_label()
#CASE = settings.get_dvr_case_label()
CASE = T("Beneficiaries")
output = dict(items = items,
title = T("Assign %(case)s") % dict(case=CASE),
list_filter_form = ff)
response.view = "list_filter.html"
return output
elif r.representation == "aadata":
# Ajax refresh
if "draw" in get_vars:
echo = int(get_vars.draw)
else:
echo = None
data = resource.select(list_fields,
start=0,
limit=limit,
orderby=orderby,
left=left,
count=True,
represent=True)
filteredrows = data["numrows"]
dt = S3DataTable(data["rfields"], data["rows"])
items = dt.json(totalrows,
filteredrows,
dt_id,
echo,
dt_bulk_actions=dt_bulk_actions)
response.headers["Content-Type"] = "application/json"
return items
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
# =============================================================================
def dvr_get_flag_instructions(person_id, action=None):
"""
Get handling instructions if flags are set for a person
@param person_id: the person ID
@param action: the action for which instructions are needed:
- check-in|check-out|payment|id-check
@returns: dict {"permitted": whether the action is permitted
"info": list of tuples (flagname, instructions)
}
"""
s3db = current.s3db
ftable = s3db.dvr_case_flag
ltable = s3db.dvr_case_flag_case
query = (ltable.person_id == person_id) & \
(ltable.deleted != True) & \
(ftable.id == ltable.flag_id) & \
(ftable.deleted != True)
if action == "check-in":
query &= (ftable.advise_at_check_in == True) | \
(ftable.deny_check_in == True)
elif action == "check-out":
query &= (ftable.advise_at_check_out == True) | \
(ftable.deny_check_out == True)
elif action == "payment":
query &= (ftable.advise_at_id_check == True) | \
(ftable.allowance_suspended == True)
else:
query &= (ftable.advise_at_id_check == True)
flags = current.db(query).select(ftable.name,
ftable.deny_check_in,
ftable.deny_check_out,
ftable.allowance_suspended,
ftable.advise_at_check_in,
ftable.advise_at_check_out,
ftable.advise_at_id_check,
ftable.instructions,
)
info = []
permitted = True
for flag in flags:
advise = False
if action == "check-in":
if flag.deny_check_in:
permitted = False
advise = flag.advise_at_check_in
elif action == "check-out":
if flag.deny_check_out:
permitted = False
advise = flag.advise_at_check_out
elif action == "payment":
if flag.allowance_suspended:
permitted = False
advise = flag.advise_at_id_check
else:
advise = flag.advise_at_id_check
if advise:
instructions = flag.instructions
if instructions is not None:
instructions = instructions.strip()
if not instructions:
instructions = current.T("No instructions for this flag")
info.append((flag.name, instructions))
return {"permitted": permitted,
"info": info,
}
# =============================================================================
def dvr_update_last_seen(person_id):
"""
Helper function for automatic updates of dvr_case.last_seen_on
@param person_id: the person ID
"""
db = current.db
s3db = current.s3db
now = current.request.utcnow
last_seen_on = None
if not person_id:
return
# Get event types that require presence
ettable = s3db.dvr_case_event_type
query = (ettable.presence_required == True) & \
(ettable.deleted == False)
types = db(query).select(ettable.id, cache=s3db.cache)
type_ids = set(t.id for t in types)
# Get the last case event that required presence
etable = s3db.dvr_case_event
query = (etable.person_id == person_id) & \
(etable.type_id.belongs(type_ids)) & \
(etable.date != None) & \
(etable.date <= now) & \
(etable.deleted != True)
event = db(query).select(etable.date,
orderby = ~etable.date,
limitby = (0, 1),
).first()
if event:
last_seen_on = event.date
# Check shelter registration history for newer entries
htable = s3db.cr_shelter_registration_history
query = (htable.person_id == person_id) & \
(htable.status.belongs(2, 3)) & \
(htable.date != None) & \
(htable.deleted != True)
if last_seen_on is not None:
query &= htable.date > last_seen_on
entry = db(query).select(htable.date,
orderby = ~htable.date,
limitby = (0, 1),
).first()
if entry:
last_seen_on = entry.date
settings = current.deployment_settings
# Case appointments to update last_seen_on?
if settings.get_dvr_appointments_update_last_seen_on():
# Get appointment types that require presence
attable = s3db.dvr_case_appointment_type
query = (attable.presence_required == True) & \
(attable.deleted == False)
types = db(query).select(attable.id, cache=s3db.cache)
type_ids = set(t.id for t in types)
# Get last appointment that required presence
atable = s3db.dvr_case_appointment
query = (atable.person_id == person_id) & \
(atable.date != None) & \
(atable.type_id.belongs(type_ids)) & \
(atable.date <= now.date()) & \
(atable.status == 4) & \
(atable.deleted != True)
if last_seen_on is not None:
query &= atable.date > last_seen_on.date()
appointment = db(query).select(atable.date,
orderby = ~atable.date,
limitby = (0, 1),
).first()
if appointment:
date = appointment.date
# Default to 08:00 local time (...unless that would be in the future)
try:
date = datetime.datetime.combine(date, datetime.time(8, 0, 0))
except TypeError:
pass
date = min(now, S3DateTime.to_utc(date))
last_seen_on = date
# Allowance payments to update last_seen_on?
if settings.get_dvr_payments_update_last_seen_on():
atable = s3db.dvr_allowance
query = (atable.person_id == person_id) & \
(atable.paid_on != None) & \
(atable.status == 2) & \
(atable.deleted != True)
if last_seen_on is not None:
query &= atable.paid_on > last_seen_on
payment = db(query).select(atable.paid_on,
orderby = ~atable.paid_on,
limitby = (0, 1),
).first()
if payment:
last_seen_on = payment.paid_on
# Update last_seen_on
ctable = s3db.dvr_case
query = (ctable.person_id == person_id) & \
(ctable.archived != True) & \
(ctable.deleted != True)
db(query).update(last_seen_on = last_seen_on,
# Don't change author stamp for
# system-controlled record update:
modified_on = ctable.modified_on,
modified_by = ctable.modified_by,
)
# =============================================================================
def dvr_rheader(r, tabs=None):
""" DVR module resource headers """
if r.representation != "html":
# Resource headers only used in interactive views
return None
tablename, record = s3_rheader_resource(r)
if tablename != r.tablename:
resource = current.s3db.resource(tablename, id=record.id)
else:
resource = r.resource
rheader = None
rheader_fields = []
if record:
T = current.T
if tablename == "pr_person":
if not tabs:
# Defaults used by? (Not used by DRK, STL or SCPHIMS)
tabs = [(T("Basic Details"), None),
(T("Activities"), "case_activity"),
(T("Beneficiaries"), "beneficiary_data"),
(T("Economy"), "economy"),
(T("Identity"), "identity"),
]
case = resource.select(["dvr_case.reference",
"dvr_case.case_type_id",
],
represent = True,
).rows
if case:
case = case[0]
case_number = lambda row: case["dvr_case.reference"]
case_type = lambda row: case["dvr_case.case_type_id"]
name = s3_fullname
else:
# Target record exists, but doesn't match filters
return None
rheader_fields = [[(T("Case Number"), case_number)],
[(T("Case Type"), case_type)],
[(T("Name"), name)],
["date_of_birth"],
]
elif tablename == "dvr_case":
if not tabs:
tabs = [(T("Basic Details"), None),
(T("Activities"), "case_activity"),
]
rheader_fields = [["reference"],
["status_id"],
]
elif tablename == "dvr_activity":
label = current.deployment_settings.get_dvr_label()
if label == "Beneficiary":
CASES = T("Beneficiaries")
else:
CASES = T("Cases")
if not tabs:
tabs = [(T("Basic Details"), None),
(CASES, "case_activity"),
]
rheader_fields = [["name"],
["service_id"],
]
rheader = S3ResourceHeader(rheader_fields, tabs)(r,
table = resource.table,
record = record,
)
return rheader
# END =========================================================================
| 40.476881 | 184 | 0.412169 |
794805f6eaeb971030c2a10a20a764b88e94ae7e | 293 | py | Python | example/conanfile.py | thormme/imgui_sdl | 5d888cdcaf64a2f937e0710971e75219088d45f6 | [
"MIT"
] | null | null | null | example/conanfile.py | thormme/imgui_sdl | 5d888cdcaf64a2f937e0710971e75219088d45f6 | [
"MIT"
] | null | null | null | example/conanfile.py | thormme/imgui_sdl | 5d888cdcaf64a2f937e0710971e75219088d45f6 | [
"MIT"
] | null | null | null | from conans import ConanFile, CMake
class StrifeConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
requires = \
"sdl2/2.0.8@bincrafters/stable", \
"sdl2_image/2.0.3@bincrafters/stable", \
"imgui/1.62@bincrafters/stable"
generators = "cmake"
| 26.636364 | 52 | 0.645051 |
7948065b574978879735dc42a7ed6f8dce99ace9 | 856 | py | Python | course/models.py | oneofsunshine/OUCOJ | 68d9edf23346a30b1c6966045a0cb36abdddedfb | [
"MIT"
] | null | null | null | course/models.py | oneofsunshine/OUCOJ | 68d9edf23346a30b1c6966045a0cb36abdddedfb | [
"MIT"
] | 5 | 2021-06-08T21:55:26.000Z | 2022-03-12T00:38:42.000Z | course/models.py | oneofsunshine/OUCOJ | 68d9edf23346a30b1c6966045a0cb36abdddedfb | [
"MIT"
] | null | null | null | from django.db import models
from account.models import User
from contest.models import Contest
class Course(models.Model):
name = models.TextField()
s_year = models.CharField(max_length=4)
short_description = models.TextField(default="ownerless")
contests = models.ManyToManyField(Contest)
students = models.ManyToManyField(User)
class Meta:
db_table = "course"
unique_together = (("name", "s_year"),)
class JoinCourseRequest(models.Model):
course = models.ForeignKey(Course, on_delete=models.CASCADE, related_name="join_course_requests")
user = models.ForeignKey(User, on_delete=models.CASCADE)
status = models.BooleanField(default=False)
accepted = models.BooleanField(default=False)
class Meta:
db_table = "join_course_request"
unique_together = (("user", "course"),)
| 29.517241 | 101 | 0.718458 |
794807174fb65d5d2b1754fccfcc4f0c80d8d68b | 6,550 | py | Python | tests/ui_tests/test_activatable_groups_ui_options_data/config_generator.py | dimuha-rs/adcm | 0f49cc9ece16c1e257be12375a64b65a34b3a3ae | [
"Apache-2.0"
] | null | null | null | tests/ui_tests/test_activatable_groups_ui_options_data/config_generator.py | dimuha-rs/adcm | 0f49cc9ece16c1e257be12375a64b65a34b3a3ae | [
"Apache-2.0"
] | null | null | null | tests/ui_tests/test_activatable_groups_ui_options_data/config_generator.py | dimuha-rs/adcm | 0f49cc9ece16c1e257be12375a64b65a34b3a3ae | [
"Apache-2.0"
] | null | null | null | import os
DATA = [
    (g_i, g_a, f_g, f_i, act)
    for g_i in ['true', 'false']
    for g_a in ['true', 'false']
    for f_g in ['true', 'false']
    for f_i in ['true', 'false']
    for act in ['true', 'false']
]
TYPES = ("string", "password", "integer", "text", 'boolean', 'float', 'option', 'list', 'map', 'json', 'file')
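# DATA enumerates all 32 combinations of five 'true'/'false' flags covering the
# group/field ui_options (advanced, invisible) and the group's active state;
# each combination is meant to be rendered together with a field type from
# TYPES via the templates below.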
TEMPLATE_STRING = """
- type: cluster
name: group_advanced_{0}_invisible_{1}_field_advanced_{2}_invisible_{3}_activatable_{5}_{4}
version: 1
config:
- description: {4}
display_name: {4}
name: group
type: group
activatable: true
active: {5}
ui_options:
advanced: {0}
invisible: {1}
subs: &id001
- &id002
name: {4}
default: {4}
display_name: {4}
type: {4}
ui_options:
advanced: {2}
invisible: {3}
"""
TEMPLATE_NUMBERS = """
- type: cluster
name: group_advanced_{0}_invisible_{1}_field_advanced_{2}_invisible_{3}_activatable_{5}_{4}
version: 1
config:
- description: {4}
display_name: {4}
name: group
type: group
activatable: true
active: {5}
ui_options:
advanced: {0}
invisible: {1}
subs: &id001
- &id002
name: {4}
default: 1
display_name: {4}
type: {4}
ui_options:
advanced: {2}
invisible: {3}
"""
TEMPLATE_BOOLEAN = """
- type: cluster
name: group_advanced_{0}_invisible_{1}_field_advanced_{2}_invisible_{3}_activatable_{5}_{4}
version: 1
config:
- description: {4}
display_name: {4}
activatable: true
active: {5}
name: group
type: group
ui_options:
advanced: {0}
invisible: {1}
subs: &id001
- &id002
name: {4}
default: true
display_name: {4}
type: {4}
ui_options:
advanced: {2}
invisible: {3}
"""
TEMPLATE_FILE = """
- type: cluster
name: group_advanced_{0}_invisible_{1}_field_advanced_{2}_invisible_{3}_activatable_{5}_{4}
version: 1
config:
- description: {4}
display_name: {4}
activatable: true
active: {5}
name: group
type: group
ui_options:
advanced: {0}
invisible: {1}
subs: &id001
- &id002
name: {4}
display_name: {4}
type: {4}
ui_options:
advanced: {2}
invisible: {3}
"""
TEMPLATE_JSON = """
- type: cluster
name: group_advanced_{0}_invisible_{1}_field_advanced_{2}_invisible_{3}_activatable_{5}_{4}
version: 1
config:
- description: {4}
display_name: {4}
name: group
type: group
activatable: true
active: {5}
ui_options:
advanced: {0}
invisible: {1}
subs: &id001
- &id002
name: {4}
display_name: {4}
default: {{}}
type: {4}
ui_options:
advanced: {2}
invisible: {3}
"""
TEMPLATE_LIST = """
- type: cluster
name: group_advanced_{0}_invisible_{1}_field_advanced_{2}_invisible_{3}_activatable_{5}_{4}
version: 1
config:
- description: {4}
display_name: {4}
name: group
type: group
activatable: true
active: {5}
ui_options:
advanced: {0}
invisible: {1}
subs: &id001
- &id002
name: {4}
display_name: {4}
default:
- /dev/rdisk0s1
- /dev/rdisk0s2
- /dev/rdisk0s3
type: {4}
ui_options:
advanced: {2}
invisible: {3}
"""
TEMPLATE_MAP = """
- type: cluster
name: group_advanced_{0}_invisible_{1}_field_advanced_{2}_invisible_{3}_activatable_{5}_{4}
version: 1
config:
- description: {4}
display_name: {4}
name: group
type: group
activatable: true
active: {5}
ui_options:
advanced: {0}
invisible: {1}
subs: &id001
- &id002
name: {4}
display_name: {4}
default:
name: Joe
age: "24"
sex: m
type: {4}
ui_options:
advanced: {2}
invisible: {3}
"""
TEMPLATE_OPTION = """
- type: cluster
name: group_advanced_{0}_invisible_{1}_field_advanced_{2}_invisible_{3}_activatable_{5}_{4}
version: 1
config:
- description: {4}
display_name: {4}
activatable: true
active: {5}
name: group
type: group
ui_options:
advanced: {0}
invisible: {1}
subs: &id001
- &id002
name: {4}
display_name: {4}
option: {{http: 80, https: 443}}
default: 80
type: {4}
ui_options:
advanced: {2}
invisible: {3}
"""
TEMPLATE_PASSWORD = """
- type: cluster
name: group_advanced_{0}_invisible_{1}_field_advanced_{2}_invisible_{3}_activatable_{5}_{4}
version: 1
config:
- description: {4}
display_name: {4}
name: group
type: group
activatable: true
active: {5}
ui_options:
advanced: {0}
invisible: {1}
subs: &id001
- &id002
name: {4}
display_name: {4}
default: password
type: {4}
ui_options:
advanced: {2}
invisible: {3}
"""
TEMPLATE_TEXT = """
- type: cluster
name: group_advanced_{0}_invisible_{1}_field_advanced_{2}_invisible_{3}_activatable_{5}_{4}
version: 1
config:
- description: {4}
display_name: {4}
name: group
type: group
activatable: true
active: {5}
ui_options:
advanced: {0}
invisible: {1}
subs: &id001
- &id002
name: {4}
display_name: {4}
default: text
type: {4}
ui_options:
advanced: {2}
invisible: {3}
"""
TEMPLATES = {"string": TEMPLATE_STRING, "password": TEMPLATE_PASSWORD, "integer": TEMPLATE_NUMBERS,
"text": TEMPLATE_TEXT, 'boolean': TEMPLATE_BOOLEAN, 'float': TEMPLATE_NUMBERS,
'option': TEMPLATE_OPTION, 'list': TEMPLATE_LIST, 'map': TEMPLATE_MAP,
'json': TEMPLATE_JSON, 'file': TEMPLATE_FILE}
for t in TYPES:
for config in DATA:
d_name = "group_advanced_{}_invisible_{}_field_advanced_{}_invisible_{}_activiatable_{}/{}".format(
config[0], config[1], config[2], config[3], config[4], t)
os.makedirs(d_name)
tmpl = ''
with open("{}/config.yaml".format(d_name), "w+") as f:
f.write(TEMPLATES[t].format(config[0], config[1], config[2], config[3], t, config[4]))
| 23.309609 | 110 | 0.544885 |
7948072fe52b21c3eab620409b0c06b23986d74f | 78,098 | py | Python | parlai/mturk/core/dev/mturk_manager.py | whitemike889/ParlAI | 48187b7aaacea5f910719074fe78d13c409e6776 | [
"MIT"
] | 1 | 2019-07-25T17:30:18.000Z | 2019-07-25T17:30:18.000Z | parlai/mturk/core/dev/mturk_manager.py | abisee/ParlAI | 5507d4745ca23b23af311673a6b0d1b7e72eb5cd | [
"MIT"
] | null | null | null | parlai/mturk/core/dev/mturk_manager.py | abisee/ParlAI | 5507d4745ca23b23af311673a6b0d1b7e72eb5cd | [
"MIT"
] | 1 | 2019-07-28T14:53:18.000Z | 2019-07-28T14:53:18.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import pickle
import threading
import time
import uuid
import errno
import requests
from parlai.mturk.core.dev.agents import (
AssignState,
AbsentAgentError,
AgentTimeoutError,
)
from parlai.mturk.core.dev.socket_manager import Packet, SocketManager
from parlai.mturk.core.dev.worker_manager import WorkerManager
from parlai.mturk.core.dev.mturk_data_handler import MTurkDataHandler
import parlai.mturk.core.dev.data_model as data_model
import parlai.mturk.core.dev.mturk_utils as mturk_utils
import parlai.mturk.core.dev.server_utils as server_utils
import parlai.mturk.core.dev.shared_utils as shared_utils
# Timeout before cancelling a world start
WORLD_START_TIMEOUT = 11
# Multiplier to apply when creating hits to ensure worker availability. As the
# number of HITs increases, this decreases
HIT_MULT_SCALE = [
# At more than 1000 HITS, most workers will become 'regulars', and we can
# discount the occasional disconnects from being a large portion of workers
(1000, 1.05),
# Between 1000 and 100 HITs, disconnecting workers take a bit more of an
# impact, so we scale a bit higher
(100, 1.1),
    # Under 100 hits, we should prepare for a larger proportion of workers that
    # try the task but never complete it
(10, 1.25),
# Under 10 hits, we need more to ensure one worker doesn't take all
(0, 1.5),
]
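# Worked example (editor's sketch, mirroring the required_hits computation in
# MTurkManager.__init__): requesting 50 conversations with 2 agents each needs
# 100 base HITs, which falls into the (100, 1.1) bucket above, so
# math.ceil(100 * 1.1) = 110 HITs are created.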
# 6 minute timeout to ensure only one thread updates the time logs.
# Those update once daily in a 30 minute window
RESET_TIME_LOG_TIMEOUT = 360
TIME_LOGS_FILE_NAME = 'working_time.pickle'
TIME_LOGS_FILE_LOCK = 'working_time.lock'
AMAZON_SNS_NAME = 'AmazonMTurk'
SNS_ASSIGN_ABANDONDED = 'AssignmentAbandoned'
SNS_ASSIGN_SUBMITTED = 'AssignmentSubmitted'
SNS_ASSIGN_RETURNED = 'AssignmentReturned'
PARLAI_MTURK_NOTICE_URL = 'http://mturk.parl.ai/mturk/mturk_notice/'
PARLAI_MTURK_UPLOAD_URL = 'http://mturk.parl.ai/mturk/mturk_stats/'
PARLAI_CRED_DIR = os.path.expanduser('~/.parlai')
PARLAI_MTURK_LOG_PERMISSION_FILE = os.path.join(
PARLAI_CRED_DIR, 'mturk_log_permission.pickle'
)
TWO_WEEKS = 60 * 60 * 24 * 7 * 2
parent_dir = os.path.dirname(os.path.abspath(__file__))
class LockFile:
flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY
def __init__(self, filename):
self.filename = filename
self.fd = None
def __enter__(self):
while self.fd is None:
try:
self.fd = os.open(self.filename, self.flags)
except OSError as e:
if e.errno == errno.EEXIST: # Failed as the file exists.
pass
time.sleep(shared_utils.THREAD_SHORT_SLEEP)
return self
def __exit__(self, *args):
os.close(self.fd)
os.remove(self.filename)
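# Usage sketch (editor's note, not part of the original module): LockFile is
# the weak file-based lock used by the time-log helpers further down; the lock
# path below is illustrative only.
#
#     with LockFile('working_time.lock') as _lock:
#         pass  # read/modify/write the shared pickle while holding the lock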
class MTurkManager:
"""Manages interactions between MTurk agents as well as direct interactions
between a world and the MTurk server.
"""
STATE_CREATED = 0 # object created
STATE_SERVER_ALIVE = 1 # heroku server running
STATE_INIT_RUN = 2 # run initialized
    STATE_ACCEPTING_WORKERS = 3  # Socket ready to receive workers
STATE_HITS_MADE = 4 # hits created
def __init__(self, opt, mturk_agent_ids, is_test=False, use_db=False):
"""Create an MTurkManager using the given setup opts and a list of
agent_ids that will participate in each conversation
"""
if not is_test:
try:
import parlai_internal.mturk.configs as local_configs
opt = local_configs.apply_default_opts(opt)
except Exception:
# not all users will be drawing configs from internal settings
pass
self.opt = opt
if self.opt['unique_worker']:
self.opt['allowed_conversations'] = 1
elif (
self.opt['max_hits_per_worker'] != 0
and self.opt['allowed_conversations'] == 0
):
self.opt['allowed_conversations'] = self.opt['max_hits_per_worker']
self.server_url = None
self.topic_arn = None
self.server_task_name = None
self.port = 443
self.task_group_id = None
self.run_id = None
self.mturk_agent_ids = mturk_agent_ids
self.task_files_to_copy = None
self.is_sandbox = opt['is_sandbox']
self.agent_pool_change_condition = threading.Condition()
self.get_onboard_world = None
self.num_conversations = opt['num_conversations']
# Determine the correct number of hits to be launching
base_required_hits = self.num_conversations * len(self.mturk_agent_ids)
for hit_amount, hit_mult in HIT_MULT_SCALE:
if base_required_hits >= hit_amount:
self.hit_mult = hit_mult
break
self.required_hits = math.ceil(base_required_hits * self.hit_mult)
self.minimum_messages = opt.get('min_messages', 0)
self.auto_approve_delay = opt.get('auto_approve_delay', 4 * 7 * 24 * 3600)
self.has_time_limit = opt.get('max_time', 0) > 0
self.socket_manager = None
self.worker_manager = WorkerManager(self, opt)
self.is_test = is_test
self.is_unique = False
self.max_hits_per_worker = opt.get('max_hits_per_worker', 0)
self.is_shutdown = False
self.use_db = use_db # TODO enable always DB integration is complete
self.db_logger = None
self.logging_permitted = False # Enables logging to parl.ai
self.task_state = self.STATE_CREATED
if opt.get('tmp_dir') is None:
opt['tmp_dir'] = shared_utils.get_tmp_dir()
self.tmp_dir = opt['tmp_dir']
self._init_logging_config()
self._assert_opts()
@staticmethod
def make_taskless_instance(is_sandbox=False):
"""Creates an instance without a task to be used for approving or
rejecting assignments, blocking workers, and managing qualifications
"""
opt = {
'unique_worker': False,
'max_hits_per_worker': 0,
'num_conversations': 0,
'is_sandbox': is_sandbox,
'is_debug': False,
'log_level': 30,
}
manager = MTurkManager(opt, [], use_db=True)
manager.is_shutdown = True
mturk_utils.setup_aws_credentials()
return manager
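    # Usage sketch (editor's note): a taskless instance is enough for offline
    # HIT management; the ids below are placeholders.
    #
    #     manager = MTurkManager.make_taskless_instance(is_sandbox=True)
    #     manager.approve_work('SOME_ASSIGNMENT_ID')
    #     manager.block_worker('SOME_WORKER_ID', 'reason for the block')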
# Helpers and internal manager methods #
def _assert_opts(self):
"""Manages ensuring everything about the passed in options make sense
in that they don't conflict in some way or another"""
if self.opt.get('allow_reviews') and len(self.mturk_agent_ids) != 2:
shared_utils.print_and_log(
logging.WARN,
'[OPT CONFIGURATION ISSUE] '
'allow_reviews is currently only supported on 2 person tasks, '
'overriding this value to false.',
should_print=True,
)
self.opt['allow_reviews'] = False
if self.opt.get('frontend_version', 0) < 1:
# Ensure no react only features have been set
features = ['frame_height', 'allow_reviews', 'block_mobile']
for feat in features:
if self.opt.get(feat) is not None:
shared_utils.print_and_log(
logging.WARN,
'[OPT CONFIGURATION ISSUE] '
'{} only works when using the react frontend '
'(frontend_version >= 1), so this option will be '
'ignored'.format(feat),
should_print=True,
)
def _init_state(self):
"""Initialize everything in the worker, task, and thread states"""
# TODO handle pooling in own class, note this is an agent_pool
self.agent_pool = []
# TODO move some state to DB
self.hit_id_list = [] # list of outstanding incomplete hits
self.assignment_to_onboard_thread = {}
self.conversation_index = 0
self.started_conversations = 0
self.completed_conversations = 0
self.task_threads = []
self.accepting_workers = True
self._reset_time_logs(init_load=True)
self.qualifications = None
self.unique_qual_name = None
self.time_limit_checked = time.time()
self.task_state = self.STATE_INIT_RUN
self.last_hit_check = time.time()
if self.use_db:
db_filename = 'pmt_sbdata.db' if self.is_sandbox else 'pmt_data.db'
self.db_logger = MTurkDataHandler(self.task_group_id, db_filename)
def _init_logging_config(self):
"""Initialize logging settings from the opt"""
if self.use_db and not self.opt['is_debug']:
shared_utils.disable_logging()
else:
shared_utils.set_is_debug(self.opt['is_debug'])
shared_utils.set_log_level(self.opt['log_level'])
def _logging_permission_check(self):
if self.is_test:
return False
if not os.path.exists(PARLAI_CRED_DIR):
os.makedirs(PARLAI_CRED_DIR)
if os.path.exists(PARLAI_MTURK_LOG_PERMISSION_FILE):
with open(PARLAI_MTURK_LOG_PERMISSION_FILE, 'rb') as perm_file:
permissions = pickle.load(perm_file)
if permissions['allowed'] is True:
return True
elif time.time() - permissions['asked_time'] < TWO_WEEKS:
return False
# Snooze expired
os.remove(PARLAI_MTURK_LOG_PERMISSION_FILE)
print(
'Would you like to help improve ParlAI-MTurk by providing some '
'metrics? We would like to record acceptance, completion, and '
'disconnect rates by worker. These metrics let us track the '
'health of the platform. If you accept we\'ll collect this data '
'on all of your future runs. We\'d ask before collecting anything '
            'else, but currently we have no plans to. Declining will '
            'snooze this request for 2 weeks.'
)
selected = ''
while selected not in ['y', 'Y', 'n', 'N']:
selected = input('Share worker rates? (y/n): ')
if selected not in ['y', 'Y', 'n', 'N']:
print('Must type one of (Y/y/N/n)')
if selected in ['y', 'Y']:
print('Thanks for helping us make the platform better!')
permissions = {'allowed': selected in ['y', 'Y'], 'asked_time': time.time()}
with open(PARLAI_MTURK_LOG_PERMISSION_FILE, 'wb+') as perm_file:
pickle.dump(permissions, perm_file)
return permissions['allowed']
def _upload_worker_data(self):
"""Uploads worker data acceptance and completion rates to the parlai
server
"""
worker_data = self.worker_manager.get_worker_data_package()
data = {'worker_data': worker_data}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
try:
requests.post(PARLAI_MTURK_UPLOAD_URL, json=data, headers=headers)
except Exception:
shared_utils.print_and_log(
logging.WARNING,
'Unable to log worker statistics to parl.ai',
should_print=True,
)
def _maintain_hit_status(self):
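        """Start a daemon thread that polls the status of outstanding HITs
        roughly every 10 seconds and removes HITs that have reached a
        completed state (Reviewable, Reviewing, or Disposed) from
        self.hit_id_list.
        """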
def update_status():
while len(self.hit_id_list) > 0:
cur_time = time.time()
if cur_time - self.last_hit_check > 10:
self.last_hit_check = cur_time
for hit_id in self.hit_id_list.copy():
hit = self.get_hit(hit_id)
hit_data = hit['HIT']
if hit_data['HITStatus'] in [
'Reviewable',
'Reviewing',
'Disposed',
]:
self.hit_id_list.remove(hit_id)
time.sleep(10)
hit_status_thread = threading.Thread(
target=update_status, name='Hit-Status-Thread', daemon=True
)
hit_status_thread.start()
def _should_use_time_logs(self):
# Used to ensure time logs are properly tracked. Can be overridden for
# testing
        return not self.is_sandbox
def _reset_time_logs(self, init_load=False, force=False):
# Uses a weak lock file to try to prevent clobbering between threads
if not self._should_use_time_logs():
return # sandbox doesn't check logs
file_path = os.path.join(parent_dir, TIME_LOGS_FILE_NAME)
file_lock = os.path.join(parent_dir, TIME_LOGS_FILE_LOCK)
with LockFile(file_lock) as _lock_file:
assert _lock_file is not None
if os.path.exists(file_path):
with open(file_path, 'rb+') as time_log_file:
existing_times = pickle.load(time_log_file)
# Initial loads should only reset if it's been a day,
# otherwise only need to check an hour for safety
compare_time = 24 * 60 * 60 if init_load else 60 * 60
if (
time.time() - existing_times['last_reset'] < compare_time
and not force
):
return # do nothing if it's been less than a day
reset_workers = list(existing_times.keys())
reset_workers.remove('last_reset')
if len(reset_workers) != 0:
self.worker_manager.un_time_block_workers(reset_workers)
# Reset the time logs
os.remove(file_path)
# new time logs
with open(file_path, 'wb+') as time_log_file:
time_logs = {'last_reset': time.time()}
pickle.dump(time_logs, time_log_file, pickle.HIGHEST_PROTOCOL)
# TODO move working times into the DB
def _log_working_time(self, mturk_agent):
if not self._should_use_time_logs():
return # sandbox does not log working time
additional_time = time.time() - mturk_agent.creation_time
worker_id = mturk_agent.worker_id
file_path = os.path.join(parent_dir, TIME_LOGS_FILE_NAME)
file_lock = os.path.join(parent_dir, TIME_LOGS_FILE_LOCK)
with LockFile(file_lock) as _lock_file:
assert _lock_file is not None
if not os.path.exists(file_path):
self._reset_time_logs()
with open(file_path, 'rb+') as time_log_file:
existing_times = pickle.load(time_log_file)
total_work_time = existing_times.get(worker_id, 0)
total_work_time += additional_time
existing_times[worker_id] = total_work_time
os.remove(file_path)
with open(file_path, 'wb+') as time_log_file:
pickle.dump(existing_times, time_log_file, pickle.HIGHEST_PROTOCOL)
if total_work_time > int(self.opt.get('max_time')):
self.worker_manager.time_block_worker(worker_id)
def _move_agents_to_waiting(self, agents):
"""Put all agents into waiting worlds, expire them if no longer
accepting agents. If the agent is already final, clean it.
Add workers in waiting worlds to the worker pool.
"""
for agent in agents:
worker_id = agent.worker_id
assignment_id = agent.assignment_id
if agent.is_final():
agent.reduce_state()
self.socket_manager.close_channel(agent.get_connection_id())
continue
conversation_id = 'w_{}'.format(uuid.uuid4())
if self.accepting_workers:
# Move the worker into a waiting world
agent.set_status(
AssignState.STATUS_WAITING,
conversation_id=conversation_id,
agent_id='waiting',
)
self._add_agent_to_pool(agent)
else:
self.force_expire_hit(worker_id, assignment_id)
def _expire_onboarding_pool(self):
"""Expire any agent that is in an onboarding thread"""
def expire_func(agent):
self.force_expire_hit(agent.worker_id, agent.assignment_id)
def is_onboard(agent):
return agent.get_status() == AssignState.STATUS_ONBOARDING
self.worker_manager.map_over_agents(expire_func, is_onboard)
def _expire_agent_pool(self):
"""Expire all workers in the worker pool"""
for agent in self.agent_pool.copy():
self.force_expire_hit(agent.worker_id, agent.assignment_id)
with self.agent_pool_change_condition:
self._remove_from_agent_pool(agent)
def _get_unique_pool(self, eligibility_function):
"""Return a filtered version of the worker pool where each worker is
only listed a maximum of one time. In sandbox this is overridden for
testing purposes, and the same worker can be returned more than once
"""
pool = [a for a in self.agent_pool if not a.hit_is_returned]
if eligibility_function['multiple'] is True:
agents = eligibility_function['func'](pool)
else:
agents = [a for a in pool if eligibility_function['func'](a)]
unique_agents = []
unique_worker_ids = []
for agent in agents:
if (self.is_sandbox) or (agent.worker_id not in unique_worker_ids):
unique_agents.append(agent)
unique_worker_ids.append(agent.worker_id)
return unique_agents
def _add_agent_to_pool(self, agent):
"""Add a single agent to the pool"""
if agent not in self.agent_pool:
# Add the agent to pool
with self.agent_pool_change_condition:
if agent not in self.agent_pool:
shared_utils.print_and_log(
logging.DEBUG,
"Adding worker {} to pool.".format(agent.worker_id),
)
self.agent_pool.append(agent)
def _remove_from_agent_pool(self, agent):
"""Remove an agent from the pool. should be called under the
agent_pool_change_condition being set.
"""
assert agent in self.agent_pool, 'agent not in pool'
self.agent_pool.remove(agent)
def _handle_agent_disconnect(self, worker_id, assignment_id):
"""Mark a worker as disconnected and send a message to all agents in
his conversation that a partner has disconnected.
"""
self.worker_manager.handle_agent_disconnect(
worker_id, assignment_id, self._handle_partner_disconnect
)
def _handle_partner_disconnect(self, agent):
"""Send a message to an agent notifying them that a partner has
disconnected and we marked the HIT as complete for them
"""
if agent is not None and not agent.is_final():
# Update the assignment state
agent.some_agent_disconnected = True
agent_messages = [
m for m in agent.get_messages() if 'id' in m and m['id'] == agent.id
]
if len(agent_messages) < self.minimum_messages:
# TODO move worker back to pool if hasn't sent message yet,
# remove disconnect_early
agent.set_status(AssignState.STATUS_PARTNER_DISCONNECT_EARLY)
else:
agent.set_status(AssignState.STATUS_PARTNER_DISCONNECT)
# Create and send the command
data = {
'agent_status': AssignState.STATUS_PARTNER_DISCONNECT,
'done_text': 'One of your partners disconnected in the middle of the '
'HIT. We won\'t penalize you for their disconnect, so '
'please use the button below to mark the HIT as complete.',
}
def disconnect_agent(*args):
self.socket_manager.close_channel(agent.get_connection_id())
self.send_state_change(
agent.worker_id, agent.assignment_id, data, ack_func=disconnect_agent
)
def _setup_socket(self, timeout_seconds=None):
"""Set up a socket_manager with defined callbacks"""
assert (
self.task_state >= self.STATE_INIT_RUN
), 'socket cannot be set up until run is started'
socket_server_url = self.server_url
if self.opt['local']: # skip some hops for local stuff
socket_server_url = "https://localhost"
self.socket_manager = SocketManager(
socket_server_url,
self.port,
self._on_alive,
self._on_new_message,
self._on_socket_dead,
self.task_group_id,
socket_dead_timeout=timeout_seconds,
server_death_callback=self.shutdown,
)
def _on_alive(self, pkt):
"""Update MTurkManager's state when a worker sends an
alive packet. This asks the socket manager to open a new channel and
then handles ensuring the worker state is consistent
"""
shared_utils.print_and_log(logging.DEBUG, 'on_agent_alive: {}'.format(pkt))
worker_id = pkt.data['worker_id']
hit_id = pkt.data['hit_id']
assign_id = pkt.data['assignment_id']
conversation_id = pkt.data['conversation_id']
if not assign_id:
# invalid assignment_id is an auto-fail
shared_utils.print_and_log(
logging.WARN,
'Agent ({}) with no assign_id called alive'.format(worker_id),
)
return
# Open a channel if it doesn't already exist
self.socket_manager.open_channel(worker_id, assign_id)
# Get a state for this worker, create if non existing
worker_state = self.worker_manager.worker_alive(worker_id)
if self.db_logger is not None:
self.db_logger.log_worker_note(
worker_id,
assign_id,
'Reconnected with conversation_id {} at {}'.format(
conversation_id, time.time()
),
)
if not worker_state.has_assignment(assign_id):
# New connection for the worker. First ensure that this connection
# isn't violating our uniqueness constraints
completed_assignments = worker_state.completed_assignments()
max_hits = self.max_hits_per_worker
if (self.is_unique and completed_assignments > 0) or (
max_hits != 0 and completed_assignments > max_hits
):
text = (
'You have already participated in this HIT the maximum '
'number of times. This HIT is now expired. '
'Please return the HIT.'
)
self.force_expire_hit(worker_id, assign_id, text)
return
# Ensure we are still accepting workers
if not self.accepting_workers:
self.force_expire_hit(worker_id, assign_id)
return
# Ensure worker has not exceeded concurrent convo cap
convs = worker_state.active_conversation_count()
allowed_convs = self.opt['allowed_conversations']
if allowed_convs > 0 and convs >= allowed_convs:
text = (
'You can participate in only {} of these HITs at '
'once. Please return this HIT and finish your '
'existing HITs before accepting more.'.format(allowed_convs)
)
self.force_expire_hit(worker_id, assign_id, text)
return
# Initialize a new agent for this worker
self.worker_manager.assign_task_to_worker(hit_id, assign_id, worker_id)
if self.db_logger is not None:
self.db_logger.log_worker_accept_assignment(
worker_id, assign_id, hit_id
)
agent = self.worker_manager._get_agent(worker_id, assign_id)
self._onboard_new_agent(agent)
else:
# Reconnecting worker should no longer happen
shared_utils.print_and_log(
logging.WARN,
'Agent ({}) is reconnecting to {}'.format(worker_id, assign_id),
)
def _handle_mturk_message(self, pkt):
assignment_id = pkt.assignment_id
agent = self.worker_manager.get_agent_for_assignment(assignment_id)
if agent is None:
return
mturk_event_type = pkt.data['text']
if mturk_event_type == SNS_ASSIGN_RETURNED:
agent.hit_is_returned = True
# Treat as a socket_dead event
self._on_socket_dead(agent.worker_id, assignment_id)
elif mturk_event_type == SNS_ASSIGN_ABANDONDED:
agent.hit_is_returned = True
# Treat as a socket_dead event
self._on_socket_dead(agent.worker_id, assignment_id)
elif mturk_event_type == SNS_ASSIGN_SUBMITTED:
# Socket dead already called, just mark as complete
agent.hit_is_complete = True
def _on_new_message(self, pkt):
"""Handle incoming messages from Amazon's SNS queue. All other packets
should be handled by the worker_manager
"""
if pkt.sender_id == AMAZON_SNS_NAME:
self._handle_mturk_message(pkt)
return
self.worker_manager.route_packet(pkt)
def _on_socket_dead(self, worker_id, assignment_id):
"""Handle a disconnect event, update state as required and notifying
other agents if the disconnected agent was in conversation with them
returns False if the socket death should be ignored and the socket
should stay open and not be considered disconnected
"""
agent = self.worker_manager._get_agent(worker_id, assignment_id)
if agent is None:
# This worker never registered, so we don't do anything
return
shared_utils.print_and_log(
logging.DEBUG,
'Worker {} disconnected from {} in status {}'.format(
worker_id, agent.conversation_id, agent.get_status()
),
)
if agent.get_status() == AssignState.STATUS_NONE:
# Agent never made it to onboarding, delete
agent.set_status(AssignState.STATUS_DISCONNECT)
agent.reduce_state()
elif agent.get_status() == AssignState.STATUS_ONBOARDING:
# Agent never made it to task pool, the onboarding thread will die
# and delete the agent if we mark it as a disconnect
agent.set_status(AssignState.STATUS_DISCONNECT)
agent.reduce_state()
agent.disconnected = True
elif agent.get_status() == AssignState.STATUS_WAITING:
# agent is in pool, remove from pool and delete
if agent in self.agent_pool:
with self.agent_pool_change_condition:
self._remove_from_agent_pool(agent)
agent.set_status(AssignState.STATUS_DISCONNECT)
agent.reduce_state()
agent.disconnected = True
elif agent.get_status() == AssignState.STATUS_IN_TASK:
self._handle_agent_disconnect(worker_id, assignment_id)
agent.disconnected = True
elif agent.get_status() == AssignState.STATUS_DONE:
# It's okay if a complete assignment socket dies, but wait for the
# world to clean up the resource
return
self.socket_manager.close_channel(agent.get_connection_id())
def _onboard_new_agent(self, mturk_agent):
"""Handle creating an onboarding thread and moving an agent through
the onboarding process, updating the state properly along the way
Returns True if a thread is launched, False if the call is ignored.
"""
# get state variable in question
worker_id = mturk_agent.worker_id
assignment_id = mturk_agent.assignment_id
def _onboard_function(mturk_agent):
"""Onboarding wrapper to set state to onboarding properly"""
if self.get_onboard_world:
conversation_id = 'o_' + str(uuid.uuid4())
agent.set_status(
AssignState.STATUS_ONBOARDING,
conversation_id=conversation_id,
agent_id='onboarding',
)
# call onboarding function
try:
world = self.get_onboard_world(mturk_agent)
while not world.episode_done():
world.parley()
except AgentTimeoutError:
self.handle_turker_timeout(
mturk_agent.worker_id, mturk_agent.assignment_id
)
except AbsentAgentError:
pass # agent state already updated
world.shutdown()
world.review_work()
save_data = world.prep_save_data([mturk_agent])
if save_data is not None:
MTurkDataHandler.save_world_data(
save_data,
self.task_group_id,
conversation_id,
sandbox=self.is_sandbox,
)
mturk_agent.clear_messages()
# once onboarding is done, move into a waiting world
self._move_agents_to_waiting([mturk_agent])
if assignment_id in self.assignment_to_onboard_thread:
            if self.assignment_to_onboard_thread[assignment_id].is_alive():
return False
agent = self.worker_manager.get_agent_for_assignment(assignment_id)
# Only start an onboarding world if the worker never got a world
if agent.get_status() != AssignState.STATUS_NONE:
return False
# Start the onboarding thread and run it
onboard_thread = threading.Thread(
target=_onboard_function,
args=(mturk_agent,),
name='onboard-{}-{}'.format(worker_id, assignment_id),
)
onboard_thread.daemon = True
onboard_thread.start()
self.assignment_to_onboard_thread[assignment_id] = onboard_thread
return True
def _no_agents_incomplete(self, agents):
"""Return True if all the given agents completed their task"""
for agent in agents:
if not agent.is_final() or agent.get_status() != AssignState.STATUS_DONE:
return False
return True
def _check_time_limit(self):
if time.time() - self.time_limit_checked < RESET_TIME_LOG_TIMEOUT:
return
if int(time.time()) % (60 * 60 * 24) > (60 * 30):
# sync the time resets to ONCE DAILY in a 30 minute window
return
self.time_limit_checked = time.time()
self._reset_time_logs()
self.worker_manager.un_time_block_workers()
def is_onboarding_world(self, conversation_id):
return conversation_id is not None and conversation_id.startswith('o_')
def is_waiting_world(self, conversation_id):
return conversation_id is not None and conversation_id.startswith('w_')
def is_task_world(self, conversation_id):
return conversation_id is not None and conversation_id.startswith('t_')
# Manager Lifecycle Functions #
def populate_task_files(self, task_directory_path):
        # Populate files to copy over to the server
if not self.task_files_to_copy:
self.task_files_to_copy = {
'static': [],
'components': [],
'css': [],
'needs_build': None,
}
if not task_directory_path:
task_directory_path = os.path.join(
self.opt['parlai_home'], 'parlai', 'mturk', 'tasks', self.opt['task']
)
self.task_files_to_copy['static'].append(
os.path.join(task_directory_path, 'frontend', 'static', 'cover_page.html')
)
try:
frontend_contents = os.listdir(
os.path.join(task_directory_path, 'frontend')
)
if 'package.json' in frontend_contents:
# We take a package file to mean that this component will
# need to be built separately before importing
self.task_files_to_copy['needs_build'] = os.path.join(
task_directory_path, 'frontend'
)
for dir in frontend_contents:
if dir in self.task_files_to_copy:
for file_name in os.listdir(
os.path.join(task_directory_path, 'frontend', dir)
):
self.task_files_to_copy[dir].append(
os.path.join(
task_directory_path, 'frontend', dir, file_name
)
)
except FileNotFoundError: # noqa F821 we don't support python2
# No frontend dir exists
pass
def setup_server(self, task_directory_path=None):
"""Prepare the MTurk server for the new HIT we would like to submit"""
assert self.task_state >= self.STATE_CREATED
fin_word = 'start'
if self.opt['count_complete']:
fin_word = 'finish'
shared_utils.print_and_log(
logging.INFO,
'\nYou are going to allow workers from Amazon Mechanical Turk to '
            'be agents in ParlAI.\nDuring this process, an Internet connection '
'is required, and you should turn off your computer\'s auto-sleep '
'feature.',
should_print=True,
)
if self.opt['max_connections'] == 0:
shared_utils.print_and_log(
logging.INFO,
'Enough HITs will be created to fulfill {} times the '
'number of conversations requested, extra HITs will be expired'
' once the desired conversations {}.'
''.format(self.hit_mult, fin_word),
should_print=True,
)
else:
shared_utils.print_and_log(
logging.INFO,
'Enough HITs will be launched over time '
'up to a max of {} times the amount requested until the '
'desired number of conversations {}.'
''.format(self.hit_mult, fin_word),
should_print=True,
)
input('Please press Enter to continue... ')
shared_utils.print_and_log(logging.NOTSET, '', True)
if self.opt['local'] is True:
shared_utils.print_and_log(
logging.INFO,
"In order to run the server locally, you will need "
"to have a public HTTPS endpoint (SSL signed) running on "
"the server you are currently excecuting ParlAI on. Enter "
"that public URL hostname when prompted and ensure that the "
"port being used by ParlAI (usually 3000) has external "
"traffic routed to it.",
should_print=True,
)
input('Please press Enter to continue... ')
mturk_utils.setup_aws_credentials()
# See if there's enough money in the account to fund the HITs requested
num_assignments = self.required_hits
payment_opt = {
'type': 'reward',
'num_total_assignments': num_assignments,
'reward': self.opt['reward'], # in dollars
}
total_cost = mturk_utils.calculate_mturk_cost(payment_opt=payment_opt)
if not mturk_utils.check_mturk_balance(
balance_needed=total_cost, is_sandbox=self.opt['is_sandbox']
):
raise SystemExit('Insufficient funds')
if (not self.opt['is_sandbox']) and (
total_cost > 100 or self.opt['reward'] > 1
):
confirm_string = '$%.2f' % total_cost
expected_cost = total_cost / self.hit_mult
expected_string = '$%.2f' % expected_cost
shared_utils.print_and_log(
logging.INFO,
'You are going to create {} HITs at {} per assignment, for a '
'total cost up to {} after MTurk fees. Please enter "{}" to '
'confirm and continue, and anything else to cancel.\nNote that'
' of the {}, the target amount to spend is {}.'.format(
self.required_hits,
'$%.2f' % self.opt['reward'],
confirm_string,
confirm_string,
confirm_string,
expected_string,
),
should_print=True,
)
check = input('Enter here: ')
if check != confirm_string and ('$' + check) != confirm_string:
raise SystemExit('Cancelling')
# Check to see if there are any additional notices on the parlai site
if not self.is_test:
shared_utils.print_and_log(
logging.INFO,
'Querying the parlai website for possible notices...',
should_print=True,
)
endpoint = 'sandbox' if self.is_sandbox else 'live'
notice_url = PARLAI_MTURK_NOTICE_URL + endpoint
try:
import parlai_internal.mturk.configs as local_configs
notice_url = local_configs.get_true_url(notice_url)
except Exception:
# not all users will be drawing configs from internal settings
pass
try:
resp = requests.post(notice_url)
warnings = resp.json()
for warn in warnings:
print('Notice: ' + warn)
accept = input('Continue? (Y/n): ')
if accept == 'n':
raise SystemExit('Additional notice was rejected.')
except Exception:
print('Unable to query warnings from the parl.ai website.')
accept = input('Continue without checking warnings? (Y/n): ')
if accept == 'n':
raise SystemExit('Aborted.')
self.logging_permitted = self._logging_permission_check()
shared_utils.print_and_log(
logging.INFO, 'Setting up MTurk server...', should_print=True
)
self.is_unique = self.opt['unique_worker']
self.max_hits_per_worker = self.opt.get('max_hits_per_worker', 0)
mturk_utils.create_hit_config(
opt=self.opt,
task_description=self.opt['task_description'],
unique_worker=self.is_unique,
is_sandbox=self.opt['is_sandbox'],
)
# Setup the server with a likely-unique app-name
task_name = '{}-{}'.format(str(uuid.uuid4())[:8], self.opt['task'])
self.server_task_name = ''.join(
e for e in task_name.lower() if e.isalnum() or e == '-'
)
if 'heroku_team' in self.opt:
heroku_team = self.opt['heroku_team']
else:
heroku_team = None
assert self.opt.get('frontend_version', 0) > 0, (
'Tasks requiring the legacy frontend have to use the legacy '
'infrastructure. This can be done by importing from '
'parlai.mturk.core.legacy_2018.mturk_manager in your run code.'
)
self.populate_task_files(task_directory_path)
self.server_url = server_utils.setup_server(
self.server_task_name,
self.task_files_to_copy,
self.opt['local'],
heroku_team,
self.opt['hobby'],
tmp_dir=self.opt['tmp_dir'],
)
shared_utils.print_and_log(logging.INFO, self.server_url)
shared_utils.print_and_log(
logging.INFO, "MTurk server setup done.\n", should_print=True
)
self.task_state = self.STATE_SERVER_ALIVE
def start_new_run(self):
"""Clear state to prepare for a new run"""
assert self.task_state >= self.STATE_SERVER_ALIVE, (
'Cannot start a run before having a running server using '
'`mturk_manager.setup_server()` first.'
)
self.run_id = str(int(time.time()))
self.task_group_id = '{}_{}'.format(self.opt['task'], self.run_id)
self._init_state()
try:
self.topic_arn = mturk_utils.setup_sns_topic(
self.opt['task'], self.server_url, self.task_group_id
)
except Exception as e:
self.topic_arn = None
shared_utils.print_and_log(
logging.WARN,
'Botocore couldn\'t subscribe to HIT events, '
'perhaps you tried to register to localhost?',
should_print=True,
)
print(repr(e))
if self.db_logger is not None:
self.db_logger.log_new_run(self.required_hits, self.opt['task'])
self.task_state = self.STATE_INIT_RUN
def ready_to_accept_workers(self, timeout_seconds=None):
"""Set up socket to start communicating to workers"""
assert self.task_state >= self.STATE_INIT_RUN, (
'Cannot be ready to accept workers before starting a run with '
'`mturk_manager.start_new_run()` first.'
)
shared_utils.print_and_log(
logging.INFO, 'Local: Setting up WebSocket...', not self.is_test
)
self._setup_socket(timeout_seconds=timeout_seconds)
shared_utils.print_and_log(logging.INFO, 'WebSocket set up!', should_print=True)
# Just in case create_hits was called first. To be removed when that
# workflow is no longer supported
if self.STATE_ACCEPTING_WORKERS > self.task_state:
self.task_state = self.STATE_ACCEPTING_WORKERS
def set_get_onboard_world(self, get_onboard_world):
self.get_onboard_world = get_onboard_world
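    # Editor's sketch of the factory expected here: _onboard_new_agent calls
    # it with a single MTurk agent and runs the returned world to completion.
    # `MyOnboardWorld` is a hypothetical world class.
    #
    #     def make_onboard_world(mturk_agent):
    #         return MyOnboardWorld(opt, mturk_agent)
    #
    #     mturk_manager.set_get_onboard_world(make_onboard_world)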
def move_agent_to_task(self, agent, new_conversation_id):
agent.set_status(
AssignState.STATUS_IN_TASK,
conversation_id=new_conversation_id,
agent_id=agent.id,
)
# Remove selected agents from the pool
self._remove_from_agent_pool(agent)
def start_task(self, eligibility_function, assign_role_function, get_task_world):
"""Handle running a task by checking to see when enough agents are
in the pool to start an instance of the task. Continue doing this
until the desired number of conversations is had.
"""
assert self.task_state >= self.STATE_HITS_MADE, (
'Must have launched HITs with `mturk_manager.create_hits`'
' to start the task'
)
if callable(eligibility_function):
# Convert legacy eligibility_functions to the new format
eligibility_function = {'multiple': False, 'func': eligibility_function}
else:
# Ensure the eligibility function is valid
if 'func' not in eligibility_function:
shared_utils.print_and_log(
logging.CRITICAL, "eligibility_function has no 'func'. Cancelling."
)
raise Exception(
'eligibility_function dict must contain a `func` field '
'containing the actual function.'
)
elif not callable(eligibility_function['func']):
shared_utils.print_and_log(
logging.CRITICAL,
"eligibility_function['func'] not a function. Cancelling.",
)
raise Exception(
"eligibility_function['func'] must contain a function. "
"If eligibility_function['multiple'] is set, it should "
"filter through the list of workers and only return those "
"that are currently eligible to participate. If it is not "
"set, it should take in a single worker and return whether"
" or not they are eligible."
)
if 'multiple' not in eligibility_function:
eligibility_function['multiple'] = False
def _task_function(opt, agents, conversation_id):
"""Wait for agents to join the world, then run task function"""
shared_utils.print_and_log(
logging.INFO, 'Starting task {}...'.format(conversation_id)
)
shared_utils.print_and_log(
logging.DEBUG, 'Waiting for all agents to join the conversation...'
)
start_time = time.time()
while True:
all_joined = True
for agent in agents:
# check the status of an individual agent assignment
if agent.get_status() != AssignState.STATUS_IN_TASK:
all_joined = False
if all_joined:
break
if time.time() - start_time > WORLD_START_TIMEOUT:
# We waited but not all agents rejoined, throw agents
# back into the waiting pool. Stragglers will disconnect
# from there
shared_utils.print_and_log(
logging.INFO,
'Timeout waiting for {}, move back to waiting'.format(
conversation_id
),
)
self._move_agents_to_waiting(agents)
return
time.sleep(shared_utils.THREAD_SHORT_SLEEP)
shared_utils.print_and_log(
logging.INFO,
'All agents joined the conversation {}!'.format(conversation_id),
)
self.started_conversations += 1
world = get_task_world(mturk_manager=self, opt=opt, workers=agents)
# run the world to completion or error
try:
while not world.episode_done():
world.parley()
except AgentTimeoutError as e:
self.handle_turker_timeout(e.worker_id, e.assignment_id)
except AbsentAgentError:
pass # disconnect already managed
# shutdown and review the work
world.shutdown()
world.review_work()
# Return the contents for saving
save_data = world.prep_save_data(agents)
if save_data is not None:
MTurkDataHandler.save_world_data(
save_data,
self.task_group_id,
conversation_id,
sandbox=self.is_sandbox,
)
# Delete extra state data that is now unneeded
for agent in agents:
agent.clear_messages()
# Count if it's a completed conversation
if self._no_agents_incomplete(agents):
self.completed_conversations += 1
if self.opt['max_connections'] > 0: # If using a conv cap
if self.accepting_workers: # if still looking for new agents
for agent in agents:
if agent.submitted_hit():
self.create_additional_hits(1)
if self.db_logger is not None:
self._maintain_hit_status()
while not self.is_shutdown:
if self.has_time_limit:
self._check_time_limit()
# Loop forever starting task worlds until desired convos are had
with self.agent_pool_change_condition:
valid_agents = self._get_unique_pool(eligibility_function)
needed_agents = len(self.mturk_agent_ids)
if len(valid_agents) >= needed_agents:
# enough agents in pool to start new conversation
self.conversation_index += 1
new_conversation_id = 't_{}'.format(self.conversation_index)
# Add the required number of valid agents to the conv
agents = [a for a in valid_agents[:needed_agents]]
assign_role_function(agents)
# Allow task creator to filter out agents and run
# versions of the task that require fewer agents
agents = [a for a in agents if a.id is not None]
for agent in agents:
self.move_agent_to_task(agent, new_conversation_id)
# Start a new thread for this task world
task_thread = threading.Thread(
target=_task_function,
args=(self.opt, agents, new_conversation_id),
name='task-{}'.format(new_conversation_id),
)
task_thread.daemon = True
task_thread.start()
self.task_threads.append(task_thread)
# Once we've had enough conversations, finish and break
compare_count = self.started_conversations
if self.opt['count_complete']:
compare_count = self.completed_conversations
if compare_count >= self.num_conversations:
self.accepting_workers = False
self.expire_all_unassigned_hits()
self._expire_onboarding_pool()
self._expire_agent_pool()
# Wait for all conversations to finish, then break from
# the while loop
for thread in self.task_threads:
thread.join()
break
time.sleep(shared_utils.THREAD_MEDIUM_SLEEP)
def _wait_for_task_expirations(self):
"""Wait for the full task duration to ensure anyone who sees the task
has it expired, and ensures that all tasks are properly expired
"""
start_time = time.time()
min_wait = self.opt['assignment_duration_in_seconds']
while time.time() - start_time < min_wait and len(self.hit_id_list) > 0:
self.expire_all_unassigned_hits()
time.sleep(max(self.opt['assignment_duration_in_seconds'] / 60, 0.1))
def shutdown(self, force=False):
"""Handle any mturk client shutdown cleanup."""
# Ensure all threads are cleaned and state and HITs are handled
if self.is_shutdown and not force:
return
self.is_shutdown = True
try:
self.expire_all_unassigned_hits()
self._expire_onboarding_pool()
self._expire_agent_pool()
self._wait_for_task_expirations()
for assignment_id in self.assignment_to_onboard_thread:
self.assignment_to_onboard_thread[assignment_id].join()
except BaseException:
pass
finally:
if self.server_task_name is not None:
server_utils.delete_server(
self.server_task_name,
self.opt['local'],
tmp_dir=self.opt['tmp_dir'],
)
if self.topic_arn is not None:
mturk_utils.delete_sns_topic(self.topic_arn)
if self.opt['unique_worker'] and not self.opt['unique_qual_name']:
mturk_utils.delete_qualification(self.unique_qual_id, self.is_sandbox)
if self.socket_manager is not None:
self.socket_manager.shutdown()
if self.logging_permitted and not self.is_sandbox and not self.is_test:
self._upload_worker_data()
if self.worker_manager is not None:
self.worker_manager.shutdown()
# MTurk Agent Interaction Functions #
def force_expire_hit(self, worker_id, assign_id, text=None, ack_func=None):
"""Send a command to expire a hit to the provided agent, update State
to reflect that the HIT is now expired
"""
# Expire in the state
agent = self.worker_manager._get_agent(worker_id, assign_id)
if agent is not None:
if agent.is_final():
return
agent.set_status(AssignState.STATUS_EXPIRED)
agent.hit_is_expired = True
if ack_func is None:
def use_ack_func(*args):
self.socket_manager.close_channel('{}_{}'.format(worker_id, assign_id))
else:
def use_ack_func(*args):
ack_func(*args)
self.socket_manager.close_channel('{}_{}'.format(worker_id, assign_id))
# Send the expiration command
if text is None:
text = (
                'This HIT is expired, please return it and take a new '
                'one if you\'d like to work on this task.'
)
data = {'agent_status': AssignState.STATUS_EXPIRED, 'done_text': text}
self.send_state_change(worker_id, assign_id, data, ack_func=use_ack_func)
def handle_turker_timeout(self, worker_id, assign_id):
"""To be used by the MTurk agent when the worker doesn't send a message
within the expected window.
"""
# Expire the hit for the disconnected user
text = (
            'You haven\'t entered a message for too long. As these HITs '
            'often require real-time interaction, this HIT has '
'been expired and you have been considered disconnected. '
'Disconnect too frequently and you will be blocked from '
'working on these HITs in the future.'
)
self.force_expire_hit(worker_id, assign_id, text)
# Send the disconnect event to all workers in the convo
self._handle_agent_disconnect(worker_id, assign_id)
def send_message(
self, receiver_id, assignment_id, data, blocking=True, ack_func=None
):
"""Send a message through the socket manager,
update conversation state
"""
data = data.copy() # Ensure data packet is sent in current state
data['type'] = data_model.MESSAGE_TYPE_ACT
# Force messages to have a unique ID
if 'message_id' not in data:
data['message_id'] = str(uuid.uuid4())
conversation_id = None
agent = self.worker_manager._get_agent(receiver_id, assignment_id)
if agent is not None:
conversation_id = agent.conversation_id
event_id = shared_utils.generate_event_id(receiver_id)
packet = Packet(
event_id,
data_model.WORLD_MESSAGE,
self.socket_manager.get_my_sender_id(),
receiver_id,
assignment_id,
data,
conversation_id=conversation_id,
ack_func=ack_func,
)
shared_utils.print_and_log(
logging.INFO,
'Manager sending: {}'.format(packet),
should_print=self.opt['verbose'],
)
self.socket_manager.queue_packet(packet)
return data['message_id']
def send_command(
self, receiver_id, assignment_id, data, blocking=True, ack_func=None
):
"""Sends a command through the socket manager,
update conversation state
"""
data['type'] = data_model.MESSAGE_TYPE_COMMAND
event_id = shared_utils.generate_event_id(receiver_id)
conversation_id = None
agent = self.worker_manager._get_agent(receiver_id, assignment_id)
if agent is not None:
conversation_id = agent.conversation_id
packet = Packet(
event_id,
data_model.WORLD_MESSAGE,
self.socket_manager.get_my_sender_id(),
receiver_id,
assignment_id,
data,
conversation_id=conversation_id,
ack_func=ack_func,
)
self.socket_manager.queue_packet(packet)
def send_state_change(self, receiver_id, assignment_id, data, ack_func=None):
"""Send an updated state to the server to push to the agent"""
event_id = shared_utils.generate_event_id(receiver_id)
packet = Packet(
event_id,
data_model.AGENT_STATE_CHANGE,
self.socket_manager.get_my_sender_id(),
receiver_id,
assignment_id,
data,
ack_func=ack_func,
)
self.socket_manager.queue_packet(packet)
def mark_workers_done(self, workers):
"""Mark a group of agents as done to keep state consistent"""
for agent in workers:
if self.is_unique:
assert (
self.unique_qual_name is not None
), 'Unique qual name must not be none to use is_unique'
self.give_worker_qualification(agent.worker_id, self.unique_qual_name)
if not agent.is_final():
agent.set_status(AssignState.STATUS_DONE, 'done', None)
if self.max_hits_per_worker > 0:
worker_state = self.worker_manager._get_worker(agent.worker_id)
completed_assignments = worker_state.completed_assignments()
assert self.unique_qual_name is not None, (
'Unique qual name ' 'must not be none to use max_hits_per_worker'
)
if completed_assignments >= self.max_hits_per_worker:
self.give_worker_qualification(
agent.worker_id, self.unique_qual_name
)
if self.has_time_limit:
self._log_working_time(agent)
def free_workers(self, workers):
"""End completed worker threads"""
for agent in workers:
self.socket_manager.close_channel(agent.get_connection_id())
# Amazon MTurk Server Functions #
def get_qualification_list(self, qualifications=None):
if self.qualifications is not None:
return self.qualifications.copy()
if qualifications is None:
qualifications = []
if not self.is_sandbox and not self.is_test:
try:
import parlai_internal.mturk.configs as local_configs
qualifications = local_configs.set_default_qualifications(
qualifications
)
except Exception:
# not all users will be drawing configs from internal settings
pass
if self.opt['disconnect_qualification'] is not None:
block_qual_id = mturk_utils.find_or_create_qualification(
self.opt['disconnect_qualification'],
'A soft ban from using a ParlAI-created HIT due to frequent '
'disconnects from conversations, leading to negative '
'experiences for other Turkers and for the requester.',
self.is_sandbox,
)
assert block_qual_id is not None, (
'Hits could not be created as disconnect qualification could '
'not be acquired. Shutting down server.'
)
qualifications.append(
{
'QualificationTypeId': block_qual_id,
'Comparator': 'DoesNotExist',
'ActionsGuarded': 'DiscoverPreviewAndAccept',
}
)
# Add the soft block qualification if it has been specified
if self.opt['block_qualification'] is not None:
block_qual_id = mturk_utils.find_or_create_qualification(
self.opt['block_qualification'],
'A soft ban from this ParlAI-created HIT at the requesters '
'discretion. Generally used to restrict how frequently a '
'particular worker can work on a particular task.',
self.is_sandbox,
)
assert block_qual_id is not None, (
'Hits could not be created as block qualification could not be'
' acquired. Shutting down server.'
)
qualifications.append(
{
'QualificationTypeId': block_qual_id,
'Comparator': 'DoesNotExist',
'ActionsGuarded': 'DiscoverPreviewAndAccept',
}
)
if self.has_time_limit:
block_qual_name = '{}-max-daily-time'.format(self.task_group_id)
if self.opt['max_time_qual'] is not None:
block_qual_name = self.opt['max_time_qual']
self.max_time_qual = block_qual_name
block_qual_id = mturk_utils.find_or_create_qualification(
block_qual_name,
'A soft ban from working on this HIT or HITs by this '
'requester based on a maximum amount of daily work time set '
'by the requester.',
self.is_sandbox,
)
assert block_qual_id is not None, (
'Hits could not be created as a time block qualification could'
' not be acquired. Shutting down server.'
)
qualifications.append(
{
'QualificationTypeId': block_qual_id,
'Comparator': 'DoesNotExist',
'ActionsGuarded': 'DiscoverPreviewAndAccept',
}
)
if self.is_unique or self.max_hits_per_worker > 0:
self.unique_qual_name = self.opt.get('unique_qual_name')
if self.unique_qual_name is None:
self.unique_qual_name = self.task_group_id + '_max_submissions'
self.unique_qual_id = mturk_utils.find_or_create_qualification(
self.unique_qual_name,
'Prevents workers from completing a task too frequently',
self.is_sandbox,
)
qualifications.append(
{
'QualificationTypeId': self.unique_qual_id,
'Comparator': 'DoesNotExist',
'ActionsGuarded': 'DiscoverPreviewAndAccept',
}
)
self.qualifications = qualifications
return qualifications.copy()
def create_additional_hits(self, num_hits, qualifications=None):
"""Handle creation for a specific number of hits/assignments
Put created HIT ids into the hit_id_list
"""
shared_utils.print_and_log(logging.INFO, 'Creating {} hits...'.format(num_hits))
qualifications = self.get_qualification_list(qualifications)
self.opt['assignment_duration_in_seconds'] = self.opt.get(
'assignment_duration_in_seconds', 30 * 60
)
hit_type_id = mturk_utils.create_hit_type(
hit_title=self.opt['hit_title'],
hit_description='{} (ID: {})'.format(
self.opt['hit_description'], self.task_group_id
),
hit_keywords=self.opt['hit_keywords'],
hit_reward=self.opt['reward'],
# Set to 30 minutes by default
assignment_duration_in_seconds=self.opt.get(
'assignment_duration_in_seconds', 30 * 60
),
is_sandbox=self.opt['is_sandbox'],
qualifications=qualifications,
auto_approve_delay=self.auto_approve_delay,
)
mturk_chat_url = '{}/chat_index?task_group_id={}'.format(
self.server_url, self.task_group_id
)
shared_utils.print_and_log(logging.INFO, mturk_chat_url)
mturk_page_url = None
if self.topic_arn is not None:
mturk_utils.subscribe_to_hits(hit_type_id, self.is_sandbox, self.topic_arn)
for _i in range(num_hits):
mturk_page_url, hit_id, mturk_response = mturk_utils.create_hit_with_hit_type(
opt=self.opt,
page_url=mturk_chat_url,
hit_type_id=hit_type_id,
num_assignments=1,
is_sandbox=self.is_sandbox,
)
if self.db_logger is not None:
self.db_logger.log_hit_status(mturk_response)
self.hit_id_list.append(hit_id)
return mturk_page_url
def create_hits(self, qualifications=None):
"""Create hits based on the managers current config, return hit url"""
shared_utils.print_and_log(logging.INFO, 'Creating HITs...', True)
if self.task_state < self.STATE_ACCEPTING_WORKERS:
shared_utils.print_and_log(
logging.WARN,
'You should be calling `ready_to_accept_workers` before '
'`create_hits` to ensure that the socket is connected before'
'hits are added. This will be enforced in future versions.',
True,
)
if self.opt['max_connections'] == 0:
mturk_page_url = self.create_additional_hits(
num_hits=self.required_hits, qualifications=qualifications
)
else:
mturk_page_url = self.create_additional_hits(
num_hits=min(self.required_hits, self.opt['max_connections']),
qualifications=qualifications,
)
shared_utils.print_and_log(
logging.INFO, 'Link to HIT: {}\n'.format(mturk_page_url), should_print=True
)
shared_utils.print_and_log(
logging.INFO,
'Waiting for Turkers to respond... (Please don\'t close'
' your laptop or put your computer into sleep or standby mode.)\n',
should_print=True,
)
self.task_state = self.STATE_HITS_MADE
return mturk_page_url
def get_hit(self, hit_id):
"""Get hit from mturk by hit_id"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
hit = client.get_hit(HITId=hit_id)
if self.db_logger is not None:
try:
self.db_logger.log_hit_status(hit)
except Exception:
pass
return hit
def get_assignment(self, assignment_id):
"""Gets assignment from mturk by assignment_id. Only works if the
assignment is in a completed state
"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
return client.get_assignment(AssignmentId=assignment_id)
def get_assignments_for_hit(self, hit_id):
"""Get completed assignments for a hit"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
assignments_info = client.list_assignments_for_hit(HITId=hit_id)
return assignments_info.get('Assignments', [])
def expire_all_unassigned_hits(self):
"""Move through the whole hit_id list and attempt to expire the
HITs, though this only immediately expires those that aren't assigned.
"""
# TODO note and mark assigned hits as ones to be expired later.
# this will improve the shutdown experience
shared_utils.print_and_log(
logging.INFO,
'Expiring all unassigned HITs...',
should_print=not self.is_test,
)
completed_ids = self.worker_manager.get_complete_hits()
for hit_id in self.hit_id_list:
if hit_id not in completed_ids:
                # TODO get confirmation that the HIT is actually expired
mturk_utils.expire_hit(self.is_sandbox, hit_id)
def approve_work(self, assignment_id, override_rejection=False):
"""approve work for a given assignment through the mturk client"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
client.approve_assignment(
AssignmentId=assignment_id, OverrideRejection=override_rejection
)
if self.db_logger is not None:
self.db_logger.log_approve_assignment(assignment_id)
shared_utils.print_and_log(
logging.INFO, 'Assignment {} approved.' ''.format(assignment_id)
)
def reject_work(self, assignment_id, reason):
"""reject work for a given assignment through the mturk client"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
client.reject_assignment(AssignmentId=assignment_id, RequesterFeedback=reason)
if self.db_logger is not None:
self.db_logger.log_reject_assignment(assignment_id)
shared_utils.print_and_log(
logging.INFO,
'Assignment {} rejected for reason {}.' ''.format(assignment_id, reason),
)
def approve_assignments_for_hit(self, hit_id, override_rejection=False):
"""Approve work for assignments associated with a given hit, through
mturk client
"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
assignments = self.get_assignments_for_hit(hit_id)
for assignment in assignments:
assignment_id = assignment['AssignmentId']
client.approve_assignment(
AssignmentId=assignment_id, OverrideRejection=override_rejection
)
def block_worker(self, worker_id, reason):
"""Block a worker by id using the mturk client, passes reason along"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
client.create_worker_block(WorkerId=worker_id, Reason=reason)
shared_utils.print_and_log(
logging.INFO,
            'Worker {} blocked for reason {}.'.format(worker_id, reason),
)
def soft_block_worker(self, worker_id, qual='block_qualification'):
"""Soft block a worker by giving the worker the block qualification"""
qual_name = self.opt.get(qual, None)
assert (
qual_name is not None
        ), 'No qualification {} has been specified in opt'.format(qual)
self.give_worker_qualification(worker_id, qual_name)
def un_soft_block_worker(self, worker_id, qual='block_qualification'):
"""Remove a soft block from a worker by removing a block qualification
from the worker"""
qual_name = self.opt.get(qual, None)
assert (
qual_name is not None
        ), 'No qualification {} has been specified in opt'.format(qual)
self.remove_worker_qualification(worker_id, qual_name)
def give_worker_qualification(self, worker_id, qual_name, qual_value=None):
"""Give a worker a particular qualification"""
qual_id = mturk_utils.find_qualification(qual_name, self.is_sandbox)
if qual_id is False or qual_id is None:
shared_utils.print_and_log(
logging.WARN,
'Could not give worker {} qualification {}, as the '
'qualification could not be found to exist.'
''.format(worker_id, qual_name),
should_print=True,
)
return
mturk_utils.give_worker_qualification(
worker_id, qual_id, qual_value, self.is_sandbox
)
shared_utils.print_and_log(
logging.INFO,
'gave {} qualification {}'.format(worker_id, qual_name),
should_print=True,
)
def remove_worker_qualification(self, worker_id, qual_name, reason=''):
"""Remove a qualification from a worker"""
qual_id = mturk_utils.find_qualification(qual_name, self.is_sandbox)
if qual_id is False or qual_id is None:
shared_utils.print_and_log(
logging.WARN,
                'Could not remove worker {}\'s qualification {}, as the '
'qualification could not be found to exist.'
''.format(worker_id, qual_name),
should_print=True,
)
return
try:
mturk_utils.remove_worker_qualification(
worker_id, qual_id, self.is_sandbox, reason
)
shared_utils.print_and_log(
logging.INFO,
'removed {}\'s qualification {}'.format(worker_id, qual_name),
should_print=True,
)
except Exception as e:
shared_utils.print_and_log(
logging.WARN if not self.has_time_limit else logging.INFO,
'removing {}\'s qualification {} failed with error {}. This '
'can be because the worker didn\'t have that qualification.'
''.format(worker_id, qual_name, repr(e)),
should_print=True,
)
def create_qualification(self, qualification_name, description, can_exist=True):
"""Create a new qualification. If can_exist is set, simply return
the ID of the existing qualification rather than throw an error
"""
if not can_exist:
qual_id = mturk_utils.find_qualification(
qualification_name, self.is_sandbox
)
if qual_id is not None:
shared_utils.print_and_log(
logging.WARN,
'Could not create qualification {}, as it existed'
''.format(qualification_name),
should_print=True,
)
return None
return mturk_utils.find_or_create_qualification(
qualification_name, description, self.is_sandbox
)
def pay_bonus(
self, worker_id, bonus_amount, assignment_id, reason, unique_request_token
):
"""Handles paying bonus to a turker, fails for insufficient funds.
Returns True on success and False on failure
"""
total_cost = mturk_utils.calculate_mturk_cost(
payment_opt={'type': 'bonus', 'amount': bonus_amount}
)
if not mturk_utils.check_mturk_balance(
balance_needed=total_cost, is_sandbox=self.is_sandbox
):
shared_utils.print_and_log(
logging.WARN,
'Cannot pay bonus. Reason: Insufficient '
'funds in your MTurk account.',
should_print=True,
)
return False
client = mturk_utils.get_mturk_client(self.is_sandbox)
# unique_request_token may be useful for handling future network errors
client.send_bonus(
WorkerId=worker_id,
BonusAmount=str(bonus_amount),
AssignmentId=assignment_id,
Reason=reason,
UniqueRequestToken=unique_request_token,
)
if self.db_logger is not None:
self.db_logger.log_pay_extra_bonus(
worker_id, assignment_id, bonus_amount, reason
)
shared_utils.print_and_log(
logging.INFO,
'Paid ${} bonus to WorkerId: {}'.format(bonus_amount, worker_id),
)
return True
def email_worker(self, worker_id, subject, message_text):
"""Send an email to a worker through the mturk client"""
client = mturk_utils.get_mturk_client(self.is_sandbox)
response = client.notify_workers(
Subject=subject, MessageText=message_text, WorkerIds=[worker_id]
)
if len(response['NotifyWorkersFailureStatuses']) > 0:
failure_message = response['NotifyWorkersFailureStatuses'][0]
return {'failure': failure_message['NotifyWorkersFailureMessage']}
else:
return {'success': True}
# TODO consolidate base functionality out of this class and above into a
# base_crowd_manager and then expand out from there.
class StaticMTurkManager(MTurkManager):
"""Manages interactions between MTurk agents and tasks, the task launching
workflow, and more, but only for tasks that require just 2 connections
to the server: an initial task request and the submission of results
"""
def __init__(self, opt, is_test=False):
"""No interaction means only ever one agent, so that's what we get"""
opt['max_connections'] = 0 # Max connections doesn't make sense here
opt['count_complete'] = True # No other way to count static HITs
opt['frontend_template_type'] = 'static'
super().__init__(opt, ['worker'], is_test, use_db=True)
self.hit_mult = 1 # No need to pad HITs if they're static
self.required_hits = self.num_conversations
def _assert_opts(self):
"""Manages ensuring everything about the passed in options make sense
in that they don't conflict in some way or another"""
if self.opt.get('allow_reviews'):
shared_utils.print_and_log(
logging.WARN,
'[OPT CONFIGURATION ISSUE] '
'allow_reviews is not supported on single person tasks.',
should_print=True,
)
self.opt['allow_reviews'] = False
if self.opt.get('frontend_version', 0) < 1:
shared_utils.print_and_log(
logging.WARN,
'[OPT CONFIGURATION ISSUE] '
'Static tasks must use the react version of the frontend.',
should_print=True,
)
raise Exception('Invalid mturk manager options')
def _onboard_new_agent(self, agent):
"""Override onboarding to go straight to the pool
        for static tasks
"""
self._add_agent_to_pool(agent)
| 42.352495 | 90 | 0.597314 |
7948075596ed9ec404c366cc48003cf22bfdb57c | 52,825 | py | Python | pysnmp/Nortel-Magellan-Passport-X25TraceRcvrMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/Nortel-Magellan-Passport-X25TraceRcvrMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/Nortel-Magellan-Passport-X25TraceRcvrMIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module Nortel-Magellan-Passport-X25TraceRcvrMIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Nortel-Magellan-Passport-X25TraceRcvrMIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:19:26 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint")
DisplayString, Integer32, StorageType, Unsigned32, RowStatus = mibBuilder.importSymbols("Nortel-Magellan-Passport-StandardTextualConventionsMIB", "DisplayString", "Integer32", "StorageType", "Unsigned32", "RowStatus")
Hex, EnterpriseDateAndTime, HexString, DigitString, NonReplicated = mibBuilder.importSymbols("Nortel-Magellan-Passport-TextualConventionsMIB", "Hex", "EnterpriseDateAndTime", "HexString", "DigitString", "NonReplicated")
traceIndex, traceRcvrIndex, traceSession, traceSessionIndex, traceRcvr = mibBuilder.importSymbols("Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex", "traceRcvrIndex", "traceSession", "traceSessionIndex", "traceRcvr")
passportMIBs, = mibBuilder.importSymbols("Nortel-Magellan-Passport-UsefulDefinitionsMIB", "passportMIBs")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, Gauge32, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Integer32, Counter64, ModuleIdentity, Bits, MibIdentifier, Unsigned32, NotificationType, TimeTicks, ObjectIdentity, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Gauge32", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Integer32", "Counter64", "ModuleIdentity", "Bits", "MibIdentifier", "Unsigned32", "NotificationType", "TimeTicks", "ObjectIdentity", "IpAddress")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
x25TraceRcvrMIB = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 62))
traceRcvrX25 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2))
traceRcvrX25RowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 1), )
if mibBuilder.loadTexts: traceRcvrX25RowStatusTable.setStatus('mandatory')
traceRcvrX25RowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceRcvrIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25Index"))
if mibBuilder.loadTexts: traceRcvrX25RowStatusEntry.setStatus('mandatory')
traceRcvrX25RowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25RowStatus.setStatus('mandatory')
traceRcvrX25ComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25ComponentName.setStatus('mandatory')
traceRcvrX25StorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25StorageType.setStatus('mandatory')
traceRcvrX25Index = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: traceRcvrX25Index.setStatus('mandatory')
traceRcvrX25Dna = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2))
traceRcvrX25DnaRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 1), )
if mibBuilder.loadTexts: traceRcvrX25DnaRowStatusTable.setStatus('mandatory')
traceRcvrX25DnaRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceRcvrIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DnaIndex"))
if mibBuilder.loadTexts: traceRcvrX25DnaRowStatusEntry.setStatus('mandatory')
traceRcvrX25DnaRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaRowStatus.setStatus('mandatory')
traceRcvrX25DnaComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaComponentName.setStatus('mandatory')
traceRcvrX25DnaStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaStorageType.setStatus('mandatory')
traceRcvrX25DnaIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: traceRcvrX25DnaIndex.setStatus('mandatory')
traceRcvrX25DnaAddressTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 10), )
if mibBuilder.loadTexts: traceRcvrX25DnaAddressTable.setStatus('mandatory')
traceRcvrX25DnaAddressEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceRcvrIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DnaIndex"))
if mibBuilder.loadTexts: traceRcvrX25DnaAddressEntry.setStatus('mandatory')
traceRcvrX25DnaNumberingPlanIndicator = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1))).clone('x121')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaNumberingPlanIndicator.setStatus('mandatory')
traceRcvrX25DnaDataNetworkAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 10, 1, 2), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaDataNetworkAddress.setStatus('mandatory')
traceRcvrX25DnaOutgoingOptionsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 11), )
if mibBuilder.loadTexts: traceRcvrX25DnaOutgoingOptionsTable.setStatus('mandatory')
traceRcvrX25DnaOutgoingOptionsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceRcvrIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DnaIndex"))
if mibBuilder.loadTexts: traceRcvrX25DnaOutgoingOptionsEntry.setStatus('mandatory')
traceRcvrX25DnaOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 11, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('allowed')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaOutCalls.setStatus('mandatory')
traceRcvrX25DnaOutDefaultPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 11, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("normal", 0), ("high", 1))).clone('high')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaOutDefaultPriority.setStatus('mandatory')
traceRcvrX25DnaOutIntl = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 11, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('allowed')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaOutIntl.setStatus('mandatory')
traceRcvrX25DnaOutDefaultPathSensitivity = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 11, 1, 11), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("throughput", 0), ("delay", 1))).clone('throughput')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaOutDefaultPathSensitivity.setStatus('obsolete')
traceRcvrX25DnaOutDefaultPathReliability = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 11, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("high", 0), ("normal", 1))).clone('high')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaOutDefaultPathReliability.setStatus('mandatory')
traceRcvrX25DnaOutPathReliabilityOverRide = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 11, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('no')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaOutPathReliabilityOverRide.setStatus('mandatory')
traceRcvrX25DnaOutPathReliabilitySignal = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 11, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('disallowed')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaOutPathReliabilitySignal.setStatus('mandatory')
traceRcvrX25DnaOutAccess = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 11, 1, 17), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('disallowed')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaOutAccess.setStatus('mandatory')
traceRcvrX25DnaIncomingOptionsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 12), )
if mibBuilder.loadTexts: traceRcvrX25DnaIncomingOptionsTable.setStatus('mandatory')
traceRcvrX25DnaIncomingOptionsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceRcvrIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DnaIndex"))
if mibBuilder.loadTexts: traceRcvrX25DnaIncomingOptionsEntry.setStatus('mandatory')
traceRcvrX25DnaIncCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 12, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('disallowed')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaIncCalls.setStatus('mandatory')
traceRcvrX25DnaCallOptionsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13), )
if mibBuilder.loadTexts: traceRcvrX25DnaCallOptionsTable.setStatus('mandatory')
traceRcvrX25DnaCallOptionsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceRcvrIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DnaIndex"))
if mibBuilder.loadTexts: traceRcvrX25DnaCallOptionsEntry.setStatus('mandatory')
traceRcvrX25DnaPacketSizes = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(2, 2)).setFixedLength(2).clone(hexValue="0100")).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaPacketSizes.setStatus('mandatory')
traceRcvrX25DnaDefaultRecvFrmNetworkPacketSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12))).clone('n2048')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaDefaultRecvFrmNetworkPacketSize.setStatus('mandatory')
traceRcvrX25DnaDefaultSendToNetworkPacketSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12))).clone('n2048')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaDefaultSendToNetworkPacketSize.setStatus('mandatory')
traceRcvrX25DnaDefaultRecvFrmNetworkThruputClass = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 15)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaDefaultRecvFrmNetworkThruputClass.setStatus('mandatory')
traceRcvrX25DnaDefaultSendToNetworkThruputClass = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 15)).clone(10)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaDefaultSendToNetworkThruputClass.setStatus('mandatory')
traceRcvrX25DnaDefaultRecvFrmNetworkWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 7)).clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaDefaultRecvFrmNetworkWindowSize.setStatus('mandatory')
traceRcvrX25DnaDefaultSendToNetworkWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 7)).clone(1)).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaDefaultSendToNetworkWindowSize.setStatus('mandatory')
traceRcvrX25DnaPacketSizeNegotiation = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("endToEnd", 0), ("local", 1))).clone('local')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaPacketSizeNegotiation.setStatus('mandatory')
traceRcvrX25DnaCugFormat = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 13, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("basic", 0), ("extended", 1))).clone('basic')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaCugFormat.setStatus('mandatory')
traceRcvrX25DnaCug = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2))
traceRcvrX25DnaCugRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 1), )
if mibBuilder.loadTexts: traceRcvrX25DnaCugRowStatusTable.setStatus('mandatory')
traceRcvrX25DnaCugRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceRcvrIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DnaIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DnaCugIndex"))
if mibBuilder.loadTexts: traceRcvrX25DnaCugRowStatusEntry.setStatus('mandatory')
traceRcvrX25DnaCugRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 1, 1, 1), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaCugRowStatus.setStatus('mandatory')
traceRcvrX25DnaCugComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaCugComponentName.setStatus('mandatory')
traceRcvrX25DnaCugStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaCugStorageType.setStatus('mandatory')
traceRcvrX25DnaCugIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1)))
if mibBuilder.loadTexts: traceRcvrX25DnaCugIndex.setStatus('mandatory')
traceRcvrX25DnaCugCugOptionsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 10), )
if mibBuilder.loadTexts: traceRcvrX25DnaCugCugOptionsTable.setStatus('mandatory')
traceRcvrX25DnaCugCugOptionsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceRcvrIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DnaIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DnaCugIndex"))
if mibBuilder.loadTexts: traceRcvrX25DnaCugCugOptionsEntry.setStatus('mandatory')
traceRcvrX25DnaCugType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("national", 0), ("international", 1))).clone('national')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaCugType.setStatus('mandatory')
traceRcvrX25DnaCugDnic = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 10, 1, 2), DigitString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4).clone(hexValue="30303030")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaCugDnic.setStatus('mandatory')
traceRcvrX25DnaCugInterlockCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 10, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaCugInterlockCode.setStatus('mandatory')
traceRcvrX25DnaCugPreferential = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 10, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('yes')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaCugPreferential.setStatus('mandatory')
traceRcvrX25DnaCugOutCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 10, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('allowed')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaCugOutCalls.setStatus('mandatory')
traceRcvrX25DnaCugIncCalls = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 10, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("disallowed", 0), ("allowed", 1))).clone('disallowed')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DnaCugIncCalls.setStatus('mandatory')
traceRcvrX25DnaCugPrivileged = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 2, 2, 10, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1))).clone('yes')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DnaCugPrivileged.setStatus('mandatory')
traceRcvrX25Dc = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3))
traceRcvrX25DcRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 1), )
if mibBuilder.loadTexts: traceRcvrX25DcRowStatusTable.setStatus('mandatory')
traceRcvrX25DcRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceRcvrIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DcIndex"))
if mibBuilder.loadTexts: traceRcvrX25DcRowStatusEntry.setStatus('mandatory')
traceRcvrX25DcRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DcRowStatus.setStatus('mandatory')
traceRcvrX25DcComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DcComponentName.setStatus('mandatory')
traceRcvrX25DcStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DcStorageType.setStatus('mandatory')
traceRcvrX25DcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: traceRcvrX25DcIndex.setStatus('mandatory')
traceRcvrX25DcOptionsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 10), )
if mibBuilder.loadTexts: traceRcvrX25DcOptionsTable.setStatus('mandatory')
traceRcvrX25DcOptionsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceRcvrIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceRcvrX25DcIndex"))
if mibBuilder.loadTexts: traceRcvrX25DcOptionsEntry.setStatus('mandatory')
traceRcvrX25DcRemoteNpi = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1))).clone('x121')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DcRemoteNpi.setStatus('mandatory')
traceRcvrX25DcRemoteDna = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 10, 1, 4), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DcRemoteDna.setStatus('mandatory')
traceRcvrX25DcType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 10, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))).clone(namedValues=NamedValues(("switched", 0), ("permanentMaster", 1), ("permanentSlave", 2), ("permanentBackupSlave", 3))).clone('switched')).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceRcvrX25DcType.setStatus('mandatory')
traceRcvrX25DcUserData = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 2, 2, 3, 10, 1, 8), HexString().subtype(subtypeSpec=ValueSizeConstraint(0, 16)).clone(hexValue="")).setMaxAccess("readwrite")
if mibBuilder.loadTexts: traceRcvrX25DcUserData.setStatus('mandatory')
traceSessionX25 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2))
traceSessionX25RowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 1), )
if mibBuilder.loadTexts: traceSessionX25RowStatusTable.setStatus('mandatory')
traceSessionX25RowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceSessionIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceSessionX25Index"))
if mibBuilder.loadTexts: traceSessionX25RowStatusEntry.setStatus('mandatory')
traceSessionX25RowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25RowStatus.setStatus('mandatory')
traceSessionX25ComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25ComponentName.setStatus('mandatory')
traceSessionX25StorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25StorageType.setStatus('mandatory')
traceSessionX25Index = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: traceSessionX25Index.setStatus('mandatory')
traceSessionX25Vc = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2))
traceSessionX25VcRowStatusTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 1), )
if mibBuilder.loadTexts: traceSessionX25VcRowStatusTable.setStatus('mandatory')
traceSessionX25VcRowStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 1, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceSessionIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceSessionX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceSessionX25VcIndex"))
if mibBuilder.loadTexts: traceSessionX25VcRowStatusEntry.setStatus('mandatory')
traceSessionX25VcRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 1, 1, 1), RowStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcRowStatus.setStatus('mandatory')
traceSessionX25VcComponentName = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 1, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcComponentName.setStatus('mandatory')
traceSessionX25VcStorageType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 1, 1, 4), StorageType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcStorageType.setStatus('mandatory')
traceSessionX25VcIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 1, 1, 10), NonReplicated())
if mibBuilder.loadTexts: traceSessionX25VcIndex.setStatus('mandatory')
traceSessionX25VcCadTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10), )
if mibBuilder.loadTexts: traceSessionX25VcCadTable.setStatus('mandatory')
traceSessionX25VcCadEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceSessionIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceSessionX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceSessionX25VcIndex"))
if mibBuilder.loadTexts: traceSessionX25VcCadEntry.setStatus('mandatory')
traceSessionX25VcType = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("svc", 0), ("pvc", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcType.setStatus('mandatory')
traceSessionX25VcState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("creating", 0), ("readyP1", 1), ("dteWaitingP2", 2), ("dceWaitingP3", 3), ("dataTransferP4", 4), ("unsupportedP5", 5), ("dteClearRequestP6", 6), ("dceClearIndicationP7", 7), ("termination", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcState.setStatus('mandatory')
traceSessionX25VcPreviousState = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))).clone(namedValues=NamedValues(("creating", 0), ("readyP1", 1), ("dteWaitingP2", 2), ("dceWaitingP3", 3), ("dataTransferP4", 4), ("unsupportedP5", 5), ("dteClearRequestP6", 6), ("dceClearIndicationP7", 7), ("termination", 8)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcPreviousState.setStatus('mandatory')
traceSessionX25VcDiagnosticCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcDiagnosticCode.setStatus('mandatory')
traceSessionX25VcPreviousDiagnosticCode = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcPreviousDiagnosticCode.setStatus('mandatory')
traceSessionX25VcCalledNpi = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcCalledNpi.setStatus('mandatory')
traceSessionX25VcCalledDna = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 7), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcCalledDna.setStatus('mandatory')
traceSessionX25VcCalledLcn = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcCalledLcn.setStatus('mandatory')
traceSessionX25VcCallingNpi = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("x121", 0), ("e164", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcCallingNpi.setStatus('mandatory')
traceSessionX25VcCallingDna = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 10), DigitString().subtype(subtypeSpec=ValueSizeConstraint(1, 15))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcCallingDna.setStatus('mandatory')
traceSessionX25VcCallingLcn = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcCallingLcn.setStatus('mandatory')
traceSessionX25VcAccountingEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("yes", 0), ("no", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcAccountingEnabled.setStatus('mandatory')
traceSessionX25VcFastSelectCall = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 13), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("no", 0), ("yes", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcFastSelectCall.setStatus('mandatory')
traceSessionX25VcLocalRxPktSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 14), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("unknown", 0), ("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcLocalRxPktSize.setStatus('mandatory')
traceSessionX25VcLocalTxPktSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 15), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("unknown", 0), ("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcLocalTxPktSize.setStatus('mandatory')
traceSessionX25VcLocalTxWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 16), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcLocalTxWindowSize.setStatus('mandatory')
traceSessionX25VcLocalRxWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 17), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 127))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcLocalRxWindowSize.setStatus('mandatory')
traceSessionX25VcPathReliability = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 19), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("high", 0), ("normal", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcPathReliability.setStatus('mandatory')
traceSessionX25VcAccountingEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 20), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("callingEnd", 0), ("calledEnd", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcAccountingEnd.setStatus('mandatory')
traceSessionX25VcPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 21), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 1))).clone(namedValues=NamedValues(("normal", 0), ("high", 1)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcPriority.setStatus('mandatory')
traceSessionX25VcSegmentSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 22), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcSegmentSize.setStatus('mandatory')
traceSessionX25VcSubnetTxPktSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 23), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("unknown", 0), ("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcSubnetTxPktSize.setStatus('mandatory')
traceSessionX25VcSubnetTxWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 24), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcSubnetTxWindowSize.setStatus('mandatory')
traceSessionX25VcSubnetRxPktSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 25), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 4, 5, 6, 7, 8, 9, 10, 11, 12))).clone(namedValues=NamedValues(("unknown", 0), ("n16", 4), ("n32", 5), ("n64", 6), ("n128", 7), ("n256", 8), ("n512", 9), ("n1024", 10), ("n2048", 11), ("n4096", 12)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcSubnetRxPktSize.setStatus('mandatory')
traceSessionX25VcSubnetRxWindowSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 26), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcSubnetRxWindowSize.setStatus('mandatory')
traceSessionX25VcMaxSubnetPktSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 27), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4096))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcMaxSubnetPktSize.setStatus('mandatory')
traceSessionX25VcTransferPriorityToNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 9))).clone(namedValues=NamedValues(("normal", 0), ("high", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcTransferPriorityToNetwork.setStatus('mandatory')
traceSessionX25VcTransferPriorityFromNetwork = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 10, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 9))).clone(namedValues=NamedValues(("normal", 0), ("high", 9)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcTransferPriorityFromNetwork.setStatus('mandatory')
traceSessionX25VcIntdTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 11), )
if mibBuilder.loadTexts: traceSessionX25VcIntdTable.setStatus('mandatory')
traceSessionX25VcIntdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 11, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceSessionIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceSessionX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceSessionX25VcIndex"))
if mibBuilder.loadTexts: traceSessionX25VcIntdEntry.setStatus('mandatory')
traceSessionX25VcCallReferenceNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 11, 1, 1), Hex().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcCallReferenceNumber.setStatus('mandatory')
traceSessionX25VcElapsedTimeTillNow = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 11, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcElapsedTimeTillNow.setStatus('mandatory')
traceSessionX25VcSegmentsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 11, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcSegmentsRx.setStatus('mandatory')
traceSessionX25VcSegmentsSent = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 11, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 16777215))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcSegmentsSent.setStatus('mandatory')
traceSessionX25VcStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 11, 1, 5), EnterpriseDateAndTime().subtype(subtypeSpec=ConstraintsUnion(ValueSizeConstraint(0, 0), ValueSizeConstraint(19, 19), ))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcStartTime.setStatus('mandatory')
traceSessionX25VcStatsTable = MibTable((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12), )
if mibBuilder.loadTexts: traceSessionX25VcStatsTable.setStatus('mandatory')
traceSessionX25VcStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1), ).setIndexNames((0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceIndex"), (0, "Nortel-Magellan-Passport-TraceBaseMIB", "traceSessionIndex"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceSessionX25Index"), (0, "Nortel-Magellan-Passport-X25TraceRcvrMIB", "traceSessionX25VcIndex"))
if mibBuilder.loadTexts: traceSessionX25VcStatsEntry.setStatus('mandatory')
traceSessionX25VcAckStackingTimeouts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcAckStackingTimeouts.setStatus('mandatory')
traceSessionX25VcOutOfRangeFrmFromSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcOutOfRangeFrmFromSubnet.setStatus('mandatory')
traceSessionX25VcDuplicatesFromSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcDuplicatesFromSubnet.setStatus('mandatory')
traceSessionX25VcFrmRetryTimeouts = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcFrmRetryTimeouts.setStatus('mandatory')
traceSessionX25VcPeakRetryQueueSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcPeakRetryQueueSize.setStatus('mandatory')
traceSessionX25VcPeakOoSeqQueueSize = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcPeakOoSeqQueueSize.setStatus('mandatory')
traceSessionX25VcPeakOoSeqFrmForwarded = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcPeakOoSeqFrmForwarded.setStatus('mandatory')
traceSessionX25VcPeakStackedAcksRx = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcPeakStackedAcksRx.setStatus('mandatory')
traceSessionX25VcSubnetRecoveries = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcSubnetRecoveries.setStatus('mandatory')
traceSessionX25VcWindowClosuresToSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcWindowClosuresToSubnet.setStatus('mandatory')
traceSessionX25VcWindowClosuresFromSubnet = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcWindowClosuresFromSubnet.setStatus('mandatory')
traceSessionX25VcWrTriggers = MibTableColumn((1, 3, 6, 1, 4, 1, 562, 2, 4, 1, 106, 3, 2, 2, 12, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 5000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: traceSessionX25VcWrTriggers.setStatus('mandatory')
x25TraceRcvrGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 62, 1))
x25TraceRcvrGroupBD = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 62, 1, 4))
x25TraceRcvrGroupBD00 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 62, 1, 4, 1))
x25TraceRcvrGroupBD00A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 62, 1, 4, 1, 2))
x25TraceRcvrCapabilities = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 62, 3))
x25TraceRcvrCapabilitiesBD = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 62, 3, 4))
x25TraceRcvrCapabilitiesBD00 = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 62, 3, 4, 1))
x25TraceRcvrCapabilitiesBD00A = MibIdentifier((1, 3, 6, 1, 4, 1, 562, 2, 4, 2, 62, 3, 4, 1, 2))
mibBuilder.exportSymbols("Nortel-Magellan-Passport-X25TraceRcvrMIB", traceRcvrX25DnaDefaultRecvFrmNetworkThruputClass=traceRcvrX25DnaDefaultRecvFrmNetworkThruputClass, traceSessionX25VcPathReliability=traceSessionX25VcPathReliability, traceRcvrX25DnaDefaultSendToNetworkWindowSize=traceRcvrX25DnaDefaultSendToNetworkWindowSize, traceRcvrX25DnaCugRowStatusEntry=traceRcvrX25DnaCugRowStatusEntry, traceRcvrX25DnaOutgoingOptionsEntry=traceRcvrX25DnaOutgoingOptionsEntry, traceRcvrX25DnaDefaultSendToNetworkThruputClass=traceRcvrX25DnaDefaultSendToNetworkThruputClass, traceRcvrX25DnaCugPrivileged=traceRcvrX25DnaCugPrivileged, traceRcvrX25DnaPacketSizes=traceRcvrX25DnaPacketSizes, traceRcvrX25DcRemoteNpi=traceRcvrX25DcRemoteNpi, traceSessionX25VcIndex=traceSessionX25VcIndex, traceRcvrX25RowStatus=traceRcvrX25RowStatus, traceRcvrX25StorageType=traceRcvrX25StorageType, traceSessionX25VcFastSelectCall=traceSessionX25VcFastSelectCall, traceSessionX25VcStartTime=traceSessionX25VcStartTime, traceSessionX25VcPreviousState=traceSessionX25VcPreviousState, traceSessionX25VcWindowClosuresToSubnet=traceSessionX25VcWindowClosuresToSubnet, traceSessionX25VcComponentName=traceSessionX25VcComponentName, traceSessionX25VcCallingLcn=traceSessionX25VcCallingLcn, traceRcvrX25DnaCallOptionsEntry=traceRcvrX25DnaCallOptionsEntry, traceRcvrX25DnaCugOutCalls=traceRcvrX25DnaCugOutCalls, traceRcvrX25DnaOutDefaultPathSensitivity=traceRcvrX25DnaOutDefaultPathSensitivity, traceSessionX25VcDiagnosticCode=traceSessionX25VcDiagnosticCode, traceSessionX25VcSegmentSize=traceSessionX25VcSegmentSize, traceSessionX25VcMaxSubnetPktSize=traceSessionX25VcMaxSubnetPktSize, traceRcvrX25DcComponentName=traceRcvrX25DcComponentName, traceRcvrX25DnaOutCalls=traceRcvrX25DnaOutCalls, traceSessionX25VcIntdEntry=traceSessionX25VcIntdEntry, traceSessionX25VcElapsedTimeTillNow=traceSessionX25VcElapsedTimeTillNow, x25TraceRcvrGroupBD=x25TraceRcvrGroupBD, traceSessionX25VcCallingDna=traceSessionX25VcCallingDna, traceRcvrX25=traceRcvrX25, traceSessionX25RowStatusTable=traceSessionX25RowStatusTable, traceSessionX25VcLocalTxWindowSize=traceSessionX25VcLocalTxWindowSize, traceRcvrX25DnaComponentName=traceRcvrX25DnaComponentName, traceRcvrX25DnaCugCugOptionsTable=traceRcvrX25DnaCugCugOptionsTable, traceSessionX25VcDuplicatesFromSubnet=traceSessionX25VcDuplicatesFromSubnet, traceRcvrX25DcRowStatusEntry=traceRcvrX25DcRowStatusEntry, traceRcvrX25DnaRowStatusTable=traceRcvrX25DnaRowStatusTable, traceRcvrX25DnaOutDefaultPathReliability=traceRcvrX25DnaOutDefaultPathReliability, traceSessionX25VcSegmentsRx=traceSessionX25VcSegmentsRx, traceSessionX25VcSubnetRxPktSize=traceSessionX25VcSubnetRxPktSize, traceSessionX25VcAccountingEnd=traceSessionX25VcAccountingEnd, traceSessionX25VcOutOfRangeFrmFromSubnet=traceSessionX25VcOutOfRangeFrmFromSubnet, traceRcvrX25Dna=traceRcvrX25Dna, traceRcvrX25DnaCugRowStatusTable=traceRcvrX25DnaCugRowStatusTable, traceSessionX25VcPeakOoSeqFrmForwarded=traceSessionX25VcPeakOoSeqFrmForwarded, traceRcvrX25DcRemoteDna=traceRcvrX25DcRemoteDna, traceSessionX25VcLocalRxWindowSize=traceSessionX25VcLocalRxWindowSize, traceRcvrX25Index=traceRcvrX25Index, x25TraceRcvrCapabilitiesBD00A=x25TraceRcvrCapabilitiesBD00A, traceSessionX25VcSubnetTxPktSize=traceSessionX25VcSubnetTxPktSize, traceSessionX25VcWindowClosuresFromSubnet=traceSessionX25VcWindowClosuresFromSubnet, traceRcvrX25ComponentName=traceRcvrX25ComponentName, traceRcvrX25DnaDefaultSendToNetworkPacketSize=traceRcvrX25DnaDefaultSendToNetworkPacketSize, x25TraceRcvrGroup=x25TraceRcvrGroup, 
traceRcvrX25DnaCugStorageType=traceRcvrX25DnaCugStorageType, traceRcvrX25DnaCugDnic=traceRcvrX25DnaCugDnic, traceRcvrX25DnaCugCugOptionsEntry=traceRcvrX25DnaCugCugOptionsEntry, traceSessionX25VcSubnetRxWindowSize=traceSessionX25VcSubnetRxWindowSize, traceRcvrX25DnaCugIndex=traceRcvrX25DnaCugIndex, traceRcvrX25DcStorageType=traceRcvrX25DcStorageType, traceSessionX25VcSubnetRecoveries=traceSessionX25VcSubnetRecoveries, traceRcvrX25DcIndex=traceRcvrX25DcIndex, traceSessionX25VcRowStatusEntry=traceSessionX25VcRowStatusEntry, traceRcvrX25DnaOutIntl=traceRcvrX25DnaOutIntl, traceRcvrX25DnaOutPathReliabilityOverRide=traceRcvrX25DnaOutPathReliabilityOverRide, x25TraceRcvrCapabilitiesBD00=x25TraceRcvrCapabilitiesBD00, traceSessionX25VcCalledLcn=traceSessionX25VcCalledLcn, traceRcvrX25DnaRowStatusEntry=traceRcvrX25DnaRowStatusEntry, traceRcvrX25RowStatusEntry=traceRcvrX25RowStatusEntry, x25TraceRcvrGroupBD00A=x25TraceRcvrGroupBD00A, traceRcvrX25DcRowStatusTable=traceRcvrX25DcRowStatusTable, traceRcvrX25DnaOutDefaultPriority=traceRcvrX25DnaOutDefaultPriority, traceSessionX25VcLocalTxPktSize=traceSessionX25VcLocalTxPktSize, traceSessionX25RowStatus=traceSessionX25RowStatus, traceRcvrX25DcUserData=traceRcvrX25DcUserData, traceSessionX25VcStatsEntry=traceSessionX25VcStatsEntry, traceSessionX25VcAckStackingTimeouts=traceSessionX25VcAckStackingTimeouts, traceRcvrX25DcType=traceRcvrX25DcType, traceRcvrX25RowStatusTable=traceRcvrX25RowStatusTable, traceRcvrX25DnaCugFormat=traceRcvrX25DnaCugFormat, traceSessionX25ComponentName=traceSessionX25ComponentName, traceRcvrX25DcOptionsTable=traceRcvrX25DcOptionsTable, traceRcvrX25DnaCugComponentName=traceRcvrX25DnaCugComponentName, traceSessionX25VcRowStatusTable=traceSessionX25VcRowStatusTable, traceSessionX25VcIntdTable=traceSessionX25VcIntdTable, traceSessionX25=traceSessionX25, traceSessionX25VcRowStatus=traceSessionX25VcRowStatus, traceSessionX25VcTransferPriorityToNetwork=traceSessionX25VcTransferPriorityToNetwork, traceSessionX25StorageType=traceSessionX25StorageType, traceSessionX25VcPeakOoSeqQueueSize=traceSessionX25VcPeakOoSeqQueueSize, traceSessionX25VcStatsTable=traceSessionX25VcStatsTable, traceRcvrX25DnaIncCalls=traceRcvrX25DnaIncCalls, x25TraceRcvrCapabilities=x25TraceRcvrCapabilities, traceRcvrX25DnaCugPreferential=traceRcvrX25DnaCugPreferential, traceSessionX25VcType=traceSessionX25VcType, traceRcvrX25DnaIncomingOptionsEntry=traceRcvrX25DnaIncomingOptionsEntry, traceSessionX25VcTransferPriorityFromNetwork=traceSessionX25VcTransferPriorityFromNetwork, traceRcvrX25DnaDefaultRecvFrmNetworkWindowSize=traceRcvrX25DnaDefaultRecvFrmNetworkWindowSize, traceRcvrX25DnaAddressTable=traceRcvrX25DnaAddressTable, traceRcvrX25DnaIndex=traceRcvrX25DnaIndex, traceSessionX25VcCalledNpi=traceSessionX25VcCalledNpi, traceRcvrX25DnaIncomingOptionsTable=traceRcvrX25DnaIncomingOptionsTable, traceSessionX25VcLocalRxPktSize=traceSessionX25VcLocalRxPktSize, traceRcvrX25DnaStorageType=traceRcvrX25DnaStorageType, traceRcvrX25DnaOutAccess=traceRcvrX25DnaOutAccess, traceSessionX25VcCadTable=traceSessionX25VcCadTable, x25TraceRcvrGroupBD00=x25TraceRcvrGroupBD00, traceSessionX25VcSegmentsSent=traceSessionX25VcSegmentsSent, traceRcvrX25DnaOutgoingOptionsTable=traceRcvrX25DnaOutgoingOptionsTable, traceSessionX25VcSubnetTxWindowSize=traceSessionX25VcSubnetTxWindowSize, traceRcvrX25DnaCugType=traceRcvrX25DnaCugType, traceRcvrX25DcRowStatus=traceRcvrX25DcRowStatus, traceRcvrX25DnaCallOptionsTable=traceRcvrX25DnaCallOptionsTable, 
traceRcvrX25DnaPacketSizeNegotiation=traceRcvrX25DnaPacketSizeNegotiation, traceRcvrX25DnaCugInterlockCode=traceRcvrX25DnaCugInterlockCode, traceSessionX25VcAccountingEnabled=traceSessionX25VcAccountingEnabled, traceSessionX25RowStatusEntry=traceSessionX25RowStatusEntry, traceSessionX25Index=traceSessionX25Index, traceSessionX25VcCadEntry=traceSessionX25VcCadEntry, traceRcvrX25DnaOutPathReliabilitySignal=traceRcvrX25DnaOutPathReliabilitySignal, traceSessionX25Vc=traceSessionX25Vc, traceSessionX25VcPreviousDiagnosticCode=traceSessionX25VcPreviousDiagnosticCode, traceRcvrX25DnaRowStatus=traceRcvrX25DnaRowStatus, traceRcvrX25DnaDataNetworkAddress=traceRcvrX25DnaDataNetworkAddress, traceRcvrX25DnaAddressEntry=traceRcvrX25DnaAddressEntry, x25TraceRcvrMIB=x25TraceRcvrMIB, traceRcvrX25DnaCug=traceRcvrX25DnaCug, traceSessionX25VcPriority=traceSessionX25VcPriority, traceSessionX25VcCalledDna=traceSessionX25VcCalledDna, traceRcvrX25DnaDefaultRecvFrmNetworkPacketSize=traceRcvrX25DnaDefaultRecvFrmNetworkPacketSize, traceRcvrX25Dc=traceRcvrX25Dc, traceSessionX25VcPeakRetryQueueSize=traceSessionX25VcPeakRetryQueueSize, traceSessionX25VcPeakStackedAcksRx=traceSessionX25VcPeakStackedAcksRx, traceSessionX25VcState=traceSessionX25VcState, traceRcvrX25DcOptionsEntry=traceRcvrX25DcOptionsEntry, traceSessionX25VcCallingNpi=traceSessionX25VcCallingNpi, traceRcvrX25DnaCugRowStatus=traceRcvrX25DnaCugRowStatus, traceRcvrX25DnaNumberingPlanIndicator=traceRcvrX25DnaNumberingPlanIndicator, traceRcvrX25DnaCugIncCalls=traceRcvrX25DnaCugIncCalls, x25TraceRcvrCapabilitiesBD=x25TraceRcvrCapabilitiesBD, traceSessionX25VcStorageType=traceSessionX25VcStorageType, traceSessionX25VcFrmRetryTimeouts=traceSessionX25VcFrmRetryTimeouts, traceSessionX25VcWrTriggers=traceSessionX25VcWrTriggers, traceSessionX25VcCallReferenceNumber=traceSessionX25VcCallReferenceNumber)
| 179.676871 | 8,901 | 0.775523 |
7948077f51ef43c4716525281468ba3414bcf24a | 2,118 | py | Python | 2020/17/17 _part_1.py | Sveder/advent_of_code | 57a36bc3066fcba6330d4d579de053e8b7e78c74 | [
"CC0-1.0"
] | null | null | null | 2020/17/17 _part_1.py | Sveder/advent_of_code | 57a36bc3066fcba6330d4d579de053e8b7e78c74 | [
"CC0-1.0"
] | null | null | null | 2020/17/17 _part_1.py | Sveder/advent_of_code | 57a36bc3066fcba6330d4d579de053e8b7e78c74 | [
"CC0-1.0"
] | null | null | null | import copy
import itertools
input = """###..#..
.#######
#####...
#..##.#.
###..##.
##...#..
..#...#.
.#....##"""
#
# input = """.#.
# ..#
# ###"""
W = H = 3
cycles_count = 6
def step(world):
size = len(world[0])
new_size = size + 1
new_world = copy.deepcopy(world)
# RESIZE PART:
# Add new planes and empty world to make sure we have enough canvas to draw on:
new_world.append([['.'] * size] * size)
new_world.insert(0, [['.'] * size] * size)
for i, plane in enumerate(new_world):
new_plane = [['.'] * (new_size + 1)]
for line in plane:
new_plane += [['.'] + line + ['.']]
new_plane += [['.'] * (new_size + 1)]
new_world[i] = new_plane
# Now we have enough room to grow, actually grow:
directions = list(itertools.product((-1, 0, 1), repeat=3))
directions.remove((0, 0, 0))
newer_world = copy.deepcopy(new_world)
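    # Neighbour lookups that run off the far edge raise IndexError and are
    # skipped; negative indices wrap around, but only onto the fresh '.'
    # padding, so the live-neighbour counts stay correct.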
for z, plane in enumerate(new_world):
for y, line in enumerate(plane):
for x, cell in enumerate(line):
n_count = 0
for dz, dy, dx in directions:
try:
friend = new_world[z + dz][y + dy][x + dx]
if friend == "#":
n_count += 1
except IndexError:
pass
if cell == '.' and n_count == 3:
newer_world[z][y][x] = '#'
elif cell == '#' and n_count not in (2, 3):
newer_world[z][y][x] = '.'
return newer_world
def print_world(world):
for i, z in enumerate(world):
print("z=%s" % i)
for y in z:
print("".join(y))
print()
cur_world = []
for line in input.split('\n'):
cur_line = [i for i in line]
cur_world.append(cur_line)
cur_world = [cur_world]
for i in range(cycles_count):
print("Cycle:", i)
print_world(cur_world)
W += 1
cur_world = step(cur_world)
alive = 0
for plane in cur_world:
for line in plane:
alive += line.count('#')
print("Alive:", alive) | 21.835052 | 83 | 0.487252 |
7948091978e96e950dde7414eec430e6cf6f7ea3 | 3,096 | py | Python | qd_english/qd_english/settings.py | smujm/ScrapyProjects | 04e9eb42c64805475893be595db4f3b6530ba597 | [
"MIT"
] | null | null | null | qd_english/qd_english/settings.py | smujm/ScrapyProjects | 04e9eb42c64805475893be595db4f3b6530ba597 | [
"MIT"
] | null | null | null | qd_english/qd_english/settings.py | smujm/ScrapyProjects | 04e9eb42c64805475893be595db4f3b6530ba597 | [
"MIT"
] | null | null | null | # Scrapy settings for qd_english project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://docs.scrapy.org/en/latest/topics/settings.html
# https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'qd_english'
SPIDER_MODULES = ['qd_english.spiders']
NEWSPIDER_MODULE = 'qd_english.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'qd_english (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
#COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
#}
# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
# 'qd_english.middlewares.QdEnglishSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
# 'qd_english.middlewares.QdEnglishDownloaderMiddleware': 543,
#}
# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'qd_english.pipelines.QdEnglishPipeline': 300,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| 34.786517 | 103 | 0.780039 |
794809beee4a05ad36743f08bdfb1b86720338b0 | 3,082 | py | Python | test/database_test.py | lvzheqi/StreamingEventCompliance | 3a9470f9b0b670c814864369f22e1f1eacef7bad | [
"BSD-2-Clause"
] | 3 | 2018-10-16T15:14:41.000Z | 2019-09-04T09:38:55.000Z | test/database_test.py | lvzheqi/StreamingEventCompliance | 3a9470f9b0b670c814864369f22e1f1eacef7bad | [
"BSD-2-Clause"
] | 2 | 2021-03-31T19:00:14.000Z | 2021-12-13T19:51:46.000Z | test/database_test.py | lvzheqi/StreamingEventCompliance | 3a9470f9b0b670c814864369f22e1f1eacef7bad | [
"BSD-2-Clause"
] | 2 | 2018-10-16T15:14:43.000Z | 2019-12-16T13:58:28.000Z | import unittest
from streaming_event_compliance.database import dbtools
from streaming_event_compliance.objects.automata import automata, alertlog
from streaming_event_compliance import app
class DBToolsTest(unittest.TestCase):
def setUp(self):
dbtools.empty_tables()
def create_automata(self):
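        # Build four small automata (one per window size 1-4) with a handful of
        # connections each; the tests below round-trip them through the database.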
auto1 = automata.Automata()
auto2 = automata.Automata()
auto3 = automata.Automata()
auto4 = automata.Automata()
auto1.update_automata(automata.Connection('A', 'B', 1))
auto1.update_automata(automata.Connection('A', 'B', 1))
auto1.update_automata(automata.Connection('A', 'C', 1))
auto2.update_automata(automata.Connection('A,B', 'B,C', 1))
auto2.update_automata(automata.Connection('B,D', 'D,B', 1))
auto3.update_automata(automata.Connection('A,D,D', 'C,D,D', 1))
auto4.update_automata(automata.Connection('A,B,A,W', 'B,C,S,S', 1))
auto1.set_probability()
auto2.set_probability()
auto3.set_probability()
auto4.set_probability()
return {1: auto1, 2: auto2, 3: auto3, 4: auto4}
def test_node_and_connection(self):
ws = app.config['WINDOW_SIZE']
if ws == [1, 2, 3, 4]:
autos = self.create_automata()
dbtools.insert_node_and_connection(autos)
autos2, status = dbtools.init_automata_from_database()
self.assertEqual(status, 1)
self.assertEqual(repr(autos), repr(autos2))
def test_alert_log(self):
ws = app.config['WINDOW_SIZE']
if ws == [1, 2, 3, 4]:
uuid = '1'
dbtools.create_client('1')
alog1 = alertlog.AlertLog()
alog2 = alertlog.AlertLog()
alog3 = alertlog.AlertLog()
alog4 = alertlog.AlertLog()
alog1.update_alert_record(alertlog.AlertRecord(uuid, 'A', 'B', 1))
alog1.update_alert_record(alertlog.AlertRecord(uuid, 'A', 'B', 1))
alog1.update_alert_record(alertlog.AlertRecord(uuid, 'B', 'B', 1))
alog2.update_alert_record(alertlog.AlertRecord(uuid, 'A,C', 'B,D', 1))
alog2.update_alert_record(alertlog.AlertRecord(uuid, 'A,C', 'B,R', 1))
alog2.update_alert_record(alertlog.AlertRecord(uuid, 'A,W', 'B,W', 1))
alog3.update_alert_record(alertlog.AlertRecord(uuid, 'A,A,W', 'B,S,S', 1))
alog4.update_alert_record(alertlog.AlertRecord(uuid, 'A,L,K,K', 'B,S,S,D', 1))
alogs = {1: alog1, 2: alog2, 3: alog3, 4: alog4}
dbtools.insert_alert_log(alogs)
alogs2, status = dbtools.init_alert_log_from_database(uuid)
self.assertEqual(repr(alogs), repr(alogs2))
def test_user(self):
dbtools.create_client('1')
dbtools.create_client('2')
dbtools.update_client_status('1', True)
dbtools.update_client_status('2', False)
self.assertEqual(dbtools.check_client_status('1'), True)
self.assertEqual(dbtools.check_client_status('2'), False)
if __name__ == '__main__':
unittest.main()
| 43.408451 | 90 | 0.629786 |
794809f9cec19936467c6329b9bb3362b22dd70a | 629 | py | Python | sample-input-h2.py | trex47/MD-copy | 915919ce1f541f93d35ebfa1461350d861a3e6a7 | [
"BSD-3-Clause"
] | 1 | 2021-03-25T09:13:16.000Z | 2021-03-25T09:13:16.000Z | sample-input-h2.py | trex47/MD-copy | 915919ce1f541f93d35ebfa1461350d861a3e6a7 | [
"BSD-3-Clause"
] | null | null | null | sample-input-h2.py | trex47/MD-copy | 915919ce1f541f93d35ebfa1461350d861a3e6a7 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T19:04:32.000Z | 2020-12-02T19:04:32.000Z | from mmd.molecule import Molecule
from mmd.postscf import PostSCF
import numpy as np
h2 = """
0 1
H 0.000000000 0.000000000 -0.368652
H 0.000000000 0.000000000 0.368652
"""
# init molecule and build integrals
mol = Molecule(geometry=h2,basis='6-311G**')
# do the SCF
mol.RHF()
# do MP2
PostSCF(mol).MP2()
#print(mol.CAP)
np.savetxt("cap_h2.dat",mol.CAP,'%10.6f',delimiter=" ")
#P_MO = np.real(np.dot(np.transpose(mol.CO),np.dot(mol.P,mol.CO)))
#DEN = np.real(np.dot(np.transpose(mol.CO),np.dot(mol.CAP,mol.CO)))
#print(DEN)
#new = np.real(np.dot(mol.CAP,mol.P))
#trace = np.trace(new)
#print(trace)
| 17.971429 | 67 | 0.666137 |
79480add2be70cf64764691383e0508c1acd78a7 | 8,894 | py | Python | telemetry/telemetry/internal/results/page_test_results_unittest.py | Martijnve23/catapult | 5c63b19d221af6a12889e8727acc85d93892cab7 | [
"BSD-3-Clause"
] | 1,894 | 2015-04-17T18:29:53.000Z | 2022-03-28T22:41:06.000Z | telemetry/telemetry/internal/results/page_test_results_unittest.py | Martijnve23/catapult | 5c63b19d221af6a12889e8727acc85d93892cab7 | [
"BSD-3-Clause"
] | 4,640 | 2015-07-08T16:19:08.000Z | 2019-12-02T15:01:27.000Z | telemetry/telemetry/internal/results/page_test_results_unittest.py | Martijnve23/catapult | 5c63b19d221af6a12889e8727acc85d93892cab7 | [
"BSD-3-Clause"
] | 698 | 2015-06-02T19:18:35.000Z | 2022-03-29T16:57:15.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
import json
import shutil
import sys
import tempfile
import unittest
import mock
from telemetry.core import exceptions
from telemetry.internal.results import page_test_results
from telemetry.internal.results import results_options
from telemetry.testing import test_stories
from tracing.trace_data import trace_data
def _CreateException():
try:
raise exceptions.IntentionalException
except Exception: # pylint: disable=broad-except
return sys.exc_info()
class PageTestResultsTest(unittest.TestCase):
def setUp(self):
self.stories = test_stories.DummyStorySet(['foo', 'bar', 'baz'])
self.intermediate_dir = tempfile.mkdtemp()
self._time_module = mock.patch(
'telemetry.internal.results.page_test_results.time').start()
self._time_module.time.return_value = 0
def tearDown(self):
shutil.rmtree(self.intermediate_dir)
mock.patch.stopall()
@property
def mock_time(self):
return self._time_module.time
def CreateResults(self, **kwargs):
kwargs.setdefault('intermediate_dir', self.intermediate_dir)
return page_test_results.PageTestResults(**kwargs)
def ReadTestResults(self):
return results_options.ReadTestResults(self.intermediate_dir)
def testFailures(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
results.Fail(_CreateException())
with results.CreateStoryRun(self.stories[1]):
pass
self.assertTrue(results.had_failures)
test_results = self.ReadTestResults()
self.assertEqual(len(test_results), 2)
self.assertEqual(test_results[0]['status'], 'FAIL')
self.assertEqual(test_results[1]['status'], 'PASS')
def testSkips(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
results.Skip('testing reason')
with results.CreateStoryRun(self.stories[1]):
pass
self.assertTrue(results.had_skips)
test_results = self.ReadTestResults()
self.assertEqual(len(test_results), 2)
self.assertEqual(test_results[0]['status'], 'SKIP')
self.assertEqual(test_results[1]['status'], 'PASS')
def testBenchmarkInterruption(self):
reason = 'This is a reason'
with self.CreateResults() as results:
self.assertIsNone(results.benchmark_interruption)
self.assertFalse(results.benchmark_interrupted)
results.InterruptBenchmark(reason)
self.assertEqual(results.benchmark_interruption, reason)
self.assertTrue(results.benchmark_interrupted)
def testUncaughtExceptionInterruptsBenchmark(self):
with self.assertRaises(ValueError):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
raise ValueError('expected error')
self.assertTrue(results.benchmark_interrupted)
# In Python2, the exc_value has a extra comma like:
# ValueError('expected error',)
# while in Python3, exc_value is like:
# ValueError('expected error')
self.assertIn("ValueError('expected error'",
results.benchmark_interruption)
def testPassesNoSkips(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
results.Fail(_CreateException())
with results.CreateStoryRun(self.stories[1]):
pass
with results.CreateStoryRun(self.stories[2]):
results.Skip('testing reason')
test_results = self.ReadTestResults()
self.assertEqual(len(test_results), 3)
self.assertEqual(test_results[0]['status'], 'FAIL')
self.assertEqual(test_results[1]['status'], 'PASS')
self.assertEqual(test_results[2]['status'], 'SKIP')
def testAddMeasurementAsScalar(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
results.AddMeasurement('a', 'seconds', 3)
test_results = self.ReadTestResults()
    self.assertEqual(len(test_results), 1)
measurements = results_options.ReadMeasurements(test_results[0])
self.assertEqual(measurements, {'a': {'unit': 'seconds', 'samples': [3]}})
def testAddMeasurementAsList(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
results.AddMeasurement('a', 'seconds', [1, 2, 3])
test_results = self.ReadTestResults()
    self.assertEqual(len(test_results), 1)
measurements = results_options.ReadMeasurements(test_results[0])
self.assertEqual(measurements,
{'a': {'unit': 'seconds', 'samples': [1, 2, 3]}})
def testNonNumericMeasurementIsInvalid(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
with self.assertRaises(TypeError):
results.AddMeasurement('url', 'string', 'foo')
def testMeasurementUnitChangeRaises(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
results.AddMeasurement('a', 'seconds', 3)
with results.CreateStoryRun(self.stories[1]):
with self.assertRaises(ValueError):
results.AddMeasurement('a', 'foobgrobbers', 3)
def testNoSuccessesWhenAllStoriesFailOrSkip(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
results.Fail('message')
with results.CreateStoryRun(self.stories[1]):
results.Skip('message')
self.assertFalse(results.had_successes)
def testAddTraces(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
results.AddTraces(trace_data.CreateTestTrace(1))
with results.CreateStoryRun(self.stories[1]):
results.AddTraces(trace_data.CreateTestTrace(2))
test_results = self.ReadTestResults()
self.assertEqual(len(test_results), 2)
for test_result in test_results:
trace_names = [name for name in test_result['outputArtifacts']
if name.startswith('trace/')]
      self.assertEqual(len(trace_names), 1)
def testAddTracesForSameStory(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
results.AddTraces(trace_data.CreateTestTrace(1))
results.AddTraces(trace_data.CreateTestTrace(2))
test_results = self.ReadTestResults()
self.assertEqual(len(test_results), 1)
for test_result in test_results:
trace_names = [name for name in test_result['outputArtifacts']
if name.startswith('trace/')]
      self.assertEqual(len(trace_names), 2)
def testDiagnosticsAsArtifact(self):
with self.CreateResults(benchmark_name='some benchmark',
benchmark_description='a description') as results:
results.AddSharedDiagnostics(
owners=['test'],
bug_components=['1', '2'],
documentation_urls=[['documentation', 'url']],
architecture='arch',
device_id='id',
os_name='os',
os_version='ver',
)
with results.CreateStoryRun(self.stories[0]):
pass
with results.CreateStoryRun(self.stories[1]):
pass
test_results = self.ReadTestResults()
self.assertEqual(len(test_results), 2)
for test_result in test_results:
self.assertEqual(test_result['status'], 'PASS')
artifacts = test_result['outputArtifacts']
self.assertIn(page_test_results.DIAGNOSTICS_NAME, artifacts)
with open(artifacts[page_test_results.DIAGNOSTICS_NAME]['filePath']) as f:
diagnostics = json.load(f)
self.assertEqual(diagnostics, {
'diagnostics': {
'benchmarks': ['some benchmark'],
'benchmarkDescriptions': ['a description'],
'owners': ['test'],
'bugComponents': ['1', '2'],
'documentationLinks': [['documentation', 'url']],
'architectures': ['arch'],
'deviceIds': ['id'],
'osNames': ['os'],
'osVersions': ['ver'],
},
})
def testCreateArtifactsForDifferentStories(self):
with self.CreateResults() as results:
with results.CreateStoryRun(self.stories[0]):
with results.CreateArtifact('log.txt') as log_file:
log_file.write('story0\n')
with results.CreateStoryRun(self.stories[1]):
with results.CreateArtifact('log.txt') as log_file:
log_file.write('story1\n')
test_results = self.ReadTestResults()
with open(test_results[0]['outputArtifacts']['log.txt']['filePath']) as f:
self.assertEqual(f.read(), 'story0\n')
with open(test_results[1]['outputArtifacts']['log.txt']['filePath']) as f:
self.assertEqual(f.read(), 'story1\n')
| 37.058333 | 80 | 0.687317 |
79480bb8ba3893543d8703504ba1500af9d3edb4 | 8,222 | py | Python | test/pybind_test/single_node_test.py | xjqbest/HugeCTR | 0b1c92d5e65891dfdd90d917bc6d520d0ca5d1e1 | [
"Apache-2.0"
] | 130 | 2021-10-11T11:55:28.000Z | 2022-03-31T21:53:07.000Z | test/pybind_test/single_node_test.py | xjqbest/HugeCTR | 0b1c92d5e65891dfdd90d917bc6d520d0ca5d1e1 | [
"Apache-2.0"
] | 72 | 2021-10-09T04:59:09.000Z | 2022-03-31T11:27:54.000Z | test/pybind_test/single_node_test.py | xjqbest/HugeCTR | 0b1c92d5e65891dfdd90d917bc6d520d0ca5d1e1 | [
"Apache-2.0"
] | 29 | 2021-11-03T22:35:01.000Z | 2022-03-30T13:11:59.000Z | import hugectr
import json
import sys
import argparse
DATA_READER_TYPE = {"Norm": hugectr.DataReaderType_t.Norm,
"Raw": hugectr.DataReaderType_t.Raw,
"Parquet": hugectr.DataReaderType_t.Parquet}
CHECK_TYPE = {"Sum": hugectr.Check_t.Sum,
"None": hugectr.Check_t.Non}
OPTIMIZER_TYPE = {"Adam": hugectr.Optimizer_t.Adam,
"MomentumSGD": hugectr.Optimizer_t.MomentumSGD,
"Nesterov": hugectr.Optimizer_t.Nesterov,
"SGD": hugectr.Optimizer_t.SGD}
UPDATE_TYPE = {"Global": hugectr.Update_t.Global,
"LazyGlobal": hugectr.Update_t.LazyGlobal,
"Local": hugectr.Update_t.Local}
def parse_args(parser):
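    # Flatten the legacy JSON benchmark config (solver / optimizer / first data
    # layer) into plain attributes on the argparse namespace used below.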
args = parser.parse_args()
json_config = json.load(open(args.json_file, "rb"))
solver_config = json_config['solver']
optimizer_config = json_config['optimizer']
data_config = json_config['layers'][0]
args.source = data_config['source']
args.eval_source = data_config['eval_source']
if 'format' not in data_config:
args.data_reader_type = hugectr.DataReaderType_t.Norm
else:
args.data_reader_type = DATA_READER_TYPE[data_config.get('format', 'Norm')]
args.check_type = CHECK_TYPE[data_config['check']]
args.cache_eval_data = data_config.get('cache_eval_data', 0)
args.num_samples = data_config.get('num_samples', 0)
args.eval_num_samples = data_config.get('eval_num_samples', 0)
args.float_label_dense = data_config.get('float_label_dense', False)
args.num_workers = data_config.get('num_workers', 16)
args.slot_size_array = data_config.get('slot_size_array', [])
args.optimizer_type = OPTIMIZER_TYPE[optimizer_config["type"]]
args.update_type = UPDATE_TYPE[optimizer_config['update_type']]
args.learning_rate = 0.001
args.beta1 = 0.9
args.beta2 = 0.999
args.epsilon = 0.0000001
args.initial_accu_value = 0.0
args.momentum_factor = 0.0
args.atomic_update = True
args.warmup_steps = 1
args.decay_start = 0
args.decay_steps = 1
args.decay_power = 2.0
args.end_lr = 0.0
if 'adam_hparam' in optimizer_config:
args.learning_rate = optimizer_config['adam_hparam']['learning_rate']
args.beta1 = optimizer_config['adam_hparam']['beta1']
args.beta2 = optimizer_config['adam_hparam']['beta2']
args.epsilon = optimizer_config['adam_hparam']['epsilon']
if 'adagrad_hparam' in optimizer_config:
args.initial_accu_value = optimizer_config['adagrad_hparam']['initial_accu_value']
args.epsilon = optimizer_config['adagrad_hparam']['epsilon']
if 'momentum_sgd_hparam' in optimizer_config:
args.learning_rate = optimizer_config['momentum_sgd_hparam']['learning_rate']
args.momentum_factor = optimizer_config['momentum_sgd_hparam']['momentum_factor']
if 'nesterov_hparam' in optimizer_config:
args.learning_rate = optimizer_config['nesterov_hparam']['learning_rate']
args.momentum_factor = optimizer_config['nesterov_hparam']['momentum_factor']
if 'sgd_hparam' in optimizer_config:
args.learning_rate = optimizer_config['sgd_hparam']['learning_rate']
args.warmup_steps = optimizer_config['sgd_hparam'].get('warmup_steps', 1)
args.decay_start = optimizer_config['sgd_hparam'].get('decay_start', 0)
args.decay_steps = optimizer_config['sgd_hparam'].get('decay_steps', 1)
args.decay_power = optimizer_config['sgd_hparam'].get('decay_power', 2.0)
args.end_lr = optimizer_config['sgd_hparam'].get('end_lr', 0)
args.batchsize = solver_config.get('batchsize', 2048)
args.batchsize_eval = solver_config.get('batchsize_eval', args.batchsize)
args.snapshot = solver_config.get('snapshot', 100000000)
args.max_eval_batches = solver_config.get('max_eval_batches', 100)
args.max_iter = solver_config.get('max_iter', 10000)
args.eval_interval = solver_config.get('eval_interval', 1000)
args.display = solver_config.get('display', 200)
vvgpu = solver_config['gpu']
if isinstance(vvgpu[0], list):
args.vvgpu = vvgpu
else:
args.vvgpu = [vvgpu]
args.use_mixed_precision = False
args.scaler = 1.0
if 'mixed_precision' in solver_config:
args.use_mixed_precision = True
args.scaler = solver_config['mixed_precision']
args.i64_input_key = False
if 'input_key_type' in solver_config and solver_config['input_key_type'] == 'I64':
args.i64_input_key = True
if 'auc_threshold' in solver_config:
args.auc_threshold = solver_config['auc_threshold']
args.auc_check = True
else:
args.auc_threshold = 0.5
args.auc_check = False
return args
def train(model, max_iter, display, max_eval_batches, eval_interval, auc_threshold):
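    # Manual training loop: advance the learning-rate scheduler every iteration
    # and evaluate periodically, stopping once the AUC threshold is reached.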
model.start_data_reading()
lr_sch = model.get_learning_rate_scheduler()
reach_auc_threshold = False
for iter in range(max_iter):
lr = lr_sch.get_next()
model.set_learning_rate(lr)
model.train(False)
if (iter%display == 0):
loss = model.get_current_loss()
print("[HUGECTR][INFO] iter: {}; loss: {}".format(iter, loss))
if (iter%eval_interval == 0 and iter != 0):
for _ in range(max_eval_batches):
model.eval()
metrics = model.get_eval_metrics()
print("[HUGECTR][INFO] iter: {}, metrics: {}".format(iter, metrics))
if metrics[0][1] > auc_threshold:
reach_auc_threshold = True
break
if reach_auc_threshold == False:
raise RuntimeError("Cannot reach the AUC threshold {}".format(auc_threshold))
else:
print("Successfully reach the AUC threshold {}".format(auc_threshold))
def single_node_test(args):
solver = hugectr.CreateSolver(max_eval_batches = args.max_eval_batches,
batchsize_eval = args.batchsize_eval,
batchsize = args.batchsize,
vvgpu = args.vvgpu,
lr = args.learning_rate,
warmup_steps = args.warmup_steps,
decay_start = args.decay_start,
decay_steps = args.decay_steps,
decay_power = args.decay_power,
end_lr = args.end_lr,
i64_input_key = args.i64_input_key,
use_mixed_precision = args.use_mixed_precision,
scaler = args.scaler)
reader = hugectr.DataReaderParams(data_reader_type = args.data_reader_type,
source = [args.source],
eval_source = args.eval_source,
check_type = args.check_type,
cache_eval_data = args.cache_eval_data,
num_samples = args.num_samples,
eval_num_samples = args.eval_num_samples,
float_label_dense = args.float_label_dense,
num_workers = args.num_workers,
slot_size_array = args.slot_size_array)
optimizer = hugectr.CreateOptimizer(optimizer_type = args.optimizer_type,
beta1 = args.beta1,
beta2 = args.beta2,
epsilon = args.epsilon,
update_type = args.update_type,
momentum_factor = args.momentum_factor,
atomic_update = args.atomic_update)
model = hugectr.Model(solver, reader, optimizer)
model.construct_from_json(graph_config_file = args.json_file, include_dense_network = True)
model.compile()
model.summary()
if args.auc_check:
train(model, args.max_iter, args.display, args.max_eval_batches, args.eval_interval, args.auc_threshold)
else:
model.fit(max_iter = args.max_iter, display = args.display, eval_interval = args.eval_interval, snapshot = args.snapshot)
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--json-file', type=str, required = True, help='JSON configuration file')
args = parse_args(parser)
single_node_test(args)
| 47.802326 | 125 | 0.653369 |
79480c31164cbab7a3ac8ce70cda95d80b18aec2 | 808 | py | Python | manage.py | boxed/related_how | 9184b36fd07233426f80adde294eea0b2dfa8c7c | [
"BSD-3-Clause"
] | 2 | 2020-06-04T18:04:33.000Z | 2021-04-09T14:39:59.000Z | manage.py | boxed/relatedhow | 9184b36fd07233426f80adde294eea0b2dfa8c7c | [
"BSD-3-Clause"
] | null | null | null | manage.py | boxed/relatedhow | 9184b36fd07233426f80adde294eea0b2dfa8c7c | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "relatedhow.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
| 35.130435 | 77 | 0.643564 |
79480c5fd27378c21569e9f5db1f3471d679145b | 3,497 | py | Python | BriVL-code-inference/evaluation/XYB_box_extract.py | BAAI-WuDao/BriVL | bb8d95b230e692b2669a4234bb454f75d66a5e5b | [
"MIT"
] | 130 | 2021-08-18T07:44:10.000Z | 2022-03-28T02:10:40.000Z | BriVL-code-inference/evaluation/XYB_box_extract.py | FesianXu/BriVL | 8d2c2487284f4b5c015fa3cc19b99c9171cc1ffa | [
"MIT"
] | 12 | 2021-08-31T07:27:53.000Z | 2021-12-24T07:27:58.000Z | BriVL-code-inference/evaluation/XYB_box_extract.py | FesianXu/BriVL | 8d2c2487284f4b5c015fa3cc19b99c9171cc1ffa | [
"MIT"
] | 19 | 2021-09-02T06:34:24.000Z | 2022-03-23T01:47:39.000Z | import sys
import os
sys.path.append(os.path.abspath(os.path.dirname(os.path.realpath(__file__))+'/'+'..'))
import time
import argparse
import torch
import json
from tqdm import tqdm
import math
import numpy as np
import random
from utils import getLanMask
from models import build_network
from dataset import build_moco_dataset
from utils.config import cfg_from_yaml_file, cfg
parser = argparse.ArgumentParser()
parser.add_argument('--pretrainRes', type=str, default=None)
parser.add_argument('--load_checkpoint', type=str, default=None)
parser.add_argument('--data_dir', type=str, default=None)
parser.add_argument('--feat_save_dir', type=str, default=None)
parser.add_argument('--gpu_ids', type=str, default='0')
parser.add_argument('--option', type=str, default='img_text')
parser.add_argument('--seed', type=int, default=111)
parser.add_argument('--gpu', type=int, default=0)
parser.add_argument('--cfg_file', type=str, default='../cfg/test_xyb.yml')
args = parser.parse_args()
cfg_from_yaml_file(args.cfg_file, cfg)
param_group = { 'img_text': {'img_fname':'np_img.npy', 'text_fname':'np_text.npy'}}
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_ids
torch.manual_seed(args.seed) # cpu
torch.cuda.manual_seed(args.seed) #gpu
np.random.seed(args.seed) #numpy
random.seed(args.seed) #random and transforms
torch.backends.cudnn.deterministic=True # cudnn
torch.cuda.set_device(args.gpu)
dataloader_test = build_moco_dataset(args, cfg, is_training=False)
model = build_network(cfg.MODEL)
model = model.cuda()
model_component = torch.load(args.load_checkpoint, map_location=torch.device('cpu'))
model.learnable.load_state_dict(model_component['learnable'])
model = torch.nn.DataParallel(model)
model.eval()
if not os.path.exists(args.feat_save_dir):
os.makedirs(args.feat_save_dir)
print('Successfully create feature save dir {} !'.format(args.feat_save_dir))
print('Load model from {:s}'.format(args.load_checkpoint))
print('Save features to dir {:s}'.format(args.feat_save_dir))
with torch.no_grad():
num_samples = len(dataloader_test)
np_text, np_img = None, None
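    # Run the frozen model over the evaluation set and accumulate image and
    # text features batch by batch; they are dumped as .npy files afterwards.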
for idx, batch in enumerate(tqdm(dataloader_test)):
# data
imgs = batch[0]
img_lens = batch[1].view(-1)
texts = batch[2]
text_lens = batch[3]
image_boxs = batch[4]
bsz, textlen = texts.size(0), texts.size(1)
# get image mask
imgMask = getLanMask(img_lens, cfg.MODEL.MAX_IMG_LEN)
imgMask = imgMask.cuda()
# get language mask
textMask = getLanMask(text_lens, cfg.MODEL.MAX_TEXT_LEN)
textMask = textMask.cuda()
imgs = imgs.cuda()
texts = texts.cuda()
image_boxs = image_boxs.cuda() # <BSZ, 36, 4>
text_lens = text_lens.cuda()
feature_group = model(imgs, texts, imgMask, textMask, text_lens, image_boxs, is_training=False)
img, text = feature_group[args.option]
if np_img is None:
np_img = img.cpu().numpy() # <bsz, featdim>
np_text = text.cpu().numpy() # <bsz, cap_num, featdim>
else:
np_img = np.concatenate((np_img, img.cpu().numpy()), axis=0)
np_text = np.concatenate((np_text, text.cpu().numpy()), axis=0)
fn_img = os.path.join(args.feat_save_dir, param_group[args.option]['img_fname'])
fn_text = os.path.join(args.feat_save_dir, param_group[args.option]['text_fname'])
np.save(fn_img, np_img)
np.save(fn_text, np_text)
| 32.990566 | 103 | 0.697455 |